"""The BZIP2 format analyzer helper implementation."""
from dfvfs.analyzer import analyzer
from dfvfs.analyzer import analyzer_helper
from dfvfs.analyzer import specification
from dfvfs.lib import definitions
class BZIP2AnalyzerHelper(analyzer_helper.AnalyzerHelper):
"""Class that implements the BZIP2 analyzer helper."""
FORMAT_CATEGORIES = frozenset([
definitions.FORMAT_CATEGORY_COMPRESSED_STREAM])
TYPE_INDICATOR = definitions.TYPE_INDICATOR_BZIP2
def GetFormatSpecification(self):
"""Retrieves the format specification.
Returns:
FormatSpecification: format specification or None if the format cannot
be defined by a specification object.
"""
format_specification = specification.FormatSpecification(
self.type_indicator)
# TODO: add support for signature chains so that we add the 'BZ' at
# offset 0.
# BZIP2 compressed stream signature.
format_specification.AddNewSignature(b'\x31\x41\x59\x26\x53\x59', offset=4)
return format_specification
analyzer.Analyzer.RegisterHelper(BZIP2AnalyzerHelper())
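# A minimal sketch (not part of dfvfs) illustrating the header layout the
# signature above matches: a bzip2 stream starts with 'BZ', 'h' and a block
# size digit, and the 6-byte block magic 0x314159265359 follows at offset 4.
if __name__ == '__main__':
  import bz2
  sample = bz2.compress(b'example data')
  assert sample[:3] == b'BZh'
  assert sample[4:10] == b'\x31\x41\x59\x26\x53\x59'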
|
import select
import serial
import socket
WRITE_CHUNK_SIZE = 4096
READ_CHUNK_SIZE = 4096
class RemoteSerialClient(object):
"""
Represents a remote client reading serial data over a socket. This class
handles buffering data and writing data to the client over its socket.
"""
def __init__(self, socket):
self.buffer = ""
self.socket = socket
def write(self):
if self.buffer:
sent = self.socket.send(self.buffer[:WRITE_CHUNK_SIZE])
if sent:
self.buffer = self.buffer[sent:]
def push(self, data):
self.buffer += data
def flush(self):
while self.buffer:
try:
sent = self.socket.send(self.buffer)
except socket.error:
return
if sent:
self.buffer = self.buffer[sent:]
def close(self):
self.socket.shutdown(socket.SHUT_RDWR)
self.socket.close()
def fileno(self):
return self.socket.fileno()
def has_unsent_data(self):
return len(self.buffer) > 0
class FileClient(object):
"""
Represents a file 'client' to write serial data to. This is only usable if
the operating system allows polling on actual file descriptors.
"""
def __init__(self, fname):
self.file = open(fname, "ab+", READ_CHUNK_SIZE)
self.buffer = ""
def write(self):
self.file.write(self.buffer)
self.buffer = ""
def push(self, data):
self.buffer += data
def flush(self):
if self.buffer:
self.file.write(self.buffer)
self.file.flush()
def close(self):
self.file.close()
def fileno(self):
return self.file.fileno()
def has_unsent_data(self):
return len(self.buffer) > 0
class SerialServer(object):
"""
Implements a (mostly) asynchronous Serial server that reads in data from
a serial port and broadcasts it to any listening clients over TCP,
optionally logging data to a file.
"""
def __init__(self, address, log_filename=None, logger=None):
self.socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.socket.setblocking(False)
#self.socket.setsockopt()
self.socket.bind(address)
self.running = False
self.shutting_down = False
self.clients = {}
self.serial_port = None
self.log_file = None
if log_filename is not None:
self.log_to(log_filename)
def connect(self, port, **settings):
"""
Connect to the serial port named `port`, passing `settings` through to
serial.Serial, and open it for serial broadcast. The timeout defaults
to 0 if it is not specified.
"""
if "timeout" not in settings:
settings["timeout"] = 0
self.serial_port = serial.Serial(port, **settings)
def log_to(self, fname):
"""
Opens the file at path `fname` in binary append mode and records all
serial data to it.
"""
self.log_file = open(fname, "ab+", READ_CHUNK_SIZE)
def set_serial_port(self, serial_port):
"""
Set an already opened port as the port for serial broadcast. To
avoid blocking when no data is being read, the timeout should be set.
"""
self.serial_port = serial_port
def set_log_file(self, f):
"""
Set an already opened file as the log file to record serial data to.
"""
if self.log_file is not None:
self.log_file.flush()
self.log_file.close()
self.log_file = f
def select_setup(self):
"Setup level trigger state for select(). Currently a nop"
pass
def select_add(self, fd):
"Add an fd to level trigger state for select(). Currently a nop"
pass
def select_remove(self, fd):
"Remove an fd from level trigger state for select(). Currently a nop"
pass
def select(self):
"""
Uses select() to check if there are incoming connections or writeable
sockets and returns a tuple of (connections_waiting, writeable_sockets)
"""
rlist, wlist, xlist = select.select([self.socket], self.clients.keys(), [], 0)
return (rlist != [], wlist)
def poll_setup(self):
"Setup level trigger state for poll()"
#Create the poll object that tracks which fds and events we care about.
self.poll_obj = select.poll()
#Register our server socket to check for incoming connections
self.poll_obj.register(self.socket, select.POLLIN)
def poll_add(self, fd):
"Add an fd to level trigger state for poll()"
#Register the fd for write availability events
self.poll_obj.register(fd, select.POLLOUT)
def poll_remove(self, fd):
"Remove an fd from level trigger state for poll()"
#Unregister the fd
self.poll_obj.unregister(fd)
def poll(self):
"""
Uses poll() to check if there are incoming connections or writeable
sockets and returns a tuple of (connections_waiting, writeable_sockets)
"""
connections_waiting = False
writeable = []
ready = self.poll_obj.poll(0)
for (fd, event) in ready:
if fd == self.socket.fileno():
connections_waiting = bool(event & select.POLLIN)
elif event & select.POLLOUT:
writeable.append(fd)
return (connections_waiting, writeable)
def accept(self):
"Accept a socket and return the client to handle it, or None on failure"
pair = self.socket.accept()
if pair is None:
return None
sock, addr = pair
print "Accepted client from %s" % (addr,)
client = RemoteSerialClient(sock)
return client
def run(self):
"Start listening for connections and broadcast serial data"
if self.serial_port is None:
raise ValueError("No serial port is connected - nothing to broadcast")
if hasattr(select, "poll"):
get_ready_io = self.poll
add_client = self.poll_add
remove_client = self.poll_remove
self.poll_setup()
else:
get_ready_io = self.select
add_client = self.select_add
remove_client = self.select_remove
self.select_setup()
self.socket.listen(1)
print "Broadcasting data from serial port '%s'" % self.serial_port.name
print "SerialServer listening on %s" % (self.socket.getsockname(),)
self.running = True
while self.running:
try:
#No way to check for this on windows, so just poll always.
data_waiting = not self.shutting_down
#Do level checking to see which sockets to service
connections_waiting, writeable = get_ready_io()
#Push data to clients
for fd in writeable:
client = self.clients[fd]
should_cleanup = False
try:
client.write()
except socket.error:
should_cleanup = True
if self.shutting_down and not client.has_unsent_data():
should_cleanup = True
if should_cleanup:
client.close()
remove_client(fd)
del self.clients[fd]
#Handle incoming connections from new clients
if connections_waiting and not self.shutting_down:
client = self.accept()
if client is not None:
self.clients[client.fileno()] = client
add_client(client)
#Poll for serial data to push to clients
if data_waiting and not self.shutting_down:
try:
data = self.read_serial_data()
except serial.SerialException:
#If the serial port breaks, stop accepting clients,
#flush existing data
print "ohnoes"
import traceback
traceback.print_exc()
data_waiting = False
self.shutting_down = True
self.socket.close()
else:
for client in self.clients.values():
client.push(data)
#Check if we're done flushing data and should finish shutting down
if self.shutting_down and not self.clients:
self.running = False
except KeyboardInterrupt:
#If we're already shutting down, just close down everything
if self.shutting_down:
for fd, client in self.clients.items():
client.close()
remove_client(fd)
del self.clients[fd]
self.running = False
#Otherwise just start a soft shutdown
else:
self.shutting_down = True
print "SerialServer shutdown"
def read_serial_data(self):
"Read in some serial data, if possible"
return self.serial_port.read(READ_CHUNK_SIZE)
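# A minimal sketch (assumed usage, not part of the server above): a TCP client
# that connects to the broadcast address and dumps whatever serial data the
# server pushes; host and port mirror the command-line defaults in the
# __main__ block below.
def example_listen(host="localhost", port=6543):
    import sys
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    sock.connect((host, port))
    try:
        while True:
            chunk = sock.recv(READ_CHUNK_SIZE)
            if not chunk:
                break
            sys.stdout.write(chunk)
    finally:
        sock.close()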
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser(description="Broadcast serial data over TCP")
parser.add_argument('port_name', nargs='+', metavar="port-name",
help="the serial port device name(s) to listen on")
parser.add_argument('--baud-rate', type=int, nargs='?', default=115200,
help="the baud rate at which the serial port communicates")
parser.add_argument('--host-name', nargs='?', default="localhost",
help="the hostname for the server to bind to")
parser.add_argument('--host-port', type=int, nargs='?', default=6543,
help="the port for the server to listen on")
parser.add_argument('--log-file', nargs='?',
help="the file to append all serial data to")
args = parser.parse_args()
server = SerialServer((args.host_name, args.host_port), args.log_file)
server.connect(args.port_name[0], baudrate=args.baud_rate, timeout=0.1)
server.run()
|
from oslo_serialization import jsonutils as json
from six.moves.urllib import parse as urllib
from tempest.lib.api_schema.response.compute.v2_1 import \
security_groups as schema
from tempest.lib.common import rest_client
from tempest.lib import exceptions as lib_exc
class SecurityGroupsClient(rest_client.RestClient):
def list_security_groups(self, **params):
"""List all security groups for a user."""
url = 'os-security-groups'
if params:
url += '?%s' % urllib.urlencode(params)
resp, body = self.get(url)
body = json.loads(body)
self.validate_response(schema.list_security_groups, resp, body)
return rest_client.ResponseBody(resp, body)
def show_security_group(self, security_group_id):
"""Get the details of a Security Group."""
url = "os-security-groups/%s" % security_group_id
resp, body = self.get(url)
body = json.loads(body)
self.validate_response(schema.get_security_group, resp, body)
return rest_client.ResponseBody(resp, body)
def create_security_group(self, **kwargs):
"""Create a new security group.
Available params: see http://developer.openstack.org/
api-ref-compute-v2.1.html#createSecGroup
"""
post_body = json.dumps({'security_group': kwargs})
resp, body = self.post('os-security-groups', post_body)
body = json.loads(body)
self.validate_response(schema.get_security_group, resp, body)
return rest_client.ResponseBody(resp, body)
def update_security_group(self, security_group_id, **kwargs):
"""Update a security group.
Available params: see http://developer.openstack.org/
api-ref-compute-v2.1.html#updateSecGroup
"""
post_body = json.dumps({'security_group': kwargs})
resp, body = self.put('os-security-groups/%s' % security_group_id,
post_body)
body = json.loads(body)
self.validate_response(schema.update_security_group, resp, body)
return rest_client.ResponseBody(resp, body)
def delete_security_group(self, security_group_id):
"""Delete the provided Security Group."""
resp, body = self.delete(
'os-security-groups/%s' % security_group_id)
self.validate_response(schema.delete_security_group, resp, body)
return rest_client.ResponseBody(resp, body)
def is_resource_deleted(self, id):
try:
self.show_security_group(id)
except lib_exc.NotFound:
return True
return False
@property
def resource_type(self):
"""Return the primary type of resource this client works with."""
return 'security_group'
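# A minimal sketch (assumed usage, not part of tempest): how a caller might
# drive this client. 'client' is presumed to be an already authenticated
# SecurityGroupsClient; the body keys follow the Nova os-security-groups API.
def _example_usage(client):
    body = client.create_security_group(name='example', description='demo')
    group_id = body['security_group']['id']
    client.update_security_group(group_id, description='updated demo')
    groups = client.list_security_groups()['security_groups']
    assert any(group['id'] == group_id for group in groups)
    client.delete_security_group(group_id)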
|
"""
Ring Master Daemon for ring orchestration
"""
import os
import sys
import optparse
import subprocess
import cPickle as pickle
from os.path import exists
from time import time, sleep, gmtime
from tempfile import mkstemp
from datetime import datetime
from os import stat, unlink, rename, close, fdopen, chmod
from swift.common import exceptions
from swift.common.ring import RingBuilder
from swift.common.utils import get_logger, readconf, TRUE_VALUES, json, \
lock_parent_directory
from srm.utils import get_md5sum, make_backup, Daemon, is_valid_ring, \
EmailNotify
class RingMasterServer(object):
def __init__(self, rms_conf):
conf = rms_conf['ringmasterd']
self.swiftdir = conf.get('swiftdir', '/etc/swift')
self.builder_files = \
{'account': conf.get('account_builder',
'/etc/swift/account.builder'),
'container': conf.get('container_builder',
'/etc/swift/container.builder'),
'object': conf.get('object_builder',
'/etc/swift/object.builder')}
self.ring_files = \
{'account': conf.get('account_ring',
'/etc/swift/account.ring.gz'),
'container': conf.get('container_ring',
'/etc/swift/container.ring.gz'),
'object': conf.get('object_ring',
'/etc/swift/object.ring.gz')}
self.debug = conf.get('debug_mode', 'n') in TRUE_VALUES
self.pause_file = conf.get('pause_file_path', '/tmp/.srm-pause')
self.default_weight_shift = float(conf.get('default_weight_shift',
'25.0'))
self.backup_dir = conf.get('backup_dir', '/etc/swift/backups')
self.recheck_interval = int(conf.get('interval', '120'))
self.recheck_after_change_interval = int(conf.get('change_interval',
'3600'))
self.mph_enabled = conf.get('min_part_hours_check', 'n') in TRUE_VALUES
self.sec_since_modified = int(conf.get('min_seconds_since_change',
'120'))
self.balance_threshold = float(conf.get('balance_threshold', '2'))
self.dispersion_cmd = conf.get('dispersion_cmd',
'/usr/bin/swift-dispersion-report')
self.dispersion_pct = {'container': float(conf.get('container_min_pct',
'99.75')),
'object': float(conf.get('object_min_pct',
'99.75'))}
self.lock_timeout = int(conf.get('lock_timeout', '90'))
window = conf.get('change_window', '0000,2400')
self.change_window = [int(x) for x in window.split(',')]
if self.debug:
conf['log_level'] = 'DEBUG'
self.logger = get_logger(conf, 'ringmasterd', self.debug)
if not os.access(self.swiftdir, os.W_OK):
self.logger.error('swift_dir is not writable. exiting!')
sys.exit(1)
if conf.get('email_notify', 'n') in TRUE_VALUES:
self.email_notify = EmailNotify(conf, self.logger)
else:
self.email_notify = None
def _emit_notify(self, source, message):
"Send out any configured notifications"
if self.email_notify:
self.email_notify.send_message(source, message)
def pause_if_asked(self):
"""Check if pause file exists and sleep until its removed if it does"""
if exists(self.pause_file):
self.logger.notice('--> Pause file found. Pausing orchestration!')
while exists(self.pause_file):
sleep(1)
self.logger.notice('--> Pause removed. Resuming orchestration!')
def rebalance_ring(self, builder):
"""Rebalance a ring
:param builder: builder to rebalance
:returns: True on successful rebalance, False if it fails.
"""
self.pause_if_asked()
devs_changed = builder.devs_changed
try:
last_balance = builder.get_balance()
parts, balance = builder.rebalance()
except exceptions.RingBuilderError:
self.logger.error("-> Rebalance failed!")
self.logger.exception('RingBuilderError')
return False
if not parts:
self.logger.notice("-> No partitions reassigned!")
self.logger.notice("-> (%d/%.02f)" % (parts, balance))
return False
if not devs_changed and abs(last_balance - balance) < 1:
self.logger.notice("-> Rebalance failed to change more than 1%!")
return False
self.logger.notice('--> Reassigned %d (%.02f%%) partitions. Balance '
'is %.02f.' % (parts, 100.0 * parts / builder.parts,
balance))
return True
def adjust_ring(self, builder):
"""Adjust device weights in a ring
:param builder: builder to adjust
"""
self.pause_if_asked()
for dev in builder.devs:
if not dev:
continue
if 'target_weight' in dev:
if 'weight_shift' in dev:
weight_shift = dev['weight_shift']
else:
weight_shift = self.default_weight_shift
if dev['weight'] == dev['target_weight']:
continue
elif dev['weight'] < dev['target_weight']:
if dev['weight'] + weight_shift \
< dev['target_weight']:
builder.set_dev_weight(
dev['id'], dev['weight'] + weight_shift)
else:
builder.set_dev_weight(dev['id'], dev['target_weight'])
self.logger.debug(
"--> [%s/%s] ++ weight to %s" % (dev['ip'],
dev['device'],
dev['weight']))
elif dev['weight'] > dev['target_weight']:
if dev['weight'] - weight_shift \
> dev['target_weight']:
builder.set_dev_weight(
dev['id'], dev['weight'] - weight_shift)
else:
builder.set_dev_weight(dev['id'], dev['target_weight'])
self.logger.debug(
"--> [%s/%s] -- weight to %s" % (dev['ip'],
dev['device'],
dev['weight']))
def ring_requires_change(self, builder):
"""Check if a ring requires changes
:param builder: builder whose devices to check
:returns: True if ring requires change
"""
self.pause_if_asked()
if builder.devs_changed:
return True
if not self.ring_balance_ok(builder):
return True
for dev in builder.devs:
if not dev:
continue
if 'target_weight' in dev:
if dev['weight'] != dev['target_weight']:
self.logger.debug("--> [%s] weight %s | target %s"
% (
dev['ip'] + '/' +
dev['device'], dev['weight'],
dev['target_weight']))
return True
return False
def in_change_window(self):
"""Check if we are within the allowed time window for a change"""
start = self.change_window[0]
end = self.change_window[1]
now = gmtime().tm_hour * 100 + gmtime().tm_min
if start <= end:
return start <= now <= end
else:
return start <= now or now <= end
def dispersion_ok(self, swift_type):
"""Run a dispersion report and check whether its 'ok'
:param swift_type: either 'container' or 'object'
:returns: True if the dispersion report is 'ok'
"""
self.pause_if_asked()
if swift_type == 'account':
return True
self.logger.debug("--> Running %s dispersion report" % swift_type)
dsp_cmd = [self.dispersion_cmd, '-j', '--%s-only' % swift_type]
try:
result = json.loads(subprocess.Popen(dsp_cmd,
stdout=subprocess.PIPE).communicate()[0])
except Exception:
self.logger.exception('Error running dispersion report')
return False
if not result[swift_type]:
self.logger.notice("--> Dispersion report run returned nothing!")
return False
self.logger.debug("--> Dispersion info: %s" % result)
# The dispersion report json output has changed a bit over time, so check
# every missing_* field it may report.
if not (result[swift_type].get('missing_2', 0) == 0 and
result[swift_type].get('missing_3', 0) == 0 and
result[swift_type].get('missing_all', 0) == 0):
return False
if result[swift_type]['pct_found'] > self.dispersion_pct[swift_type]:
return True
else:
return False
def min_part_hours_ok(self, builder):
"""Check if min part hours has elapsed
:param builder: builder to check
:returns: True if min part hours have elapsed
"""
self.pause_if_asked()
elapsed_hours = int(time() - builder._last_part_moves_epoch) / 3600
self.logger.debug('--> partitions last moved %d hours ago [%s]'
% (elapsed_hours, datetime.utcfromtimestamp(
builder._last_part_moves_epoch)))
if elapsed_hours > builder.min_part_hours:
return True
else:
return False
def min_modify_time(self, btype):
"""Check if minimum modify time has passed
:param btype: builder type to check, one of account|container|object
:returns: True if min modify time has elapsed
"""
self.pause_if_asked()
since_modified = time() - stat(self.builder_files[btype]).st_mtime
self.logger.debug(
'--> Ring last modified %d seconds ago.' % since_modified)
if since_modified > self.sec_since_modified:
return True
else:
return False
def ring_balance_ok(self, builder):
"""Check if ring balance is ok
:param builder: builder to check
:returns: True if ring balance is ok
"""
self.pause_if_asked()
self.logger.debug(
'--> Current balance: %.02f' % builder.get_balance())
return builder.get_balance() <= self.balance_threshold
def write_builder(self, btype, builder):
"""Write out new builder file
:param btype: The builder type
:param builder: The builder to dump
:returns: new ring file md5
"""
self.pause_if_asked()
builder_file = self.builder_files[btype]
fd = tmppath = None
try:
fd, tmppath = mkstemp(dir=self.swiftdir, suffix='.tmp.builder')
pickle.dump(builder.to_dict(), fdopen(fd, 'wb'), protocol=2)
backup, backup_md5 = make_backup(builder_file, self.backup_dir)
self.logger.notice('--> Backed up %s to %s (%s)' %
(builder_file, backup, backup_md5))
chmod(tmppath, 0644)
rename(tmppath, builder_file)
except Exception as err:
raise Exception('Error writing builder: %s' % err)
finally:
if fd:
try:
close(fd)
except OSError:
pass
if tmppath:
try:
unlink(tmppath)
except OSError:
pass
return get_md5sum(builder_file)
def write_ring(self, btype, builder):
"""Write out new ring files
:param btype: The builder type
:param builder: The builder to dump
:returns: new ring file md5
"""
fd = tmppath = None
try:
self.pause_if_asked()
ring_file = self.ring_files[btype]
fd, tmppath = mkstemp(dir=self.swiftdir, suffix='.tmp.ring.gz')
builder.get_ring().save(tmppath)
close(fd)
if not is_valid_ring(tmppath):
unlink(tmppath)
raise Exception('Ring Validate Failed')
backup, backup_md5 = make_backup(ring_file, self.backup_dir)
self.logger.notice('--> Backed up %s to %s (%s)' %
(ring_file, backup, backup_md5))
chmod(tmppath, 0644)
rename(tmppath, ring_file)
except Exception as err:
raise Exception('Error writing ring: %s' % err)
finally:
if fd:
try:
close(fd)
except OSError:
pass
if tmppath:
try:
unlink(tmppath)
except OSError:
pass
return get_md5sum(ring_file)
def orchestration_pass(self, btype):
"""Check the rings, make any needed adjustments, and deploy the ring
:param btype: The builder type to work on.
:return: True if the builder was modified, False if it was not
"""
self.pause_if_asked()
self.logger.debug("=" * 79)
self.logger.notice("Checking on %s ring..." % btype)
self.logger.debug("=" * 79)
builder = RingBuilder.load(self.builder_files[btype])
if self.ring_requires_change(builder):
self.logger.notice("[%s] -> ring requires weight change." % btype)
if self.mph_enabled:
if not self.min_part_hours_ok(builder):
self.logger.notice(
"[%s] -> Ring min_part_hours: not ready!" % btype)
return False
else:
self.logger.notice(
"[%s] -> Ring min_part_hours: ok" % btype)
if not self.min_modify_time(btype):
self.logger.notice(
"[%s] -> Ring last modify time: not ready!" % btype)
return False
else:
self.logger.notice("[%s] -> Ring last modify time: ok" % btype)
if not self.dispersion_ok(btype):
self.logger.notice(
"[%s] -> Dispersion report: not ready!" % btype)
return False
else:
self.logger.notice("[%s] -> Dispersion report: ok" % btype)
if self.ring_balance_ok(builder):
self.logger.notice("[%s] -> Current Ring balance: ok" % btype)
self.logger.notice("[%s] -> Adjusting ring..." % btype)
self.adjust_ring(builder)
self.logger.notice("[%s] -> Rebalancing ring..." % btype)
rebalanced = self.rebalance_ring(builder)
if not rebalanced:
self.logger.notice("[%s] -> Rebalance: not ready!" % btype)
return True # we should sleep a bit longer
else:
self.logger.notice("[%s] -> Rebalance: ok" % btype)
else:
self.logger.notice(
"[%s] -> Current Ring balance: not ready!" % btype)
self.logger.notice('[%s] -> Rebalancing ring with no '
'modifications...' % btype)
rebalanced = self.rebalance_ring(builder)
if not rebalanced:
self.logger.notice(
"[%s] -> Rebalance: not ready!" % btype)
return True # we should sleep a bit longer
else:
self.logger.notice("[%s] -> Rebalance: ok" % btype)
self.logger.notice("[%s] -> Writing builder..." % btype)
try:
builder_md5 = self.write_builder(btype, builder)
self.logger.notice('[%s] --> Wrote new builder with md5: '
'%s' % (btype, builder_md5))
self.logger.notice("[%s] -> Writing ring..." % btype)
ring_md5 = self.write_ring(btype, builder)
self.logger.notice("[%s] --> Wrote new ring with md5: %s" %
(btype, ring_md5))
self._emit_notify('%s ring change' % btype,
'Wrote new ring with md5: %s' % ring_md5)
return True
except Exception:
self.logger.exception('Error dumping builder or ring')
else:
self.logger.notice("[%s] -> No ring change required" % btype)
return False
def start(self):
"""Start up the ring master"""
self.logger.notice("Ring-Master starting up")
self.logger.notice("-> Entering ring orchestration loop.")
while True:
try:
self.pause_if_asked()
if self.in_change_window():
for btype in sorted(self.builder_files.keys()):
with lock_parent_directory(self.builder_files[btype],
self.lock_timeout):
ring_changed = self.orchestration_pass(btype)
if ring_changed:
sleep(self.recheck_after_change_interval)
else:
sleep(self.recheck_interval)
else:
self.logger.debug('Not in change window')
sleep(60)
except exceptions.LockTimeout:
self.logger.exception('Orchestration LockTimeout Encountered')
except Exception:
self.logger.exception('Orchestration Error')
sleep(60)
sleep(1)
class RingMasterd(Daemon):
def run(self, conf):
"""
Startup Ring Management Daemon
"""
rms = RingMasterServer(conf)
rms.start()
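# A minimal sketch (assumed values, not part of the daemon): the mapping
# RingMasterServer expects is a dict of config sections, as produced by
# swift's readconf(); the keys below mirror the defaults read in __init__.
def example_conf():
    return {'ringmasterd': {
        'swiftdir': '/etc/swift',
        'backup_dir': '/etc/swift/backups',
        'interval': '120',
        'change_interval': '3600',
        'balance_threshold': '2',
        'change_window': '0000,2400',
        'min_part_hours_check': 'n',
        'email_notify': 'n',
    }}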
def run_server():
usage = '''
%prog start|stop|restart|pause|unpause [--conf=/path/to/some.conf] [-f]
'''
args = optparse.OptionParser(usage)
args.add_option('--foreground', '-f', action="store_true",
help="Run in foreground, in debug mode")
args.add_option('--conf', default="/etc/swift/ring-master.conf",
help="path to config. default /etc/swift/ring-master.conf")
args.add_option('--pid', default="/var/run/swift-ring-master.pid",
help="default: /var/run/swift-ring-master.pid")
options, arguments = args.parse_args()
if len(sys.argv) <= 1:
args.print_help()
sys.exit(1)
if options.foreground:
conf = readconf(options.conf)
tap = RingMasterServer(conf)
tap.start()
sys.exit(0)
if len(sys.argv) >= 2:
conf = readconf(options.conf)
user = conf['ringmasterd'].get('user', 'swift')
pfile = conf['ringmasterd'].get('pause_file_path', '/tmp/.srm-pause')
daemon = RingMasterd(options.pid, user=user)
if 'start' == sys.argv[1]:
daemon.start(conf)
elif 'stop' == sys.argv[1]:
daemon.stop()
elif 'restart' == sys.argv[1]:
daemon.restart(conf)
elif 'pause' == sys.argv[1]:
print "Writing pause file"
with open(pfile, 'w') as f:
f.write("")
elif 'unpause' == sys.argv[1]:
print "Removing pause file"
unlink(pfile)
else:
args.print_help()
sys.exit(2)
sys.exit(0)
else:
args.print_help()
sys.exit(2)
if __name__ == '__main__':
run_server()
|
import os
from java.lang import System
from org.sleuthkit.autopsy.casemodule import Case
from org.sleuthkit.autopsy.report import GeneralReportModuleAdapter
class SampleGeneralReportModule(GeneralReportModuleAdapter):
# TODO: Rename this. Will be shown to users when making a report
def getName(self):
return "Sample Jython Report Module"
# TODO: rewrite this
def getDescription(self):
return "A sample Jython report module"
# TODO: Update this to reflect where the report file will be written to
def getRelativeFilePath(self):
return "sampleReport.txt"
# TODO: Update this method to make a report
def generateReport(self, baseReportDir, progressBar):
# For an example, we write a file with the number of files created in the past 2 weeks
# Configure progress bar for 2 tasks
progressBar.setIndeterminate(False)
progressBar.start()
progressBar.setMaximumProgress(2)
# Get files by created in last two weeks.
fileCount = 0
autopsyCase = Case.getCurrentCase()
sleuthkitCase = autopsyCase.getSleuthkitCase()
currentTime = System.currentTimeMillis() / 1000
minTime = currentTime - (14 * 24 * 60 * 60)
otherFiles = sleuthkitCase.findFilesWhere("crtime > %d" % minTime)
for otherFile in otherFiles:
fileCount += 1
progressBar.increment()
# Write the result to the report file.
report = open(os.path.join(baseReportDir, self.getRelativeFilePath()), 'w')
report.write("file count = %d" % fileCount)
Case.getCurrentCase().addReport(report.name, "SampleGeneralReportModule", "Sample Python Report")
report.close()
progressBar.increment()
progressBar.complete()
|
class Handler(object):
def __init__(self, configuration):
self.configuration = configuration
def cleanup(self):
pass
def clients(self):
return {}
|
"""Data generation script for Maze environment.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl import app
from absl import flags
from .env import environment
import h5py
import numpy as np
FLAGS = flags.FLAGS
flags.DEFINE_string('savepath', '/tmp/test.hdf5',
'Path to save the HDF5 dataset')
def main(argv):
if len(argv) > 1:
raise app.UsageError('Too many command-line arguments.')
forward_ep = 10
steps_per_ep = 100
datapath = FLAGS.savepath
f = h5py.File(datapath, 'w')
sim_data = f.create_group('sim')
sim_data.create_dataset('ims', (forward_ep, steps_per_ep, 64, 64, 3),
dtype='f')
sim_data.create_dataset('actions', (forward_ep, steps_per_ep, 2), dtype='f')
env = environment.Environment()
for ep in range(forward_ep):
time_step = env.reset()
_, im = env.get_observation()
step = 0
while not time_step.last():
action = np.random.uniform(-3, 3, size=(2,))
f['sim']['ims'][ep, step] = im
f['sim']['actions'][ep, step] = action
step += 1
time_step = env.step(action)
_, im = env.get_observation()
print(ep)
if __name__ == '__main__':
app.run(main)
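# A minimal sketch (assumed usage): reading back the dataset written above to
# confirm the (episodes, steps, 64, 64, 3) image layout and the action shapes.
def inspect_dataset(path='/tmp/test.hdf5'):
  with h5py.File(path, 'r') as f:
    ims = f['sim']['ims']
    actions = f['sim']['actions']
    print('images:', ims.shape, 'actions:', actions.shape)
    return np.array(actions[0, 0])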
|
"""This module contains the general information for ChassisPowerBudget ManagedObject."""
from ...imcmo import ManagedObject
from ...imccoremeta import MoPropertyMeta, MoMeta
from ...imcmeta import VersionMeta
class ChassisPowerBudgetConsts:
ADMIN_ACTION_RESET_POWER_PROFILE_DEFAULT = "reset-power-profile-default"
ADMIN_ACTION_START_POWER_CHAR = "start-power-char"
class ChassisPowerBudget(ManagedObject):
"""This is ChassisPowerBudget class."""
consts = ChassisPowerBudgetConsts()
naming_props = set([])
mo_meta = {
"modular": MoMeta("ChassisPowerBudget", "chassisPowerBudget", "budget", VersionMeta.Version2013e, "InputOutput", 0xff, [], ["admin", "read-only", "user"], [u'equipmentChassis'], [u'autoPowerProfile'], ["Get"])
}
prop_meta = {
"modular": {
"admin_action": MoPropertyMeta("admin_action", "adminAction", "string", VersionMeta.Version2013e, MoPropertyMeta.READ_WRITE, 0x2, 0, 510, None, ["reset-power-profile-default", "start-power-char"], []),
"admin_state": MoPropertyMeta("admin_state", "adminState", "string", VersionMeta.Version2013e, MoPropertyMeta.READ_WRITE, 0x4, None, None, None, ["Disabled", "Enabled", "disabled", "enabled"], []),
"auto_min_budget": MoPropertyMeta("auto_min_budget", "autoMinBudget", "string", VersionMeta.Version2013e, MoPropertyMeta.READ_ONLY, None, 0, 510, None, [], []),
"budget": MoPropertyMeta("budget", "budget", "string", VersionMeta.Version2013e, MoPropertyMeta.READ_WRITE, 0x8, 0, 510, None, [], ["0-4294967295"]),
"cap_budget": MoPropertyMeta("cap_budget", "capBudget", "string", VersionMeta.Version2013e, MoPropertyMeta.READ_ONLY, None, 0, 510, None, [], []),
"child_action": MoPropertyMeta("child_action", "childAction", "string", VersionMeta.Version2013e, MoPropertyMeta.INTERNAL, None, None, None, None, [], []),
"dn": MoPropertyMeta("dn", "dn", "string", VersionMeta.Version2013e, MoPropertyMeta.READ_WRITE, 0x10, 0, 255, None, [], []),
"max_power": MoPropertyMeta("max_power", "maxPower", "string", VersionMeta.Version2013e, MoPropertyMeta.READ_ONLY, None, 0, 510, None, [], []),
"min_power": MoPropertyMeta("min_power", "minPower", "string", VersionMeta.Version2013e, MoPropertyMeta.READ_ONLY, None, 0, 510, None, [], []),
"pow_char_enable": MoPropertyMeta("pow_char_enable", "powCharEnable", "string", VersionMeta.Version301c, MoPropertyMeta.READ_WRITE, 0x20, None, None, None, ["Disabled", "Enabled", "disabled", "enabled"], []),
"power_char_status": MoPropertyMeta("power_char_status", "powerCharStatus", "string", VersionMeta.Version2013e, MoPropertyMeta.READ_ONLY, None, None, None, None, [], []),
"rn": MoPropertyMeta("rn", "rn", "string", VersionMeta.Version2013e, MoPropertyMeta.READ_WRITE, 0x40, 0, 255, None, [], []),
"status": MoPropertyMeta("status", "status", "string", VersionMeta.Version2013e, MoPropertyMeta.READ_WRITE, 0x80, None, None, None, ["", "created", "deleted", "modified", "removed"], []),
},
}
prop_map = {
"modular": {
"adminAction": "admin_action",
"adminState": "admin_state",
"autoMinBudget": "auto_min_budget",
"budget": "budget",
"capBudget": "cap_budget",
"childAction": "child_action",
"dn": "dn",
"maxPower": "max_power",
"minPower": "min_power",
"powCharEnable": "pow_char_enable",
"powerCharStatus": "power_char_status",
"rn": "rn",
"status": "status",
},
}
def __init__(self, parent_mo_or_dn, **kwargs):
self._dirty_mask = 0
self.admin_action = None
self.admin_state = None
self.auto_min_budget = None
self.budget = None
self.cap_budget = None
self.child_action = None
self.max_power = None
self.min_power = None
self.pow_char_enable = None
self.power_char_status = None
self.status = None
ManagedObject.__init__(self, "ChassisPowerBudget", parent_mo_or_dn, **kwargs)
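# A minimal sketch (assumed usage, not part of the generated SDK): building
# the MO locally under an assumed chassis DN and staging writable properties;
# pushing the change would additionally require an authenticated ImcHandle.
def example_budget(parent_dn="sys/chassis-1"):
    mo = ChassisPowerBudget(parent_mo_or_dn=parent_dn)
    mo.admin_state = "enabled"
    mo.budget = "1200"
    return mo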
|
"""
Fitmarket
A small number of people - donors - share daily measurements of their weight. From a single donor's daily weight we derive the values of two stocks: stock X has a value equal to the donor's weight on that day, and the inverse stock ~X has the value (150 kg - X). Note that as X rises, ~X falls, and X + ~X = 150 kg. Each player starts the game with 10,000 kg of available money and uses that money to trade stocks. A player's total value is the sum of their available money and the current value of all stocks they hold. The goal of the game is to maximize total value by correctly predicting stock price movements. For example, on the first day a player buys 125 shares of \"X\" at 80 kg. On the second day the stock rises to 82 kg. If the player sells all \"X\" shares, they have earned 2 kg * 125 = 250 kg! The game does not allow a donor to trade their own stock.
OpenAPI spec version: 1.1.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from __future__ import absolute_import
import os
import sys
import unittest
import fitmarket_api
from fitmarket_api.rest import ApiException
from fitmarket_api.models.status import Status
class TestStatus(unittest.TestCase):
""" Status unit test stubs """
def setUp(self):
pass
def tearDown(self):
pass
def testStatus(self):
"""
Test Status
"""
model = fitmarket_api.models.status.Status()
if __name__ == '__main__':
unittest.main()
|
from neutron_lib.api.definitions import servicetype
from neutron_lib.tests.unit.api.definitions import base
class ServiceTypeDefinitionTestCase(base.DefinitionBaseTestCase):
extension_module = servicetype
extension_resources = (servicetype.COLLECTION_NAME,)
extension_attributes = ('default', servicetype.SERVICE_ATTR,)
|
def test_create_supply_agreement(app, json_agreements):
agreement = json_agreements
app.session.ensure_login(email=app.target["userEmail"], password=app.target["userPassword"])
app.agreement.create(agreement)
|
from __future__ import unicode_literals
import collections
from datetime import datetime
from django.conf import settings
from django.db import models
import reversion
from base.model_utils import TimeStampedModel
def default_moderate_state():
return ModerateState.pending()
class CmsError(Exception):
def __init__(self, value):
Exception.__init__(self)
self.value = value
def __str__(self):
return repr('%s, %s' % (self.__class__.__name__, self.value))
class ModerateState(models.Model):
"""Accept, remove or pending."""
name = models.CharField(max_length=100)
slug = models.SlugField(max_length=100)
class Meta:
ordering = ['name']
verbose_name = 'Moderate'
verbose_name_plural = 'Moderated'
def __str__(self):
return '{}'.format(self.name)
@staticmethod
def pending():
return ModerateState.objects.get(slug='pending')
@staticmethod
def published():
return ModerateState.objects.get(slug='published')
@staticmethod
def removed():
return ModerateState.objects.get(slug='removed')
reversion.register(ModerateState)
class PageManager(models.Manager):
def menu(self):
"""Return page objects for a menu."""
return self.model.objects.all().order_by('order')
class Page(TimeStampedModel):
"""Which page on the web site.
An order of zero (0) indicates that the page should be excluded from a
menu.
"""
name = models.CharField(max_length=100)
slug = models.SlugField(max_length=100, unique=True)
order = models.IntegerField(default=0)
is_home = models.BooleanField(default=False)
objects = PageManager()
class Meta:
ordering = ['name']
verbose_name = 'Page'
verbose_name_plural = 'Pages'
def __str__(self):
return '{}'.format(self.name)
reversion.register(Page)
class Layout(TimeStampedModel):
"""Layout area e.g. content, header, footer."""
name = models.CharField(max_length=100)
slug = models.SlugField(max_length=100, unique=True)
class Meta:
ordering = ('name',)
verbose_name = 'Layout'
verbose_name_plural = 'Layout'
def __str__(self):
return '{}'.format(self.name)
reversion.register(Layout)
class Section(TimeStampedModel):
"""Section of a web page e.g. content, header, footer."""
page = models.ForeignKey(Page)
layout = models.ForeignKey(Layout)
class Meta:
ordering = ['page', 'modified']
unique_together = ('page', 'layout')
verbose_name = 'Section'
verbose_name_plural = 'Sections'
def __str__(self):
return '{}'.format(self.page.name)
def next_order(self):
qs = self.container_set.all().order_by(
'-order'
)[:1]
if qs:
return qs[0].order + 1
else:
return 1
reversion.register(Section)
class Container(TimeStampedModel):
"""Manage one piece of content which can be in various states.
e.g. pending, published and removed.
"""
section = models.ForeignKey(Section)
# TODO I am not sure we need 'order' on this model at all. The 'order' of
# an object should probably be the responsibility of the content object.
# In fact, in some cases the ordering might be by something else e.g. date.
order = models.IntegerField()
class Meta:
verbose_name = 'Container'
verbose_name_plural = 'Containers'
def __str__(self):
return '{}'.format(self.section.page.name)
reversion.register(Container)
class ContentManager(models.Manager):
def pending(self, section, kwargs=None):
"""Return a list of pending content for a section.
Note: we return a list of content instances not a queryset.
"""
pending = ModerateState.pending()
published = ModerateState.published()
qs = self.model.objects.filter(
container__section=section,
moderate_state__in=[published, pending],
)
order_by = None
if kwargs:
order_by = kwargs.pop('order_by', None)
qs = qs.filter(**kwargs)
if order_by:
qs = qs.order_by(order_by)
else:
qs = qs.order_by('container__order')
result = collections.OrderedDict()
for c in qs:
if c.container.pk in result:
if c.moderate_state == pending:
result[c.container.pk] = c
else:
result[c.container.pk] = c
return list(result.values())
def published(self, section):
"""Return a published content for a page."""
published = ModerateState.published()
return self.model.objects.filter(
container__section=section,
moderate_state=published,
).order_by(
'container__order',
)
class ContentModel(TimeStampedModel):
container = models.ForeignKey(Container)
moderate_state = models.ForeignKey(
ModerateState,
default=default_moderate_state
)
date_moderated = models.DateTimeField(blank=True, null=True)
user_moderated = models.ForeignKey(
settings.AUTH_USER_MODEL, blank=True, null=True, related_name='+'
)
objects = ContentManager()
class Meta:
abstract = True
ordering = [
'container__section__page__name',
'order',
'moderate_state__slug',
]
verbose_name = 'Content'
verbose_name_plural = 'Content'
def __str__(self):
return '{}: {}, order {}'.format(
self.pk, self.moderate_state, self.order
)
def _delete_removed_content(self):
"""delete content which was previously removed."""
try:
c = self._get_content_set().get(
moderate_state=ModerateState.removed()
)
c.delete()
except self.DoesNotExist:
pass
def _get_content_set(self):
raise CmsError(
"Concrete class must implement the '_get_content_set' method"
)
def _is_pending(self):
return self.moderate_state == ModerateState.pending()
is_pending = property(_is_pending)
def _is_published(self):
return self.moderate_state == ModerateState.published()
is_published = property(_is_published)
def _is_removed(self):
return self.moderate_state == ModerateState.removed()
is_removed = property(_is_removed)
def _set_moderated(self, user, moderate_state):
self.date_moderated = datetime.now()
self.user_moderated = user
self.moderate_state = moderate_state
def _set_published_to_remove(self, user):
"""publishing new content, so remove currently published content."""
try:
c = self._get_content_set().get(
moderate_state=ModerateState.published()
)
c.set_removed(user)
c.save()
except self.DoesNotExist:
pass
def set_pending(self, user):
if self.moderate_state == ModerateState.published():
try:
self._get_content_set().get(
moderate_state=ModerateState.pending()
)
raise CmsError(
"Section already has pending content so "
"published content should not be edited."
)
except self.DoesNotExist:
self._set_moderated(user, ModerateState.pending())
self.pk = None
elif self.moderate_state == ModerateState.pending():
return
else:
raise CmsError(
"Cannot edit content which has been removed"
)
def set_published(self, user):
"""Publish content."""
if not self.moderate_state == ModerateState.pending():
raise CmsError(
"Cannot publish content unless it is 'pending'"
)
self._delete_removed_content()
self._set_published_to_remove(user)
self._set_moderated(user, ModerateState.published())
def set_removed(self, user):
"""Remove content."""
if self.moderate_state == ModerateState.removed():
raise CmsError(
"Cannot remove content which has already been removed"
)
self._delete_removed_content()
self._set_moderated(user, ModerateState.removed())
def url_publish(self):
raise CmsError("class must implement 'url_publish' method")
def url_remove(self):
raise CmsError("class must implement 'url_remove' method")
def url_update(self):
raise CmsError("class must implement 'url_update' method")
|
import logging
import os
from jobslave.generators import bootable_image, constants
from jobslave.util import logCall
from conary.lib import util
from jobslave import buildtypes
log = logging.getLogger(__name__)
class Tarball(bootable_image.BootableImage):
fileType = buildtypes.typeNames[buildtypes.TARBALL]
def write(self):
self.swapSize = self.getBuildData("swapSize") * 1048576
basePath = os.path.join(self.workDir, self.basefilename)
util.mkdirChain(basePath)
outputDir = os.path.join(constants.finishedDir, self.UUID)
util.mkdirChain(outputDir)
tarball = os.path.join(outputDir, self.basefilename + '.tar.gz')
self.installFileTree(basePath, no_mbr=True)
sizes = os.statvfs(basePath)
installedSize = (sizes.f_blocks - sizes.f_bavail) * sizes.f_frsize
log.info("Installed size: %.1f MB", installedSize / 1e6)
self.status('Creating tarball')
logCall('tar -C %s -cpPsS --to-stdout ./ | gzip > %s' % \
(basePath, tarball))
self.postOutput(((tarball, 'Tar File'),),
attributes={'installed_size': installedSize})
|
"""Code to test the db module."""
import logging
import unittest
import core.config
import core.db as db
class TestDBprivate(unittest.TestCase):
"""Test the private methods of the DB class."""
def setUp(self):
"""Set up common test harness for this class."""
self.config = core.config.Config()
self.config['dbfile'] = ':memory:'
self.db = db.DB(initialize=False)
def tearDown(self):
"""Tear down the common test harness for this class."""
self.config.reset()
def test_00_init(self):
"""DB - initialized and connection established."""
self.assertTrue(self.db._db is not None)
# Do some SQL and make sure we don't get an error
self.db._db.execute('CREATE TABLE TestTable(name TEXT);')
self.assertTrue(True)
def test_01__query(self):
"""DB - _query returns all the results."""
# Setup some test data
test_values = ['hi rory', 'bye amy']
self.db._db.execute('CREATE TABLE TestTable(name TEXT);')
for i in test_values:
self.db._db.execute('INSERT INTO TestTable VALUES(?);', (i,))
# Call _query
results = self.db._query('SELECT * FROM TestTable;')
# Check the values in the returned cursor
for (row,value) in zip(results,test_values):
self.assertEqual(row[0], value)
# Call _query
results = self.db._query('SELECT * FROM TestTable WHERE name=:n;',
{'n': test_values[0]})
# Check the values in the returned cursor
for (row,value) in zip(results,test_values[0:1]):
self.assertEqual(row[0], value)
def test_02__change(self):
"""DB - _change alters the database when SQL is good."""
test_values = ['hi rory', 'bye amy']
sql_list = ['CREATE TABLE TestTable(name TEXT);']
sql_params_list = [{}]
for i in test_values:
sql_list.append('INSERT INTO TestTable VALUES(?);')
sql_params_list.append((i,))
# Call _change
result = self.db._change(sql_list, sql_params_list)
self.assertEqual(result, 2)
# Try adding more without using parameter substitution
result = self.db._change(['INSERT INTO TestTable VALUES("who?");'])
self.assertEqual(result, 3)
def test_03__change_bad(self):
"""DB - _change rollsback when 1st statement is bad."""
# Setup some test data
test_values = ['hi rory', 'bye amy']
sql_list = []
sql_params_list = []
for i in test_values:
sql_list.append('INSERT INTO BadTable VALUES(?);')
sql_params_list.append((i,))
# Call _change
result = self.db._change(sql_list, sql_params_list)
# Check return value
self.assertTrue(result is None)
# Check database status
result = self.db._query('SELECT name FROM sqlite_master WHERE ' +
'type="table" AND name="table_name";')
self.assertEqual(len(result.fetchall()), 0)
def test_04__change_rollback(self):
"""DB - _change rollsback when 2nd statement is bad."""
# Setup some test data
test_values = ['hi rory', 'bye amy']
sql_list = ['CREATE TABLE TestTable(name TEXT);']
for i in test_values:
sql_list.append('INSERT INTO BadTable VALUES("{}");'.format(i))
# Call _change
result = self.db._change(sql_list)
# Check return value
self.assertTrue(result is None)
# Check database status
result = self.db._query('SELECT name FROM sqlite_master WHERE ' +
'type="table" AND name="table_name";')
self.assertEqual(len(result.fetchall()), 0)
def test_10__initialize(self):
"""DB - _initialize creates the correct database schema."""
# Initialize the database
self.db._initialize()
# Check the list of created tables.
tables = ['Tags', 'Tasks']
result = self.db._query('SELECT name FROM sqlite_master ' +
'WHERE type="table" AND name NOT LIKE "sqlite%";')
result_tables = []
for r in result:
result_tables.append(r[0])
result_tables.sort()
self.assertEqual(result_tables, tables)
# Check the schema for each table.
schema = {
'Tags':
"""CREATE TABLE Tags(
id INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL,
name TEXT UNIQUE NOT NULL
)""",
'Tasks':
"""CREATE TABLE Tasks(
id INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL,
name TEXT NOT NULL,
created DATETIME DEFAULT CURRENT_TIMESTAMP
)""",
}
for table in tables:
result = self.db._query('SELECT sql FROM sqlite_master ' +
'WHERE type="table" AND name=?;', (table,))
sql = result.fetchone()[0]
self.assertEqual(sql, schema[table])
class TestDBpublic(unittest.TestCase):
"""Test the public methods of the DB class."""
def setUp(self):
"""Set up common test harness for this class."""
self.config = core.config.Config()
self.config['dbfile'] = ':memory:'
self.db = db.DB()
def tearDown(self):
"""Tear down the common test harness for this class."""
self.config.reset()
def test_add_tag(self):
"""DB - add_tag adds a tag."""
result = self.db._query('SELECT id FROM Tags;')
self.assertEqual(len(result.fetchall()), 0)
new_id = self.db.add_tag("new tag")
self.assertEqual(new_id, 1)
result = self.db._query('SELECT id FROM Tags;')
results = result.fetchall()
self.assertEqual(len(results), 1)
self.assertEqual(results[0][0], new_id)
def test_add_tag_already_exists(self):
"""DB - add_tag returns id of tag if already exists."""
result = self.db._query('SELECT id FROM Tags;')
self.assertEqual(len(result.fetchall()), 0)
new_id = self.db.add_tag("new tag")
new_id_second = self.db.add_tag("new tag")
self.assertEqual(new_id, new_id_second)
def test_edit_tag(self):
"""DB - edit_tag changes the name of a tag."""
new_tag_text = "edited tag"
the_id = self.db.add_tag("new tag")
self.db.edit_tag(the_id, new_tag_text)
result = self.db._query('SELECT id FROM Tags WHERE name=?;',
(new_tag_text,))
results = result.fetchall()
self.assertEqual(len(results), 1)
self.assertEqual(results[0][0], the_id)
|
"""Constants for export/import.
See: https://goo.gl/OIDCqz for these constants and directory structure.
"""
VERSION_FORMAT_SPECIFIER = "%08d"
ASSETS_DIRECTORY = "assets"
EXPORT_BASE_NAME = "export"
EXPORT_SUFFIX_NAME = "meta"
META_GRAPH_DEF_FILENAME = EXPORT_BASE_NAME + "." + EXPORT_SUFFIX_NAME
VARIABLES_FILENAME = EXPORT_BASE_NAME
VARIABLES_FILENAME_PATTERN = VARIABLES_FILENAME + "-?????-of-?????"
INIT_OP_KEY = "serving_init_op"
SIGNATURES_KEY = "serving_signatures"
ASSETS_KEY = "serving_assets"
GRAPH_KEY = "serving_graph"
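# A minimal sketch (assumed usage): how these constants typically compose into
# an on-disk export layout, following the directory structure the module
# docstring links to; the base directory name here is only illustrative.
import os
def example_export_paths(export_dir="/tmp/export", version=1):
  version_dir = os.path.join(export_dir, VERSION_FORMAT_SPECIFIER % version)
  return {
      "meta_graph": os.path.join(version_dir, META_GRAPH_DEF_FILENAME),
      "variables": os.path.join(version_dir, VARIABLES_FILENAME),
      "assets": os.path.join(version_dir, ASSETS_DIRECTORY),
  }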
|
import settings
from flask import Flask
from atompark import SmsManager
from flask.ext.script import Manager
app = Flask(__name__)
manager = Manager(app)
@manager.option('-n', '--name', dest='name', default=None)
@manager.option('-d', '--description', dest='description', default="")
def add_address_book(name, description):
smsmanager = SmsManager(settings.SMS_PUB_KEY, settings.SMS_SECRET_KEY, settings.SMS_API_VERSION)
print smsmanager.add_address_book(name, description)
@manager.option('-i', '--id', dest='id_address_book', default=None)
def del_address_book(id_address_book):
smsmanager = SmsManager(settings.SMS_PUB_KEY, settings.SMS_SECRET_KEY, settings.SMS_API_VERSION)
print smsmanager.del_address_book(id_address_book)
@manager.option('-i', '--id', dest='id_address_book')
@manager.option('-n', '--name', dest='name')
@manager.option('-d', '--description', dest='description', default=None)
def edit_address_book(id_address_book, name, description):
smsmanager = SmsManager(settings.SMS_PUB_KEY, settings.SMS_SECRET_KEY, settings.SMS_API_VERSION)
print smsmanager.edit_address_book(id_address_book, name, description)
@manager.option('-i', '--id', dest='id_address_book', default=None)
@manager.option('-o', '--offset', dest='offset', default=0)
def get_address_book(id_address_book=None, offset=0):
smsmanager = SmsManager(settings.SMS_PUB_KEY, settings.SMS_SECRET_KEY, settings.SMS_API_VERSION)
print smsmanager.get_address_book(id_address_book, offset)
@manager.option('-s', '--search_fields', dest='search_fields')
@manager.option('-o', '--offset', dest='offset', default=0)
def search_address_book(search_fields, offset=0):
smsmanager = SmsManager(settings.SMS_PUB_KEY, settings.SMS_SECRET_KEY, settings.SMS_API_VERSION)
print smsmanager.search_address_book(search_fields, offset)
@manager.option('-i', '--id', dest='id_address_book')
@manager.option('-p', '--phone', dest='phone')
@manager.option('-v', '--variables', dest='variables', default=None)
def add_phone_to_address_book(id_address_book, phone, variables):
smsmanager = SmsManager(settings.SMS_PUB_KEY, settings.SMS_SECRET_KEY, settings.SMS_API_VERSION)
print smsmanager.add_phone_to_address_book(id_address_book, phone, variables)
@manager.option('-i', '--id', dest='id_address_book')
@manager.option('-d', '--data', dest='data')
def add_phones_to_address_book(id_address_book, data):
smsmanager = SmsManager(settings.SMS_PUB_KEY, settings.SMS_SECRET_KEY, settings.SMS_API_VERSION)
print smsmanager.add_phones_to_address_book(id_address_book, data)
@manager.option('-a', '--id-address-book', dest='id_address_book', default=None)
@manager.option('-i', '--id-phone', dest='id_phone', default=None)
@manager.option('-p', '--phone', dest='phone', default=None)
@manager.option('-o', '--offset', dest='offset', default=0)
def get_phone_from_address_book(id_address_book, id_phone, phone, offset):
smsmanager = SmsManager(settings.SMS_PUB_KEY, settings.SMS_SECRET_KEY, settings.SMS_API_VERSION)
print smsmanager.get_phone_from_address_book(id_address_book, id_phone, phone, offset)
@manager.option('-s', '--sender', dest='sender')
@manager.option('-t', '--text', dest='text')
@manager.option('-p', '--phone', dest='phone')
@manager.option('-d', '--datetime', dest='datetime', default=None)
@manager.option('-l', '--lifetime', dest='sms_lifetime', default=0)
def send_sms(sender, text, phone, datetime, sms_lifetime):
smsmanager = SmsManager(settings.SMS_PUB_KEY, settings.SMS_SECRET_KEY, settings.SMS_API_VERSION)
print smsmanager.send_sms(sender, text, phone, datetime, sms_lifetime)
if __name__ == "__main__":
manager.run()
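# A minimal sketch (assumed invocation): Flask-Script exposes each decorated
# function above as a subcommand of this script, for example (the script name
# is illustrative):
#   python sms_manage.py add_address_book -n "customers" -d "EU customers"
#   python sms_manage.py send_sms -s "Sender" -t "hello" -p "385991234567"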
|
from datetime import datetime
from sqlalchemy import (
Column,
Index,
Integer,
Text,
String,
DateTime
)
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import (
scoped_session,
sessionmaker,
)
from zope.sqlalchemy import ZopeTransactionExtension
DBSession = scoped_session(sessionmaker(extension=ZopeTransactionExtension()))
Base = declarative_base()
Base.query = DBSession.query_property()
class User(Base):
__tablename__ = "users"
id = Column(Integer, primary_key=True)
identifier = Column(String, unique=True)
github_id = Column(String, unique=True)
email = Column(Text, unique=True, nullable=True)
signup_date = Column(DateTime, nullable=False, default=datetime.utcnow)
auth_token = Column(String, unique=True)
@classmethod
def social(cls, *args, **kwargs):
pass
def update_social(self, token, social_id):
self.github_id = social_id
self.auth_token = token
return self
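# A minimal sketch (assumed usage, not part of this module): binding an
# in-memory engine to the scoped session above and creating a user. With
# ZopeTransactionExtension, commits normally go through the 'transaction'
# package; flush() is enough to illustrate the mapping here.
def example_create_user():
    from sqlalchemy import create_engine
    engine = create_engine('sqlite://')
    Base.metadata.create_all(engine)
    DBSession.configure(bind=engine)
    user = User(identifier='example-user', github_id='12345',
                email='user@example.com', auth_token='token')
    DBSession.add(user)
    DBSession.flush()
    return User.query.filter_by(identifier='example-user').one()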
|
from dsl_parser import functions
from mock import MagicMock
from dsl_parser import exceptions
from dsl_parser.tasks import prepare_deployment_plan
from dsl_parser.tests.abstract_test_parser import AbstractTestParser
class TestGetSecret(AbstractTestParser):
secrets_yaml = """
data_types:
agent_config_type:
properties:
user:
type: string
required: false
key:
type: string
required: false
relationships:
cloudify.relationships.contained_in: {}
plugins:
p:
executor: central_deployment_agent
install: false
node_types:
webserver_type:
properties:
ip:
default: ''
agent_config:
type: agent_config_type
node_templates:
node:
type: webserver_type
webserver:
type: webserver_type
properties:
ip: { get_secret: ip }
agent_config:
key: { get_secret: agent_key }
user: { get_secret: user }
interfaces:
test:
op_with_no_get_secret:
implementation: p.p
inputs:
a: 1
op_with_get_secret:
implementation: p.p
inputs:
a: { get_secret: node_template_secret_id }
relationships:
- type: cloudify.relationships.contained_in
target: node
source_interfaces:
test:
op_with_no_get_secret:
implementation: p.p
inputs:
a: 1
op_with_get_secret:
implementation: p.p
inputs:
a: { get_secret: source_op_secret_id }
target_interfaces:
test:
op_with_no_get_secret:
implementation: p.p
inputs:
a: 1
op_with_get_secret:
implementation: p.p
inputs:
a: { get_secret: target_op_secret_id }
outputs:
webserver_url:
description: Web server url
value: { concat: ['http://', { get_secret: ip }, ':',
{ get_secret: webserver_port }] }
"""
def test_has_intrinsic_functions_property(self):
yaml = """
relationships:
cloudify.relationships.contained_in: {}
plugins:
p:
executor: central_deployment_agent
install: false
node_types:
webserver_type: {}
node_templates:
node:
type: webserver_type
webserver:
type: webserver_type
interfaces:
test:
op_with_no_get_secret:
implementation: p.p
inputs:
a: 1
op_with_get_secret:
implementation: p.p
inputs:
a: { get_secret: node_template_secret_id }
relationships:
- type: cloudify.relationships.contained_in
target: node
source_interfaces:
test:
op_with_no_get_secret:
implementation: p.p
inputs:
a: 1
op_with_get_secret:
implementation: p.p
inputs:
a: { get_secret: source_op_secret_id }
target_interfaces:
test:
op_with_no_get_secret:
implementation: p.p
inputs:
a: 1
op_with_get_secret:
implementation: p.p
inputs:
a: { get_secret: target_op_secret_id }
"""
parsed = prepare_deployment_plan(self.parse(yaml),
self._get_secret_mock)
webserver_node = None
for node in parsed.node_templates:
if node['id'] == 'webserver':
webserver_node = node
break
self.assertIsNotNone(webserver_node)
def assertion(operations):
op = operations['test.op_with_no_get_secret']
self.assertIs(False, op.get('has_intrinsic_functions'))
op = operations['test.op_with_get_secret']
self.assertIs(True, op.get('has_intrinsic_functions'))
assertion(webserver_node['operations'])
assertion(webserver_node['relationships'][0]['source_operations'])
assertion(webserver_node['relationships'][0]['target_operations'])
def test_validate_secrets_all_valid(self):
get_secret_mock = MagicMock(return_value='secret_value')
parsed = prepare_deployment_plan(self.parse_1_3(self.secrets_yaml),
get_secret_mock)
self.assertTrue(get_secret_mock.called)
self.assertFalse(hasattr(parsed, 'secrets'))
def test_validate_secrets_all_invalid(self):
expected_message = "Required secrets \['target_op_secret_id', " \
"'node_template_secret_id', 'ip', 'agent_key', " \
"'user', 'webserver_port', " \
"'source_op_secret_id'\] don't exist in this tenant"
get_secret_not_found = MagicMock(side_effect=TestNotFoundException)
self.assertRaisesRegexp(exceptions.UnknownSecretError,
expected_message,
prepare_deployment_plan,
self.parse_1_3(self.secrets_yaml),
get_secret_not_found)
def test_validate_secrets_unexpected_exception(self):
get_secret_exception = MagicMock(side_effect=TypeError)
self.assertRaisesRegexp(TypeError,
'',
prepare_deployment_plan,
self.parse_1_3(self.secrets_yaml),
get_secret_exception)
def test_validate_secrets_some_invalid(self):
expected_message = "Required secrets \['ip', 'source_op_secret_id'\]" \
" don't exist in this tenant"
get_secret_not_found = MagicMock()
get_secret_not_found.side_effect = [None, None, TestNotFoundException,
None, None, None,
TestNotFoundException]
self.assertRaisesRegexp(exceptions.UnknownSecretError,
expected_message,
prepare_deployment_plan,
self.parse_1_3(self.secrets_yaml),
get_secret_not_found)
def test_validate_secrets_without_secrets(self):
no_secrets_yaml = """
relationships:
cloudify.relationships.contained_in: {}
plugins:
p:
executor: central_deployment_agent
install: false
node_types:
webserver_type: {}
node_templates:
node:
type: webserver_type
webserver:
type: webserver_type
interfaces:
test:
op_with_no_get_secret:
implementation: p.p
inputs:
a: 1
relationships:
- type: cloudify.relationships.contained_in
target: node
source_interfaces:
test:
op_with_no_get_secret:
implementation: p.p
inputs:
a: 1
target_interfaces:
test:
op_with_no_get_secret:
implementation: p.p
inputs:
a: 1
"""
get_secret_mock = MagicMock(return_value='secret_value')
parsed = prepare_deployment_plan(self.parse_1_3(no_secrets_yaml),
get_secret_mock)
self.assertFalse(get_secret_mock.called)
self.assertFalse(hasattr(parsed, 'secrets'))
class TestNotFoundException(Exception):
http_code = 404
class TestEvaluateFunctions(AbstractTestParser):
def test_evaluate_functions(self):
payload = {
'a': {'get_secret': 'id_a'},
'b': {'get_secret': 'id_b'},
'c': {'get_secret': 'id_c'},
'd': {'get_secret': 'id_d'},
'f': {'concat': [
{'get_secret': 'id_a'},
{'get_secret': 'id_b'},
{'get_secret': 'id_c'},
{'get_secret': 'id_d'}
]}
}
functions.evaluate_functions(payload,
{},
None,
None,
None,
self._get_secret_mock)
self.assertEqual(payload['a'], 'id_a_value')
self.assertEqual(payload['b'], 'id_b_value')
self.assertEqual(payload['c'], 'id_c_value')
self.assertEqual(payload['d'], 'id_d_value')
self.assertEqual(payload['f'], 'id_a_valueid_b_value'
'id_c_valueid_d_value')
def test_node_template_properties_simple(self):
yaml = """
node_types:
type:
properties:
property: {}
node_templates:
node:
type: type
properties:
property: { get_secret: secret }
"""
parsed = prepare_deployment_plan(self.parse_1_3(yaml),
self._get_secret_mock)
node = self.get_node_by_name(parsed, 'node')
self.assertEqual({'get_secret': 'secret'},
node['properties']['property'])
functions.evaluate_functions(
parsed,
{},
None,
None,
None,
self._get_secret_mock
)
self.assertEqual(node['properties']['property'], 'secret_value')
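# Illustrative sketch, not part of the original test suite: one plausible shape for
# the secret-resolution callback that `self._get_secret_mock` stands in for above.
# Judging by the assertions ({'get_secret': 'id_a'} -> 'id_a_value',
# {'get_secret': 'secret'} -> 'secret_value'), a callable along these lines would
# produce matching values; note that, depending on the parser version, the callback
# may instead need to return an object exposing a `.value` attribute.
def _example_get_secret(secret_id):
    """Resolve a secret id to a deterministic fake value for tests."""
    return '{0}_value'.format(secret_id)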
|
from api.utils.run_utils import run_model_job
from django.db import models
from django.db.models.signals import pre_delete
from django.dispatch import receiver
import django_rq
from rq.job import Job
class ModelRun(models.Model):
INITIALIZED = "initialized"
STARTED = "started"
FAILED = "failed"
FINISHED = "finished"
    STATE_CHOICES = [
        (INITIALIZED, INITIALIZED),
        (STARTED, STARTED),
        (FAILED, FAILED),
        (FINISHED, FINISHED)]
name = models.CharField(max_length=100)
created_at = models.DateTimeField(auto_now_add=True)
finished_at = models.DateTimeField(default=None, blank=True, null=True)
state = models.CharField(
max_length=20,
choices=STATE_CHOICES,
default=INITIALIZED)
job_id = models.CharField(max_length=38, default="")
cpu_limit = models.CharField(max_length=20, default="12000m")
num_workers = models.IntegerField(default=2)
network_bandwidth_limit = models.IntegerField(default=10000)
job_metadata = {}
def start(self):
"""Saves the model run and starts the RQ job
Raises:
ValueError -- Raised if state is not initialized
"""
if self.job_id != "" or self.state != self.INITIALIZED:
raise ValueError("Wrong State")
self.save()
run_model_job.delay(self)
@receiver(pre_delete, sender=ModelRun, dispatch_uid='run_delete_job')
def remove_run_job(sender, instance, using, **kwargs):
"""Signal to delete job when ModelRun is deleted"""
redis_conn = django_rq.get_connection()
job = Job.fetch(instance.job_id, redis_conn)
job.delete()
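# Illustrative sketch, not part of the original module: the intended lifecycle of a
# run, assuming a configured Django project and an RQ worker consuming the queue.
# The field values below are placeholders.
def _example_model_run_lifecycle():
    run = ModelRun(name="baseline-run", num_workers=2)
    run.save()   # state defaults to ModelRun.INITIALIZED and job_id stays ""
    run.start()  # re-saves the run and enqueues run_model_job(run) via RQ
    # start() refuses to run twice: once job_id or state has been updated (which
    # the enqueued job is expected to do), a further call raises ValueError.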
|
"""Base Estimator class."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import copy
import os
import tempfile
import numpy as np
import six
from tensorflow.core.framework import summary_pb2
from tensorflow.core.protobuf import config_pb2
from tensorflow.python.client import session as tf_session
from tensorflow.python.estimator import model_fn as model_fn_lib
from tensorflow.python.estimator import run_config
from tensorflow.python.estimator.export.export import build_all_signature_defs
from tensorflow.python.estimator.export.export import get_timestamped_export_dir
from tensorflow.python.framework import ops
from tensorflow.python.framework import random_seed
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import metrics as metrics_lib
from tensorflow.python.platform import gfile
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.saved_model import builder as saved_model_builder
from tensorflow.python.saved_model import tag_constants
from tensorflow.python.summary.writer import writer_cache
from tensorflow.python.training import evaluation
from tensorflow.python.training import monitored_session
from tensorflow.python.training import saver
from tensorflow.python.training import training
from tensorflow.python.util import compat
from tensorflow.python.util import tf_decorator
from tensorflow.python.util import tf_inspect
_VALID_MODEL_FN_ARGS = set(
['features', 'labels', 'mode', 'params', 'config'])
class Estimator(object):
"""Estimator class to train and evaluate TensorFlow models.
The `Estimator` object wraps a model which is specified by a `model_fn`,
which, given inputs and a number of other parameters, returns the ops
necessary to perform training, evaluation, or predictions.
All outputs (checkpoints, event files, etc.) are written to `model_dir`, or a
subdirectory thereof. If `model_dir` is not set, a temporary directory is
used.
  The `config` argument can be passed a `RunConfig` object containing information
about the execution environment. It is passed on to the `model_fn`, if the
`model_fn` has a parameter named "config" (and input functions in the same
manner). If the `config` parameter is not passed, it is instantiated by the
`Estimator`. Not passing config means that defaults useful for local execution
are used. `Estimator` makes config available to the model (for instance, to
allow specialization based on the number of workers available), and also uses
some of its fields to control internals, especially regarding checkpointing.
The `params` argument contains hyperparameters. It is passed to the
`model_fn`, if the `model_fn` has a parameter named "params", and to the input
functions in the same manner. `Estimator` only passes params along, it does
not inspect it. The structure of `params` is therefore entirely up to the
developer.
None of `Estimator`'s methods can be overridden in subclasses (its
constructor enforces this). Subclasses should use `model_fn` to configure
the base class, and may add methods implementing specialized functionality.
"""
def __init__(self, model_fn, model_dir=None, config=None, params=None):
"""Constructs an `Estimator` instance.
Args:
model_fn: Model function. Follows the signature:
* Args:
* `features`: This is the first item returned from the `input_fn`
           passed to `train`, `evaluate`, and `predict`. This should be a
single `Tensor` or `dict` of same.
* `labels`: This is the second item returned from the `input_fn`
           passed to `train`, `evaluate`, and `predict`. This should be a
single `Tensor` or `dict` of same (for multi-head models). If
mode is `ModeKeys.PREDICT`, `labels=None` will be passed. If
the `model_fn`'s signature does not accept `mode`, the
`model_fn` must still be able to handle `labels=None`.
        * `mode`: Optional. Specifies if this is training, evaluation or
prediction. See `ModeKeys`.
* `params`: Optional `dict` of hyperparameters. Will receive what
           is passed to Estimator in the `params` parameter. This allows
           configuring Estimators from hyperparameter tuning.
* `config`: Optional configuration object. Will receive what is passed
to Estimator in `config` parameter, or the default `config`.
Allows updating things in your model_fn based on configuration
such as `num_ps_replicas`, or `model_dir`.
* Returns:
`EstimatorSpec`
      model_dir: Directory to save model parameters, graph, etc. This can
        also be used to load checkpoints from the directory into an estimator to
continue training a previously saved model. If `None`, the model_dir in
`config` will be used if set. If both are set, they must be same. If
both are `None`, a temporary directory will be used.
config: Configuration object.
params: `dict` of hyper parameters that will be passed into `model_fn`.
Keys are names of parameters, values are basic python types.
Raises:
ValueError: parameters of `model_fn` don't match `params`.
ValueError: if this is called via a subclass and if that class overrides
a member of `Estimator`.
"""
Estimator._assert_members_are_not_overridden(self)
if config is None:
self._config = run_config.RunConfig()
logging.info('Using default config.')
else:
if not isinstance(config, run_config.RunConfig):
raise ValueError(
'config must be an instance of RunConfig, but provided %s.' %
config)
self._config = config
# Model directory.
if (model_dir is not None) and (self._config.model_dir is not None):
if model_dir != self._config.model_dir:
# pylint: disable=g-doc-exception
raise ValueError(
"model_dir are set both in constructor and RunConfig, but with "
"different values. In constructor: '{}', in RunConfig: "
"'{}' ".format(model_dir, self._config.model_dir))
# pylint: enable=g-doc-exception
self._model_dir = model_dir or self._config.model_dir
if self._model_dir is None:
self._model_dir = tempfile.mkdtemp()
logging.warning('Using temporary folder as model directory: %s',
self._model_dir)
if self._config.model_dir is None:
self._config = self._config.replace(model_dir=self._model_dir)
logging.info('Using config: %s', str(vars(self._config)))
if self._config.session_config is None:
self._session_config = config_pb2.ConfigProto(allow_soft_placement=True)
else:
self._session_config = self._config.session_config
self._device_fn = _get_replica_device_setter(self._config)
if model_fn is None:
raise ValueError('model_fn must be provided to Estimator.')
_verify_model_fn_args(model_fn, params)
self._model_fn = model_fn
self._params = params or {}
@property
def model_dir(self):
return self._model_dir
@property
def config(self):
return copy.deepcopy(self._config)
@property
def params(self):
return copy.deepcopy(self._params)
def train(self, input_fn, hooks=None, steps=None, max_steps=None):
"""Trains a model given training data input_fn.
Args:
input_fn: Input function returning a tuple of:
features - `Tensor` or dictionary of string feature name to `Tensor`.
labels - `Tensor` or dictionary of `Tensor` with labels.
hooks: List of `SessionRunHook` subclass instances. Used for callbacks
inside the training loop.
steps: Number of steps for which to train model. If `None`, train forever
or train until input_fn generates the `OutOfRange` or `StopIteration`
        error. 'steps' works incrementally. If you call train(steps=10) twice,
        training occurs for a total of 20 steps. If an `OutOfRange` or
        `StopIteration` error occurs in the middle, training stops before 20
        steps. If you don't want incremental behaviour, please set
`max_steps` instead. If set, `max_steps` must be `None`.
max_steps: Number of total steps for which to train model. If `None`,
train forever or train until input_fn generates the `OutOfRange` or
`StopIteration` error. If set, `steps` must be `None`. If `OutOfRange`
or `StopIteration` error occurs in the middle, training stops before
`max_steps` steps.
Two calls to `train(steps=100)` means 200 training
iterations. On the other hand, two calls to `train(max_steps=100)` means
that the second call will not do any iteration since first call did
all 100 steps.
Returns:
`self`, for chaining.
Raises:
ValueError: If both `steps` and `max_steps` are not `None`.
ValueError: If either `steps` or `max_steps` is <= 0.
"""
if (steps is not None) and (max_steps is not None):
raise ValueError('Can not provide both steps and max_steps.')
if steps is not None and steps <= 0:
raise ValueError('Must specify steps > 0, given: {}'.format(steps))
if max_steps is not None and max_steps <= 0:
raise ValueError(
'Must specify max_steps > 0, given: {}'.format(max_steps))
if max_steps is not None:
start_step = _load_global_step_from_checkpoint_dir(self._model_dir)
if max_steps <= start_step:
        logging.info('Skipping training since max_steps has already been reached.')
return self
hooks = _check_hooks_type(hooks)
if steps is not None or max_steps is not None:
hooks.append(training.StopAtStepHook(steps, max_steps))
loss = self._train_model(input_fn=input_fn, hooks=hooks)
logging.info('Loss for final step: %s.', loss)
return self
def evaluate(self, input_fn, steps=None, hooks=None, checkpoint_path=None,
name=None):
"""Evaluates the model given evaluation data input_fn.
For each step, calls `input_fn`, which returns one batch of data.
Evaluates until:
- `steps` batches are processed, or
- `input_fn` raises an end-of-input exception (`OutOfRangeError` or
`StopIteration`).
Args:
input_fn: Input function returning a tuple of:
features - Dictionary of string feature name to `Tensor` or
`SparseTensor`.
labels - `Tensor` or dictionary of `Tensor` with labels.
steps: Number of steps for which to evaluate model. If `None`, evaluates
until `input_fn` raises an end-of-input exception.
hooks: List of `SessionRunHook` subclass instances. Used for callbacks
inside the evaluation call.
checkpoint_path: Path of a specific checkpoint to evaluate. If `None`, the
latest checkpoint in `model_dir` is used.
name: Name of the evaluation if user needs to run multiple evaluations on
different data sets, such as on training data vs test data. Metrics for
different evaluations are saved in separate folders, and appear
separately in tensorboard.
Returns:
A dict containing the evaluation metrics specified in `model_fn` keyed by
name, as well as an entry `global_step` which contains the value of the
global step for which this evaluation was performed.
Raises:
ValueError: If `steps <= 0`.
      ValueError: If no model has been trained, namely if `model_dir`, or the
        given `checkpoint_path`, is empty.
"""
hooks = _check_hooks_type(hooks)
if steps is not None:
if steps <= 0:
raise ValueError('Must specify steps > 0, given: {}'.format(steps))
hooks.append(evaluation._StopAfterNEvalsHook( # pylint: disable=protected-access
num_evals=steps))
return self._evaluate_model(
input_fn=input_fn,
hooks=hooks,
checkpoint_path=checkpoint_path,
name=name)
def predict(self,
input_fn,
predict_keys=None,
hooks=None,
checkpoint_path=None):
"""Returns predictions for given features.
Args:
input_fn: Input function returning features which is a dictionary of
string feature name to `Tensor` or `SparseTensor`. If it returns a
tuple, first item is extracted as features. Prediction continues until
`input_fn` raises an end-of-input exception (`OutOfRangeError` or
`StopIteration`).
predict_keys: list of `str`, name of the keys to predict. It is used if
the `EstimatorSpec.predictions` is a `dict`. If `predict_keys` is used
        then the rest of the predictions will be filtered from the dictionary. If
`None`, returns all.
hooks: List of `SessionRunHook` subclass instances. Used for callbacks
inside the prediction call.
checkpoint_path: Path of a specific checkpoint to predict. If `None`, the
latest checkpoint in `model_dir` is used.
Yields:
Evaluated values of `predictions` tensors.
Raises:
ValueError: Could not find a trained model in model_dir.
      ValueError: If the batch length of predictions is not the same.
ValueError: If there is a conflict between `predict_keys` and
`predictions`. For example if `predict_keys` is not `None` but
`EstimatorSpec.predictions` is not a `dict`.
"""
hooks = _check_hooks_type(hooks)
# Check that model has been trained.
if not checkpoint_path:
checkpoint_path = saver.latest_checkpoint(self._model_dir)
if not checkpoint_path:
raise ValueError('Could not find trained model in model_dir: {}.'.format(
self._model_dir))
with ops.Graph().as_default() as g:
random_seed.set_random_seed(self._config.tf_random_seed)
training.create_global_step(g)
features = self._get_features_from_input_fn(input_fn)
estimator_spec = self._call_model_fn(features, None,
model_fn_lib.ModeKeys.PREDICT)
predictions = self._extract_keys(estimator_spec.predictions, predict_keys)
with training.MonitoredSession(
session_creator=training.ChiefSessionCreator(
checkpoint_filename_with_path=checkpoint_path,
scaffold=estimator_spec.scaffold,
config=self._session_config),
hooks=hooks) as mon_sess:
while not mon_sess.should_stop():
preds_evaluated = mon_sess.run(predictions)
if not isinstance(predictions, dict):
for pred in preds_evaluated:
yield pred
else:
for i in range(self._extract_batch_length(preds_evaluated)):
yield {
key: value[i]
for key, value in six.iteritems(preds_evaluated)
}
def _assert_members_are_not_overridden(self):
estimator_members = set([m for m in Estimator.__dict__.keys()
if not m.startswith('__')])
subclass_members = set(self.__class__.__dict__.keys())
common_members = estimator_members & subclass_members
    overridden_members = [m for m in common_members
                          if Estimator.__dict__[m] != self.__class__.__dict__[m]]
    if overridden_members:
      raise ValueError(
          'Subclasses of Estimator cannot override members of Estimator. '
          '{} does override {}'.format(self.__class__, overridden_members))
def export_savedmodel(
self, export_dir_base, serving_input_receiver_fn,
assets_extra=None,
as_text=False,
checkpoint_path=None):
"""Exports inference graph as a SavedModel into given dir.
This method builds a new graph by first calling the
serving_input_receiver_fn to obtain feature `Tensor`s, and then calling
this `Estimator`'s model_fn to generate the model graph based on those
features. It restores the given checkpoint (or, lacking that, the most
recent checkpoint) into this graph in a fresh session. Finally it creates
a timestamped export directory below the given export_dir_base, and writes
a `SavedModel` into it containing a single `MetaGraphDef` saved from this
session.
The exported `MetaGraphDef` will provide one `SignatureDef` for each
element of the export_outputs dict returned from the model_fn, named using
the same keys. One of these keys is always
signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY, indicating which
signature will be served when a serving request does not specify one.
For each signature, the outputs are provided by the corresponding
`ExportOutput`s, and the inputs are always the input receivers provided by
the serving_input_receiver_fn.
    Extra assets may be written into the SavedModel via the assets_extra
argument. This should be a dict, where each key gives a destination path
(including the filename) relative to the assets.extra directory. The
corresponding value gives the full path of the source file to be copied.
For example, the simple case of copying a single file without renaming it
is specified as `{'my_asset_file.txt': '/path/to/my_asset_file.txt'}`.
Args:
export_dir_base: A string containing a directory in which to create
timestamped subdirectories containing exported SavedModels.
serving_input_receiver_fn: A function that takes no argument and
returns a `ServingInputReceiver`.
assets_extra: A dict specifying how to populate the assets.extra directory
within the exported SavedModel, or `None` if no extra assets are needed.
as_text: whether to write the SavedModel proto in text format.
checkpoint_path: The checkpoint path to export. If `None` (the default),
the most recent checkpoint found within the model directory is chosen.
Returns:
The string path to the exported directory.
Raises:
ValueError: if no serving_input_receiver_fn is provided, no export_outputs
are provided, or no checkpoint can be found.
"""
if serving_input_receiver_fn is None:
raise ValueError('serving_input_receiver_fn must be defined.')
with ops.Graph().as_default() as g:
training.create_global_step(g)
random_seed.set_random_seed(self._config.tf_random_seed)
serving_input_receiver = serving_input_receiver_fn()
# Call the model_fn and collect the export_outputs.
estimator_spec = self._call_model_fn(
features=serving_input_receiver.features,
labels=None,
mode=model_fn_lib.ModeKeys.PREDICT)
# Build the SignatureDefs from receivers and all outputs
signature_def_map = build_all_signature_defs(
serving_input_receiver.receiver_tensors,
estimator_spec.export_outputs)
if not checkpoint_path:
# Locate the latest checkpoint
checkpoint_path = saver.latest_checkpoint(self._model_dir)
if not checkpoint_path:
raise ValueError("Couldn't find trained model at %s." % self._model_dir)
export_dir = get_timestamped_export_dir(export_dir_base)
# TODO(soergel): Consider whether MonitoredSession makes sense here
with tf_session.Session() as session:
saver_for_restore = estimator_spec.scaffold.saver or saver.Saver(
sharded=True)
saver_for_restore.restore(session, checkpoint_path)
# TODO(b/36111876): replace legacy_init_op with main_op mechanism
# pylint: disable=protected-access
local_init_op = (
estimator_spec.scaffold.local_init_op or
monitored_session.Scaffold._default_local_init_op())
# pylint: enable=protected-access
# Perform the export
builder = saved_model_builder.SavedModelBuilder(export_dir)
builder.add_meta_graph_and_variables(
session, [tag_constants.SERVING],
signature_def_map=signature_def_map,
assets_collection=ops.get_collection(
ops.GraphKeys.ASSET_FILEPATHS),
legacy_init_op=local_init_op)
builder.save(as_text)
# Add the extra assets
if assets_extra:
assets_extra_path = os.path.join(compat.as_bytes(export_dir),
compat.as_bytes('assets.extra'))
for dest_relative, source in assets_extra.items():
dest_absolute = os.path.join(compat.as_bytes(assets_extra_path),
compat.as_bytes(dest_relative))
dest_path = os.path.dirname(dest_absolute)
gfile.MakeDirs(dest_path)
gfile.Copy(source, dest_absolute)
return export_dir
def _get_features_from_input_fn(self, input_fn):
result = input_fn()
if not ops.get_default_graph().get_collection(ops.GraphKeys.QUEUE_RUNNERS):
logging.warning('Input graph does not contain a QueueRunner. '
'That means predict yields forever. '
'This is probably a mistake.')
if isinstance(result, (list, tuple)):
return result[0]
return result
def _extract_batch_length(self, preds_evaluated):
"""Extracts batch length of predictions."""
batch_length = None
for key, value in six.iteritems(preds_evaluated):
batch_length = batch_length or value.shape[0]
if value.shape[0] != batch_length:
        raise ValueError('Batch length of predictions should be the same. %s has '
                         'different batch length than others.' % key)
return batch_length
def _extract_keys(self, predictions, predict_keys):
"""Extracts `predict_keys` from `predictions`."""
if not predict_keys:
return predictions
if not isinstance(predictions, dict):
raise ValueError(
'predict_keys argument is not valid in case of non-dict predictions.')
existing_keys = predictions.keys()
predictions = {
key: value
for key, value in six.iteritems(predictions) if key in predict_keys
}
if not predictions:
raise ValueError('Expected to run at least one output from %s, '
'provided %s.' % (existing_keys, predict_keys))
return predictions
def _call_model_fn(self, features, labels, mode):
"""Calls model function.
Args:
features: features dict.
labels: labels dict.
mode: ModeKeys
Returns:
An `EstimatorSpec` object.
Raises:
ValueError: if model_fn returns invalid objects.
"""
model_fn_args = _model_fn_args(self._model_fn)
kwargs = {}
if 'mode' in model_fn_args:
kwargs['mode'] = mode
if 'params' in model_fn_args:
kwargs['params'] = self.params
if 'config' in model_fn_args:
kwargs['config'] = self.config
model_fn_results = self._model_fn(
features=features, labels=labels, **kwargs)
if not isinstance(model_fn_results, model_fn_lib.EstimatorSpec):
raise ValueError('model_fn should return an EstimatorSpec.')
return model_fn_results
def _train_model(self, input_fn, hooks):
all_hooks = []
with ops.Graph().as_default() as g, g.device(self._device_fn):
random_seed.set_random_seed(self._config.tf_random_seed)
global_step_tensor = training.create_global_step(g)
with ops.device('/cpu:0'):
features, labels = input_fn()
estimator_spec = self._call_model_fn(features, labels,
model_fn_lib.ModeKeys.TRAIN)
ops.add_to_collection(ops.GraphKeys.LOSSES, estimator_spec.loss)
all_hooks.extend([
training.NanTensorHook(estimator_spec.loss),
training.LoggingTensorHook(
{
'loss': estimator_spec.loss,
'step': global_step_tensor
},
every_n_iter=100)
])
all_hooks.extend(hooks)
all_hooks.extend(estimator_spec.training_hooks)
if not (estimator_spec.scaffold.saver or
ops.get_collection(ops.GraphKeys.SAVERS)):
ops.add_to_collection(ops.GraphKeys.SAVERS,
training.Saver(
sharded=True,
max_to_keep=self._config.keep_checkpoint_max,
defer_build=True,
save_relative_paths=True))
chief_hooks = []
if (self._config.save_checkpoints_secs or
self._config.save_checkpoints_steps):
saver_hook_exists = any([
isinstance(h, training.CheckpointSaverHook)
for h in (all_hooks + chief_hooks +
list(estimator_spec.training_chief_hooks))
])
if not saver_hook_exists:
chief_hooks = [
training.CheckpointSaverHook(
self._model_dir,
save_secs=self._config.save_checkpoints_secs,
save_steps=self._config.save_checkpoints_steps,
scaffold=estimator_spec.scaffold)
]
with training.MonitoredTrainingSession(
master=self._config.master,
is_chief=self._config.is_chief,
checkpoint_dir=self._model_dir,
scaffold=estimator_spec.scaffold,
hooks=all_hooks,
chief_only_hooks=(
tuple(chief_hooks) + tuple(estimator_spec.training_chief_hooks)),
save_checkpoint_secs=0, # Saving is handled by a hook.
save_summaries_steps=self._config.save_summary_steps,
config=self._session_config) as mon_sess:
loss = None
while not mon_sess.should_stop():
_, loss = mon_sess.run([estimator_spec.train_op, estimator_spec.loss])
return loss
def _evaluate_model(self,
input_fn,
hooks=None,
checkpoint_path=None,
name=''):
"""Evaluates the model using the training.evaluation library."""
# Check that model has been trained (if nothing has been set explicitly).
if not checkpoint_path:
latest_path = saver.latest_checkpoint(self._model_dir)
if not latest_path:
raise ValueError('Could not find trained model in model_dir: {}.'.
format(self._model_dir))
checkpoint_path = latest_path
# Setup output directory.
eval_dir = os.path.join(self._model_dir, 'eval' if not name else
'eval_' + name)
with ops.Graph().as_default() as g:
random_seed.set_random_seed(self._config.tf_random_seed)
global_step_tensor = training.create_global_step(g)
features, labels = input_fn()
estimator_spec = self._call_model_fn(
features, labels, model_fn_lib.ModeKeys.EVAL)
if model_fn_lib.MetricKeys.LOSS in estimator_spec.eval_metric_ops:
raise ValueError(
'Metric with name "%s" is not allowed, because Estimator ' % (
model_fn_lib.MetricKeys.LOSS) +
'already defines a default metric with the same name.')
estimator_spec.eval_metric_ops[
model_fn_lib.MetricKeys.LOSS] = metrics_lib.mean(estimator_spec.loss)
update_op, eval_dict = _extract_metric_update_ops(
estimator_spec.eval_metric_ops)
if ops.GraphKeys.GLOBAL_STEP in eval_dict:
raise ValueError(
'Metric with name `global_step` is not allowed, because Estimator '
'already defines a default metric with the same name.')
eval_dict[ops.GraphKeys.GLOBAL_STEP] = global_step_tensor
eval_results = evaluation._evaluate_once( # pylint: disable=protected-access
checkpoint_path=checkpoint_path,
master=self._config.evaluation_master,
scaffold=estimator_spec.scaffold,
eval_ops=update_op,
final_ops=eval_dict,
hooks=hooks,
config=self._session_config)
_write_dict_to_summary(
output_dir=eval_dir,
dictionary=eval_results,
current_global_step=eval_results[ops.GraphKeys.GLOBAL_STEP])
return eval_results
def _check_hooks_type(hooks):
"""Returns hooks if all are SessionRunHook, raises TypeError otherwise."""
hooks = list(hooks or [])
for h in hooks:
if not isinstance(h, training.SessionRunHook):
raise TypeError('Hooks must be a SessionRunHook, given: {}'.format(h))
return hooks
def _get_replica_device_setter(config):
"""Creates a replica device setter if required as a default device_fn.
  `Estimator` uses ReplicaDeviceSetter as a default device placer. It sets the
  distribution-related arguments, such as the number of ps_replicas, based on the
  given config.
Args:
config: A `RunConfig` instance.
Returns:
A replica device setter, or None.
"""
ps_ops = [
'Variable', 'VariableV2', 'AutoReloadVariable', 'MutableHashTable',
'MutableHashTableOfTensors', 'MutableDenseHashTable'
]
if config.task_type:
worker_device = '/job:%s/task:%d' % (config.task_type, config.task_id)
else:
worker_device = '/job:worker'
if config.num_ps_replicas > 0:
return training.replica_device_setter(
ps_tasks=config.num_ps_replicas,
worker_device=worker_device,
merge_devices=True,
ps_ops=ps_ops,
cluster=config.cluster_spec)
else:
return None
def _model_fn_args(fn):
"""Get argument names for function-like object.
Args:
fn: Function, or function-like object (e.g., result of `functools.partial`).
Returns:
`tuple` of string argument names.
Raises:
ValueError: if partial function has positionally bound arguments
"""
_, fn = tf_decorator.unwrap(fn)
if hasattr(fn, 'func') and hasattr(fn, 'keywords') and hasattr(fn, 'args'):
# Handle functools.partial and similar objects.
return tuple([
arg for arg in tf_inspect.getargspec(fn.func).args[len(fn.args):]
if arg not in set(fn.keywords.keys())
])
# Handle function.
return tuple(tf_inspect.getargspec(fn).args)
def _verify_model_fn_args(model_fn, params):
"""Verifies model fn arguments."""
args = set(_model_fn_args(model_fn))
if 'features' not in args:
raise ValueError('model_fn (%s) must include features argument.' % model_fn)
if 'labels' not in args:
raise ValueError('model_fn (%s) must include labels argument.' % model_fn)
if params is not None and 'params' not in args:
raise ValueError('model_fn (%s) does not include params argument, '
'but params (%s) is passed to Estimator.' % (model_fn,
params))
if params is None and 'params' in args:
logging.warning('Estimator\'s model_fn (%s) includes params '
'argument, but params are not passed to Estimator.',
model_fn)
if tf_inspect.ismethod(model_fn):
if 'self' in args:
args.remove('self')
non_valid_args = list(args - _VALID_MODEL_FN_ARGS)
if non_valid_args:
    raise ValueError('model_fn (%s) has the following unexpected args: %s' %
(model_fn, non_valid_args))
def _load_global_step_from_checkpoint_dir(checkpoint_dir):
try:
checkpoint_reader = training.NewCheckpointReader(
training.latest_checkpoint(checkpoint_dir))
return checkpoint_reader.get_tensor(ops.GraphKeys.GLOBAL_STEP)
except: # pylint: disable=bare-except
return 0
def _extract_metric_update_ops(eval_dict):
"""Separate update operations from metric value operations."""
update_ops = []
value_ops = {}
# Sort metrics lexicographically so graph is identical every time.
for name, metric_ops in sorted(six.iteritems(eval_dict)):
value_ops[name] = metric_ops[0]
update_ops.append(metric_ops[1])
if update_ops:
update_op = control_flow_ops.group(*update_ops)
else:
update_op = None
return update_op, value_ops
def _dict_to_str(dictionary):
"""Get a `str` representation of a `dict`.
Args:
dictionary: The `dict` to be represented as `str`.
Returns:
A `str` representing the `dictionary`.
"""
return ', '.join('%s = %s' % (k, v)
for k, v in sorted(six.iteritems(dictionary)))
def _write_dict_to_summary(output_dir,
dictionary,
current_global_step):
"""Writes a `dict` into summary file in given output directory.
Args:
output_dir: `str`, directory to write the summary file in.
dictionary: the `dict` to be written to summary file.
current_global_step: `int`, the current global step.
"""
logging.info('Saving dict for global step %d: %s', current_global_step,
_dict_to_str(dictionary))
summary_writer = writer_cache.FileWriterCache.get(output_dir)
summary_proto = summary_pb2.Summary()
for key in dictionary:
if dictionary[key] is None:
continue
if key == 'global_step':
continue
value = summary_proto.value.add()
value.tag = key
if (isinstance(dictionary[key], np.float32) or
isinstance(dictionary[key], float)):
value.simple_value = float(dictionary[key])
elif (isinstance(dictionary[key], np.int64) or
isinstance(dictionary[key], np.int32) or
isinstance(dictionary[key], int)):
value.simple_value = int(dictionary[key])
else:
logging.warn(
'Skipping summary for %s, must be a float, np.float32, np.int64, np.int32 or int.',
key)
summary_writer.add_summary(summary_proto, current_global_step)
summary_writer.flush()
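# Illustrative sketch, not part of this module: a minimal `model_fn` and the
# train/evaluate round trip described in the `Estimator` docstring above. The toy
# model (a single trainable bias fitted to a constant target) and the constant
# input_fn are assumptions chosen only to keep the example self-contained.
if __name__ == '__main__':
  from tensorflow.python.framework import constant_op
  from tensorflow.python.ops import init_ops
  from tensorflow.python.ops import math_ops
  from tensorflow.python.ops import variable_scope

  def _toy_model_fn(features, labels, mode):
    # A single scalar variable is fitted so that predictions approach the labels.
    bias = variable_scope.get_variable(
        'bias', shape=[], initializer=init_ops.zeros_initializer())
    predictions = features * 0.0 + bias
    loss = None
    train_op = None
    if labels is not None:
      loss = math_ops.reduce_mean(
          math_ops.squared_difference(predictions, labels))
    if mode == model_fn_lib.ModeKeys.TRAIN:
      optimizer = training.GradientDescentOptimizer(learning_rate=0.1)
      train_op = optimizer.minimize(loss, global_step=training.get_global_step())
    return model_fn_lib.EstimatorSpec(
        mode=mode, predictions=predictions, loss=loss, train_op=train_op)

  def _toy_input_fn():
    features = constant_op.constant([[0.], [0.], [0.]])
    labels = constant_op.constant([[1.], [1.], [1.]])
    return features, labels

  toy_estimator = Estimator(model_fn=_toy_model_fn)
  toy_estimator.train(input_fn=_toy_input_fn, steps=5)
  print(toy_estimator.evaluate(input_fn=_toy_input_fn, steps=1))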
|
try:
import xml.etree.cElementTree as ET
except ImportError:
import xml.etree.ElementTree as ET
from jinja2 import Environment
from jinja2 import FileSystemLoader
from hybridLogger import hybridLogger
from exception import *
import os
import yaml
class createJuniperConfig(hybridLogger):
def __init__(self, **kwargs):
logLevel = kwargs.get('logLevel', 'INFO')
self.log = super(createJuniperConfig, self).log(level=logLevel, name=createJuniperConfig.__name__)
_requiredArgs = ['awsVpnConfigXml', 'awsCidrBlock', 'custRoutableIntf']
try:
self.awsVpnConfigXml = kwargs['awsVpnConfigXml']
self.awsCidrBlock = kwargs['awsCidrBlock']
self.custRoutableIntf = kwargs['custRoutableIntf']
except KeyError as e:
raise ArguementError(_requiredArgs, createJuniperConfig.__name__)
self.juniperConfig = {}
self.juniperConfig['aws_private_cidr'] = self.awsCidrBlock
self.juniperConfig['routable_interface'] = self.custRoutableIntf
self.juniperConfig['ipsec_tunnels'] = []
def parseXml(self, **kwargs):
try:
awsVpnRoot = ET.fromstring(self.awsVpnConfigXml)
except Exception as e:
self.log.error("Function: parseXml Message: Error while converting xml string to xml element", exc_info=True)
return False
self.juniperConfig['vpn_connection_id'] = awsVpnRoot.attrib['id']
tunnelNum = 0
for ipsecTun in awsVpnRoot.findall('ipsec_tunnel'):
tunnelNum += 1
tunnelInfo = {}
tunnelInfo['id'] = tunnelNum
tunnelInfo['IKE'] = {}
tunnelInfo['IKE']['authentication_protocol'] = ipsecTun.find('ike/authentication_protocol').text
tunnelInfo['IKE']['encryption_protocol'] = ipsecTun.find('ike/encryption_protocol').text
tunnelInfo['IKE']['lifetime'] = ipsecTun.find('ike/lifetime').text
tunnelInfo['IKE']['perfect_forward_secrecy'] = ipsecTun.find('ike/perfect_forward_secrecy').text
tunnelInfo['IKE']['mode'] = ipsecTun.find('ike/mode').text
tunnelInfo['IKE']['pre_shared_key'] = ipsecTun.find('ike/pre_shared_key').text
tunnelInfo['IPsec'] = {}
tunnelInfo['IPsec']['protocol'] = ipsecTun.find('ipsec/protocol').text
tunnelInfo['IPsec']['authentication_protocol'] = ipsecTun.find('ipsec/authentication_protocol').text
tunnelInfo['IPsec']['encryption_protocol'] = ipsecTun.find('ipsec/encryption_protocol').text
tunnelInfo['IPsec']['lifetime'] = ipsecTun.find('ipsec/lifetime').text
tunnelInfo['IPsec']['perfect_forward_secrecy'] = ipsecTun.find('ipsec/perfect_forward_secrecy').text
tunnelInfo['Tunnel'] = {}
tunnelInfo['Tunnel']['bind_interface'] = "st0." + str(tunnelNum)
tunnelInfo['Tunnel']['tcp_mss_adjustment'] = ipsecTun.find('ipsec/tcp_mss_adjustment').text
tunnelInfo['Tunnel']['vpn_gateway_outside_address'] = ipsecTun.find('vpn_gateway/tunnel_outside_address/ip_address').text
tunnelInfo['Tunnel']['vpn_gateway_inside_address'] = ipsecTun.find('vpn_gateway/tunnel_inside_address/ip_address').text
tunnelInfo['Tunnel']['customer_gateway_inside_address'] = ipsecTun.find('customer_gateway/tunnel_inside_address/ip_address').text
self.juniperConfig['ipsec_tunnels'].append(tunnelInfo)
return self.juniperConfig
def outputConfig(self, **kwargs):
_requiredArgs = ['templatePath', 'outputConfFile']
try:
templatePath = kwargs['templatePath']
outputConfFile = kwargs['outputConfFile']
except KeyError as e:
raise ArguementError(_requiredArgs, createJuniperConfig.__name__)
try:
directory = templatePath.rpartition('/')[0]
fileName = templatePath.rpartition('/')[2]
except IndexError:
self.log.error("Function: outputConfig Message: Does not have a correct path \nEx: /home/vagrany/vpn.j2, ./vpn.j2", exc_info=True)
return False
        if not os.path.exists(templatePath):
            raise Exception("Function: outputConfig Message: Invalid file path")
try:
env = Environment(loader=FileSystemLoader(directory))
template = env.get_template(fileName)
junosConf = template.render(self.juniperConfig)
self.log.debug("Juniper Configuration built: {0}".format(junosConf))
except Exception as e:
self.log.error("Function: outputConfig Message: Error while rendering the jinja template", exc_info=True)
return False
        with open(outputConfFile, 'w+') as opFileHandle:
            opFileHandle.write(junosConf)
self.log.info("Junos Vpn configuration are written in path: {0}".format(outputConfFile))
|
import pytest
from anchore_engine.analyzers.syft.handlers.java import save_entry
class TestJava:
@pytest.mark.parametrize(
"param",
[
pytest.param(
{
"findings": {},
"engine_entry": {
"name": "test",
},
"pkg_key": "basic-test",
"expected": {"name": "test"},
"expected_key": "basic-test",
},
id="basic-case",
),
pytest.param(
{
"findings": {},
"engine_entry": {
"name": "test",
"version": "1.0.0",
},
"pkg_key": None,
"expected": {
"name": "test",
"version": "1.0.0",
},
"expected_key": "/virtual/javapkg/test-1.0.0.jar",
},
id="no-pkgkey-no-location-case",
),
pytest.param(
{
"findings": {},
"engine_entry": {"name": "test", "latest": "1.0.1"},
"pkg_key": None,
"expected": {"name": "test", "latest": "1.0.1"},
"expected_key": "/virtual/javapkg/test-1.0.1.jar",
},
id="no-pkgkey-no-location-no-version-case",
),
pytest.param(
{
"findings": {},
"engine_entry": {"name": "test", "location": "/tmp/pkg-test"},
"pkg_key": None,
"expected": {"name": "test", "location": "/tmp/pkg-test"},
"expected_key": "/tmp/pkg-test",
},
id="no-pkgkey-location-case",
),
],
)
def test_save_entry(self, param):
findings = param["findings"]
save_entry(findings, param["engine_entry"], param["pkg_key"])
findings_key = param["expected_key"]
assert (
findings.get("package_list", {})
.get("pkgs.java", {})
.get("base", {})
.get(findings_key, {})
== param["expected"]
)
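# Behaviourally equivalent sketch, not the real implementation, inferred only from
# the parametrized cases above: the key under which an entry is filed appears to be
# the explicit pkg_key when given, otherwise the entry's location, otherwise a
# virtual jar path built from the name and version (with 'latest' as a fallback).
def _save_entry_sketch(findings, engine_entry, pkg_key=None):
    if not pkg_key:
        pkg_key = engine_entry.get("location") or "/virtual/javapkg/{0}-{1}.jar".format(
            engine_entry.get("name"),
            engine_entry.get("version") or engine_entry.get("latest"),
        )
    findings.setdefault("package_list", {}).setdefault("pkgs.java", {}).setdefault(
        "base", {}
    )[pkg_key] = engine_entry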
|
from __future__ import absolute_import
import logging
from django.conf import settings
from horizon.utils.memoized import memoized # noqa
from openstack_dashboard.api import base
from openstack_dashboard.api.neutron import NeutronAPIDictWrapper
from a10_openstack.neutron_ext.api import client as neutron_client
LOG = logging.getLogger(__name__)
class Certificate(NeutronAPIDictWrapper):
"""Wrapper for neutron Certificates"""
def __init__(self, apiresource):
super(Certificate, self).__init__(apiresource)
class CertificateBinding(NeutronAPIDictWrapper):
"""Wrapper for neutron CertificateBindings"""
def __init__(self, apiresource):
super(CertificateBinding, self).__init__(apiresource)
def neutronclient(request):
insecure = getattr(settings, 'OPENSTACK_SSL_NO_VERIFY', False)
cacert = getattr(settings, 'OPENSTACK_SSL_CACERT', None)
LOG.debug('neutronclient connection created using token "%s" and url "%s"'
% (request.user.token.id, base.url_for(request, 'network')))
LOG.debug('user_id=%(user)s, tenant_id=%(tenant)s' %
{'user': request.user.id, 'tenant': request.user.tenant_id})
c = neutron_client.Client(token=request.user.token.id,
auth_url=base.url_for(request, 'identity'),
endpoint_url=base.url_for(request, 'network'),
insecure=insecure,
ca_cert=cacert)
return c
def certificate_list(request, **params):
LOG.debug("certificates_list(): params=%s" % (params))
certificates = []
certificates = neutronclient(request).list_certificates(**params).get('certificates')
return map(Certificate, certificates)
def certificate_get(request, certificate_id, **params):
# TODO(mdurrant): Add option to get bindings w/ cert.
LOG.debug("certificate_get(): certificate_id=%s, params=%s" % (certificate_id, params))
certificate = neutronclient(request).show_certificate(certificate_id,
**params).get('certificate')
return Certificate(certificate)
def certificate_create(request, **kwargs):
"""Create specified Certificate"""
body = {"certificate": kwargs}
LOG.debug("certificate_create(): kwargs=%s,body=%s" % (kwargs, body))
certificate = neutronclient(request).create_certificate(body=body).get('certificate')
return Certificate(certificate)
def certificate_update(request, **kwargs):
body = {"certificate": kwargs}
LOG.debug("certificate_update(): kwargs=%s", (kwargs))
certificate = neutronclient(request).update_certificate(body=body).get('certificate')
return Certificate(certificate)
def certificate_delete(request, certificate_id):
LOG.debug("certificate_delete(): certificiate_id:%s" % certificate_id)
# TODO(mmd): Should this return status or do we assume it always works?
neutronclient(request).delete_certificate(certificate_id)
def certificate_bindings_list(request, **params):
LOG.debug("certificate_bindings_list(): params={}".format(params))
bindings = \
neutronclient(request).list_certificate_bindings(**params).get('certificate_bindings')
return map(CertificateBinding, bindings)
def certificate_binding_get(request, binding_id, **params):
LOG.debug("certificate_binding_get(): binding_id=%s, params=%s" % (binding_id, params))
binding = neutronclient(request).show_certificate_binding(binding_id,
**params).get('certificate_binding')
return CertificateBinding(binding)
def certificate_binding_create(request, **kwargs):
"""Binding specified Certificate ID to specified VIP ID"""
LOG.debug("certificate_binding_create(): request=%s, kwargs=%s" % (request, kwargs))
body = {'certificate_binding': kwargs}
binding = \
neutronclient(request).create_certificate_binding(body=body).get('certificate_binding')
return CertificateBinding(binding)
def certificate_binding_delete(request, binding_id):
LOG.debug("certificate_binding_delete(): binding_id=%s" % binding_id)
neutronclient(request).delete_certificate_binding(binding_id)
|
"""Test quantization on keras application models."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import inspect
import tempfile
from absl.testing import parameterized
import numpy as np
import tensorflow as tf
from tensorflow.python.keras import keras_parameterized
from tensorflow_model_optimization.python.core.quantization.keras import quantize
from tensorflow_model_optimization.python.core.quantization.keras import utils
@keras_parameterized.run_all_keras_modes(always_skip_v1=True)
class QuantizeModelsTest(tf.test.TestCase, parameterized.TestCase):
# Derived using
# `inspect.getmembers(tf.keras.applications, inspect.isfunction)`
_KERAS_APPLICATION_MODELS = [
# 'DenseNet121',
# 'DenseNet169',
# 'DenseNet201',
# 'InceptionResNetV2',
'InceptionV3',
'MobileNet',
'MobileNetV2',
# 'NASNetLarge',
# 'NASNetMobile',
'ResNet101',
# 'ResNet101V2',
'ResNet152',
# 'ResNet152V2',
'ResNet50',
# 'ResNet50V2',
# 'VGG16',
# 'VGG19',
# 'Xception'
]
_MODEL_INPUT_SHAPES = {
'InceptionV3': (75, 75, 3)
}
def _batch(self, dims, batch_size):
if dims[0] is None:
dims[0] = batch_size
return dims
def _get_model(self, model_type):
model_fn = [
y for x, y in inspect.getmembers(tf.keras.applications)
if x == model_type
][0]
input_shape = QuantizeModelsTest._MODEL_INPUT_SHAPES.get(
model_type, (32, 32, 3))
return model_fn(weights=None, input_shape=input_shape)
def _create_test_data(self, model):
x_train = np.random.randn(
*self._batch(model.input.get_shape().as_list(), 2)).astype('float32')
y_train = tf.keras.utils.to_categorical(
np.random.randint(1000, size=(2, 1)), 1000)
return x_train, y_train
def _verify_tflite(self, tflite_file, x_test, y_test):
interpreter = tf.lite.Interpreter(model_path=tflite_file)
interpreter.allocate_tensors()
input_index = interpreter.get_input_details()[0]['index']
output_index = interpreter.get_output_details()[0]['index']
for x, _ in zip(x_test, y_test):
x = x.reshape((1,) + x.shape)
interpreter.set_tensor(input_index, x)
interpreter.invoke()
interpreter.get_tensor(output_index)
@parameterized.parameters(_KERAS_APPLICATION_MODELS)
def testModelEndToEnd(self, model_type):
# 1. Check whether quantized model graph can be constructed.
model = self._get_model(model_type)
model = quantize.quantize_model(model)
# 2. Sanity check to ensure basic training on random data works.
x_train, y_train = self._create_test_data(model)
model.compile(
loss='categorical_crossentropy', optimizer='sgd', metrics=['accuracy'])
model.fit(x_train, y_train)
# 3. Ensure conversion to TFLite works.
_, tflite_file = tempfile.mkstemp('.tflite')
print('TFLite File: ', tflite_file)
with quantize.quantize_scope():
utils.convert_keras_to_tflite(model, tflite_file)
# 4. Verify input runs on converted model.
self._verify_tflite(tflite_file, x_train, y_train)
if __name__ == '__main__':
tf.test.main()
|
from datetime import datetime, timedelta
from json import dumps, loads
from random import randint
from itertools import groupby
from sqlite3 import IntegrityError
from hashlib import sha1
from fcsite import models
from fcsite.models import schedules as scheds
from fcsite.models import stats
from fcsite.utils import sanitize_html
SPECIAL_USER = -1
SEX_MALE = 1
SEX_FEMALE = 2
PERM_ADMIN = 1
PERM_ADMIN_SCHEDULE = (1 << 1) | PERM_ADMIN
PERM_ADMIN_MEMBER = (1 << 2) | PERM_ADMIN
PERM_ADMIN_NOTICE = (1 << 3) | PERM_ADMIN
PERM_ADMIN_GOD = PERM_ADMIN_SCHEDULE | PERM_ADMIN_MEMBER | PERM_ADMIN_NOTICE
PROFILE_FIELDS = ['email', 'home', 'car', 'comment', 'birthday']
class User(object):
def __init__(self, row):
self.id = row['id']
self.name = row['name']
self.password = row['password']
self.sex = row['sex']
self.permission = row['permission']
self.joined = row['joined']
self.logged_in = row['logged_in']
profile = loads(row['profile'])
self.email = profile.get('email', '')
self.home = profile.get('home', '')
self.car = profile.get('car', '')
self.comment = profile.get('comment', '')
self.birthday = profile.get('birthday', '')
self.entry_rate_cache = dict()
def has_permission(self, permission):
return (self.permission & permission) == permission
def is_admin(self):
return self.has_permission(PERM_ADMIN)
def is_schedule_admin(self):
return self.has_permission(PERM_ADMIN_SCHEDULE)
def is_member_admin(self):
return self.has_permission(PERM_ADMIN_MEMBER)
def is_notice_admin(self):
return self.has_permission(PERM_ADMIN_NOTICE)
def is_god(self):
return self.has_permission(PERM_ADMIN_GOD)
def is_male(self):
return self.sex == SEX_MALE
def is_female(self):
return self.sex == SEX_FEMALE
def is_registered(self, schedule):
cur = models.db().execute("""
SELECT Schedule.id,
Schedule.type,
Schedule.body,
Entry.user_id
FROM Schedule
LEFT OUTER JOIN (SELECT schedule_id,
user_id
FROM Entry
WHERE user_id = ?) Entry ON
Schedule.id = Entry.schedule_id
WHERE Schedule.id = ?""", (self.id, schedule['id']))
s = cur.fetchone()
        if not s:  # the schedule does not exist
            return False
        if s['user_id'] is not None:  # already registered
            return True
        if s['type'] == scheds.TYPE_PRACTICE:  # practice: not yet registered
            return False
        # match or event, not yet registered: check whether the deadline has passed
body = loads(s['body'])
return scheds.is_deadline_overred(body)
def is_entered(self, schedule):
cur = models.db().execute("""
SELECT user_id
FROM Entry
WHERE user_id = ?
AND schedule_id = ?
AND is_entry = 1""", (self.id, schedule['id']))
return cur.fetchone() is not None
def has_not_registered_schedule_yet(self):
cur = models.db().execute("""
SELECT Schedule.id
FROM Schedule
LEFT OUTER JOIN (SELECT *
FROM Entry
WHERE user_id = ?) AS Entry ON
Schedule.id = Entry.schedule_id
WHERE Schedule.when_ >= datetime('now', 'localtime')
AND Schedule.type = ?
GROUP BY Schedule.id
HAVING COUNT(Entry.user_id) = 0""",
(self.id, scheds.TYPE_PRACTICE))
return cur.fetchone() is not None
def is_joined_at(self, year):
dt = datetime(year + 1, 1, 1) - timedelta(seconds=1)
return self.joined < dt
def get_entry_rate(self, year):
r = self.entry_rate_cache.get(year, None)
if not r:
r = stats.get_practice_entry_rate_of_year(self, year)
self.entry_rate_cache[year] = r
return r
def update_logged_in(self):
db = models.db()
db.execute("""
UPDATE User
SET logged_in = datetime('now', 'localtime')
WHERE id = ?""", (self.id, ))
db.commit()
class NotUniquePassword(Exception):
def __init__(self):
pass
def from_row(row):
return User(row) if row else {}
def find_all():
    # exclude the special user
cur = models.db().execute("SELECT * FROM User WHERE id <> ?", (SPECIAL_USER, ))
return [from_row(r) for r in cur.fetchall()]
def find_by_id(uid):
cur = models.db().execute('SELECT * FROM User WHERE id = ?', (uid, ))
return from_row(cur.fetchone())
def find_by_password(password):
    # ignore the special user so it cannot log in
cur = models.db().execute('SELECT * FROM User WHERE password = ? AND id <> ?',
(password, SPECIAL_USER))
return from_row(cur.fetchone())
def find_group_by_sex():
    # ignore the special user so it does not show up in listings
users = models.db().execute('''
SELECT *
FROM User
WHERE id <> ?
ORDER BY sex, id''', (SPECIAL_USER, )).fetchall()
bysex = {}
for sex, us in groupby(users, lambda u: u['sex']):
bysex[sex] = [from_row(u) for u in us]
return bysex.get(SEX_MALE, []), bysex.get(SEX_FEMALE, [])
def is_valid_session_id(uid, sid):
cur = models.db().execute("""
SELECT user_id
FROM MobileSession
WHERE user_id = ?
AND session_id = ?
AND expire > CURRENT_TIMESTAMP""", (uid, sid))
return cur.fetchone() is not None
def issue_new_session_id(uid):
for sid in generate_session_id(6):
try:
with models.db():
do_issue_new_session_id(uid, sid)
return sid
except IntegrityError:
pass # not unique
def generate_session_id(length):
while True:
random_value = str(randint(100000, 999999))
hashcode = sha1(random_value).hexdigest()
for i in xrange(0, len(hashcode) - length):
yield hashcode[i:i + length]
def do_issue_new_session_id(uid, sid):
models.db().execute("""
DELETE FROM MobileSession
WHERE user_id = ?""", (uid, ))
models.db().execute("""
INSERT INTO MobileSession (user_id, session_id, expire)
VALUES (?, ?, datetime('now', '+1 month'))""", (uid, sid))
def check_unique_password(password, id=None):
db = models.db()
if id is not None:
cnt = db.execute("""
SELECT COUNT(*)
FROM User
WHERE password = ?
AND id <> ?""", (password, id)).fetchone()
else:
cnt = db.execute("""
SELECT COUNT(*)
FROM User
WHERE password = ?""", (password, )).fetchone()
if cnt[0] > 0:
raise NotUniquePassword()
def insert(name, password, sex, permission):
check_unique_password(password)
try:
db = models.db()
c = db.cursor()
c.execute("""
INSERT INTO User (name, password, sex, permission)
VALUES (?, ?, ?, ?)""", (name, password, sex, permission))
db.commit()
return c.lastrowid
    except IntegrityError:
        raise NotUniquePassword()  # not necessarily the real cause, but...
def update(id, password, sex, permission):
check_unique_password(password, id)
try:
db = models.db()
db.execute("""
UPDATE User
SET password = ?,
sex = ?,
permission = ?
WHERE id = ?""", (password, sex, permission, id))
db.commit()
    except IntegrityError:
        raise NotUniquePassword()  # not necessarily the real cause, but...
def update_profile(id, form):
password = get_or_gen_password(form)
check_unique_password(password, id)
sex = sex_atoi(form['sex'])
birthday = form['birthday']
email = form['email']
home = form['home']
car = form['car']
comment = sanitize_html(form['comment'])
profile = dumps({
'birthday': birthday,
'email': email,
'home': home,
'car': car,
'comment': comment
})
models.db().execute('''
UPDATE User
SET password = ?,
sex = ?,
profile = ?
WHERE id = ?''', (password, sex, profile, id))
models.db().commit()
def delete_by_id(id):
db = models.db()
db.execute('UPDATE TaxPaymentHistory SET user_id = ? WHERE user_id = ?', (SPECIAL_USER, id))
db.execute('UPDATE TaxPaymentHistory SET updater_user_id = ? WHERE updater_user_id = ?', (SPECIAL_USER, id))
db.execute('DELETE FROM User WHERE id = ?', (id, ))
db.commit()
def make_obj(form, id=-9999):
dummy_row = {'id': id,
'name': form['name'],
'password': get_or_gen_password(form),
'sex': sex_atoi(form['sex']),
'permission': permission_atoi(form),
'joined': datetime.now(),
'logged_in': None,
'profile': '{}'}
return User(dummy_row)
def get_or_gen_password(form):
p = form['password']
return p if p else generate_uniq_password()
def sex_atoi(sex):
return SEX_MALE if sex == u'男性' else SEX_FEMALE
def permission_atoi(form):
permission = 0
checks = form.getlist('permissions')
if 'schedule' in checks:
permission |= PERM_ADMIN_SCHEDULE
if 'member' in checks:
permission |= PERM_ADMIN_MEMBER
if 'notice' in checks:
permission |= PERM_ADMIN_NOTICE
return permission
def generate_uniq_password():
p = randint(100000, 999999)
while find_by_password(p):
p = randint(100000, 999999)
return p
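if __name__ == '__main__':
    # Illustrative sketch, not part of the original module: how the PERM_ADMIN_*
    # bit masks defined above compose, and how User.has_permission() evaluates
    # them. A user granted the 'schedule' and 'notice' checkboxes carries both of
    # those admin bits plus the shared PERM_ADMIN bit, but is neither a member
    # admin nor "god".
    example_permission = PERM_ADMIN_SCHEDULE | PERM_ADMIN_NOTICE  # 0b1011
    assert (example_permission & PERM_ADMIN) == PERM_ADMIN
    assert (example_permission & PERM_ADMIN_SCHEDULE) == PERM_ADMIN_SCHEDULE
    assert (example_permission & PERM_ADMIN_NOTICE) == PERM_ADMIN_NOTICE
    assert (example_permission & PERM_ADMIN_MEMBER) != PERM_ADMIN_MEMBER
    assert (example_permission & PERM_ADMIN_GOD) != PERM_ADMIN_GOD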
|
from __future__ import unicode_literals
from datetime import datetime
from django.db import models
from django.contrib.auth.models import AbstractUser
class UserProfile(AbstractUser):
nickname = models.CharField(max_length=50, verbose_name=u"昵称", default="")
birthday = models.DateField(verbose_name=u"生日", null=True, blank=True)
gender = models.CharField(max_length=6, choices=(("male",u"男"),("female",u"女")), default=u"女")
address = models.CharField(max_length=100, default=u"")
mobile = models.CharField(max_length=11, null=True, blank=True)
image = models.ImageField(upload_to="image/%Y/%m", default=u"image/default.png", max_length=100)
class Meta:
verbose_name = u"用户信息"
verbose_name_plural = verbose_name
def __unicode__(self):
return "{0}".format(self.username)
def unread_nums(self):
        # Get the number of the user's unread messages
from operation.models import UserMessage
return UserMessage.objects.filter(user=self.id, has_read=False).count()
class EmailVerifyRecord(models.Model):
code = models.CharField(max_length=20, verbose_name=u"验证码")
email = models.EmailField(max_length=50, verbose_name=u"邮箱")
# "django 1406 Data too long for column..." 错误主要与 choices 的 max_length 有关
send_type = models.CharField(choices=(("register", u"注册"),("forget", u"找回密码"),("update_email", u"修改邮箱")), max_length=15, verbose_name=u"验证码类型")
send_time = models.DateTimeField(default=datetime.now, verbose_name=u"发送时间")
class Meta:
verbose_name = u"邮箱验证码"
        # Without this, the plural display name gets an 's' suffix
verbose_name_plural = verbose_name
def __unicode__(self):
return '{0},({1})'.format(self.code, self.email)
class Banner(models.Model):
title = models.CharField(max_length=100, verbose_name=u"标题")
image = models.ImageField(upload_to="banner/%Y/%m", verbose_name=u"轮播图", max_length=100)
url = models.URLField(max_length=200, verbose_name=u"访问地址")
index = models.IntegerField(default=100, verbose_name=u"顺序")
add_time = models.DateTimeField(default=datetime.now, verbose_name=u"添加时间")
class Meta:
verbose_name = u"轮播图"
verbose_name_plural = verbose_name
def __unicode__(self):
return "{0}".format(self.title)
|
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('blog', '0010_auto_20170715_1819'),
]
operations = [
migrations.RemoveField(
model_name='chat',
name='user',
),
migrations.DeleteModel(
name='Chat',
),
]
|
from datetime import datetime
from flask import Flask, redirect, render_template, request
from google.cloud import datastore
try:
from urllib import urlencode
except Exception:
from urllib.parse import urlencode
app = Flask(__name__)
client = datastore.Client()
@app.route('/', methods=['GET'])
def display_guestbook():
guestbook_name = request.args.get('guestbook_name', '')
print('GET guestbook name is {}'.format(guestbook_name))
ancestor_key = client.key('Book', guestbook_name or "*notitle*")
greetings = client.query(ancestor=ancestor_key).fetch(limit=20)
greeting_blockquotes = [greeting.get('content', '') for greeting in greetings]
return render_template(
'index.html',
greeting_blockquotes=greeting_blockquotes,
guestbook_name=guestbook_name
)
@app.route('/sign', methods=['POST'])
def update_guestbook():
# We set the parent key on each 'Greeting' to ensure each guestbook's
# greetings are in the same entity group.
guestbook_name = request.form.get('guestbook_name', '')
    print('Guestbook name from the form: {}'.format(guestbook_name))
ancestor_key = client.key('Book', guestbook_name or "*notitle*")
key = client.key('Greeting', parent=ancestor_key)
greeting = datastore.Entity(key=key)
greeting['content'] = request.form.get('content', None)
greeting['date'] = datetime.utcnow()
client.put(greeting)
return redirect('/?' + urlencode({'guestbook_name': guestbook_name}))
if __name__ == '__main__':
# This is used when running locally.
app.run(host='127.0.0.1', port=8080, debug=True)
|
import zstackwoodpecker.test_state as ts_header
import os
TestAction = ts_header.TestAction
def path():
return dict(initial_formation="template5", checking_point=1, faild_point=100000, path_list=[
[TestAction.create_mini_vm, 'vm1', 'cluster=cluster2'],
[TestAction.change_vm_ha, 'vm1'],
[TestAction.create_mini_vm, 'vm2', 'cluster=cluster1'],
[TestAction.create_volume, 'volume1', 'cluster=cluster1', 'flag=scsi'],
[TestAction.attach_volume, 'vm2', 'volume1'],
[TestAction.create_volume_backup, 'volume1', 'volume1-backup1'],
[TestAction.create_mini_vm, 'vm3', 'network=random', 'cluster=cluster1'],
[TestAction.migrate_vm, 'vm1'],
[TestAction.poweroff_only, 'cluster=cluster2'],
[TestAction.create_volume, 'volume2', 'cluster=cluster2', 'flag=scsi'],
[TestAction.attach_volume, 'vm1', 'volume2'],
[TestAction.detach_volume, 'volume2'],
[TestAction.create_volume, 'volume3', 'cluster=cluster2', 'flag=thick,scsi'],
[TestAction.add_image, 'image1', 'root', 'http://172.20.1.28/mirror/diskimages/centos_vdbench.qcow2'],
[TestAction.create_volume, 'volume4', 'cluster=cluster1', 'flag=scsi'],
[TestAction.attach_volume, 'vm2', 'volume4'],
[TestAction.create_volume_backup, 'volume4', 'volume4-backup2'],
[TestAction.stop_vm, 'vm2'],
[TestAction.use_volume_backup, 'volume4-backup2'],
[TestAction.start_vm, 'vm2'],
[TestAction.delete_image, 'image1'],
[TestAction.recover_image, 'image1'],
[TestAction.delete_image, 'image1'],
[TestAction.expunge_image, 'image1'],
[TestAction.create_vm_backup, 'vm1', 'vm1-backup3'],
[TestAction.change_vm_ha, 'vm1'],
[TestAction.poweroff_only, 'cluster=cluster1'],
[TestAction.start_vm, 'vm2'],
[TestAction.migrate_vm, 'vm2'],
[TestAction.attach_volume, 'vm1', 'volume3'],
[TestAction.detach_volume, 'volume3'],
[TestAction.create_volume, 'volume5', 'cluster=cluster1', 'flag=thick,scsi'],
[TestAction.start_vm, 'vm3'],
[TestAction.create_vm_backup, 'vm3', 'vm3-backup4'],
[TestAction.stop_vm, 'vm3'],
[TestAction.use_vm_backup, 'vm3-backup4'],
[TestAction.create_mini_vm, 'vm4', 'cluster=cluster2', 'flag=thin'],
[TestAction.delete_volume, 'volume5'],
[TestAction.expunge_volume, 'volume5'],
[TestAction.destroy_vm, 'vm2'],
[TestAction.start_vm, 'vm3'],
[TestAction.create_vm_backup, 'vm3', 'vm3-backup5'],
[TestAction.stop_vm, 'vm3'],
[TestAction.create_image_from_volume, 'vm4', 'vm4-image2'],
[TestAction.poweroff_only, 'cluster=cluster1'],
[TestAction.delete_volume_backup, 'volume4-backup2'],
])
'''
The final status:
Running:['vm1', 'vm4']
Stopped:['vm3']
Enabled:['volume1-backup1', 'vm1-backup3', 'vm3-backup4', 'vm3-backup5', 'vm4-image2']
attached:[]
Detached:['volume2', 'volume3', 'volume1', 'volume4']
Deleted:['vm2', 'volume4-backup2']
Expunged:['volume5', 'image1']
Ha:[]
Group:
vm_backup2:['vm3-backup4']---vm3@
vm_backup3:['vm3-backup5']---vm3@
vm_backup1:['vm1-backup3']---vm1@
'''
|
"""UniFi POE control platform tests."""
from collections import deque
from unittest.mock import Mock
import pytest
from tests.common import mock_coro
import aiounifi
from aiounifi.clients import Clients, ClientsAll
from aiounifi.devices import Devices
from homeassistant import config_entries
from homeassistant.components import unifi
from homeassistant.components.unifi.const import (
CONF_CONTROLLER,
CONF_SITE_ID,
UNIFI_CONFIG,
)
from homeassistant.helpers import entity_registry
from homeassistant.setup import async_setup_component
from homeassistant.const import (
CONF_HOST,
CONF_PASSWORD,
CONF_PORT,
CONF_USERNAME,
CONF_VERIFY_SSL,
)
import homeassistant.components.switch as switch
CLIENT_1 = {
"hostname": "client_1",
"ip": "10.0.0.1",
"is_wired": True,
"mac": "00:00:00:00:00:01",
"name": "POE Client 1",
"oui": "Producer",
"sw_mac": "00:00:00:00:01:01",
"sw_port": 1,
"wired-rx_bytes": 1234000000,
"wired-tx_bytes": 5678000000,
}
CLIENT_2 = {
"hostname": "client_2",
"ip": "10.0.0.2",
"is_wired": True,
"mac": "00:00:00:00:00:02",
"name": "POE Client 2",
"oui": "Producer",
"sw_mac": "00:00:00:00:01:01",
"sw_port": 2,
"wired-rx_bytes": 1234000000,
"wired-tx_bytes": 5678000000,
}
CLIENT_3 = {
"hostname": "client_3",
"ip": "10.0.0.3",
"is_wired": True,
"mac": "00:00:00:00:00:03",
"name": "Non-POE Client 3",
"oui": "Producer",
"sw_mac": "00:00:00:00:01:01",
"sw_port": 3,
"wired-rx_bytes": 1234000000,
"wired-tx_bytes": 5678000000,
}
CLIENT_4 = {
"hostname": "client_4",
"ip": "10.0.0.4",
"is_wired": True,
"mac": "00:00:00:00:00:04",
"name": "Non-POE Client 4",
"oui": "Producer",
"sw_mac": "00:00:00:00:01:01",
"sw_port": 4,
"wired-rx_bytes": 1234000000,
"wired-tx_bytes": 5678000000,
}
CLOUDKEY = {
"hostname": "client_1",
"ip": "mock-host",
"is_wired": True,
"mac": "10:00:00:00:00:01",
"name": "Cloud key",
"oui": "Producer",
"sw_mac": "00:00:00:00:01:01",
"sw_port": 1,
"wired-rx_bytes": 1234000000,
"wired-tx_bytes": 5678000000,
}
POE_SWITCH_CLIENTS = [
{
"hostname": "client_1",
"ip": "10.0.0.1",
"is_wired": True,
"mac": "00:00:00:00:00:01",
"name": "POE Client 1",
"oui": "Producer",
"sw_mac": "00:00:00:00:01:01",
"sw_port": 1,
"wired-rx_bytes": 1234000000,
"wired-tx_bytes": 5678000000,
},
{
"hostname": "client_2",
"ip": "10.0.0.2",
"is_wired": True,
"mac": "00:00:00:00:00:02",
"name": "POE Client 2",
"oui": "Producer",
"sw_mac": "00:00:00:00:01:01",
"sw_port": 1,
"wired-rx_bytes": 1234000000,
"wired-tx_bytes": 5678000000,
},
]
DEVICE_1 = {
"device_id": "mock-id",
"ip": "10.0.1.1",
"mac": "00:00:00:00:01:01",
"type": "usw",
"name": "mock-name",
"port_overrides": [],
"port_table": [
{
"media": "GE",
"name": "Port 1",
"port_idx": 1,
"poe_class": "Class 4",
"poe_enable": True,
"poe_mode": "auto",
"poe_power": "2.56",
"poe_voltage": "53.40",
"portconf_id": "1a1",
"port_poe": True,
"up": True,
},
{
"media": "GE",
"name": "Port 2",
"port_idx": 2,
"poe_class": "Class 4",
"poe_enable": True,
"poe_mode": "auto",
"poe_power": "2.56",
"poe_voltage": "53.40",
"portconf_id": "1a2",
"port_poe": True,
"up": True,
},
{
"media": "GE",
"name": "Port 3",
"port_idx": 3,
"poe_class": "Unknown",
"poe_enable": False,
"poe_mode": "off",
"poe_power": "0.00",
"poe_voltage": "0.00",
"portconf_id": "1a3",
"port_poe": False,
"up": True,
},
{
"media": "GE",
"name": "Port 4",
"port_idx": 4,
"poe_class": "Unknown",
"poe_enable": False,
"poe_mode": "auto",
"poe_power": "0.00",
"poe_voltage": "0.00",
"portconf_id": "1a4",
"port_poe": True,
"up": True,
},
],
}
BLOCKED = {
"blocked": True,
"hostname": "block_client_1",
"ip": "10.0.0.1",
"is_guest": False,
"is_wired": False,
"mac": "00:00:00:00:01:01",
"name": "Block Client 1",
"noted": True,
"oui": "Producer",
}
UNBLOCKED = {
"blocked": False,
"hostname": "block_client_2",
"ip": "10.0.0.2",
"is_guest": False,
"is_wired": True,
"mac": "00:00:00:00:01:02",
"name": "Block Client 2",
"noted": True,
"oui": "Producer",
}
CONTROLLER_DATA = {
CONF_HOST: "mock-host",
CONF_USERNAME: "mock-user",
CONF_PASSWORD: "mock-pswd",
CONF_PORT: 1234,
CONF_SITE_ID: "mock-site",
CONF_VERIFY_SSL: True,
}
ENTRY_CONFIG = {CONF_CONTROLLER: CONTROLLER_DATA}
CONTROLLER_ID = unifi.CONTROLLER_ID.format(host="mock-host", site="mock-site")
@pytest.fixture
def mock_controller(hass):
"""Mock a UniFi Controller."""
hass.data[UNIFI_CONFIG] = {}
controller = unifi.UniFiController(hass, None)
controller._site_role = "admin"
controller.api = Mock()
controller.mock_requests = []
controller.mock_client_responses = deque()
controller.mock_device_responses = deque()
controller.mock_client_all_responses = deque()
async def mock_request(method, path, **kwargs):
kwargs["method"] = method
kwargs["path"] = path
controller.mock_requests.append(kwargs)
if path == "s/{site}/stat/sta":
return controller.mock_client_responses.popleft()
if path == "s/{site}/stat/device":
return controller.mock_device_responses.popleft()
if path == "s/{site}/rest/user":
return controller.mock_client_all_responses.popleft()
return None
controller.api.clients = Clients({}, mock_request)
controller.api.devices = Devices({}, mock_request)
controller.api.clients_all = ClientsAll({}, mock_request)
return controller
async def setup_controller(hass, mock_controller):
"""Load the UniFi switch platform with the provided controller."""
hass.config.components.add(unifi.DOMAIN)
hass.data[unifi.DOMAIN] = {CONTROLLER_ID: mock_controller}
config_entry = config_entries.ConfigEntry(
1,
unifi.DOMAIN,
"Mock Title",
ENTRY_CONFIG,
"test",
config_entries.CONN_CLASS_LOCAL_POLL,
entry_id=1,
)
mock_controller.config_entry = config_entry
await mock_controller.async_update()
await hass.config_entries.async_forward_entry_setup(config_entry, "switch")
    # Flush out pending service calls so entity states are fully set up.
await hass.async_block_till_done()
async def test_platform_manually_configured(hass):
"""Test that we do not discover anything or try to set up a bridge."""
assert (
await async_setup_component(
hass, switch.DOMAIN, {"switch": {"platform": "unifi"}}
)
is True
)
assert unifi.DOMAIN not in hass.data
async def test_no_clients(hass, mock_controller):
"""Test the update_clients function when no clients are found."""
mock_controller.mock_client_responses.append({})
mock_controller.mock_device_responses.append({})
await setup_controller(hass, mock_controller)
assert len(mock_controller.mock_requests) == 2
assert not hass.states.async_all()
async def test_controller_not_client(hass, mock_controller):
"""Test that the controller doesn't become a switch."""
mock_controller.mock_client_responses.append([CLOUDKEY])
mock_controller.mock_device_responses.append([DEVICE_1])
await setup_controller(hass, mock_controller)
assert len(mock_controller.mock_requests) == 2
assert not hass.states.async_all()
cloudkey = hass.states.get("switch.cloud_key")
assert cloudkey is None
async def test_not_admin(hass, mock_controller):
"""Test that switch platform only work on an admin account."""
mock_controller.mock_client_responses.append([CLIENT_1])
mock_controller.mock_device_responses.append([])
mock_controller._site_role = "viewer"
await setup_controller(hass, mock_controller)
assert len(mock_controller.mock_requests) == 2
assert len(hass.states.async_all()) == 0
async def test_switches(hass, mock_controller):
"""Test the update_items function with some clients."""
mock_controller.mock_client_responses.append([CLIENT_1, CLIENT_4])
mock_controller.mock_device_responses.append([DEVICE_1])
mock_controller.mock_client_all_responses.append([BLOCKED, UNBLOCKED, CLIENT_1])
mock_controller.unifi_config = {
unifi.CONF_BLOCK_CLIENT: [BLOCKED["mac"], UNBLOCKED["mac"]]
}
await setup_controller(hass, mock_controller)
assert len(mock_controller.mock_requests) == 3
assert len(hass.states.async_all()) == 5
switch_1 = hass.states.get("switch.poe_client_1")
assert switch_1 is not None
assert switch_1.state == "on"
assert switch_1.attributes["power"] == "2.56"
assert switch_1.attributes["received"] == 1234
assert switch_1.attributes["sent"] == 5678
assert switch_1.attributes["switch"] == "00:00:00:00:01:01"
assert switch_1.attributes["port"] == 1
assert switch_1.attributes["poe_mode"] == "auto"
switch_4 = hass.states.get("switch.poe_client_4")
assert switch_4 is None
blocked = hass.states.get("switch.block_client_1")
assert blocked is not None
assert blocked.state == "off"
unblocked = hass.states.get("switch.block_client_2")
assert unblocked is not None
assert unblocked.state == "on"
async def test_new_client_discovered(hass, mock_controller):
"""Test if 2nd update has a new client."""
mock_controller.mock_client_responses.append([CLIENT_1])
mock_controller.mock_device_responses.append([DEVICE_1])
await setup_controller(hass, mock_controller)
assert len(mock_controller.mock_requests) == 2
assert len(hass.states.async_all()) == 2
mock_controller.mock_client_responses.append([CLIENT_1, CLIENT_2])
mock_controller.mock_device_responses.append([DEVICE_1])
# Calling a service will trigger the updates to run
await hass.services.async_call(
"switch", "turn_off", {"entity_id": "switch.poe_client_1"}, blocking=True
)
assert len(mock_controller.mock_requests) == 5
assert len(hass.states.async_all()) == 3
assert mock_controller.mock_requests[2] == {
"json": {
"port_overrides": [{"port_idx": 1, "portconf_id": "1a1", "poe_mode": "off"}]
},
"method": "put",
"path": "s/{site}/rest/device/mock-id",
}
await hass.services.async_call(
"switch", "turn_on", {"entity_id": "switch.poe_client_1"}, blocking=True
)
assert len(mock_controller.mock_requests) == 7
assert mock_controller.mock_requests[5] == {
"json": {
"port_overrides": [
{"port_idx": 1, "portconf_id": "1a1", "poe_mode": "auto"}
]
},
"method": "put",
"path": "s/{site}/rest/device/mock-id",
}
switch_2 = hass.states.get("switch.poe_client_2")
assert switch_2 is not None
assert switch_2.state == "on"
async def test_failed_update_successful_login(hass, mock_controller):
"""Running update can login when requested."""
mock_controller.available = False
mock_controller.api.clients.update = Mock()
mock_controller.api.clients.update.side_effect = aiounifi.LoginRequired
mock_controller.api.login = Mock()
mock_controller.api.login.return_value = mock_coro()
await setup_controller(hass, mock_controller)
assert len(mock_controller.mock_requests) == 0
assert mock_controller.available is True
async def test_failed_update_failed_login(hass, mock_controller):
"""Running update can handle a failed login."""
mock_controller.api.clients.update = Mock()
mock_controller.api.clients.update.side_effect = aiounifi.LoginRequired
mock_controller.api.login = Mock()
mock_controller.api.login.side_effect = aiounifi.AiounifiException
await setup_controller(hass, mock_controller)
assert len(mock_controller.mock_requests) == 0
assert mock_controller.available is False
async def test_failed_update_unreachable_controller(hass, mock_controller):
"""Running update can handle a unreachable controller."""
mock_controller.mock_client_responses.append([CLIENT_1, CLIENT_2])
mock_controller.mock_device_responses.append([DEVICE_1])
await setup_controller(hass, mock_controller)
mock_controller.api.clients.update = Mock()
mock_controller.api.clients.update.side_effect = aiounifi.AiounifiException
# Calling a service will trigger the updates to run
await hass.services.async_call(
"switch", "turn_off", {"entity_id": "switch.poe_client_1"}, blocking=True
)
assert len(mock_controller.mock_requests) == 3
assert len(hass.states.async_all()) == 3
assert mock_controller.available is False
async def test_ignore_multiple_poe_clients_on_same_port(hass, mock_controller):
"""Ignore when there are multiple POE driven clients on same port.
If there is a non-UniFi switch powered by POE,
clients will be transparently marked as having POE as well.
"""
mock_controller.mock_client_responses.append(POE_SWITCH_CLIENTS)
mock_controller.mock_device_responses.append([DEVICE_1])
await setup_controller(hass, mock_controller)
assert len(mock_controller.mock_requests) == 2
    # Neither client gets a switch entity because they share the same POE port.
assert len(hass.states.async_all()) == 0
switch_1 = hass.states.get("switch.poe_client_1")
switch_2 = hass.states.get("switch.poe_client_2")
assert switch_1 is None
assert switch_2 is None
async def test_restoring_client(hass, mock_controller):
"""Test the update_items function with some clients."""
mock_controller.mock_client_responses.append([CLIENT_2])
mock_controller.mock_device_responses.append([DEVICE_1])
mock_controller.mock_client_all_responses.append([CLIENT_1])
mock_controller.unifi_config = {unifi.CONF_BLOCK_CLIENT: ["random mac"]}
registry = await entity_registry.async_get_registry(hass)
registry.async_get_or_create(
switch.DOMAIN,
unifi.DOMAIN,
"poe-{}".format(CLIENT_1["mac"]),
suggested_object_id=CLIENT_1["hostname"],
config_entry_id=1,
)
registry.async_get_or_create(
switch.DOMAIN,
unifi.DOMAIN,
"poe-{}".format(CLIENT_2["mac"]),
suggested_object_id=CLIENT_2["hostname"],
config_entry_id=1,
)
await setup_controller(hass, mock_controller)
assert len(mock_controller.mock_requests) == 3
assert len(hass.states.async_all()) == 3
device_1 = hass.states.get("switch.client_1")
assert device_1 is not None
|
"""
Identifier Class for ip-reverse-dns
"""
import dns
import ipaddress
import re
import sre_constants
from ...iso8601 import *
from ...psjson import *
from ...pstime import *
from ...stringmatcher import *
data_validator = {
"type": "object",
"properties": {
"match": { "$ref": "#/pScheduler/StringMatch" },
"timeout": { "$ref": "#/pScheduler/Duration" }
},
"additionalProperties": False,
"required": [ "match", "timeout" ]
}
def data_is_valid(data):
"""Check to see if data is valid for this class. Returns a tuple of
    (bool, string) indicating validity and any error message.
"""
return json_validate(data, data_validator)
class IdentifierIPReverseDNS(object):
"""
Class that does reverse DNS identification
"""
def __init__(self,
data # Data suitable for this class
):
valid, message = data_is_valid(data)
if not valid:
raise ValueError("Invalid data: %s" % message)
self.matcher = StringMatcher(data['match'])
timeout = timedelta_as_seconds(
iso8601_as_timedelta(data['timeout']))
self.resolver = dns.resolver.Resolver()
self.resolver.timeout = timeout
self.resolver.lifetime = timeout
def evaluate(self,
hints # Information used for doing identification
):
"""Given a set of hints, evaluate this identifier and return True if
an identification is made.
"""
try:
ip = hints['requester']
except KeyError:
return False
addr = ipaddress.ip_address(str(ip))
ip_reverse = dns.reversename.from_address(ip)
# Resolve to a FQDN
try:
reverse = str(self.resolver.query(ip_reverse, 'PTR')[0])
except (dns.resolver.NXDOMAIN,
dns.exception.Timeout,
dns.resolver.NoAnswer,
dns.resolver.NoNameservers):
return False
# Resolve the FQDN back to an IP and see if they match. This
# prevents someone in control over their reverse resolution
# from claiming they're someone they're not.
# TODO: Check against _all_ returned IPs
record = 'A' if addr.version == 4 else 'AAAA'
try:
forwards = self.resolver.query(reverse, record)
except (dns.resolver.NXDOMAIN,
dns.exception.Timeout,
dns.resolver.NoAnswer,
dns.resolver.NoNameservers):
return False
if ip not in [ str(f) for f in forwards ]:
return False
# Try to match with and without the dot at the end.
for reverse_candidate in [ reverse, reverse.rstrip('.') ]:
if self.matcher.matches(reverse_candidate):
return True
# No match, no dice.
return False
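# Illustrative usage sketch (not part of this module): a caller constructs the
# identifier from validated data and evaluates it against request hints. The
# StringMatch fields and the requester address below are made-up example values;
# evaluation performs real DNS lookups.
#
#   identifier = IdentifierIPReverseDNS({
#       "match": {"style": "regex", "match": r"\.example\.org$"},
#       "timeout": "PT2S",
#   })
#   identifier.evaluate({"requester": "192.0.2.10"})  # True only on a match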
|
import os
import yaml
from twisted.trial.unittest import SkipTest
from flocker.node.agents.test.test_blockdevice import (
make_iblockdeviceapi_tests)
from openvstorage_flocker_plugin.openvstorage_blockdevice import (
OpenvStorageBlockDeviceAPI)
def read_config():
    config_file = os.getenv('VPOOL_FLOCKER_CONFIG_FILE',
                            '/etc/flocker/agent.yml')
    try:
        with open(config_file) as fh:
            config = yaml.load(fh.read())
    except IOError:
        # Skip the interface tests when no agent configuration is available.
        raise SkipTest('Could not open config file')
    return config['dataset']['vpool_conf_file']
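# Example agent.yml layout this helper expects (illustrative; only the
# dataset.vpool_conf_file key is read here, the other values are hypothetical):
#   dataset:
#     backend: openvstorage_flocker_plugin
#     vpool_conf_file: /etc/openvstorage/vpool.json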
def openvstorageblockdeviceapi_for_test(test_case):
conf = read_config()
ovsapi = OpenvStorageBlockDeviceAPI(conf)
test_case.addCleanup(ovsapi.destroy_all_flocker_volumes)
return ovsapi
class OpenvStorageBlockDeviceAPIInterfaceTests(
make_iblockdeviceapi_tests(
blockdevice_api_factory=(
lambda test_case: openvstorageblockdeviceapi_for_test(
test_case=test_case,
)
),
minimum_allocatable_size=int(1024*1024*1024),
device_allocation_unit=int(1024 * 1024),
unknown_blockdevice_id_factory=lambda test: u"voldrv-00000000",
)
):
"""
Acceptance tests for the OpenvStorage driver.
"""
|
"""
Created on Sat Mar 25 20:42:44 2017
@author: Yefee
"""
from .share_constant import *
sday = SHR_CONST_SDAY
omega = SHR_CONST_OMEGA
rearth = SHR_CONST_REARTH
g = SHR_CONST_G
stebol = SHR_CONST_STEBOL
boltz = SHR_CONST_BOLTZ
avogad = SHR_CONST_AVOGAD
rgas = SHR_CONST_RGAS
mwdair = SHR_CONST_MWDAIR
mwwv = SHR_CONST_MWWV
rdair = SHR_CONST_RDAIR
rwv = SHR_CONST_RWV
zvir = SHR_CONST_ZVIR
karman = SHR_CONST_KARMAN
tkfrz = SHR_CONST_TKFRZ
tktrip = SHR_CONST_TKTRIP
rhoair = SHR_CONST_RHODAIR
rhofw = SHR_CONST_RHOFW
rhosw = SHR_CONST_RHOSW
rhoice = SHR_CONST_RHOICE
cpdair = SHR_CONST_CPDAIR
cpfw = SHR_CONST_CPFW
cpsw = SHR_CONST_CPSW
cpwv = SHR_CONST_CPWV
cpice = SHR_CONST_CPICE
latice = SHR_CONST_LATICE
latvap = SHR_CONST_LATVAP
latsub = SHR_CONST_LATSUB
ocn_ref_sal = SHR_CONST_OCN_REF_SAL
ice_ref_sal = SHR_CONST_ICE_REF_SAL
cappa = (SHR_CONST_RGAS/SHR_CONST_MWDAIR)/SHR_CONST_CPDAIR  # R/Cp for dry air
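# With the CESM shared constants this evaluates to roughly 287.04 / 1004.64,
# i.e. cappa (kappa) for dry air is approximately 0.2857.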
|
import os
from pants.backend.native.targets.external_native_library import ExternalNativeLibrary
from pants.backend.native.tasks.conan_fetch import ConanFetch
from pants.backend.native.tasks.conan_prep import ConanPrep
from pants.testutil.task_test_base import TaskTestBase
class ConanFetchTest(TaskTestBase):
@classmethod
def task_type(cls):
return ConanFetch
def test_conan_pex_noop(self):
"""Test that the conan pex is not generated if there are no conan libraries to fetch."""
conan_prep_task_type = self.synthesize_task_subtype(ConanPrep, "conan_prep_scope")
context = self.context(for_task_types=[conan_prep_task_type])
conan_prep = conan_prep_task_type(context, os.path.join(self.pants_workdir, "conan_prep"))
conan_prep.execute()
self.assertIsNone(context.products.get_data(ConanPrep.tool_instance_cls))
def test_rewrites_remotes_according_to_options(self):
self.set_options(conan_remotes={"pants-conan": "https://conan.bintray.com"})
conan_prep_task_type = self.synthesize_task_subtype(ConanPrep, "conan_prep_scope")
# We need at least one library to resolve here so that the conan pex is generated.
dummy_target = self.make_target(
spec="//:dummy-conan-3rdparty-lib", target_type=ExternalNativeLibrary, packages=[]
)
context = self.context(for_task_types=[conan_prep_task_type], target_roots=[dummy_target])
conan_prep = conan_prep_task_type(context, os.path.join(self.pants_workdir, "conan_prep"))
conan_fetch = self.create_task(context, os.path.join(self.pants_workdir, "conan_fetch"))
conan_prep.execute()
conan_fetch.execute()
conan_pex = context.products.get_data(ConanPrep.tool_instance_cls)
user_home = conan_fetch._conan_user_home(conan_pex, in_workdir=True)
(stdout, stderr, exit_code, _) = conan_pex.output(
["remote", "list"], env={"CONAN_USER_HOME": user_home}
)
self.assertEqual(0, exit_code)
self.assertEqual(stdout, "pants-conan: https://conan.bintray.com [Verify SSL: True]\n")
|
"""
Tests For Capacity Weigher.
"""
import mock
from oslo_config import cfg
from cinder import context
from cinder.openstack.common.scheduler.weights import HostWeightHandler
from cinder.scheduler.weights.capacity import CapacityWeigher
from cinder import test
from cinder.tests.scheduler import fakes
from cinder.volume import utils
CONF = cfg.CONF
class CapacityWeigherTestCase(test.TestCase):
def setUp(self):
super(CapacityWeigherTestCase, self).setUp()
self.host_manager = fakes.FakeHostManager()
self.weight_handler = HostWeightHandler('cinder.scheduler.weights')
def _get_weighed_host(self, hosts, weight_properties=None):
if weight_properties is None:
weight_properties = {'size': 1}
return self.weight_handler.get_weighed_objects([CapacityWeigher],
hosts,
weight_properties)[0]
@mock.patch('cinder.db.sqlalchemy.api.service_get_all_by_topic')
def _get_all_hosts(self, _mock_service_get_all_by_topic, disabled=False):
ctxt = context.get_admin_context()
fakes.mock_host_manager_db_calls(_mock_service_get_all_by_topic,
disabled=disabled)
host_states = self.host_manager.get_all_host_states(ctxt)
_mock_service_get_all_by_topic.assert_called_once_with(
ctxt, CONF.volume_topic, disabled=disabled)
return host_states
# If thin_provisioning_support = False, use the following formula:
# free = free_space - math.floor(total * reserved)
# Otherwise, use the following formula:
# free = (total * host_state.max_over_subscription_ratio
# - host_state.provisioned_capacity_gb
# - math.floor(total * reserved))
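    # Worked example using host2's fake capacities from these tests:
    #   total=2048, max_over_subscription_ratio=1.5, provisioned=1748, reserved=0.1
    #   free = 2048 * 1.5 - 1748 - math.floor(2048 * 0.1) = 3072 - 1748 - 204 = 1120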
def test_default_of_spreading_first(self):
hostinfo_list = self._get_all_hosts()
# host1: thin_provisioning_support = False
# free_capacity_gb=1024,
# free=1024-math.floor(1024*0.1)=922
# host2: thin_provisioning_support = True
# free_capacity_gb=300,
# free=2048*1.5-1748-math.floor(2048*0.1)=1120
# host3: thin_provisioning_support = False
# free_capacity_gb=512, free=256-512*0=256
# host4: thin_provisioning_support = True
# free_capacity_gb=200,
# free=2048*1.0-2047-math.floor(2048*0.05)=-101
# host5: free_capacity_gb=unknown free=-1
# so, host2 should win:
weighed_host = self._get_weighed_host(hostinfo_list)
self.assertEqual(weighed_host.weight, 1120.0)
self.assertEqual(
utils.extract_host(weighed_host.obj.host), 'host2')
def test_capacity_weight_multiplier1(self):
self.flags(capacity_weight_multiplier=-1.0)
hostinfo_list = self._get_all_hosts()
# host1: thin_provisioning_support = False
# free_capacity_gb=1024,
# free=-(1024-math.floor(1024*0.1))=-922
# host2: thin_provisioning_support = True
# free_capacity_gb=300,
# free=-(2048*1.5-1748-math.floor(2048*0.1))=-1120
# host3: thin_provisioning_support = False
# free_capacity_gb=512, free=-(256-512*0)=-256
# host4: thin_provisioning_support = True
# free_capacity_gb=200,
# free=-(2048*1.0-2047-math.floor(2048*0.05))=101
# host5: free_capacity_gb=unknown free=-float('inf')
# so, host4 should win:
weighed_host = self._get_weighed_host(hostinfo_list)
self.assertEqual(weighed_host.weight, 101.0)
self.assertEqual(
utils.extract_host(weighed_host.obj.host), 'host4')
def test_capacity_weight_multiplier2(self):
self.flags(capacity_weight_multiplier=2.0)
hostinfo_list = self._get_all_hosts()
# host1: thin_provisioning_support = False
# free_capacity_gb=1024,
# free=(1024-math.floor(1024*0.1))*2=1844
# host2: thin_provisioning_support = True
# free_capacity_gb=300,
# free=(2048*1.5-1748-math.floor(2048*0.1))*2=2240
# host3: thin_provisioning_support = False
# free_capacity_gb=512, free=(256-512*0)*2=512
# host4: thin_provisioning_support = True
# free_capacity_gb=200,
# free=(2048*1.0-2047-math.floor(2048*0.05))*2=-202
# host5: free_capacity_gb=unknown free=-2
# so, host2 should win:
weighed_host = self._get_weighed_host(hostinfo_list)
self.assertEqual(weighed_host.weight, 1120.0 * 2)
self.assertEqual(
utils.extract_host(weighed_host.obj.host), 'host2')
|
import comtypes
import ctypes
from comtypes import client
from ctypes import wintypes
VDS_QUERY_SOFTWARE_PROVIDERS = 1
VDS_DET_FREE = 1
CLSID_VdsLoader = '{9C38ED61-D565-4728-AEEE-C80952F0ECDE}'
msvcrt = ctypes.cdll.msvcrt
msvcrt.memcmp.restype = ctypes.c_int
msvcrt.memcmp.argtypes = [ctypes.c_void_p, ctypes.c_void_p, ctypes.c_uint]
class GUID(ctypes.Structure):
_fields_ = [
("data1", ctypes.wintypes.DWORD),
("data2", ctypes.wintypes.WORD),
("data3", ctypes.wintypes.WORD),
("data4", ctypes.c_byte * 8)]
def __eq__(self, other):
if type(other) != GUID:
return False
return not msvcrt.memcmp(ctypes.addressof(self),
ctypes.addressof(other),
ctypes.sizeof(GUID))
def __ne__(self, other):
return not self.__eq__(other)
class VDS_DISK_PROP_SWITCH_TYPE(ctypes.Union):
_fields_ = [
("dwSignature", wintypes.DWORD),
("DiskGuid", GUID),
]
class VDS_DISK_PROP(ctypes.Structure):
_fields_ = [
("id", GUID),
("status", ctypes.c_int),
("ReserveMode", ctypes.c_int),
("health", ctypes.c_int),
("dwDeviceType", wintypes.DWORD),
("dwMediaType", wintypes.DWORD),
("ullSize", wintypes.ULARGE_INTEGER),
("ulBytesPerSector", wintypes.ULONG),
("ulSectorsPerTrack", wintypes.ULONG),
("ulTracksPerCylinder", wintypes.ULONG),
("ulFlags", wintypes.ULONG),
("BusType", ctypes.c_int),
("PartitionStyle", ctypes.c_int),
("switch_type", VDS_DISK_PROP_SWITCH_TYPE),
("pwszDiskAddress", wintypes.c_void_p),
("pwszName", wintypes.c_void_p),
("pwszFriendlyName", wintypes.c_void_p),
("pwszAdaptorName", wintypes.c_void_p),
("pwszDevicePath", wintypes.c_void_p),
]
class VDS_DISK_EXTENT(ctypes.Structure):
_fields_ = [
("diskId", GUID),
("type", ctypes.c_int),
("ullOffset", wintypes.ULARGE_INTEGER),
("ullSize", wintypes.ULARGE_INTEGER),
("volumeId", GUID),
("plexId", GUID),
("memberIdx", wintypes.ULONG),
]
class VDS_VOLUME_PROP(ctypes.Structure):
_fields_ = [
("id", GUID),
("type", ctypes.c_int),
("status", ctypes.c_int),
("health", ctypes.c_int),
("TransitionState", ctypes.c_int),
("ullSize", wintypes.ULARGE_INTEGER),
("ulFlags", wintypes.ULONG),
("RecommendedFileSystemType", ctypes.c_int),
("pwszName", wintypes.c_void_p),
]
class VDS_INPUT_DISK(ctypes.Structure):
_fields_ = [
("diskId", GUID),
("ullSize", wintypes.ULARGE_INTEGER),
("plexId", GUID),
("memberIdx", wintypes.ULONG),
]
class VDS_ASYNC_OUTPUT_cp(ctypes.Structure):
_fields_ = [
("ullOffset", wintypes.ULARGE_INTEGER),
("volumeId", GUID),
]
class VDS_ASYNC_OUTPUT_cv(ctypes.Structure):
_fields_ = [
("pVolumeUnk", wintypes.ULARGE_INTEGER),
]
class VDS_ASYNC_OUTPUT_bvp(ctypes.Structure):
_fields_ = [
("pVolumeUnk", ctypes.POINTER(comtypes.IUnknown)),
]
class VDS_ASYNC_OUTPUT_sv(ctypes.Structure):
_fields_ = [
("ullReclaimedBytes", wintypes.ULARGE_INTEGER),
]
class VDS_ASYNC_OUTPUT_cl(ctypes.Structure):
_fields_ = [
("pLunUnk", ctypes.POINTER(comtypes.IUnknown)),
]
class VDS_ASYNC_OUTPUT_ct(ctypes.Structure):
_fields_ = [
("pTargetUnk", ctypes.POINTER(comtypes.IUnknown)),
]
class VDS_ASYNC_OUTPUT_cpg(ctypes.Structure):
_fields_ = [
("pPortalGroupUnk", ctypes.POINTER(comtypes.IUnknown)),
]
class VDS_ASYNC_OUTPUT_SWITCH_TYPE(ctypes.Union):
_fields_ = [
("cp", VDS_ASYNC_OUTPUT_cp),
("cv", VDS_ASYNC_OUTPUT_cv),
("bvp", VDS_ASYNC_OUTPUT_bvp),
("sv", VDS_ASYNC_OUTPUT_sv),
("cl", VDS_ASYNC_OUTPUT_cl),
("ct", VDS_ASYNC_OUTPUT_ct),
("cpg", VDS_ASYNC_OUTPUT_cpg),
]
class VDS_ASYNC_OUTPUT(ctypes.Structure):
_fields_ = [
("type", ctypes.c_int),
("switch_type", VDS_ASYNC_OUTPUT_SWITCH_TYPE),
]
class IEnumVdsObject(comtypes.IUnknown):
_iid_ = comtypes.GUID("{118610b7-8d94-4030-b5b8-500889788e4e}")
_methods_ = [
comtypes.COMMETHOD([], comtypes.HRESULT, 'Next',
(['in'], wintypes.ULONG, 'celt'),
(['out'], ctypes.POINTER(ctypes.POINTER(
comtypes.IUnknown)),
'ppObjectArray'),
(['out'], ctypes.POINTER(wintypes.ULONG),
'pcFetched')),
]
class IVdsService(comtypes.IUnknown):
_iid_ = comtypes.GUID("{0818a8ef-9ba9-40d8-a6f9-e22833cc771e}")
_methods_ = [
comtypes.COMMETHOD([], comtypes.HRESULT, 'IsServiceReady'),
comtypes.COMMETHOD([], comtypes.HRESULT, 'WaitForServiceReady'),
comtypes.COMMETHOD([], comtypes.HRESULT, 'GetProperties',
(['out'], ctypes.c_void_p, 'pServiceProp')),
comtypes.COMMETHOD([], comtypes.HRESULT, 'QueryProviders',
(['in'], wintypes.DWORD, 'masks'),
(['out'],
ctypes.POINTER(ctypes.POINTER(IEnumVdsObject)),
'ppEnum'))
]
class IVdsServiceLoader(comtypes.IUnknown):
_iid_ = comtypes.GUID("{e0393303-90d4-4a97-ab71-e9b671ee2729}")
_methods_ = [
comtypes.COMMETHOD([], comtypes.HRESULT, 'LoadService',
(['in'], wintypes.LPCWSTR, 'pwszMachineName'),
(['out'],
ctypes.POINTER(ctypes.POINTER(IVdsService)),
'ppService'))
]
class IVdsSwProvider(comtypes.IUnknown):
_iid_ = comtypes.GUID("{9aa58360-ce33-4f92-b658-ed24b14425b8}")
_methods_ = [
comtypes.COMMETHOD([], comtypes.HRESULT, 'QueryPacks',
(['out'],
ctypes.POINTER(ctypes.POINTER(IEnumVdsObject)),
'ppEnum'))
]
class IVdsPack(comtypes.IUnknown):
_iid_ = comtypes.GUID("{3b69d7f5-9d94-4648-91ca-79939ba263bf}")
_methods_ = [
comtypes.COMMETHOD([], comtypes.HRESULT, 'GetProperties',
(['out'], ctypes.c_void_p, 'pPackProp')),
comtypes.COMMETHOD([], comtypes.HRESULT, 'GetProvider',
(['out'],
ctypes.POINTER(ctypes.POINTER(comtypes.IUnknown)),
'ppProvider')),
comtypes.COMMETHOD([], comtypes.HRESULT, 'QueryVolumes',
(['out'],
ctypes.POINTER(ctypes.POINTER(IEnumVdsObject)),
'ppEnum')),
comtypes.COMMETHOD([], comtypes.HRESULT, 'QueryDisks',
(['out'],
ctypes.POINTER(ctypes.POINTER(IEnumVdsObject)),
'ppEnum'))
]
class IVdsDisk(comtypes.IUnknown):
_iid_ = comtypes.GUID("{07e5c822-f00c-47a1-8fce-b244da56fd06}")
_methods_ = [
comtypes.COMMETHOD([], comtypes.HRESULT, 'GetProperties',
(['out'], ctypes.POINTER(VDS_DISK_PROP),
'pDiskProperties')),
comtypes.COMMETHOD([], comtypes.HRESULT, 'GetPack',
(['out'], ctypes.POINTER(ctypes.POINTER(IVdsPack)),
'ppPack')),
comtypes.COMMETHOD([], comtypes.HRESULT, 'GetIdentificationData',
(['out'], ctypes.c_void_p, 'pLunInfo')),
comtypes.COMMETHOD([], comtypes.HRESULT, 'QueryExtents',
(['out'], ctypes.POINTER(ctypes.POINTER(
VDS_DISK_EXTENT)),
'ppExtentArray'),
(['out'], ctypes.POINTER(wintypes.LONG),
'plNumberOfExtents')),
]
class IVdsAsync(comtypes.IUnknown):
_iid_ = comtypes.GUID("{d5d23b6d-5a55-4492-9889-397a3c2d2dbc}")
_methods_ = [
comtypes.COMMETHOD([], comtypes.HRESULT, 'Cancel'),
comtypes.COMMETHOD([], comtypes.HRESULT, 'Wait',
(['out'], ctypes.POINTER(
wintypes.HRESULT), 'pHrResult'),
(['out'], ctypes.POINTER(VDS_ASYNC_OUTPUT),
'pAsyncOut')),
comtypes.COMMETHOD([], comtypes.HRESULT, 'QueryStatus',
(['out'], ctypes.POINTER(
wintypes.HRESULT), 'pHrResult'),
(['out'], ctypes.POINTER(wintypes.ULONG),
'pulPercentCompleted')),
]
class IVdsVolume(comtypes.IUnknown):
_iid_ = comtypes.GUID("{88306bb2-e71f-478c-86a2-79da200a0f11}")
_methods_ = [
comtypes.COMMETHOD([], comtypes.HRESULT, 'GetProperties',
(['out'], ctypes.POINTER(VDS_VOLUME_PROP),
'pVolumeProperties')),
comtypes.COMMETHOD([], comtypes.HRESULT, 'GetPack',
(['out'], ctypes.POINTER(ctypes.POINTER(IVdsPack)),
'ppPack')),
comtypes.COMMETHOD([], comtypes.HRESULT, 'QueryPlexes',
(['out'],
ctypes.POINTER(ctypes.POINTER(IEnumVdsObject)),
'ppEnum')),
comtypes.COMMETHOD([], comtypes.HRESULT, 'Extend',
(['in'], ctypes.POINTER(
VDS_INPUT_DISK), 'pInputDiskArray'),
(['in'], wintypes.LONG, 'lNumberOfDisks'),
(['out'], ctypes.POINTER(
ctypes.POINTER(IVdsAsync)), 'ppAsync'),
),
comtypes.COMMETHOD([], comtypes.HRESULT, 'Shrink',
(['in'], wintypes.ULARGE_INTEGER,
'ullNumberOfBytesToRemove'),
(['out'], ctypes.POINTER(ctypes.POINTER(IVdsAsync)),
'ppAsync')),
]
def load_vds_service():
loader = client.CreateObject(CLSID_VdsLoader, interface=IVdsServiceLoader)
svc = loader.LoadService(None)
svc.WaitForServiceReady()
return svc
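# Illustrative sketch (not part of the original module): enumerating the VDS
# software providers with the interfaces declared above. Only interfaces and
# constants defined in this file are used; error handling is omitted.
def _enumerate_software_providers():
    svc = load_vds_service()
    provider_enum = svc.QueryProviders(VDS_QUERY_SOFTWARE_PROVIDERS)
    providers = []
    while True:
        unk, fetched = provider_enum.Next(1)
        if not fetched:
            break
        providers.append(unk.QueryInterface(IVdsSwProvider))
    return providers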
|
""" Cisco_IOS_XR_ipv6_acl_oper
This module contains a collection of YANG definitions
for Cisco IOS\-XR ipv6\-acl package operational data.
This module contains definitions
for the following management objects\:
ipv6\-acl\-and\-prefix\-list\: Root class of IPv6 Oper schema tree
Copyright (c) 2013\-2016 by Cisco Systems, Inc.
All rights reserved.
"""
import re
import collections
from enum import Enum
from ydk.types import Empty, YList, YLeafList, DELETE, Decimal64, FixedBitsDict
from ydk.errors import YPYError, YPYModelError
class AclAce1Enum(Enum):
"""
AclAce1Enum
ACE Types
.. data:: normal = 0
This is Normal ACE
.. data:: remark = 1
This is Remark ACE
.. data:: abf = 2
This is ABF ACE
"""
normal = 0
remark = 1
abf = 2
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_ipv6_acl_oper as meta
return meta._meta_table['AclAce1Enum']
class AclActionEnum(Enum):
"""
AclActionEnum
Acl action
.. data:: deny = 0
Deny
.. data:: permit = 1
Permit
.. data:: encrypt = 2
Encrypt
.. data:: bypass = 3
Bypass
.. data:: fallthrough = 4
Fallthrough
.. data:: invalid = 5
Invalid
"""
deny = 0
permit = 1
encrypt = 2
bypass = 3
fallthrough = 4
invalid = 5
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_ipv6_acl_oper as meta
return meta._meta_table['AclActionEnum']
class AclLogEnum(Enum):
"""
AclLogEnum
Acl log
.. data:: log_none = 0
Log None
.. data:: log = 1
Log Regular
.. data:: log_input = 2
Log Input
"""
log_none = 0
log = 1
log_input = 2
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_ipv6_acl_oper as meta
return meta._meta_table['AclLogEnum']
class AclPortOperatorEnum(Enum):
"""
AclPortOperatorEnum
Acl port operator
.. data:: none = 0
None
.. data:: eq = 1
Equal
.. data:: gt = 2
Greater than
.. data:: lt = 3
Less than
.. data:: neq = 4
Not Equal
.. data:: range = 5
Range
.. data:: onebyte = 8
One Byte
.. data:: twobytes = 9
Two Bytes
"""
none = 0
eq = 1
gt = 2
lt = 3
neq = 4
range = 5
onebyte = 8
twobytes = 9
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_ipv6_acl_oper as meta
return meta._meta_table['AclPortOperatorEnum']
class AclTcpflagsOperatorEnum(Enum):
"""
AclTcpflagsOperatorEnum
Acl tcpflags operator
.. data:: match_none = 0
Match None
.. data:: match_all = 1
Match All
.. data:: match_any_old = 2
Match any old
.. data:: match_any = 3
Match any
"""
match_none = 0
match_all = 1
match_any_old = 2
match_any = 3
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_ipv6_acl_oper as meta
return meta._meta_table['AclTcpflagsOperatorEnum']
class BagAclNhAtStatusEnum(Enum):
"""
BagAclNhAtStatusEnum
Bag acl nh at status
.. data:: unknown = 0
AT State Unknown
.. data:: up = 1
AT State UP
.. data:: down = 2
AT State DOWN
.. data:: not_present = 3
AT State Not Present
.. data:: max = 4
invalid status
"""
unknown = 0
up = 1
down = 2
not_present = 3
max = 4
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_ipv6_acl_oper as meta
return meta._meta_table['BagAclNhAtStatusEnum']
class BagAclNhEnum(Enum):
"""
BagAclNhEnum
Bag acl nh
.. data:: nexthop_none = 0
Next Hop None
.. data:: nexthop_default = 1
Nexthop Default
.. data:: nexthop = 2
Nexthop
"""
nexthop_none = 0
nexthop_default = 1
nexthop = 2
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_ipv6_acl_oper as meta
return meta._meta_table['BagAclNhEnum']
class BagAclNhStatusEnum(Enum):
"""
BagAclNhStatusEnum
Bag acl nh status
.. data:: not_present = 0
State Not Present
.. data:: unknown = 1
State Unknown
.. data:: down = 2
State DOWN
.. data:: up = 3
State UP
.. data:: max = 4
invalid status
"""
not_present = 0
unknown = 1
down = 2
up = 3
max = 4
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_ipv6_acl_oper as meta
return meta._meta_table['BagAclNhStatusEnum']
class Ipv6AclAndPrefixList(object):
"""
Root class of IPv6 Oper schema tree
.. attribute:: access_list_manager
AccessListManager containing ACLs and prefix lists
**type**\: :py:class:`AccessListManager <ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv6_acl_oper.Ipv6AclAndPrefixList.AccessListManager>`
.. attribute:: oor
Out Of Resources, Limits to the resources allocatable
**type**\: :py:class:`Oor <ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv6_acl_oper.Ipv6AclAndPrefixList.Oor>`
"""
_prefix = 'ipv6-acl-oper'
_revision = '2015-11-09'
def __init__(self):
self.access_list_manager = Ipv6AclAndPrefixList.AccessListManager()
self.access_list_manager.parent = self
self.oor = Ipv6AclAndPrefixList.Oor()
self.oor.parent = self
class AccessListManager(object):
"""
AccessListManager containing ACLs and prefix
lists
.. attribute:: accesses
ACL class displaying Usage and Entries
**type**\: :py:class:`Accesses <ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv6_acl_oper.Ipv6AclAndPrefixList.AccessListManager.Accesses>`
.. attribute:: prefixes
Table of prefix lists
**type**\: :py:class:`Prefixes <ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv6_acl_oper.Ipv6AclAndPrefixList.AccessListManager.Prefixes>`
.. attribute:: usages
Table of Usage statistics of ACLs at different nodes
**type**\: :py:class:`Usages <ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv6_acl_oper.Ipv6AclAndPrefixList.AccessListManager.Usages>`
"""
_prefix = 'ipv6-acl-oper'
_revision = '2015-11-09'
def __init__(self):
self.parent = None
self.accesses = Ipv6AclAndPrefixList.AccessListManager.Accesses()
self.accesses.parent = self
self.prefixes = Ipv6AclAndPrefixList.AccessListManager.Prefixes()
self.prefixes.parent = self
self.usages = Ipv6AclAndPrefixList.AccessListManager.Usages()
self.usages.parent = self
class Prefixes(object):
"""
Table of prefix lists
.. attribute:: prefix
Name of the prefix list
**type**\: list of :py:class:`Prefix <ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv6_acl_oper.Ipv6AclAndPrefixList.AccessListManager.Prefixes.Prefix>`
"""
_prefix = 'ipv6-acl-oper'
_revision = '2015-11-09'
def __init__(self):
self.parent = None
self.prefix = YList()
self.prefix.parent = self
self.prefix.name = 'prefix'
class Prefix(object):
"""
Name of the prefix list
.. attribute:: prefix_list_name <key>
Name of the prefix list
**type**\: str
**length:** 1..65
.. attribute:: prefix_list_sequences
Table of all the SequenceNumbers per prefix list
**type**\: :py:class:`PrefixListSequences <ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv6_acl_oper.Ipv6AclAndPrefixList.AccessListManager.Prefixes.Prefix.PrefixListSequences>`
"""
_prefix = 'ipv6-acl-oper'
_revision = '2015-11-09'
def __init__(self):
self.parent = None
self.prefix_list_name = None
self.prefix_list_sequences = Ipv6AclAndPrefixList.AccessListManager.Prefixes.Prefix.PrefixListSequences()
self.prefix_list_sequences.parent = self
class PrefixListSequences(object):
"""
Table of all the SequenceNumbers per prefix
list
.. attribute:: prefix_list_sequence
Sequence Number of a prefix list entry
**type**\: list of :py:class:`PrefixListSequence <ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv6_acl_oper.Ipv6AclAndPrefixList.AccessListManager.Prefixes.Prefix.PrefixListSequences.PrefixListSequence>`
"""
_prefix = 'ipv6-acl-oper'
_revision = '2015-11-09'
def __init__(self):
self.parent = None
self.prefix_list_sequence = YList()
self.prefix_list_sequence.parent = self
self.prefix_list_sequence.name = 'prefix_list_sequence'
class PrefixListSequence(object):
"""
Sequence Number of a prefix list entry
.. attribute:: sequence_number <key>
Sequence Number of the prefix list entry
**type**\: int
**range:** 1..2147483646
.. attribute:: acl_name
ACL Name
**type**\: str
.. attribute:: hits
Number of hits
**type**\: int
**range:** 0..4294967295
.. attribute:: is_ace_sequence_number
ACLE sequence number
**type**\: int
**range:** 0..4294967295
.. attribute:: is_ace_type
ACE type (acl, remark)
**type**\: :py:class:`AclAce1Enum <ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv6_acl_oper.AclAce1Enum>`
.. attribute:: is_address_in_numbers
IPv6 prefix
**type**\: str
**pattern:** ((\:\|[0\-9a\-fA\-F]{0,4})\:)([0\-9a\-fA\-F]{0,4}\:){0,5}((([0\-9a\-fA\-F]{0,4}\:)?(\:\|[0\-9a\-fA\-F]{0,4}))\|(((25[0\-5]\|2[0\-4][0\-9]\|[01]?[0\-9]?[0\-9])\\.){3}(25[0\-5]\|2[0\-4][0\-9]\|[01]?[0\-9]?[0\-9])))(%[\\p{N}\\p{L}]+)?
.. attribute:: is_address_mask_length
Prefix length
**type**\: int
**range:** 0..4294967295
.. attribute:: is_comment_for_entry
Remark String
**type**\: str
.. attribute:: is_length_operator
Port Operator
**type**\: :py:class:`AclPortOperatorEnum <ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv6_acl_oper.AclPortOperatorEnum>`
.. attribute:: is_packet_allow_or_deny
Grant value permit/deny
**type**\: :py:class:`AclActionEnum <ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv6_acl_oper.AclActionEnum>`
.. attribute:: is_packet_maximum_length
Maximum length
**type**\: int
**range:** 0..4294967295
.. attribute:: is_packet_minimum_length
Min length
**type**\: int
**range:** 0..4294967295
"""
_prefix = 'ipv6-acl-oper'
_revision = '2015-11-09'
def __init__(self):
self.parent = None
self.sequence_number = None
self.acl_name = None
self.hits = None
self.is_ace_sequence_number = None
self.is_ace_type = None
self.is_address_in_numbers = None
self.is_address_mask_length = None
self.is_comment_for_entry = None
self.is_length_operator = None
self.is_packet_allow_or_deny = None
self.is_packet_maximum_length = None
self.is_packet_minimum_length = None
@property
def _common_path(self):
if self.parent is None:
raise YPYModelError('parent is not set . Cannot derive path.')
if self.sequence_number is None:
raise YPYModelError('Key property sequence_number is None')
return self.parent._common_path +'/Cisco-IOS-XR-ipv6-acl-oper:prefix-list-sequence[Cisco-IOS-XR-ipv6-acl-oper:sequence-number = ' + str(self.sequence_number) + ']'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return False
def _has_data(self):
if self.sequence_number is not None:
return True
if self.acl_name is not None:
return True
if self.hits is not None:
return True
if self.is_ace_sequence_number is not None:
return True
if self.is_ace_type is not None:
return True
if self.is_address_in_numbers is not None:
return True
if self.is_address_mask_length is not None:
return True
if self.is_comment_for_entry is not None:
return True
if self.is_length_operator is not None:
return True
if self.is_packet_allow_or_deny is not None:
return True
if self.is_packet_maximum_length is not None:
return True
if self.is_packet_minimum_length is not None:
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_ipv6_acl_oper as meta
return meta._meta_table['Ipv6AclAndPrefixList.AccessListManager.Prefixes.Prefix.PrefixListSequences.PrefixListSequence']['meta_info']
@property
def _common_path(self):
if self.parent is None:
raise YPYModelError('parent is not set . Cannot derive path.')
return self.parent._common_path +'/Cisco-IOS-XR-ipv6-acl-oper:prefix-list-sequences'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return False
def _has_data(self):
if self.prefix_list_sequence is not None:
for child_ref in self.prefix_list_sequence:
if child_ref._has_data():
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_ipv6_acl_oper as meta
return meta._meta_table['Ipv6AclAndPrefixList.AccessListManager.Prefixes.Prefix.PrefixListSequences']['meta_info']
@property
def _common_path(self):
if self.prefix_list_name is None:
raise YPYModelError('Key property prefix_list_name is None')
return '/Cisco-IOS-XR-ipv6-acl-oper:ipv6-acl-and-prefix-list/Cisco-IOS-XR-ipv6-acl-oper:access-list-manager/Cisco-IOS-XR-ipv6-acl-oper:prefixes/Cisco-IOS-XR-ipv6-acl-oper:prefix[Cisco-IOS-XR-ipv6-acl-oper:prefix-list-name = ' + str(self.prefix_list_name) + ']'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return False
def _has_data(self):
if self.prefix_list_name is not None:
return True
if self.prefix_list_sequences is not None and self.prefix_list_sequences._has_data():
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_ipv6_acl_oper as meta
return meta._meta_table['Ipv6AclAndPrefixList.AccessListManager.Prefixes.Prefix']['meta_info']
@property
def _common_path(self):
return '/Cisco-IOS-XR-ipv6-acl-oper:ipv6-acl-and-prefix-list/Cisco-IOS-XR-ipv6-acl-oper:access-list-manager/Cisco-IOS-XR-ipv6-acl-oper:prefixes'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return False
def _has_data(self):
if self.prefix is not None:
for child_ref in self.prefix:
if child_ref._has_data():
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_ipv6_acl_oper as meta
return meta._meta_table['Ipv6AclAndPrefixList.AccessListManager.Prefixes']['meta_info']
class Usages(object):
"""
Table of Usage statistics of ACLs at different
nodes
.. attribute:: usage
Usage statistics of an ACL at a node
**type**\: list of :py:class:`Usage <ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv6_acl_oper.Ipv6AclAndPrefixList.AccessListManager.Usages.Usage>`
"""
_prefix = 'ipv6-acl-oper'
_revision = '2015-11-09'
def __init__(self):
self.parent = None
self.usage = YList()
self.usage.parent = self
self.usage.name = 'usage'
class Usage(object):
"""
Usage statistics of an ACL at a node
.. attribute:: access_list_name
Name of the ACL
**type**\: str
**length:** 1..65
.. attribute:: application_id
Application ID
**type**\: :py:class:`AclUsageAppIdEnumEnum <ydk.models.cisco_ios_xr.Cisco_IOS_XR_common_acl_datatypes.AclUsageAppIdEnumEnum>`
.. attribute:: node_name
Node where ACL is applied
**type**\: str
**pattern:** ([a\-zA\-Z0\-9\_]\*\\d+/){1,2}([a\-zA\-Z0\-9\_]\*\\d+)
.. attribute:: usage_details
Usage Statistics Details
**type**\: str
**mandatory**\: True
"""
_prefix = 'ipv6-acl-oper'
_revision = '2015-11-09'
def __init__(self):
self.parent = None
self.access_list_name = None
self.application_id = None
self.node_name = None
self.usage_details = None
@property
def _common_path(self):
return '/Cisco-IOS-XR-ipv6-acl-oper:ipv6-acl-and-prefix-list/Cisco-IOS-XR-ipv6-acl-oper:access-list-manager/Cisco-IOS-XR-ipv6-acl-oper:usages/Cisco-IOS-XR-ipv6-acl-oper:usage'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return False
def _has_data(self):
if self.access_list_name is not None:
return True
if self.application_id is not None:
return True
if self.node_name is not None:
return True
if self.usage_details is not None:
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_ipv6_acl_oper as meta
return meta._meta_table['Ipv6AclAndPrefixList.AccessListManager.Usages.Usage']['meta_info']
@property
def _common_path(self):
return '/Cisco-IOS-XR-ipv6-acl-oper:ipv6-acl-and-prefix-list/Cisco-IOS-XR-ipv6-acl-oper:access-list-manager/Cisco-IOS-XR-ipv6-acl-oper:usages'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return False
def _has_data(self):
if self.usage is not None:
for child_ref in self.usage:
if child_ref._has_data():
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_ipv6_acl_oper as meta
return meta._meta_table['Ipv6AclAndPrefixList.AccessListManager.Usages']['meta_info']
class Accesses(object):
"""
ACL class displaying Usage and Entries
.. attribute:: access
Name of the Access List
**type**\: list of :py:class:`Access <ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv6_acl_oper.Ipv6AclAndPrefixList.AccessListManager.Accesses.Access>`
"""
_prefix = 'ipv6-acl-oper'
_revision = '2015-11-09'
def __init__(self):
self.parent = None
self.access = YList()
self.access.parent = self
self.access.name = 'access'
class Access(object):
"""
Name of the Access List
.. attribute:: access_list_name <key>
Name of the Access List
**type**\: str
**length:** 1..65
.. attribute:: access_list_sequences
Table of all the sequence numbers per ACL
**type**\: :py:class:`AccessListSequences <ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv6_acl_oper.Ipv6AclAndPrefixList.AccessListManager.Accesses.Access.AccessListSequences>`
"""
_prefix = 'ipv6-acl-oper'
_revision = '2015-11-09'
def __init__(self):
self.parent = None
self.access_list_name = None
self.access_list_sequences = Ipv6AclAndPrefixList.AccessListManager.Accesses.Access.AccessListSequences()
self.access_list_sequences.parent = self
class AccessListSequences(object):
"""
Table of all the sequence numbers per ACL
.. attribute:: access_list_sequence
Sequence number of an ACL entry
**type**\: list of :py:class:`AccessListSequence <ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv6_acl_oper.Ipv6AclAndPrefixList.AccessListManager.Accesses.Access.AccessListSequences.AccessListSequence>`
"""
_prefix = 'ipv6-acl-oper'
_revision = '2015-11-09'
def __init__(self):
self.parent = None
self.access_list_sequence = YList()
self.access_list_sequence.parent = self
self.access_list_sequence.name = 'access_list_sequence'
class AccessListSequence(object):
"""
Sequence number of an ACL entry
.. attribute:: sequence_number <key>
ACL entry sequence number
**type**\: int
**range:** 1..2147483646
.. attribute:: acl_name
ACL Name
**type**\: str
.. attribute:: capture
Capture option, TRUE if enabled
**type**\: bool
.. attribute:: counter_name
Counter name
**type**\: str
.. attribute:: destination_mask
Destination Mask
**type**\: str
**pattern:** ((\:\|[0\-9a\-fA\-F]{0,4})\:)([0\-9a\-fA\-F]{0,4}\:){0,5}((([0\-9a\-fA\-F]{0,4}\:)?(\:\|[0\-9a\-fA\-F]{0,4}))\|(((25[0\-5]\|2[0\-4][0\-9]\|[01]?[0\-9]?[0\-9])\\.){3}(25[0\-5]\|2[0\-4][0\-9]\|[01]?[0\-9]?[0\-9])))(%[\\p{N}\\p{L}]+)?
.. attribute:: destination_port_group
Destination port object\-group
**type**\: str
.. attribute:: destination_prefix_group
Destination prefix object\-group
**type**\: str
.. attribute:: hits
hits
**type**\: int
**range:** 0..18446744073709551615
.. attribute:: hw_next_hop_info
HW Next hop info
**type**\: :py:class:`HwNextHopInfo <ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv6_acl_oper.Ipv6AclAndPrefixList.AccessListManager.Accesses.Access.AccessListSequences.AccessListSequence.HwNextHopInfo>`
.. attribute:: is_ace_sequence_number
ACLE sequence number
**type**\: int
**range:** 0..4294967295
.. attribute:: is_ace_type
ACE type (acl, remark)
**type**\: :py:class:`AclAce1Enum <ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv6_acl_oper.AclAce1Enum>`
.. attribute:: is_comment_for_entry
IsCommentForEntry
**type**\: str
.. attribute:: is_destination_address_in_numbers
IsDestinationAddressInNumbers
**type**\: str
**pattern:** ((\:\|[0\-9a\-fA\-F]{0,4})\:)([0\-9a\-fA\-F]{0,4}\:){0,5}((([0\-9a\-fA\-F]{0,4}\:)?(\:\|[0\-9a\-fA\-F]{0,4}))\|(((25[0\-5]\|2[0\-4][0\-9]\|[01]?[0\-9]?[0\-9])\\.){3}(25[0\-5]\|2[0\-4][0\-9]\|[01]?[0\-9]?[0\-9])))(%[\\p{N}\\p{L}]+)?
.. attribute:: is_destination_address_prefix_length
IsDestinationAddressPrefixLength
**type**\: int
**range:** 0..4294967295
.. attribute:: is_destination_operator
eq, ne, lt, etc..
**type**\: :py:class:`AclPortOperatorEnum <ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv6_acl_oper.AclPortOperatorEnum>`
.. attribute:: is_destination_port1
IsDestinationPort1
**type**\: int
**range:** 0..4294967295
.. attribute:: is_destination_port2
IsDestinationPort2
**type**\: int
**range:** 0..4294967295
.. attribute:: is_dscp_present
IsDSCPPresent
**type**\: int
**range:** \-2147483648..2147483647
.. attribute:: is_dscp_valu
IsDSCPValu
**type**\: int
**range:** 0..4294967295
.. attribute:: is_flow_id
IsFlowId
**type**\: int
**range:** 0..4294967295
.. attribute:: is_header_matches
Match if routing header is presant
**type**\: int
**range:** 0..4294967295
.. attribute:: is_icmp_message_off
Don't generate the icmp message
**type**\: int
**range:** \-2147483648..2147483647
.. attribute:: is_ipv6_protocol2_type
Protocol 2
**type**\: int
**range:** \-2147483648..2147483647
.. attribute:: is_ipv6_protocol_type
Protocol 1
**type**\: int
**range:** \-2147483648..2147483647
.. attribute:: is_log_option
IsLogOption
**type**\: :py:class:`AclLogEnum <ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv6_acl_oper.AclLogEnum>`
.. attribute:: is_packet_allow_or_deny
Grant value permit/deny
**type**\: :py:class:`AclActionEnum <ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv6_acl_oper.AclActionEnum>`
.. attribute:: is_packet_length_end
IsPacketLengthEnd
**type**\: int
**range:** 0..4294967295
.. attribute:: is_packet_length_operator
Match if routing header is presant
**type**\: :py:class:`AclPortOperatorEnum <ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv6_acl_oper.AclPortOperatorEnum>`
.. attribute:: is_packet_length_start
IsPacketLengthStart
**type**\: int
**range:** 0..4294967295
.. attribute:: is_precedence_present
IsPrecedencePresent
**type**\: int
**range:** \-2147483648..2147483647
.. attribute:: is_precedence_value
range from 0 to 7
**type**\: int
**range:** 0..4294967295
.. attribute:: is_protocol_operator
Protocol operator
**type**\: :py:class:`AclPortOperatorEnum <ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv6_acl_oper.AclPortOperatorEnum>`
.. attribute:: is_source_address_in_numbers
IsSourceAddressInNumbers
**type**\: str
**pattern:** ((\:\|[0\-9a\-fA\-F]{0,4})\:)([0\-9a\-fA\-F]{0,4}\:){0,5}((([0\-9a\-fA\-F]{0,4}\:)?(\:\|[0\-9a\-fA\-F]{0,4}))\|(((25[0\-5]\|2[0\-4][0\-9]\|[01]?[0\-9]?[0\-9])\\.){3}(25[0\-5]\|2[0\-4][0\-9]\|[01]?[0\-9]?[0\-9])))(%[\\p{N}\\p{L}]+)?
.. attribute:: is_source_address_prefix_length
IsSourceAddressPrefixLength
**type**\: int
**range:** 0..4294967295
.. attribute:: is_source_operator
eq, ne, lt, etc..
**type**\: :py:class:`AclPortOperatorEnum <ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv6_acl_oper.AclPortOperatorEnum>`
.. attribute:: is_source_port1
IsSourcePort1
**type**\: int
**range:** 0..4294967295
.. attribute:: is_source_port2
IsSourcePort2
**type**\: int
**range:** 0..4294967295
.. attribute:: is_tcp_bits
IsTCPBits
**type**\: int
**range:** 0..4294967295
.. attribute:: is_tcp_bits_mask
IsTCPBitsMask
**type**\: int
**range:** 0..4294967295
.. attribute:: is_tcp_bits_operator
IsTCPBitsOperator
**type**\: :py:class:`AclTcpflagsOperatorEnum <ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv6_acl_oper.AclTcpflagsOperatorEnum>`
.. attribute:: is_time_to_live_end
IsTimeToLiveEnd
**type**\: int
**range:** 0..4294967295
.. attribute:: is_time_to_live_operator
IsTimeToLiveOperator
**type**\: :py:class:`AclPortOperatorEnum <ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv6_acl_oper.AclPortOperatorEnum>`
.. attribute:: is_time_to_live_start
IsTimeToLiveStart
**type**\: int
**range:** 0..4294967295
.. attribute:: next_hop_info
Next hop info
**type**\: list of :py:class:`NextHopInfo <ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv6_acl_oper.Ipv6AclAndPrefixList.AccessListManager.Accesses.Access.AccessListSequences.AccessListSequence.NextHopInfo>`
.. attribute:: next_hop_type
Next hop type
**type**\: :py:class:`BagAclNhEnum <ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv6_acl_oper.BagAclNhEnum>`
.. attribute:: no_stats
no stats
**type**\: int
**range:** \-2147483648..2147483647
.. attribute:: qos_group
Set qos\-group
**type**\: int
**range:** 0..65535
.. attribute:: sequence_str
Sequence String
**type**\: str
.. attribute:: source_mask
Source Mask
**type**\: str
**pattern:** ((\:\|[0\-9a\-fA\-F]{0,4})\:)([0\-9a\-fA\-F]{0,4}\:){0,5}((([0\-9a\-fA\-F]{0,4}\:)?(\:\|[0\-9a\-fA\-F]{0,4}))\|(((25[0\-5]\|2[0\-4][0\-9]\|[01]?[0\-9]?[0\-9])\\.){3}(25[0\-5]\|2[0\-4][0\-9]\|[01]?[0\-9]?[0\-9])))(%[\\p{N}\\p{L}]+)?
.. attribute:: source_port_group
Source port object\-group
**type**\: str
.. attribute:: source_prefix_group
Source prefix object\-group
**type**\: str
.. attribute:: udf
UDF
**type**\: list of :py:class:`Udf <ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv6_acl_oper.Ipv6AclAndPrefixList.AccessListManager.Accesses.Access.AccessListSequences.AccessListSequence.Udf>`
.. attribute:: undetermined_transport
Undetermined transport option, TRUE if enabled
**type**\: bool
"""
_prefix = 'ipv6-acl-oper'
_revision = '2015-11-09'
def __init__(self):
self.parent = None
self.sequence_number = None
self.acl_name = None
self.capture = None
self.counter_name = None
self.destination_mask = None
self.destination_port_group = None
self.destination_prefix_group = None
self.hits = None
self.hw_next_hop_info = Ipv6AclAndPrefixList.AccessListManager.Accesses.Access.AccessListSequences.AccessListSequence.HwNextHopInfo()
self.hw_next_hop_info.parent = self
self.is_ace_sequence_number = None
self.is_ace_type = None
self.is_comment_for_entry = None
self.is_destination_address_in_numbers = None
self.is_destination_address_prefix_length = None
self.is_destination_operator = None
self.is_destination_port1 = None
self.is_destination_port2 = None
self.is_dscp_present = None
self.is_dscp_valu = None
self.is_flow_id = None
self.is_header_matches = None
self.is_icmp_message_off = None
self.is_ipv6_protocol2_type = None
self.is_ipv6_protocol_type = None
self.is_log_option = None
self.is_packet_allow_or_deny = None
self.is_packet_length_end = None
self.is_packet_length_operator = None
self.is_packet_length_start = None
self.is_precedence_present = None
self.is_precedence_value = None
self.is_protocol_operator = None
self.is_source_address_in_numbers = None
self.is_source_address_prefix_length = None
self.is_source_operator = None
self.is_source_port1 = None
self.is_source_port2 = None
self.is_tcp_bits = None
self.is_tcp_bits_mask = None
self.is_tcp_bits_operator = None
self.is_time_to_live_end = None
self.is_time_to_live_operator = None
self.is_time_to_live_start = None
self.next_hop_info = YList()
self.next_hop_info.parent = self
self.next_hop_info.name = 'next_hop_info'
self.next_hop_type = None
self.no_stats = None
self.qos_group = None
self.sequence_str = None
self.source_mask = None
self.source_port_group = None
self.source_prefix_group = None
self.udf = YList()
self.udf.parent = self
self.udf.name = 'udf'
self.undetermined_transport = None
class HwNextHopInfo(object):
"""
HW Next hop info
.. attribute:: next_hop
The Next Hop
**type**\: str
**pattern:** ((\:\|[0\-9a\-fA\-F]{0,4})\:)([0\-9a\-fA\-F]{0,4}\:){0,5}((([0\-9a\-fA\-F]{0,4}\:)?(\:\|[0\-9a\-fA\-F]{0,4}))\|(((25[0\-5]\|2[0\-4][0\-9]\|[01]?[0\-9]?[0\-9])\\.){3}(25[0\-5]\|2[0\-4][0\-9]\|[01]?[0\-9]?[0\-9])))(%[\\p{N}\\p{L}]+)?
.. attribute:: table_id
Table ID
**type**\: int
**range:** 0..4294967295
.. attribute:: type
The next\-hop type
**type**\: :py:class:`BagAclNhEnum <ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv6_acl_oper.BagAclNhEnum>`
.. attribute:: vrf_name
Vrf Name
**type**\: str
**length:** 0..32
"""
_prefix = 'ipv6-acl-oper'
_revision = '2015-11-09'
def __init__(self):
self.parent = None
self.next_hop = None
self.table_id = None
self.type = None
self.vrf_name = None
@property
def _common_path(self):
if self.parent is None:
raise YPYModelError('parent is not set . Cannot derive path.')
return self.parent._common_path +'/Cisco-IOS-XR-ipv6-acl-oper:hw-next-hop-info'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return False
def _has_data(self):
if self.next_hop is not None:
return True
if self.table_id is not None:
return True
if self.type is not None:
return True
if self.vrf_name is not None:
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_ipv6_acl_oper as meta
return meta._meta_table['Ipv6AclAndPrefixList.AccessListManager.Accesses.Access.AccessListSequences.AccessListSequence.HwNextHopInfo']['meta_info']
class NextHopInfo(object):
"""
Next hop info
.. attribute:: acl_nh_exist
The nexthop exist
**type**\: int
**range:** \-2147483648..2147483647
.. attribute:: at_status
The next hop at status
**type**\: :py:class:`BagAclNhAtStatusEnum <ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv6_acl_oper.BagAclNhAtStatusEnum>`
.. attribute:: next_hop
The next hop
**type**\: str
**pattern:** ((\:\|[0\-9a\-fA\-F]{0,4})\:)([0\-9a\-fA\-F]{0,4}\:){0,5}((([0\-9a\-fA\-F]{0,4}\:)?(\:\|[0\-9a\-fA\-F]{0,4}))\|(((25[0\-5]\|2[0\-4][0\-9]\|[01]?[0\-9]?[0\-9])\\.){3}(25[0\-5]\|2[0\-4][0\-9]\|[01]?[0\-9]?[0\-9])))(%[\\p{N}\\p{L}]+)?
.. attribute:: status
The next hop status
**type**\: :py:class:`BagAclNhStatusEnum <ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv6_acl_oper.BagAclNhStatusEnum>`
.. attribute:: track_name
Track name
**type**\: str
**length:** 0..33
.. attribute:: vrf_name
Vrf Name
**type**\: str
**length:** 0..32
"""
_prefix = 'ipv6-acl-oper'
_revision = '2015-11-09'
def __init__(self):
self.parent = None
self.acl_nh_exist = None
self.at_status = None
self.next_hop = None
self.status = None
self.track_name = None
self.vrf_name = None
@property
def _common_path(self):
if self.parent is None:
raise YPYModelError('parent is not set . Cannot derive path.')
return self.parent._common_path +'/Cisco-IOS-XR-ipv6-acl-oper:next-hop-info'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return False
def _has_data(self):
if self.acl_nh_exist is not None:
return True
if self.at_status is not None:
return True
if self.next_hop is not None:
return True
if self.status is not None:
return True
if self.track_name is not None:
return True
if self.vrf_name is not None:
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_ipv6_acl_oper as meta
return meta._meta_table['Ipv6AclAndPrefixList.AccessListManager.Accesses.Access.AccessListSequences.AccessListSequence.NextHopInfo']['meta_info']
class Udf(object):
"""
UDF
.. attribute:: udf_mask
UDF Mask
**type**\: int
**range:** 0..4294967295
.. attribute:: udf_name
UDF Name
**type**\: str
**length:** 0..17
.. attribute:: udf_value
UDF Value
**type**\: int
**range:** 0..4294967295
"""
_prefix = 'ipv6-acl-oper'
_revision = '2015-11-09'
def __init__(self):
self.parent = None
self.udf_mask = None
self.udf_name = None
self.udf_value = None
@property
def _common_path(self):
if self.parent is None:
raise YPYModelError('parent is not set . Cannot derive path.')
return self.parent._common_path +'/Cisco-IOS-XR-ipv6-acl-oper:udf'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return False
def _has_data(self):
if self.udf_mask is not None:
return True
if self.udf_name is not None:
return True
if self.udf_value is not None:
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_ipv6_acl_oper as meta
return meta._meta_table['Ipv6AclAndPrefixList.AccessListManager.Accesses.Access.AccessListSequences.AccessListSequence.Udf']['meta_info']
@property
def _common_path(self):
if self.parent is None:
raise YPYModelError('parent is not set . Cannot derive path.')
if self.sequence_number is None:
raise YPYModelError('Key property sequence_number is None')
return self.parent._common_path +'/Cisco-IOS-XR-ipv6-acl-oper:access-list-sequence[Cisco-IOS-XR-ipv6-acl-oper:sequence-number = ' + str(self.sequence_number) + ']'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return False
def _has_data(self):
if self.sequence_number is not None:
return True
if self.acl_name is not None:
return True
if self.capture is not None:
return True
if self.counter_name is not None:
return True
if self.destination_mask is not None:
return True
if self.destination_port_group is not None:
return True
if self.destination_prefix_group is not None:
return True
if self.hits is not None:
return True
if self.hw_next_hop_info is not None and self.hw_next_hop_info._has_data():
return True
if self.is_ace_sequence_number is not None:
return True
if self.is_ace_type is not None:
return True
if self.is_comment_for_entry is not None:
return True
if self.is_destination_address_in_numbers is not None:
return True
if self.is_destination_address_prefix_length is not None:
return True
if self.is_destination_operator is not None:
return True
if self.is_destination_port1 is not None:
return True
if self.is_destination_port2 is not None:
return True
if self.is_dscp_present is not None:
return True
if self.is_dscp_valu is not None:
return True
if self.is_flow_id is not None:
return True
if self.is_header_matches is not None:
return True
if self.is_icmp_message_off is not None:
return True
if self.is_ipv6_protocol2_type is not None:
return True
if self.is_ipv6_protocol_type is not None:
return True
if self.is_log_option is not None:
return True
if self.is_packet_allow_or_deny is not None:
return True
if self.is_packet_length_end is not None:
return True
if self.is_packet_length_operator is not None:
return True
if self.is_packet_length_start is not None:
return True
if self.is_precedence_present is not None:
return True
if self.is_precedence_value is not None:
return True
if self.is_protocol_operator is not None:
return True
if self.is_source_address_in_numbers is not None:
return True
if self.is_source_address_prefix_length is not None:
return True
if self.is_source_operator is not None:
return True
if self.is_source_port1 is not None:
return True
if self.is_source_port2 is not None:
return True
if self.is_tcp_bits is not None:
return True
if self.is_tcp_bits_mask is not None:
return True
if self.is_tcp_bits_operator is not None:
return True
if self.is_time_to_live_end is not None:
return True
if self.is_time_to_live_operator is not None:
return True
if self.is_time_to_live_start is not None:
return True
if self.next_hop_info is not None:
for child_ref in self.next_hop_info:
if child_ref._has_data():
return True
if self.next_hop_type is not None:
return True
if self.no_stats is not None:
return True
if self.qos_group is not None:
return True
if self.sequence_str is not None:
return True
if self.source_mask is not None:
return True
if self.source_port_group is not None:
return True
if self.source_prefix_group is not None:
return True
if self.udf is not None:
for child_ref in self.udf:
if child_ref._has_data():
return True
if self.undetermined_transport is not None:
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_ipv6_acl_oper as meta
return meta._meta_table['Ipv6AclAndPrefixList.AccessListManager.Accesses.Access.AccessListSequences.AccessListSequence']['meta_info']
@property
def _common_path(self):
if self.parent is None:
raise YPYModelError('parent is not set . Cannot derive path.')
return self.parent._common_path +'/Cisco-IOS-XR-ipv6-acl-oper:access-list-sequences'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return False
def _has_data(self):
if self.access_list_sequence is not None:
for child_ref in self.access_list_sequence:
if child_ref._has_data():
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_ipv6_acl_oper as meta
return meta._meta_table['Ipv6AclAndPrefixList.AccessListManager.Accesses.Access.AccessListSequences']['meta_info']
@property
def _common_path(self):
if self.access_list_name is None:
raise YPYModelError('Key property access_list_name is None')
return '/Cisco-IOS-XR-ipv6-acl-oper:ipv6-acl-and-prefix-list/Cisco-IOS-XR-ipv6-acl-oper:access-list-manager/Cisco-IOS-XR-ipv6-acl-oper:accesses/Cisco-IOS-XR-ipv6-acl-oper:access[Cisco-IOS-XR-ipv6-acl-oper:access-list-name = ' + str(self.access_list_name) + ']'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return False
def _has_data(self):
if self.access_list_name is not None:
return True
if self.access_list_sequences is not None and self.access_list_sequences._has_data():
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_ipv6_acl_oper as meta
return meta._meta_table['Ipv6AclAndPrefixList.AccessListManager.Accesses.Access']['meta_info']
@property
def _common_path(self):
return '/Cisco-IOS-XR-ipv6-acl-oper:ipv6-acl-and-prefix-list/Cisco-IOS-XR-ipv6-acl-oper:access-list-manager/Cisco-IOS-XR-ipv6-acl-oper:accesses'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return False
def _has_data(self):
if self.access is not None:
for child_ref in self.access:
if child_ref._has_data():
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_ipv6_acl_oper as meta
return meta._meta_table['Ipv6AclAndPrefixList.AccessListManager.Accesses']['meta_info']
@property
def _common_path(self):
return '/Cisco-IOS-XR-ipv6-acl-oper:ipv6-acl-and-prefix-list/Cisco-IOS-XR-ipv6-acl-oper:access-list-manager'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return False
def _has_data(self):
if self.accesses is not None and self.accesses._has_data():
return True
if self.prefixes is not None and self.prefixes._has_data():
return True
if self.usages is not None and self.usages._has_data():
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_ipv6_acl_oper as meta
return meta._meta_table['Ipv6AclAndPrefixList.AccessListManager']['meta_info']
class Oor(object):
"""
Out Of Resources, Limits to the resources
allocatable
.. attribute:: access_list_summary
Resource Limits pertaining to ACLs only
**type**\: :py:class:`AccessListSummary <ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv6_acl_oper.Ipv6AclAndPrefixList.Oor.AccessListSummary>`
.. attribute:: details
Details of the overall out of resource limit
**type**\: :py:class:`Details <ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv6_acl_oper.Ipv6AclAndPrefixList.Oor.Details>`
.. attribute:: oor_accesses
Resource occupation details for ACLs
**type**\: :py:class:`OorAccesses <ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv6_acl_oper.Ipv6AclAndPrefixList.Oor.OorAccesses>`
.. attribute:: oor_prefixes
Resource occupation details for prefix lists
**type**\: :py:class:`OorPrefixes <ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv6_acl_oper.Ipv6AclAndPrefixList.Oor.OorPrefixes>`
.. attribute:: prefix_list_summary
Summary of the prefix Lists resource utilization
**type**\: :py:class:`PrefixListSummary <ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv6_acl_oper.Ipv6AclAndPrefixList.Oor.PrefixListSummary>`
"""
_prefix = 'ipv6-acl-oper'
_revision = '2015-11-09'
def __init__(self):
self.parent = None
self.access_list_summary = Ipv6AclAndPrefixList.Oor.AccessListSummary()
self.access_list_summary.parent = self
self.details = Ipv6AclAndPrefixList.Oor.Details()
self.details.parent = self
self.oor_accesses = Ipv6AclAndPrefixList.Oor.OorAccesses()
self.oor_accesses.parent = self
self.oor_prefixes = Ipv6AclAndPrefixList.Oor.OorPrefixes()
self.oor_prefixes.parent = self
self.prefix_list_summary = Ipv6AclAndPrefixList.Oor.PrefixListSummary()
self.prefix_list_summary.parent = self
class Details(object):
"""
Details of the overall out of resource limit
.. attribute:: is_current_configured_ac_ls
Current configured acls
**type**\: int
**range:** 0..4294967295
.. attribute:: is_current_configured_aces
Current configured aces
**type**\: int
**range:** 0..4294967295
.. attribute:: is_current_maximum_configurable_aces
Current max configurable aces
**type**\: int
**range:** 0..4294967295
.. attribute:: is_current_maximum_configurable_acls
Current max configurable acls
**type**\: int
**range:** 0..4294967295
.. attribute:: is_default_maximum_configurable_ac_es
default max configurable aces
**type**\: int
**range:** 0..4294967295
.. attribute:: is_default_maximum_configurable_ac_ls
default max configurable acls
**type**\: int
**range:** 0..4294967295
.. attribute:: is_maximum_configurable_ac_es
max configurable aces
**type**\: int
**range:** 0..4294967295
.. attribute:: is_maximum_configurable_ac_ls
max configurable acls
**type**\: int
**range:** 0..4294967295
"""
_prefix = 'ipv6-acl-oper'
_revision = '2015-11-09'
def __init__(self):
self.parent = None
self.is_current_configured_ac_ls = None
self.is_current_configured_aces = None
self.is_current_maximum_configurable_aces = None
self.is_current_maximum_configurable_acls = None
self.is_default_maximum_configurable_ac_es = None
self.is_default_maximum_configurable_ac_ls = None
self.is_maximum_configurable_ac_es = None
self.is_maximum_configurable_ac_ls = None
@property
def _common_path(self):
return '/Cisco-IOS-XR-ipv6-acl-oper:ipv6-acl-and-prefix-list/Cisco-IOS-XR-ipv6-acl-oper:oor/Cisco-IOS-XR-ipv6-acl-oper:details'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return False
def _has_data(self):
if self.is_current_configured_ac_ls is not None:
return True
if self.is_current_configured_aces is not None:
return True
if self.is_current_maximum_configurable_aces is not None:
return True
if self.is_current_maximum_configurable_acls is not None:
return True
if self.is_default_maximum_configurable_ac_es is not None:
return True
if self.is_default_maximum_configurable_ac_ls is not None:
return True
if self.is_maximum_configurable_ac_es is not None:
return True
if self.is_maximum_configurable_ac_ls is not None:
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_ipv6_acl_oper as meta
return meta._meta_table['Ipv6AclAndPrefixList.Oor.Details']['meta_info']
class PrefixListSummary(object):
"""
Summary of the prefix Lists resource
utilization
.. attribute:: details
Summary Detail of the prefix list Resource Utilisation
**type**\: :py:class:`Details <ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv6_acl_oper.Ipv6AclAndPrefixList.Oor.PrefixListSummary.Details>`
"""
_prefix = 'ipv6-acl-oper'
_revision = '2015-11-09'
def __init__(self):
self.parent = None
self.details = Ipv6AclAndPrefixList.Oor.PrefixListSummary.Details()
self.details.parent = self
class Details(object):
"""
Summary Detail of the prefix list Resource
Utilisation
.. attribute:: is_current_configured_ac_ls
Current configured acls
**type**\: int
**range:** 0..4294967295
.. attribute:: is_current_configured_aces
Current configured aces
**type**\: int
**range:** 0..4294967295
.. attribute:: is_current_maximum_configurable_aces
Current max configurable aces
**type**\: int
**range:** 0..4294967295
.. attribute:: is_current_maximum_configurable_acls
Current max configurable acls
**type**\: int
**range:** 0..4294967295
.. attribute:: is_default_maximum_configurable_ac_es
default max configurable aces
**type**\: int
**range:** 0..4294967295
.. attribute:: is_default_maximum_configurable_ac_ls
default max configurable acls
**type**\: int
**range:** 0..4294967295
.. attribute:: is_maximum_configurable_ac_es
max configurable aces
**type**\: int
**range:** 0..4294967295
.. attribute:: is_maximum_configurable_ac_ls
max configurable acls
**type**\: int
**range:** 0..4294967295
"""
_prefix = 'ipv6-acl-oper'
_revision = '2015-11-09'
def __init__(self):
self.parent = None
self.is_current_configured_ac_ls = None
self.is_current_configured_aces = None
self.is_current_maximum_configurable_aces = None
self.is_current_maximum_configurable_acls = None
self.is_default_maximum_configurable_ac_es = None
self.is_default_maximum_configurable_ac_ls = None
self.is_maximum_configurable_ac_es = None
self.is_maximum_configurable_ac_ls = None
@property
def _common_path(self):
return '/Cisco-IOS-XR-ipv6-acl-oper:ipv6-acl-and-prefix-list/Cisco-IOS-XR-ipv6-acl-oper:oor/Cisco-IOS-XR-ipv6-acl-oper:prefix-list-summary/Cisco-IOS-XR-ipv6-acl-oper:details'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return False
def _has_data(self):
if self.is_current_configured_ac_ls is not None:
return True
if self.is_current_configured_aces is not None:
return True
if self.is_current_maximum_configurable_aces is not None:
return True
if self.is_current_maximum_configurable_acls is not None:
return True
if self.is_default_maximum_configurable_ac_es is not None:
return True
if self.is_default_maximum_configurable_ac_ls is not None:
return True
if self.is_maximum_configurable_ac_es is not None:
return True
if self.is_maximum_configurable_ac_ls is not None:
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_ipv6_acl_oper as meta
return meta._meta_table['Ipv6AclAndPrefixList.Oor.PrefixListSummary.Details']['meta_info']
@property
def _common_path(self):
return '/Cisco-IOS-XR-ipv6-acl-oper:ipv6-acl-and-prefix-list/Cisco-IOS-XR-ipv6-acl-oper:oor/Cisco-IOS-XR-ipv6-acl-oper:prefix-list-summary'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return False
def _has_data(self):
if self.details is not None and self.details._has_data():
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_ipv6_acl_oper as meta
return meta._meta_table['Ipv6AclAndPrefixList.Oor.PrefixListSummary']['meta_info']
class OorAccesses(object):
"""
Resource occupation details for ACLs
.. attribute:: oor_access
Resource occupation details for a particular ACL
**type**\: list of :py:class:`OorAccess <ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv6_acl_oper.Ipv6AclAndPrefixList.Oor.OorAccesses.OorAccess>`
"""
_prefix = 'ipv6-acl-oper'
_revision = '2015-11-09'
def __init__(self):
self.parent = None
self.oor_access = YList()
self.oor_access.parent = self
self.oor_access.name = 'oor_access'
class OorAccess(object):
"""
Resource occupation details for a particular
ACL
.. attribute:: access_list_name <key>
Name of the Access List
**type**\: str
**length:** 1..65
.. attribute:: is_current_configured_ac_ls
Current configured acls
**type**\: int
**range:** 0..4294967295
.. attribute:: is_current_configured_aces
Current configured aces
**type**\: int
**range:** 0..4294967295
.. attribute:: is_current_maximum_configurable_aces
Current max configurable aces
**type**\: int
**range:** 0..4294967295
.. attribute:: is_current_maximum_configurable_acls
Current max configurable acls
**type**\: int
**range:** 0..4294967295
.. attribute:: is_default_maximum_configurable_ac_es
default max configurable aces
**type**\: int
**range:** 0..4294967295
.. attribute:: is_default_maximum_configurable_ac_ls
default max configurable acls
**type**\: int
**range:** 0..4294967295
.. attribute:: is_maximum_configurable_ac_es
max configurable aces
**type**\: int
**range:** 0..4294967295
.. attribute:: is_maximum_configurable_ac_ls
max configurable acls
**type**\: int
**range:** 0..4294967295
"""
_prefix = 'ipv6-acl-oper'
_revision = '2015-11-09'
def __init__(self):
self.parent = None
self.access_list_name = None
self.is_current_configured_ac_ls = None
self.is_current_configured_aces = None
self.is_current_maximum_configurable_aces = None
self.is_current_maximum_configurable_acls = None
self.is_default_maximum_configurable_ac_es = None
self.is_default_maximum_configurable_ac_ls = None
self.is_maximum_configurable_ac_es = None
self.is_maximum_configurable_ac_ls = None
@property
def _common_path(self):
if self.access_list_name is None:
raise YPYModelError('Key property access_list_name is None')
return '/Cisco-IOS-XR-ipv6-acl-oper:ipv6-acl-and-prefix-list/Cisco-IOS-XR-ipv6-acl-oper:oor/Cisco-IOS-XR-ipv6-acl-oper:oor-accesses/Cisco-IOS-XR-ipv6-acl-oper:oor-access[Cisco-IOS-XR-ipv6-acl-oper:access-list-name = ' + str(self.access_list_name) + ']'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return False
def _has_data(self):
if self.access_list_name is not None:
return True
if self.is_current_configured_ac_ls is not None:
return True
if self.is_current_configured_aces is not None:
return True
if self.is_current_maximum_configurable_aces is not None:
return True
if self.is_current_maximum_configurable_acls is not None:
return True
if self.is_default_maximum_configurable_ac_es is not None:
return True
if self.is_default_maximum_configurable_ac_ls is not None:
return True
if self.is_maximum_configurable_ac_es is not None:
return True
if self.is_maximum_configurable_ac_ls is not None:
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_ipv6_acl_oper as meta
return meta._meta_table['Ipv6AclAndPrefixList.Oor.OorAccesses.OorAccess']['meta_info']
@property
def _common_path(self):
return '/Cisco-IOS-XR-ipv6-acl-oper:ipv6-acl-and-prefix-list/Cisco-IOS-XR-ipv6-acl-oper:oor/Cisco-IOS-XR-ipv6-acl-oper:oor-accesses'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return False
def _has_data(self):
if self.oor_access is not None:
for child_ref in self.oor_access:
if child_ref._has_data():
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_ipv6_acl_oper as meta
return meta._meta_table['Ipv6AclAndPrefixList.Oor.OorAccesses']['meta_info']
class OorPrefixes(object):
"""
Resource occupation details for prefix lists
.. attribute:: oor_prefix
Resource occupation details for a particular prefix list
**type**\: list of :py:class:`OorPrefix <ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv6_acl_oper.Ipv6AclAndPrefixList.Oor.OorPrefixes.OorPrefix>`
"""
_prefix = 'ipv6-acl-oper'
_revision = '2015-11-09'
def __init__(self):
self.parent = None
self.oor_prefix = YList()
self.oor_prefix.parent = self
self.oor_prefix.name = 'oor_prefix'
class OorPrefix(object):
"""
Resource occupation details for a particular
prefix list
.. attribute:: prefix_list_name <key>
Name of a prefix list
**type**\: str
**length:** 1..65
.. attribute:: is_current_configured_ac_ls
Current configured acls
**type**\: int
**range:** 0..4294967295
.. attribute:: is_current_configured_aces
Current configured aces
**type**\: int
**range:** 0..4294967295
.. attribute:: is_current_maximum_configurable_aces
Current max configurable aces
**type**\: int
**range:** 0..4294967295
.. attribute:: is_current_maximum_configurable_acls
Current max configurable acls
**type**\: int
**range:** 0..4294967295
.. attribute:: is_default_maximum_configurable_ac_es
default max configurable aces
**type**\: int
**range:** 0..4294967295
.. attribute:: is_default_maximum_configurable_ac_ls
default max configurable acls
**type**\: int
**range:** 0..4294967295
.. attribute:: is_maximum_configurable_ac_es
max configurable aces
**type**\: int
**range:** 0..4294967295
.. attribute:: is_maximum_configurable_ac_ls
max configurable acls
**type**\: int
**range:** 0..4294967295
"""
_prefix = 'ipv6-acl-oper'
_revision = '2015-11-09'
def __init__(self):
self.parent = None
self.prefix_list_name = None
self.is_current_configured_ac_ls = None
self.is_current_configured_aces = None
self.is_current_maximum_configurable_aces = None
self.is_current_maximum_configurable_acls = None
self.is_default_maximum_configurable_ac_es = None
self.is_default_maximum_configurable_ac_ls = None
self.is_maximum_configurable_ac_es = None
self.is_maximum_configurable_ac_ls = None
@property
def _common_path(self):
if self.prefix_list_name is None:
raise YPYModelError('Key property prefix_list_name is None')
return '/Cisco-IOS-XR-ipv6-acl-oper:ipv6-acl-and-prefix-list/Cisco-IOS-XR-ipv6-acl-oper:oor/Cisco-IOS-XR-ipv6-acl-oper:oor-prefixes/Cisco-IOS-XR-ipv6-acl-oper:oor-prefix[Cisco-IOS-XR-ipv6-acl-oper:prefix-list-name = ' + str(self.prefix_list_name) + ']'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return False
def _has_data(self):
if self.prefix_list_name is not None:
return True
if self.is_current_configured_ac_ls is not None:
return True
if self.is_current_configured_aces is not None:
return True
if self.is_current_maximum_configurable_aces is not None:
return True
if self.is_current_maximum_configurable_acls is not None:
return True
if self.is_default_maximum_configurable_ac_es is not None:
return True
if self.is_default_maximum_configurable_ac_ls is not None:
return True
if self.is_maximum_configurable_ac_es is not None:
return True
if self.is_maximum_configurable_ac_ls is not None:
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_ipv6_acl_oper as meta
return meta._meta_table['Ipv6AclAndPrefixList.Oor.OorPrefixes.OorPrefix']['meta_info']
@property
def _common_path(self):
return '/Cisco-IOS-XR-ipv6-acl-oper:ipv6-acl-and-prefix-list/Cisco-IOS-XR-ipv6-acl-oper:oor/Cisco-IOS-XR-ipv6-acl-oper:oor-prefixes'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return False
def _has_data(self):
if self.oor_prefix is not None:
for child_ref in self.oor_prefix:
if child_ref._has_data():
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_ipv6_acl_oper as meta
return meta._meta_table['Ipv6AclAndPrefixList.Oor.OorPrefixes']['meta_info']
class AccessListSummary(object):
"""
Resource Limits pertaining to ACLs only
.. attribute:: details
Details containing the resource limits of the ACLs
**type**\: :py:class:`Details <ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv6_acl_oper.Ipv6AclAndPrefixList.Oor.AccessListSummary.Details>`
"""
_prefix = 'ipv6-acl-oper'
_revision = '2015-11-09'
def __init__(self):
self.parent = None
self.details = Ipv6AclAndPrefixList.Oor.AccessListSummary.Details()
self.details.parent = self
class Details(object):
"""
Details containing the resource limits of the
ACLs
.. attribute:: is_current_configured_ac_ls
Current configured acls
**type**\: int
**range:** 0..4294967295
.. attribute:: is_current_configured_aces
Current configured aces
**type**\: int
**range:** 0..4294967295
.. attribute:: is_current_maximum_configurable_aces
Current max configurable aces
**type**\: int
**range:** 0..4294967295
.. attribute:: is_current_maximum_configurable_acls
Current max configurable acls
**type**\: int
**range:** 0..4294967295
.. attribute:: is_default_maximum_configurable_ac_es
default max configurable aces
**type**\: int
**range:** 0..4294967295
.. attribute:: is_default_maximum_configurable_ac_ls
default max configurable acls
**type**\: int
**range:** 0..4294967295
.. attribute:: is_maximum_configurable_ac_es
max configurable aces
**type**\: int
**range:** 0..4294967295
.. attribute:: is_maximum_configurable_ac_ls
max configurable acls
**type**\: int
**range:** 0..4294967295
"""
_prefix = 'ipv6-acl-oper'
_revision = '2015-11-09'
def __init__(self):
self.parent = None
self.is_current_configured_ac_ls = None
self.is_current_configured_aces = None
self.is_current_maximum_configurable_aces = None
self.is_current_maximum_configurable_acls = None
self.is_default_maximum_configurable_ac_es = None
self.is_default_maximum_configurable_ac_ls = None
self.is_maximum_configurable_ac_es = None
self.is_maximum_configurable_ac_ls = None
@property
def _common_path(self):
return '/Cisco-IOS-XR-ipv6-acl-oper:ipv6-acl-and-prefix-list/Cisco-IOS-XR-ipv6-acl-oper:oor/Cisco-IOS-XR-ipv6-acl-oper:access-list-summary/Cisco-IOS-XR-ipv6-acl-oper:details'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return False
def _has_data(self):
if self.is_current_configured_ac_ls is not None:
return True
if self.is_current_configured_aces is not None:
return True
if self.is_current_maximum_configurable_aces is not None:
return True
if self.is_current_maximum_configurable_acls is not None:
return True
if self.is_default_maximum_configurable_ac_es is not None:
return True
if self.is_default_maximum_configurable_ac_ls is not None:
return True
if self.is_maximum_configurable_ac_es is not None:
return True
if self.is_maximum_configurable_ac_ls is not None:
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_ipv6_acl_oper as meta
return meta._meta_table['Ipv6AclAndPrefixList.Oor.AccessListSummary.Details']['meta_info']
@property
def _common_path(self):
return '/Cisco-IOS-XR-ipv6-acl-oper:ipv6-acl-and-prefix-list/Cisco-IOS-XR-ipv6-acl-oper:oor/Cisco-IOS-XR-ipv6-acl-oper:access-list-summary'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return False
def _has_data(self):
if self.details is not None and self.details._has_data():
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_ipv6_acl_oper as meta
return meta._meta_table['Ipv6AclAndPrefixList.Oor.AccessListSummary']['meta_info']
@property
def _common_path(self):
return '/Cisco-IOS-XR-ipv6-acl-oper:ipv6-acl-and-prefix-list/Cisco-IOS-XR-ipv6-acl-oper:oor'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return False
def _has_data(self):
if self.access_list_summary is not None and self.access_list_summary._has_data():
return True
if self.details is not None and self.details._has_data():
return True
if self.oor_accesses is not None and self.oor_accesses._has_data():
return True
if self.oor_prefixes is not None and self.oor_prefixes._has_data():
return True
if self.prefix_list_summary is not None and self.prefix_list_summary._has_data():
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_ipv6_acl_oper as meta
return meta._meta_table['Ipv6AclAndPrefixList.Oor']['meta_info']
@property
def _common_path(self):
return '/Cisco-IOS-XR-ipv6-acl-oper:ipv6-acl-and-prefix-list'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return False
def _has_data(self):
if self.access_list_manager is not None and self.access_list_manager._has_data():
return True
if self.oor is not None and self.oor._has_data():
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_ipv6_acl_oper as meta
return meta._meta_table['Ipv6AclAndPrefixList']['meta_info']
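# The classes above are read-only operational bindings for the
# Cisco-IOS-XR-ipv6-acl-oper YANG model (is_config() returns False throughout).
# A minimal usage sketch follows; the device address, credentials, and transport
# details are placeholders for illustration only.
if __name__ == '__main__':
    from ydk.providers import NetconfServiceProvider
    from ydk.services import CRUDService

    provider = NetconfServiceProvider(address='192.0.2.1', port=830,
                                      username='admin', password='admin',
                                      protocol='ssh')
    crud = CRUDService()

    # Read the whole operational subtree and list the ACL names reported by the device.
    acl_oper = crud.read(provider, Ipv6AclAndPrefixList())
    for access in acl_oper.access_list_manager.accesses.access:
        print(access.access_list_name)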
|
from PIL import Image
import cv2
import numpy as np
def set_global():
global test_label_file_a
global w_resize
global h_resize
global crop_x1
global crop_x2
global crop_y1
global crop_y2
global LABELS
global LABEL_names
global LABEL_short
global Misclassified
global CorrectClassified
global maxNumSaveFiles
global patches
global tmp
global tmp2
global data
global data_label_file
global data_label_path
global test_images
global test_label_file
global test_label_path
global absolute_path
    # Initial resize targets; the actual size may change slightly after imutils.resize().
    # Values tried previously: 64, 124 (124 caused a sequence error), 28; finding good
    # initial values is still tricky.
    h_resize = 184
    w_resize = 184
    # Crop window in pixels; the slicing convention used downstream is [y1:y2, x1:x2]
    # (an earlier crop used [350:750, 610:1300]).
    crop_x1 = 100
    crop_x2 = 420
    crop_y1 = 100
    # Derive crop_y2 so the crop matches the aspect ratio accepted by the TensorFlow input;
    # integer division keeps the coordinate usable as an array index.
    crop_y2 = crop_y1 + (crop_x2 - crop_x1) * h_resize // w_resize
    # Note: each subdirectory name below must end with '/'.
num_of_classes = 6
if num_of_classes == 6:
LABELS = ['hinten/', 'links/', 'oben/', 'rechts/', 'unten/', 'vorn/']
LABEL_names = ['hinten', 'links', 'oben', 'rechts','unten', 'vorn']
LABEL_short = ['H', 'L', 'O', 'R','U', 'V']
    if num_of_classes == 8:
LABELS = ['hinten_1/','hinten_2/', 'links/', 'oben/', 'rechts/', 'unten/', 'vorn_1/','vorn_2/']
LABEL_names = ['hinten_1','hinten_2','links', 'oben', 'rechts', 'unten', 'vorn_1','vorn_2']
LABEL_short = ['H1','H2','L', 'O', 'R', 'U', 'V1','V2']
if num_of_classes == 7:
LABELS = ['neg/','hinten/', 'links/', 'oben/', 'rechts/', 'unten/', 'vorn/']
LABEL_names = ['neg','hinten', 'links', 'oben', 'rechts', 'unten', 'vorn']
LABEL_short = ['N','H', 'L', 'O', 'R', 'U', 'V']
if num_of_classes == 2:
LABELS = ['vorn/', 'hinten/']
LABEL_names = ['vorn', 'hinten']
        LABEL_short = ['V', 'H']
if num_of_classes == 5:
LABELS = ['hinten/', 'links/', 'oben/', 'rechts/', 'unten/']
LABEL_names = ['hinten', 'links', 'oben', 'rechts','unten']
LABEL_short = ['H','L', 'O', 'R','U']
Misclassified = '../classified/Misclassified'
CorrectClassified = '../classified/CorrectClassified'
maxNumSaveFiles = 1000
absolute_path = '/home/hamed/Documents/Lego_copy/'
patches = absolute_path + './tmp/input_patches/'
    tmp = absolute_path + './tmp/'
tmp2 = absolute_path + './tmp/tmp2/'
data = absolute_path + './Data/data_2/'
data_label_path = data + '/*/*'
data_label_file = absolute_path + './FileList.txt'
#test_images = absolute_path + './Test_Images/testpkg2_no_bg/'
#test_images = absolute_path + './Test_Images/testpkg3_white_200x200/'
#test_images = absolute_path + './Test_Images/testpkg5_42x42/'
test_images = absolute_path + './Test_Images/testpkg5local_224x224/'
#test_images = absolute_path + './Test_Images/testpkg7_mix_crop/'
#test_images = absolute_path + './Test_Images/7_2/' #mix sizes and dark
#test_images = absolute_path + './Test_Images/testpkg8_frame/'
#test_images = absolute_path + './Test_Images/testpkg8_dark/'
test_label_path = test_images + '/*/*'
test_label_file = absolute_path + './FileList_TEST.txt'
test_label_file_a = absolute_path + './FileList_TEST_act1.txt'
#test_images = absolute_path + './Test_Images/testpkg6big/'
'''
from PIL import Image #hy: create video with images
activation_test_img = Image.open('../hintenTest.jpg')
activation_test_img.show()
activation_test_img.save('../hintenTest2.jpg')
img = Image.open('../tmp/resized/rechts/rechts_t2_1_rz400_d0_0400_1.jpg')
bigsize = (img.size[0]*3, img.size[1]*3)
mask = Image.new('L', bigsize, 0)
draw = ImageDraw.Draw(mask)
draw.ellipse((0,0) + bigsize, fill=30)
mask = mask.resize(img.size, Image.ANTIALIAS)
img.putalpha(mask)
img.save('../1_bg.jpg')
bg_in = cv2.imread('../tmp/resized/rechts/rechts_t2_1_rz400_d0_0400_1.jpg')
for alpha in np.arange(0,1.1, 0.1)[::-1]:
back = Image.new('RBGA', bg_in.size)
back.paste(bg_in)
poly = Image.new('RGBA', (400,400))
pdraw = ImageDraw.Draw(poly)
back.paste(poly, (0,0), mask=poly)
back.paste(back
#bg = Image.fromarray(bg_out)
'''
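# Minimal sanity-check sketch: set_global() populates the module-level globals above;
# the values printed here are exactly what set_global() computes, nothing more.
if __name__ == '__main__':
    set_global()
    print('resize target: %dx%d' % (w_resize, h_resize))
    print('crop window: x=[%d:%d], y=[%d:%d]' % (crop_x1, crop_x2, crop_y1, crop_y2))
    print('labels: %s' % ', '.join(LABEL_names))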
|
from .profile_edit import *
|
from connector import channel
from google3.cloud.graphite.mmv2.services.google.composer import environment_pb2
from google3.cloud.graphite.mmv2.services.google.composer import environment_pb2_grpc
from typing import List
class Environment(object):
def __init__(
self,
name: str = None,
config: dict = None,
uuid: str = None,
state: str = None,
create_time: str = None,
update_time: str = None,
labels: dict = None,
project: str = None,
location: str = None,
service_account_file: str = "",
):
channel.initialize()
self.name = name
self.config = config
self.labels = labels
self.project = project
self.location = location
self.service_account_file = service_account_file
def apply(self):
stub = environment_pb2_grpc.ComposerBetaEnvironmentServiceStub(
channel.Channel()
)
request = environment_pb2.ApplyComposerBetaEnvironmentRequest()
if Primitive.to_proto(self.name):
request.resource.name = Primitive.to_proto(self.name)
if EnvironmentConfig.to_proto(self.config):
request.resource.config.CopyFrom(EnvironmentConfig.to_proto(self.config))
else:
request.resource.ClearField("config")
if Primitive.to_proto(self.labels):
request.resource.labels = Primitive.to_proto(self.labels)
if Primitive.to_proto(self.project):
request.resource.project = Primitive.to_proto(self.project)
if Primitive.to_proto(self.location):
request.resource.location = Primitive.to_proto(self.location)
request.service_account_file = self.service_account_file
response = stub.ApplyComposerBetaEnvironment(request)
self.name = Primitive.from_proto(response.name)
self.config = EnvironmentConfig.from_proto(response.config)
self.uuid = Primitive.from_proto(response.uuid)
self.state = EnvironmentStateEnum.from_proto(response.state)
self.create_time = Primitive.from_proto(response.create_time)
self.update_time = Primitive.from_proto(response.update_time)
self.labels = Primitive.from_proto(response.labels)
self.project = Primitive.from_proto(response.project)
self.location = Primitive.from_proto(response.location)
def delete(self):
stub = environment_pb2_grpc.ComposerBetaEnvironmentServiceStub(
channel.Channel()
)
request = environment_pb2.DeleteComposerBetaEnvironmentRequest()
request.service_account_file = self.service_account_file
if Primitive.to_proto(self.name):
request.resource.name = Primitive.to_proto(self.name)
if EnvironmentConfig.to_proto(self.config):
request.resource.config.CopyFrom(EnvironmentConfig.to_proto(self.config))
else:
request.resource.ClearField("config")
if Primitive.to_proto(self.labels):
request.resource.labels = Primitive.to_proto(self.labels)
if Primitive.to_proto(self.project):
request.resource.project = Primitive.to_proto(self.project)
if Primitive.to_proto(self.location):
request.resource.location = Primitive.to_proto(self.location)
response = stub.DeleteComposerBetaEnvironment(request)
@classmethod
def list(self, project, location, service_account_file=""):
stub = environment_pb2_grpc.ComposerBetaEnvironmentServiceStub(
channel.Channel()
)
request = environment_pb2.ListComposerBetaEnvironmentRequest()
request.service_account_file = service_account_file
request.Project = project
request.Location = location
return stub.ListComposerBetaEnvironment(request).items
def to_proto(self):
resource = environment_pb2.ComposerBetaEnvironment()
if Primitive.to_proto(self.name):
resource.name = Primitive.to_proto(self.name)
if EnvironmentConfig.to_proto(self.config):
resource.config.CopyFrom(EnvironmentConfig.to_proto(self.config))
else:
resource.ClearField("config")
if Primitive.to_proto(self.labels):
resource.labels = Primitive.to_proto(self.labels)
if Primitive.to_proto(self.project):
resource.project = Primitive.to_proto(self.project)
if Primitive.to_proto(self.location):
resource.location = Primitive.to_proto(self.location)
return resource
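# Illustrative sketch of driving the wrapper above: construct an Environment with a
# nested EnvironmentConfig and call apply() to create or update it. The project,
# location, name, and service-account path are placeholders, and running this
# requires the google3 protobuf stubs and connector channel imported at the top.
def _example_apply_environment():
    env = Environment(
        name='example-env',
        project='example-project',
        location='us-central1',
        config=EnvironmentConfig(node_count=3),
        service_account_file='/path/to/service_account.json',
    )
    env.apply()  # sends an ApplyComposerBetaEnvironmentRequest over the gRPC channel
    return env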
class EnvironmentConfig(object):
def __init__(
self,
gke_cluster: str = None,
dag_gcs_prefix: str = None,
node_count: int = None,
software_config: dict = None,
node_config: dict = None,
private_environment_config: dict = None,
web_server_network_access_control: dict = None,
database_config: dict = None,
web_server_config: dict = None,
encryption_config: dict = None,
airflow_uri: str = None,
maintenance_window: dict = None,
):
self.gke_cluster = gke_cluster
self.dag_gcs_prefix = dag_gcs_prefix
self.node_count = node_count
self.software_config = software_config
self.node_config = node_config
self.private_environment_config = private_environment_config
self.web_server_network_access_control = web_server_network_access_control
self.database_config = database_config
self.web_server_config = web_server_config
self.encryption_config = encryption_config
self.airflow_uri = airflow_uri
self.maintenance_window = maintenance_window
@classmethod
def to_proto(self, resource):
if not resource:
return None
res = environment_pb2.ComposerBetaEnvironmentConfig()
if Primitive.to_proto(resource.gke_cluster):
res.gke_cluster = Primitive.to_proto(resource.gke_cluster)
if Primitive.to_proto(resource.dag_gcs_prefix):
res.dag_gcs_prefix = Primitive.to_proto(resource.dag_gcs_prefix)
if Primitive.to_proto(resource.node_count):
res.node_count = Primitive.to_proto(resource.node_count)
if EnvironmentConfigSoftwareConfig.to_proto(resource.software_config):
res.software_config.CopyFrom(
EnvironmentConfigSoftwareConfig.to_proto(resource.software_config)
)
else:
res.ClearField("software_config")
if EnvironmentConfigNodeConfig.to_proto(resource.node_config):
res.node_config.CopyFrom(
EnvironmentConfigNodeConfig.to_proto(resource.node_config)
)
else:
res.ClearField("node_config")
if EnvironmentConfigPrivateEnvironmentConfig.to_proto(
resource.private_environment_config
):
res.private_environment_config.CopyFrom(
EnvironmentConfigPrivateEnvironmentConfig.to_proto(
resource.private_environment_config
)
)
else:
res.ClearField("private_environment_config")
if EnvironmentConfigWebServerNetworkAccessControl.to_proto(
resource.web_server_network_access_control
):
res.web_server_network_access_control.CopyFrom(
EnvironmentConfigWebServerNetworkAccessControl.to_proto(
resource.web_server_network_access_control
)
)
else:
res.ClearField("web_server_network_access_control")
if EnvironmentConfigDatabaseConfig.to_proto(resource.database_config):
res.database_config.CopyFrom(
EnvironmentConfigDatabaseConfig.to_proto(resource.database_config)
)
else:
res.ClearField("database_config")
if EnvironmentConfigWebServerConfig.to_proto(resource.web_server_config):
res.web_server_config.CopyFrom(
EnvironmentConfigWebServerConfig.to_proto(resource.web_server_config)
)
else:
res.ClearField("web_server_config")
if EnvironmentConfigEncryptionConfig.to_proto(resource.encryption_config):
res.encryption_config.CopyFrom(
EnvironmentConfigEncryptionConfig.to_proto(resource.encryption_config)
)
else:
res.ClearField("encryption_config")
if Primitive.to_proto(resource.airflow_uri):
res.airflow_uri = Primitive.to_proto(resource.airflow_uri)
if EnvironmentConfigMaintenanceWindow.to_proto(resource.maintenance_window):
res.maintenance_window.CopyFrom(
EnvironmentConfigMaintenanceWindow.to_proto(resource.maintenance_window)
)
else:
res.ClearField("maintenance_window")
return res
@classmethod
def from_proto(self, resource):
if not resource:
return None
return EnvironmentConfig(
gke_cluster=Primitive.from_proto(resource.gke_cluster),
dag_gcs_prefix=Primitive.from_proto(resource.dag_gcs_prefix),
node_count=Primitive.from_proto(resource.node_count),
software_config=EnvironmentConfigSoftwareConfig.from_proto(
resource.software_config
),
node_config=EnvironmentConfigNodeConfig.from_proto(resource.node_config),
private_environment_config=EnvironmentConfigPrivateEnvironmentConfig.from_proto(
resource.private_environment_config
),
web_server_network_access_control=EnvironmentConfigWebServerNetworkAccessControl.from_proto(
resource.web_server_network_access_control
),
database_config=EnvironmentConfigDatabaseConfig.from_proto(
resource.database_config
),
web_server_config=EnvironmentConfigWebServerConfig.from_proto(
resource.web_server_config
),
encryption_config=EnvironmentConfigEncryptionConfig.from_proto(
resource.encryption_config
),
airflow_uri=Primitive.from_proto(resource.airflow_uri),
maintenance_window=EnvironmentConfigMaintenanceWindow.from_proto(
resource.maintenance_window
),
)
class EnvironmentConfigArray(object):
@classmethod
def to_proto(self, resources):
if not resources:
return resources
return [EnvironmentConfig.to_proto(i) for i in resources]
@classmethod
def from_proto(self, resources):
return [EnvironmentConfig.from_proto(i) for i in resources]
class EnvironmentConfigSoftwareConfig(object):
def __init__(
self,
image_version: str = None,
airflow_config_overrides: dict = None,
pypi_packages: dict = None,
env_variables: dict = None,
python_version: str = None,
):
self.image_version = image_version
self.airflow_config_overrides = airflow_config_overrides
self.pypi_packages = pypi_packages
self.env_variables = env_variables
self.python_version = python_version
@classmethod
def to_proto(self, resource):
if not resource:
return None
res = environment_pb2.ComposerBetaEnvironmentConfigSoftwareConfig()
if Primitive.to_proto(resource.image_version):
res.image_version = Primitive.to_proto(resource.image_version)
if Primitive.to_proto(resource.airflow_config_overrides):
res.airflow_config_overrides = Primitive.to_proto(
resource.airflow_config_overrides
)
if Primitive.to_proto(resource.pypi_packages):
res.pypi_packages = Primitive.to_proto(resource.pypi_packages)
if Primitive.to_proto(resource.env_variables):
res.env_variables = Primitive.to_proto(resource.env_variables)
if Primitive.to_proto(resource.python_version):
res.python_version = Primitive.to_proto(resource.python_version)
return res
@classmethod
def from_proto(self, resource):
if not resource:
return None
return EnvironmentConfigSoftwareConfig(
image_version=Primitive.from_proto(resource.image_version),
airflow_config_overrides=Primitive.from_proto(
resource.airflow_config_overrides
),
pypi_packages=Primitive.from_proto(resource.pypi_packages),
env_variables=Primitive.from_proto(resource.env_variables),
python_version=Primitive.from_proto(resource.python_version),
)
class EnvironmentConfigSoftwareConfigArray(object):
@classmethod
def to_proto(self, resources):
if not resources:
return resources
return [EnvironmentConfigSoftwareConfig.to_proto(i) for i in resources]
@classmethod
def from_proto(self, resources):
return [EnvironmentConfigSoftwareConfig.from_proto(i) for i in resources]
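# Illustrative only: the wrapper converts to and from its protobuf counterpart
# symmetrically, so populated fields survive a round trip. The values are
# placeholders and the conversion requires the generated environment_pb2 stubs.
def _example_software_config_roundtrip():
    cfg = EnvironmentConfigSoftwareConfig(
        image_version='composer-1.17.0-airflow-2.1.2',
        python_version='3',
    )
    proto = EnvironmentConfigSoftwareConfig.to_proto(cfg)
    return EnvironmentConfigSoftwareConfig.from_proto(proto)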
class EnvironmentConfigNodeConfig(object):
def __init__(
self,
location: str = None,
machine_type: str = None,
network: str = None,
subnetwork: str = None,
disk_size_gb: int = None,
oauth_scopes: list = None,
service_account: str = None,
tags: list = None,
ip_allocation_policy: dict = None,
max_pods_per_node: int = None,
):
self.location = location
self.machine_type = machine_type
self.network = network
self.subnetwork = subnetwork
self.disk_size_gb = disk_size_gb
self.oauth_scopes = oauth_scopes
self.service_account = service_account
self.tags = tags
self.ip_allocation_policy = ip_allocation_policy
self.max_pods_per_node = max_pods_per_node
@classmethod
def to_proto(self, resource):
if not resource:
return None
res = environment_pb2.ComposerBetaEnvironmentConfigNodeConfig()
if Primitive.to_proto(resource.location):
res.location = Primitive.to_proto(resource.location)
if Primitive.to_proto(resource.machine_type):
res.machine_type = Primitive.to_proto(resource.machine_type)
if Primitive.to_proto(resource.network):
res.network = Primitive.to_proto(resource.network)
if Primitive.to_proto(resource.subnetwork):
res.subnetwork = Primitive.to_proto(resource.subnetwork)
if Primitive.to_proto(resource.disk_size_gb):
res.disk_size_gb = Primitive.to_proto(resource.disk_size_gb)
if Primitive.to_proto(resource.oauth_scopes):
res.oauth_scopes.extend(Primitive.to_proto(resource.oauth_scopes))
if Primitive.to_proto(resource.service_account):
res.service_account = Primitive.to_proto(resource.service_account)
if Primitive.to_proto(resource.tags):
res.tags.extend(Primitive.to_proto(resource.tags))
if EnvironmentConfigNodeConfigIPAllocationPolicy.to_proto(
resource.ip_allocation_policy
):
res.ip_allocation_policy.CopyFrom(
EnvironmentConfigNodeConfigIPAllocationPolicy.to_proto(
resource.ip_allocation_policy
)
)
else:
res.ClearField("ip_allocation_policy")
if Primitive.to_proto(resource.max_pods_per_node):
res.max_pods_per_node = Primitive.to_proto(resource.max_pods_per_node)
return res
@classmethod
def from_proto(self, resource):
if not resource:
return None
return EnvironmentConfigNodeConfig(
location=Primitive.from_proto(resource.location),
machine_type=Primitive.from_proto(resource.machine_type),
network=Primitive.from_proto(resource.network),
subnetwork=Primitive.from_proto(resource.subnetwork),
disk_size_gb=Primitive.from_proto(resource.disk_size_gb),
oauth_scopes=Primitive.from_proto(resource.oauth_scopes),
service_account=Primitive.from_proto(resource.service_account),
tags=Primitive.from_proto(resource.tags),
ip_allocation_policy=EnvironmentConfigNodeConfigIPAllocationPolicy.from_proto(
resource.ip_allocation_policy
),
max_pods_per_node=Primitive.from_proto(resource.max_pods_per_node),
)
class EnvironmentConfigNodeConfigArray(object):
@classmethod
def to_proto(self, resources):
if not resources:
return resources
return [EnvironmentConfigNodeConfig.to_proto(i) for i in resources]
@classmethod
def from_proto(self, resources):
return [EnvironmentConfigNodeConfig.from_proto(i) for i in resources]
class EnvironmentConfigNodeConfigIPAllocationPolicy(object):
def __init__(
self,
use_ip_aliases: bool = None,
cluster_secondary_range_name: str = None,
cluster_ipv4_cidr_block: str = None,
services_secondary_range_name: str = None,
services_ipv4_cidr_block: str = None,
):
self.use_ip_aliases = use_ip_aliases
self.cluster_secondary_range_name = cluster_secondary_range_name
self.cluster_ipv4_cidr_block = cluster_ipv4_cidr_block
self.services_secondary_range_name = services_secondary_range_name
self.services_ipv4_cidr_block = services_ipv4_cidr_block
@classmethod
def to_proto(self, resource):
if not resource:
return None
res = (
environment_pb2.ComposerBetaEnvironmentConfigNodeConfigIPAllocationPolicy()
)
if Primitive.to_proto(resource.use_ip_aliases):
res.use_ip_aliases = Primitive.to_proto(resource.use_ip_aliases)
if Primitive.to_proto(resource.cluster_secondary_range_name):
res.cluster_secondary_range_name = Primitive.to_proto(
resource.cluster_secondary_range_name
)
if Primitive.to_proto(resource.cluster_ipv4_cidr_block):
res.cluster_ipv4_cidr_block = Primitive.to_proto(
resource.cluster_ipv4_cidr_block
)
if Primitive.to_proto(resource.services_secondary_range_name):
res.services_secondary_range_name = Primitive.to_proto(
resource.services_secondary_range_name
)
if Primitive.to_proto(resource.services_ipv4_cidr_block):
res.services_ipv4_cidr_block = Primitive.to_proto(
resource.services_ipv4_cidr_block
)
return res
@classmethod
def from_proto(self, resource):
if not resource:
return None
return EnvironmentConfigNodeConfigIPAllocationPolicy(
use_ip_aliases=Primitive.from_proto(resource.use_ip_aliases),
cluster_secondary_range_name=Primitive.from_proto(
resource.cluster_secondary_range_name
),
cluster_ipv4_cidr_block=Primitive.from_proto(
resource.cluster_ipv4_cidr_block
),
services_secondary_range_name=Primitive.from_proto(
resource.services_secondary_range_name
),
services_ipv4_cidr_block=Primitive.from_proto(
resource.services_ipv4_cidr_block
),
)
class EnvironmentConfigNodeConfigIPAllocationPolicyArray(object):
@classmethod
def to_proto(self, resources):
if not resources:
return resources
return [
EnvironmentConfigNodeConfigIPAllocationPolicy.to_proto(i) for i in resources
]
@classmethod
def from_proto(self, resources):
return [
EnvironmentConfigNodeConfigIPAllocationPolicy.from_proto(i)
for i in resources
]
class EnvironmentConfigPrivateEnvironmentConfig(object):
def __init__(
self,
enable_private_environment: bool = None,
private_cluster_config: dict = None,
web_server_ipv4_cidr_block: str = None,
cloud_sql_ipv4_cidr_block: str = None,
web_server_ipv4_reserved_range: str = None,
):
self.enable_private_environment = enable_private_environment
self.private_cluster_config = private_cluster_config
self.web_server_ipv4_cidr_block = web_server_ipv4_cidr_block
self.cloud_sql_ipv4_cidr_block = cloud_sql_ipv4_cidr_block
self.web_server_ipv4_reserved_range = web_server_ipv4_reserved_range
@classmethod
def to_proto(self, resource):
if not resource:
return None
res = environment_pb2.ComposerBetaEnvironmentConfigPrivateEnvironmentConfig()
if Primitive.to_proto(resource.enable_private_environment):
res.enable_private_environment = Primitive.to_proto(
resource.enable_private_environment
)
if EnvironmentConfigPrivateEnvironmentConfigPrivateClusterConfig.to_proto(
resource.private_cluster_config
):
res.private_cluster_config.CopyFrom(
EnvironmentConfigPrivateEnvironmentConfigPrivateClusterConfig.to_proto(
resource.private_cluster_config
)
)
else:
res.ClearField("private_cluster_config")
if Primitive.to_proto(resource.web_server_ipv4_cidr_block):
res.web_server_ipv4_cidr_block = Primitive.to_proto(
resource.web_server_ipv4_cidr_block
)
if Primitive.to_proto(resource.cloud_sql_ipv4_cidr_block):
res.cloud_sql_ipv4_cidr_block = Primitive.to_proto(
resource.cloud_sql_ipv4_cidr_block
)
if Primitive.to_proto(resource.web_server_ipv4_reserved_range):
res.web_server_ipv4_reserved_range = Primitive.to_proto(
resource.web_server_ipv4_reserved_range
)
return res
@classmethod
def from_proto(self, resource):
if not resource:
return None
return EnvironmentConfigPrivateEnvironmentConfig(
enable_private_environment=Primitive.from_proto(
resource.enable_private_environment
),
private_cluster_config=EnvironmentConfigPrivateEnvironmentConfigPrivateClusterConfig.from_proto(
resource.private_cluster_config
),
web_server_ipv4_cidr_block=Primitive.from_proto(
resource.web_server_ipv4_cidr_block
),
cloud_sql_ipv4_cidr_block=Primitive.from_proto(
resource.cloud_sql_ipv4_cidr_block
),
web_server_ipv4_reserved_range=Primitive.from_proto(
resource.web_server_ipv4_reserved_range
),
)
class EnvironmentConfigPrivateEnvironmentConfigArray(object):
@classmethod
def to_proto(self, resources):
if not resources:
return resources
return [
EnvironmentConfigPrivateEnvironmentConfig.to_proto(i) for i in resources
]
@classmethod
def from_proto(self, resources):
return [
EnvironmentConfigPrivateEnvironmentConfig.from_proto(i) for i in resources
]
class EnvironmentConfigPrivateEnvironmentConfigPrivateClusterConfig(object):
def __init__(
self,
enable_private_endpoint: bool = None,
master_ipv4_cidr_block: str = None,
master_ipv4_reserved_range: str = None,
):
self.enable_private_endpoint = enable_private_endpoint
self.master_ipv4_cidr_block = master_ipv4_cidr_block
self.master_ipv4_reserved_range = master_ipv4_reserved_range
@classmethod
def to_proto(self, resource):
if not resource:
return None
res = (
environment_pb2.ComposerBetaEnvironmentConfigPrivateEnvironmentConfigPrivateClusterConfig()
)
if Primitive.to_proto(resource.enable_private_endpoint):
res.enable_private_endpoint = Primitive.to_proto(
resource.enable_private_endpoint
)
if Primitive.to_proto(resource.master_ipv4_cidr_block):
res.master_ipv4_cidr_block = Primitive.to_proto(
resource.master_ipv4_cidr_block
)
if Primitive.to_proto(resource.master_ipv4_reserved_range):
res.master_ipv4_reserved_range = Primitive.to_proto(
resource.master_ipv4_reserved_range
)
return res
@classmethod
def from_proto(self, resource):
if not resource:
return None
return EnvironmentConfigPrivateEnvironmentConfigPrivateClusterConfig(
enable_private_endpoint=Primitive.from_proto(
resource.enable_private_endpoint
),
master_ipv4_cidr_block=Primitive.from_proto(
resource.master_ipv4_cidr_block
),
master_ipv4_reserved_range=Primitive.from_proto(
resource.master_ipv4_reserved_range
),
)
class EnvironmentConfigPrivateEnvironmentConfigPrivateClusterConfigArray(object):
@classmethod
def to_proto(self, resources):
if not resources:
return resources
return [
EnvironmentConfigPrivateEnvironmentConfigPrivateClusterConfig.to_proto(i)
for i in resources
]
@classmethod
def from_proto(self, resources):
return [
EnvironmentConfigPrivateEnvironmentConfigPrivateClusterConfig.from_proto(i)
for i in resources
]
class EnvironmentConfigWebServerNetworkAccessControl(object):
def __init__(self, allowed_ip_ranges: list = None):
self.allowed_ip_ranges = allowed_ip_ranges
@classmethod
def to_proto(self, resource):
if not resource:
return None
res = (
environment_pb2.ComposerBetaEnvironmentConfigWebServerNetworkAccessControl()
)
if EnvironmentConfigWebServerNetworkAccessControlAllowedIPRangesArray.to_proto(
resource.allowed_ip_ranges
):
res.allowed_ip_ranges.extend(
EnvironmentConfigWebServerNetworkAccessControlAllowedIPRangesArray.to_proto(
resource.allowed_ip_ranges
)
)
return res
@classmethod
def from_proto(self, resource):
if not resource:
return None
return EnvironmentConfigWebServerNetworkAccessControl(
allowed_ip_ranges=EnvironmentConfigWebServerNetworkAccessControlAllowedIPRangesArray.from_proto(
resource.allowed_ip_ranges
),
)
class EnvironmentConfigWebServerNetworkAccessControlArray(object):
@classmethod
def to_proto(self, resources):
if not resources:
return resources
return [
EnvironmentConfigWebServerNetworkAccessControl.to_proto(i)
for i in resources
]
@classmethod
def from_proto(self, resources):
return [
EnvironmentConfigWebServerNetworkAccessControl.from_proto(i)
for i in resources
]
class EnvironmentConfigWebServerNetworkAccessControlAllowedIPRanges(object):
def __init__(self, value: str = None, description: str = None):
self.value = value
self.description = description
@classmethod
def to_proto(self, resource):
if not resource:
return None
res = (
environment_pb2.ComposerBetaEnvironmentConfigWebServerNetworkAccessControlAllowedIPRanges()
)
if Primitive.to_proto(resource.value):
res.value = Primitive.to_proto(resource.value)
if Primitive.to_proto(resource.description):
res.description = Primitive.to_proto(resource.description)
return res
@classmethod
def from_proto(self, resource):
if not resource:
return None
return EnvironmentConfigWebServerNetworkAccessControlAllowedIPRanges(
value=Primitive.from_proto(resource.value),
description=Primitive.from_proto(resource.description),
)
class EnvironmentConfigWebServerNetworkAccessControlAllowedIPRangesArray(object):
@classmethod
def to_proto(self, resources):
if not resources:
return resources
return [
EnvironmentConfigWebServerNetworkAccessControlAllowedIPRanges.to_proto(i)
for i in resources
]
@classmethod
def from_proto(self, resources):
return [
EnvironmentConfigWebServerNetworkAccessControlAllowedIPRanges.from_proto(i)
for i in resources
]
class EnvironmentConfigDatabaseConfig(object):
def __init__(self, machine_type: str = None):
self.machine_type = machine_type
@classmethod
def to_proto(self, resource):
if not resource:
return None
res = environment_pb2.ComposerBetaEnvironmentConfigDatabaseConfig()
if Primitive.to_proto(resource.machine_type):
res.machine_type = Primitive.to_proto(resource.machine_type)
return res
@classmethod
def from_proto(self, resource):
if not resource:
return None
return EnvironmentConfigDatabaseConfig(
machine_type=Primitive.from_proto(resource.machine_type),
)
class EnvironmentConfigDatabaseConfigArray(object):
@classmethod
def to_proto(self, resources):
if not resources:
return resources
return [EnvironmentConfigDatabaseConfig.to_proto(i) for i in resources]
@classmethod
def from_proto(self, resources):
return [EnvironmentConfigDatabaseConfig.from_proto(i) for i in resources]
class EnvironmentConfigWebServerConfig(object):
def __init__(self, machine_type: str = None):
self.machine_type = machine_type
@classmethod
def to_proto(self, resource):
if not resource:
return None
res = environment_pb2.ComposerBetaEnvironmentConfigWebServerConfig()
if Primitive.to_proto(resource.machine_type):
res.machine_type = Primitive.to_proto(resource.machine_type)
return res
@classmethod
def from_proto(self, resource):
if not resource:
return None
return EnvironmentConfigWebServerConfig(
machine_type=Primitive.from_proto(resource.machine_type),
)
class EnvironmentConfigWebServerConfigArray(object):
@classmethod
def to_proto(self, resources):
if not resources:
return resources
return [EnvironmentConfigWebServerConfig.to_proto(i) for i in resources]
@classmethod
def from_proto(self, resources):
return [EnvironmentConfigWebServerConfig.from_proto(i) for i in resources]
class EnvironmentConfigEncryptionConfig(object):
def __init__(self, kms_key_name: str = None):
self.kms_key_name = kms_key_name
@classmethod
def to_proto(self, resource):
if not resource:
return None
res = environment_pb2.ComposerBetaEnvironmentConfigEncryptionConfig()
if Primitive.to_proto(resource.kms_key_name):
res.kms_key_name = Primitive.to_proto(resource.kms_key_name)
return res
@classmethod
def from_proto(self, resource):
if not resource:
return None
return EnvironmentConfigEncryptionConfig(
kms_key_name=Primitive.from_proto(resource.kms_key_name),
)
class EnvironmentConfigEncryptionConfigArray(object):
@classmethod
def to_proto(self, resources):
if not resources:
return resources
return [EnvironmentConfigEncryptionConfig.to_proto(i) for i in resources]
@classmethod
def from_proto(self, resources):
return [EnvironmentConfigEncryptionConfig.from_proto(i) for i in resources]
class EnvironmentConfigMaintenanceWindow(object):
def __init__(
self, start_time: str = None, end_time: str = None, recurrence: str = None
):
self.start_time = start_time
self.end_time = end_time
self.recurrence = recurrence
@classmethod
def to_proto(self, resource):
if not resource:
return None
res = environment_pb2.ComposerBetaEnvironmentConfigMaintenanceWindow()
if Primitive.to_proto(resource.start_time):
res.start_time = Primitive.to_proto(resource.start_time)
if Primitive.to_proto(resource.end_time):
res.end_time = Primitive.to_proto(resource.end_time)
if Primitive.to_proto(resource.recurrence):
res.recurrence = Primitive.to_proto(resource.recurrence)
return res
@classmethod
def from_proto(self, resource):
if not resource:
return None
return EnvironmentConfigMaintenanceWindow(
start_time=Primitive.from_proto(resource.start_time),
end_time=Primitive.from_proto(resource.end_time),
recurrence=Primitive.from_proto(resource.recurrence),
)
class EnvironmentConfigMaintenanceWindowArray(object):
@classmethod
def to_proto(self, resources):
if not resources:
return resources
return [EnvironmentConfigMaintenanceWindow.to_proto(i) for i in resources]
@classmethod
def from_proto(self, resources):
return [EnvironmentConfigMaintenanceWindow.from_proto(i) for i in resources]
class EnvironmentStateEnum(object):
@classmethod
def to_proto(self, resource):
if not resource:
return resource
return environment_pb2.ComposerBetaEnvironmentStateEnum.Value(
"ComposerBetaEnvironmentStateEnum%s" % resource
)
@classmethod
def from_proto(self, resource):
if not resource:
return resource
return environment_pb2.ComposerBetaEnvironmentStateEnum.Name(resource)[
len("ComposerBetaEnvironmentStateEnum") :
]
class Primitive(object):
@classmethod
def to_proto(self, s):
if not s:
return ""
return s
@classmethod
def from_proto(self, s):
return s
|
import rstem
from rstem.button import Button
from rstem.gpio import Output
from rstem.sound import Note
from random import randrange
import time
buttons = [Button(27), Button(23), Button(24), Button(22)]
lights = [Output(4), Output(18), Output(14), Output(15)]
notes = [Note('A'), Note('B'), Note('C'), Note('D')]
you_failed_note = Note('E2')
you_failed_note.volume = 1000
for note in notes:
note.volume = 400
for light in lights:
light.off()
play_order = []
failed = False
while not failed:
play_order += [randrange(4)]
# Play sequence
for i in play_order:
lights[i].on()
notes[i].play(0.4).wait()
lights[i].off()
time.sleep(0.2)
# Wait for user to repeat
for i in play_order:
button_pressed = Button.wait_many(buttons, timeout=3)
if button_pressed != i:
failed = True
break
# Light and play while button is pressed.
lights[button_pressed].on()
notes[button_pressed].play(duration=None)
buttons[button_pressed].wait(press=False)
time.sleep(0.2)
lights[button_pressed].off()
notes[button_pressed].stop()
if not failed:
time.sleep(1.0)
if button_pressed is None:
for light in lights:
light.on()
else:
lights[button_pressed].on()
you_failed_note.play(1.5).wait()
for light in lights:
light.off()
time.sleep(0.5)
|
"""This application demonstrates how to construct a Signed URL for objects in
Google Cloud Storage.
For more information, see the README.md under /storage and the documentation
at https://cloud.google.com/storage/docs/access-control/signing-urls-manually.
"""
import argparse
import binascii
import collections
import datetime
import hashlib
import sys
from google.oauth2 import service_account
import six
from six.moves.urllib.parse import quote
def generate_signed_url(service_account_file, bucket_name, object_name,
subresource=None, expiration=604800, http_method='GET',
query_parameters=None, headers=None):
if expiration > 604800:
print('Expiration Time can\'t be longer than 604800 seconds (7 days).')
sys.exit(1)
escaped_object_name = quote(six.ensure_binary(object_name), safe=b'/~')
canonical_uri = '/{}'.format(escaped_object_name)
datetime_now = datetime.datetime.now(tz=datetime.timezone.utc)
request_timestamp = datetime_now.strftime('%Y%m%dT%H%M%SZ')
datestamp = datetime_now.strftime('%Y%m%d')
google_credentials = service_account.Credentials.from_service_account_file(
service_account_file)
client_email = google_credentials.service_account_email
credential_scope = '{}/auto/storage/goog4_request'.format(datestamp)
credential = '{}/{}'.format(client_email, credential_scope)
if headers is None:
headers = dict()
host = '{}.storage.googleapis.com'.format(bucket_name)
headers['host'] = host
canonical_headers = ''
ordered_headers = collections.OrderedDict(sorted(headers.items()))
for k, v in ordered_headers.items():
lower_k = str(k).lower()
strip_v = str(v).lower()
canonical_headers += '{}:{}\n'.format(lower_k, strip_v)
signed_headers = ''
for k, _ in ordered_headers.items():
lower_k = str(k).lower()
signed_headers += '{};'.format(lower_k)
signed_headers = signed_headers[:-1] # remove trailing ';'
if query_parameters is None:
query_parameters = dict()
query_parameters['X-Goog-Algorithm'] = 'GOOG4-RSA-SHA256'
query_parameters['X-Goog-Credential'] = credential
query_parameters['X-Goog-Date'] = request_timestamp
query_parameters['X-Goog-Expires'] = expiration
query_parameters['X-Goog-SignedHeaders'] = signed_headers
if subresource:
query_parameters[subresource] = ''
canonical_query_string = ''
ordered_query_parameters = collections.OrderedDict(
sorted(query_parameters.items()))
for k, v in ordered_query_parameters.items():
encoded_k = quote(str(k), safe='')
encoded_v = quote(str(v), safe='')
canonical_query_string += '{}={}&'.format(encoded_k, encoded_v)
canonical_query_string = canonical_query_string[:-1] # remove trailing '&'
canonical_request = '\n'.join([http_method,
canonical_uri,
canonical_query_string,
canonical_headers,
signed_headers,
'UNSIGNED-PAYLOAD'])
canonical_request_hash = hashlib.sha256(
canonical_request.encode()).hexdigest()
string_to_sign = '\n'.join(['GOOG4-RSA-SHA256',
request_timestamp,
credential_scope,
canonical_request_hash])
# signer.sign() signs using RSA-SHA256 with PKCS1v15 padding
signature = binascii.hexlify(
google_credentials.signer.sign(string_to_sign)
).decode()
scheme_and_host = '{}://{}'.format('https', host)
signed_url = '{}{}?{}&x-goog-signature={}'.format(
scheme_and_host, canonical_uri, canonical_query_string, signature)
return signed_url
if __name__ == '__main__':
parser = argparse.ArgumentParser(
description=__doc__,
formatter_class=argparse.RawDescriptionHelpFormatter)
parser.add_argument(
'service_account_file',
help='Path to your Google service account keyfile.')
parser.add_argument(
'request_method',
        help='A request method, e.g. GET, POST.')
parser.add_argument('bucket_name', help='Your Cloud Storage bucket name.')
parser.add_argument('object_name', help='Your Cloud Storage object name.')
parser.add_argument('expiration', type=int, help='Expiration time.')
parser.add_argument(
'--subresource',
default=None,
help='Subresource of the specified resource, e.g. "acl".')
args = parser.parse_args()
signed_url = generate_signed_url(
service_account_file=args.service_account_file,
http_method=args.request_method, bucket_name=args.bucket_name,
object_name=args.object_name, subresource=args.subresource,
expiration=int(args.expiration))
print(signed_url)
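# Illustrative library-style usage (hypothetical bucket, object, and key file names; not part
# of the original sample). generate_signed_url() can also be called directly and returns the
# V4 signed URL as a string:
#
#   url = generate_signed_url(
#       service_account_file='service-account.json',
#       bucket_name='example-bucket',
#       object_name='path/to/object.txt',
#       expiration=3600)  # one hour, well under the 7-day maximum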
|
import cv2
import numpy as np
from matplotlib import pyplot as plt
from omr_utils import *
from OmrExceptions import *
import logging
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger(__name__)
def get_x_splits(start, end):
# assert end > start
delta = (end - start) / 10
return [start + int(i * delta + delta / 2) for i in range(10)]
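# Worked example (added for illustration): with start=0 and end=100 the ID box is divided into
# ten columns of width 10 and the returned x coordinates are the column centres:
#   get_x_splits(0, 100) -> [5, 15, 25, 35, 45, 55, 65, 75, 85, 95]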
def filter_y_splits(downs, ups):
ends = downs[1:11]
starts = ups[0:10]
return [int((start + end) / 2) for start, end in zip(starts, ends)]
def id_blob_coverage(img, x, y, threshold=None):
"""
Percentage of marking coverage. Ideally, if marked, then coverage = 1.0 (black),
if not marked at all then coverage = 0.0 (white)
:param img: roi contains roughly the id_box
:param x: int, blob x coordinates
:param y: int, blob y coordinates
:param threshold: float (optional), used to return either 0.0 or 1.0 values
:return: marking percentage, value between 1.0 (fully marked) and 0.0 (not marked).
If threshold is applied then return either 1.0 (fully marked) or 0.0 (not marked)
"""
delta_y = 3
delta_x = 4
norm = 4 * delta_x * delta_y * 255
    coverage = 1 - (np.sum(img[y - delta_y: y + delta_y, x - delta_x: x + delta_x]) / norm)  # inverse percent level
    if threshold is None:
        return coverage
    return 1 if coverage >= threshold else 0
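# Illustrative arithmetic (added comment): the sampled window spans 2*delta_y rows by
# 2*delta_x columns, i.e. 6 x 8 = 48 pixels, and norm = 4 * 4 * 3 * 255 = 48 * 255. A fully
# white window therefore sums to norm and gives coverage 0.0, while a fully black window
# sums to 0 and gives coverage 1.0.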
def calc_id_number(img, x_splits, y_splits):
# id number sample = 7036093052
result = 0
for idx, x in enumerate(x_splits):
idx = 10 - idx
digit = 0
m_count = 0
for idy, y in enumerate(y_splits):
coverage = id_blob_coverage(img, x, y, .85)
if coverage == 1: # marked
digit = idy
m_count += 1
if m_count > 1:
logger.error('second value: %s for digit %s ', digit, idx)
if m_count > 1:
            raise IDError('Multiple values for digit %s = %s' % (idx, digit))
result = result * 10 + digit
return result
def process_id(img, debug=False):
height, width = img.shape
img_x = otsu_filter(img, blur_kernel=1)
sum_x = np.sum(img_x, axis=0) / height
x_downs, x_ups = get_crossing_downs_ups(sum_x, 240)
if not len(x_downs) == 1 and not len(x_ups) == 1:
logger.error("downs: %s, ups: %s", x_downs, x_ups)
raise IDError("Could not locate the vertical borders of ID box")
x_shift_correction = 3
x_splits = get_x_splits(x_downs[0] + x_shift_correction, x_ups[0])
img_y = otsu_filter(img, blur_kernel=5)
sum_y = np.sum(img_y, axis=1) / width
y_starts, y_ends = get_crossing_downs_ups(sum_y, 240, spacing=0)
if not len(y_starts) > 0 and not len(y_ends) > 0:
logger.error("y_starts: %s, y_ends: %s", y_starts, y_ends)
raise IDError("Could not locate the horizontal borders of ID box")
# slicing sum_y : [y_starts[0]: y_ends[0]]
m_avg = np.average(sum_y[50: 285]) - 10
logger.debug("moving avg: %s", m_avg)
y_downs, y_ups = get_crossing_downs_ups(sum_y, m_avg, spacing=0)
    logger.debug("y_downs: %s, y_ups: %s", len(y_downs), len(y_ups))
y_splits = filter_y_splits(y_downs, y_ups)
if len(y_splits) < 10:
raise IDError('Not enough y_split markers: %s' % len(y_splits))
id_number = calc_id_number(img_y, x_splits, y_splits)
logger.info('id_number = %s', id_number)
if debug:
vis_y = img_y.copy()
plt.subplot(221), plt.imshow(img, 'gray'), plt.title('img_x')
plt.subplot(222), plt.imshow(vis_y, 'gray'), plt.title('img_y')
plt.subplot(223), plt.plot(sum_x, 'r'), plt.title('sum_x')
for x in x_splits:
plt.axvline(x=x) # draw vertical lines in chart on xs
cv2.line(vis_y, (x, 0), (x, height), (255, 255, 255), 1)
plt.subplot(224), plt.plot(sum_y, 'b'), plt.title('sum_y')
plt.axhline(y=m_avg)
for y in y_splits:
plt.axvline(x=y) # draw vertical lines in chart on xs
cv2.line(vis_y, (0, y), (width, y), (0, 0, 0), 1)
plt.show()
return id_number
|
import contextlib
import mock
from oslo.config import cfg
import testtools
from neutron.plugins.mlnx.agent import eswitch_neutron_agent
from neutron.plugins.mlnx.agent import utils
from neutron.plugins.mlnx.common import exceptions
from neutron.tests import base
class TestEswichManager(base.BaseTestCase):
def setUp(self):
super(TestEswichManager, self).setUp()
class MockEswitchUtils(object):
def __init__(self, endpoint, timeout):
pass
mock.patch('neutron.plugins.mlnx.agent.utils.EswitchManager',
new=MockEswitchUtils)
with mock.patch.object(utils, 'zmq'):
self.manager = eswitch_neutron_agent.EswitchManager({}, None, None)
def test_get_not_exist_port_id(self):
with testtools.ExpectedException(exceptions.MlnxException):
self.manager.get_port_id_by_mac('no-such-mac')
class TestMlnxEswitchRpcCallbacks(base.BaseTestCase):
def setUp(self):
super(TestMlnxEswitchRpcCallbacks, self).setUp()
agent = mock.Mock()
self.rpc_callbacks = eswitch_neutron_agent.MlnxEswitchRpcCallbacks(
'context',
agent,
agent
)
def test_port_update(self):
port = {'mac_address': '10:20:30:40:50:60'}
add_port_update = self.rpc_callbacks.agent.add_port_update
self.rpc_callbacks.port_update('context', port=port)
add_port_update.assert_called_once_with(port['mac_address'])
class TestEswitchAgent(base.BaseTestCase):
def setUp(self):
super(TestEswitchAgent, self).setUp()
cfg.CONF.set_default('firewall_driver',
'neutron.agent.firewall.NoopFirewallDriver',
group='SECURITYGROUP')
class MockFixedIntervalLoopingCall(object):
def __init__(self, f):
self.f = f
def start(self, interval=0):
self.f()
mock.patch('neutron.openstack.common.loopingcall.'
'FixedIntervalLoopingCall',
new=MockFixedIntervalLoopingCall)
with mock.patch.object(utils, 'zmq'):
self.agent = eswitch_neutron_agent.MlnxEswitchNeutronAgent({}, {})
self.agent.plugin_rpc = mock.Mock()
self.agent.context = mock.Mock()
self.agent.agent_id = mock.Mock()
self.agent.eswitch = mock.Mock()
self.agent.eswitch.get_vnics_mac.return_value = []
def test_treat_devices_added_returns_true_for_missing_device(self):
attrs = {'get_devices_details_list.side_effect': Exception()}
self.agent.plugin_rpc.configure_mock(**attrs)
with contextlib.nested(
mock.patch('neutron.plugins.mlnx.agent.eswitch_neutron_agent.'
'EswitchManager.get_vnics_mac',
return_value=[])):
self.assertTrue(self.agent.treat_devices_added_or_updated([{}]))
def _mock_treat_devices_added_updated(self, details, func_name):
"""Mock treat devices added.
:param details: the details to return for the device
:param func_name: the function that should be called
:returns: whether the named function was called
"""
with contextlib.nested(
mock.patch('neutron.plugins.mlnx.agent.eswitch_neutron_agent.'
'EswitchManager.get_vnics_mac',
return_value=[]),
mock.patch.object(self.agent.plugin_rpc,
'get_devices_details_list',
return_value=[details]),
mock.patch.object(self.agent.plugin_rpc, 'update_device_up'),
mock.patch.object(self.agent, func_name)
) as (vnics_fn, get_dev_fn, upd_dev_up, func):
self.assertFalse(self.agent.treat_devices_added_or_updated([{}]))
return (func.called, upd_dev_up.called)
def test_treat_devices_added_updates_known_port(self):
details = mock.MagicMock()
details.__contains__.side_effect = lambda x: True
func, dev_up = self._mock_treat_devices_added_updated(details,
'treat_vif_port')
self.assertTrue(func)
self.assertTrue(dev_up)
def test_treat_devices_added_updates_known_port_admin_down(self):
details = {'port_id': '1234567890',
'device': '01:02:03:04:05:06',
'network_id': '123456789',
'network_type': 'vlan',
'physical_network': 'default',
'segmentation_id': 2,
'admin_state_up': False}
func, dev_up = self._mock_treat_devices_added_updated(details,
'treat_vif_port')
self.assertTrue(func)
self.assertFalse(dev_up)
def test_treat_devices_removed_returns_true_for_missing_device(self):
with mock.patch.object(self.agent.plugin_rpc, 'update_device_down',
side_effect=Exception()):
self.assertTrue(self.agent.treat_devices_removed([{}]))
def test_treat_devices_removed_releases_port(self):
details = dict(exists=False)
with mock.patch.object(self.agent.plugin_rpc, 'update_device_down',
return_value=details):
with mock.patch.object(self.agent.eswitch,
'port_release') as port_release:
self.assertFalse(self.agent.treat_devices_removed([{}]))
self.assertTrue(port_release.called)
def _test_process_network_ports(self, port_info):
with contextlib.nested(
mock.patch.object(self.agent, 'treat_devices_added_or_updated',
return_value=False),
mock.patch.object(self.agent, 'treat_devices_removed',
return_value=False)
) as (device_added_updated, device_removed):
self.assertFalse(self.agent.process_network_ports(port_info))
device_added_updated.assert_called_once_with(
port_info['added'] | port_info['updated'])
device_removed.assert_called_once_with(port_info['removed'])
def test_process_network_ports(self):
self._test_process_network_ports(
{'current': set(['10:20:30:40:50:60']),
'updated': set(),
'added': set(['11:21:31:41:51:61']),
'removed': set(['13:23:33:43:53:63'])})
def test_process_network_ports_with_updated_ports(self):
self._test_process_network_ports(
{'current': set(['10:20:30:40:50:60']),
'updated': set(['12:22:32:42:52:62']),
'added': set(['11:21:31:41:51:61']),
'removed': set(['13:23:33:43:53:63'])})
def test_add_port_update(self):
mac_addr = '10:20:30:40:50:60'
self.agent.add_port_update(mac_addr)
self.assertEqual(set([mac_addr]), self.agent.updated_ports)
def _mock_scan_ports(self, vif_port_set, previous,
updated_ports, sync=False):
self.agent.updated_ports = updated_ports
with mock.patch.object(self.agent.eswitch, 'get_vnics_mac',
return_value=vif_port_set):
return self.agent.scan_ports(previous, sync)
def test_scan_ports_return_current_for_unchanged_ports(self):
vif_port_set = set([1, 2])
previous = dict(current=set([1, 2]), added=set(),
removed=set(), updated=set())
expected = dict(current=vif_port_set, added=set(),
removed=set(), updated=set())
actual = self._mock_scan_ports(vif_port_set,
previous, set())
self.assertEqual(expected, actual)
def test_scan_ports_return_port_changes(self):
vif_port_set = set([1, 3])
previous = dict(current=set([1, 2]), added=set(),
removed=set(), updated=set())
expected = dict(current=vif_port_set, added=set([3]),
removed=set([2]), updated=set())
actual = self._mock_scan_ports(vif_port_set,
previous, set())
self.assertEqual(expected, actual)
def test_scan_ports_with_updated_ports(self):
vif_port_set = set([1, 3, 4])
previous = dict(current=set([1, 2, 4]), added=set(),
removed=set(), updated=set())
expected = dict(current=vif_port_set, added=set([3]),
removed=set([2]), updated=set([4]))
actual = self._mock_scan_ports(vif_port_set,
previous, set([4]))
self.assertEqual(expected, actual)
def test_scan_ports_with_unknown_updated_ports(self):
vif_port_set = set([1, 3, 4])
previous = dict(current=set([1, 2, 4]), added=set(),
removed=set(), updated=set())
expected = dict(current=vif_port_set, added=set([3]),
removed=set([2]), updated=set([4]))
actual = self._mock_scan_ports(vif_port_set,
previous,
updated_ports=set([4, 5]))
self.assertEqual(expected, actual)
|
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import unittest
from pants.base.validation import assert_list
class ParseValidation(unittest.TestCase):
def test_valid_inputs(self):
list_result0 = assert_list(["file1.txt"])
list_result1 = assert_list(["file1.txt", "file2.txt"])
list_result2 = assert_list(None)
self.assertEqual(list_result0, ["file1.txt"]) # list of strings gives list of strings
self.assertEqual(list_result1, ["file1.txt", "file2.txt"])
self.assertEqual(list_result2, []) # None is ok by default
def test_invalid_inputs(self):
with self.assertRaises(ValueError):
assert_list({"file2.txt": True}) # Can't pass a dict by default
with self.assertRaises(ValueError):
assert_list([["file2.txt"], "file2.txt"]) # All values in list must be stringy values
with self.assertRaises(ValueError):
      assert_list(None, can_be_none=False) # The default is ok as None only when can_be_none is true
def test_invalid_inputs_with_key_arg(self):
with self.assertRaisesRegexp(ValueError, "In key 'resources':"):
assert_list({"file3.txt":"source"}, key_arg='resources') # Can't pass a dict
with self.assertRaisesRegexp(ValueError, "In key 'artifacts':"):
      assert_list([["file3.txt"]], key_arg='artifacts') # All values must be strings
with self.assertRaisesRegexp(ValueError, "In key 'jars':"):
      assert_list(None, can_be_none=False, key_arg='jars') # The default is ok as None only when can_be_none is true
|
def mult(m, n):
    """Multiply m by n with repeated doubling and halving; non-positive n yields 0."""
    def loop(m, n):
        # Invariant: loop(m, n) == m * n for n >= 1.
        if n > 1:
            if n % 2 == 0:
                return loop(m * 2, n // 2)
            else:
                return m + loop(m * 2, n // 2)
        else:  # n == 1
            return m
    if n > 0:
        return loop(m, n)
    else:
        return 0
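# Quick sanity check of the doubling-and-halving identity used above (added for illustration;
# the test values are arbitrary and not part of the original snippet).
if __name__ == '__main__':
    assert mult(7, 13) == 91
    assert mult(12, 1) == 12
    assert mult(5, 0) == 0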
|
"""Runs a command, saving stdout, stderr, and the return code in files.
Simplifies executing long-running commands on a remote host.
The status file (as specified by --status) is exclusively locked until the
child process running the user-specified command exits.
This command will fail if the status file cannot be successfully locked.
To await completion, "wait_for_command.py" acquires a shared lock on the
status file, which blocks until the process completes.
*Runs on the guest VM. Supports Python 2.6, 2.7, and 3.x.*
"""
import fcntl
import logging
import optparse
import sys
import subprocess
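# Illustrative invocation (hypothetical script name and file paths; the flags match the
# optparse options defined in main() below):
#
#   python run_command.py \
#       --stdout /tmp/cmd.out --stderr /tmp/cmd.err \
#       --status /tmp/cmd.status --pid /tmp/cmd.pid \
#       --command 'sleep 30 && echo done'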
def main():
parser = optparse.OptionParser()
parser.add_option('-o', '--stdout', dest='stdout', metavar='FILE',
help="""Write stdout to FILE. Required.""")
parser.add_option('-e', '--stderr', dest='stderr', metavar='FILE',
help="""Write stderr to FILE. Required.""")
parser.add_option('-p', '--pid', dest='pid', help="""Write PID to FILE.""",
metavar='FILE')
parser.add_option('-s', '--status', dest='status', help="""Write process exit
status to FILE. An exclusive lock will be placed on FILE
until this process exits. Required.""", metavar='FILE')
parser.add_option('-c', '--command', dest='command', help="""Shell command to
execute. Required.""")
options, args = parser.parse_args()
if args:
sys.stderr.write('Unexpected arguments: {0}\n'.format(args))
return 1
missing = []
for option in ('stdout', 'stderr', 'status', 'command'):
if getattr(options, option) is None:
missing.append(option)
if missing:
parser.print_usage()
msg = 'Missing required flag(s): {0}\n'.format(
', '.join('--' + i for i in missing))
sys.stderr.write(msg)
return 1
with open(options.status, 'w+') as status:
with open(options.stdout, 'w') as stdout:
with open(options.stderr, 'w') as stderr:
logging.info('Acquiring lock on %s', options.status)
# Non-blocking exclusive lock acquisition; will raise an IOError if
# acquisition fails, which is desirable here.
fcntl.lockf(status, fcntl.LOCK_EX | fcntl.LOCK_NB)
p = subprocess.Popen(options.command, stdout=stdout, stderr=stderr,
shell=True)
logging.info('Started pid %d: %s', p.pid, options.command)
if options.pid:
with open(options.pid, 'w') as pid:
pid.write(str(p.pid))
logging.info('Waiting on PID %s', p.pid)
return_code = p.wait()
logging.info('Return code: %s', return_code)
status.truncate()
status.write(str(return_code))
# File lock will be released when the status file is closed.
return return_code
if __name__ == '__main__':
logging.basicConfig(level=logging.INFO)
sys.exit(main())
|
import os
import subprocess
from string import find
def run_command(command):
p = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
out, err = p.communicate()
return out,err
def schema():
"""
    This returns the schema for this module's input: each item is a package name given as a string.
"""
return { 'title': 'apt schema',
'type': 'string'
}
def verify(inputhashes):
    """
    Check with dpkg-query that each package in inputhashes is installed.
    Returns the list of packages that could not be verified.
    """
failed = []
for package in inputhashes:
out = run_command(['dpkg-query', '-W', package])[0]
        #We expect the output to contain the package name and version
        #dpkg-query errors go to stderr (not captured here), so out is empty on failure
if(find(out, package) == -1):
failed.append(package)
return failed
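#Illustrative usage (hypothetical package names, added for clarity):
#  verify(['curl', 'git']) returns [] when both packages are installed, and returns the names
#  of any packages that dpkg-query could not find.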
def apply(inputhashes, dry_run=True):
failed = []
for package in inputhashes:
out = run_command(['apt-get', 'install', '-y', package])
if(find(out[1], 'Permission denied') != -1):
failed.append(package) #Install failed because we're not root
        if(find(out[0], ('Setting up ' + package)) == -1 and find(out[0], (package + ' already the newest version')) == -1):
#Something else happened, we weren't installed and we didn't get installed
failed.append(package)
print out
return failed
|
"""
Produce a Reduced Representation genome from a FASTA genome file. The sequence is cut into fragments according to the given restriction enzyme recognition sequence: this sequence is always cleaved just after the first base at the 5' end (reading towards 3') and just before the last base on the complementary strand.
ex for Msp1:
C|CGG
--
GGC|C
ex for BssS1:
C|ACGAG
----
GTGCT|C
Fragments may be size selected or not. Chromosome scaffolds may be treated or not.
The output is a chromosome-sorted fragment FASTA file. The description line displays the chromosome name, fragment start and stop.
CG addition: the Python split command removes every occurrence of the recognition site (e.g. CCGG) from the sequences, so CCG, CGG and C have to be added back to the fragments. CGG is always added at the 5' start of a fragment (except for the first one). A C is always added at the 3' end of a fragment. A CG is added after that C at the 3' end ONLY if the following fragment is not contiguous.
argument #1 = FASTA genome file (sequences must be written on one line only)
argument #2 = recognition sequence (i.e. CCGG for Msp1)
argument #3 = min fragment size, argument #4 = max fragment size (if min = 0 and max = -1, no size selection)
argument #5 = enter "1" if chromosome scaffolds must be treated
"""
from string import *
import sys
from sys import argv
import re
import os
from os import getcwd
def cut_seq(sequence,rest_enz,fragment_number,empty_fragments):
coupage_seq = sequence.split(rest_enz)
start_frag = 1
end_frag = 0
for idx in range(0,len(coupage_seq)):
# fragment count incremented
####fragment_number = fragment_number + 1
current_frag = coupage_seq[idx]
if current_frag == '':
# if frag is empty, empty frag incremented
empty_fragments = empty_fragments + 1
# if not the first fragment:
if idx != 0:
current_frag = rest_enz[1:]+ current_frag # CGG for Msp1
sel_current_frag = 0
if max_frag == -1 or (len(current_frag) > int(min_frag) and len(current_frag) < int(max_frag)):
sel_current_frag = 1
# If not the first fragment:
if idx != 0:
# if current_frag has the right size
if sel_current_frag == 1:
                # if the previous fragment has the right size
                if sel_last_frag == 1:
                    # => contiguous fragments
                    last_frag = last_frag + rest_enz[:-3]  # C
                else:  # if the previous fragment hasn't the right size
                    last_frag = last_frag + rest_enz[:-1]  # CCG
                # end if sel_last_frag == 1:
            else:  # sel_current_frag != 1: the current fragment is not kept
                last_frag = last_frag + rest_enz[:-1]  # CCG
# End if sel_current_frag == 1:
if sel_last_frag == 1:
ofh.write(chromosome +'\t'+ str(start_frag) + '\t'+ str(end_frag) + '\t'+ last_frag + '\n')
fragment_number = fragment_number + 1
# End if sel_last_frag == 1:
# End if not first fragment:
        # the current selection flag becomes the previous one
        sel_last_frag = sel_current_frag
        # the current fragment becomes the last treated one
last_frag = current_frag
start_frag = end_frag + 1
end_frag = len(current_frag) + start_frag
# END for idx in........
if sel_last_frag:
ofh.write(chromosome +'\t'+ str(start_frag) + '\t'+ str(end_frag) + '\t'+ last_frag +'\n')
fragment_number = fragment_number + 1
return fragment_number, empty_fragments
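# Illustrative walk-through of cut_seq (added comment, example sequence is made up): with
# rest_enz = 'CCGG' (Msp1) and no size selection, 'ACCGGTTCCGGA'.split('CCGG') gives
# ['A', 'TT', 'A']. The loop then restores the bases removed by split(): 'CGG' is prepended to
# every fragment but the first, and 'C' is appended to a fragment whose successor is also kept
# (contiguous), so the written fragments are AC, CGGTTC and CGGA, which concatenate back to
# the original sequence.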
input_file = argv[1]
rest_enz = argv[2]
min_frag = int(argv[3])
max_frag = int(argv[4])
if len(argv)>=6:
treat_scaffold=int(argv[5])
else:
treat_scaffold=0
if treat_scaffold!=1 and treat_scaffold!=0:
print "Treat scaffold parameter incorrect (expect 1 or 0) : ",treat_scaffold
exit(1)
print "---------------------------"
print "Input file :\t",input_file
print "Restriction site :\t",rest_enz
if max_frag==-1:
label_selection="No selection"
output_file1=os.path.abspath(input_file)
output_file1=output_file1[:(output_file1.rfind("."))]+"_frag_in_silico_"+rest_enz+".tmp"
output_file2=os.path.abspath(input_file)
output_file2=output_file2[:(output_file2.rfind("."))]+"_frag_in_silico_"+rest_enz+".fasta"
else:
label_selection="[" + str(min_frag) + ";" + str(max_frag) +"]"
output_file1=os.path.abspath(input_file)
output_file1=output_file1[:(output_file1.rfind("."))]+"_frag_in_silico_"+rest_enz+"_"+str(min_frag)+"_"+str(max_frag)+".tmp"
output_file2=os.path.abspath(input_file)
output_file2=output_file2[:(output_file2.rfind("."))]+"_frag_in_silico_"+rest_enz+"_"+str(min_frag)+"_"+str(max_frag)+".fasta"
print "Size selection :\t",label_selection
if treat_scaffold==0:
label_scaffold="No"
else:
label_scaffold="Yes"
print "Treat scaffold :\t",label_scaffold
print "Output file :\t",output_file2
print "---------------------------"
Fragment_total_number = 0
fragment_number = 0
empty_fragments = 0
empty_fragments_total_number = 0
sequence =''
numericChromosome=re.compile('([Cc][Hh][Rr])?([0-9]+)$')
realChromosome=re.compile('([Cc][Hh][Rr])?(M[Tt]|X|Y)$')
current_dir = getcwd()
ifh = open(os.path.abspath(input_file))
pattern = re.search(".fa$",input_file)
if pattern:
input_file = re.sub(".fa$","",input_file)
ofh = open(output_file1, "w")
for line in ifh:
line = line.rstrip('\r\n')
if line.startswith('>'):
if sequence !='':
output_var_from_cut_seq_function = cut_seq(sequence,rest_enz,fragment_number,empty_fragments)
Fragment_total_number = Fragment_total_number + output_var_from_cut_seq_function[0]
empty_fragments_total_number = empty_fragments_total_number + output_var_from_cut_seq_function[1]
line = line.split()
chromosome = line[0]
pattern_chr = re.search("^>([Cc][Hh][Rr])?_?(.*)$",chromosome)
if pattern_chr:
chromosome = pattern_chr.group(2)
sequence =''
# scaffolds are not treated: chrUn....
toTreat=0
if (numericChromosome.match(chromosome) or realChromosome.match(chromosome) or treat_scaffold==1):
toTreat=1
else:
if toTreat==0:
continue
line = line.upper()
    # Check that the line matches ^[AGTCN]*$; otherwise report the offending line
if re.match ('^[AGTCN]*$', line):
sequence=sequence+line
else:
        print 'special characters found', line
# End if line starts with >
if sequence !='':
output_var_from_cut_seq_function = cut_seq(sequence,rest_enz,fragment_number,empty_fragments)
Fragment_total_number = Fragment_total_number + output_var_from_cut_seq_function[0]
empty_fragments_total_number = empty_fragments_total_number + output_var_from_cut_seq_function[1]
print "Fragments total number = ", Fragment_total_number
print 'Empty fragments total number = ', empty_fragments_total_number
ifh.close()
ofh.close()
file_1 = open(output_file1)
ofh = open(output_file2, "w")
locations={}
print "Sorting initialization..."
for line in file_1:
line = line.rstrip('\n\r')
elmts = line.split("\t")
chr = elmts[0]
start = elmts[1]
end = elmts[2]
sequ = elmts[3]
keyLoc = chr + "_"+ start+ "_"+ end
if locations.has_key(keyLoc):
print "Several lines for chromosome '",chr,"' and start=",start
locations[keyLoc]=sequ
file_1.close()
def sortChromosomeAndStart_V2(keyLoc1, keyLoc2):
# split the keyloc with regards to the underscore between the chromosome and the start
m1=re.match("^(.*)_([0-9]+)_([0-9]+)$",keyLoc1)
if m1 is None :
sys.exit("Cannot interpret chromosome location '"+keyLoc1+"'. Exiting.")
m2=re.match("^(.*)_([0-9]+)_([0-9]+)$",keyLoc2)
if m2 is None :
sys.exit("Cannot interpret chromosome location '"+keyLoc2+"'. Exiting.")
loc1 = (m1.group(1),m1.group(2))
loc2 = (m2.group(1),m2.group(2))
if (loc1[0] == loc2[0]):
# if same chromosome : sort on start
return int(loc1[1])-int(loc2[1])
else:
m = numericChromosome.match(loc1[0])
if m:
# group : return the string matched by the RE
chr1 = m.group(2)
m = numericChromosome.match(loc2[0])
if m:
chr2 = m.group(2)
#Compare chromosome number
return int(chr1)-int(chr2)
else:
return -1
else:
m = numericChromosome.match(loc2[0])
if m:
return 1
else: #Neither chr1 nor chr2 are numeric
m1 = realChromosome.match(loc1[0])
m2 = realChromosome.match(loc2[0])
if m1 and m2:
if loc1[0] < loc2[0]:
return -1
else:
return 1
elif m1:
return -1
elif m2:
return 1
else:
#Compare letters
if loc1[0] < loc2[0]:
return -1
else:
return 1
sortedLocations = sorted(locations.keys(), cmp = sortChromosomeAndStart_V2)
for keyLoc in sortedLocations:
ofh.write('>' + keyLoc + '\n' + locations[keyLoc] + '\n')
os.chmod(output_file2, 0775)
os.remove(output_file1)
ofh.close()
|
"""
All methods must return media_ids that can be
passed into e.g. like() or comment() functions.
"""
import random
from tqdm import tqdm
from . import delay
def get_media_owner(self, media_id):
self.mediaInfo(media_id)
try:
return str(self.LastJson["items"][0]["user"]["pk"])
except:
return False
def get_popular_medias(self):
self.getPopularFeed()
return [str(media['pk']) for media in self.LastJson['items']]
def get_your_medias(self, as_dict=False):
self.getSelfUserFeed()
if as_dict:
return self.LastJson["items"]
return self.filter_medias(self.LastJson["items"], False)
def get_archived_medias(self, as_dict=False):
self.getArchiveFeed()
if as_dict:
return self.LastJson["items"]
return self.filter_medias(self.LastJson["items"], False)
def get_timeline_medias(self, filtration=True):
if not self.getTimelineFeed():
self.logger.warning("Error while getting timeline feed.")
return []
return self.filter_medias(self.LastJson["items"], filtration)
def get_user_medias(self, user_id, filtration=True, is_comment=False):
user_id = self.convert_to_user_id(user_id)
self.getUserFeed(user_id)
if self.LastJson["status"] == 'fail':
self.logger.warning("This is a closed account.")
return []
return self.filter_medias(self.LastJson["items"], filtration, is_comment=is_comment)
def get_total_user_medias(self, user_id):
user_id = self.convert_to_user_id(user_id)
medias = self.getTotalUserFeed(user_id)
if self.LastJson["status"] == 'fail':
self.logger.warning("This is a closed account.")
return []
return self.filter_medias(medias, filtration=False)
def get_user_likers(self, user_id, media_count=10):
your_likers = set()
media_items = self.get_user_medias(user_id, filtration=False)
if not media_items:
self.logger.warning("Can't get %s medias." % user_id)
return []
for media_id in tqdm(media_items[:media_count],
desc="Getting %s media likers" % user_id):
media_likers = self.get_media_likers(media_id)
your_likers |= set(media_likers)
return list(your_likers)
def get_hashtag_medias(self, hashtag, filtration=True):
if not self.getHashtagFeed(hashtag):
self.logger.warning("Error while getting hashtag feed.")
return []
return self.filter_medias(self.LastJson["items"], filtration)
def get_total_hashtag_medias(self, hashtag, amount=100, filtration=False):
medias = self.getTotalHashtagFeed(hashtag, amount)
return self.filter_medias(medias, filtration=filtration)
def get_geotag_medias(self, geotag, filtration=True):
# TODO: returns list of medias from geotag
pass
def get_locations_from_coordinates(self, latitude, longitude):
self.searchLocation(lat=latitude, lng=longitude)
return [location for location in self.LastJson["items"] if int(location["location"]["lat"]) == int(latitude) and
int(location["location"]["lng"]) == int(longitude)]
def get_media_info(self, media_id):
if isinstance(media_id, dict):
return media_id
self.mediaInfo(media_id)
if "items" not in self.LastJson:
self.logger.info("Media with %s not found." % media_id)
return []
return self.LastJson["items"]
def get_timeline_users(self):
# TODO: returns list userids who just posted on your timeline feed
if not self.getTimelineFeed():
self.logger.warning("Error while getting timeline feed.")
return []
return [str(i['user']['pk']) for i in self.LastJson['items'] if i.get('user')]
def get_hashtag_users(self, hashtag):
users = []
self.getHashtagFeed(hashtag)
for i in self.LastJson['items']:
users.append(str(i['user']['pk']))
return users
def get_geotag_users(self, geotag):
# TODO: returns list userids who just posted on this geotag
pass
def get_userid_from_username(self, username):
self.searchUsername(username)
if "user" in self.LastJson:
return str(self.LastJson["user"]["pk"])
return None # Not found
def get_username_from_userid(self, userid):
self.getUsernameInfo(userid)
if "user" in self.LastJson:
return str(self.LastJson["user"]["username"])
return None # Not found
def get_user_info(self, user_id):
user_id = self.convert_to_user_id(user_id)
self.getUsernameInfo(user_id)
if 'user' not in self.LastJson:
return False
return self.LastJson['user']
def get_user_followers(self, user_id, nfollows):
user_id = self.convert_to_user_id(user_id)
followers = self.getTotalFollowers(user_id, nfollows)
return [str(item['pk']) for item in followers][::-1] if followers else []
def get_user_following(self, user_id, nfollows=None):
user_id = self.convert_to_user_id(user_id)
following = self.getTotalFollowings(user_id, nfollows)
return [str(item['pk']) for item in following][::-1] if following else []
def get_media_likers(self, media_id):
self.getMediaLikers(media_id)
if "users" not in self.LastJson:
self.logger.info("Media with %s not found." % media_id)
return []
return list(map(lambda user: str(user['pk']), self.LastJson["users"]))
def get_media_comments(self, media_id, only_text=False):
self.getMediaComments(media_id)
if 'comments' not in self.LastJson:
return []
if only_text:
return [str(item["text"]) for item in self.LastJson['comments']]
return self.LastJson['comments']
def get_media_commenters(self, media_id):
self.getMediaComments(media_id)
if 'comments' not in self.LastJson:
return []
return [str(item["user"]["pk"]) for item in self.LastJson['comments']]
def get_comment(self):
if len(self.comments):
return random.choice(self.comments).strip()
return "wow"
def get_media_id_from_link(self, link):
if 'instagram.com/p/' not in link:
self.logger.error('Unexpected link')
return False
link = link.split('/')
code = link[link.index('p') + 1]
alphabet = {'-': 62, '1': 53, '0': 52, '3': 55, '2': 54, '5': 57, '4': 56, '7': 59, '6': 58, '9': 61, '8': 60,
'A': 0, 'C': 2, 'B': 1, 'E': 4, 'D': 3, 'G': 6, 'F': 5, 'I': 8, 'H': 7, 'K': 10, 'J': 9, 'M': 12,
'L': 11, 'O': 14, 'N': 13, 'Q': 16, 'P': 15, 'S': 18, 'R': 17, 'U': 20, 'T': 19, 'W': 22, 'V': 21,
'Y': 24, 'X': 23, 'Z': 25, '_': 63, 'a': 26, 'c': 28, 'b': 27, 'e': 30, 'd': 29, 'g': 32, 'f': 31,
'i': 34, 'h': 33, 'k': 36, 'j': 35, 'm': 38, 'l': 37, 'o': 40, 'n': 39, 'q': 42, 'p': 41, 's': 44,
'r': 43, 'u': 46, 't': 45, 'w': 48, 'v': 47, 'y': 50, 'x': 49, 'z': 51}
result = 0
for char in code:
result = result * 64 + alphabet[char]
return result
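# Worked example (added for illustration, hypothetical shortcode): for a link such as
# 'https://instagram.com/p/Bad/' the shortcode 'Bad' is decoded digit by digit in base 64
# using the alphabet above: ((0 * 64 + 1) * 64 + 26) * 64 + 29 = 5789.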
def convert_to_user_id(self, smth):
smth = str(smth)
if not smth.isdigit():
        if smth.startswith("@"):  # cut leading @
smth = smth[1:]
smth = self.get_userid_from_username(smth)
delay.very_small_delay(self)
    # if smth is all digits it is already a user_id, so it is passed through unchanged
return smth
|
import unittest
from dummy import dummy
class DummyTestCase(unittest.TestCase):
def setUp(self):
self.tobcri = dummy.Dummy()
def test_true(self):
self.assertEqual('Hello World!', str(self.tobcri))
if __name__ == '__main__':
unittest.main()
|
"""Implementation of the AWS Config Service APIs."""
import json
import re
import time
import random
import string
from datetime import datetime
from moto.config.exceptions import (
InvalidResourceTypeException,
InvalidDeliveryFrequency,
InvalidConfigurationRecorderNameException,
NameTooLongException,
MaxNumberOfConfigurationRecordersExceededException,
InvalidRecordingGroupException,
NoSuchConfigurationRecorderException,
NoAvailableConfigurationRecorderException,
InvalidDeliveryChannelNameException,
NoSuchBucketException,
InvalidS3KeyPrefixException,
InvalidSNSTopicARNException,
MaxNumberOfDeliveryChannelsExceededException,
NoAvailableDeliveryChannelException,
NoSuchDeliveryChannelException,
LastDeliveryChannelDeleteFailedException,
TagKeyTooBig,
TooManyTags,
TagValueTooBig,
TooManyAccountSources,
InvalidParameterValueException,
InvalidNextTokenException,
NoSuchConfigurationAggregatorException,
InvalidTagCharacters,
DuplicateTags,
InvalidLimitException,
InvalidResourceParameters,
TooManyResourceIds,
ResourceNotDiscoveredException,
ResourceNotFoundException,
TooManyResourceKeys,
InvalidResultTokenException,
ValidationException,
NoSuchOrganizationConformancePackException,
MaxNumberOfConfigRulesExceededException,
InsufficientPermissionsException,
NoSuchConfigRuleException,
ResourceInUseException,
MissingRequiredConfigRuleParameterException,
)
from moto.core import BaseBackend, BaseModel
from moto.core import ACCOUNT_ID as DEFAULT_ACCOUNT_ID
from moto.core.responses import AWSServiceSpec
from moto.core.utils import BackendDict
from moto.iam.config import role_config_query, policy_config_query
from moto.s3.config import s3_config_query
from moto.s3control.config import s3_account_public_access_block_query
from moto.utilities.utils import load_resource
POP_STRINGS = [
"capitalizeStart",
"CapitalizeStart",
"capitalizeArn",
"CapitalizeArn",
"capitalizeARN",
"CapitalizeARN",
]
DEFAULT_PAGE_SIZE = 100
CONFIG_RULE_PAGE_SIZE = 25
RESOURCE_MAP = {
"AWS::S3::Bucket": s3_config_query,
"AWS::S3::AccountPublicAccessBlock": s3_account_public_access_block_query,
"AWS::IAM::Role": role_config_query,
"AWS::IAM::Policy": policy_config_query,
}
CAMEL_TO_SNAKE_REGEX = re.compile(r"(?<!^)(?=[A-Z])")
MAX_TAGS_IN_ARG = 50
MANAGED_RULES = load_resource(__name__, "resources/aws_managed_rules.json")
MANAGED_RULES_CONSTRAINTS = MANAGED_RULES["ManagedRules"]
def datetime2int(date):
return int(time.mktime(date.timetuple()))
def snake_to_camels(original, cap_start, cap_arn):
parts = original.split("_")
camel_cased = parts[0].lower() + "".join(p.title() for p in parts[1:])
if cap_arn:
camel_cased = camel_cased.replace(
"Arn", "ARN"
) # Some config services use 'ARN' instead of 'Arn'
if cap_start:
camel_cased = camel_cased[0].upper() + camel_cased[1::]
return camel_cased
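# Worked examples (added for illustration, derived from the logic above):
#   snake_to_camels("s3_bucket_name", cap_start=False, cap_arn=False) -> "s3BucketName"
#   snake_to_camels("role_arn", cap_start=False, cap_arn=True)        -> "roleARN"
#   snake_to_camels("account_ids", cap_start=True, cap_arn=False)     -> "AccountIds"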
def random_string():
    """Returns a random string of 8 lowercase letters for the Config Aggregator ARN."""
chars = []
for _ in range(0, 8):
chars.append(random.choice(string.ascii_lowercase))
return "".join(chars)
def validate_tag_key(tag_key, exception_param="tags.X.member.key"):
"""Validates the tag key.
:param tag_key: The tag key to check against.
:param exception_param: The exception parameter to send over to help
format the message. This is to reflect
the difference between the tag and untag APIs.
:return:
"""
# Validate that the key length is correct:
if len(tag_key) > 128:
raise TagKeyTooBig(tag_key, param=exception_param)
# Validate that the tag key fits the proper Regex:
# [\w\s_.:/=+\-@]+ SHOULD be the same as the Java regex on the AWS
# documentation: [\p{L}\p{Z}\p{N}_.:/=+\-@]+
match = re.findall(r"[\w\s_.:/=+\-@]+", tag_key)
# Kudos if you can come up with a better way of doing a global search :)
if not match or len(match[0]) < len(tag_key):
raise InvalidTagCharacters(tag_key, param=exception_param)
def check_tag_duplicate(all_tags, tag_key):
"""Validates that a tag key is not a duplicate
:param all_tags: Dict to check if there is a duplicate tag.
:param tag_key: The tag key to check against.
:return:
"""
if all_tags.get(tag_key):
raise DuplicateTags()
def validate_tags(tags):
proper_tags = {}
if len(tags) > MAX_TAGS_IN_ARG:
raise TooManyTags(tags)
for tag in tags:
# Validate the Key:
validate_tag_key(tag["Key"])
check_tag_duplicate(proper_tags, tag["Key"])
# Validate the Value:
if len(tag["Value"]) > 256:
raise TagValueTooBig(tag["Value"])
proper_tags[tag["Key"]] = tag["Value"]
return proper_tags
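# Illustrative behaviour (added comment, example values are arbitrary): a request payload of
# [{"Key": "Environment", "Value": "prod"}] is converted to {"Environment": "prod"}, while a
# key over 128 characters, a value over 256 characters, a duplicate key, or a key with invalid
# characters raises the corresponding exception imported from moto.config.exceptions.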
def convert_to_class_args(dict_arg):
    """Return a dict that can be used to instantiate its representative class.
    Given a dictionary in the incoming API request, convert the keys to
    snake case to use as arguments when instantiating the representative
    class's __init__().
"""
class_args = {}
for key, value in dict_arg.items():
class_args[CAMEL_TO_SNAKE_REGEX.sub("_", key).lower()] = value
# boto detects if extra/unknown arguments are provided, so it's not
# necessary to do so here.
return class_args
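# Worked example (added for illustration): {"Name": "default", "RoleArn": "arn:aws:iam::..."}
# becomes {"name": "default", "role_arn": "arn:aws:iam::..."} via the regex substitution above.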
class ConfigEmptyDictable(BaseModel):
"""Base class to make serialization easy.
This assumes that the sub-class will NOT return 'None's in the JSON.
"""
def __init__(self, capitalize_start=False, capitalize_arn=True):
"""Assists with the serialization of the config object
:param capitalize_start: For some Config services, the first letter
is lowercase -- for others it's capital
:param capitalize_arn: For some Config services, the API expects
'ARN' and for others, it expects 'Arn'
"""
self.capitalize_start = capitalize_start
self.capitalize_arn = capitalize_arn
def to_dict(self):
data = {}
for item, value in self.__dict__.items():
# ignore private attributes
if not item.startswith("_") and value is not None:
if isinstance(value, ConfigEmptyDictable):
data[
snake_to_camels(
item, self.capitalize_start, self.capitalize_arn
)
] = value.to_dict()
else:
data[
snake_to_camels(
item, self.capitalize_start, self.capitalize_arn
)
] = value
# Cleanse the extra properties:
for prop in POP_STRINGS:
data.pop(prop, None)
return data
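# Illustrative serialization (added comment): a ConfigDeliverySnapshotProperties("TwentyFour_Hours")
# instance serializes to {"deliveryFrequency": "TwentyFour_Hours"}; the capitalize_start /
# capitalize_arn bookkeeping attributes are stripped out again via POP_STRINGS.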
class ConfigRecorderStatus(ConfigEmptyDictable):
def __init__(self, name):
super().__init__()
self.name = name
self.recording = False
self.last_start_time = None
self.last_stop_time = None
self.last_status = None
self.last_error_code = None
self.last_error_message = None
self.last_status_change_time = None
def start(self):
self.recording = True
self.last_status = "PENDING"
self.last_start_time = datetime2int(datetime.utcnow())
self.last_status_change_time = datetime2int(datetime.utcnow())
def stop(self):
self.recording = False
self.last_stop_time = datetime2int(datetime.utcnow())
self.last_status_change_time = datetime2int(datetime.utcnow())
class ConfigDeliverySnapshotProperties(ConfigEmptyDictable):
def __init__(self, delivery_frequency):
super().__init__()
self.delivery_frequency = delivery_frequency
class ConfigDeliveryChannel(ConfigEmptyDictable):
def __init__(
self, name, s3_bucket_name, prefix=None, sns_arn=None, snapshot_properties=None
):
super().__init__()
self.name = name
self.s3_bucket_name = s3_bucket_name
self.s3_key_prefix = prefix
self.sns_topic_arn = sns_arn
self.config_snapshot_delivery_properties = snapshot_properties
class RecordingGroup(ConfigEmptyDictable):
def __init__(
self,
all_supported=True,
include_global_resource_types=False,
resource_types=None,
):
super().__init__()
self.all_supported = all_supported
self.include_global_resource_types = include_global_resource_types
self.resource_types = resource_types
class ConfigRecorder(ConfigEmptyDictable):
def __init__(self, role_arn, recording_group, name="default", status=None):
super().__init__()
self.name = name
self.role_arn = role_arn
self.recording_group = recording_group
if not status:
self.status = ConfigRecorderStatus(name)
else:
self.status = status
class AccountAggregatorSource(ConfigEmptyDictable):
def __init__(self, account_ids, aws_regions=None, all_aws_regions=None):
super().__init__(capitalize_start=True)
# Can't have both the regions and all_regions flag present -- also
# can't have them both missing:
if aws_regions and all_aws_regions:
raise InvalidParameterValueException(
"Your configuration aggregator contains a list of regions "
"and also specifies the use of all regions. You must choose "
"one of these options."
)
if not (aws_regions or all_aws_regions):
raise InvalidParameterValueException(
"Your request does not specify any regions. Select AWS Config-supported "
"regions and try again."
)
self.account_ids = account_ids
self.aws_regions = aws_regions
if not all_aws_regions:
all_aws_regions = False
self.all_aws_regions = all_aws_regions
class OrganizationAggregationSource(ConfigEmptyDictable):
def __init__(self, role_arn, aws_regions=None, all_aws_regions=None):
super().__init__(capitalize_start=True, capitalize_arn=False)
# Can't have both the regions and all_regions flag present -- also
# can't have them both missing:
if aws_regions and all_aws_regions:
raise InvalidParameterValueException(
"Your configuration aggregator contains a list of regions and also specifies "
"the use of all regions. You must choose one of these options."
)
if not (aws_regions or all_aws_regions):
raise InvalidParameterValueException(
"Your request does not specify any regions. Select AWS Config-supported "
"regions and try again."
)
self.role_arn = role_arn
self.aws_regions = aws_regions
if not all_aws_regions:
all_aws_regions = False
self.all_aws_regions = all_aws_regions
class ConfigAggregator(ConfigEmptyDictable):
def __init__(self, name, region, account_sources=None, org_source=None, tags=None):
super().__init__(capitalize_start=True, capitalize_arn=False)
self.configuration_aggregator_name = name
self.configuration_aggregator_arn = "arn:aws:config:{region}:{id}:config-aggregator/config-aggregator-{random}".format(
region=region, id=DEFAULT_ACCOUNT_ID, random=random_string()
)
self.account_aggregation_sources = account_sources
self.organization_aggregation_source = org_source
self.creation_time = datetime2int(datetime.utcnow())
self.last_updated_time = datetime2int(datetime.utcnow())
# Tags are listed in the list_tags_for_resource API call.
self.tags = tags or {}
# Override the to_dict so that we can format the tags properly...
def to_dict(self):
result = super().to_dict()
# Override the account aggregation sources if present:
if self.account_aggregation_sources:
result["AccountAggregationSources"] = [
a.to_dict() for a in self.account_aggregation_sources
]
if self.tags:
result["Tags"] = [
{"Key": key, "Value": value} for key, value in self.tags.items()
]
return result
class ConfigAggregationAuthorization(ConfigEmptyDictable):
def __init__(
self, current_region, authorized_account_id, authorized_aws_region, tags=None
):
super().__init__(capitalize_start=True, capitalize_arn=False)
self.aggregation_authorization_arn = (
"arn:aws:config:{region}:{id}:aggregation-authorization/"
"{auth_account}/{auth_region}".format(
region=current_region,
id=DEFAULT_ACCOUNT_ID,
auth_account=authorized_account_id,
auth_region=authorized_aws_region,
)
)
self.authorized_account_id = authorized_account_id
self.authorized_aws_region = authorized_aws_region
self.creation_time = datetime2int(datetime.utcnow())
# Tags are listed in the list_tags_for_resource API call.
self.tags = tags or {}
class OrganizationConformancePack(ConfigEmptyDictable):
def __init__(
self,
region,
name,
delivery_s3_bucket,
delivery_s3_key_prefix=None,
input_parameters=None,
excluded_accounts=None,
):
super().__init__(capitalize_start=True, capitalize_arn=False)
self._status = "CREATE_SUCCESSFUL"
self._unique_pack_name = "{0}-{1}".format(name, random_string())
self.conformance_pack_input_parameters = input_parameters or []
self.delivery_s3_bucket = delivery_s3_bucket
self.delivery_s3_key_prefix = delivery_s3_key_prefix
self.excluded_accounts = excluded_accounts or []
self.last_update_time = datetime2int(datetime.utcnow())
self.organization_conformance_pack_arn = "arn:aws:config:{0}:{1}:organization-conformance-pack/{2}".format(
region, DEFAULT_ACCOUNT_ID, self._unique_pack_name
)
self.organization_conformance_pack_name = name
def update(
self,
delivery_s3_bucket,
delivery_s3_key_prefix,
input_parameters,
excluded_accounts,
):
self._status = "UPDATE_SUCCESSFUL"
self.conformance_pack_input_parameters = input_parameters
self.delivery_s3_bucket = delivery_s3_bucket
self.delivery_s3_key_prefix = delivery_s3_key_prefix
self.excluded_accounts = excluded_accounts
self.last_update_time = datetime2int(datetime.utcnow())
class Scope(ConfigEmptyDictable):
"""Defines resources that can trigger an evaluation for the rule.
Per boto3 documentation, Scope can be one of:
- one or more resource types,
- combo of one resource type and one resource ID,
- combo of tag key and value.
    If no scope is specified, evaluations are triggered when any resource
in the recording group changes.
"""
def __init__(
self,
compliance_resource_types=None,
tag_key=None,
tag_value=None,
compliance_resource_id=None,
):
super().__init__(capitalize_start=True, capitalize_arn=False)
self.tags = None
if tag_key or tag_value:
if tag_value and not tag_key:
raise InvalidParameterValueException(
"Tag key should not be empty when tag value is provided in scope"
)
if tag_key and len(tag_key) > 128:
raise TagKeyTooBig(tag_key, "ConfigRule.Scope.TagKey")
if tag_value and len(tag_value) > 256:
raise TagValueTooBig(tag_value, "ConfigRule.Scope.TagValue")
self.tags = {tag_key: tag_value}
# Can't use more than one combo to specify scope - either tags,
# resource types, or resource id and resource type.
if self.tags and (compliance_resource_types or compliance_resource_id):
raise InvalidParameterValueException(
"Scope cannot be applied to both resource and tag"
)
        if compliance_resource_id and len(compliance_resource_types or []) != 1:
raise InvalidParameterValueException(
"A single resourceType should be provided when resourceId "
"is provided in scope"
)
self.compliance_resource_types = compliance_resource_types
self.compliance_resource_id = compliance_resource_id
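# Illustrative sketch (not part of the upstream module): the three legal Scope
# combinations described in the docstring, plus one illegal mix. All values
# are placeholders.
def _example_scope_combos():
    Scope(compliance_resource_types=["AWS::S3::Bucket"])  # resource types only
    Scope(tag_key="stage", tag_value="prod")  # tag key/value pair
    Scope(
        compliance_resource_types=["AWS::S3::Bucket"],
        compliance_resource_id="my-bucket",
    )  # one type plus one resource ID
    try:
        # Tags cannot be combined with resource scoping.
        Scope(tag_key="stage", compliance_resource_id="my-bucket")
    except InvalidParameterValueException:
        pass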
class SourceDetail(ConfigEmptyDictable):
"""Source and type of event triggering AWS Config resource evaluation.
Applies only to customer rules.
"""
MESSAGE_TYPES = {
"ConfigurationItemChangeNotification",
"ConfigurationSnapshotDeliveryCompleted",
"OversizedConfigurationItemChangeNotification",
"ScheduledNotification",
}
DEFAULT_FREQUENCY = "TwentyFour_Hours"
FREQUENCY_TYPES = {
"One_Hour",
"Six_Hours",
"Three_Hours",
"Twelve_Hours",
"TwentyFour_Hours",
}
EVENT_SOURCES = ["aws.config"]
def __init__(
self, event_source=None, message_type=None, maximum_execution_frequency=None
):
super().__init__(capitalize_start=True, capitalize_arn=False)
# If the event_source or message_type fields are not provided,
# boto3 reports: "SourceDetails should be null/empty if the owner is
# AWS. SourceDetails should be provided if the owner is CUSTOM_LAMBDA."
# A more specific message will be used here instead.
if not event_source:
raise MissingRequiredConfigRuleParameterException(
"Missing required parameter in ConfigRule.SourceDetails: "
"'EventSource'"
)
if event_source not in SourceDetail.EVENT_SOURCES:
raise ValidationException(
f"Value '{event_source}' at "
f"'configRule.source.sourceDetails.eventSource' failed "
f"to satisfy constraint: Member must satisfy enum value set: {{"
+ ", ".join((SourceDetail.EVENT_SOURCES))
+ "}"
)
if not message_type:
# boto3 doesn't have a specific error if this field is missing.
raise MissingRequiredConfigRuleParameterException(
"Missing required parameter in ConfigRule.SourceDetails: 'MessageType'"
)
if message_type not in SourceDetail.MESSAGE_TYPES:
raise ValidationException(
f"Value '{message_type}' at "
f"'configRule.source.sourceDetails.message_type' failed "
f"to satisfy constraint: Member must satisfy enum value set: {{"
+ ", ".join(sorted(SourceDetail.MESSAGE_TYPES))
+ "}"
)
if maximum_execution_frequency:
if maximum_execution_frequency not in SourceDetail.FREQUENCY_TYPES:
raise ValidationException(
f"Value '{maximum_execution_frequency}' at "
f"'configRule.source.sourceDetails.maximumExecutionFrequency' "
f"failed to satisfy constraint: "
f"Member must satisfy enum value set: {{"
+ ", ".join(sorted(SourceDetail.FREQUENCY_TYPES))
+ "}"
)
if message_type in [
"ConfigurationItemChangeNotification",
"OversizedConfigurationItemChangeNotification",
]:
raise InvalidParameterValueException(
"A maximum execution frequency is not allowed if "
"MessageType is ConfigurationItemChangeNotification or "
"OversizedConfigurationItemChangeNotification"
)
else:
# If no value is specified, use a default value for
# maximum_execution_frequency for message types representing a
# periodic trigger.
if message_type in [
"ScheduledNotification",
"ConfigurationSnapshotDeliveryCompleted",
]:
maximum_execution_frequency = SourceDetail.DEFAULT_FREQUENCY
self.event_source = event_source
self.message_type = message_type
self.maximum_execution_frequency = maximum_execution_frequency
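# Illustrative sketch (not part of the upstream module): a periodic message
# type picks up DEFAULT_FREQUENCY when no explicit frequency is supplied.
def _example_source_detail_default():
    detail = SourceDetail(
        event_source="aws.config", message_type="ScheduledNotification"
    )
    assert detail.maximum_execution_frequency == SourceDetail.DEFAULT_FREQUENCY
    return detail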
class Source(ConfigEmptyDictable):
"""Defines rule owner, id and notification for triggering evaluation."""
OWNERS = {"AWS", "CUSTOM_LAMBDA"}
def __init__(self, region, owner, source_identifier, source_details=None):
super().__init__(capitalize_start=True, capitalize_arn=False)
if owner not in Source.OWNERS:
raise ValidationException(
f"Value '{owner}' at 'configRule.source.owner' failed to "
f"satisfy constraint: Member must satisfy enum value set: {{"
+ ", ".join(sorted(Source.OWNERS))
+ "}"
)
if owner == "AWS":
# Can the Source ID be found in the dict of managed rule IDs?
if source_identifier not in MANAGED_RULES_CONSTRAINTS:
raise InvalidParameterValueException(
f"The sourceIdentifier {source_identifier} is invalid. "
f"Please refer to the documentation for a list of valid "
f"sourceIdentifiers that can be used when AWS is the Owner"
)
if source_details:
raise InvalidParameterValueException(
"SourceDetails should be null/empty if the owner is AWS. "
"SourceDetails should be provided if the owner is "
"CUSTOM_LAMBDA"
)
self.owner = owner
self.source_identifier = source_identifier
self.source_details = None
return
# Otherwise, owner == "CUSTOM_LAMBDA"
if not source_details:
raise InvalidParameterValueException(
"SourceDetails should be null/empty if the owner is AWS. "
"SourceDetails should be provided if the owner is CUSTOM_LAMBDA"
)
# Import is slow and as it's not needed for all config service
# operations, only load it if needed.
from moto.awslambda import lambda_backends
lambda_func = lambda_backends[region].get_function(source_identifier)
if not lambda_func:
raise InsufficientPermissionsException(
f"The AWS Lambda function {source_identifier} cannot be "
f"invoked. Check the specified function ARN, and check the "
f"function's permissions"
)
details = []
for detail in source_details:
detail_dict = convert_to_class_args(detail)
details.append(SourceDetail(**detail_dict))
self.source_details = details
self.owner = owner
self.source_identifier = source_identifier
def to_dict(self):
"""Format the SourceDetails properly."""
result = super().to_dict()
if self.source_details:
result["SourceDetails"] = [x.to_dict() for x in self.source_details]
return result
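# Illustrative sketch (not part of the upstream module): an AWS-owned Source
# needs a managed rule identifier and must not carry SourceDetails. The region
# is a placeholder; IAM_PASSWORD_POLICY is assumed to be present in
# MANAGED_RULES_CONSTRAINTS.
def _example_managed_source():
    source = Source("us-east-1", "AWS", "IAM_PASSWORD_POLICY")
    assert source.source_details is None
    return source.to_dict()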
class ConfigRule(ConfigEmptyDictable):
"""AWS Config Rule to evaluate compliance of resources to configuration.
Can be a managed or custom config rule. Contains the instantiations of
the Source and SourceDetail classes, and optionally the Scope class.
"""
MAX_RULES = 150
RULE_STATES = {"ACTIVE", "DELETING", "DELETING_RESULTS", "EVALUATING"}
def __init__(self, region, config_rule, tags):
super().__init__(capitalize_start=True, capitalize_arn=False)
self.config_rule_name = config_rule.get("ConfigRuleName")
if config_rule.get("ConfigRuleArn") or config_rule.get("ConfigRuleId"):
raise InvalidParameterValueException(
"ConfigRule Arn and Id can not be specified when creating a "
"new ConfigRule. ConfigRule Arn and Id are generated by the "
"service. Please try the request again without specifying "
"ConfigRule Arn or Id"
)
self.maximum_execution_frequency = None # keeps pylint happy
self.modify_fields(region, config_rule, tags)
self.config_rule_id = f"config-rule-{random_string():.6}"
self.config_rule_arn = f"arn:aws:config:{region}:{DEFAULT_ACCOUNT_ID}:config-rule/{self.config_rule_id}"
def modify_fields(self, region, config_rule, tags):
"""Initialize or update ConfigRule fields."""
self.config_rule_state = config_rule.get("ConfigRuleState", "ACTIVE")
if self.config_rule_state not in ConfigRule.RULE_STATES:
raise ValidationException(
f"Value '{self.config_rule_state}' at "
f"'configRule.configRuleState' failed to satisfy constraint: "
f"Member must satisfy enum value set: {{"
+ ", ".join(sorted(ConfigRule.RULE_STATES))
+ "}"
)
if self.config_rule_state != "ACTIVE":
raise InvalidParameterValueException(
f"The ConfigRuleState {self.config_rule_state} is invalid. "
f"Only the following values are permitted: ACTIVE"
)
self.description = config_rule.get("Description")
self.scope = None
if "Scope" in config_rule:
scope_dict = convert_to_class_args(config_rule["Scope"])
self.scope = Scope(**scope_dict)
source_dict = convert_to_class_args(config_rule["Source"])
self.source = Source(region, **source_dict)
self.input_parameters = config_rule.get("InputParameters")
self.input_parameters_dict = {}
if self.input_parameters:
try:
# A dictionary will be more useful when these parameters
# are actually needed.
self.input_parameters_dict = json.loads(self.input_parameters)
except ValueError:
raise InvalidParameterValueException( # pylint: disable=raise-missing-from
f"Invalid json {self.input_parameters} passed in the "
f"InputParameters field"
)
self.maximum_execution_frequency = config_rule.get("MaximumExecutionFrequency")
if self.maximum_execution_frequency:
if self.maximum_execution_frequency not in SourceDetail.FREQUENCY_TYPES:
raise ValidationException(
f"Value '{self.maximum_execution_frequency}' at "
f"'configRule.maximumExecutionFrequency' failed to "
f"satisfy constraint: Member must satisfy enum value set: {{"
+ ", ".join(sorted(SourceDetail.FREQUENCY_TYPES))
+ "}"
)
# For an AWS managed rule, validate the parameters and trigger type.
# Verify the MaximumExecutionFrequency makes sense as well.
if self.source.owner == "AWS":
self.validate_managed_rule()
else:
# Per the AWS documentation for a custom rule, ConfigRule's
# MaximumExecutionFrequency can only be set if the message type
# is ConfigSnapshotDeliveryProperties. However, if
# ConfigSnapshotDeliveryProperties is used, the AWS console
# leaves the Trigger Type blank and doesn't show the frequency.
# If you edit the rule, it doesn't show the frequency either.
#
# If you provide two custom rules, one with a message type of
# ConfigurationSnapshotDeliveryCompleted, one with
            # ScheduledNotification and specify a MaximumExecutionFrequency
# for each, the first one is shown on the AWS console and the
# second frequency is shown on the edit page.
#
# If you provide a custom rule for
# OversizedConfigurationItemChangeNotification (not a periodic
# trigger) with a MaximumExecutionFrequency for ConfigRule itself,
# boto3 doesn't complain and describe_config_rule() shows the
# frequency, but the AWS console and the edit page do not.
#
# So I'm not sure how to validate this situation or when to
# set this value to a default value.
pass
self.created_by = config_rule.get("CreatedBy")
if self.created_by:
raise InvalidParameterValueException(
"AWS Config populates the CreatedBy field for "
"ServiceLinkedConfigRule. Try again without populating the "
"CreatedBy field"
)
self.last_updated_time = datetime2int(datetime.utcnow())
self.tags = tags
def validate_managed_rule(self):
"""Validate parameters specific to managed rules."""
rule_info = MANAGED_RULES_CONSTRAINTS[self.source.source_identifier]
param_names = self.input_parameters_dict.keys()
# Verify input parameter names are actual parameters for the rule ID.
if param_names:
allowed_names = {x["Name"] for x in rule_info["Parameters"]}
if not set(param_names).issubset(allowed_names):
raise InvalidParameterValueException(
"Unknown parameters provided in the inputParameters: "
+ self.input_parameters
)
# Verify all the required parameters are specified.
required_names = {
x["Name"] for x in rule_info["Parameters"] if not x["Optional"]
}
diffs = required_names.difference(set(param_names))
if diffs:
raise InvalidParameterValueException(
"The required parameter ["
+ ", ".join(sorted(diffs))
+ "] is not present in the inputParameters"
)
# boto3 doesn't appear to be checking for valid types in the
        # InputParameters. It did give an error if an unquoted number was
# used: "Blank spaces are not acceptable for input parameter:
# MinimumPasswordLength. InputParameters':
# '{"RequireNumbers":"true","MinimumPasswordLength":10}'
# but I'm not going to attempt to detect that error. I could
# check for ints, floats, strings and stringmaps, but boto3 doesn't
# check.
# WARNING: The AWS documentation indicates MaximumExecutionFrequency
# can be specified for managed rules triggered at a periodic frequency.
# However, boto3 allows a MaximumExecutionFrequency to be specified
        # for an AWS managed rule regardless of the frequency type. Also of
# interest: triggers of "Configuration Changes and Periodic",
# i.e., both trigger types. But again, the trigger type is ignored.
# if rule_info["Trigger type"] == "Configuration changes":
# if self.maximum_execution_frequency:
# raise InvalidParameterValueException(
# "A maximum execution frequency is not allowed for "
# "rules triggered by configuration changes"
# )
#
# WARNING: boto3's describe_config_rule is not showing the
# MaximumExecutionFrequency value as being updated, but the AWS
# console shows the default value on the console. The default value
# is used even if the rule is non-periodic
# if "Periodic" in rule_info["Trigger type"]:
# if not self.maximum_execution_frequency:
# self.maximum_execution_frequency = SourceDetail.DEFAULT_FREQUENCY
# if not self.maximum_execution_frequency:
# self.maximum_execution_frequency = SourceDetail.DEFAULT_FREQUENCY
# Verify the rule is allowed for this region -- not yet implemented.
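# Illustrative sketch (not part of the upstream module): the subset/difference
# checks in validate_managed_rule() reduced to plain set logic, using made-up
# parameter metadata rather than MANAGED_RULES_CONSTRAINTS.
def _example_parameter_check(supplied_names):
    parameters = [
        {"Name": "MinimumPasswordLength", "Optional": False},
        {"Name": "RequireNumbers", "Optional": True},
    ]
    allowed = {p["Name"] for p in parameters}
    required = {p["Name"] for p in parameters if not p["Optional"]}
    unknown = set(supplied_names) - allowed  # would trigger "Unknown parameters"
    missing = required - set(supplied_names)  # would trigger "required parameter"
    return unknown, missing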
class ConfigBackend(BaseBackend):
def __init__(self, region=None):
self.recorders = {}
self.delivery_channels = {}
self.config_aggregators = {}
self.aggregation_authorizations = {}
self.organization_conformance_packs = {}
self.config_rules = {}
self.config_schema = None
@staticmethod
def default_vpc_endpoint_service(service_region, zones):
"""List of dicts representing default VPC endpoints for this service."""
return BaseBackend.default_vpc_endpoint_service_factory(
service_region, zones, "config"
)
def _validate_resource_types(self, resource_list):
if not self.config_schema:
self.config_schema = AWSServiceSpec(
path="data/config/2014-11-12/service-2.json"
)
# Verify that each entry exists in the supported list:
bad_list = []
for resource in resource_list:
if resource not in self.config_schema.shapes["ResourceType"]["enum"]:
bad_list.append(resource)
if bad_list:
raise InvalidResourceTypeException(
bad_list, self.config_schema.shapes["ResourceType"]["enum"]
)
def _validate_delivery_snapshot_properties(self, properties):
if not self.config_schema:
self.config_schema = AWSServiceSpec(
path="data/config/2014-11-12/service-2.json"
)
# Verify that the deliveryFrequency is set to an acceptable value:
if (
properties.get("deliveryFrequency", None)
not in self.config_schema.shapes["MaximumExecutionFrequency"]["enum"]
):
raise InvalidDeliveryFrequency(
properties.get("deliveryFrequency", None),
self.config_schema.shapes["MaximumExecutionFrequency"]["enum"],
)
def put_configuration_aggregator(self, config_aggregator, region):
# Validate the name:
if len(config_aggregator["ConfigurationAggregatorName"]) > 256:
raise NameTooLongException(
config_aggregator["ConfigurationAggregatorName"],
"configurationAggregatorName",
)
account_sources = None
org_source = None
# Tag validation:
tags = validate_tags(config_aggregator.get("Tags", []))
# Exception if both AccountAggregationSources and
# OrganizationAggregationSource are supplied:
if config_aggregator.get("AccountAggregationSources") and config_aggregator.get(
"OrganizationAggregationSource"
):
raise InvalidParameterValueException(
"The configuration aggregator cannot be created because your "
"request contains both the AccountAggregationSource and the "
"OrganizationAggregationSource. Include only one aggregation "
"source and try again."
)
# If neither are supplied:
if not config_aggregator.get(
"AccountAggregationSources"
) and not config_aggregator.get("OrganizationAggregationSource"):
raise InvalidParameterValueException(
"The configuration aggregator cannot be created because your "
"request is missing either the AccountAggregationSource or "
"the OrganizationAggregationSource. Include the "
"appropriate aggregation source and try again."
)
if config_aggregator.get("AccountAggregationSources"):
# Currently, only 1 account aggregation source can be set:
if len(config_aggregator["AccountAggregationSources"]) > 1:
raise TooManyAccountSources(
len(config_aggregator["AccountAggregationSources"])
)
account_sources = []
for source in config_aggregator["AccountAggregationSources"]:
account_sources.append(
AccountAggregatorSource(
source["AccountIds"],
aws_regions=source.get("AwsRegions"),
all_aws_regions=source.get("AllAwsRegions"),
)
)
else:
org_source = OrganizationAggregationSource(
config_aggregator["OrganizationAggregationSource"]["RoleArn"],
aws_regions=config_aggregator["OrganizationAggregationSource"].get(
"AwsRegions"
),
all_aws_regions=config_aggregator["OrganizationAggregationSource"].get(
"AllAwsRegions"
),
)
# Grab the existing one if it exists and update it:
if not self.config_aggregators.get(
config_aggregator["ConfigurationAggregatorName"]
):
aggregator = ConfigAggregator(
config_aggregator["ConfigurationAggregatorName"],
region,
account_sources=account_sources,
org_source=org_source,
tags=tags,
)
self.config_aggregators[
config_aggregator["ConfigurationAggregatorName"]
] = aggregator
else:
aggregator = self.config_aggregators[
config_aggregator["ConfigurationAggregatorName"]
]
aggregator.tags = tags
aggregator.account_aggregation_sources = account_sources
aggregator.organization_aggregation_source = org_source
aggregator.last_updated_time = datetime2int(datetime.utcnow())
return aggregator.to_dict()
def describe_configuration_aggregators(self, names, token, limit):
limit = DEFAULT_PAGE_SIZE if not limit or limit < 0 else limit
agg_list = []
result = {"ConfigurationAggregators": []}
if names:
for name in names:
if not self.config_aggregators.get(name):
raise NoSuchConfigurationAggregatorException(number=len(names))
agg_list.append(name)
else:
agg_list = list(self.config_aggregators.keys())
# Empty?
if not agg_list:
return result
# Sort by name:
sorted_aggregators = sorted(agg_list)
# Get the start:
if not token:
start = 0
else:
# Tokens for this moto feature are just the next names of the items in the list:
if not self.config_aggregators.get(token):
raise InvalidNextTokenException()
start = sorted_aggregators.index(token)
# Get the list of items to collect:
agg_list = sorted_aggregators[start : (start + limit)]
result["ConfigurationAggregators"] = [
self.config_aggregators[agg].to_dict() for agg in agg_list
]
if len(sorted_aggregators) > (start + limit):
result["NextToken"] = sorted_aggregators[start + limit]
return result
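    # Illustrative sketch (not part of the upstream backend): the pagination
    # tokens used in this module are simply the name of the next item in the
    # sorted list. This helper mirrors the slicing logic above.
    @staticmethod
    def _example_name_token_page(names, token=None, limit=None):
        limit = limit or DEFAULT_PAGE_SIZE
        ordered = sorted(names)
        start = ordered.index(token) if token else 0
        page = ordered[start : start + limit]
        next_token = ordered[start + limit] if len(ordered) > start + limit else None
        return page, next_token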
def delete_configuration_aggregator(self, config_aggregator):
if not self.config_aggregators.get(config_aggregator):
raise NoSuchConfigurationAggregatorException()
del self.config_aggregators[config_aggregator]
def put_aggregation_authorization(
self, current_region, authorized_account, authorized_region, tags
):
# Tag validation:
tags = validate_tags(tags or [])
# Does this already exist?
key = "{}/{}".format(authorized_account, authorized_region)
agg_auth = self.aggregation_authorizations.get(key)
if not agg_auth:
agg_auth = ConfigAggregationAuthorization(
current_region, authorized_account, authorized_region, tags=tags
)
self.aggregation_authorizations[
"{}/{}".format(authorized_account, authorized_region)
] = agg_auth
else:
# Only update the tags:
agg_auth.tags = tags
return agg_auth.to_dict()
def describe_aggregation_authorizations(self, token, limit):
limit = DEFAULT_PAGE_SIZE if not limit or limit < 0 else limit
result = {"AggregationAuthorizations": []}
if not self.aggregation_authorizations:
return result
# Sort by name:
sorted_authorizations = sorted(self.aggregation_authorizations.keys())
# Get the start:
if not token:
start = 0
else:
# Tokens for this moto feature are just the next names of the items in the list:
if not self.aggregation_authorizations.get(token):
raise InvalidNextTokenException()
start = sorted_authorizations.index(token)
# Get the list of items to collect:
auth_list = sorted_authorizations[start : (start + limit)]
result["AggregationAuthorizations"] = [
self.aggregation_authorizations[auth].to_dict() for auth in auth_list
]
if len(sorted_authorizations) > (start + limit):
result["NextToken"] = sorted_authorizations[start + limit]
return result
def delete_aggregation_authorization(self, authorized_account, authorized_region):
# This will always return a 200 -- regardless if there is or isn't an existing
# aggregation authorization.
key = "{}/{}".format(authorized_account, authorized_region)
self.aggregation_authorizations.pop(key, None)
def put_configuration_recorder(self, config_recorder):
# Validate the name:
if not config_recorder.get("name"):
raise InvalidConfigurationRecorderNameException(config_recorder.get("name"))
if len(config_recorder.get("name")) > 256:
raise NameTooLongException(
config_recorder.get("name"), "configurationRecorder.name"
)
# We're going to assume that the passed in Role ARN is correct.
# Config currently only allows 1 configuration recorder for an account:
if len(self.recorders) == 1 and not self.recorders.get(config_recorder["name"]):
raise MaxNumberOfConfigurationRecordersExceededException(
config_recorder["name"]
)
# Is this updating an existing one?
recorder_status = None
if self.recorders.get(config_recorder["name"]):
recorder_status = self.recorders[config_recorder["name"]].status
# Validate the Recording Group:
if config_recorder.get("recordingGroup") is None:
recording_group = RecordingGroup()
else:
rgroup = config_recorder["recordingGroup"]
# If an empty dict is passed in, then bad:
if not rgroup:
raise InvalidRecordingGroupException()
# Can't have both the resource types specified and the other flags as True.
if rgroup.get("resourceTypes") and (
rgroup.get("allSupported", False)
or rgroup.get("includeGlobalResourceTypes", False)
):
raise InvalidRecordingGroupException()
# Must supply resourceTypes if 'allSupported' is not supplied:
if not rgroup.get("allSupported") and not rgroup.get("resourceTypes"):
raise InvalidRecordingGroupException()
# Validate that the list provided is correct:
self._validate_resource_types(rgroup.get("resourceTypes", []))
recording_group = RecordingGroup(
all_supported=rgroup.get("allSupported", True),
include_global_resource_types=rgroup.get(
"includeGlobalResourceTypes", False
),
resource_types=rgroup.get("resourceTypes", []),
)
self.recorders[config_recorder["name"]] = ConfigRecorder(
config_recorder["roleARN"],
recording_group,
name=config_recorder["name"],
status=recorder_status,
)
def describe_configuration_recorders(self, recorder_names):
recorders = []
if recorder_names:
for rname in recorder_names:
if not self.recorders.get(rname):
raise NoSuchConfigurationRecorderException(rname)
# Format the recorder:
recorders.append(self.recorders[rname].to_dict())
else:
for recorder in self.recorders.values():
recorders.append(recorder.to_dict())
return recorders
def describe_configuration_recorder_status(self, recorder_names):
recorders = []
if recorder_names:
for rname in recorder_names:
if not self.recorders.get(rname):
raise NoSuchConfigurationRecorderException(rname)
# Format the recorder:
recorders.append(self.recorders[rname].status.to_dict())
else:
for recorder in self.recorders.values():
recorders.append(recorder.status.to_dict())
return recorders
def put_delivery_channel(self, delivery_channel):
# Must have a configuration recorder:
if not self.recorders:
raise NoAvailableConfigurationRecorderException()
# Validate the name:
if not delivery_channel.get("name"):
raise InvalidDeliveryChannelNameException(delivery_channel.get("name"))
if len(delivery_channel.get("name")) > 256:
raise NameTooLongException(
delivery_channel.get("name"), "deliveryChannel.name"
)
# We are going to assume that the bucket exists -- but will verify if
# the bucket provided is blank:
if not delivery_channel.get("s3BucketName"):
raise NoSuchBucketException()
# We are going to assume that the bucket has the correct policy
# attached to it. We are only going to verify
# if the prefix provided is not an empty string:
if delivery_channel.get("s3KeyPrefix", None) == "":
raise InvalidS3KeyPrefixException()
# Ditto for SNS -- Only going to assume that the ARN provided is not
# an empty string:
if delivery_channel.get("snsTopicARN", None) == "":
raise InvalidSNSTopicARNException()
# Config currently only allows 1 delivery channel for an account:
if len(self.delivery_channels) == 1 and not self.delivery_channels.get(
delivery_channel["name"]
):
raise MaxNumberOfDeliveryChannelsExceededException(delivery_channel["name"])
if not delivery_channel.get("configSnapshotDeliveryProperties"):
dprop = None
else:
# Validate the config snapshot delivery properties:
self._validate_delivery_snapshot_properties(
delivery_channel["configSnapshotDeliveryProperties"]
)
dprop = ConfigDeliverySnapshotProperties(
delivery_channel["configSnapshotDeliveryProperties"][
"deliveryFrequency"
]
)
self.delivery_channels[delivery_channel["name"]] = ConfigDeliveryChannel(
delivery_channel["name"],
delivery_channel["s3BucketName"],
prefix=delivery_channel.get("s3KeyPrefix", None),
sns_arn=delivery_channel.get("snsTopicARN", None),
snapshot_properties=dprop,
)
def describe_delivery_channels(self, channel_names):
channels = []
if channel_names:
for cname in channel_names:
if not self.delivery_channels.get(cname):
raise NoSuchDeliveryChannelException(cname)
# Format the delivery channel:
channels.append(self.delivery_channels[cname].to_dict())
else:
for channel in self.delivery_channels.values():
channels.append(channel.to_dict())
return channels
def start_configuration_recorder(self, recorder_name):
if not self.recorders.get(recorder_name):
raise NoSuchConfigurationRecorderException(recorder_name)
# Must have a delivery channel available as well:
if not self.delivery_channels:
raise NoAvailableDeliveryChannelException()
# Start recording:
self.recorders[recorder_name].status.start()
def stop_configuration_recorder(self, recorder_name):
if not self.recorders.get(recorder_name):
raise NoSuchConfigurationRecorderException(recorder_name)
# Stop recording:
self.recorders[recorder_name].status.stop()
def delete_configuration_recorder(self, recorder_name):
if not self.recorders.get(recorder_name):
raise NoSuchConfigurationRecorderException(recorder_name)
del self.recorders[recorder_name]
def delete_delivery_channel(self, channel_name):
if not self.delivery_channels.get(channel_name):
raise NoSuchDeliveryChannelException(channel_name)
# Check if a channel is recording -- if so, bad -- (there can only be 1 recorder):
for recorder in self.recorders.values():
if recorder.status.recording:
raise LastDeliveryChannelDeleteFailedException(channel_name)
del self.delivery_channels[channel_name]
def list_discovered_resources(
self,
resource_type,
backend_region,
resource_ids,
resource_name,
limit,
next_token,
):
"""Queries against AWS Config (non-aggregated) listing function.
The listing function must exist for the resource backend.
:param resource_type:
:param backend_region:
        :param resource_ids:
        :param resource_name:
:param limit:
:param next_token:
:return:
"""
identifiers = []
new_token = None
limit = limit or DEFAULT_PAGE_SIZE
if limit > DEFAULT_PAGE_SIZE:
raise InvalidLimitException(limit)
if resource_ids and resource_name:
raise InvalidResourceParameters()
# Only 20 maximum Resource IDs:
if resource_ids and len(resource_ids) > 20:
raise TooManyResourceIds()
# If resource type exists and the backend region is implemented in
# moto, then call upon the resource type's Config Query class to
# retrieve the list of resources that match the criteria:
if RESOURCE_MAP.get(resource_type, {}):
# Is this a global resource type? -- if so, re-write the region to 'global':
backend_query_region = (
backend_region # Always provide the backend this request arrived from.
)
if RESOURCE_MAP[resource_type].backends.get("global"):
backend_region = "global"
            # For non-aggregated queries, we only care about the
# backend_region. Need to verify that moto has implemented
# the region for the given backend:
if RESOURCE_MAP[resource_type].backends.get(backend_region):
# Fetch the resources for the backend's region:
identifiers, new_token = RESOURCE_MAP[
resource_type
].list_config_service_resources(
resource_ids,
resource_name,
limit,
next_token,
backend_region=backend_query_region,
)
resource_identifiers = []
for identifier in identifiers:
item = {"resourceType": identifier["type"], "resourceId": identifier["id"]}
# Some resource types lack names:
if identifier.get("name"):
item["resourceName"] = identifier["name"]
resource_identifiers.append(item)
result = {"resourceIdentifiers": resource_identifiers}
if new_token:
result["nextToken"] = new_token
return result
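    # Illustrative sketch (not part of the upstream backend): the region
    # rewrite used above for global resource types, as a standalone helper.
    # RESOURCE_MAP entries expose a `backends` mapping keyed by region name.
    @staticmethod
    def _example_lookup_regions(resource_type, request_region):
        query_region = request_region  # always tell the backend where the request came from
        backend_region = request_region
        if RESOURCE_MAP[resource_type].backends.get("global"):
            backend_region = "global"  # global services live under one key
        return backend_region, query_region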
def list_aggregate_discovered_resources(
self, aggregator_name, resource_type, filters, limit, next_token
):
"""Queries AWS Config listing function that must exist for resource backend.
        As far as moto goes -- the only real difference between this function
        and the `list_discovered_resources` function is that this one requires
        a Config Aggregator to be set up a priori and can search based on
        resource regions.
:param aggregator_name:
:param resource_type:
:param filters:
:param limit:
:param next_token:
:return:
"""
if not self.config_aggregators.get(aggregator_name):
raise NoSuchConfigurationAggregatorException()
identifiers = []
new_token = None
filters = filters or {}
limit = limit or DEFAULT_PAGE_SIZE
if limit > DEFAULT_PAGE_SIZE:
raise InvalidLimitException(limit)
# If the resource type exists and the backend region is implemented
# in moto, then call upon the resource type's Config Query class to
# retrieve the list of resources that match the criteria:
if RESOURCE_MAP.get(resource_type, {}):
# We only care about a filter's Region, Resource Name, and Resource ID:
resource_region = filters.get("Region")
resource_id = [filters["ResourceId"]] if filters.get("ResourceId") else None
resource_name = filters.get("ResourceName")
identifiers, new_token = RESOURCE_MAP[
resource_type
].list_config_service_resources(
resource_id,
resource_name,
limit,
next_token,
resource_region=resource_region,
aggregator=self.config_aggregators.get(aggregator_name).__dict__,
)
resource_identifiers = []
for identifier in identifiers:
item = {
"SourceAccountId": DEFAULT_ACCOUNT_ID,
"SourceRegion": identifier["region"],
"ResourceType": identifier["type"],
"ResourceId": identifier["id"],
}
if identifier.get("name"):
item["ResourceName"] = identifier["name"]
resource_identifiers.append(item)
result = {"ResourceIdentifiers": resource_identifiers}
if new_token:
result["NextToken"] = new_token
return result
def get_resource_config_history(self, resource_type, resource_id, backend_region):
"""Returns configuration of resource for the current regional backend.
Item returned in AWS Config format.
NOTE: This is --NOT-- returning history as it is not supported in
moto at this time. (PR's welcome!)
As such, the later_time, earlier_time, limit, and next_token are
ignored as this will only return 1 item. (If no items, it raises an
exception).
"""
# If the type isn't implemented then we won't find the item:
if resource_type not in RESOURCE_MAP:
raise ResourceNotDiscoveredException(resource_type, resource_id)
# Is the resource type global?
backend_query_region = (
backend_region # Always provide the backend this request arrived from.
)
if RESOURCE_MAP[resource_type].backends.get("global"):
backend_region = "global"
# If the backend region isn't implemented then we won't find the item:
if not RESOURCE_MAP[resource_type].backends.get(backend_region):
raise ResourceNotDiscoveredException(resource_type, resource_id)
# Get the item:
item = RESOURCE_MAP[resource_type].get_config_resource(
resource_id, backend_region=backend_query_region
)
if not item:
raise ResourceNotDiscoveredException(resource_type, resource_id)
item["accountId"] = DEFAULT_ACCOUNT_ID
return {"configurationItems": [item]}
def batch_get_resource_config(self, resource_keys, backend_region):
"""Returns configuration of resource for the current regional backend.
Item is returned in AWS Config format.
:param resource_keys:
:param backend_region:
"""
# Can't have more than 100 items
if len(resource_keys) > 100:
raise TooManyResourceKeys(
["com.amazonaws.starling.dove.ResourceKey@12345"] * len(resource_keys)
)
results = []
for resource in resource_keys:
# Does the resource type exist?
if not RESOURCE_MAP.get(resource["resourceType"]):
# Not found so skip.
continue
# Is the resource type global?
config_backend_region = backend_region
backend_query_region = (
backend_region # Always provide the backend this request arrived from.
)
if RESOURCE_MAP[resource["resourceType"]].backends.get("global"):
config_backend_region = "global"
# If the backend region isn't implemented then we won't find the item:
if not RESOURCE_MAP[resource["resourceType"]].backends.get(
config_backend_region
):
continue
# Get the item:
item = RESOURCE_MAP[resource["resourceType"]].get_config_resource(
resource["resourceId"], backend_region=backend_query_region
)
if not item:
continue
item["accountId"] = DEFAULT_ACCOUNT_ID
results.append(item)
return {
"baseConfigurationItems": results,
"unprocessedResourceKeys": [],
} # At this time, moto is not adding unprocessed items.
def batch_get_aggregate_resource_config(
self, aggregator_name, resource_identifiers
):
"""Returns configuration of resource for current regional backend.
Item is returned in AWS Config format.
        As far as moto goes -- the only real difference between this function
        and the `batch_get_resource_config` function is that this one requires
        a Config Aggregator to be set up a priori and can search based on
        resource regions.
Note: moto will IGNORE the resource account ID in the search query.
"""
if not self.config_aggregators.get(aggregator_name):
raise NoSuchConfigurationAggregatorException()
# Can't have more than 100 items
if len(resource_identifiers) > 100:
raise TooManyResourceKeys(
["com.amazonaws.starling.dove.AggregateResourceIdentifier@12345"]
* len(resource_identifiers)
)
found = []
not_found = []
for identifier in resource_identifiers:
resource_type = identifier["ResourceType"]
resource_region = identifier["SourceRegion"]
resource_id = identifier["ResourceId"]
resource_name = identifier.get("ResourceName", None)
# Does the resource type exist?
if not RESOURCE_MAP.get(resource_type):
not_found.append(identifier)
continue
# Get the item:
item = RESOURCE_MAP[resource_type].get_config_resource(
resource_id,
resource_name=resource_name,
resource_region=resource_region,
)
if not item:
not_found.append(identifier)
continue
item["accountId"] = DEFAULT_ACCOUNT_ID
# The 'tags' field is not included in aggregate results for some reason...
item.pop("tags", None)
found.append(item)
return {
"BaseConfigurationItems": found,
"UnprocessedResourceIdentifiers": not_found,
}
def put_evaluations(self, evaluations=None, result_token=None, test_mode=False):
if not evaluations:
            raise InvalidParameterValueException(
                "The Evaluations object in your request cannot be null. "
                "Add the required parameters and try again."
)
if not result_token:
raise InvalidResultTokenException()
# Moto only supports PutEvaluations with test mode currently
# (missing rule and token support).
if not test_mode:
raise NotImplementedError(
"PutEvaluations without TestMode is not yet implemented"
)
return {
"FailedEvaluations": [],
} # At this time, moto is not adding failed evaluations.
def put_organization_conformance_pack(
self,
region,
name,
template_s3_uri,
template_body,
delivery_s3_bucket,
delivery_s3_key_prefix,
input_parameters,
excluded_accounts,
):
# a real validation of the content of the template is missing at the moment
if not template_s3_uri and not template_body:
raise ValidationException("Template body is invalid")
        # Guard against a missing URI so re.match() is never handed None:
        if template_s3_uri and not re.match(r"s3://.*", template_s3_uri):
raise ValidationException(
"1 validation error detected: "
"Value '{}' at 'templateS3Uri' failed to satisfy constraint: "
"Member must satisfy regular expression pattern: "
"s3://.*".format(template_s3_uri)
)
pack = self.organization_conformance_packs.get(name)
if pack:
pack.update(
delivery_s3_bucket=delivery_s3_bucket,
delivery_s3_key_prefix=delivery_s3_key_prefix,
input_parameters=input_parameters,
excluded_accounts=excluded_accounts,
)
else:
pack = OrganizationConformancePack(
region=region,
name=name,
delivery_s3_bucket=delivery_s3_bucket,
delivery_s3_key_prefix=delivery_s3_key_prefix,
input_parameters=input_parameters,
excluded_accounts=excluded_accounts,
)
self.organization_conformance_packs[name] = pack
return {
"OrganizationConformancePackArn": pack.organization_conformance_pack_arn
}
def describe_organization_conformance_packs(self, names):
packs = []
for name in names:
pack = self.organization_conformance_packs.get(name)
if not pack:
raise NoSuchOrganizationConformancePackException(
"One or more organization conformance packs with "
"specified names are not present. Ensure your names are "
"correct and try your request again later."
)
packs.append(pack.to_dict())
return {"OrganizationConformancePacks": packs}
def describe_organization_conformance_pack_statuses(self, names):
packs = []
statuses = []
if names:
for name in names:
pack = self.organization_conformance_packs.get(name)
if not pack:
raise NoSuchOrganizationConformancePackException(
"One or more organization conformance packs with "
"specified names are not present. Ensure your names "
"are correct and try your request again later."
)
packs.append(pack)
else:
packs = list(self.organization_conformance_packs.values())
for pack in packs:
statuses.append(
{
"OrganizationConformancePackName": pack.organization_conformance_pack_name,
"Status": pack._status,
"LastUpdateTime": pack.last_update_time,
}
)
return {"OrganizationConformancePackStatuses": statuses}
def get_organization_conformance_pack_detailed_status(self, name):
pack = self.organization_conformance_packs.get(name)
if not pack:
raise NoSuchOrganizationConformancePackException(
"One or more organization conformance packs with specified names are not present. "
"Ensure your names are correct and try your request again later."
)
# actually here would be a list of all accounts in the organization
statuses = [
{
"AccountId": DEFAULT_ACCOUNT_ID,
"ConformancePackName": "OrgConformsPack-{0}".format(
pack._unique_pack_name
),
"Status": pack._status,
"LastUpdateTime": datetime2int(datetime.utcnow()),
}
]
return {"OrganizationConformancePackDetailedStatuses": statuses}
def delete_organization_conformance_pack(self, name):
pack = self.organization_conformance_packs.get(name)
if not pack:
raise NoSuchOrganizationConformancePackException(
"Could not find an OrganizationConformancePack for given "
"request with resourceName {}".format(name)
)
self.organization_conformance_packs.pop(name)
def _match_arn(self, resource_arn):
"""Return config instance that has a matching ARN."""
# The allowed resources are ConfigRule, ConfigurationAggregator,
# and AggregatorAuthorization.
allowed_resources = [
{
"configs": self.config_aggregators,
"arn_attribute": "configuration_aggregator_arn",
},
{
"configs": self.aggregation_authorizations,
"arn_attribute": "aggregation_authorization_arn",
},
{"configs": self.config_rules, "arn_attribute": "config_rule_arn"},
]
# Find matching config for given resource_arn among all the
# allowed config resources.
matched_config = None
for resource in allowed_resources:
for config in resource["configs"].values():
if resource_arn == getattr(config, resource["arn_attribute"]):
matched_config = config
break
if not matched_config:
raise ResourceNotFoundException(resource_arn)
return matched_config
def tag_resource(self, resource_arn, tags):
"""Add tags in config with a matching ARN."""
# Tag validation:
tags = validate_tags(tags)
# Find config with a matching ARN.
matched_config = self._match_arn(resource_arn)
# Merge the new tags with the existing tags.
matched_config.tags.update(tags)
def untag_resource(self, resource_arn, tag_keys):
"""Remove tags in config with a matching ARN.
If the tags in the tag_keys don't match any keys for that
ARN, they're just ignored.
"""
if len(tag_keys) > MAX_TAGS_IN_ARG:
raise TooManyTags(tag_keys)
# Find config with a matching ARN.
matched_config = self._match_arn(resource_arn)
for tag_key in tag_keys:
matched_config.tags.pop(tag_key, None)
def list_tags_for_resource(
self, resource_arn, limit, next_token
): # pylint: disable=unused-argument
"""Return list of tags for AWS Config resource."""
# The limit argument is essentially ignored as a config instance
# can only have 50 tags, but we'll check the argument anyway.
# Although the boto3 documentation indicates the limit is 50, boto3
# accepts a limit value up to 100 as does the AWS CLI.
limit = limit or DEFAULT_PAGE_SIZE
if limit > DEFAULT_PAGE_SIZE:
raise InvalidLimitException(limit)
matched_config = self._match_arn(resource_arn)
return {
"Tags": [
{"Key": k, "Value": v} for k, v in sorted(matched_config.tags.items())
]
}
def put_config_rule(self, region, config_rule, tags=None):
"""Add/Update config rule for evaluating resource compliance.
        TBD - Only the "accounting" of config rules is handled at the
moment. No events are created or triggered. There is no
interaction with the config recorder.
"""
# If there is no rule_name, use the ARN or ID to get the
# rule_name.
rule_name = config_rule.get("ConfigRuleName")
if rule_name:
if len(rule_name) > 128:
raise NameTooLongException(rule_name, "configRule.configRuleName", 128)
else:
# Can we find the rule using the ARN or ID?
rule_arn = config_rule.get("ConfigRuleArn")
rule_id = config_rule.get("ConfigRuleId")
if not rule_arn and not rule_id:
raise InvalidParameterValueException(
"One or more identifiers needs to be provided. Provide "
"Name or Id or Arn"
)
for config_rule_obj in self.config_rules.values():
if rule_id and config_rule_obj.config_rule_id == rule_id:
rule_name = config_rule_obj.config_rule_name
break
if rule_arn and config_rule_obj.config_rule_arn == rule_arn:
rule_name = config_rule_obj.config_rule_name
break
else:
raise InvalidParameterValueException(
"One or more identifiers needs to be provided. Provide "
"Name or Id or Arn"
)
tags = validate_tags(tags or [])
# With the rule_name, determine whether it's for an existing rule
# or whether a new rule should be created.
rule = self.config_rules.get(rule_name)
if rule:
# Rule exists. Make sure it isn't in use for another activity.
rule_state = rule.config_rule_state
if rule_state != "ACTIVE":
activity = "deleted" if rule_state.startswith("DELET") else "evaluated"
raise ResourceInUseException(
f"The rule {rule_name} is currently being {activity}. "
f"Please retry after some time"
)
# Update the current rule.
rule.modify_fields(region, config_rule, tags)
else:
# Create a new ConfigRule if the limit hasn't been reached.
if len(self.config_rules) == ConfigRule.MAX_RULES:
raise MaxNumberOfConfigRulesExceededException(
rule_name, ConfigRule.MAX_RULES
)
rule = ConfigRule(region, config_rule, tags)
self.config_rules[rule_name] = rule
return ""
def describe_config_rules(self, config_rule_names, next_token):
"""Return details for the given ConfigRule names or for all rules."""
result = {"ConfigRules": []}
if not self.config_rules:
return result
rule_list = []
if config_rule_names:
for name in config_rule_names:
if not self.config_rules.get(name):
raise NoSuchConfigRuleException(name)
rule_list.append(name)
else:
rule_list = list(self.config_rules.keys())
        # AWS does not sort the rules, but moto sorts them so that the
        # name-based pagination tokens are deterministic.
sorted_rules = sorted(rule_list)
start = 0
if next_token:
if not self.config_rules.get(next_token):
raise InvalidNextTokenException()
start = sorted_rules.index(next_token)
rule_list = sorted_rules[start : start + CONFIG_RULE_PAGE_SIZE]
result["ConfigRules"] = [self.config_rules[x].to_dict() for x in rule_list]
if len(sorted_rules) > (start + CONFIG_RULE_PAGE_SIZE):
result["NextToken"] = sorted_rules[start + CONFIG_RULE_PAGE_SIZE]
return result
def delete_config_rule(self, rule_name):
"""Delete config rule used for evaluating resource compliance."""
rule = self.config_rules.get(rule_name)
if not rule:
raise NoSuchConfigRuleException(rule_name)
# The following logic is not applicable for moto as far as I can tell.
# if rule.config_rule_state == "DELETING":
# raise ResourceInUseException(
# f"The rule {rule_name} is currently being deleted. Please "
# f"retry after some time"
# )
rule.config_rule_state = "DELETING"
self.config_rules.pop(rule_name)
config_backends = BackendDict(ConfigBackend, "config")
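# Illustrative sketch (not part of the upstream module): exercising the
# backend end to end. The region, role ARN, and bucket name are placeholders;
# this helper is never invoked by moto itself.
def _example_backend_roundtrip():
    backend = ConfigBackend()
    backend.put_configuration_recorder(
        {
            "name": "default",
            "roleARN": "arn:aws:iam::123456789012:role/example",
            "recordingGroup": {"allSupported": True},
        }
    )
    backend.put_delivery_channel({"name": "default", "s3BucketName": "example"})
    backend.start_configuration_recorder("default")
    return backend.describe_configuration_recorder_status(["default"])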
|
"""Tests for slim.pnasnet."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from nets.nasnet import pnasnet
slim = tf.contrib.slim
class PNASNetTest(tf.test.TestCase):
def testBuildLogitsLargeModel(self):
batch_size = 5
height, width = 331, 331
num_classes = 1000
inputs = tf.random_uniform((batch_size, height, width, 3))
tf.train.create_global_step()
with slim.arg_scope(pnasnet.pnasnet_large_arg_scope()):
logits, end_points = pnasnet.build_pnasnet_large(inputs, num_classes)
auxlogits = end_points['AuxLogits']
predictions = end_points['Predictions']
self.assertListEqual(auxlogits.get_shape().as_list(),
[batch_size, num_classes])
self.assertListEqual(logits.get_shape().as_list(),
[batch_size, num_classes])
self.assertListEqual(predictions.get_shape().as_list(),
[batch_size, num_classes])
def testBuildLogitsMobileModel(self):
batch_size = 5
height, width = 224, 224
num_classes = 1000
inputs = tf.random_uniform((batch_size, height, width, 3))
tf.train.create_global_step()
with slim.arg_scope(pnasnet.pnasnet_mobile_arg_scope()):
logits, end_points = pnasnet.build_pnasnet_mobile(inputs, num_classes)
auxlogits = end_points['AuxLogits']
predictions = end_points['Predictions']
self.assertListEqual(auxlogits.get_shape().as_list(),
[batch_size, num_classes])
self.assertListEqual(logits.get_shape().as_list(),
[batch_size, num_classes])
self.assertListEqual(predictions.get_shape().as_list(),
[batch_size, num_classes])
def testBuildNonExistingLayerLargeModel(self):
"""Tests that the model is built correctly without unnecessary layers."""
inputs = tf.random_uniform((5, 331, 331, 3))
tf.train.create_global_step()
with slim.arg_scope(pnasnet.pnasnet_large_arg_scope()):
pnasnet.build_pnasnet_large(inputs, 1000)
vars_names = [x.op.name for x in tf.trainable_variables()]
self.assertIn('cell_stem_0/1x1/weights', vars_names)
self.assertNotIn('cell_stem_1/comb_iter_0/right/1x1/weights', vars_names)
def testBuildNonExistingLayerMobileModel(self):
"""Tests that the model is built correctly without unnecessary layers."""
inputs = tf.random_uniform((5, 224, 224, 3))
tf.train.create_global_step()
with slim.arg_scope(pnasnet.pnasnet_mobile_arg_scope()):
pnasnet.build_pnasnet_mobile(inputs, 1000)
vars_names = [x.op.name for x in tf.trainable_variables()]
self.assertIn('cell_stem_0/1x1/weights', vars_names)
self.assertNotIn('cell_stem_1/comb_iter_0/right/1x1/weights', vars_names)
def testBuildPreLogitsLargeModel(self):
batch_size = 5
height, width = 331, 331
num_classes = None
inputs = tf.random_uniform((batch_size, height, width, 3))
tf.train.create_global_step()
with slim.arg_scope(pnasnet.pnasnet_large_arg_scope()):
net, end_points = pnasnet.build_pnasnet_large(inputs, num_classes)
self.assertFalse('AuxLogits' in end_points)
self.assertFalse('Predictions' in end_points)
self.assertTrue(net.op.name.startswith('final_layer/Mean'))
self.assertListEqual(net.get_shape().as_list(), [batch_size, 4320])
def testBuildPreLogitsMobileModel(self):
batch_size = 5
height, width = 224, 224
num_classes = None
inputs = tf.random_uniform((batch_size, height, width, 3))
tf.train.create_global_step()
with slim.arg_scope(pnasnet.pnasnet_mobile_arg_scope()):
net, end_points = pnasnet.build_pnasnet_mobile(inputs, num_classes)
self.assertFalse('AuxLogits' in end_points)
self.assertFalse('Predictions' in end_points)
self.assertTrue(net.op.name.startswith('final_layer/Mean'))
self.assertListEqual(net.get_shape().as_list(), [batch_size, 1080])
def testAllEndPointsShapesLargeModel(self):
batch_size = 5
height, width = 331, 331
num_classes = 1000
inputs = tf.random_uniform((batch_size, height, width, 3))
tf.train.create_global_step()
with slim.arg_scope(pnasnet.pnasnet_large_arg_scope()):
_, end_points = pnasnet.build_pnasnet_large(inputs, num_classes)
endpoints_shapes = {'Stem': [batch_size, 42, 42, 540],
'Cell_0': [batch_size, 42, 42, 1080],
'Cell_1': [batch_size, 42, 42, 1080],
'Cell_2': [batch_size, 42, 42, 1080],
'Cell_3': [batch_size, 42, 42, 1080],
'Cell_4': [batch_size, 21, 21, 2160],
'Cell_5': [batch_size, 21, 21, 2160],
'Cell_6': [batch_size, 21, 21, 2160],
'Cell_7': [batch_size, 21, 21, 2160],
'Cell_8': [batch_size, 11, 11, 4320],
'Cell_9': [batch_size, 11, 11, 4320],
'Cell_10': [batch_size, 11, 11, 4320],
'Cell_11': [batch_size, 11, 11, 4320],
'global_pool': [batch_size, 4320],
# Logits and predictions
'AuxLogits': [batch_size, 1000],
'Predictions': [batch_size, 1000],
'Logits': [batch_size, 1000],
}
self.assertEqual(len(end_points), 17)
self.assertItemsEqual(endpoints_shapes.keys(), end_points.keys())
for endpoint_name in endpoints_shapes:
tf.logging.info('Endpoint name: {}'.format(endpoint_name))
expected_shape = endpoints_shapes[endpoint_name]
self.assertIn(endpoint_name, end_points)
self.assertListEqual(end_points[endpoint_name].get_shape().as_list(),
expected_shape)
def testAllEndPointsShapesMobileModel(self):
batch_size = 5
height, width = 224, 224
num_classes = 1000
inputs = tf.random_uniform((batch_size, height, width, 3))
tf.train.create_global_step()
with slim.arg_scope(pnasnet.pnasnet_mobile_arg_scope()):
_, end_points = pnasnet.build_pnasnet_mobile(inputs, num_classes)
endpoints_shapes = {
'Stem': [batch_size, 28, 28, 135],
'Cell_0': [batch_size, 28, 28, 270],
'Cell_1': [batch_size, 28, 28, 270],
'Cell_2': [batch_size, 28, 28, 270],
'Cell_3': [batch_size, 14, 14, 540],
'Cell_4': [batch_size, 14, 14, 540],
'Cell_5': [batch_size, 14, 14, 540],
'Cell_6': [batch_size, 7, 7, 1080],
'Cell_7': [batch_size, 7, 7, 1080],
'Cell_8': [batch_size, 7, 7, 1080],
'global_pool': [batch_size, 1080],
# Logits and predictions
'AuxLogits': [batch_size, num_classes],
'Predictions': [batch_size, num_classes],
'Logits': [batch_size, num_classes],
}
self.assertEqual(len(end_points), 14)
self.assertItemsEqual(endpoints_shapes.keys(), end_points.keys())
for endpoint_name in endpoints_shapes:
tf.logging.info('Endpoint name: {}'.format(endpoint_name))
expected_shape = endpoints_shapes[endpoint_name]
self.assertIn(endpoint_name, end_points)
self.assertListEqual(end_points[endpoint_name].get_shape().as_list(),
expected_shape)
def testNoAuxHeadLargeModel(self):
batch_size = 5
height, width = 331, 331
num_classes = 1000
for use_aux_head in (True, False):
tf.reset_default_graph()
inputs = tf.random_uniform((batch_size, height, width, 3))
tf.train.create_global_step()
config = pnasnet.large_imagenet_config()
config.set_hparam('use_aux_head', int(use_aux_head))
with slim.arg_scope(pnasnet.pnasnet_large_arg_scope()):
_, end_points = pnasnet.build_pnasnet_large(inputs, num_classes,
config=config)
self.assertEqual('AuxLogits' in end_points, use_aux_head)
def testNoAuxHeadMobileModel(self):
batch_size = 5
height, width = 224, 224
num_classes = 1000
for use_aux_head in (True, False):
tf.reset_default_graph()
inputs = tf.random_uniform((batch_size, height, width, 3))
tf.train.create_global_step()
config = pnasnet.mobile_imagenet_config()
config.set_hparam('use_aux_head', int(use_aux_head))
with slim.arg_scope(pnasnet.pnasnet_mobile_arg_scope()):
_, end_points = pnasnet.build_pnasnet_mobile(
inputs, num_classes, config=config)
self.assertEqual('AuxLogits' in end_points, use_aux_head)
def testOverrideHParamsLargeModel(self):
batch_size = 5
height, width = 331, 331
num_classes = 1000
inputs = tf.random_uniform((batch_size, height, width, 3))
tf.train.create_global_step()
config = pnasnet.large_imagenet_config()
config.set_hparam('data_format', 'NCHW')
with slim.arg_scope(pnasnet.pnasnet_large_arg_scope()):
_, end_points = pnasnet.build_pnasnet_large(
inputs, num_classes, config=config)
self.assertListEqual(
end_points['Stem'].shape.as_list(), [batch_size, 540, 42, 42])
def testOverrideHParamsMobileModel(self):
batch_size = 5
height, width = 224, 224
num_classes = 1000
inputs = tf.random_uniform((batch_size, height, width, 3))
tf.train.create_global_step()
config = pnasnet.mobile_imagenet_config()
config.set_hparam('data_format', 'NCHW')
with slim.arg_scope(pnasnet.pnasnet_mobile_arg_scope()):
_, end_points = pnasnet.build_pnasnet_mobile(
inputs, num_classes, config=config)
self.assertListEqual(end_points['Stem'].shape.as_list(),
[batch_size, 135, 28, 28])
if __name__ == '__main__':
tf.test.main()
|
from unittest import TestCase
from dino.environ import GNEnvironment, ConfigDict, ConfigKeys
from dino.cache.redis import CacheRedis
from dino.config import RedisKeys
from datetime import datetime, timedelta
import time
__author__ = 'Oscar Eriksson <oscar.eriks@gmail.com>'
class CacheRedisTest(TestCase):
class FakeEnv(GNEnvironment):
def __init__(self):
super(CacheRedisTest.FakeEnv, self).__init__(None, ConfigDict(), skip_init=True)
self.config = ConfigDict()
self.config.set(ConfigKeys.TESTING, True)
self.cache = CacheRedis(self, 'mock')
self.node = 'test'
self.session = dict()
USER_ID = '8888'
CHANNEL_ID = '1234'
ROOM_ID = '4321'
USER_NAME = 'Batman'
CHANNEL_NAME = 'Shanghai'
ROOM_NAME = 'cool kids'
def setUp(self):
self.env = CacheRedisTest.FakeEnv()
self.cache = self.env.cache
self.cache._flushall()
def test_get_channels_with_sort_no_channels(self):
self.assertEqual(None, self.env.cache.get_channels_with_sort())
def test_get_channels_with_sort_channel_in_cache(self):
self.assertEqual(None, self.env.cache.get_channels_with_sort())
self.cache._set(RedisKeys.RKEY_CHANNELS_SORT, {'channel-id': ('channel-name', 999)})
self.assertEqual(1, len(self.env.cache.get_channels_with_sort()))
def test_get_channels_with_sort_channel_in_redis(self):
self.assertEqual(None, self.env.cache.get_channels_with_sort())
self.env.cache.redis_instance.hmset(RedisKeys.RKEY_CHANNELS_SORT, {'channel-id': '999|normal|channel-name'})
cached_value = self.env.cache.get_channels_with_sort()
self.assertEqual(1, len(cached_value))
self.assertEqual('channel-id', list(cached_value.keys())[0])
channel_name = list(cached_value.values())[0][0]
channel_sort = list(cached_value.values())[0][1]
self.assertEqual(999, channel_sort)
self.assertEqual('channel-name', channel_name)
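    # Illustrative sketch (not part of the original tests): the raw hash value
    # used above packs sort order, channel type, and name as 'sort|type|name'.
    # This helper mirrors the decoding the cache is expected to perform.
    @staticmethod
    def _parse_channel_entry(raw):
        sort_order, channel_type, channel_name = raw.split('|', 2)
        return channel_name, int(sort_order), channel_type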
def test_set_user_status(self):
self.cache.set_user_status(CacheRedisTest.USER_ID, '1')
self.assertEqual('1', self.cache.get_user_status(CacheRedisTest.USER_ID))
def test_user_check_status(self):
self.assertFalse(self.cache.user_check_status(CacheRedisTest.USER_ID, '1'))
self.cache.set_user_status(CacheRedisTest.USER_ID, '1')
self.assertTrue(self.cache.user_check_status(CacheRedisTest.USER_ID, '1'))
def test_get_not_expired(self):
self.cache._set('foo', 'bar')
self.assertEqual('bar', self.cache._get('foo'))
def test_get_expired(self):
self.cache._set('foo', 'bar', ttl=0.1)
self.assertEqual('bar', self.cache._get('foo'))
time.sleep(0.15)
self.assertEqual(None, self.cache._get('foo'))
def test_get_admin_room_after_expired(self):
self.cache.set_admin_room(CacheRedisTest.ROOM_ID)
self.assertEqual(CacheRedisTest.ROOM_ID, self.cache.get_admin_room())
key = RedisKeys.admin_room()
self.cache._del(key)
self.assertEqual(CacheRedisTest.ROOM_ID, self.cache.get_admin_room())
def test_get_global_ban_timestamp_after_expired(self):
timestamp = str(int((datetime.utcnow() + timedelta(seconds=5*60)).timestamp()))
duration = '5m'
self.cache.set_global_ban_timestamp(CacheRedisTest.USER_ID, duration, timestamp, CacheRedisTest.USER_NAME)
_dur, _time, _name = self.cache.get_global_ban_timestamp(CacheRedisTest.USER_ID)
self.assertEqual(duration, _dur)
self.assertEqual(timestamp, _time)
self.assertEqual(CacheRedisTest.USER_NAME, _name)
key = RedisKeys.banned_users()
cache_key = '%s-%s' % (key, CacheRedisTest.USER_ID)
self.cache._del(cache_key)
_dur, _time, _name = self.cache.get_global_ban_timestamp(CacheRedisTest.USER_ID)
self.assertEqual(duration, _dur)
self.assertEqual(timestamp, _time)
self.assertEqual(CacheRedisTest.USER_NAME, _name)
def test_get_room_id_for_name_after_expired(self):
self.cache.set_room_id_for_name(CacheRedisTest.CHANNEL_ID, CacheRedisTest.ROOM_NAME, CacheRedisTest.ROOM_ID)
self.assertEqual(
CacheRedisTest.ROOM_ID,
self.cache.get_room_id_for_name(CacheRedisTest.CHANNEL_ID, CacheRedisTest.ROOM_NAME))
key = RedisKeys.room_id_for_name(CacheRedisTest.CHANNEL_ID)
cache_key = '%s-%s' % (key, CacheRedisTest.ROOM_NAME)
self.cache._del(cache_key)
self.assertEqual(
CacheRedisTest.ROOM_ID,
self.cache.get_room_id_for_name(CacheRedisTest.CHANNEL_ID, CacheRedisTest.ROOM_NAME))
def test_get_user_name_after_expired(self):
self.cache.set_user_name(CacheRedisTest.USER_ID, CacheRedisTest.USER_NAME)
self.assertEqual(CacheRedisTest.USER_NAME, self.cache.get_user_name(CacheRedisTest.USER_ID))
key = RedisKeys.user_names()
cache_key = '%s-%s' % (key, CacheRedisTest.USER_ID)
self.cache._del(cache_key)
self.assertEqual(CacheRedisTest.USER_NAME, self.cache.get_user_name(CacheRedisTest.USER_ID))
def test_get_room_exists_after_expired(self):
self.assertFalse(self.cache.get_room_exists(CacheRedisTest.CHANNEL_ID, CacheRedisTest.ROOM_ID))
self.cache.set_room_exists(CacheRedisTest.CHANNEL_ID, CacheRedisTest.ROOM_ID, CacheRedisTest.ROOM_NAME)
self.assertTrue(self.cache.get_room_exists(CacheRedisTest.CHANNEL_ID, CacheRedisTest.ROOM_ID))
key = RedisKeys.rooms(CacheRedisTest.CHANNEL_ID)
cache_key = '%s-%s' % (key, CacheRedisTest.ROOM_ID)
self.cache._del(cache_key)
self.assertTrue(self.cache.get_room_exists(CacheRedisTest.CHANNEL_ID, CacheRedisTest.ROOM_ID))
def test_get_channel_exists_after_expired(self):
self.assertFalse(self.cache.get_channel_exists(CacheRedisTest.CHANNEL_ID))
self.cache.set_channel_exists(CacheRedisTest.CHANNEL_ID)
self.assertTrue(self.cache.get_channel_exists(CacheRedisTest.CHANNEL_ID))
key = RedisKeys.channel_exists()
cache_key = '%s-%s' % (key, CacheRedisTest.CHANNEL_ID)
self.cache._del(cache_key)
self.assertTrue(self.cache.get_channel_exists(CacheRedisTest.CHANNEL_ID))
def test_get_channel_name_after_expired(self):
self.assertIsNone(self.cache.get_channel_name(CacheRedisTest.CHANNEL_ID))
self.cache.set_channel_name(CacheRedisTest.CHANNEL_ID, CacheRedisTest.CHANNEL_NAME)
self.assertEqual(CacheRedisTest.CHANNEL_NAME, self.cache.get_channel_name(CacheRedisTest.CHANNEL_ID))
key = RedisKeys.channels()
cache_key = '%s-name-%s' % (key, CacheRedisTest.CHANNEL_ID)
self.cache._del(cache_key)
self.assertEqual(CacheRedisTest.CHANNEL_NAME, self.cache.get_channel_name(CacheRedisTest.CHANNEL_ID))
def test_get_user_status_after_expired(self):
self.assertIsNone(self.cache.get_user_status(CacheRedisTest.USER_ID))
self.cache.set_user_status(CacheRedisTest.USER_ID, '1')
self.assertEqual('1', self.cache.get_user_status(CacheRedisTest.USER_ID))
key = RedisKeys.user_status(CacheRedisTest.USER_ID)
self.cache._del(key)
self.assertEqual('1', self.cache.get_user_status(CacheRedisTest.USER_ID))
|
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('crowdcop_web', '0019_auto_20160412_1627'),
]
operations = [
migrations.AlterField(
model_name='tip',
name='details',
field=models.TextField(verbose_name='>What happened? (Be specific about the alleged crime and any victims).'),
),
]
|
import asyncio
import asyncio.streams
import http.server
import socket
import traceback
import warnings
from collections import deque
from contextlib import suppress
from html import escape as html_escape
from . import helpers, http
from .helpers import CeilTimeout, create_future, ensure_future
from .http import (HttpProcessingError, HttpRequestParser, PayloadWriter,
StreamWriter)
from .log import access_logger, server_logger
from .streams import EMPTY_PAYLOAD
from .web_exceptions import HTTPException
from .web_request import BaseRequest
from .web_response import Response
__all__ = ('RequestHandler', 'RequestPayloadError')
ERROR = http.RawRequestMessage(
'UNKNOWN', '/', http.HttpVersion10, {},
{}, True, False, False, False, http.URL('/'))
if hasattr(socket, 'SO_KEEPALIVE'):
def tcp_keepalive(server, transport):
sock = transport.get_extra_info('socket')
sock.setsockopt(socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1)
else:
def tcp_keepalive(server, transport): # pragma: no cover
pass
class RequestPayloadError(Exception):
"""Payload parsing error."""
class RequestHandler(asyncio.streams.FlowControlMixin, asyncio.Protocol):
"""HTTP protocol implementation.
    RequestHandler handles incoming HTTP requests. It reads the request line,
    request headers and request payload, then calls the handle_request()
    method. By default it always returns a 404 response.
    RequestHandler also handles errors in the incoming request, such as a bad
    status line, bad headers or an incomplete payload. If any error occurs,
    the connection is closed.
:param time_service: Low resolution time service
:param keepalive_timeout: number of seconds before closing
keep-alive connection
:type keepalive_timeout: int or None
:param bool tcp_keepalive: TCP keep-alive is on, default is on
:param bool debug: enable debug mode
:param logger: custom logger object
:type logger: aiohttp.log.server_logger
:param access_log: custom logging object
:type access_log: aiohttp.log.server_logger
:param str access_log_format: access log format string
:param loop: Optional event loop
:param int max_line_size: Optional maximum header line size
:param int max_field_size: Optional maximum header field size
:param int max_headers: Optional maximum header size
"""
_request_count = 0
_keepalive = False # keep transport open
def __init__(self, manager, *, loop=None,
keepalive_timeout=75, # NGINX default value is 75 secs
tcp_keepalive=True,
slow_request_timeout=None,
logger=server_logger,
access_log=access_logger,
access_log_format=helpers.AccessLogger.LOG_FORMAT,
debug=False,
max_line_size=8190,
max_headers=32768,
max_field_size=8190,
lingering_time=10.0,
max_concurrent_handlers=2,
**kwargs):
# process deprecated params
logger = kwargs.get('logger', logger)
if slow_request_timeout is not None:
warnings.warn(
'slow_request_timeout is deprecated', DeprecationWarning)
super().__init__(loop=loop)
self._loop = loop if loop is not None else asyncio.get_event_loop()
self._manager = manager
self._time_service = manager.time_service
self._request_handler = manager.request_handler
self._request_factory = manager.request_factory
self._tcp_keepalive = tcp_keepalive
self._keepalive_time = None
self._keepalive_handle = None
self._keepalive_timeout = keepalive_timeout
self._lingering_time = float(lingering_time)
self._messages = deque()
self._message_tail = b''
self._waiters = deque()
self._error_handler = None
self._request_handlers = []
self._max_concurrent_handlers = max_concurrent_handlers
self._upgrade = False
self._payload_parser = None
self._request_parser = HttpRequestParser(
self, loop,
max_line_size=max_line_size,
max_field_size=max_field_size,
max_headers=max_headers,
payload_exception=RequestPayloadError)
self.transport = None
self._reading_paused = False
self.logger = logger
self.debug = debug
self.access_log = access_log
if access_log:
self.access_logger = helpers.AccessLogger(
access_log, access_log_format)
else:
self.access_logger = None
self._close = False
self._force_close = False
    def __repr__(self):
        # Several pipelined requests may be handled concurrently, so there is
        # no single "current" request whose method/path could be reported here.
        meth = 'none'
        path = 'none'
        return "<{} {}:{} {}>".format(
            self.__class__.__name__, meth, path,
            'connected' if self.transport is not None else 'disconnected')
@property
def time_service(self):
return self._time_service
@property
def keepalive_timeout(self):
return self._keepalive_timeout
@asyncio.coroutine
def shutdown(self, timeout=15.0):
"""Worker process is about to exit, we need cleanup everything and
stop accepting requests. It is especially important for keep-alive
connections."""
self._force_close = True
if self._keepalive_handle is not None:
self._keepalive_handle.cancel()
# cancel waiters
for waiter in self._waiters:
if not waiter.done():
waiter.cancel()
# wait for handlers
with suppress(asyncio.CancelledError, asyncio.TimeoutError):
with CeilTimeout(timeout, loop=self._loop):
if self._error_handler and not self._error_handler.done():
yield from self._error_handler
while True:
h = None
for handler in self._request_handlers:
if not handler.done():
h = handler
break
if h:
yield from h
else:
break
# force-close non-idle handlers
for handler in self._request_handlers:
if not handler.done():
handler.cancel()
if self.transport is not None:
self.transport.close()
self.transport = None
if self._request_handlers:
self._request_handlers.clear()
def connection_made(self, transport):
super().connection_made(transport)
self.transport = transport
self.writer = StreamWriter(self, transport, self._loop)
if self._tcp_keepalive:
tcp_keepalive(self, transport)
self.writer.set_tcp_nodelay(True)
self._manager.connection_made(self, transport)
def connection_lost(self, exc):
self._manager.connection_lost(self, exc)
super().connection_lost(exc)
self._manager = None
self._force_close = True
self._request_factory = None
self._request_handler = None
self._request_parser = None
self.transport = self.writer = None
if self._payload_parser is not None:
self._payload_parser.feed_eof()
self._payload_parser = None
if self._keepalive_handle is not None:
self._keepalive_handle.cancel()
for handler in self._request_handlers:
if not handler.done():
handler.cancel()
if self._error_handler is not None:
if not self._error_handler.done():
self._error_handler.cancel()
self._request_handlers = ()
def set_parser(self, parser):
assert self._payload_parser is None
self._payload_parser = parser
if self._message_tail:
self._payload_parser.feed_data(self._message_tail)
self._message_tail = b''
def eof_received(self):
pass
def data_received(self, data):
if self._force_close or self._close:
return
# parse http messages
if self._payload_parser is None and not self._upgrade:
try:
messages, upgraded, tail = self._request_parser.feed_data(data)
except HttpProcessingError as exc:
# something happened during parsing
self.close()
self._error_handler = ensure_future(
self.handle_parse_error(
PayloadWriter(self.writer, self._loop),
400, exc, exc.message),
loop=self._loop)
except Exception as exc:
# 500: internal error
self.close()
self._error_handler = ensure_future(
self.handle_parse_error(
PayloadWriter(self.writer, self._loop),
500, exc), loop=self._loop)
else:
for (msg, payload) in messages:
self._request_count += 1
if self._waiters:
waiter = self._waiters.popleft()
waiter.set_result((msg, payload))
elif self._max_concurrent_handlers:
self._max_concurrent_handlers -= 1
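                        # The new handler task needs a reference to itself
                        # (start() unpacks handler[0]); pass a one-element list
                        # that is filled in right after the task is created.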
data = []
handler = ensure_future(
self.start(msg, payload, data), loop=self._loop)
data.append(handler)
self._request_handlers.append(handler)
else:
self._messages.append((msg, payload))
                self._upgrade = upgraded
if upgraded:
self._message_tail = tail
# no parser, just store
elif self._payload_parser is None and self._upgrade and data:
self._message_tail += data
# feed payload
elif data:
eof, tail = self._payload_parser.feed_data(data)
if eof:
self.close()
def keep_alive(self, val):
"""Set keep-alive connection mode.
:param bool val: new state.
"""
self._keepalive = val
def close(self):
"""Stop accepting new pipelinig messages and close
connection when handlers done processing messages"""
self._close = True
for waiter in self._waiters:
if not waiter.done():
waiter.cancel()
def force_close(self):
"""Force close connection"""
self._force_close = True
for waiter in self._waiters:
if not waiter.done():
waiter.cancel()
if self.transport is not None:
self.transport.close()
self.transport = None
def log_access(self, message, environ, response, time):
if self.access_logger:
self.access_logger.log(message, environ, response,
self.transport, time)
def log_debug(self, *args, **kw):
if self.debug:
self.logger.debug(*args, **kw)
def log_exception(self, *args, **kw):
self.logger.exception(*args, **kw)
def _process_keepalive(self):
if self._force_close:
return
next = self._keepalive_time + self._keepalive_timeout
# all handlers in idle state
if len(self._request_handlers) == len(self._waiters):
now = self._time_service.loop_time
if now + 1.0 > next:
self.force_close()
return
self._keepalive_handle = self._loop.call_at(
next, self._process_keepalive)
def pause_reading(self):
if not self._reading_paused:
try:
self.transport.pause_reading()
except (AttributeError, NotImplementedError, RuntimeError):
pass
self._reading_paused = True
def resume_reading(self):
if self._reading_paused:
try:
self.transport.resume_reading()
except (AttributeError, NotImplementedError, RuntimeError):
pass
self._reading_paused = False
@asyncio.coroutine
def start(self, message, payload, handler):
"""Start processing of incoming requests.
It reads request line, request headers and request payload, then
calls handle_request() method. Subclass has to override
handle_request(). start() handles various exceptions in request
or response handling. Connection is being closed always unless
keep_alive(True) specified.
"""
loop = self._loop
handler = handler[0]
manager = self._manager
keepalive_timeout = self._keepalive_timeout
while not self._force_close:
if self.access_log:
now = loop.time()
manager.requests_count += 1
writer = PayloadWriter(self.writer, loop)
request = self._request_factory(
message, payload, self, writer, handler)
try:
try:
resp = yield from self._request_handler(request)
except HTTPException as exc:
resp = exc
except asyncio.CancelledError:
self.log_debug('Ignored premature client disconnection')
break
except asyncio.TimeoutError:
self.log_debug('Request handler timed out.')
resp = self.handle_error(request, 504)
except Exception as exc:
resp = self.handle_error(request, 500, exc)
yield from resp.prepare(request)
yield from resp.write_eof()
# notify server about keep-alive
self._keepalive = resp.keep_alive
# Restore default state.
# Should be no-op if server code didn't touch these attributes.
writer.set_tcp_cork(False)
writer.set_tcp_nodelay(True)
# log access
if self.access_log:
self.log_access(message, None, resp, loop.time() - now)
# check payload
if not payload.is_eof():
lingering_time = self._lingering_time
if not self._force_close and lingering_time:
self.log_debug(
'Start lingering close timer for %s sec.',
lingering_time)
now = loop.time()
end_t = now + lingering_time
with suppress(asyncio.TimeoutError):
while (not payload.is_eof() and now < end_t):
timeout = min(end_t - now, lingering_time)
with CeilTimeout(timeout, loop=loop):
# read and ignore
yield from payload.readany()
now = loop.time()
# if payload still uncompleted
if not payload.is_eof() and not self._force_close:
self.log_debug('Uncompleted request.')
self.close()
except Exception as exc:
self.log_exception('Unhandled exception', exc_info=exc)
self.force_close()
finally:
if self.transport is None:
self.log_debug('Ignored premature client disconnection.')
elif not self._force_close:
if self._messages:
message, payload = self._messages.popleft()
else:
if self._keepalive and not self._close:
# start keep-alive timer
if keepalive_timeout is not None:
now = self._time_service.loop_time
self._keepalive_time = now
if self._keepalive_handle is None:
self._keepalive_handle = loop.call_at(
now + keepalive_timeout,
self._process_keepalive)
# wait for next request
waiter = create_future(loop)
self._waiters.append(waiter)
try:
message, payload = yield from waiter
except asyncio.CancelledError:
# shutdown process
break
else:
break
# remove handler, close transport if no handlers left
if not self._force_close:
self._request_handlers.remove(handler)
if not self._request_handlers:
if self.transport is not None:
self.transport.close()
def handle_error(self, request, status=500, exc=None, message=None):
"""Handle errors.
Returns HTTP response with specific status code. Logs additional
information. It always closes current connection."""
self.log_exception("Error handling request", exc_info=exc)
if status == 500:
msg = "<h1>500 Internal Server Error</h1>"
if self.debug:
try:
tb = traceback.format_exc()
tb = html_escape(tb)
msg += '<br><h2>Traceback:</h2>\n<pre>'
msg += tb
msg += '</pre>'
except: # pragma: no cover
pass
else:
msg += "Server got itself in trouble"
msg = ("<html><head><title>500 Internal Server Error</title>"
"</head><body>" + msg + "</body></html>")
else:
msg = message
resp = Response(status=status, text=msg, content_type='text/html')
resp.force_close()
# some data already got sent, connection is broken
if request.writer.output_size > 0 or self.transport is None:
self.force_close()
return resp
@asyncio.coroutine
def handle_parse_error(self, writer, status, exc=None, message=None):
request = BaseRequest(
ERROR, EMPTY_PAYLOAD,
self, writer, self._time_service, None)
resp = self.handle_error(request, status, exc, message)
yield from resp.prepare(request)
yield from resp.write_eof()
# Restore default state.
# Should be no-op if server code didn't touch these attributes.
self.writer.set_tcp_cork(False)
self.writer.set_tcp_nodelay(True)
|
ARR = [int(x) for x in input().split()]
BEG, END = [int(x) for x in input().split()]
K = int(input())
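# Expected stdin, one line each: the array, then "beg end" (a 1-based, inclusive
# range), then k. Illustrative run: "7 10 4 3 20 15" / "1 5" / "2" prints 4,
# the 2nd smallest value among the first five elements.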
def range_k_smallest(arr, beg, end, k):
return k_smallest(arr[beg-1:end], k-1)
def part(arr, low, high):
if low >= high:
return low
pivot = arr[high]
i = low
for j in range(low, high+1):
if arr[j] < pivot:
arr[i], arr[j] = arr[j], arr[i]
i += 1
arr[i], arr[high] = arr[high], arr[i]
return i
def k_smallest(arr, k):
ix = part(arr, 0, len(arr)-1)
if k < ix:
return k_smallest(arr[:ix], k)
if k > ix:
        # Recurse into the right partition; the target rank shifts down by the
        # pivot's position plus one.
        return k_smallest(arr[ix+1:], k-ix-1)
return arr[ix]
def range_k_smallest2(arr, beg, end, k):
return sorted(arr[beg-1:end])[k-1]
print(range_k_smallest2(ARR, BEG, END, K))
|
"""This example creates a content category with the given name and description.
Tags: contentcategory.saveContentCategory
"""
__author__ = 'Joseph DiLallo'
import uuid
from googleads import dfa
def main(client):
# Initialize appropriate service.
content_category_service = client.GetService(
'contentcategory', 'v1.20', 'https://advertisersapitest.doubleclick.net')
# Construct and save content category.
content_category = {
'name': 'Category %s' % uuid.uuid4()
}
result = content_category_service.saveContentCategory(
content_category)
# Display results.
print 'Content category with ID \'%s\' was created.' % result['id']
if __name__ == '__main__':
# Initialize client object.
dfa_client = dfa.DfaClient.LoadFromStorage()
main(dfa_client)
|
'''
Service support for Solaris 10 and 11, should work with other systems
that use SMF also. (e.g. SmartOS)
'''
from __future__ import absolute_import
__func_alias__ = {
'reload_': 'reload'
}
__virtualname__ = 'service'
def __virtual__():
'''
Only work on systems which default to SMF
'''
if 'Solaris' in __grains__['os_family']:
# Don't let this work on Solaris 9 since SMF doesn't exist on it.
if __grains__['kernelrelease'] == "5.9":
return False
return __virtualname__
return False
def _get_enabled_disabled(enabled_prop="true"):
'''
DRY: Get all service FMRIs and their enabled property
'''
ret = set()
cmd = '/usr/bin/svcprop -c -p general/enabled "*"'
lines = __salt__['cmd.run_stdout'](cmd, python_shell=False).splitlines()
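    # Each output line looks roughly like (illustrative):
    #   svc:/network/ssh:default/:properties/general/enabled boolean true
    # so comps[0] carries the FMRI (plus the "/:properties..." suffix stripped
    # below) and comps[2] is the property value.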
for line in lines:
comps = line.split()
if not comps:
continue
if comps[2] == enabled_prop:
ret.add(comps[0].split("/:properties")[0])
return sorted(ret)
def get_running():
'''
Return the running services
CLI Example:
.. code-block:: bash
salt '*' service.get_running
'''
ret = set()
cmd = '/usr/bin/svcs -H -o FMRI,STATE -s FMRI'
lines = __salt__['cmd.run'](cmd, python_shell=False).splitlines()
for line in lines:
comps = line.split()
if not comps:
continue
if 'online' in line:
ret.add(comps[0])
return sorted(ret)
def get_stopped():
'''
Return the stopped services
CLI Example:
.. code-block:: bash
salt '*' service.get_stopped
'''
ret = set()
cmd = '/usr/bin/svcs -aH -o FMRI,STATE -s FMRI'
lines = __salt__['cmd.run'](cmd, python_shell=False).splitlines()
for line in lines:
comps = line.split()
if not comps:
continue
if 'online' not in line and 'legacy_run' not in line:
ret.add(comps[0])
return sorted(ret)
def available(name):
'''
Returns ``True`` if the specified service is available, otherwise returns
``False``.
We look up the name with the svcs command to get back the FMRI
This allows users to use simpler service names
CLI Example:
.. code-block:: bash
salt '*' service.available net-snmp
'''
cmd = '/usr/bin/svcs -H -o FMRI {0}'.format(name)
name = __salt__['cmd.run'](cmd, python_shell=False)
return name in get_all()
def missing(name):
'''
The inverse of service.available.
Returns ``True`` if the specified service is not available, otherwise returns
``False``.
CLI Example:
.. code-block:: bash
salt '*' service.missing net-snmp
'''
cmd = '/usr/bin/svcs -H -o FMRI {0}'.format(name)
name = __salt__['cmd.run'](cmd, python_shell=False)
return name not in get_all()
def get_all():
'''
Return all installed services
CLI Example:
.. code-block:: bash
salt '*' service.get_all
'''
ret = set()
cmd = '/usr/bin/svcs -aH -o FMRI,STATE -s FMRI'
lines = __salt__['cmd.run'](cmd).splitlines()
for line in lines:
comps = line.split()
if not comps:
continue
ret.add(comps[0])
return sorted(ret)
def start(name):
'''
Start the specified service
CLI Example:
.. code-block:: bash
salt '*' service.start <service name>
'''
cmd = '/usr/sbin/svcadm enable -s -t {0}'.format(name)
retcode = __salt__['cmd.retcode'](cmd, python_shell=False)
if not retcode:
return True
if retcode == 3:
# Return code 3 means there was a problem with the service
# A common case is being in the 'maintenance' state
# Attempt a clear and try one more time
clear_cmd = '/usr/sbin/svcadm clear {0}'.format(name)
__salt__['cmd.retcode'](clear_cmd, python_shell=False)
return not __salt__['cmd.retcode'](cmd, python_shell=False)
return False
def stop(name):
'''
Stop the specified service
CLI Example:
.. code-block:: bash
salt '*' service.stop <service name>
'''
cmd = '/usr/sbin/svcadm disable -s -t {0}'.format(name)
return not __salt__['cmd.retcode'](cmd, python_shell=False)
def restart(name):
'''
Restart the named service
CLI Example:
.. code-block:: bash
salt '*' service.restart <service name>
'''
cmd = '/usr/sbin/svcadm restart {0}'.format(name)
if not __salt__['cmd.retcode'](cmd, python_shell=False):
# calling restart doesn't clear maintenance
# or tell us that the service is in the 'online' state
return start(name)
return False
def reload_(name):
'''
Reload the named service
CLI Example:
.. code-block:: bash
salt '*' service.reload <service name>
'''
cmd = '/usr/sbin/svcadm refresh {0}'.format(name)
if not __salt__['cmd.retcode'](cmd, python_shell=False):
# calling reload doesn't clear maintenance
# or tell us that the service is in the 'online' state
return start(name)
return False
def status(name, sig=None):
'''
Return the status for a service, returns a bool whether the service is
running.
CLI Example:
.. code-block:: bash
salt '*' service.status <service name>
'''
cmd = '/usr/bin/svcs -H -o STATE {0}'.format(name)
line = __salt__['cmd.run'](cmd, python_shell=False)
if line == 'online':
return True
else:
return False
def enable(name, **kwargs):
'''
Enable the named service to start at boot
CLI Example:
.. code-block:: bash
salt '*' service.enable <service name>
'''
cmd = '/usr/sbin/svcadm enable {0}'.format(name)
return not __salt__['cmd.retcode'](cmd, python_shell=False)
def disable(name, **kwargs):
'''
Disable the named service to start at boot
CLI Example:
.. code-block:: bash
salt '*' service.disable <service name>
'''
cmd = '/usr/sbin/svcadm disable {0}'.format(name)
return not __salt__['cmd.retcode'](cmd, python_shell=False)
def enabled(name, **kwargs):
'''
Check to see if the named service is enabled to start on boot
CLI Example:
.. code-block:: bash
salt '*' service.enabled <service name>
'''
# The property that reveals whether a service is enabled
# can only be queried using the full FMRI
# We extract the FMRI and then do the query
fmri_cmd = '/usr/bin/svcs -H -o FMRI {0}'.format(name)
fmri = __salt__['cmd.run'](fmri_cmd, python_shell=False)
cmd = '/usr/sbin/svccfg -s {0} listprop general/enabled'.format(fmri)
comps = __salt__['cmd.run'](cmd, python_shell=False).split()
if comps[2] == 'true':
return True
else:
return False
def disabled(name):
'''
Check to see if the named service is disabled to start on boot
CLI Example:
.. code-block:: bash
salt '*' service.disabled <service name>
'''
return not enabled(name)
def get_enabled():
'''
Return the enabled services
CLI Example:
.. code-block:: bash
salt '*' service.get_enabled
'''
# Note that this returns the full FMRI
return _get_enabled_disabled("true")
def get_disabled():
'''
Return the disabled services
CLI Example:
.. code-block:: bash
salt '*' service.get_disabled
'''
# Note that this returns the full FMRI
return _get_enabled_disabled("false")
|
import sys, argparse, gzip, re, random, collections, inspect, os
from multiprocessing import Pool, cpu_count
from seqtools.statistics import average, median
def main(args):
inf = sys.stdin
if args.input != '-':
if re.search('\.gz$',args.input):
inf = gzip.open(args.input)
else:
inf = open(args.input)
of = sys.stdout
if args.output:
of = open(args.output,'w')
vals = []
for line in inf:
f = line.rstrip().split("\t")
if args.full and f[4] != 'full':
vals.append(None)
continue
if args.gene:
vals.append(f[2])
elif args.transcript:
vals.append(f[3])
if args.original_read_count:
if len(vals) > args.original_read_count:
sys.stderr.write("ERROR: cant have a read count greater than the original read count\n")
sys.exit()
vals += [None]*(args.original_read_count-len(vals))
# vals now holds an array to select from
total = len(vals)
xvals = make_sequence(total)
# make shuffled arrays to use for each point
qsvals = []
if args.threads > 1:
p = Pool(processes=args.threads)
for i in range(0,args.samples_per_xval):
if args.threads > 1:
qsvals.append(p.apply_async(get_shuffled_array,args=(vals,)))
else:
qsvals.append(Queue(get_shuffled_array(vals)))
if args.threads > 1:
p.close()
p.join()
svals = [x.get() for x in qsvals]
second_threads = 1
if second_threads > 1:
p = Pool(processes=second_threads)
results = []
for xval in xvals:
if second_threads > 1:
r = p.apply_async(analyze_x,args=(xval,svals,args))
results.append(r)
else:
r = Queue(analyze_x(xval,svals,args))
results.append(r)
if second_threads > 1:
p.close()
p.join()
for r in [x.get() for x in results]:
of.write("\t".join([str(x) for x in r])+"\n")
inf.close()
of.close()
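# Minimal stand-in for multiprocessing's AsyncResult: wraps an already-computed
# value so single-threaded code paths can call .get() just like pool results.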
class Queue:
def __init__(self,val):
self.val = val
def get(self):
return self.val
def get_shuffled_array(val):
random.shuffle(val)
return val[:]
def analyze_x(xval,svals,args):
s = args.samples_per_xval
#cnts = sorted(
# [len(
# [k for k in collections.Counter([z for z in [random.choice(vals) for y in range(0,xval)] if z]).values() if k >= args.min_depth]
# )
# for j in range(0,s)]
# )
cnts = []
for j in range(0,s):
vals = svals[j][0:xval]
cnts.append(len([x for x in collections.Counter([k for k in vals if k]).values() if x >= args.min_depth]))
cnts = sorted(cnts)
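  # Summarize the sampled counts with an empirical 90% band: 5th percentile,
  # median and 95th percentile for this sequencing depth.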
lower = float(cnts[int(len(cnts)*0.05)])
mid = median(cnts)
upper = float(cnts[int(len(cnts)*0.95)])
#print len(vals)
#print len([x for x in vals if x >0])
#print cnts[0:5]
#print cnts[-5:]
#print [xval, lower, mid, upper]
return [xval, lower, mid, upper]
def make_sequence(total):
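  # Sampling depths for the rarefaction curve: 1,2,3,4,5,10, then each block of
  # five scaled by 10 (20,30,40,50,100, then 200,...), capped at the total read
  # count, with the total itself appended as the final x value.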
start = [1,2,3,4,5,10]
while True:
start += [x*10 for x in start[-5:]]
if start[-1] > total: break
return [x for x in start if x < total]+[total]
def do_inputs():
  parser = argparse.ArgumentParser(description="Take a locus bed file (bed) followed by locus id followed by read count. Generate a rarefaction curve.",formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('input',help="Use - for STDIN")
parser.add_argument('-o','--output',help="Write output here")
parser.add_argument('--threads',type=int,default=cpu_count(),help="INT threads to use")
parser.add_argument('--original_read_count',type=int,help="INT allows accounting for unmapped reads not included here.")
parser.add_argument('--samples_per_xval',type=int,default=1000,help="Sample this many times")
parser.add_argument('--min_depth',type=int,default=1,help="Require at least this depth to count as a hit.")
  parser.add_argument('--full',action='store_true',help="Return full length matches only")
group1 = parser.add_mutually_exclusive_group(required=True)
group1.add_argument('--gene',action='store_true',help="Gene based output")
  group1.add_argument('--transcript',action='store_true',help="Transcript based output")
args = parser.parse_args()
return args
def external_cmd(cmd):
cache_argv = sys.argv
sys.argv = cmd
args = do_inputs()
main(args)
sys.argv = cache_argv
if __name__=="__main__":
args = do_inputs()
main(args)
|
"""Utils for metrics used in eval."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import inspect
from tensor2tensor.layers import common_layers
from tensor2tensor.utils import bleu_hook
from tensor2tensor.utils import rouge
import tensorflow as tf
class Metrics(object):
"""Available evaluation metrics."""
# Entries here should match the keys in METRICS_FN below
ACC = "accuracy"
ACC_TOP5 = "accuracy_top5"
ACC_PER_SEQ = "accuracy_per_sequence"
NEG_LOG_PERPLEXITY = "neg_log_perplexity"
APPROX_BLEU = "approx_bleu_score"
RMSE = "rmse"
LOG_POISSON = "log_poisson"
R2 = "r_squared"
ROUGE_2_F = "rouge_2_fscore"
ROUGE_L_F = "rouge_L_fscore"
EDIT_DISTANCE = "edit_distance"
def padded_rmse(predictions, labels, weights_fn=common_layers.weights_all):
predictions, labels = common_layers.pad_with_zeros(predictions, labels)
targets = labels
weights = weights_fn(targets)
error = tf.sqrt(tf.pow(predictions - labels, 2))
return tf.reduce_sum(error * weights), tf.reduce_sum(weights)
def padded_log_poisson(predictions,
labels,
weights_fn=common_layers.weights_all):
# Expects predictions to already be transformed into log space
predictions, labels = common_layers.pad_with_zeros(predictions, labels)
targets = labels
weights = weights_fn(targets)
lp_loss = tf.nn.log_poisson_loss(targets, predictions, compute_full_loss=True)
return tf.reduce_sum(lp_loss * weights), tf.reduce_sum(weights)
def padded_variance_explained(predictions,
labels,
weights_fn=common_layers.weights_all):
# aka R^2
predictions, labels = common_layers.pad_with_zeros(predictions, labels)
targets = labels
weights = weights_fn(targets)
y_bar = tf.reduce_mean(weights * targets)
tot_ss = tf.reduce_sum(weights * tf.pow(targets - y_bar, 2))
res_ss = tf.reduce_sum(weights * tf.pow(targets - predictions, 2))
r2 = 1. - res_ss / tot_ss
return r2, tf.reduce_sum(weights)
def padded_accuracy_topk(predictions,
labels,
k,
weights_fn=common_layers.weights_nonzero):
"""Percentage of times that top-k predictions matches labels on non-0s."""
with tf.variable_scope("padded_accuracy_topk", values=[predictions, labels]):
padded_predictions, padded_labels = common_layers.pad_with_zeros(
predictions, labels)
weights = weights_fn(padded_labels)
effective_k = tf.minimum(k, tf.shape(padded_predictions)[-1])
_, outputs = tf.nn.top_k(padded_predictions, k=effective_k)
outputs = tf.to_int32(outputs)
padded_labels = tf.to_int32(padded_labels)
padded_labels = tf.expand_dims(padded_labels, axis=-1)
padded_labels += tf.zeros_like(outputs) # Pad to same shape.
same = tf.to_float(tf.equal(outputs, padded_labels))
same_topk = tf.reduce_sum(same, axis=-1)
return same_topk, weights
def padded_accuracy_top5(predictions,
labels,
weights_fn=common_layers.weights_nonzero):
return padded_accuracy_topk(predictions, labels, 5, weights_fn)
def padded_sequence_accuracy(predictions,
labels,
weights_fn=common_layers.weights_nonzero):
"""Percentage of times that predictions matches labels everywhere (non-0)."""
with tf.variable_scope(
"padded_sequence_accuracy", values=[predictions, labels]):
padded_predictions, padded_labels = common_layers.pad_with_zeros(
predictions, labels)
weights = weights_fn(padded_labels)
outputs = tf.to_int32(tf.argmax(padded_predictions, axis=-1))
padded_labels = tf.to_int32(padded_labels)
not_correct = tf.to_float(tf.not_equal(outputs, padded_labels)) * weights
axis = list(range(1, len(outputs.get_shape())))
correct_seq = 1.0 - tf.minimum(1.0, tf.reduce_sum(not_correct, axis=axis))
return correct_seq, tf.constant(1.0)
def sequence_edit_distance(predictions,
labels,
weights_fn=common_layers.weights_nonzero):
"""Average edit distance, ignoring padding 0s.
The score returned is the edit distance divided by the total length of
reference truth and the weight returned is the total length of the truth.
Args:
predictions: Tensor of shape [`batch_size`, `length`, 1, `num_classes`] and
type tf.float32 representing the logits, 0-padded.
labels: Tensor of shape [`batch_size`, `length`, 1, 1] and type tf.int32
representing the labels of same length as logits and 0-padded.
weights_fn: ignored. The weights returned are the total length of the ground
truth labels, excluding 0-paddings.
Returns:
(edit distance / reference length, reference length)
Raises:
ValueError: if weights_fn is not common_layers.weights_nonzero.
"""
if weights_fn is not common_layers.weights_nonzero:
raise ValueError("Only weights_nonzero can be used for this metric.")
with tf.variable_scope("edit_distance", values=[predictions, labels]):
# Transform logits into sequence classes by taking max at every step.
predictions = tf.to_int32(
tf.squeeze(tf.argmax(predictions, axis=-1), axis=(2, 3)))
nonzero_idx = tf.where(tf.not_equal(predictions, 0))
sparse_outputs = tf.SparseTensor(nonzero_idx,
tf.gather_nd(predictions, nonzero_idx),
tf.shape(predictions, out_type=tf.int64))
labels = tf.squeeze(labels, axis=(2, 3))
nonzero_idx = tf.where(tf.not_equal(labels, 0))
label_sparse_outputs = tf.SparseTensor(nonzero_idx,
tf.gather_nd(labels, nonzero_idx),
tf.shape(labels, out_type=tf.int64))
distance = tf.reduce_sum(
tf.edit_distance(sparse_outputs, label_sparse_outputs, normalize=False))
reference_length = tf.to_float(tf.shape(nonzero_idx)[0])
return distance / reference_length, reference_length
def padded_neg_log_perplexity(predictions,
labels,
weights_fn=common_layers.weights_nonzero):
"""Average log-perplexity exluding padding 0s. No smoothing."""
num, den = common_layers.padded_cross_entropy(
predictions, labels, 0.0, weights_fn=weights_fn, reduce_sum=False)
return (-num, den)
def padded_accuracy(predictions,
labels,
weights_fn=common_layers.weights_nonzero):
"""Percentage of times that predictions matches labels on non-0s."""
with tf.variable_scope("padded_accuracy", values=[predictions, labels]):
padded_predictions, padded_labels = common_layers.pad_with_zeros(
predictions, labels)
weights = weights_fn(padded_labels)
outputs = tf.to_int32(tf.argmax(padded_predictions, axis=-1))
padded_labels = tf.to_int32(padded_labels)
return tf.to_float(tf.equal(outputs, padded_labels)), weights
def create_evaluation_metrics(problems, model_hparams):
"""Creates the evaluation metrics for the model.
Args:
problems: List of tuples (problem name, problem instance).
model_hparams: a set of hparams.
Returns:
dict<metric name, metric function>. The metric functions have signature
(Tensor predictions, features) -> (metric Tensor, update op), where features
is a dict with keys {targets, problem_choice}.
Raises:
    ValueError: if the metrics specified by a problem are not recognized (i.e.
      are not defined in the Metrics enum).
"""
def make_problem_specific_metric_fn(metric_fn, problem_idx, weights_fn):
"""Create a metric fn conditioned on problem_idx."""
def problem_metric_fn(predictions, features):
"""Metric fn."""
labels = features.get("targets", None)
problem_choice = features.get("problem_choice", 0)
# Send along the entire features dict if the metric fn has the kwarg
# "features".
kwargs = {}
args, _, keywords, _ = inspect.getargspec(metric_fn)
if "features" in args or keywords:
kwargs["features"] = features
def wrapped_metric_fn():
return metric_fn(predictions, labels, weights_fn=weights_fn, **kwargs)
(scores, weights) = tf.cond(
tf.equal(problem_idx, problem_choice), wrapped_metric_fn,
lambda: (tf.constant(0.0), tf.constant(0.0)))
# The tf.metrics.mean function assures correct aggregation.
return tf.metrics.mean(scores, weights)
return problem_metric_fn
eval_metrics = dict()
for problem_idx, (problem_name, problem_instance) in enumerate(problems):
if problem_instance is None:
# For problems in problem_hparams
metrics = [
Metrics.ACC, Metrics.ACC_TOP5, Metrics.ACC_PER_SEQ,
Metrics.NEG_LOG_PERPLEXITY
]
if "wmt" in problem_name:
metrics.append(Metrics.APPROX_BLEU)
else:
# For registered Problems
metrics = problem_instance.eval_metrics()
if not all([m in METRICS_FNS for m in metrics]):
raise ValueError("Unrecognized metric. Problem %s specified metrics "
"%s. Recognized metrics are %s." %
(problem_name, metrics, METRICS_FNS.keys()))
class_output = "image" in problem_name and "coco" not in problem_name
real_output = "gene_expression" in problem_name
if model_hparams.prepend_mode != "none":
assert (model_hparams.prepend_mode == "prepend_inputs_masked_attention" or
model_hparams.prepend_mode == "prepend_inputs_full_attention")
assert not class_output
weights_fn = common_layers.weights_prepend_inputs_to_targets
elif class_output or real_output:
weights_fn = common_layers.weights_all
else:
weights_fn = common_layers.weights_nonzero
for metric in metrics:
metric_fn = METRICS_FNS[metric]
problem_metric_fn = make_problem_specific_metric_fn(
metric_fn, problem_idx, weights_fn)
eval_metrics["metrics-%s/%s" % (problem_name, metric)] = problem_metric_fn
return eval_metrics
METRICS_FNS = {
Metrics.ACC: padded_accuracy,
Metrics.ACC_TOP5: padded_accuracy_top5,
Metrics.ACC_PER_SEQ: padded_sequence_accuracy,
Metrics.NEG_LOG_PERPLEXITY: padded_neg_log_perplexity,
Metrics.APPROX_BLEU: bleu_hook.bleu_score,
Metrics.RMSE: padded_rmse,
Metrics.LOG_POISSON: padded_log_poisson,
Metrics.R2: padded_variance_explained,
Metrics.ROUGE_2_F: rouge.rouge_2_fscore,
Metrics.ROUGE_L_F: rouge.rouge_l_fscore,
Metrics.EDIT_DISTANCE: sequence_edit_distance,
}
|
import wx
from cairis.core.armid import *
import WidgetFactory
from cairis.core.Borg import Borg
__author__ = 'Shamal Faily'
class RoleCostDialog(wx.Dialog):
def __init__(self,parent):
wx.Dialog.__init__(self,parent,ROLECOST_ID,'Add Role Cost',style=wx.DEFAULT_DIALOG_STYLE|wx.MAXIMIZE_BOX|wx.THICK_FRAME|wx.RESIZE_BORDER,size=(400,140))
b = Borg()
self.dbProxy = b.dbProxy
self.theRoleName = ''
self.theRoleCost = ''
mainSizer = wx.BoxSizer(wx.VERTICAL)
roleList = self.dbProxy.getDimensionNames('role')
mainSizer.Add(WidgetFactory.buildComboSizerList(self,'Role',(87,30),ROLECOST_COMBOROLE_ID,roleList),0,wx.EXPAND)
mainSizer.Add(WidgetFactory.buildComboSizerList(self,'Cost',(87,30),ROLECOST_COMBOCOST_ID,['Low','Medium','High']),0,wx.EXPAND)
mainSizer.Add(wx.StaticText(self,-1),1,wx.EXPAND)
mainSizer.Add(WidgetFactory.buildAddCancelButtonSizer(self,ROLECOST_BUTTONADD_ID),0,wx.ALIGN_CENTER)
self.SetSizer(mainSizer)
wx.EVT_BUTTON(self,ROLECOST_BUTTONADD_ID,self.onAdd)
def onAdd(self,evt):
roleCtrl = self.FindWindowById(ROLECOST_COMBOROLE_ID)
costCtrl = self.FindWindowById(ROLECOST_COMBOCOST_ID)
self.theRoleName = roleCtrl.GetStringSelection()
self.theRoleCost = costCtrl.GetStringSelection()
if len(self.theRoleName) == 0:
dlg = wx.MessageDialog(self,'No role selected','Add Role Cost',wx.OK)
dlg.ShowModal()
dlg.Destroy()
return
elif (len(self.theRoleCost) == 0):
dlg = wx.MessageDialog(self,'No cost selected','Add Role Cost',wx.OK)
dlg.ShowModal()
dlg.Destroy()
return
else:
self.EndModal(ROLECOST_BUTTONADD_ID)
def role(self): return self.theRoleName
def cost(self): return self.theRoleCost
|
from __future__ import print_function
import sys
import itertools
import time
if __name__ == "__main__":
request = int(sys.argv[1])
# if the request is less than 0, sleep forever
if request < 0:
cycles = itertools.count()
else:
cycles = range(request)
for cycle in cycles:
print("[{}/{}] sleeping for 1 second at {}".format(cycle, request, time.time()))
time.sleep(1)
print("Done sleeping at {}".format(time.time()))
|
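# Request templates for the FortiGate REST API. The $placeholders and the
# #if $varExists(...) / #end if blocks look like Cheetah template syntax and are
# presumably rendered into concrete JSON bodies before each call (assumption:
# the templating engine itself is not shown in this file).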
LOGIN = """
{
"path": "/logincheck",
"method": "POST",
"body": {
"username": "$username",
"secretkey": "$secretkey"
}
}
"""
RELOGIN = """login?redir=%2fapi%2fv2"""
LOGOUT = """
{
"path": "/logout",
"method": "POST"
}
"""
ADD_VLAN_INTERFACE = """
{
"path": "/api/v2/cmdb/system/interface/",
"method": "POST",
"body": {
"name": "interface",
"json": {
#if $varExists('name')
"name": "$name",
#else
"name": "os_vid_$vlanid",
#end if
#if $varExists('vlanid')
"vlanid": "$vlanid",
#end if
"interface": "$interface",
"vdom": "$vdom",
"type": "vlan",
#if $varExists('ip')
"ip": "$ip",
"mode": "static",
"allowaccess": "ping",
#end if
"secondary-IP":"enable",
#if $varExists('alias')
"alias": "$alias",
#end if
"ipv6": {
"ip6-extra-addr": []
}
}
}
}
"""
SET_VLAN_INTERFACE = """
{
"path": "/api/v2/cmdb/system/interface/$name",
"method": "PUT",
"body": {
"name": "interface",
"json": {
#if $varExists('ip') and $ip != None
"ip": "$ip",
"mode": "static",
"allowaccess": "ping https ssh snmp http fgfm capwap",
#end if
#if $varExists('secondaryips')
#if $secondaryips
"secondary-IP": "enable",
"secondaryip": [
#for $secondaryip in $secondaryips[:-1]
{
"ip": "$secondaryip",
"allowaccess": "ping https ssh snmp http fgfm capwap"
},
#end for
{
"ip": "$secondaryips[-1]",
"allowaccess": "ping https ssh snmp http fgfm capwap"
}
],
#else
"secondary-IP": "disable",
#end if
#end if
#if $varExists('vdom')
"vdom": "$vdom"
#else
"vdom": "root"
#end if
}
}
}
"""
DELETE_VLAN_INTERFACE = """
{
"path": "/api/v2/cmdb/system/interface/$name",
"method": "DELETE",
"body": {
"name": "interface",
"json": {
#if $varExists('vdom')
"vdom": "$vdom"
#else
"vdom": "root"
#end if
}
}
}
"""
GET_VLAN_INTERFACE = """
{
#if $varExists('name')
#if $varExists('vdom')
"path":"/api/v2/cmdb/system/interface/$name/?vdom=$vdom",
#else
"path":"/api/v2/cmdb/system/interface/$name/",
#end if
#else
#if $varExists('vdom')
"path":"/api/v2/cmdb/system/interface/?vdom=$vdom",
#else
"path":"/api/v2/cmdb/system/interface/",
#end if
#end if
"method": "GET"
}
"""
ADD_DHCP_SERVER = """
{
"path":"/api/v2/cmdb/system.dhcp/server/",
"method": "POST",
"body": {
"name": "server",
#if $varExists('vdom')
"vdom": "$vdom",
#end if
"json": {
"status":"enable",
"dns-service":"local",
#if $gateway != None
"default-gateway":"$gateway",
#end if
"netmask":"$netmask",
"interface":"$interface",
"ip-range":[
{
"start-ip":"$start_ip",
"end-ip":"$end_ip"
}
]
}
}
}
"""
DELETE_DHCP_SERVER = """
{
"path":"/api/v2/cmdb/system.dhcp/server/$id/",
"method": "DELETE",
"body": {
"name": "server",
#if $varExists('vdom')
"vdom": "$vdom",
#end if
"id": "$id",
"json": {
}
}
}
"""
GET_DHCP_SERVER = """
{
#if $varExists('id')
#if $varExists('vdom')
"path":"/api/v2/cmdb/system.dhcp/server/$id/?vdom=$vdom",
#else
"path":"/api/v2/cmdb/system.dhcp/server/$id/",
#end if
#else
#if $varExists('vdom')
"path":"/api/v2/cmdb/system.dhcp/server/?vdom=$vdom",
#else
"path":"/api/v2/cmdb/system.dhcp/server/",
#end if
#end if
"method": "GET"
}
"""
SET_DHCP_SERVER_RSV_ADDR = """
{
"path":"/api/v2/cmdb/system.dhcp/server/$id/reserved-address",
"method": "PUT",
"body": {
#if $varExists('vdom')
"vdom": "$vdom",
#end if
"json": {
"reserved-address":$reserved_address
}
}
}
"""
ADD_VDOM = """
{
"path":"/api/v2/cmdb/system/vdom/",
"method": "POST",
"body": {
"name": "vdom",
"json": {
"name":"$name"
}
}
}
"""
DELETE_VDOM = """
{
"path":"/api/v2/cmdb/system/vdom/$name",
"method": "DELETE",
"body": {
}
}
"""
GET_VDOM = """
{
"path":"/api/v2/cmdb/system/vdom/$name",
"method": "GET"
}
"""
ADD_VDOM_LINK = """
{
"path":"/api/v2/cmdb/system/vdom-link/",
"method": "POST",
"body": {
"name": "vdom-link",
"json": {
"name":"$name"
}
}
}
"""
DELETE_VDOM_LINK = """
{
"path": "/api/v2/cmdb/system/vdom-link/$name",
"method": "DELETE",
"body": {
}
}
"""
GET_VDOM_LINK = """
{
"path":"/api/v2/cmdb/system/vdom-link/$name",
"method": "GET"
}
"""
ADD_VDOM_LNK_INTERFACE = """
{
"path":"/api/v2/cmdb/system/interface/",
"method": "POST",
"body": {
"name": "vdom-link",
"json": {
"name":"$name"
}
}
}
"""
ADD_ROUTER_STATIC = """
{
"path": "/api/v2/cmdb/router/static/",
"method": "POST",
"body": {
#if $varExists('vdom')
"vdom": "$vdom",
#else
"vdom": "root",
#end if
"json": {
"dst": "$dst",
"device": "$device",
"gateway": "$gateway"
}
}
}
"""
DELETE_ROUTER_STATIC = """
{
"path": "/api/v2/cmdb/router/static/$id/",
"method": "DELETE",
"body": {
#if $varExists('vdom')
"vdom": "$vdom",
#else
"vdom": "root",
#end if
"json": {
}
}
}
"""
GET_ROUTER_STATIC = """
{
#if $varExists('id')
#if $varExists('vdom')
"path":"/api/v2/cmdb/router/static/$id/?vdom=$vdom",
#else
"path":"/api/v2/cmdb/router/static/$id/",
#end if
#else
#if $varExists('vdom')
"path":"/api/v2/cmdb/router/static/?vdom=$vdom",
#else
"path":"/api/v2/cmdb/router/static/",
#end if
#end if
"method": "GET"
}
"""
ADD_FIREWALL_POLICY = """
{
"path": "/api/v2/cmdb/firewall/policy/",
"method": "POST",
"body": {
#if $varExists('vdom')
"vdom": "$vdom",
#else
"vdom": "root",
#end if
"json": {
"srcintf": [
{
"name": "$srcintf"
}
],
"dstintf": [
{
"name": "$dstintf"
}
],
"srcaddr": [
{
#if $varExists('srcaddr')
"name": "$srcaddr"
#else
"name": "all"
#end if
}
],
"dstaddr": [
{
#if $varExists('dstaddr')
"name": "$dstaddr"
#else
"name": "all"
#end if
}
],
"action": "accept",
"schedule": "always",
#if $varExists('nat')
"nat": "$nat",
#end if
#if $varExists('poolname')
#if not $varExists('nat')
"nat": "enable",
#end if
"ippool": "enable",
"poolname":[{
"name":"$poolname"
}],
#end if
"service": [{
"name": "ALL"
}]
}
}
}
"""
DELETE_FIREWALL_POLICY = """
{
"path": "/api/v2/cmdb/firewall/policy/$id/",
"method": "DELETE",
"body": {
#if $varExists('vdom')
"vdom": "$vdom",
#else
"vdom": "root",
#end if
"json": {
}
}
}
"""
GET_FIREWALL_POLICY = """
{
#if $varExists('id')
#if $varExists('vdom')
"path": "/api/v2/cmdb/firewall/policy/$id/?vdom=$vdom",
#else
"path": "/api/v2/cmdb/firewall/policy/$id/",
#end if
#else
#if $varExists('vdom')
"path": "/api/v2/cmdb/firewall/policy/?vdom=$vdom",
#else
"path": "/api/v2/cmdb/firewall/policy/",
#end if
#end if
"method": "GET"
}
"""
MOVE_FIREWALL_POLICY = """
{
"path": "/api/v2/cmdb/firewall/policy/$id",
"method": "PUT",
"body": {
#if $varExists('vdom')
"vdom": "$vdom",
#else
"vdom": "root",
#end if
#if $varExists('before')
"before": "$before",
#else
"after": "$after",
#end if
"action": "move"
}
}
"""
ADD_FIREWALL_VIP = """
{
"path":"/api/v2/cmdb/firewall/vip/",
"method": "POST",
"body": {
#if $varExists('vdom')
"vdom": "$vdom",
#else
"vdom": "root",
#end if
"name": "vip",
"json": {
"name": "$name",
"extip": "$extip",
"extintf": "$extintf",
"mappedip": [{
"range": "$mappedip"
}]
}
}
}
"""
DELETE_FIREWALL_VIP = """
{
"path":"/api/v2/cmdb/firewall/vip/$name",
"method": "DELETE",
"body": {
#if $varExists('vdom')
"vdom": "$vdom",
#else
"vdom": "root",
#end if
"name": "vip"
}
}
"""
GET_FIREWALL_VIP = """
{
#if $varExists('name')
#if $varExists('vdom')
"path": "/api/v2/cmdb/firewall/vip/$name/?vdom=$vdom",
#else
"path": "/api/v2/cmdb/firewall/vip/$name/",
#end if
#else
#if $varExists('vdom')
"path": "/api/v2/cmdb/firewall/vip/?vdom=$vdom",
#else
"path": "/api/v2/cmdb/firewall/vip/",
#end if
#end if
"method": "GET"
}
"""
ADD_FIREWALL_IPPOOL = """
{
"path":"/api/v2/cmdb/firewall/ippool/",
"method": "POST",
"body": {
#if $varExists('vdom')
"vdom": "$vdom",
#else
"vdom": "root",
#end if
"name": "ippool",
"json": {
"startip": "$startip",
#if $varExists('endip')
"endip": "$endip",
#else
"endip": "$startip",
#end if
#if $varExists('type')
"type": "$type",
#else
"type": "one-to-one",
#end if
#if $varExists('comments')
"comments": "$comments",
#end if
#if $varExists('name')
"name": "$name"
#else
"name": "$startip"
#end if
}
}
}
"""
DELETE_FIREWALL_IPPOOL = """
{
"path":"/api/v2/cmdb/firewall/ippool/$name",
"method": "DELETE",
"body": {
#if $varExists('vdom')
"vdom": "$vdom",
#else
"vdom": "root",
#end if
"name": "ippool"
}
}
"""
GET_FIREWALL_IPPOOL = """
{
#if $varExists('name')
#if $varExists('vdom')
"path": "/api/v2/cmdb/firewall/ippool/$name/?vdom=$vdom",
#else
"path": "/api/v2/cmdb/firewall/ippool/$name/",
#end if
#else
#if $varExists('vdom')
"path": "/api/v2/cmdb/firewall/ippool/?vdom=$vdom",
#else
"path": "/api/v2/cmdb/firewall/ippool/",
#end if
#end if
"method": "GET"
}
"""
ADD_FIREWALL_ADDRESS = """
{
"path":"/api/v2/cmdb/firewall/address/",
"method": "POST",
"body": {
#if $varExists('vdom')
"vdom": "$vdom",
#else
"vdom": "root",
#end if
"name": "address",
"json": {
#if $varExists('associated_interface')
"associated-interface": "$associated_interface",
#end if
#if $varExists('comment')
"comment": "$comment",
#end if
"name": "$name",
"subnet": "$subnet"
}
}
}
"""
DELETE_FIREWALL_ADDRESS = """
{
"path":"/api/v2/cmdb/firewall/address/$name",
"method": "DELETE",
"body": {
#if $varExists('vdom')
"vdom": "$vdom",
#else
"vdom": "root",
#end if
"name": "address"
}
}
"""
GET_FIREWALL_ADDRESS = """
{
#if $varExists('name')
#if $varExists('vdom')
"path": "/api/v2/cmdb/firewall/address/$name/?vdom=$vdom",
#else
"path": "/api/v2/cmdb/firewall/address/$name/",
#end if
#else
#if $varExists('vdom')
"path": "/api/v2/cmdb/firewall/address/?vdom=$vdom",
#else
"path": "/api/v2/cmdb/firewall/address/",
#end if
#end if
"method": "GET"
}
"""
ADD_FIREWALL_ADDRGRP = """
{
"path":"/api/v2/cmdb/firewall/addrgrp/",
"method": "POST",
"body": {
#if $varExists('vdom')
"vdom": "$vdom",
#else
"vdom": "root",
#end if
"name": "addrgrp",
"json": {
"name": "$name",
"member": [
#for $member in $members[:-1]
{
"name": "$member"
},
#end for
{
"name": "$members[-1]"
}
]
}
}
}
"""
SET_FIREWALL_ADDRGRP = """
{
"path": "/api/v2/cmdb/firewall/addrgrp/$name/",
"method": "PUT",
"body": {
#if $varExists('vdom')
"vdom": "$vdom",
#else
"vdom": "root",
#end if
"json": {
"member": [
#for $member in $members[:-1]
{
"name": "$member"
},
#end for
{
"name": "$members[-1]"
}
]
}
}
}
"""
DELETE_FIREWALL_ADDRGRP = """
{
"path":"/api/v2/cmdb/firewall/addrgrp/$name",
"method": "DELETE",
"body": {
#if $varExists('vdom')
"vdom": "$vdom",
#else
"vdom": "root",
#end if
"name": "addrgrp"
}
}
"""
GET_FIREWALL_ADDRGRP = """
{
#if $varExists('name')
#if $varExists('vdom')
"path": "/api/v2/cmdb/firewall/addrgrp/$name/?vdom=$vdom",
#else
"path": "/api/v2/cmdb/firewall/addrgrp/$name/",
#end if
#else
#if $varExists('vdom')
"path": "/api/v2/cmdb/firewall/addrgrp/?vdom=$vdom",
#else
"path": "/api/v2/cmdb/firewall/addrgrp/",
#end if
#end if
"method": "GET"
}
"""
|
from boto.ec2 import connect_to_region
from sys import path
from os import environ
path.append('%s/programs/lib' % environ['HOME'])
from AwsConfigMFA import AwsConfigMFA
class InstanceByTag():
"""
Do things with instances defined by AWS tags.
"""
def __init__(self, profile, region='us-east-1', tag=None, value=None, key_name=None):
"""
@param profile Profile defined in your AWS config file
(Usually the file defined by the environment
                          variable $AWS_CONFIG_FILE). If you omit tag and
                          value parameters, it will collect all instances
that pertain to the profile.
@param region The region to connect to.
@param tag The tag name to search on. If you omit value
it will return all instances with this tag.
@param value The value of the specified tag. Specifying this
parameter should collect all instances where
tag = value.
        @param key_name Filter results to instances launched with the given key pair name(s)
@var self.instances A list of instances that can be queried in
other class functions.
"""
config = AwsConfigMFA()
creds = config.getTokenCredentials(profile)
try:
conn = connect_to_region(region,
aws_access_key_id=creds['aws_access_key_id'],
aws_secret_access_key=creds['aws_secret_access_key'],
security_token = creds['security_token'])
except Exception,e:
print "Failed to connect!"
print e.message
try:
self.instances = []
instances = conn.get_only_instances()
if not hasattr(key_name, '__iter__'):
if hasattr(key_name, 'join'):
key_name = [key_name]
else:
# Invalid key_name. Just won't say anything to the user =P
key_name = None
if key_name:
# Return only the instances with key_name(s)
instances = self.__parse_by_key_names__(instances, key_name)
if tag:
for x in instances:
if x.tags.has_key(tag):
if value:
if x.tags[tag] == value:
self.instances.append(x)
else:
self.instances.append(x)
else:
self.instances = instances
except Exception,e:
print e
def __parse_by_key_names__(self, instances, key_names):
rtn = []
for x in instances:
if x.key_name in key_names:
rtn.append(x)
return rtn
def private_ips(self, key_name = None):
l = []
for x in self.instances:
if x.private_ip_address:
if key_name:
if x.key_name == key_name:
l.append(x.private_ip_address)
else:
l.append(x.private_ip_address)
return l
def instance_by_ip(self, private_ip):
rtn = None
for x in self.instances:
if x.private_ip_address == private_ip:
rtn = x
return rtn
def role_by_ip(self, ip):
rtn = None
for x in self.instances:
if x.private_ip_address == ip:
rtn = x.tags['role']
return rtn
if __name__ == "__main__":
s = InstanceByTag('dev', 'us-east-1', 'role', 'public_api')
print ' '.join(s.private_ips())
|
import os
import csv
import re
import sys
import msvcrt
int_total10g = 0
int_total1g = 0
def connect(str_hostname,str_username,str_password):
# Declaring global variables to count total ports
global int_total10g
global int_total1g
    # Resetting local variables that count available ports for this host
int_port10g = 0
int_port1g = 0
# Connect to the session
str_cmd = "/SSH2 /L %s /PASSWORD %s /C 3DES /M SHA1 %s" % (str_username, str_password, str_hostname+".rns.fg.rbc.com")
crt.Session.Connect(str_cmd)
crt.Screen.Synchronous = True
# Create two csv worksheets to record to
file_10g = open("intdesc_both_10g.csv", 'ab')
csvws_10g = csv.writer(file_10g)
file_1g = open("intdesc_both_1g.csv", 'ab')
csvws_1g = csv.writer(file_1g)
# Write the hostname on the documents
csvws_10g.writerow([str_hostname])
csvws_1g.writerow([str_hostname])
crt.Screen.WaitForString("#")
# Send the initial command to run and wait for the first linefeed
crt.Screen.Send("sh int desc\r")
crt.Screen.WaitForString("\n")
# Create an array of strings to wait for.
list_str_waitfor = ["\n", str_hostname+"#"]
while True:
# Send a return to advance the chart
crt.Screen.Send(" ")
# Wait for the linefeed at the end of each line, or the shell
# prompt that indicates we're done.
int_result = crt.Screen.WaitForStrings( list_str_waitfor )
# If see a prompt, then done
if int_result == 2:
break
# Fetch current row and read the characters from the screen
int_screenrow = crt.Screen.CurrentRow - 1
str_readline = crt.Screen.Get(int_screenrow, 1, int_screenrow, 140)
# Split the line by whitespace, and only take the port name and its description
list_str_items = re.split(r'\s{2,}',str_readline.strip())
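        # A row of "show interface description" is assumed to have four
        # whitespace-separated columns (roughly: port, type, speed, description),
        # so item 0 is the port name and item 3, when present, is the description.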
str_port = list_str_items[0]
try:
str_desc = list_str_items[3]
except IndexError:
str_desc = ''
# Match port names to differentiate between 10g and 1g ports
# Write the name of the port and its description to the document
if re.match(r'Eth\d{2}/',str_port) and re.match(r'V,', str_desc, flags=re.IGNORECASE):
csvws_10g.writerow([str_port,str_desc])
int_total10g += 1
int_port10g += 1
elif re.match(r'Eth1\d{2}/',str_port) and re.match(r'V,', str_desc, flags=re.IGNORECASE):
csvws_1g.writerow([str_port,str_desc])
int_total1g += 1
int_port1g += 1
# Write the number of available ports on the document
csvws_10g.writerow([str(int_port10g)+" ports available for "+str_hostname])
csvws_1g.writerow([str(int_port1g)+" ports available for "+str_hostname])
csvws_10g.writerow([])
csvws_1g.writerow([])
# Close files and disconnect from the session
file_10g.close()
file_1g.close()
crt.Screen.Synchronous = False
crt.Session.Disconnect()
def main():
# Get login information
str_usr = crt.Dialog.Prompt("Enter username: ", "Server Farm Login", "", False)
if str_usr == "": pass
str_pwd = crt.Dialog.Prompt("Enter password: ", "Server Farm Login", "", True)
if str_pwd == "": pass
# Delete any existing versions of the worksheet, otherwise this program will append to them
try:
os.remove("intdesc_both_10g.csv")
except Exception:
pass
try:
os.remove("intdesc_both_1g.csv")
except Exception:
pass
# Iterate thru each of the hosts, recording the available ports
for i in range(1,7):
connect("GCCSFRMs"+str(i).zfill(2),str_usr,str_pwd)
for i in range(1,13):
connect("SCCSFRMs"+str(i).zfill(2),str_usr,str_pwd)
    # Done, display total port availability
crt.Dialog.MessageBox("Results:\n--------\n10G: "+str(int_total10g)+" available ports\n1G: "+str(int_total1g)+" available ports", "Final Report", 65)
# And write those totals on the documents
file_10g = open("intdesc_both_10g.csv", 'ab')
file_1g = open("intdesc_both_1g.csv", 'ab')
file_10g.write("Total available ports: "+str(int_total10g))
file_1g.write("Total available ports: "+str(int_total1g))
file_10g.close()
file_1g.close()
if __name__ == '__builtin__':
main()
elif __name__ == '__main__':
print "This program must be run in SecureCRT"
print "Open SecureCRT, go to Script > Run... , then select this file"
print "("+sys.argv[0]+")"
print
print "Press any key to exit",
msvcrt.getch()
|
from litex.build.generic_platform import Pins, Subsignal, IOStandard, Misc
from litex.build.xilinx import XilinxPlatform
from litex.build.openocd import OpenOCD
_io = [
# Clk / Rst
("clk50", 0, Pins("N11"), IOStandard("LVCMOS33")),
# The core board does not have a USB serial on it,
# so you will have to attach an USB to serial adapter
# on these pins
("gpio_serial", 0,
Subsignal("tx", Pins("J2:7")),
Subsignal("rx", Pins("J2:8")),
IOStandard("LVCMOS33")
),
# SPIFlash
# MT25QL128
("spiflash4x", 0, # clock needs to be accessed through STARTUPE2
Subsignal("cs_n", Pins("L12")),
Subsignal("clk", Pins("E8")),
Subsignal("dq", Pins("J13", "J14", "K15", "K16")),
IOStandard("LVCMOS33")
),
# DDR3 SDRAM
# MT41J128M16JT-125K
("ddram", 0,
Subsignal("a", Pins("B14 C8 A14 C14 C9 B10 D9 A12 D8 A13 B12 A9 A8 B11"),
IOStandard("SSTL135")),
Subsignal("ba", Pins("C16 A15 B15"), IOStandard("SSTL135")),
Subsignal("ras_n", Pins("B16"), IOStandard("SSTL135")),
Subsignal("cas_n", Pins("C11"), IOStandard("SSTL135")),
Subsignal("we_n", Pins("C12"), IOStandard("SSTL135")),
# cs_n is hardwired on the board
#Subsignal("cs_n", Pins("-"), IOStandard("SSTL135")),
Subsignal("dm", Pins("F12 H11"), IOStandard("SSTL135")),
Subsignal("dq", Pins(
"F15 F13 E16 D11 E12 E13 D16 E11",
"G12 J16 G16 J15 H14 H12 H16 H13"),
IOStandard("SSTL135"),
Misc("IN_TERM=UNTUNED_SPLIT_40")),
Subsignal("dqs_p", Pins("D14 G14"),
IOStandard("DIFF_SSTL135"),
Misc("IN_TERM=UNTUNED_SPLIT_40")),
Subsignal("dqs_n", Pins("D15 F14"),
IOStandard("DIFF_SSTL135"),
Misc("IN_TERM=UNTUNED_SPLIT_40")),
Subsignal("clk_p", Pins("B9"), IOStandard("DIFF_SSTL135")),
Subsignal("clk_n", Pins("A10"), IOStandard("DIFF_SSTL135")),
Subsignal("cke", Pins("D13"), IOStandard("SSTL135")),
Subsignal("odt", Pins("C13"), IOStandard("SSTL135")),
Subsignal("reset_n", Pins("E15"), IOStandard("SSTL135")),
Misc("SLEW=FAST"),
),
]
_connectors = [
("J2", {
# odd row even row
7: "M12", 8: "N13",
9: "N14", 10: "N16",
11: "P15", 12: "P16",
13: "R15", 14: "R16",
15: "T14", 16: "T15",
17: "P13", 18: "P14",
19: "T13", 20: "R13",
21: "T12", 22: "R12",
23: "L13", 24: "N12",
25: "K12", 26: "K13",
27: "P10", 28: "P11",
29: "N9", 30: "P9",
31: "T10", 32: "R11",
33: "T9", 34: "R10",
35: "T8", 36: "R8",
37: "T7", 38: "R7",
39: "T5", 40: "R6",
41: "P6", 42: "R5",
43: "N6", 44: "M6",
45: "L5", 46: "P5",
47: "T4", 48: "T3",
49: "R3", 50: "T2",
51: "R2", 52: "R1",
53: "M5", 54: "N4",
55: "P4", 56: "P3",
57: "N1", 58: "P1",
59: "M2", 60: "M1",
}),
("J3", {
# odd row even row
7: "B7", 8: "A7",
9: "B6", 10: "B5",
11: "E6", 12: "K5",
13: "J5", 14: "J4",
15: "G5", 16: "G4",
17: "C7", 18: "C6",
19: "D6", 20: "D5",
21: "A5", 22: "A4",
23: "B4", 24: "A3",
25: "D4", 26: "C4",
27: "C3", 28: "C2",
29: "B2", 30: "A2",
31: "C1", 32: "B1",
33: "E2", 34: "D1",
35: "E3", 36: "D3",
37: "F5", 38: "E5",
39: "F2", 40: "E1",
41: "F4", 42: "F3",
43: "G2", 44: "G1",
45: "H2", 46: "H1",
47: "K1", 48: "J1",
49: "L3", 50: "L2",
51: "H5", 52: "H4",
53: "J3", 54: "H3",
55: "K3", 56: "K2",
57: "L4", 58: "M4",
59: "N3", 60: "N2",
})
]
class Platform(XilinxPlatform):
default_clk_name = "clk50"
default_clk_period = 1e9/50e6
# these resources conflict with daughterboard resources
# so they are only used if the daughterboard is not present
core_resources = [
("user_led", 0, Pins("E6"), IOStandard("LVCMOS33")),
("cpu_reset", 0, Pins("K5"), IOStandard("LVCMOS33")),
]
def __init__(self, toolchain="vivado", with_daughterboard=False):
device = "xc7a35tftg256-1"
io = _io
connectors = _connectors
if with_daughterboard:
from litex_boards.platforms.qmtech_daughterboard import QMTechDaughterboard
daughterboard = QMTechDaughterboard(IOStandard("LVCMOS33"))
io += daughterboard.io
connectors += daughterboard.connectors
else:
io += self.core_resources
XilinxPlatform.__init__(self, device, io, connectors, toolchain=toolchain)
self.toolchain.bitstream_commands = \
["set_property BITSTREAM.CONFIG.SPI_BUSWIDTH 4 [current_design]"]
self.toolchain.additional_commands = \
["write_cfgmem -force -format bin -interface spix4 -size 16 "
"-loadbit \"up 0x0 {build_name}.bit\" -file {build_name}.bin"]
self.add_platform_command("set_property INTERNAL_VREF 0.675 [get_iobanks 15]")
self.add_platform_command("set_property CFGBVS VCCO [current_design]")
self.add_platform_command("set_property CONFIG_VOLTAGE 3.3 [current_design]")
self.toolchain.symbiflow_device = device
def create_programmer(self):
bscan_spi = "bscan_spi_xc7a35t.bit"
return OpenOCD("openocd_xc7_ft2232.cfg", bscan_spi)
def do_finalize(self, fragment):
XilinxPlatform.do_finalize(self, fragment)
self.add_period_constraint(self.lookup_request("clk50", loose=True), 1e9/50e6)
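# A minimal usage sketch (illustrative only, not a prescribed flow): how this
# Platform class might be driven from a LiteX target script.
#
# if __name__ == "__main__":
#     platform = Platform(toolchain="vivado", with_daughterboard=False)
#     programmer = platform.create_programmer()  # OpenOCD with the bscan_spi proxy bitstream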
|
cars = 100
space_in_a_car = 4
drivers = 30
passengers = 90
cars_not_driven = cars - drivers
cars_driven = drivers
carpool_capacity = cars_driven * space_in_a_car
average_passengers_per_car = passengers / cars_driven
print "There are", cars, "cars available."
print "There are only", drivers, "drivers available."
print "There will be", cars_not_driven, "empty cars today."
print "We can transport", carpool_capacity, "people today."
print "We have", passengers, "to carpool today."
print "We need to put about", average_passengers_per_car, "in each car."
|
"""Some sample datasets"""
from __future__ import division
import os
import numpy as np
from scipy import ndimage
from sklearn.utils import check_random_state
import collections
def get_megaman_image(factor=1):
"""Return an RGBA representation of the megaman icon"""
imfile = os.path.join(os.path.dirname(__file__), 'megaman.png')
data = ndimage.imread(imfile) / 255
if factor > 1:
data = data.repeat(factor, axis=0).repeat(factor, axis=1)
return data
def generate_megaman_data(sampling=2):
"""Generate 2D point data of the megaman image"""
data = get_megaman_image()
x = np.arange(sampling * data.shape[1]) / float(sampling)
y = np.arange(sampling * data.shape[0]) / float(sampling)
X, Y = map(np.ravel, np.meshgrid(x, y))
C = data[np.floor(Y.max() - Y).astype(int),
np.floor(X).astype(int)]
return np.vstack([X, Y]).T, C
def _make_S_curve(x, range=(-0.75, 0.75)):
"""Make a 2D S-curve from a 1D vector"""
assert x.ndim == 1
x = x - x.min()
theta = 2 * np.pi * (range[0] + (range[1] - range[0]) * x / x.max())
X = np.empty((x.shape[0], 2), dtype=float)
X[:, 0] = np.sign(theta) * (1 - np.cos(theta))
X[:, 1] = np.sin(theta)
X *= x.max() / (2 * np.pi * (range[1] - range[0]))
return X
def generate_megaman_manifold(sampling=2, nfolds=2,
rotate=True, random_state=None):
"""Generate a manifold of the megaman data"""
X, c = generate_megaman_data(sampling)
for i in range(nfolds):
X = np.hstack([_make_S_curve(x) for x in X.T])
if rotate:
rand = check_random_state(random_state)
R = rand.randn(X.shape[1], X.shape[1])
U, s, VT = np.linalg.svd(R)
X = np.dot(X, U)
return X, c
def generate_noisefree_hourglass(n_size, scaling_factor=1.75, seed=None):
if seed is not None:
np.random.seed(seed)
fz = lambda z: -4*z**4 + 4*z**2 + 1
X = np.random.normal(0,1,[n_size,3])
sphere = X / np.linalg.norm(X,axis=1)[:,None]
r = np.linalg.norm(sphere,axis=1)
x,y,z = sphere.T
theta = np.arctan2(y,x)
phi = np.arccos(z/r)
r_hour = fz(z)
theta_hour = theta
z_hour = z
phi_hour = np.arccos(z_hour/r_hour)
x_hour = r_hour*np.cos(theta_hour)*np.sin(phi_hour)
y_hour = r_hour*np.sin(theta_hour)*np.sin(phi_hour)
z_hour = r_hour*np.cos(phi_hour)
x_hour *= 0.5
y_hour *= 0.5
hourglass = np.vstack((x_hour,y_hour,z_hour)).T
hourglass *= scaling_factor
return hourglass
def _generate_noises(sigmas, size, dimensions, seed=None):
if seed is not None:
np.random.seed(seed)
if isinstance(sigmas, (collections.Sequence, np.ndarray)):
assert len(sigmas) == dimensions, \
'The size of sigmas should be the same as noises dimensions'
return np.random.multivariate_normal(np.zeros(dimensions),
np.diag(sigmas), size)
else:
return np.random.normal(0,sigmas,[size,dimensions])
def _add_noises_on_primary_dimensions(data,sigmas=0.1,seed=None):
size,dim = data.shape
    noises = _generate_noises(sigmas,size,dim,seed)
return data + noises
def _add_noises_on_additional_dimensions(data,addition_dims,sigmas=1,seed=None):
if addition_dims == 0:
return data
else:
        noises = _generate_noises(sigmas,data.shape[0],addition_dims,seed)
return np.hstack((data,noises))
def generate_noisy_hourglass(size, sigma_primary=0.05, addition_dims=0,
sigma_additional=0.1, scaling_factor=1.75, seed=None):
hourglass = generate_noisefree_hourglass(size, scaling_factor, seed)
hourglass = _add_noises_on_primary_dimensions(hourglass, sigma_primary)
hourglass = _add_noises_on_additional_dimensions(hourglass, addition_dims,
sigma_additional)
return hourglass
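# A minimal usage sketch (illustrative only): how the generators above might be
# called. The sizes and noise levels are arbitrary, and the megaman examples
# assume 'megaman.png' sits next to this file, as get_megaman_image() expects.
#
# if __name__ == '__main__':
#     points, colors = generate_megaman_data(sampling=2)
#     folded, c = generate_megaman_manifold(sampling=2, nfolds=2, random_state=0)
#     cloud = generate_noisy_hourglass(1000, sigma_primary=0.05, addition_dims=2)
#     print(points.shape, folded.shape, cloud.shape)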
|
"""
Default settings for the ``mezzanine.forms`` app. Each of these can be
overridden in your project's settings module, just like regular
Django settings. The ``editable`` argument for each controls whether
the setting is editable via Django's admin.
Thought should be given to how a setting is actually used before
making it editable, as it may be inappropriate - for example settings
that are only read during startup shouldn't be editable, since changing
them would require an application reload.
"""
from __future__ import unicode_literals
from django.utils.translation import ugettext_lazy as _
from mezzanine.conf import register_setting
register_setting(
name="FORMS_FIELD_MAX_LENGTH",
description=_("Max length allowed for field values in the forms app."),
editable=False,
default=2000,
)
register_setting(
name="FORMS_LABEL_MAX_LENGTH",
description=_("Max length allowed for field labels in the forms app."),
editable=False,
default=200,
)
register_setting(
name="FORMS_CSV_DELIMITER",
description=_("Char to use as a field delimiter when exporting form "
"responses as CSV."),
editable=False,
default=",",
)
register_setting(
name="FORMS_UPLOAD_ROOT",
description=_("Absolute path for storing file uploads for the forms app."),
editable=False,
default="",
)
register_setting(
name="FORMS_EXTRA_FIELDS",
description=_("Extra field types for the forms app. Should contain a "
"sequence of three-item sequences, each containing the ID, dotted "
"import path for the field class, and field name, for each custom "
"field type. The ID is simply a numeric constant for the field, "
"but cannot be a value already used, so choose a high number such "
"as 100 or greater to avoid conflicts."),
editable=False,
default=(),
)
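# A sketch (illustrative only) of how FORMS_EXTRA_FIELDS could be overridden in
# a project's settings module, following the three-item format described above.
# The ID (100) and the chosen field class are assumptions, not a recommendation.
#
# FORMS_EXTRA_FIELDS = (
#     (100, "django.forms.IntegerField", "Number"),
# )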
|
import re
from uuid import uuid4
try:
import xml.etree.cElementTree as ET
except ImportError:
import xml.etree.ElementTree as ET
from streamlink.plugin import Plugin
from streamlink.plugin.api import http
from streamlink.stream import RTMPStream, HDSStream
from streamlink.compat import urlparse, unquote
ITV_PLAYER_USER_AGENT = 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/32.0.1700.107 Safari/537.36'
LIVE_SWF_URL = "http://www.itv.com/mediaplayer/ITVMediaPlayer.swf"
ONDEMAND_SWF_URL = "http://www.itv.com/mercury/Mercury_VideoPlayer.swf"
CHANNEL_MAP = {'itv': 1, 'itv2': 2, 'itv3': 3, 'itv4': 4, 'itvbe': 8, 'citv': 7}
_url_re = re.compile(r"http(?:s)?://(?:www.)?itv.com/hub/(?P<stream>.+)")
class ITVPlayer(Plugin):
def __init__(self, url):
Plugin.__init__(self, url)
match = _url_re.match(url)
self._stream = match and match.groupdict()["stream"]
@classmethod
    def can_handle_url(cls, url):
        match = _url_re.match(url)
        return match
@property
def channel_id(self):
if self._stream in CHANNEL_MAP:
return CHANNEL_MAP[self._stream]
@property
def production_id(self):
if self._stream not in CHANNEL_MAP:
res = http.get(self.url, verify=False)
production_id_match = re.findall(r"&productionId=(.*?)['&\"]", res.text, flags=re.DOTALL)
if production_id_match:
return unquote(production_id_match[0])
else:
self.logger.error(u"No production ID found, has the page layout changed?")
def _get_streams(self):
"""
Find all the streams for the ITV url
:return: Mapping of quality to stream
"""
soap_message = self._soap_request()
headers = {'Content-Length': '{0:d}'.format(len(soap_message)),
'Content-Type': 'text/xml; charset=utf-8',
'Host': 'mercury.itv.com',
'Origin': 'http://www.itv.com',
'Referer': 'http://www.itv.com/Mercury/Mercury_VideoPlayer.swf?v=null',
'SOAPAction': "http://tempuri.org/PlaylistService/GetPlaylist",
'User-Agent': ITV_PLAYER_USER_AGENT,
"X-Requested-With": "ShockwaveFlash/16.0.0.305"}
res = http.post("http://mercury.itv.com/PlaylistService.svc?wsdl",
headers=headers,
data=soap_message)
# Parse XML
xmldoc = http.xml(res)
# Check that geo region has been accepted
faultcode = xmldoc.find('.//faultcode')
if faultcode is not None:
if 'InvalidGeoRegion' in faultcode.text:
self.logger.error('Failed to retrieve playlist data '
'(invalid geo region)')
return None
# Look for <MediaFiles> tag (RTMP streams)
mediafiles = xmldoc.find('.//VideoEntries//MediaFiles')
# Look for <ManifestFile> tag (HDS streams)
manifestfile = xmldoc.find('.//VideoEntries//ManifestFile')
# No streams
if not mediafiles and not manifestfile:
return None
streams = {}
# Proxy not needed for media retrieval (Note: probably better to use flag)
# for x in ('http', 'https'):
# if x in http.proxies:
# http.proxies.pop(x);
# Parse RTMP streams
if mediafiles:
rtmp = mediafiles.attrib['base']
for mediafile in mediafiles.findall("MediaFile"):
playpath = mediafile.find("URL").text
rtmp_url = urlparse(rtmp)
app = (rtmp_url.path[1:] + '?' + rtmp_url.query).rstrip('?')
live = app == "live"
params = dict(rtmp="{u.scheme}://{u.netloc}{u.path}".format(u=rtmp_url),
app=app.rstrip('?'),
playpath=playpath,
swfVfy=LIVE_SWF_URL if live else ONDEMAND_SWF_URL,
timeout=10)
if live:
params['live'] = True
bitrate = int(mediafile.attrib['bitrate']) / 1000
quality = "{0:d}k".format(int(bitrate))
streams[quality] = RTMPStream(self.session, params)
# Parse HDS streams
if manifestfile:
url = manifestfile.find('URL').text
if urlparse(url).path.endswith('f4m'):
streams.update(
HDSStream.parse_manifest(self.session, url, pvswf=LIVE_SWF_URL)
)
return streams
def _soap_request(self):
def sub_ns(parent, tag, ns):
return ET.SubElement(parent, "{%s}%s" % (ns, tag))
def sub_common(parent, tag):
return sub_ns(parent, tag, "http://schemas.itv.com/2009/05/Common")
def sub_soap(parent, tag):
return sub_ns(parent, tag, "http://schemas.xmlsoap.org/soap/envelope/")
def sub_item(parent, tag):
return sub_ns(parent, tag, "http://tempuri.org/")
def sub_itv(parent, tag):
return sub_ns(parent, tag, "http://schemas.datacontract.org/2004/07/Itv.BB.Mercury.Common.Types")
production_id = self.production_id
channel_id = self.channel_id
ET.register_namespace("com", "http://schemas.itv.com/2009/05/Common")
ET.register_namespace("soapenv", "http://schemas.xmlsoap.org/soap/envelope/")
ET.register_namespace("tem", "http://tempuri.org/")
ET.register_namespace("itv", "http://schemas.datacontract.org/2004/07/Itv.BB.Mercury.Common.Types")
# Start of XML
root = ET.Element("{http://schemas.xmlsoap.org/soap/envelope/}Envelope")
sub_soap(root, "Header")
body = sub_soap(root, "Body")
# build request
get_playlist = sub_item(body, "GetPlaylist")
request = sub_item(get_playlist, "request")
prode = sub_itv(request, "ProductionId")
if production_id:
# request -> ProductionId
prode.text = production_id
# request -> RequestGuid
sub_itv(request, "RequestGuid").text = str(uuid4()).upper()
vodcrid = sub_itv(request, "Vodcrid")
# request -> Vodcrid -> Id
vod_id = sub_common(vodcrid, "Id")
# request -> Vodcrid -> Partition
sub_common(vodcrid, "Partition").text = "itv.com"
if channel_id:
vod_id.text = "sim{0}".format(channel_id)
# build userinfo
userinfo = sub_item(get_playlist, "userInfo")
sub_itv(userinfo, "Broadcaster").text = "Itv"
sub_itv(userinfo, "RevenueScienceValue").text = "ITVPLAYER.2.18.14.+build.a778cd30ac"
sub_itv(userinfo, "SessionId")
sub_itv(userinfo, "SsoToken")
sub_itv(userinfo, "UserToken")
# GeoLocationToken -> Token
# sub_itv(sub_itv(userinfo, "GeoLocationToken"), "Token")
# build siteinfo
siteinfo = sub_item(get_playlist, "siteInfo")
sub_itv(siteinfo, "AdvertisingRestriction").text = "None"
sub_itv(siteinfo, "AdvertisingSite").text = "ITV"
sub_itv(siteinfo, "AdvertisingType").text = "Any"
sub_itv(siteinfo,
"Area").text = "ITVPLAYER.VIDEO" # "channels.itv{0}".format(channel_id) if channel_id else "ITVPLAYER.VIDEO"
sub_itv(siteinfo, "Category")
sub_itv(siteinfo, "Platform").text = "DotCom"
sub_itv(siteinfo, "Site").text = "ItvCom"
# build deviceInfo
deviceinfo = sub_item(get_playlist, "deviceInfo")
sub_itv(deviceinfo, "ScreenSize").text = "Big"
# build playerinfo
playerinfo = sub_item(get_playlist, "playerInfo")
sub_itv(playerinfo, "Version").text = "2"
return ET.tostring(root)
__plugin__ = ITVPlayer
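# Illustrative URLs matched by _url_re / can_handle_url (the second slug is
# hypothetical):
#
#   ITVPlayer.can_handle_url("http://www.itv.com/hub/itv")        # live channel, in CHANNEL_MAP
#   ITVPlayer.can_handle_url("http://www.itv.com/hub/some-show")  # on-demand production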
|
from . import json_checker
def validate_json_checker(x):
"""
Property: Policy.SecurityServicePolicyData
"""
return json_checker(x)
|
from distutils.core import setup
DESCRIPTION = "General tools for Astronomical Time Series in Python"
LONG_DESCRIPTION = """
nufftpy: Non-Uniform FFT in Python
==================================
This is a pure python implementation of the NUFFT.
For more information, visit http://github.com/jakevdp/nufftpy
"""
NAME = "nufftpy"
AUTHOR = "Jake VanderPlas"
AUTHOR_EMAIL = "jakevdp@uw.edu"
MAINTAINER = "Jake VanderPlas"
MAINTAINER_EMAIL = "jakevdp@uw.edu"
URL = 'http://github.com/jakevdp/nufftpy'
DOWNLOAD_URL = 'http://github.com/jakevdp/nufftpy'
LICENSE = 'BSD 3-clause'
import nufftpy
VERSION = nufftpy.__version__
setup(name=NAME,
version=VERSION,
description=DESCRIPTION,
long_description=LONG_DESCRIPTION,
author=AUTHOR,
author_email=AUTHOR_EMAIL,
maintainer=MAINTAINER,
maintainer_email=MAINTAINER_EMAIL,
url=URL,
download_url=DOWNLOAD_URL,
license=LICENSE,
packages=['nufftpy',
'nufftpy.tests',
],
classifiers=[
'Development Status :: 4 - Beta',
'Environment :: Console',
'Intended Audience :: Science/Research',
'License :: OSI Approved :: BSD License',
'Natural Language :: English',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4'],
)
|
"""aio -- asynchronous IO"""
from __future__ import absolute_import
import socket, ssl, select, errno, logging, fcntl
from tornado.ioloop import IOLoop
__all__ = (
'TCPServer', 'TCPClient', 'SocketError', 'would_block', 'in_progress',
'starttls', 'is_ssl',
'loop', 'start'
)
class TCPServer(object):
"""A non-blocking, single-threaded TCP server. This
implementation is heavily based on the tornado HTTPServer. A
simple echo server is:
import xmpp
from tornado.iostream import IOStream
def echo(socket, address, io):
stream = IOStream(socket, io_loop=io)
def handle(data):
if data == 'goodbye\n':
stream.write('See you later.\n', stream.close)
else:
stream.write('You said: "%s".\n' % data.strip())
loop()
def loop():
stream.read_until("\n", handle)
loop()
server = xmpp.TCPServer(echo).bind('127.0.0.1', '9000')
start([server])
"""
def __init__(self, handler, io=None):
self.handler = handler
self.io = io or loop()
self.socket = None
def stop(self):
if self.socket:
self.io.remove_handler(self.socket.fileno())
self.socket.close()
self.socket = None
return self
def bind(self, addr, port):
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM, 0)
flags = fcntl.fcntl(sock.fileno(), fcntl.F_GETFD)
flags |= fcntl.FD_CLOEXEC
fcntl.fcntl(sock.fileno(), fcntl.F_SETFD, flags)
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
sock.setblocking(0)
sock.bind((addr, int(port)))
sock.listen(128)
self.socket = sock
return self
def start(self):
## Note: the tornado HTTPServer forks a subprocesses to match
## the number of CPU cores. It's probably worthwhile to that
## here too.
self.io.add_handler(
self.socket.fileno(),
self._accept,
self.io.READ
)
return self
def _accept(self, fd, events):
while True:
try:
conn, addr = self.socket.accept()
except SocketError as exc:
if not would_block(exc):
raise
return
try:
conn.setblocking(0)
conn.setsockopt(socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1)
self.handler(conn, addr, self.io)
except:
logging.error(
'TCPServer: conn error (%s)' % (addr,),
exc_info=True
)
self.io.remove_handler(conn.fileno())
conn.close()
class TCPClient(object):
"""A non-blocking TCP client implemented with ioloop. For
example, here is a client that talks to the echo server in the
previous example:
def talk(socket, io):
        stream = IOStream(socket, io_loop=io)
messages = [0]
def handle(data):
stream.write('goodbye\n', stream.close)
stream.read_until("\n", handle)
stream.write('hello!\n')
client = xmpp.TCPClient(talk).connect('127.0.0.1', '9000')
xmpp.start([client])
"""
def __init__(self, handler, io=None):
self.handler = handler
self.io = io or loop()
self.socket = None
self.address = None
def stop(self):
if self.socket:
self.socket.close()
self.socket = None
return self
def connect(self, addr, port):
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM, 0)
sock.setblocking(0)
try:
self.address = (addr, int(port))
sock.connect(self.address)
except SocketError as exc:
if not in_progress(exc):
raise
self.socket = sock
return self
def start(self):
self.io.add_handler(self.socket.fileno(), self._ready, self.io.WRITE)
return self
def _ready(self, fd, events):
try:
self.handler(self.socket, self.address, self.io)
except:
logging.error(
'TCPClient: error while handling WRITE',
exc_info=True
)
self.stop()
SocketError = socket.error
def would_block(exc):
return exc[0] in (errno.EWOULDBLOCK, errno.EAGAIN)
def in_progress(exc):
return exc[0] == errno.EINPROGRESS
def starttls(socket, success=None, failure=None, io=None, **options):
"""Wrap an active socket in an SSL socket."""
## Default Options
options.setdefault('do_handshake_on_connect', False)
options.setdefault('ssl_version', ssl.PROTOCOL_SSLv23)
## Handlers
def done():
"""Handshake finished successfully."""
io.remove_handler(wrapped.fileno())
success and success(wrapped)
def error():
"""The handshake failed."""
if failure:
return failure(wrapped)
## By default, just close the socket.
io.remove_handler(wrapped.fileno())
wrapped.close()
def handshake(fd, events):
"""Handler for SSL handshake negotiation. See Python docs for
ssl.do_handshake()."""
if events & io.ERROR:
error()
return
try:
new_state = io.ERROR
wrapped.do_handshake()
return done()
except ssl.SSLError as exc:
if exc.args[0] == ssl.SSL_ERROR_WANT_READ:
new_state |= io.READ
elif exc.args[0] == ssl.SSL_ERROR_WANT_WRITE:
new_state |= io.WRITE
else:
logging.exception('starttls: caught exception during handshake')
error()
if new_state != state[0]:
state[0] = new_state
io.update_handler(fd, new_state)
## set up handshake state; use a list as a mutable cell.
io = io or loop()
state = [io.ERROR]
## Wrap the socket; swap out handlers.
io.remove_handler(socket.fileno())
wrapped = SSLSocket(socket, **options)
wrapped.setblocking(0)
io.add_handler(wrapped.fileno(), handshake, state[0])
## Begin the handshake.
handshake(wrapped.fileno(), 0)
return wrapped
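## A rough usage sketch (illustrative only): how starttls() might be called
## from a TCPServer handler. `conn` is the accepted non-blocking socket, `io`
## the shared IOLoop, the certificate paths are placeholders, and handle_secure
## is a hypothetical callback. Extra keyword options are passed straight
## through to the SSLSocket wrapper.
##
##     def handler(conn, addr, io):
##         starttls(conn, io=io, server_side=True,
##                  certfile='server.crt', keyfile='server.key',
##                  success=lambda wrapped: handle_secure(wrapped, io))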
def is_ssl(socket):
"""True if socket is an active SSLSocket."""
return bool(getattr(socket, '_sslobj', False))
class SSLSocket(ssl.SSLSocket):
"""Override the send() and recv() methods of SSLSocket to more
closely emulate normal non-blocking socket behavior.
The built-in SSLSocket implementation wraps self.read() and
self.write() in `while True' loops. This makes the socket
effectively blocking even if the socket is set to be non-blocking.
See also: <http://bugs.python.org/issue3890>.
The read() and write() methods may raise SSLErrors that aren't
caught by ioloop handlers. This implementation re-raises
SSL_ERROR_WANT_READ and SSL_ERROR_WANT_WRITE errors as EAGAIN
socket.errors.
"""
def __init__(self, *args, **kwargs):
super(SSLSocket, self).__init__(*args, **kwargs)
## The base socket class overrides these methods; re-override them.
cls = type(self)
self.recv = cls.recv.__get__(self, cls)
self.send = cls.send.__get__(self, cls)
def send(self, data, flags=0):
if not self._sslobj:
            return socket.socket.send(self, data, flags)
elif flags != 0:
raise ValueError(
'%s.send(): non-zero flags not allowed' % self.__class__
)
try:
return self.write(data)
except ssl.SSLError as exc:
if exc.args[0] in (ssl.SSL_ERROR_WANT_WRITE, ssl.SSL_ERROR_WANT_READ):
raise SocketError(errno.EAGAIN)
raise
def recv(self, buflen=1024, flags=0):
if not self._sslobj:
            return socket.socket.recv(self, buflen, flags)
elif flags != 0:
raise ValueError(
'%s.recv(): non-zero flags not allowed' % self.__class__
)
try:
return self.read(buflen)
except ssl.SSLError as exc:
if exc.args[0] == ssl.SSL_ERROR_WANT_READ:
raise SocketError(errno.EAGAIN)
raise
def loop():
if not hasattr(IOLoop, '_instance'):
poll = _KQueue() if hasattr(select, 'kqueue') else None
IOLoop._instance = IOLoop(poll)
return IOLoop._instance
def start(services=(), io=None):
"""Start an event loop. If services are given, start them before
starting the loop and stop them before stopping the loop."""
io = io or loop()
for svc in services:
svc.start()
try:
normal = True
io.start()
except KeyboardInterrupt:
logging.info('Received keyboard interrupt.')
except Exception:
normal = False
logging.info('Shutting down services.')
for svc in services:
try:
svc.stop()
except:
logging.exception('Error while shutting down %r.' % svc)
if normal:
logging.info('Shutting down event loop.')
io.stop()
else:
logging.exception('Uncaught exception')
raise
class _KQueue(object):
"""A kqueue-based event loop for BSD/Mac systems."""
def __init__(self):
self._kqueue = select.kqueue()
self._active = {}
def register(self, fd, events):
self._control(fd, events, select.KQ_EV_ADD)
self._active[fd] = events
def modify(self, fd, events):
self.unregister(fd)
self.register(fd, events)
def unregister(self, fd):
events = self._active.pop(fd)
self._control(fd, events, select.KQ_EV_DELETE)
def _control(self, fd, events, flags):
kevents = []
if events & IOLoop.WRITE:
kevents.append(select.kevent(
fd, filter=select.KQ_FILTER_WRITE, flags=flags))
if events & IOLoop.READ or not kevents:
# Always read when there is not a write
kevents.append(select.kevent(
fd, filter=select.KQ_FILTER_READ, flags=flags))
# Even though control() takes a list, it seems to return EINVAL
# on Mac OS X (10.6) when there is more than one event in the list.
for kevent in kevents:
self._kqueue.control([kevent], 0)
def poll(self, timeout):
kevents = self._kqueue.control(None, 1000, timeout)
events = {}
for kevent in kevents:
fd = kevent.ident
flags = 0
if kevent.filter == select.KQ_FILTER_READ:
events[fd] = events.get(fd, 0) | IOLoop.READ
if kevent.filter == select.KQ_FILTER_WRITE:
events[fd] = events.get(fd, 0) | IOLoop.WRITE
if kevent.flags & select.KQ_EV_ERROR:
events[fd] = events.get(fd, 0) | IOLoop.ERROR
return events.items()
|
"""Calculate the band structure of graphene with Rashba spin-orbit coupling"""
import pybinding as pb
import numpy as np
import matplotlib.pyplot as plt
from math import pi, sqrt
def monolayer_graphene_soc():
"""Return the lattice specification for monolayer graphene with Rashba SOC,
see http://doi.org/10.1103/PhysRevB.95.165415 for reference"""
from pybinding.constants import pauli
from pybinding.repository.graphene import a_cc, a, t
onsite = 0.05 # [eV] onsite energy
rashba = 0.1 # [eV] strength of Rashba SOC
rashba_so = 1j * 2/3 * rashba
# create a lattice with 2 primitive vectors
a1 = np.array([a / 2 * sqrt(3), a / 2])
a2 = np.array([a / 2 * sqrt(3), -a / 2])
lat = pb.Lattice(
a1=a1, a2=a2
)
pos_a = np.array([-a_cc / 2, 0])
pos_b = np.array([+a_cc / 2, 0])
lat.add_sublattices(
('A', pos_a, [[ onsite, 0], [0, onsite]]),
('B', pos_b, [[-onsite, 0], [0, -onsite]]))
# nearest neighbor vectors
d1 = (pos_b - pos_a) / a_cc # [ 0, 0]
d2 = (pos_b - pos_a - a1) / a_cc # [-1, 0]
d3 = (pos_b - pos_a - a2) / a_cc # [ 0, -1]
nn_hopp = np.array([[t, 0], [0, t]]) # nn hopping, same spin
t1 = nn_hopp + rashba_so * (pauli.x * d1[1] - pauli.y * d1[0]) # cross([sx , sy], [dx, dy])
t2 = nn_hopp + rashba_so * (pauli.x * d2[1] - pauli.y * d2[0])
t3 = nn_hopp + rashba_so * (pauli.x * d3[1] - pauli.y * d3[0])
    # hoppings: (relative unit cell index, from sublattice, to sublattice, hopping matrix)
lat.add_hoppings(
([0, 0], 'A', 'B', t1),
([-1, 0], 'A', 'B', t2),
([0, -1], 'A', 'B', t3)
)
return lat
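# Side note, worked out from the expressions above: for the first neighbour
# d1 = (1, 0) the hopping reduces to
#   t1 = t * I + rashba_so * (sigma_x * 0 - sigma_y * 1)
#      = t * I - 1j * (2/3) * rashba * sigma_y,
# i.e. the usual spin-conserving hopping t plus a spin-flip Rashba term of
# strength 2*rashba/3; t2 and t3 follow the same pattern with d2 and d3.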
lattice = monolayer_graphene_soc()
lattice.plot()
plt.show()
lattice.plot_brillouin_zone()
plt.show()
model = pb.Model(lattice, pb.translational_symmetry())
solver = pb.solver.lapack(model)
k_points = model.lattice.brillouin_zone()
Gamma = [0, 0]
K1 = k_points[0]
K2 = k_points[2]
M = (k_points[0] + k_points[1]) / 2
bands = solver.calc_bands(K1, Gamma, M, K2)
bands.plot(point_labels=['K', r'$\Gamma$', 'M', 'K'])
plt.show()
|
from django.conf import settings
from django.test import TestCase
from unittest import SkipTest, skipIf
from django.db import connection
from django.contrib.gis.geos import MultiLineString, LineString, Point
from django.utils import translation
from geotrek.core.models import Path, Topology
from geotrek.core.factories import TopologyFactory
from geotrek.altimetry.helpers import AltimetryHelper
class ElevationTest(TestCase):
def setUp(self):
# Create a simple fake DEM
with connection.cursor() as cur:
cur.execute('INSERT INTO altimetry_dem (rast) VALUES (ST_MakeEmptyRaster(100, 125, 0, 125, 25, -25, 0, 0, %s))', [settings.SRID])
cur.execute('UPDATE altimetry_dem SET rast = ST_AddBand(rast, \'16BSI\')')
demvalues = [[0, 0, 3, 5], [2, 2, 10, 15], [5, 15, 20, 25], [20, 25, 30, 35], [30, 35, 40, 45]]
for y in range(0, 5):
for x in range(0, 4):
cur.execute('UPDATE altimetry_dem SET rast = ST_SetValue(rast, %s, %s, %s::float)', [x + 1, y + 1, demvalues[y][x]])
if settings.TREKKING_TOPOLOGY_ENABLED:
self.path = Path.objects.create(geom=LineString((78, 117), (3, 17)))
@skipIf(not settings.TREKKING_TOPOLOGY_ENABLED, 'Test with dynamic segmentation only')
def test_elevation_path(self):
self.assertEqual(self.path.ascent, 16)
self.assertEqual(self.path.descent, 0)
self.assertEqual(self.path.min_elevation, 6)
self.assertEqual(self.path.max_elevation, 22)
self.assertEqual(len(self.path.geom_3d.coords), 7)
@skipIf(not settings.TREKKING_TOPOLOGY_ENABLED, 'Test with dynamic segmentation only')
def test_elevation_profile(self):
profile = self.path.get_elevation_profile()
self.assertAlmostEqual(len(profile), 7)
self.assertAlmostEqual(profile[0][0], 0.0)
self.assertAlmostEqual(profile[-1][0], 125.0)
self.assertAlmostEqual(profile[0][3], 6.0)
self.assertAlmostEqual(profile[1][3], 8.0)
self.assertAlmostEqual(profile[2][3], 10.0)
self.assertAlmostEqual(profile[3][3], 13.0)
self.assertAlmostEqual(profile[4][3], 18.0)
self.assertAlmostEqual(profile[5][3], 20.0)
self.assertAlmostEqual(profile[6][3], 22.0)
@skipIf(not settings.TREKKING_TOPOLOGY_ENABLED, 'Test with dynamic segmentation only')
def test_elevation_limits(self):
limits = self.path.get_elevation_limits()
self.assertEqual(limits[0], 1106)
self.assertEqual(limits[1], -94)
@skipIf(not settings.TREKKING_TOPOLOGY_ENABLED, 'Test with dynamic segmentation only')
def test_elevation_topology_line(self):
topo = TopologyFactory.create(paths=[(self.path, 0.2, 0.8)])
topo.save()
topo.get_elevation_profile()
self.assertEqual(topo.ascent, 7)
self.assertEqual(topo.descent, 0)
self.assertEqual(topo.min_elevation, 10)
self.assertEqual(topo.max_elevation, 17)
self.assertEqual(len(topo.geom_3d.coords), 5)
@skipIf(settings.TREKKING_TOPOLOGY_ENABLED, 'Test without dynamic segmentation only')
def test_elevation_topology_line_nds(self):
"""
        No obvious reason for these changed expected values.
"""
topo = TopologyFactory.create(geom="SRID=2154;LINESTRING(63 97, 18 37)")
topo.get_elevation_profile()
self.assertEqual(topo.ascent, 5)
self.assertEqual(topo.descent, 0)
self.assertEqual(topo.min_elevation, 12)
self.assertEqual(topo.max_elevation, 17)
self.assertEqual(len(topo.geom_3d.coords), 5)
@skipIf(settings.TREKKING_TOPOLOGY_ENABLED, 'Test without dynamic segmentation only')
def test_elevation_topology_point(self):
topo = TopologyFactory.create(geom="SRID=2154;POINT(33 57)")
self.assertEqual(topo.geom_3d.coords[2], 15)
self.assertEqual(topo.ascent, 0)
self.assertEqual(topo.descent, 0)
self.assertEqual(topo.min_elevation, 15)
self.assertEqual(topo.max_elevation, 15)
@skipIf(not settings.TREKKING_TOPOLOGY_ENABLED, 'Test with dynamic segmentation only')
def test_elevation_topology_point_offset(self):
topo = TopologyFactory.create(paths=[(self.path, 0.5, 0.5)], offset=1)
self.assertEqual(topo.geom_3d.coords[2], 15)
self.assertEqual(topo.ascent, 0)
self.assertEqual(topo.descent, 0)
self.assertEqual(topo.min_elevation, 15)
self.assertEqual(topo.max_elevation, 15)
def test_elevation_topology_outside_dem(self):
if settings.TREKKING_TOPOLOGY_ENABLED:
outside_path = Path.objects.create(geom=LineString((200, 200), (300, 300)))
topo = TopologyFactory.create(paths=[(outside_path, 0.5, 0.5)])
else:
topo = TopologyFactory.create(geom="SRID=2154;POINT(250 250)")
self.assertEqual(topo.geom_3d.coords[2], 0)
self.assertEqual(topo.ascent, 0)
self.assertEqual(topo.descent, 0)
self.assertEqual(topo.min_elevation, 0)
self.assertEqual(topo.max_elevation, 0)
class ElevationProfileTest(TestCase):
def test_elevation_profile_multilinestring(self):
geom = MultiLineString(LineString((1.5, 2.5, 8), (2.5, 2.5, 10)),
LineString((2.5, 2.5, 6), (2.5, 0, 7)),
srid=settings.SRID)
profile = AltimetryHelper.elevation_profile(geom)
self.assertEqual(len(profile), 4)
def test_elevation_profile_point(self):
geom = Point(1.5, 2.5, 8, srid=settings.SRID)
profile = AltimetryHelper.elevation_profile(geom)
self.assertEqual(profile, [[0, 1.5, 2.5, 8.0]])
def test_elevation_svg_output(self):
geom = LineString((1.5, 2.5, 8), (2.5, 2.5, 10),
srid=settings.SRID)
profile = AltimetryHelper.elevation_profile(geom)
language = translation.get_language()
svg = AltimetryHelper.profile_svg(profile, language)
self.assertIn('Generated with pygal'.encode(), svg)
self.assertIn(settings.ALTIMETRIC_PROFILE_BACKGROUND.encode(), svg)
self.assertIn(settings.ALTIMETRIC_PROFILE_COLOR.encode(), svg)
def test_elevation_altimetry_limits(self):
geom = LineString((1.5, 2.5, 8), (2.5, 2.5, 10),
srid=settings.SRID)
profile = AltimetryHelper.elevation_profile(geom)
limits = AltimetryHelper.altimetry_limits(profile)
self.assertEqual(limits[0], 1108)
self.assertEqual(limits[1], -92)
class AreaTestCase(TestCase):
def _fill_raster(self):
with connection.cursor() as cur:
cur.execute('INSERT INTO altimetry_dem (rast) VALUES (ST_MakeEmptyRaster(100, 125, 0, 125, 25, -25, 0, 0, %s))', [settings.SRID])
cur.execute('UPDATE altimetry_dem SET rast = ST_AddBand(rast, \'16BSI\')')
demvalues = [[0, 0, 3, 5], [2, 2, 10, 15], [5, 15, 20, 25], [20, 25, 30, 35], [30, 35, 40, 45]]
for y in range(0, 5):
for x in range(0, 4):
cur.execute('UPDATE altimetry_dem SET rast = ST_SetValue(rast, %s, %s, %s::float)', [x + 1, y + 1, demvalues[y][x]])
class ElevationAreaTest(AreaTestCase):
def setUp(self):
self._fill_raster()
self.geom = LineString((100, 370), (1100, 370), srid=settings.SRID)
self.area = AltimetryHelper.elevation_area(self.geom)
def test_area_has_nice_ratio_if_horizontal(self):
self.assertEqual(self.area['size']['x'], 1300.0)
self.assertEqual(self.area['size']['y'], 800.0)
def test_area_provides_altitudes_as_matrix(self):
self.assertEqual(len(self.area['altitudes']), 33)
self.assertEqual(len(self.area['altitudes'][0]), 53)
self.assertEqual(len(self.area['altitudes'][-1]), 53)
def test_area_provides_resolution(self):
self.assertEqual(self.area['resolution']['x'], 53)
self.assertEqual(self.area['resolution']['y'], 33)
def test_resolution_step_depends_on_geometry_size(self):
self.assertEqual(self.area['resolution']['step'], 25)
geom = LineString((100, 370), (100100, 370), srid=settings.SRID)
area = AltimetryHelper.elevation_area(geom)
self.assertEqual(area['resolution']['step'], 866)
def test_area_provides_center_as_latlng(self):
self.assertAlmostEqual(self.area['center']['lng'], -1.3594758650394245)
self.assertAlmostEqual(self.area['center']['lat'], -5.981351702397734)
def test_area_provides_center_as_xy(self):
self.assertEqual(self.area['center']['x'], 600.0)
self.assertEqual(self.area['center']['y'], 369.0)
def test_area_provides_extent_as_xy(self):
extent = self.area['extent']
self.assertEqual(extent['northwest']['x'], -50.0)
self.assertEqual(extent['northwest']['y'], 769.0)
self.assertEqual(extent['southeast']['x'], 1250.0)
self.assertEqual(extent['southeast']['y'], -31.0)
def test_area_provides_extent_as_latlng(self):
extent = self.area['extent']
self.assertAlmostEqual(extent['northeast']['lat'], -5.9786368380250385)
self.assertAlmostEqual(extent['northeast']['lng'], -1.35556992351484)
self.assertAlmostEqual(extent['southwest']['lat'], -5.9840665893459875)
self.assertAlmostEqual(extent['southwest']['lng'], -1.3633815583740085)
def test_area_provides_altitudes_extent(self):
extent = self.area['extent']
self.assertEqual(extent['altitudes']['max'], 45)
self.assertEqual(extent['altitudes']['min'], 0)
class ElevationOtherGeomAreaTest(AreaTestCase):
def setUp(self):
self._fill_raster()
def test_area_small_geom(self):
geom = LineString((10, 10), (10, 5), srid=settings.SRID)
area = AltimetryHelper.elevation_area(geom)
extent = area['extent']
self.assertEqual(extent['altitudes']['max'], 30)
self.assertEqual(extent['altitudes']['min'], 30)
def test_area_has_nice_ratio_if_vertical(self):
geom = LineString((0, 0), (0, 1000), srid=settings.SRID)
area = AltimetryHelper.elevation_area(geom)
self.assertEqual(area['size']['x'], 800.0)
self.assertEqual(area['size']['y'], 1300.0)
def test_area_has_nice_ratio_if_square_enough(self):
geom = LineString((0, 0), (1000, 1000), srid=settings.SRID)
area = AltimetryHelper.elevation_area(geom)
self.assertEqual(area['size']['x'], 1300.0)
self.assertEqual(area['size']['y'], 1300.0)
@skipIf(settings.TREKKING_TOPOLOGY_ENABLED, 'Test without dynamic segmentation only')
class LengthTest(TestCase):
def setUp(self):
# Create a simple fake DEM
with connection.cursor() as cur:
cur.execute('INSERT INTO altimetry_dem (rast) VALUES (ST_MakeEmptyRaster(100, 125, 0, 125, 25, -25, 0, 0, %s))', [settings.SRID])
cur.execute('UPDATE altimetry_dem SET rast = ST_AddBand(rast, \'16BSI\')')
demvalues = [[0, 0, 3, 5], [2, 2, 10, 15], [5, 15, 20, 25], [20, 25, 30, 35], [30, 35, 40, 45]]
for y in range(0, 5):
for x in range(0, 4):
cur.execute('UPDATE altimetry_dem SET rast = ST_SetValue(rast, %s, %s, %s::float)', [x + 1, y + 1, demvalues[y][x]])
self.path = Path.objects.create(geom=LineString((1, 101), (81, 101), (81, 99)))
def test_2dlength_is_preserved(self):
self.assertEqual(self.path.geom_3d.length, self.path.geom.length)
def test_3dlength(self):
# before smoothing: (1 101 0, 21 101 0, 41 101 0, 61 101 3, 81 101 5, 81 99 15)
# after smoothing: (1 101 0, 21 101 0, 41 101 0, 61 101 1, 81 101 3, 81 99 9)
# length: 20 + 20 + (20 ** 2 + 1) ** .5 + (20 ** 2 + 2 ** 2) ** .5 + (2 ** 2 + 6 ** 2) ** .5
self.assertEqual(round(self.path.length, 9), 83.127128724)
@skipIf(not settings.TREKKING_TOPOLOGY_ENABLED, 'Test with dynamic segmentation only')
class SamplingTestPath(TestCase):
model = Path
step = settings.ALTIMETRIC_PROFILE_PRECISION
def setUp(self):
if self.model is None:
            raise SkipTest()
# Create a fake empty DEM to prevent trigger optimisation to skip sampling
with connection.cursor() as cur:
cur.execute('INSERT INTO altimetry_dem (rast) VALUES (ST_AddBand(ST_MakeEmptyRaster(100, 100, 0, 100, 25, -25, 0, 0, %s), \'16BSI\'))',
[settings.SRID])
def test_0_first(self):
path = self.model.objects.create(geom=LineString((0, 0), (0, 0), (0, 1)))
self.assertEqual(len(path.geom_3d.coords), 3)
def test_0_last(self):
path = self.model.objects.create(geom=LineString((0, 0), (0, 1), (0, 1)))
self.assertEqual(len(path.geom_3d.coords), 3)
def test_1(self):
path = self.model.objects.create(geom=LineString((0, 0), (0, 1)))
self.assertEqual(len(path.geom_3d.coords), 2)
def test_24(self):
path = self.model.objects.create(geom=LineString((0, 0), (0, self.step - 1)))
self.assertEqual(len(path.geom_3d.coords), 2)
def test_25(self):
path = self.model.objects.create(geom=LineString((0, 0), (0, self.step)))
self.assertEqual(len(path.geom_3d.coords), 3)
def test_26(self):
path = self.model.objects.create(geom=LineString((0, 0), (0, self.step + 1)))
self.assertEqual(len(path.geom_3d.coords), 3)
def test_49(self):
path = self.model.objects.create(geom=LineString((0, 0), (0, self.step * 2 - 1)))
self.assertEqual(len(path.geom_3d.coords), 3)
def test_50(self):
path = self.model.objects.create(geom=LineString((0, 0), (0, self.step * 2)))
self.assertEqual(len(path.geom_3d.coords), 4)
def test_51(self):
path = self.model.objects.create(geom=LineString((0, 0), (0, self.step * 2 + 1)))
self.assertEqual(len(path.geom_3d.coords), 4)
def test_1m(self):
path = self.model.objects.create(geom=LineString((0, 0), (0, 1), (1, 1)))
self.assertEqual(len(path.geom_3d.coords), 3)
def test_24m(self):
path = self.model.objects.create(geom=LineString((0, 0), (0, self.step - 1), (0, self.step * 2 - 2)))
self.assertEqual(len(path.geom_3d.coords), 3)
def test_25m(self):
path = self.model.objects.create(geom=LineString((0, 0), (0, self.step), (0, self.step * 2)))
self.assertEqual(len(path.geom_3d.coords), 5)
def test_26m(self):
path = self.model.objects.create(geom=LineString((0, 0), (0, self.step + 1), (0, self.step * 2 + 2)))
self.assertEqual(len(path.geom_3d.coords), 5)
def test_49m(self):
path = self.model.objects.create(geom=LineString((0, 0), (0, self.step * 2 - 1), (0, self.step * 4 - 2)))
self.assertEqual(len(path.geom_3d.coords), 5)
def test_50m(self):
path = self.model.objects.create(geom=LineString((0, 0), (0, self.step * 2), (0, self.step * 4)))
self.assertEqual(len(path.geom_3d.coords), 7)
def test_51m(self):
path = self.model.objects.create(geom=LineString((0, 0), (0, self.step * 2 + 1), (0, self.step * 4 + 2)))
self.assertEqual(len(path.geom_3d.coords), 7)
@skipIf(settings.TREKKING_TOPOLOGY_ENABLED, 'Test without dynamic segmentation only')
class SamplingTestTopology(TestCase):
model = Topology
step = settings.ALTIMETRIC_PROFILE_PRECISION
def setUp(self):
if self.model is None:
            raise SkipTest()
# Create a fake empty DEM to prevent trigger optimisation to skip sampling
with connection.cursor() as cur:
cur.execute('INSERT INTO altimetry_dem (rast) VALUES (ST_MakeEmptyRaster(100, 125, 0, 125, 25, -25, 0, 0, %s))',
[settings.SRID])
cur.execute('UPDATE altimetry_dem SET rast = ST_AddBand(rast, \'16BSI\')')
def test_0_first(self):
path = self.model.objects.create(geom=LineString((0, 0), (0, 0), (0, 1)))
self.assertEqual(len(path.geom_3d.coords), 3)
def test_0_last(self):
path = self.model.objects.create(geom=LineString((0, 0), (0, 1), (0, 1)))
self.assertEqual(len(path.geom_3d.coords), 3)
def test_1(self):
path = self.model.objects.create(geom=LineString((0, 0), (0, 1)))
self.assertEqual(len(path.geom_3d.coords), 2)
def test_24(self):
path = self.model.objects.create(geom=LineString((0, 0), (0, self.step - 1)))
self.assertEqual(len(path.geom_3d.coords), 2)
def test_25(self):
path = self.model.objects.create(geom=LineString((0, 0), (0, self.step)))
self.assertEqual(len(path.geom_3d.coords), 3)
def test_26(self):
path = self.model.objects.create(geom=LineString((0, 0), (0, self.step + 1)))
self.assertEqual(len(path.geom_3d.coords), 3)
def test_49(self):
path = self.model.objects.create(geom=LineString((0, 0), (0, self.step * 2 - 1)))
self.assertEqual(len(path.geom_3d.coords), 3)
def test_50(self):
path = self.model.objects.create(geom=LineString((0, 0), (0, self.step * 2)))
self.assertEqual(len(path.geom_3d.coords), 4)
def test_51(self):
path = self.model.objects.create(geom=LineString((0, 0), (0, self.step * 2 + 1)))
self.assertEqual(len(path.geom_3d.coords), 4)
def test_1m(self):
path = self.model.objects.create(geom=LineString((0, 0), (0, 1), (1, 1)))
self.assertEqual(len(path.geom_3d.coords), 3)
def test_24m(self):
path = self.model.objects.create(geom=LineString((0, 0), (0, self.step - 1), (0, self.step * 2 - 2)))
self.assertEqual(len(path.geom_3d.coords), 3)
def test_25m(self):
path = self.model.objects.create(geom=LineString((0, 0), (0, self.step), (0, self.step * 2)))
self.assertEqual(len(path.geom_3d.coords), 5)
def test_26m(self):
path = self.model.objects.create(geom=LineString((0, 0), (0, self.step + 1), (0, self.step * 2 + 2)))
self.assertEqual(len(path.geom_3d.coords), 5)
def test_49m(self):
path = self.model.objects.create(geom=LineString((0, 0), (0, self.step * 2 - 1), (0, self.step * 4 - 2)))
self.assertEqual(len(path.geom_3d.coords), 5)
def test_50m(self):
path = self.model.objects.create(geom=LineString((0, 0), (0, self.step * 2), (0, self.step * 4)))
self.assertEqual(len(path.geom_3d.coords), 7)
def test_51m(self):
path = self.model.objects.create(geom=LineString((0, 0), (0, self.step * 2 + 1), (0, self.step * 4 + 2)))
self.assertEqual(len(path.geom_3d.coords), 7)
|
from migen import *
from litex.soc.interconnect.csr import *
class XADC(Module, AutoCSR):
def __init__(self):
# Temperature(°C) = adc_value*503.975/4096 - 273.15
self._temperature = CSRStatus(12)
        # Voltage(V) = adc_value/4096*3
self._vccint = CSRStatus(12)
self._vccaux = CSRStatus(12)
self._vccbram = CSRStatus(12)
# Alarms
self.alarm = Signal(8)
self.ot = Signal()
# # #
busy = Signal()
channel = Signal(7)
eoc = Signal()
eos = Signal()
data = Signal(16)
drdy = Signal()
self.specials += Instance("XADC",
# from ug480
p_INIT_40=0x9000, p_INIT_41=0x2ef0, p_INIT_42=0x0400,
p_INIT_48=0x4701, p_INIT_49=0x000f,
p_INIT_4A=0x4700, p_INIT_4B=0x0000,
p_INIT_4C=0x0000, p_INIT_4D=0x0000,
p_INIT_4E=0x0000, p_INIT_4F=0x0000,
p_INIT_50=0xb5ed, p_INIT_51=0x5999,
p_INIT_52=0xa147, p_INIT_53=0xdddd,
p_INIT_54=0xa93a, p_INIT_55=0x5111,
p_INIT_56=0x91eb, p_INIT_57=0xae4e,
p_INIT_58=0x5999, p_INIT_5C=0x5111,
o_ALM=self.alarm, o_OT=self.ot,
o_BUSY=busy, o_CHANNEL=channel, o_EOC=eoc, o_EOS=eos,
i_VAUXN=0, i_VAUXP=1, i_VN=0, i_VP=1,
i_CONVST=0, i_CONVSTCLK=0, i_RESET=ResetSignal(),
o_DO=data, o_DRDY=drdy, i_DADDR=channel, i_DCLK=ClockSignal(),
i_DEN=eoc, i_DI=0, i_DWE=0,
# o_JTAGBUSY=, o_JTAGLOCKED=, o_JTAGMODIFIED=, o_MUXADDR=,
)
channels = {
0: self._temperature,
1: self._vccint,
2: self._vccaux,
6: self._vccbram
}
self.sync += [
If(drdy,
Case(channel, dict(
(k, v.status.eq(data >> 4))
for k, v in channels.items()))
)
]
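# A rough host-side helper (illustrative sketch): converts the raw 12-bit codes
# exposed by the CSRs above to physical units, using the formulas quoted in the
# comments (Xilinx UG480). The helper name _xadc_convert and the example value
# are hypothetical.
def _xadc_convert(temperature_code, supply_code):
    temperature_c = temperature_code * 503.975 / 4096.0 - 273.15
    supply_v = supply_code / 4096.0 * 3.0
    return temperature_c, supply_v
# Example: a temperature code of 2532 corresponds to roughly 38.4 degC.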
|
from __future__ import absolute_import, print_function
import io
import re
from glob import glob
from os.path import basename
from os.path import dirname
from os.path import join
from os.path import splitext
from setuptools import find_packages
from setuptools import setup
def read(*names, **kwargs):
return io.open(
join(dirname(__file__), *names),
encoding=kwargs.get('encoding', 'utf8')
).read()
setup(
name='jinn',
version='0.1.0',
license='BSD',
    description='An invoke wrapper.',
long_description='%s\n%s' % (
re.compile('^.. start-badges.*^.. end-badges', re.M | re.S).sub('', read('README.rst')),
re.sub(':[a-z]+:`~?(.*?)`', r'``\1``', read('CHANGELOG.rst'))
),
author='transcode',
author_email='team@transcode.de',
url='https://github.com/transcode-de/jinn',
packages=find_packages('src'),
package_dir={'': 'src'},
py_modules=[splitext(basename(path))[0] for path in glob('src/*.py')],
include_package_data=True,
zip_safe=False,
classifiers=[
# complete classifier list: http://pypi.python.org/pypi?%3Aaction=list_classifiers
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Operating System :: Unix',
'Operating System :: POSIX',
'Operating System :: Microsoft :: Windows',
'Programming Language :: Python',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: Implementation :: CPython',
'Programming Language :: Python :: Implementation :: PyPy',
# uncomment if you test on these interpreters:
# 'Programming Language :: Python :: Implementation :: IronPython',
# 'Programming Language :: Python :: Implementation :: Jython',
# 'Programming Language :: Python :: Implementation :: Stackless',
'Topic :: Utilities',
],
keywords=[
# eg: 'keyword1', 'keyword2', 'keyword3',
],
install_requires=[
# eg: 'aspectlib==1.1.1', 'six>=1.7',
],
extras_require={
# eg:
# 'rst': ['docutils>=0.11'],
# ':python_version=="2.6"': ['argparse'],
},
)
|
from __future__ import print_function
from enum import Enum, IntEnum
class Color(Enum):
red = 1
green = 2
blue = 3
class Shake(Enum):
vanilla = 7
chocolate = 4
cookies = 9
# Same as Color.blue
mint = 3
class Planet(Enum):
MERCURY = (3.303e+23, 2.4397e6)
VENUS = (4.869e+24, 6.0518e6)
EARTH = (5.976e+24, 6.37814e6)
MARS = (6.421e+23, 3.3972e6)
JUPITER = (1.9e+27, 7.1492e7)
SATURN = (5.688e+26, 6.0268e7)
URANUS = (8.686e+25, 2.5559e7)
NEPTUNE = (1.024e+26, 2.4746e7)
class HeterogeneousEnum(Enum):
red = 1.0
green = 2.0
blue = 3j
class Shape(IntEnum):
# Same as Color.green
circle = 2
# Same as RequestError.internal_error
square = 500
class RequestError(IntEnum):
dummy = 2
not_found = 404
internal_error = 500
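# Quick comparison demo (values verifiable in a REPL): plain Enum members never
# compare equal across classes, while IntEnum members behave like their
# integer values.
#
#   Color.blue == Shake.mint                     # False, despite both having value 3
#   Shape.square == RequestError.internal_error  # True, both are the int 500
#   Shape.circle == 2                            # True, IntEnum compares to plain ints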
|
from numpy import *
from numpy import f2py # not part of import *
from scitools.StringFunction import StringFunction
import time, sys, os
sys.path.insert(0, os.path.join(os.environ['scripting'],
'src','py','examples'))
from Grid2D import Grid2D
try:
import ext_gridloop
except ImportError:
print 'You must first build the ext_gridloop module'
sys.exit(1)
class Grid2Deff(Grid2D):
def ext_gridloop1(self, f):
"""Compute a[i,j] = f(xi,yj) in an external routine."""
# a is made here, sent to the routine, and then returned
a = zeros((self.xcoor.size, self.ycoor.size))
# C/C++ or Fortran module?
if ext_gridloop.__doc__ is not None:
if 'f2py' in ext_gridloop.__doc__:
# Fortran extension module
a = asarray(a, order='Fortran')
# could store a as self.a to avoid making Fortran
# arrays in repeated calls
ext_gridloop.gridloop1(a, self.xcoor, self.ycoor, f)
return a
def ext_gridloop2(self, f):
"""Compute a[i,j] = f(xi,yj) in an external routine."""
# a is made in the external routine
a = ext_gridloop.gridloop2(self.xcoor, self.ycoor, f)
return a
def ext_gridloop_exceptions(self, f):
"""Test error handling in the extension module."""
try: #1
ext_gridloop.gridloop1((1,2), self.xcoor, self.ycoor[1:], f)
except:
print sys.exc_type, sys.exc_value
try: #2
ext_gridloop.gridloop1(self.xcoor, self.xcoor, self.ycoor[1:], f)
except:
print sys.exc_type, sys.exc_value
try: #3
ext_gridloop.gridloop2(self.xcoor, self.ycoor, 'abc')
except:
print sys.exc_type, sys.exc_value
try: #4
ext_gridloop.gridloop2(array(self.xcoor,Complex64),
self.ycoor, 'abc')
except:
print sys.exc_type, sys.exc_value
try: #5
ext_gridloop.gridloop2(array([[0,0],[1,2]]), self.ycoor, 'abc')
except:
print sys.exc_type, sys.exc_value
# NOTE: the three next functions are only available in the
# Fortran 77 extension module:
def ext_gridloop_vec1(self, f):
"""As ext_gridloop2, but vectorized callback."""
a = zeros((self.xcoor.size, self.ycoor.size))
a = ext_gridloop.gridloop_vec1(a, self.xcoor, self.ycoor, f)
return a
def ext_gridloop_vec2(self, f):
"""As ext_gridloop_vec1, but callback to func. w/grid arg."""
a = zeros((self.xcoor.size, self.ycoor.size))
a = ext_gridloop.gridloop_vec2(a, f, func1_extra_args=(self,))
return a
def myfuncf3(self, a):
a[:,:] = myfunc(self.xcoorv, self.ycoorv) # in-place mod.
def ext_gridloop_vec3(self, f):
"""As ext_gridloop_vec2, but callback to class method."""
a = zeros((self.xcoor.size, self.ycoor.size))
a = ext_gridloop.gridloop_vec2(a, f)
return a
def ext_gridloop2_str(self, f77_name):
"""
Call an interface to ext_gridloop.gridloop2, which avoids
callbacks to Python and calls the f77_name F77 function
instead.
"""
a = ext_gridloop.gridloop2_str(self.xcoor, self.ycoor,
f77_name)
return a
def ext_gridloop_noalloc(self, f77_name, a):
"""
As ext_gridloop2_str, but a is intent(in,out), i.e., there is
no allocation of a in the wrapper code. If the function
is called a large number of times (as in our efficiency
tests), intent(in,out) increases the performance.
"""
a = ext_gridloop.gridloop_noalloc(a, self.xcoor, self.ycoor,
f77_name)
return a
def ext_gridloop2_fcb(self):
"""As ext_gridloop2, but compiled Fortran callback func."""
import callback
a = callback.gridloop2_fcb(self.xcoor, self.ycoor)
return a
def ext_gridloop2_fcb_compile(self, fstr):
if not isinstance(fstr, str):
            raise TypeError(
                'fstr must be string expression, not %s' % type(fstr))
# generate Fortran source
source = """
real*8 function fcb(x, y)
real*8 x, y
fcb = %s
return
end
subroutine gridloop2_fcb(a, xcoor, ycoor, nx, ny)
integer nx, ny
real*8 a(nx,ny), xcoor(nx), ycoor(ny)
Cf2py intent(out) a
Cf2py intent(in) xcoor
Cf2py intent(in) ycoor
Cf2py depend(nx,ny) a
real*8 fcb
external fcb
call gridloop2(a, xcoor, ycoor, nx, ny, fcb)
return
end
""" % fstr
# compile callback code and link with ext_gridloop.so:
f2py_args = "--fcompiler=Gnu --build-dir=tmp2"\
" -DF2PY_REPORT_ON_ARRAY_COPY=1 "\
" ./ext_gridloop.so"
r = f2py.compile(source, modulename='callback',
extra_args=f2py_args, verbose=True,
source_fn='_cb.f')
if r:
print 'unsuccessful compilation'; sys.exit(1)
import callback # see if we can import successfully
def ext_gridloop2_fcb_ptr(self):
"""As ext_gridloop2, but compiled Fortran callback func."""
from callback import fcb
a = ext_gridloop.gridloop2(self.xcoor, self.ycoor,
fcb._cpointer)
return a
def ext_gridloop2_fcb_ptr_compile(self, fstr):
if not isinstance(fstr, StringFunction):
            raise TypeError(
                'fstr must be StringFunction, not %s' % type(fstr))
source = fstr.F77_code('fcb')
f2py_args = "--fcompiler=Gnu --build-dir=tmp2"
r = f2py.compile(source, modulename='callback',
extra_args=f2py_args, verbose=True,
source_fn='_cb.f')
if r:
print 'unsuccessful compilation'; sys.exit(1)
import callback # see if we can import successfully
def ext_gridloop2_compile(self, fstr):
if not isinstance(fstr, str):
            raise TypeError(
                'fstr must be string expression, not %s' % type(fstr))
# generate Fortran source for gridloop2:
source = """
subroutine gridloop2(a, xcoor, ycoor, nx, ny)
integer nx, ny
real*8 a(nx,ny), xcoor(nx), ycoor(ny)
Cf2py intent(out) a
Cf2py depend(nx,ny) a
integer i,j
real*8 x, y
do j = 1,ny
y = ycoor(j)
do i = 1,nx
x = xcoor(i)
a(i,j) = %s
end do
end do
return
end
""" % fstr
f2py_args = "--fcompiler=Gnu --build-dir tmp1"\
" -DF2PY_REPORT_ON_ARRAY_COPY=1"
r = f2py.compile(source, modulename='ext_gridloop2',
extra_args=f2py_args, verbose=True,
source_fn='_cb.f')
if r:
print 'unsuccessful compilation'; sys.exit(1)
import ext_gridloop2 # see if we can import successfully
def ext_gridloop2_v2(self):
"""
As ext_gridloop2, but the Fortran gridloop2 function was
generated and compiled in Python (in ext_gridloop2_compile).
"""
import ext_gridloop2
return ext_gridloop2.gridloop2(self.xcoor, self.ycoor)
def ext_gridloop2_weave(self, fstr):
"""Migrate loop to C++ with aid of Weave."""
try:
from scipy import weave
except ImportError:
print 'Could not import weave.\nContinue...'
return
if not isinstance(fstr, str):
            raise TypeError(
                'fstr must be string expression, not %s' % type(fstr))
# the callback function is now coded in C++
# (fstr must be valid C++ code):
extra_code = r"""
double cppcb(double x, double y) {
return %s;
}
""" % fstr
# the loop in C++ (with Blitz++ array syntax):
code = r"""
int i,j;
for (i=0; i<nx; i++) {
for (j=0; j<ny; j++) {
a(i,j) = cppcb(xcoor(i), ycoor(j));
}
}
"""
nx = self.nx; ny = self.ny
a = zeros((nx, ny))
xcoor = self.xcoor; ycoor = self.ycoor
err = weave.inline(code, ['a', 'nx', 'ny', 'xcoor', 'ycoor'],
type_converters=weave.converters.blitz,
support_code=extra_code, compiler='gcc')
# a is filled
return a
def ext_gridloop1_instant(self, fstr):
if not isinstance(fstr, str):
            raise TypeError(
                'fstr must be string expression, not %s' % type(fstr))
# generate C source for gridloop1:
# (no call to C function f(x,y), fstr is inserted in the loop)
source = """
void gridloop1(double *a, int nx, int ny,
double *xcoor, double *ycoor)
{
int i, j; double x, y;
for (i=0; i<nx; i++) {
for (j=0; j<ny; j++) {
          x = xcoor[i]; y = ycoor[j];
          index(a, i, j) = %s;
}
}
}
""" % fstr
try:
from instant import inline_with_numpy
a = zeros((self.nx, self.ny))
arrays = [['nx', 'ny', 'a'],
['nx', 'xcoor'],
['ny', 'ycoor']]
self.gridloop1_instant = \
inline_with_numpy(source, arrays=arrays)
except:
self.gridloop1_instant = None
def dump(self, a):
"""Nice printout of a 2D array a."""
for i in xrange(a.shape[0]):
for j in xrange(a.shape[1]):
print 'value at (%g,%g) \t = a[%d,%d] = %g' % \
(self.xcoor[i], self.ycoor[j], i, j, a[i,j])
def gridloop_psyco_init(self, method):
"""Try to accelerate Grid2D.gridloop with psyco."""
# define method self.gridloop_psyco:
try:
import psyco
self.gridloop_psyco = psyco.proxy(method)
except ImportError:
self.gridloop_psyco = method
def f1(x,y):
print 'x+2*y =',x+2*y
return x+2*y
def verify1():
"""Basic test of the extension module."""
g = Grid2Deff(dx=0.5, dy=1)
f_exact = g(f1) # NumPy computation
expression1 = StringFunction('x + 2*y',
independent_variables=('x','y'),
globals=globals())
f = g.ext_gridloop1(f1)
print 'f computed by external gridloop1 function and f1:\n', f
if allclose(f, f_exact, atol=1.0E-10, rtol=1.0E-12):
print 'f is correct'
f = g.ext_gridloop2(f1)
print 'f computed by external gridloop2 function and f1:\n', f
if allclose(f, f_exact, atol=1.0E-10, rtol=1.0E-12):
print 'f is correct'
f = g.ext_gridloop1(expression1)
print 'f computed by external gridloop1 function and StringFunction:\n', f
if allclose(f, f_exact, atol=1.0E-10, rtol=1.0E-12):
print 'f is correct'
f = g.ext_gridloop2(expression1)
print 'f computed by external gridloop2 function and StringFunction:\n', f
if allclose(f, f_exact, atol=1.0E-10, rtol=1.0E-12):
print 'f is correct'
fast_func = expression1.__call__
f = g.ext_gridloop2(fast_func)
print 'f computed by external gridloop2 function and StringFunction.__call__:\n', f
if allclose(f, f_exact, atol=1.0E-10, rtol=1.0E-12):
print 'f is correct'
f = g(expression1)
print 'f computed by __call__ and StringFunction:\n', f
if allclose(f, f_exact, atol=1.0E-10, rtol=1.0E-12):
print 'f is correct'
# check printing:
print 'array seen from Python:'
g.dump(f)
if 'dump' in dir(ext_gridloop):
print 'array seen from Fortran (transposed, but right values):'
ext_gridloop.dump(f, g.xcoor, g.ycoor)
def myfunc(x, y):
return sin(x*y) + 8*x
def myfuncf1(a, xcoor, ycoor, nx, ny):
"""Vectorized function to be called from extension module."""
#print 'myfuncf1; type of args:',type(a),type(xcoor),type(nx)
x = xcoor[:,newaxis]
y = ycoor[newaxis,:]
a[:,:] = myfunc(x, y) # in-place modification of a
print 'myfuncf1, a=',a
def myfuncf2(a, g):
"""Vectorized function to be called from extension module."""
#print 'myfuncf2; type of args:',type(a),type(g)
a[:,:] = myfunc(g.xcoorv, g.ycoorv) # in-place modification of a
def verify2(n=3):
"""
Test of some methods in class Grid2Deff that call up
some F77 routines for improving the efficiency of callbacks
to Python.
"""
    if 'gridloop_vec2' not in dir(ext_gridloop):
raise ImportError, 'verify2 works only for F77 module'
dx = 1.0/n
g = Grid2Deff(dx=dx, dy=dx)
from StringIO import StringIO
from scitools.numpyutils import arr
a_exact = arr(file_=StringIO("""
0. 0. 0. 0.
2.66666667 2.7775493 2.88706441 2.99386136
5.33333333 5.55373108 5.7632897 5.95170314
8. 8.3271947 8.6183698 8.84147098"""))
def _check():
if not allclose(a, a_exact):
print 'ERROR, a is wrong, correct a reads\n', a_exact
else:
print 'correct array'
a = g.ext_gridloop_vec1(myfuncf1)
print "g.ext_gridloop_vec1(myfuncf1): a=\n",a
_check()
a = g.ext_gridloop_vec2(myfuncf2)
print "g.ext_gridloop_vec2(myfuncf2): a=\n",a
_check()
# need f2py version > 2.42 (callback to class method):
a = g.ext_gridloop_vec3(g.myfuncf3)
print "g.ext_gridloop_vec3(g.myfuncf3): a=\n",a
_check()
a = g.ext_gridloop2_str('myfunc')
print "g.ext_gridloop_str('myfunc'): a=\n",a
_check()
a = g.ext_gridloop_noalloc('myfunc', a)
print "g.ext_gridloop_str_noalloc('myfunc'): a=\n",a
_check()
fstr = 'sin(x*y) + 8*x'
g.ext_gridloop2_fcb_compile(fstr)
a = g.ext_gridloop2_fcb()
print "g.gridloop2_fcb: a=\n",a
_check()
import callback
print 'contents of callback module:', dir(callback)
fstr = StringFunction('sin(x*y) + 8*x')
g.ext_gridloop2_fcb_ptr_compile(fstr)
a = g.ext_gridloop2_fcb_ptr()
print "g.gridloop2_fcb_ptr: a=\n",a
_check()
import callback
print 'fcb callback module:', dir(callback), dir(callback.fcb)
g.ext_gridloop2_compile(fstr)
a = g.ext_gridloop2_v2()
print "g.gridloop2_v2: a=\n",a
_check()
a = g.ext_gridloop2_weave(fstr)
print "g.gridloop2_weave: a=\n",a
_check()
g.gridloop_psyco_init(g.gridloop)
a = g.gridloop_psyco(fstr)
print "g.gridloop_psyco(str): a=\n",a
_check()
a = g.gridloop_psyco(myfunc)
print "g.gridloop_psyco(func): a=\n",a
_check()
g.ext_gridloop1_instant(fstr)
g.gridloop1_instant(a, g.nx, g.ny, g.xcoor, g.ycoor)
print "g.gridloop1_instant: a=\n", a
def timing2(n=2000, best_time=1.0):
"""Time different implementations of the extension module."""
print 'Grid2Deff.timing2: reference CPU time = %g' % best_time
dx = 1.0/n
g = Grid2Deff(dx=dx, dy=dx)
# here we use straight NumPy sin in a scalar context:
def myfunc1(x, y):
return sin(x*y) + 8*x
def myfunc2(x, y):
return math.sin(x*y) + 8*x
expression1 = StringFunction('sin(x*y) + 8*x',
independent_variables=('x','y'),
globals=globals())
expression1_f = expression1.__call__ # for efficiency and F77 callback
expression2 = StringFunction('math.sin(x*y) + 8*x',
independent_variables=('x','y'),
globals=globals())
expression2_f = expression2.__call__ # for efficiency and F77 callback
from scitools.misc import timer
from scitools.EfficiencyTable import EfficiencyTable
e = EfficiencyTable('Grid2Deff tests, %dx%d grid' % (n,n), best_time)
t0a = timer(g.gridloop, (myfunc1,), repetitions=1)
e.add('g.gridloop, myfunc1', t0a)
t0b = timer(g.gridloop, (myfunc2,), repetitions=1)
e.add('g.gridloop, myfunc2', t0b)
t0c = timer(g.__call__, (myfunc1,), repetitions=1)
e.add('g.__call__, myfunc1', t0c)
t0d = timer(g.__call__, (expression1_f,), repetitions=1)
e.add('g.__call__, expression1_f', t0d)
t0e = timer(g.gridloop_itemset, (myfunc2,), repetitions=1)
e.add('g.gridloop_itemset, myfunc2', t0e)
t1a = timer(g.ext_gridloop1, (myfunc1,), repetitions=1)
e.add('g.ext_gridloop1, myfunc1', t1a)
t1b = timer(g.ext_gridloop1, (myfunc2,), repetitions=1)
e.add('g.ext_gridloop1, myfunc2', t1b)
t2a = timer(g.ext_gridloop2, (myfunc1,), repetitions=1)
e.add('g.ext_gridloop2, myfunc1', t2a)
t2b = timer(g.ext_gridloop2, (myfunc2,), repetitions=1)
e.add('g.ext_gridloop2, myfunc2', t2b)
t3a = timer(g.ext_gridloop2, (expression1_f,), repetitions=1)
e.add('g.ext_gridloop2, expression1_f', t3a)
t3b = timer(g.ext_gridloop2, (expression2_f,), repetitions=1)
e.add('g.ext_gridloop2, expression2_f', t3b)
nrep = 20
# try the improved functions (works only for the F77 module):
if 'gridloop_vec2' in dir(ext_gridloop):
t4 = timer(g.ext_gridloop_vec2, (myfuncf2,), repetitions=nrep)
e.add('g.ext_gridloop_vec2, myfuncf2', t4)
if 'gridloop2_str' in dir(ext_gridloop):
t5 = timer(g.ext_gridloop2_str, ('myfunc',), repetitions=nrep)
e.add('g.ext_gridloop2_str, myfunc', t5)
# try the version without allocation (first, make an a array):
a = g.ext_gridloop2(myfunc1) # a has now Fortran storage
t5b = timer(g.ext_gridloop_noalloc,
('myfunc', a), repetitions=nrep)
e.add('g.ext_gridloop_noalloc, myfunc', t5b)
# try 'inline' F77 compiled callback too:
# (give F77 source for core of callback function as argument)
g.ext_gridloop2_fcb_compile(str(expression1))
t6 = timer(g.ext_gridloop2_fcb, (), repetitions=nrep)
e.add('g.ext_gridloop2_fcb(%s)' % repr(str(expression1)), t6)
g.ext_gridloop2_fcb_ptr_compile(expression1)
t6b = timer(g.ext_gridloop2_fcb_ptr, (), repetitions=nrep)
e.add('g.ext_gridloop2_fcb_ptr(%s)' % repr(expression1), t6b)
g.ext_gridloop2_compile(str(expression1))
t7 = timer(g.ext_gridloop2_v2, (), repetitions=nrep)
e.add('g.ext_gridloop2_v2(%s)' % repr(str(expression1)), t7)
# weave version:
t8 = timer(g.ext_gridloop2_weave, (str(expression1),), repetitions=nrep)
e.add('g.ext_gridloop2_weave(%s)' % repr(str(expression1)), t8)
# psyco:
g.gridloop_psyco_init(g.gridloop)
if g.gridloop_psyco != g.gridloop: # has psyco
t9a = timer(g.gridloop_psyco, (myfunc2,), repetitions=1)
e.add('g.gridloop_psyco, myfunc2', t9a)
t9b = timer(g.gridloop_psyco, (expression2_f,), repetitions=1)
e.add('g.gridloop_psyco, expression2_f', t9b)
g.gridloop_psyco_init(g.gridloop_itemset)
if g.gridloop_psyco != g.gridloop_itemset: # has psyco
t9a = timer(g.gridloop_psyco, (myfunc2,), repetitions=1)
e.add('g.gridloop_psyco (itemset), myfunc2', t9a)
t9b = timer(g.gridloop_psyco, (expression2_f,), repetitions=1)
e.add('g.gridloop_psyco (itemset), expression2_f', t9b)
# instant:
g.ext_gridloop1_instant(str(expression1))
if g.gridloop1_instant is not None:
        a = zeros((g.nx, g.ny))
        t10 = timer(g.gridloop1_instant,
                    (a, g.nx, g.ny, g.xcoor, g.ycoor),
repetitions=nrep)
e.add('g.gridloop1_instant', t10)
print '\n\n\n\nrun from directory', os.getcwd()
print e
#print 'Experiments in table:', e.experiments
def exceptions1():
"""Test exceptions raised by the extension module."""
g = Grid2Deff(dx=0.5, dy=1)
def myfunc(x, y):
return sin(x*y) + 8*x
g.ext_gridloop_exceptions(myfunc)
def run():
# provide function to call (verify1, timing2, exceptions1, etc.)
# as command-line argument
try:
func = sys.argv[1]
except:
# basic test if no command-line argument
func = 'verify1'
if func == 'timing2':
# in case of timing, specify grid size as 2nd argument:
try:
n = int(sys.argv[2])
except:
n = 1100
# specify reference executing time as 3rd argument:
try:
best_time = float(sys.argv[3])
except:
best_time = 1.0
exec 'timing2(%d, %g)' % (n, best_time)
else:
exec func + '()'
if __name__ == '__main__':
# lots of experiments:
# Grid2Deff.py timing2 1100 0.13
# 1100 is grid size, 0.13 is reference time
run()
|
from setuptools import setup
setup(name='speakeasy',
version='1.0',
description='A Communications Platform for Paranoid People',
author='Zhehao Mao',
author_email='zhehao.mao@gmail.com',
url='http://speakeasy-zhehao.rhcloud.com/',
install_requires=['Flask', 'Flask-PyMongo', 'pycrypto'],
)
|
import capstone as _capstone
try:
import unicorn as _unicorn
except ImportError:
_unicorn = None
from .arch import Arch
class ArchMIPS32(Arch):
def __init__(self, endness="Iend_LE"):
super(ArchMIPS32, self).__init__(endness)
if endness == 'Iend_BE':
self.function_prologs = {
r"\x27\xbd\xff[\x00-\xff]" # addiu $sp, xxx
r"\x3c\x1c[\x00-\xff][\x00-\xff]\x9c\x27[\x00-\xff][\x00-\xff]" # lui $gp, xxx; addiu $gp, $gp, xxxx
}
self.function_epilogs = {
r"\x8f\xbf[\x00-\xff]{2}([\x00-\xff]{4}){0,4}\x03\xe0\x00\x08" # lw ra, off(sp); ... ; jr ra
}
self.qemu_name = 'mips'
self.triplet = 'mips-linux-gnu'
self.linux_name = 'mips'
bits = 32
vex_arch = "VexArchMIPS32"
name = "MIPS32"
ida_processor = 'mipsb'
qemu_name = 'mipsel'
linux_name = 'mipsel' # ???
triplet = 'mipsel-linux-gnu'
max_inst_bytes = 4
ip_offset = 128
sp_offset = 116
bp_offset = 120
ret_offset = 8
syscall_num_offset = 8
call_pushes_ret = False
stack_change = -4
sizeof = {'short': 16, 'int': 32, 'long': 32, 'long long': 64}
cs_arch = _capstone.CS_ARCH_MIPS
cs_mode = _capstone.CS_MODE_32 + _capstone.CS_MODE_LITTLE_ENDIAN
uc_arch = _unicorn.UC_ARCH_MIPS if _unicorn else None
uc_mode = (_unicorn.UC_MODE_32 + _unicorn.UC_MODE_LITTLE_ENDIAN) if _unicorn else None
uc_const = _unicorn.mips_const if _unicorn else None
uc_prefix = "UC_MIPS_" if _unicorn else None
function_prologs = {
r"[\x00-\xff]\xff\xbd\x27", # addiu $sp, xxx
r"[\x00-\xff][\x00-\xff]\x1c\x3c[\x00-\xff][\x00-\xff]\x9c\x27" # lui $gp, xxx; addiu $gp, $gp, xxxx
}
function_epilogs = {
r"[\x00-\xff]{2}\xbf\x8f([\x00-\xff]{4}){0,4}\x08\x00\xe0\x03" # lw ra, off(sp); ... ; jr ra
}
ret_instruction = "\x08\x00\xE0\x03" + "\x25\x08\x20\x00"
nop_instruction = "\x00\x00\x00\x00"
instruction_alignment = 4
persistent_regs = ['gp', 'ra', 't9']
default_register_values = [
( 'sp', Arch.initial_sp, True, 'global' ), # the stack
]
entry_register_values = {
'v0': 'ld_destructor',
'ra': 0
}
default_symbolic_registers = [ 'r0', 'r1', 'r2', 'r3', 'r4', 'r5', 'r6', 'r7', 'r8', 'r9', 'r10', 'r11', 'r12',
'r13', 'r14', 'r15', 'r16', 'r17', 'r18', 'r19', 'r20', 'r21', 'r22', 'r23', 'r24',
'r25', 'r26', 'r27', 'r28', 'sp', 'bp', 'lr', 'pc', 'hi', 'lo' ]
register_names = {
0: 'zero',
4: 'at',
8: 'v0',
12: 'v1',
16: 'a0',
20: 'a1',
24: 'a2',
28: 'a3',
32: 't0',
36: 't1',
40: 't2',
44: 't3',
48: 't4',
52: 't5',
56: 't6',
60: 't7',
64: 's0',
68: 's1',
72: 's2',
76: 's3',
80: 's4',
84: 's5',
88: 's6',
92: 's7',
96: 't8',
100: 't9',
104: 'k0',
108: 'k1',
112: 'gp',
116: 'sp',
120: 's8',
124: 'ra',
128: 'pc',
132: 'hi',
136: 'lo',
144: 'f0',
152: 'f1',
160: 'f2',
168: 'f3',
176: 'f4',
184: 'f5',
192: 'f6',
200: 'f7',
208: 'f8',
216: 'f9',
224: 'f10',
232: 'f11',
240: 'f12',
248: 'f13',
256: 'f14',
264: 'f15',
272: 'f16',
280: 'f17',
288: 'f18',
296: 'f19',
304: 'f20',
312: 'f21',
320: 'f22',
328: 'f23',
336: 'f24',
344: 'f25',
352: 'f26',
360: 'f27',
368: 'f28',
376: 'f29',
384: 'f30',
392: 'f31',
400: 'fir',
404: 'fccr',
408: 'fexr',
412: 'fenr',
416: 'fcsr',
420: 'ulr',
424: 'emnote',
428: 'cmstart',
432: 'cmlen',
436: 'nraddr',
440: 'evc_failaddr',
444: 'evc_counter',
448: 'cond',
452: 'dspcontrol',
456: 'ac0',
464: 'ac1',
472: 'ac2',
480: 'ac3',
488: 'ip_at_syscall'
}
registers = {
'r0': (0, 4), 'zero': (0, 4),
'r1': (4, 4), 'at': (4, 4),
'r2': (8, 4), 'v0': (8, 4),
'r3': (12, 4), 'v1': (12, 4),
'r4': (16, 4), 'a0': (16, 4),
'r5': (20, 4), 'a1': (20, 4),
'r6': (24, 4), 'a2': (24, 4),
'r7': (28, 4), 'a3': (28, 4),
'r8': (32, 4), 't0': (32, 4),
'r9': (36, 4), 't1': (36, 4),
'r10': (40, 4), 't2': (40, 4),
'r11': (44, 4), 't3': (44, 4),
'r12': (48, 4), 't4': (48, 4),
'r13': (52, 4), 't5': (52, 4),
'r14': (56, 4), 't6': (56, 4),
'r15': (60, 4), 't7': (60, 4),
'r16': (64, 4), 's0': (64, 4),
'r17': (68, 4), 's1': (68, 4),
'r18': (72, 4), 's2': (72, 4),
'r19': (76, 4), 's3': (76, 4),
'r20': (80, 4), 's4': (80, 4),
'r21': (84, 4), 's5': (84, 4),
'r22': (88, 4), 's6': (88, 4),
'r23': (92, 4), 's7': (92, 4),
'r24': (96, 4), 't8': (96, 4),
'r25': (100, 4), 't9': (100, 4),
'r26': (104, 4), 'k0': (104, 4),
'r27': (108, 4), 'k1': (108, 4),
'r28': (112, 4), 'gp': (112, 4),
'r29': (116, 4), 'sp': (116, 4),
'r30': (120, 4), 's8': (120, 4), 'bp': (120, 4), 'fp': (120, 4),
'r31': (124, 4), 'ra': (124, 4), 'lr': (124, 4),
'pc': (128, 4),
'ip': (128, 4),
'hi': (132, 4),
'lo': (136, 4),
# these registers are allocated 64 bits by VEX but they are only 32-bit
# it's a little sketchy tbh because some 32-bit mips arches DO in fact have a
# 64-bit FPU but I have no idea how to deal with those
'f0': (144, 4),
'f1': (152, 4),
'f2': (160, 4),
'f3': (168, 4),
'f4': (176, 4),
'f5': (184, 4),
'f6': (192, 4),
'f7': (200, 4),
'f8': (208, 4),
'f9': (216, 4),
'f10': (224, 4),
'f11': (232, 4),
'f12': (240, 4),
'f13': (248, 4),
'f14': (256, 4),
'f15': (264, 4),
'f16': (272, 4),
'f17': (280, 4),
'f18': (288, 4),
'f19': (296, 4),
'f20': (304, 4),
'f21': (312, 4),
'f22': (320, 4),
'f23': (328, 4),
'f24': (336, 4),
'f25': (344, 4),
'f26': (352, 4),
'f27': (360, 4),
'f28': (368, 4),
'f29': (376, 4),
'f30': (384, 4),
'f31': (392, 4),
'fir': (400, 4),
'fccr': (404, 4),
'fexr': (408, 4),
'fenr': (412, 4),
'fcsr': (416, 4),
'ulr': (420, 4),
'emnote': (424, 4),
'cmstart': (428, 4),
'cmlen': (432, 4),
'nraddr': (436, 4),
'evc_failaddr': (440, 4),
'evc_counter': (444, 4),
'cond': (448, 4),
'dspcontrol': (452, 4),
'ac0': (456, 8),
'ac1': (464, 8),
'ac2': (472, 8),
'ac3': (480, 8),
'ip_at_syscall': (488, 4)
}
argument_registers = {
registers['v0'][0],
registers['v1'][0],
registers['a0'][0],
registers['a2'][0],
registers['a3'][0],
registers['t0'][0],
registers['t1'][0],
registers['t2'][0],
registers['t3'][0],
registers['t4'][0],
registers['t5'][0],
registers['t6'][0],
registers['t7'][0],
registers['s0'][0],
registers['s1'][0],
registers['s2'][0],
registers['s3'][0],
registers['s4'][0],
registers['s5'][0],
registers['s6'][0],
registers['t8'][0],
registers['t9'][0]
}
dynamic_tag_translation = {
0x70000001: 'DT_MIPS_RLD_VERSION',
0x70000005: 'DT_MIPS_FLAGS',
0x70000006: 'DT_MIPS_BASE_ADDRESS',
0x7000000a: 'DT_MIPS_LOCAL_GOTNO',
0x70000011: 'DT_MIPS_SYMTABNO',
0x70000012: 'DT_MIPS_UNREFEXTNO',
0x70000013: 'DT_MIPS_GOTSYM',
0x70000016: 'DT_MIPS_RLD_MAP',
0x70000032: 'DT_MIPS_PLTGOT'
}
got_section_name = '.got'
ld_linux_name = 'ld.so.1'
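# Usage sketch (illustrative): the class-level `registers` map resolves a
# register name to its (offset, size) pair in the VEX guest layout, and
# aliases share one slot, e.g.
#
#     arch = ArchMIPS32(endness='Iend_LE')
#     arch.registers['sp']        # (116, 4), the same slot as 'r29'
#     arch.register_names[116]    # 'sp'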
|
from __future__ import print_function
from __future__ import unicode_literals
import datetime
import inspect
import os
import wrapt
from tombstones.log import LogEntry
from tombstones.log import save_log_entry
def line_number_for_tombstone(lines, line_number):
for line in lines:
if line.strip() == "@tombstone":
break
line_number += 1
return line_number
@wrapt.decorator
def tombstone(wrapped, instance, args, kwargs):
"""Tombstone decorator to save log entry."""
save_log_entry(LogEntry(
name=wrapped.__name__,
source_file=os.path.abspath(
inspect.getsourcefile(wrapped)
),
line_number=line_number_for_tombstone(
*inspect.getsourcelines(wrapped)
),
datetime=str(datetime.datetime.now()),
))
return wrapped(*args, **kwargs)
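# Usage sketch (illustrative; `legacy_helper` is a made-up name): each call
# to a decorated function saves a LogEntry recording the function name, its
# absolute source file, the line of the `@tombstone` marker, and a
# timestamp, then runs the wrapped function unchanged:
#
#     @tombstone
#     def legacy_helper():
#         return 42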
|
from os import path
from setuptools import setup
CLASSIFIERS = [
'Development Status :: 3 - Alpha',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.4',
'Topic :: Software Development :: Libraries :: Python Modules']
def extract_version(filepath=None, name='__version__'):
"""Parse __version__ out of Python file, default parlor/__init__.py."""
if filepath is None:
filepath = path.join('parlor', '__init__.py')
context = {}
with open(filepath) as fd:
for line in fd:
if name in line:
exec(line, context)
break
else:
raise RuntimeError('{} not found in {}'.format(name, filepath))
return context[name]
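# extract_version() expects a line of the form
#     __version__ = '0.1.0'
# in parlor/__init__.py; exec() binds __version__ into `context`, and the
# bound value is returned.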
README = 'README.rst'
with open(path.join(path.dirname(__file__), README)) as fd:
long_description = '\n' + fd.read()
setup(
name='parlor',
version=extract_version(),
url='https://github.com/rduplain/parlor',
license='BSD',
author='Ron DuPlain',
author_email='ron.duplain@gmail.com',
description='gathers dependencies & routes',
long_description=long_description,
packages=['parlor', 'parlor.app'],
install_requires=[
'Werkzeug>=0.9',
'jeni>=0.3.3',
],
extras_require={
'flask': ['Flask>=0.10'],
'sql': ['SQLAlchemy>=0.6'],
},
classifiers=CLASSIFIERS)
|
from fabric.api import task, env
from fabric.colors import green, red
from fabric_colors.environment import set_target_env
@task
@set_target_env
def distro():
"""
Usage: `fab -R all distro`. Determine the distro of given target host(s).
"""
if env.get('distro'):
print(green("Host distro is {0}.".format(env.distro)))
else:
print(red("Unable to determine host's distro."))
|
import numpy as np
import pickle
import bson
import util.associate as ass
import core.benchmark_comparison
class RPEBenchmarkComparison(core.benchmark_comparison.BenchmarkComparison):
"""
Comparison of two Relative Pose Error benchmark results.
Basically just the difference in translational error.
"""
def __init__(self, offset=0, max_difference=0.02, id_=None):
"""
Make a Comparison Benchmark for RPE,
parameters are for configuring the matches between the two compared benchmarks
:param offset: offset applied to the computed benchmark timestamps
:param max_difference: Maximum acceptable difference between timestamps
"""
super().__init__(id_=id_)
self._offset = offset
self._max_difference = max_difference
@property
def offset(self):
return self._offset
@offset.setter
def offset(self, offset):
self._offset = offset
@property
def max_difference(self):
return self._max_difference
@max_difference.setter
def max_difference(self, max_difference):
if max_difference >= 0:
self._max_difference = max_difference
def get_settings(self):
return {
'offset': self.offset,
'max_difference': self.max_difference
}
def serialize(self):
output = super().serialize()
output['offset'] = self.offset
output['max_difference'] = self.max_difference
return output
@classmethod
def deserialize(cls, serialized_representation, db_client, **kwargs):
if 'offset' in serialized_representation:
kwargs['offset'] = serialized_representation['offset']
if 'max_difference' in serialized_representation:
kwargs['max_difference'] = serialized_representation['max_difference']
return super().deserialize(serialized_representation, db_client, **kwargs)
@classmethod
def get_benchmark_requirements(cls):
"""
Get the requirements for benchmark results that can be compared by this BenchmarkComparison.
Both benchmark results must be RelativePoseError results.
:return:
"""
return {'benchmark': 'RelativePoseError'}
def is_result_appropriate(self, benchmark_result):
"""
Can this particular benchmark result be used in the benchmark?
:param benchmark_result:
:return:
"""
return (hasattr(benchmark_result, 'identifier') and
hasattr(benchmark_result, 'translational_error') and
hasattr(benchmark_result, 'rotational_error'))
def compare_results(self, benchmark_result, reference_benchmark_result):
"""
Compare the first Relative Pose Error result with a reference benchmark result.
:param benchmark_result:
:param reference_benchmark_result:
:return:
"""
matches = ass.associate(benchmark_result.translational_error, reference_benchmark_result.translational_error,
offset=self.offset, max_difference=self.max_difference)
trans_error_diff = {}
rot_error_diff = {}
for result_stamp, ref_stamp in matches:
trans_error_diff[ref_stamp] = (reference_benchmark_result.translational_error[ref_stamp] -
benchmark_result.translational_error[result_stamp])
rot_error_diff[ref_stamp] = (reference_benchmark_result.rotational_error[ref_stamp] -
benchmark_result.rotational_error[result_stamp])
return RPEBenchmarkComparisonResult(benchmark_comparison_id=self.identifier,
benchmark_result=benchmark_result.identifier,
reference_benchmark_result=reference_benchmark_result.identifier,
difference_in_translational_error=trans_error_diff,
difference_in_rotational_error=rot_error_diff,
settings=self.get_settings())
class RPEBenchmarkComparisonResult(core.benchmark_comparison.BenchmarkComparisonResult):
"""
The result of comparing two Relative Pose Error measurements.
Is just the difference in each of the error metrics.
"""
def __init__(self, benchmark_comparison_id, benchmark_result, reference_benchmark_result,
difference_in_translational_error, difference_in_rotational_error, settings, id_=None, **kwargs):
kwargs['success'] = True
super().__init__(benchmark_comparison_id=benchmark_comparison_id,
benchmark_result=benchmark_result,
reference_benchmark_result=reference_benchmark_result,
id_=id_, **kwargs)
self._trans_error_diff = difference_in_translational_error
self._rot_error_diff = difference_in_rotational_error
self._settings = settings
@property
def translational_error_difference(self):
return self._trans_error_diff
@property
def trans_rmse(self):
trans_error = np.array(list(self.translational_error_difference.values()))
return np.sqrt(np.dot(trans_error, trans_error) / len(trans_error))
@property
def trans_mean(self):
trans_error = np.array(list(self.translational_error_difference.values()))
return np.mean(trans_error)
@property
def trans_median(self):
trans_error = np.array(list(self.translational_error_difference.values()))
return np.median(trans_error)
@property
def trans_std(self):
trans_error = np.array(list(self.translational_error_difference.values()))
return np.std(trans_error)
@property
def trans_min(self):
trans_error = np.array(list(self.translational_error_difference.values()))
return np.min(trans_error)
@property
def trans_max(self):
trans_error = np.array(list(self.translational_error_difference.values()))
return np.max(trans_error)
@property
def rotational_error_difference(self):
return self._rot_error_diff
@property
def rot_rmse(self):
rot_error = np.array(list(self.rotational_error_difference.values()))
return np.sqrt(np.dot(rot_error, rot_error) / len(rot_error))
@property
def rot_mean(self):
rot_error = np.array(list(self.rotational_error_difference.values()))
return np.mean(rot_error)
@property
def rot_median(self):
rot_error = np.array(list(self.rotational_error_difference.values()))
return np.median(rot_error)
@property
def rot_std(self):
rot_error = np.array(list(self.rotational_error_difference.values()))
return np.std(rot_error)
@property
def rot_min(self):
rot_error = np.array(list(self.rotational_error_difference.values()))
return np.min(rot_error)
@property
def rot_max(self):
rot_error = np.array(list(self.rotational_error_difference.values()))
return np.max(rot_error)
@property
def settings(self):
return self._settings
def serialize(self):
output = super().serialize()
output['trans_error_diff'] = bson.Binary(pickle.dumps(self.translational_error_difference,
protocol=pickle.HIGHEST_PROTOCOL))
output['rot_error_diff'] = bson.Binary(pickle.dumps(self.rotational_error_difference,
protocol=pickle.HIGHEST_PROTOCOL))
output['settings'] = self.settings
return output
@classmethod
def deserialize(cls, serialized_representation, db_client, **kwargs):
if 'trans_error_diff' in serialized_representation:
kwargs['difference_in_translational_error'] = pickle.loads(serialized_representation['trans_error_diff'])
if 'rot_error_diff' in serialized_representation:
kwargs['difference_in_rotational_error'] = pickle.loads(serialized_representation['rot_error_diff'])
if 'settings' in serialized_representation:
kwargs['settings'] = serialized_representation['settings']
return super().deserialize(serialized_representation, db_client, **kwargs)
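# Usage sketch (illustrative; the two result objects are assumed to come
# from RelativePoseError benchmark runs, each exposing `identifier` and
# per-timestamp `translational_error`/`rotational_error` dicts):
#
#     comparison = RPEBenchmarkComparison(offset=0, max_difference=0.02)
#     result = comparison.compare_results(run_result, reference_result)
#     print(result.trans_rmse, result.rot_rmse)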
|
from django import forms
from django.core.exceptions import ValidationError
from django.utils.translation import ugettext_lazy as _
from widgets import HoneypotWidget
class HoneypotField(forms.BooleanField):
def __init__(self, *args, **kwargs):
        super(HoneypotField, self).__init__(
            widget=HoneypotWidget,
            required=False,
            error_messages={'checked': _("Please don't check this box.")},
            *args, **kwargs)
def clean(self, value):
val = super(HoneypotField, self).clean(value)
if val:
raise ValidationError(self.error_messages['checked'])
return val
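# Usage sketch (illustrative; `ContactForm` is a made-up example). The
# field renders as a hidden checkbox that human visitors never see; bots
# that tick it trigger the 'checked' validation error:
#
#     class ContactForm(forms.Form):
#         email = forms.EmailField()
#         spam_trap = HoneypotField()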
|
def accelerator_ipaddresstype(type):
"""
Property: Accelerator.IpAddressType
"""
valid_types = ["IPV4"]
if type not in valid_types:
raise ValueError(
'IpAddressType must be one of: "%s"' % (", ".join(valid_types))
)
return type
def endpointgroup_healthcheckprotocol(protocol):
"""
Property: EndpointGroup.HealthCheckProtocol
"""
valid_protocols = ["HTTP", "HTTPS", "TCP"]
if protocol not in valid_protocols:
raise ValueError(
'HealthCheckProtocol must be one of: "%s"' % (", ".join(valid_protocols))
)
return protocol
def listener_clientaffinity(affinity):
"""
Property: Listener.ClientAffinity
"""
valid_affinities = ["NONE", "SOURCE_IP"]
if affinity not in valid_affinities:
raise ValueError(
'ClientAffinity must be one of: "%s"' % (", ".join(valid_affinities))
)
return affinity
def listener_protocol(protocol):
"""
Property: Listener.Protocol
"""
valid_protocols = ["TCP", "UDP"]
if protocol not in valid_protocols:
raise ValueError('Protocol must be one of: "%s"' % (", ".join(valid_protocols)))
return protocol
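# Usage sketch: each validator either returns its argument unchanged or
# raises ValueError, so they can be applied directly to property values:
#
#     listener_protocol("TCP")     # -> "TCP"
#     listener_protocol("SCTP")    # -> ValueError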
|
import numpy as np
import sys
from jug import TaskGenerator
from os.path import expanduser
HOME = expanduser("~")
if "storage" in HOME:
HOME = "/storage/home/geffroy"
sys.path.append(HOME + "/Code/PG/Source")
from phase_fluctuations import DWaveModel
from MCMC import MCMCDriver
N_RUNS = 32
TARGET_SNAPSHOTS = 32
TEMPERATURES = [275.0, 320.0, 360.0, 395.0,
430.0, 470.0, 490.0, 550.0, 575.0, 625.0]
THERMALIZATION_STEPS = 150
ROOT_PHASE_SEED = 123456789
ROOT_MC_SEED = 234567
OBSERVABLE_NAME = "correlation_length"
OBSERVABLE_NAME2 = "DOS"
MC_SEEDS = [ROOT_MC_SEED + i for i in np.arange(N_RUNS)]
PHASE_SEEDS = [ROOT_PHASE_SEED + i for i in np.arange(N_RUNS)]
COMPUTATION_PARAMS = []
for i in range(N_RUNS):
for temp in TEMPERATURES:
COMPUTATION_PARAMS.append({"mc_seed": MC_SEEDS[i],
"phase_seed": PHASE_SEEDS[i],
"temperature": temp})
@TaskGenerator
def get_result(in_phase_seed, in_mc_seed, in_temperature):
"""generic task generator constructor"""
t_cst = 0.25
bcs_params = {"width": 32, "chem_potential": 0.0,
"hopping_constant": t_cst, "J_constant": 0.1 * t_cst / 0.89,
"g_constant": 0.25, "delta": 1.0 * t_cst, "use_assaad": True,
"uniform_phase": False, "temperature": in_temperature,
"seed": in_phase_seed}
my_model = DWaveModel(bcs_params)
mc_params = {"seed": in_mc_seed, "intervals": 5,
"target_snapshots": TARGET_SNAPSHOTS,
"observable_list": [OBSERVABLE_NAME, OBSERVABLE_NAME2],
"algorithm": "cluster"}
my_driver = MCMCDriver(my_model, mc_params)
my_driver.thermalize(THERMALIZATION_STEPS)
my_driver.execute()
return my_driver.result
@TaskGenerator
def join(partials):
"""Put all the results together"""
return np.array([my_elem for my_elem in partials])
fullresults = join([get_result(elem["phase_seed"],
elem["mc_seed"],
elem["temperature"])
for elem in COMPUTATION_PARAMS])
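# Execution sketch (illustrative): under jug, each get_result(...) call
# above only records a task; `jug execute <this file>` runs the tasks
# (possibly across many processes), and `join` gathers the
# N_RUNS x len(TEMPERATURES) partial results into a single numpy array.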
|
import os
from .ProactiveScript import *
class ProactiveForkEnv(ProactiveScript):
"""
Represents a generic proactive fork env script
script_language (ProactiveScriptLanguage)
implementation (string)
java_home (string)
"""
def __init__(self, script_language):
super(ProactiveForkEnv, self).__init__(script_language)
self.java_home = '/usr'
def setJavaHome(self, java_home):
self.java_home = java_home
def getJavaHome(self):
return self.java_home
|
from copy import deepcopy
import numpy as np
class DBN(object):
    def __init__(self, number_visible_units=1, number_hidden_units=(1,), layers=1, network=None):
"""Creates an architecture for a deep belief network.
Keyword arguments:
number_visible_units -- An integer denoting the number of visible units.
number_hidden_units -- A list containing the number of hidden units at each level of the network.
        layers -- The number of layers in the network (default 1, resulting in a single restricted Boltzmann machine).
        network -- An existing DBN to copy; when given, the other arguments are ignored (used by __deepcopy__).
        """
self.neuron_values = list()
self.biases = list()
self.connection_weights = list()
        if network is None:
self.number_visible_units = number_visible_units
self.number_hidden_units = list(number_hidden_units)
self.layers = layers
self.neuron_values.append(np.zeros(number_visible_units))
self.neuron_values.append(np.zeros(number_hidden_units[0]))
self.biases.append(np.random.rand(number_visible_units))
self.biases.append(np.random.rand(number_hidden_units[0]))
#we initialise the connection weights randomly and scale them to the range (0,0.05)
self.connection_weights.append(np.random.rand(number_visible_units,number_hidden_units[0]) * 0.05)
for i in xrange(1,layers):
self.neuron_values.append(np.zeros(number_hidden_units[i]))
self.biases.append(np.random.rand(number_hidden_units[i]))
weights = np.random.rand(number_hidden_units[i-1],number_hidden_units[i]) * 0.05
self.connection_weights.append(weights)
else:
self.number_visible_units = network.number_visible_units
self.number_hidden_units = list(network.number_hidden_units)
self.layers = network.layers
self.connection_weights.append(np.array(network.connection_weights[0]))
self.neuron_values.append(np.zeros(network.number_visible_units))
self.neuron_values.append(np.zeros(network.number_hidden_units[0]))
self.biases.append(np.array(network.biases[0]))
self.biases.append(np.array(network.biases[1]))
for i in xrange(1,self.layers):
self.neuron_values.append(np.zeros(network.number_hidden_units[i]))
self.biases.append(np.array(network.biases[i]))
self.connection_weights.append(np.array(network.connection_weights[i]))
def __deepcopy__(self, memo):
return DBN(network=self)
def train(self, data, epochs=100, learning_rate=0.1):
"""Trains the belief network with a given set of training vectors.
Keyword arguments:
data -- A 'numpy.array' containing data for training the RBM. Each row of the array should be a training vector of dimension 'number_visible_units'.
epochs -- The number of iterations of the learning algorithm (default 100).
        learning_rate -- The algorithm's learning rate (default 0.1).
"""
number_training_vectors = data.shape[0]
for current_layer in xrange(self.layers):
for _ in xrange(epochs):
for vector in xrange(number_training_vectors):
np.copyto(self.neuron_values[0],data[vector])
#we assign values to all neurons up to the current layer;
#if we are training the first layer, no hidden units will be assigned;
#if we are training an upper layer, then hidden neurons below it will be assigned values
for layer in xrange(current_layer):
for neuron in xrange(self.number_hidden_units[layer]):
prob = self._logistic(np.sum(self.connection_weights[layer][:,neuron] * self.neuron_values[layer]) + self.biases[layer+1][neuron])
threshold = np.random.rand()
if prob > threshold:
self.neuron_values[layer+1][neuron] = 1.
else:
self.neuron_values[layer+1][neuron] = 0.
#we sample from the current layer of the network
layer_sample = self._sample_layer(current_layer, 1)
#we update the connection weights between the visible and hidden units of the current layer
for i in xrange(self.connection_weights[current_layer].shape[0]):
#we update the bias values of the visible units in the current visible layer
visible_bias_delta = learning_rate * (self.neuron_values[current_layer][i] - layer_sample[i])
self.biases[current_layer][i] = self.biases[current_layer][i] + visible_bias_delta
for j in xrange(self.connection_weights[current_layer].shape[1]):
data_expectation = self._logistic(np.sum(self.connection_weights[current_layer][:,j] * self.neuron_values[current_layer]) + self.biases[current_layer+1][j])
sample_expectation = self._logistic(np.sum(self.connection_weights[current_layer][:,j] * layer_sample) + self.biases[current_layer+1][j])
#we update the connection weight between the i-th visible unit and the j-th hidden unit
weight_change_delta = learning_rate * (data_expectation * self.neuron_values[current_layer][i] - sample_expectation * layer_sample[i])
self.connection_weights[current_layer][i,j] = self.connection_weights[current_layer][i,j] + weight_change_delta
#we update the bias values of the hidden units in the current visible layer
hidden_bias_delta = learning_rate * (data_expectation - sample_expectation)
self.biases[current_layer+1][j] = self.biases[current_layer+1][j] + hidden_bias_delta
def sample_network(self, vector=None):
"""Samples a vector from the network.
Keyword arguments:
vector -- An input vector for the network's visible layer.
Returns:
visible_units -- A 'numpy.array' containing the sampled values.
"""
        if vector is None:
for i in xrange(self.number_visible_units):
self.neuron_values[0][i] = np.random.rand()
else:
self.neuron_values[0] = np.array(vector)
for layer in xrange(self.layers):
number_hidden_units = len(self.neuron_values[layer+1])
for neuron in xrange(number_hidden_units):
prob = self._logistic(np.sum(self.connection_weights[layer][:,neuron] * self.neuron_values[layer]) + self.biases[layer+1][neuron])
self.neuron_values[layer+1][neuron] = prob
for layer in xrange(self.layers-1,-1,-1):
number_visible_units = len(self.neuron_values[layer])
for neuron in xrange(number_visible_units):
prob = self._logistic(np.sum(self.connection_weights[layer][neuron,:] * self.neuron_values[layer+1]) + self.biases[layer][neuron])
self.neuron_values[layer][neuron] = prob
return np.array(self.neuron_values[0])
def _sample_layer(self, layer, k):
"""Samples a visible vector at the layer-th layer of the network.
Uses Contrastive Divergence for sampling the values.
Keyword arguments:
layer -- The layer at which we want to sample.
k -- The number of samples created by Contrastive Divergence before a sample is accepted.
Returns:
visible_units -- A 'numpy.array' containing the sampled values.
"""
visible_units = np.array(self.neuron_values[layer])
hidden_units = np.array(self.neuron_values[layer+1])
visible_biases = np.array(self.biases[layer])
hidden_biases = np.array(self.biases[layer+1])
number_visible_units = len(visible_units)
number_hidden_units = len(hidden_units)
for sample in xrange(k):
for neuron in xrange(number_hidden_units):
prob = self._logistic(np.sum(self.connection_weights[layer][:,neuron] * visible_units) + hidden_biases[neuron])
hidden_units[neuron] = prob
for neuron in xrange(number_visible_units):
prob = self._logistic(np.sum(self.connection_weights[layer][neuron,:] * hidden_units) + visible_biases[neuron])
visible_units[neuron] = prob
return visible_units
def _logistic(self, x):
if x < -30:
return 0.
elif x > 30:
return 1.
return 1. / (1. + np.exp(-x))
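# Usage sketch (illustrative; tiny binary toy data):
#
#     data = np.array([[1., 0., 1., 0.],
#                      [0., 1., 0., 1.]])
#     dbn = DBN(number_visible_units=4, number_hidden_units=[3, 2], layers=2)
#     dbn.train(data, epochs=10, learning_rate=0.1)
#     sample = dbn.sample_network()   # array of visible-unit activations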
|
import Orange
from orangecontrib.recommendation.tests.coverage import TestRankingModels
from orangecontrib.recommendation import CLiMFLearner
from orangecontrib.recommendation.optimizers import *
import unittest
__dataset__ = 'binary_data.tab'
__dataset2__ = 'binary_data_dis.tab'
__optimizers__ = [SGD(), Momentum(momentum=0.9),
NesterovMomentum(momentum=0.9), AdaGrad(),
RMSProp(rho=0.9), AdaDelta(rho=0.95),
Adam(beta1=0.9, beta2=0.999)]
class TestCLiMF(unittest.TestCase, TestRankingModels):
def test_input_data_continuous(self, *args):
learner = CLiMFLearner(num_factors=2, num_iter=1, verbose=3,
callback=lambda x: None)
# Test SGD optimizers too
for opt in __optimizers__:
learner.optimizer = opt
print(learner.optimizer)
super().test_input_data_continuous(learner, filename=__dataset__)
def test_input_data_discrete(self, *args):
learner = CLiMFLearner(num_factors=2, num_iter=1)
super().test_input_data_discrete(learner, filename=__dataset2__)
@unittest.skip("Skipping test")
def test_CV(self, *args):
learner = CLiMFLearner(num_factors=2, num_iter=1)
super().test_CV(learner, filename=__dataset__)
def test_warnings(self, *args):
learner = CLiMFLearner(num_factors=2, num_iter=1, learning_rate=0.0)
super().test_warnings(learner, filename=__dataset__)
def test_divergence(self, *args):
learner = CLiMFLearner(num_factors=2, num_iter=1, learning_rate=1e20)
super().test_divergence(learner, filename=__dataset__)
def test_mrr(self, *args):
learner = CLiMFLearner(num_factors=2, num_iter=1, verbose=0)
super().test_mrr(learner, filename=__dataset__)
@unittest.skip("Skipping test")
def test_mrr2(self):
learner = CLiMFLearner(num_factors=10, num_iter=10, verbose=3)
super().test_mrr(learner, filename='epinions_train.tab',
testdata='epinions_test.tab')
def test_objective(self):
from orangecontrib.recommendation.ranking.climf import compute_loss
# Load data
data = Orange.data.Table(__dataset__)
steps = [1, 10, 30]
objectives = []
learner = CLiMFLearner(num_factors=10, random_state=42, verbose=0)
for step in steps:
learner.num_iter = step
recommender = learner(data)
# Set parameters
low_rank_matrices = (recommender.U, recommender.V)
params = learner.lmbda
objective = compute_loss(data, low_rank_matrices, params)
objectives.append(objective)
# Assert objective values decrease
test = list(
map(lambda t: t[0] <= t[1], zip(objectives, objectives[1:])))
self.assertTrue(all(test))
def test_outputs(self):
# Load data
data = Orange.data.Table(__dataset__)
        # Build learner
learner = CLiMFLearner(num_factors=2, num_iter=1)
# Train recommender
recommender = learner(data)
        # Check factor tables U and V
U = recommender.getUTable()
V = recommender.getVTable()
diff = len({U.X.shape[1], V.X.shape[1]})
self.assertEqual(diff, 1)
if __name__ == "__main__":
# # Test all
# unittest.main()
# Test single test
suite = unittest.TestSuite()
suite.addTest(TestCLiMF("test_input_data_continuous"))
runner = unittest.TextTestRunner()
runner.run(suite)
|
import hoggorm as ho
import hoggormplot as hop
import pandas as pd
import numpy as np
X_df = pd.read_csv('cheese_fluorescence.txt', index_col=0, sep='\t')
X_df
Y_df = pd.read_csv('cheese_sensory.txt', index_col=0, sep='\t')
Y_df
X = X_df.values
Y = Y_df.values
X_varNames = list(X_df.columns)
Y_varNames = list(Y_df.columns)
X_objNames = list(X_df.index)
Y_objNames = list(Y_df.index)
X_cent = ho.center(X_df.values, axis=0)
Y_cent = ho.center(Y_df.values, axis=0)
X_cent
Y_cent
rv_results_cent = ho.RVcoeff([X_cent, Y_cent])
rv_results_cent
rv2_results_cent = ho.RV2coeff([X_cent, Y_cent])
rv2_results_cent
X_stand = ho.standardise(X_df.values, mode=0)
Y_stand = ho.standardise(Y_df.values, mode=0)
rv_results_stand = ho.RVcoeff([X_stand, Y_stand])
rv_results_stand
rv2_results_stand = ho.RV2coeff([X_stand, Y_stand])
rv2_results_stand
|
"""
@file shuffle_lines.py
@brief shuffle lines in a file
@author ChenglongChen
"""
import sys
import csv
import random
import numpy as np
from string import atoi
def main():
# collect argvs
seed = atoi(sys.argv[1])
file_in = sys.argv[2]
file_out = sys.argv[3]
# read
with open(file_in) as in_:
lines = in_.readlines()
# shuffle
random.seed(seed)
random.shuffle(lines)
# write
with open(file_out, "w") as out_:
for line in lines:
out_.write(line)
if __name__ == "__main__":
main()
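# Example invocation (file names are placeholders):
#     python shuffle_lines.py 2016 train.csv train.shuffled.csv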
|
import config
import shutil
import jinja2
import markdown2 as markdown
import os.path
from pygments import highlight
from pygments.formatters import HtmlFormatter
from pygments.lexers import get_lexer_by_name, TextLexer
import re
import yaml
POST_HEADER_SEP_RE = re.compile('^---$', re.MULTILINE)
DATE_FORMAT = '%Y-%m-%d %H:%M'
SOURCECODE_RE = re.compile(
r'\[sourcecode:(.+?)\](.+?)\[/sourcecode\]', re.S)
def pygments_preprocess(lines):
formatter = HtmlFormatter(noclasses=False)
def repl(m):
try:
lexer = get_lexer_by_name(m.group(1))
except ValueError:
lexer = TextLexer()
code = highlight(m.group(2), lexer, formatter)
code = code.replace('\n\n', '\n \n').strip().replace('\n', '<br />')
return '\n\n<div class="code">%s</div>\n\n' % code
return SOURCECODE_RE.sub(repl, lines)
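# pygments_preprocess turns blocks such as
#     [sourcecode:python]print 'hi'[/sourcecode]
# into highlighted <div class="code">...</div> markup before the Markdown
# pass; unknown language tags fall back to the plain TextLexer.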
class Blog(object):
def __init__(self, title, posts):
self.title = title
self.posts = posts
def __str__(self):
return config.TITLE
@property
def path(self):
return '%s%sindex.html' % (
config.OUT_PATH,
config.BLOG_URL, )
@property
def url(self):
return '/%s' % (config.BLOG_URL, )
class Post(object):
def __init__(self, pub_date, title, slug, content):
self.pub_date = pub_date
self.title = title
self.slug = slug
self.content = content
def __cmp__(self, other):
return cmp(self.pub_date, other.pub_date)
def __str__(self):
return self.title
@property
def path(self):
return '%s%s%s/index.html' % (
config.OUT_PATH,
config.BLOG_URL,
config.POST_URL.format(post=self), )
@property
def url(self):
return '/%s%s/' % (
config.BLOG_URL,
config.POST_URL.format(post=self), )
class Page(object):
def __init__(self, path, content, meta_data=None):
self._path = path
self.content = content
self.meta_data = meta_data
@property
def path(self):
return '%s%s/index.html' % (
config.OUT_PATH,
self._path, )
@property
def url(self):
return '/%s/' % (self._path, )
def post_from_filename(filename):
with open(filename) as post_file:
post_data = post_file.read()
headers, content = re.split(POST_HEADER_SEP_RE, post_data, 1)
headers = yaml.load(headers)
content = markdown.markdown(pygments_preprocess(content)).strip()
pub_date = headers['date']
title = headers['title']
slug, __ = os.path.splitext(os.path.basename(filename))
    match = re.match(r'\d{4}-\d{2}-\d{2}-(.+)', slug)
if match:
slug = match.group(1)
return Post(pub_date, title, slug, content)
def blog_from_path(title, path):
posts = []
posts_path = os.path.join(path, 'posts/')
for filename in os.listdir(posts_path):
posts.append(post_from_filename(os.path.join(posts_path, filename)))
return Blog(title, list(reversed(sorted(posts))))
def page_from_filename(filename, base_path):
with open(filename) as page_file:
page_data = page_file.read()
header, content = re.split(POST_HEADER_SEP_RE, page_data, 1)
meta_data = yaml.load(header)
content = markdown.markdown(pygments_preprocess(content)).strip()
slug, __ = os.path.splitext(os.path.relpath(filename, base_path))
    match = re.match(r'\d{4}-\d{2}-\d{2}-(.+)', slug)
if match:
slug = match.group(1)
return Page(slug, content, meta_data=meta_data)
def pages_from_path(path):
pages = []
for dirname, folders, filenames in os.walk(path):
for filename in filenames:
page_path = os.path.join(dirname, filename)
pages.append(page_from_filename(page_path, path))
return pages
def build():
blog = blog_from_path(config.TITLE, config.IN_PATH)
environment = jinja2.Environment(
loader=jinja2.FileSystemLoader(
os.path.join(config.IN_PATH, 'templates/')))
# Copy static files
shutil.copytree(
os.path.join(config.IN_PATH, 'static/'),
config.OUT_PATH)
# Render static pages
pages = pages_from_path(os.path.join(config.IN_PATH, 'pages/'))
for page in pages:
page_template_name = page.meta_data.get('template', 'page.html')
page_template = environment.get_template(page_template_name)
if not os.path.isdir(os.path.dirname(page.path)):
os.makedirs(os.path.dirname(page.path))
with open(page.path, 'w') as out_file:
out_file.write(page_template.render(page=page))
# Render the base blog page
blog_template = environment.get_template('index.html')
if not os.path.isdir(os.path.dirname(blog.path)):
os.makedirs(os.path.dirname(blog.path))
with open(blog.path, 'w') as out_file:
out_file.write(blog_template.render(blog=blog))
# Render post pages
post_template = environment.get_template('post.html')
for post in blog.posts:
if not os.path.isdir(os.path.dirname(post.path)):
os.makedirs(os.path.dirname(post.path))
with open(post.path, 'w') as out_file:
out_file.write(post_template.render(blog=blog, post=post))
def clean():
try:
shutil.rmtree(config.OUT_PATH)
except OSError:
print '%s could not be deleted.' % config.OUT_PATH
def serve():
from BaseHTTPServer import BaseHTTPRequestHandler, HTTPServer
MIMETYPES = {
'.css': 'text/css',
'.html': 'text/html',
'.js': 'application/javascript',
}
class Handler(BaseHTTPRequestHandler):
def do_GET(self):
path = self.path[1:]
path = os.path.join(config.OUT_PATH, path)
if self.path[-1] == '/':
path = os.path.join(path, 'index.html')
f = open(path)
self.send_response(200)
__, ext = os.path.splitext(self.path)
mimetype = MIMETYPES.get(ext, 'text/html')
self.send_header('Content-type', mimetype)
self.end_headers()
self.wfile.write(f.read())
f.close()
return
PORT = 8000
server = HTTPServer(('', PORT), Handler)
print "Serving at port", PORT
server.serve_forever()
|
import peachpy.x86_64.uarch as uarch
import peachpy.x86_64.isa as isa
from peachpy.x86_64.function import Function
from peachpy.function import Argument
from peachpy.c.types import ptr
import peachpy.c.types as ctypes
from common.YepStatus import YepStatus
from kernels.sum_reduction import sum_Haswell, sum_squared_Haswell
arg_x = Argument(ptr(ctypes.const_Yep32f), name="xPointer")
arg_z = Argument(ptr(ctypes.Yep32f), name="zPointer")
arg_n = Argument(ctypes.YepSize, name="length")
with Function("yepCore_Sum_V32f_S32f",
(arg_x, arg_z, arg_n),
YepStatus, target=uarch.haswell + isa.avx2) as yepCore_Sum_V32f_S32f:
sum_Haswell(arg_x, arg_z, arg_n)
with Function("yepCore_SumSquares_V32f_S32f",
(arg_x, arg_z, arg_n),
YepStatus, target=uarch.haswell + isa.avx2) as yepCore_SumSquares_V32f_S32f:
sum_squared_Haswell(arg_x, arg_z, arg_n)
arg_x = Argument(ptr(ctypes.const_Yep64f), name="xPointer")
arg_z = Argument(ptr(ctypes.Yep64f), name="zPointer")
arg_n = Argument(ctypes.YepSize, name="length")
with Function("yepCore_Sum_V64f_S64f",
(arg_x, arg_z, arg_n),
YepStatus, target=uarch.haswell + isa.avx2) as yepCore_Sum_V64f_S64f:
sum_Haswell(arg_x, arg_z, arg_n)
with Function("yepCore_SumSquares_V64f_S64f",
(arg_x, arg_z, arg_n),
YepStatus, target=uarch.haswell + isa.avx2) as yepCore_SumSquares_V64f_S64f:
sum_squared_Haswell(arg_x, arg_z, arg_n)
|
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'EventTag.group_id'
db.add_column(
'sentry_eventtag',
'group_id',
self.gf('sentry.db.models.fields.bounded.BoundedBigIntegerField')(null=True),
keep_default=False
)
def backwards(self, orm):
# Deleting field 'EventTag.group_id'
db.delete_column('sentry_eventtag', 'group_id')
models = {
'sentry.activity': {
'Meta': {
'object_name': 'Activity'
},
'data': ('sentry.db.models.fields.gzippeddict.GzippedDictField', [], {
'null': 'True'
}),
'datetime':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'group': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Group']",
'null': 'True'
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'ident':
('django.db.models.fields.CharField', [], {
'max_length': '64',
'null': 'True'
}),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']"
}
),
'type': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {}),
'user': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.User']",
'null': 'True'
}
)
},
'sentry.apikey': {
'Meta': {
'object_name': 'ApiKey'
},
'allowed_origins':
('django.db.models.fields.TextField', [], {
'null': 'True',
'blank': 'True'
}),
'date_added':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'key':
('django.db.models.fields.CharField', [], {
'unique': 'True',
'max_length': '32'
}),
'label': (
'django.db.models.fields.CharField', [], {
'default': "'Default'",
'max_length': '64',
'blank': 'True'
}
),
'organization': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'related_name': "'key_set'",
'to': "orm['sentry.Organization']"
}
),
'scopes': ('django.db.models.fields.BigIntegerField', [], {
'default': 'None'
}),
'status': (
'sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'default': '0',
'db_index': 'True'
}
)
},
'sentry.auditlogentry': {
'Meta': {
'object_name': 'AuditLogEntry'
},
'actor': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'blank': 'True',
'related_name': "'audit_actors'",
'null': 'True',
'to': "orm['sentry.User']"
}
),
'actor_key': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.ApiKey']",
'null': 'True',
'blank': 'True'
}
),
'actor_label': (
'django.db.models.fields.CharField', [], {
'max_length': '64',
'null': 'True',
'blank': 'True'
}
),
'data': ('sentry.db.models.fields.gzippeddict.GzippedDictField', [], {}),
'datetime':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'event': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'ip_address': (
'django.db.models.fields.GenericIPAddressField', [], {
'max_length': '39',
'null': 'True'
}
),
'organization': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Organization']"
}
),
'target_object':
('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'null': 'True'
}),
'target_user': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'blank': 'True',
'related_name': "'audit_targets'",
'null': 'True',
'to': "orm['sentry.User']"
}
)
},
'sentry.authidentity': {
'Meta': {
'unique_together': "(('auth_provider', 'ident'), ('auth_provider', 'user'))",
'object_name': 'AuthIdentity'
},
'auth_provider': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.AuthProvider']"
}
),
'data': ('sentry.db.models.fields.jsonfield.JSONField', [], {
'default': '{}'
}),
'date_added':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'ident': ('django.db.models.fields.CharField', [], {
'max_length': '128'
}),
'last_synced':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'last_verified':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'user': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.User']"
}
)
},
'sentry.authprovider': {
'Meta': {
'object_name': 'AuthProvider'
},
'config': ('sentry.db.models.fields.jsonfield.JSONField', [], {
'default': '{}'
}),
'date_added':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'default_global_access':
('django.db.models.fields.BooleanField', [], {
'default': 'True'
}),
'default_role':
('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'default': '50'
}),
'default_teams': (
'django.db.models.fields.related.ManyToManyField', [], {
'to': "orm['sentry.Team']",
'symmetrical': 'False',
'blank': 'True'
}
),
'flags': ('django.db.models.fields.BigIntegerField', [], {
'default': '0'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'last_sync': ('django.db.models.fields.DateTimeField', [], {
'null': 'True'
}),
'organization': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Organization']",
'unique': 'True'
}
),
'provider': ('django.db.models.fields.CharField', [], {
'max_length': '128'
}),
'sync_time':
('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'null': 'True'
})
},
'sentry.broadcast': {
'Meta': {
'object_name': 'Broadcast'
},
'date_added':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'date_expires': (
'django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime(2016, 5, 18, 0, 0)',
'null': 'True',
'blank': 'True'
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'is_active':
('django.db.models.fields.BooleanField', [], {
'default': 'True',
'db_index': 'True'
}),
'link': (
'django.db.models.fields.URLField', [], {
'max_length': '200',
'null': 'True',
'blank': 'True'
}
),
'message': ('django.db.models.fields.CharField', [], {
'max_length': '256'
}),
'title': ('django.db.models.fields.CharField', [], {
'max_length': '32'
}),
'upstream_id': (
'django.db.models.fields.CharField', [], {
'max_length': '32',
'null': 'True',
'blank': 'True'
}
)
},
'sentry.broadcastseen': {
'Meta': {
'unique_together': "(('broadcast', 'user'),)",
'object_name': 'BroadcastSeen'
},
'broadcast': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Broadcast']"
}
),
'date_seen':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'user': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.User']"
}
)
},
'sentry.counter': {
'Meta': {
'object_name': 'Counter',
'db_table': "'sentry_projectcounter'"
},
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']",
'unique': 'True'
}
),
'value': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {})
},
'sentry.dsymbundle': {
'Meta': {
'object_name': 'DSymBundle'
},
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'object': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.DSymObject']"
}
),
'sdk': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.DSymSDK']"
}
)
},
'sentry.dsymobject': {
'Meta': {
'object_name': 'DSymObject'
},
'cpu_name': ('django.db.models.fields.CharField', [], {
'max_length': '40'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'object_path': ('django.db.models.fields.TextField', [], {
'db_index': 'True'
}),
'uuid':
('django.db.models.fields.CharField', [], {
'max_length': '36',
'db_index': 'True'
}),
'vmaddr':
('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {
'null': 'True'
}),
'vmsize':
('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {
'null': 'True'
})
},
'sentry.dsymsdk': {
'Meta': {
'object_name':
'DSymSDK',
'index_together':
"[('version_major', 'version_minor', 'version_patchlevel', 'version_build')]"
},
'dsym_type':
('django.db.models.fields.CharField', [], {
'max_length': '20',
'db_index': 'True'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'sdk_name': ('django.db.models.fields.CharField', [], {
'max_length': '20'
}),
'version_build': ('django.db.models.fields.CharField', [], {
'max_length': '40'
}),
'version_major': ('django.db.models.fields.IntegerField', [], {}),
'version_minor': ('django.db.models.fields.IntegerField', [], {}),
'version_patchlevel': ('django.db.models.fields.IntegerField', [], {})
},
'sentry.dsymsymbol': {
'Meta': {
'unique_together': "[('object', 'address')]",
'object_name': 'DSymSymbol'
},
'address':
('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {
'db_index': 'True'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'object': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.DSymObject']"
}
),
'symbol': ('django.db.models.fields.TextField', [], {})
},
'sentry.event': {
'Meta': {
'unique_together': "(('project_id', 'event_id'),)",
'object_name': 'Event',
'db_table': "'sentry_message'",
'index_together': "(('group_id', 'datetime'),)"
},
'data':
('sentry.db.models.fields.node.NodeField', [], {
'null': 'True',
'blank': 'True'
}),
'datetime': (
'django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now',
'db_index': 'True'
}
),
'event_id': (
'django.db.models.fields.CharField', [], {
'max_length': '32',
'null': 'True',
'db_column': "'message_id'"
}
),
'group_id': (
'sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {
'null': 'True',
'blank': 'True'
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'message': ('django.db.models.fields.TextField', [], {}),
'platform':
('django.db.models.fields.CharField', [], {
'max_length': '64',
'null': 'True'
}),
'project_id': (
'sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {
'null': 'True',
'blank': 'True'
}
),
'time_spent':
('sentry.db.models.fields.bounded.BoundedIntegerField', [], {
'null': 'True'
})
},
'sentry.eventmapping': {
'Meta': {
'unique_together': "(('project_id', 'event_id'),)",
'object_name': 'EventMapping'
},
'date_added':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'event_id': ('django.db.models.fields.CharField', [], {
'max_length': '32'
}),
'group_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'project_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {})
},
'sentry.eventtag': {
'Meta': {
'unique_together': "(('event_id', 'key_id', 'value_id'),)",
'object_name': 'EventTag',
'index_together': "(('project_id', 'key_id', 'value_id'),)"
},
'date_added':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'event_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {}),
'group_id':
('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {
'null': 'True'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'key_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {}),
'project_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {}),
'value_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {})
},
'sentry.eventuser': {
'Meta': {
'unique_together':
"(('project', 'ident'), ('project', 'hash'))",
'object_name':
'EventUser',
'index_together':
"(('project', 'email'), ('project', 'username'), ('project', 'ip_address'))"
},
'date_added': (
'django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now',
'db_index': 'True'
}
),
'email':
('django.db.models.fields.EmailField', [], {
'max_length': '75',
'null': 'True'
}),
'hash': ('django.db.models.fields.CharField', [], {
'max_length': '32'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'ident':
('django.db.models.fields.CharField', [], {
'max_length': '128',
'null': 'True'
}),
'ip_address': (
'django.db.models.fields.GenericIPAddressField', [], {
'max_length': '39',
'null': 'True'
}
),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']"
}
),
'username':
('django.db.models.fields.CharField', [], {
'max_length': '128',
'null': 'True'
})
},
'sentry.file': {
'Meta': {
'object_name': 'File'
},
'blob': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'related_name': "'legacy_blob'",
'null': 'True',
'to': "orm['sentry.FileBlob']"
}
),
'blobs': (
'django.db.models.fields.related.ManyToManyField', [], {
'to': "orm['sentry.FileBlob']",
'through': "orm['sentry.FileBlobIndex']",
'symmetrical': 'False'
}
),
'checksum':
('django.db.models.fields.CharField', [], {
'max_length': '40',
'null': 'True'
}),
'headers': ('sentry.db.models.fields.jsonfield.JSONField', [], {
'default': '{}'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'name': ('django.db.models.fields.CharField', [], {
'max_length': '128'
}),
'path': ('django.db.models.fields.TextField', [], {
'null': 'True'
}),
'size':
('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'null': 'True'
}),
'timestamp': (
'django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now',
'db_index': 'True'
}
),
'type': ('django.db.models.fields.CharField', [], {
'max_length': '64'
})
},
'sentry.fileblob': {
'Meta': {
'object_name': 'FileBlob'
},
'checksum':
('django.db.models.fields.CharField', [], {
'unique': 'True',
'max_length': '40'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'path': ('django.db.models.fields.TextField', [], {
'null': 'True'
}),
'size':
('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'null': 'True'
}),
'timestamp': (
'django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now',
'db_index': 'True'
}
)
},
'sentry.fileblobindex': {
'Meta': {
'unique_together': "(('file', 'blob', 'offset'),)",
'object_name': 'FileBlobIndex'
},
'blob': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.FileBlob']"
}
),
'file': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.File']"
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'offset': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {})
},
'sentry.globaldsymfile': {
'Meta': {
'object_name': 'GlobalDSymFile'
},
'cpu_name': ('django.db.models.fields.CharField', [], {
'max_length': '40'
}),
'file': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.File']"
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'object_name': ('django.db.models.fields.TextField', [], {}),
'uuid':
('django.db.models.fields.CharField', [], {
'unique': 'True',
'max_length': '36'
})
},
'sentry.group': {
'Meta': {
'unique_together': "(('project', 'short_id'),)",
'object_name': 'Group',
'db_table': "'sentry_groupedmessage'",
'index_together': "(('project', 'first_release'),)"
},
'active_at':
('django.db.models.fields.DateTimeField', [], {
'null': 'True',
'db_index': 'True'
}),
'culprit': (
'django.db.models.fields.CharField', [], {
'max_length': '200',
'null': 'True',
'db_column': "'view'",
'blank': 'True'
}
),
'data': (
'sentry.db.models.fields.gzippeddict.GzippedDictField', [], {
'null': 'True',
'blank': 'True'
}
),
'first_release': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Release']",
'null': 'True',
'on_delete': 'models.PROTECT'
}
),
'first_seen': (
'django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now',
'db_index': 'True'
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'is_public': (
'django.db.models.fields.NullBooleanField', [], {
'default': 'False',
'null': 'True',
'blank': 'True'
}
),
'last_seen': (
'django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now',
'db_index': 'True'
}
),
'level': (
'sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'default': '40',
'db_index': 'True',
'blank': 'True'
}
),
'logger': (
'django.db.models.fields.CharField', [], {
'default': "''",
'max_length': '64',
'db_index': 'True',
'blank': 'True'
}
),
'message': ('django.db.models.fields.TextField', [], {}),
'num_comments': (
'sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'default': '0',
'null': 'True'
}
),
'platform':
('django.db.models.fields.CharField', [], {
'max_length': '64',
'null': 'True'
}),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']",
'null': 'True'
}
),
'resolved_at':
('django.db.models.fields.DateTimeField', [], {
'null': 'True',
'db_index': 'True'
}),
'score': ('sentry.db.models.fields.bounded.BoundedIntegerField', [], {
'default': '0'
}),
'short_id':
('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {
'null': 'True'
}),
'status': (
'sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'default': '0',
'db_index': 'True'
}
),
'time_spent_count':
('sentry.db.models.fields.bounded.BoundedIntegerField', [], {
'default': '0'
}),
'time_spent_total':
('sentry.db.models.fields.bounded.BoundedIntegerField', [], {
'default': '0'
}),
'times_seen': (
'sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'default': '1',
'db_index': 'True'
}
)
},
'sentry.groupassignee': {
'Meta': {
'object_name': 'GroupAssignee',
'db_table': "'sentry_groupasignee'"
},
'date_added':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'group': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'related_name': "'assignee_set'",
'unique': 'True',
'to': "orm['sentry.Group']"
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'related_name': "'assignee_set'",
'to': "orm['sentry.Project']"
}
),
'user': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'related_name': "'sentry_assignee_set'",
'to': "orm['sentry.User']"
}
)
},
'sentry.groupbookmark': {
'Meta': {
'unique_together': "(('project', 'user', 'group'),)",
'object_name': 'GroupBookmark'
},
'date_added': (
'django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now',
'null': 'True'
}
),
'group': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'related_name': "'bookmark_set'",
'to': "orm['sentry.Group']"
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'related_name': "'bookmark_set'",
'to': "orm['sentry.Project']"
}
),
'user': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'related_name': "'sentry_bookmark_set'",
'to': "orm['sentry.User']"
}
)
},
'sentry.groupemailthread': {
'Meta': {
'unique_together': "(('email', 'group'), ('email', 'msgid'))",
'object_name': 'GroupEmailThread'
},
'date': (
'django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now',
'db_index': 'True'
}
),
'email': ('django.db.models.fields.EmailField', [], {
'max_length': '75'
}),
'group': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'related_name': "'groupemail_set'",
'to': "orm['sentry.Group']"
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'msgid': ('django.db.models.fields.CharField', [], {
'max_length': '100'
}),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'related_name': "'groupemail_set'",
'to': "orm['sentry.Project']"
}
)
},
'sentry.grouphash': {
'Meta': {
'unique_together': "(('project', 'hash'),)",
'object_name': 'GroupHash'
},
'group': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Group']",
'null': 'True'
}
),
'hash': ('django.db.models.fields.CharField', [], {
'max_length': '32'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']",
'null': 'True'
}
)
},
'sentry.groupmeta': {
'Meta': {
'unique_together': "(('group', 'key'),)",
'object_name': 'GroupMeta'
},
'group': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Group']"
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'key': ('django.db.models.fields.CharField', [], {
'max_length': '64'
}),
'value': ('django.db.models.fields.TextField', [], {})
},
'sentry.groupredirect': {
'Meta': {
'object_name': 'GroupRedirect'
},
'group_id':
('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {
'db_index': 'True'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'previous_group_id':
('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {
'unique': 'True'
})
},
'sentry.groupresolution': {
'Meta': {
'object_name': 'GroupResolution'
},
'datetime': (
'django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now',
'db_index': 'True'
}
),
'group': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Group']",
'unique': 'True'
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'release': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Release']"
}
),
'status':
('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'default': '0'
})
},
'sentry.grouprulestatus': {
'Meta': {
'unique_together': "(('rule', 'group'),)",
'object_name': 'GroupRuleStatus'
},
'date_added':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'group': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Group']"
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'last_active': ('django.db.models.fields.DateTimeField', [], {
'null': 'True'
}),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']"
}
),
'rule': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Rule']"
}
),
'status': ('django.db.models.fields.PositiveSmallIntegerField', [], {
'default': '0'
})
},
'sentry.groupseen': {
'Meta': {
'unique_together': "(('user', 'group'),)",
'object_name': 'GroupSeen'
},
'group': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Group']"
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'last_seen':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']"
}
),
'user': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.User']",
'db_index': 'False'
}
)
},
'sentry.groupsnooze': {
'Meta': {
'object_name': 'GroupSnooze'
},
'group': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Group']",
'unique': 'True'
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'until': ('django.db.models.fields.DateTimeField', [], {})
},
'sentry.grouptagkey': {
'Meta': {
'unique_together': "(('project', 'group', 'key'),)",
'object_name': 'GroupTagKey'
},
'group': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Group']"
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'key': ('django.db.models.fields.CharField', [], {
'max_length': '32'
}),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']",
'null': 'True'
}
),
'values_seen':
('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'default': '0'
})
},
'sentry.grouptagvalue': {
'Meta': {
'unique_together': "(('project', 'key', 'value', 'group'),)",
'object_name': 'GroupTagValue',
'db_table': "'sentry_messagefiltervalue'"
},
'first_seen': (
'django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now',
'null': 'True',
'db_index': 'True'
}
),
'group': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'related_name': "'grouptag'",
'to': "orm['sentry.Group']"
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'key': ('django.db.models.fields.CharField', [], {
'max_length': '32'
}),
'last_seen': (
'django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now',
'null': 'True',
'db_index': 'True'
}
),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'related_name': "'grouptag'",
'null': 'True',
'to': "orm['sentry.Project']"
}
),
'times_seen':
('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'default': '0'
}),
'value': ('django.db.models.fields.CharField', [], {
'max_length': '200'
})
},
'sentry.helppage': {
'Meta': {
'object_name': 'HelpPage'
},
'content': ('django.db.models.fields.TextField', [], {}),
'date_added':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'is_visible': ('django.db.models.fields.BooleanField', [], {
'default': 'True'
}),
'key': (
'django.db.models.fields.CharField', [], {
'max_length': '64',
'unique': 'True',
'null': 'True'
}
),
'priority':
('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'default': '50'
}),
'title': ('django.db.models.fields.CharField', [], {
'max_length': '64'
})
},
'sentry.lostpasswordhash': {
'Meta': {
'object_name': 'LostPasswordHash'
},
'date_added':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'hash': ('django.db.models.fields.CharField', [], {
'max_length': '32'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'user': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.User']",
'unique': 'True'
}
)
},
'sentry.option': {
'Meta': {
'object_name': 'Option'
},
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'key':
('django.db.models.fields.CharField', [], {
'unique': 'True',
'max_length': '64'
}),
'last_updated':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'value': ('sentry.db.models.fields.pickle.UnicodePickledObjectField', [], {})
},
'sentry.organization': {
'Meta': {
'object_name': 'Organization'
},
'date_added':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'default_role':
('django.db.models.fields.CharField', [], {
'default': "'member'",
'max_length': '32'
}),
'flags': ('django.db.models.fields.BigIntegerField', [], {
'default': '1'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'members': (
'django.db.models.fields.related.ManyToManyField', [], {
'related_name': "'org_memberships'",
'symmetrical': 'False',
'through': "orm['sentry.OrganizationMember']",
'to': "orm['sentry.User']"
}
),
'name': ('django.db.models.fields.CharField', [], {
'max_length': '64'
}),
'slug':
('django.db.models.fields.SlugField', [], {
'unique': 'True',
'max_length': '50'
}),
'status':
('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'default': '0'
})
},
'sentry.organizationaccessrequest': {
'Meta': {
'unique_together': "(('team', 'member'),)",
'object_name': 'OrganizationAccessRequest'
},
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'member': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.OrganizationMember']"
}
),
'team': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Team']"
}
)
},
'sentry.organizationmember': {
'Meta': {
'unique_together': "(('organization', 'user'), ('organization', 'email'))",
'object_name': 'OrganizationMember'
},
'counter': (
'sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'null': 'True',
'blank': 'True'
}
),
'date_added':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'email': (
'django.db.models.fields.EmailField', [], {
'max_length': '75',
'null': 'True',
'blank': 'True'
}
),
'flags': ('django.db.models.fields.BigIntegerField', [], {
'default': '0'
}),
'has_global_access': ('django.db.models.fields.BooleanField', [], {
'default': 'True'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'organization': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'related_name': "'member_set'",
'to': "orm['sentry.Organization']"
}
),
'role':
('django.db.models.fields.CharField', [], {
'default': "'member'",
'max_length': '32'
}),
'teams': (
'django.db.models.fields.related.ManyToManyField', [], {
'to': "orm['sentry.Team']",
'symmetrical': 'False',
'through': "orm['sentry.OrganizationMemberTeam']",
'blank': 'True'
}
),
'type': (
'sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'default': '50',
'blank': 'True'
}
),
'user': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'blank': 'True',
'related_name': "'sentry_orgmember_set'",
'null': 'True',
'to': "orm['sentry.User']"
}
)
},
'sentry.organizationmemberteam': {
'Meta': {
'unique_together': "(('team', 'organizationmember'),)",
'object_name': 'OrganizationMemberTeam',
'db_table': "'sentry_organizationmember_teams'"
},
'id': ('sentry.db.models.fields.bounded.BoundedAutoField', [], {
'primary_key': 'True'
}),
'is_active': ('django.db.models.fields.BooleanField', [], {
'default': 'True'
}),
'organizationmember': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.OrganizationMember']"
}
),
'team': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Team']"
}
)
},
'sentry.organizationonboardingtask': {
'Meta': {
'unique_together': "(('organization', 'task'),)",
'object_name': 'OrganizationOnboardingTask'
},
'data': ('sentry.db.models.fields.jsonfield.JSONField', [], {
'default': '{}'
}),
'date_completed':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'organization': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Organization']"
}
),
'project_id': (
'sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {
'null': 'True',
'blank': 'True'
}
),
'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {}),
'task': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {}),
'user': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.User']",
'null': 'True'
}
)
},
'sentry.organizationoption': {
'Meta': {
'unique_together': "(('organization', 'key'),)",
'object_name': 'OrganizationOption',
'db_table': "'sentry_organizationoptions'"
},
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'key': ('django.db.models.fields.CharField', [], {
'max_length': '64'
}),
'organization': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Organization']"
}
),
'value': ('sentry.db.models.fields.pickle.UnicodePickledObjectField', [], {})
},
'sentry.project': {
'Meta': {
'unique_together': "(('team', 'slug'), ('organization', 'slug'))",
'object_name': 'Project'
},
'date_added':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'first_event': ('django.db.models.fields.DateTimeField', [], {
'null': 'True'
}),
'forced_color': (
'django.db.models.fields.CharField', [], {
'max_length': '6',
'null': 'True',
'blank': 'True'
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'name': ('django.db.models.fields.CharField', [], {
'max_length': '200'
}),
'organization': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Organization']"
}
),
'public': ('django.db.models.fields.BooleanField', [], {
'default': 'False'
}),
'slug': ('django.db.models.fields.SlugField', [], {
'max_length': '50',
'null': 'True'
}),
'status': (
'sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'default': '0',
'db_index': 'True'
}
),
'team': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Team']"
}
)
},
'sentry.projectbookmark': {
'Meta': {
'unique_together': "(('project_id', 'user'),)",
'object_name': 'ProjectBookmark'
},
'date_added': (
'django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now',
'null': 'True'
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'project_id': (
'sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {
'null': 'True',
'blank': 'True'
}
),
'user': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.User']"
}
)
},
'sentry.projectdsymfile': {
'Meta': {
'unique_together': "(('project', 'uuid'),)",
'object_name': 'ProjectDSymFile'
},
'cpu_name': ('django.db.models.fields.CharField', [], {
'max_length': '40'
}),
'file': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.File']"
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'object_name': ('django.db.models.fields.TextField', [], {}),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']",
'null': 'True'
}
),
'uuid': ('django.db.models.fields.CharField', [], {
'max_length': '36'
})
},
'sentry.projectkey': {
'Meta': {
'object_name': 'ProjectKey'
},
'date_added': (
'django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now',
'null': 'True'
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'label': (
'django.db.models.fields.CharField', [], {
'max_length': '64',
'null': 'True',
'blank': 'True'
}
),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'related_name': "'key_set'",
'to': "orm['sentry.Project']"
}
),
'public_key': (
'django.db.models.fields.CharField', [], {
'max_length': '32',
'unique': 'True',
'null': 'True'
}
),
'roles': ('django.db.models.fields.BigIntegerField', [], {
'default': '1'
}),
'secret_key': (
'django.db.models.fields.CharField', [], {
'max_length': '32',
'unique': 'True',
'null': 'True'
}
),
'status': (
'sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'default': '0',
'db_index': 'True'
}
)
},
'sentry.projectoption': {
'Meta': {
'unique_together': "(('project', 'key'),)",
'object_name': 'ProjectOption',
'db_table': "'sentry_projectoptions'"
},
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'key': ('django.db.models.fields.CharField', [], {
'max_length': '64'
}),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']"
}
),
'value': ('sentry.db.models.fields.pickle.UnicodePickledObjectField', [], {})
},
'sentry.projectplatform': {
'Meta': {
'unique_together': "(('project_id', 'platform'),)",
'object_name': 'ProjectPlatform'
},
'date_added':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'last_seen':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'platform': ('django.db.models.fields.CharField', [], {
'max_length': '64'
}),
'project_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {})
},
'sentry.release': {
'Meta': {
'unique_together': "(('project', 'version'),)",
'object_name': 'Release'
},
'data': ('sentry.db.models.fields.jsonfield.JSONField', [], {
'default': '{}'
}),
'date_added':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'date_released':
('django.db.models.fields.DateTimeField', [], {
'null': 'True',
'blank': 'True'
}),
'date_started':
('django.db.models.fields.DateTimeField', [], {
'null': 'True',
'blank': 'True'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'new_groups':
('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'default': '0'
}),
'owner': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.User']",
'null': 'True',
'blank': 'True'
}
),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']"
}
),
'ref': (
'django.db.models.fields.CharField', [], {
'max_length': '64',
'null': 'True',
'blank': 'True'
}
),
'url': (
'django.db.models.fields.URLField', [], {
'max_length': '200',
'null': 'True',
'blank': 'True'
}
),
'version': ('django.db.models.fields.CharField', [], {
'max_length': '64'
})
},
'sentry.releasefile': {
'Meta': {
'unique_together': "(('release', 'ident'),)",
'object_name': 'ReleaseFile'
},
'file': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.File']"
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'ident': ('django.db.models.fields.CharField', [], {
'max_length': '40'
}),
'name': ('django.db.models.fields.TextField', [], {}),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']"
}
),
'release': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Release']"
}
)
},
'sentry.rule': {
'Meta': {
'object_name': 'Rule'
},
'data': ('sentry.db.models.fields.gzippeddict.GzippedDictField', [], {}),
'date_added':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'label': ('django.db.models.fields.CharField', [], {
'max_length': '64'
}),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']"
}
)
},
'sentry.savedsearch': {
'Meta': {
'unique_together': "(('project', 'name'),)",
'object_name': 'SavedSearch'
},
'date_added':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'is_default': ('django.db.models.fields.BooleanField', [], {
'default': 'False'
}),
'name': ('django.db.models.fields.CharField', [], {
'max_length': '128'
}),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']"
}
),
'query': ('django.db.models.fields.TextField', [], {})
},
'sentry.savedsearchuserdefault': {
'Meta': {
'unique_together': "(('project', 'user'),)",
'object_name': 'SavedSearchUserDefault',
'db_table': "'sentry_savedsearch_userdefault'"
},
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']"
}
),
'savedsearch': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.SavedSearch']"
}
),
'user': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.User']"
}
)
},
'sentry.tagkey': {
'Meta': {
'unique_together': "(('project', 'key'),)",
'object_name': 'TagKey',
'db_table': "'sentry_filterkey'"
},
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'key': ('django.db.models.fields.CharField', [], {
'max_length': '32'
}),
'label':
('django.db.models.fields.CharField', [], {
'max_length': '64',
'null': 'True'
}),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']"
}
),
'status':
('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'default': '0'
}),
'values_seen':
('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'default': '0'
})
},
'sentry.tagvalue': {
'Meta': {
'unique_together': "(('project', 'key', 'value'),)",
'object_name': 'TagValue',
'db_table': "'sentry_filtervalue'"
},
'data': (
'sentry.db.models.fields.gzippeddict.GzippedDictField', [], {
'null': 'True',
'blank': 'True'
}
),
'first_seen': (
'django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now',
'null': 'True',
'db_index': 'True'
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'key': ('django.db.models.fields.CharField', [], {
'max_length': '32'
}),
'last_seen': (
'django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now',
'null': 'True',
'db_index': 'True'
}
),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']",
'null': 'True'
}
),
'times_seen':
('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'default': '0'
}),
'value': ('django.db.models.fields.CharField', [], {
'max_length': '200'
})
},
'sentry.team': {
'Meta': {
'unique_together': "(('organization', 'slug'),)",
'object_name': 'Team'
},
'date_added': (
'django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now',
'null': 'True'
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'name': ('django.db.models.fields.CharField', [], {
'max_length': '64'
}),
'organization': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Organization']"
}
),
'slug': ('django.db.models.fields.SlugField', [], {
'max_length': '50'
}),
'status':
('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'default': '0'
})
},
'sentry.user': {
'Meta': {
'object_name': 'User',
'db_table': "'auth_user'"
},
'date_joined':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'email':
('django.db.models.fields.EmailField', [], {
'max_length': '75',
'blank': 'True'
}),
'id': ('sentry.db.models.fields.bounded.BoundedAutoField', [], {
'primary_key': 'True'
}),
'is_active': ('django.db.models.fields.BooleanField', [], {
'default': 'True'
}),
'is_managed': ('django.db.models.fields.BooleanField', [], {
'default': 'False'
}),
'is_staff': ('django.db.models.fields.BooleanField', [], {
'default': 'False'
}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {
'default': 'False'
}),
'last_login':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'name': (
'django.db.models.fields.CharField', [], {
'max_length': '200',
'db_column': "'first_name'",
'blank': 'True'
}
),
'password': ('django.db.models.fields.CharField', [], {
'max_length': '128'
}),
'username':
('django.db.models.fields.CharField', [], {
'unique': 'True',
'max_length': '128'
})
},
'sentry.useravatar': {
'Meta': {
'object_name': 'UserAvatar'
},
'avatar_type':
('django.db.models.fields.PositiveSmallIntegerField', [], {
'default': '0'
}),
'file': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.File']",
'unique': 'True',
'null': 'True',
'on_delete': 'models.SET_NULL'
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'ident': (
'django.db.models.fields.CharField', [], {
'unique': 'True',
'max_length': '32',
'db_index': 'True'
}
),
'user': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'related_name': "'avatar'",
'unique': 'True',
'to': "orm['sentry.User']"
}
)
},
'sentry.useroption': {
'Meta': {
'unique_together': "(('user', 'project', 'key'),)",
'object_name': 'UserOption'
},
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'key': ('django.db.models.fields.CharField', [], {
'max_length': '64'
}),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']",
'null': 'True'
}
),
'user': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.User']"
}
),
'value': ('sentry.db.models.fields.pickle.UnicodePickledObjectField', [], {})
},
'sentry.userreport': {
'Meta': {
'unique_together': "(('project', 'event_id'),)",
'object_name': 'UserReport',
'index_together': "(('project', 'event_id'), ('project', 'date_added'))"
},
'comments': ('django.db.models.fields.TextField', [], {}),
'date_added':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'email': ('django.db.models.fields.EmailField', [], {
'max_length': '75'
}),
'event_id': ('django.db.models.fields.CharField', [], {
'max_length': '32'
}),
'group': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Group']",
'null': 'True'
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'name': ('django.db.models.fields.CharField', [], {
'max_length': '128'
}),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']"
}
)
}
}
complete_apps = ['sentry']
"""
NWBIO
=====
IO class for reading data from and writing data to a Neurodata Without Borders (NWB) dataset
Documentation: https://www.nwb.org/
Depends on: pynwb, hdmf, numpy, quantities
Supported: Read, Write
Python API - https://pynwb.readthedocs.io
Sample datasets from CRCNS - https://crcns.org/NWB
Sample datasets from Allen Institute
- http://alleninstitute.github.io/AllenSDK/cell_types.html#neurodata-without-borders
"""
from __future__ import absolute_import, division
import json
import logging
import os
from collections import defaultdict
from itertools import chain
from json.decoder import JSONDecodeError
import numpy as np
import quantities as pq
from neo.core import (Segment, SpikeTrain, Epoch, Event, AnalogSignal,
IrregularlySampledSignal, Block, ImageSequence)
from neo.io.baseio import BaseIO
from neo.io.proxyobjects import (
AnalogSignalProxy as BaseAnalogSignalProxy,
EventProxy as BaseEventProxy,
EpochProxy as BaseEpochProxy,
SpikeTrainProxy as BaseSpikeTrainProxy
)
try:
import pynwb
from pynwb import NWBFile, TimeSeries
from pynwb.base import ProcessingModule
from pynwb.ecephys import ElectricalSeries, Device, EventDetection
from pynwb.behavior import SpatialSeries
from pynwb.misc import AnnotationSeries
from pynwb import image
from pynwb.image import ImageSeries
from pynwb.spec import NWBAttributeSpec, NWBDatasetSpec, NWBGroupSpec, NWBNamespace, \
NWBNamespaceBuilder
from pynwb.device import Device
# For calcium imaging data
from pynwb.ophys import TwoPhotonSeries, OpticalChannel, ImageSegmentation, Fluorescence
have_pynwb = True
except ImportError:
have_pynwb = False
try:
from hdmf.spec import (LinkSpec, GroupSpec, DatasetSpec, SpecNamespace,
NamespaceBuilder, AttributeSpec, DtypeSpec, RefSpec)
have_hdmf = True
except ImportError:
have_hdmf = False
except SyntaxError:
have_hdmf = False
logger = logging.getLogger("Neo")
GLOBAL_ANNOTATIONS = (
"session_start_time", "identifier", "timestamps_reference_time", "experimenter",
"experiment_description", "session_id", "institution", "keywords", "notes",
"pharmacology", "protocol", "related_publications", "slices", "source_script",
"source_script_file_name", "data_collection", "surgery", "virus", "stimulus_notes",
"lab", "session_description"
)
POSSIBLE_JSON_FIELDS = (
"source_script", "description"
)
prefix_map = {
1e9: 'giga',
1e6: 'mega',
1e3: 'kilo',
1: '',
1e-3: 'milli',
1e-6: 'micro',
1e-9: 'nano',
1e-12: 'pico'
}
def try_json_field(content):
"""
Try to interpret a string as JSON data.
If successful, return the JSON data (dict or list)
If unsuccessful, return the original string
"""
try:
return json.loads(content)
except JSONDecodeError:
return content
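# For example, try_json_field('{"block": "b0", "segment": "s0"}') returns the dict
# {'block': 'b0', 'segment': 's0'}, whereas try_json_field("free text") returns the
# string unchanged, because json.loads() raises JSONDecodeError on it.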
def get_class(module, name):
"""
Given a module path and a class name, return the class object
"""
module_path = module.split(".")
assert len(module_path) == 2 # todo: handle the general case where this isn't 2
return getattr(getattr(pynwb, module_path[1]), name)
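# For example, get_class("pynwb.icephys", "CurrentClampSeries") resolves to
# pynwb.icephys.CurrentClampSeries by chaining two getattr() calls on the pynwb package.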
def statistics(block): # todo: move this to be a property of Block
"""
Return simple statistics about a Neo Block.
"""
stats = {
"SpikeTrain": {"count": 0},
"AnalogSignal": {"count": 0},
"IrregularlySampledSignal": {"count": 0},
"Epoch": {"count": 0},
"Event": {"count": 0},
}
for segment in block.segments:
stats["SpikeTrain"]["count"] += len(segment.spiketrains)
stats["AnalogSignal"]["count"] += len(segment.analogsignals)
stats["IrregularlySampledSignal"]["count"] += len(segment.irregularlysampledsignals)
stats["Epoch"]["count"] += len(segment.epochs)
stats["Event"]["count"] += len(segment.events)
return stats
def get_units_conversion(signal, timeseries_class):
"""
Given a quantity array and a TimeSeries subclass, return
the conversion factor and the expected units
"""
# it would be nice if the expected units was an attribute of the PyNWB class
if "CurrentClamp" in timeseries_class.__name__:
expected_units = pq.volt
elif "VoltageClamp" in timeseries_class.__name__:
expected_units = pq.ampere
else:
# todo: warn that we don't handle this subclass yet
expected_units = signal.units
return float((signal.units / expected_units).simplified.magnitude), expected_units
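# For instance, a signal carried in millivolts that is written as a CurrentClampSeries
# has expected units of volt, so the returned conversion factor is 0.001,
# i.e. (mV / V).simplified.magnitude.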
def time_in_seconds(t):
return float(t.rescale("second"))
def _decompose_unit(unit):
"""
Given a quantities unit object, return a base unit name and a conversion factor.
Example:
>>> _decompose_unit(pq.mV)
('volt', 0.001)
"""
assert isinstance(unit, pq.quantity.Quantity)
assert unit.magnitude == 1
conversion = 1.0
def _decompose(unit):
dim = unit.dimensionality
if len(dim) != 1:
raise NotImplementedError("Compound units not yet supported") # e.g. volt-metre
uq, n = list(dim.items())[0]
if n != 1:
raise NotImplementedError("Compound units not yet supported") # e.g. volt^2
uq_def = uq.definition
return float(uq_def.magnitude), uq_def
conv, unit2 = _decompose(unit)
while conv != 1:
conversion *= conv
unit = unit2
conv, unit2 = _decompose(unit)
return list(unit.dimensionality.keys())[0].name, conversion
def _recompose_unit(base_unit_name, conversion):
"""
Given a base unit name and a conversion factor, return a quantities unit object
Example:
>>> _recompose_unit("ampere", 1e-9)
UnitCurrent('nanoampere', 0.001 * uA, 'nA')
"""
unit_name = None
for cf in prefix_map:
# conversion may have a different float precision to the keys in
# prefix_map, so we can't just use `prefix_map[conversion]`
if abs(conversion - cf) / cf < 1e-6:
unit_name = prefix_map[cf] + base_unit_name
if unit_name is None:
raise ValueError(f"Can't handle this conversion factor: {conversion}")
if unit_name[-1] == "s": # strip trailing 's', e.g. "volts" --> "volt"
unit_name = unit_name[:-1]
try:
return getattr(pq, unit_name)
except AttributeError:
logger.warning(f"Can't handle unit '{unit_name}'. Returning dimensionless")
return pq.dimensionless
class NWBIO(BaseIO):
"""
    Class for reading experimental data from a .nwb file and writing Neo data to a .nwb file.
"""
supported_objects = [Block, Segment, AnalogSignal, IrregularlySampledSignal,
SpikeTrain, Epoch, Event, ImageSequence]
readable_objects = supported_objects
writeable_objects = supported_objects
has_header = False
support_lazy = True
name = 'NeoNWB IO'
description = 'This IO reads/writes experimental data from/to an .nwb dataset'
extensions = ['nwb']
mode = 'one-file'
is_readable = True
is_writable = True
is_streameable = False
def __init__(self, filename, mode='r'):
"""
        Arguments:
            filename : path to the .nwb file to read from or write to
            mode : 'r' to open an existing file for reading,
                   'w' to create a new file for writing
"""
if not have_pynwb:
raise Exception("Please install the pynwb package to use NWBIO")
if not have_hdmf:
raise Exception("Please install the hdmf package to use NWBIO")
BaseIO.__init__(self, filename=filename)
self.filename = filename
self.blocks_written = 0
self.nwb_file_mode = mode
def read_all_blocks(self, lazy=False, **kwargs):
"""
Load all blocks in the file.
"""
assert self.nwb_file_mode in ('r',)
io = pynwb.NWBHDF5IO(self.filename, mode=self.nwb_file_mode,
load_namespaces=True) # Open a file with NWBHDF5IO
self._file = io.read()
self.global_block_metadata = {}
for annotation_name in GLOBAL_ANNOTATIONS:
value = getattr(self._file, annotation_name, None)
if value is not None:
if annotation_name in POSSIBLE_JSON_FIELDS:
value = try_json_field(value)
self.global_block_metadata[annotation_name] = value
if "session_description" in self.global_block_metadata:
self.global_block_metadata["description"] = self.global_block_metadata[
"session_description"]
self.global_block_metadata["file_origin"] = self.filename
if "session_start_time" in self.global_block_metadata:
self.global_block_metadata["rec_datetime"] = self.global_block_metadata[
"session_start_time"]
if "file_create_date" in self.global_block_metadata:
self.global_block_metadata["file_datetime"] = self.global_block_metadata[
"file_create_date"]
self._blocks = {}
self._read_acquisition_group(lazy=lazy)
self._read_stimulus_group(lazy)
self._read_units(lazy=lazy)
self._read_epochs_group(lazy)
return list(self._blocks.values())
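    # A minimal reading sketch (the file name is illustrative; assumes this module is
    # available as neo.io.NWBIO and that pynwb and hdmf are installed):
    #
    #     from neo.io import NWBIO
    #     reader = NWBIO("example.nwb", mode="r")
    #     blocks = reader.read_all_blocks(lazy=False)
    #     for block in blocks:
    #         for segment in block.segments:
    #             print(segment.name, len(segment.analogsignals), len(segment.spiketrains))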
def read_block(self, lazy=False, block_index=0, **kargs):
"""
Load the first block in the file.
"""
return self.read_all_blocks(lazy=lazy)[block_index]
def _get_segment(self, block_name, segment_name):
# If we've already created a Block with the given name return it,
# otherwise create it now and store it in self._blocks.
# If we've already created a Segment in the given block, return it,
# otherwise create it now and return it.
if block_name in self._blocks:
block = self._blocks[block_name]
else:
block = Block(name=block_name, **self.global_block_metadata)
self._blocks[block_name] = block
segment = None
for seg in block.segments:
if segment_name == seg.name:
segment = seg
break
if segment is None:
segment = Segment(name=segment_name)
segment.block = block
block.segments.append(segment)
return segment
def _read_epochs_group(self, lazy):
if self._file.epochs is not None:
try:
# NWB files created by Neo store the segment, block and epoch names as extra
# columns
segment_names = self._file.epochs.segment[:]
block_names = self._file.epochs.block[:]
epoch_names = self._file.epochs._name[:]
except AttributeError:
epoch_names = None
if epoch_names is not None:
unique_epoch_names = np.unique(epoch_names)
for epoch_name in unique_epoch_names:
index, = np.where((epoch_names == epoch_name))
epoch = EpochProxy(self._file.epochs, epoch_name, index)
if not lazy:
epoch = epoch.load()
segment_name = np.unique(segment_names[index])
block_name = np.unique(block_names[index])
assert segment_name.size == block_name.size == 1
segment = self._get_segment(block_name[0], segment_name[0])
segment.epochs.append(epoch)
epoch.segment = segment
else:
epoch = EpochProxy(self._file.epochs)
if not lazy:
epoch = epoch.load()
segment = self._get_segment("default", "default")
segment.epochs.append(epoch)
epoch.segment = segment
def _read_timeseries_group(self, group_name, lazy):
group = getattr(self._file, group_name)
for timeseries in group.values():
try:
# NWB files created by Neo store the segment and block names in the comments field
hierarchy = json.loads(timeseries.comments)
except JSONDecodeError:
# For NWB files created with other applications, we put everything in a single
# segment in a single block
# todo: investigate whether there is a reliable way to create multiple segments,
# e.g. using Trial information
block_name = "default"
segment_name = "default"
else:
block_name = hierarchy["block"]
segment_name = hierarchy["segment"]
segment = self._get_segment(block_name, segment_name)
if isinstance(timeseries, AnnotationSeries):
event = EventProxy(timeseries, group_name)
if not lazy:
event = event.load()
segment.events.append(event)
event.segment = segment
elif timeseries.rate: # AnalogSignal
signal = AnalogSignalProxy(timeseries, group_name)
if not lazy:
signal = signal.load()
segment.analogsignals.append(signal)
signal.segment = segment
else: # IrregularlySampledSignal
signal = AnalogSignalProxy(timeseries, group_name)
if not lazy:
signal = signal.load()
segment.irregularlysampledsignals.append(signal)
signal.segment = segment
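    # Note on the reader above: for NWB files written by this module, the "comments"
    # field of each TimeSeries holds a JSON hierarchy such as
    # '{"block": "block0", "segment": "block0 : segment0"}' (see _write_signal below);
    # files produced by other tools fall back to a single "default" block and segment.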
def _read_units(self, lazy):
if self._file.units:
for id in range(len(self._file.units)):
try:
# NWB files created by Neo store the segment and block names as extra columns
segment_name = self._file.units.segment[id]
block_name = self._file.units.block[id]
except AttributeError:
# For NWB files created with other applications, we put everything in a single
# segment in a single block
segment_name = "default"
block_name = "default"
segment = self._get_segment(block_name, segment_name)
spiketrain = SpikeTrainProxy(self._file.units, id)
if not lazy:
spiketrain = spiketrain.load()
segment.spiketrains.append(spiketrain)
spiketrain.segment = segment
def _read_acquisition_group(self, lazy):
self._read_timeseries_group("acquisition", lazy)
def _read_stimulus_group(self, lazy):
self._read_timeseries_group("stimulus", lazy)
def write_all_blocks(self, blocks, **kwargs):
"""
        Write a list of Neo Blocks to the file.
"""
# todo: allow metadata in NWBFile constructor to be taken from kwargs
annotations = defaultdict(set)
for annotation_name in GLOBAL_ANNOTATIONS:
if annotation_name in kwargs:
annotations[annotation_name] = kwargs[annotation_name]
else:
for block in blocks:
if annotation_name in block.annotations:
try:
annotations[annotation_name].add(block.annotations[annotation_name])
except TypeError:
if annotation_name in POSSIBLE_JSON_FIELDS:
encoded = json.dumps(block.annotations[annotation_name])
annotations[annotation_name].add(encoded)
else:
raise
if annotation_name in annotations:
if len(annotations[annotation_name]) > 1:
raise NotImplementedError(
"We don't yet support multiple values for {}".format(annotation_name))
# take single value from set
annotations[annotation_name], = annotations[annotation_name]
if "identifier" not in annotations:
annotations["identifier"] = self.filename
if "session_description" not in annotations:
annotations["session_description"] = blocks[0].description or self.filename
# todo: concatenate descriptions of multiple blocks if different
if "session_start_time" not in annotations:
raise Exception("Writing to NWB requires an annotation 'session_start_time'")
# todo: handle subject
# todo: store additional Neo annotations somewhere in NWB file
nwbfile = NWBFile(**annotations)
assert self.nwb_file_mode in ('w',) # possibly expand to 'a'ppend later
if self.nwb_file_mode == "w" and os.path.exists(self.filename):
os.remove(self.filename)
io_nwb = pynwb.NWBHDF5IO(self.filename, mode=self.nwb_file_mode)
if sum(statistics(block)["SpikeTrain"]["count"] for block in blocks) > 0:
nwbfile.add_unit_column('_name', 'the name attribute of the SpikeTrain')
# nwbfile.add_unit_column('_description',
# 'the description attribute of the SpikeTrain')
nwbfile.add_unit_column(
'segment', 'the name of the Neo Segment to which the SpikeTrain belongs')
nwbfile.add_unit_column(
'block', 'the name of the Neo Block to which the SpikeTrain belongs')
if sum(statistics(block)["Epoch"]["count"] for block in blocks) > 0:
nwbfile.add_epoch_column('_name', 'the name attribute of the Epoch')
# nwbfile.add_epoch_column('_description', 'the description attribute of the Epoch')
nwbfile.add_epoch_column(
'segment', 'the name of the Neo Segment to which the Epoch belongs')
nwbfile.add_epoch_column('block',
'the name of the Neo Block to which the Epoch belongs')
for i, block in enumerate(blocks):
self.write_block(nwbfile, block)
io_nwb.write(nwbfile)
io_nwb.close()
with pynwb.NWBHDF5IO(self.filename, "r") as io_validate:
errors = pynwb.validate(io_validate, namespace="core")
if errors:
raise Exception(f"Errors found when validating {self.filename}")
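    # A minimal writing sketch (illustrative file name and blocks; "session_start_time"
    # is mandatory and should be a timezone-aware datetime):
    #
    #     from datetime import datetime, timezone
    #     writer = NWBIO("output.nwb", mode="w")
    #     writer.write_all_blocks(neo_blocks,
    #                             session_start_time=datetime(2021, 1, 1, tzinfo=timezone.utc))
    #
    # Any other GLOBAL_ANNOTATIONS may be passed as keyword arguments in the same way,
    # or attached as annotations on the Blocks themselves.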
def write_block(self, nwbfile, block, **kwargs):
"""
        Write a Block to the file.

        :param nwbfile: representation of an NWB file
        :param block: Neo Block to be written
"""
electrodes = self._write_electrodes(nwbfile, block)
if not block.name:
block.name = "block%d" % self.blocks_written
for i, segment in enumerate(block.segments):
assert segment.block is block
if not segment.name:
segment.name = "%s : segment%d" % (block.name, i)
self._write_segment(nwbfile, segment, electrodes)
self.blocks_written += 1
def _write_electrodes(self, nwbfile, block):
# this handles only icephys_electrode for now
electrodes = {}
devices = {}
for segment in block.segments:
for signal in chain(segment.analogsignals, segment.irregularlysampledsignals):
if "nwb_electrode" in signal.annotations:
elec_meta = signal.annotations["nwb_electrode"].copy()
if elec_meta["name"] not in electrodes:
# todo: check for consistency if the name is already there
if elec_meta["device"]["name"] in devices:
device = devices[elec_meta["device"]["name"]]
else:
device = nwbfile.create_device(**elec_meta["device"])
devices[elec_meta["device"]["name"]] = device
elec_meta.pop("device")
electrodes[elec_meta["name"]] = nwbfile.create_icephys_electrode(
device=device, **elec_meta
)
return electrodes
def _write_segment(self, nwbfile, segment, electrodes):
# maybe use NWB trials to store Segment metadata?
for i, signal in enumerate(
chain(segment.analogsignals, segment.irregularlysampledsignals)):
assert signal.segment is segment
if not signal.name:
signal.name = "%s : analogsignal%d" % (segment.name, i)
self._write_signal(nwbfile, signal, electrodes)
for i, train in enumerate(segment.spiketrains):
assert train.segment is segment
if not train.name:
train.name = "%s : spiketrain%d" % (segment.name, i)
self._write_spiketrain(nwbfile, train)
for i, event in enumerate(segment.events):
assert event.segment is segment
if not event.name:
event.name = "%s : event%d" % (segment.name, i)
self._write_event(nwbfile, event)
for i, epoch in enumerate(segment.epochs):
if not epoch.name:
epoch.name = "%s : epoch%d" % (segment.name, i)
self._write_epoch(nwbfile, epoch)
def _write_signal(self, nwbfile, signal, electrodes):
hierarchy = {'block': signal.segment.block.name, 'segment': signal.segment.name}
if "nwb_neurodata_type" in signal.annotations:
timeseries_class = get_class(*signal.annotations["nwb_neurodata_type"])
else:
timeseries_class = TimeSeries # default
additional_metadata = {name[4:]: value
for name, value in signal.annotations.items()
if name.startswith("nwb:")}
if "nwb_electrode" in signal.annotations:
electrode_name = signal.annotations["nwb_electrode"]["name"]
additional_metadata["electrode"] = electrodes[electrode_name]
if timeseries_class != TimeSeries:
conversion, units = get_units_conversion(signal, timeseries_class)
additional_metadata["conversion"] = conversion
else:
units = signal.units
if isinstance(signal, AnalogSignal):
sampling_rate = signal.sampling_rate.rescale("Hz")
tS = timeseries_class(
name=signal.name,
starting_time=time_in_seconds(signal.t_start),
data=signal,
unit=units.dimensionality.string,
rate=float(sampling_rate),
comments=json.dumps(hierarchy),
**additional_metadata)
# todo: try to add array_annotations via "control" attribute
elif isinstance(signal, IrregularlySampledSignal):
tS = timeseries_class(
name=signal.name,
data=signal,
unit=units.dimensionality.string,
timestamps=signal.times.rescale('second').magnitude,
comments=json.dumps(hierarchy),
**additional_metadata)
else:
raise TypeError(
"signal has type {0}, should be AnalogSignal or IrregularlySampledSignal".format(
signal.__class__.__name__))
nwb_group = signal.annotations.get("nwb_group", "acquisition")
add_method_map = {
"acquisition": nwbfile.add_acquisition,
"stimulus": nwbfile.add_stimulus
}
if nwb_group in add_method_map:
add_time_series = add_method_map[nwb_group]
else:
raise NotImplementedError("NWB group '{}' not yet supported".format(nwb_group))
add_time_series(tS)
return tS
def _write_spiketrain(self, nwbfile, spiketrain):
nwbfile.add_unit(spike_times=spiketrain.rescale('s').magnitude,
obs_intervals=[[float(spiketrain.t_start.rescale('s')),
float(spiketrain.t_stop.rescale('s'))]],
_name=spiketrain.name,
# _description=spiketrain.description,
segment=spiketrain.segment.name,
block=spiketrain.segment.block.name)
# todo: handle annotations (using add_unit_column()?)
# todo: handle Neo Units
# todo: handle spike waveforms, if any (see SpikeEventSeries)
return nwbfile.units
def _write_event(self, nwbfile, event):
hierarchy = {'block': event.segment.block.name, 'segment': event.segment.name}
tS_evt = AnnotationSeries(
name=event.name,
data=event.labels,
timestamps=event.times.rescale('second').magnitude,
description=event.description or "",
comments=json.dumps(hierarchy))
nwbfile.add_acquisition(tS_evt)
return tS_evt
def _write_epoch(self, nwbfile, epoch):
for t_start, duration, label in zip(epoch.rescale('s').magnitude,
epoch.durations.rescale('s').magnitude,
epoch.labels):
nwbfile.add_epoch(t_start, t_start + duration, [label], [],
_name=epoch.name,
segment=epoch.segment.name,
block=epoch.segment.block.name)
return nwbfile.epochs
class AnalogSignalProxy(BaseAnalogSignalProxy):
common_metadata_fields = (
# fields that are the same for all TimeSeries subclasses
"comments", "description", "unit", "starting_time", "timestamps", "rate",
"data", "starting_time_unit", "timestamps_unit", "electrode"
)
def __init__(self, timeseries, nwb_group):
self._timeseries = timeseries
self.units = timeseries.unit
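        # NWB time series carry a base unit plus a scalar "conversion" factor;
        # _recompose_unit folds the factor back into the quantities unit so the
        # signal keeps its original scale.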
if timeseries.conversion:
self.units = _recompose_unit(timeseries.unit, timeseries.conversion)
if timeseries.starting_time is not None:
self.t_start = timeseries.starting_time * pq.s
else:
self.t_start = timeseries.timestamps[0] * pq.s
if timeseries.rate:
self.sampling_rate = timeseries.rate * pq.Hz
else:
self.sampling_rate = None
self.name = timeseries.name
self.annotations = {"nwb_group": nwb_group}
self.description = try_json_field(timeseries.description)
if isinstance(self.description, dict):
self.annotations["notes"] = self.description
if "name" in self.annotations:
self.annotations.pop("name")
self.description = None
self.shape = self._timeseries.data.shape
if len(self.shape) == 1:
self.shape = (self.shape[0], 1)
metadata_fields = list(timeseries.__nwbfields__)
for field_name in self.__class__.common_metadata_fields: # already handled
try:
metadata_fields.remove(field_name)
except ValueError:
pass
for field_name in metadata_fields:
value = getattr(timeseries, field_name)
if value is not None:
self.annotations[f"nwb:{field_name}"] = value
self.annotations["nwb_neurodata_type"] = (
timeseries.__class__.__module__,
timeseries.__class__.__name__
)
if hasattr(timeseries, "electrode"):
# todo: once the Group class is available, we could add electrode metadata
# to a Group containing all signals that share that electrode
# This would reduce the amount of redundancy (repeated metadata in every signal)
electrode_metadata = {"device": {}}
metadata_fields = list(timeseries.electrode.__class__.__nwbfields__) + ["name"]
metadata_fields.remove("device") # needs special handling
for field_name in metadata_fields:
value = getattr(timeseries.electrode, field_name)
if value is not None:
electrode_metadata[field_name] = value
for field_name in timeseries.electrode.device.__class__.__nwbfields__:
value = getattr(timeseries.electrode.device, field_name)
if value is not None:
electrode_metadata["device"][field_name] = value
self.annotations["nwb_electrode"] = electrode_metadata
def load(self, time_slice=None, strict_slicing=True):
"""
Load AnalogSignalProxy args:
:param time_slice: None or tuple of the time slice expressed with quantities.
None is the entire signal.
:param strict_slicing: True by default.
Control if an error is raised or not when one of the time_slice members
(t_start or t_stop) is outside the real time range of the segment.
"""
i_start, i_stop, sig_t_start = None, None, self.t_start
if time_slice:
if self.sampling_rate is None:
i_start, i_stop = np.searchsorted(self._timeseries.timestamps, time_slice)
else:
i_start, i_stop, sig_t_start = self._time_slice_indices(
time_slice, strict_slicing=strict_slicing)
signal = self._timeseries.data[i_start: i_stop]
if self.sampling_rate is None:
return IrregularlySampledSignal(
self._timeseries.timestamps[i_start:i_stop] * pq.s,
signal,
units=self.units,
t_start=sig_t_start,
sampling_rate=self.sampling_rate,
name=self.name,
description=self.description,
array_annotations=None,
**self.annotations) # todo: timeseries.control / control_description
else:
return AnalogSignal(
signal,
units=self.units,
t_start=sig_t_start,
sampling_rate=self.sampling_rate,
name=self.name,
description=self.description,
array_annotations=None,
**self.annotations) # todo: timeseries.control / control_description
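# Illustrative usage (not part of the original module); assumes `ts` is a pynwb
# TimeSeries that has already been read from an NWB file:
#     proxy = AnalogSignalProxy(ts, "acquisition")
#     whole_signal = proxy.load()
#     clip = proxy.load(time_slice=(0.5 * pq.s, 1.5 * pq.s))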
class EventProxy(BaseEventProxy):
def __init__(self, timeseries, nwb_group):
self._timeseries = timeseries
self.name = timeseries.name
self.annotations = {"nwb_group": nwb_group}
self.description = try_json_field(timeseries.description)
if isinstance(self.description, dict):
self.annotations.update(self.description)
self.description = None
self.shape = self._timeseries.data.shape
def load(self, time_slice=None, strict_slicing=True):
"""
Load EventProxy args:
:param time_slice: None or tuple of the time slice expressed with quantities.
None is the entire signal.
:param strict_slicing: True by default.
Control if an error is raised or not when one of the time_slice members
(t_start or t_stop) is outside the real time range of the segment.
"""
if time_slice:
raise NotImplementedError("todo")
else:
times = self._timeseries.timestamps[:]
labels = self._timeseries.data[:]
return Event(times * pq.s,
labels=labels,
name=self.name,
description=self.description,
**self.annotations)
class EpochProxy(BaseEpochProxy):
def __init__(self, time_intervals, epoch_name=None, index=None):
"""
:param time_intervals: An epochs table,
which is a specific TimeIntervals table that stores info about long periods
:param epoch_name: (str)
Name of the epoch object
:param index: (np.array, slice)
Slice object or array of bool values masking time_intervals to be used. In case of
an array it has to have the same shape as `time_intervals`.
"""
self._time_intervals = time_intervals
if index is not None:
self._index = index
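            # assumes a boolean mask; a plain slice has no .sum()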
self.shape = (index.sum(),)
else:
self._index = slice(None)
self.shape = (len(time_intervals),)
self.name = epoch_name
def load(self, time_slice=None, strict_slicing=True):
"""
Load EpochProxy args:
:param time_slice: None or tuple of the time slice expressed with quantities.
None is all of the intervals.
:param strict_slicing: True by default.
Control if an error is raised or not when one of the time_slice members
(t_start or t_stop) is outside the real time range of the segment.
"""
if time_slice:
raise NotImplementedError("todo")
else:
start_times = self._time_intervals.start_time[self._index]
stop_times = self._time_intervals.stop_time[self._index]
durations = stop_times - start_times
labels = self._time_intervals.tags[self._index]
return Epoch(times=start_times * pq.s,
durations=durations * pq.s,
labels=labels,
name=self.name)
class SpikeTrainProxy(BaseSpikeTrainProxy):
def __init__(self, units_table, id):
"""
:param units_table: A Units table
(see https://pynwb.readthedocs.io/en/stable/pynwb.misc.html#pynwb.misc.Units)
:param id: the cell/unit ID (integer)
"""
self._units_table = units_table
self.id = id
self.units = pq.s
obs_intervals = units_table.get_unit_obs_intervals(id)
if len(obs_intervals) == 0:
t_start, t_stop = None, None
elif len(obs_intervals) == 1:
t_start, t_stop = obs_intervals[0]
else:
raise NotImplementedError("Can't yet handle multiple observation intervals")
self.t_start = t_start * pq.s
self.t_stop = t_stop * pq.s
self.annotations = {"nwb_group": "acquisition"}
try:
# NWB files created by Neo store the name as an extra column
self.name = units_table._name[id]
except AttributeError:
self.name = None
self.shape = None # no way to get this without reading the data
def load(self, time_slice=None, strict_slicing=True):
"""
Load SpikeTrainProxy args:
:param time_slice: None or tuple of the time slice expressed with quantities.
None is the entire spike train.
:param strict_slicing: True by default.
Control if an error is raised or not when one of the time_slice members
(t_start or t_stop) is outside the real time range of the segment.
"""
interval = None
if time_slice:
interval = (float(t) for t in time_slice) # convert from quantities
spike_times = self._units_table.get_unit_spike_times(self.id, in_interval=interval)
return SpikeTrain(
spike_times * self.units,
self.t_stop,
units=self.units,
# sampling_rate=array(1.) * Hz,
t_start=self.t_start,
# waveforms=None,
# left_sweep=None,
name=self.name,
# file_origin=None,
# description=None,
# array_annotations=None,
**self.annotations)
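# Illustrative usage (not part of the original module); assumes `nwbfile.units` has
# already been populated, e.g. by the writer methods above:
#     proxy = SpikeTrainProxy(nwbfile.units, 0)
#     train = proxy.load(time_slice=(0 * pq.s, 10 * pq.s))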
|
import StringIO
from collections import OrderedDict
import csv
from django.http.response import HttpResponse
from tastypie.api import Api
from tastypie.resources import ModelResource
from tastypie.serializers import Serializer
from tastypie.utils.mime import build_content_type
from cvservices.models import Unit
from rdfserializer.api import ModelRdfResource
from models import ActionType, MethodType, OrganizationType, SamplingFeatureGeotype, SamplingFeatureType, SiteType, \
AggregationStatistic, AnnotationType, CensorCode, DatasetType, DirectiveType, ElevationDatum, EquipmentType, \
PropertyDataType, QualityCode, Medium, ResultType, SpatialOffsetType, UnitsType, Speciation, Status, \
TaxonomicClassifierType, VariableName, VariableType, SpecimenType, DataQualityType, RelationshipType
class CSVSerializer(Serializer):
formats = ['csv']
content_types = {
'csv': 'text/plain'
}
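    # to_csv flattens each object and writes the columns in a fixed order:
    # Term, UnitsName, UnitsTypeCV, UnitsAbbreviation, UnitsLink.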
def to_csv(self, data, options=None, writer=None):
options = options or {}
data = self.to_simple(data, options)
excluded_fields = [u'resource_uri']
raw_data = StringIO.StringIO()
first = True
if "meta" in data.keys():
objects = data.get("objects")
for value in objects:
test = {}
for excluded_field in excluded_fields:
del value[excluded_field]
self.flatten(value, test)
odict = OrderedDict()
odict['Term'] = test['term']
del test['term']
odict['UnitsName'] = test['name']
del test['name']
odict['UnitsTypeCV'] = test['type']
del test['type']
odict['UnitsAbbreviation'] = test['abbreviation']
del test['abbreviation']
odict['UnitsLink'] = test['link']
del test['link']
if first:
writer = csv.DictWriter(raw_data, odict.keys())
writer.writeheader()
writer.writerow(odict)
first = False
else:
                    writer.writerow({k: (v.encode('utf-8') if not isinstance(v, (int, type(None))) else v)
                                     for k, v in odict.items()})
else:
test = {}
for excluded_field in excluded_fields:
del data[excluded_field]
self.flatten(data, test)
odict = OrderedDict()
odict['Term'] = test['term']
del test['term']
odict['UnitsName'] = test['name']
del test['name']
odict['UnitsTypeCV'] = test['type']
del test['type']
odict['UnitsAbbreviation'] = test['abbreviation']
del test['abbreviation']
odict['UnitsLink'] = test['link']
del test['link']
if first:
writer = csv.DictWriter(raw_data, odict.keys())
writer.writeheader()
writer.writerow(odict)
first = False
else:
writer.writerow(odict)
CSVContent = raw_data.getvalue()
return CSVContent
    def flatten(self, data, odict=None):
        # use None instead of a mutable default argument to avoid sharing state between calls
        if odict is None:
            odict = {}
if isinstance(data, list):
for value in data:
self.flatten(value, odict)
elif isinstance(data, dict):
for (key, value) in data.items():
if not isinstance(value, (dict, list)):
odict[key] = value
else:
self.flatten(value, odict)
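    # Illustrative example (not part of the original code):
    #     d = {}
    #     self.flatten({"term": "metre", "unit": {"abbreviation": "m"}}, d)
    #     # d is now {"term": "metre", "abbreviation": "m"}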
class UnitsResource(ModelResource):
class Meta:
queryset = Unit.objects.all()
serializer = CSVSerializer()
allowed_methods = ['get']
resource_name = 'units'
excludes = ['unit_id']
class UnitsTypeResource(ModelRdfResource):
scheme = 'unitsType'
class Meta(ModelRdfResource.Meta):
queryset = UnitsType.objects.filter(ModelRdfResource.vocabulary_filter)
resource_name = 'unitstype'
class ActionTypeResource(ModelRdfResource):
scheme = 'actionType'
class Meta(ModelRdfResource.Meta):
queryset = ActionType.objects.filter(ModelRdfResource.vocabulary_filter)
resource_name = 'actiontype'
class MethodTypeResource(ModelRdfResource):
scheme = 'methodType'
class Meta(ModelRdfResource.Meta):
queryset = MethodType.objects.filter(ModelRdfResource.vocabulary_filter)
resource_name = 'methodtype'
class OrganizationTypeResource(ModelRdfResource):
scheme = 'organizationType'
class Meta(ModelRdfResource.Meta):
queryset = OrganizationType.objects.filter(ModelRdfResource.vocabulary_filter)
resource_name = 'organizationtype'
class SamplingFeatureGeotypeResource(ModelRdfResource):
scheme = 'samplingFeatureGeotype'
class Meta(ModelRdfResource.Meta):
queryset = SamplingFeatureGeotype.objects.filter(ModelRdfResource.vocabulary_filter)
resource_name = 'samplingfeaturegeotype'
class SamplingFeatureTypeResource(ModelRdfResource):
scheme = 'samplingFeatureType'
class Meta(ModelRdfResource.Meta):
queryset = SamplingFeatureType.objects.filter(ModelRdfResource.vocabulary_filter)
resource_name = 'samplingfeaturetype'
class SiteTypeResource(ModelRdfResource):
scheme = 'siteType'
class Meta(ModelRdfResource.Meta):
queryset = SiteType.objects.filter(ModelRdfResource.vocabulary_filter)
resource_name = 'sitetype'
class AggregationStatisticResource(ModelRdfResource):
scheme = 'aggregationStatistic'
class Meta(ModelRdfResource.Meta):
queryset = AggregationStatistic.objects.filter(ModelRdfResource.vocabulary_filter)
resource_name = 'aggregationstatistic'
class AnnotationTypeResource(ModelRdfResource):
scheme = 'annotationType'
class Meta(ModelRdfResource.Meta):
queryset = AnnotationType.objects.filter(ModelRdfResource.vocabulary_filter)
resource_name = 'annotationtype'
class CensorCodeResource(ModelRdfResource):
scheme = 'censorCode'
class Meta(ModelRdfResource.Meta):
queryset = CensorCode.objects.filter(ModelRdfResource.vocabulary_filter)
resource_name = 'censorcode'
class DatasetTypeResource(ModelRdfResource):
scheme = 'datasetType'
class Meta(ModelRdfResource.Meta):
queryset = DatasetType.objects.filter(ModelRdfResource.vocabulary_filter)
resource_name = 'datasettype'
class DirectiveTypeResource(ModelRdfResource):
scheme = 'directiveType'
class Meta(ModelRdfResource.Meta):
queryset = DirectiveType.objects.filter(ModelRdfResource.vocabulary_filter)
resource_name = 'directivetype'
class ElevationDatumResource(ModelRdfResource):
scheme = 'elevationDatum'
class Meta(ModelRdfResource.Meta):
queryset = ElevationDatum.objects.filter(ModelRdfResource.vocabulary_filter)
resource_name = 'elevationdatum'
class EquipmentTypeResource(ModelRdfResource):
scheme = 'equipmentType'
class Meta(ModelRdfResource.Meta):
queryset = EquipmentType.objects.filter(ModelRdfResource.vocabulary_filter)
resource_name = 'equipmenttype'
class PropertyDataTypeResource(ModelRdfResource):
scheme = 'propertyDataType'
class Meta(ModelRdfResource.Meta):
queryset = PropertyDataType.objects.filter(ModelRdfResource.vocabulary_filter)
resource_name = 'propertydatatype'
class QualityCodeResource(ModelRdfResource):
scheme = 'qualityCode'
class Meta(ModelRdfResource.Meta):
queryset = QualityCode.objects.filter(ModelRdfResource.vocabulary_filter)
resource_name = 'qualitycode'
class ResultTypeResource(ModelRdfResource):
scheme = 'resultType'
class Meta(ModelRdfResource.Meta):
queryset = ResultType.objects.filter(ModelRdfResource.vocabulary_filter)
resource_name = 'resulttype'
class SpatialOffsetTypeResource(ModelRdfResource):
scheme = 'spatialOffsetType'
class Meta(ModelRdfResource.Meta):
queryset = SpatialOffsetType.objects.filter(ModelRdfResource.vocabulary_filter)
resource_name = 'spatialoffsettype'
class SpeciationResource(ModelRdfResource):
scheme = 'speciation'
class Meta(ModelRdfResource.Meta):
queryset = Speciation.objects.filter(ModelRdfResource.vocabulary_filter)
resource_name = 'speciation'
class SpecimenTypeResource(ModelRdfResource):
scheme = 'specimenType'
class Meta(ModelRdfResource.Meta):
queryset = SpecimenType.objects.filter(ModelRdfResource.vocabulary_filter)
resource_name = 'specimentype'
class StatusResource(ModelRdfResource):
scheme = 'status'
class Meta(ModelRdfResource.Meta):
queryset = Status.objects.filter(ModelRdfResource.vocabulary_filter)
resource_name = 'status'
class TaxonomicClassifierTypeResource(ModelRdfResource):
scheme = 'taxonomicClassifierType'
class Meta(ModelRdfResource.Meta):
queryset = TaxonomicClassifierType.objects.filter(ModelRdfResource.vocabulary_filter)
resource_name = 'taxonomicclassifiertype'
class VariableNameResource(ModelRdfResource):
scheme = 'variableName'
class Meta(ModelRdfResource.Meta):
queryset = VariableName.objects.filter(ModelRdfResource.vocabulary_filter)
resource_name = 'variablename'
class VariableTypeResource(ModelRdfResource):
scheme = 'variableType'
class Meta(ModelRdfResource.Meta):
queryset = VariableType.objects.filter(ModelRdfResource.vocabulary_filter)
resource_name = 'variabletype'
class DataQualityTypeResource(ModelRdfResource):
scheme = 'dataQualityType'
class Meta(ModelRdfResource.Meta):
queryset = DataQualityType.objects.filter(ModelRdfResource.vocabulary_filter)
resource_name = 'dataqualitytype'
class RelationshipTypeResource(ModelRdfResource):
scheme = 'relationshipType'
class Meta(ModelRdfResource.Meta):
queryset = RelationshipType.objects.filter(ModelRdfResource.vocabulary_filter)
resource_name = 'relationshiptype'
class MediumResource(ModelRdfResource):
scheme = 'medium'
class Meta(ModelRdfResource.Meta):
queryset = Medium.objects.filter(ModelRdfResource.vocabulary_filter)
resource_name = 'medium'
v1_api = Api(api_name='v1')
v1_api.register(ActionTypeResource())
v1_api.register(MethodTypeResource())
v1_api.register(OrganizationTypeResource())
v1_api.register(SamplingFeatureGeotypeResource())
v1_api.register(SamplingFeatureTypeResource())
v1_api.register(SiteTypeResource())
v1_api.register(AggregationStatisticResource())
v1_api.register(AnnotationTypeResource())
v1_api.register(CensorCodeResource())
v1_api.register(DatasetTypeResource())
v1_api.register(DirectiveTypeResource())
v1_api.register(ElevationDatumResource())
v1_api.register(EquipmentTypeResource())
v1_api.register(PropertyDataTypeResource())
v1_api.register(QualityCodeResource())
v1_api.register(ResultTypeResource())
v1_api.register(SpatialOffsetTypeResource())
v1_api.register(SpeciationResource())
v1_api.register(SpecimenTypeResource())
v1_api.register(StatusResource())
v1_api.register(TaxonomicClassifierTypeResource())
v1_api.register(VariableNameResource())
v1_api.register(VariableTypeResource())
v1_api.register(DataQualityTypeResource())
v1_api.register(RelationshipTypeResource())
v1_api.register(MediumResource())
v1_api.register(UnitsTypeResource())
v1_api.register(UnitsResource())
|
from flexbe_core import EventState, Logger
from moveit_commander import MoveGroupCommander
from tf.transformations import quaternion_from_euler
from geometry_msgs.msg import Pose, Point, Quaternion, PointStamped
import math
import rospy
import tf
class GenGripperPose(EventState):
    '''
    Apply a transformation to a target pose to get a gripper-friendly approach pose.
    -- l        float       Radial offset in meters (how far to back off along the approach direction).
    -- z        float       Z offset in meters.
    -- planar   bool        Whether to use a planar approach (l will also be along the plane if true).
    ># pose_in  Pose/Point  Input pose (or point) in the map frame.
    #> pose_out Pose        Output pose in the base_link frame.
    <= done     Gripper pose generated successfully.
    <= fail     pose_in was neither a Pose nor a Point.
    '''
def __init__(self, l, z, planar):
super(GenGripperPose, self).__init__(outcomes=['done', 'fail'], input_keys=['pose_in'], output_keys=['pose_out'])
self.l = l
self.zo = z
self.planar = planar
self.group = MoveGroupCommander("RightArm")
self.listener = tf.TransformListener()
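    # Geometry: the state aims the gripper at the target (yaw, plus pitch unless
    # planar), raises the target by `z`, then backs the goal off from the target
    # toward the current gripper position by `l` metres, so the arm approaches the
    # target along the line from its current gripper pose.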
def execute(self, userdata):
        # check whether we received a Pose or a Point
out = Pose()
if type(userdata.pose_in) is Pose:
out.position.x = userdata.pose_in.position.x
out.position.y = userdata.pose_in.position.y
out.position.z = userdata.pose_in.position.z
elif type(userdata.pose_in) is Point:
out.position.x = userdata.pose_in.x
out.position.y = userdata.pose_in.y
out.position.z = userdata.pose_in.z
else:
Logger.loginfo('ERROR in ' + str(self.name) + ' : pose_in is not a Pose() nor a Point()')
return 'fail'
out.position.z += self.zo
gripperPose = self.group.get_current_pose().pose
        # compute the approach angles: yaw toward the target, and pitch unless planar
yaw = math.atan2((out.position.y - gripperPose.position.y), (out.position.x - gripperPose.position.x))
dist = ((out.position.y - gripperPose.position.y) ** 2 + (out.position.x - gripperPose.position.x) ** 2) ** 0.5
if self.planar:
pitch = 0
else:
pitch = -math.atan2((out.position.z - gripperPose.position.z), dist)
        # build the orientation quaternion from (roll=0, pitch, yaw)
quat = quaternion_from_euler(0, pitch, yaw)
self.quat = Quaternion()
self.quat.x = quat[0]
self.quat.y = quat[1]
self.quat.z = quat[2]
self.quat.w = quat[3]
        # compute the approach vector, pointing from the target back toward the gripper
vector = Point()
vector.x = (gripperPose.position.x - out.position.x)
vector.y = (gripperPose.position.y - out.position.y)
if not self.planar:
vector.z = (gripperPose.position.z - out.position.z)
        # normalize the vector and scale it to the radial offset l
norm = (vector.x ** 2 + vector.y ** 2 + vector.z ** 2) ** 0.5
vector.x *= self.l / norm
vector.y *= self.l / norm
vector.z *= self.l / norm
        # apply the approach vector to obtain the final gripper position
out.position.x += vector.x
out.position.y += vector.y
out.position.z += vector.z
out.orientation = self.quat
userdata.pose_out = out
return 'done'
|