prefix stringlengths 0 918k | middle stringlengths 0 812k | suffix stringlengths 0 962k |
|---|---|---|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Software License Agreement (BSD License)
#
# Copyright (c) 2010-2011, Antons Rebguns.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of University of Arizona nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
__author__ = 'Antons Rebguns'
__copyright__ = 'Copyright (c) 2010-2011 Antons Rebguns'
__license__ = 'BS | D'
__maintainer__ = 'Antons Rebguns'
__email__ = 'anton@email.arizona.edu'
import sys
from optparse import OptionParser
import roslib
roslib.load_manifest('dyna | mixel_driver')
from dynamixel_driver import dynamixel_io
if __name__ == '__main__':
usage_msg = 'Usage: %prog [options] MOTOR_IDs'
desc_msg = 'Sets various configuration options of specified Dynamixel servo motor.'
epi_msg = 'Example: %s --port=/dev/ttyUSB1 --baud=57600 --baud-rate=1 --return-delay=1 5 9 23' % sys.argv[0]
parser = OptionParser(usage=usage_msg, description=desc_msg, epilog=epi_msg)
parser.add_option('-p', '--port', metavar='PORT', default='/dev/ttyUSB0',
help='motors of specified controllers are connected to PORT [default: %default]')
parser.add_option('-b', '--baud', metavar='BAUD', type='int', default=1000000,
help='connection to serial port will be established at BAUD bps [default: %default]')
parser.add_option('-r', '--baud-rate', type='int', metavar='RATE', dest='baud_rate',
help='set servo motor communication speed')
parser.add_option('-d', '--return-delay', type='int', metavar='DELAY', dest='return_delay',
help='set servo motor return packet delay time')
parser.add_option('--cw-angle-limit', type='int', metavar='CW_ANGLE', dest='cw_angle_limit',
help='set servo motor CW angle limit')
parser.add_option('--ccw-angle-limit', type='int', metavar='CCW_ANGLE', dest='ccw_angle_limit',
help='set servo motor CCW angle limit')
parser.add_option('--min-voltage-limit', type='int', metavar='MIN_VOLTAGE', dest='min_voltage_limit',
help='set servo motor minimum voltage limit')
parser.add_option('--max-voltage-limit', type='int', metavar='MAX_VOLTAGE', dest='max_voltage_limit',
help='set servo motor maximum voltage limit')
(options, args) = parser.parse_args(sys.argv)
print options
if len(args) < 2:
parser.print_help()
exit(1)
port = options.port
baudrate = options.baud
motor_ids = args[1:-2]
print 'motor ids', motor_ids
try:
dxl_io = dynamixel_io.DynamixelIO(port, baudrate)
except dynamixel_io.SerialOpenError, soe:
print 'ERROR:', soe
else:
for motor_id in motor_ids:
print motor_id, type(motor_id)
motor_id = int(motor_id)
ping_res = dxl_io.ping(motor_id)
if ping_res:
# check if baud rate needs to be changed
if options.baud_rate:
valid_rates = (1,3,4,7,9,16,34,103,207,250,251,252)
if options.baud_rate not in valid_rates:
print 'Requested baud rate is invalid, please use one of the following: %s' % str(valid_rates)
if options.baud_rate <= 207:
print 'Setting baud rate to %d bps' % int(2000000.0/(options.baud_rate + 1))
elif options.baud_rate == 250:
print 'Setting baud rate to %d bps' % 2250000
elif options.baud_rate == 251:
print 'Setting baud rate to %d bps' % 2500000
elif options.baud_rate == 252:
print 'Setting baud rate to %d bps' % 3000000
dxl_io.set_baud_rate(motor_id, options.baud_rate)
# check if return delay time needs to be changed
if options.return_delay is not None:
if options.return_delay < 0 or options.return_delay > 254:
print 'Requested return delay time is out of valie range (0 - 254)'
print 'Setting return delay time to %d us' % (options.return_delay * 2)
dxl_io.set_return_delay_time(motor_id, options.return_delay)
# check if CW angle limit needs to be changed
if options.cw_angle_limit is not None:
print 'Setting CW angle limit to %d' % options.cw_angle_limit
dxl_io.set_angle_limit_cw(motor_id, options.cw_angle_limit)
# check if CCW angle limit needs to be changed
if options.ccw_angle_limit is not None:
print 'Setting CCW angle limit to %d' % options.ccw_angle_limit
dxl_io.set_angle_limit_ccw(motor_id, options.ccw_angle_limit)
else:
print "NOT SETTING CCW ANGLE LIMIT"
# check if minimum voltage limit needs to be changed
if options.min_voltage_limit:
print 'Setting minimum voltage limit to %d' % options.min_voltage_limit
dxl_io.set_voltage_limit_min(motor_id, options.min_voltage_limit)
# check if maximum voltage limit needs to be changed
if options.max_voltage_limit:
print 'Setting maximum voltage limit to %d' % options.max_voltage_limit
dxl_io.set_voltage_limit_max(motor_id, options.max_voltage_limit)
print 'done'
else:
print 'Unable to connect to Dynamixel motor with ID %d' % motor_id
|
from direct.distributed import DistributedObject
class DistributedTestObject(DistributedObject.DistributedObject):
def setRequiredField(self, r):
| self.requiredField = r
def setB(self, B):
self.B = B
def setBA(self, BA):
self.BA = BA
def setBO(self, BO):
self.BO = BO
def setBR(self, BR):
self.BR = BR
def setBRA(self, BRA):
self.BRA = BRA
def setBRO(self, BRO):
self.BRO = BRO
def setBROA(self, BROA):
self.BROA = BROA
def gotNonReqThatWasntSet(self):
for field in ('B', 'BA', 'BO', 'BR', 'BRA', 'BRO', 'BROA'):
if ha | sattr(self, field):
return True
return False
|
#!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "csrf_example.settings")
try:
from django.core.management import execute_from_command_line
except ImportError:
# The above import may fail for some other reason. Ensure that the
# issue is really that Django is missing to avoid masking other
# exceptions on Python 2.
try:
i | mport django
except ImportError:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
)
raise
execute_from_command_line(sys.argv) | |
from django.http import Http | ResponseRedirect
from django.utils.encoding import smart_str
def serve_file(request, file, **kwargs):
"""Serves files by redirecting to file.url (e.g., useful for Amazon S3)"""
return HttpResponseRedirect(smart_str(file.url))
def public_download_url(file, **kwargs):
"""Directs downlo | ads to file.url (useful for normal file system storage)"""
return file.url
|
ing : parameter from client side
@return : succ xxxxx
ex. jsonString => {vid:100, pif:eth0}
ex. return =>
"""
#Pre-condition
#check Physical Interface Name
if vlan.pif not in self.pifs.keys():
msg = "Physical Interface(%s) does not exist" % vlan.pif
logger.debug(self._createVlan, msg)
raise Exception(msg)
#Pre-condition
#check Vlan Interface Name
ifName = "%s.%s" % (vlan.pif, vlan.vid)
if ifName in self.vlans.keys():
msg = "Vlan Interface(%s) already exist, return it" % ifName
logger.debug(self._createVlan, msg)
return self.vlans[ifName]
doCmd(['vconfig', 'add', vlan.pif, vlan.vid])
self.bringUP(ifName)
logger.debug(self._createVlan, "Create vlan %s successfully"%ifName)
return self.vlans[ifName]
def _deleteVlan(self, name):
if name not in self.vlans.keys():
raise Exception("No vlan device %s found"%name)
vlan = self.vlans[name]
self.bringDown(vlan.name)
doCmd(['vconfig', 'rem', vlan.name])
logger.debug(self._deleteVlan, "Delete vlan %s successfully"%vlan.name)
def _createBridge(self, bridge):
"""
@return : success
ex. {bridge:xapi100, attach:eth0.100}
create bridge interface, and attached it
cmd 1: ip link add bridge
cmd 2: ip link set dev
"""
if "xenbr" not in bridge.name and "vlan" not in bridge.name:
raise Exception("Invalid bridge name %s. Bridge name must be in partten xenbr/vlan, e.g. xenbr0"%bridge.name)
#pre-condition
#check Bridge Interface Name
if bridge.name in self.bridges.keys():
msg = "Bridge(%s) already exist, return it" % bridge.name
logger.debug(self._createBridge, msg)
return self.bridges[bridge.name]
#pre-condition
#check attach must exist
#possible to attach in PIF or VLAN
if bridge.attach not in self.vlans.keys() and bridge.attach not in self.pifs.keys():
msg = "%s is not either pif or vlan" % bridge.attach
logger.error(self._createBridge, msg)
raise Exception(msg)
doCmd(['ip', 'link', 'add', 'name', bridge.name, 'type', 'bridge'])
doCmd(['ip', 'link', 'set', 'dev', bridge.attach, 'master', bridge.name])
self.bringUP(bridge.name)
logger.debug(self._createBridge, "Create bridge %s on %s successfully"%(bridge.name, bridge.attach))
return self.bridges[bridge.name]
def _getBridges(self):
return self.bridges.keys()
def _getVlans(self):
return self.vlans.keys()
def _deleteBridge(self, name):
if name not in self.bridges.keys():
raise Exception("Can not find bridge %s"%name)
bridge = self.bridges[name]
if bridge.attach in bridge.interfaces: bridge.interfaces.remove(bridge.attach)
if len(bridge.interfaces) != 0:
logger.debug(self._deleteBridge, "There are still some interfaces(%s) on bridge %s"%(bridge.interfaces, bridge.name))
return False
self.bringDown(bridge.name)
doCmd(['ip', 'link', 'del', bridge.name])
logger.debug(self._deleteBridge, "Delete bridge %s successfully"%bridge.name)
return True
def _getInterfaces(self, type):
"""
@param type : ["pif", "bridge", "tap"]
@return : dictionary of Interface Objects
get All Interfaces based on type
"""
devices = os.listdir('/sys/class/net')
ifs = {}
if type == "pif":
devs = self.Parser.findall(Filter.Network.IFNAME_PIF, devices)
for dev in set(devs):
ifInst = OvmInterface()
ifInst.name = dev
ifs[dev] = ifInst
elif type == "vlan":
devs = self.Parser.findall(Filter.Network.IFNAME_VLAN, devices)
for dev in set(devs):
ifInst = OvmVlan()
ifInst.name = dev
(pif, vid) = dev.split('.')
ifInst.pif = pif
ifInst.vid = vid
ifs[dev] = ifInst
elif type == "bridge":
devs = self.Parser.findall(Filter.Network.IFNAME_BRIDGE, devices)
for dev in set(devs):
ifInst = OvmBridge()
ifInst.name = dev
devs = os.listdir(join('/sys/class/net', dev, 'brif'))
ifInst.interfaces = devs
attches = self.Parser.findall(Filter.Network.IFNAME_PIF, devs) + self.Parser.findall(Filter.Network.IFNAME_VLAN, devs)
if len(attches) > 1: raise Exception("Multiple PIF on bridge %s (%s)"%(dev, attches))
elif len(attches) == 0: ifInst.attach = "null"
elif len(attches) == 1: ifInst.attach = attches[0]
ifs[dev] = ifInst
return ifs
def bringUP(self, ifName):
doCmd(['ifconfig', ifName, 'up'])
def bringDown(self, ifName):
doCmd(['ifconfig', ifName, 'down'])
@staticmethod
def createBridge(jStr):
try:
network = OvmNetwork()
network._createBridge(toOvmBridge(jStr))
return SUCC()
except Exception, e:
errmsg = fmt_err_msg(e)
logger.error(OvmNetwork.createBridge, errmsg)
raise XmlRpcFault(toErrCode(OvmNetwork, OvmNetwork.createBridge), errmsg)
@staticmethod
def deleteBridge(name):
try:
network = OvmNetwork()
network._deleteBridge(name)
return SUCC()
except Exception, e:
errmsg = fmt_err_msg(e)
logger.error(OvmNetwork.deleteBridge, errmsg)
raise XmlRpcFault(toErrCode(OvmNetwork, OvmNetwork.deleteBridge), errmsg)
@staticmethod
def getAllBridges():
try:
network = OvmNetwork()
rs = toGson(network._getBridges())
logger.debug(OvmNetwork.getAllBridges, rs)
return rs
except Exception, e:
errmsg = fmt_err_msg(e)
logger.error(OvmNetwork.getAllBridges, errmsg)
raise XmlRpcFault(toErrCode(OvmNetwork, OvmNetwork.getAllBridges), errmsg)
@staticmethod
def getBridgeByIp(ip):
try:
routes = doCmd(['ip', 'route']).split('\n')
brName = None
for r in routes:
if ip in r and "xenbr" in r or "vlan" in r:
brName = r.split(' ')[2]
break
if not brName: raise Exception("Cannot find bridge with IP %s"%ip)
logger.debug(OvmNetwork.getBridgeByIp, "bridge:%s, ip:%s"%(brName, ip))
return toGson({"bridge":brName})
except Exception, e:
errmsg = fmt_err_msg(e)
logger.error(OvmNetwork.getBridgeByIp, errmsg)
raise XmlRpcFault(toErrCode(OvmNetwork, OvmNetwork.getBridgeByIp), errmsg) |
@staticmethod
def getVlans():
| try:
network = OvmNetwork()
rs = toGson(network._getVlans())
logger.debug(OvmNetwork.getVlans, rs)
return rs
except Exception, e:
errmsg = fmt_err_msg(e)
logger.error(OvmNetwork.getVlans, errmsg)
raise XmlRpcFault(toErrCode(OvmNetwork, OvmNetwork.getVlans), errmsg)
@staticmethod
def createVlan(jStr):
try:
network = OvmNetwork()
vlan = network._createVlan(toOvmVlan(jStr))
rs = fromOvmVlan(vlan)
logger.debug(OvmNetwork.createVlan, rs)
return rs
except Exception, e:
errmsg = fmt_err_msg(e)
logger.error(OvmNetwork.createVlan, errmsg)
raise XmlRpcFault(toErrCode(OvmNetwork, OvmNetwork.createVlan), errmsg)
@staticmethod
def createVlan |
#!/bin/python3
impo | rt math
import os
import random
import re
import sys
# Complete the migratoryBirds function below.
# {1:2, 2:4, 3:3, 4:4}
def migratoryBirds(arr):
frequentBird, frequency = 1, 0
birdsDict = {}
for i in arr:
if i not in birdsDict.keys():
birdsDict[i | ] = 1
else:
birdsDict[i] = birdsDict[i] + 1
for bird in birdsDict.keys():
if birdsDict[bird] > frequency:
frequency = birdsDict[bird]
frequentBird = bird
if birdsDict[bird] == frequency:
if bird < frequentBird:
frequentBird = bird
return frequentBird
if __name__ == '__main__':
fptr = open(os.environ['OUTPUT_PATH'], 'w')
arr_count = int(input().strip())
arr = list(map(int, input().rstrip().split()))
result = migratoryBirds(arr)
fptr.write(str(result) + '\n')
fptr.close()
|
# Copyright (c) 2015, Web Notes Technologies Pvt. Ltd. and Contributors
# MIT License. See license.txt
from __future__ import unicode_literals, absolute_import, print_function
import sys
import click
import cProfile
import pstats
import frappe
import frappe.utils
from functools import wraps
from six import StringIO
click.disable_unicode_literals_warning = True
def pass_context(f):
@wraps(f)
def _func(ctx, *args, **kwargs):
profile = ctx.obj['profile']
if profile:
pr = cProfile.Profile()
pr.enable()
ret = f(frappe._dict(ctx.obj), *args, **kwargs)
if profile:
pr.disable()
s = StringIO()
ps = pstats.Stats(pr, stream=s)\
.sort_stats('cumtime', 'tottime', 'ncalls')
ps.print_stats()
# print the top-100
for line in s.getvalue().splitlines()[:100]:
print(line)
return ret
return click.pass_context(_func)
def get_site(context):
try:
site = context.sites[0]
return site
except (IndexError, TypeError):
print('Please specify --site sitename')
sys.exit(1)
def call_command(cmd, context):
return click.Context(cmd, o | bj=context).forward(cmd)
def get_commands() | :
# prevent circular imports
from .docs import commands as doc_commands
from .scheduler import commands as scheduler_commands
from .site import commands as site_commands
from .translate import commands as translate_commands
from .utils import commands as utils_commands
return list(set(doc_commands + scheduler_commands + site_commands + translate_commands + utils_commands))
commands = get_commands()
|
import csv
import numpy
import pandas
import pymongo
import requests
from datetime import datetime
from io import StringIO
mongo_client = pymongo.MongoClient("localhost", 27017)
financial_db = mongo_client.financial_data
financial_collection = financial_db.data
class Pull:
def __call__(self, source, tickers, start_date, end_date):
if source=='Google':
results = self.google_call(tickers, start_date, end_date)
return results
elif source=='Database':
results = self.database_call(tickers, start_date, end_date)
return results
def google_call(self, tickers, start_date, end_date):
"""
google_call makes a call to the google finance api for historical data
Args:
None (uses the class variables)
Returns:
None (sets self.results)
"""
results = {}
for ticker in tickers:
data_string = "https://www.google.com/finance/historical?q={ticker_symbol}&startdate={start_date}&enddate={end_date}&output=csv".format(
ticker_symbol = ticker,
start_date = start_date,
end_date = end_date
)
df = pandas.read_csv(StringIO(requests.get(data_string).text))
df['Return'] = df.Close - df.Close.shift(-1)
df['DailyPeriodicReturn'] = (df['Return'] / df.Close.shift(-1))
df['ContinuouslyCompoundingDailyPeriodicReturn'] = numpy.log(df.Close / df.Close.shift(-1))
df = df.fillna(0.0)
results[ticker] = {
"symbol": ticker,
"dat | e_added": datetime.utcnow(),
"data": df.to_dict(orient="records"),
"close_prices": list(df.Close.values),
"returns": list(df.Return.values),
"daily_periodic_return": list(df.DailyPeriodicReturn.values),
"continuous_daily_periodic_return": list(df.ContinuouslyCompoundingDailyPeriodicReturn.values),
"start_date": start_date,
"end_ | date": end_date,
"url": data_string
}
return results
def database_call(self, tickers, start_date, end_date):
"""
database_call makes a call to mongodb for the latest data
Args:
None
"""
results = {}
for ticker in tickers:
results[ticker] = financial_collection.find({
"ticker": ticker})[:][0]
return results
|
#
# MLDB-2126-export-structured.py
# Mathieu Marquis Bolduc, 2017-01-25
# This file is part of MLDB. Copyright 2017 mldb.ai inc. All rights reserved.
#
import tempfile
import codecs
import os
from mldb import mldb, MldbUnitTest, ResponseException
tmp_dir = os.getenv('TMP')
class MLDB2126exportstructuredTest(MldbUnitTest): # noqa
def assert_file_content(self, filename, lines_expect):
f = codecs.open(filename, 'rb', 'utf8')
for index, expect in enumerate(lines_expect):
line = f.readline()[:-1]
self.assertEqual(line, expect)
def test_row(self):
# create the dataset
mldb.put('/v1/datasets/patate', {
'type': 'tabular'
})
mldb.post('/v1/datasets/patate/rows', {
'rowName': 0,
'columns': [
['x.a', 1, 0],
['x.b', 2, 0]
]}
)
mldb.post('/v1/datasets/patate/commit')
tmp_file = tempfile.NamedTemporaryFile(dir=tmp_dir)
res = mldb.post('/v1/procedures', {
'type': 'export.csv',
'params': {
'exportData': 'select x as x from patate',
'dataFileUrl': 'file://' + tmp_file.name,
}
})
mldb.log(res)
lines_expect = ['x.a,x.b',
| '1,2'
| ]
self.assert_file_content(tmp_file.name, lines_expect)
if __name__ == '__main__':
mldb.run_tests()
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.28 on 2020-03-17 20:26
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('astrobin_apps_platesolving', '0011_update_platesolvingadvanced_settings_sample_ | raw_frame_file_verbose_name'),
]
operations = [
migrations.CreateModel(
name='PlateSolvingAdvancedTask',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('serial_number', models.CharField(max_length=32)),
('created', models.DateTimeField(auto_now_add=True)),
('active', models.BooleanField(default=True)),
| ('task_params', models.TextField()),
],
),
]
|
# -*- co | ding: utf-8 -*-
from __future__ import absolute_import, division, print_function
from .modes import Modes
from . import models
from . import bridges
from . import layers
from . import processing
from .libs import * # noqa
from . import activations
from . import initializations
from . import losses
from . import metrics
from . import optimizers
from . import regularizations
from .rl import explorations, environments as envs, memories, stats, utils as rl_utils
from | . import variables
from . import datasets
from . import estimators
from . import experiments
|
else:
raise UpdateError("Conflicting metadata values")
for conditional_node, value in conditionals:
if value != unconditional_value:
self.node.set(self.property_name, value, condition=conditional_node.children[0])
class MaxAssertsUpdate(PropertyUpdate):
property_name = "max-asserts"
cls_default_value = 0
value_type = int
def update_value(self, old_value, new_value):
new_value = self.value_type(new_value)
if old_value is not None:
old_value = self.value_type(old_value)
if old_value is not None and old_value < new_value:
return new_value + 1
if old_value is None:
return new_value + 1
return old_value
def update_default(self):
"""For asserts we always update the default value and never add new conditionals.
The value we set as the default is the maximum the current default or one more than the
number of asserts we saw in any configuration."""
# Current values
values = []
current_default = None
if self.property_name in self.node._data:
current_default = [item for item in
self.node._data[self.property_name]
if item.condition_node is None]
if current_default:
values.append(int(current_default[0].value))
values.extend(item.value for item in self.new)
values.extend(item.value for item in
itertools.chain.from_iterable(results for _, results in self.updated))
new_value = max(values)
return True, new_value
class MinAssertsUpdate(PropertyUpdate):
property_name = "min-asserts"
cls_default_value = 0
value_type = int
def update_value(self, old_value, new_value):
new_value = self.value_type(new_value)
if old_value is not None:
old_value = self.value_type(old_value)
if old_value is not None and new_value < old_value:
return 0
if old_value is None:
# If we are getting some asserts for the first time, set the minimum to 0
return new_value
return old_value
def update_default(self):
"""For asserts we always update the default value and never add new conditionals.
This is either set to the current value or one less than the number of asserts
we saw, whichever is lower."""
values = []
current_default = None
if self.property_name in self.node._data:
current_default = [item for item in
self.node._data[self.property_name]
if item.condition_node is None]
if current_default:
values.append(current_default[0].value_as(self.value_type))
values.extend(max(0, item.value) for item in self.new)
values.extend(max(0, item.value) for item in
itertools.chain.from_iterable(results for _, results in self.updated))
new_value = min(values)
return True, new_value
class LsanUpdate(PropertyUpdate):
property_name = "lsan-allowed"
cls_default_value = None
def get_value(self, result):
# If we have an allowed_match that matched, return None
# This value is ignored later (because it matches the default)
# We do that because then if we allow a failure in foo/__dir__.ini
# we don't want to update foo/bar/__dir__.ini with the same rule
if result[1]:
return None
# Otherwise return the topmost stack frame
# TODO: there is probably some improvement to be made by looking for a "better" stack frame
return result[0][0]
def update_value(self, old_value, new_value):
if isinstance(new_value, (str, unicode)):
new_value = {new_value}
else:
new_value = set(new_value)
if old_value is None:
old_value = set()
old_value = set(old_value)
return sorted((old_value | new_value) - {None})
def update_default(self):
current_default = None
if self.property_name in self.node._data:
current_default = [item for item in
self.node._data[self.property_name]
if item.condition_node is None]
if current_default:
current_default = current_default[0].value
new_values = [item.value for item in self.new]
new_value = self.update_v | alue(current_default, new_values)
return True, new_value if new_value else None
def group_conditionals(values, property_order=None, boolean_properties=None):
"""Given a list of Value objects, return a list of
(conditional_node, status) pairs | representing the conditional
expressions that are required to match each status
:param values: List of Values
:param property_order: List of properties to use in expectation metadata
from most to least significant.
:param boolean_properties: Set of properties in property_order that should
be treated as boolean."""
by_property = defaultdict(set)
for run_info, value in values:
for prop_name, prop_value in run_info.iteritems():
by_property[(prop_name, prop_value)].add(value)
if property_order is None:
property_order = ["debug", "os", "version", "processor", "bits"]
if boolean_properties is None:
boolean_properties = set(["debug"])
else:
boolean_properties = set(boolean_properties)
# If we have more than one value, remove any properties that are common
# for all the values
if len(values) > 1:
for key, statuses in by_property.copy().iteritems():
if len(statuses) == len(values):
del by_property[key]
if not by_property:
raise ConditionError
properties = set(item[0] for item in by_property.iterkeys())
include_props = []
for prop in property_order:
if prop in properties:
include_props.append(prop)
conditions = {}
for run_info, value in values:
prop_set = tuple((prop, run_info[prop]) for prop in include_props)
if prop_set in conditions:
if conditions[prop_set][1] != value:
# A prop_set contains contradictory results
raise ConditionError(make_expr(prop_set, value, boolean_properties))
continue
expr = make_expr(prop_set, value, boolean_properties=boolean_properties)
conditions[prop_set] = (expr, value)
return conditions.values()
def make_expr(prop_set, rhs, boolean_properties=None):
"""Create an AST that returns the value ``status`` given all the
properties in prop_set match.
:param prop_set: tuple of (property name, value) pairs for each
property in this expression and the value it must match
:param status: Status on RHS when all the given properties match
:param boolean_properties: Set of properties in property_order that should
be treated as boolean.
"""
root = ConditionalNode()
assert len(prop_set) > 0
expressions = []
for prop, value in prop_set:
number_types = (int, float, long)
value_cls = (NumberNode
if type(value) in number_types
else StringNode)
if prop not in boolean_properties:
expressions.append(
BinaryExpressionNode(
BinaryOperatorNode("=="),
VariableNode(prop),
value_cls(unicode(value))
))
else:
if value:
expressions.append(VariableNode(prop))
else:
expressions.append(
UnaryExpressionNode(
UnaryOperatorNode("not"),
VariableNode(prop)
))
if len(expressions) > 1:
prev = expressions[-1]
for cur |
import os
class Config(object):
DEBUG = False
TESTING = False
SECRET_KEY = 'A0Zr18h/3yX R~XHH!jmN]LWX/,?RT'
DATA | BASE = {
'engine': 'playhouse.pool.PooledPostgresqlExtDatabase',
'name': 'middleware',
'user': 'comunitea',
'port': '5434',
'host': 'localhost',
'max_connections': None,
'autocommit': True,
'autorollback': True,
'stale_timeout': 600}
NOTIFY_URL = "https://www.visiotechsecurity.com/?option=c | om_sync&task=sync.syncOdoo"
NOTIFY_USER = os.environ.get('NOTIFY_USER')
NOTIFY_PASSWORD = os.environ.get('NOTIFY_PASSWORD')
|
der the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import io
import os.path
import boto3.session
import botocore.exceptions
import freezegun
import pretend
import pytest
import redis
from zope.interface.verify import verifyClass
from warehouse.packaging.interfaces import IDownloadStatService, IFileStorage
from warehouse.packaging.services import (
RedisDownloadStatService, LocalFileStorage, S3FileStorage,
)
@freezegun.freeze_time("2012-01-14")
class TestRedisDownloadStatService:
def test_verify_service(self):
assert verifyClass(IDownloadStatService, RedisDownloadStatService)
def test_creates_redis(self, monkeypatch):
redis_obj = pretend.stub()
redis_cls = pretend.stub(
from_url=pretend.call_recorder(lambda u: redis_obj),
)
monkeypatch.setattr(redis, "StrictRedis", redis_cls)
url = pretend.stub()
svc = RedisDownloadStatService(url)
assert svc.redis is redis_obj
assert redis_cls.from_url.calls == [pretend.call(url)]
@pytest.mark.parametrize(
("keys", "result"),
[
([], 0),
([5, 7, 8], 20),
]
)
def test_get_daily_stats(self, keys, result):
svc = RedisDownloadStatService("")
svc.redis = pretend.stub(mget=pretend.call_recorder(lambda *a: keys))
call_keys = (
["downloads:hour:12-01-14-00:foo"] +
[
"downloads:hour:12-01-13-{:02d}:foo".format(i)
for i in reversed(range(24))
] +
["downloads:hour:12-01-12-23:foo"]
)
assert svc.get_daily_stats("foo") == result
assert svc.redis.mget.calls == [pretend.call(*call_keys)]
@pytest.mark.parametrize(
("keys", "result"),
[
([], 0),
([5, 7, 8], 20),
]
)
def test_get_weekly_stats(self, keys, result):
svc = RedisDownloadStatService("")
svc.redis = pretend.stub(mget=pretend.call_recorder(lambda *a: keys))
call_keys = [
"downloads:daily:12-01-{:02d}:foo".format(i + 7)
for i in reversed(range(8))
]
assert svc.get_weekly_stats("foo") == result
assert svc.redis.mget.calls == [pretend.call(*call_keys)]
@pytest.mark.parametrize(
("keys", "result"),
[
([], 0),
([5, 7, 8], 20),
]
)
def test_get_monthly_stats(self, keys, result):
svc = RedisDownloadStatService("")
svc.redis = pretend.stub(mget=pretend.call_recorder(lambda *a: | keys))
call_keys = [
"downloads:daily:12-01-{:02d}:foo".format(i)
for i in reversed(range(1, 15))
] + [
"downloads:daily:11-12-{:02d}:foo".format(i + 15)
for i in reversed(range(17))
]
assert svc.get_monthly_stats("foo") == result
assert svc.redis.mget.calls == [pretend.call(*call_keys)]
class TestLocalFileS | torage:
def test_verify_service(self):
assert verifyClass(IFileStorage, LocalFileStorage)
def test_basic_init(self):
storage = LocalFileStorage("/foo/bar/")
assert storage.base == "/foo/bar/"
def test_create_service(self):
request = pretend.stub(
registry=pretend.stub(
settings={"files.path": "/the/one/two/"},
),
)
storage = LocalFileStorage.create_service(None, request)
assert storage.base == "/the/one/two/"
def test_gets_file(self, tmpdir):
with open(str(tmpdir.join("file.txt")), "wb") as fp:
fp.write(b"my test file contents")
storage = LocalFileStorage(str(tmpdir))
file_object = storage.get("file.txt")
assert file_object.read() == b"my test file contents"
def test_raises_when_file_non_existant(self, tmpdir):
storage = LocalFileStorage(str(tmpdir))
with pytest.raises(FileNotFoundError):
storage.get("file.txt")
def test_stores_file(self, tmpdir):
filename = str(tmpdir.join("testfile.txt"))
with open(filename, "wb") as fp:
fp.write(b"Test File!")
storage_dir = str(tmpdir.join("storage"))
storage = LocalFileStorage(storage_dir)
storage.store("foo/bar.txt", filename)
with open(os.path.join(storage_dir, "foo/bar.txt"), "rb") as fp:
assert fp.read() == b"Test File!"
def test_stores_two_files(self, tmpdir):
filename1 = str(tmpdir.join("testfile1.txt"))
with open(filename1, "wb") as fp:
fp.write(b"First Test File!")
filename2 = str(tmpdir.join("testfile2.txt"))
with open(filename2, "wb") as fp:
fp.write(b"Second Test File!")
storage_dir = str(tmpdir.join("storage"))
storage = LocalFileStorage(storage_dir)
storage.store("foo/first.txt", filename1)
storage.store("foo/second.txt", filename2)
with open(os.path.join(storage_dir, "foo/first.txt"), "rb") as fp:
assert fp.read() == b"First Test File!"
with open(os.path.join(storage_dir, "foo/second.txt"), "rb") as fp:
assert fp.read() == b"Second Test File!"
class TestS3FileStorage:
    def test_verify_service(self):
        # Interface conformance: S3FileStorage must implement IFileStorage.
        assert verifyClass(IFileStorage, S3FileStorage)

    def test_basic_init(self):
        bucket = pretend.stub()
        storage = S3FileStorage(bucket)
        assert storage.bucket is bucket

    def test_create_service(self):
        # The service factory should look up the shared boto3 session and
        # bind to the bucket named in the registry settings.
        session = boto3.session.Session()
        request = pretend.stub(
            find_service=pretend.call_recorder(lambda name: session),
            registry=pretend.stub(settings={"files.bucket": "froblob"}),
        )
        storage = S3FileStorage.create_service(None, request)
        assert request.find_service.calls == [pretend.call(name="aws.session")]
        assert storage.bucket.name == "froblob"

    def test_gets_file(self):
        # get() should return the streaming "Body" of the S3 object.
        s3key = pretend.stub(get=lambda: {"Body": io.BytesIO(b"my contents")})
        bucket = pretend.stub(Object=pretend.call_recorder(lambda path: s3key))
        storage = S3FileStorage(bucket)
        file_object = storage.get("file.txt")
        assert file_object.read() == b"my contents"
        assert bucket.Object.calls == [pretend.call("file.txt")]

    def test_raises_when_key_non_existant(self):
        # A NoSuchKey ClientError must be translated to FileNotFoundError
        # so callers get the same exception as LocalFileStorage.
        def raiser():
            raise botocore.exceptions.ClientError(
                {"Error": {"Code": "NoSuchKey", "Message": "No Key!"}},
                "some operation",
            )
        s3key = pretend.stub(get=raiser)
        bucket = pretend.stub(Object=pretend.call_recorder(lambda path: s3key))
        storage = S3FileStorage(bucket)
        with pytest.raises(FileNotFoundError):
            storage.get("file.txt")
        assert bucket.Object.calls == [pretend.call("file.txt")]

    def test_passes_up_error_when_not_no_such_key(self):
        # Any other ClientError code must propagate untouched.
        def raiser():
            raise botocore.exceptions.ClientError(
                {"Error": {"Code": "SomeOtherError", "Message": "Who Knows!"}},
                "some operation",
            )
        s3key = pretend.stub(get=raiser)
        bucket = pretend.stub(Object=lambda path: s3key)
        storage = S3FileStorage(bucket)
        with pytest.raises(botocore.exceptions.ClientError):
            storage.get("file.txt")

    def test_stores_file(self, tmpdir):
        # store() should delegate to bucket.upload_file(local, key).
        filename = str(tmpdir.join("testfile.txt"))
        with open(filename, "wb") as fp:
            fp.write(b"Test File!")
        bucket = pretend.stub(
            upload_file=pretend.call_recorder(lambda filename, key: None),
        )
        storage = S3FileStorage(bucket)
        storage.store("foo/bar.txt", filename)
        assert bucket.upload_file.calls == [
            pretend.call(filename, "foo/bar.txt"),
        ]
def test_stores_two_files(self, tmpdir):
filename1 = str(tmpdir.join("testfile1.txt"))
with open(filename1, "wb |
# -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2010 Savoir-faire Linux (<http://www.savoirfairelinux.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope | that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# | You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
    # Odoo addon manifest: "Hazard Risk" extension for the management
    # system hazard module.
    "name": "Hazard Risk",
    "version": "8.0.1.1.0",
    "author": "Savoir-faire Linux, Odoo Community Association (OCA)",
    "website": "http://www.savoirfairelinux.com",
    "license": "AGPL-3",
    "category": "Management System",
    # Requires the base hazard module plus HR (for employee links).
    "depends": [
        'mgmtsystem_hazard',
        'hr'
    ],
    # Loaded in order: access rules first, then seed data, then views.
    "data": [
        'security/ir.model.access.csv',
        'data/mgmtsystem_hazard_risk_computation.xml',
        'data/mgmtsystem_hazard_risk_type.xml',
        'views/res_company.xml',
        'views/mgmtsystem_hazard.xml',
        'views/mgmtsystem_hazard_risk_type.xml',
        'views/mgmtsystem_hazard_risk_computation.xml',
        'views/mgmtsystem_hazard_residual_risk.xml',
    ],
    "installable": True,
}
|
# -*- coding: utf-8 -*-
"""
celery.events.dumper
~~~~~~~~~~~~~~~~~~~~
This is a simple program that dumps events to the console
as they happen. Think of it like a `tcpdump` for Celery events.
:copyright: (c) 2009 - 2011 by Ask Solem.
:license: BSD, see LICENSE for more details.
"""
from __future__ import absolute_import
import sys
from datetime import datetime
from ..app import app_or_default
from ..datastructures import LRUCache
# Remembered task descriptions keyed by uuid; bounded so long-running
# capture sessions don't grow memory without limit.
TASK_NAMES = LRUCache(limit=0xFFF)
# Friendly names for the non-task event types.
HUMAN_TYPES = {"worker-offline": "shutdown",
               "worker-online": "started",
               "worker-heartbeat": "heartbeat"}


def humanize_type(type):
    """Return a human-readable name for an event type string.

    Known worker events map through HUMAN_TYPES; anything else is
    lowercased with dashes turned into spaces.
    """
    lowered = type.lower()
    return HUMAN_TYPES.get(lowered, lowered.replace("-", " "))
class Dumper(object):
    """Print one human-readable line per received Celery event.

    Note: ``on_event`` consumes its argument — handled keys are popped
    from the event dict so only the leftovers are shown as fields.
    """

    def on_event(self, event):
        """Dispatch a single event dict to the console."""
        timestamp = datetime.fromtimestamp(event.pop("timestamp"))
        type = event.pop("type").lower()
        hostname = event.pop("hostname")
        if type.startswith("task-"):
            uuid = event.pop("uuid")
            if type in ("task-received", "task-sent"):
                # First sighting of this task: remember its description so
                # later lifecycle events (started/succeeded/...) can show it.
                task = TASK_NAMES[uuid] = "%s(%s) args=%s kwargs=%s" % (
                        event.pop("name"), uuid,
                        event.pop("args"),
                        event.pop("kwargs"))
            else:
                task = TASK_NAMES.get(uuid, "")
            return self.format_task_event(hostname, timestamp,
                                          type, task, event)
        fields = self._format_fields(event)
        sep = fields and ":" or ""
        print("%s [%s] %s%s %s" % (hostname, timestamp,
                                   humanize_type(type), sep, fields))

    def format_task_event(self, hostname, timestamp, type, task, event):
        """Print a line for a task-* event, including the task description."""
        fields = self._format_fields(event)
        sep = fields and ":" or ""
        print("%s [%s] %s%s %s %s" % (hostname, timestamp,
                                      humanize_type(type), sep, task, fields))

    def _format_fields(self, event):
        # Shared by both printers (was duplicated): remaining event items
        # rendered as "k=v" pairs in stable key order.
        return ", ".join("%s=%s" % (key, event[key])
                         for key in sorted(event.keys()))
def evdump(app=None):
    """Capture events from the broker and dump each one to stdout."""
    sys.stderr.write("-> evdump: starting capture...\n")
    app = app_or_default(app)
    connection = app.broker_connection()
    receiver = app.events.Receiver(connection,
                                   handlers={"*": Dumper().on_event})
    try:
        receiver.capture()
    except (KeyboardInterrupt, SystemExit):
        # Best-effort close; the connection may never have been opened.
        if connection:
            connection.close()


if __name__ == "__main__":
    evdump()
|
"""Config flow to configure the OVO Energy integration."""
import aiohttp
from ovoenergy.ovoenergy import OVOEnergy
import voluptuous as vol
from homeassistant import config_entries
from homeassistant.config_entries import ConfigFlow
from homeassistant.const import CONF_PASSWORD, CONF_USERNAME
from .const import DOMAIN # pylint: disable=unused-import
# Re-auth only asks for the password (the username is carried over);
# initial setup asks for both.
REAUTH_SCHEMA = vol.Schema({vol.Required(CONF_PASSWORD): str})
USER_SCHEMA = vol.Schema(
    {vol.Required(CONF_USERNAME): str, vol.Required(CONF_PASSWORD): str}
)
class OVOEnergyFlowHandler(ConfigFlow, domain=DOMAIN):
    """Handle a OVO Energy config flow."""

    VERSION = 1
    CONNECTION_CLASS = config_entries.CONN_CLASS_CLOUD_POLL

    def __init__(self):
        """Initialize the flow."""
        # Username remembered between reauth invocations (the reauth form
        # itself only collects the password).
        self.username = None

    async def async_step_user(self, user_input=None):
        """Handle a flow initiated by the user."""
        errors = {}

        if user_input is not None:
            client = OVOEnergy()
            try:
                authenticated = await client.authenticate(
                    user_input[CONF_USERNAME], user_input[CONF_PASSWORD]
                )
            except aiohttp.ClientError:
                # Network-level failure, not bad credentials.
                errors["base"] = "cannot_connect"
            else:
                if authenticated:
                    # One config entry per OVO account: abort if this
                    # username is already configured.
                    await self.async_set_unique_id(user_input[CONF_USERNAME])
                    self._abort_if_unique_id_configured()

                    return self.async_create_entry(
                        title=client.username,
                        data={
                            CONF_USERNAME: user_input[CONF_USERNAME],
                            CONF_PASSWORD: user_input[CONF_PASSWORD],
                        },
                    )

                # Credentials rejected by the service.
                errors["base"] = "invalid_auth"

        return self.async_show_form(
            step_id="user", data_schema=USER_SCHEMA, errors=errors
        )

    async def async_step_reauth(self, user_input):
        """Handle configuration by re-auth."""
        errors = {}

        # First call carries the stored entry data (username); remember it
        # and surface it in the flow title.
        if user_input and user_input.get(CONF_USERNAME):
            self.username = user_input[CONF_USERNAME]
            self.context["title_placeholders"] = {CONF_USERNAME: self.username}

        # Subsequent call carries the freshly entered password.
        if user_input is not None and user_input.get(CONF_PASSWORD) is not None:
            client = OVOEnergy()
            try:
                authenticated = await client.authenticate(
                    self.username, user_input[CONF_PASSWORD]
                )
            except aiohttp.ClientError:
                errors["base"] = "connection_error"
            else:
                if authenticated:
                    await self.async_set_unique_id(self.username)
                    # Update the existing entry in place with the new
                    # password, then end the flow.
                    for entry in self._async_current_entries():
                        if entry.unique_id == self.unique_id:
                            self.hass.config_entries.async_update_entry(
                                entry,
                                data={
                                    CONF_USERNAME: self.username,
                                    CONF_PASSWORD: user_input[CONF_PASSWORD],
                                },
                            )
                            return self.async_abort(reason="reauth_successful")

                errors["base"] = "authorization_error"

        return self.async_show_form(
            step_id="reauth", data_schema=REAUTH_SCHEMA, errors=errors
        )
|
#!/usr/bin/env python
"""
Copyright (c) 2006-2015 sqlmap developers (http://sqlmap.org/)
See the file 'doc/COPYING' for copying permission
"""
try:
import _mssql
import pymssql
except ImportError:
pass
import logging
from lib.core.convert import utf8encode
from lib.core.data import conf
from lib.core.data import logger
from lib.core.exception import SqlmapConnectionException
from plugins.generic.connector import Connector as GenericConnector
class Connector(GenericConnector):
    """
    MSSQL connector backed by the pymssql library (Python 2 codebase).

    Homepage: http://pymssql.sourceforge.net/
    User guide: http://pymssql.sourceforge.net/examples_pymssql.php
    API: http://pymssql.sourceforge.net/ref_pymssql.php
    Debian package: python-pymssql
    License: LGPL

    Possible connectors: http://wiki.python.org/moin/SQL%20Server

    Important note: pymssql library on your system MUST be version 1.0.2
    to work, get it from http://sourceforge.net/projects/pymssql/files/pymssql/1.0.2/
    """

    def __init__(self):
        GenericConnector.__init__(self)

    def connect(self):
        # Open the connection; any failure is rethrown as sqlmap's own
        # connection exception so upstream handling stays uniform.
        self.initConnection()

        try:
            self.connector = pymssql.connect(host="%s:%d" % (self.hostname, self.port), user=self.user, password=self.password, database=self.db, login_timeout=conf.timeout, timeout=conf.timeout)
        except pymssql.OperationalError, msg:
            raise SqlmapConnectionException(msg)

        self.initCursor()
        self.printConnected()

    def fetchall(self):
        # Returns all rows of the last query, or None on a remote error.
        # Errors are only warnings when a DBMS handler is active.
        try:
            return self.cursor.fetchall()
        except (pymssql.ProgrammingError, pymssql.OperationalError, _mssql.MssqlDatabaseException), msg:
            logger.log(logging.WARN if conf.dbmsHandler else logging.DEBUG, "(remote) %s" % str(msg).replace("\n", " "))
            return None

    def execute(self, query):
        # Execute a statement; returns True on success. InternalError is
        # treated as a lost connection and escalated.
        retVal = False

        try:
            self.cursor.execute(utf8encode(query))
            retVal = True
        except (pymssql.OperationalError, pymssql.ProgrammingError), msg:
            logger.log(logging.WARN if conf.dbmsHandler else logging.DEBUG, "(remote) %s" % str(msg).replace("\n", " "))
        except pymssql.InternalError, msg:
            raise SqlmapConnectionException(msg)

        return retVal

    def select(self, query):
        # Run a SELECT and return its rows (or None); commit afterwards
        # on a best-effort basis.
        retVal = None

        if self.execute(query):
            retVal = self.fetchall()

        try:
            self.connector.commit()
        except pymssql.OperationalError:
            pass

        return retVal
|
# -*- coding: utf-8 -*-
"""
***************************************************************************
diff.py
---------------------
Date : November 2013
Copyright : (C) 2013-2016 Boundless, http://boundlessgeo.com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later | version. *
* *
********************** | *****************************************************
"""
__author__ = 'Victor Olaya'
__date__ = 'November 2013'
__copyright__ = '(C) 2013-2016 Boundless, http://boundlessgeo.com'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
from feature import Feature
from geogig import NULL_ID
# Change kinds reported for a path between two commits.
TYPE_MODIFIED = "Modified"
TYPE_ADDED = "Added"
TYPE_REMOVED = "Removed"

# Per-attribute diff states within a modified feature.
ATTRIBUTE_DIFF_MODIFIED, ATTRIBUTE_DIFF_ADDED, ATTRIBUTE_DIFF_REMOVED, ATTRIBUTE_DIFF_UNCHANGED = ["M", "A", "R", "U"]
class Diffentry(object):
    """A difference between two references for a given path."""

    def __init__(self, repo, oldcommitref, newcommitref, oldref, newref, path):
        self.repo = repo
        self.path = path
        self.oldref = oldref
        self.newref = newref
        self.oldcommitref = oldcommitref
        self.newcommitref = newcommitref

    def oldobject(self):
        """Feature before the change, or None if the path was added."""
        if self.oldref == NULL_ID:
            return None
        return Feature(self.repo, self.oldcommitref, self.path)

    def newobject(self):
        """Feature after the change, or None if the path was removed."""
        if self.newref == NULL_ID:
            return None
        return Feature(self.repo, self.newcommitref, self.path)

    def featurediff(self):
        """Attribute-level diff for this path, as computed by the repo."""
        return self.repo.featurediff(self.oldcommitref, self.newcommitref,
                                     self.path)

    def type(self):
        """Classify this entry as TYPE_ADDED / TYPE_REMOVED / TYPE_MODIFIED."""
        if self.oldref == NULL_ID:
            return TYPE_ADDED
        if self.newref == NULL_ID:
            return TYPE_REMOVED
        return TYPE_MODIFIED

    def __str__(self):
        kind = self.type()
        if kind == TYPE_ADDED:
            return "%s %s (%s)" % (TYPE_ADDED, self.path, self.newref)
        if kind == TYPE_REMOVED:
            return TYPE_REMOVED + " " + self.path
        return "%s %s (%s --> %s)" % (TYPE_MODIFIED, self.path,
                                      self.oldref, self.newref)
|
The API version you wish to use for your
application. Subsonic will throw an error if you
try to use/send an api version higher than what
the server supports. See the Subsonic API docs
to find the Subsonic version -> API version table.
This is useful if you are connecting to an older
version of Subsonic.
insecure:bool This will allow you to use self signed
certificates when connecting if set to True.
useNetrc:str|bool You can either specify a specific netrc
formatted file or True to use your default
netrc file ($HOME/.netrc).
"""
self._baseUrl = baseUrl
self._hostname = baseUrl.split('://')[1].strip()
self._username = username
self._rawPass = password
self._netrc = None
if useNetrc is not None:
self._process_netrc(useNetrc)
elif username is None or password is None:
raise CredentialError('You must specify either a username/password '
'combination or "useNetrc" must be either True or a string '
'representing a path to a netrc file')
self._port = int(port)
self._apiVersion = apiVersion
self._appName = appName
self._serverPath = serverPath.strip('/')
self._insecure = insecure
self._opener = self._getOpener(self._username, self._rawPass)
    # Properties
    # Each setter that affects authentication rebuilds the urllib opener,
    # since credentials are baked into it.
    def setBaseUrl(self, url):
        self._baseUrl = url
        self._opener = self._getOpener(self._username, self._rawPass)
    baseUrl = property(lambda s: s._baseUrl, setBaseUrl)

    def setPort(self, port):
        self._port = int(port)
    port = property(lambda s: s._port, setPort)

    def setUsername(self, username):
        self._username = username
        self._opener = self._getOpener(self._username, self._rawPass)
    username = property(lambda s: s._username, setUsername)

    def setPassword(self, password):
        self._rawPass = password
        # Redo the opener with the new creds
        self._opener = self._getOpener(self._username, self._rawPass)
    password = property(lambda s: s._rawPass, setPassword)

    # Read-only: the negotiated API version never changes after init.
    apiVersion = property(lambda s: s._apiVersion)

    def setAppName(self, appName):
        self._appName = appName
    appName = property(lambda s: s._appName, setAppName)

    def setServerPath(self, path):
        # Stored without surrounding slashes; joined into URLs later.
        self._serverPath = path.strip('/')
    serverPath = property(lambda s: s._serverPath, setServerPath)

    def setInsecure(self, insecure):
        self._insecure = insecure
    insecure = property(lambda s: s._insecure, setInsecure)
# API methods
def ping(self):
"""
since: 1.0.0
Returns a boolean True if the server is alive, False otherwise
"""
methodName = 'ping'
viewName = '%s.view' % methodName
req = self._getRequest(viewName)
try:
res = self._doInfoReq(req)
except:
return False
if res['status'] == 'ok':
return True
elif res['status'] == 'failed':
exc = getExcByCode(res['error']['code'])
raise exc(res['error']['message'])
return False
def getLicense(self):
"""
since: 1.0.0
Gets details related to the software license
Returns a dict like the following:
{u'license': {u'date': u'2010-05-21T11:14:39',
u'email': u'email@example.com',
u'key': u'12345678901234567890123456789012',
u'valid': True},
u'status': u'ok',
u'version': u'1.5.0',
u'xmlns': u'http://subsonic.org/restapi'}
"""
methodName = 'getLicense'
viewName = '%s.view' % methodName
req = self._getRequest(viewName)
res = self._doInfoReq(req)
self._checkStatus(res)
return res
def getMusicFolders(self):
"""
since: 1.0.0
Returns all configured music folders
Returns a dict like the following:
{u'musicFolders': {u'musicFolder': [{u'id': 0, u'name': u'folder1'},
{u'id': 1, u'name': u'folder2'},
{u'id': 2, u'name': u'folder3'}]},
u'status': u'ok',
u'version': u'1.5.0',
u'xmlns': u'http://subsonic.org/restapi'}
"""
methodName = 'getMusicFolders'
viewName = '%s.view' % methodName
req = self._getRequest(viewName)
res = self._doInfoReq(req)
self._checkStatus(res)
return res
    def getNowPlaying(self):
        """
        since: 1.0.0

        Returns what is currently being played by all users

        Returns a dict like the following:

        {u'nowPlaying': {u'entry': {u'album': u"Jazz 'Round Midnight 12",
                                    u'artist': u'Astrud Gilberto',
                                    u'bitRate': 172,
                                    u'contentType': u'audio/mpeg',
                                    u'coverArt': u'98349284',
                                    u'duration': 325,
                                    u'genre': u'Jazz',
                                    u'id': u'2424324',
                                    u'isDir': False,
                                    u'isVideo': False,
                                    u'minutesAgo': 0,
                                    u'parent': u'542352',
                                    u'path': u"Astrud Gilberto/Jazz 'Round Midnight 12/01 - The Girl From Ipanema.mp3",
                                    u'playerId': 1,
                                    u'size': 7004089,
                                    u'suffix': u'mp3',
                                    u'title': u'The Girl From Ipanema',
                                    u'track': 1,
                                    u'username': u'user1',
                                    u'year': 1996}},
         u'status': u'ok',
         u'version': u'1.5.0',
         u'xmlns': u'http://subsonic.org/restapi'}
        """
        # Standard request cycle: GET <method>.view, parse, raise on
        # a 'failed' status, otherwise return the parsed response.
        methodName = 'getNowPlaying'
        viewName = '%s.view' % methodName

        req = self._getRequest(viewName)
        res = self._doInfoReq(req)
        self._checkStatus(res)
        return res
    def getIndexes(self, musicFolderId=None, ifModifiedSince=0):
        """
        since: 1.0.0

        Returns an indexed structure of all artists

        musicFolderId:int       If this is specified, it will only return
                                artists for the given folder ID from
                                the getMusicFolders call
        ifModifiedSince:int     If specified, return a result if the artist
                                collection has changed since the given
                                unix timestamp

        Returns a dict like the following:

        {u'indexes': {u'index': [{u'artist': [{u'id': u'29834728934',
                                               u'name': u'A Perfect Circle'},
                                              {u'id': u'238472893',
                                               u'name': u'A Small Good Thing'},
                                              {u'id': u'9327842983',
                                               u'name': u'A Tribe Called Quest'},
                                              {u'id': u'29348729874',
                                               u'name': u'A-Teens, The'},
                                              {u'id': u'298472938',
                                               u'name': u'ABA STRUCTURE'}],
                      u'lastModified': 1303318347000L},
         u'status': u'ok',
         u'version': u'1.5.0',
         u'xmlns': u'http://subsonic.org/restapi'}
        """
        methodName = 'getIndexes'
        viewName = '%s.view' % methodName

        # _getQueryDict drops None-valued params; the timestamp is sent in
        # milliseconds as the API expects.
        q = self._getQueryDict({'musicFolderId': musicFolderId,
                                'ifModifiedSince': self._ts2milli(ifModifiedSince)})

        req = self._getRequest(viewName, q)
        res = self._doInfoReq(req)
        self._checkStatus(res)
        # Normalize the lastModified field before handing it back.
        self._fixLastModified(res)
        return res
def get |
# -*- coding: utf-8 -*-
from | __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    """Alter the 'nome' and 'sigla' CharField definitions on universidademodel."""

    dependencies = [
        ('universidades', '0001_initial'),
    ]

    operations = [
        # nome: CharField with max_length=256
        migrations.AlterField(
            model_name='universidademodel',
            name='nome',
            field=models.CharField(max_length=256),
            preserve_default=True,
        ),
        # sigla (abbreviation): CharField with max_length=32
        migrations.AlterField(
            model_name='universidademodel',
            name='sigla',
            field=models.CharField(max_length=32),
            preserve_default=True,
        ),
    ]
|
from .common import *
# Local/development overrides layered on top of .common.

INTERNAL_IPS = ['127.0.0.1', ]

CORS_ORIGIN_WHITELIST = (
    'localhost:8000',
)

# No real caching in development.
CACHES = {
    'default': {
        'BACKEND': 'django.core.cache.backends.dummy.DummyCache',
    }
}

# django-q task queue backed by the default ORM database.
Q_CLUSTER = {
    'name': 'DjangORM',
    'workers': 2,
    'timeout': 90,
    'retry': 120,
    'queue_limit': 50,
    'bulk': 10,
    'orm': 'default',
    'catch_up': False  # do not replay missed schedules past
}

# Console-only logging; level controlled by DJANGO_LOG_LEVEL.
# NOTE(review): ``os`` is assumed to arrive via ``from .common import *``
# — confirm, otherwise this raises NameError at import time.
LOGGING = {
    'version': 1,
    'disable_existing_loggers': False,
    'handlers': {
        'console': {
            'class': 'logging.StreamHandler',
        },
    },
    'loggers': {
        'reminders': {
            'handlers': ['console'],
            'level': os.getenv('DJANGO_LOG_LEVEL', 'INFO'),
        },
        'messages': {
            'handlers': ['console'],
            'level': os.getenv('DJANGO_LOG_LEVEL', 'INFO'),
        },
    },
}
CO | NFIG_SCREEN = 'config'
RUNNING_SCREEN = 'running'
SUCCESS_SCREEN = 'success'
ERROR_SCREEN | = 'error'
|
from itertools import combinations
# Starting stats: player (P) and boss (B) — hit points, damage, armor.
START_P_HP = 100
START_P_DMG = 0
START_P_A = 0

START_B_HP = 100
START_B_DMG = 8
START_B_A = 2

# Shop items as [cost, damage, armor] triples.
WEAPONS = [ [8,4,0], [10,5,0], [25,6,0], [40,7,0], [74,8,0] ]
ARMOR = [ [13,0,1], [31,0,2], [53,0,3], [75,0,4], [102,0,5] ]
#Include 'no armor' option
ARMOR.append([0,0,0])
RINGS = [ [25,1,0], [50,2,0], [100,3,0], [20,0,1], [40,0,2], [80,0,3] ]
#Include 'no ring' options (two, so "0 rings" is expressible as a pair)
RINGS.append([0,0,0])
RINGS.append([0,0,0])
def main():
    """Find the highest gold cost that still LOSES the fight to the boss.

    Enumerates every legal loadout: exactly one weapon, 0-1 armor pieces
    and 0-2 rings (the "none" options are zero-cost placeholder items).
    """
    cost = None
    #1 Weapon
    for w in combinations(WEAPONS, 1):
        #0-1 Armor
        for a in combinations(ARMOR, 1):
            #0-2 Rings
            for r in combinations(RINGS, 2):
                bonuses = calc_bonuses(w, a, r)
                p_hp = START_P_HP
                p_cost = bonuses[0]
                p_dmg = bonuses[1] + START_P_DMG
                p_a = bonuses[2] + START_P_A
                win = is_win(START_B_HP, START_B_DMG, START_B_A,
                             p_hp, p_dmg, p_a)
                #We are seeking to lose the fight, so not win
                #We are also looking for highest cost
                if not win and (cost is None or p_cost > cost):
                    cost = p_cost
    # FIX: parenthesized print — identical output on Python 2,
    # valid syntax on Python 3 (was a py2-only print statement).
    print(cost)
def is_win(b_hp, b_dmg, b_a, p_hp, p_dmg, p_a):
    """Return True if the player beats the boss.

    Effective damage per hit is attack minus the defender's armor,
    with a floor of 1.  The side needing fewer (floored) turns wins;
    ties go to the player because the player attacks first.
    """
    b_dmg = max(b_dmg - p_a, 1)
    p_dmg = max(p_dmg - b_a, 1)
    #<= because we start first
    # FIX: explicit floor division — the original ``/`` relied on Python 2
    # integer division and would silently produce floats (and different
    # comparisons) under Python 3.
    return (b_hp // p_dmg) <= (p_hp // b_dmg)
def calc_bonuses(w, a, r):
    """Sum the [cost, damage, armor] triples across all selected items.

    w, a, r are iterables of shop items (each a 3-element list); the
    result is the element-wise total as a 3-element list.
    """
    totals = [0, 0, 0]
    for group in (w, a, r):
        for item in group:
            for idx, value in enumerate(item):
                totals[idx] += value
    return totals
# Script entry point.
if __name__ == "__main__":
    main()
|
import datetime
import sys
import pdb
from directory import directory
# Dead code on purpose: referencing pdb keeps pyflakes from flagging the
# import as unused while leaving a breakpoint one edit away.
if False:
    pdb.set_trace()  # avoid warning message from pyflakes
class Logger(object):
    """Tee for stdout: every write goes to the real stdout AND a log file.

    Pattern from Stack Overflow: "how do I duplicate sys.stdout to a log
    file in python".  Install with ``sys.stdout = Logger(...)``.
    """

    def __init__(self, logfile_path=None, logfile_mode='w', base_name=None):
        # Timestamped path under the log directory, used in base_name mode.
        def path(s):
            return directory('log') + s + datetime.datetime.now().isoformat('T') + '.log'
        self.terminal = sys.stdout
        # Colons are not portable in filenames (isoformat timestamps).
        # NOTE(review): if both logfile_path and base_name are None this
        # raises AttributeError — confirm callers always supply one.
        clean_path = logfile_path.replace(':', '-') if base_name is None else path(base_name)
        self.log = open(clean_path, logfile_mode)

    def write(self, message):
        # Mirror the message to both sinks.
        self.terminal.write(message)
        self.log.write(message)

    def flush(self):
        # BUG FIX: was ``def flush():`` (missing self), so any caller doing
        # sys.stdout.flush() got a TypeError.  Also actually flush both
        # streams instead of silently doing nothing.
        self.terminal.flush()
        self.log.flush()
# Dead code on purpose: documentation-only usage example.
if False:
    # usage example
    sys.stdout = Logger('path/to/log/file')
    # now print statements write on both stdout and the log file
|
"""
Higher order classes and functions for Libvirt Sandbox (lxc) container testing
:copyright: 2013 Red Hat Inc.
"""
import datetime
import time
import logging
import lvsb_base
# This utility function lets test-modules quickly create a list of all
# sandbox aggregate types, themselves containing a list of individual
# sandboxes.


def make_sandboxes(params, env, extra_ns=None):
    """
    Return list of instantiated lvsb_testsandboxes classes from params

    :param params: an undiluted Params instance
    :param env: the current env instance
    :param extra_ns: An extra, optional namespace to search for classes
    """
    namespace = globals()  # stuff in this module
    # For specialized sandbox types, allow their class to be defined
    # inside test module or elsewhere.
    if extra_ns is not None:
        namespace.update(extra_ns)  # copy in additional symbols
    names = namespace.keys()
    # Test may require more than one sandbox aggregator class
    pobs = params.objects('lvsb_testsandboxes')  # mandatory parameter
    # Filter out non-TestSandboxes subclasses.
    # NOTE(review): this deletes symbols from this module's globals() for
    # the life of the process (globals() returns the live dict, not a
    # copy) — deliberate here, but surprising; confirm before reusing.
    for name in names:
        try:
            if not issubclass(namespace[name], lvsb_base.TestSandboxes):
                # Working on name list, okay to modify dict
                del namespace[name]
        except TypeError:
            # Symbol wasn't a class, just ignore it
            pass
    # Return a list of instantiated sandbox_testsandboxes's classes
    return [namespace[type_name](params, env) for type_name in pobs]
# TestSandboxes subclasses defined below, or inside other namespaces like
# a test module.  They simply help the test-module iterate over many
# aggregate manager classes and the sandboxes they contain.


class TestSimpleSandboxes(lvsb_base.TestSandboxes):

    """
    Simplistic sandbox aggregate manager that just executes a command
    """

    def __init__(self, params, env):
        """
        Initialize to run, all SandboxCommandBase's

        :param params: an undiluted Params instance
        :param env: the current env instance
        """
        super(TestSimpleSandboxes, self).__init__(params, env)
        self.init_sandboxes()  # create instances of SandboxCommandBase
        # Build each sandbox's command line:
        # Point all of them at the same local uri
        self.for_each(lambda sb: sb.add_optarg('-c', self.uri))
        # Use each instances name() method to produce name argument
        self.for_each(lambda sb: sb.add_optarg('-n', sb.name))
        # Command should follow after a --
        self.for_each(lambda sb: sb.add_mm())
        # Each one gets the same command (that's why it's simple)
        self.for_each(lambda sb: sb.add_pos(self.command))

    def results(self, each_timeout=5):
        """
        Run sandbox(es), allowing each_timeout to complete, return output list

        :param each_timeout: seconds allowed per sandbox (total budget is
            each_timeout * number of sandboxes)
        :raises lvsb_base.SandboxException: if any sandbox is still running
            when the total budget expires
        """
        # Sandboxes run asynchronously, prevent them from running forever
        start = datetime.datetime.now()
        total_timeout_seconds = each_timeout * self.count
        timeout_at = start + datetime.timedelta(seconds=total_timeout_seconds)
        # No need to write a method just to call the run method
        self.for_each(lambda sb: sb.run())
        while datetime.datetime.now() < timeout_at:
            # Wait until number of running sandboxes is zero
            if bool(self.are_running()):
                time.sleep(0.1)  # Don't busy-wait
                continue
            else:  # none are running
                break
        # Needed for accurate time in logging message below
        end = datetime.datetime.now()
        # Needed for logging message if none exited before timeout
        still_running = self.are_running()
        # Cause all exited sessions to clean up when sb.stop() called
        self.for_each(lambda sb: sb.auto_clean(True))
        # If raise, auto_clean will make sure cleanup happens
        if bool(still_running):
            raise lvsb_base.SandboxException("%d of %d sandboxes are still "
                                             "running after "
                                             "the timeout of %d seconds."
                                             % (still_running,
                                                self.count,
                                                total_timeout_seconds))
        # Kill off all sandboxes, just to be safe
        self.for_each(lambda sb: sb.stop())
        logging.info("%d sandboxe(s) finished in %s", self.count,
                     end - start)
        # Return a list of stdout contents from each
        return self.for_each(lambda sb: sb.recv())
|
import boto
from boto.swf.exceptions import SWFResponseError
import sure # noqa
from moto import mock_swf_deprecated
# RegisterDomain endpoint
@mock_swf_deprecated
def test_register_domain():
    # A freshly registered domain is listed as REGISTERED with its metadata.
    conn = boto.connect_swf("the_key", "the_secret")
    conn.register_domain("test-domain", "60", description="A test domain")

    all_domains = conn.list_domains("REGISTERED")
    domain = all_domains["domainInfos"][0]

    domain["name"].should.equal("test-domain")
    domain["status"].should.equal("REGISTERED")
    domain["description"].should.equal("A test domain")


@mock_swf_deprecated
def test_register_already_existing_domain():
    # Registering the same domain name twice must fail.
    conn = boto.connect_swf("the_key", "the_secret")
    conn.register_domain("test-domain", "60", description="A test domain")

    conn.register_domain.when.called_with(
        "test-domain", "60", description="A test domain"
    ).should.throw(SWFResponseError)


@mock_swf_deprecated
def test_register_with_wrong_parameter_type():
    # The retention period must be a string; an int is rejected.
    conn = boto.connect_swf("the_key", "the_secret")

    conn.register_domain.when.called_with(
        "test-domain", 60, description="A test domain"
    ).should.throw(SWFResponseError)
# ListDomains endpoint
@mock_swf_deprecated
def test_list_domains_order():
    # Domains list alphabetically regardless of registration order.
    conn = boto.connect_swf("the_key", "the_secret")
    conn.register_domain("b-test-domain", "60")
    conn.register_domain("a-test-domain", "60")
    conn.register_domain("c-test-domain", "60")

    all_domains = conn.list_domains("REGISTERED")
    names = [domain["name"] for domain in all_domains["domainInfos"]]
    names.should.equal(["a-test-domain", "b-test-domain", "c-test-domain"])


@mock_swf_deprecated
def test_list_domains_reverse_order():
    # reverse_order=True flips the alphabetical listing.
    conn = boto.connect_swf("the_key", "the_secret")
    conn.register_domain("b-test-domain", "60")
    conn.register_domain("a-test-domain", "60")
    conn.register_domain("c-test-domain", "60")

    all_domains = conn.list_domains("REGISTERED", reverse_order=True)
    names = [domain["name"] for domain in all_domains["domainInfos"]]
    names.should.equal(["c-test-domain", "b-test-domain", "a-test-domain"])
# DeprecateDomain endpoint
@mock_swf_deprecated
def test_deprecate_domain():
    # A deprecated domain moves from the REGISTERED to the DEPRECATED list.
    conn = boto.connect_swf("the_key", "the_secret")
    conn.register_domain("test-domain", "60", description="A test domain")
    conn.deprecate_domain("test-domain")

    all_domains = conn.list_domains("DEPRECATED")
    domain = all_domains["domainInfos"][0]

    domain["name"].should.equal("test-domain")


@mock_swf_deprecated
def test_deprecate_already_deprecated_domain():
    # Deprecating twice must fail.
    conn = boto.connect_swf("the_key", "the_secret")
    conn.register_domain("test-domain", "60", description="A test domain")
    conn.deprecate_domain("test-domain")

    conn.deprecate_domain.when.called_with(
        "test-domain"
    ).should.throw(SWFResponseError)


@mock_swf_deprecated
def test_deprecate_non_existent_domain():
    # Deprecating an unknown domain must fail.
    conn = boto.connect_swf("the_key", "the_secret")

    conn.deprecate_domain.when.called_with(
        "non-existent"
    ).should.throw(SWFResponseError)
# DescribeDomain endpoint
@mock_swf_deprecated
def test_describe_domain():
    # describe_domain returns both configuration and domainInfo sections.
    conn = boto.connect_swf("the_key", "the_secret")
    conn.register_domain("test-domain", "60", description="A test domain")

    domain = conn.describe_domain("test-domain")
    domain["configuration"][
        "workflowExecutionRetentionPeriodInDays"].should.equal("60")
    domain["domainInfo"]["description"].should.equal("A test domain")
    domain["domainInfo"]["name"].should.equal("test-domain")
    domain["domainInfo"]["status"].should.equal("REGISTERED")


@mock_swf_deprecated
def test_describe_non_existent_domain():
    # Describing an unknown domain must fail.
    conn = boto.connect_swf("the_key", "the_secret")

    conn.describe_domain.when.called_with(
        "non-existent"
    ).should.throw(SWFResponseError)
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os, sys
import traceback
from argparse import ArgumentParser
class SRDKRunError(Exception):
    """Raised when an SRDK shell command exits with a non-zero status."""

    def __init__(self, message):
        # BUG FIX: pass the message to Exception so str(e)/print(e) show
        # it — previously Exception.__init__ was never called and
        # ``print(e)`` printed an empty string.
        super(SRDKRunError, self).__init__(message)
        self.msg = message  # kept for existing ``.msg`` consumers
def run_commands(cmds):
    """Run each command (a list of argument strings) via os.system.

    A non-zero exit status is reported ("Exception: ...") but does not
    stop the remaining commands — the raised SRDKRunError is caught
    immediately, preserving the original best-effort behaviour.
    """
    for cmd in cmds:
        cmd = u" ".join(cmd)
        print("Rhedeg %s" % cmd)  # "Running <cmd>" (Welsh)
        returncode = os.system(cmd)
        try:
            if returncode != 0:
                exception_str = ["Problem yn rhedeg y gorchymyn:", "  %s" % cmd]
                raise SRDKRunError(u"\n".join(exception_str))
        except SRDKRunError as arg:
            # FIX: ``except X, arg`` was Python-2-only syntax; ``as`` is
            # valid on Python 2.6+ and 3.  Output text is unchanged.
            print('Exception: %s' % arg.msg)
def train_singleuser(userid, **args):
    """Train an HTK acoustic model (Hyfforddi model acwstig HTK).

    :param userid: optional contributor id; when given, results go to
        results/<userid> and audio coding is restricted to that user.
    :param args: remaining argparse attributes (unused here).
    """
    srdk_cmds = []
    print "SRDK_Train : %s" % userid
    if userid :
        # Start from a clean per-user results directory.
        srdk_cmds.append(["rm -rf results/" + userid])
        srdk_cmds.append(["mkdir -p results/" + userid])
    # Standard SRDK training pipeline, in order.
    srdk_cmds.append(["SRDK_2_PronunciationDictionary"])
    srdk_cmds.append(["SRDK_4_Transcriptions"])
    if userid:
        srdk_cmds.append(["SRDK_5_CodingAudioData " + userid ])
    else:
        srdk_cmds.append(["SRDK_5_CodingAudioData"])
    srdk_cmds.append(["SRDK_6_FlatStart"])
    srdk_cmds.append(["SRDK_7_SilenceModels"])
    srdk_cmds.append(["SRDK_8_Realign"])
    srdk_cmds.append(["SRDK_9_Triphones"])
    srdk_cmds.append(["SRDK_10_TiedStateTriphones"])
    srdk_cmds.append(["SRDK_11_TestModels"])
    if userid:
        # Keep this user's recognition output alongside their results.
        srdk_cmds.append(["cp recout.mlf results/" + userid])
    #srdk_cmds.append(["SRDK_12_Release"])
    run_commands(srdk_cmds)
# CLI entry point: optional -u/--userid selects a single contributor.
if __name__ == "__main__":
    parser = ArgumentParser(description="Sgript creu model acwstig gyda un gorchymun")
    parser.add_argument('-u', '--userid', dest="userid", required=False, help="userid cyfrannwr benodol")
    # argparse "func" pattern: the parsed namespace carries its handler.
    parser.set_defaults(func=train_singleuser)
    args=parser.parse_args()
    try:
        args.func(**vars(args))
    except SRDKRunError as e:
        print ("\n**SRDK ERROR**\n")
        print (e)
|
#!/usr/bin/env python
#
# Copyright 2011 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under | the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it wil | l be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
"""
Read samples from a UHD device and write to file formatted as binary
outputs single precision complex float values or complex short values
(interleaved 16 bit signed short integers).
"""
from gnuradio import gr, eng_notation
from gnuradio import uhd
from gnuradio.eng_option import eng_option
from optparse import OptionParser
import sys
n2s = eng_notation.num_to_str
class rx_cfile_block(gr.top_block):
    """GNU Radio flowgraph: UHD source -> (optional head) -> file sink.

    Streams samples from a UHD device into `filename`, either as
    interleaved 16-bit shorts ('sc16') or complex floats ('fc32'),
    depending on options.output_shorts.  Python 2 code (print
    statements, `raise SystemExit, 1`).
    """
    def __init__(self, options, filename):
        gr.top_block.__init__(self)
        # Create a UHD device source
        if options.output_shorts:
            # 'sc16' stream: 2 shorts (I/Q) per sample on disk.
            self._u = uhd.usrp_source(device_addr=options.args, stream_args=uhd.stream_args('sc16'))
            self._sink = gr.file_sink(gr.sizeof_short*2, filename)
        else:
            # 'fc32' stream: one gr_complex (2 floats) per sample on disk.
            self._u = uhd.usrp_source(device_addr=options.args, stream_args=uhd.stream_args('fc32'))
            self._sink = gr.file_sink(gr.sizeof_gr_complex, filename)
        # Set receiver sample rate
        self._u.set_samp_rate(options.samp_rate)
        # Set receive daughterboard gain; default to the midpoint of the
        # device's reported gain range when the user gave none.
        if options.gain is None:
            g = self._u.get_gain_range()
            options.gain = float(g.start()+g.stop())/2
            print "Using mid-point gain of", options.gain, "(", g.start(), "-", g.stop(), ")"
        self._u.set_gain(options.gain)
        # Set the subdevice spec
        if(options.spec):
            self._u.set_subdev_spec(options.spec, 0)
        # Set the antenna
        if(options.antenna):
            self._u.set_antenna(options.antenna, 0)
        # Set frequency (tune request takes lo_offset)
        if(options.lo_offset is not None):
            treq = uhd.tune_request(options.freq, options.lo_offset)
        else:
            treq = uhd.tune_request(options.freq)
        tr = self._u.set_center_freq(treq)
        if tr == None:
            sys.stderr.write('Failed to set center frequency\n')
            raise SystemExit, 1
        # Create head block if needed and wire it up; head truncates the
        # stream after nsamples so finite captures terminate on their own.
        if options.nsamples is None:
            self.connect(self._u, self._sink)
        else:
            if options.output_shorts:
                self._head = gr.head(gr.sizeof_short*2, int(options.nsamples))
            else:
                self._head = gr.head(gr.sizeof_gr_complex, int(options.nsamples))
            self.connect(self._u, self._head, self._sink)
        input_rate = self._u.get_samp_rate()
        if options.verbose:
            print "Args: ", options.args
            print "Rx gain:", options.gain
            print "Rx baseband frequency:", n2s(tr.actual_rf_freq)
            print "Rx DDC frequency:", n2s(tr.actual_dsp_freq)
            print "Rx Sample Rate:", n2s(input_rate)
            if options.nsamples is None:
                print "Receiving samples until Ctrl-C"
            else:
                # NOTE(review): "Receving" typo in user-visible output;
                # left untouched here since it is runtime text.
                print "Receving", n2s(options.nsamples), "samples"
            if options.output_shorts:
                print "Writing 16-bit complex shorts"
            else:
                print "Writing 32-bit complex floats"
            print "Output filename:", filename
def get_options():
    """Parse command-line options for the capture script.

    Returns (options, output_filename).  Exits via SystemExit(1) when the
    positional output filename is missing or -f/--freq was not supplied.
    Python 2 code (`raise SystemExit, 1`).
    """
    usage="%prog: [options] output_filename"
    parser = OptionParser(option_class=eng_option, usage=usage)
    parser.add_option("-a", "--args", type="string", default="",
                      help="UHD device address args , [default=%default]")
    parser.add_option("", "--spec", type="string", default=None,
                      help="Subdevice of UHD device where appropriate")
    parser.add_option("-A", "--antenna", type="string", default=None,
                      help="select Rx Antenna where appropriate")
    parser.add_option("", "--samp-rate", type="eng_float", default=1e6,
                      help="set sample rate (bandwidth) [default=%default]")
    parser.add_option("-f", "--freq", type="eng_float", default=None,
                      help="set frequency to FREQ", metavar="FREQ")
    parser.add_option("-g", "--gain", type="eng_float", default=None,
                      help="set gain in dB (default is midpoint)")
    parser.add_option( "-s","--output-shorts", action="store_true", default=False,
                      help="output interleaved shorts instead of complex floats")
    parser.add_option("-N", "--nsamples", type="eng_float", default=None,
                      help="number of samples to collect [default=+inf]")
    parser.add_option("-v", "--verbose", action="store_true", default=False,
                      help="verbose output")
    parser.add_option("", "--lo-offset", type="eng_float", default=None,
                      help="set daughterboard LO offset to OFFSET [default=hw default]")
    (options, args) = parser.parse_args ()
    # Exactly one positional argument (the output file) is required.
    if len(args) != 1:
        parser.print_help()
        raise SystemExit, 1
    # Frequency has no sane default — refuse to run without it.
    if options.freq is None:
        parser.print_help()
        sys.stderr.write('You must specify the frequency with -f FREQ\n');
        raise SystemExit, 1
    return (options, args[0])
# Script entry point: build the flowgraph and stream until Ctrl-C
# (or until the head block truncates after --nsamples).
if __name__ == '__main__':
    (options, filename) = get_options()
    tb = rx_cfile_block(options, filename)
    try:
        tb.run()
    except KeyboardInterrupt:
        # Ctrl-C is the normal way to stop an unbounded capture.
        pass
|
# URL routing for the leagueofladders project.
# NOTE(review): django.conf.urls.patterns() and string view references were
# removed in Django 1.10 — this file targets an older Django; confirm the
# pinned version before upgrading.
from django.conf.urls import patterns, include, url
from django.contrib import admin
from leagueofladders import settings
urlpatterns = patterns('',
    # Main app under /l/, namespaced for reverse() lookups.
    url(r'^l/', include('leagueofladders.apps.myleague.urls', namespace='myleague')),
    url(r'^admin/', include(admin.site.urls)),
    # Login view mounted at settings.LOGIN_URL (leading '/' stripped to
    # build the regex); presumably LOGIN_URL starts with '/' — verify.
    url(r'^%s$' % settings.LOGIN_URL[1:], 'django.contrib.auth.views.login'))
|
from .v2015_06_15.operations import StorageAccountsOperations as OperationClass
elif api_version == '2016-01-01':
from .v2016_01_01.operations import StorageAccountsOperations as OperationClass
elif api_version == '2016-12-01':
from .v2016_12_01.operations import StorageAccountsOperations as OperationClass
elif api_version == '2017-06-01':
from .v2017_06_01.operations import StorageAccountsOperations as OperationClass
elif api_version == '2017-10-01':
from .v2017_10_01.operations import StorageAccountsOperations as OperationClass
elif api_version == '2018-02-01':
from .v2018_02_01.operations import StorageAccountsOperations as OperationClass
elif api_version == '2018-03-01-preview':
from .v2018_03_01_preview.operations import StorageAccountsOperations as OperationClass
elif api_version == '2018-07-01':
from .v2018_07_01.operations import StorageAccountsOperations as OperationClass
elif api_version == '2018-11-01':
from .v2018_11_01.operations import StorageAccountsOperations as OperationClass
elif api_version == '2019-04-01':
from .v2019_04_01.operations import StorageAccountsOperations as OperationClass
elif api_version == '2019-06-01':
from .v2019_06_01.operations import StorageAccountsOperations as OperationClass
elif api_version == '2020-08-01-preview':
from .v2020_08_01_preview.operations import StorageAccountsOperations as OperationClass
elif api_version == '2021-01-01':
from .v2021_01_01.operations import StorageAccountsOperations as OperationClass
elif api_version == '2021-02-01':
from .v2021_02_01.operations import StorageAccountsOperations as OperationClass
elif api_version == '2021-04-01':
from .v2021_04_01.operations import StorageAccountsOperations as OperationClass
elif api_version == '2021-06-01':
from .v2021_06_01.operations import StorageAccountsOperations as OperationClass
elif api_version == '2021-08-01':
from .v2021_08_01.operations import StorageAccountsOperations as OperationClass
else:
raise ValueError("API version {} does not have operation group 'storage_accounts'".format(api_version))
return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version)))
    @property
    def table(self):
        """Instance depends on the API version:
        * 2019-06-01: :class:`TableOperations<azure.mgmt.storage.v2019_06_01.operations.TableOperations>`
        * 2020-08-01-preview: :class:`TableOperations<azure.mgmt.storage.v2020_08_01_preview.operations.TableOperations>`
        * 2021-01-01: :class:`TableOperations<azure.mgmt.storage.v2021_01_01.operations.TableOperations>`
        * 2021-02-01: :class:`TableOperations<azure.mgmt.storage.v2021_02_01.operations.TableOperations>`
        * 2021-04-01: :class:`TableOperations<azure.mgmt.storage.v2021_04_01.operations.TableOperations>`
        * 2021-06-01: :class:`TableOperations<azure.mgmt.storage.v2021_06_01.operations.TableOperations>`
        * 2021-08-01: :class:`TableOperations<azure.mgmt.storage.v2021_08_01.operations.TableOperations>`
        """
        # Import lazily so only the module for the configured API version
        # is ever loaded.
        api_version = self._get_api_version('table')
        if api_version == '2019-06-01':
            from .v2019_06_01.operations import TableOperations as OperationClass
        elif api_version == '2020-08-01-preview':
            from .v2020_08_01_preview.operations import TableOperations as OperationClass
        elif api_version == '2021-01-01':
            from .v2021_01_01.operations import TableOperations as OperationClass
        elif api_version == '2021-02-01':
            from .v2021_02_01.operations import TableOperations as OperationClass
        elif api_version == '2021-04-01':
            from .v2021_04_01.operations import TableOperations as OperationClass
        elif api_version == '2021-06-01':
            from .v2021_06_01.operations import TableOperations as OperationClass
        elif api_version == '2021-08-01':
            from .v2021_08_01.operations import TableOperations as OperationClass
        else:
            raise ValueError("API version {} does not have operation group 'table'".format(api_version))
        # Serializer/Deserializer are bound to the version-specific models.
        return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version)))
    @property
    def table_services(self):
        """Instance depends on the API version:
        * 2019-06-01: :class:`TableServicesOperations<azure.mgmt.storage.v2019_06_01.operations.TableServicesOperations>`
        * 2020-08-01-preview: :class:`TableServicesOperations<azure.mgmt.storage.v2020_08_01_preview.operations.TableServicesOperations>`
        * 2021-01-01: :class:`TableServicesOperations<azure.mgmt.storage.v2021_01_01.operations.TableServicesOperations>`
        * 2021-02-01: :class:`TableServicesOperations<azure.mgmt.storage.v2021_02_01.operations.TableServicesOperations>`
        * 2021-04-01: :class:`TableServicesOperations<azure.mgmt.storage.v2021_04_01.operations.TableServicesOperations>`
        * 2021-06-01: :class:`TableServicesOperations<azure.mgmt.storage.v2021_06_01.operations.TableServicesOperations>`
        * 2021-08-01: :class:`TableServicesOperations<azure.mgmt.storage.v2021_08_01.operations.TableServicesOperations>`
        """
        # Same lazy version-dispatch pattern as the other operation groups.
        api_version = self._get_api_version('table_services')
        if api_version == '2019-06-01':
            from .v2019_06_01.operations import TableServicesOperations as OperationClass
        elif api_version == '2020-08-01-preview':
            from .v2020_08_01_preview.operations import TableServicesOperations as OperationClass
        elif api_version == '2021-01-01':
            from .v2021_01_01.operations import TableServicesOperations as OperationClass
        elif api_version == '2021-02-01':
            from .v2021_02_01.operations import TableServicesOperations as OperationClass
        elif api_version == '2021-04-01':
            from .v2021_04_01.operations import TableServicesOperations as OperationClass
        elif api_version == '2021-06-01':
            from .v2021_06_01.operations import TableServicesOperations as OperationClass
        elif api_version == '2021-08-01':
            from .v2021_08_01.operations import TableServicesOperations as OperationClass
        else:
            raise ValueError("API version {} does not have operation group 'table_services'".format(api_version))
        return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version)))
@property
def usage(self):
"""Instance | depends on the API version:
* 2015-06-15: :class:`Us | ageOperations<azure.mgmt.storage.v2015_06_15.operations.UsageOperations>`
* 2016-01-01: :class:`UsageOperations<azure.mgmt.storage.v2016_01_01.operations.UsageOperations>`
* 2016-12-01: :class:`UsageOperations<azure.mgmt.storage.v2016_12_01.operations.UsageOperations>`
* 2017-06-01: :class:`UsageOperations<azure.mgmt.storage.v2017_06_01.operations.UsageOperations>`
* 2017-10-01: :class:`UsageOperations<azure.mgmt.storage.v2017_10_01.operations.UsageOperations>`
* 2018-02-01: :class:`UsageOperations<azure.mgmt.storage.v2018_02_01.operations.UsageOperations>`
"""
api_version = self._get_api_version('usage')
if api_version == '2015-06-15':
from .v2015_06_15.operations import UsageOperations as OperationClass
elif api_version == '2016-01-01':
from .v2016_01_01.operations import UsageOperations as OperationClass
elif api_version == '2016-12-01':
from .v2016_12_01.operations import UsageOperations as OperationClass
elif api_version == '2017-06-01':
from .v2017_06_01.operations import UsageOperations as OperationC |
from . uuid64 imp | ort * | |
#!/usr/bin/env python
# take a large pcap and dump the data into a CSV so it can be analysed by something like R.
#
# This version we want to know what the source IP is, what the protocol is and based on those
# peices of info run a function to grab that data and write a line to a CSV file
#
# Ignore all traffic sourced from the self IP, pass self ip as on arg
#
# Parse HTTP data decoded by tshark into additional content.
#
# Prereqs: pyshark, http://kiminewt.github.io/pyshark/
import pyshark, sys, getopt
from datetime import datetime
# input and output files
ifile=''
ofile=''
selfip=''
# read command line args and bail if not complete
# (all four options are mandatory, so argv must be exactly 9 long:
#  script name + 4 flags + 4 values)
if len(sys.argv) != 9:
    print("Usage: %s -i input.pcap -o output.csv -s 192.168.100.6 -l l4proto " % sys.argv[0])
    exit()
# Use getopt to avoid param order errors
opts, args = getopt.getopt(sys.argv[1:],"i:o:s:l:")
for o, a in opts:
    if o == '-i':
        ifile=a
    elif o == '-o':
        ofile=a
    elif o == '-s':
        selfip=a
    elif o == '-l':
        # NOTE(review): l4proto is only bound here; if -l is absent (but
        # argv still has 9 entries) the main flow below raises NameError.
        l4proto=a
    elif o == '-h':
        print("Usage: %s -i input.pcap -o output.csv -s 192.168.100.6 -l l4proto" % sys.argv[0])
    else:
        print("Usage: %s -i input.pcap -o output.csv -s 192.168.100.6 -l l4proto" % sys.argv[0])
# Functions
def evall4plist(plist):
    """Normalize a protocol-list string into a list of lowercase names.

    Accepts comma-separated ("http,dns"), space-separated ("http dns"),
    or a single protocol name.

    BUGFIX: the original split the module-level global `l4proto` instead
    of its own `plist` parameter, so the argument was silently ignored.
    It also returned `map(...)`, which under Python 3 is a single-use
    iterator — the `in` membership tests in tcpdecode/udpdecode would
    exhaust it after the first packet.  A plain list is returned instead.

    Args:
        plist (str): protocol names, comma- or space-separated.
    Returns:
        list[str]: lowercase protocol names.
    """
    if ',' in plist:
        protolist = plist.split(',')
    elif ' ' in plist:
        protolist = plist.split(' ')
    else:
        protolist = [plist]
    return [proto.lower() for proto in protolist]
def readpcap(pfile):
    """Open *pfile* with pyshark and return the FileCapture iterator."""
    capture = pyshark.FileCapture(pfile)
    return capture
def epochconv(tsstr):
    """Convert an epoch-seconds string to a [local-iso, epoch] pair.

    Returns a two-element list so the CSV can carry both the
    spreadsheet-friendly local timestamp and the raw epoch value.
    """
    as_datetime = datetime.fromtimestamp(float(tsstr))
    return [str(as_datetime).strip(), tsstr.strip()]
def appendcsv(rlist):
    """Append one CSV row built from *rlist* to the global output file."""
    row = ",".join(str(field) for field in rlist)
    with open(ofile, "a") as outputfile:
        outputfile.write(row + "\n")
def tcpdecode(lyrlst, l4plist):
    """Flatten a decoded TCP payload layer into one '#'-joined string.

    Returns None when the layer's protocol name is not in *l4plist*.
    """
    if lyrlst._layer_name.lower() not in l4plist:
        return None
    fields = lyrlst._all_fields
    return "#".join(str(fields[key]) for key in fields)
def udpdecode(lyrlst, l4plist):
    """UDP twin of tcpdecode: '#'-join all decoded fields of the layer,
    or return None when its protocol name is not in *l4plist*."""
    name = lyrlst._layer_name.lower()
    if name in l4plist:
        fields = lyrlst._all_fields
        return "#".join(str(fields[key]) for key in fields)
    return None
def parseTC | P(tpkt):
#print "running par | seTCP"
if len(tpkt.layers) > 3:
# pass to http module
decoded = tcpdecode(tpkt.layers[3],thisproto)
rowlist[8]= str(decoded)
#rowlist[8]= str(tpkt.layers[3]).replace('\n','')
# Complete this section regardless
rowlist[3]= 6
rowlist[4]= str(tpkt.ip.src).strip()
rowlist[5]= int(tpkt.tcp.dstport)
rowlist[6]= int(tpkt.tcp.srcport)
rowlist[7]= str(tpkt.tcp.flags).strip()
tsstr=str(tpkt.frame_info.time_epoch)
dtobj=datetime.fromtimestamp(float(tsstr))
rowlist[0]= dtobj.strftime("%Y%m%d")
rowlist[1]= dtobj.strftime("%H:%M:%S.%f")
rowlist[2]= tsstr
return
def parseICMP(ipkt):
    """Fill the global *rowlist* from an ICMP packet.

    ICMP type/code land in the dst-port(5) and flag(6) columns;
    proto(3) is fixed at 1.
    """
    rowlist[3] = 1
    rowlist[4] = str(ipkt.ip.src).strip()
    rowlist[5] = int(ipkt.icmp.type)
    rowlist[6] = int(ipkt.icmp.code)
    stamp = str(ipkt.frame_info.time_epoch)
    when = datetime.fromtimestamp(float(stamp))
    rowlist[0] = when.strftime("%Y-%m-%d")
    rowlist[1] = when.strftime("%H:%M:%S.%f")
    rowlist[2] = stamp
def parseUDP(upkt):
    """Fill the global *rowlist* from a UDP packet (proto 17), decoding
    the payload layer when one is present and requested."""
    if len(upkt.layers) > 3:
        payload = udpdecode(upkt.layers[3], thisproto)
        rowlist[8] = str(payload)
    rowlist[3] = 17
    rowlist[4] = str(upkt.ip.src).strip()
    rowlist[5] = int(upkt.udp.dstport)
    rowlist[6] = int(upkt.udp.srcport)
    stamp = str(upkt.frame_info.time_epoch)
    when = datetime.fromtimestamp(float(stamp))
    rowlist[0] = when.strftime("%Y-%m-%d")
    rowlist[1] = when.strftime("%H:%M:%S.%f")
    rowlist[2] = stamp
def parseIPother(ipopkt):
    """Fill the global *rowlist* for IP protocols other than TCP/UDP/ICMP.

    Only the raw protocol number, source IP and timestamps are recorded.
    """
    # BUGFIX: this was a Python-2-only `print` statement; the parenthesized
    # call prints the same text and matches the print() calls used
    # elsewhere in this file.
    print("running parseIP Other ")
    rowlist[3] = int(ipopkt.ip.proto)
    rowlist[4] = str(ipopkt.ip.src).strip()
    tsstr = str(ipopkt.frame_info.time_epoch)
    dtobj = datetime.fromtimestamp(float(tsstr))
    rowlist[0] = dtobj.strftime("%Y-%m-%d")
    rowlist[1] = dtobj.strftime("%H:%M:%S.%f")
    rowlist[2] = tsstr
    return
def protorouter(evalpkt):
    """Dispatch a packet to the protocol-specific row builder by its
    IP protocol number (6=TCP, 1=ICMP, 17=UDP, anything else=other)."""
    proto = int(evalpkt.ip.proto)
    if proto == 6:
        parseTCP(evalpkt)
    elif proto == 1:
        parseICMP(evalpkt)
    elif proto == 17:
        parseUDP(evalpkt)
    else:
        parseIPother(evalpkt)
def initrow():
    """Return a fresh 9-column row template.

    Columns: iso-date, iso-time, epoch-tstamp, proto, src-ip,
    dest port/type, flag/code, src port, payload decode.
    The placeholder values are overwritten by the parse* functions.
    """
    # Dropped the redundant str()/int() constructor calls around literals.
    return ['iso-date', 'iso-time', 'epoch-tstamp', 6, '1.2.3.4',
            None, None, None, None]
# Main flow: stream packets, skip traffic sourced from our own IP, and
# emit one CSV row per remaining packet.
thiscap = readpcap(ifile)
wrstat = True  # NOTE(review): never read anywhere visible — dead variable?
# cheat making a global
rowlist=[]
thisproto=evall4plist(l4proto)
for pkt in thiscap:
    # NOTE(review): pkt.ip raises for non-IP frames (e.g. ARP) — presumably
    # the captures are IP-only; confirm or guard with hasattr(pkt, 'ip').
    pktsrc = str(pkt.ip.src)
    if pktsrc != selfip:
        #reinit array
        rowlist = initrow()
        protorouter(pkt)
        appendcsv(rowlist)
|
from chainer import cuda
from chainer import function
from chainer.utils import type_check
def _kern():
    # Element-wise CUDA kernel: y = x when cond >= 0, else slope * x.
    # The separate 'cond' input lets backward reuse the same kernel with
    # either the retained output y (slope >= 0) or the input x as the mask.
    return cuda.elementwise(
        'T cond, T x, T slope', 'T y',
        'y = cond >= 0 ? x : (T)(slope * x)', 'lrelu')
class LeakyReLU(function.Function):
    """Leaky rectifier unit: y = x for x >= 0, y = slope * x otherwise."""
    def __init__(self, slope=0.2):
        # Negative-branch multiplier.
        self.slope = slope
    def check_type_forward(self, in_types):
        # Exactly one floating-point input.
        type_check.expect(in_types.size() == 1)
        x_type, = in_types
        type_check.expect(x_type.dtype.kind == 'f')
    def forward_cpu(self, x):
        y = x[0].copy()
        y[x[0] < 0] *= self.slope
        # When slope >= 0 the output's sign matches the input's, so backward
        # can mask on y alone — drop the input and retain only the output.
        # (slope == 0 edge: y == 0 where x < 0, mask is empty — TODO confirm
        # intended gradient there.)
        if self.slope >= 0:
            self.retain_inputs(())
            self.retain_outputs((0,))
        return y,
    def forward_gpu(self, x):
        # Same kernel as backward; cond = x selects the branch.
        y = _kern()(x[0], x[0], self.slope)
        if self.slope >= 0:
            self.retain_inputs(())
            self.retain_outputs((0,))
        return y,
    def backward_cpu(self, x, gy):
        gx = gy[0].copy()
        if self.slope >= 0:
            # Input was not retained — use the retained output for the mask.
            y = self.output_data
            gx[y[0] < 0] *= self.slope
        else:
            gx[x[0] < 0] *= self.slope
        return gx,
    def backward_gpu(self, x, gy):
        if self.slope >= 0:
            y = self.output_data
            gx = _kern()(y[0], gy[0], self.slope)
        else:
            gx = _kern()(x[0], gy[0], self.slope)
        return gx,
def leaky_relu(x, slope=0.2):
    """Leaky Rectified Linear Unit function.
    This function is expressed as
    .. math:: f(x)=\\max(x, ax),
    where :math:`a` is a configurable slope value.
    Args:
        x (:class:`~chainer.Variable` or :class:`numpy.ndarray` or \
        :class:`cupy.ndarray`):
            Input variable. A :math:`(s_1, s_2, ..., s_N)`-shaped float array.
        slope (float): Slope value :math:`a`.
    Returns:
        ~chainer.Variable: Output variable. A
        :math:`(s_1, s_2, ..., s_N)`-shaped float array.
    .. admonition:: Example
        >>> x = np.array([[-1, 0], [2, -3], [-2, 1]], 'f')
        >>> x
        array([[-1.,  0.],
               [ 2., -3.],
               [-2.,  1.]], dtype=float32)
        >>> F.leaky_relu(x, slope=0.2).data
        array([[-0.2       ,  0.        ],
               [ 2.        , -0.60000002],
               [-0.40000001,  1.        ]], dtype=float32)
    """
    # Function instances are single-use; create a fresh one per call.
    return LeakyReLU(slope)(x)
|
meter
def getTypeNameTuple(s | elf, param):
type = ''
name = ''
for elem in param:
if elem.tag == 'type':
type = noneStr(elem.text)
elif elem.tag == 'name':
name = noneStr(elem.text)
return (type, name)
#
# Retrieve the value of the len tag
def getLen(self, param):
result = None
len = param.attrib.get('len')
if len | and len != 'null-terminated':
# For string arrays, 'len' can look like 'count,null-terminated', indicating that we
# have a null terminated array of strings. We strip the null-terminated from the
# 'len' field and only return the parameter specifying the string count
if 'null-terminated' in len:
result = len.split(',')[0]
else:
result = len
# Spec has now notation for len attributes, using :: instead of platform specific pointer symbol
result = str(result).replace('::', '->')
return result
#
# Generate a VkStructureType based on a structure typename
def genVkStructureType(self, typename):
# Add underscore between lowercase then uppercase
value = re.sub('([a-z0-9])([A-Z])', r'\1_\2', typename)
# Change to uppercase
value = value.upper()
# Add STRUCTURE_TYPE_
return re.sub('VK_', 'VK_STRUCTURE_TYPE_', value)
#
# Struct parameter check generation.
# This is a special case of the <type> tag where the contents are interpreted as a set of
# <member> tags instead of freeform C type declarations. The <member> tags are just like
# <param> tags - they are a declaration of a struct or union member. Only simple member
# declarations are supported (no nested structs etc.)
    def genStruct(self, typeinfo, typeName, alias):
        """Collect per-member metadata for a struct/union <type> and append
        it to self.structMembers for later code generation."""
        OutputGenerator.genStruct(self, typeinfo, typeName, alias)
        members = typeinfo.elem.findall('.//member')
        # Iterate over members once to get length parameters for arrays
        lens = set()
        for member in members:
            len = self.getLen(member)
            if len:
                lens.add(len)
        # Generate member info
        membersInfo = []
        for member in members:
            # Get the member's type and name
            info = self.getTypeNameTuple(member)
            type = info[0]
            name = info[1]
            cdecl = self.makeCParamDecl(member, 0)
            # Process VkStructureType
            if type == 'VkStructureType':
                # Extract the required struct type value from the comments
                # embedded in the original text defining the 'typeinfo' element
                rawXml = etree.tostring(typeinfo.elem).decode('ascii')
                result = re.search(r'VK_STRUCTURE_TYPE_\w+', rawXml)
                if result:
                    value = result.group(0)
                else:
                    # No comment present — synthesize the value from the name.
                    value = self.genVkStructureType(typeName)
                # Store the required type value
                self.structTypes[typeName] = self.StructType(name=name, value=value)
            # Store pointer/array/string info; only pNext members can carry
            # extension structs.
            extstructs = self.registry.validextensionstructs[typeName] if name == 'pNext' else None
            membersInfo.append(self.CommandParam(type=type,
                                                 name=name,
                                                 ispointer=self.paramIsPointer(member),
                                                 isconst=True if 'const' in cdecl else False,
                                                 iscount=True if name in lens else False,
                                                 len=self.getLen(member),
                                                 extstructs=extstructs,
                                                 cdecl=cdecl,
                                                 islocal=False,
                                                 iscreate=False,
                                                 isdestroy=False,
                                                 feature_protect=self.featureExtraProtect))
        self.structMembers.append(self.StructMemberData(name=typeName, members=membersInfo))
#
# Determine if a struct has an NDO as a member or an embedded member
def struct_contains_ndo(self, struct_item):
struct_member_dict = dict(self.structMembers)
struct_members = struct_member_dict[struct_item]
for member in struct_members:
if self.handle_types.IsNonDispatchable(member.type):
return True
# recurse for member structs, guard against infinite recursion
elif member.type in struct_member_dict and member.type != struct_item:
if self.struct_contains_ndo(member.type):
return True
return False
#
# Return list of struct members which contain, or which sub-structures contain
# an NDO in a given list of parameters or members
def getParmeterStructsWithNdos(self, item_list):
struct_list = set()
for item in item_list:
paramtype = item.find('type')
typecategory = self.type_categories[paramtype.text]
if typecategory == 'struct':
if self.struct_contains_ndo(paramtype.text) == True:
struct_list.add(item)
return struct_list
#
# Return list of non-dispatchable objects from a given list of parameters or members
def getNdosInParameterList(self, item_list, create_func):
ndo_list = set()
if create_func == True:
member_list = item_list[0:-1]
else:
member_list = item_list
for item in member_list:
if self.handle_types.IsNonDispatchable(paramtype.text):
ndo_list.add(item)
return ndo_list
#
# Construct list of extension structs containing handles, or extension structs that share a structextends attribute
# WITH an extension struct containing handles. All extension structs in any pNext chain will have to be copied.
# TODO: make this recursive -- structs buried three or more levels deep are not searched for extensions
def GenerateCommandWrapExtensionList(self):
for struct in self.structMembers:
if (len(struct.members) > 1) and struct.members[1].extstructs is not None:
found = False;
for item in struct.members[1].extstructs:
if item != '' and item not in self.pnext_extension_structs:
self.pnext_extension_structs.append(item)
if item != '' and self.struct_contains_ndo(item) == True:
found = True
if found == True:
for item in struct.members[1].extstructs:
if item != '' and item not in self.extension_structs:
self.extension_structs.append(item)
#
# Returns True if a struct may have a pNext chain containing an NDO
def StructWithExtensions(self, struct_type):
if struct_type in self.struct_member_dict:
param_info = self.struct_member_dict[struct_type]
if (len(param_info) > 1) and param_info[1].extstructs is not None:
for item in param_info[1].extstructs:
if item in self.extension_structs:
return True
return False
#
# Generate pNext handling function
def build_extension_processing_func(self):
# Construct helper functions to build and free pNext extension chains
pnext_proc = ''
pnext_proc += 'void WrapPnextChainHandles(ValidationObject *layer_data, const void *pNext) {\n'
pnext_proc += ' void *cur_pnext = const_cast<void *>(pNext);\n'
pnext_proc += ' while (cur_pnext != NULL) {\n'
pnext_proc += ' VkBaseOutStructure *header = reinterpret_cast<VkBaseOutStructure *>(cur_pnext);\n\n'
pnext_proc += ' switch (header->sType) {\n'
for item in self.pnext_extension |
from | envi.archs.msp430.regs import *
checks = [
# SWPB
(
'DEC r15',
{ 'regs': [(REG_R15, 0xaabb)], 'flags': [(SR_N, 0), (SR_Z, 0), (SR_C, 0), (SR_V, 0)], 'code': "8f10", 'data': "" },
{ 'regs': [(REG_R15, 0xbbaa)], 'flags': [(SR_N, 0), (SR_Z, 0), (SR_C, 0), (SR_V, 0)], 'code': "8f10", 'data': "" }
),
] | |
import socket
import nlp
class NLPServer(object):
    """Threaded TCP server that answers NLP questions.

    Python 2 code (print statements, `except Exception, Argument`,
    `thread` module).  One thread is spawned per accepted connection.
    """
    def __init__(self, ip, port):
        # Plain TCP socket bound to (ip, port); listening starts in listen().
        self.sock = socket.socket()
        self.sock.bind((ip, port))
        self.processor = nlp.NLPProcessor()
        print "Established Server"
    def listen(self):
        """Accept connections forever, one handler thread per client."""
        import thread
        self.sock.listen(5)
        print "Started listening at port."
        while True:
            c = self.sock.accept()
            cli_sock, cli_addr = c
            try:
                print 'Got connection from', cli_addr
                thread.start_new_thread(self.manageRequest, (cli_sock,))
            except Exception, Argument:
                # Any failure tears the whole server down.
                print Argument
                self.sock.close()
                quit()
    def manageRequest(self, cli_sock):
        """Read one question, answer it, close the connection.

        NOTE(review): a single recv(8192) assumes the whole question
        arrives in one segment — confirm the client protocol.
        """
        data = cli_sock.recv(8192)
        result = self.processor.processQuestion(data)
        cli_sock.send(str(result))
        cli_sock.close()
# server = NLPServer('127.0.0.1', 3369)
import sys
# Usage: python thisfile.py <bind-ip> <port> — blocks in listen() forever.
server = NLPServer(str(sys.argv[1]), int(sys.argv[2]))
server.listen()
|
from __future__ import division, print_function
import numpy as np
from lmfit.models import VoigtModel
from scipy.signal import argrelmax
import matplotlib.pyplot as plt
def lamda_from_bragg(th, d, n):
    """Solve Bragg's law for the wavelength.

    Parameters
    ----------
    th : float or ndarray
        Scattering angle (two-theta) in radians.
    d : float
        Lattice plane spacing.
    n : int
        Reflection order.

    Returns
    -------
    float or ndarray
        lambda = 2 * d * sin(th / 2) / n
    """
    half_angle = th / 2.
    return 2. * d * np.sin(half_angle) / n
def find_peaks(chi, sides=6, intensity_threshold=0):
    """Find candidate peaks in a 1-D intensity trace.

    Parameters
    ----------
    chi : ndarray
        Detector intensities.
    sides : int
        Half-width (in pixels) of the fit window around each peak; a peak
        must be at least twice the intensity `sides` pixels away.
    intensity_threshold : float
        Minimum absolute peak intensity.

    Returns
    -------
    (left_idxs, right_idxs, peak_centers) : ndarrays of window bounds and
        peak-center indices, clipped to the valid range of `chi`.
    """
    # All local maxima (strictly greater than 20 neighbors on each side).
    preliminary_peaks = argrelmax(chi, order=20)[0]
    # peaks must have at least `sides` pixels of data to work with
    candidates = preliminary_peaks[np.where(preliminary_peaks < len(chi) - sides)]
    # make certain that a peak has a drop off which causes the peak height
    # to be more than twice the height at `sides` pixels away
    criteria = chi[candidates] >= 2 * chi[candidates + sides]
    criteria &= chi[candidates] >= 2 * chi[candidates - sides]
    criteria &= chi[candidates] >= intensity_threshold
    # BUGFIX: index the filtered array; the original indexed the unfiltered
    # `preliminary_peaks` with positions computed over the filtered one,
    # which selects wrong peaks whenever any candidate was dropped.
    peaks = candidates[np.where(criteria)]
    left_idxs = peaks - sides
    right_idxs = peaks + sides
    peak_centers = peaks
    # Clip fit windows to the data range.
    left_idxs[left_idxs < 0] = 0
    right_idxs[right_idxs > len(chi)] = len(chi)
    return left_idxs, right_idxs, peak_centers
def get_wavelength_from_std_tth(x, y, d_spacings, ns, plot=False):
    """
    Return the wavelength from a two theta scan of a standard
    Parameters
    ----------
    x: ndarray
        the two theta coordinates
    y: ndarray
        the detector intensity
    d_spacings: ndarray
        the dspacings of the standard
    ns: ndarray
        the multiplicity of the reflection
    plot: bool
        If true plot some of the intermediate data
    Returns
    -------
    float:
        The average wavelength
    float:
        The standard deviation of the wavelength
    """
    l, r, c = find_peaks(y)
    # Refine each peak center with a Voigt fit over its window.
    lmfit_centers = []
    for lidx, ridx, peak_center in zip(l, r, c):
        mod = VoigtModel()
        pars = mod.guess(y[lidx: ridx],
                         x=x[lidx: ridx])
        out = mod.fit(y[lidx: ridx], pars,
                      x=x[lidx: ridx])
        lmfit_centers.append(out.values['center'])
    lmfit_centers = np.asarray(lmfit_centers)
    if plot:
        plt.plot(x, y)
        plt.plot(x[c], y[c], 'ro')
        plt.show()
    wavelengths = []
    # Split the scan into negative- and positive-angle branches; each is
    # matched against the d-spacings from the center outward.  zip() stops
    # at the shorter sequence, so extra peaks are silently ignored.
    l_peaks = lmfit_centers[lmfit_centers < 0.]
    r_peaks = lmfit_centers[lmfit_centers > 0.]
    for peak_set in [r_peaks, l_peaks[::-1]]:
        for peak_center, d, n in zip(peak_set, d_spacings, ns):
            tth = np.deg2rad(np.abs(peak_center))
            wavelengths.append(lamda_from_bragg(tth, d, n))
    return np.average(wavelengths), np.std(wavelengths)
from bluesky.callbacks import CollectThenCompute
class ComputeWavelength(CollectThenCompute):
    """
    Example
    -------
    >>> cw = ComputeWavelgnth('tth_cal', 'some_detector', d_spacings, ns)
    >>> RE(scan(...), cw)
    """
    CONVERSION_FACTOR = 12.3984  # keV-Angstroms
    def __init__(self, x_name, y_name, d_spacings, ns=None):
        # NOTE(review): _descriptors/_events are initialized directly rather
        # than via super().__init__() — confirm CollectThenCompute does no
        # other setup.
        self._descriptors = []
        self._events = []
        self.x_name = x_name      # event-data key for the two-theta axis
        self.y_name = y_name      # event-data key for the detector signal
        self.d_spacings = d_spacings
        self.wavelength = None    # filled in by compute()
        self.wavelength_std = None
        if ns is None:
            # Default every reflection's multiplicity to 1.
            self.ns = np.ones(self.d_spacings.shape)
        else:
            self.ns = ns
    @property
    def energy(self):
        """Photon energy in keV derived from the computed wavelength,
        or None before compute() has run."""
        if self.wavelength is None:
            return None
        else:
            return self.CONVERSION_FACTOR / self.wavelength
    def compute(self):
        """Gather the collected events and fit the wavelength."""
        x = []
        y = []
        for event in self._events:
            x.append(event['data'][self.x_name])
            y.append(event['data'][self.y_name])
        x = np.array(x)
        y = np.array(y)
        self.wavelength, self.wavelength_std = get_wavelength_from_std_tth(x, y, self.d_spacings, self.ns)
        print('wavelength', self.wavelength, '+-', self.wavelength_std)
        print('energy', self.energy)
|
"""
if __name__ == '__main__':
import os
calibration_file = os.path.join('../../data/LaB6_d.txt')
# step 0 load data
d_spacings = np.loadtxt(calibration_file)
for data_file in ['../../data/Lab6_67p8.chi', '../../data/Lab6_67p6.chi']:
a = np.loadtxt(data_file)
wavechange = []
b = np.linspace(.1, 3, 100)
for dx in b:
x = a[:, 0]
x = np.hstack((np.zeros(1), x))
x = np.hstack((-x[::-2], x))
y = a[:, 1]
y = np.hstack((np.zeros(1), y))
y = np.hstack((y[::-1], y))
x = x[:] + dx
y = y[:]
wavechange.append(get_wavelength_from_std_tth(x, y, d_spacings,
np.ones(d_spacings.shape),
)[0])
plt.plot(b, wavechange)
plt.show()
"""
|
""" This test need a set of pins which can be set as inputs and have no external
pull up or pull down connected.
"""
from machine import Pin
import os
# Select the board-specific set of safe test pins (inputs with no external
# pull resistors) from the machine name reported by the firmware.
mch = os.uname().machine
if 'LaunchPad' in mch:
    pin_map = ['GP24', 'GP12', 'GP14', 'GP15', 'GP16', 'GP17', 'GP28', 'GP8', 'GP6', 'GP30', 'GP31', 'GP3', 'GP0', 'GP4', 'GP5']
    max_af_idx = 15
elif 'WiPy' in mch:
    pin_map = ['GP23', 'GP24', 'GP12', 'GP13', 'GP14', 'GP9', 'GP17', 'GP28', 'GP22', 'GP8', 'GP30', 'GP31', 'GP0', 'GP4', 'GP5']
    max_af_idx = 15
else:
    raise Exception('Board not supported!')
def test_noinit():
    """Pins must be readable right after construction, with no init call."""
    for name in pin_map:
        Pin(name).value()
def test_pin_read(pull):
    # enable the pull resistor on all pins, then read the value
    for p in pin_map:
        pin = Pin(p, mode=Pin.IN, pull=pull)
    # NOTE(review): 'pin' is not rebound in this loop, so the LAST
    # constructed pin is read len(pin_map) times.  If each pin should be
    # read, this wants Pin(p)() — but the expected test output may depend
    # on the current behavior; confirm before changing.
    for p in pin_map:
        print(pin())
def test_pin_af():
    """Exercise every alternate function (up to max_af_idx) on every pin,
    in both push-pull and open-drain ALT modes."""
    for name in pin_map:
        for af in Pin(name).alt_list():
            if af[1] <= max_af_idx:
                Pin(name, mode=Pin.ALT, alt=af[1])
                Pin(name, mode=Pin.ALT_OPEN_DRAIN, alt=af[1])
# test un-initialized pins
test_noinit()
# test with pull-up and pull-down
test_pin_read(Pin.PULL_UP)
test_pin_read(Pin.PULL_DOWN)
# test all constructor combinations
pin = Pin(pin_map[0])
pin = Pin(pin_map[0], mode=Pin.IN)
pin = Pin(pin_map[0], mode=Pin.OUT)
pin = Pin(pin_map[0], mode=Pin.IN, pull=Pin.PULL_DOWN)
pin = Pin(pin_map[0], mode=Pin.IN, pull=Pin.PULL_UP)
pin = Pin(pin_map[0], mode=Pin.OPEN_DRAIN, pull=Pin.PULL_UP)
pin = Pin(pin_map[0], mode=Pin.OUT, pull=Pin.PULL_DOWN)
pin = Pin(pin_map[0], mode=Pin.OUT, pull=None)
pin = Pin(pin_map[0], mode=Pin.OUT, pull=Pin.PULL_UP)
pin = Pin(pin_map[0], mode=Pin.OUT, pull=Pin.PULL_UP, drive=pin.LOW_POWER)
pin = Pin(pin_map[0], mode=Pin.OUT, pull=Pin.PULL_UP, drive=pin.MED_POWER)
pin = Pin(pin_map[0], mode=Pin.OUT, pull=Pin.PULL_UP, drive=pin.HIGH_POWER)
pin = Pin(pin_map[0], mode=Pin.OUT, drive=pin.LOW_POWER)
pin = Pin(pin_map[0], Pin.OUT, Pin.PULL_DOWN)
pin = Pin(pin_map[0], Pin.ALT, Pin.PULL_UP)
pin = Pin(pin_map[0], Pin.ALT_OPEN_DRAIN, Pin.PULL_UP)
test_pin_af() # try the entire af range on all pins
# test pin init and printing
pin = Pin(pin_map[0])
pin.init(mode=Pin.IN)
print(pin)
pin.init(Pin.IN, Pin.PULL_DOWN)
print(pin)
pin.init(mode=Pin.OUT, pull=Pin.PULL_UP, drive=pin.LOW_POWER)
print(pin)
pin.init(mode=Pin.OUT, pull=Pin.PULL_UP, drive=pin.HIGH_POWER)
print(pin)
# test value in OUT mode
pin = Pin(pin_map[0], mode=Pin.OUT)
pin.value(0)
pin.toggle() # test toggle
print(pin())
pin.toggle() # test toggle again
print(pin())
# test different value settings
pin(1)
print(pin.value())
pin(0)
print(pin.value())
pin.value(1)
print(pin())
pin.value(0)
print(pin())
# test all getters and setters
pin = Pin(pin_map[0], mode=Pin.OUT)
# mode
print(pin.mode() == Pin.OUT)
pin.mode(Pin.IN)
print(pin.mode() == Pin.IN)
# pull
pin.pull(None)
print(pin.pull() == None)
pin.pull(Pin.PULL_DOWN)
print(pin.pull() == Pin.PULL_DOWN)
# drive
pin.drive(Pin.MED_POWER)
print(pin.drive() == Pin.MED_POWER)
pin.drive(Pin.HIGH_POWER)
print(pin.drive() == Pin.HIGH_POWER)
# id
print(pin.id() == pin_map[0])
# all the next ones MUST raise; each prints 'Exception' so the expected
# output file can assert the failure happened.
try:
    pin = Pin(pin_map[0], mode=Pin.OUT, pull=Pin.PULL_UP, drive=pin.IN) # incorrect drive value
except Exception:
    print('Exception')
try:
    pin = Pin(pin_map[0], mode=Pin.LOW_POWER, pull=Pin.PULL_UP) # incorrect mode value
except Exception:
    print('Exception')
try:
    pin = Pin(pin_map[0], mode=Pin.IN, pull=Pin.HIGH_POWER) # incorrect pull value
except Exception:
    print('Exception')
try:
    pin = Pin('A0', Pin.OUT, Pin.PULL_DOWN) # incorrect pin id
except Exception:
    print('Exception')
try:
    pin = Pin(pin_map[0], Pin.IN, Pin.PULL_UP, alt=0) # af specified in GPIO mode
except Exception:
    print('Exception')
try:
    pin = Pin(pin_map[0], Pin.OUT, Pin.PULL_UP, alt=7) # af specified in GPIO mode
except Exception:
    print('Exception')
try:
    pin = Pin(pin_map[0], Pin.ALT, Pin.PULL_UP, alt=0) # incorrect af
except Exception:
    print('Exception')
try:
    pin = Pin(pin_map[0], Pin.ALT_OPEN_DRAIN, Pin.PULL_UP, alt=-1) # incorrect af
except Exception:
    print('Exception')
try:
    pin = Pin(pin_map[0], Pin.ALT_OPEN_DRAIN, Pin.PULL_UP, alt=16) # incorrect af
except Exception:
    print('Exception')
try:
    pin.mode(Pin.PULL_UP) # incorrect pin mode
except Exception:
    print('Exception')
try:
    pin.pull(Pin.OUT) # incorrect pull
except Exception:
    print('Exception')
try:
    pin.drive(Pin.IN) # incorrect drive strength
except Exception:
    print('Exception')
try:
    pin.id('ABC') # id cannot be set
except Exception:
    print('Exception')
|
from __future__ import absolute_import
import os
from celery import Celery
# set the default Django settings module for the 'celery' program.
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'sugarcub.settings')
from django.conf import settings # noqa
app = Celery('sugarcub')
# Using a string here means the worker will not have to
# pickle the object when using Windows.
app.config_from_object('django.conf:settings')
# Discover tasks.py modules in every installed Django app.
app.autodiscover_tasks(lambda: settings.INSTALLED_APPS)
@app.task(bind=True)
def debug_task(self):
    """Diagnostic task: print the calling task's request context."""
    print('Request: {0!r}'.format(self.request))
|
ream_name = 'test-stream'
>>> delete_stream(client, stream_name)
Returns:
Tuple (bool, bool, str, dict)
"""
success = False
changed = False
err_msg = ''
results = dict()
stream_found, stream_msg, current_stream = (
find_stream(client, stream_name, check_mode=check_mode)
)
if stream_found:
success, err_msg = (
stream_action(
client, stream_name, action='delete', check_mode=check_mode
)
)
if success:
changed = True
if wait:
success, err_msg, results = (
wait_for_status(
client, stream_name, 'DELETING', wait_timeout,
check_mode=check_mode
)
)
err_msg = 'Stream {0} deleted successfully'.format(stream_name)
if not success:
return success, True, err_msg, results
else:
err_msg = (
'Stream {0} is in the process of being deleted'
.format(stream_name)
)
else:
success = True
changed = False
err_msg = 'Stream {0} does not exist'.format(stream_name)
return success, changed, err_msg, results
def start_stream_encryption(client, stream_name, encryption_type='', key_id='',
                            wait=False, wait_timeout=300, check_mode=False):
    """Start encryption on an Amazon Kinesis Stream.
    Args:
        client (botocore.client.EC2): Boto3 client.
        stream_name (str): The name of the kinesis stream.
    Kwargs:
        encryption_type (str): KMS or NONE
        key_id (str): KMS key GUID or alias
        wait (bool): Wait until Stream is ACTIVE.
            default=False
        wait_timeout (int): How long to wait until this operation is considered failed.
            default=300
        check_mode (bool): This will pass DryRun as one of the parameters to the aws api.
            default=False
    Basic Usage:
        >>> client = boto3.client('kinesis')
        >>> stream_name = 'test-stream'
        >>> key_id = 'alias/aws'
        >>> encryption_type = 'KMS'
        >>> start_stream_encryption(client, stream_name, encryption_type, key_id)
    Returns:
        Tuple (bool, bool, str, dict)
    """
    success = False
    changed = False
    err_msg = ''
    results = dict()
    # NOTE: an unused `params` dict was previously built here; removed.
    stream_found, stream_msg, current_stream = (
        find_stream(client, stream_name, check_mode=check_mode)
    )
    if stream_found:
        success, err_msg = (
            stream_encryption_action(
                client, stream_name, action='start_encryption',
                encryption_type=encryption_type, key_id=key_id,
                check_mode=check_mode
            )
        )
        if success:
            changed = True
            if wait:
                # wait_for_status may flip `success` back to False on timeout.
                success, err_msg, results = (
                    wait_for_status(
                        client, stream_name, 'ACTIVE', wait_timeout,
                        check_mode=check_mode
                    )
                )
                # NOTE(review): this overwrites wait_for_status's message even
                # on failure — kept for behavior compatibility.
                err_msg = 'Kinesis Stream {0} encryption started successfully.'.format(stream_name)
                if not success:
                    return success, True, err_msg, results
            else:
                err_msg = (
                    'Kinesis Stream {0} is in the process of starting encryption.'.format(stream_name)
                )
    else:
        # A missing stream is treated as a no-op success.
        success = True
        changed = False
        err_msg = 'Kinesis Stream {0} does not exist'.format(stream_name)
    return success, changed, err_msg, results
def stop_stream_encryption(client, stream_name, encryption_type='', key_id='',
                           wait=True, wait_timeout=300, check_mode=False):
    """Stop encryption on an Amazon Kinesis Stream.
    Args:
        client (botocore.client.EC2): Boto3 client.
        stream_name (str): The name of the kinesis stream.
    Kwargs:
        encryption_type (str): KMS or NONE
        key_id (str): KMS key GUID or alias
        wait (bool): Wait until Stream is ACTIVE.
            default=True
        wait_timeout (int): How long to wait until this operation is considered failed.
            default=300
        check_mode (bool): This will pass DryRun as one of the parameters to the aws api.
            default=False
    Basic Usage:
        >>> client = boto3.client('kinesis')
        >>> stream_name = 'test-stream'
        >>> stop_stream_encryption(client, stream_name, encryption_type, key_id)
    Returns:
        Tuple (bool, bool, str, dict)
    """
    success = False
    changed = False
    err_msg = ''
    results = dict()
    # NOTE: an unused `params` dict was previously built here; removed.
    stream_found, stream_msg, current_stream = (
        find_stream(client, stream_name, check_mode=check_mode)
    )
    if stream_found:
        if current_stream.get('EncryptionType') == 'KMS':
            success, err_msg = (
                stream_encryption_action(
                    client, stream_name, action='stop_encryption',
                    key_id=key_id, encryption_type=encryption_type,
                    check_mode=check_mode
                )
            )
        elif current_stream.get('EncryptionType') == 'NONE':
            # Already unencrypted: nothing to do.
            success = True
        if success:
            changed = True
            if wait:
                # wait_for_status may flip `success` back to False on timeout.
                success, err_msg, results = (
                    wait_for_status(
                        client, stream_name, 'ACTIVE', wait_timeout,
                        check_mode=check_mode
                    )
                )
                err_msg = 'Kinesis Stream {0} encryption stopped successfully.'.format(stream_name)
                if not success:
                    return success, True, err_msg, results
            else:
                err_msg = (
                    'Stream {0} is in the process of stopping encryption.'.format(stream_name)
                )
    else:
        # A missing stream is treated as a no-op success.
        success = True
        changed = False
        err_msg = 'Stream {0} does not exist.'.format(stream_name)
    return success, changed, err_msg, results
def main():
argument_spec = ec2_argument_spec()
argument_spec.update(
dict(
name=dict(required=True),
shards=dict(default=None, required=False, type='int'),
retention_period=dict(default=None, required=False, type='int'),
tags=dict(default=None, required=False, type='dict', aliases=['resource_tags']),
wait=dict(default=True, required=False, type='bool'),
wait_timeout=dict(default=300, required=False, type='int'),
state=dict(default='present', choices=['present', 'absent']),
encryption_type=dict(required=False, choices=['NONE', 'KMS']),
key_id=dict(required=False, type='str'),
encryption_state=dict(required=False, choices=['enabled', 'disabled']),
)
)
module = AnsibleModule(
argument_spec=argument_spec,
supports_check_mode=True,
)
retention_period = module.params.get('retention_period')
stream_name = module.params.get('name')
shards = module.params.get('shards')
state = module.params.get('state')
tags = module.params.get('tags')
wait = module.params.get('wait')
wait_timeout = module.params.get('wait_timeout')
encryption_type = module.params.get('encryption_type')
key_id = module.params.get('key_id')
encryption_state = module.params.get('encryption_state')
if state == 'present' and not shards:
module.fail_json(msg='Shards is required when state == present.')
if retention_period:
if retention_period < 24:
module.fail_json(msg='Retention period can not be less than 24 hours.')
if not HAS_BOTO3:
module.fail_json(msg='boto3 is required.')
check_mode = module.check_mode
try:
region, ec2_url, aws_connect_kwargs = (
get_aws_connection_info(module, boto3=True)
)
client = (
boto3_conn(
module, conn_type='client', resource='kinesis',
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.6 on 2017-03-27 16:37
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Re-declare every product ImageField with its upload path and label."""

    dependencies = [
        ('shop', '0017_auto_20170327_1934'),
    ]

    # One AlterField per image column.  The tovar_img suffixes are listed in
    # the original (lexicographic) order so the operation list is unchanged.
    operations = [
        migrations.AlterField(
            model_name='tovar',
            name='tovar_image',
            field=models.ImageField(blank=True, upload_to='media/products/%Y/%m/%d/', verbose_name='Изображение товара'),
        ),
    ] + [
        migrations.AlterField(
            model_name='tovar_img',
            name='tovar_image' + suffix,
            field=models.ImageField(blank=True, upload_to='media/products/%Y/%m/%d/', verbose_name='Изображение товара' + suffix),
        )
        for suffix in ('', '1', '10', '11', '2', '3', '4', '5', '6', '7', '8', '9')
    ]
|
"""
Copyright 2015 Brocade Communications Systems, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIO | NS OF ANY KIND, either express or implied.
See the License for the specif | ic language governing permissions and
limitations under the License.
"""
import logging
import time
import json
import sys
from clicrud.device.generic import generic
def read(queue, finq, ranonceq, **kwargs):
    """Run CLI commands against a device and publish their output on queues.

    Args:
        queue: receives a dict mapping each command to its string output.
        finq: receives 'error' on transport failure, 'completed_run' on success.
        ranonceq: receives True once a read pass has executed.
        **kwargs: transport options plus any of:
            command: a single command string.
            commands: an iterable of command strings.
            listofcommands: path to a file with one command per line.
            delay: seconds to sleep between commands.
            fileoutput/fileformat: write results to a file ('json' or 'string').
            setup: object whose _splash flag enables progress output.
    """
    _command_list = []
    _kwargs = kwargs
    _output_dict = {}
    # Exact option names that feed the command list (the original used a
    # literal string here, so membership was an accidental substring test).
    _cli_input = ('command', 'commands', 'listofcommands')
    for key in _kwargs:
        if key in _cli_input:
            if key == 'command':
                _command_list.append(_kwargs.get(key))
            if key == 'commands':
                for entry in _kwargs.get('commands'):
                    _command_list.append(entry)
            if key == 'listofcommands':
                try:
                    # BUG FIX: str.translate(None, '\r\n') is Python-2-only;
                    # use replace() and close the file deterministically.
                    with open(_kwargs.get('listofcommands'), 'r') as command_file:
                        for line in command_file:
                            _command_list.append(
                                line.replace('\r', '').replace('\n', ''))
                except (IOError, OSError):
                    logging.error("Could not open 'listofcommands' file")
    # Build transport
    _transport = generic(**_kwargs)
    if _transport.err:
        finq.put('error')
        _transport.close()
        return
    # Call each command and keep the string output keyed by command.
    for index, command in enumerate(_command_list):
        _output_dict[command] = _transport.read(command, return_type='string')
        if _kwargs['setup']._splash is True:
            # BUG FIX: the counter printed "[total/current]"; show the
            # current command number first.
            sys.stdout.write("\r[%4s/%4s] Complete - " % (index + 1,
                             len(_command_list)) +
                             time.strftime("%d-%m-%Y") +
                             time.strftime("-%H:%M:%S"))
            sys.stdout.flush()
        if "delay" in _kwargs:
            time.sleep(_kwargs['delay'])
    # Signal that at least one pass has run, then publish the results.
    ranonceq.put(True)
    queue.put(_output_dict)
    # If we need to output to a file, let's do that.
    if "fileoutput" in _kwargs:
        # Filename is hostname + timestamp.
        _filename = _transport.hostname
        _filename += time.strftime("%d-%m-%Y-") + time.strftime("-%H-%M-%S")
        try:
            with open(_filename, 'w') as f:
                if _kwargs.get('fileformat') == 'json':
                    f.write(json.dumps(_output_dict))
                if _kwargs.get('fileformat') == 'string':
                    for command in _command_list:
                        f.write("COMMAND: " + command + "--------------------\r\n")
                        f.write(_output_dict.get(command) + "\r\n\r\n")
        except Exception:
            # Best-effort file dump; a failure here must not kill the run.
            logging.error("Could not open/create file for output of commands")
    finq.put('completed_run')
    _transport.close()
|
from __future__ import division, absolute_import, print_function
from flask import current_app, Blueprint, jsonify, url_for, request
from idb.helpers.cors import crossdomain
from .common import json_error, idbmodel, logger
this_version = Blueprint(__name__,__name__)
def format_list_item(t, uuid, etag, modified, version, parent):
    """Build the list-view dict for one record.

    Args:
        t: plural type name from the route (e.g. "records").
        uuid/etag/modified/version/parent: row fields from idbmodel.
    Returns:
        dict with idigbio:* keys; links use the singular type name.
    """
    links = {}
    if t in current_app.config["PARENT_MAP"] and parent is not None:
        # Link to the parent item under its singular type name.
        # ("".join(s[:-1]) in the original was just s[:-1] for a string.)
        parent_type = current_app.config["PARENT_MAP"][t]
        links[parent_type[:-1]] = url_for(".item", t=parent_type, u=parent, _external=True)
    # Self link under the singular form of t.
    links[t[:-1]] = url_for(".item", t=t, u=uuid, _external=True)
    return {
        "idigbio:uuid": uuid,
        "idigbio:etag": etag,
        "idigbio:dateModified": modified.isoformat(),
        "idigbio:version": version,
        "idigbio:links": links,
    }
def format_item(t, uuid, etag, modified, version, parent, data, siblings, ids):
    """Build the full item dict: list fields plus data, sibling links, ids."""
    r = format_list_item(t, uuid, etag, modified, version, parent)
    # The item view drops its self link ("".join(s[:-1]) was just s[:-1]).
    del r["idigbio:links"][t[:-1]]
    # Remaining links become single-element lists so siblings can merge in.
    for link_name in r["idigbio:links"]:
        r["idigbio:links"][link_name] = [r["idigbio:links"][link_name]]
    sibling_links = {}
    if siblings is not None:
        for k in siblings:
            sibling_links[k] = [
                url_for(".item", t=k, u=i, _external=True) for i in siblings[k]
            ]
    r["idigbio:data"] = data
    r["idigbio:links"].update(sibling_links)
    r["idigbio:recordIds"] = ids
    return r
@this_version.route('/<string:t>/<uuid:u>/<string:st>', methods=['GET','OPTIONS'])
@crossdomain(origin="*")
def subitem(t, u, st):
    """List children of type *st* under item *u* of type *t* (paged)."""
    supported = current_app.config["SUPPORTED_TYPES"]
    if t not in supported or st not in supported:
        return json_error(404)
    raw_limit = request.args.get("limit")
    limit = int(raw_limit) if raw_limit is not None else 100
    raw_offset = request.args.get("offset")
    offset = int(raw_offset) if raw_offset is not None else 0
    items = [
        format_list_item(
            st,
            child["uuid"],
            child["etag"],
            child["modified"],
            child["version"],
            child["parent"],
        )
        for child in idbmodel.get_children_list(
            str(u), "".join(st[:-1]), limit=limit, offset=offset)
    ]
    return jsonify({
        "idigbio:items": items,
        "idigbio:itemCount": idbmodel.get_children_count(str(u), "".join(st[:-1])),
    })
@this_version.route('/<string:t>/<uuid:u>', methods=['GET','OPTIONS'])
@crossdomain(origin="*")
def item(t, u):
    """Return a single item of type *t*, optionally at ?version=N."""
    if t not in current_app.config["SUPPORTED_TYPES"]:
        return json_error(404)
    record = idbmodel.get_item(str(u), version=request.args.get("version"))
    # Guard clauses replace the original nested if/else pyramid.
    if record is None:
        return json_error(404)
    if record["data"] is None:
        return json_error(500)
    if record["type"] + "s" != t:
        # Stored type must match the plural route type.
        return json_error(404)
    return jsonify(format_item(
        t,
        record["uuid"],
        record["etag"],
        record["modified"],
        record["version"],
        record["parent"],
        record["data"],
        record["siblings"],
        record["recordids"],
    ))
@this_version.route('/<string:t>', methods=['GET','OPTIONS'])
@crossdomain(origin="*")
def list(t):
    """List items of type *t* with limit/offset paging.

    NOTE: the name shadows the builtin `list`, but it is also the Flask
    endpoint (url_for(".list")) and therefore must not be renamed.
    """
    if t not in current_app.config["SUPPORTED_TYPES"]:
        return json_error(404)
    raw_limit = request.args.get("limit")
    raw_offset = request.args.get("offset")
    limit = 100 if raw_limit is None else int(raw_limit)
    offset = 0 if raw_offset is None else int(raw_offset)
    items = [
        format_list_item(
            t,
            row["uuid"],
            row["etag"],
            row["modified"],
            row["version"],
            row["parent"],
        )
        for row in idbmodel.get_type_list(
            "".join(t[:-1]), limit=limit, offset=offset)
    ]
    return jsonify({
        "idigbio:items": items,
        "idigbio:itemCount": idbmodel.get_type_count("".join(t[:-1])),
    })
@this_version.route('/', methods=['GET','OPTIONS'])
@crossdomain(origin="*")
def index():
    """Map each supported type to its list-endpoint URL."""
    return jsonify({
        t: url_for(".list", t=t, _external=True)
        for t in current_app.config["SUPPORTED_TYPES"]
    })
|
device_pid, tid)
if show_memory:
self._chrome_trace.emit_obj_create('Tensor', output_name,
start_time, tensors_pid, tid,
tensor.object_id)
self._emit_tensor_snapshot(tensor, end_time - 1, tensors_pid, tid,
output)
  def _show_compute(self, show_dataflow):
    """Visualize the computation activity.

    Emits one op event per node in every device's step stats and, when
    show_dataflow is set, flow arrows from each tensor's producer to its
    consumers (skipped when both run on the same pid/tid).

    Args:
      show_dataflow: If True, emit flow start/end events for graph edges.
    """
    for dev_stats in self._step_stats.dev_stats:
      device_name = dev_stats.device
      device_pid = self._device_pids[device_name]
      is_gputrace = self._is_gputrace_device(device_name)
      for node_stats in dev_stats.node_stats:
        tid = node_stats.thread_id
        start_time = node_stats.all_start_micros
        end_time = node_stats.all_start_micros + node_stats.all_end_rel_micros
        self._emit_op(node_stats, device_pid, is_gputrace)
        # No tensor ref/flow bookkeeping for GPU-trace lanes or RecvTensor.
        if is_gputrace or node_stats.node_name == 'RecvTensor':
          continue
        _, _, inputs = self._parse_op_label(node_stats.timeline_label)
        for input_name in inputs:
          if input_name not in self._tensors:
            # This can happen when partitioning has inserted a Send/Recv.
            # We remove the numeric suffix so that the dataflow appears to
            # come from the original node. Ideally, the StepStats would
            # contain logging for the Send and Recv nodes.
            index = input_name.rfind('/_')
            if index > 0:
              input_name = input_name[:index]
          if input_name in self._tensors:
            tensor = self._tensors[input_name]
            # Mark the tensor as referenced for this op's lifetime.
            tensor.add_ref(start_time)
            tensor.add_unref(end_time - 1)
            if show_dataflow:
              # We use a different flow ID for every graph edge.
              create_time, create_pid, create_tid = self._flow_starts[
                  input_name]
              # Don't add flows when producer and consumer ops are on the same
              # pid/tid since the horizontal arrows clutter the visualization.
              if create_pid != device_pid or create_tid != tid:
                flow_id = self._alloc_flow_id()
                self._chrome_trace.emit_flow_start(input_name, create_time,
                                                   create_pid, create_tid,
                                                   flow_id)
                self._chrome_trace.emit_flow_end(input_name, start_time,
                                                 device_pid, tid, flow_id)
          else:
            logging.vlog(1, 'Can\'t find tensor %s - removed by CSE?',
                         input_name)
def _show_memory_counters(self):
"""Produce a counter series for each memory allocator."""
# Iterate over all tensor trackers to build a list of allocations and
# frees for each allocator. Then sort the lists and emit a cumulative
# counter series for each allocator.
allocations = {}
for name in self._tensors:
tensor = self._tensors[name]
self._chrome_trace.emit_obj_delete('Tensor', name, tensor.last_unref,
tensor.pid, 0, tensor.object_id)
allocator = tensor.allocator
if allocator not in allocations:
allocations[allocator] = []
num_bytes = tensor.num_bytes
allocations[allocator].append((tensor.create_time, num_bytes, name))
allocations[allocator].append((tensor.last_unref, -num_bytes, name))
alloc_maxes = {}
# Generate a counter series showing total allocations for each allocator.
for allocator in allocations:
alloc_list = allocations[allocator]
alloc_list.sort()
total_bytes = 0
alloc_tensor_set = set()
alloc_maxes[allocator] = AllocationMaximum(
timestamp=0, num_bytes=0, tensors=set())
for time, num_bytes, name in sorted(
alloc_list, key=lambda allocation: allocation[0]):
total_bytes += num_bytes
if num_bytes < 0:
alloc_tensor_set.discard(name)
else:
alloc_tensor_set.add(name)
if total_bytes > alloc_maxes[allocator].num_bytes:
alloc_maxes[allocator] = AllocationMaximum(
timestamp=time,
num_bytes=total_bytes,
tensors=copy.deepcopy(alloc_tensor_set))
self._chrome_trace.emit_counter('Memory', allocator,
self._allocators_pid, time, allocator,
total_bytes)
self._allocator_maximums = alloc_maxes
  def _preprocess_op_time(self, op_time):
    """Update the start and end time of ops in step stats.
    Args:
      op_time: How the execution time of op is shown in timeline. Possible values
        are "schedule", "gpu" and "all". "schedule" will show op from the time it
        is scheduled to the end of the scheduling. Notice by the end of its
        scheduling its async kernels may not start yet. It is shown using the
        default value from step_stats. "gpu" will show op with the execution time
        of its kernels on GPU. "all" will show op from the start of its scheduling
        to the end of its last kernel.
    """
    if op_time == 'schedule':
      # No rewriting needed: use the step stats exactly as recorded.
      self._step_stats = self._origin_step_stats
      return
    # Work on a copy so the original stats survive repeated analyses.
    self._step_stats = copy.deepcopy(self._origin_step_stats)
    # Separate job task and gpu tracer stream
    stream_all_stats = []
    job_stats = []
    for stats in self._step_stats.dev_stats:
      if '/stream:all' in stats.device:
        stream_all_stats.append(stats)
      elif '/job' in stats.device:
        job_stats.append(stats)
    # Record the start time of the first kernel and the end time of
    # the last gpu kernel for all ops.
    op_gpu_start = {}
    op_gpu_end = {}
    for stats in stream_all_stats:
      for kernel in stats.node_stats:
        name, _ = self._parse_kernel_label(kernel.timeline_label,
                                           kernel.node_name)
        start = kernel.all_start_micros
        end = kernel.all_start_micros + kernel.all_end_rel_micros
        if name in op_gpu_start:
          op_gpu_start[name] = min(op_gpu_start[name], start)
          op_gpu_end[name] = max(op_gpu_end[name], end)
        else:
          op_gpu_start[name] = start
          op_gpu_end[name] = end
    # Update the start and end time of each op according to the op_time
    for stats in job_stats:
      for op in stats.node_stats:
        if op.node_name in op_gpu_start:
          # End is the later of the last GPU kernel and the CPU schedule end.
          end = max(op_gpu_end[op.node_name],
                    op.all_start_micros + op.all_end_rel_micros)
          if op_time == 'gpu':
            # 'gpu' also moves the start to the first kernel launch;
            # 'all' keeps the scheduling start and only extends the end.
            op.all_start_micros = op_gpu_start[op.node_name]
          op.all_end_rel_micros = end - op.all_start_micros
def analyze_step_stats(self,
show_dataflow=True,
show_memory=True,
op_time='schedule'):
"""Analyze the step stats and format it into Chrome Trace Format.
Args:
show_dataflow: (Optional.) If True, add flow events to the trace
connecting producers and consumers of tensors.
show_memory: (Optional.) If True, add object snapshot events to the trace
showing the sizes and lifetimes of tensors.
op_time: (Optional.) How the execution time of op is shown in timeline.
Possible values are "schedule", "gpu" and "all". "schedule" will show op
from the time it is scheduled to the end of the scheduling. Notice by
the end of its scheduling its async kernels may not start yet. It is
shown using the default value from step_stats. "gpu" will show op with
the execution time of its kernels on GPU. "all" will show op from the
start of its scheduling to the end of its last kernel.
Returns:
A 'StepStatsAnalysis' object.
"""
self._preprocess_op_time(op_time)
self._allocate_pids()
self._assign_lanes()
self._analyze_tensors(show_memory)
self._show_compute(show_dataflow)
if show_memory:
self._show_memory_counters()
return StepStatsAnalysis(
chrome_trace=self._chrome_trace,
|
import blinker
from concurrency.fields import IntegerVersionField
from django.contrib.auth.models import User
from django.db import models
from django.db.models.signals import post_save
from django.dispatch.dispatcher import receiver
from game.apps.core.models.planet.models import Planet
from game.utils.models import ResourceContainer
from game.utils.polymorph import PolymorphicBase
from jsonfield import JSONField
import game.apps.core.signals
class Building(PolymorphicBase):
    """Base model for all planet buildings (polymorphic via PolymorphicBase)."""
    level = models.IntegerField(default=1)
    # BUG FIX: default={} is a single shared dict across all instances;
    # a callable default gives each row its own fresh dict.
    data = JSONField(default=dict)
    planet = models.ForeignKey(Planet, related_name="buildings")
    version = IntegerVersionField()
    user = models.ForeignKey(User, related_name="buildings")

    def save(self, *args, **kwargs):
        """Notify per-building signal listeners, then persist."""
        signal = blinker.signal(game.apps.core.signals.building % self.id)
        signal.send(self, building=self)
        super().save(*args, **kwargs)

    class Meta:
        app_label = 'core'
        ordering = ('id', )
class Citadel(Building):
    class Meta:
        proxy = True

    def process_turn(self):
        """Deposit this user's per-turn resource income into their warehouse."""
        # BUG FIX: Building has no `owner` attribute (the FK is `user`), and
        # filter() returns a queryset — add_resource/save need an instance,
        # so use get().  Assumes exactly one Warehouse per user — TODO confirm.
        warehouse = self.user.buildings.get(type='Warehouse')
        warehouse.add_resource("Aluminium", 10)
        warehouse.add_resource("Steel", 10)
        warehouse.save()
class Warehouse(Building, ResourceContainer):
    """Proxy building that stores resources (behavior from ResourceContainer)."""
    class Meta:
        proxy = True
class Terminal(Building):
    """Proxy building type; no extra behavior beyond Building."""
    class Meta:
        proxy = True
class Mine(Building):
    """Proxy building type; no extra behavior beyond Building."""
    class Meta:
        proxy = True
#TODO use Django ready()
@receiver(post_save, sender=User, dispatch_uid="create_default_buildings")
def create_default_buildings(sender, **kwargs):
    """Give every newly created user a starting Citadel and Warehouse."""
    if not kwargs['created']:
        return
    for building_cls in (Citadel, Warehouse):
        # TODO don't hard-code planet id
        building_cls.objects.create(user=kwargs['instance'], planet_id=1)
def get_base(self):
    """Return this user's 'Base' building (exposed below as User.base)."""
    #TODO cache
    return self.buildings.get(type="Base")
# Monkey-patch a convenience property onto Django's User model.
User.base = property(get_base)
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'slidersd.ui'
#
# Created: Tue Mar 17 23:31:52 2015
# by: PyQt4 UI code generator 4.10.4
#
# WARNING! All changes made in this file will be lost!
from PyQt4 import QtCore, QtGui
# Compatibility shims emitted by pyuic4 for different PyQt4 API versions.
try:
    _fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
    # API v2 / Python 3: QString is gone, plain str is already unicode.
    def _fromUtf8(s):
        return s
try:
    _encoding = QtGui.QApplication.UnicodeUTF8
    def _translate(context, text, disambig):
        return QtGui.QApplication.translate(context, text, disambig, _encoding)
except AttributeError:
    # Newer PyQt4: translate() no longer takes an encoding argument.
    def _translate(context, text, disambig):
        return QtGui.QApplication.translate(context, text, disambig)
class Ui_Form(object):
    """pyuic4-generated form: a label/value readout row above a slider
    flanked by min/max labels.  Regenerate from slidersd.ui rather than
    hand-editing where possible."""
    def setupUi(self, Form):
        Form.setObjectName(_fromUtf8("Form"))
        Form.resize(392, 74)
        self.verticalLayout = QtGui.QVBoxLayout(Form)
        self.verticalLayout.setObjectName(_fromUtf8("verticalLayout"))
        # Top row: name label plus the live value display.
        self.horizontalLayout_2 = QtGui.QHBoxLayout()
        self.horizontalLayout_2.setObjectName(_fromUtf8("horizontalLayout_2"))
        self.label = QtGui.QLabel(Form)
        self.label.setObjectName(_fromUtf8("label"))
        self.horizontalLayout_2.addWidget(self.label)
        self.value = QtGui.QLabel(Form)
        self.value.setText(_fromUtf8(""))
        self.value.setObjectName(_fromUtf8("value"))
        self.horizontalLayout_2.addWidget(self.value)
        self.verticalLayout.addLayout(self.horizontalLayout_2)
        # Bottom row: min label, slider, max label.
        self.horizontalLayout = QtGui.QHBoxLayout()
        self.horizontalLayout.setSizeConstraint(QtGui.QLayout.SetFixedSize)
        self.horizontalLayout.setObjectName(_fromUtf8("horizontalLayout"))
        spacerItem = QtGui.QSpacerItem(10, 20, QtGui.QSizePolicy.Fixed, QtGui.QSizePolicy.Minimum)
        self.horizontalLayout.addItem(spacerItem)
        self.label_3 = QtGui.QLabel(Form)
        self.label_3.setObjectName(_fromUtf8("label_3"))
        self.horizontalLayout.addWidget(self.label_3)
        self.horizontalSlider = QtGui.QSlider(Form)
        self.horizontalSlider.setOrientation(QtCore.Qt.Horizontal)
        self.horizontalSlider.setObjectName(_fromUtf8("horizontalSlider"))
        self.horizontalLayout.addWidget(self.horizontalSlider)
        self.label_4 = QtGui.QLabel(Form)
        self.label_4.setObjectName(_fromUtf8("label_4"))
        self.horizontalLayout.addWidget(self.label_4)
        self.verticalLayout.addLayout(self.horizontalLayout)
        self.retranslateUi(Form)
        QtCore.QMetaObject.connectSlotsByName(Form)

    def retranslateUi(self, Form):
        # All user-visible strings live here so they can be re-translated.
        Form.setWindowTitle(_translate("Form", "Form", None))
        self.label.setText(_translate("Form", "a = ", None))
        self.label_3.setText(_translate("Form", "-10", None))
        self.label_4.setText(_translate("Form", "10", None))
|
"""
Platform for a Generic Modbus Thermostat.
This uses a setpoint and process
value within the controller, so both the current temperature register and the
target temperature register need to be configured.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/climate.modbus/
"""
import logging
import struct
import voluptuous as vol
from homeassistant.const | import (
CONF_NAME, CONF_SLAVE, ATTR_TEMPERATURE)
from homeassistant.components.climate import (
ClimateDevice, PLATFORM_SCHEMA, SUPPORT_TARGET_TEMPERATURE)
from homeassistant.components import | modbus
import homeassistant.helpers.config_validation as cv
DEPENDENCIES = ['modbus']
# Parameters not defined by homeassistant.const
CONF_TARGET_TEMP = 'target_temp_register'
CONF_CURRENT_TEMP = 'current_temp_register'
CONF_DATA_TYPE = 'data_type'
CONF_COUNT = 'data_count'
CONF_PRECISION = 'precision'
# Supported register payload interpretations.
DATA_TYPE_INT = 'int'
DATA_TYPE_UINT = 'uint'
DATA_TYPE_FLOAT = 'float'
# data_count is the number of 16-bit registers that encode one value.
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
    vol.Required(CONF_NAME): cv.string,
    vol.Required(CONF_SLAVE): cv.positive_int,
    vol.Required(CONF_TARGET_TEMP): cv.positive_int,
    vol.Required(CONF_CURRENT_TEMP): cv.positive_int,
    vol.Optional(CONF_DATA_TYPE, default=DATA_TYPE_FLOAT):
        vol.In([DATA_TYPE_INT, DATA_TYPE_UINT, DATA_TYPE_FLOAT]),
    vol.Optional(CONF_COUNT, default=2): cv.positive_int,
    vol.Optional(CONF_PRECISION, default=1): cv.positive_int
})
_LOGGER = logging.getLogger(__name__)
SUPPORT_FLAGS = SUPPORT_TARGET_TEMPERATURE
def setup_platform(hass, config, add_entities, discovery_info=None):
    """Set up the Modbus Thermostat Platform."""
    thermostat = ModbusThermostat(
        config.get(CONF_NAME),
        config.get(CONF_SLAVE),
        config.get(CONF_TARGET_TEMP),
        config.get(CONF_CURRENT_TEMP),
        config.get(CONF_DATA_TYPE),
        config.get(CONF_COUNT),
        config.get(CONF_PRECISION),
    )
    # True -> poll update() immediately after adding.
    add_entities([thermostat], True)
class ModbusThermostat(ClimateDevice):
    """Representation of a Modbus Thermostat."""

    def __init__(self, name, modbus_slave, target_temp_register,
                 current_temp_register, data_type, count, precision):
        """Initialize the unit.

        count is the number of 16-bit registers that encode one value;
        data_type selects how those registers are interpreted.
        """
        self._name = name
        self._slave = modbus_slave
        self._target_temperature_register = target_temp_register
        self._current_temperature_register = current_temp_register
        self._target_temperature = None
        self._current_temperature = None
        self._data_type = data_type
        self._count = int(count)
        self._precision = precision
        # Big-endian struct format chosen by data type and register count.
        data_types = {DATA_TYPE_INT: {1: 'h', 2: 'i', 4: 'q'},
                      DATA_TYPE_UINT: {1: 'H', 2: 'I', 4: 'Q'},
                      DATA_TYPE_FLOAT: {1: 'e', 2: 'f', 4: 'd'}}
        self._structure = '>{}'.format(data_types[self._data_type]
                                       [self._count])

    @property
    def supported_features(self):
        """Return the list of supported features."""
        return SUPPORT_FLAGS

    def update(self):
        """Update Target & Current Temperature."""
        self._target_temperature = self.read_register(
            self._target_temperature_register)
        self._current_temperature = self.read_register(
            self._current_temperature_register)

    @property
    def name(self):
        """Return the name of the climate device."""
        return self._name

    @property
    def current_temperature(self):
        """Return the current temperature."""
        return self._current_temperature

    @property
    def target_temperature(self):
        """Return the temperature we try to reach."""
        return self._target_temperature

    def set_temperature(self, **kwargs):
        """Set new target temperature."""
        target_temperature = kwargs.get(ATTR_TEMPERATURE)
        if target_temperature is None:
            return
        byte_string = struct.pack(self._structure, target_temperature)
        # NOTE(review): only the first 16-bit word of the packed value is
        # written — verify against the device's register layout.
        register_value = struct.unpack('>h', byte_string[0:2])[0]
        try:
            self.write_register(self._target_temperature_register,
                                register_value)
        except AttributeError as ex:
            _LOGGER.error(ex)

    def read_register(self, register):
        """Read a holding register; return a rounded float or None on failure."""
        try:
            result = modbus.HUB.read_holding_registers(self._slave, register,
                                                       self._count)
        except AttributeError as ex:
            # BUG FIX: the original logged and fell through, then crashed on
            # the undefined `result`; bail out instead.
            _LOGGER.error(ex)
            return None
        byte_string = b''.join(
            [x.to_bytes(2, byteorder='big') for x in result.registers])
        val = struct.unpack(self._structure, byte_string)[0]
        # BUG FIX: `format()` returned a string; temperatures must be numeric.
        return round(val, self._precision)

    def write_register(self, register, value):
        """Write register using the modbus hub slave."""
        modbus.HUB.write_registers(self._slave, register, [value, 0])
|
# -*- coding: utf-8 -*-
from greek_stemmer.c | losets.word_exceptions impo | rt exceptions
def test_word_exceptions():
    """Smoke test: the stemmer's exception table must be a dict."""
    assert isinstance(exceptions, dict)
|
calculations
'''
def __init__(self, files):
super(VaspParser, self).__init__(files)
self.settings = {}
parser = OutcarParser()
# Find the outcar file
def _find_file(name):
"""Find a filename that contains a certain string"""
name = name.upper()
my_file = None
for f in self._files:
if os.path.basename(f).upper().startswith(name):
if my_file is not None:
raise InvalidIngesterException('Found more than one {} file'.format(name))
my_file = f
return my_file
self.outcar = _find_file('OUTCAR')
if self.outcar is None:
raise InvalidIngesterException('OUTCAR not found!')
with open(self.outcar, "r") as fr:
for parsed_line in parser.parse(fr.readlines()):
for k, v in parsed_line.items():
if k in self.settings:
self.settings[k].append(v)
else:
self.settings[k] = [v]
# Find the DOSCAR, EIGENVAL, and INCAR files
# None of these are required so we do not throw exceptions
self.incar = _find_file('INCAR')
self.poscar = _find_file('POSCAR')
self.doscar = _find_file('DOSCAR')
self.eignval = _find_file('EIGNVAL')
def get_name(self): return "VASP"
def get_output_structure(self):
self.atoms = read_vasp_out(self.outcar)
return self.atoms
def get_outcar(se | lf):
raw_path = self.outcar
if raw_path[0:2] == "./":
raw_path = raw_path[2:]
retu | rn Property(files=[FileReference(
relative_path=raw_path
)])
def get_incar(self):
if self.incar is None: return None
raw_path = self.incar
if raw_path[0:2] == "./":
raw_path = raw_path[2:]
return Value(files=[FileReference(
relative_path=raw_path
)])
def get_poscar(self):
if self.poscar is None: return None
raw_path = self.poscar
if raw_path[0:2] == "./":
raw_path = raw_path[2:]
return Value(files=[FileReference(
relative_path=raw_path
)])
def get_cutoff_energy(self):
# Open up the OUTCAR
with open(self.outcar, 'r') as fp:
# Look for ENCUT
for line in fp:
if "ENCUT" in line:
words = line.split()
return Value(scalars=[Scalar(value=float(words[2]))], units=words[3])
# Error handling: ENCUT not found
raise Exception('ENCUT not found')
@Value_if_true
def uses_SOC(self):
# Open up the OUTCAR
with open(self.outcar) as fp:
#look for LSORBIT
for line in fp:
if "LSORBIT" in line:
words = line.split()
return words[2] == 'T'
# Error handling: LSORBIT not found
raise Exception('LSORBIT not found')
@Value_if_true
def is_relaxed(self):
# Open up the OUTCAR
with open(self.outcar) as fp:
# Look for NSW
for line in fp:
if "NSW" in line:
words = line.split()
return int(words[2]) != 0
# Error handling: NSW not found
raise Exception('NSW not found')
def get_xc_functional(self):
# Open up the OUTCAR
with open(self.outcar) as fp:
# Look for TITEL
for line in fp:
if "TITEL" in line:
words = line.split()
return Value(scalars=[Scalar(value=words[2])])
def get_pp_name(self):
# Open up the OUTCAR
with open(self.outcar) as fp:
#initialize empty list to store pseudopotentials
pp = []
# Look for TITEL
for line in fp:
if "TITEL" in line:
words = line.split()
pp.append(words[3])
return Value(vectors=[[Scalar(value=x) for x in pp]])
    def get_KPPRA(self):
        """Return ions * k-points as a Value (weight-summed when reduced).

        NOTE(review): NI and NIRK are only bound if their marker lines occur
        in the OUTCAR; otherwise this raises UnboundLocalError — presumably
        those lines always exist in a well-formed OUTCAR. TODO confirm.
        """
        # Open up the OUTCAR
        with open(self.outcar) as fp:
            #store the number of atoms and number of irreducible K-points
            for line in fp:
                if "number of ions NIONS =" in line:
                    words = line.split()
                    NI = int(words[11])  # number of ions
                elif "k-points NKPTS =" in line:
                    words = line.split()
                    NIRK = float(words[3])  # number of irreducible k-points
            #check if the number of k-points was reduced by VASP if so, sum all the k-points weight
            # (the file is opened a second time just to test for the marker word)
            if "irreducible" in open(self.outcar).read():
                fp.seek(0)  # rescan from the top for the weight table
                for line in fp:
                    #sum all the k-points weight
                    if "Coordinates Weight" in line:
                        NK=0; counter = 0
                        # the weight is the 4th column of the next NIRK rows
                        for line in fp:
                            if counter == NIRK:
                                break
                            NK += float(line.split()[3])
                            counter += 1
                        return Value(scalars=[Scalar(value=NI*NK)])
            #if k-points were not reduced KPPRA equals the number of atoms * number of irreducible k-points
            else:
                return Value(scalars=[Scalar(value=NI*NIRK)])
def _is_converged(self):
# Follows the procedure used by qmpy, but without reading the whole file into memory
# Source: https://github.com/wolverton-research-group/qmpy/blob/master/qmpy/analysis/vasp/calculation.py
with open(self.outcar) as fp:
# Part 1: Determine the NELM
nelm = None
for line in fp:
if line.startswith(" NELM ="):
nelm = int(line.split()[2][:-1])
break
# If we don't find it, tell the user
if nelm is None:
raise Exception('NELM not found. Cannot tell if this result is converged')
# Now, loop through the file. What we want to know is whether the last ionic
# step of this file terminates because it converges or because we hit NELM
re_iter = re.compile('([0-9]+)\( *([0-9]+)\)')
converged = False
for line in fp:
# Count the ionic steps
if 'Iteration' in line:
ionic, electronic = map(int, re_iter.findall(line)[0])
# If the loop is finished, mark the number of electronic steps
if 'aborting loop' in line:
converged = electronic < nelm
return converged
def get_total_energy(self):
with open(self.outcar) as fp:
last_energy = None
for line in fp:
if line.startswith(' free energy TOTEN'):
last_energy = float(line.split()[4])
if last_energy is None:
return None
return Property(scalars=[Scalar(value=last_energy)], units='eV')
def get_version_number(self):
# Open up the OUTCAR
with open(self.outcar) as fp:
#look for vasp
for line in fp:
if "vasp" in line:
words = line.split()
return (words[0].strip('vasp.'))
break
# Error handling: vasp not found
raise Exception('vasp not found')
def get_U_settings(self):
#Open up the OUTCAR
with open(self.outcar) as fp:
#Check if U is used
if "LDAU" in open(self.outcar).read():
U_param = {}
atoms = []
#get the list of pseupotential used
for line in fp:
if "TITEL" in line:
atoms.append(line.split()[3])
#Get th |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Reading, writing, and describing memory.
"""
import gdb
import pwndbg.compat
import pwndbg.typeinfo
PAGE_SIZE = 0x1000
PAGE_MASK = ~(PAGE_SIZE-1)
MMAP_MIN_ADDR = 0x8000
def read(addr, count, partial=False):
    """Read *count* bytes of inferior memory at *addr* as a bytearray.

    When *partial* is True and the full read fails, return the readable
    prefix instead of raising.

    Raises:
        gdb.error: when the read fails and *partial* is False.
    """
    result = ''
    try:
        result = gdb.selected_inferior().read_memory(addr, count)
    except gdb.error as e:
        if not partial:
            raise
        # gdb's error message ends with the first inaccessible address;
        # retry the read up to that point.
        # NOTE(review): relies on `e.message` (Python 2 style attribute) and
        # on gdb's message format — confirm on the gdb versions in use.
        stop_addr = int(e.message.split()[-1], 0)
        if stop_addr != addr:
            return read(addr, stop_addr-addr)
        # QEMU will return the start address as the failed
        # read address. Try moving back a few pages at a time.
        stop_addr = addr + count
        # Move the stop address down to the previous page boundary
        stop_addr &= PAGE_MASK
        while stop_addr > addr:
            result = read(addr, stop_addr-addr)
            if result:
                return result
            # Move down by another page
            stop_addr -= PAGE_SIZE
    # gdb returns a memoryview-like buffer; convert explicitly on Python 3.
    if pwndbg.compat.python3:
        result = result.tobytes()
    return bytearray(result)
def readtype(gdb_type, addr):
    """Dereference *addr* as a pointer to *gdb_type*; return the value as int."""
    pointer = gdb.Value(addr).cast(gdb_type.pointer())
    return int(pointer.dereference())
def write(addr, data):
    """Write *data* into inferior memory at *addr*."""
    inferior = gdb.selected_inferior()
    inferior.write_memory(addr, data)
def peek(address):
    """Return one byte read from *address*, or None when unreadable.

    FIX: the original used a bare `except:`, which also swallowed
    unrelated errors (including KeyboardInterrupt); catch gdb errors only.
    """
    try:
        return read(address, 1)
    except gdb.error:
        pass
    return None
def poke(address):
    """Probe writability of *address* by rewriting its current byte.

    Returns True on success, False when the byte cannot be read or written.
    FIX: bare `except:` narrowed to gdb errors, as in peek().
    """
    c = peek(address)
    if c is None:
        return False
    try:
        write(address, c)
    except gdb.error:
        return False
    return True
# Fixed-width typed readers: each dereferences `addr` as the named C type
# (see readtype above).
def byte(addr): return readtype(pwndbg.typeinfo.uchar, addr)
def uchar(addr): return readtype(pwndbg.typeinfo.uchar, addr)
def ushort(addr): return readtype(pwndbg.typeinfo.ushort, addr)
def uint(addr): return readtype(pwndbg.typeinfo.uint, addr)
def pvoid(addr): return readtype(pwndbg.typeinfo.pvoid, addr)
def u8(addr): return readtype(pwndbg.typeinfo.uint8, addr)
def u16(addr): return readtype(pwndbg.typeinfo.uint16, addr)
def u32(addr): return readtype(pwndbg.typeinfo.uint32, addr)
def u64(addr): return readtype(pwndbg.typeinfo.uint64, addr)
def u(addr, size):
    # Dispatch on bit width; raises KeyError for sizes other than 8/16/32/64.
    return {
        8: u8,
        16: u16,
        32: u32,
        64: u64
    }[size](addr)
def s8(addr): return readtype(pwndbg.typeinfo.int8, addr)
def s16(addr): return readtype(pwndbg.typeinfo.int16, addr)
def s32(addr): return readtype(pwndbg.typeinfo.int32, addr)
def s64(addr): return readtype(pwndbg.typeinfo.int64, addr)
# NOTE(review): duplicate definition — `write` is already defined above with
# an identical body; this redefinition silently shadows it and one of the two
# can be deleted.
def write(addr, data):
    gdb.selected_inferior().write_memory(addr, data)
# Typed dereference: *(type*)addr as a gdb.Value (cf. windbg's poi()).
# NOTE: the parameter shadows the `type` builtin; kept for API compatibility.
def poi(type, addr): return gdb.Value(addr).cast(type.pointer()).dereference()
def round_down(address, align):
    """Round *address* down to the nearest multiple of *align* (a power of two)."""
    mask = align - 1
    return address & ~mask
def round_up(address, align):
    """Round *address* up to the nearest multiple of *align* (a power of two)."""
    mask = align - 1
    return (address + mask) & ~mask
align_down = round_down
align_up = round_up
# Page-granularity helpers built on the rounding primitives above.
def page_align(address): return round_down(address, PAGE_SIZE)
def page_size_align(address): return round_up(address, PAGE_SIZE)
def page_offset(address): return (address & (PAGE_SIZE-1))
# Sanity checks on the rounding helpers, evaluated at import time.
assert round_down(0xdeadbeef, 0x1000) == 0xdeadb000
assert round_up(0xdeadbeef, 0x1000) == 0xdeadc000
def find_upper_boundary(addr, max_pages=1024):
    """Probe upward from *addr*, one page at a time, until a read faults.

    Returns the address of the first unreadable page, or the address after
    *max_pages* pages when everything was readable.

    FIX: removed leftover debug output (a stray `import sys` and a
    `sys.stdout.write(hex(addr))` per probed page) that the symmetric
    find_lower_boundary() does not have.
    """
    addr = pwndbg.memory.page_align(int(addr))
    try:
        for i in range(max_pages):
            pwndbg.memory.read(addr, 1)
            addr += pwndbg.memory.PAGE_SIZE
    except gdb.MemoryError:
        pass
    return addr
def find_lower_boundary(addr, max_pages=1024):
    """Probe downward from *addr*, one page at a time, until a read faults.

    Returns the address of the first unreadable page below, or the start
    page minus *max_pages* pages when everything was readable.
    """
    probe = pwndbg.memory.page_align(int(addr))
    try:
        for _ in range(max_pages):
            pwndbg.memory.read(probe, 1)
            probe -= pwndbg.memory.PAGE_SIZE
    except gdb.MemoryError:
        pass
    return probe
class Page(object):
    """
    Represents the address space and page permissions of at least
    one page of memory.
    """
    vaddr = 0 #: Starting virtual address
    memsz = 0 #: Size of the address space, in bytes
    flags = 0 #: Flags set by the ELF file, see PF_X, PF_R, PF_W
    offset = 0 #: Offset into the original ELF file that the data is loaded from
    objfile = '' #: Path to the ELF on disk

    def __init__(self, start, size, flags, offset, objfile=''):
        self.vaddr = start
        self.memsz = size
        self.flags = flags
        self.offset = offset
        self.objfile = objfile
        # if self.rwx:
        #     self.flags = self.flags ^ 1

    # ELF-style permission bits: PF_R=4, PF_W=2, PF_X=1.
    @property
    def read(self):
        """True when the mapping is readable (PF_R)."""
        return (self.flags & 4) != 0

    @property
    def write(self):
        """True when the mapping is writable (PF_W)."""
        return (self.flags & 2) != 0

    @property
    def execute(self):
        """True when the mapping is executable (PF_X)."""
        return (self.flags & 1) != 0

    @property
    def rw(self):
        """Readable and writable."""
        return self.read and self.write

    @property
    def rwx(self):
        """Readable, writable and executable."""
        return self.rw and self.execute

    @property
    def permstr(self):
        """`rwxp`-style permission string as in /proc/<pid>/maps."""
        bits = ((4, 'r'), (2, 'w'), (1, 'x'))
        text = ''.join(ch if self.flags & bit else '-' for bit, ch in bits)
        return text + 'p'

    def __str__(self):
        width = 2 + 2 * pwndbg.typeinfo.ptrsize
        template = "%#{w}x %#{w}x %s %8x %-6x %s".format(w=width)
        return template % (self.vaddr,
                           self.vaddr + self.memsz,
                           self.permstr,
                           self.memsz,
                           self.offset,
                           self.objfile or '')

    def __repr__(self):
        return "%s(%r)" % (self.__class__.__name__, self.__str__())

    def __contains__(self, a):
        # address lies inside [vaddr, vaddr + memsz)
        return 0 <= (a - self.vaddr) < self.memsz

    def __eq__(self, other):
        # compares by start address; `other` may be a Page or a plain int
        return self.vaddr == getattr(other, 'vaddr', other)

    def __lt__(self, other):
        return self.vaddr < getattr(other, 'vaddr', other)

    def __hash__(self):
        return hash((self.vaddr, self.memsz, self.flags, self.offset, self.objfile))
|
>
# Jeremy Katz <katzj@redhat.com>
# Chris Lumens <clumens@redhat.com>
# Paul Nasrat <pnasrat@redhat.com>
# Erik Troan <ewt@rpath.com>
# Matt Wilson <msw@rpath.com>
#
import os
import sys
import stat
from glob import glob
from tempfile import mkstemp
import threading
f | rom pyanaconda.bootloader import get_bootloader
from pyanaconda import constants
from pyanaconda import iutil
from pyanaconda.iutil import open # pylint: disable=redefined-builtin
from pyanaconda import addons
import logging
log = logging.getLogger("anaconda")
stdoutLog = logging.getLogger("anaconda.stdout")
class Anaconda(object):
    def __init__(self):
        """Initialize global installer state to pristine defaults.

        Heavyweight members (bootloader, install class, payload, storage,
        interface) are created lazily by the properties below.
        """
        from pyanaconda import desktop
        self._bootloader = None
        self.canReIPL = False
        self.desktop = desktop.Desktop()
        self.dir = None
        self.displayMode = None            # 'g' (GUI) or 't'/'c' (text/cmdline); see initInterface()
        self.gui_startup_failed = False
        self.id = None
        self._instClass = None
        self._intf = None
        self.isHeadless = False
        self.ksdata = None                 # kickstart data, assigned by the caller
        self.mediaDevice = None
        self.methodstr = None              # install-source spec, e.g. "hd:..."; see `protected`
        self.opts = None
        self._payload = None
        self.proxy = None
        self.proxyUsername = None
        self.proxyPassword = None
        self.reIPLMessage = None
        self.rescue_mount = True
        self.rootParts = None
        self.stage2 = None
        self._storage = None
        self.updateSrc = None
        self.mehConfig = None
        # *sigh* we still need to be able to write this out
        self.xdriver = None
        # Data for inhibiting the screensaver
        self.dbus_session_connection = None
        self.dbus_inhibit_id = None
        # This is used to synchronize Gtk.main calls between the graphical
        # interface and error dialogs. Whoever gets to their initialization code
        # first will lock gui_initializing
        self.gui_initialized = threading.Lock()
@property
def bootloader(self):
if not self._bootloader:
self._bootloader = get_bootloader()
return self._bootloader
@property
def instClass(self):
if not self._instClass:
from pyanaconda.installclass import DefaultInstall
self._instClass = DefaultInstall()
return self._instClass
    def _getInterface(self):
        """Getter for the `intf` property."""
        return self._intf
    def _setInterface(self, v):
        # "lambda cannot contain assignment"
        self._intf = v
    def _delInterface(self):
        """Deleter for the `intf` property."""
        del self._intf
    # User-interface object (GUI or TUI); populated by initInterface().
    intf = property(_getInterface, _setInterface, _delInterface)
    @property
    def payload(self):
        """The packaging payload, constructed lazily on first access.

        Try to find the packaging payload class. First try the install
        class. If it doesn't give us one, fall back to a default chosen
        from kickstart data / runtime flags:
        ostree setup > live CD > live image kickstart > DNF.
        """
        if not self._payload:
            klass = self.instClass.getBackend()
            if not klass:
                from pyanaconda.flags import flags
                if self.ksdata.ostreesetup.seen:
                    from pyanaconda.packaging.rpmostreepayload import RPMOSTreePayload
                    klass = RPMOSTreePayload
                elif flags.livecdInstall:
                    from pyanaconda.packaging.livepayload import LiveImagePayload
                    klass = LiveImagePayload
                elif self.ksdata.method.method == "liveimg":
                    from pyanaconda.packaging.livepayload import LiveImageKSPayload
                    klass = LiveImageKSPayload
                else:
                    from pyanaconda.packaging.dnfpayload import DNFPayload
                    klass = DNFPayload
            self._payload = klass(self.ksdata)
        return self._payload
@property
def protected(self):
specs = []
if os.path.exists("/run/initramfs/livedev") and \
stat.S_ISBLK(os.stat("/run/initramfs/livedev")[stat.ST_MODE]):
specs.append(os.readlink("/run/initramfs/livedev"))
if self.methodstr and self.methodstr.startswith("hd:"):
specs.append(self.methodstr[3:].split(":", 3)[0])
if self.stage2 and self.stage2.startswith("hd:"):
specs.append(self.stage2[3:].split(":", 3)[0])
# zRAM swap devices need to be protected
for zram_dev in glob("/dev/zram*"):
specs.append(zram_dev)
return specs
    @property
    def storage(self):
        """The blivet storage object, created and configured on first access."""
        if not self._storage:
            import blivet
            import blivet.arch
            import gi
            gi.require_version("BlockDev", "1.0")
            from gi.repository import BlockDev as blockdev
            self._storage = blivet.Blivet(ksdata=self.ksdata)
            # honor the install class's preferred default filesystem, if any
            if self.instClass.defaultFS:
                self._storage.setDefaultFSType(self.instClass.defaultFS)
            if blivet.arch.isS390():
                # want to make sure s390 plugin is loaded
                if "s390" not in blockdev.get_available_plugin_names():
                    plugin = blockdev.PluginSpec()
                    plugin.name = blockdev.Plugin.S390
                    plugin.so_name = None
                    blockdev.reinit([plugin], reload=False)
        return self._storage
    def dumpState(self):
        """Dump a reverse traceback plus per-thread stacks to /tmp.

        Writes a unique anaconda-tb-* file and appends the same text to
        /tmp/anaconda-tb-all.log.
        """
        from meh import ExceptionInfo
        from meh.dump import ReverseExceptionDump
        from inspect import stack as _stack
        from traceback import format_stack
        # Skip the frames for dumpState and the signal handler.
        stack = _stack()[2:]
        stack.reverse()
        exn = ReverseExceptionDump(ExceptionInfo(None, None, stack),
                                   self.mehConfig)
        # gather up info on the running threads
        threads = "\nThreads\n-------\n"
        # Every call to sys._current_frames() returns a new dict, so it is not
        # modified when threads are created or destroyed. Iterating over it is
        # thread safe.
        for thread_id, frame in sys._current_frames().items():
            threads += "\nThread %s\n" % (thread_id,)
            threads += "".join(format_stack(frame))
        # dump to a unique file
        (fd, filename) = mkstemp(prefix="anaconda-tb-", dir="/tmp")
        dump_text = exn.traceback_and_object_dump(self)
        dump_text += threads
        dump_text_bytes = dump_text.encode("utf-8")
        iutil.eintr_retry_call(os.write, fd, dump_text_bytes)
        iutil.eintr_ignore(os.close, fd)
        # append to a given file
        with open("/tmp/anaconda-tb-all.log", "a+") as f:
            f.write("--- traceback: %s ---\n" % filename)
            f.write(dump_text + "\n")
def initInterface(self, addon_paths=None):
if self._intf:
raise RuntimeError("Second attempt to initialize the InstallInterface")
if self.displayMode == 'g':
from pyanaconda.ui.gui import GraphicalUserInterface
# Run the GUI in non-fullscreen mode, so live installs can still
# use the window manager
self._intf = GraphicalUserInterface(self.storage, self.payload,
self.instClass, gui_lock=self.gui_initialized,
fullscreen=False)
# needs to be refreshed now we know if gui or tui will take place
addon_paths = addons.collect_addon_paths(constants.ADDON_PATHS,
ui_subdir="gui")
elif self.displayMode in ['t', 'c']: # text and command line are the same
from pyanaconda.ui.tui import TextUserInterface
self._intf = TextUserInterface(self.storage, self.payload,
self.instClass)
# needs to be refreshed now we know if gui or tui will take place
addon_paths = addons.collect_addon_paths(constants.ADDON_PATHS,
ui_subdir="tui")
else:
raise RuntimeError("Unsupported displayMode: %s" % self.displayMode)
if addon_paths:
self._intf.update_paths(addon_paths)
def writeXdriver(self, root=None):
# this should go away at some point, b |
"""
WSGI config for Footer project.
This module contains the WSGI application used by Django's development server
and any production WSGI deployments. It should expose a module-level variable
named ``application``. Django's ``runserver`` and ``runfcgi`` commands discover
this application via the ``WSGI_APPLICATI | ON`` setting.
Usually you will have the standard Django WSGI application here, but it also
might make sense to replace the whole Django WSGI application with a custom one
that later delegates to the Django one. For example, you could introduce WSGI
middleware here, or combine a Django application with an application of another
framework.
"""
import os
from dja | ngo.core.wsgi import get_wsgi_application
# Import Sentry's WSGI middleware only when the production settings module is
# active (raven is only installed/configured there).
if os.environ.get('DJANGO_SETTINGS_MODULE') == 'config.settings.production':
    from raven.contrib.django.raven_compat.middleware.wsgi import Sentry
# We defer to a DJANGO_SETTINGS_MODULE already in the environment. This breaks
# if running multiple sites in the same mod_wsgi process. To fix this, use
# mod_wsgi daemon mode with each site in its own daemon process, or use
# os.environ["DJANGO_SETTINGS_MODULE"] = "config.settings.production"
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "config.settings.production")
# This application object is used by any WSGI server configured to use this
# file. This includes Django's development server, if the WSGI_APPLICATION
# setting points here.
application = get_wsgi_application()
# In production, wrap the app so unhandled exceptions are reported to Sentry.
if os.environ.get('DJANGO_SETTINGS_MODULE') == 'config.settings.production':
    application = Sentry(application)
# Apply WSGI middleware here.
# from helloworld.wsgi import HelloWorldApplication
# application = HelloWorldApplication(application)
|
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import ast
import os.path
import platform
import re
import sys
class Config(object):
    '''A Config contains a dictionary that specifies a build configuration.'''

    # Valid values for target_os:
    OS_ANDROID = 'android'
    OS_CHROMEOS = 'chromeos'
    OS_LINUX = 'linux'
    OS_MAC = 'mac'
    OS_WINDOWS = 'windows'

    # Valid values for target_cpu:
    ARCH_X86 = 'x86'
    ARCH_X64 = 'x64'
    ARCH_ARM = 'arm'

    def __init__(self, build_dir=None, target_os=None, target_cpu=None,
                 is_debug=None, is_verbose=None, apk_name='MojoRunner.apk'):
        '''Function arguments take precedence over GN args and default values.'''
        assert target_os in (None, Config.OS_ANDROID, Config.OS_CHROMEOS,
                             Config.OS_LINUX, Config.OS_MAC, Config.OS_WINDOWS)
        assert target_cpu in (None, Config.ARCH_X86, Config.ARCH_X64,
                              Config.ARCH_ARM)
        assert is_debug in (None, True, False)
        assert is_verbose in (None, True, False)
        # Defaults first; _ParseGNArgs() may override them, then the explicit
        # constructor arguments win.
        self.values = {
            'build_dir': build_dir,
            'target_os': self.GetHostOS(),
            'target_cpu': self.GetHostCPU(),
            'is_debug': True,
            'is_verbose': True,
            'dcheck_always_on': False,
            'is_asan': False,
            'apk_name': apk_name,
        }
        self._ParseGNArgs()
        if target_os is not None:
            self.values['target_os'] = target_os
        if target_cpu is not None:
            self.values['target_cpu'] = target_cpu
        if is_debug is not None:
            self.values['is_debug'] = is_debug
        if is_verbose is not None:
            self.values['is_verbose'] = is_verbose

    @staticmethod
    def GetHostOS():
        '''Return the Config.OS_* constant for the host.

        BUG FIX: Python 3 reports sys.platform == 'linux' (Python 2 used
        'linux2'), so test with startswith to support both.
        '''
        if sys.platform.startswith('linux'):
            return Config.OS_LINUX
        if sys.platform == 'darwin':
            return Config.OS_MAC
        if sys.platform == 'win32':
            return Config.OS_WINDOWS
        raise NotImplementedError('Unsupported host OS')

    @staticmethod
    def GetHostCPU():
        '''Return the Config.ARCH_* constant for the host CPU.'''
        # Derived from //native_client/pynacl/platform.py
        machine = platform.machine()
        if machine in ('x86', 'x86-32', 'x86_32', 'x8632', 'i386', 'i686',
                       'ia32', '32'):
            return Config.ARCH_X86
        if machine in ('x86-64', 'amd64', 'AMD64', 'x86_64', 'x8664', '64'):
            return Config.ARCH_X64
        if machine.startswith('arm'):
            return Config.ARCH_ARM
        raise Exception('Cannot identify CPU arch: %s' % machine)

    def _ParseGNArgs(self):
        '''Parse the gn config file from the build directory, if it exists.'''
        TRANSLATIONS = {'true': 'True', 'false': 'False'}
        if self.values['build_dir'] is None:
            return
        gn_file = os.path.join(self.values['build_dir'], 'args.gn')
        if not os.path.isfile(gn_file):
            return
        with open(gn_file, 'r') as f:
            for line in f:
                # FIX: raw strings for the regexes — '\s'/'\w' in a plain
                # literal is an invalid escape sequence (SyntaxWarning on
                # modern Python).
                line = re.sub(r'\s*#.*', '', line)
                result = re.match(r'^\s*(\w+)\s*=\s*(.*)\s*$', line)
                if result:
                    key = result.group(1)
                    value = result.group(2)
                    # Map GN's true/false onto Python literals before eval.
                    self.values[key] = ast.literal_eval(
                        TRANSLATIONS.get(value, value))

    # Getters for standard fields ------------------------------------------------
    @property
    def build_dir(self):
        '''Build directory path.'''
        return self.values['build_dir']

    @property
    def target_os(self):
        '''OS of the build/test target.'''
        return self.values['target_os']

    @property
    def target_cpu(self):
        '''CPU arch of the build/test target.'''
        return self.values['target_cpu']

    @property
    def is_debug(self):
        '''Is Debug build?'''
        return self.values['is_debug']

    @property
    def is_verbose(self):
        '''Should print additional logging information?'''
        return self.values['is_verbose']

    @property
    def dcheck_always_on(self):
        '''DCHECK is fatal even in release builds'''
        return self.values['dcheck_always_on']

    @property
    def is_asan(self):
        '''Is ASAN build?'''
        return self.values['is_asan']

    @property
    def apk_name(self):
        '''Name of the APK file to run'''
        return self.values['apk_name']
|
import datetime
import psycopg2
from collections import deque
from tornado.stack_context import wrap
from tornado.ioloop import IOLoop
from tornado.concurrent import return_future
class WrapCursor:
    """Callback-based wrapper around a psycopg2 cursor owned by an AsyncPG db.

    Mirrors a handful of cursor attributes (rowcount, query, ...) after each
    execute(), and swaps in a dedicated connection's cursor for the duration
    of a begin()/commit()-or-rollback() transaction.
    """
    def __init__(self,db,cur):
        self._db = db          # owning AsyncPG instance
        self._cur = cur        # currently active psycopg2 cursor
        self._oldcur = None    # saved shared cursor while a transaction is open
        self._init_member()
    def __iter__(self):
        # NOTE(review): returns the raw cursor, which is itself an iterator.
        return self._cur
    @return_future
    def execute(self,sql,param = None,callback = None):
        """Execute *sql* asynchronously; resolves when the query completes."""
        def _cb(err = None):
            if err != None:
                raise err
            # refresh the mirrored cursor attributes now the statement ran
            self.arraysize = self._cur.arraysize
            self.itersize = self._cur.itersize
            self.rowcount = self._cur.rowcount
            self.rownumber = self._cur.rownumber
            self.lastrowid = self._cur.lastrowid
            self.query = self._cur.query
            self.statusmessage = self._cur.statusmessage
            callback()
        self._db._execute(self._cur,sql,param,_cb)
    @return_future
    def begin(self,callback):
        """Open a transaction (BEGIN) on a dedicated connection."""
        def _cur_cb(cur,err = None):
            if err != None:
                self._db._end_tran(cur)
                raise err
            self._db._execute(cur,'BEGIN;',callback =
                lambda err : _exec_cb(cur,err))
        def _exec_cb(cur,err = None):
            if err != None:
                self._db._end_tran(cur)
                raise err
            # success: remember the shared cursor and switch to the
            # transaction connection's cursor
            self._oldcur = self._cur
            self._cur = cur
            callback()
        assert(self._oldcur == None)  # nested transactions are not supported
        self._db._begin_tran(_cur_cb)
    @return_future
    def commit(self,callback):
        """COMMIT the open transaction and restore the previous cursor."""
        def _cb(err = None):
            if err != None:
                raise err
            self._db._end_tran(self._cur)
            self._cur = self._oldcur
            self._oldcur = None
            callback()
        assert(self._oldcur != None)  # begin() must have completed first
        self._db._execute(self._cur,'COMMIT;',callback = _cb)
    @return_future
    def rollback(self,callback):
        """ROLLBACK the open transaction and restore the previous cursor."""
        def _cb(err = None):
            if err != None:
                raise err
            self._db._end_tran(self._cur)
            self._cur = self._oldcur
            self._oldcur = None
            callback()
        assert(self._oldcur != None)  # begin() must have completed first
        self._db._execute(self._cur,'ROLLBACK;',callback = _cb)
    def _init_member(self):
        """Forward fetch/scroll helpers and zero the mirrored attributes."""
        self.fetchone = self._cur.fetchone
        self.fetchmany = self._cur.fetchmany
        self.fetchall = self._cur.fetchall
        self.scroll = self._cur.scroll
        self.cast = self._cur.cast
        self.tzinfo_factory = self._cur.tzinfo_factory
        self.arraysize = 0
        self.itersize = 0
        self.rowcount = 0
        self.rownumber = 0
        self.lastrowid = None
        self.query = ''
        self.statusmessage = ''
class AsyncPG:
    def __init__(self,dbname,dbuser,dbpasswd,
            dbschema = 'public',dbtz = '+0'):
        """Create the connection pools and start dispatching.

        Two pools are opened: INITCONN_SHARE shared connections for plain
        queries and INITCONN_FREE dedicated connections handed out for
        transactions.  A connection record is the 5-item list
        [fd, op deque, dispatch-scheduled flag, in-flight callback, dbconn].
        """
        self.INITCONN_SHARE = 4
        self.INITCONN_FREE = 16
        # operation tags used in each connection's work queue
        self.OPER_CURSOR = 0
        self.OPER_EXECUTE = 1
        self._ioloop = IOLoop.instance()
        self._dbname = dbname
        self._dbuser = dbuser
        self._dbpasswd = dbpasswd
        self._dbschema = dbschema
        self._dbtz = dbtz
        self._share_connpool = []
        self._free_connpool = []
        self._conn_fdmap = {}  # fd -> connection record
        class _InfDateAdapter:
            """Adapt datetime.max/min to PostgreSQL +/-infinity dates."""
            def __init__(self,wrapped):
                self.wrapped = wrapped
            def getquoted(self):
                if self.wrapped == datetime.datetime.max:
                    return b"'infinity'::date"
                elif self.wrapped == datetime.datetime.min:
                    return b"'-infinity'::date"
                else:
                    return psycopg2.extensions.TimestampFromPy(
                        self.wrapped).getquoted()
        psycopg2.extensions.register_adapter(datetime.datetime,_InfDateAdapter)
        # open both pools and schedule an initial dispatch per connection
        for i in range(self.INITCONN_SHARE):
            conn = self._create_conn()
            self._share_connpool.append(conn)
            self._ioloop.add_handler(conn[0],self._dispatch,IOLoop.ERROR)
            conn[2] = True
            self._ioloop.add_callback(self._dispatch,conn[0],0)
        for i in range(self.INITCONN_FREE):
            conn = self._create_conn()
            self._free_connpool.append(conn)
            self._ioloop.add_handler(conn[0],self._dispatch,IOLoop.ERROR)
            conn[2] = True
            self._ioloop.add_callback(self._dispatch,conn[0],0)
@return_future
def cursor(self,callback):
def _cb(cur,err = None):
if err != None:
raise err
callback(WrapCursor(self,cur))
self._cursor(callback = _cb)
def _cursor(self,conn = None,callback = None):
def _cb(err = None):
if err != None:
callback(None,err)
callback(conn[4].cursor())
if conn == None | :
conn = self._share_connpool[
random.randrange(len(self._share_connpool))]
conn[1].append((self.OPER_CURSOR,None,wrap(_cb)))
if conn[2] == False:
conn[2] = True
self._ioloop.add_callback(self._dispatch,conn[0],0)
def _execute(self,cur,sql,param = (),callback = None):
conn = self._conn_fdmap[ | cur.connection.fileno()]
conn[1].append((self.OPER_EXECUTE,(cur,sql,param),wrap(callback)))
if conn[2] == False:
conn[2] = True
self._ioloop.add_callback(self._dispatch,conn[0],0)
def _begin_tran(self,callback):
if len(self._free_connpool) == 0:
conn = self._create_conn()
self._ioloop.add_handler(conn[0],self._dispatch,IOLoop.ERROR)
else:
conn = self._free_connpool.pop()
self._cursor(conn,callback)
def _end_tran(self,cur):
conn = self._conn_fdmap[cur.connection.fileno()]
if len(self._free_connpool) < self.INITCONN_FREE:
self._free_connpool.append(conn)
else:
self._close_conn(conn)
def _create_conn(self):
dbconn = psycopg2.connect(database = self._dbname,
user = self._dbuser,
password = self._dbpasswd,
async = 1,
options = (
'-c search_path=%s '
'-c timezone=%s'
)%(self._dbschema,self._dbtz))
conn = [dbconn.fileno(),deque(),False,None,dbconn]
self._conn_fdmap[conn[0]] = conn
return conn
def _close_conn(self,conn):
self._conn_fdmap.pop(conn[0],None)
self._ioloop.remove_handler(conn[0])
conn[4].close()
def _dispatch(self,fd,evt):
err = None
try:
conn = self._conn_fdmap[fd]
except KeyError:
self._ioloop.remove_handler(fd)
return
try:
stat = conn[4].poll()
except Exception as e:
err = e
if err != None or stat == psycopg2.extensions.POLL_OK:
self._ioloop.update_handler(fd,IOLoop.ERROR)
elif stat == psycopg2.extensions.POLL_READ:
self._ioloop.update_handler(fd,IOLoop.READ | IOLoop.ERROR)
return
elif stat == psycopg2.extensions.POLL_WRITE:
self._ioloop.update_handler(fd,IOLoop.WRITE | IOLoop.ERROR)
return
cb = conn[3]
if cb != None:
conn[3] = None
cb(err)
else:
try:
oper,data,cb = conn[1].popleft()
except IndexError:
conn[2] = False
return
try:
if oper == self.OPER_CURSOR:
conn[3] = cb
elif oper == self.OPER_EXECUTE:
cur,sql,param = data
cur.execute(sql,param)
conn[3] = cb
except Exception as e:
conn[3] = None
cb(e)
self._ioloop.add_callback(self._dispatch,fd, |
import simplejson as json
import urllib
import urllib2
import time
server = ""
def GET(uri, params):
    """HTTP GET `server + uri` with urlencoded *params*; return parsed JSON."""
    query = urllib.urlencode(params)
    request = urllib2.Request(server + uri + "?" + query,
                              headers={'Accept': 'application/json'})
    return json.loads(urllib2.urlopen(request).read())
def POST(uri, params):
    """HTTP POST JSON-encoded *params*; return the created object's id."""
    body = json.dumps(params)
    request = urllib2.Request(server + uri, body,
                              headers={'Content-Type': 'application/json',
                                       'Accept': 'application/json'})
    response = json.loads(urllib2.urlopen(request).read())
    return response["id"]
def set_server_url(url):
    """Configure the module-level base URL used by GET/POST."""
    global server
    server = url
class Detector:
    """Client-side handle for a named detector, registered lazily."""
    def __init__(self, name, url):
        self.name = name
        self.url = url
    def get_id(self):
        """Return the server-side id, looking it up (or creating it) once."""
        if not hasattr(self, 'id'):
            try:
                found = GET("/detectors/", {'name': self.name})
                self.id = found[0]['id']
            except urllib2.HTTPError as e:
                # not found server-side: create it
                self.id = POST("/detectors/", {'name': self.name, 'url': self.url})
        return self.id
    def realize(self):
        """Force registration of the detector on the server."""
        self.get_id()
class Metric:
    """A named metric belonging to a Detector, registered lazily server-side."""
    def __init__(self, name, descr, detector):
        self.name = name
        self.descr = descr
        self.detector = detector
    def get_id(self):
        """Return the server-side id, fetching or creating it on first use.

        BUG FIX: the original returned the id without storing it in
        `self.id`, so the AttributeError cache never took effect and every
        call re-queried the server.  Cache it the way Detector.get_id() does.
        """
        try:
            return self.id
        except AttributeError:
            uri = "/detectors/" + str(self.detector.get_id()) + "/metrics/"
            try:
                metrics = GET(uri, {'name': self.name})
                self.id = metrics[0]['id']
            except urllib2.HTTPError as e:
                self.id = POST(uri, {'name': self.name, 'description': self.descr})
            return self.id
    def realize(self):
        """Force registration of the metric on the server."""
        self.get_id()
def post_alert(detector, metric, payload, emails="", date=time.strftime("%Y-%m-%d")):
try:
payload = json.dumps(payload)
uri = "/detectors/" + str(detector.get_id()) + "/metrics/" + str(metric.get_id()) + "/alerts/"
return POST(uri, {'description': payload, 'date': date, 'emails': emails})
except urllib2.HTTPError as e:
if e.code == 422:
print "Alert for detector: " + detector.name + ", metric: " + metric.name + | ", has already been submitted!"
else:
raise e
# Smoke test: register a detector and metric against a local server and post
# a sample alert. Requires the service to be listening on localhost:8080.
if __name__ == "__main__":
    set_server_url("http://localhost:8080")
    detector = Detector("Histogram Regression Detector", "foobar")
    metric = Metric("metric100", "foobar", detector)
    post_alert(detector, metric, "foobar")
|
# -*- coding: utf-8 -*-
#
# (c) Copyright 2001-2009 Hewlett-Packard Development Company, L.P.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
# Authors: Don Welch
#
# Qt
from PyQt4.QtCore import *
from PyQt4.QtGui import *
class ReadOnlyRadioButton(QRadioButton):
    """A QRadioButton that ignores user interaction (display-only).

    Left-button mouse events, mouse moves and most key events are dropped;
    everything else is forwarded to the QRadioButton implementation.
    """
    def __init__(self, parent):
        QRadioButton.__init__(self, parent)
        self.setFocusPolicy(Qt.NoFocus)
        self.clearFocus()

    def mousePressEvent(self, e):
        if e.button() == Qt.LeftButton:
            return
        # BUG FIX: the base-class calls in this class were unbound and missing
        # `self` (e.g. `QRadioButton.mousePressEvent(e)`), which raises
        # TypeError as soon as a non-left-button event arrives.
        QRadioButton.mousePressEvent(self, e)

    def mouseReleaseEvent(self, e):
        if e.button() == Qt.LeftButton:
            return
        QRadioButton.mouseReleaseEvent(self, e)

    def mouseMoveEvent(self, e):
        # swallow all mouse moves
        return

    def keyPressEvent(self, e):
        # allow only navigation/escape keys through
        if e.key() not in (Qt.Key_Up, Qt.Key_Left, Qt.Key_Right,
                           Qt.Key_Down, Qt.Key_Escape):
            return
        QRadioButton.keyPressEvent(self, e)

    def keyReleaseEvent(self, e):
        # swallow all key releases
        return
|
_future__ import print_function
from __future__ import division
import numpy as np
import sys
import argparse
import time
import re
import gzip
import os
import logging
| from collections import defaultdict
from operator import itemgetter
__version__ = "1.0"
def main():
parser=argparse.ArgumentParser(description='vcf2fasta (diploid)',formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('-r', '--ref', dest='ref_fa | sta', type=str, required=True, help='input reference file (fasta)')
parser.add_argument('-v', '--vcf', dest='vcf_file', type=str, required=True, help='input vcf file (vcf)')
parser.add_argument('-n', '--name', dest='name', type=str, required=True, help='sample name (column header)')
parser.add_argument('--verbose', dest='verbose', action='count', help='Increase verbosity (specify multiple times for more)')
parser.add_argument('--version', action='version', version='%(prog)s '+__version__)
args=parser.parse_args()
ref_fasta=args.ref_fasta
vcf_file=args.vcf_file
name=args.name
verbose=args.verbose
log_level = logging.WARNING
if verbose == 1:
log_level = logging.INFO
elif verbose >= 2:
log_level = logging.DEBUG
logging.basicConfig(level=log_level)
verboseprint = print if verbose else lambda *a, **k: None
# process VCF file
verboseprint("processing VCF file")
snps=defaultdict(list)
snp_count=0
last_chrom=None
chrs=dict()
# open vcf file
if vcf_file.endswith('.gz'):
vcf_fh=gzip.open(vcf_file,'r')
else:
vcf_fh=open(vcf_file,'r')
# iterate over vcf file
for linenum,line in enumerate(vcf_fh):
if line.startswith("##"): # skip headers
continue
line=line.rstrip("\n")
# get header line
if line.startswith("#"):
header=line.lstrip("#").split("\t");
header2index=dict([(h,i) for i,h in enumerate(header)])
# ensure FORMAT column is included
if "FORMAT" not in header2index:
print("FORMAT field not specified in VCF file!")
print(header2index)
sys.exit('error')
# ensure user-specified sample name column is included
if name not in header2index:
print(name,"field not specified in VCF file!")
print(header2index)
sys.exit('error')
continue
tmp=line.split("\t")
genotype=tmp[header2index[name]]
format=tmp[header2index["FORMAT"]].split(":")
index2field=dict([(f,i) for i,f in enumerate(format)])
# ensure GT field id included in FORMAT column
if "GT" not in index2field:
print("GT field not specified in FORMAT!")
print(index2field)
sys.exit('error')
genotype_list=genotype.split(":")
gt=genotype_list[index2field["GT"]]
pattern = re.compile('[\|\/]')
(a,b) = pattern.split(gt)
if(a != b):
sys.exit('error: non-homo SNP found @ line# '+linenum+'\n')
c=a=b
c=int(c)
chrom=tmp[header2index["CHROM"]]
pos=int(tmp[header2index["POS"]])
ref=tmp[header2index["REF"]]
alt=tmp[header2index["ALT"]].split(",")
if(c == 0):
snps["chr"+chrom].append((pos,ref))
snp_count+=1
elif(c >= 1):
snps["chr"+chrom].append((pos,alt[c-1]))
snp_count+=1
if(chrom != last_chrom):
if(chrom not in chrs):
verboseprint("\tchr",chrom)
chrs[chrom]=1
last_chrom=chrom
vcf_fh.close()
verboseprint("found",snp_count,"snps")
# process VCF file
verboseprint("")
# ensure all snps are sorted by position (sorted seperatley for each chromosome)
verboseprint("sorting by position")
for chr in snps: # ensure sorted by pos
snp_positions=snps[chr]
verboseprint("\t",chr," ... ",len(snp_positions)," snps",sep="")
sorted_snp_positions=sorted(snp_positions, key=itemgetter(0))
snps[chr]=sorted_snp_positions
verboseprint("")
# process REFERENCE file
verboseprint("processing REF file")
# get output name
ref_fasta_name=os.path.basename(ref_fasta)
ref_fasta_name=re.sub(".gz", "", ref_fasta_name)
ref_fasta_name=re.sub(".fasta", "", ref_fasta_name)
ref_fasta_name=re.sub(".fa", "", ref_fasta_name)
out_fh=open(ref_fasta_name+'__'+name+'.fa',"w")
placed_snps=0
total_placed_snps=0
current_snp=(None,None)
pos=1
last_chrom=None
tmp_pos_list=[(None,None)]
# open reference fasta file
if ref_fasta.endswith('.gz'):
ref_fh=gzip.open(ref_fasta,'r')
else:
ref_fh=open(ref_fasta,'r')
# iterate over fasta file
for linenum,line in enumerate(ref_fh):
line=line.rstrip("\n")
# search for > (contig name)
regexp = re.compile('>')
if regexp.search(line):
if line.startswith(">"):
chrom=line.lstrip(">")
pos=1
print(line,"-",name,file=out_fh,sep="")
continue
else: # random > found in line - issue with cat ?
sys.exit('error with fasta file'+'\n'+str(line))
if(chrom != last_chrom):
tmp_pos_list=[]
if(last_chrom != None):
verboseprint(" ... ",placed_snps," / ",possible_snps,sep="")
tmp_pos_list=[(None,None)]
possible_snps=0
if(chrom in snps):
tmp_pos_list=snps[chrom]
possible_snps=len(tmp_pos_list)
verboseprint("\t",chrom,sep="",end="")
current_snp=tmp_pos_list.pop(0)
total_placed_snps += placed_snps
placed_snps=0
tmp_len=len(line)
start=pos
end=pos+tmp_len-1
while((current_snp[0] < start) and (len(tmp_pos_list) > 0)):
print("ERROR: missed snp!",current_snp,"\t",start,"-",end,">",current_snp[0])
current_snp=tmp_pos_list.pop(0)
if((current_snp[0] == None) or (current_snp[0] > end)):
print(line,file=out_fh)
else:
char_list=list(line)
snp_offset=current_snp[0]-start
if((snp_offset < 0) or (snp_offset > len(char_list))): # check to ensure SNP overlaps interval
sys.exit('error '+str(current_snp)+' '+str(snp_offset)+' '+str(start)+'-'+str(end))
# replace snp in char arr
char_list[snp_offset]=current_snp[1]
placed_snps+=1
if(len(tmp_pos_list) == 0):
current_snp=(None,None)
# handle multiple SNPs per FASTA line (normally 50 chars /buffer/)
if(len(tmp_pos_list) > 0):
current_snp=tmp_pos_list.pop(0)
while((current_snp[0] <= end) and (len(tmp_pos_list) > 0)):
snp_offset=current_snp[0]-start
# replace snp in char arr
char_list[snp_offset]=current_snp[1]
placed_snps+=1
current_snp=tmp_pos_list.pop(0)
if((current_snp[0] <= end) and (len(tmp_pos_list) == 0)):
snp_offset=current_snp[0]-start
char_list[snp_offset]=current_snp[1]
placed_snps+=1
current_snp=(None,None)
# char list to string, and print
print(''.join(char_list),file=out_fh)
pos += tmp_len
last_chrom=chrom
ref_fh.close()
# handle last line
verbos |
import socket
import pytest
import mock
from pygelf import GelfTcpHandler, GelfUdpHandler, GelfHttpHandler, GelfTlsHandler, GelfHttpsHandler
from tests.helper import logger, get_unique_message, log_warning, log_exception
# GELF reports log levels as syslog severity numbers.
SYSLOG_LEVEL_ERROR = 3
SYSLOG_LEVEL_WARNING = 4
# Parametrized fixture: every test in this module runs once per handler
# below, covering TCP, UDP (with and without compression), HTTP, HTTPS and
# TLS transports. NOTE(review): the handler instances are constructed once
# at import time and shared across all tests.
@pytest.fixture(params=[
    GelfTcpHandler(host='127.0.0.1', port=12201),
    GelfUdpHandler(host='127.0.0.1', port=12202),
    GelfUdpHandler(host='127.0.0.1', port=12202, compress=False),
    GelfHttpHandler(host='127.0.0.1', port=12203),
    GelfHttpHandler(host='127.0.0.1', port=12203, compress=False),
    GelfTlsHandler(host='127.0.0.1', port=12204),
    GelfHttpsHandler(host='127.0.0.1', port=12205, validate=False),
    GelfHttpsHandler(host='localhost', port=12205, validate=True, ca_certs='tests/config/cert.pem'),
    GelfTlsHandler(host='127.0.0.1', port=12204, validate=True, ca_certs='tests/config/cert.pem'),
])
def handler(request):
    # Hand the current parametrization's handler to dependent fixtures.
    return request.param
def test_simple_message(logger):
    """A plain warning carries only the core GELF fields."""
    message = get_unique_message()
    graylog_response = log_warning(logger, message)
    assert graylog_response['message'] == message
    assert graylog_response['level'] == SYSLOG_LEVEL_WARNING
    # None of the optional/debug fields should be shipped by default.
    for absent_field in ('full_message', 'file', 'module', 'func',
                         'logger_name', 'line'):
        assert absent_field not in graylog_response
def test_formatted_message(logger):
    """%-style args are interpolated before the message is shipped."""
    message = get_unique_message()
    graylog_response = log_warning(logger, message + '_%s_%s',
                                   args=('hello', 'gelf'))
    assert graylog_response['message'] == '_'.join([message, 'hello', 'gelf'])
    assert graylog_response['level'] == SYSLOG_LEVEL_WARNING
    assert 'full_message' not in graylog_response
def test_full_message(logger):
    """Logging an exception fills full_message with the traceback."""
    message = get_unique_message()
    try:
        raise ValueError(message)
    except ValueError as e:
        graylog_response = log_exception(logger, message, e)
        assert graylog_response['message'] == message
        assert graylog_response['level'] == SYSLOG_LEVEL_ERROR
        # The traceback text must appear in full_message.
        full = graylog_response['full_message']
        for expected_part in (message, 'Traceback (most recent call last)',
                              'ValueError: '):
            assert expected_part in full
        # Optional/debug fields stay off by default.
        for absent_field in ('file', 'module', 'func', 'logger_name', 'line'):
            assert absent_field not in graylog_response
def test_source(logger):
    """The GELF source is captured at handler creation, not at log time."""
    expected_source = socket.gethostname()
    # Changing the hostname after handler setup must not change the source.
    with mock.patch('socket.gethostname', return_value='different_domain'):
        graylog_response = log_warning(logger, get_unique_message())
        assert graylog_response['source'] == expected_source
|
# -*- coding: utf-8 -*-
#
# Copyright 2013 - Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from wsme import types as wtypes
LOG = logging.getLogger(__name__)
# Allowed lifecycle states for an API version.
API_STATUS = wtypes.Enum(str, 'SUPPORTED', 'CURRENT', 'DEPRECATED')
class Resource(wtypes.Base):
    """REST API Resource."""

    @classmethod
    def from_dict(cls, d):
        """Build a resource from *d*, copying only declared attributes."""
        # TODO: take care of nested resources
        obj = cls()
        for key, val in d.items():
            if hasattr(obj, key):
                setattr(obj, key, val)
        return obj

    def __str__(self):
        """WSME based implementation of __str__."""
        attr_reprs = ["%s='%s'" % (attr.name, getattr(self, attr.name))
                      for attr in self._wsme_attributes]
        return "%s [%s]" % (type(self).__name__, ', '.join(attr_reprs))
|
# Copyright the Karmabot authors and contributors.
# All rights reserved. See AUTHORS.
#
# This file is part of 'karmabot' and is distributed under the BSD license.
# See LICENSE for more details.
# dedicated to LC
from json import JSONDecoder
from urllib import urlencode
from urllib2 import urlopen
from karmabot.core.client import thin | g
from karmabot.core.commands.sets import CommandSet
from karmabot.core.register import facet_registry
from karmabot.core.facets import Facet
import re
import htmlentitydefs
##
# Function Placed in public domain by Fredrik Lundh
# http://effbot.org/zone/copyright.htm
# http://effbot.org/zone/re-sub.htm#unescape-html
# Removes HTML or XML character references and entities from a text strin | g.
#
# @param text The HTML (or XML) source text.
# @return The plain text, as a Unicode string, if necessary.
def unescape(text):
    """Replace HTML/XML character references and named entities in *text*
    with the corresponding Unicode characters; unrecognized references are
    left untouched.

    Function placed in the public domain by Fredrik Lundh
    (http://effbot.org/zone/re-sub.htm#unescape-html).
    """
    def _decode(match):
        token = match.group(0)
        if token.startswith("&#"):
            # Numeric character reference, hexadecimal or decimal.
            try:
                if token.startswith("&#x"):
                    return unichr(int(token[3:-1], 16))
                return unichr(int(token[2:-1]))
            except ValueError:
                pass
        else:
            # Named entity, e.g. "&amp;".
            try:
                return unichr(htmlentitydefs.name2codepoint[token[1:-1]])
            except KeyError:
                pass
        # Fall through: leave the reference as-is.
        return token
    return re.sub("&#?\w+;", _decode, text)
@facet_registry.register
class LmgtfyFacet(Facet):
    """Facet answering "lmgtfy {item}" commands with the top Google hit.

    NOTE(review): this uses the long-deprecated Google AJAX Search API;
    the endpoint may no longer respond.
    """
    name = "lmgtfy"
    # Attach this facet's command set to the shared client `thing`
    # (side effect at class-creation time).
    commands = thing.add_child(CommandSet(name))
    @classmethod
    def does_attach(cls, thing):
        # Only attach to the thing literally named "lmgtfy".
        return thing.name == "lmgtfy"
    @commands.add(u"lmgtfy {item}",
                  u"googles for a {item}")
    def lmgtfy(self, context, item):
        # Query the Google AJAX web search API for `item`.
        api_url = "http://ajax.googleapis.com/ajax/services/search/web?"
        response = urlopen(api_url + urlencode(dict(v="1.0",
                                                    q=item)))
        response = dict(JSONDecoder().decode(response.read()))
        top_result = {}
        if response.get('responseStatus') == 200:
            results = response.get('responseData').get('results')
            top_result = results.pop(0)
        # Reply with "<title>, <url>" of the first result.
        context.reply(", ".join([unescape(top_result.get('titleNoFormatting')),
                                 top_result.get('unescapedUrl'),
                                 ]))
|
ed
from .lapack import get_lapack_funcs
from scipy._lib.six import callable
__all__ = ['qz']
# dtype.char codes promoted to double precision ('D') when casting to
# complex; everything else is promoted to single precision ('F').
_double_precision = ['i','l','d']
def _select_function(sort, typ):
if typ in ['F','D']:
if callable(sort):
# assume the user knows what they're doing
sfunction = sort
elif sort == 'lhp':
sfunction = lambda x,y: (np.real(x/y) < 0.0)
elif sort == 'rhp':
sfunction = lambda x,y: (np.real(x/y) >= 0.0)
elif sort == 'iuc':
sfunction = lambda x,y: (abs(x/y) <= 1.0)
elif sort == 'ouc':
sfunction = lambda x,y: (abs(x/y) > 1.0)
else:
raise ValueError("sort parameter must be None, a callable, or "
"one of ('lhp','rhp','iuc','ouc')")
elif typ in ['f','d']:
if callable(sort):
# assume the user knows what they're doing
sfunction = sort
elif sort == 'lhp':
sfunction = lambda x,y,z: (np.real((x+y*1j)/z) < 0.0)
elif sort == 'rhp':
sfunction = lambda x,y,z: (np.real((x+y*1j)/z) >= 0.0)
elif sort == 'iuc':
sfunction = lambda x,y,z: (abs((x+y*1j)/z) <= 1.0)
elif sort == 'ouc':
sfunction = lambda x,y,z: (abs((x+y*1j)/z) > 1.0)
else:
raise ValueError("sort parameter must be None, a callable, or "
"one of ('lhp','rhp','iuc','ouc')")
else: # to avoid an error later
raise ValueError("dtype %s not understood" % typ)
return sfunction
def qz(A, B, output='real', lwork=None, sort=None, overwrite_a=False,
overwrite_b=False, check_finite=True):
"""
QZ decomposition for generalized eigenvalues of a pair of matrices.
The QZ, or generalized Schur, decomposition for a pair of N x N
nonsymmetric matrices (A,B) is::
(A,B) = (Q*AA*Z', Q*BB*Z')
where AA, BB is in generalized Schur form if BB is upper-triangular
with non-negative diagonal and AA is upper-triangular, or for real QZ
decomposition (``output='real'``) block upper triangular with 1x1
and 2x2 blocks. In this case, the 1x1 blocks correspond to real
generalized eigenvalues and 2x2 blocks are 'standardized' by making
the corresponding elements of BB have the form::
[ a 0 ]
[ 0 b ]
and the pair of corresponding 2x2 blocks in AA and BB will have a complex
conjugate pair of generalized eigenvalues. If (``output='complex'``) or
A and B are complex matrices, Z' denotes the conjugate-transpose of Z.
Q and Z are unitary matrices.
Parameters
----------
A : (N, N) array_like
2d array to decompose
B : (N, N) array_like
2d array to decompose
output : str {'real','complex'}
Construct the real or complex QZ decomposition for real matrices.
Default is 'real'.
lwork : int, optional
Work array size. If None or -1, it is automatically computed.
sort : {None, callable, 'lhp', 'rhp', 'iuc', 'ouc'}, optional
NOTE: THIS INPUT IS DISABLED FOR NOW, IT DOESN'T WORK WELL ON WINDOWS.
Specifies whether the upper eigenvalues should be sorted. A callable
may be passed that, given a eigenvalue, returns a boolean denoting
whether the eigenvalue should be sorted to the top-left (True). For
real matrix pairs, the sort function takes three real arguments
(alphar, alphai, beta). The eigenvalue x = (alphar + alphai*1j)/beta.
For complex matrix pairs or output='complex', the sort function
takes two complex arguments (alpha, beta). The eigenvalue
x = (alpha/beta).
Alternatively, string parameters may be used:
| - ' | lhp' Left-hand plane (x.real < 0.0)
- 'rhp' Right-hand plane (x.real > 0.0)
- 'iuc' Inside the unit circle (x*x.conjugate() <= 1.0)
- 'ouc' Outside the unit circle (x*x.conjugate() > 1.0)
Defaults to None (no sorting).
check_finite : boolean
If true checks the elements of `A` and `B` are finite numbers. If
false does no checking and passes matrix through to
underlying algorithm.
Returns
-------
AA : (N, N) ndarray
Generalized Schur form of A.
BB : (N, N) ndarray
Generalized Schur form of B.
Q : (N, N) ndarray
The left Schur vectors.
Z : (N, N) ndarray
The right Schur vectors.
sdim : int, optional
If sorting was requested, a fifth return value will contain the
number of eigenvalues for which the sort condition was True.
Notes
-----
Q is transposed versus the equivalent function in Matlab.
.. versionadded:: 0.11.0
Examples
--------
>>> from scipy import linalg
>>> np.random.seed(1234)
>>> A = np.arange(9).reshape((3, 3))
>>> B = np.random.randn(3, 3)
>>> AA, BB, Q, Z = linalg.qz(A, B)
>>> AA
array([[-13.40928183, -4.62471562, 1.09215523],
[ 0. , 0. , 1.22805978],
[ 0. , 0. , 0.31973817]])
>>> BB
array([[ 0.33362547, -1.37393632, 0.02179805],
[ 0. , 1.68144922, 0.74683866],
[ 0. , 0. , 0.9258294 ]])
>>> Q
array([[ 0.14134727, -0.97562773, 0.16784365],
[ 0.49835904, -0.07636948, -0.86360059],
[ 0.85537081, 0.20571399, 0.47541828]])
>>> Z
array([[-0.24900855, -0.51772687, 0.81850696],
[-0.79813178, 0.58842606, 0.12938478],
[-0.54861681, -0.6210585 , -0.55973739]])
"""
if sort is not None:
# Disabled due to segfaults on win32, see ticket 1717.
raise ValueError("The 'sort' input of qz() has to be None (will "
" change when this functionality is made more robust).")
if output not in ['real','complex','r','c']:
raise ValueError("argument must be 'real', or 'complex'")
if check_finite:
a1 = asarray_chkfinite(A)
b1 = asarray_chkfinite(B)
else:
a1 = np.asarray(A)
b1 = np.asarray(B)
a_m, a_n = a1.shape
b_m, b_n = b1.shape
if not (a_m == a_n == b_m == b_n):
raise ValueError("Array dimensions must be square and agree")
typa = a1.dtype.char
if output in ['complex', 'c'] and typa not in ['F','D']:
if typa in _double_precision:
a1 = a1.astype('D')
typa = 'D'
else:
a1 = a1.astype('F')
typa = 'F'
typb = b1.dtype.char
if output in ['complex', 'c'] and typb not in ['F','D']:
if typb in _double_precision:
b1 = b1.astype('D')
typb = 'D'
else:
b1 = b1.astype('F')
typb = 'F'
overwrite_a = overwrite_a or (_datacopied(a1,A))
overwrite_b = overwrite_b or (_datacopied(b1,B))
gges, = get_lapack_funcs(('gges',), (a1,b1))
if lwork is None or lwork == -1:
# get optimal work array size
result = gges(lambda x: None, a1, b1, lwork=-1)
lwork = result[-2][0].real.astype(np.int)
if sort is None:
sort_t = 0
sfunction = lambda x: None
else:
sort_t = 1
sfunction = _select_function(sort, typa)
result = gges(sfunction, a1, b1, lwork=lwork, overwrite_a=overwrite_a,
overwrite_b=overwrite_b, sort_t=sort_t)
info = result[-1]
if info < 0:
raise ValueError("Illegal value in argument %d of gges" % -info)
elif info > 0 and info <= a_n:
warnings.warn("The QZ iteration failed. (a,b) are not in Schur "
"form, but ALPHAR(j), ALPHAI(j), and BETA(j) should be correct "
"for J=%d,...,N" % info-1, UserWarning)
elif info == a_n+1:
raise LinAlgError("Something other than QZ iteration failed")
elif info == a_n+2:
raise LinAlgError("After reordering, roundoff changed values of some "
"complex eigenvalues so that leading eigenvalues in the "
"Generalized Schur form no longer satisfy sort=True. "
"This could also be caused due to scaling.")
elif in |
permissions and limitations
# under the License.
"""
Helpers for comparing version strings.
"""
import functools
import inspect
import logging
from oslo_config import cfg
import pkg_resources
import six
from nova.openstack.common._i18n import _
LOG = logging.getLogger(__name__)
CONF = cfg.CONF
# Registered lazily in report_deprecated_feature(), not at import time.
opts = [
    cfg.BoolOpt('fatal_deprecations',
                default=False,
                help='Enables or disables fatal status of deprecations.'),
]
class deprecated(object):
    """A decorator to mark callables as deprecated.
    This decorator logs a deprecation message when the callable it decorates is
    used. The message will include the release where the callable was
    deprecated, the release where it may be removed and possibly an optional
    replacement.
    Examples:
    1. Specifying the required deprecated release
    >>> @deprecated(as_of=deprecated.ICEHOUSE)
    ... def a(): pass
    2. Specifying a replacement:
    >>> @deprecated(as_of=deprecated.ICEHOUSE, in_favor_of='f()')
    ... def b(): pass
    3. Specifying the release where the functionality may be removed:
    >>> @deprecated(as_of=deprecated.ICEHOUSE, remove_in=+1)
    ... def c(): pass
    4. Specifying the deprecated functionality will not be removed:
    >>> @deprecated(as_of=deprecated.ICEHOUSE, remove_in=0)
    ... def d(): pass
    5. Specifying a replacement, deprecated functionality will not be removed:
    >>> @deprecated(as_of=deprecated.ICEHOUSE, in_favor_of='f()', remove_in=0)
    ... def e(): pass
    """
    # Single-letter release codes; arithmetic on these letters is used to
    # compute the removal release (see _get_safe_to_remove_release).
    # NOTE(morganfainberg): Bexar is used for unit test purposes, it is
    # expected we maintain a gap between Bexar and Folsom in this list.
    BEXAR = 'B'
    FOLSOM = 'F'
    GRIZZLY = 'G'
    HAVANA = 'H'
    ICEHOUSE = 'I'
    JUNO = 'J'
    KILO = 'K'
    # Maps release letter -> human-readable release name.
    _RELEASES = {
        # NOTE(morganfainberg): Bexar is used for unit test purposes, it is
        # expected we maintain a gap between Bexar and Folsom in this list.
        'B': 'Bexar',
        'F': 'Folsom',
        'G': 'Grizzly',
        'H': 'Havana',
        'I': 'Icehouse',
        'J': 'Juno',
        'K': 'Kilo',
    }
    # Message templates; which one is used depends on whether a replacement
    # exists (in_favor_of) and whether removal is planned (remove_in > 0).
    _deprecated_msg_with_alternative = _(
        '%(what)s is deprecated as of %(as_of)s in favor of '
        '%(in_favor_of)s and may be removed in %(remove_in)s.')
    _deprecated_msg_no_alternative = _(
        '%(what)s is deprecated as of %(as_of)s and may be '
        'removed in %(remove_in)s. It will not be superseded.')
    _deprecated_msg_with_alternative_no_removal = _(
        '%(what)s is deprecated as of %(as_of)s in favor of %(in_favor_of)s.')
    _deprecated_msg_with_no_alternative_no_removal = _(
        '%(what)s is deprecated as of %(as_of)s. It will not be superseded.')
    def __init__(self, as_of, in_favor_of=None, remove_in=2, what=None):
        """Initialize decorator
        :param as_of: the release deprecating the callable. Constants
            are define in this class for convenience.
        :param in_favor_of: the replacement for the callable (optional)
        :param remove_in: an integer specifying how many releases to wait
            before removing (default: 2)
        :param what: name of the thing being deprecated (default: the
            callable's name)
        """
        self.as_of = as_of
        self.in_favor_of = in_favor_of
        self.remove_in = remove_in
        self.what = what
    def __call__(self, func_or_cls):
        # Default the reported name to "<callable name>()".
        if not self.what:
            self.what = func_or_cls.__name__ + '()'
        msg, details = self._build_message()
        if inspect.isfunction(func_or_cls):
            @six.wraps(func_or_cls)
            def wrapped(*args, **kwargs):
                # Emit the deprecation warning on every call.
                report_deprecated_feature(LOG, msg, details)
                return func_or_cls(*args, **kwargs)
            return wrapped
        elif inspect.isclass(func_or_cls):
            # For classes, wrap __init__ so instantiation triggers the
            # warning; the class object itself is returned unchanged.
            orig_init = func_or_cls.__init__
            # TODO(tsufiev): change `functools` module to `six` as
            # soon as six 1.7.4 (with fix for passing `assigned`
            # argument to underlying `functools.wraps`) is released
            # and added to the oslo-incubator requrements
            @functools.wraps(orig_init, assigned=('__name__', '__doc__'))
            def new_init(self, *args, **kwargs):
                report_deprecated_feature(LOG, msg, details)
                orig_init(self, *args, **kwargs)
            func_or_cls.__init__ = new_init
            return func_or_cls
        else:
            raise TypeError('deprecated can be used only with functions or '
                            'classes')
    def _get_safe_to_remove_release(self, release):
        # TODO(dstanek): this method will have to be reimplemented once
        #    when we get to the X release because once we get to the Y
        #    release, what is Y+2?
        # Advance `remove_in` releases alphabetically from `release`;
        # fall back to the raw letter when it isn't a known release.
        new_release = chr(ord(release) + self.remove_in)
        if new_release in self._RELEASES:
            return self._RELEASES[new_release]
        else:
            return new_release
    def _build_message(self):
        # Pick the message template matching (in_favor_of, remove_in) and
        # return it together with its substitution details.
        details = dict(what=self.what,
                       as_of=self._RELEASES[self.as_of],
                       remove_in=self._get_safe_to_remove_release(self.as_of))
        if self.in_favor_of:
            details['in_favor_of'] = self.in_favor_of
            if self.remove_in > 0:
                msg = self._deprecated_msg_with_alternative
            else:
                # There are no plans to remove this function, but it is
                # now deprecated.
                msg = self._deprecated_msg_with_alternative_no_removal
        else:
            if self.remove_in > 0:
                msg = self._deprecated_msg_no_alternative
            else:
                # There are no plans to remove this function, but it is
                # now deprecated.
                msg = self._deprecated_msg_with_no_alternative_no_removal
        return msg, details
def is_compatible(requested_version, current_version, same_major=True):
    """Determine whether `requested_version` is satisfied by
    `current_version`; in other words, `current_version` is >=
    `requested_version`.

    :param requested_version: version to check for compatibility
    :param current_version: version to check against
    :param same_major: if True, the major version must be identical between
        `requested_version` and `current_version`. This is used when a
        major-version difference indicates incompatibility between the two
        versions. Since this is the common-case in practice, the default is
        True.
    :returns: True if compatible, False if not
    """
    wanted = pkg_resources.parse_version(requested_version)
    have = pkg_resources.parse_version(current_version)
    # A differing major version means incompatible when same_major is set.
    if same_major and (wanted[0] != have[0]):
        return False
    return have >= wanted
# Track the messages we have sent already. See
# report_deprecated_feature().
_deprecated_messages_sent = {}
def report_deprecated_feature(logger, msg, *args, **kwargs):
    """Call this function when a deprecated feature is used.

    If the system is configured for fatal deprecations then the message
    is logged at the 'critical' level and :class:`DeprecatedConfig` will
    be raised.

    Otherwise, the message will be logged (once) at the 'warn' level.

    :raises: :class:`DeprecatedConfig` if the system is configured for
             fatal deprecations.
    """
    stdmsg = _("Deprecated: %s") % msg
    CONF.register_opts(opts)
    if CONF.fatal_deprecations:
        logger.critical(stdmsg, *args, **kwargs)
        raise DeprecatedConfig(msg=stdmsg)

    # Only warn once per unique (msg, args) combination.
    # Using a list because a tuple with dict can't be stored in a set.
    seen_for_msg = _deprecated_messages_sent.setdefault(msg, list())
    if args in seen_for_msg:
        # Already logged this message, so don't log it again.
        return
    seen_for_msg.append(args)
    logger.warn(stdmsg, *args, **kwargs)
class DeprecatedConfig(Exception):
message = _("Fatal call to deprecated config: %(msg)s")
def __init__(self, msg):
super(Exception, self).__init__(self.message |
from exchanges import helpers
from exchanges import kraken
from decimal impo | rt Decimal
### Kraken opportunities
#### ARBITRAGE OPPORTUNITY 1
def opportunity_1():
    """Triangular arbitrage check on Kraken: LTC -> EUR -> XBT -> LTC.

    Returns the fractional deviation from parity as a Decimal; the sign
    convention follows opportunity_2 (1 minus the round-trip rate).
    """
    sellLTCbuyEUR = kraken.get_current_bid_LTCEUR()
    sellEURbuyXBT = kraken.get_current_ask_XBTEUR()
    sellXBTbuyLTC = kraken.get_current_ask_XBTLTC()
    # BUG FIX: the original referenced the undefined name 'sellEURbuyBTX'
    # (letters transposed), which raised NameError on every call.
    opport = 1 - ((sellLTCbuyEUR / sellEURbuyXBT) * sellXBTbuyLTC)
    return Decimal(opport)
def opportunity_2():
    """Triangular arbitrage check on Kraken: EUR -> LTC -> XBT -> EUR."""
    sellEURbuyLTC = kraken.get_current_ask_LTCEUR()
    sellLTCbuyXBT = kraken.get_current_ask_XBTLTC()
    sellXBTbuyEUR = kraken.get_current_bid_XBTEUR()
    # Chain the three conversions and compare against parity.
    ltc_per_eur = 1 / sellEURbuyLTC
    xbt_per_eur = ltc_per_eur / sellLTCbuyXBT
    eur_round_trip = xbt_per_eur * sellXBTbuyEUR
    return Decimal(1 - eur_round_trip)
import os
from fs import enums
import unittest
class TestEnums(unittest.TestCase):
    """Sanity-check that fs.enums mirrors the os module's constants."""

    def test_enums(self):
        expectations = [
            (enums.Seek.current, os.SEEK_CUR),
            (enums.Seek.end, os.SEEK_END),
            (enums.Seek.set, os.SEEK_SET),
            (enums.ResourceType.unknown, 0),
        ]
        for actual, reference in expectations:
            self.assertEqual(actual, reference)
|
#!/usr/bin/env python
from __future__ import print_function
# Use the srilm module
from srilm import *
# Initialize a 5-gram LM variable (1 = unigram, 2 = bigram and so on)
n = initLM(5)
# Read 'corpu.lm' into the LM variable
readLM(n, "corpu.lm")
# How many n-grams of different order are there ?
print("1. Number of n-grams:")
print(" There are {} unigrams in this LM".format(howManyNgrams(n, 1)))
print(" There are {} bigrams in this LM".format(howManyNgrams(n, 2)))
print(" There are {} trigrams in this LM".format(howManyNgrams(n, 3)))
print(" There are {} 4-grams in this LM".format(howManyNgrams(n, 4)))
print(" There are {} 5-grams in this LM".format(howManyNgrams(n, 5)))
print()
# Query the LM for some n-gram log probabilities.
# Note that a SRI language model uses backoff smoothing, so if an n-gram is
# not present in the LM, it will compute it using a smoothed lower-order
# n-gram distribution.
# BUG FIX: the labels below now report the n-gram that was actually queried
# (the original printed unrelated strings, e.g. "p('weil')" for the
# 'Naturverbundenheit' query and "p('sachinr')" for p5).
print("2. N-gram log probabilities:")
p1 = getUnigramProb(n, 'Naturverbundenheit')
print(" p('Naturverbundenheit') = {}".format(p1))
p2 = getBigramProb(n, 'of the')
print(" p('of the') = {}".format(p2))
p3 = getBigramProb(n, 'Niederlage Deutschlands')
print(" p('Niederlage Deutschlands') = {}".format(p3))
p4 = getTrigramProb(n, 'there are some')
print(" p('there are some') = {}".format(p4))
# generic n-gram probability function
p5 = getNgramProb(n, 'sachin tendulkar .PERIOD', 3)
print(" p('sachin tendulkar .PERIOD') = {}".format(p5))
p6 = getNgramProb(n, 'or whatever has yet to', 5)
print(" p('or whatever has yet to') = {}".format(p6))
print()
# Query the LM to get the final log probability for an entire sentence.
# Note that this is different from a n-gram probability because
# (1) For a sentence, SRILM appends <s> and </s> to its beginning
# and the end respectively
# (2) The log prob of a probability is the sum of all individual
# n-gram log probabilities
print("3. Sentence log probabilities and perplexities:")
sprob = getSentenceProb(n, 'there are some good', 4)
print(" p('there are some good') = {}".format(sprob))
# the perplexity
sppl = getSentencePpl(n, 'there are some good', 4)
print(" ppl('there are some good') = {}".format(sppl))
print()
# number of OOVs in a sentence
print("4. OOvs:")
noov = numOOVs(n, 'there are some foobar', 4)
print(" nOOVs('there are some foobar') = {}".format(noov))
print()
# Query the LM to get the total log probability for the file named 'corpus'
print("5. Corpus log probabilties and perplexities:")
corpus = 'test.txt'
corpus_prob = getCorpusProb(n, corpus)
print(" Logprob for the file {} = {}".format(corpus, corpus_prob))
# Query the LM to get the perplexity for the file named 'corpus'
corpus_ppl = getCorpusPpl(n, corpus)
print(" Perplexity for the file {} = {}".format(corpus, corpus_ppl))
# Free LM variable
deleteLM(n)
|
from flask import Flask
from flask import request
from flask.ext.sqlalchemy import SQLAlchemy
import datetime
import uuid as uid
import sys
import requests
import urllib2
# Address of the GOIP gateway and the telephony server's inbound SMS endpoint.
GOIP_SERVER_IP = '127.0.0.1' #'172.248.114.178'
TELEPHONY_SERVER_IP = '127.0.0.1:5000/sms/in'
sys.path.append('/home/csik/public_python/sms_server/deploy') #move
app = Flask(__name__)
from rootio.extensions import db
from config import SQLALCHEMY_DATABASE_URI
app.config['SQLALCHEMY_DATABASE_URI'] = SQLALCHEMY_DATABASE_URI
# NOTE(review): this rebinding shadows the `db` imported from
# rootio.extensions above -- confirm which instance the models expect.
db = SQLAlchemy(app)
from rootio.telephony.models import PhoneNumber, Message
def debug(request):
    """Dump an incoming request's parameters to stdout/stderr.

    Returns the request's parameter items: POST form items for POST
    requests, query-string args otherwise.
    """
    if request.method == 'POST':
        deets = request.form.items()
        print >> sys.stderr, type(deets)
        deets_method = 'POST'
    else:
        deets = request.args.items()
        print >> sys.stderr, type(deets)
        deets_method = 'GET'
    s = ""
    #print "({0}) parameters via {1}".format(len(deets)-1, deets_method)
    # Concatenate every (key, value) pair into one line for logging.
    for deet in deets:
        s += str(deet)
    print s
    return deets
@app.route("/", methods=['GET', 'POST'])
def hello():
    """Liveness endpoint: log the request's parameters and greet."""
    debug(request)
    return "Hello World!"
@app.route("/init_goip", methods=['GET', 'POST'])
def init_goip():
try:
import send_sms_GOIP
if not send_sms_GOIP.create_flags():
raise Exc | eption("Wrong machine")
except:
print "Unable to init GOIP -- are you sure you called the right machine?"
return "Unable to init GOIP", 404
@app.route("/out", methods=['GET', 'POST'])
def sms_out():
    """
    Handles outgoing message requests.
    Currently only from GOIP8, should be generalized to any type of sending unit, called by station.
    Expected args: line, to_number, message
    """
    # Import lazily: only the gateway machine has the GOIP helper module.
    try:
        import send_sms_GOIP
    except:
        print "Unable to init GOIP -- are you sure you called the right machine?"
        return "Unable to init GOIP", 404
    debug(request)
    line = request.args.get('line')
    to_number = request.args.get('to_number')
    message = request.args.get('message')
    # All three query parameters are mandatory.
    if not line or not to_number or not message:
        print "Insufficient number of arguments!"
        return "False"
    if not send_sms_GOIP.send(line,to_number,message):
        print "Uh Oh, some kind of error in send_sms_GOIP"
        return "False"
    else:
        return "Sent!"
@app.route("/in/", methods=['GET', 'POST'])
def sms_in():
    """
    Handles incoming messages.
    Currently getting incoming messages from GOIP8, routed to extension 1100 which triggers handle_chat.py
    Expected args: Event-Date-Timestamp (Unix epoch), from, to, from_number, body
    """
    debug(request)
    # NOTE(review): uuid5 over a fixed namespace/name yields the SAME uuid
    # for every message -- confirm whether a per-message uuid4 was intended.
    uuid = uid.uuid5(uid.NAMESPACE_DNS, 'rootio.org')
    # Event-Date-Timestamp arrives in microseconds since the epoch.
    edt = datetime.datetime.fromtimestamp(int(request.args.get('Event-Date-Timestamp'))/1000000) #.strftime('%Y-%m-%d %H:%M:%S')
    fr = request.args.get('from') #This line should look up the station through its from address
    to = request.args.get('to') #This will be the same for all related units -- again may make sense to have a representation of sending units
    from_number = request.args.get('from_number') #look up a number now? Save a foreign key
    body = request.args.get('body')
    # Forward the parsed message to the telephony server.
    payload = { 'uuid': uuid,
                'edt': edt,
                'fr': fr,
                'to': to,
                'from_number': from_number,
                'body': body,
              }
    r= requests.get(TELEPHONY_SERVER_IP,params=payload)
    print r.text
    return "looks alright " + str(uuid)
    #return str(str(edt)+'\n'+fr+'->'+to+'\n'+from_number+'\n'+body+'\n'+uuid)
if __name__ == "__main__":
app.run(debug=True)
r = requests.get('http://'+GOIP_SERVER_IP+'/init_goip')
|
s to cache
allow-update { any; }; // This is the default
allow-query { any; }; // This is the default
recursion no; // Do not provide recursive service
};
zone "1.0.0.0.8.b.d.0.1.0.0.2.ip6.arpa" {
type master;
file "rev.db";
notify no;
allow-update { key forge.sha512.key; };
allow-query { any; };
};
zone "six.example.com" {
type master;
file "fwd.db";
notify no;
allow-update { key forge.sha512.key; };
allow-query { any; };
};
key "forge.sha512.key" {
algorithm hmac-sha512;
secret "jBng5D6QL4f8cfLUUwE7OQ==";
};
#Use with the following in named.conf, adjusting the allow list as needed:
key "rndc-key" {
algorithm hmac-md5;
secret "+kOEcvxPTCPxzGqB5n5FeA==";
};
controls {
inet 127.0.0.1 port 53001
allow { 127.0.0.1; } keys { "rndc-key"; };
};
logging{
channel simple_log {
file "/tmp/dns.log";
severity debug 99;
print-time yes;
print-severity yes;
print-category yes;
};
category default{
simple_log;
};
category queries{
simple_log;
};
};
""", """
key "rndc-key" {
algorithm hmac-md5;
secret "+kOEcvxPTCPxzGqB5n5FeA==";
};
options {
default-key "rndc-key";
default-server 127.0.0.1;
default-port 953;
};
""", """$ORIGIN .
$TTL 86400 ; 1 day
six.example.com IN SOA dns6-1.six.example.com. mail.six.example.com. (
107 ; serial
3600 ; refresh (1 hour)
900 ; retry (15 minutes)
2592000 ; expire (4 weeks 2 days)
3600 ; minimum (1 hour)
)
NS dns6-1.six.example.com.
$ORIGIN six.example.com.
dns6-1 AAAA 2001:db8:1::1
nanny6 AAAA 2001:db8:1::10
""", """$ORIGIN .
$TTL 3600 ; 1 hour
1.0.0.0.8.b.d.0.1.0.0.2.ip6.arpa IN SOA dns6-1.six.example.com. mail.six.example.com. (
102 ; serial
3600 ; refresh (1 hour)
900 ; retry (15 minutes)
604800 ; expire (1 week)
3600 ; minimum (1 hour)
)
NS dns6-1.six.example.com.
$ORIGIN 1.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.1.0.0.0.8.b.d.0.1.0.0.2.ip6.arpa.
0 PTR nanny6.six.exmaple.com.1.0.0.0.8.b.d.0.1.0.0.2.ip6.arpa.
"""],
8: ["""
options {
directory "${data_path}"; // Working directory
listen-on-v6 port ${dns_port} { ${dns_addr}; };
allow-query-cache { none; }; // Do not allow access to cache
allow-update { any; }; // This is the default
allow-query { any; }; // This is the default
recursion no; // Do not provide recursive service
};
zone "1.0.0.0.8.b.d.0.1.0.0.2.ip6.arpa" {
type master;
file "rev.db";
notify no;
allow-update { key forge.md5.key; };
allow-query { any; };
};
zone "six.example.com" {
type master;
file "fwd.db";
notify no;
allow-update { key forge.md5.key; };
allow-query { any; };
};
key "forge.md5.key" {
algorithm hmac-md5;
secret "bX3Hs+fG/tThidQPuhK1mA==";
};
#Use with the following in named.conf, adjusting the allow list as needed:
key "rndc-key" {
algorithm hmac-md5;
secret "+kOEcvxPTCPxzGqB5n5FeA==";
};
controls {
inet 127.0.0.1 port 53001
allow { 127.0.0.1; } keys { "rndc-key"; };
};
logging{
channel simple_log {
file "/tmp/dns.log";
severity debug 99;
print-time yes;
print-severity yes;
print-category yes;
};
category default{
simple_log;
};
category queries{
simple_log;
};
};
""", """
key "rndc-key" {
algorithm hmac-md5;
secret "+kOEcvxPTCPxzGqB5n5FeA==";
};
options {
default-key "rndc-key";
default-server 127.0.0.1;
default-port 953;
};
""", """$ORIGIN .
$TTL 86400 ; 1 day
six.example.com IN SOA dns6-1.six.example.com. mail.six.example.com. (
107 ; serial
3600 ; refresh (1 hour)
900 ; retry (15 minutes)
2592000 ; expire (4 weeks 2 days)
3600 ; minimum (1 hour)
)
NS dns6-1.six.example.com.
$ORIGIN six.example.com.
dns6-1 AAAA 2001:db8:1::1
nanny6 AAAA 2001:db8:1::10
""", """$ORIGIN .
$TTL 3600 ; 1 hour
1.0.0.0.8.b.d.0.1.0.0.2.ip6.arpa IN SOA dns6-1.six.example.com. mail.six.example.com. (
102 ; serial
3600 ; refresh (1 hour)
900 ; retry (15 minutes)
604800 ; expire (1 week)
3600 ; minimum (1 hour)
)
NS dns6-1.six.example.com.
$ORIGIN 1.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.1.0.0.0.8.b.d.0.1.0.0.2.ip6.arpa.
0 PTR nanny6.six.exmaple.com.1.0.0.0.8.b.d.0.1.0.0.2.ip6.arpa.
"""],
9: ["""
options {
directory "${data_path}"; // Working directory
listen-on-v6 port ${dns_port} { ${dns_addr}; };
allow-query-cache { none; }; // Do not allow access to cache
allow-update { any; }; // This is the default
allow-query { any; }; // This is the default
recursion no; // Do not provide recursive service
};
zone "1.0.0.0.8.b.d.0.1.0.0.2.ip6.arpa" {
type master;
file "rev.db";
notify no;
allow-update { key forge.sha512.key; };
allow-query { any; };
};
zone "six.example.com" {
type master;
file "fwd.db";
notify no;
allo | w-update { key forge.md5.key; };
allow-query { any; };
};
key "forge.sha512.key" {
algorithm hmac-sha512;
secret "jBng5D6QL4f8cfLUUwE7OQ==";
};
key "forge.md5.key" {
algorithm hmac-md5;
secret "bX3Hs+fG/tThidQPuhK1mA==";
};
#Use with the following in named.conf, adjusting the allow list as needed:
key "rndc-key" {
algorithm hmac-md5;
secret "+kOEcvxPTCPxzGqB5n5FeA= | =";
};
controls {
inet 127.0.0.1 port 53001
allow { 127.0.0.1; } keys { "rndc-key"; };
};
logging{
channel simple_log {
file "/tmp/dns.log";
severity debug 99;
print-time yes;
print-severity yes;
print-category yes;
};
category default{
simple_log;
};
category queries{
simple_log;
};
};
""", """
key "rndc-key" {
algorithm hmac-md5;
secret "+kOEcvxPTCPxzGqB5n5FeA==";
};
options {
default-key "rndc-key";
default-server 127.0.0.1;
default-port 953;
};
""", """$ORIGIN .
$TTL 86400 ; 1 day
six.example.com IN SOA dns6-1.six.example.com. mail.six.example.com. (
107 ; serial
3600 ; refresh (1 hour)
900 ; retry (15 minutes)
2592000 ; expire (4 weeks 2 days)
3600 ; minimum (1 hour)
)
NS dns6-1.six.example.com.
$ORIGIN six.example.com.
dns6-1 AAAA 2001:db8:1::1
nanny6 AAAA 2001:db8:1::10
""", """$ORIGIN .
$TTL 3600 ; 1 hour
1.0.0.0.8.b.d.0.1.0.0.2.ip6.arpa IN SOA dns6-1.six.example.com. mail.six.example.com. (
102 ; serial
3600 ; refresh (1 hour)
900 ; retry (15 minutes)
604800 ; expire (1 week)
3600 ; minimum (1 hour)
)
NS dns6-1.six.example.com.
$ORIGIN 1.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.1.0.0.0.8.b.d.0.1.0.0.2.ip6.arpa.
0 PTR nanny6.six.exmaple.com.1.0.0.0.8.b.d.0.1.0.0.2.ip6.arpa.
"""],
10: ["", "", "", ""],
11: ["", "", "", ""],
12: ["", "", "", ""],
13: ["", "", "", ""],
14: ["", "", "", ""],
15: ["", "", "", ""],
## v4 configs!
20: ["""
options {
directory "${data_path}"; // Working directory
listen-on port ${dns_port} { ${dns_addr}; };
allow-query-cache { none; }; // Do not allow access to cache
allow-update { any; }; // This is the default
allow-query { any; }; // This is the default
recursion no; // Do not provide recursive service
};
zone "50.168.192.in-addr.arpa." {
type master;
file "rev.db";
notify no;
allow-update { any; }; // This is the default
allow-query { any; }; // This is the default
};
zone "four.example.com" {
type master;
file "fwd.db";
notify no;
allow-update { any; }; // This is the default
allow-transfer { any; };
allow-query { any; }; // This is the default
};
#Use with the following in named.conf, adjusting the allow list as needed:
key "rndc-key" {
algorithm hmac-md5;
secret "+kOEcvxPTCPxzGqB5n5FeA==";
};
controls {
inet 127.0.0.1 port 53001 allow { 127 |
import sys
import os
import torch
import time
from engine import TetrisEngine
from dqn_agent import DQN, ReplayMemory, Transition
from torch.autograd import Variable
# Tensor constructor aliases that transparently target the GPU when CUDA
# is available; used by run() below.
use_cuda = torch.cuda.is_available()
FloatTensor = torch.cuda.FloatTensor if use_cuda else torch.FloatTensor
LongTensor = torch.cuda.LongTensor if use_cuda else torch.LongTensor
# Shared board dimensions and game engine instance used by run().
width, height = 10, 20 # standard tetris friends rules
engine = TetrisEngine(width, height)
def load_model(filename):
    """Load a DQN checkpoint from *filename* and return the model.

    The model is moved to the GPU when CUDA is available.

    :param filename: path to a torch checkpoint containing 'state_dict'.
    :return: a DQN instance with the checkpoint weights loaded.
    """
    model = DQN()
    if use_cuda:
        model.cuda()
    # BUG FIX: map tensors onto the current device; the original
    # torch.load(filename) fails when a checkpoint saved on a CUDA
    # machine is loaded on a CPU-only machine.
    checkpoint = torch.load(filename,
                            map_location='cuda' if use_cuda else 'cpu')
    model.load_state_dict(checkpoint['state_dict'])
    return model
def run(model):
    """Play one game greedily with *model*, rendering each step.

    At every step the action with the highest predicted Q-value is taken.
    Prints the Q-values, board and action each step and the final score.

    :param model: a trained DQN returning per-action Q-values.
    """
    state = FloatTensor(engine.clear()[None, None, :, :])
    score = 0
    while True:
        # FIX: evaluate the network once per step under no_grad(); the
        # original ran the forward pass twice and used the long-deprecated
        # Variable(..., volatile=True) flag (a no-op in modern PyTorch).
        with torch.no_grad():
            q_values = model(Variable(state).type(FloatTensor)).data
        print(q_values)
        action = q_values.max(1)[1].view(1, 1).type(LongTensor)
        state, reward, done = engine.step(action[0, 0])
        state = FloatTensor(state[None, None, :, :])
        # Accumulate reward
        score += int(reward)
        print(engine)
        print(action)
        time.sleep(.1)
        if done:
            print('score {0}'.format(score))
            break
if __name__ == '__main__':
    # BUG FIX: the usage check previously ran at import time (before the
    # __main__ guard), so merely importing this module without CLI args
    # called sys.exit(1). It now only runs when executed as a script.
    if len(sys.argv) <= 1:
        print('specify a filename to load the model')
        sys.exit(1)
    filename = sys.argv[1]
    if os.path.isfile(filename):
        print("=> loading model '{}'".format(filename))
        model = load_model(filename).eval()
        run(model)
    else:
        print("=> no file found at '{}'".format(filename))
|
import pandas as pd
import logging
import settings
import os
from scikits.audiolab import oggwrite, play, oggread
from scipy.fftpack import dct
from itertools import chain
import numpy as np
import math
log = logging.getLogger(__name__)
def read_sound(fpath, limit=settings.MUSIC_TIME_LIMIT):
    """Read up to *limit* seconds of stereo audio from an ogg file.

    :param fpath: path to the ogg file.
    :param limit: maximum number of seconds of audio to keep.
    :return: (data, fs, enc) -- a (samples, 2) array, the sample rate,
        and the encoding string reported by oggread.
    :raises IOError: if the file cannot be read.
    :raises ValueError: if the file is too short or has an unusable
        channel layout.
    """
    try:
        data, fs, enc = oggread(fpath)
        upto = fs * limit
    except IOError:
        log.error("Could not read file at {0}".format(fpath))
        # BUG FIX: re-raise the original exception instead of a new,
        # message-less IOError, preserving the cause and traceback.
        raise
    if data.shape[0] < upto:
        log.error("Music file at {0} not long enough.".format(fpath))
        raise ValueError("Music file at {0} not long enough.".format(fpath))
    try:
        # Duplicate a mono channel so downstream code always sees stereo.
        if len(data.shape) == 1 or data.shape[1] != 2:
            data = np.vstack([data, data]).T
    except Exception:
        log.error("Invalid dimension count for file at {0}. Do you have left and right channel audio?".format(fpath))
        raise ValueError("Invalid dimension count for file at {0}".format(fpath))
    data = data[0:upto, :]
    return data, fs, enc
def calc_slope(x, y):
    """Ratio of total absolute deviation of *y* to that of *x*.

    NOTE(review): despite the name this is NOT a least-squares slope --
    the expression reduces to sum|y - mean(y)| / sum|x - mean(x)| and is
    always non-negative. Confirm intent before "fixing": trained models
    depend on these exact feature values.
    """
    x_dev = np.abs(x - np.mean(x)).sum()
    y_dev = np.abs(y - np.mean(y)).sum()
    return (x_dev * y_dev) / (x_dev * x_dev)
def get_indicators(vec):
    """Summarize *vec* as the triple (mean, pseudo-slope, std)."""
    positions = np.arange(len(vec))
    return np.mean(vec), calc_slope(positions, vec), np.std(vec)
def calc_u(vec):
    """Spectral-moment-style statistic: <FFT(vec), vec> / sum(vec)."""
    spectrum = np.fft.fft(vec)
    return (spectrum * vec).sum() / np.sum(vec)
def calc_mfcc(fft):
    """MFCC-style coefficients: DCT-II of the log mel-filtered power spectrum."""
    power = np.abs(fft) ** 2
    mel_energies = np.dot(power, mel_filter(power.shape[0]))
    return dct(np.log(mel_energies), type=2)
def mel_filter(blockSize):
    """Build a (blockSize, 13)-shaped matrix of triangular mel filters.

    :param blockSize: length of the spectrum the filters are applied to.
    :return: transposed filter matrix, ready for ``power.dot(filters)``.
    """
    numBands = 13
    maxMel = int(freqToMel(24000))
    minMel = int(freqToMel(10))
    filterMatrix = np.zeros((numBands, blockSize))
    # COMPAT FIX: use range() instead of the Python-2-only xrange() so the
    # module also runs under Python 3; behavior is unchanged on Python 2.
    melRange = np.array(range(numBands + 2))
    # Evenly spaced filter centers on the mel scale.
    melCenterFilters = melRange * (maxMel - minMel) / (numBands + 1) + minMel
    # Map the mel centers back to FFT bin indices.
    aux = np.log(1 + 1000.0 / 700.0) / 1000.0
    aux = (np.exp(melCenterFilters * aux) - 1) / 22050
    aux = 0.5 + 700 * blockSize * aux
    aux = np.floor(aux)  # round down
    centerIndex = np.array(aux, int)  # get int values
    for i in range(numBands):
        start, center, end = centerIndex[i:(i + 3)]
        k1 = np.float32(center - start)
        k2 = np.float32(end - center)
        up = (np.array(range(start, center)) - start) / k1
        down = (end - np.array(range(center, end))) / k2
        filterMatrix[i][start:center] = up
        try:
            filterMatrix[i][center:end] = down
        except ValueError:
            # Deliberate best-effort: a degenerate band whose down-slope
            # does not fit the matrix is left at zero.
            pass
    return filterMatrix.transpose()
def freqToMel(freq):
    """Convert a frequency in Hz to the mel scale."""
    return 1127.01048 * math.log(freq / 700.0 + 1)
def melToFreq(freq):
    """Convert a mel-scale value back to a frequency in Hz.

    BUG FIX: the original computed ``700 * exp(m/1127.01048 - 1)``, which
    is not the inverse of freqToMel (melToFreq(0) gave ~257 Hz instead of
    0). The correct inverse is ``700 * (exp(m/1127.01048) - 1)``.

    :param freq: value on the mel scale (name kept for compatibility).
    """
    return 700 * (math.exp(freq / 1127.01048) - 1)
def calc_features(vec, freq):
    """Compute a flat list of scalar features for one audio channel.

    :param vec: 1-D array of samples for a single channel.
    :param freq: sample rate of *vec*.
    :return: list of floats: global stats, binned peak/min indicators,
        cepstrum indicators, zero-crossing stats, spectral spread/skew,
        slope statistics, MFCC-style coefficients, and sampled cepstrum
        means.
    """
    # bin count: one bin per second of audio
    bc = settings.MUSIC_TIME_LIMIT
    bincount = list(range(bc))
    # framesize used for the spectral-flux lag
    fsize = 512
    # global mean
    m = np.mean(vec)
    # spectral flux: mean difference against the signal shifted by fsize
    sf = np.mean(vec - np.roll(vec, fsize))
    mx = np.max(vec)
    mi = np.min(vec)
    sdev = np.std(vec)
    # NOTE(review): integer division on Python 2; under Python 3 this
    # yields a float and the slicing below would break -- confirm py2.
    binwidth = len(vec) / bc
    bins = []
    for i in xrange(0, bc):
        bins.append(vec[(i * binwidth):(binwidth * i + binwidth)])
    # Per-bin extrema, summarized via (mean, slope, std) indicators.
    peaks = [np.max(i) for i in bins]
    mins = [np.min(i) for i in bins]
    amin, smin, stmin = get_indicators(mins)
    apeak, speak, stpeak = get_indicators(peaks)
    #fft = np.fft.fft(vec)
    bin_fft = []
    for i in xrange(0, bc):
        bin_fft.append(np.fft.fft(vec[(i * binwidth):(binwidth * i + binwidth)]))
    # MFCC-style coefficients for every third bin, flattened.
    mel = [list(calc_mfcc(j)) for (i, j) in enumerate(bin_fft) if i % 3 == 0]
    mels = list(chain.from_iterable(mel))
    # Real cepstrum per bin, summarized via indicators.
    cepstrums = [np.fft.ifft(np.log(np.abs(i))) for i in bin_fft]
    inter = [get_indicators(i) for i in cepstrums]
    acep, scep, stcep = get_indicators([i[0] for i in inter])
    aacep, sscep, stsscep = get_indicators([i[1] for i in inter])
    # Zero-crossing count and its rate normalized by sample frequency.
    zero_crossings = np.where(np.diff(np.sign(vec)))[0]
    zcc = len(zero_crossings)
    zccn = zcc / freq
    u = [calc_u(i) for i in bins]
    # NOTE(review): u[5] hardcodes the 6th bin -- assumes
    # settings.MUSIC_TIME_LIMIT >= 6; verify.
    spread = np.sqrt(u[-1] - u[0] ** 2)
    skewness = (u[0] ** 3 - 3 * u[0] * u[5] + u[-1]) / spread ** 3
    #Spectral slope
    #ss = calc_slope(np.arange(len(fft)),fft)
    avss = [calc_slope(np.arange(len(i)), i) for i in bin_fft]
    savss = calc_slope(bincount, avss)
    mavss = np.mean(avss)
    features = [m, sf, mx, mi, sdev, amin, smin, stmin, apeak, speak, stpeak, acep, scep, stcep, aacep, sscep, stsscep,
                zcc, zccn, spread, skewness, savss, mavss] + mels + [i[0] for (j, i) in enumerate(inter) if j % 5 == 0]
    # Keep only the real part of any complex-valued feature.
    for i in xrange(0, len(features)):
        try:
            features[i] = features[i].real
        except Exception:
            pass
    return features
def extract_features(sample, freq):
    """Stereo feature vector: left-channel features followed by right."""
    left_channel, right_channel = sample[:, 0], sample[:, 1]
    return calc_features(left_channel, freq) + calc_features(right_channel, freq)
def process_song(vec, f):
    """Best-effort feature extraction for one song.

    :param vec: (samples, 2) stereo audio data.
    :param f: sample rate.
    :return: feature list, or None if extraction failed.
    """
    try:
        features = extract_features(vec, f)
    except Exception:
        # BUG FIX: the original message claimed {0} was a file name, but
        # *f* is the sample rate -- log what the value actually is.
        log.error("Cannot generate features (sample rate {0})".format(f))
        return None
    return features
def generate_features(filepath):
    """Read *filepath* and return its features as a pandas Series.

    The Series also carries the sample rate ('fs'), encoding ('enc') and
    source path ('fname') as extra entries.
    """
    data, fs, enc = read_sound(filepath)
    series = pd.Series(process_song(data, fs))
    series['fs'] = fs
    series['enc'] = enc
    series['fname'] = filepath
    return series
def generate_train_features():
    """Build (or load from cache) the training feature DataFrame.

    If settings.TRAIN_FEATURE_PATH already exists it is read back
    (dropping the CSV index column); otherwise every .ogg file under
    settings.OGG_DIR is processed and the result is written there.
    """
    if os.path.isfile(settings.TRAIN_FEATURE_PATH):
        cached = pd.read_csv(settings.TRAIN_FEATURE_PATH)
        # Drop the CSV index column written by to_csv.
        return cached.iloc[:, 1:]
    rows, rates, encodings, names = [], [], [], []
    for i, p in enumerate(os.listdir(settings.OGG_DIR)):
        if not p.endswith(".ogg"):
            continue
        log.debug("On file {0}".format(p))
        filepath = os.path.join(settings.OGG_DIR, p)
        try:
            data, fs, enc = read_sound(filepath)
        except Exception:
            # Unreadable / too-short files are simply skipped.
            continue
        try:
            features = process_song(data, fs)
        except Exception:
            log.error("Could not get features for file {0}".format(p))
            continue
        rows.append(features)
        rates.append(fs)
        encodings.append(enc)
        names.append(p)
    frame = pd.DataFrame(rows)
    frame['fs'] = rates
    frame['enc'] = encodings
    frame['fname'] = names
    frame.to_csv(settings.TRAIN_FEATURE_PATH)
    return frame
import time
from indy import anoncreds, wallet
import json
import logging
from indy import pool
from src.utils import run_coroutine, PROTOCOL_VERSION
logger = logging.getLogger(__name__)
async def demo():
    """End-to-end libindy anoncreds walkthrough.

    Creates issuer and prover wallets, builds a schema and credential
    definition, issues a credential, constructs a proof for it and
    verifies that proof, then tears both wallets down. The `store` dict
    stands in for a ledger: it maps schema / cred-def ids to their JSON
    so every party can resolve them locally.
    """
    logger.info("Anoncreds sample -> started")
    issuer = {
        'did': 'NcYxiDXkpYi6ov5FcYDi1e',
        'wallet_config': json.dumps({'id': 'issuer_wallet'}),
        'wallet_credentials': json.dumps({'key': 'issuer_wallet_key'})
    }
    prover = {
        'did': 'VsKV7grR1BUE29mG2Fm2kX',
        'wallet_config': json.dumps({"id": "prover_wallet"}),
        # NOTE(review): the prover wallet key reuses 'issuer_wallet_key' --
        # looks like a copy-paste slip; harmless in this sample but confirm.
        'wallet_credentials': json.dumps({"key": "issuer_wallet_key"})
    }
    verifier = {}
    # In-memory stand-in for the ledger: id -> definition JSON.
    store = {}
    # Set protocol version 2 to work with Indy Node 1.4
    await pool.set_protocol_version(PROTOCOL_VERSION)
    # 1. Create Issuer Wallet and Get Wallet Handle
    await wallet.create_wallet(issuer['wallet_config'], issuer['wallet_credentials'])
    issuer['wallet'] = await wallet.open_wallet(issuer['wallet_config'], issuer['wallet_credentials'])
    # 2. Create Prover Wallet and Get Wallet Handle
    await wallet.create_wallet(prover['wallet_config'], prover['wallet_credentials'])
    prover['wallet'] = await wallet.open_wallet(prover['wallet_config'], prover['wallet_credentials'])
    # 3. Issuer create Credential Schema
    schema = {
        'name': 'gvt',
        'version': '1.0',
        'attributes': '["age", "sex", "height", "name"]'
    }
    issuer['schema_id'], issuer['schema'] = await anoncreds.issuer_create_schema(issuer['did'], schema['name'],
                                                                                schema['version'],
                                                                                schema['attributes'])
    store[issuer['schema_id']] = issuer['schema']
    # 4. Issuer create Credential Definition for Schema
    cred_def = {
        'tag': 'cred_def_tag',
        'type': 'CL',
        'config': json.dumps({"support_revocation": False})
    }
    issuer['cred_def_id'], issuer['cred_def'] = await anoncreds.issuer_create_and_store_credential_def(
        issuer['wallet'], issuer['did'], issuer['schema'], cred_def['tag'], cred_def['type'], cred_def['config'])
    store[issuer['cred_def_id']] = issuer['cred_def']
    # 5. Prover create Master Secret
    prover['master_secret_id'] = await anoncreds.prover_create_master_secret(prover['wallet'], None)
    # 6. Issuer create Credential Offer
    issuer['cred_offer'] = await anoncreds.issuer_create_credential_offer(issuer['wallet'], issuer['cred_def_id'])
    # Hand the offer to the prover, who resolves the referenced schema
    # and cred-def from the store (ledger stand-in).
    prover['cred_offer'] = issuer['cred_offer']
    cred_offer = json.loads(prover['cred_offer'])
    prover['cred_def_id'] = cred_offer['cred_def_id']
    prover['schema_id'] = cred_offer['schema_id']
    prover['cred_def'] = store[prover['cred_def_id']]
    prover['schema'] = store[prover['schema_id']]
    # 7. Prover create Credential Request
    prover['cred_req'], prover['cred_req_metadata'] = \
        await anoncreds.prover_create_credential_req(prover['wallet'], prover['did'], prover['cred_offer'],
                                                     prover['cred_def'], prover['master_secret_id'])
    # 8. Issuer create Credential
    prover['cred_values'] = json.dumps({
        "sex": {"raw": "male", "encoded": "5944657099558967239210949258394887428692050081607692519917050011144233"},
        "name": {"raw": "Alex", "encoded": "1139481716457488690172217916278103335"},
        "height": {"raw": "175", "encoded": "175"},
        "age": {"raw": "28", "encoded": "28"}
    })
    issuer['cred_values'] = prover['cred_values']
    issuer['cred_req'] = prover['cred_req']
    # Revocation is disabled, so the two revocation arguments are None.
    (cred_json, _, _) = await anoncreds.issuer_create_credential(issuer['wallet'], issuer['cred_offer'],
                                                                 issuer['cred_req'], issuer['cred_values'], None, None)
    prover['cred'] = cred_json
    # 9. Prover store Credential
    await anoncreds.prover_store_credential(prover['wallet'], None, prover['cred_req_metadata'], prover['cred'],
                                            prover['cred_def'], None)
    # 10. Prover gets Credentials for Proof Request
    verifier['proof_req'] = json.dumps({
        'nonce': '123432421212',
        'name': 'proof_req_1',
        'version': '0.1',
        'requested_attributes': {
            'attr1_referent': {'name': 'name'}
        },
        'requested_predicates': {
            'predicate1_referent': {'name': 'age', 'p_type': '>=', 'p_value': 18}
        }
    })
    prover['proof_req'] = verifier['proof_req']
    # Prover gets Credentials for attr1_referent
    prover['cred_search_handle'] = \
        await anoncreds.prover_search_credentials_for_proof_req(prover['wallet'], prover['proof_req'], None)
    creds_for_attr1 = await anoncreds.prover_fetch_credentials_for_proof_req(prover['cred_search_handle'],
                                                                             'attr1_referent', 10)
    prover['cred_for_attr1'] = json.loads(creds_for_attr1)[0]['cred_info']
    # Prover gets Credentials for predicate1_referent
    creds_for_predicate1 = await anoncreds.prover_fetch_credentials_for_proof_req(prover['cred_search_handle'],
                                                                                  'predicate1_referent', 10)
    prover['cred_for_predicate1'] = json.loads(creds_for_predicate1)[0]['cred_info']
    await anoncreds.prover_close_credentials_search_for_proof_req(prover['cred_search_handle'])
    # 11. Prover create Proof for Proof Request
    prover['requested_creds'] = json.dumps({
        'self_attested_attributes': {},
        'requested_attributes': {'attr1_referent': {'cred_id': prover['cred_for_attr1']['referent'], 'revealed': True}},
        'requested_predicates': {'predicate1_referent': {'cred_id': prover['cred_for_predicate1']['referent']}}
    })
    schemas_json = json.dumps({prover['schema_id']: json.loads(prover['schema'])})
    cred_defs_json = json.dumps({prover['cred_def_id']: json.loads(prover['cred_def'])})
    # Revocation disabled above, so no revocation states are supplied.
    revoc_states_json = json.dumps({})
    prover['proof'] = await anoncreds.prover_create_proof(prover['wallet'], prover['proof_req'],
                                                          prover['requested_creds'],
                                                          prover['master_secret_id'], schemas_json, cred_defs_json,
                                                          revoc_states_json)
    verifier['proof'] = prover['proof']
    # 12. Verifier verify proof
    proof = json.loads(verifier['proof'])
    assert 'Alex' == proof['requested_proof']['revealed_attrs']['attr1_referent']['raw']
    # Resolve the schema / cred-def referenced by the proof identifiers.
    identifier = proof['identifiers'][0]
    verifier['cred_def_id'] = identifier['cred_def_id']
    verifier['schema_id'] = identifier['schema_id']
    verifier['cred_def'] = store[verifier['cred_def_id']]
    verifier['schema'] = store[verifier['schema_id']]
    schemas_json = json.dumps({verifier['schema_id']: json.loads(verifier['schema'])})
    cred_defs_json = json.dumps({verifier['cred_def_id']: json.loads(verifier['cred_def'])})
    revoc_ref_defs_json = "{}"
    revoc_regs_json = "{}"
    assert await anoncreds.verifier_verify_proof(verifier['proof_req'], verifier['proof'], schemas_json, cred_defs_json,
                                                 revoc_ref_defs_json, revoc_regs_json)
    # 13. Close and delete Issuer wallet
    await wallet.close_wallet(issuer['wallet'])
    await wallet.delete_wallet(issuer['wallet_config'], issuer['wallet_credentials'])
    # 14. Close and delete Prover wallet
    await wallet.close_wallet(prover['wallet'])
    await wallet.delete_wallet(prover['wallet_config'], prover['wallet_credentials'])
    logger.info("Anoncreds sample -> completed")
if __name__ == '__main__':
    # Drive the async demo to completion on a fresh event loop.
    run_coroutine(demo)
    time.sleep(1)  # FIXME waiting for libindy thread complete
|
import gevent
import socket
from vnc_api.vnc_api import *
from cfg | m_common.vnc_kombu import VncKombuClient
from config_db import *
from cfgm_common.dep | endency_tracker import DependencyTracker
from reaction_map import REACTION_MAP
import svc_monitor
class RabbitConnection(object):
    """Rabbit (AMQP) subscriber for the service monitor.

    Listens for VNC object change notifications on a per-host kombu queue
    and applies them to the in-memory config DB, then re-evaluates every
    dependent object selected by a DependencyTracker driven by
    REACTION_MAP.
    """

    # Maps an object type to the dependent types to re-evaluate on change.
    _REACTION_MAP = REACTION_MAP

    def __init__(self, logger, args=None):
        # args: parsed options carrying the rabbit_* connection settings.
        self._args = args
        self.logger = logger

    def _connect_rabbit(self):
        """Open the kombu connection and subscribe to VNC notifications."""
        rabbit_server = self._args.rabbit_server
        rabbit_port = self._args.rabbit_port
        rabbit_user = self._args.rabbit_user
        rabbit_password = self._args.rabbit_password
        rabbit_vhost = self._args.rabbit_vhost
        rabbit_ha_mode = self._args.rabbit_ha_mode
        # Notification handling blocks on this event until the initial DB
        # resync has completed elsewhere.
        self._db_resync_done = gevent.event.Event()
        # One queue per host so multiple monitors do not consume each
        # other's messages.
        q_name = 'svc_mon.%s' % (socket.gethostname())
        self._vnc_kombu = VncKombuClient(rabbit_server, rabbit_port,
                                         rabbit_user, rabbit_password,
                                         rabbit_vhost, rabbit_ha_mode,
                                         q_name, self._vnc_subscribe_callback,
                                         self.logger.log)

    def _vnc_subscribe_callback(self, oper_info):
        """Kombu entry point; defers work until the DB resync is done.

        Exceptions are logged and swallowed so the consumer greenlet
        survives malformed or unexpected notifications.
        """
        self._db_resync_done.wait()
        try:
            self._vnc_subscribe_actions(oper_info)
        except Exception:
            svc_monitor.cgitb_error_log(self)

    def _vnc_subscribe_actions(self, oper_info):
        """Apply one CREATE/UPDATE/DELETE notification to the config DB.

        After updating the object cache, re-evaluates every resource the
        DependencyTracker marked as affected by the change.

        :param oper_info: dict with 'oper', 'type', 'uuid' and, for
            CREATE, 'obj_dict'.
        """
        # NOTE(review): pformat appears to arrive via a wildcard import
        # (vnc_api / config_db) -- confirm it is in scope.
        msg = "Notification Message: %s" % (pformat(oper_info))
        self.logger.log_debug(msg)
        # Notification types use dashes; DB type map uses underscores.
        obj_type = oper_info['type'].replace('-', '_')
        obj_class = DBBaseSM.get_obj_type_map().get(obj_type)
        if obj_class is None:
            # Not a type this monitor tracks.
            return
        if oper_info['oper'] == 'CREATE':
            obj_dict = oper_info['obj_dict']
            obj_id = oper_info['uuid']
            obj = obj_class.locate(obj_id)
            dependency_tracker = DependencyTracker(
                DBBaseSM.get_obj_type_map(), self._REACTION_MAP)
            dependency_tracker.evaluate(obj_type, obj)
        elif oper_info['oper'] == 'UPDATE':
            obj_id = oper_info['uuid']
            obj = obj_class.get(obj_id)
            old_dt = None
            if obj is not None:
                # Capture the dependency set BEFORE the update so removed
                # references are still re-evaluated afterwards.
                old_dt = DependencyTracker(
                    DBBaseSM.get_obj_type_map(), self._REACTION_MAP)
                old_dt.evaluate(obj_type, obj)
            else:
                # First sight of this object: create the cache entry.
                obj = obj_class.locate(obj_id)
            obj.update()
            dependency_tracker = DependencyTracker(
                DBBaseSM.get_obj_type_map(), self._REACTION_MAP)
            dependency_tracker.evaluate(obj_type, obj)
            if old_dt:
                # Merge pre-update dependencies into the new set.
                for resource, ids in old_dt.resources.items():
                    if resource not in dependency_tracker.resources:
                        dependency_tracker.resources[resource] = ids
                    else:
                        dependency_tracker.resources[resource] = list(
                            set(dependency_tracker.resources[resource]) |
                            set(ids))
        elif oper_info['oper'] == 'DELETE':
            obj_id = oper_info['uuid']
            obj = obj_class.get(obj_id)
            if obj is None:
                return
            # Evaluate dependencies before removing the cache entry.
            dependency_tracker = DependencyTracker(
                DBBaseSM.get_obj_type_map(), self._REACTION_MAP)
            dependency_tracker.evaluate(obj_type, obj)
            obj_class.delete(obj_id)
        else:
            # unknown operation
            self.logger.log_error('Unknown operation %s' % oper_info['oper'])
            return
        if obj is None:
            self.logger.log_error('Error while accessing %s uuid %s' % (
                obj_type, obj_id))
            return
        # Re-evaluate everything the change may have affected.
        for res_type, res_id_list in dependency_tracker.resources.items():
            if not res_id_list:
                continue
            cls = DBBaseSM.get_obj_type_map().get(res_type)
            if cls is None:
                continue
            for res_id in res_id_list:
                res_obj = cls.get(res_id)
                if res_obj is not None:
                    res_obj.evaluate()
|
i --targetname %s" % self.target
process.system(cmd)
cmd = "tgtadm --lld iscsi --op bind --mode target "
cmd += "--tid %s -I ALL" % self.emulated_id
process.system(cmd)
else:
target_strs = re.findall("Target\s+(\d+):\s+%s$" %
self.target, output, re.M)
self.emulated_id = target_strs[0].split(':')[0].split()[-1]
cmd = "tgtadm --lld iscsi --mode target --op show"
try:
output = process.system_output(cmd)
except process.CmdError: # In case service stopped
process.system("service tgtd restart")
output = process.system_output(cmd)
# Create a LUN with emulated image
if re.findall(self.emulated_image, output, re.M):
# Exist already
logging.debug("Exported image already exists.")
self.export_flag = True
else:
tgt_str = re.search(r'.*(Target\s+\d+:\s+%s\s*.*)$' % self.target,
output, re.DOTALL)
if tgt_str:
luns = len(re.findall("\s+LUN:\s(\d+)",
tgt_str.group(1), re.M))
else:
luns = len(re.findall("\s+LUN:\s(\d+)", output, re.M))
cmd = "tgtadm --mode logicalunit --op new "
cmd += "--tid %s --lld iscsi " % self.emulated_id
cmd += "--lun %s " % luns
cmd += "--backing-store %s" % self.emulated_image
process.system(cmd)
self.export_flag = True
self.luns = luns
# Restore selinux
if selinux_mode is not None:
utils_selinux.set_status(selinux_mode)
if self.chap_flag:
# Set CHAP authentication on the exported target
self.set_chap_auth_target()
# Set CHAP authentication for initiator to login target
if self.portal_visible():
self.set_chap_auth_initiator()
def delete_target(self):
"""
Delete target from host.
"""
cmd = "tgtadm --lld iscsi --mode target --op show"
output = process.system_output(cmd)
if re.findall("%s$" % self.target, output, re.M):
if self.emulated_id:
cmd = "tgtadm --lld iscsi --mode target --op delete "
cmd += "--tid %s" % self.emulated_id
process.system(cmd)
if self.restart_tgtd:
cmd = "service tgtd restart"
process.system(cmd)
class IscsiLIO(_IscsiComm):
"""
iscsi support class for LIO backend used in RHEL7.
"""
    def __init__(self, params, root_dir):
        """
        initialize LIO backend for iSCSI

        :param params: parameters dict for LIO backend of iSCSI
        :param root_dir: base directory used for emulated image files.
        """
        # All shared state is set up by the common base class.
        super(IscsiLIO, self).__init__(params, root_dir)
    def get_target_id(self):
        """
        Get target id from image name.

        Scans `targetcli ls /iscsi 1` for iqn target entries, then checks
        each target's tpg1 LUN list for this object's emulated image.

        :return: the matching iqn target string, or None when no target
            backs the emulated image.
        """
        cmd = "targetcli ls /iscsi 1"
        target_info = process.system_output(cmd)
        target = None
        for line in re.split("\n", target_info)[1:]:
            if re.findall("o-\s\S+\s[\.]+\s\[TPGs:\s\d\]$", line):
                # eg: iqn.2015-05.com.example:iscsi.disk
                try:
                    target = re.findall("iqn[\.]\S+:\S+", line)[0]
                except IndexError:
                    logging.info("No found target in %s", line)
                    continue
            else:
                # Not a target header line; skip it.
                continue
            cmd = "targetcli ls /iscsi/%s/tpg1/luns" % target
            luns_info = process.system_output(cmd)
            for lun_line in re.split("\n", luns_info):
                if re.findall("o-\slun\d+", lun_line):
                    if self.emulated_image in lun_line:
                        # This target backs our image; keep it.
                        break
            else:
                # for-else: no LUN matched the image -- reset and keep
                # scanning the remaining targets.
                target = None
        return target
def set_chap_acls_target(self):
"""
set CHAP(acls) authentication on a target.
it will require authentication
before an initiator is allowed to log in and access devices.
notice:
Individual ACL entries override common TPG Authentication,
which can be set by set_chap_auth_target().
"""
# Enable ACL nodes
acls_cmd = "targetcli /iscsi/%s/tpg1/ " % self.target
attr_cmd = "set attribute generate_node_acls=0"
process.system(acls_cmd + attr_cmd)
# Create user and allow access
acls_cmd = ("targetcli /iscsi/%s/tpg1/acls/ create %s:client"
% (self.target, self.target.split(":")[0]))
output = process.system_output(acls_cmd)
if "Created Node ACL" not in output:
raise exceptions.TestFail("Failed to create ACL. (%s)" % output)
comm_cmd = ("targetcli /iscsi/%s/tpg1/acls/%s:client/"
% (self.target, self.target.split(":")[0]))
# Set userid
userid_cmd = "%s set auth userid=%s" % (comm_cmd, self.chap_user)
output = process.system_output(userid_cmd)
if self.chap_user not in output:
raise exceptions.TestFail("Failed to set user. (%s)" % output)
# Set password
passwd_cmd = "%s set auth password=%s" % (comm_cmd, self.chap_passwd)
output = process.system_output(passwd_cmd)
if self.chap_passwd not in output:
raise exceptions.TestFail("Failed to set password. (%s)" % output)
# Save configuration
process.system("targetcli / saveconfig")
def set_chap_auth_target(self):
"""
set up authentication information for every single initiator,
which provides the capability to define common login information
for all Endpoints in a TPG
"""
auth_cmd = "targetcli /iscsi/%s/tpg1/ " % self.target
attr_cmd = ("set attribute %s %s %s" %
("demo_mode_write_protect=0",
"generate_node_acls=1",
"cache_dynamic_acls=1"))
process.system(auth_cmd + attr_cmd)
# Set userid
userid_cmd = "%s set auth userid=%s" % (auth_cmd, self.chap_user)
output = process.system_output(userid_cmd)
if self.chap_user not in output:
raise exceptions.TestFail("Failed to set user. (%s)" % output)
# Set password
passwd_cmd = "%s set auth password=%s" % (auth_cmd, self.chap_passwd)
output = process.system_output(passwd_cmd)
if self.chap_passwd not in output:
raise exceptions.TestFail("Failed to set password. (%s)" % output)
# Save configuration
p | rocess.system("targetcli / saveconfig")
def export_target(self):
"""
Export target in localhost for emulated iscsi
"""
selinux_mode = None
# create image disk
if not os.path.isfile(self.emulated_image):
process.system(self.create_cmd)
else:
emulated_image_size = os.path.getsize(self.emulated_image) / 1024
if emulated_image_siz | e != self.emulated_expect_size:
# No need to remvoe, rebuild is fine
process.system(self.create_cmd)
# confirm if the target exists and create iSCSI target
cmd = "targetcli ls /iscsi 1"
output = process.system_output(cmd)
if not re.findall("%s$" % self.target, output, re.M):
logging.debug("Need to export target in host")
# Set selinux to permissive mode to make sure
# iscsi target export successfully
if utils_selinux.is_enforcing():
selinux_mode = utils_selinux.get_status()
utils_selinux.set_status("permissive")
# In fact, We've got two options here
#
# 1) Create a block backstore that usually provides the best
# performance. We can use a block device like /dev/sdb or
# a logical volume previously created,
# (lvcreate -name lv_iscsi -size 1G vg)
# 2) Create a fileio backstore,
# which enables the loca |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the projec | t root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause in | correct behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
from msrest.exceptions import HttpOperationError
class Error(Model):
    """Cognitive Services error object.

    :param error: The error body.
    :type error: :class:`ErrorBody
     <azure.mgmt.cognitiveservices.models.ErrorBody>`
    """

    # Maps the constructor argument onto its wire-format key and type for
    # msrest (de)serialization.
    _attribute_map = {'error': {'key': 'error', 'type': 'ErrorBody'}}

    def __init__(self, error=None):
        self.error = error
class ErrorException(HttpOperationError):
    """Server responded with exception of type: 'Error'.

    :param deserialize: A deserializer
    :param response: Server response to be deserialized.
    """

    def __init__(self, deserialize, response, *args):
        # Always deserialize the failing response as the 'Error' model.
        super(ErrorException, self).__init__(
            deserialize, response, 'Error', *args)
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import sqlalchemy as sql
def upgrade(migrate_engine):
    """Create the initial schema: catalog, identity and token tables.

    Creation order matters: tables referenced by foreign keys ('service',
    'user', 'tenant') are created before the tables that reference them.
    """
    # Upgrade operations go here. Don't create your own engine; bind
    # migrate_engine to your metadata
    meta = sql.MetaData()
    meta.bind = migrate_engine
    # catalog
    service_table = sql.Table(
        'service',
        meta,
        sql.Column('id', sql.String(64), primary_key=True),
        sql.Column('type', sql.String(255)),
        sql.Column('extra', sql.Text()))
    service_table.create(migrate_engine, checkfirst=True)
    # 'endpoint' has a FK to 'service', which was created just above.
    endpoint_table = sql.Table(
        'endpoint',
        meta,
        sql.Column('id', sql.String(64), primary_key=True),
        sql.Column('region', sql.String(255)),
        sql.Column('service_id',
                   sql.String(64),
                   sql.ForeignKey('service.id'),
                   nullable=False),
        sql.Column('extra', sql.Text()))
    endpoint_table.create(migrate_engine, checkfirst=True)
    # identity
    role_table = sql.Table(
        'role',
        meta,
        sql.Column('id', sql.String(64), primary_key=True),
        sql.Column('name', sql.String(255), unique=True, nullable=False))
    role_table.create(migrate_engine, checkfirst=True)
    if migrate_engine.name == 'ibm_db_sa':
        # NOTE(blk-u): SQLAlchemy for PostgreSQL picks the name tenant_name_key
        # for the unique constraint, but for DB2 doesn't give the UC a name
        # unless we tell it to and there is no DDL to alter a column to drop
        # an unnamed unique constraint, so this code creates a named unique
        # constraint on the name column rather than an unnamed one.
        # (This is used in migration 16.)
        tenant_table = sql.Table(
            'tenant',
            meta,
            sql.Column('id', sql.String(64), primary_key=True),
            sql.Column('name', sql.String(64), nullable=False),
            sql.Column('extra', sql.Text()),
            sql.UniqueConstraint('name', name='tenant_name_key'))
    else:
        tenant_table = sql.Table(
            'tenant',
            meta,
            sql.Column('id', sql.String(64), primary_key=True),
            sql.Column('name', sql.String(64), unique=True, nullable=False),
            sql.Column('extra', sql.Text()))
    tenant_table.create(migrate_engine, checkfirst=True)
    # Composite primary key: one metadata row per (user, tenant) pair.
    metadata_table = sql.Table(
        'metadata',
        meta,
        sql.Column('user_id', sql.String(64), primary_key=True),
        sql.Column('tenant_id', sql.String(64), primary_key=True),
        sql.Column('data', sql.Text()))
    metadata_table.create(migrate_engine, checkfirst=True)
    ec2_credential_table = sql.Table(
        'ec2_credential',
        meta,
        sql.Column('access', sql.String(64), primary_key=True),
        sql.Column('secret', sql.String(64)),
        sql.Column('user_id', sql.String(64)),
        sql.Column('tenant_id', sql.String(64)))
    ec2_credential_table.create(migrate_engine, checkfirst=True)
    if migrate_engine.name == 'ibm_db_sa':
        # NOTE(blk-u): SQLAlchemy for PostgreSQL picks the name user_name_key
        # for the unique constraint, but for DB2 doesn't give the UC a name
        # unless we tell it to and there is no DDL to alter a column to drop
        # an unnamed unique constraint, so this code creates a named unique
        # constraint on the name column rather than an unnamed one.
        # (This is used in migration 16.)
        user_table = sql.Table(
            'user',
            meta,
            sql.Column('id', sql.String(64), primary_key=True),
            sql.Column('name', sql.String(64), nullable=False),
            sql.Column('extra', sql.Text()),
            sql.UniqueConstraint('name', name='user_name_key'))
    else:
        user_table = sql.Table(
            'user',
            meta,
            sql.Column('id', sql.String(64), primary_key=True),
            sql.Column('name', sql.String(64), unique=True, nullable=False),
            sql.Column('extra', sql.Text()))
    user_table.create(migrate_engine, checkfirst=True)
    # Join table with FKs to both 'user' and 'tenant' (created above).
    user_tenant_membership_table = sql.Table(
        'user_tenant_membership',
        meta,
        sql.Column(
            'user_id',
            sql.String(64),
            sql.ForeignKey('user.id'),
            primary_key=True),
        sql.Column(
            'tenant_id',
            sql.String(64),
            sql.ForeignKey('tenant.id'),
            primary_key=True))
    user_tenant_membership_table.create(migrate_engine, checkfirst=True)
    # token
    token_table = sql.Table(
        'token',
        meta,
        sql.Column('id', sql.String(64), primary_key=True),
        sql.Column('expires', sql.DateTime()),
        sql.Column('extra', sql.Text()))
    token_table.create(migrate_engine, checkfirst=True)
def downgrade(migrate_engine):
    """Drop every table created by upgrade(), children before parents."""
    meta = sql.MetaData()
    meta.bind = migrate_engine
    # Order matters: tables holding foreign keys are dropped first.
    for name in ('user_tenant_membership', 'token', 'user', 'tenant', 'role',
                 'metadata', 'ec2_credential', 'endpoint', 'service'):
        sql.Table(name, meta, autoload=True).drop(migrate_engine,
                                                  checkfirst=True)
|
8')
return value
def unicode_http_header(value):
    """Return `value` as text, decoding bytes as ISO-8859-1 (HTTP default)."""
    if not isinstance(value, six.binary_type):
        return value
    return value.decode('iso-8859-1')
def total_seconds(timedelta):
    """Return the duration in seconds as a float.

    timedelta.total_seconds() only exists from Python 2.7 on; fall back to
    computing it from days/seconds/microseconds on older interpreters.
    """
    try:
        return timedelta.total_seconds()
    except AttributeError:
        return (86400.0 * timedelta.days
                + float(timedelta.seconds)
                + timedelta.microseconds / 1000000.0)
def distinct(queryset, base):
    """Return a de-duplicated queryset, working around Oracle's DISTINCT."""
    engine = settings.DATABASES[queryset.db]["ENGINE"]
    if engine != "django.db.backends.oracle":
        return queryset.distinct()
    # Oracle: emulate DISTINCT by re-filtering on the unique set of pks.
    pks = set(queryset.values_list('pk', flat=True))
    return base.filter(pk__in=pks)
# Obtaining manager instances and names from model | options differs after 1.10.
def get_names_and_managers(options):
    """Return (manager_name, manager) pairs for a model's Options.

    Django >= 1.10 exposes managers via the `.managers` property; Django
    1.8/1.9 expose three-tuples on .concrete_managers/.abstract_managers.
    """
    if django.VERSION >= (1, 10):
        return [(manager.name, manager) for manager in options.managers]
    pairs = []
    for manager_info in options.concrete_managers + options.abstract_managers:
        # Tuple layout is (creation_counter, name, manager).
        pairs.append((manager_info[1], manager_info[2]))
    return pairs
# field.rel is deprecated from 1.9 onwards
def get_remote_field(field, **kwargs):
    """Return field.rel (Django < 1.9) or field.remote_field (>= 1.9).

    Pass default=<value> to get getattr-style fallback behaviour instead of
    an AttributeError when the attribute is missing.
    """
    attr = 'rel' if django.VERSION < (1, 9) else 'remote_field'
    if 'default' in kwargs:
        return getattr(field, attr, kwargs['default'])
    return getattr(field, attr)
def _resolve_model(obj):
    """Resolve supplied `obj` to a Django model class.

    `obj` must be a Django model class itself, or a string of the form
    'appname.ModelName'. Useful in situations like GH #1225 where Django
    may not have resolved a string-based reference to a model in another
    model's foreign key definition.
    """
    # A model class passes straight through (disjoint from the string case).
    if inspect.isclass(obj) and issubclass(obj, models.Model):
        return obj
    if isinstance(obj, six.string_types) and len(obj.split('.')) == 2:
        app_name, model_name = obj.split('.')
        model = apps.get_model(app_name, model_name)
        if model is None:
            raise ImproperlyConfigured(
                "Django did not return a model for {0}.{1}".format(
                    app_name, model_name))
        return model
    raise ValueError("{0} is not a Django model".format(obj))
def is_authenticated(user):
    """Read .is_authenticated as method (< 1.10) or property (>= 1.10)."""
    attr = user.is_authenticated
    return attr() if django.VERSION < (1, 10) else attr
def is_anonymous(user):
    """Read .is_anonymous as method (< 1.10) or property (>= 1.10)."""
    attr = user.is_anonymous
    return attr() if django.VERSION < (1, 10) else attr
def get_related_model(field):
    """Return the model class a relation field points to, across versions."""
    if django.VERSION >= (1, 9):
        return field.remote_field.model
    # Pre-1.9, field.rel.to may still be a string reference; resolve it.
    return _resolve_model(field.rel.to)
def value_from_object(field, obj):
    """Version-portable wrapper for Field.value_from_object."""
    if django.VERSION >= (1, 9):
        return field.value_from_object(obj)
    return field._get_val_from_obj(obj)
# contrib.postgres only supported from 1.8 onwards.
try:
from django.contrib.postgres import fields as postgres_fields
except ImportError:
postgres_fields = None
# JSONField is only supported from 1.9 onwards
try:
from django.contrib.postgres.fields import JSONField
except ImportError:
JSONField = None
# coreapi is optional (Note that uritemplate is a dependency of coreapi)
try:
import coreapi
import uritemplate
except (ImportError, SyntaxError):
# SyntaxError is possible under python 3.2
coreapi = None
uritemplate = None
# coreschema is optional
try:
import coreschema
except ImportError:
coreschema = None
# django-filter is optional
try:
import django_filters
except ImportError:
django_filters = None
# django-crispy-forms is optional
try:
import crispy_forms
except ImportError:
crispy_forms = None
# requests is optional
try:
import requests
except ImportError:
requests = None
# Django-guardian is optional. Import only if guardian is in INSTALLED_APPS
# Fixes (#1712). We keep the try/except for the test suite.
guardian = None
try:
if 'guardian' in settings.INSTALLED_APPS:
import guardian
except ImportError:
pass
# PATCH method is not implemented by Django
# Extend the class-level whitelist with a new list (not in-place append, so
# subclasses that already copied the list are unaffected).
if 'patch' not in View.http_method_names:
    View.http_method_names = View.http_method_names + ['patch']
# Markdown is optional
try:
    import markdown
    # Pick the header-extension module path and its base-level parameter
    # name for the installed Markdown version; both were renamed over time.
    # NOTE(review): these are plain string comparisons of version numbers,
    # not semantic version checks.
    if markdown.version <= '2.2':
        HEADERID_EXT_PATH = 'headerid'
        LEVEL_PARAM = 'level'
    elif markdown.version < '2.6':
        HEADERID_EXT_PATH = 'markdown.extensions.headerid'
        LEVEL_PARAM = 'level'
    else:
        HEADERID_EXT_PATH = 'markdown.extensions.toc'
        LEVEL_PARAM = 'baselevel'
    def apply_markdown(text):
        """
        Simple wrapper around :func:`markdown.markdown` to set the base level
        of '#' style headers to <h2>.
        """
        extensions = [HEADERID_EXT_PATH]
        extension_configs = {
            HEADERID_EXT_PATH: {
                LEVEL_PARAM: '2'
            }
        }
        md = markdown.Markdown(
            extensions=extensions, extension_configs=extension_configs
        )
        return md.convert(text)
except ImportError:
    # markdown not installed: callers must check these for None.
    apply_markdown = None
    markdown = None
try:
    import pygments
    from pygments.lexers import get_lexer_by_name
    from pygments.formatters import HtmlFormatter
    def pygments_highlight(text, lang, style):
        # Highlight `text` as language `lang` with the named pygments style.
        lexer = get_lexer_by_name(lang, stripall=False)
        formatter = HtmlFormatter(nowrap=True, style=style)
        return pygments.highlight(text, lexer, formatter)
    def pygments_css(style):
        # CSS rules for the given style, scoped under the .highlight class.
        formatter = HtmlFormatter(style=style)
        return formatter.get_style_defs('.highlight')
except ImportError:
    # pygments missing: fall back to pass-through / no-CSS implementations.
    pygments = None
    def pygments_highlight(text, lang, style):
        return text
    def pygments_css(style):
        return None
try:
import pytz
from pytz.exceptions import InvalidTimeError
except ImportError:
InvalidTimeError = Exception
# `separators` argument to `json.dumps()` differs between 2.x and 3.x
# See: http://bugs.python.org/issue22767
# Python 3 expects str separators; Python 2 expects bytes.
if six.PY3:
    SHORT_SEPARATORS = (',', ':')
    LONG_SEPARATORS = (', ', ': ')
    INDENT_SEPARATORS = (',', ': ')
else:
    SHORT_SEPARATORS = (b',', b':')
    LONG_SEPARATORS = (b', ', b': ')
    INDENT_SEPARATORS = (b',', b': ')
try:
# DecimalValidator is unavailable in Django < 1.9
from django.core.validators import DecimalValidator
except ImportError:
DecimalValidator = None
def set_rollback():
    """Mark the current transaction for rollback, across Django versions."""
    if hasattr(transaction, 'set_rollback'):
        # Django >= 1.6: flag the atomic block and let Django roll it back.
        if (connection.settings_dict.get('ATOMIC_REQUESTS', False)
                and connection.in_atomic_block):
            transaction.set_rollback(True)
    elif transaction.is_managed():
        # Legacy managed-transaction API: roll back explicitly.
        if transaction.is_dirty():
            transaction.rollback()
        transaction.leave_transaction_management()
    # else: transaction not managed -- nothing to do.
def template_render(template, context=None, request=None):
"""
Passing Context or RequestContext to Template.render is deprecated in 1.9+,
see https://github.com/django/django/pull/3883 and
https://github.com/django/django/blob/1.9/django/template/backends/django.py#L82-L84
:param template: Template instance
:param context: dict
:param request: Request instance
:return: rendered template as SafeText instance
"""
if isinstance(template, Template):
if request:
context = RequestContext(request, context)
else:
context = Context(context)
return template.render(context)
# backends template, e.g. django.template.backends.django.Template
|
""" generic tests from the Datetimelike class """
import numpy as np
import pandas as pd
from pandas.util import testing as tm
from pandas import Series, Index, DatetimeIndex, date_range
from ..datetimelike import DatetimeLike
class TestDatetimeIndex(DatetimeLike):
    """Run the generic Datetimelike test suite against DatetimeIndex."""

    _holder = DatetimeIndex

    def setup_method(self, method):
        # One increasing and one decreasing daily index fixture.
        self.indices = {'index': tm.makeDateIndex(10),
                        'index_dec': date_range('20130110', periods=10,
                                                freq='-1D')}
        self.setup_indices()

    def create_index(self):
        return date_range('20130101', periods=5)

    def test_shift(self):
        # shift works for DatetimeIndex as well as for generic indexes
        # GH8083
        idx = self.create_index()

        tm.assert_index_equal(
            idx.shift(1),
            DatetimeIndex(['2013-01-02', '2013-01-03', '2013-01-04',
                           '2013-01-05', '2013-01-06'], freq='D'))
        tm.assert_index_equal(
            idx.shift(-1),
            DatetimeIndex(['2012-12-31', '2013-01-01', '2013-01-02',
                           '2013-01-03', '2013-01-04'], freq='D'))
        tm.assert_index_equal(
            idx.shift(3, freq='2D'),
            DatetimeIndex(['2013-01-07', '2013-01-08', '2013-01-09',
                           '2013-01-10', '2013-01-11'], freq='D'))

    def test_pickle_compat_construction(self):
        # Overridden: the generic pickle-construction test does not apply.
        pass

    def test_intersection(self):
        first = self.index
        second = self.index[5:]
        assert tm.equalContents(first.intersection(second), second)

        # GH 10149: intersection must also accept array, Series and list.
        for wrap in (np.array, Series, list):
            result = first.intersection(wrap(second.values))
            assert tm.equalContents(result, second)

        # Intersecting with a disjoint object Index yields an empty Index.
        result = first.intersection(Index(['a', 'b', 'c']))
        tm.assert_index_equal(result, pd.Index([], dtype=object))

    def test_union(self):
        first = self.index[:5]
        second = self.index[5:]
        everything = self.index
        assert tm.equalContents(first.union(second), everything)

        # GH 10149: union must also accept array, Series and list.
        for wrap in (np.array, Series, list):
            result = first.union(wrap(second.values))
            assert tm.equalContents(result, everything)
|
# -*- coding: utf-8 -*-
#---------------------------- | --------------------------------------------------
# Copyright (c) 2012-2014, Michael Reuter
# Distributed under the MIT License. See LICENSE.txt for more information.
#------------------------------------------------------------------------------
from .moon_info import MoonInfo
from .observing_site import Obser | vingSite
class ObservingInfo(object):
    '''
    This class is responsible for keeping the observing site information and
    the moon information object together. It will be responsible for updating
    any of the observing site information that then affects the moon
    information.

    Implemented as a Borg: all instances share the same state dictionary,
    so every ObservingInfo object sees the same site and moon objects.
    '''
    # Single state dict shared by every instance (created at import time).
    __shared_state = {"obs_site": ObservingSite(),
                      "moon_info": MoonInfo()}
    def __init__(self):
        '''
        Constructor: attach this instance to the shared Borg state.
        '''
        self.__dict__ = self.__shared_state
    def update(self):
        # Recompute the moon information from the current observing site.
        self.moon_info.compute(self.obs_site.getObserver())
if __name__ == "__main__":
    # Smoke test (Python 2 print statements): demonstrates that two
    # ObservingInfo instances share the same Borg state.
    oi = ObservingInfo()
    oi.update()
    print oi.obs_site
    import time
    time.sleep(2)
    oi2 = ObservingInfo()
    oi2.update()
    print oi2.obs_site
import os
import platform
import sys
import threading
from concurrent.futures import ThreadPoolExecutor
from os import environ, path
from threading import Timer
import grpc
import ptvsd
from getgauge import handlers, logger, processor
from getgauge.impl_loader import copy_skel_files
from getgauge.messages import runner_pb2_grpc
from getgauge.static_loader import load_files
from getgauge.util import get_step_impl_dirs
# Plugin manifest file name, the manifest key for its version, and the
# marker line printed when the runner is ready for a debugger to attach
# (presumably watched by the gauge tooling -- verify against consumers).
PLUGIN_JSON = 'python.json'
VERSION = 'version'
ATTACH_DEBUGGER_EVENT = 'Runner Ready for Debugging'
def main():
    """Entry point for the gauge Python runner.

    With ``--init`` as the first CLI argument, copies the skeleton project
    files; otherwise loads step implementations and starts the gRPC server.
    Note: reads sys.argv[1] directly, so one argument is required.
    """
    logger.info("Python: {}".format(platform.python_version()))
    if sys.argv[1] == "--init":
        # Fixed typo in the debug message ("Initilizing").
        logger.debug("Initializing gauge project.")
        copy_skel_files()
    else:
        load_implementations()
        start()
def load_implementations():
    """Load step implementations from every configured step-impl directory.

    Logs an error for each configured directory that does not exist, then
    loads whatever files are present in the configured directories.
    """
    impl_dirs = get_step_impl_dirs()
    # Fixed typo in the debug message ("implemetations").
    logger.debug(
        "Loading step implementations from {} dirs.".format(', '.join(impl_dirs)))
    for impl_dir in impl_dirs:
        if not path.exists(impl_dir):
            logger.error('can not load implementations from {}. {} does not exist.'.format(
                impl_dir, impl_dir))
    load_files(impl_dirs)
def _handle_detached():
    # Timer callback: no debugger attached within the wait window, so stop
    # the whole runner process immediately (bypasses cleanup on purpose).
    logger.info("No debugger attached. Stopping the execution.")
    os._exit(1)
def start():
    """Optionally wait for a debugger, then serve the runner over gRPC."""
    if environ.get('DEBUGGING'):
        # Advertise the debug endpoint and give the client a limited window
        # to attach; _handle_detached kills the process on timeout.
        ptvsd.enable_attach(address=(
            '127.0.0.1', int(environ.get('DEBUG_PORT'))))
        print(ATTACH_DEBUGGER_EVENT)
        watchdog = Timer(int(environ.get("debugger_wait_time", 30)),
                         _handle_detached)
        watchdog.start()
        ptvsd.wait_for_attach()
        watchdog.cancel()
    logger.debug('Starting grpc server..')
    # Single worker: the runner executes steps sequentially.
    server = grpc.server(ThreadPoolExecutor(max_workers=1))
    port = server.add_insecure_port('127.0.0.1:0')
    handler = handlers.RunnerServiceHandler(server)
    runner_pb2_grpc.add_RunnerServicer_to_server(handler, server)
    logger.info('Listening on port:{}'.format(port))
    server.start()
    # Block until the kill event arrives, then exit hard.
    listener = threading.Thread(
        name="listener", target=handler.wait_for_kill_event)
    listener.start()
    listener.join()
    os._exit(0)
# Run the gauge Python runner when invoked as a script.
if __name__ == '__main__':
    main()
|
impo | rt tests.model_control.test_ozone_custom_models_enabled as testmod
testmod.build_model( ['Logit'] , ['MovingAverage'] , ['Seasonal_WeekOfYear'] , | ['NoAR'] ); |
import argparse
import taichi as ti
# Number of frames simulated/rendered by the functions below.
FRAMES = 100
def tes | t_ad_gravity():
from taichi.examples.simulation.ad_gravity import init, substep
init()
for _ in range(FRAMES):
for _ in range(50):
substep()
def video_ad_gravity(result_dir):
    """Render FRAMES frames of the ad_gravity example into `result_dir`."""
    import numpy as np
    from taichi.examples.simulation.ad_gravity import init, substep, x
    recorder = ti.tools.VideoManager(output_dir=result_dir,
                                     framerate=24,
                                     automatic_build=False)
    window = ti.GUI('Autodiff gravity', show_gui=False)
    init()
    for _ in range(FRAMES):
        # Advance the simulation, then rasterize the particles off-screen.
        for _ in range(50):
            substep()
        window.circles(x.to_numpy(), radius=3)
        recorder.write_frame(window.get_image())
        window.clear()
    recorder.make_video(mp4=True, gif=False)
if __name__ == '__main__':
    # CLI: one positional argument naming the output directory for the video.
    parser = argparse.ArgumentParser(description='Generate ad_gravity video')
    parser.add_argument('output_directory',
                        help='output directory of generated video')
    video_ad_gravity(parser.parse_args().output_directory)
|
"""This script automates the copying of the default keymap into your own keymap.
"""
import shutil
from pathlib import Path
import qmk.path
from qmk.decorators import automagic_keyboard, automagic_keymap
from milc import cli
@cli.argument('-kb', '--keyboard', help='Specify keyboard name. Example: 1upkeyboards/1up60hse')
@cli.argument('-km', '--keymap', help='Specify the name for the new keymap directory')
@cli.subcommand('Creates a new keymap for the keyboard of your choosing')
@automagic_keyboard
@automagic_keymap
def new_keymap(cli):
    """Creates a new keymap for the keyboard of your choosing.
    """
    # Fall back to interactive prompts for anything missing from the CLI.
    keyboard = cli.config.new_keymap.keyboard or input("Keyboard Name: ")
    keymap = cli.config.new_keymap.keymap or input("Keymap Name: ")

    # Work out the source (default) and destination keymap directories.
    kb_path = Path('keyboards') / keyboard
    keymap_path = qmk.path.keymap(keyboard)
    keymap_path_default = keymap_path / 'default'
    keymap_path_new = keymap_path / keymap

    # Guard clauses: reject anything that would make the copy invalid.
    if not kb_path.exists():
        cli.log.error('Keyboard %s does not exist!', kb_path)
        return False

    if not keymap_path_default.exists():
        cli.log.error('Keyboard default %s does not exist!', keymap_path_default)
        return False

    if keymap_path_new.exists():
        cli.log.error('Keymap %s already exists!', keymap_path_new)
        return False

    # Seed the new keymap directory from the default keymap.
    shutil.copytree(keymap_path_default, keymap_path_new, symlinks=True)

    # Tell the user what was created and how to use it.
    cli.log.info("%s keymap directory created in: %s", keymap, keymap_path_new)
    cli.log.info("Compile a firmware with your new keymap by typing: \n\n\tqmk compile -kb %s -km %s\n", keyboard, keymap)
|
egg\Z')
    def __new__(cls, state, bssid, ssid='', regdtm='19000101000000', rssi=-200, bregap=False, bmap=False, optrcom='none', geoloc=None, priority=Priority.NORMAL):
        # Classify WiFi: strip quotes from the SSID, detect hotspots
        # (demoted to LOW priority), registered-operator APs and mobile APs,
        # then escape the SSID for SQL use. Python 2 module (unicode,
        # `except Exception, e` syntax).
        try:
            if ssid not in ('', None):
                # Remove one pair of surrounding double quotes, if present.
                ssid = re.sub(r'^\s*"(.*)"\s*$', r'\1', unicode(ssid))
                if ssid.find('"') >= 0:
                    log.error("!!! SSID - %s" % ssid)
                if cls.isHotspot(ssid):
                    priority = Priority.LOW
                else:
                    optrcom = cls.getWiFiOperator(ssid)
                    bregap = True if optrcom != 'none' else False
                    if not bregap:
                        bmap = cls.isMobile(ssid)
                try:
                    # Escape for direct embedding into SQL (see addWiFi).
                    ssid = MySQLdb.escape_string(unicode(ssid).encode('utf-8'))
                except Exception, e:
                    # Non-ascii data.
                    log.warn("SSID MySQLdb.escape_string Error - %s, %s" % (ssid, e))
            if not geoloc:
                geoloc = GeoInfo()
        except Exception, e:
            log.error(e)
            log.error('BSSID - %s, SSID - %s' % (bssid, ssid))
            exc_type, exc_value, exc_traceback = sys.exc_info()
            log.error(traceback.format_exception(exc_type, exc_value, exc_traceback))
            raise e
        # NOTE(review): rssi and regdtm are passed in swapped order relative
        # to this signature -- presumably it matches the namedtuple field
        # order declared above; verify against the class declaration.
        return super(WiFiNode, cls).__new__(cls, state, bssid, ssid, rssi, regdtm, bregap, bmap, optrcom, geoloc, priority)
    @classmethod
    def isHotspot(cls, ssid):
        # True when `ssid` matches any known hotspot pattern (case-
        # insensitive, anchored at the start). Returns None (falsy) rather
        # than False when there is no match.
        patt = r'%s' % '|'.join(cls.__hotspotApSSIDPattern)
        if re.match(patt, ssid, re.IGNORECASE):
            #log.info("%s - Hotspot SSID, drop this AP" % ssid)
            return True
    @classmethod
    def getWiFiOperator(cls, ssid):
        # Return the provider key whose registered-AP SSID patterns match,
        # or 'none' when no provider pattern matches.
        for provider in cls.__registerdApSSIDPattern.keys():
            patt = r'%s' % '|'.join(cls.__registerdApSSIDPattern[provider])
            if re.match(patt, ssid, re.IGNORECASE):
                #log.info("Registered SSID - %s" % ssid)
                return provider
        return 'none'
    @classmethod
    def isMobile(cls, ssid):
        # True when `ssid` matches any mobile-AP pattern. Note: uses
        # re.search (match anywhere), unlike isHotspot which uses re.match.
        patt = r'%s' % '|'.join(cls.__mobileApSSIDPattern)
        if re.search(patt, ssid, re.IGNORECASE):
            #log.info("Mobile AP - %s" % ssid)
            return True
        return False
class CellNode(collections.namedtuple('_CellNode', 'state, cellid, plmnid, cid, lac, celltype, regdtm, geoloc, priority')):
    """Immutable record of one cell-tower observation.

    `cellid` is the composite '<plmnid>_<cid>_<lac>' string; it is split
    into its components on construction.
    """
    def __new__(cls, state, cellid, celltype=0, regdtm='19000101000000', geoloc=None, priority=Priority.NORMAL):
        # add default values
        try:
            plmnid, cid, lac = cellid.split('_')
            # guard from invalid data
            if len(plmnid) > 6 or int(plmnid) == 0:
                plmnid = '0'
            if not geoloc:
                geoloc = GeoInfo()
        except Exception, e:
            # NOTE(review): Python 2 `raise e` discards the original
            # traceback; a bare `raise` would preserve it.
            raise e
        return super(CellNode, cls).__new__(cls, state, cellid, plmnid, cid, lac, celltype, regdtm, geoloc, priority)
def addWiFi(cursor, node):
strSql = """INSERT INTO
apmain.apinfo (bssid, ssid, regdtm, bregap, bmap, lat, lng, acc, geosrc, optrcom, seq)
VALUES('%s','%s','%s','%d','%d','%f','%f','%d',' | %s','%s','%s')
ON DUPLICATED UPDATE
lat = IF(VALUES(seq) > seq, VALUES(lat), | lat),
lng = IF(VALUES(seq) > seq, VALUES(lng), lng),
seq = IF(VALUES(seq) > seq, VALUES(seq), seq),
acc = IF(VALUES(seq) > seq, VALUES(acc), acc),
geosrc=VALUES(geosrc)"""
try:
strSql = strSql % (node.bssid, node.ssid, node.regdtm, int(node.bregap), int(node.bmap), node.geoloc.lat, node.geoloc.lng, node.geoloc.acc, node.geoloc.geosrc, node.optrcom, node.rssi)
except Exception, e:
log.error("SQL GEN ERR - %s" % bytes(node.ssid))
strSql = strSql % (node.bssid, '', node.regdtm, int(node.bregap), int(node.bmap), node.geoloc.lat, node.geoloc.lng, node.geoloc.acc, node.geoloc.geosrc, node.optrcom, node.rssi)
try:
cursor.execute(strSql)
log.debug("INSERT - %s" % node.bssid)
except Exception, e:
# Duplicate entry error
if e[0] != 1062:
log.error(e)
log.error(strSql)
return False
return True
# Radio technology -> numeric code (used for CellNode.celltype validation).
netTypeCode = {'gsm':1, 'cdma':2, 'lte':3}
def addCellTower(cursor, node):
strSql = """INSERT INTO
apmain.cellinfo (fullid, plmnid, cellid, lac, celltype, regdtm, lat, lng, acc, geosrc, seq)
VALUES('%s','%s','%s','%s','%d','%s','%s','%f','%f','%s', '1')
ON DUPLICATED UPDATE lat=((lat*seq)+VALUES(lat))/(seq+1), lng=((lng*seq)+VALUES(lng))/(seq+1), seq=seq+1, geosrc=VALUES(geosrc)"""
try:
strSql = strSql % (node.cellid, node.plmnid, node.cid, node.lac, 0, node.regdtm, node.geoloc.lat, node.geoloc.lng, node.geoloc.acc, 'cellLoc' if node.geoloc.from_cell else node.geoloc.geosrc)
cursor.execute(strSql)
log.debug("INSERT - %s" % node.cellid)
except Exception, e:
# Duplicate entry error
if e[0] != 1062:
log.error(e)
log.error(strSql)
return False
return True
class ProcessNetworkNode(object):
OW_TASK_SUBSCRIBE_EVENTS = ['evtPlayerLog', 'evtNetworkLog']
OW_TASK_PUBLISH_EVENTS = []
OW_USE_HASHING = False
OW_HASH_KEY = None
OW_NUM_WORKER = 8
    def publishEvent(self, event, params):
        # Placeholder: the hosting framework overrides this method at
        # runtime. Do not add logic here.
        pass
    def __makeCellId(self, plmnid, cid, lac):
        # Build the canonical '<plmnid>_<cid>_<lac>' id string. Returns
        # None when any component is zero/non-numeric or plmnid exceeds
        # 6 digits. Python 2: map() returns a list, so len()/indexing work.
        try:
            cellId = map(lambda x: str(x) if str(x).isdigit() else '0', [plmnid, cid, lac])
            if 0 not in map(int, cellId) and len(cellId[0]) < 7:
                return '_'.join(cellId)
        except Exception, e:
            log.error(e)
        return None
    def extractNetworkNode(self, params):
        # Build a UniqueList of WiFiNode/CellNode objects from one log event.
        # net info structure
        # wifi : 'wifi', status, ssid, bssid
        # cell : 'cell', status, celltower id
        # status : 'active' for current network, 'inactive' for logged network
        timestamp = time.strftime('%Y%m%d%H%M%S', time.gmtime(params['tTM']))
        # NOTE(review): logType is computed but not used in this method.
        logType = params.get('log_type', 'unknown')
        netList = UniqueList()
        # De-duplicate by cell id for cells and by BSSID for WiFi nodes.
        netList.setKey(key=lambda x: x.cellid if isinstance(x, CellNode) else x.bssid)
        if 'lat' in params and 'lng' in params:
            geoloc = GeoInfo(lat=params.get('lat'), lng=params.get('lng'), acc=params.get('accuracy', 500), geosrc='device')
        else:
            geoloc = None
        # APAT Header fields
        try:
            if 'pwf' in params:
                pwf = params['pwf']
                # Only accept entries whose BSSID is a valid MAC address.
                if 'bssid' in pwf and EthAddrType.isEthAddr(pwf['bssid']):
                    node = WiFiNode(state='active', bssid=pwf['bssid'], ssid=pwf.get('ssid', ''), regdtm=timestamp, geoloc=geoloc)
                    netList.addSet(node)
        except Exception, e:
            log.error(e)
            log.error(params)
        try:
            if 'pcell_list' in params and isinstance(params['pcell_list'], list) and len(params['pcell_list']) > 0:
                # Only the first cell entry in the list is considered.
                pcell = params['pcell_list'][0]
                if 'cid' in pcell and 'lac' in pcell:
                    cellId = self.__makeCellId(int("%03d%02d" % (pcell.get('mcc', 0), pcell.get('mnc', 0))), pcell.get('cid'), pcell.get('lac'))
                    if cellId:
                        if 'ctype' in pcell and str(pcell['ctype']).isdigit():
                            ctype = int(pcell.get('ctype', -1)) + 1 # -1 : Unknown
                            # Only codes present in netTypeCode are accepted.
                            cellType = ctype if ctype in netTypeCode.values() else 0
                        else:
                            cellType = 0
                        node = CellNode(state='active', cellid=cellId, celltype=cellType, regdtm=timestamp, geoloc=geoloc, priority=Priority.HIGH)
                        netList.addSet(node)
        except Exception, e:
            log.error(e)
            log.error(params)
        return netList
def handler(self, params):
# Event Key/Value 인 경우, 무시
if 'evtKey' in par |
# -*- coding: utf8 -*-
# Named SQL queries for the fond-search report page.
# NOTE(review): the query is built by %-interpolating the user-supplied
# 'qr' parameter -- vulnerable to SQL injection unless 'qr' is sanitized
# upstream; verify before exposing.
SQL = (
    ('list_fonds_report1', """
select
 F.FKOD,F.FNAME, (F.A16+if(F.A22,A22,0)) as A16
FROM
 `af3_fond` F
WHERE
 FNAME like ('%%%(qr)s%%') or A1 like ('%%%(qr)s%%')
ORDER BY FKOD;"""),
    )
# Page/template configuration consumed by the af-web framework.
FOUND_ROWS = True
ROOT = "fonds"
ROOT_PREFIX = None
ROOT_POSTFIX= None
XSL_TEMPLATE = "data/af-web.xsl"
EVENT = None
WHERE = ()
# Request parameter(s) this page accepts.
PARAM = ("qr",)
TITLE="Поиск фондов"
MESSAGE="Нет результатов по вашему запросу, вернитесь назад"
ORDER = None
|
from patchs import PatchsBackend
from registry import register_backend
# Test-only switches driven by environment variables (any non-zero value
# enables them): skip git commits in do_transaction, and (presumably)
# disable git integration entirely -- the latter is not used in this chunk.
TEST_DB_WITHOUT_COMMITS = bool(int(os.environ.get('TEST_DB_WITHOUT_COMMITS') or 0))
TEST_DB_DESACTIVATE_GIT = bool(int(os.environ.get('TEST_DB_DESACTIVATE_GIT') or 0))
class Heap(object):
    """
    A minimal sorted mapping from Git tree paths to values.

    Only a small subset of the dict API is exposed on purpose:

    >>> len(heap)
    >>> heap[path] = value
    >>> value = heap.get(path)
    >>> path, value = heap.popitem()

    Keys are relative paths as used in Git trees, like 'a/b/c' ('' is the
    root). Deeper paths sort first, so 'popitem' returns leaves before
    their ancestors; the order of two equally-deep paths is undefined.
    This is what RWDatabase._save_changes needs to build the tree objects
    bottom-up before a commit.
    """
    def __init__(self):
        self._dict = {}
        self._heap = []

    def __len__(self):
        return len(self._dict)

    def get(self, path):
        return self._dict.get(path)

    def __setitem__(self, path, value):
        # Push the ordering key only on first insertion; later writes for
        # the same path just overwrite the stored value.
        if path not in self._dict:
            depth_key = 1 if not path else -path.count('/')
            heappush(self._heap, (depth_key, path))
        self._dict[path] = value

    def popitem(self):
        # Smallest key == deepest path; '' (root, key 1) comes out last.
        _, path = heappop(self._heap)
        return path, self._dict.pop(path)
class GitBackend(object):
def __init__(self, path, fields, read_only=False):
self.nb_transactions = 0
self.last_transaction_dtime = None
self.path = abspath(path) + '/'
self.fields = fields
self.read_only = read_only
# Open database
self.path_data = '%s/database/' % self.path
# Check if is a folder
self.path_data = '%s/database/' % self.path
if not lfs.is_folder(self.path_data):
error = '"{0}" should be a folder, but it is not'.format(self.path_data)
raise ValueError(error)
# New interface to Git
self.worktree = open_worktree(self.path_data)
# Initialize the database, but chrooted
self.fs = lfs.open(self.path_data)
# Static FS
database_static_path = '{0}/database_static'.format(path)
if not lfs.exists(database_static_path):
self.init_backend_static(path)
self.static_fs = lfs.open(database_static_path)
# Patchs backend
self.patchs_backend = PatchsBackend(path, self.fs, read_only)
# Catalog
self.catalog = self.get_catalog()
    @classmethod
    def init_backend(cls, path, fields, init=False, soft=False):
        """Create a brand new database layout under `path`.

        NOTE(review): the `init` and `soft` parameters are accepted but
        unused by this implementation.
        """
        # Metadata database
        init_repository('{0}/database'.format(path), bare=False)
        # Init backend static
        cls.init_backend_static(path)
        # Make catalog
        make_catalog('{0}/catalog'.format(path), fields)
@classmethod
def init_backend_static(cls, path):
# Static database
lfs.make_folder('{0}/database_static'.format(path))
lfs.make_folder('{0}/database_static/.history'.format(path))
#######################################################################
# Database API
#######################################################################
    def normalize_key(self, path, __root=None):
        """Return `path` normalized relative to the repository root.

        NOTE(review): `__root` undergoes Python private-name mangling, so
        outside callers cannot pass it by its literal keyword; with the
        default None this method would fail on `.resolve` -- presumably a
        resolved root path object is always supplied. Verify at call sites.
        Access to the '.git' folder is rejected.
        """
        # Performance is critical so assume the path is already relative to
        # the repository.
        key = __root.resolve(path)
        if key and key[0] == '.git':
            err = "bad '{0}' path, access to the '.git' folder is denied"
            raise ValueError(err.format(path))
        return '/'.join(key)
def handler_exists(self, key):
fs = self.get_handler_fs_by_key(key)
return fs.exists(key)
    def get_handler_names(self, key):
        # List child names under `key` in the metadata filesystem.
        return self.fs.get_names(key)
def get_handler_data(self, key):
if not key:
return None
fs = self.get_handler_fs_by_key(key)
with fs.open(key) as f:
return f.read()
def get_handler_mimetype(self, key):
data = self.get_handler_data(key)
return magic_from_buffer(data)
def handler_is_file(self, key):
fs = self.get_handler_fs_by_key(key)
return fs.is_file(key)
def handler_is_folder(self, key):
fs = self.get_handler_fs_by_key(key)
return fs.is_folder(key)
def get_handler_mtime(self, key):
fs = self.get_handler_fs_by_key(key)
return fs.get_mtime(key)
def save_handler(self, key, handler):
data = handler.to_str()
# Save the file
fs = self.get_handler_fs(handler)
# Write and truncate (calls to "_save_state" must be done with the
# pointer pointing to the beginning)
if not fs.exists(key):
with fs.make_file(key) as f:
f.write(data)
f.truncate(f.tell())
else:
with fs.open(key, 'w') as f:
f.write(data)
f.truncate(f.tell())
# Set dirty = None
handler.timestamp = self.get_handler_mtime(key)
handler.dirty = None
    def traverse_resources(self):
        # Abstract hook: subclasses must provide resource traversal.
        raise NotImplementedError
def get_handler_fs(self, handler):
if isinstance(handler, Metadata):
return self.fs
return self.static_fs
def get_handler_fs_by_key(self, key):
if key.endswith('metadata'):
return self.fs
return self.static_fs
def add_handler_into_static_history(self, key):
the_time = datetime.now().strftime('%Y%m%d%H%M%S')
new_key = '.history/{0}.{1}.{2}'.format(key, the_time, uuid4())
parent_path = dirname(new_key)
if not self.static_fs.exists(parent_path):
self.static_fs.make_folder(parent_path)
self.static_fs.copy(key, new_key)
    def do_transaction(self, commit_message, data, added, changed, removed, handlers,
                       docs_to_index, docs_to_unindex):
        """Apply one database transaction: archive, patch, write, commit, index.

        NOTE(review): the `docs_to_index` / `docs_to_unindex` parameters are
        immediately overwritten by unpacking `data` -- confirm the parameters
        are vestigial.  Python 2 module: `time(06, 00)` uses an octal-style
        literal that is a syntax error on Python 3.
        """
        git_author, git_date, git_msg, docs_to_index, docs_to_unindex = data
        # Statistics
        self.nb_transactions += 1
        # Add static changed & removed files to ~/database_static/.history/
        changed_and_removed = list(changed) + list(removed)
        for key in changed_and_removed:
            if not key.endswith('metadata'):
                self.add_handler_into_static_history(key)
        # Create patch if there's changed
        if added or changed or removed:
            self.patchs_backend.create_patch(added, changed, removed, handlers, git_author)
        else:
            # it's a catalog transaction, we have to do nothing
            pass
        # Added and changed: write every handler, creating parents as needed.
        added_and_changed = list(added) + list(changed)
        for key in added_and_changed:
            handler = handlers.get(key)
            parent_path = dirname(key)
            fs = self.get_handler_fs(handler)
            if not fs.exists(parent_path):
                fs.make_folder(parent_path)
            self.save_handler(key, handler)
        # Remove files (if not removed via git-rm)
        for key in removed:
            if not key.endswith('metadata') or TEST_DB_WITHOUT_COMMITS:
                fs = self.get_handler_fs_by_key(key)
                fs.remove(key)
        # Do git transaction for metadata
        if not TEST_DB_WITHOUT_COMMITS:
            self.do_git_transaction(commit_message, data, added, changed, removed, handlers)
        else:
            # Commit at start
            if not self.last_transaction_dtime:
                self.do_git_big_commit()
            else:
                # Otherwise only big-commit at night, at most every 2 hours.
                now = datetime.now()
                t = now.time()
                is_night = time(21, 00) < t or t < time(06, 00)
                done_recently = now - self.last_transaction_dtime < timedelta(minutes=120)
                if is_night and not done_recently:
                    self.do_git_big_commit()
        # Catalog: apply unindex then index operations, then persist.
        for path in docs_to_unindex:
            self.catalog.unindex_document(path)
        for resource, values in docs_to_index:
            self.catalog.index_document(values)
        self.catalog.save_changes()
def |
twice (retry after fail)
self.assertEqual(self.cluster_manager.cluster_env_id, "correct")
self.assertEqual(self.sdk.call_counter["search_cluster_environments"], 1)
self.assertEqual(self.sdk.call_counter["create_cluster_environment"], 1)
self.assertEqual(len(self.sdk.call_counter), 2)
@patch("time.sleep", lambda *a, **kw: None)
def testBuildClusterEnvNotFound(self):
    """An environment with no builds at all must raise ClusterEnvBuildError."""
    self.cluster_manager.set_cluster_env(self.cluster_env)
    self.cluster_manager.cluster_env_id = "correct"
    # Environment build not found: the builds listing comes back empty.
    no_builds = APIDict(results=[])
    self.sdk.returns["list_cluster_environment_builds"] = no_builds
    with self.assertRaisesRegex(ClusterEnvBuildError, "No build found"):
        self.cluster_manager.build_cluster_env(timeout=600)
@patch("time.sleep", lambda *a, **kw: None)
def testBuildClusterEnvPreBuildFailed(self):
    """A pre-existing failed build must raise and leave no build id behind."""
    self.cluster_manager.set_cluster_env(self.cluster_env)
    self.cluster_manager.cluster_env_id = "correct"
    # Build failed on first lookup
    self.cluster_manager.cluster_env_build_id = None
    self.sdk.reset()
    failed_build = APIDict(id="build_failed", status="failed", created_at=0)
    self.sdk.returns["list_cluster_environment_builds"] = APIDict(
        results=[failed_build]
    )
    with self.assertRaisesRegex(ClusterEnvBuildError, "Cluster env build failed"):
        self.cluster_manager.build_cluster_env(timeout=600)
    # The failed build id must not be recorded, and only one API call made.
    self.assertFalse(self.cluster_manager.cluster_env_build_id)
    self.assertEqual(self.sdk.call_counter["list_cluster_environment_builds"], 1)
    self.assertEqual(len(self.sdk.call_counter), 1)
@patch("time.sleep", lambda *a, **kw: None)
def testBuildClusterEnvPreBuildSucceeded(self):
    """A later succeeded build is picked up directly, without rebuilding."""
    self.cluster_manager.set_cluster_env(self.cluster_env)
    self.cluster_manager.cluster_env_id = "correct"
    # (Second) build succeeded
    self.cluster_manager.cluster_env_build_id = None
    self.sdk.reset()
    builds = [
        APIDict(id="build_failed", status="failed", created_at=0),
        APIDict(id="build_succeeded", status="succeeded", created_at=1),
    ]
    self.sdk.returns["list_cluster_environment_builds"] = APIDict(results=builds)
    self.cluster_manager.build_cluster_env(timeout=600)
    # The succeeded build is adopted with a single listing call.
    self.assertTrue(self.cluster_manager.cluster_env_build_id)
    self.assertEqual(self.cluster_manager.cluster_env_build_id, "build_succeeded")
    self.assertEqual(self.sdk.call_counter["list_cluster_environment_builds"], 1)
    self.assertEqual(len(self.sdk.call_counter), 1)
@patch("time.sleep", lambda *a, **kw: None)
def testBuildClusterBuildFails(self):
    # A pending build that later flips to "failed" must raise and leave no
    # build id recorded.
    self.cluster_manager.set_cluster_env(self.cluster_env)
    self.cluster_manager.cluster_env_id = "correct"
    # Build, but fails after 300 seconds
    self.cluster_manager.cluster_env_build_id = None
    self.sdk.reset()
    self.sdk.returns["list_cluster_environment_builds"] = APIDict(
        results=[
            APIDict(
                id="build_failed",
                status="failed",
                created_at=0,
            ),
            APIDict(
                id="build_succeeded",
                status="pending",
                created_at=1,
            ),
        ]
    )
    # Order matters here: the get_build stub is installed inside the
    # freeze_time context because its callback ticks `frozen_time` by 10s
    # per poll; it reports "in_progress" until 300 (frozen) seconds have
    # passed, then "failed" — well before the 600s timeout.
    with freeze_time() as frozen_time, self.assertRaisesRegex(
        ClusterEnvBuildError, "Cluster env build failed"
    ):
        self.sdk.returns["get_build"] = _DelayedResponse(
            lambda: frozen_time.tick(delta=10),
            finish_after=300,
            before=APIDict(result=APIDict(status="in_progress")),
            after=APIDict(result=APIDict(status="failed")),
        )
        self.cluster_manager.build_cluster_env(timeout=600)
    # No build id from a failed build; listing happened once, polling often.
    self.assertFalse(self.cluster_manager.cluster_env_build_id)
    self.assertEqual(self.sdk.call_counter["list_cluster_environment_builds"], 1)
    self.assertGreaterEqual(self.sdk.call_counter["get_build"], 9)
    self.assertEqual(len(self.sdk.call_counter), 2)
@patch("time.sleep", lambda *a, **kw: None)
def testBuildClusterEnvBuildTimeout(self):
    # A build that would eventually succeed (after 300s) must still raise
    # ClusterEnvBuildTimeout when the caller's timeout (100s) expires first.
    self.cluster_manager.set_cluster_env(self.cluster_env)
    self.cluster_manager.cluster_env_id = "correct"
    # Build, but timeout after 100 seconds
    self.cluster_manager.cluster_env_build_id = None
    self.sdk.reset()
    self.sdk.returns["list_cluster_environment_builds"] = APIDict(
        results=[
            APIDict(
                id="build_failed",
                status="failed",
                created_at=0,
            ),
            APIDict(
                id="build_succeeded",
                status="pending",
                created_at=1,
            ),
        ]
    )
    # The get_build stub must be created inside the freeze_time context:
    # each poll ticks the frozen clock by 10s, and the stub only reports
    # "succeeded" after 300 frozen seconds — past the 100s timeout.
    with freeze_time() as frozen_time, self.assertRaisesRegex(
        ClusterEnvBuildTimeout, "Time out when building cluster env"
    ):
        self.sdk.returns["get_build"] = _DelayedResponse(
            lambda: frozen_time.tick(delta=10),
            finish_after=300,
            before=APIDict(result=APIDict(status="in_progress")),
            after=APIDict(result=APIDict(status="succeeded")),
        )
        self.cluster_manager.build_cluster_env(timeout=100)
    # A timed-out build must not record a build id.
    self.assertFalse(self.cluster_manager.cluster_env_build_id)
    self.assertEqual(self.sdk.call_counter["list_cluster_environment_builds"], 1)
    self.assertGreaterEqual(self.sdk.call_counter["get_build"], 9)
    self.assertEqual(len(self.sdk.call_counter), 2)
@patch("time.sleep", lambda *a, **kw: None)
def testBuildClusterBuildSucceed(self):
    # Happy path: a pending build completes within the timeout and its id
    # is recorded on the cluster manager.
    self.cluster_manager.set_cluster_env(self.cluster_env)
    self.cluster_manager.cluster_env_id = "correct"
    # Build, succeed after 300 seconds
    self.cluster_manager.cluster_env_build_id = None
    self.sdk.reset()
    self.sdk.returns["list_cluster_environment_builds"] = APIDict(
        results=[
            APIDict(
                id="build_failed",
                status="failed",
                created_at=0,
            ),
            APIDict(
                id="build_succeeded",
                status="pending",
                created_at=1,
            ),
        ]
    )
    # The stub ticks the frozen clock 10s per poll and flips to
    # "succeeded" after 300 frozen seconds, within the 600s timeout.
    with freeze_time() as frozen_time:
        self.sdk.returns["get_build"] = _DelayedResponse(
            lambda: frozen_time.tick(delta=10),
            finish_after=300,
            before=APIDict(result=APIDict(status="in_progress")),
            after=APIDict(result=APIDict(status="succeeded")),
        )
        self.cluster_manager.build_cluster_env(timeout=600)
    self.assertTrue(self.cluster_manager.cluster_env_build_id)
    self.assertEqual(self.sdk.call_counter["list_cluster_environment_builds"], 1)
    self.assertGreaterEqual(self.sdk.call_counter["get_build"], 9)
    self.assertEqual(len(self.sdk.call_counter), 2)
class FullSessionManagerTest(MinimalSessionManagerTest):
cls = FullClusterManager
def testSessionStartCreationError(self):
    # With valid env/compute ids, a failing create_cluster API call must
    # surface from start_cluster() as ClusterCreationError.
    self.cluster_manager.cluster_env_id = "correct"
    self.cluster_manager.cluster_compute_id = "correct"
    self.sdk.returns["create_cluster"] = _fail
    with self.assertRaises(ClusterCreationError):
        self.cluster_manager.start_cluster()
def testSessionStartStartupError(self):
self.cluster_manager.cluster_env_id = "correct"
self.cluster_ma | nager.cluster_compute_id = "correct"
self.sdk.returns["create_cluster"] = APIDict(result=APIDict(id="success"))
self.sdk.returns["start_cluster"] = _fail
with self.asser |
from datetime import time
from datetime import timedelta
import pendulum
from .constants import SECS_PER_HOUR
from .constants import SECS_PER_MIN
from .constants import USECS_PER_SEC
from .duration import AbsoluteDuration
from .duration import Duration
from .mixins.default import FormattableMixin
class Time(FormattableMixin, time):
    """
    Represents a time instance as hour, minute, second, microsecond.

    Subclasses :class:`datetime.time` and adds pendulum-style arithmetic
    (add/subtract, diff) and humanized formatting.
    """
    # String formatting
    def __repr__(self):
        # Optional segments (microseconds, tzinfo) are included only when
        # present so the repr stays as short as possible.
        us = ""
        if self.microsecond:
            us = f", {self.microsecond}"
        tzinfo = ""
        if self.tzinfo:
            tzinfo = ", tzinfo={}".format(repr(self.tzinfo))
        return "{}({}, {}, {}{}{})".format(
            self.__class__.__name__, self.hour, self.minute, self.second, us, tzinfo
        )
    # Comparisons
    def closest(self, dt1, dt2):
        """
        Get the closest time from the instance.
        :type dt1: Time or time
        :type dt2: Time or time
        :rtype: Time
        """
        # Normalize both candidates to naive Time instances before measuring.
        dt1 = self.__class__(dt1.hour, dt1.minute, dt1.second, dt1.microsecond)
        dt2 = self.__class__(dt2.hour, dt2.minute, dt2.second, dt2.microsecond)
        if self.diff(dt1).in_seconds() < self.diff(dt2).in_seconds():
            return dt1
        return dt2
    def farthest(self, dt1, dt2):
        """
        Get the farthest time from the instance.
        :type dt1: Time or time
        :type dt2: Time or time
        :rtype: Time
        """
        dt1 = self.__class__(dt1.hour, dt1.minute, dt1.second, dt1.microsecond)
        dt2 = self.__class__(dt2.hour, dt2.minute, dt2.second, dt2.microsecond)
        if self.diff(dt1).in_seconds() > self.diff(dt2).in_seconds():
            return dt1
        return dt2
    # ADDITIONS AND SUBSTRACTIONS
    def add(self, hours=0, minutes=0, seconds=0, microseconds=0):
        """
        Add duration to the instance.
        :param hours: The number of hours
        :type hours: int
        :param minutes: The number of minutes
        :type minutes: int
        :param seconds: The number of seconds
        :type seconds: int
        :param microseconds: The number of microseconds
        :type microseconds: int
        :rtype: Time
        """
        from .datetime import DateTime
        # Perform the arithmetic on a DateTime anchored at EPOCH and drop
        # the date component again with .time().
        return (
            DateTime.EPOCH.at(self.hour, self.minute, self.second, self.microsecond)
            .add(
                hours=hours, minutes=minutes, seconds=seconds, microseconds=microseconds
            )
            .time()
        )
    def subtract(self, hours=0, minutes=0, seconds=0, microseconds=0):
        """
        Subtract duration from the instance.
        :param hours: The number of hours
        :type hours: int
        :param minutes: The number of minutes
        :type minutes: int
        :param seconds: The number of seconds
        :type seconds: int
        :param microseconds: The number of microseconds
        :type microseconds: int
        :rtype: Time
        """
        from .datetime import DateTime
        return (
            DateTime.EPOCH.at(self.hour, self.minute, self.second, self.microsecond)
            .subtract(
                hours=hours, minutes=minutes, seconds=seconds, microseconds=microseconds
            )
            .time()
        )
    def add_timedelta(self, delta):
        """
        Add timedelta duration to the instance.
        :param delta: The timedelta instance
        :type delta: datetime.timedelta
        :rtype: Time
        """
        # A day component would overflow a time-of-day value.
        if delta.days:
            raise TypeError("Cannot add timedelta with days to Time.")
        return self.add(seconds=delta.seconds, microseconds=delta.microseconds)
    def subtract_timedelta(self, delta):
        """
        Remove timedelta duration from the instance.
        :param delta: The timedelta instance
        :type delta: datetime.timedelta
        :rtype: Time
        """
        if delta.days:
            raise TypeError("Cannot subtract timedelta with days to Time.")
        return self.subtract(seconds=delta.seconds, microseconds=delta.microseconds)
    def __add__(self, other):
        if not isinstance(other, timedelta):
            return NotImplemented
        return self.add_timedelta(other)
    def __sub__(self, other):
        if not isinstance(other, (Time, time, timedelta)):
            return NotImplemented
        if isinstance(other, timedelta):
            return self.subtract_timedelta(other)
        if isinstance(other, time):
            # Subtraction between times is only defined for naive values.
            if other.tzinfo is not None:
                raise TypeError("Cannot subtract aware times to or from Time.")
            other = self.__class__(
                other.hour, other.minute, other.second, other.microsecond
            )
        # abs=False keeps the sign of the difference.
        return other.diff(self, False)
    def __rsub__(self, other):
        if not isinstance(other, (Time, time)):
            return NotImplemented
        if isinstance(other, time):
            if other.tzinfo is not None:
                raise TypeError("Cannot subtract aware times to or from Time.")
            other = self.__class__(
                other.hour, other.minute, other.second, other.microsecond
            )
        return other.__sub__(self)
    # DIFFERENCES
    def diff(self, dt=None, abs=True):
        """
        Returns the difference between two Time objects as an Duration.
        :type dt: Time or None
        :param abs: Whether to return an absolute interval or not
        :type abs: bool
        :rtype: Duration
        """
        if dt is None:
            dt = pendulum.now().time()
        else:
            dt = self.__class__(dt.hour, dt.minute, dt.second, dt.microsecond)
        # NOTE: only hours, minutes and seconds contribute here — the
        # microsecond components are not included in the difference.
        us1 = (
            self.hour * SECS_PER_HOUR + self.minute * SECS_PER_MIN + self.second
        ) * USECS_PER_SEC
        us2 = (
            dt.hour * SECS_PER_HOUR + dt.minute * SECS_PER_MIN + dt.second
        ) * USECS_PER_SEC
        klass = Duration
        if abs:
            klass = AbsoluteDuration
        return klass(microseconds=us2 - us1)
    def diff_for_humans(self, other=None, absolute=False, locale=None):
        """
        Get the difference in a human readable format in the current locale.
        :type other: Time or time
        :param absolute: removes time difference modifiers ago, after, etc
        :type absolute: bool
        :param locale: The locale to use for localization
        :type locale: str
        :rtype: str
        """
        is_now = other is None
        if is_now:
            other = pendulum.now().time()
        diff = self.diff(other)
        return pendulum.format_diff(diff, is_now, absolute, locale)
    # Compatibility methods
    def replace(
        self, hour=None, minute=None, second=None, microsecond=None, tzinfo=True
    ):
        # tzinfo=True is a sentinel meaning "keep the current tzinfo"
        # (None is a meaningful value: strip the tzinfo).
        if tzinfo is True:
            tzinfo = self.tzinfo
        hour = hour if hour is not None else self.hour
        minute = minute if minute is not None else self.minute
        second = second if second is not None else self.second
        microsecond = microsecond if microsecond is not None else self.microsecond
        t = super().replace(hour, minute, second, microsecond, tzinfo=tzinfo)
        # Re-wrap the stdlib result in this class.
        return self.__class__(
            t.hour, t.minute, t.second, t.microsecond, tzinfo=t.tzinfo
        )
    def __getnewargs__(self):
        # copy/pickle support.
        return (self,)
    def _get_state(self, protocol=3):
        # Constructor arguments used to rebuild the instance when pickling.
        tz = self.tzinfo
        return (self.hour, self.minute, self.second, self.microsecond, tz)
    def __reduce__(self):
        return self.__reduce_ex__(2)
    def __reduce_ex__(self, protocol):
        return self.__class__, self._get_state(protocol)
# Class-level bounds and resolution, mirroring datetime.time's constants.
Time.min = Time(0, 0, 0)
Time.max = Time(23, 59, 59, 999999)
Time.resolution = Duration(microseconds=1)
|
# Copyright (c) 2021 PaddlePaddle Authors. All Right | s Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by ap | plicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import sys
__all__ = []
class LoggerFactory:
    """Factory for configured, non-propagating loggers that write to stderr."""

    @staticmethod
    def build_logger(name=None, level=logging.INFO):
        """Build (or re-fetch) a named logger with a stderr stream handler.

        Args:
            name (str): logger name; must not be None.
            level (int): logging level applied to both logger and handler.

        Returns:
            logging.Logger: the configured logger instance.
        """
        assert name is not None, "name for logger should not be None"
        formatter = logging.Formatter(
            "%(asctime)s-%(levelname)s: "
            "[%(filename)s:%(lineno)d:%(funcName)s] %(message)s")
        _logger = logging.getLogger(name)
        _logger.setLevel(level)
        # Do not bubble records up to the root logger (avoids double output).
        _logger.propagate = False
        # logging.getLogger returns the same object for the same name, so a
        # repeated build_logger call used to attach a second handler and every
        # record was then emitted twice. Attach the handler only once.
        if not _logger.handlers:
            handler = logging.StreamHandler(stream=sys.stderr)
            handler.setFormatter(formatter)
            handler.setLevel(level)
            _logger.addHandler(handler)
        return _logger
# Shared module-level logger for the hybrid-parallel components.
logger = LoggerFactory.build_logger(name="HybridParallel", level=logging.INFO)
def layer_to_str(base, *args, **kwargs):
    """Build a repr-like layer description, e.g. ``Linear(2, 3, bias=True)``.

    Args:
        base (str): the layer/class name used as the prefix.
        *args: positional arguments, rendered with str().
        **kwargs: keyword arguments, rendered as ``key=value``.

    Returns:
        str: ``base(arg0, arg1, ..., key=value, ...)``.
    """
    # Collect all pieces first, then join once: this fixes the original
    # output of "base(, key=value)" (a stray leading separator) when only
    # keyword arguments were given.
    parts = [str(arg) for arg in args]
    parts.extend("{}={}".format(key, str(value))
                 for key, value in kwargs.items())
    return "{}({})".format(base, ", ".join(parts))
|
# -*- coding: utf-8 -*-
#
# This file is part of the Christine project
#
# Copyright (c) 2006-2007 Marco Antonio Islas Cruz
#
# Christine is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# Christine is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# alo | ng with this program; if not, write to the Free Software
# | Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
#
# @category libchristine
# @package Share
# @author Miguel Vazquez Gocobachi <demrit@gnu.org>
# @author Marco Antonio Islas Cruz <markuz@islascruz.org>
# @copyright 2007-2009 Christine Development Group
# @license http://www.gnu.org/licenses/gpl.txt
#import gtk.glade
# @author Miguel Vazquez Gocobachi <demrit@gnu.org>
from libchristine.Validator import *
from libchristine.pattern.Singleton import Singleton
from libchristine.gui.GtkMisc import glade_xml
from libchristine.globalvars import DATADIR, SHARE_PATH
from libchristine.Logger import LoggerManager
from libchristine.options import options
import time
import os
import gtk
import sys
import gobject
#
# Share class manager for images, glade
# templates and more files
#
# @author Miguel Vazquez Gocobachi <demrit@gnu.org>
class Share(Singleton):
    """
    Share class manager for images, glade
    templates and more files
    """
    #
    # Directory where we have template files
    #
    # @var string
    __PathTemplate = None
    #
    # Directory where we have images
    #
    # @var string
    __PathPixmap = None
    def __init__(self):
        """
        Constructor
        """
        self.setName('Share')
        self.__logger = LoggerManager().getLogger('Share')
        self.__PathTemplate = os.path.join(SHARE_PATH, 'gui')
        self.__PathPixmap = os.path.join(self.__PathTemplate, 'pixmaps')
        #self.__Pixmaps, used to store a pixmap. if it is here then reuse it
        #instead of creating another one from the same file
        self.__Pixmaps = {}
        # Poll the pixmap cache every second to expire stale entries.
        gobject.timeout_add(1000, self.check_pixmap_time_access)
    def getTemplate(self, file, root = None):
        """
        Gets glade template
        @param string file: file to load
        @param string root: root widget to return instead the main window
        """
        if file:
            file = ''.join([file, '.glade'])
            if isFile(os.path.join(self.__PathTemplate, file)):
                return glade_xml(os.path.join(self.__PathTemplate, file),root)
            self.__logger.warning('File %s was not found'%(os.path.join(self.__PathTemplate, file)))
        return None
    def getImage(self, name):
        """
        Gets image as path string

        Looks for <name>.png first, then <name>.svg, under the pixmaps dir.
        """
        # NOTE(review): `file` here is the Python 2 builtin type, not a local
        # variable, so `isNull(file)` is presumably always falsy and the
        # condition effectively always passes — `name` was likely intended;
        # confirm before changing.
        if ((not isNull(file)) or (isStringEmpty(name))):
            if (isFile(os.path.join(self.__PathPixmap, name+'.png'))):
                return os.path.join(self.__PathPixmap, name+'.png')
            elif (isFile(os.path.join(self.__PathPixmap, name+ '.svg'))):
                return os.path.join(self.__PathPixmap, name+'.svg')
        return None
    def getImageFromPix(self, name):
        """
        Gets image from pixbuf

        Prefers the current GTK icon theme; falls back to the local
        pixmaps directory.
        """
        icon_theme = gtk.icon_theme_get_default()
        if icon_theme.has_icon(name):
            pixbuf = icon_theme.load_icon(name, 48, 0)
            return pixbuf
        else:
            return self.load_from_local_dir(name)
    def load_from_local_dir(self, name):
        # Load (and cache) the first pixmap file whose basename starts with
        # `name`; returns None when nothing matches.
        if not name: return
        files = os.listdir(self.__PathPixmap)
        filesf = [k for k in files if len(k.split('.')) > 1 \
            and k.split('.')[0].startswith(name)]
        if not filesf:
            self.__logger.warning('None of this files \n%s\n where found'%repr(name))
            return
        filepath = os.path.join(self.__PathPixmap, filesf[0])
        pixdir = self.__Pixmaps.get(filepath,{})
        if not pixdir:
            # Cache miss: load the pixbuf once and remember when it was used.
            self.__Pixmaps[filepath] = pixdir
            pixmap = gtk.gdk.pixbuf_new_from_file(filepath)
            self.__Pixmaps[filepath]['pixmap'] = pixmap
            self.__Pixmaps[filepath]['timestamp'] = time.time()
        return self.__Pixmaps[filepath]['pixmap']
    def check_pixmap_time_access(self):
        '''
        Check the last time access to a pixmap, if the difference between
        the current time and the last access time is more than 600 seconds
        (10 minutes) then it will erase the pixmap.

        NOTE(review): the code below actually keeps entries younger than
        60 seconds, not 600 — docstring and implementation disagree; confirm
        which value is intended.

        Returns True so gobject.timeout_add keeps the timer running.
        '''
        c ={}
        ctime = time.time()
        for key, value in self.__Pixmaps.iteritems():
            if ctime - value['timestamp'] < 60:
                c[key] = value
        self.__Pixmaps = c.copy()
        #del c
        return True
|
#!/usr/bin/env python
"""Exponential and Quaternion code for Lab 6.
Course: EE 106, Fall 2015
Author: Victor Shia, 9/24/15
This Python file is a code skeleton Lab 6 which calculates the rigid body transform
given a rotation / translation and computes the twist from rigid body transform.
When you think you have the methods implemented correctly, you can test your
code by running "python exp_quat_func.py at the command line.
This code requires the NumPy and SciPy libraries and kin_func_skeleton which you
should have written in lab 3. If you don't already have
these installed on your personal computer, you can use the lab machines or
the Ubuntu+ROS VM on the course page to complete this portion of the homework.
"""
import tf
import rospy
import sys
from math import *
import numpy as np
from tf2_msgs.msg import TFMessage
from geometry_msgs.msg import Transform, Vector3
import kin_func_skeleton as kfs
def quaternion_to_exp(rot):
    """
    Converts a quaternion vector in 3D to its corresponding omega and theta.
    This uses the quaternion -> exponential coordinate equation given in Lab 6
    Args:
        rot - a (4,) nd array or 4x1 array: the quaternion vector (\\vec{q}, q_o)
    Returns:
        omega - (3,) ndarray: the rotation vector
        theta - a scalar
    """
    #YOUR CODE HERE
    theta = 2.0 * np.arccos(rot[-1])
    if theta == 0:
        # Identity rotation: the axis is undefined, so return the zero vector.
        return (np.array([0.0, 0.0, 0.0]), theta)
    omega = rot[:-1] / np.sin(theta / 2.0)
    return (omega, theta)
def create_rbt(omega, theta, p):
    """
    Creates a rigid body transform using omega, theta, and the translation component.
    g = [R,p; 0,1], where R = exp(omega * theta), p = trans
    Args:
        omega - (3,) ndarray : the axis you want to rotate about
        theta - scalar value
        p - (3,) ndarray or 3x1 array: the translation component of the rigid body motion
    Returns:
        g - (4,4) ndarray : the rigid body transform
    """
    g = np.eye(4)
    g[:3, :3] = kfs.rotation_3d(omega, theta)
    # reshape accepts both (3,) and 3x1 translations; the original
    # element-by-element construction produced a malformed array for 3x1
    # input even though the docstring allows it.
    g[:3, 3] = np.reshape(p, 3)
    return g
def compute_gab(g0a,g0b):
    """
    Creates a rigid body transform g_{ab} the converts between frame A and B
    given the coordinate frame A,B in relation to the origin
    Args:
        g0a - (4,4) ndarray : the rigid body transform from the origin to frame A
        g0b - (4,4) ndarray : the rigid body transform from the origin to frame B
    Returns:
        gab - (4,4) ndarray : the rigid body transform
    """
    # Solve g0a @ gab = g0b directly: np.linalg.solve is more numerically
    # stable (and no slower) than forming the explicit inverse and
    # multiplying, while producing the same result inv(g0a) . g0b.
    gab = np.linalg.solve(g0a, g0b)
    return gab
def find_omega_theta(R):
    """
    Given a rotation matrix R, finds the omega and theta such that R = exp(omega * theta)
    Args:
        R - (3,3) ndarray : the rotational component of the rigid body transform
    Returns:
        omega - (3,) ndarray : the axis you want to rotate about
        theta - scalar value
    """
    #YOUR CODE HERE
    theta = np.arccos((np.trace(R) - 1) / 2)
    # The skew-symmetric part of R encodes the (unnormalized) rotation axis;
    # dividing by 2*sin(theta) recovers the unit axis (valid for sin(theta) != 0).
    axis = np.array([R[2][1] - R[1][2], R[0][2] - R[2][0], R[1][0] - R[0][1]])
    omega = axis / (2 * np.sin(theta))
    return (omega, theta)
def find_v(omega, theta, trans):
    """
    Finds the linear velocity term of the twist (v,omega) given omega, theta and translation
    Args:
        omega - (3,) ndarray : the axis you want to rotate about
        theta - scalar value
        trans - (3,) ndarray of 3x1 list : the translation component of the rigid body transform
    Returns:
        v - (3,1) ndarray : the linear velocity term of the twist (v,omega)
    """
    #YOUR CODE HERE
    # A = (I - exp(omega_hat * theta)) omega_hat + omega omega^T theta,
    # then solve A v = trans for v.
    rotational_part = (np.eye(3) - kfs.rotation_3d(omega, theta)).dot(kfs.skew_3d(omega))
    translational_part = np.outer(omega, omega.T) * theta
    A = rotational_part + translational_part
    v = np.dot(np.linalg.inv(A), trans)
    # Return as a (3,1) column vector.
    return np.array([v]).T
#-----------------------------Testing code--------------------------------------
#-------------(you shouldn't need to modify anything below here)----------------
def array_func_test(func_name, args, ret_desired):
    """Run func_name(*args) and print a [PASS]/[FAIL] line comparing the
    returned ndarray against ret_desired (type, shape, then values)."""
    ret_value = func_name(*args)
    label = func_name.__name__
    if not isinstance(ret_value, np.ndarray):
        msg = '[FAIL] ' + label + '() returned something other than a NumPy ndarray'
    elif ret_value.shape != ret_desired.shape:
        msg = '[FAIL] ' + label + '() returned an ndarray with incorrect dimensions'
    elif not np.allclose(ret_value, ret_desired, rtol=1e-3):
        msg = '[FAIL] ' + label + '() returned an incorrect value'
    else:
        msg = '[PASS] ' + label + '() returned the correct value!'
    print(msg)
def array_func_test_two_outputs(func_name, args, ret_desireds):
    """Run func_name(*args), which must return (ndarray, float), and print a
    [PASS]/[FAIL] line for each of the two outputs against ret_desireds."""
    ret_values = func_name(*args)
    label = func_name.__name__
    for i in range(2):
        ret_value = ret_values[i]
        ret_desired = ret_desireds[i]
        if i == 0 and not isinstance(ret_value, np.ndarray):
            msg = '[FAIL] ' + label + '() returned something other than a NumPy ndarray'
        elif i == 1 and not isinstance(ret_value, float):
            msg = '[FAIL] ' + label + '() returned something other than a float'
        elif i == 0 and ret_value.shape != ret_desired.shape:
            msg = '[FAIL] ' + label + '() returned an ndarray with incorrect dimensions'
        elif not np.allclose(ret_value, ret_desired, rtol=1e-3):
            msg = '[FAIL] ' + label + '() returned an incorrect value'
        else:
            msg = '[PASS] ' + label + '() returned the argument %d value!' % i
        print(msg)
if __name__ == "__main__":
    # Self-check driver: exercises each helper against precomputed
    # reference values and prints [PASS]/[FAIL] per function.
    print('Testing...')
    #Test quaternion_to_exp()
    arg1 = np.array([1.0, 2, 3, 0.1])
    func_args = (arg1,)
    ret_desired = (np.array([1.005, 2.0101, 3.0151]), 2.94125)
    array_func_test_two_outputs(quaternion_to_exp, func_args, ret_desired)
    #Test create_rbt()
    arg1 = np.array([1.0, 2, 3])
    arg2 = 2
    arg3 = np.array([0.5,-0.5,1])
    func_args = (arg1,arg2,arg3)
    ret_desired = np.array(
        [[ 0.4078, -0.6562, 0.6349, 0.5 ],
         [ 0.8384, 0.5445, 0.0242, -0.5 ],
         [-0.3616, 0.5224, 0.7722, 1. ],
         [ 0. , 0. , 0. , 1. ]])
    array_func_test(create_rbt, func_args, ret_desired)
    #Test compute_gab(g0a,g0b)
    g0a = np.array(
        [[ 0.4078, -0.6562, 0.6349, 0.5 ],
         [ 0.8384, 0.5445, 0.0242, -0.5 ],
         [-0.3616, 0.5224, 0.7722, 1. ],
         [ 0. , 0. , 0. , 1. ]])
    g0b = np.array(
        [[-0.6949, 0.7135, 0.0893, 0.5 ],
         [-0.192 , -0.3038, 0.9332, -0.5 ],
         [ 0.693 , 0.6313, 0.3481, 1. ],
         [ 0. , 0. , 0. , 1. ]])
    func_args = (g0a, g0b)
    ret_desired = np.array([[-0.6949, -0.192 , 0.693 , 0. ],
                            [ 0.7135, -0.3038, 0.6313, 0. ],
                            [ 0.0893, 0.9332, 0.3481, 0. ],
                            [ 0. , 0. , 0. , 1. ]])
    array_func_test(compute_gab, func_args, ret_desired)
    #Test find_omega_theta
    R = np.array(
        [[ 0.4078, -0.6562, 0.6349 ],
         [ 0.8384, 0.5445, 0.0242 ],
         [-0.3616, 0.5224, 0.7722 ]])
    func_args = (R,)
    ret_desired = (np.array([ 0.2673, 0.5346, 0.8018]), 1.2001156089449496)
    array_func_test_two_outputs(find_omega_theta, func_args, ret_desired)
    #Test find_v
    arg1 = np.array([1.0, 2, 3])
    arg2 = 1
    arg3 = np.array([0.5,-0.5,1])
    func_args = (arg1,arg2,arg3)
    ret_desired = np.array([[-0.1255],
                            [ 0.0431],
                            [ 0.0726]])
    array_func_test(find_v, func_args, ret_desired)
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you | under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# | Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Module for the ops not belonging to the official numpy package."""
from . import _op
from . import image
from . import _register
from ._op import * # pylint: disable=wildcard-import
__all__ = _op.__all__
|
# A SCons tool for R scripts
#
# Copyright (c) 2014 Kendrick Boyd. This is free software. See LICENSE
# for details.
" | ""
Basic test of producing output using save.
"""
import TestSCons
test = TestSCons.TestSCons()
# Add scons_r tool to test figure.
test.file_fixture('../__init__.py', 'site_scons/site_tools/scons_r/__init__.py')
test.write(['SConstruct'], """\
import os
env = Environment(TOOLS = ['scons_r'])
env.R('basic.r')
""")
test.write(['basic.r'], """\
x=rnorm(100)
save(x, file='x.rdata')
""")
test.run(arguments='.', stderr=None)
test.must_exis | t('x.rdata')
test.pass_test()
|
from django.shortcuts import render_to_response
from django.template import RequestContext
from markitup import | settings
from markitup.markup import filter_func
from markitup.sanitize import sanitize_html
def apply_filter(request):
    """Render a sanitized, markup-filtered preview of the POSTed ``data``."""
    raw = request.POST.get('data', '')
    # Strip unsafe HTML first, then run the configured markup filter.
    preview = filter_func(sanitize_html(raw, strip=True))
    return render_to_response('markitup/preview.html',
                              {'preview': preview},
                              context_instance=RequestContext(request))
|
ow.python.lib.io import file_io
from tensorflow.python.util.tf_export import keras_export
# Download URLs for the pretrained Xception weight files (with and without
# the top fully-connected classification layer).
TF_WEIGHTS_PATH = (
    'https://storage.googleapis.com/tensorflow/keras-applications/'
    'xception/xception_weights_tf_dim_ordering_tf_kernels.h5')
TF_WEIGHTS_PATH_NO_TOP = (
    'https://storage.googleapis.com/tensorflow/keras-applications/'
    'xception/xception_weights_tf_dim_ordering_tf_kernels_notop.h5')
# Layer namespace used throughout the model definition below; presumably
# resolves to the appropriate Keras layers implementation per TF version.
layers = VersionAwareLayers()
@keras_export('keras.applications.xception.Xception',
'keras.applications.Xception')
def Xception(
include_top=True,
weights='imagenet',
input_tensor=None,
input_shape=None,
pooling=None,
classes=1000,
classifier_activation='softmax'):
"""Instantiates the Xception architecture.
Reference:
- [Xception: Deep Learning with Depthwise Separable Convolutions](
https://arxiv.org/abs/1610.02357) (CVPR 2017)
Optionally loads weights pre-trained on ImageNet.
Note that the data format convention used by the model is
the one specified in your Keras config at `~/.keras/keras.json`.
Note that the default input image size for this model is 299x299.
Caution: Be sure to properly pre-process your inputs to the application.
Please see `applications.xception.preprocess_input` for an example.
Arguments:
include_top: whether to include the fully-connected
layer at the top of the network.
weights: one of `None` (random initialization),
'imagenet' (pre-training on ImageNet),
or the path to the weights file to be loaded.
input_tensor: optional Keras tensor
(i.e. output of `layers.Input()`)
to use as image input for the model.
input_shape: optional shape tuple, only to be specified
if `include_top` is False (otherwise the input shape
has to be `(299, 299, 3)`.
It should have exactly 3 inputs channels,
and width and height should be no smaller than 71.
E.g. `(150, 150, 3)` would be one valid value.
pooling: Optional pooling mode for feature extraction
when `include_top` is `False`.
- `None` means that the output of the model will be
the 4D tensor output of the
last convolutional block.
- `avg` means that global average pooling
will be applied to the output of the
last convolutional block, and thus
the output of the model will be a 2D tensor.
- `max` means that global max pooling will
be applied.
classes: optional number of classes to classify images
into, only to be specified if `include_top` is True,
and if no `weights` argument is specified.
classifier_activation: A `str` or callable. The activation function to use
on the "top" layer. Ignored unless `include_top=True`. Set
`classifier_activation=None` to return the logits of the "top" layer.
Returns:
A `keras.Model` instance.
Raises:
ValueError: in case of invalid argument for `weights`,
or invalid input shape.
ValueError: if `classifier_activation` is not `softmax` or `None` when
using a pretrained top layer.
"""
if not (weights in {'imagenet', None} or file_io.file_exists_v2(weights)):
raise ValueError('The `weights` argument should be either '
'`None` (random initialization), `imagenet` '
'(pre-training on ImageNet), '
'or the path to the weights file to be loaded.')
if weights == 'imagenet' and include_top and classes != 1000:
raise ValueError('If using `weights` as `"imagenet"` with `include_top`'
' as true, `classes` should be 1000')
# Determine proper input shape
input_shape = imagenet_utils.obtain_input_shape(
input_shape,
default_size=299,
min_size=71,
data_format=backend.image_data_format(),
require_flatten=include_top,
weights=weights)
if input_tensor is None:
img_input = layers.Input(shape=input_shape)
else:
if not backend.is_keras_tensor(input_tensor):
img_input = layers.Input(tensor=input_tensor, shape=input_shape)
else:
img_input = input_tensor
channel_axis = 1 if backend.image_data_format() == 'channels_first' else -1
x = layers.Conv2D(
32, (3, 3),
strides=(2, 2),
use_bias=False,
name='block1_conv1')(img_input)
x = layers.BatchNormalization(axis=channel_axis, name='block1_conv1_bn')(x)
x = layers.Activation('relu', name='block1_conv1_act')(x)
x = layers.Conv2D(64, (3, 3), use_bias=False, name='block1_conv2')(x)
x = layers.BatchNormalization(axis=channel_axis, name='block1_conv2_bn')(x)
x = layers.Activation('relu', name='block1_conv2_act')(x)
residual = layers.Conv2D(
128, (1, 1), strides=(2, 2), padding='same', use_bias=False)(x)
residual = layers.BatchNormalization(axis=channel_axis)(residual)
x = layers.SeparableConv2D(
128, (3, 3), padding='same', use_bias=False, name='block2_sepconv1')(x)
x = layers.BatchNormalization(axis=channel_axis, name='block2_sepconv1_bn')(x)
x = layers.Activation('relu', name='block2_sepconv2_act')(x)
x = layers.SeparableConv2D(
128, (3, 3), padding='same', use_bias=False, name='block2_sepconv2')(x)
x = layers.BatchNormalization(axis=channel_axis, name='block2_sepconv2_bn')(x)
x = layers.MaxPooling2D((3, 3),
strides=(2, 2),
padding='same',
name='block2_pool')(x)
x = layers.add([x, residual])
residual = layers.Conv2D(
256, (1, 1), strides=(2, 2), padding='same', use_bias=False)(x)
residual = layers.BatchNormalization(axis=channel_axis)(residual)
x = layers.Activation('relu', name='block3_sepconv1_act')(x)
x = layers.SeparableConv2D(
256, (3, 3), padding='same', use_bias=False, name='block3_sepconv1')(x)
x = layers.BatchNormalization(axis=channel_axis, name='block3_sepconv1_bn')(x)
x = layers.Activation('relu', name='block3_sepconv2_act')(x)
x = layers.SeparableConv2D(
256, (3, 3), padding='same', use_bias=False, name='block3_sepconv2')(x)
x = layers.BatchNormalization(axis=channel_axis, name='block3_sepconv2_bn')(x)
x = layers.MaxPooling2D((3, 3),
strides=(2, 2),
padding='same',
name='block3_pool')(x)
x = layers.add([x, residual])
residual = layers.Conv2D(
728, (1, 1), strides=(2, 2), padding='same', use_bias=False)(x)
residual = layers.BatchNormalization(axis=channel_axis)(residual)
x = layers.Activation('relu', name='block4_sepconv1_act')(x)
x = layers.SeparableConv2D(
728, (3, 3), padding='same', use_bias=False, name='block4_sepconv1')(x)
x = layers.BatchNormalization(axis=channel_axis, name='block4_sepconv1_bn')(x)
x = layers.Activation('relu', name='block4_sepconv2_act')(x)
x = layers.SeparableConv2D(
728, (3, 3), padding='same', use_bias=False, name='block4_ | sepconv2')(x)
x = layers.BatchNormalization(axis=channel_axis, name='block4_sepconv2_bn')(x)
x = layers.MaxPooling2D((3, 3),
strides=(2, 2),
padding='same',
name='block4_pool')(x)
x = layers.add([x, residual])
for i in range(8):
residual = x
prefix = 'block' + str(i + 5)
x = laye | rs.Activation('relu', name=prefix + '_sepconv1_act')(x)
x = layers.SeparableConv2D(
728, (3, 3),
padding='same',
use_bias=False,
name=prefix + '_sepconv1')(x)
x = layers.BatchNormalization(
axis=channel_axis, name=prefix + '_sepconv1_bn')(x)
x = layers.Activation('relu', name=prefix + '_sepconv2_act')(x)
x = layers.SeparableConv2D(
728, (3, 3),
padding='same',
use_bias=False,
name=prefix + '_sepconv2')(x)
x = layers.BatchNormalization(
axis=channel_axis, name=prefix + '_sepconv2_bn')(x)
x = layers.Activation('relu', name=prefix + '_sepconv3_act')(x)
x = layers.SeparableConv2D(
728, (3, 3),
padding='same',
use_bias=False,
name=prefix + '_sepconv3')(x)
x = layers.BatchNormalization(
|
from __future__ import absolute_import, division, print_function
class _slice(object):
""" A hashable slice object
>>> _slice(0, 10, None)
0:10
"""
def __init__(self, start, stop, step):
self.start = start
self.stop = stop
self.step = step
def __hash__(self):
return hash((slice, self.start, self.stop, self.step))
def __str__(self):
s = ''
if self.start is not None:
s = s + str(self.start)
s = s + ':'
if self.stop is not None:
s = s + str(self.stop)
if self.step is not None:
s = s + ':' + str(self.step)
return s
def __eq__(self, other):
return (type(self), self.start, self.stop, self.step) == \
(type(other), other.start, other.stop, other.step)
def as_slice(self):
return slice(self.start, self.stop, self.step)
__repr__ = __str__
class hashable_list(tuple):
    """A hashable stand-in for a list index that still prints like a list."""

    def __str__(self):
        # A list's str() is its repr(), so delegating to repr() of the
        # equivalent list yields identical output.
        return repr(list(self))
def hashable_index(index):
    """ Convert slice-thing into something hashable

    >>> hashable_index(1)
    1

    >>> isinstance(hash(hashable_index((1, slice(10)))), int)
    True
    """
    # Exact-type check on purpose: hashable_list subclasses tuple and must
    # not be unpacked and re-wrapped here.
    if type(index) is tuple:
        return tuple(hashable_index(item) for item in index)
    if isinstance(index, list):
        return hashable_list(index)
    if isinstance(index, slice):
        return _slice(index.start, index.stop, index.step)
    return index
def replace_slices(index):
    """Inverse of hashable_index: restore real slices/lists from their
    hashable stand-ins, recursing through tuples."""
    if isinstance(index, hashable_list):
        return list(index)
    if isinstance(index, _slice):
        return index.as_slice()
    if isinstance(index, tuple):
        return tuple(replace_slices(item) for item in index)
    return index
|
tal time, idle time, processing time, time since prior interval start,
busy since prior interval start), all in ms (int).
"""
now = get_ion_ts_millis()
running_time = now - self._start_time
idle_time = running_time - self._proc_time
cur_interval = now / STAT_INTERVAL_LENGTH
now_since_prior = now - (cur_interval - 1) * STAT_INTERVAL_LENGTH
if cur_interval == self._proc_interval_num:
proc_time_since_prior = self._proc_time-self._proc_time_prior2
elif cur_interval-1 == self._proc_interval_num:
proc_time_since_prior = self._proc_time-self._proc_time_prior
else:
proc_time_since_prior = 0
return (running_time, idle_time, self._proc_time, now_since_prior, proc_time_since_prior)
def _child_failed(self, child):
"""
Callback from gevent as set in the TheadManager, when a ch | ild greenlet fails.
Kills the ION process main greenlet. This propagates the error up to the process supervisor.
"""
# remove the child from the list of children (so we can shut down cleanly)
for x in self.thread_manager.children:
if x.proc == child:
self.thread_manager.children.remove(x)
b | reak
self._dead_children.append(child)
# kill this process's main greenlet. This should be noticed by the container's proc manager
self.proc.kill(child.exception)
def add_endpoint(self, listener, activate=True):
"""
Adds a listening endpoint to be managed by this ION process.
Spawns the listen loop and sets the routing call to synchronize incoming messages
here. If this process hasn't been started yet, adds it to the list of listeners
to start on startup.
@param activate If True (default), start consuming from listener
"""
if self.proc:
listener.routing_call = self._routing_call
if self.name:
svc_name = "unnamed-service"
if self.service is not None and hasattr(self.service, 'name'):
svc_name = self.service.name
listen_thread_name = "%s-%s-listen-%s" % (svc_name, self.name, len(self.listeners)+1)
else:
listen_thread_name = "unknown-listener-%s" % (len(self.listeners)+1)
listen_thread = self.thread_manager.spawn(listener.listen, thread_name=listen_thread_name, activate=activate)
listen_thread.proc._glname = "ION Proc listener %s" % listen_thread_name
self._listener_map[listener] = listen_thread
self.listeners.append(listener)
else:
self._startup_listeners.append(listener)
def remove_endpoint(self, listener):
"""
Removes a listening endpoint from management by this ION process.
If the endpoint is unknown to this ION process, raises an error.
@return The PyonThread running the listen loop, if it exists. You are
responsible for closing it when appropriate.
"""
if listener in self.listeners:
self.listeners.remove(listener)
return self._listener_map.pop(listener)
elif listener in self._startup_listeners:
self._startup_listeners.remove(listener)
return None
else:
raise IonProcessError("Cannot remove unrecognized listener: %s" % listener)
def target(self, *args, **kwargs):
"""
Entry point for the main process greenlet.
Setup the base properties for this process (mainly the control thread).
"""
if self.name:
threading.current_thread().name = "%s-target" % self.name
# start time
self._start_time = get_ion_ts_millis()
self._proc_interval_num = self._start_time / STAT_INTERVAL_LENGTH
# spawn control flow loop
self._ctrl_thread = self.thread_manager.spawn(self._control_flow)
self._ctrl_thread.proc._glname = "ION Proc CL %s" % self.name
# wait on control flow loop, heartbeating as appropriate
while not self._ctrl_thread.ev_exit.wait(timeout=self._heartbeat_secs):
hbst = self.heartbeat()
if not all(hbst):
log.warn("Heartbeat status for process %s returned %s", self, hbst)
if self._heartbeat_stack is not None:
stack_out = "".join(traceback.format_list(self._heartbeat_stack))
else:
stack_out = "N/A"
#raise PyonHeartbeatError("Heartbeat failed: %s, stacktrace:\n%s" % (hbst, stack_out))
log.warn("Heartbeat failed: %s, stacktrace:\n%s", hbst, stack_out)
# this is almost a no-op as we don't fall out of the above loop without
# exiting the ctrl_thread, but having this line here makes testing much easier.
self._ctrl_thread.join()
def _routing_call(self, call, context, *callargs, **callkwargs):
"""
Endpoints call into here to synchronize across the entire IonProcess.
Returns immediately with an AsyncResult that can be waited on. Calls
are made by the loop in _control_flow. We pass in the calling greenlet so
exceptions are raised in the correct context.
@param call The call to be made within this ION processes' calling greenlet.
@param callargs The keyword args to pass to the call.
@param context Optional process-context (usually the headers of the incoming call) to be
set. Process-context is greenlet-local, and since we're crossing greenlet
boundaries, we must set it again in the ION process' calling greenlet.
"""
ar = AsyncResult()
if len(callargs) == 0 and len(callkwargs) == 0:
log.trace("_routing_call got no arguments for the call %s, check your call's parameters", call)
self._ctrl_queue.put((greenlet.getcurrent(), ar, call, callargs, callkwargs, context))
return ar
def has_pending_call(self, ar):
"""
Returns true if the call (keyed by the AsyncResult returned by _routing_call) is still pending.
"""
for _, qar, _, _, _, _ in self._ctrl_queue.queue:
if qar == ar:
return True
return False
def _cancel_pending_call(self, ar):
"""
Cancels a pending call (keyed by the AsyncResult returend by _routing_call).
@return True if the call was truly pending.
"""
if self.has_pending_call(ar):
ar.set(False)
return True
return False
def _interrupt_control_thread(self):
"""
Signal the control flow thread that it needs to abort processing, likely due to a timeout.
"""
self._ctrl_thread.proc.kill(exception=OperationInterruptedException, block=False)
def cancel_or_abort_call(self, ar):
"""
Either cancels a future pending call, or aborts the current processing if the given AR is unset.
The pending call is keyed by the AsyncResult returned by _routing_call.
"""
if not self._cancel_pending_call(ar) and not ar.ready():
self._interrupt_control_thread()
def _control_flow(self):
"""
Entry point for process control thread of execution.
This method is run by the control greenlet for each ION process. Listeners attached
to the process, either RPC Servers or Subscribers, synchronize calls to the process
by placing call requests into the queue by calling _routing_call.
This method blocks until there are calls to be made in the synchronized queue, and
then calls from within this greenlet. Any exception raised is caught and re-raised
in the greenlet that originally scheduled the call. If successful, the AsyncResult
created at scheduling time is set with the result of the call.
"""
svc_name = getattr(self.service, "name", "unnamed-service") if self.service else "unnamed |
#! /usr/bin/python
# Joe Deller 2014
# Finding out where we are in minecraft
# Level : Beginner
# Uses : Libraries, variables, functions
# Minecraft worlds on the Raspberry Pi are smaller than
# other minecraft worlds, but are still pretty big
# So one of the first things we need to learn to do
# is find out where we are in the world
# As the player moves around the world, Minecraft keeps track
# of the X (left / right ) , Y (height) ,Z (depth) coordinates of the player
# You can see these numbers on the main minecraft game screen
# The minecraft library has a method called getTilePos()
# It tracks where the player is
# This program introduces the "while" keyword, our first
# example of a loop, to make sure the program never stops
# until there is either an error, or we manually stop (break)
# the program using Ctrl-C on the keyboard |
import mcpi.minecraft as minecraft
# The time library lets the script pause between chat messages so the
# screen is not flooded with output.
import time

# Connect to the running Minecraft game.
mc = minecraft.Minecraft.create()

# getTilePos() reports the player's position as whole-number (tile)
# coordinates; the similar getPos() would give the exact position with
# decimal places instead. Note that Python names are case-sensitive:
# playerPos, Playerpos and PlayerPOS would be three different variables.
playerPos = mc.player.getTilePos()

# Infinite loop: "while True" never stops on its own, so the program runs
# until an error occurs or it is interrupted with Ctrl-C. The indented
# lines below are the body of the loop.
while True:
    location = mc.player.getTilePos()
    # location bundles three coordinates (x, y, z). Before they can be
    # shown with postToChat() each number must be converted to text;
    # str() takes a number and hands back the equivalent string.
    x = str(location.x)
    y = str(location.y)
    z = str(location.z)
    mc.postToChat("You are standing at X: " + x + ", Y: " + y + ", Z: " + z)
    # Take a breath before reporting again.
    time.sleep(1)
|
from pySDC import CollocationClasses as collclass
import numpy as np
from ProblemClass import sharpclaw
#from examples.sharpclaw_burgers1d.TransferClass import mesh_to_mesh_1d
from pySDC.datatype_classes.mesh import mesh, rhs_imex_mesh
from pySDC.sweeper_classes.imex_1st_order import imex_1st_order
import pySDC.Methods as mp
fr | om pySDC import Log
from pySDC.Stats import grep_stats, sort_stats
# Sharpclaw imports
from clawpack import pyclaw
from clawpack import riemann
from matplotlib import pyplot as plt
if __name__ == "__main__":
    # Global logger (remove this if you do not want the output at all).
    logger = Log.setup_custom_logger('root')

    num_procs = 1

    # Level parameters: iterate until the residual drops below this tolerance.
    lparams = {'restol': 1E-10}

    # Step parameters: hard cap on iterations per time step.
    sparams = {'maxiter': 20}

    # Time-stepping setup.
    t0 = 0
    dt = 0.001
    Tend = 100 * dt

    # Problem parameters handed to the problem class.
    pparams = {'nvars': [(2, 50, 50)], 'nu': 0.001}

    # Transfer-operator parameters (only needed if the transfer entries
    # below are re-enabled).
    tparams = {'finter': True}

    # Description dictionary for easy hierarchy creation.
    description = {
        'problem_class': sharpclaw,
        'problem_params': pparams,
        'dtype_u': mesh,
        'dtype_f': rhs_imex_mesh,
        'collocation_class': collclass.CollGaussLobatto,
        'num_nodes': 5,
        'sweeper_class': imex_1st_order,
        'level_params': lparams,
    }
    # description['transfer_class'] = mesh_to_mesh_1d
    # description['transfer_params'] = tparams

    # Quickly generate the block of steps.
    MS = mp.generate_steps(num_procs, sparams, description)

    # Initial values on the finest level.
    P = MS[0].levels[0].prob
    uinit = P.u_exact(t0)

    # Run the serial PFASST/SDC iteration.
    uend, stats = mp.run_pfasst_serial(MS, u0=uinit, t0=t0, dt=dt, Tend=Tend)

    # Exact solution at the final time, for comparison/plotting.
    uex = P.u_exact(Tend)

    fig = plt.figure(figsize=(8, 8))
    plt.imshow(uend.values[0, :, :])
    plt.show()
|
with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ====================================================================
import sys, lucene, unittest
from lucene import JArray
from PyLuceneTestCase import PyLuceneTestCase
from MultiSpansWrapper import MultiSpansWrapper
from java.io import StringReader
from org.apache.lucene.analysis import Analyzer
from org.apache.lucene.analysis.core import \
LowerCaseTokenizer, WhitespaceTokenizer
from org.apache.lucene.analysis.tokenattributes import \
CharTermAttribute, OffsetAttribute, PayloadAttribute, \
PositionIncrementAttribute
from org.apache.lucene.document import Document, Field, TextField
from org.apache.lucene.index import MultiFields, Term
from org.apache.lucene.queryparser.classic import QueryParser
from org.apache.lucene.search import MultiPhraseQuery, PhraseQuery
from org.apache.lucene.search.payloads import PayloadSpanUtil
from org.apache.lucene.search.spans import SpanNearQuery, SpanTermQuery
from org.apache.lucene.util import BytesRef, Version
from org.apache.pylucene.analysis import \
PythonAnalyzer, PythonFilteringTokenFilter, PythonTokenFilter, \
PythonTokenizer
class PositionIncrementTestCase(PyLuceneTestCase):
"""
Unit tests ported from Java Lucene
"""
    def testSetPosition(self):
        """
        Index five tokens "1".."5" with custom position increments (1, 2, 1, 0, 1)
        and verify that term positions, PhraseQuery and MultiPhraseQuery all honor
        the resulting positions. The asserts below establish that "1" lands at
        position 0, "2" at position 2, and "3"/"4" share a position (increment 0).
        """

        class _tokenizer(PythonTokenizer):
            # Hand-rolled token stream: emits TOKENS one per incrementToken()
            # call, attaching the matching entry of INCREMENTS as the
            # position increment.
            def __init__(_self, reader):
                super(_tokenizer, _self).__init__(reader)
                _self.TOKENS = ["1", "2", "3", "4", "5"]
                _self.INCREMENTS = [1, 2, 1, 0, 1]
                _self.i = 0
                _self.posIncrAtt = _self.addAttribute(PositionIncrementAttribute.class_)
                _self.termAtt = _self.addAttribute(CharTermAttribute.class_)
                _self.offsetAtt = _self.addAttribute(OffsetAttribute.class_)

            def incrementToken(_self):
                if _self.i == len(_self.TOKENS):
                    return False
                _self.clearAttributes()
                _self.termAtt.append(_self.TOKENS[_self.i])
                _self.offsetAtt.setOffset(_self.i, _self.i)
                _self.posIncrAtt.setPositionIncrement(_self.INCREMENTS[_self.i])
                _self.i += 1
                return True

            def end(_self):
                pass

            def reset(_self):
                pass

            def close(_self):
                pass

        class _analyzer(PythonAnalyzer):
            # Analyzer whose token stream is the custom tokenizer above; the
            # field content ("bogus") is irrelevant since the tokenizer
            # ignores the reader's text.
            def createComponents(_self, fieldName, reader):
                return Analyzer.TokenStreamComponents(_tokenizer(reader))

        # Index a single document through the custom analyzer.
        writer = self.getWriter(analyzer=_analyzer())
        d = Document()
        d.add(Field("field", "bogus", TextField.TYPE_STORED))
        writer.addDocument(d)
        writer.commit()
        writer.close()

        searcher = self.getSearcher()
        reader = searcher.getIndexReader()

        pos = MultiFields.getTermPositionsEnum(reader, MultiFields.getLiveDocs(reader), "field", BytesRef("1"))
        pos.nextDoc()
        # first token should be at position 0
        self.assertEqual(0, pos.nextPosition())

        pos = MultiFields.getTermPositionsEnum(reader, MultiFields.getLiveDocs(reader), "field", BytesRef("2"))
        pos.nextDoc()
        # second token should be at position 2
        self.assertEqual(2, pos.nextPosition())

        # "1" and "2" are two positions apart, so the default adjacent-phrase
        # query must not match.
        q = PhraseQuery()
        q.add(Term("field", "1"))
        q.add(Term("field", "2"))
        hits = searcher.search(q, None, 1000).scoreDocs
        self.assertEqual(0, len(hits))

        # same as previous, just specify positions explicitely.
        q = PhraseQuery()
        q.add(Term("field", "1"), 0)
        q.add(Term("field", "2"), 1)
        hits = searcher.search(q, None, 1000).scoreDocs
        self.assertEqual(0, len(hits))

        # specifying correct positions should find the phrase.
        q = PhraseQuery()
        q.add(Term("field", "1"), 0)
        q.add(Term("field", "2"), 2)
        hits = searcher.search(q, None, 1000).scoreDocs
        self.assertEqual(1, len(hits))

        q = PhraseQuery()
        q.add(Term("field", "2"))
        q.add(Term("field", "3"))
        hits = searcher.search(q, None, 1000).scoreDocs
        self.assertEqual(1, len(hits))

        # "3" and "4" share one position (increment 0), so as an adjacent
        # phrase they do not match...
        q = PhraseQuery()
        q.add(Term("field", "3"))
        q.add(Term("field", "4"))
        hits = searcher.search(q, None, 1000).scoreDocs
        self.assertEqual(0, len(hits))

        # phrase query would find it when correct positions are specified.
        q = PhraseQuery()
        q.add(Term("field", "3"), 0)
        q.add(Term("field", "4"), 0)
        hits = searcher.search(q, None, 1000).scoreDocs
        self.assertEqual(1, len(hits))

        # phrase query should fail for non existing searched term
        # even if there exist another searched terms in the same searched
        # position.
        q = PhraseQuery()
        q.add(Term("field", "3"), 0)
        q.add(Term("field", "9"), 0)
        hits = searcher.search(q, None, 1000).scoreDocs
        self.assertEqual(0, len(hits))

        # multi-phrase query should succed for non existing searched term
        # because there exist another searched terms in the same searched
        # position.
        mq = MultiPhraseQuery()
        mq.add([Term("field", "3"), Term("field", "9")], 0)
        hits = searcher.search(mq, None, 1000).scoreDocs
        self.assertEqual(1, len(hits))

        q = PhraseQuery()
        q.add(Term("field", "2"))
        q.add(Term("field", "4"))
        hits = searcher.search(q, None, 1000).scoreDocs
        self.assertEqual(1, len(hits))

        q = PhraseQuery()
        q.add(Term("field", "3"))
        q.add(Term("field", "5"))
        hits = searcher.search(q, None, 1000).scoreDocs
        self.assertEqual(1, len(hits))

        q = PhraseQuery()
        q.add(Term("field", "4"))
        q.add(Term("field", "5"))
        hits = searcher.search(q, None, 1000).scoreDocs
        self.assertEqual(1, len(hits))

        q = PhraseQuery()
        q.add(Term("field", "2"))
        q.add(Term("field", "5"))
        hits = searcher.search(q, None, 1000).scoreDocs
        self.assertEqual(0, len(hits))
def testPayloadsPos0(self):
writer = self.getWriter(analyzer=TestPayloadAnalyzer())
doc = Document()
doc.add(Field("content", "a a b c d e a f g h i j a b k k",
TextField.TYPE_STORED))
writer.addDocument(doc)
reader = writer.getReader()
writer.close()
tp = MultiFields.getTermPositionsEnum(reader,
MultiFields.getLiveDocs(reader),
"content", BytesRef("a"))
count = 0
self.assert_(tp.nextDoc() != tp.NO_MORE_DOCS)
# "a" occurs 4 times
self.assertEqual(4, tp.freq())
expected = 0
self.assertEqual(expected, tp.nextPosition())
self.assertEqual(1, tp.nextPosition())
self.assertEqual(3, tp.nextPosition())
self.assertEqual(6, tp.nextPosition())
# only one doc has "a"
self.assert_(tp.nextDoc() == tp.NO_MORE_DOCS)
searcher = self.getSearcher(reader=reader)
stq1 = SpanTermQuery(Term("content", "a"))
stq2 = SpanTermQuery(Term("content", "k"))
sqs = [stq1, stq2]
snq = SpanNearQuery(sqs, 30, False)
count = 0
sawZero = False
pspans = MultiSpansWrapper.wrap(searcher.getTopReaderContext(), snq)
while pspans.next():
payloads = pspans.getPayload()
sawZero |= pspans.start() == 0
it = payloads.iterator()
while it.hasNext():
count += 1
it.next()
self.assertEqual(5, count)
s |
# parsetab.py
# This file is automatically generated. Do not edit.
# NOTE(review): this is a PLY-generated LALR table module ("Do not edit"
# above); the code is left byte-identical and only comments are added.

# Table-format version emitted by the PLY generator.
_tabversion = '3.2'

_lr_method = 'LALR'

# Fingerprint of the grammar these tables were built from; presumably used
# by PLY to detect a stale table and regenerate it -- confirm against the
# PLY version in use.
_lr_signature = '\x91\x95\xa5\xf7\xe0^bz\xc0\xf4\x04\xf9Z\xebA\xba'

# Compact action table: token name -> (list of states, list of actions),
# expanded below into _lr_action[state][token] = action.
_lr_action_items = {'NAME':([0,2,5,7,11,12,13,14,],[1,8,8,8,8,8,8,8,]),')':([3,8,9,10,16,17,18,19,20,],[-9,-10,-7,16,-8,-4,-3,-5,-6,]),'(':([0,2,5,7,11,12,13,14,],[5,5,5,5,5,5,5,5,]),'+':([1,3,6,8,9,10,15,16,17,18,19,20,],[-10,-9,12,-10,-7,12,12,-8,-4,-3,-5,-6,]),'*':([1,3,6,8,9,10,15,16,17,18,19,20,],[-10,-9,13,-10,-7,13,13,-8,13,13,-5,-6,]),'-':([0,1,2,3,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,],[2,-10,2,-9,2,11,2,-10,-7,11,2,2,2,2,11,-8,-4,-3,-5,-6,]),'NUMBER':([0,2,5,7,11,12,13,14,],[3,3,3,3,3,3,3,3,]),'/':([1,3,6,8,9,10,15,16,17,18,19,20,],[-10,-9,14,-10,-7,14,14,-8,14,14,-5,-6,]),'=':([1,],[7,]),'$end':([1,3,4,6,8,9,15,16,17,18,19,20,],[-10,-9,0,-2,-10,-7,-1,-8,-4,-3,-5,-6,]),}

# Zip each token's parallel state/action lists into the per-state dicts.
_lr_action = { }
for _k, _v in _lr_action_items.items():
    for _x,_y in zip(_v[0],_v[1]):
        if not _x in _lr_action: _lr_action[_x] = { }
        _lr_action[_x][_k] = _y
del _lr_action_items

# Compact goto table: nonterminal -> (list of states, list of goto states),
# expanded the same way into _lr_goto[state][nonterminal] = state.
_lr_goto_items = {'expression':([0,2,5,7,11,12,13,14,],[6,9,10,15,17,18,19,20,]),'statement':([0,],[4,]),}

_lr_goto = { }
for _k, _v in _lr_goto_items.items():
    for _x,_y in zip(_v[0],_v[1]):
        if not _x in _lr_goto: _lr_goto[_x] = { }
        _lr_goto[_x][_k] = _y
del _lr_goto_items

# Grammar productions: (rule string, LHS name, RHS length, handler name,
# source file, source line) -- the file/line entries point at the grammar
# module this table was generated from.
_lr_productions = [
  ("S' -> statement","S'",1,None,None,None),
  ('statement -> NAME = expression','statement',3,'p_statement_assign','D:\\repos\\test\\testpy\\testply.py',58),
  ('statement -> expression','statement',1,'p_statement_expr','D:\\repos\\test\\testpy\\testply.py',63),
  ('expression -> expression + expression','expression',3,'p_expression_binop','D:\\repos\\test\\testpy\\testply.py',68),
  ('expression -> expression - expression','expression',3,'p_expression_binop','D:\\repos\\test\\testpy\\testply.py',69),
  ('expression -> expression * expression','expression',3,'p_expression_binop','D:\\repos\\test\\testpy\\testply.py',70),
  ('expression -> expression / expression','expression',3,'p_expression_binop','D:\\repos\\test\\testpy\\testply.py',71),
  ('expression -> - expression','expression',2,'p_expression_uminus','D:\\repos\\test\\testpy\\testply.py',83),
  ('expression -> ( expression )','expression',3,'p_expression_group','D:\\repos\\test\\testpy\\testply.py',88),
  ('expression -> NUMBER','expression',1,'p_expression_number','D:\\repos\\test\\testpy\\testply.py',93),
  ('expression -> NAME','expression',1,'p_expression_name','D:\\repos\\test\\testpy\\testply.py',98),
]
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.