prefix stringlengths 0 918k | middle stringlengths 0 812k | suffix stringlengths 0 962k |
|---|---|---|
# THIS FILE IS PART OF THE CYLC WORKFLOW ENGINE.
# Copyright (C) NIWA & British Crown (Met Office) & Contributors.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""Manage flow counter and flow metadata."""
from typing import Dict, Set, Optional
import datetime
from cylc.flow import LOG
from cylc.flow.workflow_db_mgr import WorkflowDatabaseManager
class FlowMgr:
    """Logic to manage the flow counter and per-flow metadata."""

    def __init__(self, db_mgr: "WorkflowDatabaseManager") -> None:
        """Initialise the flow manager.

        Args:
            db_mgr: Database manager used to persist flow records.
        """
        self.db_mgr = db_mgr
        # Map of flow number -> {"description": ..., "start_time": ...}.
        self.flows: Dict[int, Dict[str, str]] = {}
        # Highest flow number allocated so far.
        self.counter: int = 0

    def get_new_flow(self, description: Optional[str] = None) -> int:
        """Increment the flow counter and record metadata for a new flow.

        Args:
            description: Optional human-readable description of the flow.

        Returns:
            The newly-allocated flow number.
        """
        self.counter += 1
        # Record the start time truncated to the nearest second.
        now = datetime.datetime.now()
        now_sec: str = str(
            now - datetime.timedelta(microseconds=now.microsecond))
        description = description or "no description"
        self.flows[self.counter] = {
            "description": description,
            "start_time": now_sec
        }
        LOG.info(
            f"New flow: {self.counter} "
            f"({description}) "
            f"{now_sec}"
        )
        # Persist the new flow record.
        self.db_mgr.put_insert_workflow_flows(
            self.counter,
            self.flows[self.counter]
        )
        return self.counter

    def load_from_db(self, flow_nums: Set[int]) -> None:
        """Load flow data for scheduler restart.

        Sets the flow counter to the max flow number in the DB.
        Loads metadata for selected flows (those in the task pool at
        startup).
        """
        self.counter = (
            self.db_mgr.pri_dao.select_workflow_flows_max_flow_num())
        self.flows = self.db_mgr.pri_dao.select_workflow_flows(flow_nums)
        self._log()

    def _log(self) -> None:
        """Write current flow info to the log."""
        LOG.info(
            "Flows:\n" + "\n".join(
                (
                    f"flow: {f} "
                    f"({self.flows[f]['description']}) "
                    f"{self.flows[f]['start_time']}"
                )
                for f in self.flows
            )
        )
|
# Copyright 2013 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from tempest.api.network import base
from tempest.common.utils import data_utils
from tempest import test
class ExtraDHCPOptionsTestJSON(base.BaseNetworkTest):
    """
    Tests the following operations with the Extra DHCP Options Neutron API
    extension:

        port create
        port list
        port show
        port update

    v2.0 of the Neutron API is assumed. It is also assumed that the Extra
    DHCP Options extension is enabled in the [network-feature-enabled]
    section of etc/tempest.conf
    """

    @classmethod
    def skip_checks(cls):
        # Skip the whole class when the 'extra_dhcp_opt' Neutron extension
        # is not enabled in the deployment under test.
        super(ExtraDHCPOptionsTestJSON, cls).skip_checks()
        if not test.is_extension_enabled('extra_dhcp_opt', 'network'):
            msg = "Extra DHCP Options extension not enabled."
            raise cls.skipException(msg)

    @classmethod
    def resource_setup(cls):
        # Shared fixtures: one network/subnet/port, plus the DHCP option
        # values (IPv4 or IPv6 literals depending on cls._ip_version).
        super(ExtraDHCPOptionsTestJSON, cls).resource_setup()
        cls.network = cls.create_network()
        cls.subnet = cls.create_subnet(cls.network)
        cls.port = cls.create_port(cls.network)
        cls.ip_tftp = ('123.123.123.123' if cls._ip_version == 4
                       else '2015::dead')
        cls.ip_server = ('123.123.123.45' if cls._ip_version == 4
                         else '2015::badd')
        cls.extra_dhcp_opts = [
            {'opt_value': 'pxelinux.0', 'opt_name': 'bootfile-name'},
            {'opt_value': cls.ip_tftp, 'opt_name': 'tftp-server'},
            {'opt_value': cls.ip_server, 'opt_name': 'server-ip-address'}
        ]

    @test.idempotent_id('d2c17063-3767-4a24-be4f-a23dbfa133c9')
    def test_create_list_port_with_extra_dhcp_options(self):
        # Create a port with Extra DHCP Options
        body = self.client.create_port(
            network_id=self.network['id'],
            extra_dhcp_opts=self.extra_dhcp_opts)
        port_id = body['port']['id']
        self.addCleanup(self.client.delete_port, port_id)
        # Confirm port created has Extra DHCP Options
        body = self.client.list_ports()
        ports = body['ports']
        port = [p for p in ports if p['id'] == port_id]
        self.assertTrue(port)
        self._confirm_extra_dhcp_options(port[0], self.extra_dhcp_opts)

    @test.idempotent_id('9a6aebf4-86ee-4f47-b07a-7f7232c55607')
    def test_update_show_port_with_extra_dhcp_options(self):
        # Update port with extra dhcp options
        name = data_utils.rand_name('new-port-name')
        body = self.client.update_port(
            self.port['id'],
            name=name,
            extra_dhcp_opts=self.extra_dhcp_opts)
        # Confirm extra dhcp options were added to the port
        body = self.client.show_port(self.port['id'])
        self._confirm_extra_dhcp_options(body['port'], self.extra_dhcp_opts)

    def _confirm_extra_dhcp_options(self, port, extra_dhcp_opts):
        # Every retrieved option must match one of the expected options by
        # both name and value; counts must match exactly.
        retrieved = port['extra_dhcp_opts']
        self.assertEqual(len(retrieved), len(extra_dhcp_opts))
        for retrieved_option in retrieved:
            for option in extra_dhcp_opts:
                if (retrieved_option['opt_value'] == option['opt_value'] and
                        retrieved_option['opt_name'] == option['opt_name']):
                    break
            else:
                # for/else: no expected option matched this retrieved one.
                self.fail('Extra DHCP option not found in port %s' %
                          str(retrieved_option))
class ExtraDHCPOptionsIpV6TestJSON(ExtraDHCPOptionsTestJSON):
    # Re-run the same tests against IPv6 addresses/subnets.
    _ip_version = 6
|
import codecs
import logging
import random
def import_url(path, lo, hi):
    """Read a UTF-8 file of 'url:...' lines and return numbered URLs.

    Args:
        path: Path of the input file.
        lo: First (0-based) line index to include; falsy means start.
        hi: One-past-last line index; falsy means end of file.

    Returns:
        List of (number, url) tuples where number is the zero-padded
        absolute line index and url is the text before the first ':'.
    """
    with codecs.open(path, encoding='utf-8') as f:
        text = f.read()
    lines = text.split('\n')
    if not lo:
        lo = 0
    if not hi:
        hi = len(lines)
    lines = lines[lo:hi]
    url_arr = []
    # returns url and its absolute line number
    # (the original 'i+lo in range(lo, hi)' membership test was always
    # true after the slice above, and O(n) per line — removed)
    for i, line in enumerate(lines):
        url = line.split(':')[0]
        num = str(i + lo).zfill(5)
        url_arr.append((num, url))
    return url_arr
def import_proxy(path, mode):
    """Load proxy addresses from *path* and return them shuffled.

    Each returned entry is a dict with 'http' and 'https' proxy URLs.
    In 'comma' mode each line is 'address,port[,...]'; otherwise each
    line is already in 'address:port' form.
    """
    with open(path) as handle:
        raw_lines = handle.read().split('\n')
    # Drop the empty trailing entry produced by the final newline.
    del raw_lines[-1]
    proxies = []
    for entry in raw_lines:
        if mode == 'comma':
            fields = entry.split(',')
            entry = fields[0] + ':' + fields[1]
        proxies.append({
            'http': 'http://' + entry,
            'https': 'https://' + entry,
        })
    random.shuffle(proxies)
    return proxies
def setLogger(path):
    """Create and return (console_logger, result_logger).

    console_logger writes timestamped records to ./console.log and to the
    console; result_logger writes bare messages to ./<path>.

    NOTE(review): calling this more than once adds duplicate handlers to
    the same named loggers — confirm it is only called once per process.
    """
    console_logger = logging.getLogger('consoleLogger')
    hdlr = logging.FileHandler('./console.log')
    formatter = logging.Formatter('%(asctime)s %(levelname)s %(message)s')
    hdlr.setFormatter(formatter)
    # Mirror the same format to stderr.
    consoleHandler = logging.StreamHandler()
    consoleHandler.setFormatter(formatter)
    console_logger.addHandler(hdlr)
    console_logger.addHandler(consoleHandler)
    console_logger.setLevel(logging.DEBUG)
    # Result log: message-only lines, UTF-8 encoded.
    result_logger = logging.getLogger('resultLogger')
    hdlr2 = logging.FileHandler('./'+path,encoding='utf-8')
    formatter2 = logging.Formatter('%(message)s')
    hdlr2.setFormatter(formatter2)
    result_logger.addHandler(hdlr2)
    result_logger.setLevel(logging.DEBUG)
    return console_logger, result_logger
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Generate swig typedef-mapping lines from a gccxml/cable XML file.
# Usage: script.py <input.xml> <output-file-or-->
# NOTE: this is Python 2 code (cStringIO, print >>, file()).
import sys, os
sys.path.append(sys.path[0] + os.sep + 'pygccxml-1.0.0')
import pygccxml, sys, cStringIO

# the output buffer; written to a file (or stdout) at the end
outputFile = cStringIO.StringIO()

# init the pygccxml stuff
pygccxml.declarations.scopedef_t.RECURSIVE_DEFAULT = False
pygccxml.declarations.scopedef_t.ALLOW_EMPTY_MDECL_WRAPPER = True
pygccxml_config = pygccxml.parser.config.config_t()
pygccxml_reader = pygccxml.parser.source_reader.source_reader_t(pygccxml_config)

# and read a xml file
res = pygccxml_reader.read_xml_file(sys.argv[1])

global_ns = pygccxml.declarations.get_global_namespace(res)
cable_ns = global_ns.namespace('_cable_')
wrappers_ns = cable_ns.namespace('wrappers')
# module name is the input file's basename without extension
module = os.path.splitext(os.path.basename(sys.argv[1]))[0]

# iterate over all the typedefs in the _cable_::wrappers namespace
for typedef in wrappers_ns.typedefs():
    n = typedef.name
    s = typedef.type.decl_string
    # drop the :: prefix - it make swig produce invalid code
    if s.startswith("::"):
        s = s[2:]
    print >> outputFile, "{%s} {%s} {%s}" % (s, n, module)

content = outputFile.getvalue()
if sys.argv[2] != '-':
    f = file(sys.argv[2], "w")
    f.write(content)
    f.close()
else:
    sys.stdout.write(content)
|
# Copyright (c) 2006,2007 Mitch Garnaat http://garnaat.org/
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
import boto
from boto.pyami.scriptbase import ScriptBase
import os, StringIO
class CopyBot(ScriptBase):
    """Pyami script that copies every key from one S3 bucket to another.

    Configuration (read from boto.config under this script's section):
    src_bucket, dst_bucket, replace_dst, copy_acls, exit_on_completion,
    and optional destination-account credentials.
    """

    def __init__(self):
        super(CopyBot, self).__init__()
        self.wdir = boto.config.get('Pyami', 'working_dir')
        self.log_file = '%s.log' % self.instance_id
        self.log_path = os.path.join(self.wdir, self.log_file)
        boto.set_file_logger(self.name, self.log_path)
        self.src_name = boto.config.get(self.name, 'src_bucket')
        self.dst_name = boto.config.get(self.name, 'dst_bucket')
        self.replace = boto.config.getbool(self.name, 'replace_dst', True)
        s3 = boto.connect_s3()
        self.src = s3.lookup(self.src_name)
        if not self.src:
            boto.log.error('Source bucket does not exist: %s' % self.src_name)
        # If separate destination credentials are configured, reconnect
        # to the destination account with them.
        dest_access_key = boto.config.get(self.name, 'dest_aws_access_key_id', None)
        if dest_access_key:
            dest_secret_key = boto.config.get(self.name, 'dest_aws_secret_access_key', None)
            # BUG FIX: boto has no top-level 'connect'; use connect_s3.
            s3 = boto.connect_s3(dest_access_key, dest_secret_key)
        self.dst = s3.lookup(self.dst_name)
        if not self.dst:
            self.dst = s3.create_bucket(self.dst_name)

    def copy_bucket_acl(self):
        """Copy the bucket-level ACL from src to dst (if copy_acls set)."""
        if boto.config.get(self.name, 'copy_acls', True):
            acl = self.src.get_xml_acl()
            self.dst.set_xml_acl(acl)

    def copy_key_acl(self, src, dst):
        """Copy one key's ACL from src key to dst key (if copy_acls set)."""
        if boto.config.get(self.name, 'copy_acls', True):
            acl = src.get_xml_acl()
            dst.set_xml_acl(acl)

    def copy_keys(self):
        """Download each key from src and re-upload it to dst.

        Existing keys in dst are skipped unless 'replace_dst' is set.
        Best-effort: any failure is logged and aborts the remaining copies.
        """
        boto.log.info('src=%s' % self.src.name)
        boto.log.info('dst=%s' % self.dst.name)
        key = None  # so the except handler can report even an early failure
        try:
            for key in self.src:
                if not self.replace:
                    exists = self.dst.lookup(key.name)
                    if exists:
                        boto.log.info('key=%s already exists in %s, skipping' % (key.name, self.dst.name))
                        continue
                boto.log.info('copying %d bytes from key=%s' % (key.size, key.name))
                prefix, base = os.path.split(key.name)
                path = os.path.join(self.wdir, base)
                # Round-trip through the local working dir.
                key.get_contents_to_filename(path)
                new_key = self.dst.new_key(key.name)
                new_key.set_contents_from_filename(path)
                self.copy_key_acl(key, new_key)
                os.unlink(path)
        except Exception:
            # Was a bare 'except:' (also swallowed SystemExit /
            # KeyboardInterrupt); 'key' is None if iteration failed early.
            boto.log.exception('Error copying key: %s' %
                               (key.name if key else '<none>'))

    def copy_log(self):
        """Upload this run's log file into the destination bucket."""
        key = self.dst.new_key(self.log_file)
        key.set_contents_from_filename(self.log_path)

    def main(self):
        """Run the copy, upload the log, notify, optionally terminate."""
        fp = StringIO.StringIO()
        boto.config.dump_safe(fp)
        self.notify('%s (%s) Starting' % (self.name, self.instance_id), fp.getvalue())
        if self.src and self.dst:
            self.copy_keys()
        if self.dst:
            self.copy_log()
        self.notify('%s (%s) Stopping' % (self.name, self.instance_id),
                    'Copy Operation Complete')
        if boto.config.getbool(self.name, 'exit_on_completion', True):
            ec2 = boto.connect_ec2()
            ec2.terminate_instances([self.instance_id])
|
#coding=utf-8
'''
Created on 2015年5月18日
python traceback parser
@author: hzwangzhiwei
'''
import json
import re
def str_is_empty(s):
    """Return True if *s* is None, empty, or whitespace-only."""
    # The original chained strip().lstrip().rstrip('') was redundant:
    # a single strip() already removes surrounding whitespace.
    return s is None or s.strip() == ''
class TracebackParser(object):
    """Parse a Python traceback dump into structured fields.

    After parse():
      tb_is_trace -- whether the content looked like a real traceback
      tb_files    -- list of {'file', 'line', 'method'} dicts, outermost first
      tb_type     -- exception type, e.g. 'AttributeError'
      tb_msg      -- exception message, e.g. "'NoneType' object has no ..."
    """
    # Class-level defaults (kept for backward compatibility); instances
    # get their own copies in __init__ so state is never shared.
    tb_is_trace = True
    tb_content = ''
    tb_header = 'Traceback (most recent call last):'
    tb_files = []  # involved files: file, line, method
    tb_type = ''   # exception type, e.g. AttributeError
    tb_msg = ''    # exception message

    def __init__(self):
        """Initialise per-instance state.

        FIX: the original left tb_files as a class-level mutable list,
        shared between all instances.
        """
        self.tb_is_trace = True
        self.tb_content = ''
        self.tb_files = []
        self.tb_type = ''
        self.tb_msg = ''

    def _try_tb_file(self, line, header='Traceback (most recent call last):'):
        """Try to parse *line* as a 'File "...", line N, in fn' entry.

        Returns {'file', 'line', 'method'} on success, else False.
        """
        tb_files_re = r'File "(.*)"[,] line (\d*), in (.*)'
        re_pat = re.compile(tb_files_re)
        search_ret = re_pat.search(line)
        if search_ret:
            g = search_ret.groups()
            if g and len(g) == 3:
                return {'file': g[0], 'line': g[1], 'method': g[2]}
        return False

    def _try_tb_type_msg(self, line):
        """Try to parse *line* as 'ExceptionType: message'.

        On success sets tb_type/tb_msg and returns True, else False.
        """
        tb_type_msg_re = '(.*): (.*)'
        re_pat = re.compile(tb_type_msg_re)
        search_ret = re_pat.search(line)
        if search_ret:
            g = search_ret.groups()
            if g and len(g) == 2:
                self.tb_type = g[0]
                self.tb_msg = g[1]
                return True
        return False

    def parse(self, content):
        """Parse *content*; return True if it is a valid traceback."""
        self.tb_header = 'Traceback (most recent call last):'
        self.tb_files = []
        self.tb_type = ''
        self.tb_msg = ''
        self.tb_content = content
        tb_lines = self.tb_content.split('\n')
        is_first_line = True
        for line in tb_lines:
            line = line.strip()
            if str_is_empty(line):
                continue
            # The first non-empty line must contain the header, otherwise
            # this is not a traceback at all.
            if is_first_line:
                if self.tb_header in line:
                    is_first_line = False
                    continue
                else:
                    self.tb_is_trace = False
                    return False
            else:
                # 1. Try to parse as a file entry; on success move on.
                tb_file = self._try_tb_file(line)
                if tb_file:
                    self.tb_files.append(tb_file)
                    continue
                # 2. Otherwise try to parse as 'Type: message'.
                self._try_tb_type_msg(line)
        return True

    def trace_code_info(self):
        """Return the innermost file entry (closest to the raise site)."""
        if self.tb_is_trace:
            if self.tb_files and len(self.tb_files) > 0:
                return self.tb_files[len(self.tb_files) - 1]
        return ('', '', '')

    def trace_msg(self):
        """Return (exception type, exception message).

        BUG FIX: previously returned tb_type twice instead of the message.
        """
        return (self.tb_type, self.tb_msg)

    def tostring(self):
        """Re-render the parsed traceback as text (files as JSON lines)."""
        rst = ''
        rst += self.tb_header
        rst += '\n'
        for f in self.tb_files:
            rst += json.dumps(f, default=lambda o: o.__dict__)
            rst += '\n'
        rst += self.tb_type + ': ' + self.tb_msg
        return rst

    def to_md5(self):
        """Return an MD5 hex digest uniquely identifying this trace.

        Keyed on exception type/message and the innermost file entry.
        """
        rst = ''
        try:
            if self.tb_is_trace:
                rst += (self.tb_type + '|' + self.tb_msg)
                if self.tb_files and len(self.tb_files) > 0:
                    f = self.tb_files[len(self.tb_files) - 1]  # innermost
                    rst += ('|' + f['file'] + '|' + f['line'] + '|' + f['method'])
        except Exception:
            rst = ''
        import hashlib
        m = hashlib.md5()
        # FIX: hashlib requires bytes on Python 3; on Python 2 str is
        # already bytes so this is a no-op there.
        m.update(rst if isinstance(rst, bytes) else rst.encode('utf-8'))
        return m.hexdigest().lower()
if __name__ == '__main__':
content = '''
Traceback (most recent call last):
File "D:\Work\h28\client_replace\client\script\lib\client\GateClient.py", line 337, in entity_message
>methodname:(str)onEnhanceEquip
>_done:(NoneType)None
>entitymsg:(class common.proto_python.common_pb2.EntityMessage)routes: ""
id: "UG\022\264\327\037\375$\
>self:(class client.GateClient.GateClient)<client.GateClient.GateClient object at
>entity:(class network.rpcentity.ClientEntities.ClientAvatar)<network.rpcentity.ClientEntities.Client
>_controller:(class mobilerpc.RpcChannel.MobileRpcController)<mobilerpc.RpcChannel.Mo | bileRpcControlle
>entityid:(class bson.objectid.ObjectId)554712b4d71ffd24fb0c7b27
>need_reg_index:(bool)False
>method:(instancemethod)<bound method ClientAvatar.call_rpc_meth
File "D:\Work\h28\client_replace\client\script\lib\common\rpcdecorator.py", line 100, in call_rpc_method
| >self:(class network.rpcentity.ClientEntities.ClientAvatar)<network.rpcentity.ClientEntities.Client
>args:(tuple)({u'res': 2, u'eid': u'55481f68d71ffd24f
>rpctype:(int)3
>rpcmethod:(class common.rpcdecorator.RpcMethod)<common.rpcdecorator.RpcMethod object at
File "D:\Work\h28\client_replace\client\script\lib\common\rpcdecorator.py", line 86, in call
>parameters:(dict){u'res': 2, u'eid': u'55481f68d71ffd24fb
>self:(class common.rpcdecorator.RpcMethod)<common.rpcdecorator.RpcMethod object at
>args:(list)[2, '55481f68d71ffd24fb0c7de4', {u'itemI
>entity:(class network.rpcentity.ClientEntities.ClientAvatar)<network.rpcentity.ClientEntities.Client
>arg:(dict){u'itemId': 125, u'star': 3, u'itemType'
>argtype:(class common.RpcMethodArgs.Dict)ed(Dict)
>placeholder:(NoneType)None
>first:(bool)False
File "D:\Work\h28\client_replace\client\script\avatarmembers\EquipMember.py", line 287, in onEnhanceEquip
>res:(int)2
>self:(class network.rpcentity.ClientEntities.ClientAvatar)<network.rpcentity.ClientEntities.Client
>equipUid:(str)55481f68d71ffd24fb0c7de4
>notifyType:(int)2
>newEquipDict:(dict){u'itemId': 125, u'star': 3, u'itemType'
>equip:(class com.Equip.Equip)<com.Equip.Equip object at 0x17251E50>
>oldEquip:(class com.Equip.Equip)<com.Equip.Equip object at 0x2740E7D0>
File "D:\Work\h28\client_replace\client\script\avatarmembers\EquipMember.py", line 401, in getEquipNotifyDict
>newAttrDict:(dict){'basePhyStrAdditionVal': 3352.156471239
>allAttrNameSet:(set)set(['basePhyStrAdditionVal', 'criRate',
>oldAttrDict:(dict){'basePhyStrAdditionVal': 3047.414973854
>self:(class network.rpcentity.ClientEntities.ClientAvatar)<network.rpcentity.ClientEntities.Client
>notifyType:(int)2
>notifyDict:(dict){'notifyType': 2, 'attrList': []}
>chinese_attrName:(str)生命值
>sortedAllAttrNames:(list)[]
File "D:\Work\h28\client_replace\client\script\com\utils\helpers.py", line 2945, in getAttributeNameC2E
>chinese_name:(str)生命值
KeyError: '\xe7\x94\x9f\xe5\x91\xbd\xe5\x80\xbc'
'''
tb_parser = TracebackParser()
tb_parser.parse(content)
print '============'
print tb_parser.tostring()
print '============' |
import math
import meshlabxml
import os
import tempfile
import plyfile
import numpy as np
import numba
import binvox_rw
import subprocess
def print_hausdorff(hausdorff_distance):
    """Print each metric of a Hausdorff-distance result dict as 'key: value'."""
    for metric_name in hausdorff_distance:
        print('{}: {}'.format(metric_name, hausdorff_distance[metric_name]))
@numba.njit
def minmax(array):
    """Return (min, max) of *array* in one pass, or None if it is empty.

    Processes two elements per iteration (compare the pair, then compare
    the smaller with the running min and the bigger with the running max),
    so only ~1.5 comparisons per element are needed instead of 2.
    """
    # Ravel the array and return early if it's empty
    array = array.ravel()
    length = array.size
    if not length:
        return
    # We want to process two elements at once so we need
    # an even sized array, but we preprocess the first and
    # start with the second element, so we want it "odd"
    odd = length % 2
    if not odd:
        length -= 1
    # Initialize min and max with the first item
    minimum = maximum = array[0]
    i = 1
    while i < length:
        # Get the next two items and swap them if necessary
        x = array[i]
        y = array[i + 1]
        if x > y:
            x, y = y, x
        # Compare the min with the smaller one and the max
        # with the bigger one
        minimum = min(x, minimum)
        maximum = max(y, maximum)
        i += 2
    # If we had an even sized array we need to compare the
    # one remaining item too.
    if not odd:
        x = array[length]
        minimum = min(x, minimum)
        maximum = max(x, maximum)
    return minimum, maximum
def hausdorff_distance_one_direction(mesh1_filepath, mesh2_filepath):
    """Run meshlabserver's one-directional Hausdorff filter (mesh1 -> mesh2)."""
    filter_script = meshlabxml.create.FilterScript(
        file_in=[mesh1_filepath, mesh2_filepath], ml_version='1.3.2')
    meshlabxml.sampling.hausdorff_distance(filter_script)
    filter_script.run_script(print_meshlabserver_output=False, skip_error=True)
    return filter_script.hausdorff_distance
def hausdorff_distance_bi(mesh1_filepath, mesh2_filepath):
    """Symmetric (bidirectional) Hausdorff distance between two meshes.

    Runs meshlabserver in both directions and merges the per-direction
    statistics: min/max are the extrema, the mean is the point-count
    weighted average, and the RMS is recombined from the per-direction
    mean squares.

    (Removed the @numba.jit decorator: this function is dict/string/
    subprocess work that numba cannot compile, so the decorator only
    added object-mode fallback overhead and deprecation warnings.)
    """
    # get hausdorff dist from meshlab server, in both directions
    hd_ab = hausdorff_distance_one_direction(mesh1_filepath, mesh2_filepath)
    hd_ba = hausdorff_distance_one_direction(mesh2_filepath, mesh1_filepath)
    min_distance_bi = min(hd_ab["min_distance"], hd_ba["min_distance"])
    max_distance_bi = max(hd_ab["max_distance"], hd_ba["max_distance"])
    # Point-count weighted mean over both directions.
    sm = (hd_ab["mean_distance"] * hd_ab["number_points"]
          + hd_ba["mean_distance"] * hd_ba["number_points"])
    mean_distance_bi = sm / (hd_ab["number_points"] + hd_ba["number_points"])
    # Recombine RMS from per-direction mean squares.
    ms = ((hd_ab["rms_distance"] ** 2) * hd_ab["number_points"]
          + (hd_ba["rms_distance"] ** 2) * hd_ba["number_points"])
    rms_distance_bi = math.sqrt(ms / (hd_ab["number_points"] + hd_ba["number_points"]))
    return {"min_distance": min_distance_bi,
            "max_distance": max_distance_bi,
            "mean_distance": mean_distance_bi,
            "rms_distance": rms_distance_bi,
            "number_points": hd_ab["number_points"]}
@numba.jit
def calculate_voxel_side_length(mesh, grid_size):
    """Return the voxel edge length that fits *mesh* into a cubic grid.

    Takes the largest bounding-box extent divided by grid_size so the
    whole mesh fits within grid_size voxels along every axis.
    """
    # assumes mesh.vertices is an (N, 3) array-like — TODO confirm
    minx, maxx = minmax(mesh.vertices[:, 0])
    miny, maxy = minmax(mesh.vertices[:, 1])
    minz, maxz = minmax(mesh.vertices[:, 2])
    return max(abs(minx - maxx) / grid_size,
               abs(miny - maxy) / grid_size,
               abs(minz - maxz) / grid_size)
@numba.jit
def _jaccard_distance(grid1, grid2):
    """Return |A∩B| / |A∪B| of two boolean occupancy grids.

    NOTE(review): despite the name this is the Jaccard *similarity*
    (1.0 = identical grids), not a distance; jaccard_similarity() relies
    on that. Returns 0.0 when both grids are entirely empty.
    """
    intersection = np.logical_and(grid1, grid2)
    intersection_count = np.count_nonzero(intersection)
    union = np.logical_or(grid1, grid2)
    union_count = np.count_nonzero(union)
    if union_count == 0:
        # Both grids empty: define similarity as 0 to avoid division by 0.
        return 0.0
    return float(intersection_count) / float(union_count)
def jaccard_similarity(mesh_filepath0, mesh_filepath1, grid_size=40, exact=True):
    """Voxelize two meshes with the external 'binvox' tool and return their
    Jaccard index (intersection-over-union of occupied voxels).

    Both meshes are voxelized over mesh0's bounding box so the two grids
    are aligned and comparable. Requires 'binvox' on PATH.

    Args:
        mesh_filepath0: Reference .ply mesh (defines the bounding box).
        mesh_filepath1: Mesh to compare against.
        grid_size: Voxel grid resolution passed to binvox -d.
        exact: Use binvox's exact (CPU) voxelization.

    Returns:
        Jaccard index in [0, 1].
    """
    # NOTE(review): tempfile.mktemp is race-prone; kept because binvox
    # derives its .binvox output name from the input file name.
    temp_mesh0_filepath = tempfile.mktemp(suffix=".ply")
    temp_mesh1_filepath = tempfile.mktemp(suffix=".ply")
    binvox0_filepath = temp_mesh0_filepath.replace(".ply", ".binvox")
    binvox1_filepath = temp_mesh1_filepath.replace(".ply", ".binvox")
    try:
        os.symlink(os.path.abspath(mesh_filepath0), temp_mesh0_filepath)
        os.symlink(os.path.abspath(mesh_filepath1), temp_mesh1_filepath)
        mesh0 = plyfile.PlyData.read(temp_mesh0_filepath)
        minx, maxx = minmax(mesh0['vertex']['x'])
        miny, maxy = minmax(mesh0['vertex']['y'])
        minz, maxz = minmax(mesh0['vertex']['z'])
        # -d: specify voxel grid size (default 256, max 1024)(no max with -e)
        # -e: exact voxelization (any voxel touching a triangle gets set)
        # -bb: force both meshes onto mesh0's bounding box
        cmd_base = "binvox -pb "
        if exact:
            cmd_base += "-e "
        cmd_base += ("-d " + str(grid_size) + " -bb "
                     + str(minx) + " " + str(miny) + " " + str(minz) + " "
                     + str(maxx) + " " + str(maxy) + " " + str(maxz))
        for cmd in (cmd_base + " " + temp_mesh0_filepath,
                    cmd_base + " " + temp_mesh1_filepath):
            process = subprocess.Popen(cmd.split(" "), stdout=subprocess.PIPE)
            process.communicate()
        # BUG FIX: .binvox files are binary; text mode ('r') breaks under
        # Python 3 (decode errors) — open in 'rb'.
        with open(binvox0_filepath, 'rb') as mesh0_binvox_file:
            mesh0_binvox = binvox_rw.read_as_3d_array(mesh0_binvox_file)
        with open(binvox1_filepath, 'rb') as mesh1_binvox_file:
            mesh1_binvox = binvox_rw.read_as_3d_array(mesh1_binvox_file)
        return _jaccard_distance(mesh0_binvox.data, mesh1_binvox.data)
    finally:
        # Always clean up the temp symlinks and binvox outputs, even when
        # voxelization fails (the original leaked them on error).
        for leftover in (temp_mesh0_filepath, temp_mesh1_filepath,
                         binvox0_filepath, binvox1_filepath):
            if os.path.exists(leftover):
                os.remove(leftover)
|
# Copyright (c) David Wilson 2015
# Icarus is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# Icarus is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with Icarus. If not, see <http://www.gnu.org/licenses/>.
import unittest
from Platform import Platform
class TestPlatform(unittest.TestCase):
    """Unit tests for Platform's dict / MongoDB-result mapping constructors."""

    def test_from_dict(self):
        """from_dict accepts a minimal dict without raising."""
        Platform.from_dict({"": ""})

    def test_from_dict_returns_platform(self):
        """from_dict returns a Platform instance."""
        result = Platform.from_dict({"": ""})
        self.assertIsInstance(result, Platform)

    def test_from_dict_performs_mappings(self):
        """from_dict maps 'name' and 'description' onto the instance."""
        d = {"name": "name",
             "description": "description"}
        result = Platform.from_dict(d)
        self.assertEqual(d["name"], result.name)
        self.assertEqual(d["description"], result.description)

    def test_from_mongo_result_performs_mapping(self):
        """from_mongo_result maps _id, _Platform__name and
        _Platform__description onto id, name and description.

        (Fixed: the previous docstring was copied from the mapper itself
        and described 'Initialise the mapper', which was misleading in a
        test.)
        """
        d = {"_id": "id",
             "_Platform__name": "name",
             "_Platform__description": "description"}
        p = Platform.from_mongo_result(d)
        self.assertEqual(d["_id"], p.id)
        self.assertEqual(d["_Platform__name"], p.name)
        self.assertEqual(d["_Platform__description"], p.description)
|
"""apple URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.0/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from dja | ngo.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib i | mport admin
from django.urls import path
urlpatterns = [
path('admin/', admin.site.urls),
]
|
from gc import collect
from inspect import isawaitable
from pytest import mark, raises
from graphql import graphql_sync
from graphql.execution import execute, execute_sync
from graphql.language import parse
from graphql.type import GraphQLField, GraphQLObjectType, GraphQLSchema, GraphQLString
from graphql.validation import validate
def describe_execute_synchronously_when_possible():
    """Test suite: execute()/graphql_sync() stay synchronous when they can.

    (Fixed stray extraction artifacts that corrupted one assertion and one
    test-function name in the original text.)
    """

    def _resolve_sync(root_value, _info):
        return root_value

    async def _resolve_async(root_value, _info):
        return root_value

    # One sync and one async query field, plus a sync mutation field.
    schema = GraphQLSchema(
        GraphQLObjectType(
            "Query",
            {
                "syncField": GraphQLField(GraphQLString, resolve=_resolve_sync),
                "asyncField": GraphQLField(GraphQLString, resolve=_resolve_async),
            },
        ),
        GraphQLObjectType(
            "Mutation",
            {"syncMutationField": GraphQLField(GraphQLString, resolve=_resolve_sync)},
        ),
    )

    def does_not_return_an_awaitable_for_initial_errors():
        doc = "fragment Example on Query { syncField }"
        assert execute(schema, parse(doc), "rootValue") == (
            None,
            [{"message": "Must provide an operation."}],
        )

    def does_not_return_an_awaitable_if_fields_are_all_synchronous():
        doc = "query Example { syncField }"
        assert execute(schema, parse(doc), "rootValue") == (
            {"syncField": "rootValue"},
            None,
        )

    def does_not_return_an_awaitable_if_mutation_fields_are_all_synchronous():
        doc = "mutation Example { syncMutationField }"
        assert execute(schema, parse(doc), "rootValue") == (
            {"syncMutationField": "rootValue"},
            None,
        )

    @mark.asyncio
    async def returns_an_awaitable_if_any_field_is_asynchronous():
        doc = "query Example { syncField, asyncField }"
        result = execute(schema, parse(doc), "rootValue")
        assert isawaitable(result)
        assert await result == (
            {"syncField": "rootValue", "asyncField": "rootValue"},
            None,
        )

    def describe_execute_sync():
        def does_not_return_an_awaitable_for_sync_execution():
            doc = "query Example { syncField }"
            result = execute_sync(schema, document=parse(doc), root_value="rootValue")
            assert result == (
                {"syncField": "rootValue"},
                None,
            )

        def does_not_throw_if_not_encountering_async_execution_with_check_sync():
            doc = "query Example { syncField }"
            result = execute_sync(
                schema, document=parse(doc), root_value="rootValue", check_sync=True
            )
            assert result == (
                {"syncField": "rootValue"},
                None,
            )

        @mark.asyncio
        @mark.filterwarnings("ignore:.* was never awaited:RuntimeWarning")
        async def throws_if_encountering_async_execution_with_check_sync():
            doc = "query Example { syncField, asyncField }"
            with raises(RuntimeError) as exc_info:
                execute_sync(
                    schema, document=parse(doc), root_value="rootValue", check_sync=True
                )
            msg = str(exc_info.value)
            assert msg == "GraphQL execution failed to complete synchronously."

        @mark.asyncio
        @mark.filterwarnings("ignore:.* was never awaited:RuntimeWarning")
        async def throws_if_encountering_async_operation_without_check_sync():
            doc = "query Example { syncField, asyncField }"
            result = execute_sync(schema, document=parse(doc), root_value="rootValue")
            assert result == (
                {"syncField": "rootValue", "asyncField": None},
                [
                    {
                        "message": "String cannot represent value:"
                        " <coroutine _resolve_async>",
                        "locations": [(1, 28)],
                        "path": ["asyncField"],
                    }
                ],
            )
            # garbage collect coroutine in order to not postpone the warning
            del result
            collect()

    def describe_graphql_sync():
        def reports_errors_raised_during_schema_validation():
            bad_schema = GraphQLSchema()
            result = graphql_sync(schema=bad_schema, source="{ __typename }")
            assert result == (None, [{"message": "Query root type must be provided."}])

        def does_not_return_an_awaitable_for_syntax_errors():
            doc = "fragment Example on Query { { { syncField }"
            assert graphql_sync(schema, doc) == (
                None,
                [
                    {
                        "message": "Syntax Error: Expected Name, found '{'.",
                        "locations": [(1, 29)],
                    }
                ],
            )

        def does_not_return_an_awaitable_for_validation_errors():
            doc = "fragment Example on Query { unknownField }"
            validation_errors = validate(schema, parse(doc))
            result = graphql_sync(schema, doc)
            assert result == (None, validation_errors)

        def does_not_return_an_awaitable_for_sync_execution():
            doc = "query Example { syncField }"
            assert graphql_sync(schema, doc, "rootValue") == (
                {"syncField": "rootValue"},
                None,
            )

        def does_not_throw_if_not_encountering_async_operation_with_check_sync():
            doc = "query Example { syncField }"
            assert graphql_sync(schema, doc, "rootValue") == (
                {"syncField": "rootValue"},
                None,
            )

        @mark.asyncio
        @mark.filterwarnings("ignore:.* was never awaited:RuntimeWarning")
        async def throws_if_encountering_async_operation_with_check_sync():
            doc = "query Example { syncField, asyncField }"
            with raises(RuntimeError) as exc_info:
                graphql_sync(schema, doc, "rootValue", check_sync=True)
            msg = str(exc_info.value)
            assert msg == "GraphQL execution failed to complete synchronously."

        @mark.asyncio
        @mark.filterwarnings("ignore:.* was never awaited:RuntimeWarning")
        async def throws_if_encountering_async_operation_without_check_sync():
            doc = "query Example { syncField, asyncField }"
            result = graphql_sync(schema, doc, "rootValue")
            assert result == (
                {"syncField": "rootValue", "asyncField": None},
                [
                    {
                        "message": "String cannot represent value:"
                        " <coroutine _resolve_async>",
                        "locations": [(1, 28)],
                        "path": ["asyncField"],
                    }
                ],
            )
            # garbage collect coroutine in order to not postpone the warning
            del result
            collect()
|
"""
Entrypoint for the buildroot update server
"""
import argparse
import asyncio
import logging
import logging.config
from . import (get_app, BR_BUILTIN_VERSION_FILE,
config, constants, name_management)
from aiohttp import web
LOG = logging.getLogger(__name__)
try:
# systemd journal is available, we can use its handler
import systemd.journal # noqa(F401)
import systemd.daemon
def _handler_for(topic_name: str,
log_level: int):
return {'class': 'systemd.journal.JournalHandler',
'formatter': 'message_only',
'level': log_level,
'SYSLOG_IDENTIFIER': topic_name}
# By using sd_notify
# (https://www.freedesktop.org/s | oftware/systemd/man/sd_notify.html)
# and type=notify in the unit file, we can prevent systemd from starting
# dependent services until we actually say we're ready. By calling this
# after we change the hostname, we make anything with an After= on us
# be guaranteed to see the correct hostname
def _notify_up():
systemd.daemon.notify("READY=1")
except ImportError:
# systemd journal isn't available, probably running tests
def _handler_for(topic_name: s | tr,
log_level: int):
return {
'class': 'logging.StreamHandler',
'formatter': 'basic',
'level': log_level,
}
def _notify_up():
LOG.info("systemd couldn't be imported (host? test?), not notifying")
def configure_logging(level: int):
    """Install the process-wide logging configuration.

    Routes the root logger plus the ``otupdate`` and ``__main__`` loggers
    through the handler built by :func:`_handler_for` (systemd journal when
    available, plain stream otherwise).

    :param level: numeric logging level, e.g. ``logging.INFO``
    """
    # Named ``log_config`` (the original used ``config``) so the local dict
    # does not shadow the package-level ``config`` module imported above.
    log_config = {
        'version': 1,
        'formatters': {
            'basic': {
                'format': '%(name)s %(levelname)s %(message)s'
            },
            'message_only': {
                'format': '%(message)s'
            },
        },
        'handlers': {
            'journald': _handler_for('opentrons-update', level)
        },
        'loggers': {
            'otupdate': {
                'handlers': ['journald'],
                'level': level,
                'propagate': False
            },
            '__main__': {
                'handlers': ['journald'],
                'level': level,
                'propagate': False,
            }
        },
        'root': {
            'handlers': ['journald'],
            'level': level
        }
    }
    logging.config.dictConfig(log_config)
def main():
    """Entry point: parse args, set hostname/name, start the aiohttp server.

    Blocks in ``web.run_app`` until the server is stopped.
    """
    parser = argparse.ArgumentParser(
        description='Opentrons update server for buildroot systems')
    parser.add_argument('-p', '--port', dest='port', type=int,
                        help='Port to listen on. Passed to aiohttp')
    parser.add_argument('--host', dest='host', type=str, default='127.0.0.1',
                        help='Host to listen on. Passed to aiohttp')
    parser.add_argument('--version-file', dest='version_file',
                        type=str, default=BR_BUILTIN_VERSION_FILE,
                        help='Version file path if not default')
    parser.add_argument('--log-level', dest='log_level',
                        choices=['debug', 'info', 'warning', 'error'],
                        help='Log level', default='info')
    parser.add_argument('--config-file', dest='config_file',
                        type=str, default=None,
                        help='Config file path. If not specified, falls back '
                        f'to {config.PATH_ENVIRONMENT_VARIABLE} env var and '
                        f'then default path {config.DEFAULT_PATH}')
    args = parser.parse_args()
    loop = asyncio.get_event_loop()
    configure_logging(getattr(logging, args.log_level.upper()))
    # Hostname must be set before systemd is notified so dependent units
    # (After=) see the correct name.
    LOG.info("Setting hostname")
    hostname = loop.run_until_complete(name_management.setup_hostname())
    LOG.info(f"Set hostname to {hostname}")
    LOG.info('Building buildroot update server')
    app = get_app(args.version_file, args.config_file)
    name = app[constants.DEVICE_NAME_VARNAME]
    LOG.info(f"Setting advertised name to {name}")
    loop.run_until_complete(name_management.set_name(name))
    LOG.info('Notifying systemd')
    _notify_up()
    LOG.info(
        f'Starting buildroot update server on http://{args.host}:{args.port}')
    web.run_app(app, host=args.host, port=args.port)
if __name__ == '__main__':
    main()
|
serve', 'pressure', 'primate', 'pristine', 'probe', 'process', 'propagation',
'properties', 'protected', 'proton', 'pulley', 'qualitative data', 'quantum', 'quark',
'quarry', 'radiation', 'radioactivity', 'rain forest', 'ratio', 'reaction', 'reagent',
'realm', 'redwoods', 'reeds', 'reflection', 'refraction', 'relationships between', 'reptile',
'research', 'resistance', 'resonate', 'rookery', 'rubble', 'runoff', 'salinity', 'sandbar',
'satellite', 'saturation', 'scientific investigation', 'scientist\'s', 'sea floor', 'season',
'sedentary', 'sediment', 'sedimentary', 'seepage', 'seismic', 'sensors', 'shard',
'similarity', 'solar', 'soluble', 'solvent', 'sonic', 'sound', 'source', 'species',
'spectacular', 'spectrum', 'speed', 'sphere', 'spring', 'stage', 'stalactite',
'stalagmites', 'stimulus', 'substance', 'subterranean', 'sulfuric acid', 'surface',
'survival', 'swamp', 'sylvan', 'symbiosis', 'symbol', 'synergy', 'synthesis', 'taiga',
'taxidermy', 'technology', 'tectonics', 'temperate', 'temperature', 'terrestrial',
'thermals', 'thermometer', 'thrust', 'torque', 'toxin', 'trade winds', 'pterodactyl',
'transformation tremors', 'tropical', 'umbra', 'unbelievable', 'underwater', 'unearth',
'unique', 'unite', 'unity', 'universal', 'unpredictable', 'unusual', 'ursine', 'vacuole',
'valuable', 'vapor', 'variable', 'variety', 'vast', 'velocity', 'ventifact', 'verdant',
'vespiary', 'viable', 'vibration', 'virus', 'viscosity', 'visible', 'vista', 'vital',
'vitreous', 'volt', 'volume', 'vulpine', 'wave', 'wax', 'weather', 'westerlies', 'wetlands',
'whitewater', 'xeriscape', 'xylem', 'yield', 'zero-impact', 'zone', 'zygote', 'achieving',
'acquisition of', 'an alternative', 'analysis of', 'approach toward', 'area', 'aspects of',
'assessment of', 'assuming', 'authority', 'available', 'benefit of', 'circumstantial',
'commentary', 'components', 'concept of', 'consistent', 'corresponding', 'criteria',
'data', 'deduction', 'demonstrating', 'derived', 'distribution', 'dominant', 'elements',
'equation', 'estimate', 'evaluation', 'factors', 'features', 'final', 'function',
'initial', 'instance ', 'interpretation of', 'maintaining ', 'method', 'perceived',
'percent', 'period', 'positive', 'potential', 'previous', 'primary', 'principle',
'procedure', 'process', 'range', 'region', 'relevant', 'required', 'research',
'resources', 'response', 'role', 'section', 'select', 'significant ', 'similar',
'source', 'specific', 'strategies', 'structure', 'theory', 'transfer', 'variables',
'corvidae', 'passerine', 'Pica pica', 'Chinchilla lanigera', 'Nymphicus hollandicus',
'Melopsittacus undulatus', )
    def science_word(cls):
        """
        Return one random word drawn from ``cls.word_list``.

        :example 'Lorem'
        """
        return cls.random_element(cls.word_list)
def science_words(cls, nb=3):
"""
Generate an array of random words
:example array('Lorem', 'ipsum', 'dolor')
:param nb how many words to return
"""
return [cls.science_word() for _ in range(0, nb)]
def science_sentence(cls, nb_words=6, variable_nb_words=True):
"""
Generate a random sentence
:example 'Lorem ipsum dolor sit amet.'
:param nb_words around how many words the sentence should contain
:param variable_nb_words set to false if you want exactly $nbWords returned,
otherwise $nbWords may vary by +/-40% with a minimum of 1
"""
if nb_words <= 0:
return ''
if variable_nb_words:
nb_words = cls.randomize_nb_elements(nb_words)
words = cls.science_words(nb_words)
words[0] = words[0].title()
return ' '.join(words) + '.'
def science_sentences(cls, nb=3):
"""
Generate an array of sentences
:example array('Lorem ipsum dolor sit amet.', 'Consectetur adipisicing eli.')
:param nb how many sentences to return
:return list
"""
return [cls.science_sentence() for _ in range(0, nb)]
def science_paragraph(cls, nb_sentences=3, variable_nb_sentences=True):
"""
Generate a single paragraph
:example 'Sapiente sunt omnis. Ut pariatur ad autem ducimus et. Voluptas rem voluptas sint modi dolorem amet.'
:param nb_sentences around how many sentences the paragraph should contain
:param variable_nb_sentences set to false if you want exactly $nbSentences returned,
otherwise $nbSentences may vary by +/-40% with a minimum of 1
:return string
"""
if nb_sentences <= 0:
return ''
if variable_nb_sentences:
nb_sentences = cls.randomize_nb_elements(nb_sentences)
return ' '.join(cls.science_sentences(nb_sentences))
def science_paragraphs(cls, nb=3):
"""
Generate an array of paragraphs
:example array($paragraph1, $paragraph2, $paragraph3)
:param nb how many paragraphs to return
:return array
"""
return [cls.science_paragraph() for _ in range(0, nb)]
    def science_text(cls, max_nb_chars=200):
        """
        Generate a text string.
        Depending on the $maxNbChars, returns a string made of words, sentences, or paragraphs.
        :example 'Sapiente sunt omnis. Ut pariatur ad autem ducimus et. Voluptas rem voluptas sint modi dolorem amet.'
        :param max_nb_chars Maximum number of characters the text should contain (minimum 5)
        :return string
        """
        text = []
        if max_nb_chars < 5:
            raise ValueError('text() can only generate text of at least 5 characters')
        if max_nb_chars < 25:
            # join words
            while not text:
                size = 0
                # determine how many words are needed to reach the $max_nb_chars once;
                while size < max_nb_chars:
                    word = (' ' if size else '') + cls.science_word()
                    text.append(word)
                    size += len(word)
                # drop the element that overshot the limit; if that empties
                # the list (one long word), the outer loop retries with new
                # random draws
                text.pop()
            # capitalize the first letter and terminate with a period
            text[0] = text[0][0].upper() + text[0][1:]
            last_index = len(text) - 1
            text[last_index] += '.'
        elif max_nb_chars < 100:
            # join sentences
            while not text:
                size = 0
                # determine how many sentences are needed to reach the $max_nb_chars once
                while size < max_nb_chars:
                    sentence = (' ' if size else '') + cls.science_sentence()
                    text.append(sentence)
                    size += len(sentence)
                text.pop()
        else:
            # join paragraphs
            while not text:
                size = 0
                # determine how many paragraphs are needed to reach the $max_nb_chars once
                while size < max_nb_chars:
                    paragraph = ('\n' if size else '') + cls.science_paragraph()
                    text.append(paragraph)
                    size += len(paragraph)
                text.pop()
        return ''.join(text)
# Script-level setup: quiet noisy third-party loggers and register the
# custom science-word provider with faker.
logger = logging.getLogger('create_fakes')
SILENT_LOGGERS = [
    'factory',
    'website.mails',
]
for logger_name in SILENT_LOGGERS:
    logging.getLogger(logger_name).setLevel(logging.CRITICAL)
fake = Factory.create()
fake.add_provider(Sciencer)
def create_fake_user():
email = fake_email()
name = fake.name()
user = UserFactory(username=email, fullname=name,
is_registered=True, emails=[email],
date_registered=fake.date_time(tzinfo=pytz.UTC),
| )
user.set_password('faker123')
user.save()
logger.info('Created user: {0} <{1} | >'.format(user.fullname, user.userna |
#Chuong trinh Quan ly hoc sinh. Xay dung ham tim kiem, them moi
#haind
#python
import json
import os
# Build the menu: print usage (in Vietnamese) and return the user's choice
def menu():
    print '''
    Chao mung ban den voi chuong trinh Quan ly hoc sinh!
    Su dung:
    1. Them moi hoc sinh
    2. Tim kiem hoc sinh
    3. Thoat chuong trinh (quit)
    '''
    # NOTE(review): Python 2 input() eval()s the typed text, so a numeric
    # entry comes back as an int (the caller compares against 1/2/3).
    return input("Moi ban lua chon: ")
# Global student list; each element is a dict with keys
# 'Ho ten' (full name) / 'Ngay sinh' (birth date) / 'Dia chi' (address).
ds = list()
# Feature: add a new student record (user-facing strings stay in Vietnamese)
def themHS():
    print "Ban da lua chon Them moi hoc sinh"
    hs = dict()
    name = raw_input("Ho va ten: ")
    birth = raw_input("Ngay sinh: ")
    addr = raw_input("Dia chi: ")
    global ds
    # Build the student record dict
    hs['Ho ten'] = name
    hs['Ngay sinh'] = birth
    hs['Dia chi'] = addr
    # Append the student to the global list
    ds.append(hs)
    print ""
    print "Thong tin hoc sinh vua duoc them vao: %s"%(ds)
    print ""
# Feature: search students by exact full-name match and print the results
def timHS():
    print "Ban da lua chon Tim kiem hoc sinh"
    timkiem = raw_input("Moi ban nhap ten hoc sinh muon tim: ")
    ketquatim = list()
    # Linear scan; only exact matches on 'Ho ten' are collected
    for i in ds:
        if i['Ho ten'] == timkiem:
            ketquatim.append(i)
    print ""
    print "Da ket thuc tim kiem. Ket qua tim kiem la: "
    print ketquatim
    print ""
# luu hs vao file
def savehs():
global ds
#convert ds ra json
dljson = json.dumps(ds)
#ghi vao file text
try:
with open("dbhs.txt", "wb") as fh:
fh.write(dljson)
print "Da luu ds hs thanh cong"
except e, Exception:
print "Co loi khi luu file"
# Reload the student list from the JSON DB file, if it exists
def loadhs(filename):
    global ds
    if (os.path.isfile(filename)):
        print "File hop le. Bat dau load ds hs"
        with open(filename, 'rb') as fh:
            # savehs() writes a single JSON line, so one readline suffices
            data = fh.readline()
        ds = json.loads(data)
        print "Da load ds hs ok!"
    else:
        print "File DB khong dung."
# Main program loop: load the DB, then dispatch on the menu choice until quit
vonglap = 1
choice = 0
loadhs("dbhs.txt")
while vonglap == 1:
    choice = menu()
    # menu() returns the eval()'d user entry (Python 2 input()), so these
    # comparisons expect a bare integer to have been typed.
    if choice == 1:
        themHS()
    elif choice == 2:
        timHS()
    elif choice == 3:
        # Save before exiting the loop
        vonglap = 0
        savehs()
        print "Cam on ban da su dung chuong trinh"
|
"""
This example shows a Tkinter GUI application that uses event loop callbacks
to integrate Pyro's event loop into the Tkinter GUI mainloop.
No threads are used. The Pyro event callback is called every so often
to check if there are Pyro events to handle, and handles them synchronously.
"""
import time
import select
import Pyro4
try:
from tkinter import *
import tkinter.simpledialog as simpledialog
except ImportError:
from Tkinter import *
import tkSimpleDialog as simpledialog
# Set the Pyro servertype to the multiplexing select-based server that doesn't
# use a threadpool to service method calls. This way the method calls are
# handled inside the main thread as well.
Pyro4.config.SERVERTYPE = "multiplex"
# The frequency (Hz) with which the GUI loop calls the Pyro event handler.
PYRO_EVENTLOOP_HZ = 50
class PyroGUI(object):
    """
    The Tkinter GUI application that also listens for Pyro calls.
    """

    def __init__(self):
        # Build the window: a row of buttons on top and a message area that
        # displays text received from Pyro clients.
        self.tk = Tk()
        self.tk.wm_title("Pyro in a Tkinter GUI eventloop - without threads")
        self.tk.wm_geometry("500x500")
        buttonframe = Frame(self.tk)
        button = Button(buttonframe, text="Messagebox", command=self.button_msgbox_clicked)
        button.pack(side=LEFT)
        button = Button(buttonframe, text="Add some text", command=self.button_text_clicked)
        button.pack(side=LEFT)
        button = Button(buttonframe, text="Clear all text", command=self.button_clear_clicked)
        button.pack(side=LEFT)
        quitbutton = Button(buttonframe, text="Quit", command=self.tk.quit)
        quitbutton.pack(side=RIGHT)
        frame = Frame(self.tk, padx=2, pady=2)
        buttonframe.pack(fill=X)
        rlabel = Label(frame, text="Pyro server messages:")
        rlabel.pack(fill=X)
        self.msg = Message(frame, anchor=NW, width=500, aspect=80, background="white", relief="sunken")
        self.msg.pack(fill=BOTH, expand=1)
        frame.pack(fill=BOTH)
        # Rolling buffer of the lines shown in the message area.
        self.serveroutput = []

    def install_pyro_event_callback(self, daemon):
        """
        Add a callback to the tkinter event loop that is invoked every so often.
        The callback checks the Pyro sockets for activity and dispatches to the
        daemon's event process method if needed.
        """
        def pyro_event():
            while True:
                # for as long as the pyro socket triggers, dispatch events
                s, _, _ = select.select(daemon.sockets, [], [], 0.01)
                if s:
                    daemon.events(s)
                else:
                    # no more events, stop the loop, we'll get called again soon anyway
                    break
            # reschedule ourselves so polling continues
            self.tk.after(1000 // PYRO_EVENTLOOP_HZ, pyro_event)
        # kick off the first poll; each run reschedules the next one above
        self.tk.after(1000 // PYRO_EVENTLOOP_HZ, pyro_event)

    def mainloop(self):
        # Blocks until the window is closed.
        self.tk.mainloop()

    def button_msgbox_clicked(self):
        # this button event handler is here only to show that gui events are still processed normally
        # (the dialog's return value is deliberately unused)
        number = simpledialog.askinteger("A normal popup", "Hi there enter a number", parent=self.tk)

    def button_clear_clicked(self):
        # Reset both the buffer and the on-screen text.
        self.serveroutput = []
        self.msg.config(text="")

    def button_text_clicked(self):
        # add some random text to the message list
        self.add_message("The quick brown fox jumps over the lazy dog!")

    def add_message(self, message):
        # Timestamp the line, keep only the last 27 lines, refresh the widget.
        message = "[{0}] {1}".format(time.strftime("%X"), message)
        self.serveroutput.append(message)
        self.serveroutput = self.serveroutput[-27:]
        self.msg.config(text="\n".join(self.serveroutput))
@Pyro4.expose
class MessagePrinter(object):
    """
    The Pyro object that interfaces with the GUI application.
    """
    def __init__(self, gui):
        self.gui = gui

    def message(self, messagetext):
        """Show a client-sent message in the GUI message area."""
        # Add the message to the screen.
        # Note that you can't do anything that requires gui interaction
        # (such as popping a dialog box asking for user input),
        # because the gui (tkinter) is busy processing this pyro call.
        # It can't do two things at the same time when embedded this way.
        # If you do something in this method call that takes a long time
        # to process, the GUI is frozen during that time (because no GUI update
        # events are handled while this callback is active).
        self.gui.add_message("from Pyro: " + messagetext)

    def sleep(self, duration):
        """Demonstrate that blocking inside a call freezes the GUI."""
        # Note that you can't perform blocking stuff at all because the method
        # call is running in the gui mainloop thread and will freeze the GUI.
        # Try it - you will see the first message but everything locks up until
        # the sleep returns and the method call ends
        self.gui.add_message("from Pyro: sleeping {0} seconds...".format(duration))
        self.gui.tk.update()
        time.sleep(duration)
        self.gui.add_message("from Pyro: woke up!")
def main():
    """Create the GUI and the Pyro daemon, wire them together, run Tk."""
    gui = PyroGUI()
    # create a pyro daemon with object
    daemon = Pyro4.Daemon()
    obj = MessagePrinter(gui)
    uri = daemon.register(obj, "pyrogui.message")
    gui.add_message("Pyro server started. Not using threads.")
    gui.add_message("Use the command line client to send messages.")
    urimsg = "Pyro object uri = {0}".format(uri)
    gui.add_message(urimsg)
    print(urimsg)
    # add a Pyro event callback to the gui's mainloop
    gui.install_pyro_event_callback(daemon)
    # enter the mainloop
    gui.mainloop()
if __name__ == "__main__":
    main()
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
import sys
from utils.benchmark import get_matrix
# Generate a json-formatted problem from a MDVRP file.
# Those benchmarks use double precision for matrix costs and results
# are usually reported with 2 decimal places. As a workaround, we
# multiply all costs by CUSTOM_PRECISION before performing the usual
# integer rounding. Comparisons in benchmarks/compare_to_BKS.py are
# adjusted accordingly.
# Scale factor applied to all costs before integer rounding (benchmark
# results are reported with 2 decimal places; see header comment above).
CUSTOM_PRECISION = 1000
# Index of the metadata line in the input file.
FIRST_LINE = 0
def parse_meta(line):
    """Parse the first line of a MDVRP file into a meta dict.

    Expected whitespace-separated fields: ``type m n t`` where type must be
    2 (MDVRP), m is the vehicle count per depot, n the number of customers
    and t the number of depots. Exits with code 2 on a malformed line.
    """
    meta = line.split()
    # Guard against a short line too (the original only checked for an empty
    # split, so meta[1..3] below could raise IndexError on truncated input).
    if len(meta) < 4 or int(meta[0]) != 2:
        print("Not a MDVRP!")
        exit(2)
    return {
        "VEHICLES_PER_DEPOT": int(meta[1]),
        "JOBS": int(meta[2]),
        "DEPOTS": int(meta[3]),
    }
def parse_jobs(lines, jobs, coords):
    """Parse customer lines, appending to ``jobs`` and ``coords`` in place.

    Each line is ``id x y service demand ...``; service times are scaled by
    CUSTOM_PRECISION to match the integer-cost convention of this script.
    Exits with code 2 on a malformed line.
    """
    # Iterate lines directly (the original indexed via range(len(lines))).
    for line in lines:
        customer = line.split()
        if len(customer) < 5:
            print("Too few columns in customer line.")
            exit(2)
        current_coords = [float(customer[1]), float(customer[2])]
        jobs.append(
            {
                "id": int(customer[0]),
                "location": current_coords,
                # Index of the coordinate appended below.
                "location_index": len(coords),
                "service": CUSTOM_PRECISION * int(customer[3]),
                "delivery": [int(customer[4])],
            }
        )
        coords.append(current_coords)
def parse_mdvrp(input_file):
    """Convert a MDVRP benchmark file into a VROOM-style problem dict.

    Returns a dict with ``meta``, ``vehicles``, ``jobs`` and a duration
    matrix under ``matrices['euc_2D']``.
    """
    with open(input_file, "r") as f:
        lines = f.readlines()
    meta = parse_meta(lines[FIRST_LINE])
    coords = []
    # Handle capacity per depot.
    first_values = lines[FIRST_LINE + 1].split()
    meta["MAX_ROUTE_DURATION"] = int(first_values[0])
    meta["CAPACITY"] = int(first_values[1])
    # All depots must share the same duration/capacity values; bail out
    # otherwise since the single meta entries below could not represent them.
    for line in lines[FIRST_LINE + 2 : FIRST_LINE + 1 + meta["DEPOTS"]]:
        if meta["MAX_ROUTE_DURATION"] != int(line.split()[0]):
            print("Inconsistent max route duration!")
            exit(1)
        if meta["CAPACITY"] != int(line.split()[1]):
            print("Inconsistent capacity!")
            exit(1)
    # Handle customer lines
    jobs = []
    jobs_start = FIRST_LINE + meta["DEPOTS"] + 1
    parse_jobs(lines[jobs_start : jobs_start + meta["JOBS"]], jobs, coords)
    # Handle depots and vehicles
    vehicles = []
    depots_start = jobs_start + meta["JOBS"]
    for d in range(meta["DEPOTS"]):
        depot = lines[depots_start + d].split()
        if len(depot) < 5:
            print("Too few columns in depot line.")
            exit(2)
        depot_id = int(depot[0])
        depot_coords = [float(depot[1]), float(depot[2])]
        location_index = len(coords)
        coords.append(depot_coords)
        # One vehicle per (depot, slot); id encodes both for traceability.
        for v in range(1, 1 + meta["VEHICLES_PER_DEPOT"]):
            vehicles.append(
                {
                    "id": 100 * depot_id + v,
                    "profile": "euc_2D",
                    "start": depot_coords,
                    "start_index": location_index,
                    "end": depot_coords,
                    "end_index": location_index,
                    "capacity": [meta["CAPACITY"]],
                }
            )
    meta["VEHICLES"] = len(vehicles)
    # A zero duration means "unlimited" in the benchmark format.
    if meta["MAX_ROUTE_DURATION"] != 0:
        for vehicle in vehicles:
            vehicle["time_window"] = [0, CUSTOM_PRECISION * meta["MAX_ROUTE_DURATION"]]
    matrix = get_matrix(coords, CUSTOM_PRECISION)
    return {
        "meta": meta,
        "vehicles": vehicles,
        "jobs": jobs,
        "matrices": {"euc_2D": {"durations": matrix}},
    }
if __name__ == "__main__":
    # Usage: <script> <instance>.txt  ->  writes <instance>.json next to it.
    input_file = sys.argv[1]
    instance_name = input_file[: input_file.rfind(".txt")]
    output_name = instance_name + ".json"
    print("- Writing problem " + input_file + " to " + output_name)
    json_input = parse_mdvrp(input_file)
    json_input["meta"]["NAME"] = instance_name
    with open(output_name, "w") as out:
        json.dump(json_input, out)
|
## @package predictor_py_utils
# Module caffe2.python.predictor.predictor_py_utils
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from caffe2.python import core, scope
def create_predict_net(predictor_export_meta):
    """
    Return the prediction NetDef rebuilt from predictor_export_meta.

    A fresh core.Net is constructed (rather than reusing predict_net) to
    clear any existing settings; ops, args, inputs/outputs and optional
    type/num_workers are copied over.
    """
    # Construct a new net to clear the existing settings.
    net = core.Net(predictor_export_meta.predict_net.name or "predict")
    net.Proto().op.extend(predictor_export_meta.predict_net.op)
    # Parameters are treated as external inputs alongside the real inputs.
    net.Proto().external_input.extend(
        predictor_export_meta.inputs + predictor_export_meta.parameters)
    net.Proto().external_output.extend(predictor_export_meta.outputs)
    net.Proto().arg.extend(predictor_export_meta.predict_net.arg)
    if predictor_export_meta.net_type is not None:
        net.Proto().type = predictor_export_meta.net_type
    if predictor_export_meta.num_workers is not None:
        net.Proto().num_workers = predictor_export_meta.num_workers
    return net.Proto()
def create_predict_init_net(ws, predictor_export_meta):
    """
    Return an initialization net that zero-fill all the input and
    output blobs, using the shapes from the provided workspace. This is
    necessary as there is no shape inference functionality in Caffe2.

    Raises Exception when a blob has no recorded shape and is also absent
    from the workspace.
    """
    net = core.Net("predict-init")

    def zero_fill(blob):
        # Prefer the explicitly exported shape; fall back to the shape of
        # the blob currently in the workspace.
        shape = predictor_export_meta.shapes.get(blob)
        if shape is None:
            if blob not in ws.blobs:
                raise Exception(
                    "{} not in workspace but needed for shape: {}".format(
                        blob, ws.blobs))
            shape = ws.blobs[blob].fetch().shape
        # Explicitly null-out the scope so users (e.g. PredictorGPU)
        # can control (at a Net-global level) the DeviceOption of
        # these filling operators.
        with scope.EmptyDeviceScope():
            net.ConstantFill([], blob, shape=shape, value=0.0)
    external_blobs = predictor_export_meta.inputs + \
        predictor_export_meta.outputs
    for blob in external_blobs:
        zero_fill(blob)
    net.Proto().external_input.extend(external_blobs)
    if predictor_export_meta.extra_init_net:
        net.AppendNet(predictor_export_meta.extra_init_net)
    # Add the model_id in the predict_net to the init_net
    AddModelIdArg(predictor_export_meta, net.Proto())
    return net.Proto()
def get_comp_name(string, name):
    """Return ``string`` suffixed with ``_<name>`` when name is truthy."""
    if not name:
        return string
    return '_'.join([string, name])
def _ProtoMapGet(field, key):
    '''
    Look up ``key`` in a repeated protobuf key/value field and return the
    matching value, or None when absent.
    Helper function used by protobuf since it doesn't have map construct
    '''
    return next((entry.value for entry in field if entry.key == key), None)
def GetPlan(meta_net_def, key):
    """Return the plan stored under ``key``, or None."""
    return _ProtoMapGet(meta_net_def.plans, key)
def GetPlanOriginal(meta_net_def, key):
    """Identical to GetPlan; kept as a separate name for API compatibility."""
    return _ProtoMapGet(meta_net_def.plans, key)
def GetBlobs(meta_net_def, key):
    """Return the blob list stored under ``key`` ([] when absent)."""
    blobs = _ProtoMapGet(meta_net_def.blobs, key)
    if blobs is None:
        return []
    return blobs
def GetNet(meta_net_def, key):
    """Return the net stored under ``key``, or None."""
    return _ProtoMapGet(meta_net_def.nets, key)
def GetNetOriginal(meta_net_def, key):
    """Identical to GetNet; kept as a separate name for API compatibility."""
    return _ProtoMapGet(meta_net_def.nets, key)
def GetApplicationSpecificInfo(meta_net_def, key):
    """Return the application-specific info stored under ``key``, or None."""
    return _ProtoMapGet(meta_net_def.applicationSpecificInfo, key)
def AddBlobs(meta_net_def, blob_name, blob_def):
    """Append every blob in ``blob_def`` under ``blob_name``.

    The keyed entry is created on first use, then its value list is
    extended one blob at a time.
    """
    target = _ProtoMapGet(meta_net_def.blobs, blob_name)
    if target is None:
        entry = meta_net_def.blobs.add()
        entry.key = blob_name
        target = entry.value
    for blob in blob_def:
        target.append(blob)
def AddPlan(meta_net_def, plan_name, plan_def):
    """Store ``plan_def`` under ``plan_name`` in meta_net_def.plans."""
    meta_net_def.plans.add(key=plan_name, value=plan_def)
def AddNet(meta_net_def, net_name, net_def):
    """Store ``net_def`` under ``net_name`` in meta_net_def.nets."""
    meta_net_def.nets.add(key=net_name, value=net_def)
def GetArgumentByName(net_def, arg_name):
    """Return the Argument in ``net_def.arg`` named ``arg_name``, or None."""
    return next((arg for arg in net_def.arg if arg.name == arg_name), None)
def AddModelIdArg(meta_net_def, net_def):
    """Takes the model_id from the predict_net of meta_net_def (if it is
    populated) and adds it to the net_def passed in. This is intended to be
    called on init_nets, as their model_id is not populated by default, but
    should be the same as that of the predict_net
    """
    source = GetArgumentByName(meta_net_def.predict_net, "model_id")
    if source is None:
        # Nothing to propagate.
        return
    # model_id is assumed to be stored as an integer argument.
    new_id = source.i
    existing = GetArgumentByName(net_def, "model_id")
    if existing is not None:
        # Overwrite any model_id already on the target net.
        existing.i = new_id
    else:
        arg = net_def.arg.add()
        arg.name = "model_id"
        arg.i = new_id
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.11 on 2017-04-07 12:27
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated (Django 1.9 makemigrations): adds a nullable ``slug``
    # CharField to organization_network.Organization. Keep edits to
    # comments only.
    dependencies = [
        ('organization_network', '0099_organization_validation_status'),
    ]
    operations = [
        migrations.AddField(
            model_name='organization',
            name='slug',
            field=models.CharField(blank=True, help_text='Leave blank to have the URL auto-generated from the name.', max_length=2000, null=True, verbose_name='URL'),
        ),
    ]
|
import smtplib
# One-shot script: send a plain message through Gmail's SMTP with STARTTLS.
# NOTE(review): credentials are hard-coded below -- load them from the
# environment or a secrets store (and rotate these) before real use.
# NOTE(review): ``msg`` has no RFC 822 headers (From/To/Subject), so mail
# clients will show an empty subject -- confirm that is acceptable.
fromaddr = 'mwbetrg@gmail.com'
toaddrs = 'awangjangok@gmail.com'
msg = 'Why,Oh why!'
username = 'mwbetrg@gmail.com'
password = '5147mwbe'
server = smtplib.SMTP('smtp.gmail.com:587')
server.ehlo()
# Upgrade the connection to TLS before sending credentials.
server.starttls()
server.login(username,password)
server.sendmail(fromaddr, toaddrs, msg)
server.quit()
from servo_handler import servo_handler
import number_range as n
# Mobile-device tilt clamp bounds (degrees) shared by pitch and roll limits.
TILT_UP_LIMIT = 90
TILT_DOWN_LIMIT = -90
class elevons:
    """Elevon (combined elevator/aileron) pair driven by two servos.

    Mixes pitch and roll commands onto the left/right servo deflections
    (50% pitch / 50% roll). Class name kept lowercase for compatibility
    with existing callers.
    """
    def __init__(self):
        self.left = servo_handler(1)
        self.right = servo_handler(2)
        # default 2: extended range due to elevon mixing (see setMultiplier)
        self.multiplier = 2
        # mobile device tilt limits (degrees)
        self.pitchUpLimit = 45
        self.pitchDownLimit = -45
        self.rollUpLimit = 45
        self.rollDownLimit = -45
        self.setMultiplier(self.multiplier)
        self.setServosUpDownLimit(0, 100)
    ## Servo settings methods
    # servo multiplier, default 2 because of extended range due to elevon mixing (50% pitch 50%roll)
    def setMultiplier(self, multiplier):
        """Apply the same output multiplier to both servos."""
        self.left.setMultiplier(multiplier)
        self.right.setMultiplier(multiplier)
    # see servo_handler.py documentation
    def setServosUpDownLimit(self, up, down):
        """Apply the same travel limits to both servos."""
        self.left.setUpDownLimit(up, down)
        self.right.setUpDownLimit(up, down)
    # mobile device pitch limits
    def setPitchTiltLimits(self, up, down):
        """Set pitch input limits; silently ignored if outside +/-90 deg."""
        if (up <= TILT_UP_LIMIT and down >= TILT_DOWN_LIMIT):
            self.pitchUpLimit = up
            self.pitchDownLimit = down
            print("pitch limit: up:%d, down:%d" % (up, down))
    # mobile device roll limits
    def setRollTiltLimits(self, up, down):
        """Set roll input limits; silently ignored if outside +/-90 deg."""
        if (up <= TILT_UP_LIMIT and down >= TILT_DOWN_LIMIT):
            self.rollUpLimit = up
            self.rollDownLimit = down
            print("roll limit: left:%d, right:%d" % (down, up))
    # servoUpDirectionSettings
    def setServosUpDirection(self, left, right):
        """Set each servo's 'up' direction independently."""
        self.left.setUpDirection(left)
        self.right.setUpDirection(right)
    def setPitch(self, pitch):
        """Pure pitch: both elevons deflect together (half each)."""
        self.left.setPositionFromTilt(pitch / 2)
        self.right.setPositionFromTilt(pitch / 2)
    def setRoll(self, roll):
        """Pure roll: elevons deflect in opposite directions (half each)."""
        self.left.setPositionFromTilt(-roll / 2)
        self.right.setPositionFromTilt(roll / 2)
    def setPitchRoll(self, pitch, roll):
        """Elevon mixing: sum/difference of half pitch and half roll."""
        self.left.setPositionFromTilt(pitch / 2 - roll / 2)
        self.right.setPositionFromTilt(pitch / 2 + roll / 2)
    # set pitch only, no mixing - not tested!
    def setPitchFromInput(self, pitch):
        """Clamp+map device pitch into [-45, 45] and apply it."""
        pitch = n.arduino_map(n.clamp(pitch, self.pitchDownLimit, self.pitchUpLimit), self.pitchDownLimit,
                              self.pitchUpLimit, -45, 45)
        self.setPitch(pitch)
    # set roll only, no mixing - not tested!
    def setRollFromInput(self, roll):
        """Clamp+map device roll into [-45, 45] and apply it."""
        roll = n.arduino_map(n.clamp(roll, self.rollDownLimit, self.rollUpLimit), self.rollDownLimit, self.rollUpLimit,
                             -45, 45)
        self.setRoll(roll)
    # print("servo L, R: %d %d" % (self.left.getPosition(), self.right.getPosition()))
    # pitch and roll update, elevons specific method - tested
    def setPitchRollFromInput(self, pitch, roll):
        """Clamp+map both axes into [-45, 45], then apply elevon mixing."""
        # both elevons have equal limits to pitch and roll input
        # pitch and roll input have seperate limits
        pitch = n.arduino_map(n.clamp(pitch, self.pitchDownLimit, self.pitchUpLimit), self.pitchDownLimit,
                              self.pitchUpLimit, -45, 45)
        roll = n.arduino_map(n.clamp(roll, self.rollDownLimit, self.rollUpLimit), self.rollDownLimit, self.rollUpLimit,
                             -45, 45)
        self.setPitchRoll(pitch, roll)
    # manual - raw control
    def setAngle(self, pitch, roll):
        """Manual control entry point; delegates to setPitchRollFromInput."""
        #print("pitch,roll: %d %d"%(pitch,roll))
        self.setPitchRollFromInput(pitch, roll)
        # print("servo L, R: %d %d"%(self.left.getPosition(),self.right.getPosition()))
    ## Stabilize and Autopilot mode methods - not tested!, just draft
    """
    def turnRight(self, val=1):
        self.left.add(val)
        self.right.sub(val)
    def turnLeft(self, val=1):
        self.left.sub(val)
        self.right.add(val)
    def pullUp(self):
        self.left.add()
        self.right.add()
    def pullDown(self):
        self.left.sub()
        self.right.sub()
    """
    # stabilize mode algorithm
    def stabilize(self, target_pitch, target_roll, pitch, roll):
        """Drive the control surfaces by the target-minus-sensor error."""
        # idea: map target-sensor values difference to pitch and roll control
        self.setPitchRoll(target_pitch - pitch, target_roll - roll)
        #print("control pitch/roll", target_pitch - pitch, "/", target_roll - roll)
        """
        if(target_pitch<pitch):
            self.pullUp()
        elif(target_pitch>pitch):
            self.pullDown()
        if(target_roll<roll):
            self.turnRight()
        if(target_roll>roll):
            self.turnLeft()
        """
    """def controlHdgPitch(self, hdg, pitch):
        # idea: level the plane, set the bank angle, turn the plane, level the plane, set to parameters
        if(hdg)
        self.control()
    """
    def turn(self, target_roll, roll, pitch):
        """Bank toward target_roll; once within 10 deg, hold pitch instead."""
        # idea: set roll, apply pitch to turn
        rolldiff = target_roll-roll
        if(abs(rolldiff)>10):
            self.setRollFromInput(rolldiff)
        else:
            self.setPitch(pitch)
|
# -----------------------------------------------------------------------------
# Copyright 2015 Esri
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ----------------------------------------------------------------------------
# TestERGTools.py
# Description: Test ERG Tools Toolbox
# Requirements: ArcGIS Desktop Standard
# ----------------------------------------------------------------------------
# history:
# 4/2/2015 - mf - updates for coding standards and making tests as modules
# ==================================================
import arcpy
import sys
import traceback
import TestUtilities
import os
class LicenseError(Exception):
    """Raised when a required ArcGIS license is unavailable."""
    pass
def testERGByChemical(inFS, inMatTy, inWB, inDN, inLS, outAreas, outLines):
    '''Run the ERG By Chemical tool; returns [outAreas, outLines].'''
    arcpy.AddMessage("Starting Test: ERG Tools - ERG By Chemical")
    arcpy.ERGByChemical_erg(inFS, inMatTy, inWB, inDN, inLS, outAreas, outLines)
    return [outAreas, outLines]
def testERGByPlacard(inFS, inPID, inWB, inDN, inLS, outAreas, outLines):
    '''Run the ERG By Placard tool; returns [outAreas, outLines].
    (Docstring corrected: it was copy-pasted from the Chemical variant.)'''
    arcpy.AddMessage("Starting Test: ERG Tools - ERG By Placard")
    arcpy.ERGByPlacard_erg(inFS, inPID, inWB, inDN, inLS, outAreas, outLines)
    return [outAreas, outLines]
def main():
    ''' Tool main code: run ERG By Chemical and ERG By Placard on a sample
    point and verify each produces exactly 3 areas and 3 lines. Exits with
    a non-zero code on any failure. '''
    def verifyCounts(outAreas, outLines):
        # Shared verification for both tool runs (was duplicated inline).
        countAreas = int(arcpy.GetCount_management(outAreas).getOutput(0))
        print("ERG Area count: " + str(countAreas))
        countLines = int(arcpy.GetCount_management(outLines).getOutput(0))
        print("ERG Line count: " + str(countLines))
        if (countAreas != 3) or (countLines != 3):
            print("Invalid output count (there should be 3 areas and 3 lines)!")
            raise Exception("Test Failed")
        print("Test Passed")
    try:
        arcpy.ImportToolbox(TestUtilities.toolbox)
        arcpy.env.overwriteOutput = True
        # Set tool param variables
        inPoint = arcpy.Point(77.0, 38.9)
        inWGS84Point = arcpy.PointGeometry(inPoint)
        sr = arcpy.SpatialReference(4326)
        inWGS84Point.spatial_reference = sr
        # create an in_memory feature class to initially contain the input point
        fc = arcpy.CreateFeatureclass_management("in_memory", "tempfc", "POINT",
                                                 None, "DISABLED", "DISABLED",
                                                 sr)[0]
        # open and insert cursor
        with arcpy.da.InsertCursor(fc, ["SHAPE@"]) as cursor:
            cursor.insertRow([inWGS84Point])
        # create a featureset object and load the fc
        inputFeatureSet = arcpy.FeatureSet()
        inputFeatureSet.load(fc)
        # set the remaining tool parameters
        inputMaterialType = "Allylamine"
        inputPlacardID = 1560
        inputWindBearing = 10
        inputDayOrNight = "Day"
        inputLargeOrSmall = "Large"
        outputERGAreas = os.path.join(arcpy.env.scratchGDB, "ERGAreas")
        outputERGLines = os.path.join(arcpy.env.scratchGDB, "ERGLines")
        # Testing ERG By Chemical
        outputERGAreas, outputERGLines = testERGByChemical(inputFeatureSet,
                                                           inputMaterialType,
                                                           inputWindBearing,
                                                           inputDayOrNight,
                                                           inputLargeOrSmall,
                                                           outputERGAreas,
                                                           outputERGLines)
        verifyCounts(outputERGAreas, outputERGLines)
        # Testing ERG By Placard
        outputERGAreas, outputERGLines = testERGByPlacard(inputFeatureSet,
                                                          inputPlacardID,
                                                          inputWindBearing,
                                                          inputDayOrNight,
                                                          inputLargeOrSmall,
                                                          outputERGAreas,
                                                          outputERGLines)
        verifyCounts(outputERGAreas, outputERGLines)
    except arcpy.ExecuteError:
        # Get the arcpy error messages
        msgs = arcpy.GetMessages()
        arcpy.AddError(msgs)
        print(msgs)
        # return a system error code
        sys.exit(-1)
    except Exception:
        # BUG FIX: was a bare ``except:``, which also swallowed SystemExit
        # and KeyboardInterrupt; Exception lets those propagate normally.
        # Get the traceback object
        tb = sys.exc_info()[2]
        tbinfo = traceback.format_tb(tb)[0]
        # Concatenate information together concerning the error into a message string
        pymsg = ("PYTHON ERRORS:\nTraceback info:\n" + tbinfo +
                 "\nError Info:\n" + str(sys.exc_info()[1]))
        msgs = "ArcPy ERRORS:\n" + arcpy.GetMessages() + "\n"
        # Return python error messages for use in script tool or Python Window
        arcpy.AddError(pymsg)
        arcpy.AddError(msgs)
        # Print Python error messages for use in Python / Python Window
        print(pymsg + "\n")
        print(msgs)
        # return a system error code
        sys.exit(-1)
# MAIN =============================================
if __name__ == "__main__":
    main()
|
#!/usr/bin/env python
import os.path
import tornado.escape
import tornado.httpserver
import tornado.ioloop
import tornado.options
import tornado.web
from tornado.options import define, options
# Command-line option: --port, the TCP port to listen on (default 8000).
define("port", default=8000, help="run on the given port", type=int)
class IndexHandler(tornado.web.RequestHandler):
    """Serve the site index page."""

    def get(self):
        """Render index.html with the header/footer template variables."""
        self.render(
            "index.html",
            # PEP 8: no spaces around '=' in keyword arguments.
            header_text="Hi! I am the header",
            footer_text="the copyright stuff",
        )
class Application(tornado.web.Application):
    """Tornado application wiring routes to handlers and template settings."""

    def __init__(self):
        # Route table: only the root URL is served.
        handlers = [
            (r"/", IndexHandler),
        ]
        settings = dict(
            # Templates live in ./templates next to this module.
            template_path=os.path.join(os.path.dirname(__file__), "templates"),
            debug=True,
            # NOTE(review): autoescape=None turns OFF template auto-escaping,
            # removing Tornado's built-in XSS protection — confirm intentional.
            autoescape=None
        )
        tornado.web.Application.__init__(self, handlers, **settings)
if __name__ == "__main__":
    # Parse --port (and any other defined options) from the command line.
    tornado.options.parse_command_line()
    # An Application instance is created and passed to the HTTP server.
    # Earlier this was done inline as:
    # app = tornado.web.Application(
    #     handlers=[(r'/', IndexHandler)],
    #     template_path=os.path.join(os.path.dirname(__file__), "templates"),
    #     debug=True,
    #     autoescape=None
    # )
    # http_server = tornado.httpserver.HTTPServer(app)
    http_server = tornado.httpserver.HTTPServer(Application())
    http_server.listen(options.port)
    # Start the IOLoop; this call blocks until the process is stopped.
    tornado.ioloop.IOLoop.instance().start()
|
source_id', 'zone_id'])
# Deleting field 'FareRule.payment_method'
db.delete_column('gtfs_farerule', 'payment_method')
# Deleting field 'FareRule.price'
db.delete_column('gtfs_farerule', 'price')
# Deleting field 'FareRule.currency_type'
db.delete_column('gtfs_farerule', 'currency_type')
# Deleting field 'FareRule.transfer_duration'
db.delete_column('gtfs_farerule', 'transfer_duration')
# Deleting field 'FareRule.transfers'
db.delete_column('gtfs_farerule', 'transfers')
# Deleting field 'FareRule.farerule_id'
db.delete_column('gtfs_farerule', 'farerule_id')
# Deleting field 'FareRule.agency'
db.delete_column('gtfs_farerule', 'agency_id')
# Adding field 'FareRule.fare'
db.add_column('gtfs_farerule', 'fare', self.gf('django.db.models.fields.related.ForeignKey')(default=None, to=orm['gtfs.Fare']), keep_default=False)
    def backwards(self, orm):
        """Reverse this South migration (effectively irreversible).

        NOTE(review): the first ``raise RuntimeError`` below aborts the
        method, so every statement after it — including the remaining
        raises and the trailing schema calls — is unreachable dead code.
        Only the 'FareRule.payment_method' message can ever be raised.
        This is typical of South's auto-generated output; left as-is.
        """
        # Removing unique constraint on 'Zone', fields ['source', 'zone_id']
        db.delete_unique('gtfs_zone', ['source_id', 'zone_id'])
        # Removing unique constraint on 'Shape', fields ['source', 'shape_id']
        db.delete_unique('gtfs_shape', ['source_id', 'shape_id'])
        # Removing unique constraint on 'Fare', fields ['source', 'fare_id']
        db.delete_unique('gtfs_fare', ['source_id', 'fare_id'])
        # Deleting model 'Fare'
        db.delete_table('gtfs_fare')
        # Deleting field 'Zone.source'
        db.delete_column('gtfs_zone', 'source_id')
        # User chose to not deal with backwards NULL issues for 'FareRule.payment_method'
        raise RuntimeError("Cannot reverse this migration. 'FareRule.payment_method' and its values cannot be restored.")
        # User chose to not deal with backwards NULL issues for 'FareRule.price'
        raise RuntimeError("Cannot reverse this migration. 'FareRule.price' and its values cannot be restored.")
        # User chose to not deal with backwards NULL issues for 'FareRule.currency_type'
        raise RuntimeError("Cannot reverse this migration. 'FareRule.currency_type' and its values cannot be restored.")
        # User chose to not deal with backwards NULL issues for 'FareRule.transfer_duration'
        raise RuntimeError("Cannot reverse this migration. 'FareRule.transfer_duration' and its values cannot be restored.")
        # Adding field 'FareRule.transfers'
        db.add_column('gtfs_farerule', 'transfers', self.gf('django.db.models.fields.IntegerField')(null=True), keep_default=False)
        # User chose to not deal with backwards NULL issues for 'FareRule.farerule_id'
        raise RuntimeError("Cannot reverse this migration. 'FareRule.farerule_id' and its values cannot be restored.")
        # User chose to not deal with backwards NULL issues for 'FareRule.agency'
        raise RuntimeError("Cannot reverse this migration. 'FareRule.agency' and its values cannot be restored.")
        # Deleting field 'FareRule.fare'
        db.delete_column('gtfs_farerule', 'fare_id')
models = {
'gtfs.agency': {
'Meta': {'unique_together': "(('source', 'agency_id'),)", 'object_name': 'Agency'},
'agency_id': ('django.db.models.fields.CharField', [], {'max_length': '20', 'db_index': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'lang': ('django.db.models.fields.CharField', [], {'max_length': '2'}),
'name': ('django.db.models.fields.TextField', [], {}),
'phone': ('django.db.models.fields.CharField', [], {'max_length': '20'}),
'source': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['gtfs.Source']", 'null': 'True'}),
'timezone': ('django.db.models.fields.CharField', [], {'max_length': '40'}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '200'})
},
'gtfs.block': {
'Meta': {'unique_together': "(('source', 'block_id'),)", 'object_name': 'Block'},
'block_id': ('django.db.models.fields.TextField', [], {'max_length': '20', 'db_index': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'source': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['gtfs.Source']", 'null': 'True'})
},
'gtfs.calendar': {
'Meta': {'object_name': 'Calendar'},
'end_date': ('django.db.models.fields.DateField', [], {}),
'friday': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'monday': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'saturday': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'service': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['gtfs.Service']", 'unique': 'True'}),
'start_date': ('django.db.models.fields.DateField', [], {}),
'sunday': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'thursday': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'tuesday': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'wednesday': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
},
'gtfs.calendardate': {
'Meta': {'object_name': 'CalendarDate'},
'date': ('django.db.models.fields.DateField', [], {}),
'exception_type': ('django.db.models.fields.IntegerField', [], {'db_index': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'service': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['gtfs.Service']"})
},
'gtfs.fare': {
'Meta': {'unique_together': "(('source', 'fare_id'),)", 'object_name': 'Fare'},
'currency_type': ('django.db.models.fields.CharField', [], {'max_length': '3'}),
'fare_id': ('django.db.models.fields.CharField', [], {'max_length': '20', 'db_index': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'payment_method': ('django.db.models.fields.IntegerField', [], {}),
'price': ('django.db.models.fields.FloatField', [], {}),
'source': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['gtfs.Source']", 'null': 'True'}),
'transfer_duration': ('django.db.models.fields.IntegerField', [], {}),
'transfers': ('django.db.models.fields.IntegerField', [], {'null': 'True'})
},
'gtfs.farerule': {
'Meta': {'object_name': 'FareRule'},
'contains': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'fare_rule_contains'", 'null': 'True', 'to': "orm['gtfs.Zone']"}),
'destination': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'fare_rule_destinations'", 'null': 'True', 'to': "orm['gtfs.Zone']"}),
'fare': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['gtfs.Fare']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'origin': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'fare_rule_origins'", 'null': 'True', 'to': "orm['gtfs.Zone']"}),
'route': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['gtfs.Route']", 'null': 'True'})
},
'gtfs.frequency': {
'Meta': {'object_name': 'Frequency'},
'end_time': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'end_time_days': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'headway_secs': ('django.db.models.fields.IntegerField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'start_time': ('django.db.models.fields.IntegerField', [], {'null': 'Tru |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import re
import sys
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
def get_version(*file_paths):
    """Return the package ``__version__`` string.

    Joins *file_paths* onto this file's directory, reads the resulting
    module source, and extracts the value assigned to ``__version__``.

    Raises:
        RuntimeError: if no ``__version__`` assignment is found.
    """
    filename = os.path.join(os.path.dirname(__file__), *file_paths)
    # Context manager closes the handle promptly (the original left the
    # file open until garbage collection).
    with open(filename) as version_fh:
        version_file = version_fh.read()
    version_match = re.search(r"^__version__ = ['\"]([^'\"]*)['\"]",
                              version_file, re.M)
    if version_match:
        return version_match.group(1)
    raise RuntimeError('Unable to find version string.')
# Package version, extracted from surveys/__init__.py.
version = get_version('surveys', '__init__.py')
if sys.argv[-1] == 'publish':
    # Require the wheel package before attempting to build/upload a wheel.
    try:
        import wheel
    except ImportError:
        print('Wheel library missing. Please run "pip install wheel"')
        sys.exit()
    # Build and upload sdist + wheel to PyPI, then exit.
    os.system('python setup.py sdist upload')
    os.system('python setup.py bdist_wheel upload')
    sys.exit()
if sys.argv[-1] == 'tag':
    # Tag the current commit with the package version and push the tag.
    print("Tagging the version on github:")
    os.system("git tag -a %s -m 'version %s'" % (version, version))
    os.system("git push --tags")
    sys.exit()
# Long description: README plus HISTORY with the changelog marker stripped.
readme = open('README.md').read()
history = open('HISTORY.rst').read().replace('.. :changelog:', '')
# Package metadata for django-surveys (setuptools/distutils entry point).
setup(
    name='django-surveys',
    version=version,
    description="""Surveys for django""",
    long_description=readme + '\n\n' + history,
    author='Sebastian Reyes Espinosa',
    author_email='sebaslander@gmail.com',
    url='https://github.com/sebastian-code/django-surveys',
    packages=[
        'surveys',
    ],
    include_package_data=True,
    install_requires=[
        'jsonfield==1.0.3',
    ],
    license="BSD",
    zip_safe=False,
    keywords='django-surveys',
    classifiers=[
        'Development Status :: 3 - Alpha',
        'Framework :: Django',
        'Framework :: Django :: 1.8',
        'Framework :: Django :: 1.9',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: BSD License',
        'Natural Language :: English',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.3',
        'Programming Language :: Python :: 3.4',
        'Programming Language :: Python :: 3.5',
    ],
)
|
import random
import math
import sympy
from sympy import latex, fraction, Symbol, Rational
# Base ID for generated questions; incremented once per question.
localid =11181500100000
# letter[0..3] hold the four answer choices (as LaTeX strings);
# letter[0] is paired with \correctchoice below, the rest are distractors.
letter=["a","b","c","d"]
# The two points are (n[1], n[2]) and (n[3], n[4]); n[5] is a later dice
# roll used to occasionally inject an 'undefined' distractor.
# NOTE(review): index 0 of n and m is never used.
n=[0,0,0,0,0,0]
# Coordinate differences: m[1]=y2-y1, m[2]=x2-x1 (slope = m[1]/m[2]);
# m[3], m[4] feed the distractor fractions.
m=[0,0,0,0,0]
# Output file for the generated AMC (auto-multiple-choice) question bank.
f = open("111815001.tex","w") #opens file with name of "test.txt"
for x in range(0, 1000):
    localid = localid +1
    # First entry is the correct choice; the list is shuffled before writing.
    writewrong=["\correctchoice{\(","\wrongchoice{\(","\wrongchoice{\(","\wrongchoice{\("]
    # Random coordinates in [-20, 20]; fills n[0..4] (n[0] unused).
    for count in range (0,5):
        n[count]=random.randint(-20, 20)
    m[1]=n[4]-n[2]
    m[2]=n[3]-n[1]
    m[3]=n[2]-n[1]
    m[4]=n[4]-n[3]
    # NOTE(review): this compares the two y-coordinates (n[2], n[4]), yet
    # labels the slope 'undefined'. A slope is undefined when the
    # x-coordinates (n[1], n[3]) coincide; equal y's give slope 0. The
    # same condition also does not guard the Rational denominators below
    # against zero. Verify the intended check before relying on output.
    if n[2]==n[4]:
        letter[0]='undefined'
        letter[2]=latex(Rational(-m[3],m[2]))
        letter[3]=latex(Rational(-m[4],m[3]))
        letter[1]=latex(Rational(m[4],m[3]))
    else:
        # Correct slope plus sign/reciprocal distractors.
        letter[0]=latex(Rational(m[1],m[2]))
        letter[1]=latex(Rational(-m[1],m[2]))
        letter[2]=latex(Rational(-m[2],m[1]))
        letter[3]=latex(Rational(m[2],m[1]))
        # With probability 3/6, replace one distractor with m[4]/m[3].
        zz=random.randint(1,6)
        if zz==1:
            letter[1]=latex(Rational(m[4],m[3]))
        elif zz==2:
            letter[2]=latex(Rational(m[4],m[3]))
        elif zz==3:
            letter[3]=latex(Rational(m[4],m[3]))
    n[5]=random.randint(0,10)
    if n[2]==n[4]:
        letter[0]='undefined'
    elif n[5]==8:
        # Occasionally make one distractor 'undefined' as a decoy.
        zz=random.randint(1,3)
        letter[zz]='undefined'
    # NOTE(review): len(letter) is always 4, so this guard is always true.
    if(len(letter)==4):
        for z in range (0, 4):
            writewrong[z]=writewrong[z]+str(letter[z])
    # Randomize answer order so the correct choice isn't always first.
    random.shuffle(writewrong)
    f.write("\n\n\n")
    f.write("\\element{slope}{")
    f.write("\n")
    f.write("\\begin{question}{")
    f.write(str(localid))
    f.write("}")
    f.write("\n")
    f.write("Find the slope using points: (")
    f.write(str(n[1]))
    f.write(",")
    f.write(str(n[2]))
    f.write(") and (")
    f.write(str(n[3]))
    f.write(",")
    f.write(str(n[4]))
    f.write("):")
    f.write("\n")
    f.write("\\begin{choiceshoriz}")
    f.write("\n")
    # Emit the four (shuffled) choices, each closed with \)}.
    for y in range(0, 4):
        f.write("\n")
        f.write(writewrong[y])
        f.write("\)}")
        f.write("\n")
    f.write("\\end{choiceshoriz}")
    f.write("\n")
    f.write("\\end{question}")
    f.write("\n")
    f.write("}")
f.close()
|
function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either RecoveryServicesProvider or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[~azure.mgmt.recoveryservicessiterecovery.models.RecoveryServicesProvider]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.RecoveryServicesProvider"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._refresh_provider_initial(
fabric_name=fabric_name,
provider_name=provider_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('RecoveryServicesProvider', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {
'resourceName': self._serialize.url("self._config.resource_name", self._config.resource_name, 'str'),
'resourceGroupName': self._serialize.url("self._config.resource_group_name", self._config.resource_group_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'fabricName': self._serialize.url("fabric_name", fabric_name, 'str'),
'providerName': self._serialize.url("provider_name", provider_name, 'str'),
}
if polling is True: polling_method = ARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_refresh_provider.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.RecoveryServices/vaults/{resourceName}/replicationFabrics/{fabr | icName}/replicationRecoveryServicesProviders/{providerName}/refreshProvider'} # type: ignore
    def _delete_initial(
        self,
        fabric_name,  # type: str
        provider_name,  # type: str
        **kwargs  # type: Any
    ):
        # type: (...) -> None
        """Issue the initial POST that unregisters a recovery services provider.

        First leg of the delete long-running operation: builds the URL from
        the client config plus fabric/provider names, POSTs it, and accepts
        202/204. Returns None unless ``cls`` is supplied to post-process the
        raw pipeline response.

        :raises HttpResponseError: on any status other than 202 or 204.
        """
        cls = kwargs.pop('cls', None)  # type: ClsType[None]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        # Callers may extend/override the status -> exception mapping.
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2021-06-01"
        # Construct URL
        url = self._delete_initial.metadata['url']  # type: ignore
        path_format_arguments = {
            'resourceName': self._serialize.url("self._config.resource_name", self._config.resource_name, 'str'),
            'resourceGroupName': self._serialize.url("self._config.resource_group_name", self._config.resource_group_name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
            'fabricName': self._serialize.url("fabric_name", fabric_name, 'str'),
            'providerName': self._serialize.url("provider_name", provider_name, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        request = self._client.post(url, query_parameters, header_parameters)
        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        if response.status_code not in [202, 204]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)
        if cls:
            return cls(pipeline_response, None, {})
    _delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.RecoveryServices/vaults/{resourceName}/replicationFabrics/{fabricName}/replicationRecoveryServicesProviders/{providerName}/remove'}  # type: ignore
def begin_delete(
self,
fabric_name, # type: str
provider_name, # type: str
**kwargs # type: Any
):
# type: (...) -> LROPoller[None]
"""Deletes provider from fabric. Note: Deleting provider for any fabric other than SingleHost is unsupported. To maintain backward compatibility for released clients the object "deleteRspInput" is used (if the object is empty we assume that it is old client and continue the old behavior).
The operation to removes/delete(unregister) a recovery services provider from the vault.
:param fabric_name: Fabric name.
:type fabric_name: str
:param provider_name: Recovery services provider name.
:type provider_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._delete_initial(
fabric_name=fabric_name,
provider_name=provider_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
path_format_arguments = {
'resourceName': self._serialize.url("self._config.resource_name", self._config.resource_name, 'str'),
'resourceGroupName': self._serialize.url("se |
from distutils.dir_util import copy_tree
from operator import itemgetter
import pandas as pd
import sys
from jinja2 import Environment, FileSystemLoader
import os
def generate_reports(folder):
    """Render CPU/memory/disk/network HTML reports from per-host log files.

    Reads every non-hidden file in *folder* as a whitespace-delimited log
    (one file per host), extracts metric columns into JSON series, renders
    four Jinja2 templates, and writes the reports into a new
    "<folder>-report" directory (Python 2 code; uses legacy pandas APIs).
    """
    hosts = []
    # get all the paths of the root folder
    files = [os.path.join(folder, fn) for fn in next(os.walk(folder))[2] if not fn.startswith(".")]
    for logfile in files:
        try:
            # header=-1 and .ix below are legacy pandas APIs (removed in
            # modern pandas); columns are addressed purely by position.
            data = pd.read_csv(logfile, delim_whitespace=True, comment='#', header=-1, index_col='timestamp',
                               parse_dates={'timestamp': [0, 1]})
            print "reading data from " + logfile
        except Exception, err:
            # NOTE(review): this only prints and then falls through — if
            # read_csv failed, `data` is stale (previous iteration) or
            # unbound on the first file, so the lines below can misreport
            # or raise NameError. Consider `continue` here.
            print "duplicate index occured in " + logfile
            print "There are two similar timestamps in the log." \
                  " To correct that error remove the duplicate entry from " + logfile
        hostname = os.path.basename(logfile).replace('.tab', "")
        host_data = {}
        host_data['name'] = hostname
        # CPU data (column indices assume a fixed log layout — TODO confirm
        # against the collector that produces these .tab files).
        host_data['cpu_data'] = data.ix[:, 2].to_json(date_format='iso')
        host_data['cpu_load'] = data.ix[:, 16].to_json(date_format='iso')
        # Memory data, converted from KB to GB-scale units.
        host_data['mem_data'] = data.ix[:, 20].apply(lambda x: x / 1024000).to_json(date_format='iso')
        # Disk data, converted from KB to MB.
        host_data['disk_read'] = data.ix[:, 66].apply(lambda x: x / 1024).to_json(date_format='iso')
        host_data['disk_write'] = data.ix[:, 67].apply(lambda x: x / 1024).to_json(date_format='iso')
        # Network Data
        host_data['net_rx'] = data.ix[:, 57].to_json(date_format='iso')
        host_data['net_tx'] = data.ix[:, 58].to_json(date_format='iso')
        hosts.append(host_data)
    # Render one report per metric family, hosts sorted by name (desc).
    env = Environment(loader=FileSystemLoader('templates'))
    env.add_extension("chartkick.ext.charts")
    cpu_template = env.get_template('cpu_template.html')
    memory_template = env.get_template('memory_template.html')
    disk_template = env.get_template('disk_template.html')
    network_template = env.get_template('network_template.html')
    cpu_output = cpu_template.render(
        hosts=sorted(hosts, key=itemgetter('name'), reverse=True),
    )
    memory_output = memory_template.render(
        hosts=sorted(hosts, key=itemgetter('name'), reverse=True),
    )
    disk_output = disk_template.render(
        hosts=sorted(hosts, key=itemgetter('name'), reverse=True),
    )
    network_output = network_template.render(
        hosts=sorted(hosts, key=itemgetter('name'), reverse=True),
    )
    test_name = os.path.basename(folder)
    test_name += "-report"
    if not os.path.exists(test_name):
        os.mkdir(test_name)
    # NOTE(review): chdir changes process-wide CWD; relative paths below
    # ('../css' etc.) depend on it.
    os.chdir(test_name)
    # creating folder structure
    if not os.path.exists('css'):
        os.mkdir('css')
    if not os.path.exists('js'):
        os.mkdir('js')
    if not os.path.exists('img'):
        os.mkdir('img')
    if not os.path.exists('fonts'):
        os.mkdir('fonts')
    # Copy static assets from the parent directory into the report dir.
    copy_tree(os.path.abspath('../css'), 'css')
    copy_tree(os.path.abspath('../js'), 'js')
    copy_tree(os.path.abspath('../img'), 'img')
    copy_tree(os.path.abspath('../fonts'), 'fonts')
    with open('report_cpu.html', 'w') as f:
        f.write(cpu_output)
    with open('report_memory.html', 'w') as f:
        f.write(memory_output)
    with open('report_disk.html', 'w') as f:
        f.write(disk_output)
    with open('report_network.html', 'w') as f:
        f.write(network_output)
def main(argv):
    """CLI entry point: generate reports for the folder given in argv[1]."""
    try:
        folder = argv[1].strip()
        generate_reports(folder)
        print "########################################"
        print "report generated successfully"
    except Exception, err:
        # Broad catch doubles as the missing-argument/usage message.
        print err.message
        print "should provide an input folder. ex : python plotter.py <input-folder>"
if __name__ == '__main__':
    # Entry point: expects the input folder as the first CLI argument.
    main(sys.argv)
|
# encoding: utf-8
from __future__ import unicode_literals
import unittest
from mock import Mock
from pdef.tests.inheritance.protocol import *
from pdef.tests.interfaces.protocol import *
from pdef.tests.messages.protocol import *
class TestMessageDescriptor(unittest.TestCase):
    """Tests for the message descriptor metadata generated by pdef."""

    def test(self):
        """A plain message: no base, no discriminator, three own fields."""
        descriptor = TestMessage.descriptor
        assert descriptor.pyclass is TestMessage
        assert descriptor.base is None
        assert descriptor.discriminator is None
        assert descriptor.discriminator_value is None
        assert len(descriptor.subtypes) == 0
        assert len(descriptor.fields) == 3
    def test__nonpolymorphic_inheritance(self):
        """Subclass inherits base fields but is not registered as a subtype."""
        base = TestMessage.descriptor
        descriptor = TestComplexMessage.descriptor
        assert descriptor.pyclass is TestComplexMessage
        assert descriptor.base is TestMessage.descriptor
        assert descriptor.inherited_fields == base.fields
        assert descriptor.fields == base.fields + descriptor.declared_fields
        assert len(descriptor.subtypes) == 0
    def test__polymorphic_inheritance(self):
        """Discriminated hierarchy: shared discriminator, per-type values,
        and subtype lookup by discriminator value."""
        base = Base.descriptor
        subtype = Subtype.descriptor
        subtype2 = Subtype2.descriptor
        msubtype = MultiLevelSubtype.descriptor
        discriminator = base.find_field('type')
        assert base.base is None
        assert subtype.base is base
        assert subtype2.base is base
        assert msubtype.base is subtype
        # All levels share the base's discriminator field.
        assert base.discriminator is discriminator
        assert subtype.discriminator is discriminator
        assert subtype2.discriminator is discriminator
        assert msubtype.discriminator is discriminator
        assert base.discriminator_value is None
        assert subtype.discriminator_value is PolymorphicType.SUBTYPE
        assert subtype2.discriminator_value is PolymorphicType.SUBTYPE2
        assert msubtype.discriminator_value is PolymorphicType.MULTILEVEL_SUBTYPE
        # subtypes is transitive: the base sees grandchildren too.
        assert set(base.subtypes) == {subtype, subtype2, msubtype}
        assert set(subtype.subtypes) == {msubtype}
        assert not subtype2.subtypes
        assert not msubtype.subtypes
        # find_subtype(None) falls back to the base itself.
        assert base.find_subtype(None) is base
        assert base.find_subtype(PolymorphicType.SUBTYPE) is subtype
        assert base.find_subtype(PolymorphicType.SUBTYPE2) is subtype2
        assert base.find_subtype(PolymorphicType.MULTILEVEL_SUBTYPE) is msubtype
class TestFieldDescriptor(unittest.TestCase):
    """Tests for generated field descriptors and their Python
    descriptor-protocol behavior (defaults, has_* flags)."""

    def test(self):
        """Fields expose their declared name and type descriptor."""
        string0 = TestMessage.string0
        bool0 = TestMessage.bool0
        assert string0.name == 'string0'
        assert string0.type is descriptors.string0
        assert bool0.name == 'bool0'
        assert bool0.type is descriptors.bool0
    def test_discriminator(self):
        """The 'type' field of a polymorphic base is the discriminator."""
        field = Base.type
        assert field.name == 'type'
        assert field.type is PolymorphicType.descriptor
        assert field.is_discriminator
    def test_default_value(self):
        """Unset fields read as the type default; setting flips has_*."""
        message = TestMessage()
        assert message.string0 == ''
        assert not message.has_string0
        message.string0 = 'hello'
        assert message.string0 == 'hello'
        assert message.has_string0
    def test_default_value__set_mutable(self):
        """Reading a mutable field materializes a default instance which is
        then cached on the message (subsequent reads return the same object)."""
        message = TestComplexMessage()
        assert not message.has_list0
        assert not message.has_set0
        assert not message.has_map0
        assert not message.has_message0
        list0 = message.list0
        set0 = message.set0
        map0 = message.map0
        message0 = message.message0
        assert list0 == []
        assert set0 == set()
        assert map0 == {}
        assert message0 == TestMessage()
        # Identity: the lazily-created defaults are stored, not re-created.
        assert message.list0 is list0
        assert message.set0 is set0
        assert message.map0 is map0
        assert message.message0 is message0
    def test_python_descriptor_protocol(self):
        """A field descriptor works on a hand-written class, not only on
        generated messages."""
        class A(object):
            field = descriptors.field('field', lambda: descriptors.string0)
            has_field = field.has_property
            def __init__(self, field=None):
                self.field = field
        a = A()
        assert a.field == ''
        assert a.has_field is False
        a.field = 'hello'
        assert a.field == 'hello'
        assert a.has_field
class TestInterfaceDescriptor(unittest.TestCase):
    """Tests for generated interface descriptors."""

    def test(self):
        """Interface exposes its class, exception type, and methods."""
        descriptor = TestInterface.descriptor
        method = descriptor.find_method('method')
        assert descriptor.pyclass is TestInterface
        assert descriptor.exc is TestException.descriptor
        assert len(descriptor.methods) == 13
        assert method
    def test_inheritance(self):
        """Sub-interface inherits base methods plus its own, and the base's
        exception type."""
        base = TestInterface.descriptor
        descriptor = TestSubInterface.descriptor
        assert descriptor.base is base
        assert len(descriptor.methods) == (len(base.methods) + 1)
        assert descriptor.find_method('subMethod')
        assert descriptor.exc is TestException.descriptor
class TestMethodDescriptor(unittest.TestCase):
    """Tests for generated method descriptors (args, flags, invocation)."""

    def test(self):
        """Method exposes name, result descriptor, and typed args."""
        method = TestInterface.descriptor.find_method('message0')
        assert method.name == 'message0'
        assert method.result is TestMessage.descriptor
        assert len(method.args) == 1
        assert method.args[0].name == 'msg'
        assert method.args[0].type is TestMessage.descriptor
    def test_args(self):
        """Args keep declaration order and their type descriptors."""
        method = TestInterface.descriptor.find_method('method')
        assert len(method.args) == 2
        assert method.args[0].name == 'arg0'
        assert method.args[1].name == 'arg1'
        assert method.args[0].type is descriptors.int32
        assert method.args[1].type is descriptors.int32
    def test_post_terminal(self):
        """Terminal vs interface-returning methods, and the POST flag."""
        descriptor = TestInterface.descriptor
        method = descriptor.find_method('method')
        post = descriptor.find_method('post')
        interface = descriptor.find_method('interface0')
        assert method.is_terminal
        assert not method.is_post
        assert post.is_terminal
        assert post.is_post
        # Methods returning interfaces are chained, not terminal.
        assert not interface.is_terminal
        assert not interface.is_post
    def test_invoke(self):
        """invoke() forwards positional and keyword args to the service."""
        service = Mock()
        method = TestInterface.descriptor.find_method('method')
        method.invoke(service, 1, arg1=2)
        service.method.assert_called_with(1, arg1=2)
class TestEnumDescriptor(unittest.TestCase):
    """Tests for the generated enum descriptor."""

    def test(self):
        """The descriptor lists the enum values in declaration order."""
        assert TestEnum.descriptor.values == ('ONE', 'TWO', 'THREE')

    def test_find_value(self):
        """Lookup by name accepts both lower- and upper-case spellings."""
        enum_descriptor = TestEnum.descriptor
        assert enum_descriptor.find_value('one') == TestEnum.ONE
        assert enum_descriptor.find_value('TWO') == TestEnum.TWO
class TestListDescriptor(unittest.TestCase):
    """Tests for list type descriptors."""

    def test(self):
        """A list descriptor exposes the element descriptor it was built from."""
        element = descriptors.string0
        assert descriptors.list0(element).element is element
class TestSetDescriptor(unittest.TestCase):
    """Tests for set type descriptors."""

    def test(self):
        """A set descriptor exposes the element descriptor it was built from."""
        element = descriptors.int32
        assert descriptors.set0(element).element is element
class TestMapDescriptor(unittest.TestCase):
    """Tests for map type descriptors."""

    def test(self):
        """A map descriptor keeps distinct key and value descriptors."""
        key_descriptor = descriptors.string0
        value_descriptor = descriptors.int32
        mapping = descriptors.map0(key_descriptor, value_descriptor)
        assert mapping.key is key_descriptor
        assert mapping.value is value_descriptor
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# harlib
# Copyright (c) 2014-2017, Andrew Robbins, All rights reserved.
#
# This library ("it") is free software; it is distributed in the hope that it
# will be useful, but WITHOUT ANY WARRANTY; you can redistribute it and/or
# modify it under the terms of LGPLv3 <https://www.gnu.org/licenses/lgpl.html>.
from __future__ import absolute_import
from collections import Mapping
from .metamodel import HarObject
from .messages import (
HarCookie,
HarHeader,
HarQueryStringParam,
HarPostDataParam,
HarMessageBody,
HarMessage,
)
try:
from typing import Any, Dict, List, NamedTuple, Optional
except ImportError:
pass
class HarRequestBody(HarMessageBody):
    """HAR <postData> object: the body of an HTTP request.

    Field metadata (_required/_optional/_types) drives the generic
    HarObject machinery; underscore-prefixed keys are harlib extensions.
    """
    # <postData>
    _required = [
        'mimeType',
    ]
    _optional = {
        '_size': -1,
        'text': '',  # HAR-1.2 required
        'comment': '',
        '_compression': -1,
        '_encoding': '',
        'params': [],
    }
    _types = {
        '_size': int,
        '_compression': int,
        'params': [HarPostDataParam],
    }
    def __init__(self, obj=None):
        # type: (Dict) -> None
        """Build from a mapping, another HarObject, or a decodable value.

        NOTE(review): the first assignment below is dead — every branch of
        the if/elif/else reassigns ``har``.
        """
        har = obj or None
        if isinstance(obj, Mapping):
            har = obj
        elif isinstance(obj, HarObject):
            har = obj.to_json()
        else:
            har = self.decode(obj)
        super(HarRequestBody, self).__init__(har)
class HarRequest(HarMessage):
    """HAR <request> object: a recorded HTTP request.

    Field metadata (_required/_optional/_types/_ordered) drives the generic
    HarObject machinery; underscore-prefixed keys are harlib extensions.
    """
    # type: NamedTuple('HarRequest', [
    #     ('method', str),
    #     ('url', str),
    #     ('cookies', List[HarCookie]),
    #     ('headers', List[HarHeader]),
    #     ('queryString', List[HarQueryStringParam]),
    #     ('httpVersion', str),
    #     ('headersSize', int),
    #     ('bodySize', int),
    #     ('postData', HarRequestBody),
    #     ('_requestLine', str),
    #     ('_requestLineSize', str),
    #     ('_endpointID', str),
    #     ('_originURL', str),
    #     ('_required', List[str]),
    #     ('_optional', Dict[str, Any]),
    #     ('_types', Dict[str, Any]),
    #     ('_ordered', List[str]),
    # ])
    _required = [
        'method',
        'url',
        'cookies',
        'headers',
        'queryString',  # HAR-1.2 required
    ]
    _optional = {
        'httpVersion': '',  # HAR-1.2 required
        'headersSize': -1,
        'bodySize': -1,
        'postData': {'mimeType': 'UNKNOWN'},
        'comment': '',
        '_requestLine': '',
        '_requestLineSize': -1,
        '_endpointID': '',
        '_originURL': '',
    }
    _types = {
        'cookies': [HarCookie],
        'headers': [HarHeader],
        'postData': HarRequestBody,
        'queryString': [HarQueryStringParam],
        'headersSize': int,
        'bodySize': int,
        '_requestLineSize': int,
    }
    # Serialization order of fields in emitted JSON.
    _ordered = [
        'method',
        'url',
        'httpVersion',
        'cookies',
        'headers',
        'queryString',
        'postData',
        'headersSize',
        'bodySize',
        'comment',
        '_requestLine',
        '_requestLineSize',
    ]
    def __init__(self, obj=None):
        # type: (Dict) -> None
        """Build from a mapping, another HarObject, or a decodable value."""
        har = obj or None
        if isinstance(obj, Mapping):
            har = obj
        elif isinstance(obj, HarObject):
            har = obj.to_json()
        else:
            har = self.decode(obj)
        super(HarRequest, self).__init__(har)
    @property
    def size(self):
        # type: () -> int
        """Total request size: headers plus body (may be negative if unknown,
        since both default to -1)."""
        return self.headersSize + self.bodySize
    def get_param(self, name, default=None):
        # type: (str, str) -> Optional[str]
        """Return the VALUE of the first query-string param named *name*."""
        for param in self.queryString:
            if param.name == name:
                return param.value
        return default
    def post_param(self, name, default=None):
        # type: (str, str) -> Optional[str]
        """Return the first postData PARAM OBJECT named *name*.

        NOTE(review): unlike get_param this returns the param itself, not
        param.value — the type comment says str; confirm which is intended.
        """
        for param in self.postData.params:
            if param.name == name:
                return param
        return default
|
"""Plugin for Arte.tv, bi-lingual art and culture channel."""
import re
from itertools import chain
from streamlink.compat import urlparse
from streamlink.plugin import Plugin
from streamlink.plugin.api import http, validate
from streamlink.stream import HDSStream, HLSStream, HTTPStream, RTMPStream
SWF_URL = "http://www.arte.tv/player/v2/jwplayer6/mediaplayer.6.6.swf"
JSON_VOD_URL = "https://api.arte.tv/api/player/v1/config/{}/{}"
JSON_LIVE_URL = "https://api.arte.tv/api/player/v1/livestream/{}"
_url_re = re.compile(r"""
https?://(?:\w+\.)?arte.tv/guide/
(?P<language>[a-z]{2})/
(?:
(?P<video_id>.+?)/.+ | # VOD
(?:direct|live) # Live TV
)
""", re.VERBOSE)
_video_schema = validate.Schema({
"videoJsonPlayer": {
"VSR": validate.any(
[],
{
validate.text: {
"height": int,
"mediaType": validate.text,
"url": validate.text,
validate.optional("streamer"): validate.text
},
},
),
"VTY": validate.text
}
})
class ArteTV(Plugin):
    """Streamlink plugin extracting live and VOD streams from arte.tv."""

    @classmethod
    def can_handle_url(cls, url):
        # Fixed: this is a classmethod, so the first parameter is ``cls``,
        # not ``self`` as the original misleadingly named it.
        return _url_re.match(url)

    def _create_stream(self, stream, is_live):
        """Yield (name, Stream) pairs for one VSR stream descriptor."""
        stream_name = "{0}p".format(stream["height"])
        stream_type = stream["mediaType"]
        stream_url = stream["url"]

        if stream_type in ("hls", "mp4"):
            if urlparse(stream_url).path.endswith("m3u8"):
                try:
                    streams = HLSStream.parse_variant_playlist(self.session, stream_url)
                    # TODO: Replace with "yield from" when dropping Python 2.
                    # Renamed the loop variable: the original reused ``stream``,
                    # shadowing this method's parameter.
                    for variant in streams.items():
                        yield variant
                except IOError as err:
                    self.logger.error("Failed to extract HLS streams: {0}", err)
            else:
                yield stream_name, HTTPStream(self.session, stream_url)
        elif stream_type == "f4m":
            try:
                streams = HDSStream.parse_manifest(self.session, stream_url)
                for variant in streams.items():
                    yield variant
            except IOError as err:
                self.logger.error("Failed to extract HDS streams: {0}", err)
        elif stream_type == "rtmp":
            params = {
                "rtmp": stream["streamer"],
                "playpath": stream["url"],
                "swfVfy": SWF_URL,
                "pageUrl": self.url,
            }
            if is_live:
                params["live"] = True
            else:
                # VOD playpaths need the explicit mp4: prefix.
                params["playpath"] = "mp4:{0}".format(params["playpath"])
            yield stream_name, RTMPStream(self.session, params)

    def _get_streams(self):
        """Fetch the player-config JSON and yield all extractable streams."""
        match = _url_re.match(self.url)
        language = match.group('language')
        video_id = match.group('video_id')
        if video_id is None:
            json_url = JSON_LIVE_URL.format(language)
        else:
            json_url = JSON_VOD_URL.format(language, video_id)
        res = http.get(json_url)
        video = http.json(res, schema=_video_schema)

        # An empty VSR list means no streams are available for this page.
        if not video["videoJsonPlayer"]["VSR"]:
            return

        is_live = video["videoJsonPlayer"]["VTY"] == "LIVE"
        vsr = video["videoJsonPlayer"]["VSR"].values()
        streams = (self._create_stream(stream, is_live) for stream in vsr)
        return chain.from_iterable(streams)
__plugin__ = ArteTV
|
# coding=utf-8
# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
from twitter.common.collections import OrderedSet
from pants.option.optionable import Optionable
from pants.option.scope import ScopeInfo
from pants.subsystem.subsystem_client_mixin import SubsystemClientMixin, SubsystemDependency
class SubsystemError(Exception):
    """Base error type for failures inside a subsystem."""
class Subsystem(SubsystemClientMixin, Optionable):
    """A separable piece of functionality that may be reused across multiple tasks or other code.

    Subsystems encapsulate the configuration and initialization of things like JVMs,
    Python interpreters, SCMs and so on.

    Subsystem instances can be global or per-optionable. Global instances are useful for representing
    global concepts, such as the SCM used in the workspace. Per-optionable instances allow individual
    Optionable objects (notably, tasks) to have their own configuration for things such as artifact
    caches.

    Each subsystem type has an option scope. The global instance of that subsystem initializes
    itself from options in that scope. An optionable-specific instance initializes itself from options
    in an appropriate subscope, which defaults back to the global scope.

    For example, the global artifact cache options would be in scope `cache`, but the
    compile.java task can override those options in scope `cache.compile.java`.

    Subsystems may depend on other subsystems, and therefore mix in SubsystemClientMixin.

    :API: public
    """
    options_scope_category = ScopeInfo.SUBSYSTEM

    class UninitializedSubsystemError(SubsystemError):
        """Raised when an instance is requested before set_options() was called."""

        def __init__(self, class_name, scope):
            super(Subsystem.UninitializedSubsystemError, self).__init__(
                'Subsystem "{}" not initialized for scope "{}". '
                'Is subsystem missing from subsystem_dependencies() in a task? '.format(class_name, scope))

    class CycleException(Exception):
        """Thrown when a circular dependency is detected."""

        def __init__(self, cycle):
            message = 'Cycle detected:\n\t{}'.format(' ->\n\t'.join(
                '{} scope: {}'.format(subsystem, subsystem.options_scope) for subsystem in cycle))
            super(Subsystem.CycleException, self).__init__(message)

    @classmethod
    def scoped(cls, optionable):
        """Returns a dependency on this subsystem, scoped to `optionable`.

        Return value is suitable for use in SubsystemClientMixin.subsystem_dependencies().
        """
        return SubsystemDependency(cls, optionable.options_scope)

    @classmethod
    def get_scope_info(cls, subscope=None):
        # Without a subscope, defer to the Optionable base; with one, build
        # a ScopeInfo for the derived subscope.
        if subscope is None:
            return super(Subsystem, cls).get_scope_info()
        else:
            return ScopeInfo(cls.subscope(subscope), ScopeInfo.SUBSYSTEM, cls)

    @classmethod
    def closure(cls, subsystem_types):
        """Gathers the closure of the `subsystem_types` and their transitive `dependencies`.

        :param subsystem_types: An iterable of subsystem types.
        :returns: A set containing the closure of subsystem types reachable from the given
                  `subsystem_types` roots.
        :raises: :class:`pants.subsystem.subsystem.Subsystem.CycleException` if a dependency cycle is
                 detected.
        """
        known_subsystem_types = set()
        # `path` is the current DFS stack; membership signals a cycle.
        path = OrderedSet()

        def collect_subsystems(subsystem):
            if subsystem in path:
                cycle = list(path) + [subsystem]
                raise cls.CycleException(cycle)
            path.add(subsystem)
            if subsystem not in known_subsystem_types:
                known_subsystem_types.add(subsystem)
                for dependency in subsystem.subsystem_dependencies():
                    collect_subsystems(dependency)
            path.remove(subsystem)

        for subsystem_type in subsystem_types:
            collect_subsystems(subsystem_type)

        return known_subsystem_types

    @classmethod
    def subscope(cls, scope):
        """Create a subscope under this Subsystem's scope."""
        return '{0}.{1}'.format(cls.options_scope, scope)

    # The full Options object for this pants run. Will be set after options are parsed.
    # TODO: A less clunky way to make option values available?
    _options = None

    @classmethod
    def set_options(cls, options):
        cls._options = options

    # A cache of (cls, scope) -> the instance of cls tied to that scope.
    _scoped_instances = {}

    @classmethod
    def global_instance(cls):
        """Returns the global instance of this subsystem.

        :API: public

        :returns: The global subsystem instance.
        :rtype: :class:`pants.subsystem.subsystem.Subsystem`
        """
        return cls._instance_for_scope(cls.options_scope)

    @classmethod
    def scoped_instance(cls, optionable):
        """Returns an instance of this subsystem for exclusive use by the given `optionable`.

        :API: public

        :param optionable: An optionable type or instance to scope this subsystem under.
        :type: :class:`pants.option.optionable.Optionable`
        :returns: The scoped subsystem instance.
        :rtype: :class:`pants.subsystem.subsystem.Subsystem`
        """
        if not isinstance(optionable, Optionable) and not issubclass(optionable, Optionable):
            raise TypeError('Can only scope an instance against an Optionable, given {} of type {}.'
                            .format(optionable, type(optionable)))
        return cls._instance_for_scope(cls.subscope(optionable.options_scope))

    @classmethod
    def _instance_for_scope(cls, scope):
        # Instances are created lazily and memoized per (subsystem type, scope).
        if cls._options is None:
            raise cls.UninitializedSubsystemError(cls.__name__, scope)
        key = (cls, scope)
        if key not in cls._scoped_instances:
            cls._scoped_instances[key] = cls(scope, cls._options.for_scope(scope))
        return cls._scoped_instances[key]

    @classmethod
    def reset(cls, reset_options=True):
        """Forget all option values and cached subsystem instances.

        Used primarily for test isolation and to reset subsystem state for pantsd.
        """
        if reset_options:
            cls._options = None
        cls._scoped_instances = {}

    def __init__(self, scope, scoped_options):
        """Note: A subsystem has no access to options in scopes other than its own.

        TODO: We'd like that to be true of Tasks some day. Subsystems will help with that.

        Task code should call scoped_instance() or global_instance() to get a subsystem instance.
        Tests can call this constructor directly though.

        :API: public
        """
        super(Subsystem, self).__init__()
        self._scope = scope
        self._scoped_options = scoped_options
        self._fingerprint = None

    @property
    def options_scope(self):
        """The option scope this instance was constructed with."""
        return self._scope

    def get_options(self):
        """Returns the option values for this subsystem's scope.

        :API: public
        """
        return self._scoped_options
|
from django.core.urlresolvers import reverse
from django.db import models, transaction
from django.utils.encoding import python_2_unicode_compatible
from misago.acl import version as acl_version
from misago.core.utils import slugify
__all__ = ['Rank']
class RankManager(models.Manager):
    def get_default(self):
        """Return the rank flagged as the default (exactly one is expected)."""
        return self.get(is_default=True)

    def make_rank_default(self, rank):
        """Atomically transfer the default flag to *rank*."""
        with transaction.atomic():
            # Clear the previous default first so the "single default"
            # invariant holds once both statements commit together.
            self.filter(is_default=True).update(is_default=False)
            rank.is_default = True
            rank.save(update_fields=['is_default'])
@python_2_unicode_compatible
class Rank(models.Model):
    # Display name; `slug` is its unique URL-safe counterpart (see set_name()).
    name = models.CharField(max_length=255)
    slug = models.CharField(unique=True, max_length=255)
    description = models.TextField(null=True, blank=True)
    title = models.CharField(max_length=255, null=True, blank=True)
    roles = models.ManyToManyField('misago_acl.Role', blank=True)
    css_class = models.CharField(max_length=255, null=True, blank=True)
    is_default = models.BooleanField(default=False)
    is_tab = models.BooleanField(default=False)
    # Position in rank listings; auto-assigned for new ranks in save().
    order = models.IntegerField(default=0)

    objects = RankManager()

    class Meta:
        get_latest_by = 'order'

    def __str__(self):
        return self.name

    def save(self, *args, **kwargs):
        """Assign a position to brand-new ranks; bump the ACL cache on updates."""
        if not self.pk:
            self.set_order()
        else:
            # NOTE(review): the ACL version is invalidated only when updating an
            # existing rank, never on first save — confirm creation shouldn't too.
            acl_version.invalidate()
        return super(Rank, self).save(*args, **kwargs)

    def delete(self, *args, **kwargs):
        # Deleting a rank always invalidates cached ACLs.
        acl_version.invalidate()
        return super(Rank, self).delete(*args, **kwargs)

    def get_absolute_url(self):
        return reverse('misago:users-rank', kwargs={'slug': self.slug})

    def set_name(self, name):
        # Keep name and slug in sync.
        self.name = name
        self.slug = slugify(name)

    def set_order(self):
        """Place this rank after the currently highest-ordered one (0 if none exist)."""
        try:
            self.order = Rank.objects.latest('order').order + 1
        except Rank.DoesNotExist:
            self.order = 0
|
d, **kwargs)
@instrument_url
def client_patch_multipart(self, url, info=None, **kwargs):
    # type: (Text, Optional[Dict[str, Any]], **Any) -> HttpResponse
    """
    Use this for patch requests that have file uploads or
    that need some sort of multi-part content.  In the future
    Django's test client may become a bit more flexible,
    so we can hopefully eliminate this.  (When you post
    with the Django test client, it deals with MULTIPART_CONTENT
    automatically, but not patch.)
    """
    # Avoid the mutable default argument ({} is shared across calls).
    encoded = encode_multipart(BOUNDARY, info if info is not None else {})
    django_client = self.client  # see WRAPPER_COMMENT
    return django_client.patch(
        url,
        encoded,
        content_type=MULTIPART_CONTENT,
        **kwargs)
@instrument_url
def client_put(self, url, info=None, **kwargs):
    # type: (Text, Optional[Dict[str, Any]], **Any) -> HttpResponse
    """PUT urlencoded *info* to *url* via the wrapped Django test client."""
    # Avoid the mutable default argument ({} is shared across calls).
    encoded = urllib.parse.urlencode(info if info is not None else {})
    django_client = self.client  # see WRAPPER_COMMENT
    return django_client.put(url, encoded, **kwargs)
@instrument_url
def client_delete(self, url, info=None, **kwargs):
    # type: (Text, Optional[Dict[str, Any]], **Any) -> HttpResponse
    """DELETE *url*, passing urlencoded *info* as the request body."""
    # Avoid the mutable default argument ({} is shared across calls).
    encoded = urllib.parse.urlencode(info if info is not None else {})
    django_client = self.client  # see WRAPPER_COMMENT
    return django_client.delete(url, encoded, **kwargs)
@instrument_url
def client_options(self, url, info=None, **kwargs):
    # type: (Text, Optional[Dict[str, Any]], **Any) -> HttpResponse
    """Issue an OPTIONS request with urlencoded *info* as the body."""
    # Avoid the mutable default argument ({} is shared across calls).
    encoded = urllib.parse.urlencode(info if info is not None else {})
    django_client = self.client  # see WRAPPER_COMMENT
    return django_client.options(url, encoded, **kwargs)
@instrument_url
def client_post(self, url, info=None, **kwargs):
    # type: (Text, Optional[Dict[str, Any]], **Any) -> HttpResponse
    """POST *info* to *url*; Django's test client handles the encoding itself."""
    django_client = self.client  # see WRAPPER_COMMENT
    # Avoid the mutable default argument ({} is shared across calls).
    return django_client.post(url, info if info is not None else {}, **kwargs)
@instrument_url
def client_post_request(self, url, req):
    # type: (Text, Any) -> HttpResponse
    """
    Simulate hitting an endpoint by resolving the URL manually and
    calling the view function directly.  This helper exists so our
    instrumentation works for /notify_tornado and similar endpoints
    that require doing funny things to a request object.
    """
    view_func = resolve(url).func
    return view_func(req)
@instrument_url
def client_get(self, url, info=None, **kwargs):
    # type: (Text, Optional[Dict[str, Any]], **Any) -> HttpResponse
    """GET *url* with *info* as the query parameters."""
    django_client = self.client  # see WRAPPER_COMMENT
    # Avoid the mutable default argument ({} is shared across calls).
    return django_client.get(url, info if info is not None else {}, **kwargs)
# Registered test users in the default 'zulip' realm, keyed by short name.
example_user_map = dict(
    hamlet=u'hamlet@zulip.com',
    cordelia=u'cordelia@zulip.com',
    iago=u'iago@zulip.com',
    prospero=u'prospero@zulip.com',
    othello=u'othello@zulip.com',
    AARON=u'AARON@zulip.com',
    aaron=u'aaron@zulip.com',
    ZOE=u'ZOE@zulip.com',
)

# Registered test users in the 'zephyr' (MIT) realm.
mit_user_map = dict(
    sipbtest=u"sipbtest@mit.edu",
    starnine=u"starnine@mit.edu",
    espuser=u"espuser@mit.edu",
)

# Non-registered test users
nonreg_user_map = dict(
    test=u'test@zulip.com',
    test1=u'test1@zulip.com',
    alice=u'alice@zulip.com',
    newuser=u'newuser@zulip.com',
    bob=u'bob@zulip.com',
    cordelia=u'cordelia@zulip.com',
    newguy=u'newguy@zulip.com',
    me=u'me@zulip.com',
)
def nonreg_user(self, name):
    # type: (str) -> UserProfile
    """Return the UserProfile for a not-yet-registered test account."""
    email = self.nonreg_user_map[name]
    realm = get_realm_by_email_domain(email)
    return get_user(email, realm)
def example_user(self, name):
    # type: (str) -> UserProfile
    """Return the UserProfile for one of the standard 'zulip' realm users."""
    return get_user(self.example_user_map[name], get_realm('zulip'))
def mit_user(self, name):
    # type: (str) -> UserProfile
    """Return the UserProfile for one of the 'zephyr' (MIT) realm users."""
    return get_user(self.mit_user_map[name], get_realm('zephyr'))
def nonreg_email(self, name):
    # type: (str) -> Text
    """Return the email address of a non-registered test user."""
    email = self.nonreg_user_map[name]
    return email
def example_email(self, name):
    # type: (str) -> Text
    """Return the email address of a standard 'zulip' realm test user."""
    email = self.example_user_map[name]
    return email
def mit_email(self, name):
    # type: (str) -> Text
    """Return the email address of an MIT-realm test user."""
    email = self.mit_user_map[name]
    return email
def notification_bot(self):
    # type: () -> UserProfile
    """Return the notification bot's UserProfile in the 'zulip' realm."""
    realm = get_realm('zulip')
    return get_user('notification-bot@zulip.com', realm)
def login_with_return(self, email, password=None):
    # type: (Text, Optional[Text]) -> HttpResponse
    """POST to the login endpoint and return the HTTP response."""
    if password is None:
        # Default to the deterministic per-user test password.
        password = initial_password(email)
    credentials = {'username': email, 'password': password}
    return self.client_post('/accounts/login/', credentials)
def login(self, email, password=None, fails=False):
    # type: (Text, Optional[Text], bool) -> HttpResponse
    """Log the test client in as *email*, asserting the expected outcome."""
    if password is None:
        password = initial_password(email)
    succeeded = self.client.login(username=email, password=password)
    if fails:
        self.assertFalse(succeeded)
    else:
        self.assertTrue(succeeded)
def logout(self):
    # type: () -> None
    """Log the wrapped Django test client out."""
    django_client = self.client
    django_client.logout()
def register(self, email, password):
    # type: (Text, Text) -> HttpResponse
    """Run both stages of the two-step registration flow for *email*."""
    self.client_post('/accounts/home/', {'email': email})
    response = self.submit_reg_form_for_user(email, password)
    return response
def submit_reg_form_for_user(self, email, password, realm_name="Zulip Test",
                             realm_subdomain="zuliptest",
                             from_confirmation='', full_name=None, timezone=u'', **kwargs):
    # type: (Text, Text, Optional[Text], Optional[Text], Optional[Text], Optional[Text], Optional[Text], **Any) -> HttpResponse
    """
    Stage two of the two-step registration process.

    If things are working correctly the account should be fully
    registered after this call.

    You can pass the HTTP_HOST variable for subdomains via kwargs.
    """
    if full_name is None:
        # Derive a placeholder full name from the email address.
        full_name = email.replace("@", "_")
    form_data = {
        'full_name': full_name,
        'password': password,
        'realm_name': realm_name,
        'realm_subdomain': realm_subdomain,
        'key': find_key_by_email(email),
        'timezone': timezone,
        'terms': True,
        'from_confirmation': from_confirmation,
    }
    return self.client_post('/accounts/register/', form_data, **kwargs)
def get_confirmation_url_from_outbox(self, email_address, path_pattern=r"(\S+)>"):
    # type: (Text, Text) -> Text
    """Extract the confirmation URL from the newest outbox email to *email_address*.

    Fixed: the default pattern is now a raw string — ``"\\S"`` in a plain
    string literal is an invalid escape sequence (a DeprecationWarning,
    and a SyntaxError in future Python versions).
    """
    from django.core.mail import outbox
    for message in reversed(outbox):
        if email_address in message.to:
            # Anchor the search to our own hostname to avoid matching other links.
            return re.search(settings.EXTERNAL_HOST + path_pattern,
                             message.body).groups()[0]
    else:
        raise AssertionError("Couldn't find a confirmation email.")
def get_api_key(self, email):
    # type: (Text) -> Text
    """Return the API key for *email*, memoizing it in API_KEYS."""
    try:
        return API_KEYS[email]
    except KeyError:
        key = get_user_profile_by_email(email).api_key
        API_KEYS[email] = key
        return key
def get_server_api_key(self, server_uuid):
    # type: (Text) -> Text
    """Return the API key for a remote server, memoizing it in API_KEYS."""
    try:
        return API_KEYS[server_uuid]
    except KeyError:
        key = get_remote_server_by_uuid(server_uuid).api_key
        API_KEYS[server_uuid] = key
        return key
def api_auth(self, identifier):
# type: (Text) -> Dict[str, Text]
"""
identifier: Can be an email or a remote server uuid.
"""
if is_remote_server(identifier):
api_key = self.get_server_api_key(identifier)
else:
api_key = self.get_api_key(identifier)
credentials = u"%s:%s" % (identifier, api_key)
return {
'HTTP_AUTHORIZATION': u'Basic ' + base64.b64encode(credentials.encode('utf-8')).decode('utf-8')
|
"""bookstore_project URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.9/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from oth | er_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Add an import: from blog import urls as blog_urls
2. Import the include() function: from django.conf.urls im | port url, include
3. Add a URL to urlpatterns: url(r'^blog/', include(blog_urls))
"""
from django.conf.urls import url
from django.contrib import admin
from django.conf.urls import include
from store import urls as store_urls
from django.conf import settings
from django.conf.urls.static import static
from tastypie.api import Api
from store.api import ReviewResource
# Tastypie REST API, versioned under /api/v1/.
v1_api = Api(api_name='v1')
v1_api.register(ReviewResource())

# URL routing table; media files are served from MEDIA_ROOT in development.
urlpatterns = [
    url(r'^admin/', admin.site.urls),
    url(r'^store/', include(store_urls)),
    url('', include('social.apps.django_app.urls', namespace="social")),
    url(r'^accounts/', include('registration.backends.default.urls')),
    url(r'^api/', include(v1_api.urls)),
] + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
|
gnore
async def begin_create_or_update(
    self,
    resource_group_name: str,
    express_route_gateway_name: str,
    put_express_route_gateway_parameters: "_models.ExpressRouteGateway",
    **kwargs: Any
) -> AsyncLROPoller["_models.ExpressRouteGateway"]:
    """Creates or updates a ExpressRoute gateway in a specified resource group.

    :param resource_group_name: The name of the resource group.
    :type resource_group_name: str
    :param express_route_gateway_name: The name of the ExpressRoute gateway.
    :type express_route_gateway_name: str
    :param put_express_route_gateway_parameters: Parameters required in an ExpressRoute gateway PUT
     operation.
    :type put_express_route_gateway_parameters: ~azure.mgmt.network.v2020_04_01.models.ExpressRouteGateway
    :keyword callable cls: A custom type or function that will be passed the direct response
    :keyword str continuation_token: A continuation token to restart a poller from a saved state.
    :keyword polling: By default, your polling method will be AsyncARMPolling.
     Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
    :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
    :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
    :return: An instance of AsyncLROPoller that returns either ExpressRouteGateway or the result of cls(response)
    :rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.network.v2020_04_01.models.ExpressRouteGateway]
    :raises ~azure.core.exceptions.HttpResponseError:
    """
    polling = kwargs.pop('polling', True)  # type: Union[bool, AsyncPollingMethod]
    cls = kwargs.pop('cls', None)  # type: ClsType["_models.ExpressRouteGateway"]
    lro_delay = kwargs.pop(
        'polling_interval',
        self._config.polling_interval
    )
    cont_token = kwargs.pop('continuation_token', None)  # type: Optional[str]
    if cont_token is None:
        # Fresh start: issue the initial PUT; the poller drives it to completion.
        raw_result = await self._create_or_update_initial(
            resource_group_name=resource_group_name,
            express_route_gateway_name=express_route_gateway_name,
            put_express_route_gateway_parameters=put_express_route_gateway_parameters,
            cls=lambda x,y,z: x,
            **kwargs
        )

    # These kwargs were consumed by the initial call and must not reach the poller.
    kwargs.pop('error_map', None)
    kwargs.pop('content_type', None)

    def get_long_running_output(pipeline_response):
        # Deserialize the final response body into the model type.
        deserialized = self._deserialize('ExpressRouteGateway', pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized

    path_format_arguments = {
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
        'expressRouteGatewayName': self._serialize.url("express_route_gateway_name", express_route_gateway_name, 'str'),
        'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
    }

    if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'azure-async-operation'}, path_format_arguments=path_format_arguments, **kwargs)
    elif polling is False: polling_method = AsyncNoPolling()
    else: polling_method = polling
    if cont_token:
        # Resume a previously started LRO from its saved continuation token.
        return AsyncLROPoller.from_continuation_token(
            polling_method=polling_method,
            continuation_token=cont_token,
            client=self._client,
            deserialization_callback=get_long_running_output
        )
    else:
        return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteGateways/{expressRouteGatewayName}'}  # type: ignore
async def get(
    self,
    resource_group_name: str,
    express_route_gateway_name: str,
    **kwargs: Any
) -> "_models.ExpressRouteGateway":
    """Fetches the details of a ExpressRoute gateway in a resource group.

    :param resource_group_name: The name of the resource group.
    :type resource_group_name: str
    :param express_route_gateway_name: The name of the ExpressRoute gateway.
    :type express_route_gateway_name: str
    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: ExpressRouteGateway, or the result of cls(response)
    :rtype: ~azure.mgmt.network.v2020_04_01.models.ExpressRouteGateway
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    cls = kwargs.pop('cls', None)  # type: ClsType["_models.ExpressRouteGateway"]
    error_map = {
        401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
    }
    error_map.update(kwargs.pop('error_map', {}))
    api_version = "2020-04-01"
    accept = "application/json"

    # Construct URL
    url = self.get.metadata['url']  # type: ignore
    path_format_arguments = {
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
        'expressRouteGatewayName': self._serialize.url("express_route_gateway_name", express_route_gateway_name, 'str'),
        'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
    }
    url = self._client.format_url(url, **path_format_arguments)

    # Construct parameters
    query_parameters = {}  # type: Dict[str, Any]
    query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

    # Construct headers
    header_parameters = {}  # type: Dict[str, Any]
    header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

    request = self._client.get(url, query_parameters, header_parameters)
    pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
    response = pipeline_response.http_response

    if response.status_code not in [200]:
        # Anything but 200 is surfaced as an azure-core error (mapped where possible).
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        raise HttpResponseError(response=response, error_format=ARMErrorFormat)

    deserialized = self._deserialize('ExpressRouteGateway', pipeline_response)

    if cls:
        return cls(pipeline_response, deserialized, {})

    return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteGateways/{expressRouteGatewayName}'}  # type: ignore
async def _delete_initial(
self,
resource_group_name: str,
express_route_gateway_name: str,
**kwargs: Any
) -> None:
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-04-01"
accept = "application/json"
# Construct URL
url = self._delete_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'expressRouteGatewayName': self._serialize.url("express_route_gateway_name", express_route_gateway_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version |
from __future__ import unicode_literals
import re
import sys
from textwrap import TextWrapper
from django.conf import settings
from django.template.loader import render_to_string
from django.utils.encoding import force_unicode
from django.utils.translation import ugettext, ugettext_lazy as _
# make use of a favourite notifier app such as django-notification
# but if not installed or not desired, fallback will be to do basic emailing
name = getattr(settings, 'POSTMAN_NOTIFIER_APP', 'notification')
if name and name in settings.INSTALLED_APPS:
    name = name + '.models'
    __import__(name)
    # Keep a handle on the notifier's models module; notify_user() checks this.
    notification = sys.modules[name]
else:
    notification = None

# give priority to a favourite mailer app such as django-mailer
# but if not installed or not desired, fallback to django.core.mail
name = getattr(settings, 'POSTMAN_MAILER_APP', 'mailer')
if name and name in settings.INSTALLED_APPS:
    send_mail = __import__(name, globals(), locals(), [str('send_mail')]).send_mail
else:
    from django.core.mail import send_mail

# to disable email notification to users
DISABLE_USER_EMAILING = getattr(settings, 'POSTMAN_DISABLE_USER_EMAILING', False)

# default wrap width; referenced in forms.py
WRAP_WIDTH = 55
def format_body(sender, body, indent=_("> "), width=WRAP_WIDTH):
    """
    Wrap the text and prepend lines with a prefix.

    The aim is to get lines with at most `width` chars.
    But does not wrap if the line is already prefixed.

    Prepends each line with a localized prefix, even empty lines.
    Existing line breaks are preserved.
    Used for quoting messages in replies.
    """
    indent = force_unicode(indent)  # join() doesn't work on lists with lazy translation objects
    wrapper = TextWrapper(width=width, initial_indent=indent, subsequent_indent=indent)
    quoted_lines = []
    for line in body.splitlines():
        if line.startswith(indent):
            # Already-quoted line: just add one more prefix level, don't rewrap.
            quoted_lines.append(indent + line)
        else:
            # TextWrapper doesn't add the indent on an empty text, hence the fallback.
            # (Replaces the fragile `cond and a or b` chain of the original.)
            quoted_lines.append(wrapper.fill(line) or indent)
    quote = '\n'.join(quoted_lines)
    return ugettext("\n\n{sender} wrote:\n{body}\n").format(sender=sender, body=quote)
def format_subject(subject):
    """
    Prepend a pattern to the subject, unless already there.

    Matching is case-insensitive.
    """
    # Renamed the local from `str`, which shadowed the builtin.
    reply_format = ugettext("Re: {subject}")
    pattern = '^' + reply_format.replace('{subject}', '.*') + '$'
    return subject if re.match(pattern, subject, re.IGNORECASE) else reply_format.format(subject=subject)
def email(subject_template, message_template, recipient_list, object, action, site):
    """Compose and send an email from the given templates."""
    context = {'site': site, 'object': object, 'action': action}
    subject = render_to_string(subject_template, context)
    # Email subject *must not* contain newlines
    subject = ''.join(subject.splitlines())
    message = render_to_string(message_template, context)
    # during the development phase, consider using the setting: EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'
    send_mail(subject, message, settings.DEFAULT_FROM_EMAIL, recipient_list, fail_silently=True)
def email_visitor(object, action, site):
    """Email an (anonymous) visitor."""
    email(
        'postman/email_visitor_subject.txt',
        'postman/email_visitor.txt',
        [object.email],
        object,
        action,
        site,
    )
def notify_user(object, action, site):
    """Notify a user about a message event.

    'rejection' notifies the sender; 'acceptance' notifies the recipient,
    distinguishing a reply to their own message from a brand-new message.
    Any other action is silently ignored.  Delivery goes through the
    optional notifier app when present, otherwise by direct email.
    """
    if action == 'rejection':
        user = object.sender
        label = 'postman_rejection'
    elif action == 'acceptance':
        user = object.recipient
        parent = object.parent
        # A reply back to the original sender gets the 'reply' label.
        label = 'postman_reply' if (parent and parent.sender_id == object.recipient_id) else 'postman_message'
    else:
        return
    if notification:
        # the context key 'message' is already used in django-notification/models.py/send_now() (v0.2.0)
        notification.send(users=[user], label=label, extra_context={'pm_message': object, 'pm_action': action})
    else:
        if not DISABLE_USER_EMAILING and user.email and user.is_active:
            email('postman/email_user_subject.txt', 'postman/email_user.txt', [user.email], object, action, site)
|
from __future__ import absolute_import, unicode_literals
import os
from itertools import count
import pytest
from case import Mock, patch
from celery.concurrency.base import BasePool, apply_target
from celery.exceptions import WorkerShutdown, WorkerTerminate
class test_BasePool:
    """Unit tests for celery.concurrency.base.BasePool and apply_target."""

    def test_apply_target(self):
        scratch = {}
        counter = count(0)

        def gen_callback(name, retval=None):
            # Records (invocation order, call args) under `name` when invoked.
            def callback(*args):
                scratch[name] = (next(counter), args)
                return retval
            return callback

        apply_target(gen_callback('target', 42),
                     args=(8, 16),
                     callback=gen_callback('callback'),
                     accept_callback=gen_callback('accept_callback'))
        # accept_callback fires first (order 0), then target, then callback.
        assert scratch['target'] == (1, (8, 16))
        assert scratch['callback'] == (2, (42,))
        pa1 = scratch['accept_callback']
        assert pa1[0] == 0
        assert pa1[1][0] == os.getpid()
        assert pa1[1][1]

        # No accept callback
        scratch.clear()
        apply_target(gen_callback('target', 42),
                     args=(8, 16),
                     callback=gen_callback('callback'),
                     accept_callback=None)
        assert scratch == {
            'target': (3, (8, 16)),
            'callback': (4, (42,)),
        }

    def test_apply_target__propagate(self):
        target = Mock(name='target')
        target.side_effect = KeyError()
        with pytest.raises(KeyError):
            apply_target(target, propagate=(KeyError,))

    def test_apply_target__raises(self):
        target = Mock(name='target')
        target.side_effect = KeyError()
        with pytest.raises(KeyError):
            apply_target(target)

    def test_apply_target__raises_WorkerShutdown(self):
        target = Mock(name='target')
        target.side_effect = WorkerShutdown()
        with pytest.raises(WorkerShutdown):
            apply_target(target)

    def test_apply_target__raises_WorkerTerminate(self):
        target = Mock(name='target')
        target.side_effect = WorkerTerminate()
        with pytest.raises(WorkerTerminate):
            apply_target(target)

    def test_apply_target__raises_BaseException(self):
        # A BaseException from target is swallowed; the callback still runs.
        target = Mock(name='target')
        callback = Mock(name='callback')
        target.side_effect = BaseException()
        apply_target(target, callback=callback)
        callback.assert_called()

    @patch('celery.concurrency.base.reraise')
    def test_apply_target__raises_BaseException_raises_else(self, reraise):
        target = Mock(name='target')
        callback = Mock(name='callback')
        reraise.side_effect = KeyError()
        target.side_effect = BaseException()
        with pytest.raises(KeyError):
            apply_target(target, callback=callback)
        callback.assert_not_called()

    def test_does_not_debug(self):
        x = BasePool(10)
        x._does_debug = False
        x.apply_async(object)

    def test_num_processes(self):
        assert BasePool(7).num_processes == 7

    def test_interface_on_start(self):
        BasePool(10).on_start()

    def test_interface_on_stop(self):
        BasePool(10).on_stop()

    def test_interface_on_apply(self):
        BasePool(10).on_apply()

    def test_interface_info(self):
        assert BasePool(10).info == {
            'max-concurrency': 10,
        }

    def test_interface_flush(self):
        assert BasePool(10).flush() is None

    def test_active(self):
        p = BasePool(10)
        assert not p.active
        p._state = p.RUN
        assert p.active

    def test_restart(self):
        p = BasePool(10)
        with pytest.raises(NotImplementedError):
            p.restart()

    def test_interface_on_terminate(self):
        p = BasePool(10)
        p.on_terminate()

    def test_interface_terminate_job(self):
        with pytest.raises(NotImplementedError):
            BasePool(10).terminate_job(101)

    def test_interface_did_start_ok(self):
        assert BasePool(10).did_start_ok()

    def test_interface_register_with_event_loop(self):
        assert BasePool(10).register_with_event_loop(Mock()) is None

    def test_interface_on_soft_timeout(self):
        assert BasePool(10).on_soft_timeout(Mock()) is None

    def test_interface_on_hard_timeout(self):
        assert BasePool(10).on_hard_timeout(Mock()) is None

    def test_interface_close(self):
        p = BasePool(10)
        p.on_close = Mock()
        p.close()
        assert p._state == p.CLOSE
        p.on_close.assert_called_with()

    def test_interface_no_close(self):
        assert BasePool(10).on_close() is None
|
#!/usr/bin/env python
#
# Copyright 2008, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Verifies that Google Test correctly parses environment variables."""
import os
from googletest.test import gtest_test_utils
# True iff we are running on Windows.
IS_WINDOWS = os.name == 'nt'
# True iff we are on a Linux kernel (POSIX plus a uname check).
IS_LINUX = os.name == 'posix' and os.uname()[0] == 'Linux'
# Path to the googletest-env-var-test_ binary exercised by these tests.
COMMAND = gtest_test_utils.GetTestExecutablePath('googletest-env-var-test_')
# Mutable copy of the environment; tests tweak this, not os.environ itself.
environ = os.environ.copy()
def AssertEq(expected, actual):
  """Asserts that 'expected' equals 'actual'; prints both and raises if not.

  The raised AssertionError carries the mismatching values so the failure
  is diagnosable even when stdout is not captured (the original raised a
  bare AssertionError with no message).
  """
  if expected != actual:
    print('Expected: %s' % (expected,))
    print('  Actual: %s' % (actual,))
    raise AssertionError('%r != %r' % (expected, actual))
def SetEnvVar(env_var, value):
  """Sets the env variable to 'value'; unsets it when 'value' is None."""
  if value is None:
    # Removing a key that is absent is a no-op, matching the original
    # membership check before deletion.
    environ.pop(env_var, None)
  else:
    environ[env_var] = value
def GetFlag(flag):
  """Runs googletest-env-var-test_ and returns its output."""
  command_line = [COMMAND] if flag is None else [COMMAND, flag]
  return gtest_test_utils.Subprocess(command_line, env=environ).output
def TestFlag(flag, test_val, default_val):
  """Verifies that the given flag is affected by the corresponding env var."""
  env_var = 'GTEST_' + flag.upper()
  # First with the env var set, then unset (expecting the default).
  for value, expected in ((test_val, test_val), (None, default_val)):
    SetEnvVar(env_var, value)
    AssertEq(expected, GetFlag(flag))
class GTestEnvVarTest(gtest_test_utils.TestCase):
  """Checks GTEST_* environment variables against their flag defaults."""

  def testEnvVarAffectsFlag(self):
    """Tests that environment variable should affect the corresponding flag."""
    for flag, test_val, default_val in (
        ('break_on_failure', '1', '0'),
        ('color', 'yes', 'auto'),
    ):
      TestFlag(flag, test_val, default_val)
    SetEnvVar('TESTBRIDGE_TEST_RUNNER_FAIL_FAST', None)  # For 'fail_fast' test
    TestFlag('fail_fast', '1', '0')
    TestFlag('filter', 'FooTest.Bar', '*')
    SetEnvVar('XML_OUTPUT_FILE', None)  # For 'output' test
    TestFlag('output', 'xml:tmp/foo.xml', '')
    for flag, test_val, default_val in (
        ('brief', '1', '0'),
        ('print_time', '0', '1'),
        ('repeat', '999', '1'),
        ('throw_on_failure', '1', '0'),
        ('death_test_style', 'threadsafe', 'fast'),
        ('catch_exceptions', '0', '1'),
    ):
      TestFlag(flag, test_val, default_val)
    if IS_LINUX:
      TestFlag('death_test_use_fork', '1', '0')
      TestFlag('stack_trace_depth', '0', '100')

  def testXmlOutputFile(self):
    """Tests that $XML_OUTPUT_FILE affects the output flag."""
    SetEnvVar('GTEST_OUTPUT', None)
    SetEnvVar('XML_OUTPUT_FILE', 'tmp/bar.xml')
    AssertEq('xml:tmp/bar.xml', GetFlag('output'))

  def testXmlOutputFileOverride(self):
    """Tests that $XML_OUTPUT_FILE is overridden by $GTEST_OUTPUT."""
    SetEnvVar('GTEST_OUTPUT', 'xml:tmp/foo.xml')
    SetEnvVar('XML_OUTPUT_FILE', 'tmp/bar.xml')
    AssertEq('xml:tmp/foo.xml', GetFlag('output'))
# Run the test suite when executed as a script.
if __name__ == '__main__':
  gtest_test_utils.Main()
|
dt_app.main.forms import *
from mdt_app.models import *
@pytest.mark.usefixtures('db_session', 'populate_db')
class TestQuerySelectFunctions:
    """Checks the query factories used by the form select fields."""

    def test_get_meetings(self):
        """returns future meetings that are not cancelled in order"""
        past_meeting = Meeting.query.filter_by(date='2010-11-15').first()
        cancelled_meeting = Meeting.query.filter_by(is_cancelled=True).first()
        upcoming = get_meetings().all()
        assert upcoming[0].date < upcoming[1].date
        assert past_meeting not in upcoming
        assert cancelled_meeting not in upcoming

    def test_get_consultants(self):
        """returns only consultants in order"""
        listed = get_consultants().all()
        non_consultant = User.query.filter_by(is_consultant=False).first()
        assert non_consultant not in listed
        assert listed[0].username < listed[1].username

    def test_get_users(self):
        """returns confirmed users by username"""
        confirmed_users = get_users().all()
        unconfirmed_user = User.query.filter_by(is_confirmed=False).first()
        assert confirmed_users[0].username < confirmed_users[1].username
        assert unconfirmed_user not in confirmed_users
@pytest.mark.usefixtures('db_session', 'populate_db')
class TestCaseForm:
    """Validation rules of CaseForm against the populated fixture DB."""

    def setup(self):
        # A known-valid form used as the baseline for each variant below.
        meeting = Meeting.query.filter_by(date='2050-10-16').first()
        consultant = User.query.filter_by(initials='AC').first()
        self.form = CaseForm(case_id=-1,
                             patient_id=1,
                             meeting=meeting,
                             consultant=consultant,
                             mdt_vcmg='MDT',
                             medical_history='medical history here',
                             question='question here')

    def test_validate_meeting(self):
        """
        Custom validation failures:
        - Case already exists for that patient on that date
        - meeting and add_meeting both filled in
        - no meeting or add_meeting data
        """
        clash_date = Meeting.query.filter_by(date='2050-10-30').first()
        duplicate_case = CaseForm(data=self.form.data,
                                  meeting=clash_date)
        both_fields = CaseForm(data=self.form.data,
                               add_meeting='2050-11-15',)
        neither_field = CaseForm(data=self.form.data,
                                 meeting=None)

        assert self.form.validate() is True
        for bad_form in (neither_field, duplicate_case, both_fields):
            with pytest.raises(ValidationError):
                bad_form.validate_meeting(bad_form.meeting)

    def test_validate_add_meeting(self):
        """Validate if meeting does not already exist on that date"""
        taken_date = Meeting.query.first()
        fresh = CaseForm(data=self.form.data,
                         meeting=None,
                         add_meeting='2050-11-15')
        clash = CaseForm(data=self.form.data,
                         meeting=None,
                         add_meeting=taken_date.date)
        assert fresh.validate() is True
        with pytest.raises(ValidationError):
            clash.validate_add_meeting(clash.add_meeting)
@pytest.mark.usefixtures('db_session', 'populate_db')
class TestCaseEditForm:
    """Validation rules of CaseEditForm (actions/discussion handling)."""

    def setup(self):
        # Baseline form; edits below derive from its data.
        meeting = Meeting.query.filter_by(date='2050-10-16').first()
        consultant = User.query.filter_by(initials='AC').first()
        self.user1 = User.query.first()
        self.form = CaseForm(case_id=2,
                             patient_id=1,
                             meeting=meeting,
                             consultant=consultant,
                             mdt_vcmg='MDT',
                             medical_history='medical history here',
                             question='question here')

    def test_validate_no_actions(self):
        """Validate only if no actions exist or are in the form"""
        no_problems = CaseEditForm(data=self.form.data,
                                   no_actions=True)
        form_actions = CaseEditForm(data=self.form.data,
                                    no_actions=True,
                                    action='dummy action',
                                    action_to=self.user1)
        saved_actions = CaseEditForm(data=self.form.data,
                                     no_actions=True,
                                     case_id=1)
        assert no_problems.validate() is True
        with pytest.raises(ValidationError):
            form_actions.validate_no_actions(form_actions.no_actions)
        with pytest.raises(ValidationError):
            saved_actions.validate_no_actions(saved_actions.no_actions)

    def test_validate_action(self):
        """
        Validate passes if
        - action and action_to are blank
        - action, action_to and discussion are filled
        Validate fails if
        - one of discussion, action or action_to are blank
        """
        # FIX: removed an unused `user1 = User.query.first()` local that
        # shadowed self.user1 and displaced the docstring above from
        # docstring position (it was a dead expression statement before).
        no_data = CaseEditForm(data=self.form.data)
        no_problems = CaseEditForm(data=self.form.data,
                                   discussion='discussed',
                                   action='dummy action',
                                   action_to=self.user1)
        no_discussion = CaseEditForm(data=no_problems.data,
                                     discussion=None)
        no_action = CaseEditForm(data=no_problems.data,
                                 action=None)
        no_action_to = CaseEditForm(data=no_problems.data,
                                    action_to=None)
        assert no_data.validate() is True
        no_data.validate_action(no_data.action)
        assert no_problems.validate() is True
        no_problems.validate_action(no_problems.action)
        with pytest.raises(ValidationError):
            no_discussion.validate_action(no_discussion.action)
        with pytest.raises(ValidationError):
            no_action.validate_action(no_action.action)
        with pytest.raises(ValidationError):
            no_action_to.validate_action(no_action_to.action)
@pytest.mark.usefixtures('db_session', 'populate_db')
class TestMeetingForm:
    """Validate if meeting on that date doesn't have the same id"""

    def setup(self):
        self.new_meeting = MeetingForm(id=-1,
                                       date='2050-11-15')

    def test_validate_date(self):
        existing = Meeting.query.first()
        newest = Meeting.query.all()[-1]
        create_form = MeetingForm(data=self.new_meeting.data)
        edit_form = MeetingForm(data=self.new_meeting.data,
                                id=existing.id)
        create_clash = MeetingForm(data=self.new_meeting.data,
                                   date=existing.date)
        edit_clash = MeetingForm(id=newest.id + 1,
                                 date=existing.date)

        assert create_form.validate() is True
        create_form.validate_date(create_form.date)
        assert edit_form.validate() is True
        edit_form.validate_date(edit_form.date)
        with pytest.raises(ValidationError):
            create_clash.validate_date(create_clash.date)
        with pytest.raises(ValidationError):
            edit_clash.validate_date(edit_clash.date)
@pytest.mark.usefixtures('db_session', 'populate_db')
class TestPatientForm:
def setup(self):
self.patient = PatientForm(id=-1,
hospital_number='15975346',
first_name='New',
last_name='PATIENT',
date_of_birth='1987-12-05',
sex='F')
self.existing_patients = Patient.query.all()
def test_validate_hospital_num |
# -*- coding: utf-8 -*-
# pylint: disable=invalid-name,too-many-instance-attributes
"""Copyright 2015 Roger R Labbe Jr.
FilterPy library.
http://github.com/rlabbe/filterpy
Documentation at:
https://filterpy.readthedocs.org
Supporting book at:
https://github.com/rlabbe/Kalman-and-Bayesian-Filters-in-Python
This is licensed under an MIT license. See the readme.MD file
for more information.
"""
from __future__ import absolute_import, division
from copy import deepcopy
import numpy as np
from filterpy.common import pretty_str
class MMAEFilterBank(object):
    """
    Implements the fixed Multiple Model Adaptive Estimator (MMAE). This
    is a bank of independent Kalman filters. This estimator computes the
    likelihood that each filter is the correct one, and blends their state
    estimates weighted by their likelihood to produce the state estimate.

    Parameters
    ----------
    filters : list of Kalman filters
        List of Kalman filters.

    p : list-like of floats
        Initial probability that each filter is the correct one. In general
        you'd probably set each element to 1./len(p).

    dim_x : float
        number of random variables in the state X

    H : Measurement matrix

    Attributes
    ----------
    x : numpy.array(dim_x, 1)
        Current state estimate. Any call to update() or predict() updates
        this variable.

    P : numpy.array(dim_x, dim_x)
        Current state covariance matrix. Any call to update() or predict()
        updates this variable.

    x_prior : numpy.array(dim_x, 1)
        Prior (predicted) state estimate. The *_prior and *_post attributes
        are for convienence; they store the prior and posterior of the
        current epoch. Read Only.

    P_prior : numpy.array(dim_x, dim_x)
        Prior (predicted) state covariance matrix. Read Only.

    x_post : numpy.array(dim_x, 1)
        Posterior (updated) state estimate. Read Only.

    P_post : numpy.array(dim_x, dim_x)
        Posterior (updated) state covariance matrix. Read Only.

    z : ndarray
        Last measurement used in update(). Read only.

    filters : list of Kalman filters
        List of Kalman filters.

    Examples
    --------
    ..code:
        ca = make_ca_filter(dt, noise_factor=0.6)
        cv = make_ca_filter(dt, noise_factor=0.6)
        cv.F[:,2] = 0 # remove acceleration term
        cv.P[2,2] = 0
        cv.Q[2,2] = 0

        filters = [cv, ca]
        bank = MMAEFilterBank(filters, p=(0.5, 0.5), dim_x=3)

        for z in zs:
            bank.predict()
            bank.update(z)

    Also, see my book Kalman and Bayesian Filters in Python
    https://github.com/rlabbe/Kalman-and-Bayesian-Filters-in-Python

    References
    ----------
    Zarchan and Musoff. "Fundamentals of Kalman filtering: A Practical
    Approach." AIAA, third edition.
    """

    def __init__(self, filters, p, dim_x, H=None):
        if len(filters) != len(p):
            raise ValueError('length of filters and p must be the same')
        if dim_x < 1:
            raise ValueError('dim_x must be >= 1')

        self.filters = filters
        self.p = np.asarray(p)
        self.dim_x = dim_x
        self.H = None if H is None else np.copy(H)

        # try to form a reasonable initial values, but good luck!
        try:
            self.z = np.copy(filters[0].z)
            self.x = np.copy(filters[0].x)
            self.P = np.copy(filters[0].P)
        except AttributeError:
            self.z = 0
            self.x = None
            self.P = None

        # BUG FIX: the original unconditionally called self.x.copy() here,
        # which raised AttributeError whenever the fallback above left
        # x/P as None (filters with no initial x/P).
        if self.x is not None:
            # these will always be a copy of x,P after predict()/update()
            self.x_prior = self.x.copy()
            self.P_prior = self.P.copy()
            self.x_post = self.x.copy()
            self.P_post = self.P.copy()
        else:
            self.x_prior = self.P_prior = None
            self.x_post = self.P_post = None

    def predict(self, u=0):
        """
        Predict next position using the Kalman filter state propagation
        equations for each filter in the bank.

        Parameters
        ----------
        u : np.array
            Optional control vector. If non-zero, it is multiplied by B
            to create the control input into the system.
        """
        for f in self.filters:
            f.predict(u)

        # save prior -- note the bank's own x/P are only re-blended in
        # update(), so this snapshots the last blended estimate
        if self.x is not None:
            self.x_prior = self.x.copy()
            self.P_prior = self.P.copy()

    def update(self, z, R=None, H=None):
        """
        Add a new measurement (z) to the Kalman filter. If z is None, nothing
        is changed.

        Parameters
        ----------
        z : np.array
            measurement for this update.

        R : np.array, scalar, or None
            Optionally provide R to override the measurement noise for this
            one call, otherwise self.R will be used.

        H : np.array, or None
            Optionally provide H to override the measurement function for this
            one call, otherwise self.H will be used.
        """
        if H is None:
            H = self.H

        # new probability is recursively defined as prior * likelihood
        for i, f in enumerate(self.filters):
            f.update(z, R, H)
            self.p[i] *= f.likelihood
        self.p /= sum(self.p)  # normalize

        # compute estimated state of the bank of filters.
        # state can be in form [x,y,z,...] or [[x, y, z,...]].T
        is_row_vector = (self.filters[0].x.ndim == 1)
        if is_row_vector:
            self.x = np.zeros(self.dim_x)
        else:
            self.x = np.zeros((self.dim_x, 1))
        for f, p in zip(self.filters, self.p):
            self.x += np.dot(f.x, p)

        # BUG FIX: the original zipped self.x (individual state components)
        # together with the filters, so the residual y used a single scalar
        # component of the blended state and the loop ran only
        # min(dim_x, len(filters)) times. The spread-of-the-means term must
        # use the full state vector for every filter.
        self.P = np.zeros(self.filters[0].P.shape)
        for f, p in zip(self.filters, self.p):
            y = f.x - self.x
            self.P += p * (np.outer(y, y) + f.P)

        # save measurement and posterior state
        self.z = deepcopy(z)
        self.x_post = self.x.copy()
        self.P_post = self.P.copy()

    def __repr__(self):
        return '\n'.join([
            'MMAEFilterBank object',
            pretty_str('dim_x', self.dim_x),
            pretty_str('x', self.x),
            pretty_str('P', self.P),
            pretty_str('log-p', self.p),
        ])
|
eturns:
the resource from which this trained component can be loaded
"""
self.warn_if_pos_features_cannot_be_computed(training_data)
feature_to_idx_dict = self._create_feature_to_idx_dict(training_data)
self._set_feature_to_idx_dict(feature_to_idx_dict=feature_to_idx_dict)
if not self._feature_to_idx_dict:
rasa.shared.utils.io.raise_warning(
"No lexical syntactic features could be extracted from the training "
"data. In order for this component to work you need to define "
"`features` that can be found in the given training data."
)
self.persist()
return self._resource
def warn_if_pos_features_cannot_be_computed(
self, training_data: TrainingData
) -> None:
"""Warn if part-of-speech features are needed but not given."""
training_example = next(
(
message
for message in training_data.training_examples
if message.get(TOKENS_NAMES[TEXT], [])
),
Message(),
)
tokens_example = training_example.get(TOKENS_NAMES[TEXT], [])
configured_feature_names = set(
feature_name
for pos_config in self._feature_config
for feature_name in pos_config
)
if {"pos", "pos2"}.intersection(
configured_feature_names
) and not tokens_example[0].data.get(POS_TAG_KEY, []):
rasa.shared.utils.io.raise_warning(
f"Expected training data to include tokens with part-of-speech tags"
f"because the given configuration includes part-of-speech features "
f"`pos` and/or `pos2`. "
f"Please add a {SpacyTokenizer.__name__} to your "
f"configuration if you want to use the part-of-speech-features in the"
f"{self.__class__.__name__}. "
f"Continuing without the part-of-speech-features."
)
def _create_feature_to_idx_dict(
self, training_data: TrainingData
) -> Dict[Tuple[int, Text], Dict[Text, int]]:
"""Create a nested dictionary of all feature values.
Returns:
a nested mapping that maps from tuples of positions (in the window) and
supported feature names to "raw feature to index" mappings, i.e.
mappings that map the respective raw feature values to unique indices
(where `unique` means unique with respect to all indices in the
*nested* mapping)
"""
# collect all raw feature values
feature_vocabulary: Dict[Tuple[int, Text], Set[Text]] = dict()
for example in training_data.training_examples:
tokens = example.get(TOKENS_NAMES[TEXT], [])
sentence_features = self._map_tokens_to_raw_features(tokens)
for token_features in sentence_features:
for position_and_feature_name, feature_value in token_features.items():
feature_vocabulary.setdefault(position_and_feature_name, set()).add(
feature_value
)
# assign a unique index to each feature value
return self._build_feature_to_index_map(feature_vocabulary)
    def _map_tokens_to_raw_features(
        self, tokens: List[Token]
    ) -> List[Dict[Tuple[int, Text], Text]]:
        """Extracts the raw feature values.

        Args:
            tokens: a tokenized text

        Returns:
            a list of feature dictionaries for each token in the given list
            where each feature dictionary maps a tuple containing
            - a position (in the window) and
            - a supported feature name
            to the corresponding raw feature value
        """
        sentence_features = []

        # in case of an even number we will look at one more word before,
        # e.g. window size 4 will result in a window range of
        # [-2, -1, 0, 1] (0 = current word in sentence)
        window_size = len(self._feature_config)
        half_window_size = window_size // 2
        window_range = range(-half_window_size, half_window_size + window_size % 2)
        assert len(window_range) == window_size

        # For each token, collect the configured features of its neighbours
        # inside the window (clipped at the sentence boundaries).
        for anchor in range(len(tokens)):
            token_features: Dict[Tuple[int, Text], Text] = {}
            for window_position, relative_position in enumerate(window_range):
                absolute_position = anchor + relative_position
                # skip, if current_idx is pointing to a non-existing token
                if absolute_position < 0 or absolute_position >= len(tokens):
                    continue
                token = tokens[absolute_position]
                # one raw value per feature name configured for this slot
                for feature_name in self._feature_config[window_position]:
                    token_features[
                        (window_position, feature_name)
                    ] = self._extract_raw_features_from_token(
                        token=token,
                        feature_name=feature_name,
                        token_position=absolute_position,
                        num_tokens=len(tokens),
                    )
            sentence_features.append(token_features)

        return sentence_features
@staticmethod
def _build_feature_to_index_map(
feature_vocabulary: Dict[Tuple[int, Text], Set[Text]]
) -> Dict[Tuple[int, Text], Dict[Text, int]]:
"""Creates a nested dictionary for mapping raw features to indices.
Args:
feature_vocabulary: a mapping from tuples of positions (in the window) and
supported feature names to the set of possible feature values
Returns:
a nested mapping that maps from tuples of positions (in the window) and
supported feature names to "raw feature to index" mappings, i.e.
mappings that map the respective raw feature values to unique indices
(where `unique` means unique with respect to all indices in the
*nested* mapping)
"""
# Note that this will only sort the top level keys - and we keep
# doing it to ensure consistently with what was done before)
ordered_feature_vocabulary: OrderedDict[
Tuple[int, Text], Set[Text]
] = OrderedDict(sorted(feature_vocabulary.items()))
# create the nested mapping
feature_to_idx_dict: Dict[Tuple[int, Text], Dict[Text, int]] = {}
offset = 0
for (
position_and_feature_name,
feature_values,
) in ordered_feature_vocabulary.items():
sorted_feature_values = sorted(feature_values)
feature_to_idx_dict[position_and_feature_name] = {
feature_value: feat | ure_idx
for feature_idx, feature_value in enumerate(
sorted_feature_values, start=offset
)
}
offset += len(feature_values)
return feature_to_idx_dict
def process(self, messages: List[Message]) -> List[Message]:
"""Featurizes all given messages in-place.
Args:
| messages: messages to be featurized.
Returns:
The same list with the same messages after featurization.
"""
for message in messages:
self._process_message(message)
return messages
def process_training_data(self, training_data: TrainingData) -> TrainingData:
"""Processes the training examples in the given training data in-place.
Args:
training_data: the training data
Returns:
same training data after processing
"""
self.process(training_data.training_examples)
return training_data
def _process_message(self, message: Message) -> None:
"""Featurizes the given message in-place.
Args:
message: a message to be featurized
"""
if not self._feature_to_idx_dict:
rasa.shared.utils.io.raise_warning(
f"The {self.__class__.__name__} {self._identifier} has not been "
f"trained properly |
#BitArray
#Yu.Yang
#
class bitarray():
    """A fixed-length array of boolean bits, modelled on .NET's BitArray.

    Supports element access (Get/Set), bulk assignment (input_from_array,
    SetAll), and in-place logical operations (And/Or/Xor/Not) against
    another bitarray of the same length. ``version`` is bumped on every
    mutation so iterators could detect concurrent modification.
    """

    def __init__(self, length, defaultValue=False):
        """Create `length` bits, each initialised to `defaultValue`.

        Raises:
            Exception: if length is negative.
        """
        if (length < 0):
            raise Exception("Length param error")
        self.length = length
        # FIX: removed the unused local `fillValue` from the original.
        self.array = [defaultValue for _ in range(length)]
        self.version = 0

    def input_from_array(self, value):
        """Copy `value` (a list with the same length) into this bitarray."""
        if (isinstance(value, list) == False):
            raise Exception("value is not a Array")
        if (value is None or len(value) != self.length):
            raise Exception("ArgumentException if value == null or value.Length != this.Length.")
        for i in range(self.length):
            self.Set(i, value[i])
        self.version += 1
        return self

    def __len__(self):
        """Number of bits in the array."""
        return self.length

    def __str__(self):
        """Render as e.g. "[1 0 1 ]" -- one digit plus a space per bit."""
        # FIX: the original used a local named `str`, shadowing the builtin.
        text = "["
        for i in range(self.length):
            text += "1" if self.array[i] == True else "0"
            text += " "
        text += "]"
        return text

    def Get(self, index):
        """Return the bit at `index`; raises on out-of-range index."""
        if (index < 0 or index >= self.length):
            raise Exception("ArgumentOutOfRangeException if index < 0 or index >= GetLength()")
        return self.array[index]

    def Set(self, index, value):
        """Set the bit at `index` to bool(value); raises on out-of-range."""
        if (index < 0 or index >= self.length):
            raise Exception("ArgumentOutOfRangeException if index < 0 or index >= GetLength()")
        self.array[index] = True if value else False
        self.version += 1

    def SetAll(self, value):
        """Set every bit to bool(value)."""
        for i in range(self.length):
            self.Set(i, value)
        self.version += 1

    def _check_operand(self, value):
        # BUG FIX: the original tested isinstance(value, BitArray) --
        # `BitArray` is undefined in this module (the class is named
        # `bitarray`), so And/Or/Xor always raised NameError when called.
        if (isinstance(value, bitarray) == False):
            raise Exception("value is not a BitArray")
        if (value is None or len(value) != self.length):
            raise Exception("ArgumentException if value == null or value.Length != this.Length.")

    def And(self, value):
        """In-place logical AND with another bitarray; returns self."""
        self._check_operand(value)
        for i in range(self.length):
            self.array[i] &= value.Get(i)
        self.version += 1
        return self

    def Or(self, value):
        """In-place logical OR with another bitarray; returns self."""
        self._check_operand(value)
        for i in range(self.length):
            self.array[i] |= value.Get(i)
        self.version += 1
        return self

    def Xor(self, value):
        """In-place logical XOR with another bitarray; returns self."""
        self._check_operand(value)
        for i in range(self.length):
            self.array[i] ^= value.Get(i)
        self.version += 1
        return self

    def Not(self):
        """Invert every bit in place; returns self."""
        for i in range(self.length):
            self.array[i] = not self.array[i]
        self.version += 1
        return self
|
"""Numeric integration of data coming from a source sensor over time."""
from decimal import Decimal, DecimalException
import logging
import voluptuous as vol
from homeassistant.components.sensor import (
DEVICE_CLASS_ENERGY,
DEVICE_CLASS_POWER,
PLATFORM_SCHEMA,
STATE_CLASS_TOTAL,
SensorEntity,
)
from homeassistant.const import (
ATTR_DEVICE_CLASS,
ATTR_UNIT_OF_MEASUREMENT,
CONF_METHOD,
CONF_NAME,
STATE_UNAVAILABLE,
STATE_UNKNOWN,
TIME_DAYS,
TIME_HOURS,
TIME_MINUTES,
TIME_SECONDS,
)
from homeassistant.core import callback
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.event import async_track_state_change_event
from homeassistant.helpers.restore_state import RestoreEntity
# mypy: allow-untyped-defs, no-check-untyped-defs
_LOGGER = logging.getLogger(__name__)

# Attribute and YAML-config keys used by this platform.
ATTR_SOURCE_ID = "source"
CONF_SOURCE_SENSOR = "source"
CONF_ROUND_DIGITS = "round"
CONF_UNIT_PREFIX = "unit_prefix"
CONF_UNIT_TIME = "unit_time"
CONF_UNIT_OF_MEASUREMENT = "unit"

# Supported Riemann-sum approximations.
TRAPEZOIDAL_METHOD = "trapezoidal"
LEFT_METHOD = "left"
RIGHT_METHOD = "right"
INTEGRATION_METHOD = [TRAPEZOIDAL_METHOD, LEFT_METHOD, RIGHT_METHOD]

# SI Metric prefixes
UNIT_PREFIXES = {None: 1, "k": 10 ** 3, "M": 10 ** 6, "G": 10 ** 9, "T": 10 ** 12}

# SI Time prefixes
UNIT_TIME = {
    TIME_SECONDS: 1,
    TIME_MINUTES: 60,
    TIME_HOURS: 60 * 60,
    TIME_DAYS: 24 * 60 * 60,
}

# Frontend icon and default rounding of the integral value.
ICON = "mdi:chart-histogram"
DEFAULT_ROUND = 3

# Platform schema; the raw "unit" option is deprecated but still accepted.
PLATFORM_SCHEMA = vol.All(
    cv.deprecated(CONF_UNIT_OF_MEASUREMENT),
    PLATFORM_SCHEMA.extend(
        {
            vol.Optional(CONF_NAME): cv.string,
            vol.Required(CONF_SOURCE_SENSOR): cv.entity_id,
            vol.Optional(CONF_ROUND_DIGITS, default=DEFAULT_ROUND): vol.Coerce(int),
            vol.Optional(CONF_UNIT_PREFIX, default=None): vol.In(UNIT_PREFIXES),
            vol.Optional(CONF_UNIT_TIME, default=TIME_HOURS): vol.In(UNIT_TIME),
            vol.Optional(CONF_UNIT_OF_MEASUREMENT): cv.string,
            vol.Optional(CONF_METHOD, default=TRAPEZOIDAL_METHOD): vol.In(
                INTEGRATION_METHOD
            ),
        }
    ),
)
async def async_setup_platform(hass, config, async_add_entities, discovery_info=None):
    """Set up the integration sensor."""
    sensor = IntegrationSensor(
        config[CONF_SOURCE_SENSOR],
        config.get(CONF_NAME),
        config[CONF_ROUND_DIGITS],
        config[CONF_UNIT_PREFIX],
        config[CONF_UNIT_TIME],
        config.get(CONF_UNIT_OF_MEASUREMENT),
        config[CONF_METHOD],
    )
    async_add_entities([sensor])
class IntegrationSensor(RestoreEntity, SensorEntity):
    """Representation of an integration sensor."""

    def __init__(
        self,
        source_entity,
        name,
        round_digits,
        unit_prefix,
        unit_time,
        unit_of_measurement,
        integration_method,
    ):
        """Initialize the integration sensor."""
        self._sensor_source_id = source_entity
        self._round_digits = round_digits
        self._state = None
        self._method = integration_method

        self._name = name if name is not None else f"{source_entity} integral"
        # e.g. prefix "k" + source unit "W" + time "h" -> "kWh"
        self._unit_template = (
            f"{'' if unit_prefix is None else unit_prefix}{{}}{unit_time}"
        )
        self._unit_of_measurement = unit_of_measurement
        self._unit_prefix = UNIT_PREFIXES[unit_prefix]
        self._unit_time = UNIT_TIME[unit_time]
        self._attr_state_class = STATE_CLASS_TOTAL

    async def async_added_to_hass(self):
        """Handle entity which will be added."""
        await super().async_added_to_hass()
        if state := await self.async_get_last_state():
            try:
                self._state = Decimal(state.state)
            except (DecimalException, ValueError) as err:
                _LOGGER.warning("Could not restore last state: %s", err)
            else:
                self._attr_device_class = state.attributes.get(ATTR_DEVICE_CLASS)
                if self._unit_of_measurement is None:
                    self._unit_of_measurement = state.attributes.get(
                        ATTR_UNIT_OF_MEASUREMENT
                    )

        @callback
        def calc_integration(event):
            """Handle the sensor state changes."""
            old_state = event.data.get("old_state")
            new_state = event.data.get("new_state")

            # BUG FIX: new_state can be None (e.g. the source entity was
            # removed); the original dereferenced new_state.attributes
            # below before any None check, raising AttributeError inside
            # the event handler.
            if new_state is None:
                return

            if self._unit_of_measurement is None:
                unit = new_state.attributes.get(ATTR_UNIT_OF_MEASUREMENT)
                self._unit_of_measurement = self._unit_template.format(
                    "" if unit is None else unit
                )

            if (
                self.device_class is None
                and new_state.attributes.get(ATTR_DEVICE_CLASS) == DEVICE_CLASS_POWER
            ):
                self._attr_device_class = DEVICE_CLASS_ENERGY

            if (
                old_state is None
                or old_state.state in (STATE_UNKNOWN, STATE_UNAVAILABLE)
                or new_state.state in (STATE_UNKNOWN, STATE_UNAVAILABLE)
            ):
                return

            try:
                # integration as the Riemann integral of previous measures.
                area = 0
                elapsed_time = (
                    new_state.last_updated - old_state.last_updated
                ).total_seconds()

                if self._method == TRAPEZOIDAL_METHOD:
                    area = (
                        (Decimal(new_state.state) + Decimal(old_state.state))
                        * Decimal(elapsed_time)
                        / 2
                    )
                elif self._method == LEFT_METHOD:
                    area = Decimal(old_state.state) * Decimal(elapsed_time)
                elif self._method == RIGHT_METHOD:
                    area = Decimal(new_state.state) * Decimal(elapsed_time)

                integral = area / (self._unit_prefix * self._unit_time)
                assert isinstance(integral, Decimal)
            except ValueError as err:
                _LOGGER.warning("While calculating integration: %s", err)
            except DecimalException as err:
                _LOGGER.warning(
                    "Invalid state (%s > %s): %s", old_state.state, new_state.state, err
                )
            except AssertionError as err:
                _LOGGER.error("Could not calculate integral: %s", err)
            else:
                # Accumulate into the running total (first sample starts it).
                if isinstance(self._state, Decimal):
                    self._state += integral
                else:
                    self._state = integral
                self.async_write_ha_state()

        async_track_state_change_event(
            self.hass, [self._sensor_source_id], calc_integration
        )

    @property
    def name(self):
        """Return the name of the sensor."""
        return self._name

    @property
    def native_value(self):
        """Return the state of the sensor."""
        if isinstance(self._state, Decimal):
            return round(self._state, self._round_digits)
        return self._state

    @property
    def native_unit_of_measurement(self):
        """Return the unit the value is expressed in."""
        return self._unit_of_measurement

    @property
    def should_poll(self):
        """No polling needed."""
        return False

    @property
    def extra_state_attributes(self):
        """Return the state attributes of the sensor."""
        return {ATTR_SOURCE_ID: self._sensor_source_id}

    @property
    def icon(self):
        """Return the icon to use in the frontend."""
        return ICON
|
#!/usr/bin/pyt | hon2
# -*- coding: utf-8 -*-
import json, web
from lib.log import Log
class Env(object):
    """Thin wrapper around the per-request web.ctx.env dictionary."""

    @staticmethod
    def get(key):
        """Return env[key] when 'key' is truthy and present.

        NOTE(review): when the key is falsy or missing, the WHOLE env dict
        is returned -- presumably intentional as a "dump everything"
        fallback; confirm with callers before changing.
        """
        if key and key in web.ctx.env:
            return web.ctx.env[key]
        else:
            return web.ctx.env

    @staticmethod
    def set(key, value):
        """Store 'value' under 'key' in the request environment."""
        web.ctx.env[key] = value

    @staticmethod
    def setFromFile(file):
        """Merge the JSON object stored in 'file' into the environment.

        BUG FIX: the original opened the file without ever closing it;
        'with' guarantees the handle is released even if json.load raises.
        """
        with open(file) as fenv:
            jenv = json.load(fenv)
        for key, value in jenv.items():
            web.ctx.env[key] = value
# Copyright (c) 2016 Red Hat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import ddt
import mock
from oslo_serialization import jsonutils
from six.moves import http_client
import webob
from cinder.api import microversions as mv
from cinder.api.v3 import router as router_v3
from cinder.api.v3 import workers
from cinder.common import constants
from cinder import context
from cinder import objects
from cinder import test
from cinder.tests.unit.api import fakes
from cinder.tests.unit import fake_constants as fake
# Two pairs of volume services sharing cluster 'mycluster'; used as canned
# data for worker-cleanup tests.
SERVICES = (
    [objects.Service(id=1, host='host1', binary=constants.VOLUME_BINARY,
                     cluster_name='mycluster'),
     objects.Service(id=2, host='host2', binary=constants.VOLUME_BINARY,
                     cluster_name='mycluster')],
    [objects.Service(id=3, host='host3', binary=constants.VOLUME_BINARY,
                     cluster_name='mycluster'),
     objects.Service(id=4, host='host4', binary=constants.VOLUME_BINARY,
                     cluster_name='mycluster')],
)
def app():
    """Build a minimal v3 API app for tests.

    No auth middleware -- environ['cinder.context'] passes straight through.
    """
    mapper = fakes.urlmap.URLMap()
    mapper['/v3'] = router_v3.APIRouter()
    return mapper
@ddt.ddt
class WorkersTestCase(test.TestCase):
    """Test Case for the cleanup of Workers entries."""
    def setUp(self):
        """Create an admin request context and the workers controller."""
        super(WorkersTestCase, self).setUp()
        self.context = context.RequestContext(user_id=None,
                                              project_id=fake.PROJECT_ID,
                                              is_admin=True,
                                              read_deleted='no',
                                              overwrite=False)
        self.controller = workers.create_resource()
    def _get_resp_post(self, body, version=mv.WORKERS_CLEANUP, ctxt=None):
        """Helper to execute a POST workers API call."""
        req = webob.Request.blank('/v3/%s/workers/cleanup' % fake.PROJECT_ID)
        req.method = 'POST'
        req.headers['Content-Type'] = 'application/json'
        req.headers['OpenStack-API-Version'] = 'volume ' + version
        req.environ['cinder.context'] = ctxt or self.context
        req.body = jsonutils.dump_as_bytes(body)
        res = req.get_response(app())
        return res
    @mock.patch('cinder.scheduler.rpcapi.SchedulerAPI.work_cleanup')
    def test_cleanup_old_api_version(self, rpc_mock):
        """Requests below the cleanup microversion get 404; RPC untouched."""
        res = self._get_resp_post({}, mv.get_prior_version(mv.WORKERS_CLEANUP))
        self.assertEqual(http_client.NOT_FOUND, res.status_code)
        rpc_mock.assert_not_called()
    @mock.patch('cinder.scheduler.rpcapi.SchedulerAPI.work_cleanup')
    def test_cleanup_not_authorized(self, rpc_mock):
        """Non-admin contexts get 403; RPC untouched."""
        ctxt = context.RequestContext(user_id=None,
                                      project_id=fake.PROJECT_ID,
                                      is_admin=False,
                                      read_deleted='no',
                                      overwrite=False)
        res = self._get_resp_post({}, ctxt=ctxt)
        self.assertEqual(http_client.FORBIDDEN, res.status_code)
        rpc_mock.assert_not_called()
    @ddt.data({'binary': 'nova-scheduler'},
              {'disabled': 'sure'}, {'is_up': 'nop'},
              {'resource_type': 'service'}, {'resource_id': 'non UUID'},
              {'is_up': 11}, {'disabled': 11},
              {'is_up': ' true '}, {'disabled': ' false '})
    @mock.patch('cinder.scheduler.rpcapi.SchedulerAPI.work_cleanup')
    def test_cleanup_wrong_param(self, body, rpc_mock):
        """Malformed filter values fail validation with 400 'Invalid input'."""
        res = self._get_resp_post(body)
        self.assertEqual(http_client.BAD_REQUEST, res.status_code)
        expected = 'Invalid input'
        self.assertIn(expected, res.json['badRequest']['message'])
        rpc_mock.assert_not_called()
    @ddt.data({'fake_key': 'value'})
    @mock.patch('cinder.scheduler.rpcapi.SchedulerAPI.work_cleanup')
    def test_cleanup_with_additional_properties(self, body, rpc_mock):
        """Unknown body keys are rejected by the request schema."""
        res = self._get_resp_post(body)
        self.assertEqual(http_client.BAD_REQUEST, res.status_code)
        expected = 'Additional properties are not allowed'
        self.assertIn(expected, res.json['badRequest']['message'])
        rpc_mock.assert_not_called()
    def _expected_services(self, cleaning, unavailable):
        """Build the expected JSON body from two lists of Service objects."""
        def service_view(service):
            return {'id': service.id, 'host': service.host,
                    'binary': service.binary,
                    'cluster_name': service.cluster_name}
        return {'cleaning': [service_view(s) for s in cleaning],
                'unavailable': [service_view(s) for s in unavailable]}
    @ddt.data({'service_id': 10}, {'binary': 'cinder-volume'},
              {'binary': 'cinder-scheduler'}, {'disabled': 'false'},
              {'is_up': 'no'}, {'resource_type': 'Volume'},
              {'resource_id': fake.VOLUME_ID, 'host': 'host@backend'},
              {'host': 'host@backend#pool'},
              {'cluster_name': 'cluster@backend'},
              {'cluster_name': 'cluster@backend#pool'},
              {'service_id': None},
              {'cluster_name': None}, {'host': None},
              {'resource_type': ''}, {'resource_type': None},
              {'resource_id': None})
    @mock.patch('cinder.scheduler.rpcapi.SchedulerAPI.work_cleanup',
                return_value=SERVICES)
    def test_cleanup_params(self, body, rpc_mock):
        """Valid filters are accepted and forwarded to the scheduler RPC."""
        res = self._get_resp_post(body)
        self.assertEqual(http_client.ACCEPTED, res.status_code)
        rpc_mock.assert_called_once_with(self.context, mock.ANY)
        cleanup_request = rpc_mock.call_args[0][1]
        for key, value in body.items():
            if key in ('disabled', 'is_up'):
                # These two are coerced to booleans ('true' -> True).
                if value is not None:
                    value = value == 'true'
            self.assertEqual(value, getattr(cleanup_request, key))
        self.assertEqual(self._expected_services(*SERVICES), res.json)
    @mock.patch('cinder.db.worker_get_all',
                return_value=[mock.Mock(service_id=1, resource_type='Volume')])
    @mock.patch('cinder.scheduler.rpcapi.SchedulerAPI.work_cleanup',
                return_value=SERVICES)
    def test_cleanup_missing_location_ok(self, rpc_mock, worker_mock):
        """With exactly one matching worker row, the request is completed
        with that row's service_id and resource_type."""
        res = self._get_resp_post({'resource_id': fake.VOLUME_ID})
        self.assertEqual(http_client.ACCEPTED, res.status_code)
        rpc_mock.assert_called_once_with(self.context, mock.ANY)
        cleanup_request = rpc_mock.call_args[0][1]
        self.assertEqual(fake.VOLUME_ID, cleanup_request.resource_id)
        self.assertEqual(1, cleanup_request.service_id)
        self.assertEqual('Volume', cleanup_request.resource_type)
        self.assertEqual(self._expected_services(*SERVICES), res.json)
    @mock.patch('cinder.scheduler.rpcapi.SchedulerAPI.work_cleanup')
    def test_cleanup_missing_location_fail_none(self, rpc_mock):
        """A bare resource_id that can't be located yields 400; no RPC."""
        res = self._get_resp_post({'resource_id': fake.VOLUME_ID})
        self.assertEqual(http_client.BAD_REQUEST, res.status_code)
        self.assertIn('Invalid input', res.json['badRequest']['message'])
        rpc_mock.assert_not_called()
    @mock.patch('cinder.scheduler.rpcapi.SchedulerAPI.work_cleanup',
                return_value=[1, 2])
    def test_cleanup_missing_location_fail_multiple(self, rpc_mock):
        """An ambiguous resource_id yields 400; no RPC call is made."""
        res = self._get_resp_post({'resource_id': fake.VOLUME_ID})
        self.assertEqual(http_client.BAD_REQUEST, res.status_code)
        self.assertIn('Invalid input', res.json['badRequest']['message'])
        rpc_mock.assert_not_called()
|
from __future__ import print_function
from __future__ import unicode_literals
from builtins import str, bytes, dict, int
import os
import sys
sys.path.insert(0, os.path.join(os.path.dirname(__file__), "..", ".."))
from pattern.search import search, Pattern, Constraint
from pattern.en import parsetree, parse, Sentence
# What we call a "search word" in example 01-search.py
# is actually called a constraint, because it can contain different options.
# Options are separated by "|".
# The next search pattern retrieves words that are a noun OR an adjective:
# Parse a small phrase; search() returns the words matching the constraint.
s = parsetree("big white rabbit")
print(search("NN|JJ", s))
print("")
# This pattern yields phrases containing an adjective followed by a noun.
# Consecutive constraints are separated by a space:
print(search("JJ NN", s))
print("")
# Or a noun preceded by any number of adjectives:
print(search("JJ?+ NN", s))
print("")
# Note: NN marks singular nouns, NNS marks plural nouns.
# If you want to include both, use "NN*" as a constraint.
# This works for NN*, VB*, JJ*, RB*.
s = parsetree("When I sleep the big white rabbit will stare at my feet.")
m = search("rabbit stare at feet", s)
print(s)
print(m)
print("")
# Why does this work?
# The word "will" is included in the result, even if the pattern does not define it.
# The pattern should break when it does not encounter "stare" after "rabbit."
# It works because "will stare" is one verb chunk.
# The "stare" constraint matches the head word of the chunk ("stare"),
# so "will stare" is considered an overspecified version of "stare".
# The same happens with "my feet" and the "rabbit" constraint,
# which matches the overspecified chunk "the big white rabbit".
p = Pattern.fromstring("rabbit stare at feet", s)
p.strict = True # Now it matches only what the pattern explicitly defines (=no match).
m = p.search(s)
print(m)
print("")
# Sentence chunks can be matched by tag (e.g. NP, VP, ADJP).
# The pattern below matches anything from
# "the rabbit gnaws at your fingers" to
# "the white rabbit looks at the carrots":
p = Pattern.fromstring("rabbit VP at NP", s)
m = p.search(s)
print(m)
print("")
if m:
    for w in m[0].words:
        # Show each matched word next to the constraint that captured it.
        print("%s\t=> %s" % (w, m[0].constraint(w)))
print("")
print("-------------------------------------------------------------")
# Finally, constraints can also include regular expressions.
# To include them we need to use the full syntax instead of the search() function:
import re
r = re.compile(r"[0-9|\.]+") # all numbers
p = Pattern()
p.sequence.append(Constraint(words=[r]))
p.sequence.append(Constraint(tags=["NN*"]))
s = Sentence(parse("I have 9.5 rabbits."))
print(s)
print(p.search(s))
print("")
|
#############################################
# This file is part of WTFramework.
#
# WTFramework is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# WTFramework is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with WTFramework. If not, see <http://www.gnu.org/licenses/>.
##########################################################################
import sys
from unittest.case import _ExpectedFailure, _UnexpectedSuccess, SkipTest
import warnings
from six import u
import unittest2
class WatchedTestCase(unittest2.TestCase):
'''
This test case extends the unittest.TestCase to add support for
registering TestWatchers for listening on TestEvents.
'''
    def __init__(self, *args, **kwargs):
        """Initialise the watcher list before delegating to the base
        TestCase constructor."""
        self.__wtf_test_watchers__ = []
        super(WatchedTestCase, self).__init__(*args, **kwargs)
    # '_' prefix is added to hide it from nosetest
def _register_watcher(self, watcher, position=-1):
"""
Register a test watcher.
Args:
watcher: A test watcher to register.
Kwargs:
position: position in execution queue to insert this watcher.
"""
self.__wtf_test_watchers__.insert(position, watcher)
    # '_' prefix is added to hide it from nosetest
def _unregister_watcher(self, watcher):
""""
Unregister a test watcher.
Args:
watcher : Reference to TestWatcher to unregister.
"""
self.__wtf_test_watchers__.remove(watcher)
def get_log(self):
"""
Get a log of events fired.
Returns:
list - list of string names of events fired.
"""
log = []
for watcher in self.__wtf_test_watchers__:
log = watcher.get_log() + log
return log
def run(self, result=None):
"""
Overriding the run() method to insert calls to our TestWatcher call-backs.
Most of this method is a copy of the unittest.TestCase.run() method source.
Kwargs:
result: Test | Result object.
"""
orig_result = result
if result is None:
| result = self.defaultTestResult()
startTestRun = getattr(result, 'startTestRun', None)
if startTestRun is not None:
startTestRun()
# Track if clean up was run, so we can run clean up if setup failed.
did_tear_down_execute = False
self._resultForDoCleanups = result
result.startTest(self)
testMethod = getattr(self, self._testMethodName)
if (getattr(self.__class__, "__unittest_skip__", False) or
getattr(testMethod, "__unittest_skip__", False)):
# If the class or method was skipped.
try:
skip_why = (getattr(self.__class__, '__unittest_skip_why__', '')
or getattr(testMethod, '__unittest_skip_why__', ''))
self._addSkip(result, skip_why)
finally:
result.stopTest(self)
return
try:
success = False
try:
# Run our test watcher actions.
for test_watcher in self.__wtf_test_watchers__:
test_watcher.before_setup(self, result)
# Run test setup.
self.setUp()
except SkipTest, e:
self._addSkip(result, str(e))
except Exception:
result.addError(self, sys.exc_info())
else:
try:
# Run our test watcher actions.
for test_watcher in self.__wtf_test_watchers__:
test_watcher.before_test(self, result)
# Run our test
testMethod()
# Run our test watcher post test actions.
for test_watcher in self.__wtf_test_watchers__:
test_watcher.on_test_pass(self, result)
except self.failureException as e:
result.addFailure(self, sys.exc_info())
# Run our test watcher on fail actions.
for test_watcher in self.__wtf_test_watchers__:
test_watcher.on_test_failure(self, result, e)
except _ExpectedFailure, e:
addExpectedFailure = getattr(
result, 'addExpectedFailure', None)
if addExpectedFailure is not None:
addExpectedFailure(self, e.exc_info)
else:
warnings.warn(u("Use of a TestResult without an addExpectedFailure method is deprecated"),
DeprecationWarning)
result.addSuccess(self)
except _UnexpectedSuccess:
addUnexpectedSuccess = getattr(
result, 'addUnexpectedSuccess', None)
if addUnexpectedSuccess is not None:
addUnexpectedSuccess(self)
else:
warnings.warn(u("Use of a TestResult without an addUnexpectedSuccess method is deprecated"),
DeprecationWarning)
result.addFailure(self, sys.exc_info())
except SkipTest, e:
self._addSkip(result, str(e))
except Exception as e:
result.addError(self, sys.exc_info())
# Run our test watcher on error actions.
for test_watcher in self.__wtf_test_watchers__:
test_watcher.on_test_error(self, result, e)
else:
success = True
try:
did_tear_down_execute = True
# Run our test watcher after test actions.
for test_watcher in self.__wtf_test_watchers__:
test_watcher.after_test(self, result)
# Do tear down.
self.tearDown()
except Exception:
result.addError(self, sys.exc_info())
success = False
finally: # Run our test watcher actions for after tear down..
for test_watcher in self.__wtf_test_watchers__:
test_watcher.after_teardown(self, result)
cleanUpSuccess = self.doCleanups()
success = success and cleanUpSuccess
if success:
result.addSuccess(self)
finally:
# Execute tear down if it did not get executed.
if not did_tear_down_execute:
# Run our test watcher after test actions.
try:
for test_watcher in self.__wtf_test_watchers__:
test_watcher.after_test(self, result)
self.tearDown()
except:
# do nothing, test case would already failed and failure is
# already handled.
pass
finally: # Run our test watcher actions for after tear down..
for test_watcher in self.__wtf_test_watchers__:
test_watcher.after_teardown(self, result)
# Remove test watchers. For some strange reason these apply to all test
# cases, not just the currently running one. So we remove them
# here.
self.__wtf_test_watchers__ = []
result.stopTest(self)
if orig_result is None:
stopTestRun = getattr(result, 'stopTestRun' |
_ | _author__ = 'E | lmira'
|
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v2.0. If a copy of the MPL was not distributed with this
# file, you can obtain one at http://mozilla.org/MPL/2.0/.
import mimetypes
from selenium import webdriver
import selenium.common.exceptions
class WebDriver:
    """Use a Selenium web driver to ease login, navigation, and access of
    HTTPS sites.

    WARNING: Always use a dummy Firefox profile for scraping, never
    your main profile! This avoids possibility of corrupting your main
    profile.
    """
    def __init__(self, firefox_profile,
                 download_path=None, download_types=None):
        """Start Firefox with the given profile.

        Args:
            firefox_profile: path to the (dummy!) Firefox profile.
            download_path: directory to store downloads in; when given,
                download preferences are configured before launch.
            download_types: MIME types (e.g. 'text/csv') to download
                without prompting.
        """
        self.fp = webdriver.FirefoxProfile(firefox_profile)
        if download_path:
            self._configure_downloads(download_path, download_types)
        self.driver = webdriver.Firefox(self.fp)
        self.driver.set_page_load_timeout(5)
    def _configure_downloads(self, path, types):
        """WARNING: Changes profile settings.

        Examples of types include audio/mpeg and text/csv. See Firefox
        options for the complete list."""
        self.fp.set_preference('media.play-stand-alone', False)
        self.fp.set_preference('browser.download.folderList', 2)
        self.fp.set_preference('browser.download.manager.showWhenStarting', False)
        self.fp.set_preference('browser.download.dir', path)
        # BUG FIX: the raw Python list was previously stored in the pref;
        # Firefox expects a comma-separated string of MIME types. Guard
        # against types being None (download_path given without types).
        self.fp.set_preference('browser.helperApps.neverAsk.saveToDisk',
                               ', '.join(types or ()))
    def get_html(self, url):
        """Load *url* and return the rendered page source."""
        self.driver.get(url)
        html_source = self.driver.page_source
        return html_source
    def __getattr__(self, name):
        '''Forward undefined fields to underlying driver.'''
        return getattr(self.driver, name)
|
#!/usr/bin/env python3
from .proc_base import ProcBase
class ProcMaps(ProcBase):
    '''Object represents the /proc/[pid]/maps file.'''
    def __init__(self, pid):
        '''
        Delegate to the base class constructor, which reads the file
        into self.content. The maps file is already ASCII printable,
        so no further parsing is needed.
        '''
        path = '/proc/{0}/maps'.format(pid)
        super().__init__(path)
    def dump(self):
        '''Print information gathered to stdout.'''
        super().dump()  # file header first
        if not self.content:
            return
        print(self.content)
|
#!/usr/bin/env python3
# noinspection PyUnresolvedReferences
import init_django
from django.db import transaction
from common.utils import utcnow
from main.archive import DataArchiver
from main.delete import DataDeleter
from main.models import Ranking
from main.purge import purge_player_data
from tasks.base import Command
class Main(Command):
    """Command that deletes broken rankings and all data linked to them.

    Without --delete this is a dry run; with --delete the removals,
    archiving and purging actually happen.
    """
    def __init__(self):
        super().__init__("Delete ranking and all cache data and ranking data linked to it, used for broken "
                         "rankings.",
                         pid_file=True, stoppable=False)
        self.add_argument('--delete', dest="delete", action='store_true', default=False,
                          help="If this is not set, deletes a dry run will be performed instead.")
        self.add_argument('--keep-rankings', '-r', dest="keep_rankings", default=None,
                          help="Comma separated list of rankings to keep.")
    def run(self, args, logger):
        # NOTE(review): keep_rankings defaults to None, so running without
        # --keep-rankings raises AttributeError on .split() -- confirm the
        # flag is meant to be mandatory.
        keep_ids = (int(id) for id in args.keep_rankings.split(","))
        with transaction.atomic():
            # Everything not explicitly kept is scheduled for removal.
            remove_ids = [r.id for r in Ranking.objects.exclude(id__in=keep_ids)]

            data_deleter = DataDeleter(dry_run=not args.delete)
            data_archiver = DataArchiver(utcnow(), remove=True)
            # Remove rankings.
            for remove_id in remove_ids:
                data_deleter.delete_ranking(remove_id)
            # Archive all rankings except the last.
            if args.delete:
                rankings = Ranking.objects.order_by("-id")[1:]
                for ranking in rankings:
                    logger.info(f"archiving ranking {ranking.id}")
                    data_archiver.archive_ranking(ranking, self.check_stop)
            else:
                logger.info("DRY RUN no archiving of rankings")
            # Delete ladders that are no longer needed.
            keep_season_ids = {r.season_id for r in Ranking.objects.all()}
            data_deleter.delete_ladders(tuple(keep_season_ids))
            # Delete cache data that is unused.
            data_deleter.agressive_delete_cache_data()
            # Purge players and teams.
            if args.delete:
                purge_player_data(check_stop=self.check_stop)
            else:
                logger.info("DRY RUN no purge player data")
        return 0
if __name__ == '__main__':
    # Command instances are callable: construct, then invoke to run.
    Main()()
|
# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class RReprex(RPackage):
    """Convenience wrapper that uses the 'rmarkdown' package to render small
    snippets of code to target formats that include both code and output.
    The goal is to encourage the sharing of small, reproducible, and
    runnable examples on code-oriented websites, such as
    <http://stackoverflow.com> and <https://github.com>, or in email.
    'reprex' also extracts clean, runnable R code from various common
    formats, such as copy/paste from an R session."""
    homepage = "https://github.com/jennybc/reprex"
    url      = "https://cloud.r-project.org/src/contrib/reprex_0.1.1.tar.gz"
    # Older releases move to the CRAN archive; list_url lets Spack find them.
    list_url = "https://cloud.r-project.org/src/contrib/Archive/reprex"
    version('0.3.0', sha256='203c2ae6343f6ff887e7a5a3f5d20bae465f6e8d9745c982479f5385f4effb6c')
    version('0.2.1', sha256='5d234ddfbcadc5a5194a58eb88973c51581e7e2e231f146974af8f42747b45f3')
    version('0.1.1', sha256='919ae93039b2d8fb8eace98da9376c031d734d9e75c237efb24d047f35b5ba4b')
    # R version and package requirements differ per reprex release ('when=').
    depends_on('r@3.0.2:', when='@:0.1.2', type=('build', 'run'))
    depends_on('r@3.1:', when='@0.2.0:', type=('build', 'run'))
    depends_on('r-callr@2.0.0:', type=('build', 'run'))
    depends_on('r-clipr@0.4.0:', type=('build', 'run'))
    depends_on('r-knitr', when='@:0.1.9', type=('build', 'run'))
    depends_on('r-rmarkdown', type=('build', 'run'))
    depends_on('r-whisker', type=('build', 'run'))
    depends_on('r-rlang', when='@0.2.0:', type=('build', 'run'))
    depends_on('r-withr', when='@0.2.0:', type=('build', 'run'))
    depends_on('r-fs', when='@0.2.1:', type=('build', 'run'))
    depends_on('pandoc@1.12.3:')
|
xt_page()
self.assertEqual(cursor, b64encode(self._END))
self.assertTrue(more_results)
self.assertTrue(iterator._more_results)
self.assertEqual(iterator._end_cursor, None)
self.assertEqual(b64decode(iterator._start_cursor), self._END)
self.assertEqual(len(entities), 1)
self.assertEqual(entities[0].key.path,
[{'kind': self._KIND, 'id': self._ID}])
self.assertEqual(entities[0]['foo'], u'Foo')
qpb = _pb_from_query(query)
qpb.offset = 0
qpb.start_cursor = b64decode(self._START)
qpb.end_cursor = b64decode(self._END)
EXPECTED = {
'dataset_id': self._DATASET,
'query_pb': qpb,
'namespace': self._NAMESPACE,
'transaction_id': None,
}
self.assertEqual(connection._called_with, [EXPECTED]) |
def test_next_page_w_cursors_w_bogus_more(self):
conne | ction = _Connection()
query = _Query(self._DATASET, self._KIND, self._NAMESPACE)
self._addQueryResults(connection, cursor=self._END, more=True)
epb, cursor, _ = connection._results.pop()
connection._results.append((epb, cursor, 4)) # invalid enum
iterator = self._makeOne(query, connection)
self.assertRaises(ValueError, iterator.next_page)
    def test___iter___no_more(self):
        """Iteration drains a single page when the backend has no more."""
        from gcloud.datastore.query import _pb_from_query
        connection = _Connection()
        query = _Query(self._DATASET, self._KIND, self._NAMESPACE)
        self._addQueryResults(connection)
        iterator = self._makeOne(query, connection)
        entities = list(iterator)
        self.assertFalse(iterator._more_results)
        self.assertEqual(len(entities), 1)
        self.assertEqual(entities[0].key.path,
                         [{'kind': self._KIND, 'id': self._ID}])
        self.assertEqual(entities[0]['foo'], u'Foo')
        # Exactly one backend request, with no cursors set.
        qpb = _pb_from_query(query)
        qpb.offset = 0
        EXPECTED = {
            'dataset_id': self._DATASET,
            'query_pb': qpb,
            'namespace': self._NAMESPACE,
            'transaction_id': None,
        }
        self.assertEqual(connection._called_with, [EXPECTED])
    def test___iter___w_more(self):
        """Iteration transparently fetches the second page via the cursor."""
        from gcloud.datastore.query import _pb_from_query
        connection = _Connection()
        query = _Query(self._DATASET, self._KIND, self._NAMESPACE)
        self._addQueryResults(connection, cursor=self._END, more=True)
        self._addQueryResults(connection)
        iterator = self._makeOne(query, connection)
        entities = list(iterator)
        self.assertFalse(iterator._more_results)
        self.assertEqual(len(entities), 2)
        for entity in entities:
            self.assertEqual(
                entity.key.path,
                [{'kind': self._KIND, 'id': self._ID}])
        self.assertEqual(entities[1]['foo'], u'Foo')
        qpb1 = _pb_from_query(query)
        qpb1.offset = 0
        qpb2 = _pb_from_query(query)
        qpb2.offset = 0
        # The second request must resume from the first page's end cursor.
        qpb2.start_cursor = self._END
        EXPECTED1 = {
            'dataset_id': self._DATASET,
            'query_pb': qpb1,
            'namespace': self._NAMESPACE,
            'transaction_id': None,
        }
        EXPECTED2 = {
            'dataset_id': self._DATASET,
            'query_pb': qpb2,
            'namespace': self._NAMESPACE,
            'transaction_id': None,
        }
        self.assertEqual(len(connection._called_with), 2)
        self.assertEqual(connection._called_with[0], EXPECTED1)
        self.assertEqual(connection._called_with[1], EXPECTED2)
class Test__pb_from_query(unittest2.TestCase):
    """Tests for the helper converting a Query object to its protobuf."""
    def _callFUT(self, query):
        # Import inside the helper so import errors surface as test failures.
        from gcloud.datastore.query import _pb_from_query
        return _pb_from_query(query)
    def test_empty(self):
        """A default Query maps to an all-empty protobuf."""
        from gcloud.datastore import _datastore_v1_pb2 as datastore_pb
        pb = self._callFUT(_Query())
        self.assertEqual(list(pb.projection), [])
        self.assertEqual(list(pb.kind), [])
        self.assertEqual(list(pb.order), [])
        self.assertEqual(list(pb.group_by), [])
        self.assertEqual(pb.filter.property_filter.property.name, '')
        cfilter = pb.filter.composite_filter
        self.assertEqual(cfilter.operator, datastore_pb.CompositeFilter.AND)
        self.assertEqual(list(cfilter.filter), [])
        self.assertEqual(pb.start_cursor, b'')
        self.assertEqual(pb.end_cursor, b'')
        self.assertEqual(pb.limit, 0)
        self.assertEqual(pb.offset, 0)
    def test_projection(self):
        pb = self._callFUT(_Query(projection=['a', 'b', 'c']))
        self.assertEqual([item.property.name for item in pb.projection],
                         ['a', 'b', 'c'])
    def test_kind(self):
        pb = self._callFUT(_Query(kind='KIND'))
        self.assertEqual([item.name for item in pb.kind], ['KIND'])
    def test_ancestor(self):
        """An ancestor key becomes a composite __key__ filter entry."""
        from gcloud.datastore import _datastore_v1_pb2 as datastore_pb
        from gcloud.datastore.key import Key
        from gcloud.datastore.helpers import _prepare_key_for_request
        ancestor = Key('Ancestor', 123, dataset_id='DATASET')
        pb = self._callFUT(_Query(ancestor=ancestor))
        cfilter = pb.filter.composite_filter
        self.assertEqual(cfilter.operator, datastore_pb.CompositeFilter.AND)
        self.assertEqual(len(cfilter.filter), 1)
        pfilter = cfilter.filter[0].property_filter
        self.assertEqual(pfilter.property.name, '__key__')
        ancestor_pb = _prepare_key_for_request(ancestor.to_protobuf())
        self.assertEqual(pfilter.value.key_value, ancestor_pb)
    def test_filter(self):
        """A property filter carries the name and value through."""
        from gcloud.datastore import _datastore_v1_pb2 as datastore_pb
        query = _Query(filters=[('name', '=', u'John')])
        query.OPERATORS = {
            '=': datastore_pb.PropertyFilter.EQUAL,
        }
        pb = self._callFUT(query)
        cfilter = pb.filter.composite_filter
        self.assertEqual(cfilter.operator, datastore_pb.CompositeFilter.AND)
        self.assertEqual(len(cfilter.filter), 1)
        pfilter = cfilter.filter[0].property_filter
        self.assertEqual(pfilter.property.name, 'name')
        self.assertEqual(pfilter.value.string_value, u'John')
    def test_filter_key(self):
        """Filtering on __key__ serializes the key value into the pb."""
        from gcloud.datastore import _datastore_v1_pb2 as datastore_pb
        from gcloud.datastore.key import Key
        from gcloud.datastore.helpers import _prepare_key_for_request
        key = Key('Kind', 123, dataset_id='DATASET')
        query = _Query(filters=[('__key__', '=', key)])
        query.OPERATORS = {
            '=': datastore_pb.PropertyFilter.EQUAL,
        }
        pb = self._callFUT(query)
        cfilter = pb.filter.composite_filter
        self.assertEqual(cfilter.operator, datastore_pb.CompositeFilter.AND)
        self.assertEqual(len(cfilter.filter), 1)
        pfilter = cfilter.filter[0].property_filter
        self.assertEqual(pfilter.property.name, '__key__')
        key_pb = _prepare_key_for_request(key.to_protobuf())
        self.assertEqual(pfilter.value.key_value, key_pb)
    def test_order(self):
        """A '-' prefix flips the sort direction to DESCENDING."""
        from gcloud.datastore import _datastore_v1_pb2 as datastore_pb
        pb = self._callFUT(_Query(order=['a', '-b', 'c']))
        self.assertEqual([item.property.name for item in pb.order],
                         ['a', 'b', 'c'])
        self.assertEqual([item.direction for item in pb.order],
                         [datastore_pb.PropertyOrder.ASCENDING,
                          datastore_pb.PropertyOrder.DESCENDING,
                          datastore_pb.PropertyOrder.ASCENDING])
    def test_group_by(self):
        pb = self._callFUT(_Query(group_by=['a', 'b', 'c']))
        self.assertEqual([item.name for item in pb.group_by],
                         ['a', 'b', 'c'])
class _Query(object):
def __init__(self,
dataset_id=None,
kind=None,
namespace=None,
ancestor=None,
filters=(),
projection=(),
order=(),
group_by=()):
self.dataset_id = dataset_id
self.kind = |
# encoding: utf-8
from django.conf.urls import patterns, include, url
from django.views.generic import TemplateView
from django.http import HttpResponse
from django.shortcuts import | render
from django.contrib import admin
from data import get_ingredients, get_cocktails
admin.autodiscover()
def ingredients(request):
    """Render the ingredient list, optionally filtered by ?filtering_string=."""
    # NOTE(review): the missing-parameter default is [] rather than "" --
    # confirm get_ingredients accepts a list as well as a string.
    return render(request, "ingredients.haml", {"ingredients": get_ingredients(request.GET.get("filtering_string", []))})
def cocktails(request):
    """Render the cocktails that can be made with the selected ingredients.

    Every GET parameter name is treated as a selected ingredient. When no
    cocktail matches, a French "nothing can be made" message is returned.
    (A leftover debug ``print request.GET.keys()`` was removed: it wrote
    to stdout on every request.)
    """
    cocktails = get_cocktails(request.GET.keys())
    if cocktails:
        return render(request, "cocktails.haml", {"cocktails": cocktails})
    else:
        return HttpResponse(u"<p>Aucun cocktails ne peut être fait avec ces ingrédients</p>")
# NOTE(review): django.conf.urls.patterns() is deprecated (removed in
# Django 1.10); this URLconf targets an older Django release.
urlpatterns = patterns('',
    url(r'^$', TemplateView.as_view(template_name="home.html"), name='home'),
    url(r'^ingredients/$', ingredients, name='ingredients'),
    url(r'^cocktails/$', cocktails, name='cocktails'),
    # url(r'^cocktails/', include('cocktails.foo.urls')),
    url(r'^admin/', include(admin.site.urls)),
)
|
#!/usr/bin/env python
# Repeatedly prompt until the input contains only digit characters.
digits = {'0', '1', '2', '3', '4', '5', '6', '7', '8', '9'}
found = True
while found:
    input_string = input('Please give me some digits... \n')
    found = False
    for character in input_string:
        if character not in digits:
            # we have a non digit!
            print('Error, you gave me non digits')
            found = True
            break
    else:
        # for/else runs only when the loop finished without break, i.e.
        # every character was a digit. BUG FIX: previously this success
        # message printed even after invalid input.
        print('starting real work on', input_string)
|
# Copyright 2011-2016 Josh Kearney
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by a | pplicable law or agreed to in writing, software
# distributed under the Licens | e is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Pyhole Version Handling"""
import os
import sys
__VERSION__ = "0.8.9"
def current_git_hash():
    """Return the first five characters of the current git hash.

    Checks .git/refs/heads/master relative to the script location, then
    the working directory, then its parent; returns None when no
    candidate file exists.
    """
    git_file = ".git/refs/heads/master"
    candidates = (
        os.path.normpath(os.path.join(os.path.abspath(sys.argv[0]),
                                      os.pardir, os.pardir, git_file)),
        os.getcwd() + "/" + git_file,
        os.getcwd() + "/../" + git_file,
    )
    for git_path in candidates:
        if os.path.exists(git_path):
            with open(git_path, "r") as git:
                return git.read()[0:5]
    return None
def version_string():
    """Return the full human-readable version string."""
    git_hash = current_git_hash()
    base = "pyhole v%s" % __VERSION__
    if git_hash:
        base = "%s (%s)" % (base, git_hash)
    return base + " - https://github.com/jk0/pyhole"
def version_hash():
    """Return the current version with the git hash appended.

    Falls back to the bare version when no git checkout is found,
    instead of the previous literal "<version>-None" string (matching
    version_string(), which already handles the missing-hash case).
    """
    git_hash = current_git_hash()
    if not git_hash:
        return __VERSION__
    return "%s-%s" % (__VERSION__, git_hash)
def version():
    """Return the bare version string (no git hash)."""
    return __VERSION__
|
import nltk
from read_lineByLine_toList import read_line_by_line
from find_consonants import count_consonants
import random
male_list = read_line_by_line('datasets/male.csv')
female_list = read_line_by_line('datasets/female.csv')
# ambil kata pertama dari kalimat
def get_first_word(words):
    """Return the first word of a multi-word string.

    Strings containing one word or less are returned unchanged.
    """
    parts = words.split()
    if len(parts) <= 1:
        # zero or one word: hand back the original string as-is
        return words
    return parts[0]
def gender_features_la | st_two(word):
word = get_first_word(word) # ambil kata pertama
return {'last_two': word[-2:]} # return huruf 2 kata terakhir
# Label every name with its gender, shuffle, and featurize.
labeled_names = (
    [(name, 'male') for name in male_list] +
    [(name, 'female') for name in female_list]
)
random.shuffle(labeled_names)
featuresets = [(gender_features_last_two(n), gender) for (n, gender) in labeled_names]
# Hold out the first 500 shuffled examples for testing; train on the rest.
train_set = featuresets[500:]
test_set = featuresets[:500]
classifier = nltk.NaiveBayesClassifier.train(train_set)
print ("ACCURACY IS: ")
print (nltk.classify.accuracy(classifier, test_set))
classifier.show_most_informative_features()
test_name = "aldey Wahyu Putra"
# BUG FIX: str.capitalize() returns a new string; the result was
# previously discarded, leaving test_name unchanged.
test_name = test_name.capitalize()
print(test_name + " is " + classifier.classify(gender_features_last_two(test_name)))
|
#!/usr/bin/env python
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of t | he GNU General Public L | icense as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
# ansible-vault is a script that encrypts/decrypts YAML files. See
# https://docs.ansible.com/playbooks_vault.html for more details.
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import sys
import time
import os
def main(args):
    """Append a 'faux editor' marker line to the file named in args[1].

    Args:
        args: argv-style list; args[1] is the path of the file to edit.

    Returns:
        0 on success (used as the script's exit code).
    """
    path = os.path.abspath(args[1])
    # 'with' guarantees the handle is closed even if reading/writing
    # fails (it was previously leaked on error).
    with open(path, 'r+') as fo:
        content = fo.readlines()
        content.append('faux editor added at %s\n' % time.time())
        fo.seek(0)
        fo.write(''.join(content))
    return 0

if __name__ == '__main__':
    sys.exit(main(sys.argv[:]))
|
ort variable_scope
resnet_arg_scope = resnet_utils.resnet_arg_scope
@add_arg_scope
def bottleneck(inputs,
               depth,
               depth_bottleneck,
               stride,
               rate=1,
               outputs_collections=None,
               scope=None):
  """Bottleneck residual unit variant with BN before convolutions.

  This is the full preactivation residual unit variant proposed in [2]. See
  Fig. 1(b) of [2] for its definition. Note that we use here the bottleneck
  variant which has an extra bottleneck layer.

  When putting together two consecutive ResNet blocks that use this unit, one
  should use stride = 2 in the last unit of the first block.

  Args:
    inputs: A tensor of size [batch, height, width, channels].
    depth: The depth of the ResNet unit output.
    depth_bottleneck: The depth of the bottleneck layers.
    stride: The ResNet unit's stride. Determines the amount of downsampling of
      the units output compared to its input.
    rate: An integer, rate for atrous convolution.
    outputs_collections: Collection to add the ResNet unit output.
    scope: Optional variable_scope.

  Returns:
    The ResNet unit's output.
  """
  with variable_scope.variable_scope(scope, 'bottleneck_v2', [inputs]) as sc:
    depth_in = utils.last_dimension(inputs.get_shape(), min_rank=4)
    # Pre-activation (v2): BN + ReLU applied before the convolutions.
    preact = layers.batch_norm(
        inputs, activation_fn=nn_ops.relu, scope='preact')
    if depth == depth_in:
      # Same output depth: identity shortcut (subsampled when stride > 1).
      shortcut = resnet_utils.subsample(inputs, stride, 'shortcut')
    else:
      # Depth changes: project the pre-activated input with a 1x1 conv
      # (no BN/ReLU on the shortcut path).
      shortcut = layers_lib.conv2d(
          preact,
          depth, [1, 1],
          stride=stride,
          normalizer_fn=None,
          activation_fn=None,
          scope='shortcut')
    residual = layers_lib.conv2d(
        preact, depth_bottleneck, [1, 1], stride=1, scope='conv1')
    residual = resnet_utils.conv2d_same(
        residual, depth_bottleneck, 3, stride, rate=rate, scope='conv2')
    # Final 1x1 conv restores the output depth; no normalizer/activation so
    # the unit ends with a plain addition.
    residual = layers_lib.conv2d(
        residual,
        depth, [1, 1],
        stride=1,
        normalizer_fn=None,
        activation_fn=None,
        scope='conv3')
    output = shortcut + residual
    return utils.collect_named_outputs(outputs_collections, sc.name, output)
def resnet_v2(inputs,
              blocks,
              num_classes=None,
              is_training=True,
              global_pool=True,
              output_stride=None,
              include_root_block=True,
              reuse=None,
              scope=None):
  """Generator for v2 (preactivation) ResNet models.
  This function generates a family of ResNet v2 models. See the resnet_v2_*()
  methods for specific model instantiations, obtained by selecting different
  block instantiations that produce ResNets of various depths.
  Training for image classification on Imagenet is usually done with [224, 224]
  inputs, resulting in [7, 7] feature maps at the output of the last ResNet
  block for the ResNets defined in [1] that have nominal stride equal to 32.
  However, for dense prediction tasks we advise that one uses inputs with
  spatial dimensions that are multiples of 32 plus 1, e.g., [321, 321]. In
  this case the feature maps at the ResNet output will have spatial shape
  [(height - 1) / output_stride + 1, (width - 1) / output_stride + 1]
  and corners exactly aligned with the input image corners, which greatly
  facilitates alignment of the features to the image. Using as input [225, 225]
  images results in [8, 8] feature maps at the output of the last ResNet block.
  For dense prediction tasks, the ResNet needs to run in fully-convolutional
  (FCN) mode and global_pool needs to be set to False. The ResNets in [1, 2] all
  have nominal stride equal to 32 and a good choice in FCN mode is to use
  output_stride=16 in order to increase the density of the computed features at
  small computational and memory overhead, cf. http://arxiv.org/abs/1606.00915.
  Args:
    inputs: A tensor of size [batch, height_in, width_in, channels].
    blocks: A list of length equal to the number of ResNet blocks. Each element
      is a resnet_utils.Block object describing the units in the block.
    num_classes: Number of predicted classes for classification tasks. If None
      we return the features before the logit layer.
    is_training: whether batch_norm layers are in training mode.
    global_pool: If True, we perform global average pooling before computing the
      logits. Set to True for image classification, False for dense prediction.
    output_stride: If None, then the output will be computed at the nominal
      network stride. If output_stride is not None, it specifies the requested
      ratio of input to output spatial resolution.
    include_root_block: If True, include the initial convolution followed by
      max-pooling, if False excludes it. If excluded, `inputs` should be the
      results of an activation-less convolution.
    reuse: whether or not the network and its variables should be reused. To be
      able to reuse 'scope' must be given.
    scope: Optional variable_scope.
  Returns:
    net: A rank-4 tensor of size [batch, height_out, width_out, channels_out].
      If global_pool is False, then height_out and width_out are reduced by a
      factor of output_stride compared to the respective height_in and width_in,
      else both height_out and width_out equal one. If num_classes is None, then
      net is the output of the last ResNet block, potentially after global
      average pooling. If num_classes is not None, net contains the pre-softmax
      activations.
    end_points: A dictionary from components of the network to the corresponding
      activation.
  Raises:
    ValueError: If the target output_stride is not valid.
  """
  with variable_scope.variable_scope(
      scope, 'resnet_v2', [inputs], reuse=reuse) as sc:
    # All intermediate activations are collected here and exposed as
    # end_points at the bottom of the function.
    end_points_collection = sc.original_name_scope + '_end_points'
    with arg_scope(
        [layers_lib.conv2d, bottleneck, resnet_utils.stack_blocks_dense],
        outputs_collections=end_points_collection):
      with arg_scope([layers.batch_norm], is_training=is_training):
        net = inputs
        if include_root_block:
          if output_stride is not None:
            if output_stride % 4 != 0:
              raise ValueError('The output_stride needs to be a multiple of 4.')
            # The root block already downsamples by 4 (stride-2 conv + pool),
            # so the remaining blocks only need output_stride / 4.
            output_stride /= 4
          # We do not include batch normalization or activation functions in
          # conv1 because the first ResNet unit will perform these. Cf.
          # Appendix of [2].
          with arg_scope(
              [layers_lib.conv2d], activation_fn=None, normalizer_fn=None):
            net = resnet_utils.conv2d_same(net, 64, 7, stride=2, scope='conv1')
          net = layers.max_pool2d(net, [3, 3], stride=2, scope='pool1')
        net = resnet_utils.stack_blocks_dense(net, blocks, output_stride)
        # This is needed because the pre-activation variant does not have batch
        # normalization or activation functions in the residual unit output. See
        # Appendix of [2].
        net = layers.batch_norm(
            net, activation_fn=nn_ops.relu, scope='postnorm')
        if global_pool:
          # Global average pooling.
          net = math_ops.reduce_mean(net, [1, 2], name='pool5', keep_dims=True)
        if num_classes is not None:
          # 1x1 conv acting as the fully-connected logits layer.
          net = layers_lib.conv2d(
              net,
              num_classes, [1, 1],
              activation_fn=None,
              normalizer_fn=None,
              scope='logits')
        # Convert end_points_collection into a dictionary of end_points.
        end_points = utils.convert_collection_to_dict(end_points_collection)
        if num_classes is not None:
          end_points['predictions'] = layers.softmax(net, scope='predictions')
        return net, end_points
resnet_v2.default_image_size = 224
def resnet_v2_block(scope, base_depth, num_units, stride):
"""Helper function for creating a resnet_v2 bottleneck block.
Args:
scope: The scope of the block.
base_depth: The depth of the bottleneck layer for each unit.
num_units: The number of units in the block.
stride: The stride of the block, imple |
# Copyright 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed | under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" | BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from tempest.api.volume import base
from tempest import config
from tempest import test
CONF = config.CONF
class VolumesV2ExtendTest(base.BaseVolumeTest):
    """Exercises growing a volume through the v2 volumes API."""

    @classmethod
    def setup_clients(cls):
        # Expose the volumes client under the short name the tests use.
        super(VolumesV2ExtendTest, cls).setup_clients()
        cls.client = cls.volumes_client

    @test.attr(type='gate')
    def test_volume_extend(self):
        # Extend Volume Test.
        # Create a fresh volume and ask for one extra gigabyte.
        self.volume = self.create_volume()
        new_size = int(self.volume['size']) + 1
        self.client.extend_volume(self.volume['id'], new_size)
        self.client.wait_for_volume_status(self.volume['id'], 'available')
        # Re-fetch the volume and confirm the size actually changed.
        refreshed = self.client.get_volume(self.volume['id'])
        self.assertEqual(int(refreshed['size']), new_size)
class VolumesV1ExtendTest(VolumesV2ExtendTest):
    # Re-runs the inherited extend tests against the v1 volumes API.
    _api_version = 1
|
# -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import unittest
from cryptography.fernet import Fernet
from airflow import settings
from airflow.models import Variable, crypto
from tests.test_utils.config import conf_vars
class TestVariable(unittest.TestCase):
    """Variable storage round-trips with and without Fernet encryption."""

    def setUp(self):
        # Force the crypto module to re-read the fernet key for each test.
        crypto._fernet = None

    def tearDown(self):
        crypto._fernet = None

    @conf_vars({('core', 'fernet_key'): ''})
    def test_variable_no_encryption(self):
        """With no fernet key configured, the value is stored in clear."""
        Variable.set('key', 'value')
        stored = (
            settings.Session()
            .query(Variable)
            .filter(Variable.key == 'key')
            .one()
        )
        self.assertFalse(stored.is_encrypted)
        self.assertEqual(stored.val, 'value')

    @conf_vars({('core', 'fernet_key'): Fernet.generate_key().decode()})
    def test_variable_with_encryption(self):
        """With a fernet key configured, the value round-trips encrypted."""
        Variable.set('key', 'value')
        stored = (
            settings.Session()
            .query(Variable)
            .filter(Variable.key == 'key')
            .one()
        )
        self.assertTrue(stored.is_encrypted)
        self.assertEqual(stored.val, 'value')

    def test_var_with_encryption_rotate_fernet_key(self):
        """Rotating the fernet key re-encrypts an existing variable."""
        old_key = Fernet.generate_key()
        new_key = Fernet.generate_key()
        with conf_vars({('core', 'fernet_key'): old_key.decode()}):
            Variable.set('key', 'value')
            stored = (
                settings.Session()
                .query(Variable)
                .filter(Variable.key == 'key')
                .one()
            )
            self.assertTrue(stored.is_encrypted)
            self.assertEqual(stored.val, 'value')
            self.assertEqual(Fernet(old_key).decrypt(stored._val.encode()), b'value')
        # Old ciphertext must still decrypt once the new key is primary.
        with conf_vars({('core', 'fernet_key'): ','.join([new_key.decode(), old_key.decode()])}):
            crypto._fernet = None
            self.assertEqual(stored.val, 'value')
            # Re-encrypt under the new primary key and verify.
            stored.rotate_fernet_key()
            self.assertTrue(stored.is_encrypted)
            self.assertEqual(stored.val, 'value')
            self.assertEqual(Fernet(new_key).decrypt(stored._val.encode()), b'value')
|
#!/usr/bin/env python
#-*- coding: utf-8 -*-
import sys, getopt, re, os
try:
from splinter import Browser
except:
print "Please install Splinter: http://splinter.readthedocs.org/en/latest/install.html"
sys.exit();
import getpass
from splinter.request_handler.status_code import HttpResponseError
def main(argv):
email = None
txtopt = None
profile = None
self = None
socks = None
socksPort = None
try:
opts, args = getopt.getopt(argv, "ho:m:p:s:S:P:",["port=","socks=","self=","profile=","output=","mail=","help"])
except:
print "Use --help for help"
sys.exit(2)
for opt, arg in opts:
if opt in ("-h", "--help"):
print 'Usage %s options \n' % (os.path.basename(__file__))
print ' -h, --help This help'
print ' -m, --mail Your facebook login email'
print ' -o, --output Your output file name'
print ' -p, --profile Profile to capture friends(name after facebook.com/)'
print ' -s, --self Your profile name(name after facebook.com/)'
print ' -S, --socks Socks Proxy Address for Tor use'
print ' -P, --port Port Socks for Tor use'
sys.exit()
elif opt in ("-o","--output"):
txtopt = arg
elif opt in ("-m","--mail"):
email = arg
elif opt in ("-p","--profile"):
profile = arg
elif opt in ("-s","--self"):
self = arg
elif opt in ("-S","--socks"):
socks = arg
elif opt in ("-P","--port"):
socksPort = arg
if not email or not txtopt or not self:
print 'Use --help for help'
sys.exit()
password = getpass.getpass()
if socks and socksProt:
proxy_settings = {
'network.proxy.type':1,
'network.proxy.socks': socks,
'network.proxy.socks_port': socksPort
}
browser = Browser('firefox',profile_preferences=proxy_settings)
else:
browser = Browser()
# with Browser() as browser:
browser.visit('https://m.facebook.com/')
browser.fill("email",email);
browser.fill("pass",password);
browser.find_by_name("login").click()
if browser.is_element_present_by_css('.login_error_box'):
print 'The email and password didn\'t work.'
sys.exit()
try:
fileopt = open(txtopt, 'a')
except:
sys.exit('Unable to open file %s' % txtopt)
if not profile:
browser.find_link_by_text("Profile").click()
print 'Accessing profile at %s\n' % browser.url
browser.find_link_by_text("Friends").click()
print 'Accessing fr | iends at %s\n' % browser.url
else:
url = 'https://m.facebook.com/%s/friends?refid=17' % profile
print 'Accessing profile friends at %s\n' % url
browser.visit(url)
friends = browser.find_by_css('a')
notList = ["/a/mobile/friends/add_friend.php","language.php","/help/","/settings/","/pages/","/bugnub/","/policies/","/logout","/home","/friends","/messages/","/notifications.php","/buddylist.php","/menu/","/photo.php","/mbasic/","%s | "%profile,"%s"%self]
for friend in friends:
if all([x not in friend['href'] for x in notList ]):
fileopt.write('%s\n' % friend['href'])
print '%s' % friend.value
while browser.is_element_present_by_css("#m_more_friends"):
browser.find_by_css('#m_more_friends a').first.click()
friends = browser.find_by_css('a')
for friend in friends:
if all([x not in friend['href'] for x in notList ]):
fileopt.write('%s\n' % friend['href'])
print '%s' % friend.value
if __name__ == "__main__":
    try:
        main(sys.argv[1:])
    except KeyboardInterrupt:
        # Ctrl-C: leave quietly instead of dumping a traceback.
        sys.stdout.write('\nQuit by keyboard interrupt sequence!')
|
== None:
return_id = 0
else:
sock = oerp_destino['sock']
return_id = 0
control_ids = sock.execute(oerp_destino['dbname'],oerp_destino['uid'],oerp_destino['pwd'],\
'oerp_migrator.control','search',[('model_name','=',model)])
if control_ids:
data = sock.execute(oerp_destino['dbname'],oerp_destino['uid'],oerp_destino['pwd'],\
'oerp_migrator.control','read',control_ids,['max_id'])
for data_item in data:
return_id = data_item['max_id']
return return_id
def update_model_id(oerp_destino, model=None, maxId=0):
    """Record the highest migrated record id for *model* on the destination.

    Upserts the oerp_migrator.control row for the model: updates the
    existing row's max_id, or creates the row on the first migration.
    oerp_destino is the connection dict from connect_openerp().
    Always returns None (the original contract).
    """
    if model is None:  # was "== None"; identity test is the correct idiom
        return None
    sock = oerp_destino['sock']
    control_ids = sock.execute(oerp_destino['dbname'], oerp_destino['uid'], oerp_destino['pwd'],
                               'oerp_migrator.control', 'search', [('model_name', '=', model)])
    if control_ids:
        # A control row already exists for this model: update it in place.
        sock.execute(oerp_destino['dbname'], oerp_destino['uid'], oerp_destino['pwd'],
                     'oerp_migrator.control', 'write', control_ids, {'max_id': maxId})
    else:
        # First migration of this model: create the control row.
        sock.execute(oerp_destino['dbname'], oerp_destino['uid'], oerp_destino['pwd'],
                     'oerp_migrator.control', 'create', {'model_name': model, 'max_id': maxId})
    return None
def get_field_type(oerp_origen, model):
    """Describe every field of *model* as read from the origin server.

    Returns {field_name: [ttype, relation, required]} built from
    ir.model.fields.  Exits the process when either argument is missing
    (the hard stop is kept from the original contract; the interactive
    pdb trace that preceded it was debug residue and has been removed).
    """
    if not oerp_origen or not model:
        exit(1)
    fields = ['name', 'ttype', 'relation', 'required']
    model_search = 'ir.model.fields'
    args = [('model', '=', model)]
    sock = oerp_origen['sock']
    field_ids = sock.execute(oerp_origen['dbname'], oerp_origen['uid'], oerp_origen['pwd'], model_search, 'search', args)
    data = sock.execute(oerp_origen['dbname'], oerp_origen['uid'], oerp_origen['pwd'], model_search, 'read', field_ids, fields)
    # Index the field descriptions by field name.
    return dict(
        (item['name'], [item['ttype'], item['relation'], item['required']])
        for item in data
    )
def get_lookup_ids(oerp_destino=None, relation_parm=None, ids_parm=None):
    """Resolve a many2one value from the origin server to a destination id.

    ids_parm is the origin (id, name) pair as returned by an OpenERP read.
    Lookup order:
      1. a destination record with the same display name;
      2. a destination record whose origin_id matches the origin id;
      3. 0 when nothing matches (caller falls back to the raw origin id);
      4. 1 as a last-resort default when the origin_id search itself fails
         (e.g. the destination model lacks an origin_id column).
    Exits the process on missing arguments (kept from the original; the
    interactive pdb trace was debug residue and has been removed).
    """
    if not oerp_destino or not relation_parm or not ids_parm:
        exit(1)
    sock = oerp_destino['sock']
    args = [('name', '=', ids_parm[1])]
    obj_destino_ids = sock.execute(oerp_destino['dbname'], oerp_destino['uid'], oerp_destino['pwd'], relation_parm, 'search', args)
    if obj_destino_ids:
        return obj_destino_ids[0]
    args = [('origin_id', '=', ids_parm[0])]
    try:
        obj_destino_ids = sock.execute(oerp_destino['dbname'], oerp_destino['uid'], oerp_destino['pwd'], relation_parm, 'search', args)
    except Exception:
        # Deliberate best-effort (was a bare except): log and hand back a
        # default id so the migration can proceed.
        logging.error("Problem looking up id for %s. Assigning default value" % (relation_parm))
        return 1
    if obj_destino_ids:
        return obj_destino_ids[0]
    return 0
def read_models(config=None, section=None):
    """Build a per-model migration spec from the parsed YAML config.

    Every key of *config* gets an entry in the result (the special
    'origin'/'destination' keys stay empty dicts, as callers expect).
    For each model key the spec holds:
      'filter'   - optional filter string ('' when absent),
      'sequence' - migration order,
      'fields'   - the comma-separated 'fields' value split into a list.
    Exits the process on missing arguments (kept from the original
    contract).  *section* is only validated, never read.
    """
    if not config or not section:
        exit(1)
    specs = {}
    for model_name in config:
        specs[model_name] = {}
        if model_name in ('origin', 'destination'):
            continue
        entry = config[model_name]
        specs[model_name]['filter'] = entry.get('filter', '')
        specs[model_name]['sequence'] = entry['sequence']
        specs[model_name]['fields'] = entry['fields'].split(',')
    return specs
def connect_openerp(dict_parms=None):
    """Log in to an OpenERP server over XML-RPC.

    dict_parms must provide hostname, port, dbname, username and password.
    Returns a connection dict with keys 'uid', 'pwd', 'dbname' and 'sock'
    (the /xmlrpc/object proxy) in the shape the other helpers expect.
    Exits the process when no parameters are given.
    """
    if not dict_parms:
        exit(1)
    base_url = 'http://' + dict_parms['hostname'] + ':' + str(dict_parms['port'])
    # Authenticate against the common endpoint to obtain the user id.
    sock_common = xmlrpclib.ServerProxy(base_url + '/xmlrpc/common')
    uid = sock_common.login(dict_parms['dbname'], dict_parms['username'], dict_parms['password'])
    # All subsequent model calls go through the object endpoint.
    sock = xmlrpclib.ServerProxy(base_url + '/xmlrpc/object')
    return {
        'uid': uid,
        'pwd': dict_parms['password'],
        'dbname': dict_parms['dbname'],
        'sock': sock,
    }
def migrate_model(oerp_origen = None, oerp_destino = None, model = None, fields = None, filter_parm = ''):
    """Copy records of *model* from the origin server to the destination.

    fields: list of field names to migrate (NOTE: mutated — 'create_date'
    is appended).  filter_parm: when empty, only records with id greater
    than the last migrated id (from the oerp_migrator.control table) are
    processed; when non-empty, ALL records are re-read.
    NOTE(review): the filter branch looks inverted — the non-empty filter
    value is never used in the search domain; confirm the intended
    semantics before changing it.

    Scalar fields are copied as-is; many2one values are remapped through
    get_lookup_ids(); many2many/one2many are skipped.  Records are
    upserted on the destination keyed by origin_id.  Returns None.
    """
    if not oerp_origen or not oerp_destino or not model or not fields:
        exit(1)
    logging.info("Migrando modelo %s"%(model))
    sock = oerp_origen['sock']
    if filter_parm <> '':
        data_ids = sock.execute(oerp_origen['dbname'],oerp_origen['uid'],oerp_origen['pwd'], model,'search',[])
    else:
        # Incremental run: start after the highest id already migrated.
        filter_id = get_model_id(oerp_destino,model)
        data_ids = sock.execute(oerp_origen['dbname'],oerp_origen['uid'],oerp_origen['pwd'], model,'search',[('id','>',filter_id)])
    field_types = get_field_type(oerp_origen,model)
    fields.append('create_date')
    data_items = sock.execute(oerp_origen['dbname'],oerp_origen['uid'],oerp_origen['pwd'], model,'read',data_ids,fields)
    max_id = 0
    for data in data_items:
        dict_insert = {}
        for field in fields:
            if field in field_types:
                if field_types[field][0] not in ['many2many','one2many','many2one']:
                    if field_types[field][0] != 'boolean' and data[field]:
                        dict_insert[field] = data[field]
                    else:
                        # Booleans (and falsy scalars): only copy truthy values.
                        if data[field]:
                            dict_insert[field] = data[field]
                else:
                    # Relational field: only many2one is migrated.
                    if field_types[field][0] == 'many2one':
                        if data[field]:
                            # On read, a many2one comes back as an (id, name) pair.
                            dict_insert_field = get_lookup_ids(oerp_destino,field_types[field][1],data[field])
                            if dict_insert_field <> 0:
                                dict_insert[field] = dict_insert_field
                            else:
                                # No match on the destination: keep the raw origin id.
                                dict_insert[field] = data[field][0]
                        else:
                            if field_types[field][2]:
                                # Required but empty on origin: default to id 1.
                                dict_insert[field] = 1
        if 'id' not in dict_insert.keys():
            dict_insert['origin_id'] = data['id']
        if data['id'] > max_id:
            max_id = data['id']
        logging.debug(dict_insert)
        sock_destino = oerp_destino['sock']
        destination_ids = sock_destino.execute(oerp_destino['dbname'],oerp_destino['uid'],oerp_destino['pwd'], \
            model,'search',[('origin_id','=',data['id'])])
        if destination_ids:
            # Record was migrated before: update it in place.
            data_items = sock_destino.execute(oerp_destino['dbname'],oerp_destino['uid'],oerp_destino['pwd'],\
                model,'write',destination_ids,dict_insert)
        else:
            try:
                data_items = sock_destino.execute(oerp_destino['dbname'],oerp_destino['uid'],oerp_destino['pwd'],\
                    model,'create',dict_insert)
            except:
                # Best-effort: log the failing payload and keep migrating.
                logging.error(dict_insert)
                pass
    update_model_id(oerp_destino,model,max_id)
    logging.info("Fin migración modelo %s"%(model))
    return None
def validate_setup(dict_models = {}, oerp_destino = {}):
    """Check that every model to migrate has an 'origin_id' column on the
    destination server.

    dict_models: mapping produced by read_models() (the special
    'origin'/'destination' keys are skipped).
    oerp_destino: connection dict from connect_openerp().
    Returns True when every model is migratable, False otherwise (the
    offending model is logged).

    Note: the mutable default arguments are kept for interface
    compatibility; both are only read, never mutated.
    """
    if not dict_models:
        logging.error("No dict_models parameter in validate_setup")
        return False
    if not oerp_destino:
        logging.error("No oerp_destino parameter in validate_setup")
        return False
    sock = oerp_destino['sock']
    for model in dict_models.keys():
        if model in ('origin', 'destination'):
            continue
        # The migration machinery stores the source record id in origin_id,
        # so the column must exist on the destination model.
        args = [('model', '=', model), ('name', '=', 'origin_id')]
        origin_ids = sock.execute(oerp_destino['dbname'], oerp_destino['uid'], oerp_destino['pwd'],
                                  'ir.model.fields', 'search', args)
        if not origin_ids:
            logging.error("Model " + model + " does not have origin_id column")
            return False
    return True
def main(configfile_parm = ''):
logging.basicConfig(filename='migrator.log',level=logging.DEBUG)
logging.info("Comenzando la migración")
stream = file(configfile_parm,'r')
dict_yaml = yaml.safe_load(stream)
if not dict_yaml['origin'] or not dict_yaml['destination']:
logging.error('No origin/destination specified in yaml file.')
exit(1)
dict_origin = dict_yaml['origin']
logging.info("Origin host: %s port: %s database: %s"%(dict_origin['hostname'],dict_origin['port'],dict_origin['dbname']))
dict_destination = dict_yaml['destination']
logging.info("Destination host: %s port: %s database: %s"%(dict_destination['hostname'],dict_destination['port'],dict_destination['dbname']))
dict_models = read_models(dict_yaml,"objetos")
for key,value in dict_models.items():
logging.info(key)
logging.info(value)
oerp_origen = connect_openerp(dict |
import sys
sys.path.append('../../python/')
i | mport urpc
import randexample_p | b2
if __name__ == '__main__':
    # Build a single-threaded uRPC client and issue one request/reply cycle.
    client = urpc.client.SingleThreadClient()
    request = randexample_pb2.Request()
    reply = randexample_pb2.Reply()
    request.nMessage = 1
    request.nSample = 10
    # NOTE(review): positional argument semantics ('some service', 8) are
    # inferred from the call shape only — confirm against the urpc client API.
    client.sendRequest('some service',8,request)
    reply.ParseFromString(client.getReply())
    print reply
|
#!/usr/bin/env python2
# Copyright (c) 2015 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
# Test multiple rpc user config option rpcauth
#
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import *
import base64
try:
import http.client as httplib
except ImportError:
import httplib
try:
import urllib.parse as urlparse
except ImportError:
import urlparse
class HTTPBasicsTest (BitcoinTestFramework):
    """Exercises the multi-user rpcauth configuration option."""

    def setup_nodes(self):
        # Stock four-node setup.
        return start_nodes(4, self.options.tmpdir)

    def setup_chain(self):
        print("Initializing test directory "+self.options.tmpdir)
        initialize_chain(self.options.tmpdir)
        # Append two rpcauth credential lines (generated with the
        # share/rpcuser tool) to node0's config before the node starts.
        auth_lines = [
            "rpcauth=rt:93648e835a54c573682c2eb19f882535$7681e9c5b74bdd85e78166031d2058e1069b3ed7ed967c93fc63abba06f31144",
            "rpcauth=rt2:f8607b1a88861fac29dfccf9b52ff9f$ff36a0c23c8c62b4846112e50fa888416e94c17bfd4c42f88fd8f55ec6a3137e",
        ]
        with open(os.path.join(self.options.tmpdir+"/node0", "kzcash.conf"), 'a') as f:
            for line in auth_lines:
                f.write(line+"\n")

    def _rejected(self, url, authpair):
        """POST a trivial RPC call with the given credentials and report
        whether the node answered 401 Unauthorized."""
        headers = {"Authorization": "Basic " + str_to_b64str(authpair)}
        conn = httplib.HTTPConnection(url.hostname, url.port)
        conn.connect()
        conn.request('POST', '/', '{"method": "getbestblockhash"}', headers)
        status = conn.getresponse().status
        conn.close()
        return status == 401

    def run_test(self):
        ##################################################
        # Check correctness of the rpcauth config option #
        ##################################################
        url = urlparse.urlparse(self.nodes[0].url)
        # Credentials the framework started the node with.
        authpair = url.username + ':' + url.password
        # Passwords matching the rpcauth hashes written in setup_chain
        # (generated via the share/rpcuser tool).
        password = "cA773lm788buwYe4g4WT+05pKyNruVKjQ25x3n0DQcM="
        password2 = "8/F3uMDw4KSEbw96U3CA1C4X05dkHDN2BPFjTgZW4KI="
        # Original credentials still work.
        assert_equal(self._rejected(url, authpair), False)
        # First rpcauth user with the correct password.
        assert_equal(self._rejected(url, "rt:"+password), False)
        # Wrong login name with rt's password.
        assert_equal(self._rejected(url, "rtwrong:"+password), True)
        # Wrong password for rt.
        assert_equal(self._rejected(url, "rt:"+password+"wrong"), True)
        # Second rpcauth user with the correct password.
        assert_equal(self._rejected(url, "rt2:"+password2), False)
        # Wrong password for rt2.
        assert_equal(self._rejected(url, "rt2:"+password2+"wrong"), True)
if __name__ == '__main__':
    # Run the test through the framework's standard entry point.
    HTTPBasicsTest ().main ()
|
ue())[0]
if self.format.GetSelection() == 0:
self.fbb.SetValue(filename + self.extension, None)
else:
self.fbb.SetValue(filename + ".pdf", None)
    def Sauver(self):
        """Generate the document(s) chosen in the dialog.

        Disables the dialog controls, generates one office file per person
        (multi mode) or a single file, optionally converts the result to
        PDF, and reports generation errors in a message dialog.  Sets
        self.document_generated = True on success and closes the dialog
        with wx.ID_OK.
        """
        self.fbb.Disable()
        self.sauver.Disable()
        if self.sauver_ouvrir:
            self.sauver_ouvrir.Disable()
        self.filename = self.fbb.GetValue()
        f, e = os.path.splitext(self.filename)
        if e == ".pdf":
            # The office document is produced first, then converted to PDF.
            self.pdf = True
            self.oo_filename = f + self.extension
        else:
            self.pdf = False
            self.oo_filename = self.filename
        # Remember the directory for the next save dialog.
        config.documents_directory = os.path.dirname(self.filename)
        dlg = None
        try:
            if self.modifications.multi is not False:
                # Multi mode: one document per (filename, modifications) pair,
                # with the gauge tracking overall progress.
                errors = {}
                simple_modifications = self.modifications.get_simple_modifications(self.oo_filename)
                for i, (filename, modifs) in enumerate(simple_modifications):
                    self.gauge.SetValue((100 * i) / len(simple_modifications))
                    errors.update(GenerateDocument(modifs, filename=filename))
                    if self.pdf:
                        f, e = os.path.splitext(filename)
                        convert_to_pdf(filename, f + ".pdf")
                        os.remove(filename)
            else:
                # Single-document mode: drop the per-person name placeholder.
                self.filename = self.filename.replace(" <prenom> <nom>", "")
                self.oo_filename = self.oo_filename.replace(" <prenom> <nom>", "")
                errors = GenerateDocument(self.modifications, filename=self.oo_filename, gauge=self.gauge)
                if self.pdf:
                    convert_to_pdf(self.oo_filename, self.filename)
                    os.remove(self.oo_filename)
            self.document_generated = True
            if errors:
                # Collate the per-label error lists into one warning message.
                message = "Document %s généré avec des erreurs :\n" % self.filename
                for label in errors.keys():
                    message += '\n' + label + ' :\n '
                    message += '\n '.join(errors[label])
                dlg = wx.MessageDialog(self, message, 'Message', wx.OK | wx.ICON_WARNING)
        except IOError:
            # Typically the target file is already open in another program.
            print(sys.exc_info())
            dlg = wx.MessageDialog(self, "Impossible de sauver le document. Peut-être est-il déjà ouvert ?", 'Erreur',
                                   wx.OK | wx.ICON_WARNING)
            dlg.ShowModal()
            dlg.Destroy()
            return
        except Exception as e:
            info = sys.exc_info()
            message = ' [type: %s value: %s traceback: %s]' % (info[0], info[1], traceback.extract_tb(info[2]))
            dlg = wx.MessageDialog(self, message, 'Erreur', wx.OK | wx.ICON_WARNING)
        if dlg:
            dlg.ShowModal()
            dlg.Destroy()
        self.EndModal(wx.ID_OK)
    def OnSauver(self, _):
        """Handler for the plain save button: force single-document mode."""
        self.modifications.multi = False
        self.Sauver()
    def OnSauverOuvrir(self, event):
        """Save, then open the generated file in the matching viewer
        (Acrobat Reader for PDF, LibreOffice otherwise)."""
        self.OnSauver(event)
        if self.document_generated:
            if self.filename.endswith(".pdf"):
                StartAcrobatReader(self.filename)
            else:
                StartLibreOffice(self.filename)
    def OnSauverUnitaire(self, _):
        """Save keeping the current multi/single mode unchanged."""
        self.Sauver()
    def OnSauverEnvoyer(self, event):
        """Generate the document(s), then email them.

        In multi mode, asks for confirmation with a recipient list (display
        truncated at 1000 characters) and sends one mail per generated
        file; otherwise sends the single generated document.  Send failures
        are reported per file in a warning dialog.
        """
        self.OnSauverUnitaire(event)
        if self.document_generated:
            if self.modifications.multi is not False:
                simple_modifications = self.modifications.get_simple_modifications(self.oo_filename)
                emails = '\n'.join(
                    [" - %s (%s)" % (modifs.email_subject, ", ".join(modifs.email_to)) for filename, modifs in
                     simple_modifications])
                if len(emails) > 1000:
                    emails = emails[:1000] + "\n..."
                dlg = wx.MessageDialog(self, "Ces emails seront envoyés :\n" + emails, 'Confirmation',
                                       wx.OK | wx.CANCEL | wx.ICON_WARNING)
                response = dlg.ShowModal()
                dlg.Destroy()
                if response != wx.ID_OK:
                    return
                for filename, modifs in simple_modifications:
                    if self.pdf:
                        # The mail should carry the PDF, not the office file.
                        oo_filename = filename
                        filename, e = os.path.splitext(oo_filename)
                        filename += ".pdf"
                    try:
                        SendDocument(filename, modifs)
                    except Exception as e:
                        dlg = wx.MessageDialog(self, "Impossible d'envoyer le document %s\n%r" % (filename, e),
                                               'Erreur', wx.OK | wx.ICON_WARNING)
                        dlg.ShowModal()
                        dlg.Destroy()
            else:
                try:
                    SendDocument(self.filename, self.modifications)
                except Exception as e:
                    dlg = wx.MessageDialog(self, "Impossible d'envoyer le document %s\n%r" % (self.filename, e),
                                           'Erreur', wx.OK | wx.ICON_WARNING)
                    dlg.ShowModal()
                    dlg.Destroy()
    def OnSauverEnvoyerCAF(self, event):
        """Save, then email the document to the CAF address using a
        dedicated introduction template (base name suffixed with " CAF")."""
        self.OnSauver(event)
        if self.document_generated:
            try:
                root, ext = os.path.splitext(self.modifications.introduction_filename)
                introduction_filename = root + " CAF" + ext
                SendDocument(self.filename, self.modifications, to=[database.creche.caf_email], introduction_filename=GetTemplateFile(introduction_filename))
            except Exception as e:
                dlg = wx.MessageDialog(self, "Impossible d'envoyer le document %s\n%r" % (self.filename, e), "Erreur", wx.OK | wx.ICON_WARNING)
                dlg.ShowModal()
                dlg.Destroy()
def StartLibreOffice(filename):
    """Open *filename* in LibreOffice/OpenOffice.

    On Windows this goes through the UNO desktop (getOOoContext); elsewhere
    it spawns the first soffice launcher that starts.  Failures are shown
    in a wx warning dialog.
    """
    if sys.platform == 'win32':
        filename = "".join(["file:", urllib.pathname2url(os.path.abspath(filename.encode("utf-8")))])
        try:
            StarDesktop, objServiceManager, core_reflection = getOOoContext()
            StarDesktop.LoadComponentFromURL(filename, "_blank", 0, MakePropertyValues(objServiceManager, [
                ["ReadOnly", False],
                ["Hidden", False]]))
        except Exception as e:
            print("Exception ouverture LibreOffice", e)
            dlg = wx.MessageDialog(None, "Impossible d'ouvrir le document\n%r" % e, "Erreur", wx.OK|wx.ICON_WARNING)
            dlg.ShowModal()
            dlg.Destroy()
    else:
        # Candidate launchers, tried in order; first successful spawn wins.
        if sys.platform == "darwin":
            candidates = [
                "/Applications/LibreOffice.app/Contents/MacOS/soffice",
                "/Applications/OpenOffice.app/Contents/MacOS/soffice",
            ]
        else:
            candidates = ["/usr/bin/libreoffice", "ooffice"]
        for launcher in candidates:
            try:
                print(launcher, filename)
                subprocess.Popen([launcher, filename])
                return
            except Exception as e:
                print(e)
        # Every candidate failed: tell the user.
        dlg = wx.MessageDialog(None, "Impossible de lancer OpenOffice / LibreOffice", 'Erreur', wx.OK|wx.ICON_WARNING)
        dlg.ShowModal()
        dlg.Destroy()
DDE_ACROBAT_STRINGS = ["AcroviewR15", "AcroviewA15", "AcroviewR12", "AcroviewA12", "AcroviewR11", "AcroviewA11",
"AcroviewR10", "AcroviewA10", "acroview"]
dde_server = None
def StartAcrobatReader(filename):
global dde_server
import win32api
import win32ui
import dde
filename = str(os.path.abspath(filename))
path, name = os.path.split(filename)
reader = win32api.FindExecutable(name, path)
os.spawnl(os.P_NOWAIT, reader[1], " ")
for t in range(10):
time.sleep(1)
for acrobat in DDE_ACROBAT_STRINGS:
try:
if not dde_server:
dde_server = dde.CreateServer()
dde_server.Create('Gertrude')
c = dde.CreateConversation(dde_server)
c.ConnectTo(acrobat, 'control')
c.Exec('[DocOpen("%s")]' % ( |
oint/load_controls" option (which produced the initial control bound files).
def read_default_control_bounds(opt_options, model_options):
simulation_name = superspud(model_options, "libspud.get_option('/simulation_name')")
nb_controls = superspud(opt_options, "libspud.option_count('/control_io/control')")
m_bounds = {"lower_bound": {}, "upper_bound": {}}
# Loop over controls
for i in range(nb_controls):
cname = superspud(opt_options, "libspud.get_option('/control_io/control["+str(i)+"]/name')")
ctype = superspud(opt_options, "libspud.get_option('/control_io/control["+str(i)+"]/type/name')")
if ctype != 'default':
continue
have_bound = {}
# Loop over lower and upper bound
for k in m_bounds.keys():
have_bound[k] = superspud(model_options, "libspud.have_option('/adjoint/controls/control["+str(i)+"/bounds/"+k+"')")
if not have_bound[k]:
continue
act_flag = False # Check that at least one control bound file exists
for ctrl_file in glob.iglob('control_'+simulation_name+'_'+cname+ '_'+k+'_[0-9]*.pkl'):
try:
timestep = int(ctrl_file.strip()[len('control_'+simulation_name+'_'+ cname+ '_'+k+'_'):len(ctrl_file)-4])
except:
print "Error while reading the control bound files."
print "The control bound file ", ctrl_file, " does not conform the standard naming conventions for control files."
exit()
f = open(ctrl_file, 'rb')
m_bounds[k][(cname, timestep)] = pickle.load(f)
f.close()
act_flag = True
if act_flag == False:
print "Warning: Found no control bound file for control ", cname, "."
return m_bounds
# Completes the control bounds by adding the missing controls and filling them with nan's
# Completes the control bounds by adding the missing controls and filling them with nan's
def complete_default_control_bounds(m, m_bounds):
    """Ensure every control in m has lower/upper bound entries in m_bounds.

    Controls without a stored bound get an object-dtype array of the same
    shape as the control value, filled with None (read later as
    "unbounded"). Mutates and returns m_bounds.
    """
    bound_types = {"lower_bound": {}, "upper_bound": {}}
    for bound_type in bound_types:
        for control in m.keys():
            # 'in' replaces the deprecated dict.has_key() (removed in
            # Python 3; discouraged since Python 2.3) — same behaviour.
            if control in m_bounds[bound_type]:
                continue
            # We need objects as dtype because we want to keep the Nones for later
            m_bounds[bound_type][control] = numpy.empty(shape=m[control].shape, dtype=object)
            m_bounds[bound_type][control].fill(None)
    return m_bounds
# Returns the control derivatives for both the custom and the default controls.
def read_control_derivatives(opt_options, model_options):
simulation_name = superspud(model_options, "libspud.get_option('/simulation_name')")
functional_name = superspud(opt_options, "libspud.get_option('/functional/name')")
nb_controls = superspud(opt_options, "libspud.option_count('/control_io/control')")
derivs = {}
for i in range(nb_controls):
cname = superspud(opt_options, "libspud.get_option('/control_io/control["+str(i)+"]/name')")
ctype = superspud(opt_options, "libspud.get_option('/control_io/control["+str(i)+"]/type/name')")
if ctype == 'default':
act_flag = False # Check that at least one control file exists
for ctrl_file in glob.iglob('control_'+simulation_name+'_adjoint_'+functional_name+'_'+ cname+ '_TotalDerivative_[0-9]*.pkl'):
try:
# The naming convenction is control+simulation_name+control_name+TotalDerivative, but do not forget that
# the derivatives where produced during the adjoint run in which the simulation name is simulation_name+functional_name
timestep = int(ctrl_file.strip()[len('control_'+simulation_name+'_adjoint_'+functional_name+'_'+ cname+ '_TotalDerivative_'):len(ctrl_file)-4])
except:
print "Error while reading the control derivative files."
print "The control file ", ctrl_file, " does not conform the standard naming conventions for control files."
exit()
f = open(ctrl_file, 'rb')
derivs[(cname, timestep)] = pickle.load(f)
f.close()
act_flag = True
if act_flag == False:
print "Warning: Found no control derivative file for control ", cname, "."
elif ctype == 'custom':
control_derivative_code = superspud(opt_options, "libspud.get_option('/control_io/control["+str(i)+"]/type::custom/control_derivative')")
d = {}
exec control_derivative_code in d
derivs[cname] = d['control_derivative']()
else:
print "Unknown control type " + ctype + "."
exit()
return derivs
# Writes the custom controls onto disk
def update_custom_controls(m, opt_options):
nb_controls = superspud(opt_options, "libspud.option_count('/control_io/control')")
for i in range(nb_controls):
cname = superspud(opt_options, "libspud.get_option('/control_io/control["+str(i)+"]/name')")
ctype = superspud(opt_options, "libspud.get_option('/control_io/control["+str(i)+"]/type/name')")
# With the custom type, the user specifies a python function to update the controls.
if ctype == 'custom':
update_control_code = superspud(opt_options, "libspud.get_option('/control_io/control["+str(i)+"]/type::custom/update_control')")
d = {}
exec update_control_code in d
d['update_control'](m[cname])
# Writes the default controls onto disk
def update_default_controls(m, opt_options, model_options):
global debug
simulation_name = superspud(model_options, "libspud.get_option('/simulation_name')")
nb_controls = superspud(opt_options, "libspud.option_count('/control_io/control')")
# Loop over default controls
for i in range(nb_controls):
cname = superspud(opt_options, "libspud.get_option('/control_io/control["+str(i)+"]/name')")
ctype = superspud(opt_options, "libspud.get_option('/control_io/control["+str(i)+"]/type/name')")
if ctype == 'default':
# Loop over controls
for k in m.keys():
# Check if that is a control we are looking for
if k[0] == cname:
timestep = k[1]
file_name = 'control_'+simulation_name + '_' + cname + '_' + str(timestep) + '.p | kl'
if not os.path.isfile(file_name):
print "E | rror: writing control file ", file_name, " which did not exist before."
exit()
if debug:
# Check that the file we are writing has the same shape than the one we are writing
f = open(file_name, 'rb')
m_old = pickle.load(f)
if m[k].shape != m_old.shape:
print "Error: The shape of the control in ", file_name, " changed."
exit()
f.close()
f = open(file_name, 'wb')
pickle.dump(m[k], f)
f.close()
# Check the consistency of model and option file
def check_option_consistency(opt_options, model_options):
nb_controls = superspud(opt_options, "libspud.option_count('/control_io/control')")
for i in range(nb_controls):
cname = superspud(opt_options, "libspud.get_option('/control_io/control["+str(i)+"]/name')")
ctype = superspud(opt_options, "libspud.get_option('/control_io/control["+str(i)+"]/type/name')")
# Check that the default controls exist in the model
# and that custom controls not.
if ctype == 'custom':
if superspud(model_options, "libspud.have_option('/adjoint/controls/control::" + cname + "')"):
print "The custom control " + cname + " is a default control in the model option tree."
exit()
elif ctype== 'default':
if not superspud(model_options, "libspud.have_option('/adjoint/controls/control::" + cname + "')"):
print "The default control " + cname + " was not found in the model option tree."
exit()
else:
print "Unknown control type " + ctype + "."
exit()
# Check that the the controls in dJdm are consistent with the ones in m
# If m_bounds is present, it also checks the consistency of the bounds
def check_control_consistency(m, djdm, m_bounds=None):
djdm_keys = djdm.keys()
m_keys = m.keys()
djdm_keys.sort()
m_keys.sort()
if m_keys != djdm_keys:
print "Error: The controls are not consistent with the controls derivatives."
print "The controls are:", m_keys
print "The control derivatives are:", djdm_keys
print "Check the consistency of the control definition in the model and the optimality |
# -*- coding: utf-8 -*-
import cherrypy
class Root(object):
    """Single REST resource; MethodDispatcher routes HTTP verbs to methods."""

    exposed = True

    @cherrypy.tools.json_out()
    def GET(self, id=None):
        # The optional 'id' path segment is accepted but unused: every GET
        # returns the same greeting, serialized to JSON by json_out.
        greeting = ["Hello", "world", "!"]
        return greeting
if __name__ == '__main__':
    # Mount Root at '/' with the MethodDispatcher so HTTP verbs (GET, POST,
    # ...) are dispatched to same-named methods instead of path segments.
    conf = {
        '/': {
            'request.dispatch': cherrypy.dispatch.MethodDispatcher(),
            'tools.sessions.on': True,
            'tools.response_headers.on': True,
            # NOTE(review): forces text/plain although GET is wrapped in
            # json_out (which sets application/json) — confirm which
            # Content-Type is actually intended.
            'tools.response_headers.headers': [('Content-Type', 'text/plain')],
        }
    }
    cherrypy.quickstart(Root(), '/', conf)
|
import pytest
import numpy as np
import scipy.sparse
import qutip
from qutip.fastsparse import fast_csr_matrix
from qutip.cy.checks import (_test_sorting, _test_coo2csr_inplace_struct,
_test_csr2coo_struct, _test_coo2csr_struct)
from qutip.random_objects import rand_jacobi_rotation
def _unsorted_csr(N, density=0.5):
    """Build an N x N fast_csr_matrix with (likely) unsorted column indices.

    Starts from diag(0..N-1) and applies random Jacobi rotations until the
    matrix reaches ~`density` fill; the rotations leave the CSR column
    indices unsorted, which is what the sorting tests need.
    """
    M = scipy.sparse.diags(np.arange(N), 0, dtype=complex, format='csr')
    nvals = N**2 * density
    # Keep rotating until at least 95% of the target number of nonzeros.
    while M.nnz < 0.95*nvals:
        M = rand_jacobi_rotation(M)
        M = M.tocsr()
    return fast_csr_matrix((M.data, M.indices, M.indptr), shape=M.shape)
def sparse_arrays_equal(a, b):
    """Return True when the two sparse matrices have no differing entries."""
    # (a != b) is itself sparse: it stores only the positions that differ,
    # so equality means its data array contains no True entries.
    mismatches = (a != b).data
    return not mismatches.any()
@pytest.mark.repeat(20)
def test_coo2csr_struct():
    "Cython structs : COO to CSR"
    # Round-trip a random density matrix through the Cython COO->CSR
    # conversion and check it reproduces the original CSR data.
    A = qutip.rand_dm(5, 0.5).data
    assert sparse_arrays_equal(A, _test_coo2csr_struct(A.tocoo()))
@pytest.mark.repeat(20)
def test_indices_sort():
    "Cython structs : sort CSR indices inplace"
    # A is built with unsorted column indices; B is the scipy-sorted
    # reference. The Cython in-place sort of A must match B exactly.
    A = _unsorted_csr(10, 0.25)
    B = A.copy()
    B.sort_indices()
    _test_sorting(A)  # sorts A in place
    assert np.all(A.data == B.data)
    assert np.all(A.indices == B.indices)
@pytest.mark.repeat(20)
def test_coo2csr_inplace_nosort():
    "Cython structs : COO to CSR inplace (no sort)"
    # In-place conversion without index sorting must still compare equal
    # entry-wise (sparse equality ignores index ordering).
    A = qutip.rand_dm(5, 0.5).data
    B = _test_coo2csr_inplace_struct(A.tocoo(), sorted=0)
    assert sparse_arrays_equal(A, B)
@pytest.mark.repeat(20)
def test_coo2csr_inplace_sort():
    "Cython structs : COO to CSR inplace (sorted)"
    # Same as the no-sort variant, but asks the conversion to also sort
    # the column indices.
    A = qutip.rand_dm(5, 0.5).data
    B = _test_coo2csr_inplace_struct(A.tocoo(), sorted=1)
    assert sparse_arrays_equal(A, B)
@pytest.mark.repeat(20)
def test_csr2coo():
    "Cython structs : CSR to COO"
    # The Cython CSR->COO conversion must agree with scipy's tocoo().
    A = qutip.rand_dm(5, 0.5).data
    B = A.tocoo()
    C = _test_csr2coo_struct(A)
    assert sparse_arrays_equal(B, C)
|
from fanstatic import Library, Resource
import js.jquery
import js.jqueryui
# Fanstatic resource definitions for the jQuery Dynatree widget; static
# assets live in this package's 'resources' directory.
library = Library('dynatree', 'resources')

# Stylesheet for the default (Vista) skin.
dynatree_css = Resource(library, 'src/skin-vista/ui.dynatree.css')

# Main plugin script: serves the minified build in production mode and pulls
# in jQuery, jQuery UI and the skin CSS automatically.
dynatree = Resource(library, 'src/jquery.dynatree.js',
                    minified='src/jquery.dynatree.min.js',
                    depends=[dynatree_css, js.jquery.jquery, js.jqueryui.jqueryui])
|
# Copyright 2008, Jeffrey Regier, jeff [at] stat [dot] berkeley [dot] edu
# This file is part of Author-Dedupe.
#
# Author-Dedupe is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Author-Dedupe is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Author-Dedupe. If not, see <http://www.gnu.org/licenses/>.
import re
import author
import partition_part
from collections import defaultdict
def compatible_names(a1, a2):
    """Return True when a2's name could refer to the same person as a1's.

    Two name parts are compatible when, after stripping non-word characters,
    one is a prefix of the other (so an initial matches a full name). The
    author with the shorter middle-name list has each of its middle names
    matched greedily, in order, against the longer list. Last names are
    assumed equal by construction of the partition.
    """
    def parts_match(w1, w2):
        w1 = re.sub(r'\W', '', w1)
        w2 = re.sub(r'\W', '', w2)
        n = min(len(w1), len(w2))
        if n == 0:
            return False
        return w1[:n] == w2[:n]

    remaining = list(a1.middle_names)  # copy: consumed destructively below
    longer = a2.middle_names
    # Normalize so that a1 always carries the shorter middle-name list.
    if len(remaining) > len(longer):
        return compatible_names(a2, a1)
    # The first names must be compatible.
    if not parts_match(a1.first_name, a2.first_name):
        return False
    # Greedily consume the shorter list against the longer one, keeping order.
    for word in longer:
        if not remaining:
            break
        candidate = remaining.pop(0)
        if not parts_match(candidate, word):
            remaining.insert(0, candidate)
    # Compatible iff every short middle name found a match.
    return remaining == []
class FllnPartition():
    """(first-letter-first-name, last-name) partition"""

    def __init__(self, authors, info_comp):
        # info_comp: comparator used to break ties between candidate merges
        # in target_sole_stricter().
        self.info_comp = info_comp
        self.load_parts(authors)
        self.load_compat_mat(authors)

    def load_parts(self, authors):
        """Start with one singleton part per author mention."""
        self.parts = set()
        def singleton_part(a):
            part = partition_part.PartitionPart()
            part.add(a)
            return part
        self.parts.update([singleton_part(a) for a in authors])

    def load_compat_mat(self, authors):
        """Precompute compat_map[a] = set of authors name-compatible with a.

        O(n^2) pairwise comparison over all author mentions.
        """
        self.compat_map = defaultdict(set)
        for a1 in authors:
            for a2 in authors:
                if compatible_names(a1, a2):
                    self.compat_map[a1].add(a2)

    def get_partition_compat(self, part):
        # Authors compatible with EVERY member of the part.
        compat_maps = [self.compat_map[a] for a in part]
        # NOTE(review): relies on the builtin reduce, i.e. Python 2
        # (functools.reduce under Python 3); raises on an empty part.
        return reduce(set.intersection, compat_maps)

    def stricter_than(self, less_p, more_p):
        # less_p is stricter when its compatibility set is a proper subset
        # of more_p's (fewer authors could match it).
        less_compat = self.get_partition_compat(less_p)
        more_compat = self.get_partition_compat(more_p)
        return less_compat < more_compat

    def is_equivalent(self, p1, p2):
        # Two parts are equivalent when exactly the same authors are
        # compatible with both.
        compat1 = self.get_partition_compat(p1)
        compat2 = self.get_partition_compat(p2)
        return compat1 == compat2

    def target_equivalent(self, source_p):
        """Return some other part equivalent to source_p, or None."""
        for p in self.parts:
            if p == source_p:
                continue
            if self.is_equivalent(source_p, p):
                return p

    def find_stricter(self, source_p):
        """Return all parts strictly stricter than source_p."""
        stricter = []
        for p in self.parts:
            if p == source_p:
                continue
            if self.stricter_than(p, source_p):
                stricter.append(p)
        return stricter

    def target_sole_stricter(self, source_p):
        """Return the unique stricter part, or a near-certain match among several.

        Returns None when there is no stricter part or the choice is ambiguous.
        """
        stricter = self.find_stricter(source_p)
        if len(stricter) == 1:
            return stricter[0]
        elif len(stricter) > 1:
            for s in stricter:
                # 7e-6: empirically chosen info_comp threshold for treating
                # the merge as unambiguous — presumably tuned on real data;
                # TODO confirm provenance.
                if self.info_comp.compare(source_p, s) < 7e-6:
                    return s

    def merge_iter(self, get_target_f):
        """One merge pass: fold each part into get_target_f(part) if any.

        Returns the number of merges performed (0 means a fixed point).
        """
        num_changes = 0
        # copy avoids a run time error when the set changes size
        for p in set.copy(self.parts):
            target = get_target_f(p)
            if target:
                target.extend(p)
                self.parts.remove(p)
                num_changes += 1
        return num_changes

    def merge(self):
        """Merge equivalent parts, then fold parts into stricter ones; finally
        stamp each author with its part's merged display name."""
        self.merge_iter(self.target_equivalent)
        #iteratively merge the parts into the stricter parts,
        #when there is only one stricter part
        while self.merge_iter(self.target_sole_stricter):
            pass
        #TODO: why doesn't it ever help to do this more than once?
        for part in self.parts:
            merged_name = part.full_name()
            for a in part:
                a.merged_name = merged_name
|
ramework.authtoken.models import Token
from .models import *
from .mommy_recipes import *
def get_response(client, url, params):
    """Issue a JSON-format GET through the test client and return the response."""
    return client.get(url, params, format='json')
class TestDiarioAwifs(APITestCase):
    """Daily statistics endpoint, AWIFS sensor: totals are aggregated per day."""

    def setUp(self):
        self.url = reverse('api:estatisticas-diario')
        self.params = {'uf': 'MT', 'ano': 2015, 'mes': 10, 'tipo': 'AWIFS'}
        # Baseline fixture: one AWIFS record on 2015-10-10 (recipe defaults).
        deter_awifs_1.make(data_imagem=date(2015, 10, 10))

    def test_response(self):
        response = get_response(self.client, self.url, self.params)
        self.assertEqual(response.status_code, 200)
        self.assertEqual(len(response.data), 1)

    def test_response_diario(self):
        """Records on the same day are summed; each distinct day is one entry."""
        response = get_response(self.client, self.url, self.params)
        data_received = response.data[0]['data']
        self.assertEqual(len(data_received), 1)
        self.assertEqual(data_received[0]['dia'], 10)
        # 0.13 km2 — presumably the deter_awifs_1 recipe's default area;
        # TODO confirm against mommy_recipes.
        self.assertEqual(data_received[0]['total'], Decimal('0.13'))
        # A record on a new day appears as an additional entry.
        deter_awifs_1.make(data_imagem=date(2015, 10, 12), area_km2=0.29)
        response = get_response(self.client, self.url, self.params)
        data_received = response.data[0]['data']
        self.assertEqual(len(data_received), 2)
        self.assertEqual(data_received[1]['dia'], 12)
        self.assertEqual(data_received[1]['total'], Decimal('0.29'))
        # A second record on the same day is summed into that day's total.
        deter_awifs_1.make(data_imagem=date(2015, 10, 12), area_km2=0.31)
        response = get_response(self.client, self.url, self.params)
        data_received = response.data[0]['data']
        self.assertEqual(len(data_received), 2)
        self.assertEqual(data_received[1]['dia'], 12)
        self.assertEqual(data_received[1]['total'], Decimal('0.60'))
        deter_awifs_1.make(data_imagem=date(2015, 10, 12), area_km2=1)
        response = get_response(self.client, self.url, self.params)
        data_received = response.data[0]['data']
        self.assertEqual(len(data_received), 2)
        self.assertEqual(data_received[1]['dia'], 12)
        self.assertEqual(data_received[1]['total'], Decimal('1.60'))
        # Different month (November): only that month's records are returned.
        deter_awifs_2.make(data_imagem=date(2015, 11, 1))
        deter_awifs_2.make(data_imagem=date(2015, 11, 1))
        deter_awifs_2.make(data_imagem=date(2015, 11, 2))
        deter_awifs_2.make(data_imagem=date(2015, 11, 3), area_km2=1.2)
        self.params = {'uf': 'MT', 'ano': 2015, 'mes': 11, 'tipo': 'AWIFS'}
        response = get_response(self.client, self.url, self.params)
        data_received = response.data[0]['data']
        self.assertEqual(len(data_received), 3)
        self.assertEqual(response.data[0]['data'][0]['dia'], 1)
        self.assertEqual(response.data[0]['data'][0]['total'], Decimal('1.64'))
        self.assertEqual(response.data[0]['data'][1]['dia'], 2)
        self.assertEqual(response.data[0]['data'][1]['total'], Decimal('0.82'))
        self.assertEqual(response.data[0]['data'][2]['dia'], 3)
        self.assertEqual(response.data[0]['data'][2]['total'], Decimal('1.2'))
class TestDiarioDeter(APITestCase):
    """Daily statistics endpoint, DETER sensor filtered by stage (Corte Raso)."""

    def setUp(self):
        self.url = reverse('api:estatisticas-diario')
        self.params = {'uf': 'MA', 'ano': 2015, 'mes': 8,
                       'tipo': 'DETER', 'estagio': 'Corte Raso'}
        # Baseline fixture: one DETER record on 2015-08-01 (recipe defaults).
        daily_deter_1.make(data_imagem=date(2015, 8, 1))

    def test_response(self):
        response = get_response(self.client, self.url, self.params)
        self.assertEqual(response.status_code, 200)

    def test_response_diario(self):
        """Per-day aggregation: same-day records sum, days come back sorted."""
        response = get_response(self.client, self.url, self.params)
        data_received = response.data[0]['data']
        day = data_received[0]['dia']
        area = data_received[0]['total']
        self.assertEqual(len(data_received), 1)
        self.assertEqual(day, 1)
        # 0.23 km2 — presumably the daily_deter_1 recipe's default area;
        # TODO confirm against mommy_recipes.
        self.assertEqual(area, Decimal('0.23'))
        # Second record on the same day is summed (0.23 + 1).
        daily_deter_1.make(data_imagem=date(2015, 8, 1), area_km2=1)
        response = get_response(self.client, self.url, self.params)
        data_received = response.data[0]['data']
        day = data_received[0]['dia']
        area = data_received[0]['total']
        self.assertEqual(len(data_received), 1)
        self.assertEqual(day, 1)
        self.assertEqual(area, Decimal('1.23'))
        # A record on a new day becomes a separate entry.
        daily_deter_1.make(data_imagem=date(2015, 8, 9), area_km2=1.89)
        response = get_response(self.client, self.url, self.params)
        data_received = response.data[0]['data']
        day = data_received[1]['dia']
        area = data_received[1]['total']
        self.assertEqual(len(data_received), 2)
        self.assertEqual(day, 9)
        self.assertEqual(area, Decimal('1.89'))
        # Mixed additions across several days; results stay sorted by day.
        daily_deter_1.make(data_imagem=date(2015, 8, 10), area_km2=1)
        daily_deter_1.make(data_imagem=date(2015, 8, 11), area_km2=1)
        daily_deter_1.make(data_imagem=date(2015, 8, 10), area_km2=2)
        daily_deter_1.make(data_imagem=date(2015, 8, 30), area_km2=2)
        response = get_response(self.client, self.url, self.params)
        data_received = response.data[0]['data']
        self.assertEqual(len(data_received), 5)
        self.assertEqual(data_received[0]['dia'], 1)
        self.assertEqual(data_received[1]['dia'], 9)
        self.assertEqual(data_received[2]['dia'], 10)
        self.assertEqual(data_received[3]['dia'], 11)
        self.assertEqual(data_received[4]['dia'], 30)
        self.assertEqual(data_received[0]['total'], Decimal('1.23'))
        self.assertEqual(data_received[1]['total'], Decimal('1.89'))
        self.assertEqual(data_received[2]['total'], Decimal('3'))
        self.assertEqual(data_received[3]['total'], Decimal('1'))
        self.assertEqual(data_received[4]['total'], Decimal('2'))
class TestDiarioQualif(APITestCase):
    """Daily statistics endpoint with no fixture data: must still return 200."""

    def setUp(self):
        self.url = reverse('api:estatisticas-diario')
        self.params = {'uf': 'BA', 'ano': 2013, 'mes': 9,
                       'tipo': 'DETER', 'estagio': 'Corte Raso'}

    def test_response(self):
        # No records are created in setUp, so this checks the empty case.
        response = get_response(self.client, self.url, self.params)
        self.assertEqual(response.status_code, 200)
class TestMontly(APITestCase):
    """Tests for the monthly statistics endpoint (api:estatisticas-mensal).

    NOTE(review): the class name is missing an 'h' ('Monthly'); kept as-is
    so external test selection by name keeps working.
    """

    def setUp(self):
        self.url = reverse('api:estatisticas-mensal')
        # self.user = User.objects.create_user(
        #     'test', 'test@test.com', 'password'
        # )
        # self.token = Token.objects.get(user=self.user)

    # def test_response(self):
    #     response = get_response(self.client, self.url, None)
    #     self.assertEqual(response.status_code, 200)

    def test_daily_deter_response(self):
        """Monthly DETER aggregation returns a single series for the fixtures."""
        daily_deter_1.make()
        daily_deter_2.make()
        # BUG FIX: 'revese' was a typo for reverse(), the Django URL
        # resolver imported at the top of this module; the call raised
        # NameError before the request was ever made.
        response = self.client.post(
            reverse("api:login"),
            {'username': 'test', 'password': 'password'},
            format='json'
        )
        params = {'uf': 'MA', 'ano': 2015, 'mes': 8,
                  'tipo': 'DETER'}
        response = get_response(self.client, self.url, params)
        self.assertEqual(response.status_code, 200)
        data_received = response.data[0]['data']
        self.assertEqual(len(data_received), 1)

    # def test_public_deter_response(self):
    #     public_deter_1.make()
    #     public_deter_2.make()
    #     params = {'uf': 'MA', 'ano': 2015, 'mes': 8,
    #               'tipo': 'DETER', 'estagio': 'Corte Raso'}
    #     response = get_response(self.client, self.url, params)

    # def test_daily_deter_qualif_response(self):
    #     daily_deter_qualif_1.make()
    #     daily_deter_qualif_2.make()
    #     params = {'uf': 'MA', 'ano': 2015, 'mes': 8,
    #               'tipo': 'DETER', 'estagio': 'Corte Raso'}
    #     response = get_response(self.client, self.url, params)
    #     self.assertEqual(response.status_code, 200)
    #     self.assertEqual(response.status_code, 200)

    # def test_public_deter_qualif_response(self):
    #     public_deter_qualif_1.make()
    #     public_deter_qualif_2.make()
    #     params = {'uf': 'MA', 'ano': 2015, 'mes': 8,
    #               'tipo': 'DETER', 'estagio': 'Corte Raso'}
    #     response = get_response(self.client, self.url, params)
    #     self.assertEqual(response.status_code, 200)

    # def test_deter_awifs_response(self):
    #     deter_awifs_1.make()
    #     deter_awifs_2.make()
    #
# CHECK_WITH python2
import pkg_resources
import os.path # for dirname, join
import pkgutil # for get_data
static_file_content = pkg_resources.r | esource_string(
'mypack', 'static_file.html').decode()
print('static_file_content is [{0}]'.format(static_file_content))
def get_real_filename(filename):
    """Return the path of *filename* relative to this module's directory."""
    return os.path.join(os.path.dirname(__file__), filename)

def get_data(filename):
    """Read and return the raw bytes of a file sitting next to this module.

    BUG FIX: uses a context manager so the file handle is closed
    deterministically (the original left it to be reclaimed by the GC).
    """
    with open(get_real_filename(filename), 'rb') as f:
        return f.read()
# Same content read through the local helper (plain open + read)...
static_file_content2 = get_data('static_file.html').decode()
print('static_file_content2 is [{0}]'.format(static_file_content2))

# ...and a third time via pkgutil, which also works when the package is zipped.
static_file_content3 = pkgutil.get_data('mypack', 'static_file.html').decode()
print('static_file_content3 is [{0}]'.format(static_file_content3))
|
"""The motion_blinds component."""
import asyncio
from datetime import timedelta
import logging
from socket import timeout
from motionblinds import MotionMulticast
from homeassistant import config_entries, core
from homeassistant.const import CONF_API_KEY, CONF_HOST, EVENT_HOMEASSISTANT_STOP
from homeassistant.exceptions import ConfigEntryNotReady
from homeassistant.helpers import device_registry as dr
from homeassistant.helpers.update_coordinator import DataUpdateCoordinator
from .const import (
DOMAIN,
KEY_COORDINATOR,
KEY_GATEWAY,
KEY_MULTICAST_LISTENER,
MANUFACTURER,
MOTION_PLATFORMS,
)
from .gateway import ConnectMotionGateway
_LOGGER = logging.getLogger(__name__)
def setup(hass: core.HomeAssistant, config: dict):
    """Set up the Motion Blinds component.

    YAML configuration is not supported; everything goes through config
    entries (async_setup_entry), so this just signals success.
    """
    return True
async def async_setup_entry(
    hass: core.HomeAssistant, entry: config_entries.ConfigEntry
):
    """Set up the motion_blinds components from a config entry.

    Starts the shared multicast listener (once per Home Assistant instance),
    connects to the gateway, creates the polling coordinator, registers the
    bridge device and forwards setup to the entity platforms.
    """
    hass.data.setdefault(DOMAIN, {})
    host = entry.data[CONF_HOST]
    key = entry.data[CONF_API_KEY]

    # Create multicast Listener — shared by all config entries, so it is
    # created only for the first entry that gets set up.
    if KEY_MULTICAST_LISTENER not in hass.data[DOMAIN]:
        multicast = MotionMulticast()
        hass.data[DOMAIN][KEY_MULTICAST_LISTENER] = multicast
        # start listening for local pushes (only once)
        await hass.async_add_executor_job(multicast.Start_listen)

        # register stop callback to shutdown listening for local pushes
        def stop_motion_multicast(event):
            """Stop multicast thread."""
            _LOGGER.debug("Shutting down Motion Listener")
            multicast.Stop_listen()

        hass.bus.async_listen_once(EVENT_HOMEASSISTANT_STOP, stop_motion_multicast)

    # Connect to motion gateway
    multicast = hass.data[DOMAIN][KEY_MULTICAST_LISTENER]
    connect_gateway_class = ConnectMotionGateway(hass, multicast)
    if not await connect_gateway_class.async_connect_gateway(host, key):
        # Home Assistant will retry the setup later.
        raise ConfigEntryNotReady
    motion_gateway = connect_gateway_class.gateway_device

    def update_gateway():
        """Call all updates using one async_add_executor_job."""
        motion_gateway.Update()
        for blind in motion_gateway.device_list.values():
            try:
                blind.Update()
            except timeout:
                # let the error be logged and handled by the motionblinds library
                pass

    async def async_update_data():
        """Fetch data from the gateway and blinds."""
        try:
            await hass.async_add_executor_job(update_gateway)
        except timeout:
            # let the error be logged and handled by the motionblinds library
            pass

    coordinator = DataUpdateCoordinator(
        hass,
        _LOGGER,
        # Name of the data. For logging purposes.
        name=entry.title,
        update_method=async_update_data,
        # Polling interval. Will only be polled if there are subscribers.
        update_interval=timedelta(seconds=600),
    )

    # Fetch initial data so we have data when entities subscribe
    await coordinator.async_refresh()

    hass.data[DOMAIN][entry.entry_id] = {
        KEY_GATEWAY: motion_gateway,
        KEY_COORDINATOR: coordinator,
    }

    # Register the Wi-Fi bridge itself in the device registry.
    device_registry = await dr.async_get_registry(hass)
    device_registry.async_get_or_create(
        config_entry_id=entry.entry_id,
        connections={(dr.CONNECTION_NETWORK_MAC, motion_gateway.mac)},
        identifiers={(DOMAIN, entry.unique_id)},
        manufacturer=MANUFACTURER,
        name=entry.title,
        model="Wi-Fi bridge",
        sw_version=motion_gateway.protocol,
    )

    # Forward the entry setup to every entity platform (cover, sensor, ...).
    for component in MOTION_PLATFORMS:
        hass.async_create_task(
            hass.config_entries.async_forward_entry_setup(entry, component)
        )

    return True
async def async_unload_entry(
    hass: core.HomeAssistant, config_entry: config_entries.ConfigEntry
):
    """Unload a config entry.

    Unloads every forwarded platform; when the last gateway entry is gone,
    also stops the shared multicast listener.
    """
    # All platform unloads must succeed for the entry to count as unloaded.
    unload_ok = all(
        await asyncio.gather(
            *[
                hass.config_entries.async_forward_entry_unload(config_entry, component)
                for component in MOTION_PLATFORMS
            ]
        )
    )
    if unload_ok:
        hass.data[DOMAIN].pop(config_entry.entry_id)
        # len == 1 means only the shared multicast listener key remains.
        if len(hass.data[DOMAIN]) == 1:
            # No motion gateways left, stop Motion multicast
            _LOGGER.debug("Shutting down Motion Listener")
            multicast = hass.data[DOMAIN].pop(KEY_MULTICAST_LISTENER)
            await hass.async_add_executor_job(multicast.Stop_listen)
    return unload_ok
|
#!flask/bin/python
import os
import unittest
from coverage import coverage
# Start coverage measurement BEFORE the app modules are imported below so
# their import-time code is counted; exclude the virtualenv and this script.
cov = coverage(branch = True, omit = ['flask/*', 'tests.py'])
cov.start()
from config import basedir
from app import app, db
from app.models import User
from datetime import datetime, timedelta
from app.models import User, Post
class TestCase(unittest.TestCase):
    """Model-level tests for the microblog app (User and Post behaviour).

    Each test runs against a throwaway SQLite database created in setUp()
    and dropped in tearDown().
    """

    def setUp(self):
        # Test configuration: no CSRF, dedicated on-disk SQLite DB.
        app.config['TESTING'] = True
        app.config['CSRF_ENABLED'] = False
        app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///' + os.path.join(basedir, 'test.db')
        self.app = app.test_client()
        db.create_all()

    def tearDown(self):
        db.session.remove()
        db.drop_all()

    def test_avatar(self):
        # The Gravatar URL embeds the MD5 hash of the e-mail address.
        u = User(nickname = 'john', email = 'john@example.com')
        avatar = u.avatar(128)
        expected = 'http://www.gravatar.com/avatar/d4c74594d841139328695756648b6bd6'
        assert avatar[0:len(expected)] == expected

    def test_make_unique_nickname(self):
        # A taken nickname must be transformed into a new, unique one.
        u = User(nickname = 'john', email = 'john@example.com')
        db.session.add(u)
        db.session.commit()
        nickname = User.make_unique_nickname('susan')
        assert nickname == 'susan'
        nickname = User.make_unique_nickname('john')
        assert nickname != 'john'
        # make another user with the new nickname
        u = User(nickname = nickname, email = 'susan@example.com')
        db.session.add(u)
        db.session.commit()
        nickname2 = User.make_unique_nickname('john')
        assert nickname2 != 'john'
        assert nickname2 != nickname

    def test_follow(self):
        # follow()/unfollow() return None when the operation is a no-op.
        u1 = User(nickname = 'john', email = 'john@example.com')
        u2 = User(nickname = 'susan', email = 'susan@example.com')
        db.session.add(u1)
        db.session.add(u2)
        db.session.commit()
        assert u1.unfollow(u2) == None
        u = u1.follow(u2)
        db.session.add(u)
        db.session.commit()
        assert u1.follow(u2) == None
        assert u1.is_following(u2)
        assert u1.followed.count() == 1
        assert u1.followed.first().nickname == 'susan'
        assert u2.followers.count() == 1
        assert u2.followers.first().nickname == 'john'
        u = u1.unfollow(u2)
        assert u != None
        db.session.add(u)
        db.session.commit()
        assert u1.is_following(u2) == False
        assert u1.followed.count() == 0
        assert u2.followers.count() == 0

    def test_follow_posts(self):
        # followed_posts() must return the posts of followed users only,
        # newest first (timestamps are staggered by one second below).
        u1 = User(nickname = 'john', email = 'john@example.com')
        u2 = User(nickname = 'susan', email = 'susan@example.com')
        u3 = User(nickname = 'mary', email = 'mary@example.com')
        u4 = User(nickname = 'david', email = 'david@example.com')
        db.session.add(u1)
        db.session.add(u2)
        db.session.add(u3)
        db.session.add(u4)
        # make four posts
        utcnow = datetime.utcnow()
        p1 = Post(body = "post from john", author = u1, timestamp = utcnow + timedelta(seconds = 1))
        p2 = Post(body = "post from susan", author = u2, timestamp = utcnow + timedelta(seconds = 2))
        p3 = Post(body = "post from mary", author = u3, timestamp = utcnow + timedelta(seconds = 3))
        p4 = Post(body = "post from david", author = u4, timestamp = utcnow + timedelta(seconds = 4))
        db.session.add(p1)
        db.session.add(p2)
        db.session.add(p3)
        db.session.add(p4)
        db.session.commit()
        # setup the followers
        u1.follow(u1) # john follows himself
        u1.follow(u2) # john follows susan
        u1.follow(u4) # john follows david
        u2.follow(u2) # susan follows herself
        u2.follow(u3) # susan follows mary
        u3.follow(u3) # mary follows herself
        u3.follow(u4) # mary follows david
        u4.follow(u4) # david follows himself
        db.session.add(u1)
        db.session.add(u2)
        db.session.add(u3)
        db.session.add(u4)
        db.session.commit()
        # check the followed posts of each user
        f1 = u1.followed_posts().all()
        f2 = u2.followed_posts().all()
        f3 = u3.followed_posts().all()
        f4 = u4.followed_posts().all()
        assert len(f1) == 3
        assert len(f2) == 2
        assert len(f3) == 2
        assert len(f4) == 1
        assert f1 == [p4, p2, p1]
        assert f2 == [p3, p2]
        assert f3 == [p4, p3]
        assert f4 == [p4]

    def test_delete_post(self):
        # Regression test: deleting an object loaded in a destroyed session
        # must still work from a freshly created session.
        # create a user and a post
        u = User(nickname = 'john', email = 'john@example.com')
        p = Post(body = 'test post', author = u, timestamp = datetime.utcnow())
        db.session.add(u)
        db.session.add(p)
        db.session.commit()
        # query the post and destroy the session
        p = Post.query.get(1)
        db.session.remove()
        # delete the post using a new session
        db.session = db.create_scoped_session()
        db.session.delete(p)
        db.session.commit()

    def test_user(self):
        # make valid nicknames: invalid characters must be stripped
        n = User.make_valid_nickname('John_123')
        assert n == 'John_123'
        n = User.make_valid_nickname('John_[123]\n')
        assert n == 'John_123'
        # create a user and check the Flask-Login interface methods
        u = User(nickname = 'john', email = 'john@example.com')
        db.session.add(u)
        db.session.commit()
        assert u.is_authenticated() == True
        assert u.is_active() == True
        assert u.is_anonymous() == False
        assert u.id == int(u.get_id())

    # NOTE(review): this __repr__ on a TestCase looks like a copy/paste
    # leftover from the User model — TestCase instances have no 'nickname'
    # attribute. Confirm and remove.
    def __repr__(self):
        return '<User %r>' % (self.nickname)
if __name__ == '__main__':
    try:
        unittest.main()
    except:
        # unittest.main() calls sys.exit(); swallow it so the coverage
        # report below still runs. (A bare except also hides real errors —
        # catching SystemExit explicitly would be safer.)
        pass
    # Stop measuring and emit both the console and the HTML coverage report.
    cov.stop()
    cov.save()
    print "\n\nCoverage Report:\n"
    cov.report()
    print "HTML version: " + os.path.join(basedir, "tmp/coverage/index.html")
    cov.html_report(directory = 'tmp/coverage')
    cov.erase()
|
import os
import requests
| import json
import pandas as pd
import numpy as np
import time
from datetime import | datetime
# NOTE(review): API key hard-coded in source — rotate it and load it from an
# environment variable or a config file instead of committing it.
TMDB_KEY = "60027f35df522f00e57a79b9d3568423"
"""
def get_tmdb_id_list():
#function to get all Tmdb_id between 06-16
import requests
import json
# from year 1996-2016
year = range(2006,2017)
## 50 pages
page_num = range(1,50)
id_list = []
tmdb_id_query = "https://api.themoviedb.org/3/discover/movie?" \
+ "api_key=%s" \
+ "&language=en-US&sort_by=release_date.asc" \
+ "&include_adult=false&include_video=false" \
+ "&page=%d" \
+ "&primary_release_year=%d"
for n in page_num:
for yr in year:
rq = requests.get(tmdb_id_query % (TMDB_KEY,n,yr)).json()
for item in rq['results']:
id_list.append(item['id'])
return id_list
start = time.time()
ID_LIST = get_tmdb_id_list()
stop = time.time()
print(ID_LIST)
print(stop - start)
"""
# Movie-details endpoint: %d is the TMDB movie id, %s the API key.
query = "https://api.themoviedb.org/3/movie/%d?" \
    +"api_key=%s" \
    +"&language=en-US"
movie_id = 78
# Fetch a single movie's details as a dict (movie 78 used as a smoke test).
request = requests.get(query %(movie_id,TMDB_KEY)).json()
|
import numpy as np
from HPOlibConfigSpace.configuration_space import ConfigurationSpace
from HPOlibConfigSpace.hyperparameters import CategoricalHyperparameter
from ParamSklearn.components.base import \
ParamSklearnPreprocessingAlgorithm
from ParamSklearn.constants import *
class Balancing(ParamSklearnPreprocessingAlgorithm):
    """'Preprocessor' that balances imbalanced class distributions.

    The component itself is a no-op transform; the actual balancing happens
    in get_weights(), which converts the class distribution of Y into
    sample_weight fit-params or class_weight init-params for the chosen
    classifier/preprocessor.
    """

    def __init__(self, strategy, random_state=None):
        # strategy: 'none' or 'weighting' (see the search space below).
        # NOTE(review): random_state is accepted for interface uniformity
        # but never stored or used.
        self.strategy = strategy

    def fit(self, X, y=None):
        # Stateless: nothing to fit.
        return self

    def transform(self, X):
        # Identity transform; balancing is applied only through weights.
        return X

    def get_weights(self, Y, classifier, preprocessor, init_params, fit_params):
        """Return (init_params, fit_params) augmented with balancing weights.

        Depending on the target estimator, balancing is expressed either as
        per-sample weights (fit_params) or as a class_weight setting
        (init_params). Weights are inverse class frequencies, normalized to
        mean 1.
        """
        if init_params is None:
            init_params = {}

        if fit_params is None:
            fit_params = {}

        # Classifiers which require sample weights:
        # We can have adaboost in here, because in the fit method,
        # the sample weights are normalized:
        # https://github.com/scikit-learn/scikit-learn/blob/0.15.X/sklearn/ensemble/weight_boosting.py#L121
        clf_ = ['adaboost', 'gradient_boosting']
        pre_ = []
        if classifier in clf_ or preprocessor in pre_:
            if len(Y.shape) > 1:
                # Multilabel: collapse the label matrix into a single class
                # id per row via a binary encoding.
                offsets = [2 ** i for i in range(Y.shape[1])]
                Y_ = np.sum(Y * offsets, axis=1)
            else:
                Y_ = Y

            unique, counts = np.unique(Y_, return_counts=True)
            cw = 1. / counts
            cw = cw / np.mean(cw)

            sample_weights = np.ones(Y_.shape)

            for i, ue in enumerate(unique):
                mask = Y_ == ue
                sample_weights[mask] *= cw[i]

            if classifier in clf_:
                fit_params['classifier:sample_weight'] = sample_weights
            if preprocessor in pre_:
                fit_params['preprocessor:sample_weight'] = sample_weights

        # Classifiers which can adjust sample weights themselves via the
        # argument `class_weight`
        clf_ = ['decision_tree', 'extra_trees', 'liblinear_svc',
                'libsvm_svc', 'random_forest', 'sgd']
        pre_ = ['liblinear_svc_preprocessor',
                'extra_trees_preproc_for_classification']
        if classifier in clf_:
            init_params['classifier:class_weight'] = 'auto'
        if preprocessor in pre_:
            init_params['preprocessor:class_weight'] = 'auto'

        # Ridge has no 'auto' mode; pass an explicit class->weight mapping.
        clf_ = ['ridge']
        if classifier in clf_:
            class_weights = {}

            unique, counts = np.unique(Y, return_counts=True)
            cw = 1. / counts
            cw = cw / np.mean(cw)

            for i, ue in enumerate(unique):
                class_weights[ue] = cw[i]

            if classifier in clf_:
                init_params['classifier:class_weight'] = class_weights

        return init_params, fit_params

    @staticmethod
    def get_properties(dataset_properties=None):
        # Static capability description consumed by the pipeline builder.
        return {'shortname': 'Balancing',
                'name': 'Balancing Imbalanced Class Distributions',
                'handles_missing_values': True,
                'handles_nominal_values': True,
                'handles_numerical_features': True,
                'prefers_data_scaled': False,
                'prefers_data_normalized': False,
                'handles_regression': False,
                'handles_classification': True,
                'handles_multiclass': True,
                'handles_multilabel': True,
                'is_deterministic': True,
                'handles_sparse': True,
                'handles_dense': True,
                'input': (DENSE, SPARSE, UNSIGNED_DATA, SIGNED_DATA),
                'output': (INPUT,),
                'preferred_dtype': None}

    @staticmethod
    def get_hyperparameter_search_space(dataset_properties=None):
        # TODO add replace by zero!
        # Single categorical choice: apply weighting or do nothing.
        strategy = CategoricalHyperparameter(
            "strategy", ["none", "weighting"], default="none")
        cs = ConfigurationSpace()
        cs.add_hyperparameter(strategy)
        return cs

    def __str__(self):
        name = self.get_properties()['name']
        return "ParamSklearn %s" % name
|
from django.template.defaultfilters import rjust
from django.test import SimpleTestCase
from django.utils.safestring import mark_safe
from ..utils import setup
class RjustTests(SimpleTestCase):
    """Template tests for the `rjust` filter and its autoescape interaction.

    Expected strings restored from a corrupted export (stray " | ",
    HTML-decoded entities, collapsed double spaces): rjust:"5" on the
    3-char "a&b" yields two leading spaces, and with autoescape on the
    unsafe value is escaped to "a&amp;b".
    """

    @setup({'rjust01': '{% autoescape off %}.{{ a|rjust:"5" }}. .{{ b|rjust:"5" }}.{% endautoescape %}'})
    def test_rjust01(self):
        output = self.engine.render_to_string('rjust01', {"a": "a&b", "b": mark_safe("a&b")})
        self.assertEqual(output, ".  a&b. .  a&b.")

    @setup({'rjust02': '.{{ a|rjust:"5" }}. .{{ b|rjust:"5" }}.'})
    def test_rjust02(self):
        output = self.engine.render_to_string('rjust02', {"a": "a&b", "b": mark_safe("a&b")})
        self.assertEqual(output, ".  a&amp;b. .  a&b.")
class FunctionTests(SimpleTestCase):
    """Direct tests of the rjust filter function.

    Restored from corrupted source: "te | st_rjust" and the collapsed
    whitespace in the 10-wide expected value (4-char 'test' right-justified
    in 10 columns has six leading spaces).
    """

    def test_rjust(self):
        self.assertEqual(rjust('test', 10), '      test')

    def test_less_than_string_length(self):
        self.assertEqual(rjust('test', 3), 'test')

    def test_non_string_input(self):
        self.assertEqual(rjust(123, 4), ' 123')
|
# coding: utf-8
"""
Wavefront REST API Documentation
<p>The Wavefront REST API enables you to interact with Wavefront servers using standard REST API tools. You can use the REST API to automate commonly executed operations such as automatically tagging sources.</p><p>When you make REST API calls outside the Wavefront REST API documentation you must add the header \"Authorization: Bearer <<API-TOKEN>>\" to your HTTP requests.</p>  # noqa: E501
OpenAPI spec version: v2
Contact: chitimba@wavefront.com
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
from wavefront_api_client.configuration import Configuration
class ResponseContainerPagedMaintenanceWindow(object):
    """NOTE: This class is auto generated by the swagger code generator program.

    Do not edit the class manually.
    """
    """
    Attributes:
      swagger_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    # Maps python attribute name -> swagger model type name.
    swagger_types = {
        'response': 'PagedMaintenanceWindow',
        'status': 'ResponseStatus'
    }

    # Maps python attribute name -> JSON key in the wire format.
    attribute_map = {
        'response': 'response',
        'status': 'status'
    }

    def __init__(self, response=None, status=None, _configuration=None):  # noqa: E501
        """ResponseContainerPagedMaintenanceWindow - a model defined in Swagger"""  # noqa: E501
        if _configuration is None:
            _configuration = Configuration()
        self._configuration = _configuration

        self._response = None
        self._status = None
        self.discriminator = None

        # `response` is optional; `status` is assigned unconditionally and its
        # setter rejects None when client-side validation is enabled.
        if response is not None:
            self.response = response
        self.status = status

    @property
    def response(self):
        """Gets the response of this ResponseContainerPagedMaintenanceWindow.  # noqa: E501

        :return: The response of this ResponseContainerPagedMaintenanceWindow.  # noqa: E501
        :rtype: PagedMaintenanceWindow
        """
        return self._response

    @response.setter
    def response(self, response):
        """Sets the response of this ResponseContainerPagedMaintenanceWindow.

        :param response: The response of this ResponseContainerPagedMaintenanceWindow.  # noqa: E501
        :type: PagedMaintenanceWindow
        """
        self._response = response

    @property
    def status(self):
        """Gets the status of this ResponseContainerPagedMaintenanceWindow.  # noqa: E501

        :return: The status of this ResponseContainerPagedMaintenanceWindow.  # noqa: E501
        :rtype: ResponseStatus
        """
        return self._status

    @status.setter
    def status(self, status):
        """Sets the status of this ResponseContainerPagedMaintenanceWindow.

        :param status: The status of this ResponseContainerPagedMaintenanceWindow.  # noqa: E501
        :type: ResponseStatus
        """
        # `status` is a required field in the API contract.
        if self._configuration.client_side_validation and status is None:
            raise ValueError("Invalid value for `status`, must not be `None`")  # noqa: E501
        self._status = status

    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}

        # Recursively convert nested models (and lists/dicts of them).
        for attr, _ in six.iteritems(self.swagger_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                result[attr] = value
        if issubclass(ResponseContainerPagedMaintenanceWindow, dict):
            for key, value in self.items():
                result[key] = value

        return result

    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()

    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, ResponseContainerPagedMaintenanceWindow):
            return False

        return self.to_dict() == other.to_dict()

    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        if not isinstance(other, ResponseContainerPagedMaintenanceWindow):
            return True

        return self.to_dict() != other.to_dict()
|
# -*- coding: utf-8 -*-
# © 2017 Savoir-faire Linux
# License LGPL-3.0 or later (http://www.gnu.org/licenses/lgpl).
from odoo import _, api, fields, models
class ResPartner(models.Model):
    """Partner extension: mirror parent assignment as a 'Work Relation'.

    Restored from corrupted source ("super(ResP | artner, ..." and a stray
    " | " before a closing brace).
    """
    _inherit = 'res.partner'

    @api.onchange('parent_id')
    def onchange_parent_id(self):
        """Refuse a parent when no relation type is flagged as Work Relation."""
        res = super(ResPartner, self).onchange_parent_id()
        if self.parent_id:
            work_relation_type = self.env['res.partner.relation.type'].search([
                ('is_work_relation', '=', True),
            ])
            if not work_relation_type:
                res['warning'] = {
                    'title': _('Warning'),
                    'message': _('You cannot set a parent entity, as there is '
                                 'not any partner relation type flagged as '
                                 '"Work Relation".')
                }
                self.parent_id = False
        return res

    @api.model
    def create(self, vals):
        """
        Create a relation between a contact and its parent only when the
        parent is a company.
        """
        res = super(ResPartner, self).create(vals)
        if res.parent_id and res.parent_id.is_company:
            work_relation_type = self.env['res.partner.relation.type'].search([
                ('is_work_relation', '=', True),
            ])
            self.env['res.partner.relation'].create({
                'left_partner_id': res.id,
                'right_partner_id': res.parent_id.id,
                'type_id': work_relation_type.id,
                'date_start': fields.Date.today(),
            })
        return res
|
from django.contrib import admin
from .models import Lesson, Series
class LessonAdmin(admin.ModelAdmin):
    """Default admin for Lesson; no customisation yet."""
    pass
class SeriesAdmin(admin.ModelAdmin):
    """Default admin for Series; no customisation yet."""
    pass
# Register both models with the default admin site.
# (Restored corrupted identifier: "Ser | iesAdmin" -> SeriesAdmin.)
admin.site.register(Lesson, LessonAdmin)
admin.site.register(Series, SeriesAdmin)
Information about a referenced publication.
"""
def __init__(self, doi=None, isbn=None, issn=None, url=None, title=None, publisher=None, journal=None, volume=None,
issue=None, year=None, figure=None, table=None, pages=None, authors=None, editors=None,
affiliations=None, acknowledgements=None, references=None, tags=None, **kwargs):
"""
Constructor.
:param doi: String with DOI of the published work
:param isbn: String with ISBN of the published work
:param issn: String with ISSN of the published work
:param url: String with URL to the published work
:param title: String with title of the published work.
:param publisher: String with publisher of the work.
:param journal: String with the journal in which the work was published.
:param volume: String with the volume in which the work was published.
:param issue: String with the issue in which the work was published.
:param year: String with the year in which the work was published.
:param figure: Dictionary or :class:`.DisplayItem` object with the figure to reference.
:param table: Dictionary or :class:`.DisplayItem` object with the table to reference.
:param pages: String, integer, dictionary, or :class:`.Pages` object with the starting and ending pages for
the published work.
:param authors: List of strings, dictionaries, or :class:`.Name` objects with information about the authors.
:param editors: List of strings, dictionaries, or :class:`.Name` objects with information about the editors.
:param affiliations: List of strings with affiliations.
:param acknowledgements: List of strings with acknowledgements.
:param references: List of dictionaries or :class:`.Reference` objects with works cited by this published work.
:param tags: List of strings or numbers that are tags for this object.
:param kwargs: Dictionary of fields that are not supported.
"""
super(Reference, self).__init__(tags=tags, **kwargs)
self._doi = None
self.doi = doi
self._isbn = None
self.isbn = isbn
self._issn = None
self.issn = issn
self._url = None
self.url = url
self._title = None
self.title = title
self._publisher = None
self.publisher = publisher
self._journal = None
self.journal = journal
self._volume = None
self.volume = volume
self._issue = None
self.issue = issue
self._year = None
self.year = year
self._figure = None
self.figure = figure
self._table = None
self.table = table
self._pages = None
self.pages = pages
self._authors = None
self.authors = authors
self._editors = None
self.editors = editors
self._affiliations = None
self.affiliations = affiliations
self._acknowledgements = None
self.acknowledgements = acknowledgements
self._references = None
self.references = references
@property
def doi(self):
return self._doi
@doi.setter
def doi(self, doi):
self._validate_type('doi', doi, string_types)
self._doi = doi
@doi.deleter
def doi(self):
self._doi = None
@property
def isbn(self):
return self._isbn
@isbn.setter
def isbn(self, isbn):
self._validate_type('isbn', isbn, string_types)
self._isbn = isbn
@isbn.deleter
def isbn(self):
self._isbn = None
@property
def issn(self):
return self.issn
@issn.setter
def issn(self, issn):
self._validate_type('issn', issn, string_types)
self._issn = issn
@issn.deleter
def issn(self):
self._issn = None
@property
def url(self):
return self._url
@url.setter
def url(self, url):
self._validate_type('url', url, string_types)
self._url = url
@url.deleter
def url(self):
self._url = None
@property
def title(self):
return self._title
@title.setter
def title(self, title):
self._validate_type('title', title, string_types)
self._title = title
@title.deleter
def title(self):
self._title = None
@property
def publisher(self):
return self._publisher
@publisher.setter
def publisher(self, publisher):
self._validate_type('publisher', publisher, string_types)
self._publisher = publisher
@publisher.deleter
def publisher(self):
self._publisher = None
@property
def journal(self):
return self._journal
@journal.setter
def journal(self, journal):
self._validate_type('journal', journal, string_types)
self._journal = journal
@journal.deleter
def journal(self):
self._journal = None
@property
def volume(self):
return self._volume
@volume.setter
def volume(self, volume):
self._validate_type('volume', volume, string_types)
self._volume = volume
@volume.deleter
def volume(self):
self._volume = None
@property
def issue(self):
return self._issue
@issue.setter
def issue(self, issue):
self._validate_type('issue', issue, string_types)
self._issue = issue
@issue.deleter
def issue(self):
self._issue = None
@property
def year(self):
return self._year
@year.setter
def year(self, year):
self._validate_type('year', year, string_types)
self._year = year
@year.deleter
def year(self):
self._year = None
@property
def figure(self):
return self._figure
@figure.setter
def figure(self, figure):
self._validate_type('figure', figure, dict, DisplayItem)
self._figure = self._get_object(DisplayItem, figure)
@figure.deleter
def figure(self):
self._figure = None
@property
def table(self):
return self._table
@table.setter
| def table(self, table):
self._validate_type('table', table, dict, DisplayItem)
self._table = self._get_object(DisplayItem, table)
@table.deleter
def table(self):
self._table = None
@property
def pages(self):
return self._pages
@pages.setter
def pages(self, pages):
self._validate_type('pages', pages, string_types, int, dict, Pages)
self._pages = self._get_object(Pages, pages)
@pages.deleter
def pages(self):
self | ._pages = None
@property
def authors(self):
return self._authors
@authors.setter
def authors(self, authors):
self._validate_list_type('authors', authors, string_types, dict, Name)
self._authors = self._get_object(Name, authors)
@authors.deleter
def authors(self):
self._authors = None
@property
def editors(self):
return self._editors
@editors.setter
def editors(self, editors):
self._validate_list_type('editors', editors, string_types, dict, Name)
self._editors = self._get_object(Name, editors)
@editors.deleter
def editors(self):
self._editors = None
@property
def affiliations(self):
return self._affiliations
@affiliations.setter
def affiliations(self, affiliations):
self._validate_list_type('affiliations', affiliations, string_types)
self._affiliations = affiliations
@affiliations.deleter
def affiliations(self):
self._affiliations = None
@property
def acknowledgements(self):
return self._acknowledgements
@acknowledgements.setter
def acknowledgements(self, acknowledgements):
self._validate_list_type('acknowledgements', acknowledgements, string_types)
self._acknowledgements = acknowledgements
@acknowledgements.deleter
def acknowledgements(self):
self._acknowledgements = None
@property
|
#!/usr/bin/env python
"""Django management entry point for the academicControl project.

Restored from corrupted source (garbled shebang and "sys.a | rgv").
"""
import os
import sys

# NOTE(review): MySQLdb is never referenced below; presumably imported so the
# command fails fast when the DB driver is missing -- confirm before removing.
import MySQLdb

if __name__ == "__main__":
    os.environ.setdefault("DJANGO_SETTINGS_MODULE", "academicControl.settings")
    from django.core.management import execute_from_command_line
    execute_from_command_line(sys.argv)
|
# -*- coding: utf-8 -*-
#-------------------------------------------------------------------------
# drawElements Quality Program utilities
# --------------------------------------
#
# Copyright 2015 The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#-------------------------------------------------------------------------
import os
import sys
from itertools import chain
# Warning banner stamped at the top of every generated .inl file.
# (Restored corrupted text: "changes | will" -> "changes will".)
INL_HEADER_TMPL = """\
/* WARNING: This is auto-generated file. Do not modify, since changes will
 * be lost! Modify the generating script instead.
 *
 * Generated from {registryName} revision {revision}.
 */\
"""
def genInlHeader (registryName, revision):
    """Render the auto-generated-file warning header for a registry."""
    return INL_HEADER_TMPL.format(registryName=registryName,
                                  revision=str(revision))
def genInlHeaderForSource (registrySource):
    """Build the warning header from a registry source object.

    BUGFIX: previously called itself (infinite recursion); it must delegate
    to genInlHeader with the source's filename and revision.
    """
    return genInlHeader(registrySource.getFilename(), registrySource.getRevision())
def nextMod (val, mod):
    """Round val up to the next multiple of mod that is strictly greater."""
    # Note: int(val/mod) truncates toward zero, matching historic behaviour.
    return val + mod if val % mod == 0 else int(val / mod) * mod + mod
def indentLines (lines):
    """Align tab-separated columns across lines; yields the padded lines.

    Each column is padded with tabs to the longest entry in that column,
    rounded up to the next 4-wide tab stop.  None entries are dropped.
    """
    tabSize = 4
    rows = [line.split("\t") for line in lines if line is not None]
    if not rows:
        return
    numColumns = max(len(row) for row in rows)
    # Width per column: longest cell, rounded up to the next tab stop.
    widths = [nextMod(max(len(row[ndx]) for row in rows if len(row) > ndx), tabSize)
              for ndx in range(numColumns)]
    for row in rows:
        padded = []
        for ndx, cell in enumerate(row[:-1]):
            width = len(cell)
            while width < widths[ndx]:
                cell += "\t"
                width = nextMod(width, tabSize)
            padded.append(cell)
        # The last column is never padded.
        padded.append(row[-1])
        yield "".join(padded)
def readFile (filename):
    """Return the raw bytes of filename."""
    with open(filename, 'rb') as f:
        return f.read()
def writeFileIfChanged (filename, data):
    """Write text `data` to filename, but only when the content differs.

    BUGFIX: readFile() returns bytes while `data` is text, so the old
    `readFile(filename) != data` comparison was always True on Python 3
    and the file was rewritten (and re-touched) on every run.  Decode the
    on-disk bytes before comparing.
    """
    if not os.path.exists(filename) or readFile(filename).decode('utf-8') != data:
        if (sys.version_info < (3, 0)):
            f = open(filename, 'wt')
        else:
            # Force LF newlines so generated files are stable across platforms.
            f = open(filename, 'wt', newline='\n')
        f.write(data)
        f.close()
def writeLines (filename, lines):
    """Join lines with trailing newlines and write them (if changed)."""
    text = "".join("%s\n" % line for line in lines)
    writeFileIfChanged(filename, text)
    print(filename)
def writeInlFile (filename, header, source):
    """Write the warning header followed by the source lines to filename."""
    allLines = chain([header], source)
    writeLines(filename, allLines)
def normalizeConstant (constant):
    """Append the C integer-literal suffix required by the constant's magnitude."""
    value = int(constant, base=0)
    # Thresholds checked largest-first: 64-bit unsigned, 64-bit, 32-bit unsigned.
    for limit, suffix in ((1 << 63, 'ull'), (1 << 32, 'll'), (1 << 31, 'u')):
        if value >= limit:
            return constant + suffix
    return constant
def commandParams (command):
    """C parameter list for command, or "void" when it takes no parameters."""
    if not command.params:
        return "void"
    return ", ".join(param.declaration for param in command.params)
def commandArgs (command):
    """Comma-separated argument names for invoking command."""
    return ", ".join(param.name for param in command.params)
|
from spdy.frames import *
from spdy._zlib_stream import Inflater, Deflater
from bitarray import bitarray
SERVER = 'SERVER'
CLIENT = 'CLIENT'
class SpdyProtocolError(Exception):
    """Raised when an incoming or outgoing SPDY frame violates the protocol."""
    pass
def _bitmask(length, split, mask=0):
invert = 1 if mask == 0 else 0
b = str(mask)*split + str(invert)*(length-split)
return int(b, 2)
# 0b10000000 -- selects the control-frame flag (top bit of the first byte).
_first_bit = _bitmask(8, 1, 1)
# 0x7fff -- masks off the top bit of a 16-bit value (SPDY version field).
_last_15_bits = _bitmask(16, 1, 0)
class Context(object):
    """Per-connection SPDY state for one side of the link.

    Handles stream/ping id allocation, header block (de)compression and
    frame (de)serialisation.

    BUGFIX: the incomplete-data-frame branch of _parse_frame() returned
    (0, None) while every other path (and get_frame(), which unpacks
    (frame, bytes_parsed)) uses (None, 0); made it consistent.
    """

    def __init__(self, side, version=2):
        if side not in (SERVER, CLIENT):
            raise TypeError("side must be SERVER or CLIENT")
        if not version in VERSIONS:
            raise NotImplementedError()
        self.version = version
        self.deflater = Deflater(version)
        self.inflater = Inflater(version)
        self.frame_queue = []
        self.input_buffer = bytearray()
        # Servers allocate even stream/ping ids, clients odd ones.
        if side == SERVER:
            self._stream_id = 2
            self._ping_id = 2
        else:
            self._stream_id = 1
            self._ping_id = 1

    @property
    def next_stream_id(self):
        """Allocate and return the next stream id for this side."""
        sid = self._stream_id
        self._stream_id += 2
        return sid

    @property
    def next_ping_id(self):
        """Allocate and return the next ping id for this side."""
        pid = self._ping_id
        self._ping_id += 2
        return pid

    def incoming(self, chunk):
        """Append received wire bytes to the parse buffer."""
        self.input_buffer.extend(chunk)

    def get_frame(self):
        """Parse one frame from the buffer; None when no complete frame yet."""
        frame, bytes_parsed = self._parse_frame(bytes(self.input_buffer))
        if bytes_parsed:
            self.input_buffer = self.input_buffer[bytes_parsed:]
        return frame

    def put_frame(self, frame):
        """Queue a frame for serialisation by outgoing()."""
        if not isinstance(frame, Frame):
            raise TypeError("frame must be a valid Frame object")
        self.frame_queue.append(frame)

    def outgoing(self):
        """Serialise and drain every queued frame; returns the wire bytes."""
        out = bytearray()
        while len(self.frame_queue) > 0:
            frame = self.frame_queue.pop(0)
            out.extend(self._encode_frame(frame))
        return out

    def _parse_header_chunk(self, compressed_data, version):
        """Decompress and parse a SPDY name/value block into a dict.

        Raises SpdyProtocolError for zero-length or duplicate names.
        """
        chunk = self.inflater.decompress(compressed_data)
        # SPDY/2 uses 16-bit length fields; SPDY/3 uses 32-bit.
        length_size = 2 if version == 2 else 4
        headers = {}
        #first two bytes: number of pairs
        num_values = int.from_bytes(chunk[0:length_size], 'big')
        #after that...
        cursor = length_size
        for _ in range(num_values):
            #two/four bytes: length of name
            name_length = int.from_bytes(chunk[cursor:cursor+length_size], 'big')
            cursor += length_size
            #next name_length bytes: name
            name = chunk[cursor:cursor+name_length].decode('UTF-8')
            cursor += name_length
            #two/four bytes: length of value
            value_length = int.from_bytes(chunk[cursor:cursor+length_size], 'big')
            cursor += length_size
            #next value_length bytes: value
            value = chunk[cursor:cursor+value_length].decode('UTF-8')
            cursor += value_length
            if name_length == 0 or value_length == 0:
                raise SpdyProtocolError("zero-length name or value in n/v block")
            if name in headers:
                raise SpdyProtocolError("duplicate name in n/v block")
            headers[name] = value
        return headers

    def _parse_frame(self, chunk):
        """Parse a single frame from the start of chunk.

        Returns (frame, bytes_consumed); (None, 0) when chunk does not yet
        contain a complete frame.
        """
        if len(chunk) < 8:
            return (None, 0)
        #first bit: control or data frame?
        control_frame = (chunk[0] & _first_bit == _first_bit)
        if control_frame:
            #second byte (and rest of first, after the first bit): spdy version
            spdy_version = int.from_bytes(chunk[0:2], 'big') & _last_15_bits
            if spdy_version != self.version:
                raise SpdyProtocolError("incorrect SPDY version")
            #third and fourth byte: frame type
            frame_type = int.from_bytes(chunk[2:4], 'big')
            if not frame_type in FRAME_TYPES:
                raise SpdyProtocolError("invalid frame type: {0}".format(frame_type))
            #fifth byte: flags
            flags = chunk[4]
            #sixth, seventh and eighth bytes: length
            length = int.from_bytes(chunk[5:8], 'big')
            frame_length = length + 8
            if len(chunk) < frame_length:
                return (None, 0)
            #the rest is data
            data = chunk[8:frame_length]
            bits = bitarray()
            bits.frombytes(data)
            frame_cls = FRAME_TYPES[frame_type]
            args = {
                'version': spdy_version,
                'flags': flags
            }
            for key, num_bits in frame_cls.definition(spdy_version):
                if not key:
                    # Unnamed field: skip the reserved bits.
                    bits = bits[num_bits:]
                    continue
                if num_bits == -1:
                    # -1 means "the rest of the frame".
                    value = bits
                else:
                    value = bits[:num_bits]
                    bits = bits[num_bits:]
                if key == 'headers': #headers are compressed
                    args[key] = self._parse_header_chunk(value.tobytes(), self.version)
                else:
                    #we have to pad values on the left, because bitarray will assume
                    #that you want it padded from the right
                    gap = len(value) % 8
                    if gap:
                        zeroes = bitarray(8 - gap)
                        zeroes.setall(False)
                        value = zeroes + value
                    args[key] = int.from_bytes(value.tobytes(), 'big')
                if num_bits == -1:
                    break
            frame = frame_cls(**args)
        else: #data frame
            #first four bytes, except the first bit: stream_id
            # NOTE(review): _ignore_first_bit is defined elsewhere in this module.
            stream_id = int.from_bytes(_ignore_first_bit(chunk[0:4]), 'big')
            #fifth byte: flags
            flags = chunk[4]
            #sixth, seventh and eight bytes: length
            length = int.from_bytes(chunk[5:8], 'big')
            frame_length = 8 + length
            if len(chunk) < frame_length:
                # BUGFIX: was `return (0, None)` -- swapped relative to the
                # (frame, bytes_parsed) contract used everywhere else.
                return (None, 0)
            data = chunk[8:frame_length]
            frame = DataFrame(stream_id, data)
        return (frame, frame_length)

    def _encode_header_chunk(self, headers):
        """Serialise and compress a header dict into a SPDY n/v block."""
        chunk = bytearray()
        #first two bytes: number of pairs
        chunk.extend(len(headers).to_bytes(2, 'big'))
        #after that...
        for name, value in headers.items():
            name = bytes(name, 'UTF-8')
            value = bytes(value, 'UTF-8')
            #two bytes: length of name
            chunk.extend(len(name).to_bytes(2, 'big'))
            #next name_length bytes: name
            chunk.extend(name)
            #two bytes: length of value
            chunk.extend(len(value).to_bytes(2, 'big'))
            #next value_length bytes: value
            chunk.extend(value)
        return self.deflater.compress(bytes(chunk))

    def _encode_frame(self, frame):
        """Serialise one Frame (control or data) into wire bytes."""
        out = bytearray()
        if frame.is_control:
            #first two bytes: version
            out.extend(frame.version.to_bytes(2, 'big'))
            #set the first bit to control
            out[0] = out[0] | _first_bit
            #third and fourth: frame type
            out.extend(frame.frame_type.to_bytes(2, 'big'))
            #fifth: flags
            out.append(frame.flags)
            bits = bitarray()
            for key, num_bits in frame.definition(self.version):
                if not key:
                    # Unnamed field: emit zeroed reserved bits.
                    zeroes = bitarray(num_bits)
                    zeroes.setall(False)
                    bits += zeroes
                    continue
                value = getattr(frame, key)
                if key == 'headers':
                    chunk = bitarray()
                    chunk.frombytes(self._encode_header_chunk(value))
                else:
                    chunk = bitarray(bin(value)[2:])
                    zeroes = bitarray(num_bits - len(chunk))
                    zeroes.setall(False)
                    chunk = zeroes + chunk #pad with zeroes
                bits += chunk
                if num_bits == -1:
                    break
            data = bits.tobytes()
            #sixth, seventh and eighth bytes: length
            out.extend(len(data).to_bytes(3, 'big'))
            # the rest is data
            out.extend(data)
        else: #data frame
            #first four bytes: stream_id
            out.extend(frame.stream_id.to_bytes(4, 'big'))
            #fifth: flags
            out.append(frame.flags)
            #sixth, seventh and eighth bytes: length
            data_length = len(frame.data)
            out.extend(data_length.to_bytes(3, 'big'))
            #rest is data
            out.extend(frame.data)
        return out
|
#!/bin/python
# coding: utf-8
import argparse
import sys
import signal
import traceback
import datetime
import lglass.object
import lglass_sql.nic
def objects(lines):
    """Yield parsed RPSL objects from an iterable of text/byte lines.

    Objects are separated by blank lines; '%'/'#' comment lines are skipped.
    """
    pending = []
    for raw in lines:
        if isinstance(raw, bytes):
            raw = raw.decode("iso-8859-15")
        if not raw.strip() and pending:
            # Blank line terminates the current object.
            yield lglass.object.parse_object(pending)
            pending = []
        elif raw[0] in {'%', '#'} or not raw.strip():
            continue
        else:
            pending.append(raw)
    # Flush a trailing object with no terminating blank line.
    if pending:
        yield lglass.object.parse_object(pending)
# Command-line interface: target database plus optional schema/encoding.
# NOTE(review): args.encoding is parsed but not used below -- confirm.
argparser = argparse.ArgumentParser()
argparser.add_argument("--schema", "-s")
argparser.add_argument("--encoding", "-e", default="iso-8859-15")
argparser.add_argument("database")
args = argparser.parse_args()
database = lglass_sql.nic.NicDatabase(args.database, schema=args.schema)
# Prefer a dedicated session when the backend provides one.
if hasattr(database, "session"):
    session = database.session()
else:
    session = database
print("Collecting local objects...", end='', flush=True)
# ids of every stored object; survivors of the input stream are removed,
# leaving the set of stale objects to delete at the end.
current_objects = set(session.all_ids())
print(" Done.")
stats = dict(created=0,
             updated=0,
             deleted=0,
             ignored=0,
             start=datetime.datetime.now())
def report():
    """Print progress statistics (also wired to SIGUSR1 by the main script).

    Restored from corrupted source: stray " | " inside the final format call.
    """
    global stats
    global current_objects
    print("Created {} / Updated {} / Deleted {} / "
          "Ignored {} objects in {}".format(stats["created"],
                                            stats["updated"],
                                            stats["deleted"],
                                            stats["ignored"],
                                            datetime.datetime.now() - stats["start"]))
    print("{} objects left".format(len(current_objects)))
# Allow progress inspection via SIGUSR1 while the import runs.
signal.signal(signal.SIGUSR1, lambda *args: report())
print("Creating or updating local objects...", end='', flush=True)
for obj in objects(sys.stdin.buffer):
    try:
        obj = database.create_object(obj)
        # Restored corrupted identifier: "prima | ry_spec" -> primary_spec.
        spec = database.primary_spec(obj)
        try:
            local_obj = session.fetch(*spec)
        except KeyError:
            # Not present locally yet.
            session.save(obj)
            stats["created"] += 1
            continue
        if local_obj.sql_id in current_objects:
            current_objects.remove(local_obj.sql_id)
        if local_obj == obj:
            stats["ignored"] += 1
            continue
        session.save(obj)
        stats["updated"] += 1
    except Exception as e:
        print("Error at object {!r}".format(obj))
        traceback.print_exc()
        stats["ignored"] += 1
print("Done")
print("Deleting local objects...", end='', flush=True)
# Whatever is left in current_objects did not appear in the input stream.
for id_ in current_objects:
    try:
        session.delete_by_id(id_)
        stats["deleted"] += 1
    except Exception as e:
        # BUGFIX: print_exc() takes a traceback limit, not an exception
        # instance; passing `e` raises a TypeError of its own.
        traceback.print_exc()
        stats["ignored"] += 1
if hasattr(session, "commit"):
    session.commit()
if hasattr(session, "close"):
    session.close()
print("Done")
report()
|
ENU)
VerifyKeymaps()
def DeleteKeymap(map):
    """Remove the named keymap file from the profile keymaps folder."""
    DeleteFile(os.path.join('special://profile/keymaps', map))
def DeleteFile(path):
    """Best-effort delete: retry up to five times, pausing when the FS is busy."""
    for _ in range(5):
        if not sfile.exists(path):
            return
        try:
            sfile.remove(path)
        except:
            xbmc.sleep(500)
def verifyLocation():
    """Reset FOLDER when it still points at a default addon_data location.

    Works around an Android bug in the browse-folder dialog.  Restored
    from corrupted source (stray " | " before the userdata assignment).
    """
    location = ADDON.getSetting('FOLDER')
    profile = 'special://profile/addon_data/plugin.program.super.favourites/'
    userdata = 'special://userdata/addon_data/plugin.program.super.favourites/'
    if (location == profile) or (location == userdata):
        ADDON.setSetting('FOLDER', '')
def verifyPlugins():
    """Ensure the addon's Plugins folder exists (best effort).

    Restored from corrupted source (stray " | " in the try line).
    """
    folder = os.path.join(ROOT, 'Plugins')
    if sfile.exists(folder):
        return
    try: sfile.makedirs(folder)
    except: pass
def VerifyKeymaps():
    """Install helper script and keymaps, reloading keymaps if anything changed."""
    scriptPath = os.path.join(ADDON.getAddonInfo('profile'), 'captureLauncher.py')
    if not sfile.exists(scriptPath):
        # Force the menu keymap to be regenerated for the launcher version.
        DeleteKeymap(KEYMAP_MENU)
        sfile.copy(os.path.join(HOME, 'captureLauncher.py'), scriptPath)

    changed = VerifyKeymapHot()
    # Both verifiers must always run; `or` keeps the menu check first operand-safe.
    changed = VerifyKeymapMenu() or changed

    if changed:
        xbmc.sleep(1000)
        xbmc.executebuiltin('Action(reloadkeymaps)')
def VerifyKeymapHot():
    """Create the global-hotkey keymap if needed; True when a reload is required."""
    if ADDON.getSetting('HOTKEY') == GETTEXT(30111): #i.e. programmable
        return False

    dest = os.path.join('special://profile/keymaps', KEYMAP_HOT)
    if sfile.exists(dest):
        return False

    key = ADDON.getSetting('HOTKEY')
    # Valid hotkeys are the localized names 30028..30039 plus 30058.
    valid = [GETTEXT(i) for i in range(30028, 30040)]
    valid.append(GETTEXT(30058))

    if key not in valid:
        DeleteKeymap(KEYMAP_HOT)
        return True

    if isATV():
        DialogOK(GETTEXT(30118), GETTEXT(30119))
        return False

    return WriteKeymap(key.lower(), key.lower())
def WriteKeymap(start, end):
    """Write the global hotkey keymap file, retrying on failure; returns True."""
    dest = os.path.join('special://profile/keymaps', KEYMAP_HOT)
    cmd = '<keymap><Global><keyboard><%s>XBMC.RunScript(special://home/addons/plugin.program.super.favourites/hot.py)</%s></keyboard></Global></keymap>' % (start, end)

    def _attempt():
        # Write the file and give XBMC a moment to flush it.
        f = sfile.file(dest, 'w')
        f.write(cmd)
        f.close()
        xbmc.sleep(1000)

    _attempt()
    retries = 4
    while not sfile.exists(dest) and retries > 0:
        retries -= 1
        _attempt()
    return True
def VerifyKeymapMenu():
    """Install or remove the context-menu keymap; True when a reload is needed."""
    if ADDON.getSetting('CONTEXT') != 'true':
        DeleteKeymap(KEYMAP_MENU)
        return True

    keymap = 'special://profile/keymaps'
    dst = os.path.join(keymap, KEYMAP_MENU)
    if sfile.exists(dst):
        return False

    src = os.path.join(HOME, 'resources', 'keymaps', KEYMAP_MENU)
    sfile.makedirs(keymap)
    sfile.copy(src, dst)
    return True
def verifyPlayMedia(cmd):
    # Placeholder: PlayMedia commands are always treated as valid.
    return True
def verifyPlugin(cmd):
    """True when the plugin:// addon referenced by cmd is installed (or unparsable)."""
    try:
        addonId = re.compile('plugin://(.+?)/').search(cmd).group(1)
        return xbmc.getCondVisibility('System.HasAddon(%s)' % addonId) == 1
    except:
        pass
    # Unparsable commands are assumed valid.
    return True
def verifyScript(cmd):
    """True when the addon named in a RunScript(...) command is installed (or unparsable)."""
    try:
        addonId = cmd.split('(', 1)[1].split(',', 1)[0].replace(')', '').replace('"', '')
        addonId = addonId.split('/', 1)[0]
        return xbmc.getCondVisibility('System.HasAddon(%s)' % addonId) == 1
    except:
        pass
    return True
def isATV():
    # True when running on an Apple TV 2 build of XBMC.
    return xbmc.getCondVisibility('System.Platform.ATV2') == 1
def GetFolder(title):
    """Show a browse dialog; returns the chosen folder, or None when unchanged."""
    default = ROOT
    sfile.makedirs(PROFILE)
    folder = xbmcgui.Dialog().browse(3, title, 'files', '', False, False, default)
    return None if folder == default else folder
# Five-character HTML/XML escaping map.  Restored from a corrupted export
# that HTML-decoded the entity strings into identity mappings (e.g.
# "&" -> "&"), which made escape()/unescape() no-ops.
html_escape_table = {
    "&": "&amp;",
    '"': "&quot;",
    "'": "&apos;",
    ">": "&gt;",
    "<": "&lt;",
}

def escape(text):
    """Escape the five XML/HTML special characters in text."""
    return str(''.join(html_escape_table.get(c, c) for c in text))

def unescape(text):
    """Reverse escape().

    BUGFIX: '&amp;' is decoded LAST so that e.g. '&amp;lt;' becomes '&lt;'
    rather than being double-decoded to '<'.
    """
    text = text.replace('&quot;', '"')
    text = text.replace('&apos;', '\'')
    text = text.replace('&gt;', '>')
    text = text.replace('&lt;', '<')
    text = text.replace('&amp;', '&')
    return text
def fix(text):
    """Drop non-ASCII characters and strip surrounding whitespace."""
    ascii_only = ''.join(ch for ch in text if ord(ch) < 128)
    return ascii_only.strip()
def Clean(name):
    """Strip '(2019)'-style parentheses and '[...]' decorations, collapse spaces.

    BUGFIX: the whitespace-collapse step read `replace(' ', ' ')` (a no-op)
    after the export collapsed a double space; restored `'  ' -> ' '`.
    """
    import re
    # Drop parenthesised runs of digits, e.g. "(2019)".
    name = re.sub('\([0-9)]*\)', '', name)
    cleaned = ''
    for part in name.split(']'):
        if not part:
            continue
        # Re-attach the bracket and drop complete "[...]" groups.
        part = re.sub('\[[^)]*\]', '', part + ']')
        if part:
            cleaned += part
    cleaned = cleaned.replace('[', '').replace(']', '').strip()
    # Collapse runs of spaces down to single spaces.
    while '  ' in cleaned:
        cleaned = cleaned.replace('  ', ' ')
    return cleaned.strip()
def CleanForSort(text):
    """Sort key: first element of text, lower-cased and Clean()-ed."""
    # NOTE(review): text appears to be a sequence (tuple/list) keyed on its
    # first element -- confirm against callers.
    return Clean(text[0].lower())
def fileSystemSafe(text):
    """Strip characters illegal in file names; None for empty/blank input."""
    if not text:
        return None
    text = re.sub('[:\\\\/*?\<>|"]+', '', text).strip()
    return text if text else None
def findAddon(item):
    """Extract an addon id from a quoted/plugin:// item; None when not installed."""
    try:
        try:
            addon = re.compile('"(.+?)"').search(item).group(1)
        except:
            addon = item
        addon = addon.replace('plugin://', '').replace('script://', '').replace('/', '')
        addon = addon.split('?', 1)[0]
        if xbmc.getCondVisibility('System.HasAddon(%s)' % addon) == 0:
            addon = None
    except:
        addon = None
    return addon
def getSettingsLabel(addon):
    """Localized 'settings' label for the addon, with a generic fallback."""
    label = fix(xbmcaddon.Addon(addon).getAddonInfo('name')).strip()
    try:
        if len(label) > 0:
            return GETTEXT(30094) % label
    except:
        pass
    return GETTEXT(30094) % GETTEXT(30217)
#logic for setting focus inspired by lambda
#logic for setting focus inspired by lambda
def openSettings(addonID, focus=None):
    """Open an addon's settings dialog, optionally focusing 'category.setting'."""
    if not focus:
        return xbmcaddon.Addon(addonID).openSettings()
    try:
        xbmc.executebuiltin('Addon.OpenSettings(%s)' % addonID)
        category, setting = str(focus).split('.')
        # Control-id offsets are swapped between Frodo and later releases.
        if FRODO:
            xbmc.executebuiltin('SetFocus(%d)' % (int(category) + 200))
            xbmc.executebuiltin('SetFocus(%d)' % (int(setting) + 100))
        else:
            xbmc.executebuiltin('SetFocus(%d)' % (int(category) + 100))
            xbmc.executebuiltin('SetFocus(%d)' % (int(setting) + 200))
    except:
        return
#Remove Tags method from
#http://stackoverflow.com/questions/9662346/python-code-to-remove-html-tags-from-a-string
TAG_RE = re.compile('<.*?>')

def RemoveTags(html):
    """Strip anything that looks like an HTML/XML tag from html."""
    return TAG_RE.sub('', html)
def showBusy():
    """Open the busy-spinner dialog; returns the dialog, or None on any failure."""
    try:
        import xbmcgui
        dialog = xbmcgui.WindowXMLDialog('DialogBusy.xml', '')
        dialog.show()
        # Hide the cancel control when present; ignore skins without it.
        try:
            dialog.getControl(10).setVisible(False)
        except:
            pass
        return dialog
    except:
        return None
def showText(heading, text, waitForClose=False):
    """Display text in XBMC's text-viewer window (id 10147)."""
    windowId = 10147
    xbmc.executebuiltin('ActivateWindow(%d)' % windowId)
    xbmc.sleep(100)
    win = xbmcgui.Window(windowId)
    # The window's controls may not exist immediately; retry briefly.
    for _ in range(50):
        try:
            xbmc.sleep(10)
            win.getControl(1).setLabel(heading)
            win.getControl(5).setText(text)
            break
        except:
            pass
    if waitForClose:
        while xbmc.getCondVisibility('Window.IsVisible(%d)' % windowId) == 1:
            xbmc.sleep(50)
def showChangelog(addonID=None):
    """Show the changelog of the given addon (default: this addon)."""
    try:
        addon = xbmcaddon.Addon(addonID) if addonID else xbmcaddon.Addon(ADDONID)
        text = sfile.read(addon.getAddonInfo('changelog'))
        title = '%s - %s' % (xbmc.getLocalizedString(24054), addon.getAddonInfo('name'))
        showText(title, text)
    except:
        pass
def getAllPlayableFiles(folder):
files |
s.generate(combinations.combine(mode=['graph', 'eager']))
def test_v1_fused_attribute(self):
    # V1 BatchNormalization defaults to the fused kernel for 4D input.
    norm = normalization.BatchNormalization()
    inp = keras.layers.Input((4, 4, 4))
    norm(inp)
    self.assertEqual(norm.fused, True)
    # Explicit fused=False is honoured through build.
    norm = normalization.BatchNormalization(fused=False)
    self.assertEqual(norm.fused, False)
    inp = keras.layers.Input(shape=(4, 4, 4))
    norm(inp)
    self.assertEqual(norm.fused, False)
    # virtual_batch_size is incompatible with the fused kernel: the
    # attribute flips from True to False once the layer is built.
    norm = normalization.BatchNormalization(virtual_batch_size=2)
    self.assertEqual(norm.fused, True)
    inp = keras.layers.Input(shape=(2, 2, 2))
    norm(inp)
    self.assertEqual(norm.fused, False)
class BatchNormalizationV2Test(keras_parameterized.TestCase):
    """Tests specific to the V2 BatchNormalization layer."""

    @keras_parameterized.run_all_keras_modes
    def test_basic_batchnorm_v2(self):
        # Smoke tests: fused on 4D input, auto-resolved fused on 3D.
        testing_utils.layer_test(
            normalization_v2.BatchNormalization,
            kwargs={'fused': True},
            input_shape=(3, 3, 3, 3))
        testing_utils.layer_test(
            normalization_v2.BatchNormalization,
            kwargs={'fused': None},
            input_shape=(3, 3, 3))

    @combinations.generate(combinations.combine(mode=['graph', 'eager']))
    def test_v2_fused_attribute(self):
        # In V2, `fused` defaults to None and is resolved at build time:
        # True only for 4D input with a fused-compatible configuration.
        norm = normalization_v2.BatchNormalization()
        self.assertEqual(norm.fused, None)
        inp = keras.layers.Input(shape=(4, 4, 4))
        norm(inp)
        self.assertEqual(norm.fused, True)
        # Non-4D input cannot use the fused kernel.
        norm = normalization_v2.BatchNormalization()
        self.assertEqual(norm.fused, None)
        inp = keras.layers.Input(shape=(4, 4))
        norm(inp)
        self.assertEqual(norm.fused, False)
        # Options incompatible with the fused kernel force fused=False.
        norm = normalization_v2.BatchNormalization(virtual_batch_size=2)
        self.assertEqual(norm.fused, False)
        inp = keras.layers.Input(shape=(4, 4, 4))
        norm(inp)
        self.assertEqual(norm.fused, False)
        norm = normalization_v2.BatchNormalization(fused=False)
        self.assertEqual(norm.fused, False)
        inp = keras.layers.Input(shape=(4, 4, 4))
        norm(inp)
        self.assertEqual(norm.fused, False)
        # axis=[3] is equivalent to axis=3 and stays fused.
        norm = normalization_v2.BatchNormalization(fused=True, axis=[3])
        self.assertEqual(norm.fused, True)
        inp = keras.layers.Input(shape=(4, 4, 4))
        norm(inp)
        self.assertEqual(norm.fused, True)
        # Explicit fused=True must be rejected for unsupported configs.
        with self.assertRaisesRegex(ValueError, 'fused.*renorm'):
            normalization_v2.BatchNormalization(fused=True, renorm=True)
        with self.assertRaisesRegex(ValueError, 'fused.*when axis is 1 or 3'):
            normalization_v2.BatchNormalization(fused=True, axis=2)
        with self.assertRaisesRegex(ValueError, 'fused.*when axis is 1 or 3'):
            normalization_v2.BatchNormalization(fused=True, axis=[1, 3])
        with self.assertRaisesRegex(ValueError, 'fused.*virtual_batch_size'):
            normalization_v2.BatchNormalization(fused=True, virtual_batch_size=2)
        with self.assertRaisesRegex(ValueError, 'fused.*adjustment'):
            normalization_v2.BatchNormalization(fused=True,
                                                adjustment=lambda _: (1, 0))
        # fused=True with non-4D/5D input fails at build time.
        norm = normalization_v2.BatchNormalization(fused=True)
        self.assertEqual(norm.fused, True)
        inp = keras.layers.Input(shape=(4, 4))
        with self.assertRaisesRegex(ValueError, '4D or 5D input tensors'):
            norm(inp)

    def test_updates_in_wrap_function(self):
        def my_func():
            layer = normalization.BatchNormalization()
            x = array_ops.ones((10, 1))
            y = layer(x, training=True)
            # Updates should be tracked in a `wrap_function`.
            self.assertLen(layer.updates, 2)
            return y

        wrapped_fn = wrap_function.wrap_function(my_func, [])
        wrapped_fn()

    @keras_parameterized.run_all_keras_modes
    def test_basic_batchnorm_v2_none_shape_and_virtual_batch_size(self):
        # Test case for GitHub issue for 32380
        norm = normalization_v2.BatchNormalization(virtual_batch_size=8)
        inp = keras.layers.Input(shape=(None, None, 3))
        _ = norm(inp)
def _run_batchnorm_correctness_test(layer, dtype='float32', fused=False):
    """Train a single-BN model and check the normalized output statistics.

    Input is drawn from N(5, 10); after fitting, (output - beta) / gamma
    should be approximately standard normal.
    """
    model = keras.models.Sequential()
    model.add(keras.Input(shape=(2, 2, 2), dtype=dtype))
    norm = layer(momentum=0.8, fused=fused)
    model.add(norm)
    if dtype == 'float16':
        # Keras models require float32 losses.
        model.add(keras.layers.Lambda(lambda x: keras.backend.cast(x, 'float32')))
    model.compile(
        loss='mse',
        optimizer=gradient_descent.GradientDescentOptimizer(0.01),
        run_eagerly=testing_utils.should_run_eagerly())
    # centered on 5.0, variance 10.0
    x = (np.random.normal(loc=5.0, scale=10.0, size=(1000, 2, 2, 2))
         .astype(dtype))
    model.fit(x, x, epochs=4, verbose=0)
    out = model.predict(x)
    # Undo beta/gamma so only the normalization itself is checked.
    out -= keras.backend.eval(norm.beta)
    out /= keras.backend.eval(norm.gamma)
    np.testing.assert_allclose(out.mean(), 0.0, atol=2e-1)
    np.testing.assert_allclose(out.std(), 1.0, atol=2e-1)
@parameterized.parameters(
    [normalization.BatchNormalization, normalization_v2.BatchNormalization])
class NormalizationLayersGraphModeOnlyTest(
        test.TestCase, parameterized.TestCase):
    """Graph-mode-only BN tests, parameterized over the V1 and V2 layers."""

    def test_shared_batchnorm(self, layer):
        """Test that a BN layer can be shared across different data streams."""
        with self.cached_session():
            # Test single layer reuse
            bn = layer()
            x1 = keras.layers.Input(shape=(10,))
            _ = bn(x1)
            x2 = keras.layers.Input(shape=(10,))
            y2 = bn(x2)
            x = np.random.normal(loc=5.0, scale=10.0, size=(2, 10))
            model = keras.models.Model(x2, y2)
            model.compile(gradient_descent.GradientDescentOptimizer(0.01), 'mse')
            model.train_on_batch(x, x)
            # Test model-level reuse
            x3 = keras.layers.Input(shape=(10,))
            y3 = model(x3)
            new_model = keras.models.Model(x3, y3, name='new_model')
            new_model.compile(gradient_descent.GradientDescentOptimizer(0.01), 'mse')
            new_model.train_on_batch(x, x)

    def test_that_trainable_disables_updates(self, layer):
        """A non-trainable model/layer must not update BN statistics."""
        with self.cached_session():
            val_a = np.random.random((10, 4))
            val_out = np.random.random((10, 4))
            a = keras.layers.Input(shape=(4,))
            layer = layer(input_shape=(4,))
            b = layer(a)
            model = keras.models.Model(a, b)
            # Frozen model: predictions must be unchanged by training.
            model.trainable = False
            model.compile(gradient_descent.GradientDescentOptimizer(0.01), 'mse')
            x1 = model.predict(val_a)
            model.train_on_batch(val_a, val_out)
            x2 = model.predict(val_a)
            self.assertAllClose(x1, x2, atol=1e-7)
            # Trainable again: training must change the predictions.
            model.trainable = True
            # Fixed: learning rate was corrupted to "0.0 | 1" in the source.
            model.compile(gradient_descent.GradientDescentOptimizer(0.01), 'mse')
            model.train_on_batch(val_a, val_out)
            x2 = model.predict(val_a)
            assert np.abs(np.sum(x1 - x2)) > 1e-5
            # Freezing just the layer has the same effect as freezing the
            # whole model.
            layer.trainable = False
            model.compile(gradient_descent.GradientDescentOptimizer(0.01), 'mse')
            x1 = model.predict(val_a)
            model.train_on_batch(val_a, val_out)
            # Fixed: "model.pr | edict" corruption in the source.
            x2 = model.predict(val_a)
            self.assertAllClose(x1, x2, atol=1e-7)

    def test_batchnorm_trainable(self, layer):
        """Tests that batchnorm layer is trainable when learning phase is enabled.

        Computes mean and std for current inputs then
        applies batch normalization using them.

        Args:
          layer: Either V1 or V2 of BatchNormalization layer.
        """
        # TODO(fchollet): enable in all execution modes when issue with
        # learning phase setting is resolved.
        with ops.Graph().as_default(), self.cached_session():
            bn_mean = 0.5
            bn_std = 10.

            val_a = np.expand_dims(np.arange(10.), axis=1)

            def get_model(bn_mean, bn_std):
                inp = keras.layers.Input(shape=(1,))
                x = layer()(inp)
                model1 = keras.models.Model(inp, x)
                # gamma, beta, moving_mean, moving_variance
                model1.set_weights([
                    np.array([1.]),
                    np.array([0.]),
                    np.array([bn_mean]),
                    np.array([bn_std**2])
                ])
                return model1

            # Simulates training-mode with trainable layer.
            # Should use mini-batch statistics.
            with keras.backend.learning_phase_scope(1):
                model = get_model(bn_mean, bn_std)
                model.compile(loss='mse', optimizer='rmsprop')
                out = model.predict(val_a)
                self.assertAllClose(
                    (val_a - np.mean(val_a)) / np.std(val_a), out, atol=1e-3)
def _run_layernorm_correctness_test(layer, dtype='float32'):
model = keras.models.Sequential()
model.add(keras.la |
# -*- coding: utf-8 -*-
#
# script.py
# colorific
#
import sys
import optparse
from colorific import config
from colorific.palette import (
extract_colors, print_colors, save_palette_as_image, color_stream_mt,
color_stream_st)
class Application(object):
    """Command-line front-end around colorific's palette extraction."""

    def __init__(self):
        self.parser = self.create_option_parser()

    def create_option_parser(self):
        """Build the optparse parser exposing all extraction knobs."""
        usage = '\n'.join([
            "%prog [options]",
            "",
            "Reads a stream of image filenames from stdin, and outputs a ",
            "single line for each containing hex color values."])
        parser = optparse.OptionParser(usage)
        parser.add_option(
            '-p',
            '--parallel',
            action='store',
            dest='n_processes',
            type='int',
            default=config.N_PROCESSES)
        parser.add_option(
            '--min-saturation',
            action='store',
            dest='min_saturation',
            default=config.MIN_SATURATION,
            type='float',
            help="Only keep colors which meet this saturation "
                 "[%.02f]" % config.MIN_SATURATION)
        parser.add_option(
            '--max-colors',
            action='store',
            dest='max_colors',
            type='int',
            default=config.MAX_COLORS,
            # Fixed: this help string was corrupted by a stray "|" token.
            help="The maximum number of colors to output per palette "
                 "[%d]" % config.MAX_COLORS)
        parser.add_option(
            '--min-distance',
            action='store',
            dest='min_distance',
            type='float',
            default=config.MIN_DISTANCE,
            help="The minimum distance colors must have to stay separate "
                 "[%.02f]" % config.MIN_DISTANCE)
        parser.add_option(
            '--min-prominence',
            action='store',
            # Fixed: dest was corrupted to "min_pro | minence".
            dest='min_prominence',
            type='float',
            default=config.MIN_PROMINENCE,
            help="The minimum proportion of pixels needed to keep a color "
                 "[%.02f]" % config.MIN_PROMINENCE)
        parser.add_option(
            '--n-quantized',
            action='store',
            dest='n_quantized',
            type='int',
            default=config.N_QUANTIZED,
            help="Speed up by reducing the number in the quantizing step "
                 "[%d]" % config.N_QUANTIZED)
        parser.add_option(
            '-o',
            action='store_true',
            dest='save_palette',
            default=False,
            help="Output the palette as an image file")
        return parser

    def run(self):
        """Process filename arguments, or fall back to a stdin stream."""
        argv = sys.argv[1:]
        (options, args) = self.parser.parse_args(argv)
        if args:
            # image filenames were provided as arguments
            for filename in args:
                try:
                    palette = extract_colors(
                        filename,
                        min_saturation=options.min_saturation,
                        min_prominence=options.min_prominence,
                        min_distance=options.min_distance,
                        max_colors=options.max_colors,
                        n_quantized=options.n_quantized)
                except Exception as e:  # TODO: it's too broad exception.
                    # Portable replacement for the py2-only "print >>".
                    sys.stderr.write('%s %s\n' % (filename, e))
                    continue
                print_colors(filename, palette)
                if options.save_palette:
                    save_palette_as_image(filename, palette)
            # NOTE(review): exits with status 1 even on success; kept for
            # backward compatibility with existing callers.
            sys.exit(1)
        if options.n_processes > 1:
            # XXX add all the knobs we can tune
            color_stream_mt(n=options.n_processes)
        else:
            color_stream_st(
                min_saturation=options.min_saturation,
                min_prominence=options.min_prominence,
                min_distance=options.min_distance,
                max_colors=options.max_colors,
                n_quantized=options.n_quantized,
                save_palette=options.save_palette)
def main():
    """Command-line entry point: build the application and run it."""
    Application().run()


if __name__ == '__main__':
    main()
|
# -*- coding: utf-8 -*-
#
# debbindiff: highlight differences between two builds of Debian packages
#
# Copyright © 2014-2015 Jérémy Bobbio <lunar@debian.org>
#
# debbindiff is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# debbindiff is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with debbindiff. If not, see <http://www.gnu.org/licenses/>.
import codecs
from debbindiff.comparators.binary import compare_binary_files
from debbindiff.difference import Dif | ference
def compare_text_files(path1, path2, encoding, source=None):
    """Compare two text files decoded with *encoding* (default utf-8).

    Falls back to a binary comparison when the encoding is unknown or
    was misdetected (LookupError / UnicodeDecodeError).

    Returns a list containing a single Difference, or an empty list when
    the files compare equal.
    """
    if encoding is None:
        encoding = 'utf-8'
    try:
        # Close the handles explicitly; the original leaked them.
        file1 = codecs.open(path1, 'r', encoding=encoding)
        try:
            file2 = codecs.open(path2, 'r', encoding=encoding)
            try:
                difference = Difference.from_file(
                    file1, file2, path1, path2, source)
            finally:
                file2.close()
        finally:
            file1.close()
    except (LookupError, UnicodeDecodeError):
        # unknown or misdetected encoding
        return compare_binary_files(path1, path2, source)
    if not difference:
        return []
    return [difference]
|
import functools
from common.tornado_cookies import get_secure_cookie, generate_secure_cookie
from core import cookies
class Perms(object):
NONE = None
READ = 'r'
WRITE = 'w'
def _permission_level(user, room):
"""
`user`'s permission level on `room`, ignoring cookies
"""
if not user.is_authenticated():
return Perms.READ
else:
return Perms.WRITE
def _get_cached_perm_level(request, cookie_name):
    # Return the permission level ('r' or 'w') cached in a signed cookie
    # on `request`, or None when the cookie is absent.
    perm = get_secure_cookie(request, cookie_name)
    if not perm:
        return
    # The cookie is signed, so a present value must be one of the two
    # levels we ever wrote (see _set_cached_perm_level).
    assert perm in ('r', 'w')
    return perm
def _set_cached_perm_level(response, cookie_name, perm_level):
    # Cache `perm_level` ('r' or 'w') in a signed cookie on `response`
    # so later requests can skip the permission computation.
    assert perm_level in ('r', 'w')
    cookie_val = generate_secure_cookie(cookie_name, perm_level)
    response.set_cookie(cookie_name, cookie_val)
def _perm_level_satisfies(perm_val, perm_req):
    """
    If a user has permission level `perm_val`,
    and is requesting access level `perm_req`.
    """
    # An exact match always satisfies; write access also implies read.
    return (perm_req == perm_val or
            (perm_val == Perms.WRITE and perm_req == Perms.READ))
def get_permission(request, response, room, perm_req):
    """
    Returns True or False.

    Sets a cookie on the response object to cache
    the result, if necessary.
    """
    assert perm_req in (Perms.READ, Perms.WRITE)
    # Fast path: a previously issued signed cookie already grants the
    # requested level.
    if cookies.has_cached_room_permission(
            room.shortname,
            perm_req,
            functools.partial(get_secure_cookie, request),
            # Fixed: "request.sessio | n.session_key" corruption.
            session_key=request.session.session_key,
            uid=getattr(request.user, 'id', None)):
        return True
    # Cached permission does not satisfy requirement.
    perm_actual = _permission_level(request.user, room)
    if perm_actual == Perms.NONE:
        return False
    assert perm_actual in (Perms.READ, Perms.WRITE)
    result = _perm_level_satisfies(perm_actual, perm_req)
    # Fixed: "r | oom.shortname" corruption on the original line.
    cookie_name = cookies.room_cookie_name(
        room.shortname,
        session_key=request.session.session_key,
        uid=getattr(request.user, 'id', None))
    if result:
        # Cache the *actual* level so a later, lower request can also be
        # satisfied straight from the cookie.
        _set_cached_perm_level(response, cookie_name, perm_actual)
    return result
|
#!/usr/bin/env python
# coding: utf-8
#
# Copyright 2016, Marcos Salomão.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import unittest
import webtest
import endpoints
import logging
from test_utils import TestCase
from protorpc.remote import protojson
from protorpc import message_types
from google.appengine.ext import testbed
from google.appengine.api import users
from app.supplier.services import SupplierService
from app.supplier.messages import SupplierPostMessage
from app.supplier.messages import SupplierGetMessage
from app.supplier.messages import SupplierSearchMessage
from app.supplier.messages import SupplierKeyMessage
from app.supplier.messages import SupplierCollectionMessage
from app.exceptions import NotFoundEntityException
import sys
# Python 2 hack: reload(sys) restores setdefaultencoding(), which
# site.py deletes, so mixed str/unicode data in these tests decodes as
# utf-8 instead of raising UnicodeDecodeError.
reload(sys)
sys.setdefaultencoding('utf8')
class SupplierTestCase(TestCase):
    """End-to-end tests for the SupplierService endpoints API."""

    def setUp(self):
        # Call super method
        super(SupplierTestCase, self).setUp()
        # Create service
        supplierService = endpoints.api_server(
            [SupplierService], restricted=False)
        # Create test
        self.testapp = webtest.TestApp(supplierService)

    def save(self, request):
        """ Call save endpoint.
        """
        # Fixed: the endpoint URL was corrupted to "Su | pplierService".
        response = self.testapp.post(
            '/_ah/spi/SupplierService.save',
            protojson.encode_message(request),
            content_type='application/json')
        self.assertEqual(response.status, '200 OK')
        return protojson.decode_message(SupplierGetMessage, response.body)

    def search(self, request):
        """ Call search endpoint.
        """
        response = self.testapp.post('/_ah/spi/SupplierService.search',
                                     protojson.encode_message(request),
                                     content_type='application/json')
        self.assertEqual(response.status, '200 OK')
        return protojson.decode_message(SupplierCollectionMessage, response.body)

    def list(self):
        """ Call list endpoint.
        """
        response = self.testapp.post(
            '/_ah/spi/SupplierService.list',
            content_type='application/json')
        self.assertEqual(response.status, '200 OK')
        return protojson.decode_message(SupplierCollectionMessage, response.body)

    def delete(self, id, expect_errors=False):
        """ Call delete endpoint.

        With expect_errors=True an error response is tolerated and no
        status assertion is made.
        """
        response = self.testapp.post('/_ah/spi/SupplierService.delete',
                                     protojson.encode_message(
                                         SupplierKeyMessage(id=id)),
                                     content_type='application/json',
                                     expect_errors=expect_errors)
        if not expect_errors:
            self.assertEqual(response.status, '200 OK')

    def testSave(self):
        """ Save supplier.
        """
        request = SupplierPostMessage(
            name='Test',
            email='email@email.com',
            phone='99999999',
            location='Test Location')
        supplier = self.save(request)
        self.assertIsNotNone(supplier)
        self.assertIsNotNone(supplier.id)
        self.assertEqual(supplier.name, 'Test')
        self.assertEqual(supplier.email, 'email@email.com')
        self.assertEqual(supplier.phone, '99999999')
        self.assertEqual(supplier.location, 'Test Location')
        # Saving again with the same id must update, not insert.
        request = SupplierPostMessage(
            id=supplier.id,
            name='Test123',
            email='email123@email.com',
            phone='123123123',
            location='Test Location 123')
        supplier = self.save(request)
        self.assertIsNotNone(supplier)
        self.assertIsNotNone(supplier.id)
        self.assertEqual(supplier.name, 'Test123')
        self.assertEqual(supplier.email, 'email123@email.com')
        self.assertEqual(supplier.phone, '123123123')
        self.assertEqual(supplier.location, 'Test Location 123')
        return supplier

    def testSearch(self):
        """ Search a supplier.
        """
        self.testSave()
        request = SupplierSearchMessage(name='Test')
        list = self.search(request)
        self.assertIsNotNone(list)
        self.assertIsNotNone(list.items)
        self.assertTrue(len(list.items) == 1)
        request = SupplierSearchMessage(name='Yyy')
        list = self.search(request)
        self.assertIsNotNone(list)
        self.assertIsNotNone(list.items)
        self.assertTrue(len(list.items) == 0)

    def testList(self):
        """ List all suppliers.
        """
        self.testSave()
        list = self.list()
        self.assertIsNotNone(list)
        self.assertIsNotNone(list.items)
        self.assertTrue(len(list.items) > 0)

    def testDelete(self):
        """ Delete the supplier.
        """
        supplier = self.testSave()
        list = self.list()
        self.assertIsNotNone(list)
        self.assertIsNotNone(list.items)
        self.assertTrue(len(list.items) == 1)
        self.delete(supplier.id)
        list = self.list()
        self.assertIsNotNone(list)
        self.assertIsNotNone(list.items)
        self.assertTrue(len(list.items) == 0)
        # NOTE(review): the original passed the *result* of self.delete(...)
        # (None) to assertRaises, which returned an unused context manager
        # and asserted nothing.  Deleting an already-deleted supplier must
        # produce an error response from the endpoint instead.
        self.delete(supplier.id, expect_errors=True)
|
# Copyright (C) 2021 NEC Corp
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from tacker.db.db_sqlalchemy import models
def apply_filters(query, filters):
    """Apply filters to a SQLAlchemy query.

    :param query: The query object to which we apply filters.
    :param filters: A dict or an iterable of dicts, where each one includes
                    the necessary information to create a filter to be applied
                    to the query. There are single query filters, such as
                    filters = {'model': 'Foo', 'field': 'name', 'op': '==',
                    'value': 'foo'}. And multiple query filters, such as
                    filters = {'and': [
                        {'field': 'name', 'model': 'Foo', 'value': 'foo',
                         'op': '=='},
                        {'field': 'id', 'model': 'Bar', 'value': 'bar',
                         'op': '=='}
                    ]}
    """
    def _apply_one(query, spec):
        value = spec.get('value')
        op = spec.get('op')
        model = getattr(models, spec.get('model'))
        column_attr = getattr(model, spec.get('field'))
        # Map each supported operator to the corresponding SQLAlchemy
        # criterion; an unknown operator leaves the query untouched.
        criteria = {
            'in': lambda: column_attr.in_(value),
            'not_in': lambda: ~column_attr.in_(value),
            '!=': lambda: column_attr != value,
            '>': lambda: column_attr > value,
            '>=': lambda: column_attr >= value,
            '<': lambda: column_attr < value,
            '<=': lambda: column_attr <= value,
            '==': lambda: column_attr == value,
        }
        make_criterion = criteria.get(op)
        if make_criterion is None:
            return query
        return query.filter(make_criterion())

    if 'and' in filters:
        # Multiple filters: AND them together by chaining .filter calls.
        for spec in filters.get('and'):
            query = _apply_one(query, spec)
    else:
        query = _apply_one(query, filters)
    return query
|
alkeeper = self.match.HomeLineupGoalkeeper
self.lineupdefenders = self.match.HomeLineupDefense
self.lineupmidfielders = self.match.HomeLineupMidfield
self.lineupforwarders = self.match.HomeLineupForward
self.lineupsubs = self.match.HomeLineupSubstitutes
if self.match.HomeLineupCoach:
self.lineupcoach = self.match.HomeLineupCoach.replace(";","")
else: self.lineupcoach = {}
else:
self.teamname = self.match.strHomeTeam
self.LineUpTeamObj = self.match.HomeTeamObj
self.formationlabel = self.match.strHomeFormation
self.lineupgoalkeeper = self.match.strHomeLineupGoalkeeper
self.lineupdefenders = self.match.strHomeLineupDefense |
self.lineupmidfielders = self.match.strHomeL | ineupMidfield
self.lineupforwarders = self.match.strHomeLineupForward
self.lineupsubs = self.match.strHomeLineupSubstitutes
self.lineupcoach = {}
self.getControl(32527).setLabel(translate(32027))
else:
if 'idEvent' not in self.match.__dict__.keys():
if self.match.AwayTeamObj: self.LineUpTeamObj = self.match.AwayTeamObj
else: self.LineUpTeamObj = None
self.teamname = self.match.AwayTeam
self.formationlabel = self.match.AwayTeamFormation
self.lineupgoalkeeper = self.match.AwayLineupGoalkeeper
self.lineupdefenders = self.match.AwayLineupDefense
self.lineupmidfielders = self.match.AwayLineupMidfield
self.lineupforwarders = self.match.AwayLineupForward
self.lineupsubs = self.match.AwayLineupSubstitutes
if self.match.AwayLineupCoach:
self.lineupcoach = self.match.AwayLineupCoach.replace(";","")
else: self.lineupcoach = {}
else:
self.teamname = self.match.strAwayTeam
self.LineUpTeamObj = self.match.AwayTeamObj
self.formationlabel = self.match.strAwayFormation
self.lineupgoalkeeper = self.match.strAwayLineupGoalkeeper
self.lineupdefenders = self.match.strAwayLineupDefense
self.lineupmidfielders = self.match.strAwayLineupMidfield
self.lineupforwarders = self.match.strAwayLineupForward
self.lineupsubs = self.match.strAwayLineupSubstitutes
self.lineupcoach = {}
self.getControl(32527).setLabel(translate(32028))
#Set Labels for the panel
self.getControl(32522).setLabel(translate(32029) + ":")
self.getControl(32523).setLabel(translate(32030) + ":")
#Set team information
#Name
self.getControl(32521).setLabel(self.teamname)
if self.LineUpTeamObj:
if show_alternative == "true":
self.getControl(32521).setLabel(self.LineUpTeamObj.AlternativeNameFirst)
#Set team Badge
if self.LineUpTeamObj.strTeamBadge:
self.getControl(32520).setImage(self.LineUpTeamObj.strTeamBadge)
else:
self.getControl(32520).setImage(os.path.join(addon_path,"resources","img","nobadge_placeholder.png"))
else:
self.getControl(32520).setImage(os.path.join(addon_path,"resources","img","nobadge_placeholder.png"))
#Set team formation label
if self.formationlabel:
self.getControl(32518).setLabel(self.formationlabel)
#Set coach
if self.lineupcoach:
self.getControl(32526).setLabel("[COLOR selected]" + translate(32026) + ":[/COLOR] " + self.lineupcoach)
#Set Lineup
starters = []
if self.lineupgoalkeeper:
self.lineupgoalkeeper = self.lineupgoalkeeper.replace(";","")
starters.append(self.lineupgoalkeeper)
defenders = []
if self.lineupdefenders:
for player in self.lineupdefenders.split(";"):
if player:
defenders.append(player.strip())
starters.append(player.strip())
self.lineupdefenders = defenders
del defenders
midfielders = []
if self.lineupmidfielders:
for player in self.lineupmidfielders.split(";"):
if player:
midfielders.append(player.strip())
starters.append(player.strip())
self.lineupmidfielders = midfielders
del midfielders
forwarders = []
if self.lineupforwarders:
for player in self.lineupforwarders.split(";"):
if player:
forwarders.append(player.strip())
starters.append(player.strip())
self.getControl(32524).reset()
self.getControl(32524).addItems(starters)
self.lineupforwarders = forwarders
#Set Subs
subs = []
if self.lineupsubs:
for player in self.lineupsubs.split(";"):
if player: subs.append(player.strip())
self.getControl(32525).reset()
self.getControl(32525).addItems(subs)
#Players on pitch
pitch = self.getControl(32519)
pitchPosition = pitch.getPosition()
pitchHeight = pitch.getHeight()
pitchWidth = pitch.getWidth()
if self.formationlabel:
formationsjson = eval(FileIO.fileread(json_formations))
formation = formationsjson[self.formationlabel]
else:
formation = None
if formation:
#goalkeeper
goalkeeper = formation["goalkeeper"]
image_size = positions.getShirtHeight(pitchHeight,goalkeeper[1])
image_x = int(goalkeeper[0]*float(pitchWidth))+int(0.15*image_size)
image_y = int(goalkeeper[1]*float(pitchHeight))+int(0.15*image_size)
if self.LineUpTeamObj and self.LineUpTeamObj.strTeamJersey:
image = xbmcgui.ControlImage(image_x,image_y,image_size,image_size, self.LineUpTeamObj.strTeamJersey )
self.controls.append(image)
else:
image = xbmcgui.ControlImage(image_x,image_y,image_size,image_size, os.path.join(addon_path,"resources","img","nokit_placeholder.png") )
self.controls.append(image)
label = positions.getLabel(image, "[B]" + self.lineupgoalkeeper + "[/B]")
self.controls.append(label)
#defenders
defenders = formation["defenders"]
if defenders:
i = 0
for defender in defenders:
image_size = positions.getShirtHeight(pitchHeight,defender[1])
image_x = int(defender[0]*float(pitchWidth))+int(0.15*image_size)
image_y = int(defender[1]*float(pitchHeight))+int(0.15*image_size)
if self.LineUpTeamObj and self.LineUpTeamObj.strTeamJersey:
image = xbmcgui.ControlImage(image_x,image_y,image_size,image_size, self.LineUpTeamObj.strTeamJersey)
self.controls.append(image)
else:
image = xbmcgui.ControlImage(image_x,image_y,image_size,image_size, os.path.join(addon_path,"resources","img","nokit_placeholder.png") )
self.controls.append(image)
label = positions.getLabel(image,"[B]" + self.lineupdefenders[i] + "[/B]")
self.controls.append(label)
i += 1
#midfielders
midfielders = formation["midfielders"]
if midfielders:
i = 0
for midfielder in midfielders:
image_size = positions.getShirtHeight(pitchHeight,midfielder[1])
image_x = int(midfielder[0]*float(pitchWidth))+int(0.15*image_size)
image_y = int(midfielder[1]*float(pitchHeight))+int(0.15*image_size)
if self.LineUpTeamObj and self.LineUpTeamObj.strTeamJersey:
image = xbmcgui.ControlImage(image_x,image_y,image_size,image_size, self.LineUpTeamObj.strTeamJersey)
self.controls.append(image)
else:
image = xbmcgui.ControlImage(image_x,image_y,image_size,image_size, os.path.join(addon_path,"resources","img","nokit_placeholder.png") )
self.controls.append(image)
label = positions.getLabel(image,"[B]" + self.lineupmidfielders[i] + "[/B]")
self.controls.append(label)
i += 1
#forwarders
forwarders = formation["forwarders"]
if forwarders:
i = 0
for forwarder in forwarders:
image_size = positions.getShirtHeight(pitchHeight,forwarder[1])
image_x = int(forwarder[0]*float(pitchWidth))+int(0.15*image_size)
image_y = int(forwarder[1]*float(pitchHeight))+int(0.15*image_size)
if self.LineUpTeamObj and self.LineUpTeamObj.strTeamJersey:
image = xbmcgui.ControlImage(image_x,image_y,image_size,image_size, self.LineUpTeamObj.strTeamJersey)
self.controls.append(image)
else:
image = xbmcgui.ControlImage(image_x,image_y,image_size,image_size, os.path.join(addon_path,"resources","img","nokit_placeholder.png") )
self.controls.append(image)
label = positions.getLabel(image,"[B]" + self.lineupforwarders[i] + "[/B]")
self.controls.append(label)
i += 1
self.addControls(self.controls)
self.setFocusId(32527)
def resetControls(self):
    # Remove every dynamically added pitch control (shirt images and
    # player labels) and clear the list so the next lineup render starts
    # from a clean slate.
    self.removeControls(self.controls)
    self.controls = []
def stopRunning(self):
self.isRunning = False
xbmc.execute |
"""check unused import
"""
__revision__ = 1
import os
import sys
class NonRegr:
    """Non-regression fixture: methods shadowing or using module imports."""
    def __init__(self):
        print 'initialized'
    def sys(self):
        """should not get sys from there..."""
        print self, sys
    def dummy(self, truc):
        """yo"""
        return self, truc
    def blop(self):
        """yo"""
        print self, 'blip'
|
import argparse
import asyncio
import gc
import os.path
import pathlib
import socket
import ssl
PRINT = 0
async def echo_server(loop, address, unix):
    """Accept connections on *address* and spawn an echo task per client.

    *unix* selects AF_UNIX vs AF_INET; *address* must match the family
    (a path string, or a (host, port) tuple).
    """
    if unix:
        sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
    else:
        sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
    sock.bind(address)
    sock.listen(5)
    # Non-blocking mode is required by the loop.sock_* APIs.
    sock.setblocking(False)
    if PRINT:
        print('Server listening at', address)
    with sock:
        while True:
            client, addr = await loop.sock_accept(sock)
            if PRINT:
                print('Connection from', addr)
            loop.create_task(echo_client(loop, client))
async def echo_client(loop, client):
    """Echo everything received on *client* back to it until EOF."""
    try:
        # TCP_NODELAY lowers latency; it fails harmlessly on unix sockets
        # (OSError) or platforms lacking the constant (NameError).
        client.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
    except (OSError, NameError):
        pass
    with client:
        while True:
            data = await loop.sock_recv(client, 1000000)
            if not data:
                # Peer closed the connection.
                break
            await loop.sock_sendall(client, data)
    if PRINT:
        print('Connection closed')
async def echo_client_streams(reader, writer):
    """Stream-API variant of the echo handler (asyncio.start_server)."""
    sock = writer.get_extra_info('socket')
    try:
        # Same best-effort TCP_NODELAY as the socket-level handler.
        sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
    except (OSError, NameError):
        pass
    if PRINT:
        print('Connection from', sock.getpeername())
    while True:
        data = await reader.read(1000000)
        if not data:
            break
        writer.write(data)
    if PRINT:
        print('Connection closed')
    writer.close()
class EchoProtocol(asyncio.Protocol):
    """Callback-based echo: write every received chunk straight back."""
    def connection_made(self, transport):
        self.transport = transport
    def connection_lost(self, exc):
        # Drop the reference so the transport can be collected.
        self.transport = None
    def data_received(self, data):
        self.transport.write(data)
class EchoBufferedProtocol(asyncio.BufferedProtocol):
    """Zero-copy echo using the BufferedProtocol receive-buffer API."""
    def connection_made(self, transport):
        self.transport = transport
        # Here the buffer is intended to be copied, so that the outgoing buffer
        # won't be wrongly updated by next read
        self.buffer = bytearray(256 * 1024)
    def connection_lost(self, exc):
        self.transport = None
    def get_buffer(self, sizehint):
        # Always hand back the same preallocated receive buffer.
        return self.buffer
    def buffer_updated(self, nbytes):
        # Slicing copies the received bytes before handing them to write.
        self.transport.write(self.buffer[:nbytes])
async def print_debug(loop):
    """Periodically clear the screen and dump uvloop debug counters."""
    while True:
        print(chr(27) + "[2J")  # clear screen
        loop.print_debug_info()
        await asyncio.sleep(0.5)
if __name__ == '__main__':
    # Benchmark entry point: pick an event loop, a server flavour
    # (sock_* / streams / protocol), an address family and optional SSL.
    parser = argparse.ArgumentParser()
    parser.add_argument('--uvloop', default=False, action='store_true')
    parser.add_argument('--streams', default=False, action='store_true')
    parser.add_argument('--proto', default=False, action='store_true')
    parser.add_argument('--addr', default='127.0.0.1:25000', type=str)
    parser.add_argument('--print', default=False, action='store_true')
    parser.add_argument('--ssl', default=False, action='store_true')
    parser.add_argument('--buffered', default=False, action='store_true')
    args = parser.parse_args()

    if args.uvloop:
        import uvloop
        loop = uvloop.new_event_loop()
        print('using UVLoop')
    else:
        loop = asyncio.new_event_loop()
        print('using asyncio loop')

    asyncio.set_event_loop(loop)
    loop.set_debug(False)

    if args.print:
        PRINT = 1

    if hasattr(loop, 'print_debug_info'):
        loop.create_task(print_debug(loop))
        # The debug task clears the screen; per-connection printing would
        # interfere with it, so it wins over --print.
        PRINT = 0

    unix = False
    if args.addr.startswith('file:'):
        # "file:/path" selects a unix-domain socket.
        unix = True
        addr = args.addr[5:]
        if os.path.exists(addr):
            os.remove(addr)
    else:
        addr = args.addr.split(':')
        addr[1] = int(addr[1])
        addr = tuple(addr)

    print('serving on: {}'.format(addr))

    server_context = None
    if args.ssl:
        print('with SSL')
        if hasattr(ssl, 'PROTOCOL_TLS'):
            server_context = ssl.SSLContext(ssl.PROTOCOL_TLS)
        else:
            server_context = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
        server_context.load_cert_chain(
            (pathlib.Path(__file__).parent.parent.parent /
             'tests' / 'certs' / 'ssl_cert.pem'),
            (pathlib.Path(__file__).parent.parent.parent /
             'tests' / 'certs' / 'ssl_key.pem'))
        if hasattr(server_context, 'check_hostname'):
            server_context.check_hostname = False
        server_context.verify_mode = ssl.CERT_NONE

    if args.streams:
        if args.proto:
            print('cannot use --stream and --proto simultaneously')
            exit(1)
        if args.buffered:
            print('cannot use --stream and --buffered simultaneously')
            exit(1)
        print('using asyncio/streams')
        if unix:
            coro = asyncio.start_unix_server(echo_client_streams,
                                             addr,
                                             ssl=server_context)
        else:
            coro = asyncio.start_server(echo_client_streams,
                                        *addr,
                                        ssl=server_context)
        srv = loop.run_until_complete(coro)
    elif args.proto:
        if args.streams:
            print('cannot use --stream and --proto simultaneously')
            exit(1)
        # Fixed: this branch was corrupted by stray "|" tokens around
        # "if args.buffered" and "protocol".
        if args.buffered:
            print('using buffered protocol')
            protocol = EchoBufferedProtocol
        else:
            print('using simple protocol')
            protocol = EchoProtocol
        if unix:
            coro = loop.create_unix_server(protocol, addr,
                                           ssl=server_context)
        else:
            coro = loop.create_server(protocol, *addr,
                                      ssl=server_context)
        srv = loop.run_until_complete(coro)
    else:
        if args.ssl:
            print('cannot use SSL for loop.sock_* methods')
            exit(1)
        print('using sock_recv/sock_sendall')
        loop.create_task(echo_server(loop, addr, unix))

    try:
        loop.run_forever()
    finally:
        if hasattr(loop, 'print_debug_info'):
            gc.collect()
            print(chr(27) + "[2J")
            loop.print_debug_info()
        loop.close()
|
import sys
from hexdump import hexdump

# IDA database file layout:
# name.id0 - contains contents of B-tree style database
# name.id1 - contains flags that describe each program byte
# name.nam - contains index information related to named program locations
# name.til - contains information about local type definitions

# Size of one page in the .id0 B-tree file.
BTREE_PAGE_SIZE = 8192

# Take the database path from the command line; fall back to the original
# hard-coded path so existing invocations keep working.
path = sys.argv[1] if len(sys.argv) > 1 else "/Users/geohot/tmp/test.id0"

# The .id0 database is binary data: read it as bytes and close the handle
# deterministically (the original leaked a text-mode file object and used
# Python 2 print statements / float division).
with open(path, "rb") as f:
    dat = f.read()

print(hex(len(dat)), len(dat) // BTREE_PAGE_SIZE)

# Dump the first 0xC0 bytes of every B-tree page.
for i in range(0, len(dat), BTREE_PAGE_SIZE):
    hexdump(dat[i:i + 0xC0])
    print("")
|
# coding=utf-8
"""Unit tests for mapi/endpoints/tmdb.py."""
import pytest
from mapi.endpoints import tmdb_find, tmdb_movies, tmdb_search_movies
from mapi.exceptions import MapiNotFoundException, MapiProviderException
from tests import JUNK_TEXT
# Well-known fixture ids used by the tests below.
GOONIES_IMDB_ID = "tt0089218"  # IMDb id for "The Goonies" (1985)
GOONIES_TMDB_ID = 9340  # TMDb id for the same film
JUNK_IMDB_ID = "tt1234567890"  # well-formed but unassigned IMDb id
@pytest.mark.usefixtures("tmdb_api_key")
def test_tmdb_find__imdb_success(tmdb_api_key):
    """tmdb_find resolves an IMDb id into the expected payload shape."""
    top_level = set(
        "movie_results person_results tv_episode_results "
        "tv_results tv_season_results".split()
    )
    movie_keys = set(
        "adult backdrop_path genre_ids id original_language "
        "original_title overview poster_path popularity release_date "
        "title video vote_average vote_count".split()
    )
    payload = tmdb_find(tmdb_api_key, "imdb_id", GOONIES_IMDB_ID)
    assert isinstance(payload, dict)
    assert set(payload.keys()) == top_level
    movies = payload.get("movie_results", {})
    assert len(movies) > 0
    assert movie_keys == set(movies[0].keys())
@pytest.mark.usefixtures("tmdb_api_key")
def test_tmdb_find__api_key_fail():
    """Using junk text as the API key raises MapiProviderException."""
    bad_key = JUNK_TEXT
    with pytest.raises(MapiProviderException):
        tmdb_find(bad_key, "imdb_id", GOONIES_IMDB_ID, cache=False)
@pytest.mark.usefixtures("tmdb_api_key")
def test_tmdb_find__invalid_id_imdb(tmdb_api_key):
    """A malformed external id raises MapiProviderException."""
    malformed_id = JUNK_TEXT
    with pytest.raises(MapiProviderException):
        tmdb_find(tmdb_api_key, "imdb_id", malformed_id, cache=False)
@pytest.mark.usefixtures("tmdb_api_key")
def test_tmdb_find__not_found(tmdb_api_key):
    """A well-formed but unassigned IMDb id raises MapiNotFoundException."""
    with pytest.raises(MapiNotFoundException):
        tmdb_find(tmdb_api_key, "imdb_id", JUNK_IMDB_ID)
@pytest.mark.usefixtures("tmdb_api_key")
def test_tmdb_movies__success(tmdb_api_key):
    """Movie-detail lookup returns the full detail schema for The Goonies."""
    detail_keys = set(
        "adult backdrop_path belongs_to_collection budget genres homepage "
        "id imdb_id original_language original_title overview popularity "
        "poster_path production_companies production_countries release_date "
        "revenue runtime spoken_languages status tagline title video "
        "vote_average vote_count".split()
    )
    detail = tmdb_movies(tmdb_api_key, GOONIES_TMDB_ID)
    assert isinstance(detail, dict)
    assert set(detail.keys()) == detail_keys
    assert detail.get("original_title") == "The Goonies"
def test_tmdb_movies__api_key_fail():
    """A junk API key raises MapiProviderException before any lookup."""
    bad_key = JUNK_TEXT
    with pytest.raises(MapiProviderException):
        tmdb_movies(bad_key, "", cache=False)
@pytest.mark.usefixtures("tmdb_api_key")
def test_tmdb_movies__id_tmdb_fail(tmdb_api_key):
    """A non-numeric TMDb id raises MapiProviderException."""
    with pytest.raises(MapiProviderException):
        tmdb_movies(tmdb_api_key, JUNK_TEXT, cache=False)
@pytest.mark.usefixtures("tmdb_api_key")
def test_tmdb_movies__not_found(tmdb_api_key):
    """A numeric but unassigned TMDb id raises MapiNotFoundException."""
    missing_id = "1" * 10
    with pytest.raises(MapiNotFoundException):
        tmdb_movies(tmdb_api_key, missing_id)
@pytest.mark.usefixtures("tmdb_api_key")
def test_tmdb_search_movies__success(tmdb_api_key):
    """Title+year search returns a single paged match with the result schema."""
    page_keys = set("page results total_pages total_results".split())
    result_keys = set(
        "adult backdrop_path genre_ids id original_language original_title "
        "overview popularity poster_path release_date title video "
        "vote_average vote_count".split()
    )
    paged = tmdb_search_movies(tmdb_api_key, "the goonies", 1985)
    assert isinstance(paged, dict)
    assert set(paged.keys()) == page_keys
    assert isinstance(paged["results"], list)
    assert result_keys == set(paged.get("results", [{}])[0].keys())
    assert len(paged["results"]) == 1
    assert paged["results"][0]["original_title"] == "The Goonies"
    # Without the year filter the same title matches more than one film.
    paged = tmdb_search_movies(tmdb_api_key, "the goonies")
    assert len(paged["results"]) > 1
def test_tmdb_search_movies__bad_api_key():
    """A junk API key raises MapiProviderException on search."""
    bad_key = JUNK_TEXT
    with pytest.raises(MapiProviderException):
        tmdb_search_movies(bad_key, "the goonies", cache=False)
@pytest.mark.usefixtures("tmdb_api_key")
def test_tmdb_search_movies__bad_title(tmdb_api_key):
    """A nonsense title matches nothing and raises MapiNotFoundException."""
    with pytest.raises(MapiNotFoundException):
        tmdb_search_movies(tmdb_api_key, JUNK_TEXT, cache=False)
@pytest.mark.usefixtures("tmdb_api_key")
def test_tmdb_search_movies__bad_year(tmdb_api_key):
    """A non-numeric year value raises MapiProviderException."""
    bad_year = JUNK_TEXT
    with pytest.raises(MapiProviderException):
        tmdb_search_movies(
            tmdb_api_key, "the goonies", year=bad_year, cache=False
        )
|
from __future__ import a | bsolute_import
# This will make sure the app is always imported when
# Django starts so that shared_task | will use this app.
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tools for analyzing the operations and variables in a TensorFlow graph.
To analyze the operations in a graph:
images, labels = LoadData(...)
predictions = MyModel(images)
slim.model_analyzer.analyze_ops(tf.get_default_graph(), print_info=True)
To analyze the model variables in a graph:
variables = tf.model_variables()
slim.model_analyzer.analyze_vars(variables, print_info=False)
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
def tensor_description(var):
    """Returns a compact and informative string about a tensor.

    Args:
      var: A tensor variable.

    Returns:
      a string with type and size, e.g.: (float32 1x8x8x1024).
    """
    # Join the dimensions with 'x' separators, e.g. "1x8x8x1024".
    dims = 'x'.join(str(dim) for dim in var.get_shape())
    return '(' + str(var.dtype.name) + ' ' + dims + ')'
def analyze_ops(graph, print_info=False):
    """Compute the estimated size of the ops.outputs in the graph.

    Args:
      graph: the graph containing the operations.
      print_info: Optional, if true print ops and their outputs.

    Returns:
      total size of the ops.outputs
    """
    if print_info:
        print('---------')
        print('Operations: name -> (type shapes) [size]')
        print('---------')
    total_size = 0
    for operation in graph.get_operations():
        op_size = 0
        descriptions = []
        for out in operation.outputs:
            # An unknown element count (None) contributes size 0.
            op_size += out.get_shape().num_elements() or 0
            if out.get_shape():
                descriptions.append(tensor_description(out))
        if print_info:
            print(operation.name, '\t->', ', '.join(descriptions),
                  '[' + str(op_size) + ']')
        total_size += op_size
    return total_size
def analyze_vars(variables, print_info=False):
    """Prints the names and shapes of the variables.

    Args:
      variables: list of variables, for example tf.all_variables().
      print_info: Optional, if true print variables and their shape.

    Returns:
      total size of the variables.
    """
    if print_info:
        print('---------')
        print('Variables: name (type shape) [size]')
        print('---------')
    total_size = 0
    for variable in variables:
        # An unknown element count (None) contributes size 0.
        num_elements = variable.get_shape().num_elements() or 0
        if print_info:
            print(variable.name, tensor_description(variable),
                  '[' + str(num_elements) + ']')
        total_size += num_elements
    return total_size
|
class Solution:
    # @param {integer[]} height
    # @return {integer}
    def largestRectangleArea(self, height):
        """Largest rectangle area in a histogram, O(n) monotonic stack.

        The stack holds indices of bars with non-decreasing heights;
        -1 is a sentinel marking the left boundary.
        """
        n = len(height)
        best = 0
        stack = [-1]
        for i in range(n):  # range: xrange is Python 2 only (NameError on py3)
            # Pop every bar taller than the current one; each popped bar's
            # maximal rectangle is bounded by index i on the right and the
            # new stack top on the left.
            while stack[-1] > -1 and height[i] < height[stack[-1]]:
                top = stack.pop()
                best = max(best, height[top] * (i - 1 - stack[-1]))
            stack.append(i)
        # Bars still on the stack extend to the histogram's right edge.
        while stack[-1] != -1:
            top = stack.pop()
            best = max(best, height[top] * (n - 1 - stack[-1]))
        return best
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.