"""Integration test for breakpad in content shell.
This test checks that content shell and breakpad are correctly hooked up, as
well as that the tools can symbolize a stack trace."""
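# Example invocation (a sketch; the script name and paths below are assumed and
# depend on your checkout and build configuration):
#   python breakpad_integration_test.py \
#       --build-dir=out/Release --binary=out/Release/content_shell --verbose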
import glob
import optparse
import os
import shutil
import subprocess
import sys
import tempfile
CONCURRENT_TASKS=4
def main():
parser = optparse.OptionParser()
parser.add_option('', '--build-dir', default='',
help='The build output directory.')
parser.add_option('', '--binary', default='',
help='The path of the binary to generate symbols for.')
parser.add_option('', '--no-symbols', default=False, action='store_true',
help='Symbols are not expected to work.')
parser.add_option('-j', '--jobs', default=CONCURRENT_TASKS, action='store',
type='int', help='Number of parallel tasks to run.')
parser.add_option('-v', '--verbose', action='store_true',
help='Print verbose status output.')
(options, _) = parser.parse_args()
if not options.build_dir:
print "Required option --build-dir missing."
return 1
if not options.binary:
print "Required option --binary missing."
return 1
if not os.access(options.binary, os.X_OK):
print "Cannot find %s." % options.binary
return 1
failure = ''
# Create a temporary directory to store the crash dumps and symbols in.
crash_dir = tempfile.mkdtemp()
try:
print "# Generate symbols."
breakpad_tools_dir = os.path.join(
os.path.dirname(__file__), '..', '..', '..',
'components', 'crash', 'content', 'tools')
generate_symbols = os.path.join(
breakpad_tools_dir, 'generate_breakpad_symbols.py')
symbols_dir = os.path.join(crash_dir, 'symbols')
cmd = [generate_symbols,
'--build-dir=%s' % options.build_dir,
'--binary=%s' % options.binary,
'--symbols-dir=%s' % symbols_dir,
'--jobs=%d' % options.jobs]
if options.verbose:
cmd.append('--verbose')
print ' '.join(cmd)
failure = 'Failed to run generate_breakpad_symbols.py.'
subprocess.check_call(cmd)
print "# Run content_shell and make it crash."
cmd = [options.binary,
'--run-layout-test',
'chrome://crash',
'--enable-crash-reporter',
'--crash-dumps-dir=%s' % crash_dir]
if options.verbose:
print ' '.join(cmd)
failure = 'Failed to run content_shell.'
if options.verbose:
subprocess.check_call(cmd)
else:
with open(os.devnull, 'w') as devnull:
subprocess.check_call(cmd, stdout=devnull, stderr=devnull)
print "# Retrieve crash dump."
dmp_files = glob.glob(os.path.join(crash_dir, '*.dmp'))
failure = 'Expected 1 crash dump, found %d.' % len(dmp_files)
if len(dmp_files) != 1:
raise Exception(failure)
dmp_file = dmp_files[0]
minidump = os.path.join(crash_dir, 'minidump')
dmp_to_minidump = os.path.join(breakpad_tools_dir, 'dmp2minidump.py')
cmd = [dmp_to_minidump, dmp_file, minidump]
if options.verbose:
print ' '.join(cmd)
failure = 'Failed to run dmp_to_minidump.'
subprocess.check_call(cmd)
print "# Symbolize crash dump."
minidump_stackwalk = os.path.join(options.build_dir, 'minidump_stackwalk')
cmd = [minidump_stackwalk, minidump, symbols_dir]
if options.verbose:
print ' '.join(cmd)
failure = 'Failed to run minidump_stackwalk.'
proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stack = proc.communicate()[0]
# Check whether the stack contains a CrashIntentionally symbol.
found_symbol = 'CrashIntentionally' in stack
if options.no_symbols:
if found_symbol:
if options.verbose:
print stack
failure = 'Found unexpected reference to CrashIntentionally in stack'
raise Exception(failure)
else:
if not found_symbol:
if options.verbose:
print stack
failure = 'Could not find reference to CrashIntentionally in stack.'
raise Exception(failure)
except:
print "FAIL: %s" % failure
return 1
else:
print "PASS: Breakpad integration test ran successfully."
return 0
finally:
try:
shutil.rmtree(crash_dir)
except:
print 'Failed to delete temp directory "%s".' % crash_dir
if '__main__' == __name__:
sys.exit(main())
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = """
---
module: ce_vxlan_gateway
version_added: "2.4"
short_description: Manages gateway for the VXLAN network on HUAWEI CloudEngine devices.
description:
- Configuring Centralized All-Active Gateways or Distributed Gateway for
the VXLAN Network on HUAWEI CloudEngine devices.
author: QijunPan (@CloudEngine-Ansible)
notes:
    - All-Active Gateways and Distributed Gateway for the VXLAN Network cannot be configured at the same time.
options:
dfs_id:
description:
- Specifies the ID of a DFS group.
The value must be 1.
required: false
default: null
dfs_source_ip:
description:
- Specifies the IPv4 address bound to a DFS group.
The value is in dotted decimal notation.
required: false
default: null
dfs_source_vpn:
description:
- Specifies the name of a VPN instance bound to a DFS group.
The value is a string of 1 to 31 case-sensitive characters without spaces.
If the character string is quoted by double quotation marks, the character string can contain spaces.
The value C(_public_) is reserved and cannot be used as the VPN instance name.
required: false
default: null
dfs_udp_port:
description:
- Specifies the UDP port number of the DFS group.
The value is an integer that ranges from 1025 to 65535.
required: false
default: null
dfs_all_active:
description:
- Creates all-active gateways.
required: false
choices: ['enable', 'disable']
default: null
dfs_peer_ip:
description:
            - Specifies the IP address of an all-active gateway peer.
The value is in dotted decimal notation.
required: false
default: null
dfs_peer_vpn:
description:
- Specifies the name of the VPN instance that is associated with all-active gateway peer.
The value is a string of 1 to 31 case-sensitive characters, spaces not supported.
When double quotation marks are used around the string, spaces are allowed in the string.
The value C(_public_) is reserved and cannot be used as the VPN instance name.
required: false
default: null
vpn_instance:
description:
- Specifies the name of a VPN instance.
The value is a string of 1 to 31 case-sensitive characters, spaces not supported.
When double quotation marks are used around the string, spaces are allowed in the string.
The value C(_public_) is reserved and cannot be used as the VPN instance name.
required: false
default: null
vpn_vni:
description:
- Specifies a VNI ID.
Binds a VXLAN network identifier (VNI) to a virtual private network (VPN) instance.
The value is an integer ranging from 1 to 16000000.
required: false
default: null
vbdif_name:
description:
            - Full name of the VBDIF interface, for example, Vbdif100.
required: false
default: null
vbdif_bind_vpn:
description:
- Specifies the name of the VPN instance that is associated with the interface.
The value is a string of 1 to 31 case-sensitive characters, spaces not supported.
When double quotation marks are used around the string, spaces are allowed in the string.
The value C(_public_) is reserved and cannot be used as the VPN instance name.
required: false
default: null
vbdif_mac:
description:
- Specifies a MAC address for a VBDIF interface.
The value is in the format of H-H-H. Each H is a 4-digit hexadecimal number, such as C(00e0) or C(fc01).
If an H contains less than four digits, 0s are added ahead. For example, C(e0) is equal to C(00e0).
A MAC address cannot be all 0s or 1s or a multicast MAC address.
required: false
default: null
arp_distribute_gateway:
description:
- Enable the distributed gateway function on VBDIF interface.
required: false
choices: ['enable','disable']
default: null
arp_direct_route:
description:
- Enable VLINK direct route on VBDIF interface.
required: false
choices: ['enable','disable']
default: null
state:
description:
- Determines whether the config should be present or not
on the device.
required: false
default: present
choices: ['present', 'absent']
"""
EXAMPLES = '''
- name: vxlan gateway module test
hosts: ce128
connection: local
gather_facts: no
vars:
cli:
host: "{{ inventory_hostname }}"
port: "{{ ansible_ssh_port }}"
username: "{{ username }}"
password: "{{ password }}"
transport: cli
tasks:
- name: Configuring Centralized All-Active Gateways for the VXLAN Network
ce_vxlan_gateway:
dfs_id: 1
dfs_source_ip: 6.6.6.6
dfs_all_active: enable
dfs_peer_ip: 7.7.7.7
provider: "{{ cli }}"
- name: Bind the VPN instance to a Layer 3 gateway, enable distributed gateway, and configure host route advertisement.
ce_vxlan_gateway:
vbdif_name: Vbdif100
vbdif_bind_vpn: vpn1
arp_distribute_gateway: enable
arp_direct_route: enable
provider: "{{ cli }}"
- name: Assign a VNI to a VPN instance.
ce_vxlan_gateway:
vpn_instance: vpn1
vpn_vni: 100
provider: "{{ cli }}"
'''
RETURN = '''
proposed:
description: k/v pairs of parameters passed into module
returned: verbose mode
type: dict
sample: {"dfs_id": "1", "dfs_source_ip": "6.6.6.6", "dfs_all_active":"enable", "dfs_peer_ip": "7.7.7.7"}
existing:
description: k/v pairs of existing configuration
returned: verbose mode
type: dict
sample: {"dfs_id": "1", "dfs_source_ip": null, "evn_peer_ip": [], "dfs_all_active": "disable"}
end_state:
description: k/v pairs of configuration after module execution
returned: verbose mode
type: dict
sample: {"dfs_id": "1", "evn_source_ip": "6.6.6.6", "evn_source_vpn": null,
"evn_peers": [{"ip": "7.7.7.7", "vpn": ""}], "dfs_all_active": "enable"}
updates:
description: commands sent to the device
returned: always
type: list
sample: ["dfs-group 1",
"source ip 6.6.6.6",
"active-active-gateway",
"peer 7.7.7.7"]
changed:
description: check to see if a change was made on the device
returned: always
type: boolean
sample: true
'''
import re
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.network.cloudengine.ce import get_config, load_config
from ansible.module_utils.network.cloudengine.ce import ce_argument_spec
def is_config_exist(cmp_cfg, test_cfg):
"""is configuration exist?"""
if not cmp_cfg or not test_cfg:
return False
return bool(test_cfg in cmp_cfg)
def is_valid_v4addr(addr):
"""check is ipv4 addr"""
if not addr:
return False
if addr.count('.') == 3:
addr_list = addr.split('.')
if len(addr_list) != 4:
return False
for each_num in addr_list:
if not each_num.isdigit():
return False
if int(each_num) > 255:
return False
return True
return False
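# Illustrative behaviour of is_valid_v4addr (derived from the checks above):
#   is_valid_v4addr("6.6.6.6")   -> True
#   is_valid_v4addr("6.6.6.256") -> False  (octet out of range)
#   is_valid_v4addr("6.6.6")     -> False  (exactly three dots are required)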
def mac_format(mac):
"""convert mac format to xxxx-xxxx-xxxx"""
if not mac:
return None
if mac.count("-") != 2:
return None
addrs = mac.split("-")
for i in range(3):
if not addrs[i] or not addrs[i].isalnum():
return None
if len(addrs[i]) < 1 or len(addrs[i]) > 4:
return None
try:
addrs[i] = int(addrs[i], 16)
except ValueError:
return None
try:
return "%04x-%04x-%04x" % (addrs[0], addrs[1], addrs[2])
except ValueError:
return None
except TypeError:
return None
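# Illustrative behaviour of mac_format (derived from the code above):
#   mac_format("e0-fc01-0")    -> "00e0-fc01-0000"  (each group zero-padded to 4 hex digits)
#   mac_format("00e0fc010000") -> None              (exactly two '-' separators are required)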
def get_dfs_source_ip(config):
"""get dfs source ip address"""
get = re.findall(r"source ip ([0-9]+.[0-9]+.[0-9]+.[0-9]+)", config)
if not get:
return None
else:
return get[0]
def get_dfs_source_vpn(config):
"""get dfs source ip vpn instance name"""
    get = re.findall(
        r"source ip [0-9]+\.[0-9]+\.[0-9]+\.[0-9]+ vpn-instance (\S+)", config)
if not get:
return None
else:
return get[0]
def get_dfs_udp_port(config):
"""get dfs udp port"""
get = re.findall(r"udp port (\d+)", config)
if not get:
return None
else:
return get[0]
def get_dfs_peers(config):
"""get evn peer ip list"""
get = re.findall(
r"peer ([0-9]+.[0-9]+.[0-9]+.[0-9]+)\s?(vpn-instance)?\s?(\S*)", config)
if not get:
return None
else:
peers = list()
for item in get:
peers.append(dict(ip=item[0], vpn=item[2]))
return peers
def get_ip_vpn(config):
"""get ip vpn instance"""
get = re.findall(r"ip vpn-instance (\S+)", config)
if not get:
return None
else:
return get[0]
def get_ip_vpn_vni(config):
"""get ip vpn vxlan vni"""
get = re.findall(r"vxlan vni (\d+)", config)
if not get:
return None
else:
return get[0]
def get_vbdif_vpn(config):
"""get ip vpn name of interface vbdif"""
get = re.findall(r"ip binding vpn-instance (\S+)", config)
if not get:
return None
else:
return get[0]
def get_vbdif_mac(config):
"""get mac address of interface vbdif"""
get = re.findall(
r" mac-address ([0-9a-fA-F]{1,4}-[0-9a-fA-F]{1,4}-[0-9a-fA-F]{1,4})", config)
if not get:
return None
else:
return get[0]
class VxlanGateway(object):
"""
Manages Gateway for the VXLAN Network.
"""
def __init__(self, argument_spec):
self.spec = argument_spec
self.module = None
self.init_module()
# module input info
self.dfs_id = self.module.params['dfs_id']
self.dfs_source_ip = self.module.params['dfs_source_ip']
self.dfs_source_vpn = self.module.params['dfs_source_vpn']
self.dfs_udp_port = self.module.params['dfs_udp_port']
self.dfs_all_active = self.module.params['dfs_all_active']
self.dfs_peer_ip = self.module.params['dfs_peer_ip']
self.dfs_peer_vpn = self.module.params['dfs_peer_vpn']
self.vpn_instance = self.module.params['vpn_instance']
self.vpn_vni = self.module.params['vpn_vni']
self.vbdif_name = self.module.params['vbdif_name']
self.vbdif_mac = self.module.params['vbdif_mac']
self.vbdif_bind_vpn = self.module.params['vbdif_bind_vpn']
self.arp_distribute_gateway = self.module.params['arp_distribute_gateway']
self.arp_direct_route = self.module.params['arp_direct_route']
self.state = self.module.params['state']
# host info
self.host = self.module.params['host']
self.username = self.module.params['username']
self.port = self.module.params['port']
# state
self.config = "" # current config
self.changed = False
self.updates_cmd = list()
self.commands = list()
self.results = dict()
self.proposed = dict()
self.existing = dict()
self.end_state = dict()
def init_module(self):
"""init module"""
self.module = AnsibleModule(
argument_spec=self.spec, supports_check_mode=True)
def cli_load_config(self, commands):
"""load config by cli"""
if not self.module.check_mode:
load_config(self.module, commands)
def get_current_config(self):
"""get current configuration"""
flags = list()
exp = " | ignore-case section include dfs-group"
if self.vpn_instance:
exp += "|^ip vpn-instance %s$" % self.vpn_instance
if self.vbdif_name:
exp += "|^interface %s$" % self.vbdif_name
flags.append(exp)
return get_config(self.module, flags)
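    # Note: the flags built above act as a display filter appended to the device's
    # configuration query, so only the dfs-group section and, when supplied, the
    # matching "ip vpn-instance" and VBDIF interface sections are expected back.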
def cli_add_command(self, command, undo=False):
"""add command to self.update_cmd and self.commands"""
if undo and command.lower() not in ["quit", "return"]:
cmd = "undo " + command
else:
cmd = command
self.commands.append(cmd) # set to device
if command.lower() not in ["quit", "return"]:
self.updates_cmd.append(cmd) # show updates result
def config_dfs_group(self):
"""manage Dynamic Fabric Service (DFS) group configuration"""
if not self.dfs_id:
return
dfs_view = False
view_cmd = "dfs-group %s" % self.dfs_id
exist = is_config_exist(self.config, view_cmd)
if self.state == "present" and not exist:
self.cli_add_command(view_cmd)
dfs_view = True
# undo dfs-group dfs-group-id
if self.state == "absent" and exist:
if not self.dfs_source_ip and not self.dfs_udp_port and not self.dfs_all_active and not self.dfs_peer_ip:
self.cli_add_command(view_cmd, undo=True)
return
# [undo] source ip ip-address [ vpn-instance vpn-instance-name ]
if self.dfs_source_ip:
cmd = "source ip %s" % self.dfs_source_ip
if self.dfs_source_vpn:
cmd += " vpn-instance %s" % self.dfs_source_vpn
exist = is_config_exist(self.config, cmd)
if self.state == "present" and not exist:
if not dfs_view:
self.cli_add_command(view_cmd)
dfs_view = True
self.cli_add_command(cmd)
if self.state == "absent" and exist:
if not dfs_view:
self.cli_add_command(view_cmd)
dfs_view = True
self.cli_add_command(cmd, undo=True)
# [undo] udp port port-number
if self.dfs_udp_port:
cmd = "udp port %s" % self.dfs_udp_port
exist = is_config_exist(self.config, cmd)
if self.state == "present" and not exist:
if not dfs_view:
self.cli_add_command(view_cmd)
dfs_view = True
self.cli_add_command(cmd)
elif self.state == "absent" and exist:
if not dfs_view:
self.cli_add_command(view_cmd)
dfs_view = True
self.cli_add_command(cmd, undo=True)
# [undo] active-active-gateway
# [undo]peer[ vpn-instance vpn-instance-name ]
aa_cmd = "active-active-gateway"
aa_exist = is_config_exist(self.config, aa_cmd)
aa_view = False
if self.dfs_all_active == "disable":
if aa_exist:
cmd = "peer %s" % self.dfs_peer_ip
                if self.dfs_peer_vpn:
cmd += " vpn-instance %s" % self.dfs_peer_vpn
exist = is_config_exist(self.config, cmd)
if self.state == "absent" and exist:
if not dfs_view:
self.cli_add_command(view_cmd)
dfs_view = True
self.cli_add_command(aa_cmd)
self.cli_add_command(cmd, undo=True)
self.cli_add_command("quit")
if not dfs_view:
self.cli_add_command(view_cmd)
dfs_view = True
self.cli_add_command(aa_cmd, undo=True)
elif self.dfs_all_active == "enable":
if not aa_exist:
if not dfs_view:
self.cli_add_command(view_cmd)
dfs_view = True
self.cli_add_command(aa_cmd)
aa_view = True
if self.dfs_peer_ip:
cmd = "peer %s" % self.dfs_peer_ip
if self.dfs_peer_vpn:
cmd += " vpn-instance %s" % self.dfs_peer_vpn
exist = is_config_exist(self.config, cmd)
if self.state == "present" and not exist:
if not dfs_view:
self.cli_add_command(view_cmd)
dfs_view = True
if not aa_view:
self.cli_add_command(aa_cmd)
self.cli_add_command(cmd)
self.cli_add_command("quit")
elif self.state == "absent" and exist:
if not dfs_view:
self.cli_add_command(view_cmd)
dfs_view = True
if not aa_view:
self.cli_add_command(aa_cmd)
self.cli_add_command(cmd, undo=True)
self.cli_add_command("quit")
else: # not input dfs_all_active
if aa_exist and self.dfs_peer_ip:
cmd = "peer %s" % self.dfs_peer_ip
if self.dfs_peer_vpn:
cmd += " vpn-instance %s" % self.dfs_peer_vpn
exist = is_config_exist(self.config, cmd)
if self.state == "present" and not exist:
if not dfs_view:
self.cli_add_command(view_cmd)
dfs_view = True
self.cli_add_command(aa_cmd)
self.cli_add_command(cmd)
self.cli_add_command("quit")
elif self.state == "absent" and exist:
if not dfs_view:
self.cli_add_command(view_cmd)
dfs_view = True
self.cli_add_command(aa_cmd)
self.cli_add_command(cmd, undo=True)
self.cli_add_command("quit")
else:
pass
elif not aa_exist and self.dfs_peer_ip and self.state == "present":
self.module.fail_json(
msg="Error: All-active gateways is not enable.")
else:
pass
if dfs_view:
self.cli_add_command("quit")
def config_ip_vpn(self):
"""configure command at the ip vpn view"""
if not self.vpn_instance or not self.vpn_vni:
return
# ip vpn-instance vpn-instance-name
view_cmd = "ip vpn-instance %s" % self.vpn_instance
exist = is_config_exist(self.config, view_cmd)
if not exist:
self.module.fail_json(
msg="Error: ip vpn instance %s is not exist." % self.vpn_instance)
# [undo] vxlan vni vni-id
cmd = "vxlan vni %s" % self.vpn_vni
exist = is_config_exist(self.config, cmd)
if self.state == "present" and not exist:
self.cli_add_command(view_cmd)
self.cli_add_command(cmd)
self.cli_add_command("quit")
elif self.state == "absent" and exist:
self.cli_add_command(view_cmd)
self.cli_add_command(cmd, undo=True)
self.cli_add_command("quit")
def config_vbdif(self):
"""configure command at the VBDIF interface view"""
if not self.vbdif_name:
return
vbdif_cmd = "interface %s" % self.vbdif_name.lower().capitalize()
exist = is_config_exist(self.config, vbdif_cmd)
if not exist:
self.module.fail_json(
msg="Error: Interface %s is not exist." % self.vbdif_name)
# interface vbdif bd-id
# [undo] ip binding vpn-instance vpn-instance-name
vbdif_view = False
if self.vbdif_bind_vpn:
cmd = "ip binding vpn-instance %s" % self.vbdif_bind_vpn
exist = is_config_exist(self.config, cmd)
if self.state == "present" and not exist:
if not vbdif_view:
self.cli_add_command(vbdif_cmd)
vbdif_view = True
self.cli_add_command(cmd)
elif self.state == "absent" and exist:
if not vbdif_view:
self.cli_add_command(vbdif_cmd)
vbdif_view = True
self.cli_add_command(cmd, undo=True)
# [undo] arp distribute-gateway enable
if self.arp_distribute_gateway:
cmd = "arp distribute-gateway enable"
exist = is_config_exist(self.config, cmd)
if self.arp_distribute_gateway == "enable" and not exist:
if not vbdif_view:
self.cli_add_command(vbdif_cmd)
vbdif_view = True
self.cli_add_command(cmd)
elif self.arp_distribute_gateway == "disable" and exist:
if not vbdif_view:
self.cli_add_command(vbdif_cmd)
vbdif_view = True
self.cli_add_command(cmd, undo=True)
# [undo] arp direct-route enable
if self.arp_direct_route:
cmd = "arp direct-route enable"
exist = is_config_exist(self.config, cmd)
if self.arp_direct_route == "enable" and not exist:
if not vbdif_view:
self.cli_add_command(vbdif_cmd)
vbdif_view = True
self.cli_add_command(cmd)
elif self.arp_direct_route == "disable" and exist:
if not vbdif_view:
self.cli_add_command(vbdif_cmd)
vbdif_view = True
self.cli_add_command(cmd, undo=True)
# mac-address mac-address
# undo mac-address
if self.vbdif_mac:
cmd = "mac-address %s" % self.vbdif_mac
exist = is_config_exist(self.config, cmd)
if self.state == "present" and not exist:
if not vbdif_view:
self.cli_add_command(vbdif_cmd)
vbdif_view = True
self.cli_add_command(cmd)
elif self.state == "absent" and exist:
if not vbdif_view:
self.cli_add_command(vbdif_cmd)
vbdif_view = True
self.cli_add_command("undo mac-address")
# quit
if vbdif_view:
self.cli_add_command("quit")
def is_valid_vbdif(self, ifname):
"""check is interface vbdif"""
if not ifname.upper().startswith('VBDIF'):
return False
        bdid = ifname.replace(" ", "").upper().replace("VBDIF", "")
if not bdid.isdigit():
return False
if int(bdid) < 1 or int(bdid) > 16777215:
return False
return True
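    # Illustrative examples for is_valid_vbdif (based on the checks above):
    #   "Vbdif100"   -> True
    #   "Vbdif0"     -> False  (BD id must be 1..16777215)
    #   "Eth-Trunk1" -> False  (name must start with "Vbdif")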
def is_valid_ip_vpn(self, vpname):
"""check ip vpn"""
if not vpname:
return False
if vpname == "_public_":
self.module.fail_json(
msg="Error: The value C(_public_) is reserved and cannot be used as the VPN instance name.")
if len(vpname) < 1 or len(vpname) > 31:
self.module.fail_json(
msg="Error: IP vpn name length is not in the range from 1 to 31.")
return True
def check_params(self):
"""Check all input params"""
# dfs id check
if self.dfs_id:
if not self.dfs_id.isdigit():
self.module.fail_json(msg="Error: DFS id is not digit.")
if int(self.dfs_id) != 1:
self.module.fail_json(msg="Error: DFS is not 1.")
# dfs_source_ip check
if self.dfs_source_ip:
if not is_valid_v4addr(self.dfs_source_ip):
self.module.fail_json(msg="Error: dfs_source_ip is invalid.")
# dfs_source_vpn check
if self.dfs_source_vpn and not self.is_valid_ip_vpn(self.dfs_source_vpn):
self.module.fail_json(msg="Error: dfs_source_vpn is invalid.")
# dfs_source_vpn and dfs_source_ip must set at the same time
if self.dfs_source_vpn and not self.dfs_source_ip:
self.module.fail_json(
msg="Error: dfs_source_vpn and dfs_source_ip must set at the same time.")
# dfs_udp_port check
if self.dfs_udp_port:
if not self.dfs_udp_port.isdigit():
self.module.fail_json(
msg="Error: dfs_udp_port id is not digit.")
if int(self.dfs_udp_port) < 1025 or int(self.dfs_udp_port) > 65535:
self.module.fail_json(
msg="dfs_udp_port is not ranges from 1025 to 65535.")
# dfs_peer_ip check
if self.dfs_peer_ip:
if not is_valid_v4addr(self.dfs_peer_ip):
self.module.fail_json(msg="Error: dfs_peer_ip is invalid.")
# dfs_peer_vpn check
if self.dfs_peer_vpn and not self.is_valid_ip_vpn(self.dfs_peer_vpn):
self.module.fail_json(msg="Error: dfs_peer_vpn is invalid.")
# dfs_peer_vpn and dfs_peer_ip must set at the same time
if self.dfs_peer_vpn and not self.dfs_peer_ip:
self.module.fail_json(
msg="Error: dfs_peer_vpn and dfs_peer_ip must set at the same time.")
# vpn_instance check
if self.vpn_instance and not self.is_valid_ip_vpn(self.vpn_instance):
self.module.fail_json(msg="Error: vpn_instance is invalid.")
# vpn_vni check
if self.vpn_vni:
if not self.vpn_vni.isdigit():
self.module.fail_json(msg="Error: vpn_vni id is not digit.")
if int(self.vpn_vni) < 1 or int(self.vpn_vni) > 16000000:
self.module.fail_json(
msg="vpn_vni is not ranges from 1 to 16000000.")
# vpn_instance and vpn_vni must set at the same time
if bool(self.vpn_instance) != bool(self.vpn_vni):
self.module.fail_json(
msg="Error: vpn_instance and vpn_vni must set at the same time.")
# vbdif_name check
if self.vbdif_name:
self.vbdif_name = self.vbdif_name.replace(" ", "").lower().capitalize()
if not self.is_valid_vbdif(self.vbdif_name):
self.module.fail_json(msg="Error: vbdif_name is invalid.")
# vbdif_mac check
if self.vbdif_mac:
mac = mac_format(self.vbdif_mac)
if not mac:
self.module.fail_json(msg="Error: vbdif_mac is invalid.")
self.vbdif_mac = mac
# vbdif_bind_vpn check
if self.vbdif_bind_vpn and not self.is_valid_ip_vpn(self.vbdif_bind_vpn):
self.module.fail_json(msg="Error: vbdif_bind_vpn is invalid.")
# All-Active Gateways or Distributed Gateway config can not set at the
# same time.
if self.dfs_id:
if self.vpn_vni or self.arp_distribute_gateway == "enable":
self.module.fail_json(msg="Error: All-Active Gateways or Distributed Gateway config "
"can not set at the same time.")
def get_proposed(self):
"""get proposed info"""
if self.dfs_id:
self.proposed["dfs_id"] = self.dfs_id
self.proposed["dfs_source_ip"] = self.dfs_source_ip
self.proposed["dfs_source_vpn"] = self.dfs_source_vpn
self.proposed["dfs_udp_port"] = self.dfs_udp_port
self.proposed["dfs_all_active"] = self.dfs_all_active
self.proposed["dfs_peer_ip"] = self.dfs_peer_ip
self.proposed["dfs_peer_vpn"] = self.dfs_peer_vpn
if self.vpn_instance:
self.proposed["vpn_instance"] = self.vpn_instance
self.proposed["vpn_vni"] = self.vpn_vni
if self.vbdif_name:
self.proposed["vbdif_name"] = self.vbdif_name
self.proposed["vbdif_mac"] = self.vbdif_mac
self.proposed["vbdif_bind_vpn"] = self.vbdif_bind_vpn
self.proposed[
"arp_distribute_gateway"] = self.arp_distribute_gateway
self.proposed["arp_direct_route"] = self.arp_direct_route
self.proposed["state"] = self.state
def get_existing(self):
"""get existing info"""
if not self.config:
return
if is_config_exist(self.config, "dfs-group 1"):
self.existing["dfs_id"] = "1"
self.existing["dfs_source_ip"] = get_dfs_source_ip(self.config)
self.existing["dfs_source_vpn"] = get_dfs_source_vpn(self.config)
self.existing["dfs_udp_port"] = get_dfs_udp_port(self.config)
if is_config_exist(self.config, "active-active-gateway"):
self.existing["dfs_all_active"] = "enable"
self.existing["dfs_peers"] = get_dfs_peers(self.config)
else:
self.existing["dfs_all_active"] = "disable"
if self.vpn_instance:
self.existing["vpn_instance"] = get_ip_vpn(self.config)
self.existing["vpn_vni"] = get_ip_vpn_vni(self.config)
if self.vbdif_name:
self.existing["vbdif_name"] = self.vbdif_name
self.existing["vbdif_mac"] = get_vbdif_mac(self.config)
self.existing["vbdif_bind_vpn"] = get_vbdif_vpn(self.config)
if is_config_exist(self.config, "arp distribute-gateway enable"):
self.existing["arp_distribute_gateway"] = "enable"
else:
self.existing["arp_distribute_gateway"] = "disable"
if is_config_exist(self.config, "arp direct-route enable"):
self.existing["arp_direct_route"] = "enable"
else:
self.existing["arp_direct_route"] = "disable"
def get_end_state(self):
"""get end state info"""
config = self.get_current_config()
if not config:
return
if is_config_exist(config, "dfs-group 1"):
self.end_state["dfs_id"] = "1"
self.end_state["dfs_source_ip"] = get_dfs_source_ip(config)
self.end_state["dfs_source_vpn"] = get_dfs_source_vpn(config)
self.end_state["dfs_udp_port"] = get_dfs_udp_port(config)
if is_config_exist(config, "active-active-gateway"):
self.end_state["dfs_all_active"] = "enable"
self.end_state["dfs_peers"] = get_dfs_peers(config)
else:
self.end_state["dfs_all_active"] = "disable"
if self.vpn_instance:
self.end_state["vpn_instance"] = get_ip_vpn(config)
self.end_state["vpn_vni"] = get_ip_vpn_vni(config)
if self.vbdif_name:
self.end_state["vbdif_name"] = self.vbdif_name
self.end_state["vbdif_mac"] = get_vbdif_mac(config)
self.end_state["vbdif_bind_vpn"] = get_vbdif_vpn(config)
if is_config_exist(config, "arp distribute-gateway enable"):
self.end_state["arp_distribute_gateway"] = "enable"
else:
self.end_state["arp_distribute_gateway"] = "disable"
if is_config_exist(config, "arp direct-route enable"):
self.end_state["arp_direct_route"] = "enable"
else:
self.end_state["arp_direct_route"] = "disable"
def work(self):
"""worker"""
self.check_params()
self.config = self.get_current_config()
self.get_existing()
self.get_proposed()
# deal present or absent
if self.dfs_id:
self.config_dfs_group()
if self.vpn_instance:
self.config_ip_vpn()
if self.vbdif_name:
self.config_vbdif()
if self.commands:
self.cli_load_config(self.commands)
self.changed = True
self.get_end_state()
self.results['changed'] = self.changed
self.results['proposed'] = self.proposed
self.results['existing'] = self.existing
self.results['end_state'] = self.end_state
if self.changed:
self.results['updates'] = self.updates_cmd
else:
self.results['updates'] = list()
self.module.exit_json(**self.results)
def main():
"""Module main"""
argument_spec = dict(
dfs_id=dict(required=False, type='str'),
dfs_source_ip=dict(required=False, type='str'),
dfs_source_vpn=dict(required=False, type='str'),
dfs_udp_port=dict(required=False, type='str'),
dfs_all_active=dict(required=False, type='str',
choices=['enable', 'disable']),
dfs_peer_ip=dict(required=False, type='str'),
dfs_peer_vpn=dict(required=False, type='str'),
vpn_instance=dict(required=False, type='str'),
vpn_vni=dict(required=False, type='str'),
vbdif_name=dict(required=False, type='str'),
vbdif_mac=dict(required=False, type='str'),
vbdif_bind_vpn=dict(required=False, type='str'),
arp_distribute_gateway=dict(
required=False, type='str', choices=['enable', 'disable']),
arp_direct_route=dict(required=False, type='str',
choices=['enable', 'disable']),
state=dict(required=False, default='present',
choices=['present', 'absent'])
)
argument_spec.update(ce_argument_spec)
module = VxlanGateway(argument_spec)
module.work()
if __name__ == '__main__':
main()
"""
Django admin page for Site Configuration models
"""
from django.contrib import admin
from .models import SiteConfiguration, SiteConfigurationHistory
class SiteConfigurationAdmin(admin.ModelAdmin):
"""
Admin interface for the SiteConfiguration object.
"""
list_display = ('site', 'enabled', 'values')
search_fields = ('site__domain', 'values')
class Meta(object):
"""
Meta class for SiteConfiguration admin model
"""
model = SiteConfiguration
admin.site.register(SiteConfiguration, SiteConfigurationAdmin)
class SiteConfigurationHistoryAdmin(admin.ModelAdmin):
"""
Admin interface for the SiteConfigurationHistory object.
"""
list_display = ('site', 'enabled', 'created', 'modified')
search_fields = ('site__domain', 'values', 'created', 'modified')
ordering = ['-created']
class Meta(object):
"""
Meta class for SiteConfigurationHistory admin model
"""
model = SiteConfigurationHistory
def has_add_permission(self, request):
"""Don't allow adds"""
return False
def has_delete_permission(self, request, obj=None):
"""Don't allow deletes"""
return False
admin.site.register(SiteConfigurationHistory, SiteConfigurationHistoryAdmin)
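# Note: on Django >= 1.7 the registrations above could equivalently be written
# with the admin.register decorator, e.g. (sketch only):
#
#   @admin.register(SiteConfiguration)
#   class SiteConfigurationAdmin(admin.ModelAdmin):
#       list_display = ('site', 'enabled', 'values')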
"""Model script to test TF-TensorRT integration."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.compiler.tensorrt.test import tf_trt_integration_test_base as trt_test
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gen_array_ops
from tensorflow.python.ops import gen_math_ops
from tensorflow.python.platform import test
class ConcatenationTest(trt_test.TfTrtIntegrationTestBase):
"""Testing Concatenation in TF-TRT conversion."""
def GraphFn(self, x):
dtype = x.dtype
# scale
a = constant_op.constant(np.random.randn(3, 1, 1), dtype=dtype)
r1 = x / a
a = constant_op.constant(np.random.randn(3, 1, 1), dtype=dtype)
r2 = a / x
a = constant_op.constant(np.random.randn(1, 3, 1), dtype=dtype)
r3 = a + x
a = constant_op.constant(np.random.randn(1, 3, 1), dtype=dtype)
r4 = x * a
a = constant_op.constant(np.random.randn(3, 1, 1), dtype=dtype)
r5 = x - a
a = constant_op.constant(np.random.randn(3, 1, 1), dtype=dtype)
r6 = a - x
a = constant_op.constant(np.random.randn(3, 1), dtype=dtype)
r7 = x - a
a = constant_op.constant(np.random.randn(3, 1), dtype=dtype)
r8 = a - x
a = constant_op.constant(np.random.randn(3, 1, 1), dtype=dtype)
r9 = gen_math_ops.maximum(x, a)
a = constant_op.constant(np.random.randn(3, 1), dtype=dtype)
r10 = gen_math_ops.minimum(a, x)
a = constant_op.constant(np.random.randn(3), dtype=dtype)
r11 = x * a
a = constant_op.constant(np.random.randn(1), dtype=dtype)
r12 = a * x
concat1 = array_ops.concat([r1, r2, r3, r4, r5, r6], axis=-1)
concat2 = array_ops.concat([r7, r8, r9, r10, r11, r12], axis=3)
x = array_ops.concat([concat1, concat2], axis=-1)
return gen_array_ops.reshape(x, [2, -1], name="output_0")
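  # Shape bookkeeping for the graph above (for reference): with input
  # [2, 3, 3, 1], r1-r10 and r12 keep shape [2, 3, 3, 1], while r11 broadcasts
  # against a (3,) constant to [2, 3, 3, 3]. So concat1 is [2, 3, 3, 6],
  # concat2 is [2, 3, 3, 8], and the final reshape yields [2, 126].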
def GetParams(self):
return self.BuildParams(self.GraphFn, dtypes.float32, [[2, 3, 3, 1]],
[[2, 126]])
def ExpectedEnginesToBuild(self, run_params):
"""Return the expected engines to build."""
return ["TRTEngineOp_0"]
if __name__ == "__main__":
test.main()
"""
Tests for various parts of L{twisted.web}.
"""
import os
import zlib
from zope.interface import implementer
from zope.interface.verify import verifyObject
from twisted.python.compat import _PY3, networkString
from twisted.python.filepath import FilePath
from twisted.trial import unittest
from twisted.internet import reactor
from twisted.internet.address import IPv4Address
from twisted.internet.task import Clock
from twisted.web import server, resource
from twisted.web import iweb, http, error
from twisted.web.test.requesthelper import DummyChannel, DummyRequest
if _PY3:
class Data(resource.Resource):
def __init__(self, data, type):
resource.Resource.__init__(self)
self.data = data
self.type = type
def render_GET(self, request):
request.setHeader(b"content-type", self.type)
request.setHeader(b"content-length",
networkString(str(len(self.data))))
return self.data
else:
from twisted.web.static import Data
class ResourceTestCase(unittest.TestCase):
def testListEntities(self):
r = resource.Resource()
self.assertEqual([], r.listEntities())
class SimpleResource(resource.Resource):
"""
@ivar _contentType: C{None} or a C{str} giving the value of the
I{Content-Type} header in the response this resource will render. If it
is C{None}, no I{Content-Type} header will be set in the response.
"""
def __init__(self, contentType=None):
resource.Resource.__init__(self)
self._contentType = contentType
def render(self, request):
if self._contentType is not None:
request.responseHeaders.setRawHeaders(
b"content-type", [self._contentType])
if http.CACHED in (request.setLastModified(10),
request.setETag(b'MatchingTag')):
return b''
else:
return b"correct"
class SiteTest(unittest.TestCase):
def test_simplestSite(self):
"""
L{Site.getResourceFor} returns the C{b""} child of the root resource it
is constructed with when processing a request for I{/}.
"""
sres1 = SimpleResource()
sres2 = SimpleResource()
sres1.putChild(b"",sres2)
site = server.Site(sres1)
self.assertIdentical(
site.getResourceFor(DummyRequest([b''])),
sres2, "Got the wrong resource.")
class SessionTest(unittest.TestCase):
"""
Tests for L{server.Session}.
"""
def setUp(self):
"""
Create a site with one active session using a deterministic, easily
controlled clock.
"""
self.clock = Clock()
self.uid = b'unique'
self.site = server.Site(resource.Resource())
self.session = server.Session(self.site, self.uid, self.clock)
self.site.sessions[self.uid] = self.session
def test_defaultReactor(self):
"""
        If no value is passed to L{server.Session.__init__}, the global
reactor is used.
"""
session = server.Session(server.Site(resource.Resource()), b'123')
self.assertIdentical(session._reactor, reactor)
def test_startCheckingExpiration(self):
"""
L{server.Session.startCheckingExpiration} causes the session to expire
after L{server.Session.sessionTimeout} seconds without activity.
"""
self.session.startCheckingExpiration()
# Advance to almost the timeout - nothing should happen.
self.clock.advance(self.session.sessionTimeout - 1)
self.assertIn(self.uid, self.site.sessions)
# Advance to the timeout, the session should expire.
self.clock.advance(1)
self.assertNotIn(self.uid, self.site.sessions)
# There should be no calls left over, either.
self.assertFalse(self.clock.calls)
def test_expire(self):
"""
L{server.Session.expire} expires the session.
"""
self.session.expire()
# It should be gone from the session dictionary.
self.assertNotIn(self.uid, self.site.sessions)
# And there should be no pending delayed calls.
self.assertFalse(self.clock.calls)
def test_expireWhileChecking(self):
"""
L{server.Session.expire} expires the session even if the timeout call
isn't due yet.
"""
self.session.startCheckingExpiration()
self.test_expire()
def test_notifyOnExpire(self):
"""
A function registered with L{server.Session.notifyOnExpire} is called
when the session expires.
"""
callbackRan = [False]
def expired():
callbackRan[0] = True
self.session.notifyOnExpire(expired)
self.session.expire()
self.assertTrue(callbackRan[0])
def test_touch(self):
"""
L{server.Session.touch} updates L{server.Session.lastModified} and
delays session timeout.
"""
# Make sure it works before startCheckingExpiration
self.clock.advance(3)
self.session.touch()
self.assertEqual(self.session.lastModified, 3)
# And after startCheckingExpiration
self.session.startCheckingExpiration()
self.clock.advance(self.session.sessionTimeout - 1)
self.session.touch()
self.clock.advance(self.session.sessionTimeout - 1)
self.assertIn(self.uid, self.site.sessions)
# It should have advanced it by just sessionTimeout, no more.
self.clock.advance(1)
self.assertNotIn(self.uid, self.site.sessions)
def httpBody(whole):
return whole.split(b'\r\n\r\n', 1)[1]
def httpHeader(whole, key):
key = key.lower()
headers = whole.split(b'\r\n\r\n', 1)[0]
for header in headers.split(b'\r\n'):
if header.lower().startswith(key):
return header.split(b':', 1)[1].strip()
return None
def httpCode(whole):
l1 = whole.split(b'\r\n', 1)[0]
return int(l1.split()[1])
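# Illustrative use of the helpers above (the response bytes are hypothetical):
#   raw = b"HTTP/1.1 200 OK\r\nContent-Type: text/html\r\n\r\nhello"
#   httpCode(raw) == 200
#   httpHeader(raw, b"Content-Type") == b"text/html"
#   httpBody(raw) == b"hello"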
class ConditionalTest(unittest.TestCase):
"""
web.server's handling of conditional requests for cache validation.
"""
def setUp(self):
self.resrc = SimpleResource()
self.resrc.putChild(b'', self.resrc)
self.resrc.putChild(b'with-content-type', SimpleResource(b'image/jpeg'))
self.site = server.Site(self.resrc)
self.site.startFactory()
self.addCleanup(self.site.stopFactory)
# HELLLLLLLLLLP! This harness is Very Ugly.
self.channel = self.site.buildProtocol(None)
self.transport = http.StringTransport()
self.transport.close = lambda *a, **kw: None
self.transport.disconnecting = lambda *a, **kw: 0
self.transport.getPeer = lambda *a, **kw: "peer"
self.transport.getHost = lambda *a, **kw: "host"
self.channel.makeConnection(self.transport)
def tearDown(self):
self.channel.connectionLost(None)
def _modifiedTest(self, modifiedSince=None, etag=None):
"""
Given the value C{modifiedSince} for the I{If-Modified-Since} header or
        the value C{etag} for the I{If-None-Match} header, verify that a response
with a 200 code, a default Content-Type, and the resource as the body is
returned.
"""
if modifiedSince is not None:
validator = b"If-Modified-Since: " + modifiedSince
else:
validator = b"If-Not-Match: " + etag
for line in [b"GET / HTTP/1.1", validator, b""]:
self.channel.lineReceived(line)
result = self.transport.getvalue()
self.assertEqual(httpCode(result), http.OK)
self.assertEqual(httpBody(result), b"correct")
self.assertEqual(httpHeader(result, b"Content-Type"), b"text/html")
def test_modified(self):
"""
If a request is made with an I{If-Modified-Since} header value with
a timestamp indicating a time before the last modification of the
requested resource, a 200 response is returned along with a response
body containing the resource.
"""
self._modifiedTest(modifiedSince=http.datetimeToString(1))
def test_unmodified(self):
"""
If a request is made with an I{If-Modified-Since} header value with a
        timestamp indicating a time after the last modification of the requested
resource, a 304 response is returned along with an empty response body
and no Content-Type header if the application does not set one.
"""
for line in [b"GET / HTTP/1.1",
b"If-Modified-Since: " + http.datetimeToString(100), b""]:
self.channel.lineReceived(line)
result = self.transport.getvalue()
self.assertEqual(httpCode(result), http.NOT_MODIFIED)
self.assertEqual(httpBody(result), b"")
# Since there SHOULD NOT (RFC 2616, section 10.3.5) be any
# entity-headers, the Content-Type is not set if the application does
# not explicitly set it.
self.assertEqual(httpHeader(result, b"Content-Type"), None)
def test_invalidTimestamp(self):
"""
If a request is made with an I{If-Modified-Since} header value which
cannot be parsed, the header is treated as not having been present
and a normal 200 response is returned with a response body
containing the resource.
"""
self._modifiedTest(modifiedSince=b"like, maybe a week ago, I guess?")
def test_invalidTimestampYear(self):
"""
If a request is made with an I{If-Modified-Since} header value which
contains a string in the year position which is not an integer, the
header is treated as not having been present and a normal 200
response is returned with a response body containing the resource.
"""
self._modifiedTest(modifiedSince=b"Thu, 01 Jan blah 00:00:10 GMT")
def test_invalidTimestampTooLongAgo(self):
"""
If a request is made with an I{If-Modified-Since} header value which
contains a year before the epoch, the header is treated as not
having been present and a normal 200 response is returned with a
response body containing the resource.
"""
self._modifiedTest(modifiedSince=b"Thu, 01 Jan 1899 00:00:10 GMT")
def test_invalidTimestampMonth(self):
"""
If a request is made with an I{If-Modified-Since} header value which
contains a string in the month position which is not a recognized
month abbreviation, the header is treated as not having been present
and a normal 200 response is returned with a response body
containing the resource.
"""
self._modifiedTest(modifiedSince=b"Thu, 01 Blah 1970 00:00:10 GMT")
def test_etagMatchedNot(self):
"""
If a request is made with an I{If-None-Match} ETag which does not match
the current ETag of the requested resource, the header is treated as not
having been present and a normal 200 response is returned with a
response body containing the resource.
"""
self._modifiedTest(etag=b"unmatchedTag")
def test_etagMatched(self):
"""
If a request is made with an I{If-None-Match} ETag which does match the
current ETag of the requested resource, a 304 response is returned along
with an empty response body.
"""
for line in [b"GET / HTTP/1.1", b"If-None-Match: MatchingTag", b""]:
self.channel.lineReceived(line)
result = self.transport.getvalue()
self.assertEqual(httpHeader(result, b"ETag"), b"MatchingTag")
self.assertEqual(httpCode(result), http.NOT_MODIFIED)
self.assertEqual(httpBody(result), b"")
def test_unmodifiedWithContentType(self):
"""
Similar to L{test_etagMatched}, but the response should include a
I{Content-Type} header if the application explicitly sets one.
This I{Content-Type} header SHOULD NOT be present according to RFC 2616,
section 10.3.5. It will only be present if the application explicitly
sets it.
"""
for line in [b"GET /with-content-type HTTP/1.1",
b"If-None-Match: MatchingTag", b""]:
self.channel.lineReceived(line)
result = self.transport.getvalue()
self.assertEqual(httpCode(result), http.NOT_MODIFIED)
self.assertEqual(httpBody(result), b"")
self.assertEqual(httpHeader(result, b"Content-Type"), b"image/jpeg")
class RequestTests(unittest.TestCase):
"""
Tests for the HTTP request class, L{server.Request}.
"""
def test_interface(self):
"""
L{server.Request} instances provide L{iweb.IRequest}.
"""
self.assertTrue(
verifyObject(iweb.IRequest, server.Request(DummyChannel(), True)))
def testChildLink(self):
request = server.Request(DummyChannel(), 1)
request.gotLength(0)
request.requestReceived(b'GET', b'/foo/bar', b'HTTP/1.0')
self.assertEqual(request.childLink(b'baz'), b'bar/baz')
request = server.Request(DummyChannel(), 1)
request.gotLength(0)
request.requestReceived(b'GET', b'/foo/bar/', b'HTTP/1.0')
self.assertEqual(request.childLink(b'baz'), b'baz')
def testPrePathURLSimple(self):
request = server.Request(DummyChannel(), 1)
request.gotLength(0)
request.requestReceived(b'GET', b'/foo/bar', b'HTTP/1.0')
request.setHost(b'example.com', 80)
self.assertEqual(request.prePathURL(), b'http://example.com/foo/bar')
def testPrePathURLNonDefault(self):
d = DummyChannel()
d.transport.port = 81
request = server.Request(d, 1)
request.setHost(b'example.com', 81)
request.gotLength(0)
request.requestReceived(b'GET', b'/foo/bar', b'HTTP/1.0')
self.assertEqual(request.prePathURL(), b'http://example.com:81/foo/bar')
def testPrePathURLSSLPort(self):
d = DummyChannel()
d.transport.port = 443
request = server.Request(d, 1)
request.setHost(b'example.com', 443)
request.gotLength(0)
request.requestReceived(b'GET', b'/foo/bar', b'HTTP/1.0')
self.assertEqual(request.prePathURL(), b'http://example.com:443/foo/bar')
def testPrePathURLSSLPortAndSSL(self):
d = DummyChannel()
d.transport = DummyChannel.SSL()
d.transport.port = 443
request = server.Request(d, 1)
request.setHost(b'example.com', 443)
request.gotLength(0)
request.requestReceived(b'GET', b'/foo/bar', b'HTTP/1.0')
self.assertEqual(request.prePathURL(), b'https://example.com/foo/bar')
def testPrePathURLHTTPPortAndSSL(self):
d = DummyChannel()
d.transport = DummyChannel.SSL()
d.transport.port = 80
request = server.Request(d, 1)
request.setHost(b'example.com', 80)
request.gotLength(0)
request.requestReceived(b'GET', b'/foo/bar', b'HTTP/1.0')
self.assertEqual(request.prePathURL(), b'https://example.com:80/foo/bar')
def testPrePathURLSSLNonDefault(self):
d = DummyChannel()
d.transport = DummyChannel.SSL()
d.transport.port = 81
request = server.Request(d, 1)
request.setHost(b'example.com', 81)
request.gotLength(0)
request.requestReceived(b'GET', b'/foo/bar', b'HTTP/1.0')
self.assertEqual(request.prePathURL(), b'https://example.com:81/foo/bar')
def testPrePathURLSetSSLHost(self):
d = DummyChannel()
d.transport.port = 81
request = server.Request(d, 1)
request.setHost(b'foo.com', 81, 1)
request.gotLength(0)
request.requestReceived(b'GET', b'/foo/bar', b'HTTP/1.0')
self.assertEqual(request.prePathURL(), b'https://foo.com:81/foo/bar')
def test_prePathURLQuoting(self):
"""
L{Request.prePathURL} quotes special characters in the URL segments to
preserve the original meaning.
"""
d = DummyChannel()
request = server.Request(d, 1)
request.setHost(b'example.com', 80)
request.gotLength(0)
request.requestReceived(b'GET', b'/foo%2Fbar', b'HTTP/1.0')
self.assertEqual(request.prePathURL(), b'http://example.com/foo%2Fbar')
class GzipEncoderTests(unittest.TestCase):
if _PY3:
skip = "GzipEncoder not ported to Python 3 yet."
def setUp(self):
self.channel = DummyChannel()
staticResource = Data(b"Some data", b"text/plain")
wrapped = resource.EncodingResourceWrapper(
staticResource, [server.GzipEncoderFactory()])
self.channel.site.resource.putChild(b"foo", wrapped)
def test_interfaces(self):
"""
L{server.GzipEncoderFactory} implements the
L{iweb._IRequestEncoderFactory} and its C{encoderForRequest} returns an
instance of L{server._GzipEncoder} which implements
L{iweb._IRequestEncoder}.
"""
request = server.Request(self.channel, False)
request.gotLength(0)
request.requestHeaders.setRawHeaders(b"Accept-Encoding",
[b"gzip,deflate"])
factory = server.GzipEncoderFactory()
self.assertTrue(verifyObject(iweb._IRequestEncoderFactory, factory))
encoder = factory.encoderForRequest(request)
self.assertTrue(verifyObject(iweb._IRequestEncoder, encoder))
def test_encoding(self):
"""
        If the client request passes an I{Accept-Encoding} header which mentions
gzip, L{server._GzipEncoder} automatically compresses the data.
"""
request = server.Request(self.channel, False)
request.gotLength(0)
request.requestHeaders.setRawHeaders(b"Accept-Encoding",
[b"gzip,deflate"])
request.requestReceived(b'GET', b'/foo', b'HTTP/1.0')
data = self.channel.transport.written.getvalue()
self.assertNotIn(b"Content-Length", data)
self.assertIn(b"Content-Encoding: gzip\r\n", data)
body = data[data.find(b"\r\n\r\n") + 4:]
self.assertEqual(b"Some data",
zlib.decompress(body, 16 + zlib.MAX_WBITS))
def test_nonEncoding(self):
"""
L{server.GzipEncoderFactory} doesn't return a L{server._GzipEncoder} if
the I{Accept-Encoding} header doesn't mention gzip support.
"""
request = server.Request(self.channel, False)
request.gotLength(0)
request.requestHeaders.setRawHeaders(b"Accept-Encoding",
[b"foo,bar"])
request.requestReceived(b'GET', b'/foo', b'HTTP/1.0')
data = self.channel.transport.written.getvalue()
self.assertIn(b"Content-Length", data)
self.assertNotIn(b"Content-Encoding: gzip\r\n", data)
body = data[data.find(b"\r\n\r\n") + 4:]
self.assertEqual(b"Some data", body)
def test_multipleAccept(self):
"""
        If there are multiple I{Accept-Encoding} headers,
L{server.GzipEncoderFactory} reads them properly to detect if gzip is
supported.
"""
request = server.Request(self.channel, False)
request.gotLength(0)
request.requestHeaders.setRawHeaders(b"Accept-Encoding",
[b"deflate", b"gzip"])
request.requestReceived(b'GET', b'/foo', b'HTTP/1.0')
data = self.channel.transport.written.getvalue()
self.assertNotIn(b"Content-Length", data)
self.assertIn(b"Content-Encoding: gzip\r\n", data)
body = data[data.find(b"\r\n\r\n") + 4:]
self.assertEqual(b"Some data",
zlib.decompress(body, 16 + zlib.MAX_WBITS))
def test_alreadyEncoded(self):
"""
If the content is already encoded and the I{Content-Encoding} header is
set, L{server.GzipEncoderFactory} properly appends gzip to it.
"""
request = server.Request(self.channel, False)
request.gotLength(0)
request.requestHeaders.setRawHeaders(b"Accept-Encoding",
[b"deflate", b"gzip"])
request.responseHeaders.setRawHeaders(b"Content-Encoding",
[b"deflate"])
request.requestReceived(b'GET', b'/foo', b'HTTP/1.0')
data = self.channel.transport.written.getvalue()
self.assertNotIn(b"Content-Length", data)
self.assertIn(b"Content-Encoding: deflate,gzip\r\n", data)
body = data[data.find(b"\r\n\r\n") + 4:]
self.assertEqual(b"Some data",
zlib.decompress(body, 16 + zlib.MAX_WBITS))
def test_multipleEncodingLines(self):
"""
If there are several I{Content-Encoding} headers,
        L{server.GzipEncoderFactory} normalizes them and appends gzip to the
field value.
"""
request = server.Request(self.channel, False)
request.gotLength(0)
request.requestHeaders.setRawHeaders(b"Accept-Encoding",
[b"deflate", b"gzip"])
request.responseHeaders.setRawHeaders(b"Content-Encoding",
[b"foo", b"bar"])
request.requestReceived(b'GET', b'/foo', b'HTTP/1.0')
data = self.channel.transport.written.getvalue()
self.assertNotIn(b"Content-Length", data)
self.assertIn(b"Content-Encoding: foo,bar,gzip\r\n", data)
body = data[data.find(b"\r\n\r\n") + 4:]
self.assertEqual(b"Some data",
zlib.decompress(body, 16 + zlib.MAX_WBITS))
class RootResource(resource.Resource):
isLeaf=0
def getChildWithDefault(self, name, request):
request.rememberRootURL()
return resource.Resource.getChildWithDefault(self, name, request)
def render(self, request):
return ''
class RememberURLTest(unittest.TestCase):
def createServer(self, r):
chan = DummyChannel()
chan.site = server.Site(r)
return chan
def testSimple(self):
r = resource.Resource()
r.isLeaf=0
rr = RootResource()
r.putChild(b'foo', rr)
rr.putChild(b'', rr)
rr.putChild(b'bar', resource.Resource())
chan = self.createServer(r)
for url in [b'/foo/', b'/foo/bar', b'/foo/bar/baz', b'/foo/bar/']:
request = server.Request(chan, 1)
request.setHost(b'example.com', 81)
request.gotLength(0)
request.requestReceived(b'GET', url, b'HTTP/1.0')
self.assertEqual(request.getRootURL(), b"http://example.com/foo")
def testRoot(self):
rr = RootResource()
rr.putChild(b'', rr)
rr.putChild(b'bar', resource.Resource())
chan = self.createServer(rr)
for url in [b'/', b'/bar', b'/bar/baz', b'/bar/']:
request = server.Request(chan, 1)
request.setHost(b'example.com', 81)
request.gotLength(0)
request.requestReceived(b'GET', url, b'HTTP/1.0')
self.assertEqual(request.getRootURL(), b"http://example.com/")
class NewRenderResource(resource.Resource):
def render_GET(self, request):
return b"hi hi"
def render_HEH(self, request):
return b"ho ho"
@implementer(resource.IResource)
class HeadlessResource(object):
"""
A resource that implements GET but not HEAD.
"""
allowedMethods = [b"GET"]
def render(self, request):
"""
Leave the request open for future writes.
"""
self.request = request
if request.method not in self.allowedMethods:
raise error.UnsupportedMethod(self.allowedMethods)
self.request.write(b"some data")
return server.NOT_DONE_YET
class NewRenderTestCase(unittest.TestCase):
"""
Tests for L{server.Request.render}.
"""
def _getReq(self, resource=None):
"""
Create a request object with a stub channel and install the
passed resource at /newrender. If no resource is passed,
create one.
"""
d = DummyChannel()
if resource is None:
resource = NewRenderResource()
d.site.resource.putChild(b'newrender', resource)
d.transport.port = 81
request = server.Request(d, 1)
request.setHost(b'example.com', 81)
request.gotLength(0)
return request
def testGoodMethods(self):
req = self._getReq()
req.requestReceived(b'GET', b'/newrender', b'HTTP/1.0')
self.assertEqual(req.transport.getvalue().splitlines()[-1], b'hi hi')
req = self._getReq()
req.requestReceived(b'HEH', b'/newrender', b'HTTP/1.0')
self.assertEqual(req.transport.getvalue().splitlines()[-1], b'ho ho')
def testBadMethods(self):
req = self._getReq()
req.requestReceived(b'CONNECT', b'/newrender', b'HTTP/1.0')
self.assertEqual(req.code, 501)
req = self._getReq()
req.requestReceived(b'hlalauguG', b'/newrender', b'HTTP/1.0')
self.assertEqual(req.code, 501)
def testImplicitHead(self):
req = self._getReq()
req.requestReceived(b'HEAD', b'/newrender', b'HTTP/1.0')
self.assertEqual(req.code, 200)
self.assertEqual(-1, req.transport.getvalue().find(b'hi hi'))
def test_unsupportedHead(self):
"""
        HEAD requests against a resource that only claims support for GET
should not include a body in the response.
"""
resource = HeadlessResource()
req = self._getReq(resource)
req.requestReceived(b"HEAD", b"/newrender", b"HTTP/1.0")
headers, body = req.transport.getvalue().split(b'\r\n\r\n')
self.assertEqual(req.code, 200)
self.assertEqual(body, b'')
class GettableResource(resource.Resource):
"""
Used by AllowedMethodsTest to simulate an allowed method.
"""
def render_GET(self):
pass
def render_fred_render_ethel(self):
"""
The unusual method name is designed to test the culling method
in C{twisted.web.resource._computeAllowedMethods}.
"""
pass
class AllowedMethodsTest(unittest.TestCase):
"""
    C{twisted.web.resource._computeAllowedMethods} is provided as a
    default should the subclass not provide the method.
"""
if _PY3:
skip = "Allowed methods functionality not ported to Python 3."
def _getReq(self):
"""
Generate a dummy request for use by C{_computeAllowedMethod} tests.
"""
d = DummyChannel()
d.site.resource.putChild(b'gettableresource', GettableResource())
d.transport.port = 81
request = server.Request(d, 1)
request.setHost(b'example.com', 81)
request.gotLength(0)
return request
def test_computeAllowedMethods(self):
"""
C{_computeAllowedMethods} will search through the
'gettableresource' for all attributes/methods of the form
'render_{method}' ('render_GET', for example) and return a list of
the methods. 'HEAD' will always be included from the
resource.Resource superclass.
"""
res = GettableResource()
allowedMethods = resource._computeAllowedMethods(res)
self.assertEqual(set(allowedMethods),
set([b'GET', b'HEAD', b'fred_render_ethel']))
def test_notAllowed(self):
"""
When an unsupported method is requested, the default
L{_computeAllowedMethods} method will be called to determine the
        allowed methods, and the HTTP 405 'Method Not Allowed' status will
        be returned with the allowed methods listed in the
        'Allow' header.
"""
req = self._getReq()
req.requestReceived(b'POST', b'/gettableresource', b'HTTP/1.0')
self.assertEqual(req.code, 405)
self.assertEqual(
set(req.responseHeaders.getRawHeaders(b'allow')[0].split(b", ")),
set([b'GET', b'HEAD', b'fred_render_ethel'])
)
def test_notAllowedQuoting(self):
"""
When an unsupported method response is generated, an HTML message will
        be displayed. That message should include a quoted form of the URI,
        since that value comes from a browser and shouldn't necessarily be
trusted.
"""
req = self._getReq()
req.requestReceived(b'POST', b'/gettableresource?'
b'value=<script>bad', b'HTTP/1.0')
self.assertEqual(req.code, 405)
renderedPage = req.transport.getvalue()
self.assertNotIn(b"<script>bad", renderedPage)
        self.assertIn(b'&lt;script&gt;bad', renderedPage)
def test_notImplementedQuoting(self):
"""
        When a not-implemented method response is generated, an HTML message
        will be displayed. That message should include a quoted form of the
        requested method, since that value comes from a browser and shouldn't
necessarily be trusted.
"""
req = self._getReq()
req.requestReceived(b'<style>bad', b'/gettableresource', b'HTTP/1.0')
self.assertEqual(req.code, 501)
renderedPage = req.transport.getvalue()
self.assertNotIn(b"<style>bad", renderedPage)
self.assertIn(b'<style>bad', renderedPage)
class DummyRequestForLogTest(DummyRequest):
uri = b'/dummy' # parent class uri has "http://", which doesn't really happen
code = 123
clientproto = b'HTTP/1.0'
sentLength = None
client = IPv4Address('TCP', '1.2.3.4', 12345)
class AccessLogTestsMixin(object):
"""
A mixin for L{TestCase} subclasses defining tests that apply to
L{HTTPFactory} and its subclasses.
"""
def factory(self, *args, **kwargs):
"""
Get the factory class to apply logging tests to.
Subclasses must override this method.
"""
raise NotImplementedError("Subclass failed to override factory")
def test_combinedLogFormat(self):
"""
The factory's C{log} method writes a I{combined log format} line to the
factory's log file.
"""
reactor = Clock()
# Set the clock to an arbitrary point in time. It doesn't matter when
# as long as it corresponds to the timestamp in the string literal in
# the assertion below.
reactor.advance(1234567890)
logPath = self.mktemp()
factory = self.factory(logPath=logPath)
factory._reactor = reactor
factory.startFactory()
try:
factory.log(DummyRequestForLogTest(factory))
finally:
factory.stopFactory()
self.assertEqual(
# Client IP
b'"1.2.3.4" '
# Some blanks we never fill in
b'- - '
# The current time (circa 1234567890)
b'[13/Feb/2009:23:31:30 +0000] '
# Method, URI, version
b'"GET /dummy HTTP/1.0" '
# Response code
b'123 '
# Response length
b'- '
# Value of the "Referer" header. Probably incorrectly quoted.
b'"-" '
            # Value of the "User-Agent" header. Probably incorrectly quoted.
b'"-"' + self.linesep,
FilePath(logPath).getContent())
def test_logFormatOverride(self):
"""
If the factory is initialized with a custom log formatter then that
formatter is used to generate lines for the log file.
"""
def notVeryGoodFormatter(timestamp, request):
return u"this is a bad log format"
reactor = Clock()
reactor.advance(1234567890)
logPath = self.mktemp()
factory = self.factory(
logPath=logPath, logFormatter=notVeryGoodFormatter)
factory._reactor = reactor
factory.startFactory()
try:
factory.log(DummyRequestForLogTest(factory))
finally:
factory.stopFactory()
self.assertEqual(
# self.linesep is a sad thing.
# https://twistedmatrix.com/trac/ticket/6938
b"this is a bad log format" + self.linesep,
FilePath(logPath).getContent())
class HTTPFactoryAccessLogTests(AccessLogTestsMixin, unittest.TestCase):
"""
Tests for L{http.HTTPFactory.log}.
"""
factory = http.HTTPFactory
linesep = b"\n"
class SiteAccessLogTests(AccessLogTestsMixin, unittest.TestCase):
"""
Tests for L{server.Site.log}.
"""
if _PY3:
skip = "Site not ported to Python 3 yet."
linesep = os.linesep
def factory(self, *args, **kwargs):
return server.Site(resource.Resource(), *args, **kwargs)
class CombinedLogFormatterTests(unittest.TestCase):
"""
Tests for L{twisted.web.http.combinedLogFormatter}.
"""
def test_interface(self):
"""
L{combinedLogFormatter} provides L{IAccessLogFormatter}.
"""
self.assertTrue(verifyObject(
iweb.IAccessLogFormatter, http.combinedLogFormatter))
def test_nonASCII(self):
"""
Bytes in fields of the request which are not part of ASCII are escaped
in the result.
"""
reactor = Clock()
reactor.advance(1234567890)
timestamp = http.datetimeToLogString(reactor.seconds())
request = DummyRequestForLogTest(http.HTTPFactory())
request.client = IPv4Address("TCP", b"evil x-forwarded-for \x80", 12345)
request.method = b"POS\x81"
request.protocol = b"HTTP/1.\x82"
request.headers[b"referer"] = b"evil \x83"
request.headers[b"user-agent"] = b"evil \x84"
line = http.combinedLogFormatter(timestamp, request)
self.assertEqual(
u'"evil x-forwarded-for \\x80" - - [13/Feb/2009:23:31:30 +0000] '
u'"POS\\x81 /dummy HTTP/1.0" 123 - "evil \\x83" "evil \\x84"',
line)
class ProxiedLogFormatterTests(unittest.TestCase):
"""
Tests for L{twisted.web.http.proxiedLogFormatter}.
"""
def test_interface(self):
"""
L{proxiedLogFormatter} provides L{IAccessLogFormatter}.
"""
self.assertTrue(verifyObject(
iweb.IAccessLogFormatter, http.proxiedLogFormatter))
def _xforwardedforTest(self, header):
"""
Assert that a request with the given value in its I{X-Forwarded-For}
header is logged by L{proxiedLogFormatter} the same way it would have
been logged by L{combinedLogFormatter} but with 172.16.1.2 as the
client address instead of the normal value.
@param header: An I{X-Forwarded-For} header with left-most address of
172.16.1.2.
"""
reactor = Clock()
reactor.advance(1234567890)
timestamp = http.datetimeToLogString(reactor.seconds())
request = DummyRequestForLogTest(http.HTTPFactory())
expected = http.combinedLogFormatter(timestamp, request).replace(
u"1.2.3.4", u"172.16.1.2")
request.requestHeaders.setRawHeaders(b"x-forwarded-for", [header])
line = http.proxiedLogFormatter(timestamp, request)
self.assertEqual(expected, line)
def test_xforwardedfor(self):
"""
L{proxiedLogFormatter} logs the value of the I{X-Forwarded-For} header
in place of the client address field.
"""
self._xforwardedforTest(b"172.16.1.2, 10.0.0.3, 192.168.1.4")
def test_extraForwardedSpaces(self):
"""
Any extra spaces around the address in the I{X-Forwarded-For} header
are stripped and not included in the log string.
"""
self._xforwardedforTest(b" 172.16.1.2 , 10.0.0.3, 192.168.1.4")
class TestLogEscaping(unittest.TestCase):
def setUp(self):
self.logPath = self.mktemp()
self.site = http.HTTPFactory(self.logPath)
self.site.startFactory()
self.request = DummyRequestForLogTest(self.site, False)
def assertLogs(self, line):
"""
Assert that if C{self.request} is logged using C{self.site} then
C{line} is written to the site's access log file.
@param line: The expected line.
@type line: L{bytes}
@raise self.failureException: If the log file contains something other
than the expected line.
"""
try:
self.site.log(self.request)
finally:
self.site.stopFactory()
logged = FilePath(self.logPath).getContent()
self.assertEqual(line, logged)
def test_simple(self):
"""
A I{GET} request is logged with no extra escapes.
"""
self.site._logDateTime = "[%02d/%3s/%4d:%02d:%02d:%02d +0000]" % (
25, 'Oct', 2004, 12, 31, 59)
self.assertLogs(
b'"1.2.3.4" - - [25/Oct/2004:12:31:59 +0000] '
b'"GET /dummy HTTP/1.0" 123 - "-" "-"\n')
def test_methodQuote(self):
"""
If the HTTP request method includes a quote, the quote is escaped.
"""
self.site._logDateTime = "[%02d/%3s/%4d:%02d:%02d:%02d +0000]" % (
25, 'Oct', 2004, 12, 31, 59)
self.request.method = b'G"T'
self.assertLogs(
b'"1.2.3.4" - - [25/Oct/2004:12:31:59 +0000] '
b'"G\\"T /dummy HTTP/1.0" 123 - "-" "-"\n')
def test_requestQuote(self):
"""
If the HTTP request path includes a quote, the quote is escaped.
"""
self.site._logDateTime = "[%02d/%3s/%4d:%02d:%02d:%02d +0000]" % (
25, 'Oct', 2004, 12, 31, 59)
self.request.uri = b'/dummy"withquote'
self.assertLogs(
b'"1.2.3.4" - - [25/Oct/2004:12:31:59 +0000] '
b'"GET /dummy\\"withquote HTTP/1.0" 123 - "-" "-"\n')
def test_protoQuote(self):
"""
If the HTTP request version includes a quote, the quote is escaped.
"""
self.site._logDateTime = "[%02d/%3s/%4d:%02d:%02d:%02d +0000]" % (
25, 'Oct', 2004, 12, 31, 59)
self.request.clientproto = b'HT"P/1.0'
self.assertLogs(
b'"1.2.3.4" - - [25/Oct/2004:12:31:59 +0000] '
b'"GET /dummy HT\\"P/1.0" 123 - "-" "-"\n')
def test_refererQuote(self):
"""
If the value of the I{Referer} header contains a quote, the quote is
escaped.
"""
self.site._logDateTime = "[%02d/%3s/%4d:%02d:%02d:%02d +0000]" % (
25, 'Oct', 2004, 12, 31, 59)
self.request.headers[b'referer'] = (
b'http://malicious" ".website.invalid')
self.assertLogs(
b'"1.2.3.4" - - [25/Oct/2004:12:31:59 +0000] '
b'"GET /dummy HTTP/1.0" 123 - '
b'"http://malicious\\" \\".website.invalid" "-"\n')
def test_userAgentQuote(self):
"""
If the value of the I{User-Agent} header contains a quote, the quote is
escaped.
"""
self.site._logDateTime = "[%02d/%3s/%4d:%02d:%02d:%02d +0000]" % (
25, 'Oct', 2004, 12, 31, 59)
self.request.headers[b'user-agent'] = b'Malicious Web" Evil'
self.assertLogs(
b'"1.2.3.4" - - [25/Oct/2004:12:31:59 +0000] '
b'"GET /dummy HTTP/1.0" 123 - "-" "Malicious Web\\" Evil"\n')
class ServerAttributesTestCase(unittest.TestCase):
"""
Tests that deprecated twisted.web.server attributes raise the appropriate
deprecation warnings when used.
"""
def test_deprecatedAttributeDateTimeString(self):
"""
twisted.web.server.date_time_string should not be used; instead use
twisted.web.http.datetimeToString directly
"""
server.date_time_string
warnings = self.flushWarnings(
offendingFunctions=[self.test_deprecatedAttributeDateTimeString])
self.assertEqual(len(warnings), 1)
self.assertEqual(warnings[0]['category'], DeprecationWarning)
self.assertEqual(
warnings[0]['message'],
("twisted.web.server.date_time_string was deprecated in Twisted "
"12.1.0: Please use twisted.web.http.datetimeToString instead"))
def test_deprecatedAttributeStringDateTime(self):
"""
twisted.web.server.string_date_time should not be used; instead use
twisted.web.http.stringToDatetime directly
"""
server.string_date_time
warnings = self.flushWarnings(
offendingFunctions=[self.test_deprecatedAttributeStringDateTime])
self.assertEqual(len(warnings), 1)
self.assertEqual(warnings[0]['category'], DeprecationWarning)
self.assertEqual(
warnings[0]['message'],
("twisted.web.server.string_date_time was deprecated in Twisted "
"12.1.0: Please use twisted.web.http.stringToDatetime instead"))
|
"""
cclib (http://cclib.sf.net) is (c) 2006, the cclib development team
and licensed under the LGPL (http://www.gnu.org/copyleft/lgpl.html).
"""
__revision__ = "$Revision: 861 $"
import numpy
import logfileparser
import utils
class ADF(logfileparser.Logfile):
"""An ADF log file"""
def __init__(self, *args, **kwargs):
# Call the __init__ method of the superclass
super(ADF, self).__init__(logname="ADF", *args, **kwargs)
def __str__(self):
"""Return a string representation of the object."""
return "ADF log file %s" % (self.filename)
def __repr__(self):
"""Return a representation of the object."""
return 'ADF("%s")' % (self.filename)
def normalisesym(self, label):
"""Use standard symmetry labels instead of ADF labels.
To normalise:
(1) any periods are removed (except in the case of greek letters)
(2) XXX is replaced by X, and a " added.
(3) XX is replaced by X, and a ' added.
(4) The greek letters Sigma, Pi, Delta and Phi are replaced by
their lowercase equivalent.
>>> sym = ADF("dummyfile").normalisesym
>>> labels = ['A','s','A1','A1.g','Sigma','Pi','Delta','Phi','Sigma.g','A.g','AA','AAA','EE1','EEE1']
>>> map(sym,labels)
['A', 's', 'A1', 'A1g', 'sigma', 'pi', 'delta', 'phi', 'sigma.g', 'Ag', "A'", 'A"', "E1'", 'E1"']
"""
greeks = ['Sigma', 'Pi', 'Delta', 'Phi']
for greek in greeks:
if label.startswith(greek):
return label.lower()
ans = label.replace(".", "")
        if ans[1:3] == "''":
            ans = ans[0] + '"'
l = len(ans)
if l > 1 and ans[0] == ans[1]: # Python only tests the second condition if the first is true
if l > 2 and ans[1] == ans[2]:
ans = ans.replace(ans[0]*3, ans[0]) + '"'
else:
ans = ans.replace(ans[0]*2, ans[0]) + "'"
return ans
def normalisedegenerates(self, label, num, ndict=None):
"""Generate a string used for matching degenerate orbital labels
To normalise:
(1) if label is E or T, return label:num
(2) if label is P or D, look up in dict, and return answer
"""
if not ndict:
ndict = { 'P': {0:"P:x", 1:"P:y", 2:"P:z"},\
'D': {0:"D:z2", 1:"D:x2-y2", 2:"D:xy", 3:"D:xz", 4:"D:yz"}}
        if label in ndict:
            if num in ndict[label]:
return ndict[label][num]
else:
return "%s:%i"%(label,num+1)
else:
return "%s:%i"%(label,num+1)
def before_parsing(self):
# Used to avoid extracting the final geometry twice in a GeoOpt
self.NOTFOUND, self.GETLAST, self.NOMORE = range(3)
self.finalgeometry = self.NOTFOUND
        # Used for calculating the scftarget (variable names taken from the ADF manual)
self.accint = self.SCFconv = self.sconv2 = None
        # keep track of nosym and unrestricted case to parse Energies since it doesn't have an all Irreps section
self.nosymflag = False
self.unrestrictedflag = False
SCFCNV, SCFCNV2 = range(2) #used to index self.scftargets[]
maxelem, norm = range(2) # used to index scf.values
def extract(self, inputfile, line):
"""Extract information from the file object inputfile."""
if line.find("INPUT FILE") >= 0:
#check to make sure we aren't parsing Create jobs
while line:
self.updateprogress(inputfile, "Unsupported Information", self.fupdate)
if line.find("INPUT FILE") >=0 and hasattr(self,"scftargets"):
#does this file contain multiple calculations?
#if so, print a warning and skip to end of file
self.logger.warning("Skipping remaining calculations")
inputfile.seek(0,2)
break
if line.find("INPUT FILE") >= 0:
line2 = inputfile.next()
else:
line2 = None
if line2 and len(line2) <= 2:
#make sure that it's not blank like in the NiCO4 regression
line2 = inputfile.next()
if line2 and (line2.find("Create") < 0 and line2.find("create") < 0):
break
line = inputfile.next()
if line[1:10] == "Symmetry:":
info = line.split()
if info[1] == "NOSYM":
self.nosymflag = True
# Use this to read the subspecies of irreducible representations.
# It will be a list, with each element representing one irrep.
if line.strip() == "Irreducible Representations, including subspecies":
dashes = inputfile.next()
self.irreps = []
line = inputfile.next()
while line.strip() != "":
self.irreps.append(line.split())
line = inputfile.next()
if line[4:13] == 'Molecule:':
info = line.split()
if info[1] == 'UNrestricted':
self.unrestrictedflag = True
if line[1:6] == "ATOMS":
# Find the number of atoms and their atomic numbers
# Also extract the starting coordinates (for a GeoOpt anyway)
self.updateprogress(inputfile, "Attributes", self.cupdate)
self.atomnos = []
self.atomcoords = []
self.coreelectrons = []
underline = inputfile.next() #clear pointless lines
label1 = inputfile.next() #
label2 = inputfile.next() #
line = inputfile.next()
atomcoords = []
while len(line)>2: #ensure that we are reading no blank lines
info = line.split()
element = info[1].split('.')[0]
self.atomnos.append(self.table.number[element])
atomcoords.append(map(float, info[2:5]))
self.coreelectrons.append(int(float(info[5]) - float(info[6])))
line = inputfile.next()
self.atomcoords.append(atomcoords)
self.natom = len(self.atomnos)
self.atomnos = numpy.array(self.atomnos, "i")
if line[1:10] == "FRAGMENTS":
header = inputfile.next()
self.frags = []
self.fragnames = []
line = inputfile.next()
while len(line) > 2: #ensure that we are reading no blank lines
info = line.split()
if len(info) == 7: #fragment name is listed here
self.fragnames.append("%s_%s"%(info[1],info[0]))
self.frags.append([])
self.frags[-1].append(int(info[2]) - 1)
elif len(info) == 5: #add atoms into last fragment
self.frags[-1].append(int(info[0]) - 1)
line = inputfile.next()
# Extract charge
if line[1:11] == "Net Charge":
self.charge = int(line.split()[2])
line = inputfile.next()
if len(line.strip()):
# Spin polar: 1 (Spin_A minus Spin_B electrons)
self.mult = int(line.split()[2]) + 1
# (Not sure about this for higher multiplicities)
else:
self.mult = 1
if line[1:22] == "S C F U P D A T E S":
# find targets for SCF convergence
if not hasattr(self,"scftargets"):
self.scftargets = []
#underline, blank, nr
for i in range(3):
inputfile.next()
line = inputfile.next()
self.SCFconv = float(line.split()[-1])
line = inputfile.next()
self.sconv2 = float(line.split()[-1])
if line[1:11] == "CYCLE 1":
self.updateprogress(inputfile, "QM convergence", self.fupdate)
newlist = []
line = inputfile.next()
if not hasattr(self,"geovalues"):
# This is the first SCF cycle
self.scftargets.append([self.sconv2*10, self.sconv2])
elif self.finalgeometry in [self.GETLAST, self.NOMORE]:
# This is the final SCF cycle
self.scftargets.append([self.SCFconv*10, self.SCFconv])
else:
# This is an intermediate SCF cycle
oldscftst = self.scftargets[-1][1]
grdmax = self.geovalues[-1][1]
scftst = max(self.SCFconv, min(oldscftst, grdmax/30, 10**(-self.accint)))
self.scftargets.append([scftst*10, scftst])
while line.find("SCF CONVERGED") == -1 and line.find("SCF not fully converged, result acceptable") == -1 and line.find("SCF NOT CONVERGED") == -1:
if line[4:12] == "SCF test":
if not hasattr(self, "scfvalues"):
self.scfvalues = []
info = line.split()
newlist.append([float(info[4]), abs(float(info[6]))])
try:
line = inputfile.next()
except StopIteration: #EOF reached?
self.logger.warning("SCF did not converge, so attributes may be missing")
break
if line.find("SCF not fully converged, result acceptable") > 0:
self.logger.warning("SCF not fully converged, results acceptable")
if line.find("SCF NOT CONVERGED") > 0:
self.logger.warning("SCF did not converge! moenergies and mocoeffs are unreliable")
if hasattr(self, "scfvalues"):
self.scfvalues.append(newlist)
# Parse SCF energy for SP calcs from bonding energy decomposition section.
        # It seems ADF does not print it earlier for SP calculations.
        # If it does (does it?), parse that instead.
        # Check that scfenergies does not exist, because gopt runs also print this,
# repeating the values in the last "Geometry Convergence Tests" section.
if "Total Bonding Energy:" in line:
if not hasattr(self, "scfenergies"):
energy = utils.convertor(float(line.split()[3]), "hartree", "eV")
self.scfenergies = [energy]
if line[51:65] == "Final Geometry":
self.finalgeometry = self.GETLAST
if line[1:24] == "Coordinates (Cartesian)" and self.finalgeometry in [self.NOTFOUND, self.GETLAST]:
# Get the coordinates from each step of the GeoOpt
if not hasattr(self, "atomcoords"):
self.atomcoords = []
equals = inputfile.next()
blank = inputfile.next()
title = inputfile.next()
title = inputfile.next()
hyphens = inputfile.next()
atomcoords = []
line = inputfile.next()
while line != hyphens:
atomcoords.append(map(float, line.split()[5:8]))
line = inputfile.next()
self.atomcoords.append(atomcoords)
if self.finalgeometry == self.GETLAST: # Don't get any more coordinates
self.finalgeometry = self.NOMORE
if line[1:27] == 'Geometry Convergence Tests':
# Extract Geometry convergence information
if not hasattr(self, "geotargets"):
self.geovalues = []
self.geotargets = numpy.array([0.0, 0.0, 0.0, 0.0, 0.0], "d")
if not hasattr(self, "scfenergies"):
self.scfenergies = []
equals = inputfile.next()
blank = inputfile.next()
line = inputfile.next()
temp = inputfile.next().strip().split()
self.scfenergies.append(utils.convertor(float(temp[-1]), "hartree", "eV"))
for i in range(6):
line = inputfile.next()
values = []
for i in range(5):
temp = inputfile.next().split()
self.geotargets[i] = float(temp[-3])
values.append(float(temp[-4]))
self.geovalues.append(values)
if line[1:27] == 'General Accuracy Parameter':
# Need to know the accuracy of the integration grid to
# calculate the scftarget...note that it changes with time
self.accint = float(line.split()[-1])
if line.find('Orbital Energies, per Irrep and Spin') > 0 and not hasattr(self, "mosyms") and self.nosymflag and not self.unrestrictedflag:
#Extracting orbital symmetries and energies, homos for nosym case
#Should only be for restricted case because there is a better text block for unrestricted and nosym
self.mosyms = [[]]
self.moenergies = [[]]
underline = inputfile.next()
header = inputfile.next()
underline = inputfile.next()
label = inputfile.next()
line = inputfile.next()
info = line.split()
if not info[0] == '1':
self.logger.warning("MO info up to #%s is missing" % info[0])
                #handle the case where MO information up to a certain orbital is missing
while int(info[0]) - 1 != len(self.moenergies[0]):
self.moenergies[0].append(99999)
self.mosyms[0].append('A')
homoA = None
while len(line) > 10:
info = line.split()
self.mosyms[0].append('A')
self.moenergies[0].append(utils.convertor(float(info[2]), 'hartree', 'eV'))
if info[1] == '0.000' and not hasattr(self, 'homos'):
self.homos = [len(self.moenergies[0]) - 2]
line = inputfile.next()
self.moenergies = [numpy.array(self.moenergies[0], "d")]
self.homos = numpy.array(self.homos, "i")
if line[1:29] == 'Orbital Energies, both Spins' and not hasattr(self, "mosyms") and self.nosymflag and self.unrestrictedflag:
#Extracting orbital symmetries and energies, homos for nosym case
#should only be here if unrestricted and nosym
self.mosyms = [[], []]
moenergies = [[], []]
underline = inputfile.next()
blank = inputfile.next()
header = inputfile.next()
underline = inputfile.next()
line = inputfile.next()
homoa = 0
homob = None
while len(line) > 5:
info = line.split()
if info[2] == 'A':
self.mosyms[0].append('A')
moenergies[0].append(utils.convertor(float(info[4]), 'hartree', 'eV'))
if info[3] != '0.00':
homoa = len(moenergies[0]) - 1
elif info[2] == 'B':
self.mosyms[1].append('A')
moenergies[1].append(utils.convertor(float(info[4]), 'hartree', 'eV'))
if info[3] != '0.00':
homob = len(moenergies[1]) - 1
else:
print "Error reading line: %s" % line
line = inputfile.next()
self.moenergies = [numpy.array(x, "d") for x in moenergies]
self.homos = numpy.array([homoa, homob], "i")
if line[1:29] == 'Orbital Energies, all Irreps' and not hasattr(self, "mosyms"):
#Extracting orbital symmetries and energies, homos
self.mosyms = [[]]
self.symlist = {}
self.moenergies = [[]]
underline = inputfile.next()
blank = inputfile.next()
header = inputfile.next()
underline2 = inputfile.next()
line = inputfile.next()
homoa = None
homob = None
#multiple = {'E':2, 'T':3, 'P':3, 'D':5}
# The above is set if there are no special irreps
names = [irrep[0].split(':')[0] for irrep in self.irreps]
counts = [len(irrep) for irrep in self.irreps]
multiple = dict(zip(names, counts))
irrepspecies = {}
for n in range(len(names)):
indices = range(counts[n])
subspecies = self.irreps[n]
irrepspecies[names[n]] = dict(zip(indices, subspecies))
while line.strip():
info = line.split()
if len(info) == 5: #this is restricted
#count = multiple.get(info[0][0],1)
count = multiple.get(info[0],1)
for repeat in range(count): # i.e. add E's twice, T's thrice
self.mosyms[0].append(self.normalisesym(info[0]))
self.moenergies[0].append(utils.convertor(float(info[3]), 'hartree', 'eV'))
sym = info[0]
if count > 1: # add additional sym label
sym = self.normalisedegenerates(info[0],repeat,ndict=irrepspecies)
try:
self.symlist[sym][0].append(len(self.moenergies[0])-1)
except KeyError:
self.symlist[sym]=[[]]
self.symlist[sym][0].append(len(self.moenergies[0])-1)
if info[2] == '0.00' and not hasattr(self, 'homos'):
self.homos = [len(self.moenergies[0]) - (count + 1)] #count, because need to handle degenerate cases
line = inputfile.next()
elif len(info) == 6: #this is unrestricted
if len(self.moenergies) < 2: #if we don't have space, create it
self.moenergies.append([])
self.mosyms.append([])
count = multiple.get(info[0], 1)
if info[2] == 'A':
for repeat in range(count): # i.e. add E's twice, T's thrice
self.mosyms[0].append(self.normalisesym(info[0]))
self.moenergies[0].append(utils.convertor(float(info[4]), 'hartree', 'eV'))
sym = info[0]
if count > 1: #add additional sym label
sym = self.normalisedegenerates(info[0],repeat)
try:
self.symlist[sym][0].append(len(self.moenergies[0])-1)
except KeyError:
self.symlist[sym]=[[],[]]
self.symlist[sym][0].append(len(self.moenergies[0])-1)
if info[3] == '0.00' and homoa == None:
homoa = len(self.moenergies[0]) - (count + 1) #count because degenerate cases need to be handled
if info[2] == 'B':
for repeat in range(count): # i.e. add E's twice, T's thrice
self.mosyms[1].append(self.normalisesym(info[0]))
self.moenergies[1].append(utils.convertor(float(info[4]), 'hartree', 'eV'))
sym = info[0]
if count > 1: #add additional sym label
sym = self.normalisedegenerates(info[0],repeat)
try:
self.symlist[sym][1].append(len(self.moenergies[1])-1)
except KeyError:
self.symlist[sym]=[[],[]]
self.symlist[sym][1].append(len(self.moenergies[1])-1)
if info[3] == '0.00' and homob == None:
homob = len(self.moenergies[1]) - (count + 1)
line = inputfile.next()
else: #different number of lines
print "Error", info
if len(info) == 6: #still unrestricted, despite being out of loop
self.homos = [homoa, homob]
self.moenergies = [numpy.array(x, "d") for x in self.moenergies]
self.homos = numpy.array(self.homos, "i")
if line[1:28] == "Vibrations and Normal Modes":
# Section on extracting vibdisps
# Also contains vibfreqs, but these are extracted in the
# following section (see below)
self.vibdisps = []
equals = inputfile.next()
blank = inputfile.next()
header = inputfile.next()
header = inputfile.next()
blank = inputfile.next()
blank = inputfile.next()
freqs = inputfile.next()
while freqs.strip()!="":
minus = inputfile.next()
p = [ [], [], [] ]
for i in range(len(self.atomnos)):
broken = map(float, inputfile.next().split()[1:])
for j in range(0, len(broken), 3):
p[j/3].append(broken[j:j+3])
self.vibdisps.extend(p[:(len(broken)/3)])
blank = inputfile.next()
blank = inputfile.next()
freqs = inputfile.next()
self.vibdisps = numpy.array(self.vibdisps, "d")
if line[1:24] == "List of All Frequencies":
# Start of the IR/Raman frequency section
self.updateprogress(inputfile, "Frequency information", self.fupdate)
# self.vibsyms = [] # Need to look into this a bit more
self.vibirs = []
self.vibfreqs = []
for i in range(8):
line = inputfile.next()
line = inputfile.next().strip()
while line:
temp = line.split()
self.vibfreqs.append(float(temp[0]))
self.vibirs.append(float(temp[2])) # or is it temp[1]?
line = inputfile.next().strip()
self.vibfreqs = numpy.array(self.vibfreqs, "d")
self.vibirs = numpy.array(self.vibirs, "d")
if hasattr(self, "vibramans"):
self.vibramans = numpy.array(self.vibramans, "d")
        #**************************************************************************
#delete this after new implementation using smat, eigvec print,eprint?
if line[1:49] == "Total nr. of (C)SFOs (summation over all irreps)":
# Extract the number of basis sets
self.nbasis = int(line.split(":")[1].split()[0])
# now that we're here, let's extract aonames
self.fonames = []
self.start_indeces = {}
blank = inputfile.next()
note = inputfile.next()
symoffset = 0
blank = inputfile.next()
blank = inputfile.next()
if len(blank) > 2: #fix for ADF2006.01 as it has another note
blank = inputfile.next()
blank = inputfile.next()
blank = inputfile.next()
self.nosymreps = []
while len(self.fonames) < self.nbasis:
symline = inputfile.next()
sym = symline.split()[1]
line = inputfile.next()
num = int(line.split(':')[1].split()[0])
self.nosymreps.append(num)
#read until line "--------..." is found
while line.find('-----') < 0:
line = inputfile.next()
line = inputfile.next() # the start of the first SFO
while len(self.fonames) < symoffset + num:
info = line.split()
#index0 index1 occ2 energy3/4 fragname5 coeff6 orbnum7 orbname8 fragname9
if not sym in self.start_indeces.keys():
#have we already set the start index for this symmetry?
self.start_indeces[sym] = int(info[1])
orbname = info[8]
orbital = info[7] + orbname.replace(":", "")
fragname = info[5]
frag = fragname + info[9]
coeff = float(info[6])
line = inputfile.next()
while line.strip() and not line[:7].strip(): # while it's the same SFO
# i.e. while not completely blank, but blank at the start
info = line[43:].split()
if len(info)>0: # len(info)==0 for the second line of dvb_ir.adfout
frag += "+" + fragname + info[-1]
coeff = float(info[-4])
if coeff < 0:
orbital += '-' + info[-3] + info[-2].replace(":", "")
else:
orbital += '+' + info[-3] + info[-2].replace(":", "")
line = inputfile.next()
# At this point, we are either at the start of the next SFO or at
# a blank line...the end
self.fonames.append("%s_%s" % (frag, orbital))
symoffset += num
# blankline blankline
inputfile.next(); inputfile.next()
if line[1:32] == "S F O P O P U L A T I O N S ,":
#Extract overlap matrix
self.fooverlaps = numpy.zeros((self.nbasis, self.nbasis), "d")
symoffset = 0
for nosymrep in self.nosymreps:
line = inputfile.next()
while line.find('===') < 10: #look for the symmetry labels
line = inputfile.next()
#blank blank text blank col row
for i in range(6):
inputfile.next()
base = 0
while base < nosymrep: #have we read all the columns?
for i in range(nosymrep - base):
self.updateprogress(inputfile, "Overlap", self.fupdate)
line = inputfile.next()
parts = line.split()[1:]
for j in range(len(parts)):
k = float(parts[j])
self.fooverlaps[base + symoffset + j, base + symoffset +i] = k
self.fooverlaps[base + symoffset + i, base + symoffset + j] = k
#blank, blank, column
for i in range(3):
inputfile.next()
base += 4
symoffset += nosymrep
base = 0
if line[48:67] == "SFO MO coefficients":
self.mocoeffs = [numpy.zeros((self.nbasis, self.nbasis), "d")]
spin = 0
symoffset = 0
lastrow = 0
# Section ends with "1" at beggining of a line.
while line[0] != "1":
line = inputfile.next()
# If spin is specified, then there will be two coefficient matrices.
if line.strip() == "***** SPIN 1 *****":
self.mocoeffs = [numpy.zeros((self.nbasis, self.nbasis), "d"),
numpy.zeros((self.nbasis, self.nbasis), "d")]
# Bump up the spin.
if line.strip() == "***** SPIN 2 *****":
spin = 1
symoffset = 0
lastrow = 0
# Next symmetry.
if line.strip()[:4] == "=== ":
sym = line.split()[1]
if self.nosymflag:
aolist = range(self.nbasis)
else:
aolist = self.symlist[sym][spin]
# Add to the symmetry offset of AO ordering.
symoffset += lastrow
# Blocks with coefficient always start with "MOs :".
if line[1:6] == "MOs :":
# Next line has the MO index contributed to.
monumbers = [int(n) for n in line[6:].split()]
occup = inputfile.next()
label = inputfile.next()
line = inputfile.next()
# The table can end with a blank line or "1".
row = 0
while not line.strip() in ["", "1"]:
info = line.split()
if int(info[0]) < self.start_indeces[sym]:
#check to make sure we aren't parsing CFs
line = inputfile.next()
continue
self.updateprogress(inputfile, "Coefficients", self.fupdate)
row += 1
coeffs = [float(x) for x in info[1:]]
moindices = [aolist[n-1] for n in monumbers]
# The AO index is 1 less than the row.
aoindex = symoffset + row - 1
for i in range(len(monumbers)):
self.mocoeffs[spin][moindices[i],aoindex] = coeffs[i]
line = inputfile.next()
lastrow = row
if line[4:53] == "Final excitation energies from Davidson algorithm":
            # move forward in the file past some algorithm info
# * Final excitation energies from Davidson algorithm *
# * *
# **************************************************************************
# Number of loops in Davidson routine = 20
# Number of matrix-vector multiplications = 24
# Type of excitations = SINGLET-SINGLET
inputfile.next(); inputfile.next(); inputfile.next()
inputfile.next(); inputfile.next(); inputfile.next()
inputfile.next(); inputfile.next()
symm = self.normalisesym(inputfile.next().split()[1])
# move forward in file past some more txt and header info
# Excitation energies E in a.u. and eV, dE wrt prev. cycle,
# oscillator strengths f in a.u.
# no. E/a.u. E/eV f dE/a.u.
# -----------------------------------------------------
inputfile.next(); inputfile.next(); inputfile.next()
inputfile.next(); inputfile.next(); inputfile.next()
# now start parsing etenergies and etoscs
etenergies = []
etoscs = []
etsyms = []
line = inputfile.next()
while len(line) > 2:
info = line.split()
etenergies.append(utils.convertor(float(info[2]), "eV", "cm-1"))
etoscs.append(float(info[3]))
etsyms.append(symm)
line = inputfile.next()
# move past next section
while line[1:53] != "Major MO -> MO transitions for the above excitations":
line = inputfile.next()
# move past headers
# Excitation Occupied to virtual Contribution
# Nr. orbitals weight contribibutions to
# (sum=1) transition dipole moment
# x y z
inputfile.next(), inputfile.next(), inputfile.next()
inputfile.next(), inputfile.next(), inputfile.next()
            # before we start handling transitions, we need
# to create mosyms with indices
# only restricted calcs are possible in ADF
counts = {}
syms = []
for mosym in self.mosyms[0]:
if counts.keys().count(mosym) == 0:
counts[mosym] = 1
else:
counts[mosym] += 1
syms.append(str(counts[mosym]) + mosym)
import re
etsecs = []
printed_warning = False
for i in range(len(etenergies)):
etsec = []
line = inputfile.next()
info = line.split()
while len(info) > 0:
match = re.search('[^0-9]', info[1])
index1 = int(info[1][:match.start(0)])
text = info[1][match.start(0):]
symtext = text[0].upper() + text[1:]
sym1 = str(index1) + self.normalisesym(symtext)
match = re.search('[^0-9]', info[3])
index2 = int(info[3][:match.start(0)])
text = info[3][match.start(0):]
symtext = text[0].upper() + text[1:]
sym2 = str(index2) + self.normalisesym(symtext)
try:
index1 = syms.index(sym1)
except ValueError:
if not printed_warning:
self.logger.warning("Etsecs are not accurate!")
printed_warning = True
try:
index2 = syms.index(sym2)
except ValueError:
if not printed_warning:
self.logger.warning("Etsecs are not accurate!")
printed_warning = True
etsec.append([(index1, 0), (index2, 0), float(info[4])])
line = inputfile.next()
info = line.split()
etsecs.append(etsec)
if not hasattr(self, "etenergies"):
self.etenergies = etenergies
else:
self.etenergies += etenergies
if not hasattr(self, "etoscs"):
self.etoscs = etoscs
else:
self.etoscs += etoscs
if not hasattr(self, "etsyms"):
self.etsyms = etsyms
else:
self.etsyms += etsyms
if not hasattr(self, "etsecs"):
self.etsecs = etsecs
else:
self.etsecs += etsecs
if __name__ == "__main__":
import doctest, adfparser
doctest.testmod(adfparser, verbose=False)
|
"""Tests for the AdbWrapper class."""
import os
import socket
import tempfile
import time
import unittest
import adb_wrapper
class TestAdbWrapper(unittest.TestCase):
def setUp(self):
devices = adb_wrapper.AdbWrapper.GetDevices()
assert devices, 'A device must be attached'
self._adb = devices[0]
self._adb.WaitForDevice()
def _MakeTempFile(self, contents):
"""Make a temporary file with the given contents.
Args:
contents: string to write to the temporary file.
Returns:
The absolute path to the file.
"""
fi, path = tempfile.mkstemp()
with os.fdopen(fi, 'wb') as f:
      f.write(contents)
return path
def testShell(self):
output = self._adb.Shell('echo test', expect_rc=0)
self.assertEqual(output.strip(), 'test')
output = self._adb.Shell('echo test')
self.assertEqual(output.strip(), 'test')
self.assertRaises(adb_wrapper.CommandFailedError, self._adb.Shell,
'echo test', expect_rc=1)
def testPushPull(self):
path = self._MakeTempFile('foo')
device_path = '/data/local/tmp/testfile.txt'
local_tmpdir = os.path.dirname(path)
self._adb.Push(path, device_path)
self.assertEqual(self._adb.Shell('cat %s' % device_path), 'foo')
self._adb.Pull(device_path, local_tmpdir)
with open(os.path.join(local_tmpdir, 'testfile.txt'), 'r') as f:
self.assertEqual(f.read(), 'foo')
def testInstall(self):
path = self._MakeTempFile('foo')
self.assertRaises(adb_wrapper.CommandFailedError, self._adb.Install, path)
def testForward(self):
self.assertRaises(adb_wrapper.CommandFailedError, self._adb.Forward, 0, 0)
def testUninstall(self):
self.assertRaises(adb_wrapper.CommandFailedError, self._adb.Uninstall,
                      'some.nonexistent.package')
def testRebootWaitForDevice(self):
self._adb.Reboot()
print 'waiting for device to reboot...'
while self._adb.GetState() == 'device':
time.sleep(1)
self._adb.WaitForDevice()
self.assertEqual(self._adb.GetState(), 'device')
print 'waiting for package manager...'
while 'package:' not in self._adb.Shell('pm path android'):
time.sleep(1)
def testRootRemount(self):
self._adb.Root()
while True:
try:
self._adb.Shell('start')
break
except adb_wrapper.CommandFailedError:
time.sleep(1)
self._adb.Remount()
if __name__ == '__main__':
unittest.main()
|
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = """
---
module: ce_vxlan_arp
version_added: "2.4"
short_description: Manages ARP attributes of VXLAN on HUAWEI CloudEngine devices.
description:
- Manages ARP attributes of VXLAN on HUAWEI CloudEngine devices.
author: QijunPan (@CloudEngine-Ansible)
options:
evn_bgp:
description:
- Enables EVN BGP.
required: false
choices: ['enable', 'disable']
default: null
evn_source_ip:
description:
- Specifies the source address of an EVN BGP peer.
The value is in dotted decimal notation.
required: false
default: null
evn_peer_ip:
description:
- Specifies the IP address of an EVN BGP peer.
The value is in dotted decimal notation.
required: false
default: null
evn_server:
description:
            - Configures the local device as the route reflector (RR) on the EVN network.
required: false
choices: ['enable', 'disable']
default: null
evn_reflect_client:
description:
- Configures the local device as the route reflector (RR) and its peer as the client.
required: false
choices: ['enable', 'disable']
default: null
vbdif_name:
description:
- Full name of VBDIF interface, i.e. Vbdif100.
required: false
default: null
arp_collect_host:
description:
- Enables EVN BGP or BGP EVPN to collect host information.
required: false
choices: ['enable', 'disable']
default: null
host_collect_protocol:
description:
- Enables EVN BGP or BGP EVPN to advertise host information.
required: false
choices: ['bgp','none']
default: null
bridge_domain_id:
description:
- Specifies a BD(bridge domain) ID.
The value is an integer ranging from 1 to 16777215.
required: false
default: null
arp_suppress:
description:
- Enables ARP broadcast suppression in a BD.
required: false
choices: ['enable', 'disable']
default: null
state:
description:
- Determines whether the config should be present or not
on the device.
required: false
default: present
choices: ['present', 'absent']
"""
EXAMPLES = '''
- name: vxlan arp module test
hosts: ce128
connection: local
gather_facts: no
vars:
cli:
host: "{{ inventory_hostname }}"
port: "{{ ansible_ssh_port }}"
username: "{{ username }}"
password: "{{ password }}"
transport: cli
tasks:
- name: Configure EVN BGP on Layer 2 and Layer 3 VXLAN gateways to establish EVN BGP peer relationships.
ce_vxlan_arp:
evn_bgp: enable
evn_source_ip: 6.6.6.6
evn_peer_ip: 7.7.7.7
provider: "{{ cli }}"
- name: Configure a Layer 3 VXLAN gateway as a BGP RR.
ce_vxlan_arp:
evn_bgp: enable
evn_server: enable
provider: "{{ cli }}"
- name: Enable EVN BGP on a Layer 3 VXLAN gateway to collect host information.
ce_vxlan_arp:
vbdif_name: Vbdif100
arp_collect_host: enable
provider: "{{ cli }}"
- name: Enable Layer 2 and Layer 3 VXLAN gateways to use EVN BGP to advertise host information.
ce_vxlan_arp:
host_collect_protocol: bgp
provider: "{{ cli }}"
- name: Enable ARP broadcast suppression on a Layer 2 VXLAN gateway.
ce_vxlan_arp:
bridge_domain_id: 100
arp_suppress: enable
provider: "{{ cli }}"
'''
RETURN = '''
proposed:
description: k/v pairs of parameters passed into module
returned: verbose mode
type: dict
sample: {"evn_bgp": "enable", "evn_source_ip": "6.6.6.6", "evn_peer_ip":"7.7.7.7", state: "present"}
existing:
description: k/v pairs of existing configuration
returned: verbose mode
type: dict
sample: {"evn_bgp": "disable", "evn_source_ip": null, "evn_peer_ip": []}
end_state:
description: k/v pairs of configuration after module execution
returned: verbose mode
type: dict
sample: {"evn_bgp": "enable", "evn_source_ip": "6.6.6.6", "evn_peer_ip": ["7.7.7.7"]}
updates:
description: commands sent to the device
returned: always
type: list
sample: ["evn bgp",
"source-address 6.6.6.6",
"peer 7.7.7.7"]
changed:
description: check to see if a change was made on the device
returned: always
type: boolean
sample: true
'''
import re
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.ce import get_config, load_config
from ansible.module_utils.ce import ce_argument_spec
def is_config_exist(cmp_cfg, test_cfg):
"""is configuration exist"""
if not cmp_cfg or not test_cfg:
return False
return bool(test_cfg in cmp_cfg)
def is_valid_v4addr(addr):
"""check is ipv4 addr is valid"""
if addr.count('.') == 3:
addr_list = addr.split('.')
if len(addr_list) != 4:
return False
for each_num in addr_list:
if not each_num.isdigit():
return False
if int(each_num) > 255:
return False
return True
return False
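# Hedged illustration of the check above (derived from the code, not from
# device output):
#   is_valid_v4addr("6.6.6.6")    -> True
#   is_valid_v4addr("6.6.6")      -> False  (only three octets)
#   is_valid_v4addr("6.6.6.256")  -> False  (octet out of range)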
def get_evn_peers(config):
"""get evn peer ip list"""
get = re.findall(r"peer ([0-9]+.[0-9]+.[0-9]+.[0-9]+)", config)
if not get:
return None
else:
return list(set(get))
def get_evn_srouce(config):
"""get evn peer ip list"""
get = re.findall(
r"source-address ([0-9]+.[0-9]+.[0-9]+.[0-9]+)", config)
if not get:
return None
else:
return get[0]
def get_evn_reflect_client(config):
"""get evn reflect client list"""
get = re.findall(
r"peer ([0-9]+.[0-9]+.[0-9]+.[0-9]+)\s*reflect-client", config)
if not get:
return None
else:
return list(get)
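# Hedged example for the three helpers above: given a configuration snippet
# containing "source-address 6.6.6.6", "peer 7.7.7.7" and
# "peer 7.7.7.7 reflect-client", get_evn_peers() returns ["7.7.7.7"],
# get_evn_srouce() returns "6.6.6.6" and get_evn_reflect_client() returns
# ["7.7.7.7"]; the addresses here are purely illustrative.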
class VxlanArp(object):
"""
Manages arp attributes of VXLAN.
"""
def __init__(self, argument_spec):
self.spec = argument_spec
self.module = None
self.init_module()
# module input info
self.evn_bgp = self.module.params['evn_bgp']
self.evn_source_ip = self.module.params['evn_source_ip']
self.evn_peer_ip = self.module.params['evn_peer_ip']
self.evn_server = self.module.params['evn_server']
self.evn_reflect_client = self.module.params['evn_reflect_client']
self.vbdif_name = self.module.params['vbdif_name']
self.arp_collect_host = self.module.params['arp_collect_host']
self.host_collect_protocol = self.module.params[
'host_collect_protocol']
self.bridge_domain_id = self.module.params['bridge_domain_id']
self.arp_suppress = self.module.params['arp_suppress']
self.state = self.module.params['state']
# host info
self.host = self.module.params['host']
self.username = self.module.params['username']
self.port = self.module.params['port']
# state
self.config = "" # current config
self.changed = False
self.updates_cmd = list()
self.commands = list()
self.results = dict()
self.proposed = dict()
self.existing = dict()
self.end_state = dict()
def init_module(self):
"""init module"""
required_together = [("vbdif_name", "arp_collect_host"), ("bridge_domain_id", "arp_suppress")]
self.module = AnsibleModule(argument_spec=self.spec,
required_together=required_together,
supports_check_mode=True)
def cli_load_config(self, commands):
"""load config by cli"""
if not self.module.check_mode:
load_config(self.module, commands)
def get_current_config(self):
"""get current configuration"""
flags = list()
exp = "| ignore-case section include evn bgp|host collect protocol bgp"
if self.vbdif_name:
exp += "|^interface %s$" % self.vbdif_name
if self.bridge_domain_id:
exp += "|^bridge-domain %s$" % self.bridge_domain_id
flags.append(exp)
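        # Hedged example of the assembled filter: with vbdif_name "Vbdif100" and
        # bridge_domain_id "100" (illustrative values), the single flag passed to
        # get_config would be
        #   "| ignore-case section include evn bgp|host collect protocol bgp"
        #   "|^interface Vbdif100$|^bridge-domain 100$"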
config = get_config(self.module, flags)
return config
def cli_add_command(self, command, undo=False):
"""add command to self.update_cmd and self.commands"""
if undo and command.lower() not in ["quit", "return"]:
cmd = "undo " + command
else:
cmd = command
self.commands.append(cmd) # set to device
if command.lower() not in ["quit", "return"]:
self.updates_cmd.append(cmd) # show updates result
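        # Hedged examples of the bookkeeping above:
        #   cli_add_command("evn bgp")             -> "evn bgp" in commands and updates_cmd
        #   cli_add_command("evn bgp", undo=True)  -> "undo evn bgp" in both lists
        #   cli_add_command("quit")                -> sent to the device (commands) but
        #                                            not recorded in updates_cmd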
def config_bridge_domain(self):
"""manage bridge domain configuration"""
if not self.bridge_domain_id:
return
# bridge-domain bd-id
# [undo] arp broadcast-suppress enable
cmd = "bridge-domain %s" % self.bridge_domain_id
if not is_config_exist(self.config, cmd):
self.module.fail_json(msg="Error: Bridge domain %s is not exist." % self.bridge_domain_id)
cmd = "arp broadcast-suppress enable"
exist = is_config_exist(self.config, cmd)
if self.arp_suppress == "enable" and not exist:
self.cli_add_command("bridge-domain %s" % self.bridge_domain_id)
self.cli_add_command(cmd)
self.cli_add_command("quit")
elif self.arp_suppress == "disable" and exist:
self.cli_add_command("bridge-domain %s" % self.bridge_domain_id)
self.cli_add_command(cmd, undo=True)
self.cli_add_command("quit")
def config_evn_bgp(self):
"""enables EVN BGP and configure evn bgp command"""
evn_bgp_view = False
evn_bgp_enable = False
cmd = "evn bgp"
exist = is_config_exist(self.config, cmd)
if self.evn_bgp == "enable" or exist:
evn_bgp_enable = True
# [undo] evn bgp
if self.evn_bgp:
if self.evn_bgp == "enable" and not exist:
self.cli_add_command(cmd)
evn_bgp_view = True
elif self.evn_bgp == "disable" and exist:
self.cli_add_command(cmd, undo=True)
return
# [undo] source-address ip-address
if evn_bgp_enable and self.evn_source_ip:
cmd = "source-address %s" % self.evn_source_ip
exist = is_config_exist(self.config, cmd)
if self.state == "present" and not exist:
if not evn_bgp_view:
self.cli_add_command("evn bgp")
evn_bgp_view = True
self.cli_add_command(cmd)
elif self.state == "absent" and exist:
if not evn_bgp_view:
self.cli_add_command("evn bgp")
evn_bgp_view = True
self.cli_add_command(cmd, undo=True)
# [undo] peer ip-address
# [undo] peer ipv4-address reflect-client
if evn_bgp_enable and self.evn_peer_ip:
cmd = "peer %s" % self.evn_peer_ip
exist = is_config_exist(self.config, cmd)
if self.state == "present":
if not exist:
if not evn_bgp_view:
self.cli_add_command("evn bgp")
evn_bgp_view = True
self.cli_add_command(cmd)
if self.evn_reflect_client == "enable":
self.cli_add_command(
"peer %s reflect-client" % self.evn_peer_ip)
else:
if self.evn_reflect_client:
cmd = "peer %s reflect-client" % self.evn_peer_ip
exist = is_config_exist(self.config, cmd)
if self.evn_reflect_client == "enable" and not exist:
if not evn_bgp_view:
self.cli_add_command("evn bgp")
evn_bgp_view = True
self.cli_add_command(cmd)
elif self.evn_reflect_client == "disable" and exist:
if not evn_bgp_view:
self.cli_add_command("evn bgp")
evn_bgp_view = True
self.cli_add_command(cmd, undo=True)
else:
if exist:
if not evn_bgp_view:
self.cli_add_command("evn bgp")
evn_bgp_view = True
self.cli_add_command(cmd, undo=True)
# [undo] server enable
if evn_bgp_enable and self.evn_server:
cmd = "server enable"
exist = is_config_exist(self.config, cmd)
if self.evn_server == "enable" and not exist:
if not evn_bgp_view:
self.cli_add_command("evn bgp")
evn_bgp_view = True
self.cli_add_command(cmd)
elif self.evn_server == "disable" and exist:
if not evn_bgp_view:
self.cli_add_command("evn bgp")
evn_bgp_view = True
self.cli_add_command(cmd, undo=True)
if evn_bgp_view:
self.cli_add_command("quit")
def config_vbdif(self):
"""configure command at the VBDIF interface view"""
# interface vbdif bd-id
# [undo] arp collect host enable
cmd = "interface %s" % self.vbdif_name.lower().capitalize()
exist = is_config_exist(self.config, cmd)
if not exist:
self.module.fail_json(
msg="Error: Interface %s does not exist." % self.vbdif_name)
cmd = "arp collect host enable"
exist = is_config_exist(self.config, cmd)
if self.arp_collect_host == "enable" and not exist:
self.cli_add_command("interface %s" %
self.vbdif_name.lower().capitalize())
self.cli_add_command(cmd)
self.cli_add_command("quit")
elif self.arp_collect_host == "disable" and exist:
self.cli_add_command("interface %s" %
self.vbdif_name.lower().capitalize())
self.cli_add_command(cmd, undo=True)
self.cli_add_command("quit")
def config_host_collect_protocal(self):
"""Enable EVN BGP or BGP EVPN to advertise host information"""
# [undo] host collect protocol bgp
cmd = "host collect protocol bgp"
exist = is_config_exist(self.config, cmd)
if self.state == "present":
if self.host_collect_protocol == "bgp" and not exist:
self.cli_add_command(cmd)
elif self.host_collect_protocol == "none" and exist:
self.cli_add_command(cmd, undo=True)
else:
if self.host_collect_protocol == "bgp" and exist:
self.cli_add_command(cmd, undo=True)
def is_valid_vbdif(self, ifname):
"""check is interface vbdif is valid"""
if not ifname.upper().startswith('VBDIF'):
return False
        bdid = ifname.replace(" ", "").upper().replace("VBDIF", "")
if not bdid.isdigit():
return False
if int(bdid) < 1 or int(bdid) > 16777215:
return False
return True
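        # Hedged examples (derived from the checks above, illustrative names only):
        #   is_valid_vbdif("Vbdif100")       -> True
        #   is_valid_vbdif("Ethernet1/0/1")  -> False  (wrong prefix)
        #   is_valid_vbdif("Vbdif0")         -> False  (BD id out of range 1..16777215)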
def check_params(self):
"""Check all input params"""
# bridge domain id check
if self.bridge_domain_id:
if not self.bridge_domain_id.isdigit():
self.module.fail_json(
msg="Error: Bridge domain id is not digit.")
if int(self.bridge_domain_id) < 1 or int(self.bridge_domain_id) > 16777215:
self.module.fail_json(
msg="Error: Bridge domain id is not in the range from 1 to 16777215.")
# evn_source_ip check
if self.evn_source_ip:
if not is_valid_v4addr(self.evn_source_ip):
self.module.fail_json(msg="Error: evn_source_ip is invalid.")
# evn_peer_ip check
if self.evn_peer_ip:
if not is_valid_v4addr(self.evn_peer_ip):
self.module.fail_json(msg="Error: evn_peer_ip is invalid.")
# vbdif_name check
if self.vbdif_name:
self.vbdif_name = self.vbdif_name.replace(
" ", "").lower().capitalize()
if not self.is_valid_vbdif(self.vbdif_name):
self.module.fail_json(msg="Error: vbdif_name is invalid.")
# evn_reflect_client and evn_peer_ip must set at the same time
if self.evn_reflect_client and not self.evn_peer_ip:
self.module.fail_json(
msg="Error: evn_reflect_client and evn_peer_ip must set at the same time.")
# evn_server and evn_reflect_client can not set at the same time
if self.evn_server == "enable" and self.evn_reflect_client == "enable":
self.module.fail_json(
msg="Error: evn_server and evn_reflect_client can not set at the same time.")
def get_proposed(self):
"""get proposed info"""
if self.evn_bgp:
self.proposed["evn_bgp"] = self.evn_bgp
if self.evn_source_ip:
self.proposed["evn_source_ip"] = self.evn_source_ip
if self.evn_peer_ip:
self.proposed["evn_peer_ip"] = self.evn_peer_ip
if self.evn_server:
self.proposed["evn_server"] = self.evn_server
if self.evn_reflect_client:
self.proposed["evn_reflect_client"] = self.evn_reflect_client
if self.arp_collect_host:
self.proposed["arp_collect_host"] = self.arp_collect_host
if self.host_collect_protocol:
self.proposed["host_collect_protocol"] = self.host_collect_protocol
if self.arp_suppress:
self.proposed["arp_suppress"] = self.arp_suppress
if self.vbdif_name:
self.proposed["vbdif_name"] = self.evn_peer_ip
if self.bridge_domain_id:
self.proposed["bridge_domain_id"] = self.bridge_domain_id
self.proposed["state"] = self.state
def get_existing(self):
"""get existing info"""
evn_bgp_exist = is_config_exist(self.config, "evn bgp")
if evn_bgp_exist:
self.existing["evn_bgp"] = "enable"
else:
self.existing["evn_bgp"] = "disable"
if evn_bgp_exist:
if is_config_exist(self.config, "server enable"):
self.existing["evn_server"] = "enable"
else:
self.existing["evn_server"] = "disable"
self.existing["evn_source_ip"] = get_evn_srouce(self.config)
self.existing["evn_peer_ip"] = get_evn_peers(self.config)
self.existing["evn_reflect_client"] = get_evn_reflect_client(
self.config)
if is_config_exist(self.config, "arp collect host enable"):
self.existing["host_collect_protocol"] = "enable"
else:
self.existing["host_collect_protocol"] = "disable"
if is_config_exist(self.config, "host collect protocol bgp"):
self.existing["host_collect_protocol"] = "bgp"
else:
self.existing["host_collect_protocol"] = None
if is_config_exist(self.config, "arp broadcast-suppress enable"):
self.existing["arp_suppress"] = "enable"
else:
self.existing["arp_suppress"] = "disable"
def get_end_state(self):
"""get end state info"""
config = self.get_current_config()
evn_bgp_exist = is_config_exist(config, "evn bgp")
if evn_bgp_exist:
self.end_state["evn_bgp"] = "enable"
else:
self.end_state["evn_bgp"] = "disable"
if evn_bgp_exist:
if is_config_exist(config, "server enable"):
self.end_state["evn_server"] = "enable"
else:
self.end_state["evn_server"] = "disable"
self.end_state["evn_source_ip"] = get_evn_srouce(config)
self.end_state["evn_peer_ip"] = get_evn_peers(config)
self.end_state[
"evn_reflect_client"] = get_evn_reflect_client(config)
if is_config_exist(config, "arp collect host enable"):
self.end_state["host_collect_protocol"] = "enable"
else:
self.end_state["host_collect_protocol"] = "disable"
if is_config_exist(config, "host collect protocol bgp"):
self.end_state["host_collect_protocol"] = "bgp"
else:
self.end_state["host_collect_protocol"] = None
if is_config_exist(config, "arp broadcast-suppress enable"):
self.end_state["arp_suppress"] = "enable"
else:
self.end_state["arp_suppress"] = "disable"
def work(self):
"""worker"""
self.check_params()
self.config = self.get_current_config()
self.get_existing()
self.get_proposed()
# deal present or absent
if self.evn_bgp or self.evn_server or self.evn_peer_ip or self.evn_source_ip:
self.config_evn_bgp()
if self.vbdif_name and self.arp_collect_host:
self.config_vbdif()
if self.host_collect_protocol:
self.config_host_collect_protocal()
if self.bridge_domain_id and self.arp_suppress:
self.config_bridge_domain()
if self.commands:
self.cli_load_config(self.commands)
self.changed = True
self.get_end_state()
self.results['changed'] = self.changed
self.results['proposed'] = self.proposed
self.results['existing'] = self.existing
self.results['end_state'] = self.end_state
if self.changed:
self.results['updates'] = self.updates_cmd
else:
self.results['updates'] = list()
self.module.exit_json(**self.results)
def main():
"""Module main"""
argument_spec = dict(
evn_bgp=dict(required=False, type='str',
choices=['enable', 'disable']),
evn_source_ip=dict(required=False, type='str'),
evn_peer_ip=dict(required=False, type='str'),
evn_server=dict(required=False, type='str',
choices=['enable', 'disable']),
evn_reflect_client=dict(
required=False, type='str', choices=['enable', 'disable']),
vbdif_name=dict(required=False, type='str'),
arp_collect_host=dict(required=False, type='str',
choices=['enable', 'disable']),
host_collect_protocol=dict(
required=False, type='str', choices=['bgp', 'none']),
bridge_domain_id=dict(required=False, type='str'),
arp_suppress=dict(required=False, type='str',
choices=['enable', 'disable']),
state=dict(required=False, default='present',
choices=['present', 'absent'])
)
argument_spec.update(ce_argument_spec)
module = VxlanArp(argument_spec)
module.work()
if __name__ == '__main__':
main()
|
from oscar.core.loading import is_model_registered
from oscar.apps.customer import abstract_models
__all__ = []
if not is_model_registered('customer', 'Email'):
class Email(abstract_models.AbstractEmail):
pass
__all__.append('Email')
if not is_model_registered('customer', 'CommunicationEventType'):
class CommunicationEventType(
abstract_models.AbstractCommunicationEventType):
pass
__all__.append('CommunicationEventType')
if not is_model_registered('customer', 'Notification'):
class Notification(abstract_models.AbstractNotification):
pass
__all__.append('Notification')
if not is_model_registered('customer', 'ProductAlert'):
class ProductAlert(abstract_models.AbstractProductAlert):
pass
__all__.append('ProductAlert')
|
from __future__ import unicode_literals
import os.path
import re
from .common import InfoExtractor
from ..compat import (
compat_urllib_parse,
compat_urllib_request,
)
class MonikerIE(InfoExtractor):
IE_DESC = 'allmyvideos.net and vidspot.net'
_VALID_URL = r'https?://(?:www\.)?(?:allmyvideos|vidspot)\.net/(?P<id>[a-zA-Z0-9_-]+)'
_TESTS = [{
'url': 'http://allmyvideos.net/jih3nce3x6wn',
'md5': '710883dee1bfc370ecf9fa6a89307c88',
'info_dict': {
'id': 'jih3nce3x6wn',
'ext': 'mp4',
'title': 'youtube-dl test video',
},
}, {
'url': 'http://vidspot.net/l2ngsmhs8ci5',
'md5': '710883dee1bfc370ecf9fa6a89307c88',
'info_dict': {
'id': 'l2ngsmhs8ci5',
'ext': 'mp4',
'title': 'youtube-dl test video',
},
}, {
'url': 'https://www.vidspot.net/l2ngsmhs8ci5',
'only_matching': True,
}]
def _real_extract(self, url):
video_id = self._match_id(url)
orig_webpage = self._download_webpage(url, video_id)
fields = re.findall(r'type="hidden" name="(.+?)"\s* value="?(.+?)">', orig_webpage)
data = dict(fields)
post = compat_urllib_parse.urlencode(data)
headers = {
b'Content-Type': b'application/x-www-form-urlencoded',
}
req = compat_urllib_request.Request(url, post, headers)
webpage = self._download_webpage(
req, video_id, note='Downloading video page ...')
title = os.path.splitext(data['fname'])[0]
# Could be several links with different quality
links = re.findall(r'"file" : "?(.+?)",', webpage)
# Assume the links are ordered in quality
formats = [{
'url': l,
'quality': i,
} for i, l in enumerate(links)]
self._sort_formats(formats)
return {
'id': video_id,
'title': title,
'formats': formats,
}
|
__author__ = "Thomas Rueckstiess, Frank Sehnke"
from pybrain.tools.example_tools import ExTools
from pybrain.tools.shortcuts import buildNetwork
from pybrain.rl.environments.cartpole import CartPoleEnvironment, BalanceTask
from pybrain.rl.agents import LearningAgent
from pybrain.rl.learners import ENAC
from pybrain.rl.experiments import EpisodicExperiment
batch=50 #number of samples per learning step
prnts=4 #number of learning steps after results are printed
epis=4000/batch/prnts #number of rollouts
numbExp=10 #number of experiments
et = ExTools(batch, prnts, kind = "learner") #tool for printing and plotting
for runs in range(numbExp):
# create environment
env = CartPoleEnvironment()
# create task
task = BalanceTask(env, 200, desiredValue=None)
# create controller network
net = buildNetwork(4, 1, bias=False)
# create agent with controller and learner (and its options)
agent = LearningAgent(net, ENAC())
et.agent = agent
# create the experiment
experiment = EpisodicExperiment(task, agent)
#Do the experiment
for updates in range(epis):
for i in range(prnts):
experiment.doEpisodes(batch)
state, action, reward = agent.learner.dataset.getSequence(agent.learner.dataset.getNumSequences()-1)
et.printResults(reward.sum(), runs, updates)
et.addExps()
et.showExps()
|
__author__ = 'Cosmo Harrigan'
from flask import abort, jsonify
from flask.ext.restful import Resource, reqparse
import socket
from flask_restful_swagger import swagger
COGSERVER_PORT = 17001
class ShellAPI(Resource):
"""
Defines a barebones resource for sending shell commands to the CogServer
"""
# This is because of https://github.com/twilio/flask-restful/issues/134
@classmethod
def new(cls, atomspace):
cls.atomspace = atomspace
return cls
def __init__(self):
self.reqparse = reqparse.RequestParser()
self.reqparse.add_argument('command', type=str, location='args')
super(ShellAPI, self).__init__()
@swagger.operation(
notes='''
Include a JSON object with the POST request containing the command
in a field named "command"
<p>Examples:
<pre>
{'command': 'agents-step'}
{'command': 'agents-step opencog::SimpleImportanceDiffusionAgent'}
</pre>''',
responseClass='response',
nickname='post',
parameters=[
{
'name': 'command',
'description': 'OpenCog Shell command',
'required': True,
'allowMultiple': False,
'dataType': 'string',
'paramType': 'body'
}
],
responseMessages=[
{'code': 200, 'message': 'OpenCog Shell command executed successfully'},
{'code': 400, 'message': 'Invalid request: Required parameter command missing'}
]
)
def post(self):
"""
Send a shell command to the cogserver
"""
# Setup socket to communicate with OpenCog CogServer
try:
connection = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
connection.connect(('localhost', COGSERVER_PORT))
except socket.error as msg:
print msg
# Validate, parse and send the command
data = reqparse.request.get_json()
if 'command' in data:
connection.send(data['command'])
else:
connection.close()
abort(400,
'Invalid request: required parameter command is missing')
connection.close()
return jsonify({'status': 'success'})
|
import hashlib
import re
from livestreamer.plugin import Plugin
from livestreamer.plugin.api import http
from livestreamer.stream import HDSStream
TOKEN_SECRET = '9b673b13fa4682ed14c3cfa5af5310274b514c4133e9b3a81e6e3aba009l2564'
_url_re = re.compile(r"https?://(\w+\.)?wat\.tv/")
_video_id_re = re.compile(r'href="http://m\.wat\.tv/video/([^"]+)', re.IGNORECASE)
class WAT(Plugin):
@classmethod
def can_handle_url(cls, url):
return _url_re.match(url)
def _get_streams(self):
res = http.get(self.url)
match = _video_id_re.search(res.text)
if not match:
return
video_id = match.group(1)
# TODO: Replace with "yield from" when dropping Python 2.
for __ in self._create_streams('web', video_id).items():
yield __
for __ in self._create_streams('webhd', video_id).items():
yield __
def _create_streams(self, type_, video_id):
url = self._generate_security_url(type_, video_id)
res = http.get(url)
return HDSStream.parse_manifest(self.session, res.text, cookies=res.cookies)
def _generate_security_url(self, type_, video_id):
token = self._generate_security_token(type_, video_id)
return ("http://www.wat.tv/get/{type_}/{video_id}?token={token}"
"&domain=www.wat.tv&refererURL=wat.tv&revision=04.00.719%0A&"
"synd=0&helios=1&context=playerWat&pub=1&country=FR"
"&sitepage=WAT%2Ftv%2Ft%2Finedit%2Ftf1%2Fparamount_pictures_"
"france&lieu=wat&playerContext=CONTEXT_WAT&getURL=1"
"&version=LNX%2014,0,0,125").format(**locals())
def _generate_security_token(self, type_, video_id):
# Get timestamp
res = http.get('http://www.wat.tv/servertime')
timestamp = int(res.text.split('|')[0])
timestamp_hex = format(timestamp, 'x').rjust(8, '0')
# Player id
player_prefix = "/{0}/{1}".format(type_, video_id)
# Create the token
data = (TOKEN_SECRET + player_prefix + timestamp_hex).encode('utf8')
token = hashlib.md5(data)
token = "{0}/{1}".format(token.hexdigest(), timestamp_hex)
return token
__plugin__ = WAT
|
"""==============
Array indexing
==============
Array indexing refers to any use of the square brackets ([]) to index
array values. There are many options to indexing, which give numpy
indexing great power, but with power comes some complexity and the
potential for confusion. This section is just an overview of the
various options and issues related to indexing. Aside from single
element indexing, the details on most of these options are to be
found in related sections.
Assignment vs referencing
=========================
Most of the following examples show the use of indexing when
referencing data in an array. The examples work just as well
when assigning to an array. See the section at the end for
specific examples and explanations on how assignments work.
Single element indexing
=======================
Single element indexing for a 1-D array is what one expects. It works
exactly like that for other standard Python sequences. It is 0-based,
and accepts negative indices for indexing from the end of the array. ::
>>> x = np.arange(10)
>>> x[2]
2
>>> x[-2]
8
Unlike lists and tuples, numpy arrays support multidimensional indexing
for multidimensional arrays. That means that it is not necessary to
separate each dimension's index into its own set of square brackets. ::
>>> x.shape = (2,5) # now x is 2-dimensional
>>> x[1,3]
8
>>> x[1,-1]
9
Note that if one indexes a multidimensional array with fewer indices
than dimensions, one gets a subdimensional array. For example: ::
>>> x[0]
array([0, 1, 2, 3, 4])
That is, each index specified selects the array corresponding to the
rest of the dimensions selected. In the above example, choosing 0
means that the remaining dimension of length 5 is being left unspecified,
and that what is returned is an array of that dimensionality and size.
It must be noted that the returned array is not a copy of the original,
but points to the same values in memory as does the original array.
In this case, the 1-D array at the first position (0) is returned.
So using a single index on the returned array results in a single
element being returned. That is: ::
>>> x[0][2]
2
So note that ``x[0,2] == x[0][2]`` though the second case is more
inefficient as a new temporary array is created after the first index
that is subsequently indexed by 2.
A note to those used to IDL or Fortran memory order as it relates to
indexing: NumPy uses C-order indexing. That means that the last
index usually represents the most rapidly changing memory location,
unlike Fortran or IDL, where the first index represents the most
rapidly changing location in memory. This difference represents a
great potential for confusion.
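As a small, illustrative check of this (not part of the indexing API
itself, just a property of a freshly created C-ordered array): ::
>>> a = np.arange(6).reshape(2, 3)
>>> a.strides[-1] == a.itemsize  # the last axis is contiguous in C order
True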
Other indexing options
======================
It is possible to slice and stride arrays to extract arrays of the
same number of dimensions, but of different sizes than the original.
The slicing and striding works exactly the same way it does for lists
and tuples except that they can be applied to multiple dimensions as
well. A few examples illustrate this best: ::
>>> x = np.arange(10)
>>> x[2:5]
array([2, 3, 4])
>>> x[:-7]
array([0, 1, 2])
>>> x[1:7:2]
array([1, 3, 5])
>>> y = np.arange(35).reshape(5,7)
>>> y[1:5:2,::3]
array([[ 7, 10, 13],
[21, 24, 27]])
Note that slices of arrays do not copy the internal array data but
only produce new views of the original data.
It is possible to index arrays with other arrays for the purposes of
selecting lists of values out of arrays into new arrays. There are
two different ways of accomplishing this. One uses one or more arrays
of index values. The other involves giving a boolean array of the proper
shape to indicate the values to be selected. Index arrays are a very
powerful tool that allow one to avoid looping over individual elements in
arrays and thus greatly improve performance.
It is possible to use special features to effectively increase the
number of dimensions in an array through indexing so the resulting
array acquires the shape needed for use in an expression or with a
specific function.
Index arrays
============
NumPy arrays may be indexed with other arrays (or any other sequence-
like object that can be converted to an array, such as lists, with the
exception of tuples; see the end of this document for why this is). The
use of index arrays ranges from simple, straightforward cases to
complex, hard-to-understand cases. For all cases of index arrays, what
is returned is a copy of the original data, not a view as one gets for
slices.
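A small illustration of this copy behavior (the names ``a`` and ``c``
are just for the example): ::
>>> a = np.arange(5)
>>> c = a[np.array([0, 1])]
>>> c[0] = 99
>>> a[0]  # the original array is unchanged, so a copy was made
0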
Index arrays must be of integer type. Each value in the array indicates
which value in the array to use in place of the index. To illustrate: ::
>>> x = np.arange(10,1,-1)
>>> x
array([10, 9, 8, 7, 6, 5, 4, 3, 2])
>>> x[np.array([3, 3, 1, 8])]
array([7, 7, 9, 2])
The index array consisting of the values 3, 3, 1 and 8 correspondingly
creates an array of length 4 (the same as the index array) where each
index is replaced by the value that the array being indexed has at that
position.
Negative values are permitted and work as they do with single indices
or slices: ::
>>> x[np.array([3,3,-3,8])]
array([7, 7, 4, 2])
It is an error to have index values out of bounds: ::
>>> x[np.array([3, 3, 20, 8])]
<type 'exceptions.IndexError'>: index 20 out of bounds 0<=index<9
Generally speaking, what is returned when index arrays are used is
an array with the same shape as the index array, but with the type
and values of the array being indexed. As an example, we can use a
multidimensional index array instead: ::
>>> x[np.array([[1,1],[2,3]])]
array([[9, 9],
[8, 7]])
Indexing multi-dimensional arrays
=================================
Things become more complex when multidimensional arrays are indexed,
particularly with multidimensional index arrays. These tend to be
more unusual uses, but they are permitted, and they are useful for some
problems. We'll start with the simplest multidimensional case (using
the array y from the previous examples): ::
>>> y[np.array([0,2,4]), np.array([0,1,2])]
array([ 0, 15, 30])
In this case, if the index arrays have a matching shape, and there is
an index array for each dimension of the array being indexed, the
resultant array has the same shape as the index arrays, and the values
correspond to the index set for each position in the index arrays. In
this example, the first index value is 0 for both index arrays, and
thus the first value of the resultant array is y[0,0]. The next value
is y[2,1], and the last is y[4,2].
If the index arrays do not have the same shape, there is an attempt to
broadcast them to the same shape. If they cannot be broadcast to the
same shape, an exception is raised: ::
>>> y[np.array([0,2,4]), np.array([0,1])]
<type 'exceptions.ValueError'>: shape mismatch: objects cannot be
broadcast to a single shape
The broadcasting mechanism permits index arrays to be combined with
scalars for other indices. The effect is that the scalar value is used
for all the corresponding values of the index arrays: ::
>>> y[np.array([0,2,4]), 1]
array([ 1, 15, 29])
Jumping to the next level of complexity, it is possible to only
partially index an array with index arrays. It takes a bit of thought
to understand what happens in such cases. For example if we just use
one index array with y: ::
>>> y[np.array([0,2,4])]
array([[ 0, 1, 2, 3, 4, 5, 6],
[14, 15, 16, 17, 18, 19, 20],
[28, 29, 30, 31, 32, 33, 34]])
What results is the construction of a new array where each value of
the index array selects one row from the array being indexed and the
resultant array has the resulting shape (number of index elements,
size of row).
An example of where this may be useful is for a color lookup table
where we want to map the values of an image into RGB triples for
display. The lookup table could have a shape (nlookup, 3). Indexing
such an array with an image with shape (ny, nx) with dtype=np.uint8
(or any integer type so long as values are within the bounds of the
lookup table) will result in an array of shape (ny, nx, 3) where a
triple of RGB values is associated with each pixel location.
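A minimal sketch of such a lookup (the ``palette`` and ``image`` names
are illustrative only): ::
>>> palette = np.array([[0, 0, 0], [255, 0, 0], [0, 255, 0]])  # (nlookup, 3)
>>> image = np.array([[0, 1], [2, 0]], dtype=np.uint8)  # (ny, nx)
>>> palette[image].shape
(2, 2, 3)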
In general, the shape of the resultant array will be the concatenation
of the shape of the index array (or the shape that all the index arrays
were broadcast to) with the shape of any unused dimensions (those not
indexed) in the array being indexed.
Boolean or "mask" index arrays
==============================
Boolean arrays used as indices are treated in a different manner
entirely than index arrays. Boolean arrays must be of the same shape
as the initial dimensions of the array being indexed. In the
most straightforward case, the boolean array has the same shape: ::
>>> b = y>20
>>> y[b]
array([21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34])
Unlike in the case of integer index arrays, in the boolean case, the
result is a 1-D array containing all the elements in the indexed array
corresponding to all the true elements in the boolean array. The
elements in the indexed array are always iterated and returned in
:term:`row-major` (C-style) order. The result is also identical to
``y[np.nonzero(b)]``. As with index arrays, what is returned is a copy
of the data, not a view as one gets with slices.
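As a quick check of that equivalence (using ``y`` and ``b`` from above): ::
>>> np.array_equal(y[b], y[np.nonzero(b)])
True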
The result will be multidimensional if y has more dimensions than b.
For example: ::
>>> b[:,5] # use a 1-D boolean whose first dim agrees with the first dim of y
array([False, False, False, True, True], dtype=bool)
>>> y[b[:,5]]
array([[21, 22, 23, 24, 25, 26, 27],
[28, 29, 30, 31, 32, 33, 34]])
Here the 4th and 5th rows are selected from the indexed array and
combined to make a 2-D array.
In general, when the boolean array has fewer dimensions than the array
being indexed, this is equivalent to y[b, ...], which means
y is indexed by b followed by as many : as are needed to fill
out the rank of y.
Thus the shape of the result is one dimension containing the number
of True elements of the boolean array, followed by the remaining
dimensions of the array being indexed.
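As a quick check of the ``y[b, ...]`` equivalence using the 1-D boolean
from the previous example: ::
>>> np.array_equal(y[b[:, 5]], y[b[:, 5], :])
True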
For example, using a 2-D boolean array of shape (2,3)
with four True elements to select rows from a 3-D array of shape
(2,3,5) results in a 2-D result of shape (4,5): ::
>>> x = np.arange(30).reshape(2,3,5)
>>> x
array([[[ 0, 1, 2, 3, 4],
[ 5, 6, 7, 8, 9],
[10, 11, 12, 13, 14]],
[[15, 16, 17, 18, 19],
[20, 21, 22, 23, 24],
[25, 26, 27, 28, 29]]])
>>> b = np.array([[True, True, False], [False, True, True]])
>>> x[b]
array([[ 0, 1, 2, 3, 4],
[ 5, 6, 7, 8, 9],
[20, 21, 22, 23, 24],
[25, 26, 27, 28, 29]])
For further details, consult the numpy reference documentation on array indexing.
Combining index arrays with slices
==================================
Index arrays may be combined with slices. For example: ::
>>> y[np.array([0,2,4]),1:3]
array([[ 1, 2],
[15, 16],
[29, 30]])
In effect, the slice is converted to an index array
np.array([[1,2]]) (shape (1,2)) that is broadcast with the index array
to produce a resultant array of shape (3,2).
Likewise, slicing can be combined with broadcasted boolean indices: ::
>>> y[b[:,5],1:3]
array([[22, 23],
[29, 30]])
Structural indexing tools
=========================
To facilitate easy matching of array shapes with expressions and in
assignments, the np.newaxis object can be used within array indices
to add new dimensions with a size of 1. For example: ::
>>> y.shape
(5, 7)
>>> y[:,np.newaxis,:].shape
(5, 1, 7)
Note that there are no new elements in the array, just that the
dimensionality is increased. This can be handy to combine two
arrays in a way that otherwise would require explicit reshaping
operations. For example: ::
>>> x = np.arange(5)
>>> x[:,np.newaxis] + x[np.newaxis,:]
array([[0, 1, 2, 3, 4],
[1, 2, 3, 4, 5],
[2, 3, 4, 5, 6],
[3, 4, 5, 6, 7],
[4, 5, 6, 7, 8]])
The ellipsis syntax may be used to indicate selecting in full any
remaining unspecified dimensions. For example: ::
>>> z = np.arange(81).reshape(3,3,3,3)
>>> z[1,...,2]
array([[29, 32, 35],
[38, 41, 44],
[47, 50, 53]])
This is equivalent to: ::
>>> z[1,:,:,2]
array([[29, 32, 35],
[38, 41, 44],
[47, 50, 53]])
Assigning values to indexed arrays
==================================
As mentioned, one can select a subset of an array to assign to using
a single index, slices, and index and mask arrays. The value being
assigned to the indexed array must be shape consistent (the same shape
or broadcastable to the shape the index produces). For example, it is
permitted to assign a constant to a slice: ::
>>> x = np.arange(10)
>>> x[2:7] = 1
or an array of the right size: ::
>>> x[2:7] = np.arange(5)
Note that assignments may result in changes if assigning
higher types to lower types (like floats to ints) or even
exceptions (assigning complex to floats or ints): ::
>>> x[1] = 1.2
>>> x[1]
1
>>> x[1] = 1.2j
<type 'exceptions.TypeError'>: can't convert complex to long; use
long(abs(z))
Unlike some of the references (such as array and mask indices)
assignments are always made to the original data in the array
(indeed, nothing else would make sense!). Note though, that some
actions may not work as one may naively expect. This particular
example is often surprising to people: ::
>>> x = np.arange(0, 50, 10)
>>> x
array([ 0, 10, 20, 30, 40])
>>> x[np.array([1, 1, 3, 1])] += 1
>>> x
array([ 0, 11, 20, 31, 40])
People might expect that the 1st location would be incremented by 3.
In fact, it will only be incremented by 1. The reason is that
a new array is extracted from the original (as a temporary) containing
the values at 1, 1, 3, 1, then the value 1 is added to the temporary,
and then the temporary is assigned back to the original array. Thus
the value of the array at x[1]+1 is assigned to x[1] three times,
rather than being incremented 3 times.
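If the accumulating behavior is what is actually wanted, the unbuffered
``np.add.at`` ufunc method applies the addition once per index, repeats
included (shown here only as a brief aside): ::
>>> x = np.arange(0, 50, 10)
>>> np.add.at(x, np.array([1, 1, 3, 1]), 1)
>>> x
array([ 0, 13, 20, 31, 40])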
Dealing with variable numbers of indices within programs
========================================================
The index syntax is very powerful but limiting when dealing with
a variable number of indices. For example, if you want to write
a function that can handle arguments with various numbers of
dimensions without having to write special case code for each
number of possible dimensions, how can that be done? If one
supplies to the index a tuple, the tuple will be interpreted
as a list of indices. For example (using the previous definition
for the array z): ::
>>> indices = (1,1,1,1)
>>> z[indices]
40
So one can use code to construct tuples of any number of indices
and then use these within an index.
Slices can be specified within programs by using the slice() function
in Python. For example: ::
>>> indices = (1,1,1,slice(0,2)) # same as [1,1,1,0:2]
>>> z[indices]
array([39, 40])
Likewise, ellipsis can be specified by code by using the Ellipsis
object: ::
>>> indices = (1, Ellipsis, 1) # same as [1,...,1]
>>> z[indices]
array([[28, 31, 34],
[37, 40, 43],
[46, 49, 52]])
For this reason it is possible to use the output from the np.where()
function directly as an index since it always returns a tuple of index
arrays.
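For example, with the array y from the earlier examples: ::
>>> y[np.where(y > 30)]
array([31, 32, 33, 34])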
Because of the special treatment of tuples, they are not automatically
converted to an array as a list would be. As an example: ::
>>> z[[1,1,1,1]] # produces a large array
array([[[[27, 28, 29],
[30, 31, 32], ...
>>> z[(1,1,1,1)] # returns a single value
40
"""
from __future__ import division, absolute_import, print_function
|
kwargs = {'foo': 'bar'}
class Foo(object):
@classmethod
def test(cls):
cls(**kwargs, <error descr="Python versions < 3.5 do not allow keyword arguments after **expression">foo=1</error>)
|
"""
set_default_site.py
"""
import socket
from optparse import make_option
from django.core.management.base import NoArgsCommand, CommandError
from django_extensions.management.utils import signalcommand
class Command(NoArgsCommand):
option_list = NoArgsCommand.option_list + (
make_option('--name', dest='site_name', default=None,
help='Use this as site name.'),
make_option('--domain', dest='site_domain', default=None,
help='Use this as site domain.'),
make_option('--system-fqdn', dest='set_as_system_fqdn', default=False,
action="store_true", help='Use the systems FQDN (Fully Qualified Domain Name) as name and domain. Can be used in combination with --name'),
)
help = "Set parameters of the default django.contrib.sites Site"
@signalcommand
def handle_noargs(self, **options):
from django.contrib.sites.models import Site
try:
site = Site.objects.get(pk=1)
except Site.DoesNotExist:
raise CommandError("Default site with pk=1 does not exist")
else:
name = options.get("site_name", None)
domain = options.get("site_domain", None)
if options.get('set_as_system_fqdn', False):
domain = socket.getfqdn()
if not domain:
raise CommandError("Cannot find systems FQDN")
if name is None:
name = domain
update_kwargs = {}
if name and name != site.name:
update_kwargs["name"] = name
if domain and domain != site.domain:
update_kwargs["domain"] = domain
if update_kwargs:
Site.objects.filter(pk=1).update(**update_kwargs)
site = Site.objects.get(pk=1)
print("Updated default site. You might need to restart django as sites are cached aggressively.")
else:
print("Nothing to update (need --name, --domain and/or --system-fqdn)")
print("Default Site:")
print("\tid = %s" % site.id)
print("\tname = %s" % site.name)
print("\tdomain = %s" % site.domain)
|
from pybindgen import Module, FileCodeSink, param, retval, cppclass, typehandlers
import pybindgen.settings
import warnings
class ErrorHandler(pybindgen.settings.ErrorHandler):
def handle_error(self, wrapper, exception, traceback_):
warnings.warn("exception %r in wrapper %s" % (exception, wrapper))
return True
pybindgen.settings.error_handler = ErrorHandler()
import sys
def module_init():
root_module = Module('ns.antenna', cpp_namespace='::ns3')
return root_module
def register_types(module):
root_module = module.get_root()
## angles.h (module 'antenna'): ns3::Angles [struct]
module.add_class('Angles')
## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList [class]
module.add_class('AttributeConstructionList', import_from_module='ns.core')
## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList::Item [struct]
module.add_class('Item', import_from_module='ns.core', outer_class=root_module['ns3::AttributeConstructionList'])
## callback.h (module 'core'): ns3::CallbackBase [class]
module.add_class('CallbackBase', import_from_module='ns.core')
## hash.h (module 'core'): ns3::Hasher [class]
module.add_class('Hasher', import_from_module='ns.core')
## object-base.h (module 'core'): ns3::ObjectBase [class]
module.add_class('ObjectBase', allow_subclassing=True, import_from_module='ns.core')
## object.h (module 'core'): ns3::ObjectDeleter [struct]
module.add_class('ObjectDeleter', import_from_module='ns.core')
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::Object, ns3::ObjectBase, ns3::ObjectDeleter> [class]
module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::Object', 'ns3::ObjectBase', 'ns3::ObjectDeleter'], parent=root_module['ns3::ObjectBase'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
## type-id.h (module 'core'): ns3::TypeId [class]
module.add_class('TypeId', import_from_module='ns.core')
## type-id.h (module 'core'): ns3::TypeId::AttributeFlag [enumeration]
module.add_enum('AttributeFlag', ['ATTR_GET', 'ATTR_SET', 'ATTR_CONSTRUCT', 'ATTR_SGC'], outer_class=root_module['ns3::TypeId'], import_from_module='ns.core')
## type-id.h (module 'core'): ns3::TypeId::SupportLevel [enumeration]
module.add_enum('SupportLevel', ['SUPPORTED', 'DEPRECATED', 'OBSOLETE'], outer_class=root_module['ns3::TypeId'], import_from_module='ns.core')
## type-id.h (module 'core'): ns3::TypeId::AttributeInformation [struct]
module.add_class('AttributeInformation', import_from_module='ns.core', outer_class=root_module['ns3::TypeId'])
## type-id.h (module 'core'): ns3::TypeId::TraceSourceInformation [struct]
module.add_class('TraceSourceInformation', import_from_module='ns.core', outer_class=root_module['ns3::TypeId'])
## vector.h (module 'core'): ns3::Vector2D [class]
module.add_class('Vector2D', import_from_module='ns.core')
## vector.h (module 'core'): ns3::Vector3D [class]
module.add_class('Vector3D', import_from_module='ns.core')
## empty.h (module 'core'): ns3::empty [class]
module.add_class('empty', import_from_module='ns.core')
## object.h (module 'core'): ns3::Object [class]
module.add_class('Object', import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::Object, ns3::ObjectBase, ns3::ObjectDeleter >'])
## object.h (module 'core'): ns3::Object::AggregateIterator [class]
module.add_class('AggregateIterator', import_from_module='ns.core', outer_class=root_module['ns3::Object'])
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter<ns3::AttributeAccessor> > [class]
module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::AttributeAccessor', 'ns3::empty', 'ns3::DefaultDeleter<ns3::AttributeAccessor>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter<ns3::AttributeChecker> > [class]
module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::AttributeChecker', 'ns3::empty', 'ns3::DefaultDeleter<ns3::AttributeChecker>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter<ns3::AttributeValue> > [class]
module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::AttributeValue', 'ns3::empty', 'ns3::DefaultDeleter<ns3::AttributeValue>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter<ns3::CallbackImplBase> > [class]
module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::CallbackImplBase', 'ns3::empty', 'ns3::DefaultDeleter<ns3::CallbackImplBase>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::Hash::Implementation, ns3::empty, ns3::DefaultDeleter<ns3::Hash::Implementation> > [class]
module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::Hash::Implementation', 'ns3::empty', 'ns3::DefaultDeleter<ns3::Hash::Implementation>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter<ns3::TraceSourceAccessor> > [class]
module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::TraceSourceAccessor', 'ns3::empty', 'ns3::DefaultDeleter<ns3::TraceSourceAccessor>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
## trace-source-accessor.h (module 'core'): ns3::TraceSourceAccessor [class]
module.add_class('TraceSourceAccessor', import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter<ns3::TraceSourceAccessor> >'])
## antenna-model.h (module 'antenna'): ns3::AntennaModel [class]
module.add_class('AntennaModel', parent=root_module['ns3::Object'])
## attribute.h (module 'core'): ns3::AttributeAccessor [class]
module.add_class('AttributeAccessor', import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter<ns3::AttributeAccessor> >'])
## attribute.h (module 'core'): ns3::AttributeChecker [class]
module.add_class('AttributeChecker', allow_subclassing=False, automatic_type_narrowing=True, import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter<ns3::AttributeChecker> >'])
## attribute.h (module 'core'): ns3::AttributeValue [class]
module.add_class('AttributeValue', allow_subclassing=False, automatic_type_narrowing=True, import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter<ns3::AttributeValue> >'])
## callback.h (module 'core'): ns3::CallbackChecker [class]
module.add_class('CallbackChecker', import_from_module='ns.core', parent=root_module['ns3::AttributeChecker'])
## callback.h (module 'core'): ns3::CallbackImplBase [class]
module.add_class('CallbackImplBase', import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter<ns3::CallbackImplBase> >'])
## callback.h (module 'core'): ns3::CallbackValue [class]
module.add_class('CallbackValue', import_from_module='ns.core', parent=root_module['ns3::AttributeValue'])
## cosine-antenna-model.h (module 'antenna'): ns3::CosineAntennaModel [class]
module.add_class('CosineAntennaModel', parent=root_module['ns3::AntennaModel'])
## attribute.h (module 'core'): ns3::EmptyAttributeAccessor [class]
module.add_class('EmptyAttributeAccessor', import_from_module='ns.core', parent=root_module['ns3::AttributeAccessor'])
## attribute.h (module 'core'): ns3::EmptyAttributeChecker [class]
module.add_class('EmptyAttributeChecker', import_from_module='ns.core', parent=root_module['ns3::AttributeChecker'])
## attribute.h (module 'core'): ns3::EmptyAttributeValue [class]
module.add_class('EmptyAttributeValue', import_from_module='ns.core', parent=root_module['ns3::AttributeValue'])
## isotropic-antenna-model.h (module 'antenna'): ns3::IsotropicAntennaModel [class]
module.add_class('IsotropicAntennaModel', parent=root_module['ns3::AntennaModel'])
## parabolic-antenna-model.h (module 'antenna'): ns3::ParabolicAntennaModel [class]
module.add_class('ParabolicAntennaModel', parent=root_module['ns3::AntennaModel'])
## type-id.h (module 'core'): ns3::TypeIdChecker [class]
module.add_class('TypeIdChecker', import_from_module='ns.core', parent=root_module['ns3::AttributeChecker'])
## type-id.h (module 'core'): ns3::TypeIdValue [class]
module.add_class('TypeIdValue', import_from_module='ns.core', parent=root_module['ns3::AttributeValue'])
## vector.h (module 'core'): ns3::Vector2DChecker [class]
module.add_class('Vector2DChecker', import_from_module='ns.core', parent=root_module['ns3::AttributeChecker'])
## vector.h (module 'core'): ns3::Vector2DValue [class]
module.add_class('Vector2DValue', import_from_module='ns.core', parent=root_module['ns3::AttributeValue'])
## vector.h (module 'core'): ns3::Vector3DChecker [class]
module.add_class('Vector3DChecker', import_from_module='ns.core', parent=root_module['ns3::AttributeChecker'])
## vector.h (module 'core'): ns3::Vector3DValue [class]
module.add_class('Vector3DValue', import_from_module='ns.core', parent=root_module['ns3::AttributeValue'])
typehandlers.add_type_alias(u'ns3::Vector3D', u'ns3::Vector')
typehandlers.add_type_alias(u'ns3::Vector3D*', u'ns3::Vector*')
typehandlers.add_type_alias(u'ns3::Vector3D&', u'ns3::Vector&')
module.add_typedef(root_module['ns3::Vector3D'], 'Vector')
typehandlers.add_type_alias(u'ns3::Vector3DValue', u'ns3::VectorValue')
typehandlers.add_type_alias(u'ns3::Vector3DValue*', u'ns3::VectorValue*')
typehandlers.add_type_alias(u'ns3::Vector3DValue&', u'ns3::VectorValue&')
module.add_typedef(root_module['ns3::Vector3DValue'], 'VectorValue')
typehandlers.add_type_alias(u'ns3::Vector3DChecker', u'ns3::VectorChecker')
typehandlers.add_type_alias(u'ns3::Vector3DChecker*', u'ns3::VectorChecker*')
typehandlers.add_type_alias(u'ns3::Vector3DChecker&', u'ns3::VectorChecker&')
module.add_typedef(root_module['ns3::Vector3DChecker'], 'VectorChecker')
## Register a nested module for the namespace FatalImpl
nested_module = module.add_cpp_namespace('FatalImpl')
register_types_ns3_FatalImpl(nested_module)
## Register a nested module for the namespace Hash
nested_module = module.add_cpp_namespace('Hash')
register_types_ns3_Hash(nested_module)
def register_types_ns3_FatalImpl(module):
root_module = module.get_root()
def register_types_ns3_Hash(module):
root_module = module.get_root()
## hash-function.h (module 'core'): ns3::Hash::Implementation [class]
module.add_class('Implementation', import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::Hash::Implementation, ns3::empty, ns3::DefaultDeleter<ns3::Hash::Implementation> >'])
typehandlers.add_type_alias(u'uint32_t ( * ) ( char const *, size_t ) *', u'ns3::Hash::Hash32Function_ptr')
typehandlers.add_type_alias(u'uint32_t ( * ) ( char const *, size_t ) **', u'ns3::Hash::Hash32Function_ptr*')
typehandlers.add_type_alias(u'uint32_t ( * ) ( char const *, size_t ) *&', u'ns3::Hash::Hash32Function_ptr&')
typehandlers.add_type_alias(u'uint64_t ( * ) ( char const *, size_t ) *', u'ns3::Hash::Hash64Function_ptr')
typehandlers.add_type_alias(u'uint64_t ( * ) ( char const *, size_t ) **', u'ns3::Hash::Hash64Function_ptr*')
typehandlers.add_type_alias(u'uint64_t ( * ) ( char const *, size_t ) *&', u'ns3::Hash::Hash64Function_ptr&')
## Register a nested module for the namespace Function
nested_module = module.add_cpp_namespace('Function')
register_types_ns3_Hash_Function(nested_module)
def register_types_ns3_Hash_Function(module):
root_module = module.get_root()
## hash-fnv.h (module 'core'): ns3::Hash::Function::Fnv1a [class]
module.add_class('Fnv1a', import_from_module='ns.core', parent=root_module['ns3::Hash::Implementation'])
## hash-function.h (module 'core'): ns3::Hash::Function::Hash32 [class]
module.add_class('Hash32', import_from_module='ns.core', parent=root_module['ns3::Hash::Implementation'])
## hash-function.h (module 'core'): ns3::Hash::Function::Hash64 [class]
module.add_class('Hash64', import_from_module='ns.core', parent=root_module['ns3::Hash::Implementation'])
## hash-murmur3.h (module 'core'): ns3::Hash::Function::Murmur3 [class]
module.add_class('Murmur3', import_from_module='ns.core', parent=root_module['ns3::Hash::Implementation'])
def register_methods(root_module):
register_Ns3Angles_methods(root_module, root_module['ns3::Angles'])
register_Ns3AttributeConstructionList_methods(root_module, root_module['ns3::AttributeConstructionList'])
register_Ns3AttributeConstructionListItem_methods(root_module, root_module['ns3::AttributeConstructionList::Item'])
register_Ns3CallbackBase_methods(root_module, root_module['ns3::CallbackBase'])
register_Ns3Hasher_methods(root_module, root_module['ns3::Hasher'])
register_Ns3ObjectBase_methods(root_module, root_module['ns3::ObjectBase'])
register_Ns3ObjectDeleter_methods(root_module, root_module['ns3::ObjectDeleter'])
register_Ns3SimpleRefCount__Ns3Object_Ns3ObjectBase_Ns3ObjectDeleter_methods(root_module, root_module['ns3::SimpleRefCount< ns3::Object, ns3::ObjectBase, ns3::ObjectDeleter >'])
register_Ns3TypeId_methods(root_module, root_module['ns3::TypeId'])
register_Ns3TypeIdAttributeInformation_methods(root_module, root_module['ns3::TypeId::AttributeInformation'])
register_Ns3TypeIdTraceSourceInformation_methods(root_module, root_module['ns3::TypeId::TraceSourceInformation'])
register_Ns3Vector2D_methods(root_module, root_module['ns3::Vector2D'])
register_Ns3Vector3D_methods(root_module, root_module['ns3::Vector3D'])
register_Ns3Empty_methods(root_module, root_module['ns3::empty'])
register_Ns3Object_methods(root_module, root_module['ns3::Object'])
register_Ns3ObjectAggregateIterator_methods(root_module, root_module['ns3::Object::AggregateIterator'])
register_Ns3SimpleRefCount__Ns3AttributeAccessor_Ns3Empty_Ns3DefaultDeleter__lt__ns3AttributeAccessor__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter<ns3::AttributeAccessor> >'])
register_Ns3SimpleRefCount__Ns3AttributeChecker_Ns3Empty_Ns3DefaultDeleter__lt__ns3AttributeChecker__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter<ns3::AttributeChecker> >'])
register_Ns3SimpleRefCount__Ns3AttributeValue_Ns3Empty_Ns3DefaultDeleter__lt__ns3AttributeValue__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter<ns3::AttributeValue> >'])
register_Ns3SimpleRefCount__Ns3CallbackImplBase_Ns3Empty_Ns3DefaultDeleter__lt__ns3CallbackImplBase__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter<ns3::CallbackImplBase> >'])
register_Ns3SimpleRefCount__Ns3HashImplementation_Ns3Empty_Ns3DefaultDeleter__lt__ns3HashImplementation__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::Hash::Implementation, ns3::empty, ns3::DefaultDeleter<ns3::Hash::Implementation> >'])
register_Ns3SimpleRefCount__Ns3TraceSourceAccessor_Ns3Empty_Ns3DefaultDeleter__lt__ns3TraceSourceAccessor__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter<ns3::TraceSourceAccessor> >'])
register_Ns3TraceSourceAccessor_methods(root_module, root_module['ns3::TraceSourceAccessor'])
register_Ns3AntennaModel_methods(root_module, root_module['ns3::AntennaModel'])
register_Ns3AttributeAccessor_methods(root_module, root_module['ns3::AttributeAccessor'])
register_Ns3AttributeChecker_methods(root_module, root_module['ns3::AttributeChecker'])
register_Ns3AttributeValue_methods(root_module, root_module['ns3::AttributeValue'])
register_Ns3CallbackChecker_methods(root_module, root_module['ns3::CallbackChecker'])
register_Ns3CallbackImplBase_methods(root_module, root_module['ns3::CallbackImplBase'])
register_Ns3CallbackValue_methods(root_module, root_module['ns3::CallbackValue'])
register_Ns3CosineAntennaModel_methods(root_module, root_module['ns3::CosineAntennaModel'])
register_Ns3EmptyAttributeAccessor_methods(root_module, root_module['ns3::EmptyAttributeAccessor'])
register_Ns3EmptyAttributeChecker_methods(root_module, root_module['ns3::EmptyAttributeChecker'])
register_Ns3EmptyAttributeValue_methods(root_module, root_module['ns3::EmptyAttributeValue'])
register_Ns3IsotropicAntennaModel_methods(root_module, root_module['ns3::IsotropicAntennaModel'])
register_Ns3ParabolicAntennaModel_methods(root_module, root_module['ns3::ParabolicAntennaModel'])
register_Ns3TypeIdChecker_methods(root_module, root_module['ns3::TypeIdChecker'])
register_Ns3TypeIdValue_methods(root_module, root_module['ns3::TypeIdValue'])
register_Ns3Vector2DChecker_methods(root_module, root_module['ns3::Vector2DChecker'])
register_Ns3Vector2DValue_methods(root_module, root_module['ns3::Vector2DValue'])
register_Ns3Vector3DChecker_methods(root_module, root_module['ns3::Vector3DChecker'])
register_Ns3Vector3DValue_methods(root_module, root_module['ns3::Vector3DValue'])
register_Ns3HashImplementation_methods(root_module, root_module['ns3::Hash::Implementation'])
register_Ns3HashFunctionFnv1a_methods(root_module, root_module['ns3::Hash::Function::Fnv1a'])
register_Ns3HashFunctionHash32_methods(root_module, root_module['ns3::Hash::Function::Hash32'])
register_Ns3HashFunctionHash64_methods(root_module, root_module['ns3::Hash::Function::Hash64'])
register_Ns3HashFunctionMurmur3_methods(root_module, root_module['ns3::Hash::Function::Murmur3'])
return
def register_Ns3Angles_methods(root_module, cls):
cls.add_output_stream_operator()
## angles.h (module 'antenna'): ns3::Angles::Angles(ns3::Angles const & arg0) [copy constructor]
cls.add_constructor([param('ns3::Angles const &', 'arg0')])
## angles.h (module 'antenna'): ns3::Angles::Angles() [constructor]
cls.add_constructor([])
## angles.h (module 'antenna'): ns3::Angles::Angles(double phi, double theta) [constructor]
cls.add_constructor([param('double', 'phi'), param('double', 'theta')])
## angles.h (module 'antenna'): ns3::Angles::Angles(ns3::Vector v) [constructor]
cls.add_constructor([param('ns3::Vector', 'v')])
## angles.h (module 'antenna'): ns3::Angles::Angles(ns3::Vector v, ns3::Vector o) [constructor]
cls.add_constructor([param('ns3::Vector', 'v'), param('ns3::Vector', 'o')])
## angles.h (module 'antenna'): ns3::Angles::phi [variable]
cls.add_instance_attribute('phi', 'double', is_const=False)
## angles.h (module 'antenna'): ns3::Angles::theta [variable]
cls.add_instance_attribute('theta', 'double', is_const=False)
return
def register_Ns3AttributeConstructionList_methods(root_module, cls):
## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList::AttributeConstructionList(ns3::AttributeConstructionList const & arg0) [copy constructor]
cls.add_constructor([param('ns3::AttributeConstructionList const &', 'arg0')])
## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList::AttributeConstructionList() [constructor]
cls.add_constructor([])
## attribute-construction-list.h (module 'core'): void ns3::AttributeConstructionList::Add(std::string name, ns3::Ptr<ns3::AttributeChecker const> checker, ns3::Ptr<ns3::AttributeValue> value) [member function]
cls.add_method('Add',
'void',
[param('std::string', 'name'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker'), param('ns3::Ptr< ns3::AttributeValue >', 'value')])
## attribute-construction-list.h (module 'core'): std::_List_const_iterator<ns3::AttributeConstructionList::Item> ns3::AttributeConstructionList::Begin() const [member function]
cls.add_method('Begin',
'std::_List_const_iterator< ns3::AttributeConstructionList::Item >',
[],
is_const=True)
## attribute-construction-list.h (module 'core'): std::_List_const_iterator<ns3::AttributeConstructionList::Item> ns3::AttributeConstructionList::End() const [member function]
cls.add_method('End',
'std::_List_const_iterator< ns3::AttributeConstructionList::Item >',
[],
is_const=True)
## attribute-construction-list.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::AttributeConstructionList::Find(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function]
cls.add_method('Find',
'ns3::Ptr< ns3::AttributeValue >',
[param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_const=True)
return
def register_Ns3AttributeConstructionListItem_methods(root_module, cls):
## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList::Item::Item() [constructor]
cls.add_constructor([])
## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList::Item::Item(ns3::AttributeConstructionList::Item const & arg0) [copy constructor]
cls.add_constructor([param('ns3::AttributeConstructionList::Item const &', 'arg0')])
## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList::Item::checker [variable]
cls.add_instance_attribute('checker', 'ns3::Ptr< ns3::AttributeChecker const >', is_const=False)
## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList::Item::name [variable]
cls.add_instance_attribute('name', 'std::string', is_const=False)
## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList::Item::value [variable]
cls.add_instance_attribute('value', 'ns3::Ptr< ns3::AttributeValue >', is_const=False)
return
def register_Ns3CallbackBase_methods(root_module, cls):
## callback.h (module 'core'): ns3::CallbackBase::CallbackBase(ns3::CallbackBase const & arg0) [copy constructor]
cls.add_constructor([param('ns3::CallbackBase const &', 'arg0')])
## callback.h (module 'core'): ns3::CallbackBase::CallbackBase() [constructor]
cls.add_constructor([])
## callback.h (module 'core'): ns3::Ptr<ns3::CallbackImplBase> ns3::CallbackBase::GetImpl() const [member function]
cls.add_method('GetImpl',
'ns3::Ptr< ns3::CallbackImplBase >',
[],
is_const=True)
## callback.h (module 'core'): ns3::CallbackBase::CallbackBase(ns3::Ptr<ns3::CallbackImplBase> impl) [constructor]
cls.add_constructor([param('ns3::Ptr< ns3::CallbackImplBase >', 'impl')],
visibility='protected')
return
def register_Ns3Hasher_methods(root_module, cls):
## hash.h (module 'core'): ns3::Hasher::Hasher(ns3::Hasher const & arg0) [copy constructor]
cls.add_constructor([param('ns3::Hasher const &', 'arg0')])
## hash.h (module 'core'): ns3::Hasher::Hasher() [constructor]
cls.add_constructor([])
## hash.h (module 'core'): ns3::Hasher::Hasher(ns3::Ptr<ns3::Hash::Implementation> hp) [constructor]
cls.add_constructor([param('ns3::Ptr< ns3::Hash::Implementation >', 'hp')])
## hash.h (module 'core'): uint32_t ns3::Hasher::GetHash32(char const * buffer, size_t const size) [member function]
cls.add_method('GetHash32',
'uint32_t',
[param('char const *', 'buffer'), param('size_t const', 'size')])
## hash.h (module 'core'): uint32_t ns3::Hasher::GetHash32(std::string const s) [member function]
cls.add_method('GetHash32',
'uint32_t',
[param('std::string const', 's')])
## hash.h (module 'core'): uint64_t ns3::Hasher::GetHash64(char const * buffer, size_t const size) [member function]
cls.add_method('GetHash64',
'uint64_t',
[param('char const *', 'buffer'), param('size_t const', 'size')])
## hash.h (module 'core'): uint64_t ns3::Hasher::GetHash64(std::string const s) [member function]
cls.add_method('GetHash64',
'uint64_t',
[param('std::string const', 's')])
## hash.h (module 'core'): ns3::Hasher & ns3::Hasher::clear() [member function]
cls.add_method('clear',
'ns3::Hasher &',
[])
return
def register_Ns3ObjectBase_methods(root_module, cls):
## object-base.h (module 'core'): ns3::ObjectBase::ObjectBase() [constructor]
cls.add_constructor([])
## object-base.h (module 'core'): ns3::ObjectBase::ObjectBase(ns3::ObjectBase const & arg0) [copy constructor]
cls.add_constructor([param('ns3::ObjectBase const &', 'arg0')])
## object-base.h (module 'core'): void ns3::ObjectBase::GetAttribute(std::string name, ns3::AttributeValue & value) const [member function]
cls.add_method('GetAttribute',
'void',
[param('std::string', 'name'), param('ns3::AttributeValue &', 'value')],
is_const=True)
## object-base.h (module 'core'): bool ns3::ObjectBase::GetAttributeFailSafe(std::string name, ns3::AttributeValue & value) const [member function]
cls.add_method('GetAttributeFailSafe',
'bool',
[param('std::string', 'name'), param('ns3::AttributeValue &', 'value')],
is_const=True)
## object-base.h (module 'core'): ns3::TypeId ns3::ObjectBase::GetInstanceTypeId() const [member function]
cls.add_method('GetInstanceTypeId',
'ns3::TypeId',
[],
is_pure_virtual=True, is_const=True, is_virtual=True)
## object-base.h (module 'core'): static ns3::TypeId ns3::ObjectBase::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
## object-base.h (module 'core'): void ns3::ObjectBase::SetAttribute(std::string name, ns3::AttributeValue const & value) [member function]
cls.add_method('SetAttribute',
'void',
[param('std::string', 'name'), param('ns3::AttributeValue const &', 'value')])
## object-base.h (module 'core'): bool ns3::ObjectBase::SetAttributeFailSafe(std::string name, ns3::AttributeValue const & value) [member function]
cls.add_method('SetAttributeFailSafe',
'bool',
[param('std::string', 'name'), param('ns3::AttributeValue const &', 'value')])
## object-base.h (module 'core'): bool ns3::ObjectBase::TraceConnect(std::string name, std::string context, ns3::CallbackBase const & cb) [member function]
cls.add_method('TraceConnect',
'bool',
[param('std::string', 'name'), param('std::string', 'context'), param('ns3::CallbackBase const &', 'cb')])
## object-base.h (module 'core'): bool ns3::ObjectBase::TraceConnectWithoutContext(std::string name, ns3::CallbackBase const & cb) [member function]
cls.add_method('TraceConnectWithoutContext',
'bool',
[param('std::string', 'name'), param('ns3::CallbackBase const &', 'cb')])
## object-base.h (module 'core'): bool ns3::ObjectBase::TraceDisconnect(std::string name, std::string context, ns3::CallbackBase const & cb) [member function]
cls.add_method('TraceDisconnect',
'bool',
[param('std::string', 'name'), param('std::string', 'context'), param('ns3::CallbackBase const &', 'cb')])
## object-base.h (module 'core'): bool ns3::ObjectBase::TraceDisconnectWithoutContext(std::string name, ns3::CallbackBase const & cb) [member function]
cls.add_method('TraceDisconnectWithoutContext',
'bool',
[param('std::string', 'name'), param('ns3::CallbackBase const &', 'cb')])
## object-base.h (module 'core'): void ns3::ObjectBase::ConstructSelf(ns3::AttributeConstructionList const & attributes) [member function]
cls.add_method('ConstructSelf',
'void',
[param('ns3::AttributeConstructionList const &', 'attributes')],
visibility='protected')
## object-base.h (module 'core'): void ns3::ObjectBase::NotifyConstructionCompleted() [member function]
cls.add_method('NotifyConstructionCompleted',
'void',
[],
visibility='protected', is_virtual=True)
return
def register_Ns3ObjectDeleter_methods(root_module, cls):
## object.h (module 'core'): ns3::ObjectDeleter::ObjectDeleter() [constructor]
cls.add_constructor([])
## object.h (module 'core'): ns3::ObjectDeleter::ObjectDeleter(ns3::ObjectDeleter const & arg0) [copy constructor]
cls.add_constructor([param('ns3::ObjectDeleter const &', 'arg0')])
## object.h (module 'core'): static void ns3::ObjectDeleter::Delete(ns3::Object * object) [member function]
cls.add_method('Delete',
'void',
[param('ns3::Object *', 'object')],
is_static=True)
return
def register_Ns3SimpleRefCount__Ns3Object_Ns3ObjectBase_Ns3ObjectDeleter_methods(root_module, cls):
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::Object, ns3::ObjectBase, ns3::ObjectDeleter>::SimpleRefCount() [constructor]
cls.add_constructor([])
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::Object, ns3::ObjectBase, ns3::ObjectDeleter>::SimpleRefCount(ns3::SimpleRefCount<ns3::Object, ns3::ObjectBase, ns3::ObjectDeleter> const & o) [copy constructor]
cls.add_constructor([param('ns3::SimpleRefCount< ns3::Object, ns3::ObjectBase, ns3::ObjectDeleter > const &', 'o')])
## simple-ref-count.h (module 'core'): static void ns3::SimpleRefCount<ns3::Object, ns3::ObjectBase, ns3::ObjectDeleter>::Cleanup() [member function]
cls.add_method('Cleanup',
'void',
[],
is_static=True)
return
def register_Ns3TypeId_methods(root_module, cls):
cls.add_binary_comparison_operator('<')
cls.add_binary_comparison_operator('!=')
cls.add_output_stream_operator()
cls.add_binary_comparison_operator('==')
## type-id.h (module 'core'): ns3::TypeId::TypeId(char const * name) [constructor]
cls.add_constructor([param('char const *', 'name')])
## type-id.h (module 'core'): ns3::TypeId::TypeId() [constructor]
cls.add_constructor([])
## type-id.h (module 'core'): ns3::TypeId::TypeId(ns3::TypeId const & o) [copy constructor]
cls.add_constructor([param('ns3::TypeId const &', 'o')])
## type-id.h (module 'core'): ns3::TypeId ns3::TypeId::AddAttribute(std::string name, std::string help, ns3::AttributeValue const & initialValue, ns3::Ptr<ns3::AttributeAccessor const> accessor, ns3::Ptr<ns3::AttributeChecker const> checker, ns3::TypeId::SupportLevel supportLevel=::ns3::TypeId::SUPPORTED, std::string const & supportMsg="") [member function]
cls.add_method('AddAttribute',
'ns3::TypeId',
[param('std::string', 'name'), param('std::string', 'help'), param('ns3::AttributeValue const &', 'initialValue'), param('ns3::Ptr< ns3::AttributeAccessor const >', 'accessor'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker'), param('ns3::TypeId::SupportLevel', 'supportLevel', default_value='::ns3::TypeId::SUPPORTED'), param('std::string const &', 'supportMsg', default_value='""')])
## type-id.h (module 'core'): ns3::TypeId ns3::TypeId::AddAttribute(std::string name, std::string help, uint32_t flags, ns3::AttributeValue const & initialValue, ns3::Ptr<ns3::AttributeAccessor const> accessor, ns3::Ptr<ns3::AttributeChecker const> checker, ns3::TypeId::SupportLevel supportLevel=::ns3::TypeId::SUPPORTED, std::string const & supportMsg="") [member function]
cls.add_method('AddAttribute',
'ns3::TypeId',
[param('std::string', 'name'), param('std::string', 'help'), param('uint32_t', 'flags'), param('ns3::AttributeValue const &', 'initialValue'), param('ns3::Ptr< ns3::AttributeAccessor const >', 'accessor'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker'), param('ns3::TypeId::SupportLevel', 'supportLevel', default_value='::ns3::TypeId::SUPPORTED'), param('std::string const &', 'supportMsg', default_value='""')])
## type-id.h (module 'core'): ns3::TypeId ns3::TypeId::AddTraceSource(std::string name, std::string help, ns3::Ptr<ns3::TraceSourceAccessor const> accessor) [member function]
cls.add_method('AddTraceSource',
'ns3::TypeId',
[param('std::string', 'name'), param('std::string', 'help'), param('ns3::Ptr< ns3::TraceSourceAccessor const >', 'accessor')],
deprecated=True)
## type-id.h (module 'core'): ns3::TypeId ns3::TypeId::AddTraceSource(std::string name, std::string help, ns3::Ptr<ns3::TraceSourceAccessor const> accessor, std::string callback, ns3::TypeId::SupportLevel supportLevel=::ns3::TypeId::SUPPORTED, std::string const & supportMsg="") [member function]
cls.add_method('AddTraceSource',
'ns3::TypeId',
[param('std::string', 'name'), param('std::string', 'help'), param('ns3::Ptr< ns3::TraceSourceAccessor const >', 'accessor'), param('std::string', 'callback'), param('ns3::TypeId::SupportLevel', 'supportLevel', default_value='::ns3::TypeId::SUPPORTED'), param('std::string const &', 'supportMsg', default_value='""')])
## type-id.h (module 'core'): ns3::TypeId::AttributeInformation ns3::TypeId::GetAttribute(uint32_t i) const [member function]
cls.add_method('GetAttribute',
'ns3::TypeId::AttributeInformation',
[param('uint32_t', 'i')],
is_const=True)
## type-id.h (module 'core'): std::string ns3::TypeId::GetAttributeFullName(uint32_t i) const [member function]
cls.add_method('GetAttributeFullName',
'std::string',
[param('uint32_t', 'i')],
is_const=True)
## type-id.h (module 'core'): uint32_t ns3::TypeId::GetAttributeN() const [member function]
cls.add_method('GetAttributeN',
'uint32_t',
[],
is_const=True)
## type-id.h (module 'core'): ns3::Callback<ns3::ObjectBase*,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty> ns3::TypeId::GetConstructor() const [member function]
cls.add_method('GetConstructor',
'ns3::Callback< ns3::ObjectBase *, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >',
[],
is_const=True)
## type-id.h (module 'core'): std::string ns3::TypeId::GetGroupName() const [member function]
cls.add_method('GetGroupName',
'std::string',
[],
is_const=True)
## type-id.h (module 'core'): uint32_t ns3::TypeId::GetHash() const [member function]
cls.add_method('GetHash',
'uint32_t',
[],
is_const=True)
## type-id.h (module 'core'): std::string ns3::TypeId::GetName() const [member function]
cls.add_method('GetName',
'std::string',
[],
is_const=True)
## type-id.h (module 'core'): ns3::TypeId ns3::TypeId::GetParent() const [member function]
cls.add_method('GetParent',
'ns3::TypeId',
[],
is_const=True)
## type-id.h (module 'core'): static ns3::TypeId ns3::TypeId::GetRegistered(uint32_t i) [member function]
cls.add_method('GetRegistered',
'ns3::TypeId',
[param('uint32_t', 'i')],
is_static=True)
## type-id.h (module 'core'): static uint32_t ns3::TypeId::GetRegisteredN() [member function]
cls.add_method('GetRegisteredN',
'uint32_t',
[],
is_static=True)
## type-id.h (module 'core'): std::size_t ns3::TypeId::GetSize() const [member function]
cls.add_method('GetSize',
'std::size_t',
[],
is_const=True)
## type-id.h (module 'core'): ns3::TypeId::TraceSourceInformation ns3::TypeId::GetTraceSource(uint32_t i) const [member function]
cls.add_method('GetTraceSource',
'ns3::TypeId::TraceSourceInformation',
[param('uint32_t', 'i')],
is_const=True)
## type-id.h (module 'core'): uint32_t ns3::TypeId::GetTraceSourceN() const [member function]
cls.add_method('GetTraceSourceN',
'uint32_t',
[],
is_const=True)
## type-id.h (module 'core'): uint16_t ns3::TypeId::GetUid() const [member function]
cls.add_method('GetUid',
'uint16_t',
[],
is_const=True)
## type-id.h (module 'core'): bool ns3::TypeId::HasConstructor() const [member function]
cls.add_method('HasConstructor',
'bool',
[],
is_const=True)
## type-id.h (module 'core'): bool ns3::TypeId::HasParent() const [member function]
cls.add_method('HasParent',
'bool',
[],
is_const=True)
## type-id.h (module 'core'): ns3::TypeId ns3::TypeId::HideFromDocumentation() [member function]
cls.add_method('HideFromDocumentation',
'ns3::TypeId',
[])
## type-id.h (module 'core'): bool ns3::TypeId::IsChildOf(ns3::TypeId other) const [member function]
cls.add_method('IsChildOf',
'bool',
[param('ns3::TypeId', 'other')],
is_const=True)
## type-id.h (module 'core'): bool ns3::TypeId::LookupAttributeByName(std::string name, ns3::TypeId::AttributeInformation * info) const [member function]
cls.add_method('LookupAttributeByName',
'bool',
[param('std::string', 'name'), param('ns3::TypeId::AttributeInformation *', 'info', transfer_ownership=False)],
is_const=True)
## type-id.h (module 'core'): static ns3::TypeId ns3::TypeId::LookupByHash(uint32_t hash) [member function]
cls.add_method('LookupByHash',
'ns3::TypeId',
[param('uint32_t', 'hash')],
is_static=True)
## type-id.h (module 'core'): static bool ns3::TypeId::LookupByHashFailSafe(uint32_t hash, ns3::TypeId * tid) [member function]
cls.add_method('LookupByHashFailSafe',
'bool',
[param('uint32_t', 'hash'), param('ns3::TypeId *', 'tid')],
is_static=True)
## type-id.h (module 'core'): static ns3::TypeId ns3::TypeId::LookupByName(std::string name) [member function]
cls.add_method('LookupByName',
'ns3::TypeId',
[param('std::string', 'name')],
is_static=True)
## type-id.h (module 'core'): ns3::Ptr<ns3::TraceSourceAccessor const> ns3::TypeId::LookupTraceSourceByName(std::string name) const [member function]
cls.add_method('LookupTraceSourceByName',
'ns3::Ptr< ns3::TraceSourceAccessor const >',
[param('std::string', 'name')],
is_const=True)
## type-id.h (module 'core'): ns3::Ptr<ns3::TraceSourceAccessor const> ns3::TypeId::LookupTraceSourceByName(std::string name, ns3::TypeId::TraceSourceInformation * info) const [member function]
cls.add_method('LookupTraceSourceByName',
'ns3::Ptr< ns3::TraceSourceAccessor const >',
[param('std::string', 'name'), param('ns3::TypeId::TraceSourceInformation *', 'info')],
is_const=True)
## type-id.h (module 'core'): bool ns3::TypeId::MustHideFromDocumentation() const [member function]
cls.add_method('MustHideFromDocumentation',
'bool',
[],
is_const=True)
## type-id.h (module 'core'): bool ns3::TypeId::SetAttributeInitialValue(uint32_t i, ns3::Ptr<ns3::AttributeValue const> initialValue) [member function]
cls.add_method('SetAttributeInitialValue',
'bool',
[param('uint32_t', 'i'), param('ns3::Ptr< ns3::AttributeValue const >', 'initialValue')])
## type-id.h (module 'core'): ns3::TypeId ns3::TypeId::SetGroupName(std::string groupName) [member function]
cls.add_method('SetGroupName',
'ns3::TypeId',
[param('std::string', 'groupName')])
## type-id.h (module 'core'): ns3::TypeId ns3::TypeId::SetParent(ns3::TypeId tid) [member function]
cls.add_method('SetParent',
'ns3::TypeId',
[param('ns3::TypeId', 'tid')])
## type-id.h (module 'core'): ns3::TypeId ns3::TypeId::SetSize(std::size_t size) [member function]
cls.add_method('SetSize',
'ns3::TypeId',
[param('std::size_t', 'size')])
## type-id.h (module 'core'): void ns3::TypeId::SetUid(uint16_t uid) [member function]
cls.add_method('SetUid',
'void',
[param('uint16_t', 'uid')])
return
def register_Ns3TypeIdAttributeInformation_methods(root_module, cls):
## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::AttributeInformation() [constructor]
cls.add_constructor([])
## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::AttributeInformation(ns3::TypeId::AttributeInformation const & arg0) [copy constructor]
cls.add_constructor([param('ns3::TypeId::AttributeInformation const &', 'arg0')])
## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::accessor [variable]
cls.add_instance_attribute('accessor', 'ns3::Ptr< ns3::AttributeAccessor const >', is_const=False)
## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::checker [variable]
cls.add_instance_attribute('checker', 'ns3::Ptr< ns3::AttributeChecker const >', is_const=False)
## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::flags [variable]
cls.add_instance_attribute('flags', 'uint32_t', is_const=False)
## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::help [variable]
cls.add_instance_attribute('help', 'std::string', is_const=False)
## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::initialValue [variable]
cls.add_instance_attribute('initialValue', 'ns3::Ptr< ns3::AttributeValue const >', is_const=False)
## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::name [variable]
cls.add_instance_attribute('name', 'std::string', is_const=False)
## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::originalInitialValue [variable]
cls.add_instance_attribute('originalInitialValue', 'ns3::Ptr< ns3::AttributeValue const >', is_const=False)
## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::supportLevel [variable]
cls.add_instance_attribute('supportLevel', 'ns3::TypeId::SupportLevel', is_const=False)
## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::supportMsg [variable]
cls.add_instance_attribute('supportMsg', 'std::string', is_const=False)
return
def register_Ns3TypeIdTraceSourceInformation_methods(root_module, cls):
## type-id.h (module 'core'): ns3::TypeId::TraceSourceInformation::TraceSourceInformation() [constructor]
cls.add_constructor([])
## type-id.h (module 'core'): ns3::TypeId::TraceSourceInformation::TraceSourceInformation(ns3::TypeId::TraceSourceInformation const & arg0) [copy constructor]
cls.add_constructor([param('ns3::TypeId::TraceSourceInformation const &', 'arg0')])
## type-id.h (module 'core'): ns3::TypeId::TraceSourceInformation::accessor [variable]
cls.add_instance_attribute('accessor', 'ns3::Ptr< ns3::TraceSourceAccessor const >', is_const=False)
## type-id.h (module 'core'): ns3::TypeId::TraceSourceInformation::callback [variable]
cls.add_instance_attribute('callback', 'std::string', is_const=False)
## type-id.h (module 'core'): ns3::TypeId::TraceSourceInformation::help [variable]
cls.add_instance_attribute('help', 'std::string', is_const=False)
## type-id.h (module 'core'): ns3::TypeId::TraceSourceInformation::name [variable]
cls.add_instance_attribute('name', 'std::string', is_const=False)
## type-id.h (module 'core'): ns3::TypeId::TraceSourceInformation::supportLevel [variable]
cls.add_instance_attribute('supportLevel', 'ns3::TypeId::SupportLevel', is_const=False)
## type-id.h (module 'core'): ns3::TypeId::TraceSourceInformation::supportMsg [variable]
cls.add_instance_attribute('supportMsg', 'std::string', is_const=False)
return
def register_Ns3Vector2D_methods(root_module, cls):
cls.add_output_stream_operator()
## vector.h (module 'core'): ns3::Vector2D::Vector2D(ns3::Vector2D const & arg0) [copy constructor]
cls.add_constructor([param('ns3::Vector2D const &', 'arg0')])
## vector.h (module 'core'): ns3::Vector2D::Vector2D(double _x, double _y) [constructor]
cls.add_constructor([param('double', '_x'), param('double', '_y')])
## vector.h (module 'core'): ns3::Vector2D::Vector2D() [constructor]
cls.add_constructor([])
## vector.h (module 'core'): ns3::Vector2D::x [variable]
cls.add_instance_attribute('x', 'double', is_const=False)
## vector.h (module 'core'): ns3::Vector2D::y [variable]
cls.add_instance_attribute('y', 'double', is_const=False)
return
def register_Ns3Vector3D_methods(root_module, cls):
cls.add_output_stream_operator()
## vector.h (module 'core'): ns3::Vector3D::Vector3D(ns3::Vector3D const & arg0) [copy constructor]
cls.add_constructor([param('ns3::Vector3D const &', 'arg0')])
## vector.h (module 'core'): ns3::Vector3D::Vector3D(double _x, double _y, double _z) [constructor]
cls.add_constructor([param('double', '_x'), param('double', '_y'), param('double', '_z')])
## vector.h (module 'core'): ns3::Vector3D::Vector3D() [constructor]
cls.add_constructor([])
## vector.h (module 'core'): ns3::Vector3D::x [variable]
cls.add_instance_attribute('x', 'double', is_const=False)
## vector.h (module 'core'): ns3::Vector3D::y [variable]
cls.add_instance_attribute('y', 'double', is_const=False)
## vector.h (module 'core'): ns3::Vector3D::z [variable]
cls.add_instance_attribute('z', 'double', is_const=False)
return
def register_Ns3Empty_methods(root_module, cls):
## empty.h (module 'core'): ns3::empty::empty() [constructor]
cls.add_constructor([])
## empty.h (module 'core'): ns3::empty::empty(ns3::empty const & arg0) [copy constructor]
cls.add_constructor([param('ns3::empty const &', 'arg0')])
return
def register_Ns3Object_methods(root_module, cls):
## object.h (module 'core'): ns3::Object::Object() [constructor]
cls.add_constructor([])
## object.h (module 'core'): void ns3::Object::AggregateObject(ns3::Ptr<ns3::Object> other) [member function]
cls.add_method('AggregateObject',
'void',
[param('ns3::Ptr< ns3::Object >', 'other')])
## object.h (module 'core'): void ns3::Object::Dispose() [member function]
cls.add_method('Dispose',
'void',
[])
## object.h (module 'core'): ns3::Object::AggregateIterator ns3::Object::GetAggregateIterator() const [member function]
cls.add_method('GetAggregateIterator',
'ns3::Object::AggregateIterator',
[],
is_const=True)
## object.h (module 'core'): ns3::TypeId ns3::Object::GetInstanceTypeId() const [member function]
cls.add_method('GetInstanceTypeId',
'ns3::TypeId',
[],
is_const=True, is_virtual=True)
## object.h (module 'core'): static ns3::TypeId ns3::Object::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
## object.h (module 'core'): void ns3::Object::Initialize() [member function]
cls.add_method('Initialize',
'void',
[])
## object.h (module 'core'): bool ns3::Object::IsInitialized() const [member function]
cls.add_method('IsInitialized',
'bool',
[],
is_const=True)
## object.h (module 'core'): ns3::Object::Object(ns3::Object const & o) [copy constructor]
cls.add_constructor([param('ns3::Object const &', 'o')],
visibility='protected')
## object.h (module 'core'): void ns3::Object::DoDispose() [member function]
cls.add_method('DoDispose',
'void',
[],
visibility='protected', is_virtual=True)
## object.h (module 'core'): void ns3::Object::DoInitialize() [member function]
cls.add_method('DoInitialize',
'void',
[],
visibility='protected', is_virtual=True)
## object.h (module 'core'): void ns3::Object::NotifyNewAggregate() [member function]
cls.add_method('NotifyNewAggregate',
'void',
[],
visibility='protected', is_virtual=True)
return
def register_Ns3ObjectAggregateIterator_methods(root_module, cls):
## object.h (module 'core'): ns3::Object::AggregateIterator::AggregateIterator(ns3::Object::AggregateIterator const & arg0) [copy constructor]
cls.add_constructor([param('ns3::Object::AggregateIterator const &', 'arg0')])
## object.h (module 'core'): ns3::Object::AggregateIterator::AggregateIterator() [constructor]
cls.add_constructor([])
## object.h (module 'core'): bool ns3::Object::AggregateIterator::HasNext() const [member function]
cls.add_method('HasNext',
'bool',
[],
is_const=True)
## object.h (module 'core'): ns3::Ptr<ns3::Object const> ns3::Object::AggregateIterator::Next() [member function]
cls.add_method('Next',
'ns3::Ptr< ns3::Object const >',
[])
return
def register_Ns3SimpleRefCount__Ns3AttributeAccessor_Ns3Empty_Ns3DefaultDeleter__lt__ns3AttributeAccessor__gt___methods(root_module, cls):
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter<ns3::AttributeAccessor> >::SimpleRefCount() [constructor]
cls.add_constructor([])
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter<ns3::AttributeAccessor> >::SimpleRefCount(ns3::SimpleRefCount<ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter<ns3::AttributeAccessor> > const & o) [copy constructor]
cls.add_constructor([param('ns3::SimpleRefCount< ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter< ns3::AttributeAccessor > > const &', 'o')])
## simple-ref-count.h (module 'core'): static void ns3::SimpleRefCount<ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter<ns3::AttributeAccessor> >::Cleanup() [member function]
cls.add_method('Cleanup',
'void',
[],
is_static=True)
return
def register_Ns3SimpleRefCount__Ns3AttributeChecker_Ns3Empty_Ns3DefaultDeleter__lt__ns3AttributeChecker__gt___methods(root_module, cls):
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter<ns3::AttributeChecker> >::SimpleRefCount() [constructor]
cls.add_constructor([])
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter<ns3::AttributeChecker> >::SimpleRefCount(ns3::SimpleRefCount<ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter<ns3::AttributeChecker> > const & o) [copy constructor]
cls.add_constructor([param('ns3::SimpleRefCount< ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter< ns3::AttributeChecker > > const &', 'o')])
## simple-ref-count.h (module 'core'): static void ns3::SimpleRefCount<ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter<ns3::AttributeChecker> >::Cleanup() [member function]
cls.add_method('Cleanup',
'void',
[],
is_static=True)
return
def register_Ns3SimpleRefCount__Ns3AttributeValue_Ns3Empty_Ns3DefaultDeleter__lt__ns3AttributeValue__gt___methods(root_module, cls):
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter<ns3::AttributeValue> >::SimpleRefCount() [constructor]
cls.add_constructor([])
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter<ns3::AttributeValue> >::SimpleRefCount(ns3::SimpleRefCount<ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter<ns3::AttributeValue> > const & o) [copy constructor]
cls.add_constructor([param('ns3::SimpleRefCount< ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter< ns3::AttributeValue > > const &', 'o')])
## simple-ref-count.h (module 'core'): static void ns3::SimpleRefCount<ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter<ns3::AttributeValue> >::Cleanup() [member function]
cls.add_method('Cleanup',
'void',
[],
is_static=True)
return
def register_Ns3SimpleRefCount__Ns3CallbackImplBase_Ns3Empty_Ns3DefaultDeleter__lt__ns3CallbackImplBase__gt___methods(root_module, cls):
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter<ns3::CallbackImplBase> >::SimpleRefCount() [constructor]
cls.add_constructor([])
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter<ns3::CallbackImplBase> >::SimpleRefCount(ns3::SimpleRefCount<ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter<ns3::CallbackImplBase> > const & o) [copy constructor]
cls.add_constructor([param('ns3::SimpleRefCount< ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter< ns3::CallbackImplBase > > const &', 'o')])
## simple-ref-count.h (module 'core'): static void ns3::SimpleRefCount<ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter<ns3::CallbackImplBase> >::Cleanup() [member function]
cls.add_method('Cleanup',
'void',
[],
is_static=True)
return
def register_Ns3SimpleRefCount__Ns3HashImplementation_Ns3Empty_Ns3DefaultDeleter__lt__ns3HashImplementation__gt___methods(root_module, cls):
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::Hash::Implementation, ns3::empty, ns3::DefaultDeleter<ns3::Hash::Implementation> >::SimpleRefCount() [constructor]
cls.add_constructor([])
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::Hash::Implementation, ns3::empty, ns3::DefaultDeleter<ns3::Hash::Implementation> >::SimpleRefCount(ns3::SimpleRefCount<ns3::Hash::Implementation, ns3::empty, ns3::DefaultDeleter<ns3::Hash::Implementation> > const & o) [copy constructor]
cls.add_constructor([param('ns3::SimpleRefCount< ns3::Hash::Implementation, ns3::empty, ns3::DefaultDeleter< ns3::Hash::Implementation > > const &', 'o')])
## simple-ref-count.h (module 'core'): static void ns3::SimpleRefCount<ns3::Hash::Implementation, ns3::empty, ns3::DefaultDeleter<ns3::Hash::Implementation> >::Cleanup() [member function]
cls.add_method('Cleanup',
'void',
[],
is_static=True)
return
def register_Ns3SimpleRefCount__Ns3TraceSourceAccessor_Ns3Empty_Ns3DefaultDeleter__lt__ns3TraceSourceAccessor__gt___methods(root_module, cls):
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter<ns3::TraceSourceAccessor> >::SimpleRefCount() [constructor]
cls.add_constructor([])
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter<ns3::TraceSourceAccessor> >::SimpleRefCount(ns3::SimpleRefCount<ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter<ns3::TraceSourceAccessor> > const & o) [copy constructor]
cls.add_constructor([param('ns3::SimpleRefCount< ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter< ns3::TraceSourceAccessor > > const &', 'o')])
## simple-ref-count.h (module 'core'): static void ns3::SimpleRefCount<ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter<ns3::TraceSourceAccessor> >::Cleanup() [member function]
cls.add_method('Cleanup',
'void',
[],
is_static=True)
return
def register_Ns3TraceSourceAccessor_methods(root_module, cls):
## trace-source-accessor.h (module 'core'): ns3::TraceSourceAccessor::TraceSourceAccessor(ns3::TraceSourceAccessor const & arg0) [copy constructor]
cls.add_constructor([param('ns3::TraceSourceAccessor const &', 'arg0')])
## trace-source-accessor.h (module 'core'): ns3::TraceSourceAccessor::TraceSourceAccessor() [constructor]
cls.add_constructor([])
## trace-source-accessor.h (module 'core'): bool ns3::TraceSourceAccessor::Connect(ns3::ObjectBase * obj, std::string context, ns3::CallbackBase const & cb) const [member function]
cls.add_method('Connect',
'bool',
[param('ns3::ObjectBase *', 'obj', transfer_ownership=False), param('std::string', 'context'), param('ns3::CallbackBase const &', 'cb')],
is_pure_virtual=True, is_const=True, is_virtual=True)
## trace-source-accessor.h (module 'core'): bool ns3::TraceSourceAccessor::ConnectWithoutContext(ns3::ObjectBase * obj, ns3::CallbackBase const & cb) const [member function]
cls.add_method('ConnectWithoutContext',
'bool',
[param('ns3::ObjectBase *', 'obj', transfer_ownership=False), param('ns3::CallbackBase const &', 'cb')],
is_pure_virtual=True, is_const=True, is_virtual=True)
## trace-source-accessor.h (module 'core'): bool ns3::TraceSourceAccessor::Disconnect(ns3::ObjectBase * obj, std::string context, ns3::CallbackBase const & cb) const [member function]
cls.add_method('Disconnect',
'bool',
[param('ns3::ObjectBase *', 'obj', transfer_ownership=False), param('std::string', 'context'), param('ns3::CallbackBase const &', 'cb')],
is_pure_virtual=True, is_const=True, is_virtual=True)
## trace-source-accessor.h (module 'core'): bool ns3::TraceSourceAccessor::DisconnectWithoutContext(ns3::ObjectBase * obj, ns3::CallbackBase const & cb) const [member function]
cls.add_method('DisconnectWithoutContext',
'bool',
[param('ns3::ObjectBase *', 'obj', transfer_ownership=False), param('ns3::CallbackBase const &', 'cb')],
is_pure_virtual=True, is_const=True, is_virtual=True)
return
def register_Ns3AntennaModel_methods(root_module, cls):
## antenna-model.h (module 'antenna'): ns3::AntennaModel::AntennaModel(ns3::AntennaModel const & arg0) [copy constructor]
cls.add_constructor([param('ns3::AntennaModel const &', 'arg0')])
## antenna-model.h (module 'antenna'): ns3::AntennaModel::AntennaModel() [constructor]
cls.add_constructor([])
## antenna-model.h (module 'antenna'): double ns3::AntennaModel::GetGainDb(ns3::Angles a) [member function]
cls.add_method('GetGainDb',
'double',
[param('ns3::Angles', 'a')],
is_pure_virtual=True, is_virtual=True)
## antenna-model.h (module 'antenna'): static ns3::TypeId ns3::AntennaModel::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
return
def register_Ns3AttributeAccessor_methods(root_module, cls):
## attribute.h (module 'core'): ns3::AttributeAccessor::AttributeAccessor(ns3::AttributeAccessor const & arg0) [copy constructor]
cls.add_constructor([param('ns3::AttributeAccessor const &', 'arg0')])
## attribute.h (module 'core'): ns3::AttributeAccessor::AttributeAccessor() [constructor]
cls.add_constructor([])
## attribute.h (module 'core'): bool ns3::AttributeAccessor::Get(ns3::ObjectBase const * object, ns3::AttributeValue & attribute) const [member function]
cls.add_method('Get',
'bool',
[param('ns3::ObjectBase const *', 'object'), param('ns3::AttributeValue &', 'attribute')],
is_pure_virtual=True, is_const=True, is_virtual=True)
## attribute.h (module 'core'): bool ns3::AttributeAccessor::HasGetter() const [member function]
cls.add_method('HasGetter',
'bool',
[],
is_pure_virtual=True, is_const=True, is_virtual=True)
## attribute.h (module 'core'): bool ns3::AttributeAccessor::HasSetter() const [member function]
cls.add_method('HasSetter',
'bool',
[],
is_pure_virtual=True, is_const=True, is_virtual=True)
## attribute.h (module 'core'): bool ns3::AttributeAccessor::Set(ns3::ObjectBase * object, ns3::AttributeValue const & value) const [member function]
cls.add_method('Set',
'bool',
[param('ns3::ObjectBase *', 'object', transfer_ownership=False), param('ns3::AttributeValue const &', 'value')],
is_pure_virtual=True, is_const=True, is_virtual=True)
return
def register_Ns3AttributeChecker_methods(root_module, cls):
## attribute.h (module 'core'): ns3::AttributeChecker::AttributeChecker(ns3::AttributeChecker const & arg0) [copy constructor]
cls.add_constructor([param('ns3::AttributeChecker const &', 'arg0')])
## attribute.h (module 'core'): ns3::AttributeChecker::AttributeChecker() [constructor]
cls.add_constructor([])
## attribute.h (module 'core'): bool ns3::AttributeChecker::Check(ns3::AttributeValue const & value) const [member function]
cls.add_method('Check',
'bool',
[param('ns3::AttributeValue const &', 'value')],
is_pure_virtual=True, is_const=True, is_virtual=True)
## attribute.h (module 'core'): bool ns3::AttributeChecker::Copy(ns3::AttributeValue const & source, ns3::AttributeValue & destination) const [member function]
cls.add_method('Copy',
'bool',
[param('ns3::AttributeValue const &', 'source'), param('ns3::AttributeValue &', 'destination')],
is_pure_virtual=True, is_const=True, is_virtual=True)
## attribute.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::AttributeChecker::Create() const [member function]
cls.add_method('Create',
'ns3::Ptr< ns3::AttributeValue >',
[],
is_pure_virtual=True, is_const=True, is_virtual=True)
## attribute.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::AttributeChecker::CreateValidValue(ns3::AttributeValue const & value) const [member function]
cls.add_method('CreateValidValue',
'ns3::Ptr< ns3::AttributeValue >',
[param('ns3::AttributeValue const &', 'value')],
is_const=True)
## attribute.h (module 'core'): std::string ns3::AttributeChecker::GetUnderlyingTypeInformation() const [member function]
cls.add_method('GetUnderlyingTypeInformation',
'std::string',
[],
is_pure_virtual=True, is_const=True, is_virtual=True)
## attribute.h (module 'core'): std::string ns3::AttributeChecker::GetValueTypeName() const [member function]
cls.add_method('GetValueTypeName',
'std::string',
[],
is_pure_virtual=True, is_const=True, is_virtual=True)
## attribute.h (module 'core'): bool ns3::AttributeChecker::HasUnderlyingTypeInformation() const [member function]
cls.add_method('HasUnderlyingTypeInformation',
'bool',
[],
is_pure_virtual=True, is_const=True, is_virtual=True)
return
def register_Ns3AttributeValue_methods(root_module, cls):
## attribute.h (module 'core'): ns3::AttributeValue::AttributeValue(ns3::AttributeValue const & arg0) [copy constructor]
cls.add_constructor([param('ns3::AttributeValue const &', 'arg0')])
## attribute.h (module 'core'): ns3::AttributeValue::AttributeValue() [constructor]
cls.add_constructor([])
## attribute.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::AttributeValue::Copy() const [member function]
cls.add_method('Copy',
'ns3::Ptr< ns3::AttributeValue >',
[],
is_pure_virtual=True, is_const=True, is_virtual=True)
## attribute.h (module 'core'): bool ns3::AttributeValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function]
cls.add_method('DeserializeFromString',
'bool',
[param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_pure_virtual=True, is_virtual=True)
## attribute.h (module 'core'): std::string ns3::AttributeValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function]
cls.add_method('SerializeToString',
'std::string',
[param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_pure_virtual=True, is_const=True, is_virtual=True)
return
def register_Ns3CallbackChecker_methods(root_module, cls):
## callback.h (module 'core'): ns3::CallbackChecker::CallbackChecker() [constructor]
cls.add_constructor([])
## callback.h (module 'core'): ns3::CallbackChecker::CallbackChecker(ns3::CallbackChecker const & arg0) [copy constructor]
cls.add_constructor([param('ns3::CallbackChecker const &', 'arg0')])
return
def register_Ns3CallbackImplBase_methods(root_module, cls):
## callback.h (module 'core'): ns3::CallbackImplBase::CallbackImplBase() [constructor]
cls.add_constructor([])
## callback.h (module 'core'): ns3::CallbackImplBase::CallbackImplBase(ns3::CallbackImplBase const & arg0) [copy constructor]
cls.add_constructor([param('ns3::CallbackImplBase const &', 'arg0')])
## callback.h (module 'core'): std::string ns3::CallbackImplBase::GetTypeid() const [member function]
cls.add_method('GetTypeid',
'std::string',
[],
is_pure_virtual=True, is_const=True, is_virtual=True)
## callback.h (module 'core'): bool ns3::CallbackImplBase::IsEqual(ns3::Ptr<ns3::CallbackImplBase const> other) const [member function]
cls.add_method('IsEqual',
'bool',
[param('ns3::Ptr< ns3::CallbackImplBase const >', 'other')],
is_pure_virtual=True, is_const=True, is_virtual=True)
## callback.h (module 'core'): static std::string ns3::CallbackImplBase::Demangle(std::string const & mangled) [member function]
cls.add_method('Demangle',
'std::string',
[param('std::string const &', 'mangled')],
is_static=True, visibility='protected')
return
def register_Ns3CallbackValue_methods(root_module, cls):
## callback.h (module 'core'): ns3::CallbackValue::CallbackValue(ns3::CallbackValue const & arg0) [copy constructor]
cls.add_constructor([param('ns3::CallbackValue const &', 'arg0')])
## callback.h (module 'core'): ns3::CallbackValue::CallbackValue() [constructor]
cls.add_constructor([])
## callback.h (module 'core'): ns3::CallbackValue::CallbackValue(ns3::CallbackBase const & base) [constructor]
cls.add_constructor([param('ns3::CallbackBase const &', 'base')])
## callback.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::CallbackValue::Copy() const [member function]
cls.add_method('Copy',
'ns3::Ptr< ns3::AttributeValue >',
[],
is_const=True, is_virtual=True)
## callback.h (module 'core'): bool ns3::CallbackValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function]
cls.add_method('DeserializeFromString',
'bool',
[param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_virtual=True)
## callback.h (module 'core'): std::string ns3::CallbackValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function]
cls.add_method('SerializeToString',
'std::string',
[param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_const=True, is_virtual=True)
## callback.h (module 'core'): void ns3::CallbackValue::Set(ns3::CallbackBase base) [member function]
cls.add_method('Set',
'void',
[param('ns3::CallbackBase', 'base')])
return
def register_Ns3CosineAntennaModel_methods(root_module, cls):
## cosine-antenna-model.h (module 'antenna'): ns3::CosineAntennaModel::CosineAntennaModel() [constructor]
cls.add_constructor([])
## cosine-antenna-model.h (module 'antenna'): ns3::CosineAntennaModel::CosineAntennaModel(ns3::CosineAntennaModel const & arg0) [copy constructor]
cls.add_constructor([param('ns3::CosineAntennaModel const &', 'arg0')])
## cosine-antenna-model.h (module 'antenna'): double ns3::CosineAntennaModel::GetBeamwidth() const [member function]
cls.add_method('GetBeamwidth',
'double',
[],
is_const=True)
## cosine-antenna-model.h (module 'antenna'): double ns3::CosineAntennaModel::GetGainDb(ns3::Angles a) [member function]
cls.add_method('GetGainDb',
'double',
[param('ns3::Angles', 'a')],
is_virtual=True)
## cosine-antenna-model.h (module 'antenna'): double ns3::CosineAntennaModel::GetOrientation() const [member function]
cls.add_method('GetOrientation',
'double',
[],
is_const=True)
## cosine-antenna-model.h (module 'antenna'): static ns3::TypeId ns3::CosineAntennaModel::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
## cosine-antenna-model.h (module 'antenna'): void ns3::CosineAntennaModel::SetBeamwidth(double beamwidthDegrees) [member function]
cls.add_method('SetBeamwidth',
'void',
[param('double', 'beamwidthDegrees')])
## cosine-antenna-model.h (module 'antenna'): void ns3::CosineAntennaModel::SetOrientation(double orientationDegrees) [member function]
cls.add_method('SetOrientation',
'void',
[param('double', 'orientationDegrees')])
return
def register_Ns3EmptyAttributeAccessor_methods(root_module, cls):
## attribute.h (module 'core'): ns3::EmptyAttributeAccessor::EmptyAttributeAccessor(ns3::EmptyAttributeAccessor const & arg0) [copy constructor]
cls.add_constructor([param('ns3::EmptyAttributeAccessor const &', 'arg0')])
## attribute.h (module 'core'): ns3::EmptyAttributeAccessor::EmptyAttributeAccessor() [constructor]
cls.add_constructor([])
## attribute.h (module 'core'): bool ns3::EmptyAttributeAccessor::Get(ns3::ObjectBase const * object, ns3::AttributeValue & attribute) const [member function]
cls.add_method('Get',
'bool',
[param('ns3::ObjectBase const *', 'object'), param('ns3::AttributeValue &', 'attribute')],
is_const=True, is_virtual=True)
## attribute.h (module 'core'): bool ns3::EmptyAttributeAccessor::HasGetter() const [member function]
cls.add_method('HasGetter',
'bool',
[],
is_const=True, is_virtual=True)
## attribute.h (module 'core'): bool ns3::EmptyAttributeAccessor::HasSetter() const [member function]
cls.add_method('HasSetter',
'bool',
[],
is_const=True, is_virtual=True)
## attribute.h (module 'core'): bool ns3::EmptyAttributeAccessor::Set(ns3::ObjectBase * object, ns3::AttributeValue const & value) const [member function]
cls.add_method('Set',
'bool',
[param('ns3::ObjectBase *', 'object'), param('ns3::AttributeValue const &', 'value')],
is_const=True, is_virtual=True)
return
def register_Ns3EmptyAttributeChecker_methods(root_module, cls):
## attribute.h (module 'core'): ns3::EmptyAttributeChecker::EmptyAttributeChecker(ns3::EmptyAttributeChecker const & arg0) [copy constructor]
cls.add_constructor([param('ns3::EmptyAttributeChecker const &', 'arg0')])
## attribute.h (module 'core'): ns3::EmptyAttributeChecker::EmptyAttributeChecker() [constructor]
cls.add_constructor([])
## attribute.h (module 'core'): bool ns3::EmptyAttributeChecker::Check(ns3::AttributeValue const & value) const [member function]
cls.add_method('Check',
'bool',
[param('ns3::AttributeValue const &', 'value')],
is_const=True, is_virtual=True)
## attribute.h (module 'core'): bool ns3::EmptyAttributeChecker::Copy(ns3::AttributeValue const & source, ns3::AttributeValue & destination) const [member function]
cls.add_method('Copy',
'bool',
[param('ns3::AttributeValue const &', 'source'), param('ns3::AttributeValue &', 'destination')],
is_const=True, is_virtual=True)
## attribute.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::EmptyAttributeChecker::Create() const [member function]
cls.add_method('Create',
'ns3::Ptr< ns3::AttributeValue >',
[],
is_const=True, is_virtual=True)
## attribute.h (module 'core'): std::string ns3::EmptyAttributeChecker::GetUnderlyingTypeInformation() const [member function]
cls.add_method('GetUnderlyingTypeInformation',
'std::string',
[],
is_const=True, is_virtual=True)
## attribute.h (module 'core'): std::string ns3::EmptyAttributeChecker::GetValueTypeName() const [member function]
cls.add_method('GetValueTypeName',
'std::string',
[],
is_const=True, is_virtual=True)
## attribute.h (module 'core'): bool ns3::EmptyAttributeChecker::HasUnderlyingTypeInformation() const [member function]
cls.add_method('HasUnderlyingTypeInformation',
'bool',
[],
is_const=True, is_virtual=True)
return
def register_Ns3EmptyAttributeValue_methods(root_module, cls):
## attribute.h (module 'core'): ns3::EmptyAttributeValue::EmptyAttributeValue(ns3::EmptyAttributeValue const & arg0) [copy constructor]
cls.add_constructor([param('ns3::EmptyAttributeValue const &', 'arg0')])
## attribute.h (module 'core'): ns3::EmptyAttributeValue::EmptyAttributeValue() [constructor]
cls.add_constructor([])
## attribute.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::EmptyAttributeValue::Copy() const [member function]
cls.add_method('Copy',
'ns3::Ptr< ns3::AttributeValue >',
[],
is_const=True, visibility='private', is_virtual=True)
## attribute.h (module 'core'): bool ns3::EmptyAttributeValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function]
cls.add_method('DeserializeFromString',
'bool',
[param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
visibility='private', is_virtual=True)
## attribute.h (module 'core'): std::string ns3::EmptyAttributeValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function]
cls.add_method('SerializeToString',
'std::string',
[param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_const=True, visibility='private', is_virtual=True)
return
def register_Ns3IsotropicAntennaModel_methods(root_module, cls):
## isotropic-antenna-model.h (module 'antenna'): ns3::IsotropicAntennaModel::IsotropicAntennaModel(ns3::IsotropicAntennaModel const & arg0) [copy constructor]
cls.add_constructor([param('ns3::IsotropicAntennaModel const &', 'arg0')])
## isotropic-antenna-model.h (module 'antenna'): ns3::IsotropicAntennaModel::IsotropicAntennaModel() [constructor]
cls.add_constructor([])
## isotropic-antenna-model.h (module 'antenna'): double ns3::IsotropicAntennaModel::GetGainDb(ns3::Angles a) [member function]
cls.add_method('GetGainDb',
'double',
[param('ns3::Angles', 'a')],
is_virtual=True)
## isotropic-antenna-model.h (module 'antenna'): static ns3::TypeId ns3::IsotropicAntennaModel::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
return
def register_Ns3ParabolicAntennaModel_methods(root_module, cls):
## parabolic-antenna-model.h (module 'antenna'): ns3::ParabolicAntennaModel::ParabolicAntennaModel() [constructor]
cls.add_constructor([])
## parabolic-antenna-model.h (module 'antenna'): ns3::ParabolicAntennaModel::ParabolicAntennaModel(ns3::ParabolicAntennaModel const & arg0) [copy constructor]
cls.add_constructor([param('ns3::ParabolicAntennaModel const &', 'arg0')])
## parabolic-antenna-model.h (module 'antenna'): double ns3::ParabolicAntennaModel::GetBeamwidth() const [member function]
cls.add_method('GetBeamwidth',
'double',
[],
is_const=True)
## parabolic-antenna-model.h (module 'antenna'): double ns3::ParabolicAntennaModel::GetGainDb(ns3::Angles a) [member function]
cls.add_method('GetGainDb',
'double',
[param('ns3::Angles', 'a')],
is_virtual=True)
## parabolic-antenna-model.h (module 'antenna'): double ns3::ParabolicAntennaModel::GetOrientation() const [member function]
cls.add_method('GetOrientation',
'double',
[],
is_const=True)
## parabolic-antenna-model.h (module 'antenna'): static ns3::TypeId ns3::ParabolicAntennaModel::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
## parabolic-antenna-model.h (module 'antenna'): void ns3::ParabolicAntennaModel::SetBeamwidth(double beamwidthDegrees) [member function]
cls.add_method('SetBeamwidth',
'void',
[param('double', 'beamwidthDegrees')])
## parabolic-antenna-model.h (module 'antenna'): void ns3::ParabolicAntennaModel::SetOrientation(double orientationDegrees) [member function]
cls.add_method('SetOrientation',
'void',
[param('double', 'orientationDegrees')])
return
def register_Ns3TypeIdChecker_methods(root_module, cls):
## type-id.h (module 'core'): ns3::TypeIdChecker::TypeIdChecker() [constructor]
cls.add_constructor([])
## type-id.h (module 'core'): ns3::TypeIdChecker::TypeIdChecker(ns3::TypeIdChecker const & arg0) [copy constructor]
cls.add_constructor([param('ns3::TypeIdChecker const &', 'arg0')])
return
def register_Ns3TypeIdValue_methods(root_module, cls):
## type-id.h (module 'core'): ns3::TypeIdValue::TypeIdValue() [constructor]
cls.add_constructor([])
## type-id.h (module 'core'): ns3::TypeIdValue::TypeIdValue(ns3::TypeIdValue const & arg0) [copy constructor]
cls.add_constructor([param('ns3::TypeIdValue const &', 'arg0')])
## type-id.h (module 'core'): ns3::TypeIdValue::TypeIdValue(ns3::TypeId const & value) [constructor]
cls.add_constructor([param('ns3::TypeId const &', 'value')])
## type-id.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::TypeIdValue::Copy() const [member function]
cls.add_method('Copy',
'ns3::Ptr< ns3::AttributeValue >',
[],
is_const=True, is_virtual=True)
## type-id.h (module 'core'): bool ns3::TypeIdValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function]
cls.add_method('DeserializeFromString',
'bool',
[param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_virtual=True)
## type-id.h (module 'core'): ns3::TypeId ns3::TypeIdValue::Get() const [member function]
cls.add_method('Get',
'ns3::TypeId',
[],
is_const=True)
## type-id.h (module 'core'): std::string ns3::TypeIdValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function]
cls.add_method('SerializeToString',
'std::string',
[param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_const=True, is_virtual=True)
## type-id.h (module 'core'): void ns3::TypeIdValue::Set(ns3::TypeId const & value) [member function]
cls.add_method('Set',
'void',
[param('ns3::TypeId const &', 'value')])
return
def register_Ns3Vector2DChecker_methods(root_module, cls):
## vector.h (module 'core'): ns3::Vector2DChecker::Vector2DChecker() [constructor]
cls.add_constructor([])
## vector.h (module 'core'): ns3::Vector2DChecker::Vector2DChecker(ns3::Vector2DChecker const & arg0) [copy constructor]
cls.add_constructor([param('ns3::Vector2DChecker const &', 'arg0')])
return
def register_Ns3Vector2DValue_methods(root_module, cls):
## vector.h (module 'core'): ns3::Vector2DValue::Vector2DValue() [constructor]
cls.add_constructor([])
## vector.h (module 'core'): ns3::Vector2DValue::Vector2DValue(ns3::Vector2DValue const & arg0) [copy constructor]
cls.add_constructor([param('ns3::Vector2DValue const &', 'arg0')])
## vector.h (module 'core'): ns3::Vector2DValue::Vector2DValue(ns3::Vector2D const & value) [constructor]
cls.add_constructor([param('ns3::Vector2D const &', 'value')])
## vector.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::Vector2DValue::Copy() const [member function]
cls.add_method('Copy',
'ns3::Ptr< ns3::AttributeValue >',
[],
is_const=True, is_virtual=True)
## vector.h (module 'core'): bool ns3::Vector2DValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function]
cls.add_method('DeserializeFromString',
'bool',
[param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_virtual=True)
## vector.h (module 'core'): ns3::Vector2D ns3::Vector2DValue::Get() const [member function]
cls.add_method('Get',
'ns3::Vector2D',
[],
is_const=True)
## vector.h (module 'core'): std::string ns3::Vector2DValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function]
cls.add_method('SerializeToString',
'std::string',
[param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_const=True, is_virtual=True)
## vector.h (module 'core'): void ns3::Vector2DValue::Set(ns3::Vector2D const & value) [member function]
cls.add_method('Set',
'void',
[param('ns3::Vector2D const &', 'value')])
return
def register_Ns3Vector3DChecker_methods(root_module, cls):
## vector.h (module 'core'): ns3::Vector3DChecker::Vector3DChecker() [constructor]
cls.add_constructor([])
## vector.h (module 'core'): ns3::Vector3DChecker::Vector3DChecker(ns3::Vector3DChecker const & arg0) [copy constructor]
cls.add_constructor([param('ns3::Vector3DChecker const &', 'arg0')])
return
def register_Ns3Vector3DValue_methods(root_module, cls):
## vector.h (module 'core'): ns3::Vector3DValue::Vector3DValue() [constructor]
cls.add_constructor([])
## vector.h (module 'core'): ns3::Vector3DValue::Vector3DValue(ns3::Vector3DValue const & arg0) [copy constructor]
cls.add_constructor([param('ns3::Vector3DValue const &', 'arg0')])
## vector.h (module 'core'): ns3::Vector3DValue::Vector3DValue(ns3::Vector3D const & value) [constructor]
cls.add_constructor([param('ns3::Vector3D const &', 'value')])
## vector.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::Vector3DValue::Copy() const [member function]
cls.add_method('Copy',
'ns3::Ptr< ns3::AttributeValue >',
[],
is_const=True, is_virtual=True)
## vector.h (module 'core'): bool ns3::Vector3DValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function]
cls.add_method('DeserializeFromString',
'bool',
[param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_virtual=True)
## vector.h (module 'core'): ns3::Vector3D ns3::Vector3DValue::Get() const [member function]
cls.add_method('Get',
'ns3::Vector3D',
[],
is_const=True)
## vector.h (module 'core'): std::string ns3::Vector3DValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function]
cls.add_method('SerializeToString',
'std::string',
[param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_const=True, is_virtual=True)
## vector.h (module 'core'): void ns3::Vector3DValue::Set(ns3::Vector3D const & value) [member function]
cls.add_method('Set',
'void',
[param('ns3::Vector3D const &', 'value')])
return
def register_Ns3HashImplementation_methods(root_module, cls):
## hash-function.h (module 'core'): ns3::Hash::Implementation::Implementation(ns3::Hash::Implementation const & arg0) [copy constructor]
cls.add_constructor([param('ns3::Hash::Implementation const &', 'arg0')])
## hash-function.h (module 'core'): ns3::Hash::Implementation::Implementation() [constructor]
cls.add_constructor([])
## hash-function.h (module 'core'): uint32_t ns3::Hash::Implementation::GetHash32(char const * buffer, size_t const size) [member function]
cls.add_method('GetHash32',
'uint32_t',
[param('char const *', 'buffer'), param('size_t const', 'size')],
is_pure_virtual=True, is_virtual=True)
## hash-function.h (module 'core'): uint64_t ns3::Hash::Implementation::GetHash64(char const * buffer, size_t const size) [member function]
cls.add_method('GetHash64',
'uint64_t',
[param('char const *', 'buffer'), param('size_t const', 'size')],
is_virtual=True)
## hash-function.h (module 'core'): void ns3::Hash::Implementation::clear() [member function]
cls.add_method('clear',
'void',
[],
is_pure_virtual=True, is_virtual=True)
return
def register_Ns3HashFunctionFnv1a_methods(root_module, cls):
## hash-fnv.h (module 'core'): ns3::Hash::Function::Fnv1a::Fnv1a(ns3::Hash::Function::Fnv1a const & arg0) [copy constructor]
cls.add_constructor([param('ns3::Hash::Function::Fnv1a const &', 'arg0')])
## hash-fnv.h (module 'core'): ns3::Hash::Function::Fnv1a::Fnv1a() [constructor]
cls.add_constructor([])
## hash-fnv.h (module 'core'): uint32_t ns3::Hash::Function::Fnv1a::GetHash32(char const * buffer, size_t const size) [member function]
cls.add_method('GetHash32',
'uint32_t',
[param('char const *', 'buffer'), param('size_t const', 'size')],
is_virtual=True)
## hash-fnv.h (module 'core'): uint64_t ns3::Hash::Function::Fnv1a::GetHash64(char const * buffer, size_t const size) [member function]
cls.add_method('GetHash64',
'uint64_t',
[param('char const *', 'buffer'), param('size_t const', 'size')],
is_virtual=True)
## hash-fnv.h (module 'core'): void ns3::Hash::Function::Fnv1a::clear() [member function]
cls.add_method('clear',
'void',
[],
is_virtual=True)
return
def register_Ns3HashFunctionHash32_methods(root_module, cls):
## hash-function.h (module 'core'): ns3::Hash::Function::Hash32::Hash32(ns3::Hash::Function::Hash32 const & arg0) [copy constructor]
cls.add_constructor([param('ns3::Hash::Function::Hash32 const &', 'arg0')])
## hash-function.h (module 'core'): ns3::Hash::Function::Hash32::Hash32(ns3::Hash::Hash32Function_ptr hp) [constructor]
cls.add_constructor([param('ns3::Hash::Hash32Function_ptr', 'hp')])
## hash-function.h (module 'core'): uint32_t ns3::Hash::Function::Hash32::GetHash32(char const * buffer, size_t const size) [member function]
cls.add_method('GetHash32',
'uint32_t',
[param('char const *', 'buffer'), param('size_t const', 'size')],
is_virtual=True)
## hash-function.h (module 'core'): void ns3::Hash::Function::Hash32::clear() [member function]
cls.add_method('clear',
'void',
[],
is_virtual=True)
return
def register_Ns3HashFunctionHash64_methods(root_module, cls):
## hash-function.h (module 'core'): ns3::Hash::Function::Hash64::Hash64(ns3::Hash::Function::Hash64 const & arg0) [copy constructor]
cls.add_constructor([param('ns3::Hash::Function::Hash64 const &', 'arg0')])
## hash-function.h (module 'core'): ns3::Hash::Function::Hash64::Hash64(ns3::Hash::Hash64Function_ptr hp) [constructor]
cls.add_constructor([param('ns3::Hash::Hash64Function_ptr', 'hp')])
## hash-function.h (module 'core'): uint32_t ns3::Hash::Function::Hash64::GetHash32(char const * buffer, size_t const size) [member function]
cls.add_method('GetHash32',
'uint32_t',
[param('char const *', 'buffer'), param('size_t const', 'size')],
is_virtual=True)
## hash-function.h (module 'core'): uint64_t ns3::Hash::Function::Hash64::GetHash64(char const * buffer, size_t const size) [member function]
cls.add_method('GetHash64',
'uint64_t',
[param('char const *', 'buffer'), param('size_t const', 'size')],
is_virtual=True)
## hash-function.h (module 'core'): void ns3::Hash::Function::Hash64::clear() [member function]
cls.add_method('clear',
'void',
[],
is_virtual=True)
return
def register_Ns3HashFunctionMurmur3_methods(root_module, cls):
## hash-murmur3.h (module 'core'): ns3::Hash::Function::Murmur3::Murmur3(ns3::Hash::Function::Murmur3 const & arg0) [copy constructor]
cls.add_constructor([param('ns3::Hash::Function::Murmur3 const &', 'arg0')])
## hash-murmur3.h (module 'core'): ns3::Hash::Function::Murmur3::Murmur3() [constructor]
cls.add_constructor([])
## hash-murmur3.h (module 'core'): uint32_t ns3::Hash::Function::Murmur3::GetHash32(char const * buffer, size_t const size) [member function]
cls.add_method('GetHash32',
'uint32_t',
[param('char const *', 'buffer'), param('size_t const', 'size')],
is_virtual=True)
## hash-murmur3.h (module 'core'): uint64_t ns3::Hash::Function::Murmur3::GetHash64(char const * buffer, size_t const size) [member function]
cls.add_method('GetHash64',
'uint64_t',
[param('char const *', 'buffer'), param('size_t const', 'size')],
is_virtual=True)
## hash-murmur3.h (module 'core'): void ns3::Hash::Function::Murmur3::clear() [member function]
cls.add_method('clear',
'void',
[],
is_virtual=True)
return
def register_functions(root_module):
module = root_module
## angles.h (module 'antenna'): extern double ns3::DegreesToRadians(double degrees) [free function]
module.add_function('DegreesToRadians',
'double',
[param('double', 'degrees')])
## angles.h (module 'antenna'): extern double ns3::RadiansToDegrees(double radians) [free function]
module.add_function('RadiansToDegrees',
'double',
[param('double', 'radians')])
register_functions_ns3_FatalImpl(module.get_submodule('FatalImpl'), root_module)
register_functions_ns3_Hash(module.get_submodule('Hash'), root_module)
return
def register_functions_ns3_FatalImpl(module, root_module):
return
def register_functions_ns3_Hash(module, root_module):
register_functions_ns3_Hash_Function(module.get_submodule('Function'), root_module)
return
def register_functions_ns3_Hash_Function(module, root_module):
return
def main():
out = FileCodeSink(sys.stdout)
root_module = module_init()
register_types(root_module)
register_methods(root_module)
register_functions(root_module)
root_module.generate(out)
if __name__ == '__main__':
main()
|
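# Test that slice objects are passed to __getitem__ and that their
# start/stop/step attributes can be read but not assigned.  The first
# try/except prints SKIP and exits on builds without slice support.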
class A:
def __getitem__(self, idx):
print(idx.start, idx.stop, idx.step)
try:
t = A()[1:2]
except:
print("SKIP")
raise SystemExit
A()[1:2:3]
class B:
def __getitem__(self, idx):
try:
idx.start = 0
except AttributeError:
print('AttributeError')
B()[:]
|
import psycopg2
import logging
from openerp.osv import orm
from openerp.tools.float_utils import float_compare
_logger = logging.getLogger(__name__)
def _format_inserts_values(vals):
    """Build the column list and the pyformat placeholders of an INSERT statement.

    The one2many key 'line_id' is removed as it cannot be inserted directly.
    """
    cols = vals.keys()
    if 'line_id' in cols:
        cols.remove('line_id')
    return (', '.join(cols), ', '.join(['%%(%s)s' % i for i in cols]))
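# Illustrative sketch (not part of the original module): for a hypothetical
# vals dict such as {'journal_id': 1, 'ref': 'X'}, the helper above returns
# the column list and matching pyformat placeholders used by _bypass_create:
#
#   cols, placeholders = _format_inserts_values({'journal_id': 1, 'ref': 'X'})
#   sql = u"INSERT INTO account_move (%s) VALUES (%s) RETURNING id" % (cols, placeholders)
#   # -> INSERT INTO account_move (journal_id, ref) VALUES (%(journal_id)s, %(ref)s) RETURNING id
#
# Key order follows Python 2 dict iteration order, so the two joined strings
# always stay aligned with each other.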
class account_move(orm.Model):
"""redefine account move create to bypass orm.
Async_bypass_create must be set to True in context.
"""
_inherit = "account.move"
def _prepare_line(self, cr, uid, move_id, line, vals, context=None):
"""Take incomming move vals and complete move line dict with missing data
:param move_id: parent move id
:param line: dict of vals of move line
:param vals: dict of vals of move
:returns: dict of val of move line completed
"""
if isinstance(line, tuple):
line = line[2]
line['journal_id'] = vals.get('journal_id')
line['date'] = vals.get('date')
line['period_id'] = vals.get('period_id')
line['company_id'] = vals.get('company_id')
line['state'] = vals['state']
line['move_id'] = move_id
if line['debit'] and line['credit']:
raise ValueError('debit and credit set on same line')
if not line.get('analytic_account_id'):
line['analytic_account_id'] = None
for key in line:
if line[key] is False:
line[key] = None
return line
def _check_balance(self, vals):
"""Check if move is balanced"""
line_dicts = [y[2] for y in vals['line_id']]
debit = sum(x.get('debit') or 0.0 for x in line_dicts)
credit = sum(x.get('credit') or 0.0 for x in line_dicts)
if float_compare(debit, credit, precision_digits=2):
            raise ValueError('Move is not balanced %s %s' % (debit, credit))
def _bypass_create(self, cr, uid, vals, context=None):
"""Create entries using cursor directly
:returns: created id
"""
mvl_obj = self.pool['account.move.line']
vals['company_id'] = context.get('company_id', False)
vals['state'] = 'draft'
if not vals.get('name'):
vals['name'] = "/"
sql = u"Insert INTO account_move (%s) VALUES (%s) RETURNING id"
sql = sql % _format_inserts_values(vals)
try:
cr.execute(sql, vals)
except psycopg2.Error:
            _logger.exception('ORM bypass error for move')
raise
created_id = cr.fetchone()[0]
if vals.get('line_id'):
self._check_balance(vals)
for line in vals['line_id']:
l_vals = self._prepare_line(cr, uid, created_id,
line, vals, context=context)
mvl_obj.create(cr, uid, l_vals, context=context)
return created_id
def create(self, cr, uid, vals, context=None):
"""Please refer to orm.BaseModel.create documentation"""
if context is None:
context = {}
if context.get('async_bypass_create'):
return self._bypass_create(cr, uid, vals, context=context)
return super(account_move, self).create(cr, uid, vals, context=context)
class account_move_line(orm.Model):
"""Redefine account move line create to bypass orm.
Async_bypass_create must be set to True in context
"""
_inherit = "account.move.line"
def create(self, cr, uid, vals, context=None):
"""Please refer to orm.BaseModel.create documentation"""
if context is None:
context = {}
if context.get('async_bypass_create'):
return self._bypass_create(cr, uid, vals, context=context)
return super(account_move_line, self).create(cr, uid, vals,
context=context)
def _bypass_create(self, cr, uid, vals, context=None):
"""Create entries using cursor directly
:returns: created id
"""
sql = u"Insert INTO account_move_line (%s) VALUES (%s) RETURNING id"
sql = sql % _format_inserts_values(vals)
try:
cr.execute(sql, vals)
except psycopg2.Error:
            _logger.exception('ORM bypass error for move line')
raise
return cr.fetchone()[0]
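# Usage sketch (illustrative only; model and variable names are assumed):
# callers opt into the raw-SQL path by flagging the context, e.g. from another
# model method:
#
#   move_obj = self.pool['account.move']
#   move_id = move_obj.create(cr, uid, move_vals,
#                             context={'async_bypass_create': True})
#
# Without that flag, both create() overrides fall back to the regular
# orm.BaseModel.create() behaviour.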
|
import re
class pp_ss:
def __init__(self, val):
self.val = val
def to_string(self):
return "a=<" + str(self.val["a"]) + "> b=<" + str(self.val["b"]) + ">"
def lookup_function (val):
"Look-up and return a pretty-printer that can print val."
# Get the type.
type = val.type
    # If it is a reference, get the referenced type.
if type.code == gdb.TYPE_CODE_REF:
type = type.target ()
# Get the unqualified type, stripped of typedefs.
type = type.unqualified ().strip_typedefs ()
# Get the type name.
typename = type.tag
    if typename is None:
return None
# Iterate over local dictionary of types to determine
# if a printer is registered for that type. Return an
# instantiation of the printer if found.
for function in pretty_printers_dict:
if function.match (typename):
return pretty_printers_dict[function] (val)
# Cannot find a pretty printer. Return None.
return None
def register_pretty_printers ():
pretty_printers_dict[re.compile ('^ss$')] = pp_ss
pretty_printers_dict = {}
register_pretty_printers ()
gdb.current_progspace().pretty_printers.append (lookup_function)
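# Illustrative GDB session (assuming a C struct "ss" with members a and b,
# which is what the '^ss$' regexp and the val["a"]/val["b"] accesses imply):
#
#   (gdb) source this_pretty_printer.py
#   (gdb) print my_ss            # my_ss = {a = 1, b = 2}
#   $1 = a=<1> b=<2>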
|
from __future__ import unicode_literals
from django.db import migrations, models
from django.conf import settings
import xmodule_django.models
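# Creates the InstructorTask table, which records asynchronous instructor
# tasks (type, input, state, output and subtasks) per course and per
# requesting user.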
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='InstructorTask',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('task_type', models.CharField(max_length=50, db_index=True)),
('course_id', xmodule_django.models.CourseKeyField(max_length=255, db_index=True)),
('task_key', models.CharField(max_length=255, db_index=True)),
('task_input', models.CharField(max_length=255)),
('task_id', models.CharField(max_length=255, db_index=True)),
('task_state', models.CharField(max_length=50, null=True, db_index=True)),
('task_output', models.CharField(max_length=1024, null=True)),
('created', models.DateTimeField(auto_now_add=True, null=True)),
('updated', models.DateTimeField(auto_now=True)),
('subtasks', models.TextField(blank=True)),
('requester', models.ForeignKey(to=settings.AUTH_USER_MODEL)),
],
),
]
|
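# Minimal Bokeh example: a 400x400 figure with box-select and hover tools,
# five circle glyphs, written to toolbar.html and opened in the browser.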
from bokeh.plotting import figure, output_file, show
from bokeh.models import HoverTool, BoxSelectTool
output_file("toolbar.html")
TOOLS = [BoxSelectTool(), HoverTool()]
p = figure(plot_width=400, plot_height=400, title=None, tools=TOOLS)
p.circle([1, 2, 3, 4, 5], [2, 5, 8, 2, 7], size=10)
show(p)
|
from django.conf import settings
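# Pick the first payment processor configured in settings.CC_PROCESSOR and
# import its module from shoppingcart.processors; the two wrappers below
# simply delegate to it.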
processor_name = settings.CC_PROCESSOR.keys()[0]
module = __import__('shoppingcart.processors.' + processor_name,
                    fromlist=['render_purchase_form_html',
'process_postpay_callback',
])
def render_purchase_form_html(*args, **kwargs):
"""
The top level call to this module to begin the purchase.
Given a shopping cart,
Renders the HTML form for display on user's browser, which POSTS to Hosted Processors
Returns the HTML as a string
"""
return module.render_purchase_form_html(*args, **kwargs)
def process_postpay_callback(*args, **kwargs):
"""
The top level call to this module after the purchase.
This function is handed the callback request after the customer has entered the CC info and clicked "buy"
on the external payment page.
It is expected to verify the callback and determine if the payment was successful.
It returns {'success':bool, 'order':Order, 'error_html':str}
If successful this function must have the side effect of marking the order purchased and calling the
purchased_callbacks of the cart items.
If unsuccessful this function should not have those side effects but should try to figure out why and
return a helpful-enough error message in error_html.
"""
return module.process_postpay_callback(*args, **kwargs)
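# Illustrative caller sketch (hypothetical view code, not part of this module),
# following the result contract documented above:
#
#   result = process_postpay_callback(request.POST)
#   if result['success']:
#       ...  # result['order'] was marked purchased by the processor
#   else:
#       ...  # render result['error_html'] back to the user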
|
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'curated'}
DOCUMENTATION = '''
---
module: azure_rm_virtualnetwork
version_added: "2.1"
short_description: Manage Azure virtual networks.
description:
    - Create, update or delete a virtual network. Allows setting and updating the available IPv4 address ranges
and setting custom DNS servers. Use the azure_rm_subnet module to associate subnets with a virtual network.
options:
resource_group:
description:
- name of resource group.
required: true
address_prefixes_cidr:
description:
- List of IPv4 address ranges where each is formatted using CIDR notation. Required when creating
a new virtual network or using purge_address_prefixes.
aliases:
- address_prefixes
default: null
required: false
dns_servers:
description:
- Custom list of DNS servers. Maximum length of two. The first server in the list will be treated
as the Primary server. This is an explicit list. Existing DNS servers will be replaced with the
specified list. Use the purge_dns_servers option to remove all custom DNS servers and revert to
default Azure servers.
default: null
required: false
location:
description:
- Valid azure location. Defaults to location of the resource group.
default: resource_group location
required: false
name:
description:
- name of the virtual network.
required: true
purge_address_prefixes:
description:
- Use with state present to remove any existing address_prefixes.
default: false
purge_dns_servers:
description:
- Use with state present to remove existing DNS servers, reverting to default Azure servers. Mutually
exclusive with dns_servers.
default: false
required: false
state:
description:
- Assert the state of the virtual network. Use 'present' to create or update and
'absent' to delete.
default: present
choices:
- absent
- present
required: false
extends_documentation_fragment:
- azure
- azure_tags
author:
- "Chris Houseknecht (@chouseknecht)"
- "Matt Davis (@nitzmahone)"
'''
EXAMPLES = '''
- name: Create a virtual network
azure_rm_virtualnetwork:
name: foobar
resource_group: Testing
address_prefixes_cidr:
- "10.1.0.0/16"
- "172.100.0.0/16"
dns_servers:
- "127.0.0.1"
- "127.0.0.2"
tags:
testing: testing
delete: on-exit
- name: Delete a virtual network
azure_rm_virtualnetwork:
name: foobar
resource_group: Testing
state: absent
'''
RETURN = '''
state:
description: Current state of the virtual network.
returned: always
type: dict
sample: {
"address_prefixes": [
"10.1.0.0/16",
"172.100.0.0/16"
],
"dns_servers": [
"127.0.0.1",
"127.0.0.3"
],
"etag": 'W/"0712e87c-f02f-4bb3-8b9e-2da0390a3886"',
"id": "/subscriptions/XXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXX/resourceGroups/Testing/providers/Microsoft.Network/virtualNetworks/my_test_network",
"location": "eastus",
"name": "my_test_network",
"provisioning_state": "Succeeded",
"tags": null,
"type": "Microsoft.Network/virtualNetworks"
}
'''
from ansible.module_utils.basic import *
from ansible.module_utils.azure_rm_common import *
try:
from msrestazure.azure_exceptions import CloudError
from azure.mgmt.network.models import VirtualNetwork, AddressSpace, DhcpOptions
except ImportError:
# This is handled in azure_rm_common
pass
def virtual_network_to_dict(vnet):
'''
Convert a virtual network object to a dict.
    :param vnet: VirtualNetwork object
:return: dict
'''
results = dict(
id=vnet.id,
name=vnet.name,
location=vnet.location,
type=vnet.type,
tags=vnet.tags,
provisioning_state=vnet.provisioning_state,
etag=vnet.etag
)
if vnet.dhcp_options and len(vnet.dhcp_options.dns_servers) > 0:
results['dns_servers'] = []
for server in vnet.dhcp_options.dns_servers:
results['dns_servers'].append(server)
if vnet.address_space and len(vnet.address_space.address_prefixes) > 0:
results['address_prefixes'] = []
for space in vnet.address_space.address_prefixes:
results['address_prefixes'].append(space)
return results
class AzureRMVirtualNetwork(AzureRMModuleBase):
def __init__(self):
self.module_arg_spec = dict(
resource_group=dict(type='str', required=True),
name=dict(type='str', required=True),
state=dict(type='str', default='present', choices=['present', 'absent']),
location=dict(type='str'),
address_prefixes_cidr=dict(type='list', aliases=['address_prefixes']),
dns_servers=dict(type='list',),
purge_address_prefixes=dict(type='bool', default=False, aliases=['purge']),
purge_dns_servers=dict(type='bool', default=False),
)
mutually_exclusive = [
('dns_servers', 'purge_dns_servers')
]
required_if = [
('purge_address_prefixes', True, ['address_prefixes_cidr'])
]
self.resource_group = None
self.name = None
self.state = None
self.location = None
self.address_prefixes_cidr = None
self.purge_address_prefixes = None
self.dns_servers = None
self.purge_dns_servers = None
self.results=dict(
changed=False,
state=dict()
)
super(AzureRMVirtualNetwork, self).__init__(self.module_arg_spec,
mutually_exclusive=mutually_exclusive,
required_if=required_if,
supports_check_mode=True)
def exec_module(self, **kwargs):
for key in self.module_arg_spec.keys() + ['tags']:
setattr(self, key, kwargs[key])
self.results['check_mode'] = self.check_mode
resource_group = self.get_resource_group(self.resource_group)
if not self.location:
# Set default location
self.location = resource_group.location
if self.state == 'present' and self.purge_address_prefixes:
for prefix in self.address_prefixes_cidr:
if not CIDR_PATTERN.match(prefix):
self.fail("Parameter error: invalid address prefix value {0}".format(prefix))
if self.dns_servers and len(self.dns_servers) > 2:
self.fail("Parameter error: You can provide a maximum of 2 DNS servers.")
changed = False
results = dict()
try:
self.log('Fetching vnet {0}'.format(self.name))
vnet = self.network_client.virtual_networks.get(self.resource_group, self.name)
results = virtual_network_to_dict(vnet)
self.log('Vnet exists {0}'.format(self.name))
self.log(results, pretty_print=True)
self.check_provisioning_state(vnet, self.state)
if self.state == 'present':
if self.address_prefixes_cidr:
existing_address_prefix_set = set(vnet.address_space.address_prefixes)
requested_address_prefix_set = set(self.address_prefixes_cidr)
missing_prefixes = requested_address_prefix_set - existing_address_prefix_set
extra_prefixes = existing_address_prefix_set - requested_address_prefix_set
if len(missing_prefixes) > 0:
self.log('CHANGED: there are missing address_prefixes')
changed = True
if not self.purge_address_prefixes:
# add the missing prefixes
for prefix in missing_prefixes:
results['address_prefixes'].append(prefix)
if len(extra_prefixes) > 0 and self.purge_address_prefixes:
self.log('CHANGED: there are address_prefixes to purge')
changed = True
# replace existing address prefixes with requested set
results['address_prefixes'] = self.address_prefixes_cidr
update_tags, results['tags'] = self.update_tags(results['tags'])
if update_tags:
changed = True
if self.dns_servers:
existing_dns_set = set(vnet.dhcp_options.dns_servers)
requested_dns_set = set(self.dns_servers)
if existing_dns_set != requested_dns_set:
self.log('CHANGED: replacing DNS servers')
changed = True
results['dns_servers'] = self.dns_servers
if self.purge_dns_servers and vnet.dhcp_options and len(vnet.dhcp_options.dns_servers) > 0:
self.log('CHANGED: purging existing DNS servers')
changed = True
results['dns_servers'] = []
elif self.state == 'absent':
self.log("CHANGED: vnet exists but requested state is 'absent'")
changed = True
except CloudError:
self.log('Vnet {0} does not exist'.format(self.name))
if self.state == 'present':
self.log("CHANGED: vnet {0} does not exist but requested state is 'present'".format(self.name))
changed = True
self.results['changed'] = changed
self.results['state'] = results
if self.check_mode:
return self.results
if changed:
if self.state == 'present':
if not results:
# create a new virtual network
self.log("Create virtual network {0}".format(self.name))
if not self.address_prefixes_cidr:
self.fail('Parameter error: address_prefixes_cidr required when creating a virtual network')
vnet = VirtualNetwork(
location=self.location,
address_space=AddressSpace(
address_prefixes=self.address_prefixes_cidr
)
)
if self.dns_servers:
vnet.dhcp_options = DhcpOptions(
dns_servers=self.dns_servers
)
if self.tags:
vnet.tags = self.tags
self.results['state'] = self.create_or_update_vnet(vnet)
else:
# update existing virtual network
self.log("Update virtual network {0}".format(self.name))
vnet = VirtualNetwork(
location=results['location'],
address_space=AddressSpace(
address_prefixes=results['address_prefixes']
),
tags=results['tags']
)
if results.get('dns_servers'):
vnet.dhcp_options = DhcpOptions(
dns_servers=results['dns_servers']
)
self.results['state'] = self.create_or_update_vnet(vnet)
elif self.state == 'absent':
self.delete_virtual_network()
self.results['state']['status'] = 'Deleted'
return self.results
def create_or_update_vnet(self, vnet):
try:
poller = self.network_client.virtual_networks.create_or_update(self.resource_group, self.name, vnet)
new_vnet = self.get_poller_result(poller)
except Exception as exc:
self.fail("Error creating or updating virtual network {0} - {1}".format(self.name, str(exc)))
return virtual_network_to_dict(new_vnet)
def delete_virtual_network(self):
try:
poller = self.network_client.virtual_networks.delete(self.resource_group, self.name)
result = self.get_poller_result(poller)
except Exception as exc:
self.fail("Error deleting virtual network {0} - {1}".format(self.name, str(exc)))
return result
def main():
AzureRMVirtualNetwork()
if __name__ == '__main__':
main()
|
"""
Twisted Python Roots: an abstract hierarchy representation for Twisted.
Maintainer: Glyph Lefkowitz
"""
import types
from twisted.python import reflect
class NotSupportedError(NotImplementedError):
"""
An exception meaning that the tree-manipulation operation
you're attempting to perform is not supported.
"""
class Request:
"""I am an abstract representation of a request for an entity.
I also function as the response. The request is responded to by calling
self.write(data) until there is no data left and then calling
self.finish().
"""
# This attribute should be set to the string name of the protocol being
# responded to (e.g. HTTP or FTP)
wireProtocol = None
def write(self, data):
"""Add some data to the response to this request.
"""
raise NotImplementedError("%s.write" % reflect.qual(self.__class__))
def finish(self):
"""The response to this request is finished; flush all data to the network stream.
"""
raise NotImplementedError("%s.finish" % reflect.qual(self.__class__))
class Entity:
"""I am a terminal object in a hierarchy, with no children.
I represent a null interface; certain non-instance objects (strings and
integers, notably) are Entities.
Methods on this class are suggested to be implemented, but are not
required, and will be emulated on a per-protocol basis for types which do
not handle them.
"""
def render(self, request):
"""
I produce a stream of bytes for the request, by calling request.write()
and request.finish().
"""
raise NotImplementedError("%s.render" % reflect.qual(self.__class__))
class Collection:
"""I represent a static collection of entities.
I contain methods designed to represent collections that can be dynamically
created.
"""
def __init__(self, entities=None):
"""Initialize me.
"""
if entities is not None:
self.entities = entities
else:
self.entities = {}
def getStaticEntity(self, name):
"""Get an entity that was added to me using putEntity.
This method will return 'None' if it fails.
"""
return self.entities.get(name)
def getDynamicEntity(self, name, request):
"""Subclass this to generate an entity on demand.
This method should return 'None' if it fails.
"""
def getEntity(self, name, request):
"""Retrieve an entity from me.
I will first attempt to retrieve an entity statically; static entities
will obscure dynamic ones. If that fails, I will retrieve the entity
dynamically.
If I cannot retrieve an entity, I will return 'None'.
"""
ent = self.getStaticEntity(name)
if ent is not None:
return ent
ent = self.getDynamicEntity(name, request)
if ent is not None:
return ent
return None
def putEntity(self, name, entity):
"""Store a static reference on 'name' for 'entity'.
Raises a KeyError if the operation fails.
"""
self.entities[name] = entity
def delEntity(self, name):
"""Remove a static reference for 'name'.
Raises a KeyError if the operation fails.
"""
del self.entities[name]
def storeEntity(self, name, request):
"""Store an entity for 'name', based on the content of 'request'.
"""
raise NotSupportedError("%s.storeEntity" % reflect.qual(self.__class__))
def removeEntity(self, name, request):
"""Remove an entity for 'name', based on the content of 'request'.
"""
raise NotSupportedError("%s.removeEntity" % reflect.qual(self.__class__))
def listStaticEntities(self):
"""Retrieve a list of all name, entity pairs that I store references to.
See getStaticEntity.
"""
return self.entities.items()
def listDynamicEntities(self, request):
"""A list of all name, entity that I can generate on demand.
See getDynamicEntity.
"""
return []
def listEntities(self, request):
"""Retrieve a list of all name, entity pairs I contain.
See getEntity.
"""
return self.listStaticEntities() + self.listDynamicEntities(request)
def listStaticNames(self):
"""Retrieve a list of the names of entities that I store references to.
See getStaticEntity.
"""
return self.entities.keys()
def listDynamicNames(self):
"""Retrieve a list of the names of entities that I store references to.
See getDynamicEntity.
"""
return []
def listNames(self, request):
"""Retrieve a list of all names for entities that I contain.
See getEntity.
"""
return self.listStaticNames()
class ConstraintViolation(Exception):
"""An exception raised when a constraint is violated.
"""
class Constrained(Collection):
"""A collection that has constraints on its names and/or entities."""
def nameConstraint(self, name):
"""A method that determines whether an entity may be added to me with a given name.
If the constraint is satisfied, return 1; if the constraint is not
satisfied, either return 0 or raise a descriptive ConstraintViolation.
"""
return 1
def entityConstraint(self, entity):
"""A method that determines whether an entity may be added to me.
If the constraint is satisfied, return 1; if the constraint is not
satisfied, either return 0 or raise a descriptive ConstraintViolation.
"""
return 1
def reallyPutEntity(self, name, entity):
Collection.putEntity(self, name, entity)
def putEntity(self, name, entity):
"""Store an entity if it meets both constraints.
Otherwise raise a ConstraintViolation.
"""
if self.nameConstraint(name):
if self.entityConstraint(entity):
self.reallyPutEntity(name, entity)
else:
raise ConstraintViolation("Entity constraint violated.")
else:
raise ConstraintViolation("Name constraint violated.")
class Locked(Constrained):
"""A collection that can be locked from adding entities."""
locked = 0
def lock(self):
self.locked = 1
def entityConstraint(self, entity):
return not self.locked
class Homogenous(Constrained):
"""A homogenous collection of entities.
I will only contain entities that are an instance of the class or type
specified by my 'entityType' attribute.
"""
entityType = types.InstanceType
def entityConstraint(self, entity):
if isinstance(entity, self.entityType):
return 1
else:
raise ConstraintViolation("%s of incorrect type (%s)" %
(entity, self.entityType))
def getNameType(self):
return "Name"
def getEntityType(self):
return self.entityType.__name__
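# Minimal usage sketch (illustrative only; WidgetCollection is a hypothetical
# name): a Homogenous subclass that only accepts Entity instances.
#
#   class WidgetCollection(Homogenous):
#       entityType = Entity
#
#   widgets = WidgetCollection()
#   widgets.putEntity("home", Entity())          # passes entityConstraint
#   widgets.putEntity("oops", "not an entity")   # raises ConstraintViolation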
|
from __future__ import absolute_import
import logging
__all__ = [
'ExtraLogFormatter'
]
class ExtraLogFormatter(logging.Formatter):
"""
Custom log formatter which attaches all the attributes from the "extra"
dictionary which start with an underscore to the end of the log message.
For example:
extra={'_id': 'user-1', '_path': '/foo/bar'}
"""
def format(self, record):
custom_attributes = dict([(k, v) for k, v in record.__dict__.items()
if k.startswith('_')])
custom_attributes = self._dict_to_str(custom_attributes)
msg = logging.Formatter.format(self, record)
msg = '%s (%s)' % (msg, custom_attributes)
return msg
def _dict_to_str(self, dictionary):
result = ['%s=%s' % (k[1:], str(v)) for k, v in dictionary.items()]
result = ','.join(result)
return result
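# Minimal usage sketch (illustrative only; the handler and logger names below
# are assumptions, not part of this module):
#
#   handler = logging.StreamHandler()
#   handler.setFormatter(ExtraLogFormatter('%(message)s'))
#   logger = logging.getLogger('example')
#   logger.addHandler(handler)
#   logger.error('request failed', extra={'_id': 'user-1', '_path': '/foo/bar'})
#   # -> e.g. "request failed (id=user-1,path=/foo/bar)"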
|
"""
webapp2_extras.appengine.sessions_memcache
==========================================
Extended sessions stored in memcache.
:copyright: 2011 by tipfy.org.
    :license: Apache Software License, see LICENSE for details.
"""
from google.appengine.api import memcache
from webapp2_extras import sessions
class MemcacheSessionFactory(sessions.CustomBackendSessionFactory):
"""A session factory that stores data serialized in memcache.
To use memcache sessions, pass this class as the `factory` keyword to
:meth:`webapp2_extras.sessions.SessionStore.get_session`::
from webapp2_extras import sessions_memcache
# [...]
session = self.session_store.get_session(
name='mc_session',
factory=sessions_memcache.MemcacheSessionFactory)
See in :meth:`webapp2_extras.sessions.SessionStore` an example of how to
make sessions available in a :class:`webapp2.RequestHandler`.
"""
def _get_by_sid(self, sid):
"""Returns a session given a session id."""
if self._is_valid_sid(sid):
data = memcache.get(sid)
if data is not None:
self.sid = sid
return sessions.SessionDict(self, data=data)
self.sid = self._get_new_sid()
return sessions.SessionDict(self, new=True)
def save_session(self, response):
if self.session is None or not self.session.modified:
return
memcache.set(self.sid, dict(self.session))
self.session_store.save_secure_cookie(
response, self.name, {'_sid': self.sid}, **self.session_args)
|
import re
import sys
import string
from optparse import OptionParser
asflicense='''
<!---
-->
'''
def docstrip(key,string):
string=re.sub("^## @%s " % key ,"",string)
string=string.lstrip()
string=string.rstrip()
return string
def toc(list):
tocout=[]
header=()
for i in list:
if header != i.getinter():
header=i.getinter()
line=" * %s\n" % (i.headerbuild())
tocout.append(line)
line=" * [%s](#%s)\n" % (i.getname().replace("_","\_"),i.getname())
tocout.append(line)
return tocout
class ShellFunction:
def __init__(self):
self.reset()
def __cmp__(self,other):
if (self.audience == other.audience):
if (self.stability == other.stability):
if (self.replaceb == other.replaceb):
return(cmp(self.name,other.name))
else:
if (self.replaceb == "Yes"):
return -1
else:
return 1
else:
if (self.stability == "Stable"):
return -1
else:
return 1
else:
if (self.audience == "Public"):
return -1
else:
return 1
def reset(self):
self.name=None
self.audience=None
self.stability=None
self.replaceb=None
self.returnt=None
self.desc=None
self.params=None
def setname(self,text):
        definition=text.split()
self.name=definition[1]
def getname(self):
if (self.name is None):
return "None"
else:
return self.name
def setaudience(self,text):
self.audience=docstrip("audience",text)
self.audience=self.audience.capitalize()
def getaudience(self):
if (self.audience is None):
return "None"
else:
return self.audience
def setstability(self,text):
self.stability=docstrip("stability",text)
self.stability=self.stability.capitalize()
def getstability(self):
if (self.stability is None):
return "None"
else:
return self.stability
def setreplace(self,text):
self.replaceb=docstrip("replaceable",text)
self.replaceb=self.replaceb.capitalize()
def getreplace(self):
if (self.replaceb is None):
return "None"
else:
return self.replaceb
def getinter(self):
return( (self.getaudience(), self.getstability(), self.getreplace()))
def addreturn(self,text):
if (self.returnt is None):
self.returnt = []
self.returnt.append(docstrip("return",text))
def getreturn(self):
if (self.returnt is None):
return "Nothing"
else:
return "\n\n".join(self.returnt)
def adddesc(self,text):
if (self.desc is None):
self.desc = []
self.desc.append(docstrip("description",text))
def getdesc(self):
if (self.desc is None):
return "None"
else:
return " ".join(self.desc)
def addparam(self,text):
if (self.params is None):
self.params = []
self.params.append(docstrip("param",text))
def getparams(self):
if (self.params is None):
return ""
else:
return " ".join(self.params)
def getusage(self):
line="%s %s" % (self.name, self.getparams())
return line
def headerbuild(self):
if self.getreplace() == "Yes":
replacetext="Replaceable"
else:
replacetext="Not Replaceable"
line="%s/%s/%s" % (self.getaudience(), self.getstability(), replacetext)
return(line)
def getdocpage(self):
line="### `%s`\n\n"\
"* Synopsis\n\n"\
"```\n%s\n"\
"```\n\n" \
"* Description\n\n" \
"%s\n\n" \
"* Returns\n\n" \
"%s\n\n" \
"| Classification | Level |\n" \
"| :--- | :--- |\n" \
"| Audience | %s |\n" \
"| Stability | %s |\n" \
"| Replaceable | %s |\n\n" \
% (self.getname(),
self.getusage(),
self.getdesc(),
self.getreturn(),
self.getaudience(),
self.getstability(),
self.getreplace())
return line
def __str__(self):
line="{%s %s %s %s}" \
% (self.getname(),
self.getaudience(),
self.getstability(),
self.getreplace())
return line
def main():
parser=OptionParser(usage="usage: %prog --skipprnorep --output OUTFILE --input INFILE [--input INFILE ...]")
parser.add_option("-o","--output", dest="outfile",
action="store", type="string",
help="file to create", metavar="OUTFILE")
parser.add_option("-i","--input", dest="infile",
action="append", type="string",
help="file to read", metavar="INFILE")
parser.add_option("--skipprnorep", dest="skipprnorep",
action="store_true", help="Skip Private & Not Replaceable")
(options, args)=parser.parse_args()
allfuncs=[]
for filename in options.infile:
with open(filename,"r") as shellcode:
funcdef=ShellFunction()
for line in shellcode:
if line.startswith('## @description'):
funcdef.adddesc(line)
elif line.startswith('## @audience'):
funcdef.setaudience(line)
elif line.startswith('## @stability'):
funcdef.setstability(line)
elif line.startswith('## @replaceable'):
funcdef.setreplace(line)
elif line.startswith('## @param'):
funcdef.addparam(line)
elif line.startswith('## @return'):
funcdef.addreturn(line)
elif line.startswith('function'):
funcdef.setname(line)
if options.skipprnorep and \
funcdef.getaudience() == "Private" and \
funcdef.getreplace() == "No":
pass
else:
allfuncs.append(funcdef)
funcdef=ShellFunction()
allfuncs=sorted(allfuncs)
outfile=open(options.outfile, "w")
outfile.write(asflicense)
for line in toc(allfuncs):
outfile.write(line)
outfile.write("\n------\n\n")
header=[]
for funcs in allfuncs:
if header != funcs.getinter():
header=funcs.getinter()
line="## %s\n" % (funcs.headerbuild())
outfile.write(line)
outfile.write(funcs.getdocpage())
outfile.close()
if __name__ == "__main__":
main()
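# Illustrative input (a hypothetical shell function) in the annotation format
# this parser consumes; each "## @key value" line feeds the ShellFunction
# setters above:
#
#   ## @description  print an error message to stderr
#   ## @audience     public
#   ## @stability    stable
#   ## @replaceable  no
#   ## @param        string
#   ## @return       nothing
#   function my_error
#   {
#     echo "$*" 1>&2
#   }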
|
import sys
import shutil
import os
import stat
import re
import posixpath
import zipfile
import tarfile
import subprocess
import textwrap
from pip.exceptions import InstallationError, BadCommand, PipError
from pip.backwardcompat import(WindowsError, string_types, raw_input,
console_to_str, user_site, PermissionError)
from pip.locations import site_packages, running_under_virtualenv, virtualenv_no_global
from pip.log import logger
from pip._vendor import pkg_resources
from pip._vendor.distlib import version
__all__ = ['rmtree', 'display_path', 'backup_dir',
'find_command', 'ask', 'Inf',
'normalize_name', 'splitext',
'format_size', 'is_installable_dir',
'is_svn_page', 'file_contents',
'split_leading_dir', 'has_leading_dir',
'make_path_relative', 'normalize_path',
'renames', 'get_terminal_size', 'get_prog',
'unzip_file', 'untar_file', 'create_download_cache_folder',
'cache_download', 'unpack_file', 'call_subprocess']
def get_prog():
try:
if os.path.basename(sys.argv[0]) in ('__main__.py', '-c'):
return "%s -m pip" % sys.executable
except (AttributeError, TypeError, IndexError):
pass
return 'pip'
def rmtree(dir, ignore_errors=False):
shutil.rmtree(dir, ignore_errors=ignore_errors,
onerror=rmtree_errorhandler)
def rmtree_errorhandler(func, path, exc_info):
"""On Windows, the files in .svn are read-only, so when rmtree() tries to
remove them, an exception is thrown. We catch that here, remove the
read-only attribute, and hopefully continue without problems."""
exctype, value = exc_info[:2]
if not ((exctype is WindowsError and value.args[0] == 5) or #others
(exctype is OSError and value.args[0] == 13) or #python2.4
(exctype is PermissionError and value.args[3] == 5) #python3.3
):
raise
# file type should currently be read only
if ((os.stat(path).st_mode & stat.S_IREAD) != stat.S_IREAD):
raise
# convert to read/write
os.chmod(path, stat.S_IWRITE)
# use the original function to repeat the operation
func(path)
def display_path(path):
"""Gives the display value for a given path, making it relative to cwd
if possible."""
path = os.path.normcase(os.path.abspath(path))
if path.startswith(os.getcwd() + os.path.sep):
path = '.' + path[len(os.getcwd()):]
return path
def backup_dir(dir, ext='.bak'):
"""Figure out the name of a directory to back up the given dir to
(adding .bak, .bak2, etc)"""
n = 1
extension = ext
while os.path.exists(dir + extension):
n += 1
extension = ext + str(n)
return dir + extension
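# e.g. (illustrative): backup_dir('/tmp/build') -> '/tmp/build.bak', or
# '/tmp/build.bak2' if '/tmp/build.bak' already exists.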
def find_command(cmd, paths=None, pathext=None):
"""Searches the PATH for the given command and returns its path"""
if paths is None:
paths = os.environ.get('PATH', '').split(os.pathsep)
if isinstance(paths, string_types):
paths = [paths]
# check if there are funny path extensions for executables, e.g. Windows
if pathext is None:
pathext = get_pathext()
pathext = [ext for ext in pathext.lower().split(os.pathsep) if len(ext)]
# don't use extensions if the command ends with one of them
if os.path.splitext(cmd)[1].lower() in pathext:
pathext = ['']
# check if we find the command on PATH
for path in paths:
# try without extension first
cmd_path = os.path.join(path, cmd)
for ext in pathext:
# then including the extension
cmd_path_ext = cmd_path + ext
if os.path.isfile(cmd_path_ext):
return cmd_path_ext
if os.path.isfile(cmd_path):
return cmd_path
raise BadCommand('Cannot find command %r' % cmd)
def get_pathext(default_pathext=None):
"""Returns the path extensions from environment or a default"""
if default_pathext is None:
default_pathext = os.pathsep.join(['.COM', '.EXE', '.BAT', '.CMD'])
pathext = os.environ.get('PATHEXT', default_pathext)
return pathext
def ask_path_exists(message, options):
for action in os.environ.get('PIP_EXISTS_ACTION', '').split():
if action in options:
return action
return ask(message, options)
def ask(message, options):
"""Ask the message interactively, with the given possible responses"""
while 1:
if os.environ.get('PIP_NO_INPUT'):
raise Exception('No input was expected ($PIP_NO_INPUT set); question: %s' % message)
response = raw_input(message)
response = response.strip().lower()
if response not in options:
print('Your response (%r) was not one of the expected responses: %s' % (
response, ', '.join(options)))
else:
return response
class _Inf(object):
"""I am bigger than everything!"""
def __eq__(self, other):
if self is other:
return True
else:
return False
def __ne__(self, other):
return not self.__eq__(other)
def __lt__(self, other):
return False
def __le__(self, other):
return False
def __gt__(self, other):
return True
def __ge__(self, other):
return True
def __repr__(self):
return 'Inf'
Inf = _Inf() #this object is not currently used as a sortable in our code
del _Inf
_normalize_re = re.compile(r'[^a-z]', re.I)
def normalize_name(name):
return _normalize_re.sub('-', name.lower())
def format_size(bytes):
if bytes > 1000*1000:
return '%.1fMB' % (bytes/1000.0/1000)
elif bytes > 10*1000:
return '%ikB' % (bytes/1000)
elif bytes > 1000:
return '%.1fkB' % (bytes/1000.0)
else:
return '%ibytes' % bytes
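# e.g. (illustrative): format_size(1234567) -> '1.2MB', format_size(52000) -> '52kB',
# format_size(1500) -> '1.5kB', format_size(999) -> '999bytes'.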
def is_installable_dir(path):
"""Return True if `path` is a directory containing a setup.py file."""
if not os.path.isdir(path):
return False
setup_py = os.path.join(path, 'setup.py')
if os.path.isfile(setup_py):
return True
return False
def is_svn_page(html):
"""Returns true if the page appears to be the index page of an svn repository"""
return (re.search(r'<title>[^<]*Revision \d+:', html)
and re.search(r'Powered by (?:<a[^>]*?>)?Subversion', html, re.I))
def file_contents(filename):
fp = open(filename, 'rb')
try:
return fp.read().decode('utf-8')
finally:
fp.close()
def split_leading_dir(path):
path = str(path)
path = path.lstrip('/').lstrip('\\')
if '/' in path and (('\\' in path and path.find('/') < path.find('\\'))
or '\\' not in path):
return path.split('/', 1)
elif '\\' in path:
return path.split('\\', 1)
else:
return path, ''
def has_leading_dir(paths):
"""Returns true if all the paths have the same leading path name
(i.e., everything is in one subdirectory in an archive)"""
common_prefix = None
for path in paths:
prefix, rest = split_leading_dir(path)
if not prefix:
return False
elif common_prefix is None:
common_prefix = prefix
elif prefix != common_prefix:
return False
return True
def make_path_relative(path, rel_to):
"""
    Make a filename relative, given the path to the file and the directory it
    should be made relative to (rel_to).
    >>> make_path_relative('/usr/share/something/a-file.pth',
    ...                    '/usr/share/another-place/src/Directory')
    '../../../something/a-file.pth'
    >>> make_path_relative('/usr/share/something/a-file.pth',
    ...                    '/home/user/src/Directory')
    '../../../usr/share/something/a-file.pth'
    >>> make_path_relative('/usr/share/a-file.pth', '/usr/share/')
'a-file.pth'
"""
path_filename = os.path.basename(path)
path = os.path.dirname(path)
path = os.path.normpath(os.path.abspath(path))
rel_to = os.path.normpath(os.path.abspath(rel_to))
path_parts = path.strip(os.path.sep).split(os.path.sep)
rel_to_parts = rel_to.strip(os.path.sep).split(os.path.sep)
while path_parts and rel_to_parts and path_parts[0] == rel_to_parts[0]:
path_parts.pop(0)
rel_to_parts.pop(0)
full_parts = ['..']*len(rel_to_parts) + path_parts + [path_filename]
if full_parts == ['']:
return '.' + os.path.sep
return os.path.sep.join(full_parts)
def normalize_path(path):
"""
Convert a path to its canonical, case-normalized, absolute version.
"""
return os.path.normcase(os.path.realpath(os.path.expanduser(path)))
def splitext(path):
"""Like os.path.splitext, but take off .tar too"""
base, ext = posixpath.splitext(path)
if base.lower().endswith('.tar'):
ext = base[-4:] + ext
base = base[:-4]
return base, ext
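# e.g. (illustrative): splitext('pkg-1.0.tar.gz') -> ('pkg-1.0', '.tar.gz'),
# whereas plain os.path.splitext would give ('pkg-1.0.tar', '.gz').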
def renames(old, new):
"""Like os.renames(), but handles renaming across devices."""
# Implementation borrowed from os.renames().
head, tail = os.path.split(new)
if head and tail and not os.path.exists(head):
os.makedirs(head)
shutil.move(old, new)
head, tail = os.path.split(old)
if head and tail:
try:
os.removedirs(head)
except OSError:
pass
def is_local(path):
"""
Return True if path is within sys.prefix, if we're running in a virtualenv.
If we're not in a virtualenv, all paths are considered "local."
"""
if not running_under_virtualenv():
return True
return normalize_path(path).startswith(normalize_path(sys.prefix))
def dist_is_local(dist):
"""
Return True if given Distribution object is installed locally
(i.e. within current virtualenv).
Always True if we're not in a virtualenv.
"""
return is_local(dist_location(dist))
def dist_in_usersite(dist):
"""
Return True if given Distribution is installed in user site.
"""
if user_site:
return normalize_path(dist_location(dist)).startswith(normalize_path(user_site))
else:
return False
def dist_in_site_packages(dist):
"""
Return True if given Distribution is installed in distutils.sysconfig.get_python_lib().
"""
return normalize_path(dist_location(dist)).startswith(normalize_path(site_packages))
def dist_is_editable(dist):
"""Is distribution an editable install?"""
#TODO: factor out determining editableness out of FrozenRequirement
from pip import FrozenRequirement
req = FrozenRequirement.from_dist(dist, [])
return req.editable
def get_installed_distributions(local_only=True,
skip=('setuptools', 'pip', 'python', 'distribute'),
include_editables=True,
editables_only=False):
"""
Return a list of installed Distribution objects.
If ``local_only`` is True (default), only return installations
local to the current virtualenv, if in a virtualenv.
    ``skip`` argument is an iterable of lower-case project names to
    ignore; defaults to ('setuptools', 'pip', 'python', 'distribute'). [FIXME
    also skip virtualenv?]
    If ``include_editables`` is False, don't report editables.
    If ``editables_only`` is True, only report editables.
"""
if local_only:
local_test = dist_is_local
else:
local_test = lambda d: True
if include_editables:
editable_test = lambda d: True
else:
editable_test = lambda d: not dist_is_editable(d)
if editables_only:
editables_only_test = lambda d: dist_is_editable(d)
else:
editables_only_test = lambda d: True
return [d for d in pkg_resources.working_set
if local_test(d)
and d.key not in skip
and editable_test(d)
and editables_only_test(d)
]
def egg_link_path(dist):
"""
Return the path for the .egg-link file if it exists, otherwise, None.
There's 3 scenarios:
1) not in a virtualenv
try to find in site.USER_SITE, then site_packages
2) in a no-global virtualenv
try to find in site_packages
3) in a yes-global virtualenv
try to find in site_packages, then site.USER_SITE (don't look in global location)
For #1 and #3, there could be odd cases, where there's an egg-link in 2 locations.
This method will just return the first one found.
"""
sites = []
if running_under_virtualenv():
if virtualenv_no_global():
sites.append(site_packages)
else:
sites.append(site_packages)
if user_site:
sites.append(user_site)
else:
if user_site:
sites.append(user_site)
sites.append(site_packages)
for site in sites:
egglink = os.path.join(site, dist.project_name) + '.egg-link'
if os.path.isfile(egglink):
return egglink
def dist_location(dist):
"""
Get the site-packages location of this distribution. Generally
this is dist.location, except in the case of develop-installed
packages, where dist.location is the source code location, and we
want to know where the egg-link file is.
"""
egg_link = egg_link_path(dist)
if egg_link:
return egg_link
return dist.location
def get_terminal_size():
"""Returns a tuple (x, y) representing the width(x) and the height(x)
in characters of the terminal window."""
def ioctl_GWINSZ(fd):
try:
import fcntl
import termios
import struct
cr = struct.unpack('hh', fcntl.ioctl(fd, termios.TIOCGWINSZ,
'1234'))
except:
return None
if cr == (0, 0):
return None
return cr
cr = ioctl_GWINSZ(0) or ioctl_GWINSZ(1) or ioctl_GWINSZ(2)
if not cr:
try:
fd = os.open(os.ctermid(), os.O_RDONLY)
cr = ioctl_GWINSZ(fd)
os.close(fd)
except:
pass
if not cr:
cr = (os.environ.get('LINES', 25), os.environ.get('COLUMNS', 80))
return int(cr[1]), int(cr[0])
def current_umask():
"""Get the current umask which involves having to set it temporarily."""
mask = os.umask(0)
os.umask(mask)
return mask
def unzip_file(filename, location, flatten=True):
"""
Unzip the file (with path `filename`) to the destination `location`. All
files are written based on system defaults and umask (i.e. permissions are
not preserved), except that regular file members with any execute
permissions (user, group, or world) have "chmod +x" applied after being
written. Note that for windows, any execute changes using os.chmod are
no-ops per the python docs.
"""
if not os.path.exists(location):
os.makedirs(location)
zipfp = open(filename, 'rb')
try:
zip = zipfile.ZipFile(zipfp)
leading = has_leading_dir(zip.namelist()) and flatten
for info in zip.infolist():
name = info.filename
data = zip.read(name)
fn = name
if leading:
fn = split_leading_dir(name)[1]
fn = os.path.join(location, fn)
dir = os.path.dirname(fn)
if not os.path.exists(dir):
os.makedirs(dir)
if fn.endswith('/') or fn.endswith('\\'):
# A directory
if not os.path.exists(fn):
os.makedirs(fn)
else:
fp = open(fn, 'wb')
try:
fp.write(data)
finally:
fp.close()
mode = info.external_attr >> 16
# if mode and regular file and any execute permissions for user/group/world?
if mode and stat.S_ISREG(mode) and mode & 0o111:
# make dest file have execute for user/group/world (chmod +x)
# no-op on windows per python docs
os.chmod(fn, (0o777-current_umask() | 0o111))
finally:
zipfp.close()
def untar_file(filename, location):
"""
Untar the file (with path `filename`) to the destination `location`.
All files are written based on system defaults and umask (i.e. permissions
are not preserved), except that regular file members with any execute
permissions (user, group, or world) have "chmod +x" applied after being
written. Note that for windows, any execute changes using os.chmod are
no-ops per the python docs.
"""
if not os.path.exists(location):
os.makedirs(location)
if filename.lower().endswith('.gz') or filename.lower().endswith('.tgz'):
mode = 'r:gz'
elif filename.lower().endswith('.bz2') or filename.lower().endswith('.tbz'):
mode = 'r:bz2'
elif filename.lower().endswith('.tar'):
mode = 'r'
else:
logger.warn('Cannot determine compression type for file %s' % filename)
mode = 'r:*'
tar = tarfile.open(filename, mode)
try:
# note: python<=2.5 doesnt seem to know about pax headers, filter them
leading = has_leading_dir([
member.name for member in tar.getmembers()
if member.name != 'pax_global_header'
])
for member in tar.getmembers():
fn = member.name
if fn == 'pax_global_header':
continue
if leading:
fn = split_leading_dir(fn)[1]
path = os.path.join(location, fn)
if member.isdir():
if not os.path.exists(path):
os.makedirs(path)
elif member.issym():
try:
tar._extract_member(member, path)
except:
e = sys.exc_info()[1]
# Some corrupt tar files seem to produce this
# (specifically bad symlinks)
logger.warn(
'In the tar file %s the member %s is invalid: %s'
% (filename, member.name, e))
continue
else:
try:
fp = tar.extractfile(member)
except (KeyError, AttributeError):
e = sys.exc_info()[1]
# Some corrupt tar files seem to produce this
# (specifically bad symlinks)
logger.warn(
'In the tar file %s the member %s is invalid: %s'
% (filename, member.name, e))
continue
if not os.path.exists(os.path.dirname(path)):
os.makedirs(os.path.dirname(path))
destfp = open(path, 'wb')
try:
shutil.copyfileobj(fp, destfp)
finally:
destfp.close()
fp.close()
# member have any execute permissions for user/group/world?
if member.mode & 0o111:
# make dest file have execute for user/group/world
# no-op on windows per python docs
os.chmod(path, (0o777-current_umask() | 0o111))
finally:
tar.close()
def create_download_cache_folder(folder):
logger.indent -= 2
logger.notify('Creating supposed download cache at %s' % folder)
logger.indent += 2
os.makedirs(folder)
def cache_download(target_file, temp_location, content_type):
logger.notify('Storing download in cache at %s' % display_path(target_file))
shutil.copyfile(temp_location, target_file)
fp = open(target_file+'.content-type', 'w')
fp.write(content_type)
fp.close()
def unpack_file(filename, location, content_type, link):
filename = os.path.realpath(filename)
if (content_type == 'application/zip'
or filename.endswith('.zip')
or filename.endswith('.pybundle')
or filename.endswith('.whl')
or zipfile.is_zipfile(filename)):
unzip_file(filename, location, flatten=not filename.endswith(('.pybundle', '.whl')))
elif (content_type == 'application/x-gzip'
or tarfile.is_tarfile(filename)
or splitext(filename)[1].lower() in ('.tar', '.tar.gz', '.tar.bz2', '.tgz', '.tbz')):
untar_file(filename, location)
elif (content_type and content_type.startswith('text/html')
and is_svn_page(file_contents(filename))):
# We don't really care about this
from pip.vcs.subversion import Subversion
Subversion('svn+' + link.url).unpack(location)
else:
## FIXME: handle?
## FIXME: magic signatures?
logger.fatal('Cannot unpack file %s (downloaded from %s, content-type: %s); cannot detect archive format'
% (filename, location, content_type))
raise InstallationError('Cannot determine archive format of %s' % location)
def call_subprocess(cmd, show_stdout=True,
filter_stdout=None, cwd=None,
raise_on_returncode=True,
command_level=logger.DEBUG, command_desc=None,
extra_environ=None):
if command_desc is None:
cmd_parts = []
for part in cmd:
if ' ' in part or '\n' in part or '"' in part or "'" in part:
part = '"%s"' % part.replace('"', '\\"')
cmd_parts.append(part)
command_desc = ' '.join(cmd_parts)
if show_stdout:
stdout = None
else:
stdout = subprocess.PIPE
logger.log(command_level, "Running command %s" % command_desc)
env = os.environ.copy()
if extra_environ:
env.update(extra_environ)
try:
proc = subprocess.Popen(
cmd, stderr=subprocess.STDOUT, stdin=None, stdout=stdout,
cwd=cwd, env=env)
except Exception:
e = sys.exc_info()[1]
logger.fatal(
"Error %s while executing command %s" % (e, command_desc))
raise
all_output = []
if stdout is not None:
stdout = proc.stdout
while 1:
line = console_to_str(stdout.readline())
if not line:
break
line = line.rstrip()
all_output.append(line + '\n')
if filter_stdout:
level = filter_stdout(line)
if isinstance(level, tuple):
level, line = level
logger.log(level, line)
if not logger.stdout_level_matches(level):
logger.show_progress()
else:
logger.info(line)
else:
returned_stdout, returned_stderr = proc.communicate()
all_output = [returned_stdout or '']
proc.wait()
if proc.returncode:
if raise_on_returncode:
if all_output:
logger.notify('Complete output from command %s:' % command_desc)
logger.notify('\n'.join(all_output) + '\n----------------------------------------')
raise InstallationError(
"Command %s failed with error code %s in %s"
% (command_desc, proc.returncode, cwd))
else:
logger.warn(
"Command %s had error code %s in %s"
% (command_desc, proc.returncode, cwd))
if stdout is not None:
return ''.join(all_output)
def is_prerelease(vers):
"""
Attempt to determine if this is a pre-release using PEP386/PEP426 rules.
Will return True if it is a pre-release and False if not. Versions are
assumed to be a pre-release if they cannot be parsed.
"""
normalized = version._suggest_normalized_version(vers)
if normalized is None:
# Cannot normalize, assume it is a pre-release
return True
parsed = version._normalized_key(normalized)
return any([any([y in set(["a", "b", "c", "rc", "dev"]) for y in x]) for x in parsed])
|
'''expand keywords in tracked files
This extension expands RCS/CVS-like or self-customized $Keywords$ in
tracked text files selected by your configuration.
Keywords are only expanded in local repositories and not stored in the
change history. The mechanism can be regarded as a convenience for the
current user or for archive distribution.
Keywords expand to the changeset data pertaining to the latest change
relative to the working directory parent of each file.
Configuration is done in the [keyword], [keywordset] and [keywordmaps]
sections of hgrc files.
Example::
[keyword]
# expand keywords in every python file except those matching "x*"
**.py =
x* = ignore
[keywordset]
# prefer svn- over cvs-like default keywordmaps
svn = True
.. note::
The more specific you are in your filename patterns the less you
lose speed in huge repositories.
For [keywordmaps] template mapping and expansion demonstration and
control run :hg:`kwdemo`. See :hg:`help templates` for a list of
available templates and filters.
Three additional date template filters are provided:
:``utcdate``: "2006/09/18 15:13:13"
:``svnutcdate``: "2006-09-18 15:13:13Z"
:``svnisodate``: "2006-09-18 08:13:13 -700 (Mon, 18 Sep 2006)"
The default template mappings (view with :hg:`kwdemo -d`) can be
replaced with customized keywords and templates. Again, run
:hg:`kwdemo` to control the results of your configuration changes.
Before changing/disabling active keywords, you must run :hg:`kwshrink`
to avoid storing expanded keywords in the change history.
To force expansion after enabling it, or a configuration change, run
:hg:`kwexpand`.
Expansions spanning more than one line and incremental expansions,
like CVS' $Log$, are not supported. A keyword template map "Log =
{desc}" expands to the first line of the changeset description.
'''
from mercurial import commands, context, cmdutil, dispatch, filelog, extensions
from mercurial import localrepo, match, patch, templatefilters, templater, util
from mercurial import scmutil
from mercurial.hgweb import webcommands
from mercurial.i18n import _
import os, re, shutil, tempfile
commands.optionalrepo += ' kwdemo'
commands.inferrepo += ' kwexpand kwfiles kwshrink'
cmdtable = {}
command = cmdutil.command(cmdtable)
testedwith = 'internal'
nokwcommands = ('add addremove annotate bundle export grep incoming init log'
' outgoing push tip verify convert email glog')
restricted = 'merge kwexpand kwshrink record qrecord resolve transplant'
recordextensions = 'record'
colortable = {
'kwfiles.enabled': 'green bold',
'kwfiles.deleted': 'cyan bold underline',
'kwfiles.enabledunknown': 'green',
'kwfiles.ignored': 'bold',
'kwfiles.ignoredunknown': 'none'
}
def utcdate(text):
''':utcdate: Date. Returns a UTC-date in this format: "2009/08/18 11:00:13".
'''
return util.datestr((util.parsedate(text)[0], 0), '%Y/%m/%d %H:%M:%S')
def svnisodate(text):
''':svnisodate: Date. Returns a date in this format: "2009-08-18 13:00:13
+0200 (Tue, 18 Aug 2009)".
'''
return util.datestr(text, '%Y-%m-%d %H:%M:%S %1%2 (%a, %d %b %Y)')
def svnutcdate(text):
''':svnutcdate: Date. Returns a UTC-date in this format: "2009-08-18
11:00:13Z".
'''
return util.datestr((util.parsedate(text)[0], 0), '%Y-%m-%d %H:%M:%SZ')
templatefilters.filters.update({'utcdate': utcdate,
'svnisodate': svnisodate,
'svnutcdate': svnutcdate})
kwtools = {'templater': None, 'hgcmd': ''}
def _defaultkwmaps(ui):
'''Returns default keywordmaps according to keywordset configuration.'''
templates = {
'Revision': '{node|short}',
'Author': '{author|user}',
}
kwsets = ({
'Date': '{date|utcdate}',
'RCSfile': '{file|basename},v',
'RCSFile': '{file|basename},v', # kept for backwards compatibility
# with hg-keyword
'Source': '{root}/{file},v',
'Id': '{file|basename},v {node|short} {date|utcdate} {author|user}',
'Header': '{root}/{file},v {node|short} {date|utcdate} {author|user}',
}, {
'Date': '{date|svnisodate}',
'Id': '{file|basename},v {node|short} {date|svnutcdate} {author|user}',
'LastChangedRevision': '{node|short}',
'LastChangedBy': '{author|user}',
'LastChangedDate': '{date|svnisodate}',
})
templates.update(kwsets[ui.configbool('keywordset', 'svn')])
return templates
def _shrinktext(text, subfunc):
'''Helper for keyword expansion removal in text.
Depending on subfunc also returns number of substitutions.'''
return subfunc(r'$\1$', text)
def _preselect(wstatus, changed):
'''Retrieves modified and added files from a working directory state
and returns the subset of each contained in given changed files
retrieved from a change context.'''
modified, added = wstatus[:2]
modified = [f for f in modified if f in changed]
added = [f for f in added if f in changed]
return modified, added
class kwtemplater(object):
'''
Sets up keyword templates, corresponding keyword regex, and
provides keyword substitution functions.
'''
def __init__(self, ui, repo, inc, exc):
self.ui = ui
self.repo = repo
self.match = match.match(repo.root, '', [], inc, exc)
self.restrict = kwtools['hgcmd'] in restricted.split()
self.postcommit = False
kwmaps = self.ui.configitems('keywordmaps')
if kwmaps: # override default templates
self.templates = dict((k, templater.parsestring(v, False))
for k, v in kwmaps)
else:
self.templates = _defaultkwmaps(self.ui)
@util.propertycache
def escape(self):
'''Returns bar-separated and escaped keywords.'''
return '|'.join(map(re.escape, self.templates.keys()))
@util.propertycache
def rekw(self):
'''Returns regex for unexpanded keywords.'''
return re.compile(r'\$(%s)\$' % self.escape)
@util.propertycache
def rekwexp(self):
'''Returns regex for expanded keywords.'''
return re.compile(r'\$(%s): [^$\n\r]*? \$' % self.escape)
def substitute(self, data, path, ctx, subfunc):
'''Replaces keywords in data with expanded template.'''
def kwsub(mobj):
kw = mobj.group(1)
ct = cmdutil.changeset_templater(self.ui, self.repo,
False, None, '', False)
ct.use_template(self.templates[kw])
self.ui.pushbuffer()
ct.show(ctx, root=self.repo.root, file=path)
ekw = templatefilters.firstline(self.ui.popbuffer())
return '$%s: %s $' % (kw, ekw)
return subfunc(kwsub, data)
def linkctx(self, path, fileid):
'''Similar to filelog.linkrev, but returns a changectx.'''
return self.repo.filectx(path, fileid=fileid).changectx()
def expand(self, path, node, data):
'''Returns data with keywords expanded.'''
if not self.restrict and self.match(path) and not util.binary(data):
ctx = self.linkctx(path, node)
return self.substitute(data, path, ctx, self.rekw.sub)
return data
def iskwfile(self, cand, ctx):
'''Returns subset of candidates which are configured for keyword
expansion but are not symbolic links.'''
return [f for f in cand if self.match(f) and 'l' not in ctx.flags(f)]
def overwrite(self, ctx, candidates, lookup, expand, rekw=False):
'''Overwrites selected files expanding/shrinking keywords.'''
if self.restrict or lookup or self.postcommit: # exclude kw_copy
candidates = self.iskwfile(candidates, ctx)
if not candidates:
return
kwcmd = self.restrict and lookup # kwexpand/kwshrink
if self.restrict or expand and lookup:
mf = ctx.manifest()
if self.restrict or rekw:
re_kw = self.rekw
else:
re_kw = self.rekwexp
if expand:
msg = _('overwriting %s expanding keywords\n')
else:
msg = _('overwriting %s shrinking keywords\n')
for f in candidates:
if self.restrict:
data = self.repo.file(f).read(mf[f])
else:
data = self.repo.wread(f)
if util.binary(data):
continue
if expand:
if lookup:
ctx = self.linkctx(f, mf[f])
data, found = self.substitute(data, f, ctx, re_kw.subn)
elif self.restrict:
found = re_kw.search(data)
else:
data, found = _shrinktext(data, re_kw.subn)
if found:
self.ui.note(msg % f)
fp = self.repo.wopener(f, "wb", atomictemp=True)
fp.write(data)
fp.close()
if kwcmd:
self.repo.dirstate.normal(f)
elif self.postcommit:
self.repo.dirstate.normallookup(f)
def shrink(self, fname, text):
'''Returns text with all keyword substitutions removed.'''
if self.match(fname) and not util.binary(text):
return _shrinktext(text, self.rekwexp.sub)
return text
def shrinklines(self, fname, lines):
'''Returns lines with keyword substitutions removed.'''
if self.match(fname):
text = ''.join(lines)
if not util.binary(text):
return _shrinktext(text, self.rekwexp.sub).splitlines(True)
return lines
def wread(self, fname, data):
'''If in restricted mode returns data read from wdir with
keyword substitutions removed.'''
if self.restrict:
return self.shrink(fname, data)
return data
class kwfilelog(filelog.filelog):
'''
Subclass of filelog to hook into its read, add, cmp methods.
Keywords are "stored" unexpanded, and processed on reading.
'''
def __init__(self, opener, kwt, path):
super(kwfilelog, self).__init__(opener, path)
self.kwt = kwt
self.path = path
def read(self, node):
'''Expands keywords when reading filelog.'''
data = super(kwfilelog, self).read(node)
if self.renamed(node):
return data
return self.kwt.expand(self.path, node, data)
def add(self, text, meta, tr, link, p1=None, p2=None):
'''Removes keyword substitutions when adding to filelog.'''
text = self.kwt.shrink(self.path, text)
return super(kwfilelog, self).add(text, meta, tr, link, p1, p2)
def cmp(self, node, text):
'''Removes keyword substitutions for comparison.'''
text = self.kwt.shrink(self.path, text)
return super(kwfilelog, self).cmp(node, text)
def _status(ui, repo, wctx, kwt, *pats, **opts):
'''Bails out if [keyword] configuration is not active.
Returns status of working directory.'''
if kwt:
return repo.status(match=scmutil.match(wctx, pats, opts), clean=True,
unknown=opts.get('unknown') or opts.get('all'))
if ui.configitems('keyword'):
raise util.Abort(_('[keyword] patterns cannot match'))
raise util.Abort(_('no [keyword] patterns configured'))
def _kwfwrite(ui, repo, expand, *pats, **opts):
'''Selects files and passes them to kwtemplater.overwrite.'''
wctx = repo[None]
if len(wctx.parents()) > 1:
raise util.Abort(_('outstanding uncommitted merge'))
kwt = kwtools['templater']
wlock = repo.wlock()
try:
status = _status(ui, repo, wctx, kwt, *pats, **opts)
modified, added, removed, deleted, unknown, ignored, clean = status
if modified or added or removed or deleted:
raise util.Abort(_('outstanding uncommitted changes'))
kwt.overwrite(wctx, clean, True, expand)
finally:
wlock.release()
@command('kwdemo',
[('d', 'default', None, _('show default keyword template maps')),
('f', 'rcfile', '',
_('read maps from rcfile'), _('FILE'))],
_('hg kwdemo [-d] [-f RCFILE] [TEMPLATEMAP]...'))
def demo(ui, repo, *args, **opts):
'''print [keywordmaps] configuration and an expansion example
Show current, custom, or default keyword template maps and their
expansions.
Extend the current configuration by specifying maps as arguments
and using -f/--rcfile to source an external hgrc file.
Use -d/--default to disable current configuration.
See :hg:`help templates` for information on templates and filters.
'''
def demoitems(section, items):
ui.write('[%s]\n' % section)
for k, v in sorted(items):
ui.write('%s = %s\n' % (k, v))
fn = 'demo.txt'
tmpdir = tempfile.mkdtemp('', 'kwdemo.')
ui.note(_('creating temporary repository at %s\n') % tmpdir)
repo = localrepo.localrepository(repo.baseui, tmpdir, True)
ui.setconfig('keyword', fn, '')
svn = ui.configbool('keywordset', 'svn')
# explicitly set keywordset for demo output
ui.setconfig('keywordset', 'svn', svn)
uikwmaps = ui.configitems('keywordmaps')
if args or opts.get('rcfile'):
ui.status(_('\n\tconfiguration using custom keyword template maps\n'))
if uikwmaps:
ui.status(_('\textending current template maps\n'))
if opts.get('default') or not uikwmaps:
if svn:
ui.status(_('\toverriding default svn keywordset\n'))
else:
ui.status(_('\toverriding default cvs keywordset\n'))
if opts.get('rcfile'):
ui.readconfig(opts.get('rcfile'))
if args:
# simulate hgrc parsing
rcmaps = ['[keywordmaps]\n'] + [a + '\n' for a in args]
fp = repo.opener('hgrc', 'w')
fp.writelines(rcmaps)
fp.close()
ui.readconfig(repo.join('hgrc'))
kwmaps = dict(ui.configitems('keywordmaps'))
elif opts.get('default'):
if svn:
ui.status(_('\n\tconfiguration using default svn keywordset\n'))
else:
ui.status(_('\n\tconfiguration using default cvs keywordset\n'))
kwmaps = _defaultkwmaps(ui)
if uikwmaps:
ui.status(_('\tdisabling current template maps\n'))
for k, v in kwmaps.iteritems():
ui.setconfig('keywordmaps', k, v)
else:
ui.status(_('\n\tconfiguration using current keyword template maps\n'))
if uikwmaps:
kwmaps = dict(uikwmaps)
else:
kwmaps = _defaultkwmaps(ui)
uisetup(ui)
reposetup(ui, repo)
ui.write('[extensions]\nkeyword =\n')
demoitems('keyword', ui.configitems('keyword'))
demoitems('keywordset', ui.configitems('keywordset'))
demoitems('keywordmaps', kwmaps.iteritems())
keywords = '$' + '$\n$'.join(sorted(kwmaps.keys())) + '$\n'
repo.wopener.write(fn, keywords)
repo[None].add([fn])
ui.note(_('\nkeywords written to %s:\n') % fn)
ui.note(keywords)
repo.dirstate.setbranch('demobranch')
for name, cmd in ui.configitems('hooks'):
if name.split('.', 1)[0].find('commit') > -1:
repo.ui.setconfig('hooks', name, '')
msg = _('hg keyword configuration and expansion example')
ui.note("hg ci -m '%s'\n" % msg) # check-code-ignore
repo.commit(text=msg)
ui.status(_('\n\tkeywords expanded\n'))
ui.write(repo.wread(fn))
shutil.rmtree(tmpdir, ignore_errors=True)
@command('kwexpand', commands.walkopts, _('hg kwexpand [OPTION]... [FILE]...'))
def expand(ui, repo, *pats, **opts):
'''expand keywords in the working directory
Run after (re)enabling keyword expansion.
kwexpand refuses to run if given files contain local changes.
'''
# 3rd argument sets expansion to True
_kwfwrite(ui, repo, True, *pats, **opts)
@command('kwfiles',
[('A', 'all', None, _('show keyword status flags of all files')),
('i', 'ignore', None, _('show files excluded from expansion')),
('u', 'unknown', None, _('only show unknown (not tracked) files')),
] + commands.walkopts,
_('hg kwfiles [OPTION]... [FILE]...'))
def files(ui, repo, *pats, **opts):
'''show files configured for keyword expansion
List which files in the working directory are matched by the
[keyword] configuration patterns.
Useful to prevent inadvertent keyword expansion and to speed up
execution by including only files that are actual candidates for
expansion.
See :hg:`help keyword` on how to construct patterns both for
inclusion and exclusion of files.
With -A/--all and -v/--verbose the codes used to show the status
of files are::
K = keyword expansion candidate
k = keyword expansion candidate (not tracked)
I = ignored
i = ignored (not tracked)
'''
kwt = kwtools['templater']
wctx = repo[None]
status = _status(ui, repo, wctx, kwt, *pats, **opts)
cwd = pats and repo.getcwd() or ''
modified, added, removed, deleted, unknown, ignored, clean = status
files = []
if not opts.get('unknown') or opts.get('all'):
files = sorted(modified + added + clean)
kwfiles = kwt.iskwfile(files, wctx)
kwdeleted = kwt.iskwfile(deleted, wctx)
kwunknown = kwt.iskwfile(unknown, wctx)
if not opts.get('ignore') or opts.get('all'):
showfiles = kwfiles, kwdeleted, kwunknown
else:
showfiles = [], [], []
if opts.get('all') or opts.get('ignore'):
showfiles += ([f for f in files if f not in kwfiles],
[f for f in unknown if f not in kwunknown])
kwlabels = 'enabled deleted enabledunknown ignored ignoredunknown'.split()
kwstates = zip(kwlabels, 'K!kIi', showfiles)
fm = ui.formatter('kwfiles', opts)
fmt = '%.0s%s\n'
if opts.get('all') or ui.verbose:
fmt = '%s %s\n'
for kwstate, char, filenames in kwstates:
label = 'kwfiles.' + kwstate
for f in filenames:
fm.startitem()
fm.write('kwstatus path', fmt, char,
repo.pathto(f, cwd), label=label)
fm.end()
@command('kwshrink', commands.walkopts, _('hg kwshrink [OPTION]... [FILE]...'))
def shrink(ui, repo, *pats, **opts):
'''revert expanded keywords in the working directory
Must be run before changing/disabling active keywords.
kwshrink refuses to run if given files contain local changes.
'''
# 3rd argument sets expansion to False
_kwfwrite(ui, repo, False, *pats, **opts)
def uisetup(ui):
''' Monkeypatches dispatch._parse to retrieve user command.'''
def kwdispatch_parse(orig, ui, args):
'''Monkeypatch dispatch._parse to obtain running hg command.'''
cmd, func, args, options, cmdoptions = orig(ui, args)
kwtools['hgcmd'] = cmd
return cmd, func, args, options, cmdoptions
extensions.wrapfunction(dispatch, '_parse', kwdispatch_parse)
def reposetup(ui, repo):
'''Sets up repo as kwrepo for keyword substitution.
Overrides file method to return kwfilelog instead of filelog
if file matches user configuration.
Wraps commit to overwrite configured files with updated
keyword substitutions.
Monkeypatches patch and webcommands.'''
try:
if (not repo.local() or kwtools['hgcmd'] in nokwcommands.split()
or '.hg' in util.splitpath(repo.root)
or repo._url.startswith('bundle:')):
return
except AttributeError:
pass
inc, exc = [], ['.hg*']
for pat, opt in ui.configitems('keyword'):
if opt != 'ignore':
inc.append(pat)
else:
exc.append(pat)
if not inc:
return
kwtools['templater'] = kwt = kwtemplater(ui, repo, inc, exc)
class kwrepo(repo.__class__):
def file(self, f):
if f[0] == '/':
f = f[1:]
return kwfilelog(self.sopener, kwt, f)
def wread(self, filename):
data = super(kwrepo, self).wread(filename)
return kwt.wread(filename, data)
def commit(self, *args, **opts):
# use custom commitctx for user commands
# other extensions can still wrap repo.commitctx directly
self.commitctx = self.kwcommitctx
try:
return super(kwrepo, self).commit(*args, **opts)
finally:
del self.commitctx
def kwcommitctx(self, ctx, error=False):
n = super(kwrepo, self).commitctx(ctx, error)
# no lock needed, only called from repo.commit() which already locks
if not kwt.postcommit:
restrict = kwt.restrict
kwt.restrict = True
kwt.overwrite(self[n], sorted(ctx.added() + ctx.modified()),
False, True)
kwt.restrict = restrict
return n
def rollback(self, dryrun=False, force=False):
wlock = self.wlock()
try:
if not dryrun:
changed = self['.'].files()
ret = super(kwrepo, self).rollback(dryrun, force)
if not dryrun:
ctx = self['.']
modified, added = _preselect(self[None].status(), changed)
kwt.overwrite(ctx, modified, True, True)
kwt.overwrite(ctx, added, True, False)
return ret
finally:
wlock.release()
# monkeypatches
def kwpatchfile_init(orig, self, ui, gp, backend, store, eolmode=None):
'''Monkeypatch/wrap patch.patchfile.__init__ to avoid
rejects or conflicts due to expanded keywords in working dir.'''
orig(self, ui, gp, backend, store, eolmode)
# shrink keywords read from working dir
self.lines = kwt.shrinklines(self.fname, self.lines)
def kw_diff(orig, repo, node1=None, node2=None, match=None, changes=None,
opts=None, prefix=''):
'''Monkeypatch patch.diff to avoid expansion.'''
kwt.restrict = True
return orig(repo, node1, node2, match, changes, opts, prefix)
def kwweb_skip(orig, web, req, tmpl):
'''Wraps webcommands.x turning off keyword expansion.'''
kwt.match = util.never
return orig(web, req, tmpl)
def kw_amend(orig, ui, repo, commitfunc, old, extra, pats, opts):
'''Wraps cmdutil.amend expanding keywords after amend.'''
wlock = repo.wlock()
try:
kwt.postcommit = True
newid = orig(ui, repo, commitfunc, old, extra, pats, opts)
if newid != old.node():
ctx = repo[newid]
kwt.restrict = True
kwt.overwrite(ctx, ctx.files(), False, True)
kwt.restrict = False
return newid
finally:
wlock.release()
def kw_copy(orig, ui, repo, pats, opts, rename=False):
'''Wraps cmdutil.copy so that copy/rename destinations do not
contain expanded keywords.
Note that the source of a regular file destination may also be a
symlink:
hg cp sym x -> x is symlink
cp sym x; hg cp -A sym x -> x is file (maybe expanded keywords)
For the latter we have to follow the symlink to find out whether its
target is configured for expansion and we therefore must unexpand the
keywords in the destination.'''
wlock = repo.wlock()
try:
orig(ui, repo, pats, opts, rename)
if opts.get('dry_run'):
return
wctx = repo[None]
cwd = repo.getcwd()
def haskwsource(dest):
'''Returns true if dest is a regular file and configured for
expansion or a symlink which points to a file configured for
expansion. '''
source = repo.dirstate.copied(dest)
if 'l' in wctx.flags(source):
source = scmutil.canonpath(repo.root, cwd,
os.path.realpath(source))
return kwt.match(source)
candidates = [f for f in repo.dirstate.copies() if
'l' not in wctx.flags(f) and haskwsource(f)]
kwt.overwrite(wctx, candidates, False, False)
finally:
wlock.release()
def kw_dorecord(orig, ui, repo, commitfunc, *pats, **opts):
'''Wraps record.dorecord expanding keywords after recording.'''
wlock = repo.wlock()
try:
# record returns 0 even when nothing has changed
# therefore compare nodes before and after
kwt.postcommit = True
ctx = repo['.']
wstatus = repo[None].status()
ret = orig(ui, repo, commitfunc, *pats, **opts)
recctx = repo['.']
if ctx != recctx:
modified, added = _preselect(wstatus, recctx.files())
kwt.restrict = False
kwt.overwrite(recctx, modified, False, True)
kwt.overwrite(recctx, added, False, True, True)
kwt.restrict = True
return ret
finally:
wlock.release()
def kwfilectx_cmp(orig, self, fctx):
# keyword affects data size, comparing wdir and filelog size does
# not make sense
if (fctx._filerev is None and
(self._repo._encodefilterpats or
kwt.match(fctx.path()) and 'l' not in fctx.flags() or
self.size() - 4 == fctx.size()) or
self.size() == fctx.size()):
return self._filelog.cmp(self._filenode, fctx.data())
return True
extensions.wrapfunction(context.filectx, 'cmp', kwfilectx_cmp)
extensions.wrapfunction(patch.patchfile, '__init__', kwpatchfile_init)
extensions.wrapfunction(patch, 'diff', kw_diff)
extensions.wrapfunction(cmdutil, 'amend', kw_amend)
extensions.wrapfunction(cmdutil, 'copy', kw_copy)
for c in 'annotate changeset rev filediff diff'.split():
extensions.wrapfunction(webcommands, c, kwweb_skip)
for name in recordextensions.split():
try:
record = extensions.find(name)
extensions.wrapfunction(record, 'dorecord', kw_dorecord)
except KeyError:
pass
repo.__class__ = kwrepo
|
from i18n import _
import util, match
import re
_commentre = None
def ignorepats(lines):
'''parse lines (iterable) of .hgignore text, returning a tuple of
(patterns, parse errors). These patterns should be given to compile()
to be validated and converted into a match function.'''
syntaxes = {'re': 'relre:', 'regexp': 'relre:', 'glob': 'relglob:'}
syntax = 'relre:'
patterns = []
warnings = []
for line in lines:
if "#" in line:
global _commentre
if not _commentre:
_commentre = re.compile(r'((^|[^\\])(\\\\)*)#.*')
# remove comments prefixed by an even number of escapes
line = _commentre.sub(r'\1', line)
# fixup properly escaped comments that survived the above
line = line.replace("\\#", "#")
line = line.rstrip()
if not line:
continue
if line.startswith('syntax:'):
s = line[7:].strip()
try:
syntax = syntaxes[s]
except KeyError:
warnings.append(_("ignoring invalid syntax '%s'") % s)
continue
pat = syntax + line
for s, rels in syntaxes.iteritems():
if line.startswith(rels):
pat = line
break
elif line.startswith(s+':'):
pat = rels + line[len(s) + 1:]
break
patterns.append(pat)
return patterns, warnings
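# Illustrative sketch (not part of the original module) of how ignorepats()
# translates a few .hgignore lines; the input values are made up:
#   lines = ['syntax: glob', '*.pyc', 're:^build/']
#   patterns, warnings = ignorepats(lines)
#   # patterns == ['relglob:*.pyc', 'relre:^build/'], warnings == []
# The 'syntax:' line only switches the default prefix and emits no pattern;
# explicit 're:'/'glob:' prefixes are rewritten to their relative forms.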
def readpats(root, files, warn):
'''return a dict mapping ignore-file-name to list-of-patterns'''
pats = {}
for f in files:
if f in pats:
continue
try:
pats[f] = []
fp = open(f)
pats[f], warnings = ignorepats(fp)
fp.close()
for warning in warnings:
warn("%s: %s\n" % (f, warning))
except IOError, inst:
if f != files[0]:
warn(_("skipping unreadable ignore file '%s': %s\n") %
(f, inst.strerror))
return [(f, pats[f]) for f in files if f in pats]
def ignore(root, files, warn):
'''return matcher covering patterns in 'files'.
the files parsed for patterns include:
.hgignore in the repository root
any additional files specified in the [ui] section of ~/.hgrc
trailing white space is dropped.
the escape character is backslash.
comments start with #.
empty lines are skipped.
lines can be of the following formats:
syntax: regexp # defaults following lines to non-rooted regexps
syntax: glob # defaults following lines to non-rooted globs
re:pattern # non-rooted regular expression
glob:pattern # non-rooted glob
pattern # pattern of the current default type'''
pats = readpats(root, files, warn)
allpats = []
for f, patlist in pats:
allpats.extend(patlist)
if not allpats:
return util.never
try:
ignorefunc = match.match(root, '', [], allpats)
except util.Abort:
# Re-raise an exception where the src is the right file
for f, patlist in pats:
try:
match.match(root, '', [], patlist)
except util.Abort, inst:
raise util.Abort('%s: %s' % (f, inst[0]))
return ignorefunc
|
from django.contrib import auth
from django import db
from django.utils.encoding import force_bytes
def check_password(environ, username, password):
"""
Authenticates against Django's auth database
mod_wsgi docs specify None, True, False as return value depending
on whether the user exists and authenticates.
"""
UserModel = auth.get_user_model()
# db connection state is managed similarly to the wsgi handler
# as mod_wsgi may call these functions outside of a request/response cycle
db.reset_queries()
try:
try:
user = UserModel._default_manager.get_by_natural_key(username)
except UserModel.DoesNotExist:
return None
if not user.is_active:
return None
return user.check_password(password)
finally:
db.close_connection()
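# Hedged usage sketch (not from the original file): mod_wsgi normally calls
# check_password(environ, username, password) itself via its auth-provider
# hook; calling it directly only needs a configured Django settings module.
# The username and password below are placeholders.
#   result = check_password({}, 'alice', 'secret')
#   # None -> unknown user, True -> password ok, False -> password wrong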
def groups_for_user(environ, username):
"""
Authorizes a user based on groups
"""
UserModel = auth.get_user_model()
db.reset_queries()
try:
try:
user = UserModel._default_manager.get_by_natural_key(username)
except UserModel.DoesNotExist:
return []
if not user.is_active:
return []
return [force_bytes(group.name) for group in user.groups.all()]
finally:
db.close_connection()
|
"""
Make sure the base address setting is extracted properly.
"""
import TestGyp
import re
import sys
if sys.platform == 'win32':
test = TestGyp.TestGyp(formats=['msvs', 'ninja'])
CHDIR = 'linker-flags'
test.run_gyp('base-address.gyp', chdir=CHDIR)
test.build('base-address.gyp', test.ALL, chdir=CHDIR)
def GetHeaders(exe):
full_path = test.built_file_path(exe, chdir=CHDIR)
return test.run_dumpbin('/headers', full_path)
# Extract the image base address from the headers output.
image_base_reg_ex = re.compile(r'.*\s+([0-9]+) image base.*', re.DOTALL)
exe_headers = GetHeaders('test_base_specified_exe.exe')
exe_match = image_base_reg_ex.match(exe_headers)
if not exe_match or not exe_match.group(1):
test.fail_test()
if exe_match.group(1) != '420000':
test.fail_test()
dll_headers = GetHeaders('test_base_specified_dll.dll')
dll_match = image_base_reg_ex.match(dll_headers)
if not dll_match or not dll_match.group(1):
test.fail_test()
if dll_match.group(1) != '10420000':
test.fail_test()
default_exe_headers = GetHeaders('test_base_default_exe.exe')
default_exe_match = image_base_reg_ex.match(default_exe_headers)
if not default_exe_match or not default_exe_match.group(1):
test.fail_test()
if default_exe_match.group(1) != '400000':
test.fail_test()
default_dll_headers = GetHeaders('test_base_default_dll.dll')
default_dll_match = image_base_reg_ex.match(default_dll_headers)
if not default_dll_match or not default_dll_match.group(1):
test.fail_test()
if default_dll_match.group(1) != '10000000':
test.fail_test()
test.pass_test()
|
import unittest
from test import test_support
import base64
class LegacyBase64TestCase(unittest.TestCase):
def test_encodestring(self):
eq = self.assertEqual
eq(base64.encodestring("www.python.org"), "d3d3LnB5dGhvbi5vcmc=\n")
eq(base64.encodestring("a"), "YQ==\n")
eq(base64.encodestring("ab"), "YWI=\n")
eq(base64.encodestring("abc"), "YWJj\n")
eq(base64.encodestring(""), "")
eq(base64.encodestring("abcdefghijklmnopqrstuvwxyz"
"ABCDEFGHIJKLMNOPQRSTUVWXYZ"
"0123456789!@#0^&*();:<>,. []{}"),
"YWJjZGVmZ2hpamtsbW5vcHFyc3R1dnd4eXpBQkNE"
"RUZHSElKS0xNTk9QUVJTVFVWV1hZWjAxMjM0\nNT"
"Y3ODkhQCMwXiYqKCk7Ojw+LC4gW117fQ==\n")
def test_decodestring(self):
eq = self.assertEqual
eq(base64.decodestring("d3d3LnB5dGhvbi5vcmc=\n"), "www.python.org")
eq(base64.decodestring("YQ==\n"), "a")
eq(base64.decodestring("YWI=\n"), "ab")
eq(base64.decodestring("YWJj\n"), "abc")
eq(base64.decodestring("YWJjZGVmZ2hpamtsbW5vcHFyc3R1dnd4eXpBQkNE"
"RUZHSElKS0xNTk9QUVJTVFVWV1hZWjAxMjM0\nNT"
"Y3ODkhQCMwXiYqKCk7Ojw+LC4gW117fQ==\n"),
"abcdefghijklmnopqrstuvwxyz"
"ABCDEFGHIJKLMNOPQRSTUVWXYZ"
"0123456789!@#0^&*();:<>,. []{}")
eq(base64.decodestring(''), '')
def test_encode(self):
eq = self.assertEqual
from cStringIO import StringIO
infp = StringIO('abcdefghijklmnopqrstuvwxyz'
'ABCDEFGHIJKLMNOPQRSTUVWXYZ'
'0123456789!@#0^&*();:<>,. []{}')
outfp = StringIO()
base64.encode(infp, outfp)
eq(outfp.getvalue(),
'YWJjZGVmZ2hpamtsbW5vcHFyc3R1dnd4eXpBQkNE'
'RUZHSElKS0xNTk9QUVJTVFVWV1hZWjAxMjM0\nNT'
'Y3ODkhQCMwXiYqKCk7Ojw+LC4gW117fQ==\n')
def test_decode(self):
from cStringIO import StringIO
infp = StringIO('d3d3LnB5dGhvbi5vcmc=')
outfp = StringIO()
base64.decode(infp, outfp)
self.assertEqual(outfp.getvalue(), 'www.python.org')
class BaseXYTestCase(unittest.TestCase):
def test_b64encode(self):
eq = self.assertEqual
# Test default alphabet
eq(base64.b64encode("www.python.org"), "d3d3LnB5dGhvbi5vcmc=")
eq(base64.b64encode('\x00'), 'AA==')
eq(base64.b64encode("a"), "YQ==")
eq(base64.b64encode("ab"), "YWI=")
eq(base64.b64encode("abc"), "YWJj")
eq(base64.b64encode(""), "")
eq(base64.b64encode("abcdefghijklmnopqrstuvwxyz"
"ABCDEFGHIJKLMNOPQRSTUVWXYZ"
"0123456789!@#0^&*();:<>,. []{}"),
"YWJjZGVmZ2hpamtsbW5vcHFyc3R1dnd4eXpBQkNE"
"RUZHSElKS0xNTk9QUVJTVFVWV1hZWjAxMjM0NT"
"Y3ODkhQCMwXiYqKCk7Ojw+LC4gW117fQ==")
# Test with arbitrary alternative characters
eq(base64.b64encode('\xd3V\xbeo\xf7\x1d', altchars='*$'), '01a*b$cd')
# Test standard alphabet
eq(base64.standard_b64encode("www.python.org"), "d3d3LnB5dGhvbi5vcmc=")
eq(base64.standard_b64encode("a"), "YQ==")
eq(base64.standard_b64encode("ab"), "YWI=")
eq(base64.standard_b64encode("abc"), "YWJj")
eq(base64.standard_b64encode(""), "")
eq(base64.standard_b64encode("abcdefghijklmnopqrstuvwxyz"
"ABCDEFGHIJKLMNOPQRSTUVWXYZ"
"0123456789!@#0^&*();:<>,. []{}"),
"YWJjZGVmZ2hpamtsbW5vcHFyc3R1dnd4eXpBQkNE"
"RUZHSElKS0xNTk9QUVJTVFVWV1hZWjAxMjM0NT"
"Y3ODkhQCMwXiYqKCk7Ojw+LC4gW117fQ==")
# Test with 'URL safe' alternative characters
eq(base64.urlsafe_b64encode('\xd3V\xbeo\xf7\x1d'), '01a-b_cd')
def test_b64decode(self):
eq = self.assertEqual
eq(base64.b64decode("d3d3LnB5dGhvbi5vcmc="), "www.python.org")
eq(base64.b64decode('AA=='), '\x00')
eq(base64.b64decode("YQ=="), "a")
eq(base64.b64decode("YWI="), "ab")
eq(base64.b64decode("YWJj"), "abc")
eq(base64.b64decode("YWJjZGVmZ2hpamtsbW5vcHFyc3R1dnd4eXpBQkNE"
"RUZHSElKS0xNTk9QUVJTVFVWV1hZWjAxMjM0\nNT"
"Y3ODkhQCMwXiYqKCk7Ojw+LC4gW117fQ=="),
"abcdefghijklmnopqrstuvwxyz"
"ABCDEFGHIJKLMNOPQRSTUVWXYZ"
"0123456789!@#0^&*();:<>,. []{}")
eq(base64.b64decode(''), '')
# Test with arbitrary alternative characters
eq(base64.b64decode('01a*b$cd', altchars='*$'), '\xd3V\xbeo\xf7\x1d')
# Test standard alphabet
eq(base64.standard_b64decode("d3d3LnB5dGhvbi5vcmc="), "www.python.org")
eq(base64.standard_b64decode("YQ=="), "a")
eq(base64.standard_b64decode("YWI="), "ab")
eq(base64.standard_b64decode("YWJj"), "abc")
eq(base64.standard_b64decode(""), "")
eq(base64.standard_b64decode("YWJjZGVmZ2hpamtsbW5vcHFyc3R1dnd4eXpBQkNE"
"RUZHSElKS0xNTk9QUVJTVFVWV1hZWjAxMjM0NT"
"Y3ODkhQCMwXiYqKCk7Ojw+LC4gW117fQ=="),
"abcdefghijklmnopqrstuvwxyz"
"ABCDEFGHIJKLMNOPQRSTUVWXYZ"
"0123456789!@#0^&*();:<>,. []{}")
# Test with 'URL safe' alternative characters
eq(base64.urlsafe_b64decode('01a-b_cd'), '\xd3V\xbeo\xf7\x1d')
def test_b64decode_error(self):
self.assertRaises(TypeError, base64.b64decode, 'abc')
def test_b32encode(self):
eq = self.assertEqual
eq(base64.b32encode(''), '')
eq(base64.b32encode('\x00'), 'AA======')
eq(base64.b32encode('a'), 'ME======')
eq(base64.b32encode('ab'), 'MFRA====')
eq(base64.b32encode('abc'), 'MFRGG===')
eq(base64.b32encode('abcd'), 'MFRGGZA=')
eq(base64.b32encode('abcde'), 'MFRGGZDF')
def test_b32decode(self):
eq = self.assertEqual
eq(base64.b32decode(''), '')
eq(base64.b32decode('AA======'), '\x00')
eq(base64.b32decode('ME======'), 'a')
eq(base64.b32decode('MFRA===='), 'ab')
eq(base64.b32decode('MFRGG==='), 'abc')
eq(base64.b32decode('MFRGGZA='), 'abcd')
eq(base64.b32decode('MFRGGZDF'), 'abcde')
def test_b32decode_casefold(self):
eq = self.assertEqual
eq(base64.b32decode('', True), '')
eq(base64.b32decode('ME======', True), 'a')
eq(base64.b32decode('MFRA====', True), 'ab')
eq(base64.b32decode('MFRGG===', True), 'abc')
eq(base64.b32decode('MFRGGZA=', True), 'abcd')
eq(base64.b32decode('MFRGGZDF', True), 'abcde')
# Lower cases
eq(base64.b32decode('me======', True), 'a')
eq(base64.b32decode('mfra====', True), 'ab')
eq(base64.b32decode('mfrgg===', True), 'abc')
eq(base64.b32decode('mfrggza=', True), 'abcd')
eq(base64.b32decode('mfrggzdf', True), 'abcde')
# Expected exceptions
self.assertRaises(TypeError, base64.b32decode, 'me======')
# Mapping zero and one
eq(base64.b32decode('MLO23456'), 'b\xdd\xad\xf3\xbe')
eq(base64.b32decode('M1023456', map01='L'), 'b\xdd\xad\xf3\xbe')
eq(base64.b32decode('M1023456', map01='I'), 'b\x1d\xad\xf3\xbe')
def test_b32decode_error(self):
self.assertRaises(TypeError, base64.b32decode, 'abc')
self.assertRaises(TypeError, base64.b32decode, 'ABCDEF==')
def test_b16encode(self):
eq = self.assertEqual
eq(base64.b16encode('\x01\x02\xab\xcd\xef'), '0102ABCDEF')
eq(base64.b16encode('\x00'), '00')
def test_b16decode(self):
eq = self.assertEqual
eq(base64.b16decode('0102ABCDEF'), '\x01\x02\xab\xcd\xef')
eq(base64.b16decode('00'), '\x00')
# Lower case is not allowed without a flag
self.assertRaises(TypeError, base64.b16decode, '0102abcdef')
# Case fold
eq(base64.b16decode('0102abcdef', True), '\x01\x02\xab\xcd\xef')
def test_main():
test_support.run_unittest(__name__)
if __name__ == '__main__':
test_main()
|
import time
from openerp.osv import fields, osv
class account_analytic_balance(osv.osv_memory):
_name = 'account.analytic.balance'
_description = 'Account Analytic Balance'
_columns = {
'date1': fields.date('Start of period', required=True),
'date2': fields.date('End of period', required=True),
'empty_acc': fields.boolean('Empty Accounts ? ', help='Check if you want to display Accounts with 0 balance too.'),
}
_defaults = {
'date1': lambda *a: time.strftime('%Y-01-01'),
'date2': lambda *a: time.strftime('%Y-%m-%d')
}
def check_report(self, cr, uid, ids, context=None):
if context is None:
context = {}
data = self.read(cr, uid, ids)[0]
datas = {
'ids': context.get('active_ids', []),
'model': 'account.analytic.account',
'form': data
}
datas['form']['active_ids'] = context.get('active_ids', False)
return self.pool['report'].get_action(cr, uid, [], 'account.report_analyticbalance', data=datas, context=context)
|
from __future__ import unicode_literals
from .common import InfoExtractor
from ..utils import (
parse_duration,
int_or_none,
)
class ChirbitIE(InfoExtractor):
IE_NAME = 'chirbit'
_VALID_URL = r'https?://(?:www\.)?chirb\.it/(?:(?:wp|pl)/|fb_chirbit_player\.swf\?key=)?(?P<id>[\da-zA-Z]+)'
_TESTS = [{
'url': 'http://chirb.it/PrIPv5',
'md5': '9847b0dad6ac3e074568bf2cfb197de8',
'info_dict': {
'id': 'PrIPv5',
'ext': 'mp3',
'title': 'Фасадстрой',
'duration': 52,
'view_count': int,
'comment_count': int,
}
}, {
'url': 'https://chirb.it/fb_chirbit_player.swf?key=PrIPv5',
'only_matching': True,
}]
def _real_extract(self, url):
audio_id = self._match_id(url)
webpage = self._download_webpage(
'http://chirb.it/%s' % audio_id, audio_id)
audio_url = self._search_regex(
r'"setFile"\s*,\s*"([^"]+)"', webpage, 'audio url')
title = self._search_regex(
r'itemprop="name">([^<]+)', webpage, 'title')
duration = parse_duration(self._html_search_meta(
'duration', webpage, 'duration', fatal=False))
view_count = int_or_none(self._search_regex(
r'itemprop="playCount"\s*>(\d+)', webpage,
'listen count', fatal=False))
comment_count = int_or_none(self._search_regex(
r'>(\d+) Comments?:', webpage,
'comment count', fatal=False))
return {
'id': audio_id,
'url': audio_url,
'title': title,
'duration': duration,
'view_count': view_count,
'comment_count': comment_count,
}
class ChirbitProfileIE(InfoExtractor):
IE_NAME = 'chirbit:profile'
    _VALID_URL = r'https?://(?:www\.)?chirbit\.com/(?:rss/)?(?P<id>[^/]+)'
_TEST = {
'url': 'http://chirbit.com/ScarletBeauty',
'info_dict': {
'id': 'ScarletBeauty',
'title': 'Chirbits by ScarletBeauty',
},
'playlist_mincount': 3,
}
def _real_extract(self, url):
profile_id = self._match_id(url)
rss = self._download_xml(
'http://chirbit.com/rss/%s' % profile_id, profile_id)
entries = [
self.url_result(audio_url.text, 'Chirbit')
for audio_url in rss.findall('./channel/item/link')]
title = rss.find('./channel/title').text
return self.playlist_result(entries, profile_id, title)
|
from subcmds.sync import Sync
class Smartsync(Sync):
common = True
helpSummary = "Update working tree to the latest known good revision"
helpUsage = """
%prog [<project>...]
"""
helpDescription = """
The '%prog' command is a shortcut for sync -s.
"""
def _Options(self, p):
Sync._Options(self, p, show_smart=False)
def Execute(self, opt, args):
opt.smart_sync = True
Sync.Execute(self, opt, args)
|
from .cassconnect import create_test_db, remove_test_db
setUp = create_test_db
tearDown = remove_test_db
|
cpp_examples = [
("wifi-example-sim", "True", "True"),
]
python_examples = []
|
import sys
from root import get_root, get_focus
from dialogs import Dialog
from theme import ThemeProperty
from pygame import Rect, draw
class MenuItem(object):
keyname = ""
keycode = None
shift = False
alt = False
enabled = False
if sys.platform.startswith('darwin') or sys.platform.startswith('mac'):
cmd_name = "Cmd "
option_name = "Opt "
else:
cmd_name = "Ctrl "
option_name = "Alt "
def __init__(self, text="", command=None):
self.command = command
if "/" in text:
text, key = text.split("/", 1)
else:
key = ""
self.text = text
if key:
keyname = key[-1]
mods = key[:-1]
self.keycode = ord(keyname.lower())
if "^" in mods:
self.shift = True
keyname = "Shift " + keyname
if "@" in mods:
self.alt = True
keyname = self.option_name + keyname
self.keyname = self.cmd_name + keyname
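# Worked example of the accelerator spec parsed above (illustrative only):
# on a non-Mac platform, MenuItem("Save/S") gets keyname "Ctrl S",
# MenuItem("Save/^S") gets "Ctrl Shift S" with shift=True, and
# MenuItem("Save/@S") gets "Ctrl Alt S" with alt=True; keycode is ord('s').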
class Menu(Dialog):
disabled_color = ThemeProperty('disabled_color')
click_outside_response = -1
scroll_button_size = ThemeProperty('scroll_button_size')
scroll_button_color = ThemeProperty('scroll_button_color')
scroll = 0
def __init__(self, title, items, scrolling=False, scroll_items=30,
scroll_page=5, **kwds):
self.title = title
self.items = items
self._items = [MenuItem(*item) for item in items]
self.scrolling = scrolling and len(self._items) > scroll_items
self.scroll_items = scroll_items
self.scroll_page = scroll_page
Dialog.__init__(self, **kwds)
h = self.font.get_linesize()
if self.scrolling:
self.height = h * self.scroll_items + h
else:
self.height = h * len(self._items) + h
def present(self, client, pos):
client = client or get_root()
self.topleft = client.local_to_global(pos)
focus = get_focus()
font = self.font
h = font.get_linesize()
items = self._items
margin = self.margin
if self.scrolling:
height = h * self.scroll_items + h
else:
height = h * len(items) + h
w1 = w2 = 0
for item in items:
item.enabled = self.command_is_enabled(item, focus)
w1 = max(w1, font.size(item.text)[0])
w2 = max(w2, font.size(item.keyname)[0])
width = w1 + 2 * margin
self._key_margin = width
if w2 > 0:
width += w2 + margin
if self.scrolling:
width += self.scroll_button_size
self.size = (width, height)
self._hilited = None
root = get_root()
self.rect.clamp_ip(root.rect)
return Dialog.present(self, centered=False)
def command_is_enabled(self, item, focus):
cmd = item.command
if cmd:
enabler_name = cmd + '_enabled'
handler = focus
while handler:
enabler = getattr(handler, enabler_name, None)
if enabler:
return enabler()
handler = handler.next_handler()
return True
def scroll_up_rect(self):
d = self.scroll_button_size
r = Rect(0, 0, d, d)
m = self.margin
r.top = m
r.right = self.width - m
r.inflate_ip(-4, -4)
return r
def scroll_down_rect(self):
d = self.scroll_button_size
r = Rect(0, 0, d, d)
m = self.margin
r.bottom = self.height - m
r.right = self.width - m
r.inflate_ip(-4, -4)
return r
def draw(self, surf):
font = self.font
h = font.get_linesize()
sep = surf.get_rect()
sep.height = 1
if self.scrolling:
sep.width -= self.margin + self.scroll_button_size
colors = [self.disabled_color, self.fg_color]
bg = self.bg_color
xt = self.margin
xk = self._key_margin
y = h // 2
hilited = self._hilited
if self.scrolling:
items = self._items[self.scroll:self.scroll + self.scroll_items]
else:
items = self._items
for item in items:
text = item.text
if not text:
sep.top = y + h // 2
surf.fill(colors[0], sep)
else:
if item is hilited:
rect = surf.get_rect()
rect.top = y
rect.height = h
if self.scrolling:
rect.width -= xt + self.scroll_button_size
surf.fill(colors[1], rect)
color = bg
else:
color = colors[item.enabled]
buf = font.render(item.text, True, color)
surf.blit(buf, (xt, y))
keyname = item.keyname
if keyname:
buf = font.render(keyname, True, color)
surf.blit(buf, (xk, y))
y += h
if self.scrolling:
if self.can_scroll_up():
self.draw_scroll_up_button(surf)
if self.can_scroll_down():
self.draw_scroll_down_button(surf)
def draw_scroll_up_button(self, surface):
r = self.scroll_up_rect()
c = self.scroll_button_color
draw.polygon(surface, c, [r.bottomleft, r.midtop, r.bottomright])
def draw_scroll_down_button(self, surface):
r = self.scroll_down_rect()
c = self.scroll_button_color
draw.polygon(surface, c, [r.topleft, r.midbottom, r.topright])
def mouse_move(self, e):
self.mouse_drag(e)
def mouse_drag(self, e):
item = self.find_enabled_item(e)
if item is not self._hilited:
self._hilited = item
self.invalidate()
def mouse_up(self, e):
if 1 <= e.button <= 3:
item = self.find_enabled_item(e)
if item:
self.dismiss(self._items.index(item))
def find_enabled_item(self, e):
x, y = e.local
if 0 <= x < (self.width - self.margin - self.scroll_button_size
if self.scrolling else self.width):
h = self.font.get_linesize()
i = (y - h // 2) // h + self.scroll
items = self._items
if 0 <= i < len(items):
item = items[i]
if item.enabled:
return item
def mouse_down(self, event):
if event.button == 1:
if self.scrolling:
p = event.local
if self.scroll_up_rect().collidepoint(p):
self.scroll_up()
return
elif self.scroll_down_rect().collidepoint(p):
self.scroll_down()
return
if event.button == 4:
self.scroll_up()
if event.button == 5:
self.scroll_down()
Dialog.mouse_down(self, event)
def scroll_up(self):
if self.can_scroll_up():
self.scroll = max(self.scroll - self.scroll_page, 0)
def scroll_down(self):
if self.can_scroll_down():
self.scroll = min(self.scroll + self.scroll_page,
len(self._items) - self.scroll_items)
def can_scroll_up(self):
return self.scrolling and self.scroll > 0
def can_scroll_down(self):
return (self.scrolling and
self.scroll + self.scroll_items < len(self._items))
def find_item_for_key(self, e):
for item in self._items:
if item.keycode == e.key \
and item.shift == e.shift and item.alt == e.alt:
focus = get_focus()
if self.command_is_enabled(item, focus):
return self._items.index(item)
else:
return -1
return -1
def get_command(self, i):
if i >= 0:
item = self._items[i]
cmd = item.command
if cmd:
return cmd + '_cmd'
def invoke_item(self, i):
cmd = self.get_command(i)
if cmd:
get_focus().handle_command(cmd)
|
import os
import sys
import time
NUM_EVENT_QUEUES = 10
mountpoint = sys.argv[1]
if not os.path.exists( mountpoint ):
print >> sys.stderr, "Usage: %s MOUNTPOINT" % sys.argv[0]
sys.exit(1)
for i in xrange(0, NUM_EVENT_QUEUES):
print "event queue: tmp/test-%s" % i
os.mkdir( "tmp/test-%s" % i )
print "Waiting for SIGINT"
while True:
time.sleep(100)
|
import math
import sensors.pycomms.mpu6050 as mpu6050
mpu = mpu6050.MPU6050(channel=1)
mpu.dmpInitialize()
mpu.setDMPEnabled(True)
packetSize = mpu.dmpGetFIFOPacketSize()
offset = None
while True:
# Get INT_STATUS byte
mpuIntStatus = mpu.getIntStatus()
if mpuIntStatus >= 2: # check for DMP data ready interrupt (this should happen frequently)
# get current FIFO count
fifoCount = mpu.getFIFOCount()
# check for overflow (this should never happen unless our code is too inefficient)
if fifoCount == 1024:
# reset so we can continue cleanly
mpu.resetFIFO()
print('FIFO overflow!')
# wait for correct available data length, should be a VERY short wait
fifoCount = mpu.getFIFOCount()
while fifoCount < packetSize:
fifoCount = mpu.getFIFOCount()
result = mpu.getFIFOBytes(packetSize)
q = mpu.dmpGetQuaternion(result)
accelRaw = mpu.dmpGetAccel(result)
g = mpu.dmpGetGravity(q)
linearAccel = mpu.dmpGetLinearAccel(accelRaw, g)
ypr = mpu.dmpGetYawPitchRoll(q, g)
        if offset is None:
offset = ypr
deviceAngles = [math.degrees(angle) \
for angle in [ypr['pitch']-offset['pitch'],ypr['roll']-offset['roll'],ypr['yaw']-offset['yaw']]]
accels = mpu.dmpGetLinearAccelInWorld(linearAccel, [ypr['pitch'],ypr['roll'],ypr['yaw']])
accels = [accel * 9.807 for accel in accels]
accelStr = "({0:.3f},{1:.3f},{2:.3f})".format(accels[0],accels[1],accels[2])
angleStr = "({0:.3f},{1:.3f},{2:.3f})".format(deviceAngles[0],deviceAngles[1],deviceAngles[2])
print "Acc: {0}; Ang: {1}".format(accelStr, angleStr)
# track FIFO count here in case there is > 1 packet available
# (this lets us immediately read more without waiting for an interrupt)
fifoCount -= packetSize
|
import json
from datetime import date
from textwrap import dedent
import pytest
import responses
from flask_forecaster.tracker import Tracker, models
@pytest.fixture
def api():
return Tracker('hello')
class TestTokenValidation:
@responses.activate
def test_token_valid(self):
projects = ['foo', 'bar', 'baz']
responses.add(
responses.GET,
'https://www.pivotaltracker.com/services/v5/me',
json={'projects': projects},
status=200,
)
result = Tracker.validate_token('hello')
assert len(responses.calls) == 1
assert responses.calls[0].request.headers.get('X-TrackerToken') == 'hello'
assert result == projects
@responses.activate
def test_token_invalid(self):
responses.add(
responses.GET,
'https://www.pivotaltracker.com/services/v5/me',
body='{"error": "something went horribly wrong"}',
status=200,
)
result = Tracker.validate_token('hello')
calls = responses.calls
assert len(calls) == 1
assert calls[0].request.headers.get('X-TrackerToken') == 'hello'
assert result is None
@responses.activate
def test_token_failed(self):
responses.add(
responses.GET,
'https://www.pivotaltracker.com/services/v5/me',
status=404,
)
result = Tracker.validate_token('hello')
calls = responses.calls
assert len(calls) == 1
assert calls[0].request.headers.get('X-TrackerToken') == 'hello'
assert result is None
class TestGetProject:
@responses.activate
def test_get_project_data(self, api):
project = dict(name='demo', description='some stupid project')
responses.add(
responses.GET,
'https://www.pivotaltracker.com/services/v5/projects/123',
body=json.dumps(project),
status=200,
)
result = api.get_project(123)
calls = responses.calls
assert len(calls) == 1
assert calls[0].request.headers.get('X-TrackerToken') == 'hello'
assert result == project
class TestGetProjectSnapshot:
@responses.activate
def test_get_project_history(self, api):
data = self._project_data()
responses.add(
responses.GET,
'https://www.pivotaltracker.com/services/v5/projects/123/history/snapshots',
body=data,
status=200,
)
result = api.get_project_history(123)
calls = responses.calls
assert len(calls) == 1
assert calls[0].request.headers.get('X-TrackerToken') == 'hello'
assert result == json.loads(data)
@responses.activate
def test_get_converted_project_history(self, api):
data = self._project_data()
responses.add(
responses.GET,
'https://www.pivotaltracker.com/services/v5/projects/123/history/snapshots',
body=data,
status=200,
)
result = api.get_project_history(123, True)
calls = responses.calls
assert len(calls) == 1
assert calls[0].request.headers.get('X-TrackerToken') == 'hello'
assert result[0].date == date(2016, 2, 28)
assert result[0].current[0].story_type == models.StoryType.feature
@staticmethod
def _project_data():
return dedent("""
[
{
"kind": "project_snapshot",
"date": "2016-02-28",
"current":
[
{
"kind": "story_snapshot",
"story_id": 555,
"state": "unstarted",
"estimate": 1,
"story_type": "feature"
}
]
}
]
""")
|
from distutils.core import setup
setup(
name='nasbkp',
version='0.1',
packages=['nasbkp', 'nasbkp.tests', 'nasbkp.common', 'nasbkp.storage'],
url='',
license='ISC',
author='Alexandre Vaissière',
author_email='avaiss@fmiw.org',
description='Automated backup for zfs volume'
)
|
from __future__ import print_function
from bytecode import *
if __name__ == '__main__':
import sys
if len(sys.argv) > 1:
try:
sys.stdout.write(write_bytecode(collect(refine(optimize(convert(sys.argv[1], flatten(parse(sys.argv[1]))))))))
except DejaSyntaxError as e:
print(e, file=sys.stderr)
|
import numpy as np
def make_isocosahedron():
g = (1. + np.sqrt(5)) / 2. # the golden ratio
    # looking from outside the icosahedron in toward the origin,
# the vertices go in an anticlockwise direction around each face
# and the faces touch each other sequentially
v = np.array([
[ 0, 1, g],
[ 0,-1, g],
[ 0,-1,-g],
[ 0, 1,-g],
[ 1, g, 0],
[-1, g, 0],
[-1,-g, 0],
[ 1,-g, 0],
[ g, 0, 1],
[ g, 0,-1],
[-g, 0,-1],
[-g, 0, 1]])
f = np.array([[0,11,1],
[11,6,1],
[1,6,7],
[1,7,8],
[0,1,8],
[0,8,4],
[0,4,5],
[11,0,5],
[11,5,10],
[11,10,6],
[6,10,2],
[6,2,7],
[7,2,9],
[8,7,9],
[4,8,9],
[4,9,3],
[5,4,3],
[5,3,10],
[3,2,10],
[3,9,2]
])
return(f,v)
def np_to_verts(p):
    # converts a numpy array of points to the nested list format expected by Poly3DCollection
x = np.ndarray.tolist(p[:,0])
y = np.ndarray.tolist(p[:,1])
z = np.ndarray.tolist(p[:,2])
verts = [zip(x, y, z)]
return(verts)
def get_vertex_ind(vert, point):
# see if point is already in vert
delta = np.sum( np.power(point - vert,2.), axis=1)
# if delta is nearly zero, get index of that element
# otherwise push new coordinate onto vert and return that number
anydup = np.isclose(delta, 0)
if np.any(anydup):
        # there is a duplicate
return(vert, np.ravel(np.where(anydup)))
else:
vert = np.append(vert, [point], axis=0)
return(vert, vert.shape[0] - 1)
def normalise_vertices(v):
# normalise to unit length
vs = v*v
vssum = np.power(np.sum(vs, axis=1), 0.5)
vn = v / vssum[:,np.newaxis]
return(vn)
def normit(x):
nx = x / np.linalg.norm(x, axis=1)[:,np.newaxis]
return(nx)
def split_iso(fa, ve):
# split each triangle face into four sub triangles
newface = np.empty((0,3), dtype=int)
for face in fa:
# generate new points from vertex list
ip0, ip1, ip2 = face
p0 = ve[ip0]
p1 = ve[ip1]
p2 = ve[ip2]
# see if this point already exists
# if it does, return the index of already point
# if it doesn't, push it on the list and return that
(ve, ip3) = get_vertex_ind(ve, (p0+p1)/2.)
(ve, ip4) = get_vertex_ind(ve, (p1+p2)/2.)
(ve, ip5) = get_vertex_ind(ve, (p2+p0)/2.)
newface = np.append(newface,np.array([[ip0,ip3,ip5],[ip5,ip3,ip4],[ip3,ip1,ip4],[ip5,ip4,ip2]]), axis=0)
ven = normalise_vertices(ve)
return(newface,ven)
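# Sanity check for split_iso (derived from the geometry, not measured here):
# every face is replaced by 4 smaller faces, so starting from the 20-face
# icosahedron the face counts grow 20 -> 80 -> 320 -> 1280 -> 5120, which is
# what the __main__ block below prints after each split.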
def sigmoid(x):
' sigmoid - goes from 0 to 1 as you go from -inf to inf'
result = 1. / (1. + np.exp(-x))
return(result)
def starspot(cosang, spotsize, angwid):
# convert to degrees
angle = np.arccos(cosang) * 180. / np.pi
tang = (angle - spotsize) / angwid
return(sigmoid(tang))
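# Worked example for starspot (illustrative numbers): with spotsize=30 and
# angwid=5, a face at exactly 30 degrees from the spot centre gets
# sigmoid(0) = 0.5, a face at 20 degrees gets sigmoid(-2) ~= 0.12 (inside
# the dark spot), and one at 40 degrees gets sigmoid(2) ~= 0.88 (outside).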
def vec2sph(vec):
    # inverse of sph2vec below: latitude from z, longitude from (x, y)
    theta = np.arcsin(vec[:,2])
    lambd = np.arctan2(vec[:,1], vec[:,0])
return(theta,lambd)
def sph2vec(theta,lambd):
xx = np.cos(theta) * np.cos(lambd)
yy = np.cos(theta) * np.sin(lambd)
zz = np.sin(theta)
return(np.hstack((xx,yy,zz)))
def intriangle(tup, p1, p2, p3):
    '''tup is a tuple of (x, y) coordinate arrays; p1, p2, p3 are the three
    points defining the triangle. Return a tuple of the points that lie
    inside the triangle.'''
x, y = tup
x1 = p1[0]
x2 = p2[0]
x3 = p3[0]
y1 = p1[1]
y2 = p2[1]
y3 = p3[1]
a = ((y2 - y3)*(x - x3) + (x3 - x2)*(y - y3)) / ((y2 - y3)*(x1 - x3) + (x3 - x2)*(y1 - y3))
b = ((y3 - y1)*(x - x3) + (x1 - x3)*(y - y3)) / ((y2 - y3)*(x1 - x3) + (x3 - x2)*(y1 - y3))
c = 1 - a - b
T = (a>=0) & (a<=1) & (b>=0) & (b<=1) & (c>=0) & (c<=1)
tupout = (x[T], y[T])
return(tupout)
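# Barycentric sketch for intriangle (illustrative values): for the unit
# right triangle p1=(0, 0), p2=(1, 0), p3=(0, 1), the point (0.25, 0.25)
# yields a, b, c all within [0, 1] and is kept, while (1.0, 1.0) yields
# a = -1 and is dropped from the returned tuple.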
def triangle_image(tup, p1, p2, p3):
    '''tup is a tuple of (y, x) arrays from mgrid; p1, p2, p3 are the three
    points defining the triangle. Return a boolean array marking the grid
    points that lie inside the triangle.'''
y, x = tup
x1 = p1[0]
x2 = p2[0]
x3 = p3[0]
y1 = p1[1]
y2 = p2[1]
y3 = p3[1]
aden = (y2 - y3)*(x - x3) + (x3 - x2)*(y - y3)
adiv = (y2 - y3)*(x1 - x3) + (x3 - x2)*(y1 - y3)
bden = (y3 - y1)*(x - x3) + (x1 - x3)*(y - y3)
bdiv = (y2 - y3)*(x1 - x3) + (x3 - x2)*(y1 - y3)
if np.isclose(adiv,0.):
a = 0.
else:
a = aden / adiv
if np.isclose(bdiv,0.):
b = 0.
else:
b = bden / bdiv
c = 1 - a - b
T = (a>=0) & (a<=1) & (b>=0) & (b<=1) & (c>=0) & (c<=1)
return(T)
def rotx(theta):
t = theta * np.pi / 180.
ct = np.cos(t)
st = np.sin(t)
m = np.array([[ 1, 0, 0 ],
[ 0, ct, -st],
[ 0, st, ct]])
return(m)
def roty(theta):
t = theta * np.pi / 180.
ct = np.cos(t)
st = np.sin(t)
m = np.array([[ ct, 0, st],
[ 0, 1, 0],
[-st, 0, ct]])
return(m)
def rotz(theta):
t = theta * np.pi / 180.
ct = np.cos(t)
st = np.sin(t)
m = np.array([[ ct,-st, 0],
[ st, ct, 0],
[ 0, 0, 1]])
return(m)
if __name__ == '__main__':
from mpl_toolkits.mplot3d import Axes3D
from mpl_toolkits.mplot3d.art3d import Poly3DCollection
import matplotlib.pyplot as plt
import matplotlib as mpl
mpl.rc('image', interpolation='nearest', origin='lower', cmap='gray')
mpl.rc('axes.formatter', limits=(-7,7))
fig = plt.figure()
ax = Axes3D(fig)
(fa, ve) = make_isocosahedron()
ve=normalise_vertices(ve)
print ' number of faces is %d with %d vertices' % (fa.shape[0], ve.shape[0])
(fa2, ve2) = split_iso(fa, ve)
print ' number of faces is %d with %d vertices' % (fa2.shape[0], ve2.shape[0])
(fa3, ve3) = split_iso(fa2, ve2)
print ' number of faces is %d with %d vertices' % (fa3.shape[0], ve3.shape[0])
(fa4, ve4) = split_iso(fa3, ve3)
print ' number of faces is %d with %d vertices' % (fa4.shape[0], ve4.shape[0])
(fa5, ve5) = split_iso(fa4, ve4)
print ' number of faces is %d with %d vertices' % (fa5.shape[0], ve5.shape[0])
# plot the vertices
ax.scatter(ve[:,0], ve[:,1], ve[:,2])
#facols = np.ones_like(fa3) * np.random.random(fa3.shape)
ve3=normalise_vertices(ve3)
fa3_mean = np.mean((ve3[fa3]),axis=1)
facols_3 = np.abs(fa3_mean)
ve4=normalise_vertices(ve4)
fa4_mean = np.mean((ve4[fa4]),axis=1)
facols_4 = np.abs(fa4_mean)
#ax.scatter(fa3_mean[:,0], fa3_mean[:,1], fa3_mean[:,2])
ve5=normalise_vertices(ve5)
fa5_mean = np.mean((ve5[fa5]),axis=1)
facols_5 = np.abs(fa5_mean)
#ax.scatter(fa3_mean[:,0], fa3_mean[:,1], fa3_mean[:,2])
# plot the faces
for (face, facol) in zip(fa4, facols_4):
ax.add_collection3d(Poly3DCollection(np_to_verts(ve4[face]),color=facol))
# label the vertices
#for n,j in enumerate(ve3):
# ax.text(j[0], j[1], j[2], n, fontsize=30, \
# color='red', zorder=50, ha='center', va='center')
# testing intriangle()
# END testing intriangle()
# make starspots
# calculate
bdir1 = np.array([[1,1,1]])
bdir2 = np.array([[0,-1,0]])
n_bdir1 = normit(bdir1)
n_bdir2 = normit(bdir2)
n_fa5_mean = normit(fa5_mean)
# angle = arccos( dot(a, b) )
ang1 = np.dot(n_fa5_mean, n_bdir1.T)
ang2 = np.dot(n_fa5_mean, n_bdir2.T)
cspot = starspot(ang1, 30., 5.) * starspot(ang2, 20, 1.)
# method using plot+trisurf - it's fast, but no color control
# make a 2D array and fill it to the corners
def fillstar2d(im, tup, face, vert, tricol):
'do a 2d projection onto an image xy plane of 3d triangles'
for (fa, col) in zip(face, tricol):
intri = triangle_image(tup, vert[fa[0]], vert[fa[1]], vert[fa[2]])
im[intri] = im[intri] + col
return(im)
ty, tx = np.mgrid[-1.2:1.2:0.01,-1.2:1.2:0.01]
# set up an image
im0 = np.zeros((ty.shape[0],ty.shape[1],3))
intens = facols_3
    # select faces which are positive in the z axis
    # END select faces which are positive in the z axis
    # 3d plot hemisphere of positive z values
    # END 3d plot hemisphere of positive z values
fig = plt.figure()
ax = plt.subplot(111)
# +ve x rotates top edge down towards bottom edge
# +ve y moves lhs side of star to the right
# +ve z rotates the star surface anticlockwise
ty, tx = np.mgrid[-1.2:1.2:0.02,-1.2:1.2:0.02]
# set up an image
im0 = np.zeros((ty.shape[0],ty.shape[1],3))
intens = facols_3
for (i, ni) in enumerate(np.linspace(0,360,360)):
im0 = np.zeros((ty.shape[0],ty.shape[1],3))
# stack matrices in reverse order
Matrix = np.dot(rotx(30),roty(i))
ve5rot = (np.dot(Matrix, ve5.T)).T
fa5_mean = np.mean((ve5rot[fa5]),axis=1)
# select faces which are positive in the z axis
zplus = (fa5_mean[:,2]>0)
fa5_zplus = fa5[zplus]
f = fillstar2d(im0, (ty, tx), fa5_zplus, ve5rot, cspot[zplus])
plt.imshow(f)
# plt.draw()
# write out image
fname = '/Users/kenworthy/_tmp%04d.png' % i
print 'Saving frame', fname
fig.savefig(fname, facecolor=fig.get_facecolor(), edgecolor='none')
|
import os
import unittest
from subprocess import CalledProcessError
from latexbuild.subprocess_extension import check_output_cwd
PATH_FILE = os.path.abspath(__file__)
PATH_TEST = os.path.dirname(PATH_FILE)
PATH_MAIN = os.path.dirname(PATH_TEST)
NAME_FILE = os.path.basename(PATH_FILE)
def ls_and_split(directory):
stdout = check_output_cwd(['ls'], directory)
return stdout
class TestCheckOutputCwd(unittest.TestCase):
def test_raises_bad_binary(self):
self.assertRaises(ValueError,
check_output_cwd, ['fjadklsjfkldsjf', '--ddfddf'], PATH_TEST)
def test_raises_bad_call(self):
self.assertRaises(CalledProcessError,
check_output_cwd, ['python', '--ddfddf'], PATH_TEST)
def test_ls_current_dir(self):
self.assertIn(NAME_FILE, ls_and_split(PATH_TEST))
def test_ls_above_dir(self):
self.assertNotIn(NAME_FILE, ls_and_split(PATH_MAIN))
if __name__ == '__main__':
unittest.main()
|
"""
This package provides extensions to the Rx operators used in the app.
"""
import sensomatic.rxutils.from_iterable_with_interval
|
import os
from pythonz.commands import Command
from pythonz.define import PATH_PYTHONS
from pythonz.installer.pythoninstaller import CPythonInstaller, StacklessInstaller, PyPyInstaller, JythonInstaller
from pythonz.log import logger
class ListCommand(Command):
name = "list"
usage = "%prog [options]"
summary = "List the installed python versions"
def __init__(self):
super(ListCommand, self).__init__()
self.parser.add_option(
'-a', '--all-versions',
dest='all_versions',
action='store_true',
default=False,
help='Show the all available python versions.'
)
self.parser.add_option(
'-p', '--path',
dest='path',
action='store_true',
default=False,
help='Show the path for all Python installations.'
)
def run_command(self, options, args):
if options.all_versions:
self.all()
else:
self.installed(path=options.path)
def installed(self, path):
logger.log("# Installed Python versions")
for d in sorted(os.listdir(PATH_PYTHONS)):
if path:
logger.log(' %-16s %s/%s' % (d, PATH_PYTHONS, d))
else:
logger.log(' %s' % d)
def all(self):
logger.log('# Available Python versions')
for type, installer in zip(['cpython', 'stackless', 'pypy', 'jython'], [CPythonInstaller, StacklessInstaller, PyPyInstaller, JythonInstaller]):
logger.log(' # %s:' % type)
for version in installer.supported_versions:
logger.log(' %s' % version)
ListCommand()
|
from __future__ import print_function
import unittest
import discretize
from SimPEG import utils
import numpy as np
from SimPEG.electromagnetics import resistivity as dc
from SimPEG.electromagnetics import analytics
try:
from pymatsolver import Pardiso as Solver
except ImportError:
from SimPEG import SolverLU as Solver
class DCProblemAnalyticTests(unittest.TestCase):
def setUp(self):
cs = 25.0
npad = 7
hx = [(cs, npad, -1.3), (cs, 21), (cs, npad, 1.3)]
hy = [(cs, npad, -1.3), (cs, 21), (cs, npad, 1.3)]
hz = [(cs, npad, -1.3), (cs, 20)]
mesh = discretize.TensorMesh([hx, hy, hz], x0="CCN")
sigma = np.ones(mesh.nC) * 1e-2
x = mesh.vectorCCx[(mesh.vectorCCx > -100) & (mesh.vectorCCx < 100)]
y = mesh.vectorCCy[(mesh.vectorCCy > -100) & (mesh.vectorCCy < 100)]
Aloc = np.r_[-200.0, 0.0, 0.0]
Bloc = np.r_[200.0, 0.0, 0.0]
M = utils.ndgrid(x - 25.0, y, np.r_[0.0])
N = utils.ndgrid(x + 25.0, y, np.r_[0.0])
phiA = analytics.DCAnalytic_Pole_Dipole(
Aloc, [M, N], 1e-2, earth_type="halfspace"
)
phiB = analytics.DCAnalytic_Pole_Dipole(
Bloc, [M, N], 1e-2, earth_type="halfspace"
)
data_ana = phiA - phiB
rx = dc.receivers.Dipole(M, N)
src = dc.sources.Dipole([rx], Aloc, Bloc)
survey = dc.survey.Survey([src])
self.survey = survey
self.mesh = mesh
self.sigma = sigma
self.data_ana = data_ana
def test_Simulation3DNodal(self, tolerance=0.05):
simulation = dc.simulation.Simulation3DNodal(
self.mesh,
survey=self.survey,
sigma=self.sigma,
solver=Solver,
bc_type="Neumann",
)
data = simulation.dpred()
err = np.sqrt(
(((data - self.data_ana) / self.data_ana) ** 2).sum() / self.survey.nD
)
if err < tolerance:
print(err)
passed = True
print(">> DC analytic test for Simulation3DNodal is passed")
else:
print(err)
passed = False
print(">> DC analytic test for Simulation3DNodal is failed")
self.assertTrue(passed)
def test_Simulation3DNodal_Robin(self, tolerance=0.05):
simulation = dc.simulation.Simulation3DNodal(
self.mesh,
survey=self.survey,
sigma=self.sigma,
solver=Solver,
bc_type="Robin",
)
data = simulation.dpred()
err = np.sqrt(
(((data - self.data_ana) / self.data_ana) ** 2).sum() / self.survey.nD
)
print(err)
self.assertLess(err, 0.05)
def test_Simulation3DCellCentered_Mixed(self, tolerance=0.05):
simulation = dc.Simulation3DCellCentered(
self.mesh,
survey=self.survey,
sigma=self.sigma,
bc_type="Mixed",
solver=Solver,
)
data = simulation.dpred()
err = np.sqrt(
(((data - self.data_ana) / self.data_ana) ** 2).sum() / self.survey.nD
)
if err < tolerance:
print(err)
passed = True
print(">> DC analytic test for Simulation3DCellCentered is passed")
else:
print(err)
passed = False
print(">> DC analytic test for Simulation3DCellCentered is failed")
self.assertTrue(passed)
def test_Simulation3DCellCentered_Neumann(self, tolerance=0.05):
simulation = dc.simulation.Simulation3DCellCentered(
self.mesh,
survey=self.survey,
sigma=self.sigma,
bc_type="Neumann",
solver=Solver,
)
data = simulation.dpred()
err = np.sqrt(
(((data - self.data_ana) / self.data_ana) ** 2).sum() / self.survey.nD
)
if err < tolerance:
print(err)
passed = True
print(">> DC analytic test for Simulation3DCellCentered is passed")
else:
print(err)
passed = False
print(">> DC analytic test for Simulation3DCellCentered is failed")
self.assertTrue(passed)
class DCProblemAnalyticTests_Dirichlet(unittest.TestCase):
def setUp(self):
cs = 25.0
hx = [(cs, 7, -1.3), (cs, 21), (cs, 7, 1.3)]
hy = [(cs, 7, -1.3), (cs, 21), (cs, 7, 1.3)]
hz = [(cs, 7, -1.3), (cs, 20), (cs, 7, -1.3)]
mesh = discretize.TensorMesh([hx, hy, hz], x0="CCC")
sigma = np.ones(mesh.nC) * 1e-2
x = mesh.vectorCCx[(mesh.vectorCCx > -155.0) & (mesh.vectorCCx < 155.0)]
y = mesh.vectorCCy[(mesh.vectorCCy > -155.0) & (mesh.vectorCCy < 155.0)]
Aloc = np.r_[-200.0, 0.0, 0.0]
Bloc = np.r_[200.0, 0.0, 0.0]
M = utils.ndgrid(x - 25.0, y, np.r_[0.0])
N = utils.ndgrid(x + 25.0, y, np.r_[0.0])
phiA = analytics.DCAnalytic_Pole_Dipole(
Aloc, [M, N], 1e-2, earth_type="wholespace"
)
phiB = analytics.DCAnalytic_Pole_Dipole(
Bloc, [M, N], 1e-2, earth_type="wholespace"
)
data_ana = phiA - phiB
rx = dc.receivers.Dipole(M, N)
src = dc.sources.Dipole([rx], Aloc, Bloc)
survey = dc.survey.Survey([src])
self.survey = survey
self.mesh = mesh
self.sigma = sigma
self.data_ana = data_ana
def test_Simulation3DCellCentered_Dirichlet(self, tolerance=0.05):
simulation = dc.simulation.Simulation3DCellCentered(
self.mesh,
survey=self.survey,
sigma=self.sigma,
bc_type="Dirichlet",
solver=Solver,
)
data = simulation.dpred()
err = np.sqrt(
(((data - self.data_ana) / self.data_ana) ** 2).sum() / self.survey.nD
)
if err < tolerance:
print(err)
passed = True
print(">> DC analytic test for Simulation3DCellCentered_Dirchlet is passed")
else:
print(err)
passed = False
print(">> DC analytic test for Simulation3DCellCentered_Dirchlet is failed")
self.assertTrue(passed)
class DCProblemAnalyticTests_Mixed(unittest.TestCase):
def setUp(self):
cs = 25.0
hx = [(cs, 7, -1.5), (cs, 21), (cs, 7, 1.5)]
hy = [(cs, 7, -1.5), (cs, 21), (cs, 7, 1.5)]
hz = [(cs, 7, -1.5), (cs, 20)]
mesh = discretize.TensorMesh([hx, hy, hz], x0="CCN")
sigma = np.ones(mesh.nC) * 1e-2
x = mesh.vectorCCx[(mesh.vectorCCx > -155.0) & (mesh.vectorCCx < 155.0)]
y = mesh.vectorCCy[(mesh.vectorCCy > -155.0) & (mesh.vectorCCy < 155.0)]
Aloc = np.r_[-200.0, 0.0, 0.0]
M = utils.ndgrid(x, y, np.r_[0.0])
phiA = analytics.DCAnalytic_Pole_Pole(Aloc, M, 1e-2, earth_type="halfspace")
data_ana = phiA
rx = dc.receivers.Pole(M)
src = dc.sources.Pole([rx], Aloc)
survey = dc.survey.Survey([src])
self.survey = survey
self.mesh = mesh
self.sigma = sigma
self.data_ana = data_ana
def test_Simulation3DCellCentered_Mixed(self, tolerance=0.05):
simulation = dc.simulation.Simulation3DCellCentered(
self.mesh,
survey=self.survey,
sigma=self.sigma,
bc_type="Mixed",
solver=Solver,
)
data = simulation.dpred()
err = np.sqrt(
(((data - self.data_ana) / self.data_ana) ** 2).sum() / self.survey.nD
)
if err < tolerance:
print(err)
passed = True
print(">> DC analytic test for Simulation3DCellCentered_Mixed is passed")
else:
print(err)
passed = False
print(">> DC analytic test for Simulation3DCellCentered_Mixed is failed")
self.assertTrue(passed)
if __name__ == "__main__":
unittest.main()
|
from textwrap import dedent
import icalendar
import pytest
import vdirsyncer.utils.vobject as vobject
from .. import BARE_EVENT_TEMPLATE, EVENT_TEMPLATE, VCARD_TEMPLATE, \
normalize_item
_simple_split = [
VCARD_TEMPLATE.format(r=123, uid=123),
VCARD_TEMPLATE.format(r=345, uid=345),
VCARD_TEMPLATE.format(r=678, uid=678)
]
_simple_joined = u'\r\n'.join(
[u'BEGIN:VADDRESSBOOK'] +
_simple_split +
[u'END:VADDRESSBOOK\r\n']
)
def test_split_collection_simple():
given = list(vobject.split_collection(_simple_joined))
assert [normalize_item(item) for item in given] == \
[normalize_item(item) for item in _simple_split]
if vobject.ICALENDAR_ORIGINAL_ORDER_SUPPORT:
assert [x.splitlines() for x in given] == \
[x.splitlines() for x in _simple_split]
def test_split_collection_multiple_wrappers():
joined = u'\r\n'.join(
u'BEGIN:VADDRESSBOOK\r\n' +
x +
u'\r\nEND:VADDRESSBOOK\r\n'
for x in _simple_split
)
given = list(vobject.split_collection(joined))
assert [normalize_item(item) for item in given] == \
[normalize_item(item) for item in _simple_split]
if vobject.ICALENDAR_ORIGINAL_ORDER_SUPPORT:
assert [x.splitlines() for x in given] == \
[x.splitlines() for x in _simple_split]
def test_split_collection_different_wrappers():
with pytest.raises(ValueError) as exc_info:
list(vobject.split_collection(u'BEGIN:VADDRESSBOOK\r\n'
u'BEGIN:FOO\r\n'
u'END:FOO\r\n'
u'END:VADDRESSBOOK\r\n'
u'BEGIN:VCALENDAR\r\n'
u'BEGIN:FOO\r\n'
u'END:FOO\r\n'
u'END:VCALENDAR\r\n'))
assert 'different types of components at top-level' in \
str(exc_info.value).lower()
def test_join_collection_simple():
given = vobject.join_collection(_simple_split)
assert normalize_item(given) == normalize_item(_simple_joined)
if vobject.ICALENDAR_ORIGINAL_ORDER_SUPPORT:
assert given.splitlines() == _simple_joined.splitlines()
def test_join_collection_vevents():
actual = vobject.join_collection([
dedent("""
BEGIN:VCALENDAR
BEGIN:VTIMEZONE
VALUE:The Timezone
END:VTIMEZONE
BEGIN:VEVENT
VALUE:Event {}
END:VEVENT
END:VCALENDAR
""").format(i) for i in range(3)
])
expected = dedent("""
BEGIN:VCALENDAR
BEGIN:VTIMEZONE
VALUE:The Timezone
END:VTIMEZONE
BEGIN:VEVENT
VALUE:Event 0
END:VEVENT
BEGIN:VEVENT
VALUE:Event 1
END:VEVENT
BEGIN:VEVENT
VALUE:Event 2
END:VEVENT
END:VCALENDAR
""").lstrip()
assert actual.splitlines() == expected.splitlines()
def test_split_collection_timezones():
items = [
BARE_EVENT_TEMPLATE.format(r=123, uid=123),
BARE_EVENT_TEMPLATE.format(r=345, uid=345)
]
timezone = (
u'BEGIN:VTIMEZONE\r\n'
u'TZID:/mozilla.org/20070129_1/Asia/Tokyo\r\n'
u'X-LIC-LOCATION:Asia/Tokyo\r\n'
u'BEGIN:STANDARD\r\n'
u'TZOFFSETFROM:+0900\r\n'
u'TZOFFSETTO:+0900\r\n'
u'TZNAME:JST\r\n'
u'DTSTART:19700101T000000\r\n'
u'END:STANDARD\r\n'
u'END:VTIMEZONE'
)
full = u'\r\n'.join(
[u'BEGIN:VCALENDAR'] +
items +
[timezone, u'END:VCALENDAR']
)
given = set(normalize_item(item)
for item in vobject.split_collection(full))
expected = set(
normalize_item(u'\r\n'.join((
u'BEGIN:VCALENDAR', item, timezone, u'END:VCALENDAR'
)))
for item in items
)
assert given == expected
def test_hash_item():
a = EVENT_TEMPLATE.format(r=1, uid=1)
b = u'\n'.join(line for line in a.splitlines()
if u'PRODID' not in line and u'VERSION' not in line)
assert vobject.hash_item(a) == vobject.hash_item(b)
def test_multiline_uid():
a = (u'BEGIN:FOO\r\n'
u'UID:123456789abcd\r\n'
u' efgh\r\n'
u'END:FOO\r\n')
assert vobject.Item(a).uid == u'123456789abcdefgh'
def test_multiline_uid_complex():
a = dedent(u'''
BEGIN:VCALENDAR
BEGIN:VTIMEZONE
TZID:Europe/Rome
X-LIC-LOCATION:Europe/Rome
BEGIN:DAYLIGHT
TZOFFSETFROM:+0100
TZOFFSETTO:+0200
TZNAME:CEST
DTSTART:19700329T020000
RRULE:FREQ=YEARLY;BYDAY=-1SU;BYMONTH=3
END:DAYLIGHT
BEGIN:STANDARD
TZOFFSETFROM:+0200
TZOFFSETTO:+0100
TZNAME:CET
DTSTART:19701025T030000
RRULE:FREQ=YEARLY;BYDAY=-1SU;BYMONTH=10
END:STANDARD
END:VTIMEZONE
BEGIN:VEVENT
DTSTART:20140124T133000Z
DTEND:20140124T143000Z
DTSTAMP:20140612T090652Z
UID:040000008200E00074C5B7101A82E0080000000050AAABEEF50DCF
001000000062548482FA830A46B9EA62114AC9F0EF
CREATED:20140110T102231Z
DESCRIPTION:Test.
LAST-MODIFIED:20140123T095221Z
LOCATION:25.12.01.51
SEQUENCE:0
STATUS:CONFIRMED
SUMMARY:Präsentation
TRANSP:OPAQUE
END:VEVENT
END:VCALENDAR
''').strip()
assert vobject.Item(a).uid == (u'040000008200E00074C5B7101A82E008000000005'
u'0AAABEEF50DCF001000000062548482FA830A46B9'
u'EA62114AC9F0EF')
@pytest.mark.xfail(icalendar.parser.NAME.findall('FOO.BAR') != ['FOO.BAR'],
reason=('version of icalendar doesn\'t support dots in '
'property names'))
def test_vcard_property_groups():
vcard = dedent(u'''
BEGIN:VCARD
VERSION:3.0
MYLABEL123.ADR:;;This is the Address 08; Some City;;12345;Germany
MYLABEL123.X-ABLABEL:
FN:Some Name
N:Name;Some;;;Nickname
UID:67c15e43-34d2-4f55-a6c6-4adb7aa7e3b2
END:VCARD
''').strip()
book = u'BEGIN:VADDRESSBOOK\n' + vcard + u'\nEND:VADDRESSBOOK'
splitted = list(vobject.split_collection(book))
assert len(splitted) == 1
assert vobject.Item(vcard).hash == vobject.Item(splitted[0]).hash
assert 'is the Address' in vobject.Item(vcard).parsed['MYLABEL123.ADR']
def test_vcard_semicolons_in_values():
# If this test fails because proper vCard support was added to icalendar,
# we can remove some ugly postprocessing code in to_unicode_lines.
vcard = dedent(u'''
BEGIN:VCARD
VERSION:3.0
ADR:;;Address 08;City;;12345;Germany
END:VCARD
''').strip()
# Assert that icalendar breaks vcard properties with semicolons in values
assert b'ADR:\\;\\;Address 08\\;City\\;\\;12345\\;Germany' in \
vobject.Item(vcard).parsed.to_ical().splitlines()
# Assert that vdirsyncer fixes these properties
assert u'ADR:;;Address 08;City;;12345;Germany' in \
list(vobject.to_unicode_lines(vobject.Item(vcard).parsed))
|
import network
import numpy
training_data = [
(numpy.array([[0], [0]]), numpy.array([[0]])),
(numpy.array([[0], [1]]), numpy.array([[1]])),
(numpy.array([[1], [0]]), numpy.array([[1]])),
(numpy.array([[1], [1]]), numpy.array([[0]])),
]
net = network.Network([2, 3, 1])
net.SGD(training_data, 5000, 4, 1.0, 0.0, training_data, [])
for elem in training_data:
print net.feed_forward(elem[0]), elem[1]
|
'''
Created by auto_sdk on 2015.06.23
'''
from aliyun.api.base import RestApi
class Ecs20140526AllocateEipAddressRequest(RestApi):
def __init__(self,domain='ecs.aliyuncs.com',port=80):
RestApi.__init__(self,domain, port)
self.Bandwidth = None
self.InternetChargeType = None
self.RegionId = None
def getapiname(self):
return 'ecs.aliyuncs.com.AllocateEipAddress.2014-05-26'
|
import math
import botlog
from bbot.Data import *
from bbot.Utils import *
class PrimeProjection:
def __init__(self, answer=None, confidence=None):
self.answer = answer
self.confidence = confidence
def is_low_confidence(self):
return self.confidence is None or self.confidence < 0.5
def is_high_confidence(self):
return not self.is_low_confidence()
def is_certain(self):
return self.confidence >= 1.0
def __str__(self):
stringrep = str(self.answer)
if not self.is_certain():
stringrep += (" (confidence " +
str(round(self.confidence*100)) + "%")
return stringrep
class Mentat:
def __init__(self, app):
self.app = app
self.data = app.data
def is_end_of_day(self):
botlog.debug("Determining if it is the end of the day")
end_of_day = False
confidence = 0.0
realm = self.data.realm
if realm.turns.remaining is not None:
botlog.debug("Turns remaining is set and is: " +
str(realm.turns.remaining))
confidence = 1.0
end_of_day = realm.turns.remaining <= END_OF_DAY_TURNS
elif realm.turns.current is not None:
maxturns = 8
if self.data.setup is not None and self.data.setup.turns_per_day is not None:
maxturns = self.data.setup.turns_per_day
botlog.debug("Turns remaining not set, but max turns is: " +
str(maxturns))
confidence = 1.0
else:
confidence = 0.5
botlog.debug("Turns remaining not set, but max turns blindly "
"assumed at: " + str(maxturns))
end_of_day = maxturns - realm.turns.current <= END_OF_DAY_TURNS
botlog.debug("Turns remaining calculated to probably be: " +
str(realm.turns.remaining))
return PrimeProjection(
answer=end_of_day,
confidence=confidence)
def score_indicates_realm_in_protection(self,realm=None):
confidence = 0.0
in_pro = False
SCORE_PER_TURN=213.0
score = None
if realm is not None:
score = realm.score
elif self.app.data.realm is not None:
score = self.app.data.realm.score
if score is not None:
confidence += 0.25
setup = self.app.data.setup
if (score is not None and
setup is not None and
setup.protection_turns is not None):
turns = int(math.ceil(score / SCORE_PER_TURN)) + 1
in_pro = turns <= setup.protection_turns
confidence += 0.50
# we can never be certain because pirates could be involved
return PrimeProjection(
answer=in_pro,
confidence=confidence)
|
"""
audfprint_analyze.py
Class to do the analysis of wave files into hash constellations.
2014-09-20 Dan Ellis dpwe@ee.columbia.edu
"""
from __future__ import print_function
import os
import numpy as np
import scipy.signal
import struct
import glob
import time
import hash_table
import librosa
import audio_read
PRECOMPEXT = '.afpt'
PRECOMPPKEXT = '.afpk'
def locmax(vec, indices=False):
""" Return a boolean vector of which points in vec are local maxima.
End points are peaks if larger than single neighbors.
if indices=True, return the indices of the True values instead
of the boolean vector.
"""
# vec[-1]-1 means last value can be a peak
#nbr = np.greater_equal(np.r_[vec, vec[-1]-1], np.r_[vec[0], vec])
# the np.r_ was killing us, so try an optimization...
nbr = np.zeros(len(vec)+1, dtype=bool)
nbr[0] = True
nbr[1:-1] = np.greater_equal(vec[1:], vec[:-1])
maxmask = (nbr[:-1] & ~nbr[1:])
if indices:
return np.nonzero(maxmask)[0]
else:
return maxmask
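# A small worked example (values chosen here for illustration, not from the
# original source):
#   locmax(np.array([1, 3, 2, 5, 5, 4]))
#   -> array([False,  True, False, False,  True, False])
# i.e. index 1 (the 3) and index 4 (the trailing sample of the 5, 5 plateau)
# are flagged; with indices=True the same call returns array([1, 4]).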
DENSITY = 20.0
OVERSAMP = 1
N_FFT = 512
N_HOP = 256
HPF_POLE = 0.98
F1_BITS = 8
DF_BITS = 6
DT_BITS = 6
B1_MASK = (1 << F1_BITS) - 1
B1_SHIFT = DF_BITS + DT_BITS
DF_MASK = (1 << DF_BITS) - 1
DF_SHIFT = DT_BITS
DT_MASK = (1 << DT_BITS) - 1
def landmarks2hashes(landmarks):
"""Convert a list of (time, bin1, bin2, dtime) landmarks
into a list of (time, hash) pairs where the hash combines
the three remaining values.
"""
# build up and return the list of hashed values
return [(time_,
(((bin1 & B1_MASK) << B1_SHIFT)
| (((bin2 - bin1) & DF_MASK) << DF_SHIFT)
| (dtime & DT_MASK)))
for time_, bin1, bin2, dtime in landmarks]
def hashes2landmarks(hashes):
"""Convert the mashed-up landmarks in hashes back into a list
of (time, bin1, bin2, dtime) tuples.
"""
landmarks = []
for time_, hash_ in hashes:
dtime = hash_ & DT_MASK
bin1 = (hash_ >> B1_SHIFT) & B1_MASK
dbin = (hash_ >> DF_SHIFT) & DF_MASK
# Sign extend frequency difference
if dbin >= (1 << (DF_BITS-1)):
dbin -= (1 << DF_BITS)
landmarks.append((time_, bin1, bin1+dbin, dtime))
return landmarks
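# A worked example of the packing performed by landmarks2hashes and undone by
# hashes2landmarks (the numbers are illustrative only):
#   landmark (time=5, bin1=40, bin2=50, dtime=3)
#   hash = (40 << 12) | ((50 - 40) << 6) | 3 = 164483
#   hashes2landmarks([(5, 164483)]) -> [(5, 40, 50, 3)]
# Negative frequency differences survive the round trip via the sign extension
# above, e.g. bin2=35 is stored as (35 - 40) & 0x3f = 59 and decoded back to -5.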
class Analyzer(object):
""" A class to wrap up all the parameters associated with
the analysis of soundfiles into fingerprints """
# Parameters
# optimization: cache pre-calculated Gaussian profile
__sp_width = None
__sp_len = None
__sp_vals = []
def __init__(self, density=DENSITY):
self.density = density
self.target_sr = 11025
self.n_fft = N_FFT
self.n_hop = N_HOP
self.shifts = 1
        # how wide to spread peaks
self.f_sd = 30.0
# Maximum number of local maxima to keep per frame
self.maxpksperframe = 5
# Limit the num of pairs we'll make from each peak (Fanout)
self.maxpairsperpeak = 3
# Values controlling peaks2landmarks
# +/- 31 bins in freq (LIMITED TO -32..31 IN LANDMARK2HASH)
self.targetdf = 31
# min time separation (traditionally 1, upped 2014-08-04)
self.mindt = 2
# max lookahead in time (LIMITED TO <64 IN LANDMARK2HASH)
self.targetdt = 63
# global stores duration of most recently-read soundfile
self.soundfiledur = 0.0
# .. and total amount of sound processed
self.soundfiletotaldur = 0.0
# .. and count of files
self.soundfilecount = 0
# Control behavior on file reading error
self.fail_on_error = True
def spreadpeaksinvector(self, vector, width=4.0):
""" Create a blurred version of vector, where each of the local maxes
is spread by a gaussian with SD <width>.
"""
npts = len(vector)
peaks = locmax(vector, indices=True)
return self.spreadpeaks(zip(peaks, vector[peaks]),
npoints=npts, width=width)
def spreadpeaks(self, peaks, npoints=None, width=4.0, base=None):
""" Generate a vector consisting of the max of a set of Gaussian bumps
:params:
peaks : list
list of (index, value) pairs giving the center point and height
of each gaussian
npoints : int
the length of the output vector (needed if base not provided)
width : float
the half-width of the Gaussians to lay down at each point
base : np.array
optional initial lower bound to place Gaussians above
:returns:
vector : np.array(npoints)
the maximum across all the scaled Gaussians
"""
if base is None:
vec = np.zeros(npoints)
else:
npoints = len(base)
vec = np.copy(base)
#binvals = np.arange(len(vec))
#for pos, val in peaks:
# vec = np.maximum(vec, val*np.exp(-0.5*(((binvals - pos)
# /float(width))**2)))
if width != self.__sp_width or npoints != self.__sp_len:
# Need to calculate new vector
self.__sp_width = width
self.__sp_len = npoints
self.__sp_vals = np.exp(-0.5*((np.arange(-npoints, npoints+1)
/ float(width))**2))
# Now the actual function
for pos, val in peaks:
vec = np.maximum(vec, val*self.__sp_vals[np.arange(npoints)
+ npoints - pos])
return vec
def _decaying_threshold_fwd_prune(self, sgram, a_dec):
""" forward pass of findpeaks
initial threshold envelope based on peaks in first 10 frames
"""
(srows, scols) = np.shape(sgram)
sthresh = self.spreadpeaksinvector(
np.max(sgram[:, :np.minimum(10, scols)], axis=1), self.f_sd
)
## Store sthresh at each column, for debug
#thr = np.zeros((srows, scols))
peaks = np.zeros((srows, scols))
# optimization of mask update
__sp_pts = len(sthresh)
__sp_v = self.__sp_vals
for col in range(scols):
s_col = sgram[:, col]
# Find local magnitude peaks that are above threshold
sdmaxposs = np.nonzero(locmax(s_col) * (s_col > sthresh))[0]
# Work down list of peaks in order of their absolute value
# above threshold
valspeaks = sorted(zip(s_col[sdmaxposs], sdmaxposs), reverse=True)
for val, peakpos in valspeaks[:self.maxpksperframe]:
# What we actually want
#sthresh = spreadpeaks([(peakpos, s_col[peakpos])],
# base=sthresh, width=f_sd)
# Optimization - inline the core function within spreadpeaks
sthresh = np.maximum(sthresh,
val*__sp_v[(__sp_pts - peakpos):
(2*__sp_pts - peakpos)])
peaks[peakpos, col] = 1
sthresh *= a_dec
return peaks
def _decaying_threshold_bwd_prune_peaks(self, sgram, peaks, a_dec):
""" backwards pass of findpeaks """
scols = np.shape(sgram)[1]
# Backwards filter to prune peaks
sthresh = self.spreadpeaksinvector(sgram[:, -1], self.f_sd)
for col in range(scols, 0, -1):
pkposs = np.nonzero(peaks[:, col-1])[0]
peakvals = sgram[pkposs, col-1]
for val, peakpos in sorted(zip(peakvals, pkposs), reverse=True):
if val >= sthresh[peakpos]:
# Setup the threshold
sthresh = self.spreadpeaks([(peakpos, val)], base=sthresh,
width=self.f_sd)
                    # Delete any following peak (the threshold should already
                    # prevent this, but be sure)
if col < scols:
peaks[peakpos, col] = 0
else:
# delete the peak
peaks[peakpos, col-1] = 0
sthresh = a_dec*sthresh
return peaks
def find_peaks(self, d, sr):
""" Find the local peaks in the spectrogram as basis for fingerprints.
Returns a list of (time_frame, freq_bin) pairs.
:params:
d - np.array of float
Input waveform as 1D vector
sr - int
Sampling rate of d (not used)
:returns:
pklist - list of (int, int)
Ordered list of landmark peaks found in STFT. First value of
each pair is the time index (in STFT frames, i.e., units of
n_hop/sr secs), second is the FFT bin (in units of sr/n_fft
Hz).
"""
if len(d) == 0:
return []
# masking envelope decay constant
a_dec = (1.0 - 0.01*(self.density*np.sqrt(self.n_hop/352.8)/35.0)) \
**(1.0/OVERSAMP)
# Take spectrogram
mywin = np.hanning(self.n_fft+2)[1:-1]
sgram = np.abs(librosa.stft(d, n_fft=self.n_fft,
hop_length=self.n_hop,
window=mywin))
sgrammax = np.max(sgram)
if sgrammax > 0.0:
sgram = np.log(np.maximum(sgram, np.max(sgram)/1e6))
sgram = sgram - np.mean(sgram)
else:
# The sgram is identically zero, i.e., the input signal was identically
# zero. Not good, but let's let it through for now.
print("find_peaks: Warning: input signal is identically zero.")
# High-pass filter onset emphasis
# [:-1,] discards top bin (nyquist) of sgram so bins fit in 8 bits
sgram = np.array([scipy.signal.lfilter([1, -1],
[1, -(HPF_POLE)** \
(1/OVERSAMP)], s_row)
for s_row in sgram])[:-1,]
# Prune to keep only local maxima in spectrum that appear above an online,
# decaying threshold
peaks = self._decaying_threshold_fwd_prune(sgram, a_dec)
# Further prune these peaks working backwards in time, to remove small peaks
# that are closely followed by a large peak
peaks = self._decaying_threshold_bwd_prune_peaks(sgram, peaks, a_dec)
# build a list of peaks we ended up with
scols = np.shape(sgram)[1]
pklist = []
for col in xrange(scols):
for bin in np.nonzero(peaks[:, col])[0]:
pklist.append( (col, bin) )
return pklist
def peaks2landmarks(self, pklist):
""" Take a list of local peaks in spectrogram
and form them into pairs as landmarks.
pklist is a column-sorted list of (col, bin) pairs as created
by findpeaks().
Return a list of (col, peak, peak2, col2-col) landmark descriptors.
"""
# Form pairs of peaks into landmarks
landmarks = []
if len(pklist) > 0:
# Find column of the final peak in the list
scols = pklist[-1][0] + 1
# Convert (col, bin) list into peaks_at[col] lists
peaks_at = [[] for col in xrange(scols)]
for (col, bin) in pklist:
peaks_at[col].append(bin)
# Build list of landmarks <starttime F1 endtime F2>
for col in xrange(scols):
for peak in peaks_at[col]:
pairsthispeak = 0
for col2 in xrange(col+self.mindt,
min(scols, col+self.targetdt)):
if pairsthispeak < self.maxpairsperpeak:
for peak2 in peaks_at[col2]:
if abs(peak2-peak) < self.targetdf:
#and abs(peak2-peak) + abs(col2-col) > 2 ):
if pairsthispeak < self.maxpairsperpeak:
# We have a pair!
landmarks.append((col, peak,
peak2, col2-col))
pairsthispeak += 1
return landmarks
def wavfile2peaks(self, filename, shifts=None):
""" Read a soundfile and return its landmark peaks as a
list of (time, bin) pairs. If specified, resample to sr first.
shifts > 1 causes hashes to be extracted from multiple shifts of
waveform, to reduce frame effects. """
ext = os.path.splitext(filename)[1]
if ext == PRECOMPPKEXT:
# short-circuit - precomputed fingerprint file
peaks = peaks_load(filename)
dur = np.max(peaks, axis=0)[0]*self.n_hop/float(self.target_sr)
else:
try:
[d, sr] = librosa.load(filename, sr=self.target_sr)
#d, sr = audio_read.audio_read(filename, sr=self.target_sr, channels=1)
except: # audioread.NoBackendError:
message = "wavfile2peaks: Error reading" + filename
if self.fail_on_error:
raise IOError(message)
print(message, "skipping")
d = []
sr = self.target_sr
# Store duration in a global because it's hard to handle
dur = float(len(d))/sr
if shifts is None or shifts < 2:
                peaks = self.find_peaks(d, sr)
else:
# Calculate hashes with optional part-frame shifts
peaklists = []
for shift in range(shifts):
shiftsamps = int(float(shift)/self.shifts*self.n_hop)
peaklists.append(self.find_peaks(d[shiftsamps:], sr))
peaks = peaklists
# instrumentation to track total amount of sound processed
self.soundfiledur = dur
self.soundfiletotaldur += dur
self.soundfilecount += 1
return peaks
def wavfile2hashes(self, filename):
""" Read a soundfile and return its fingerprint hashes as a
list of (time, hash) pairs. If specified, resample to sr first.
shifts > 1 causes hashes to be extracted from multiple shifts of
waveform, to reduce frame effects. """
ext = os.path.splitext(filename)[1]
if ext == PRECOMPEXT:
# short-circuit - precomputed fingerprint file
hashes = hashes_load(filename)
dur = np.max(hashes, axis=0)[0]*self.n_hop/float(self.target_sr)
# instrumentation to track total amount of sound processed
self.soundfiledur = dur
self.soundfiletotaldur += dur
self.soundfilecount += 1
else:
peaks = self.wavfile2peaks(filename, self.shifts)
if len(peaks) == 0:
return []
# Did we get returned a list of lists of peaks due to shift?
if isinstance(peaks[0], list):
peaklists = peaks
query_hashes = []
for peaklist in peaklists:
query_hashes += landmarks2hashes(
self.peaks2landmarks(peaklist)
)
else:
query_hashes = landmarks2hashes(self.peaks2landmarks(peaks))
# remove duplicate elements by pushing through a set
hashes = sorted(list(set(query_hashes)))
#print("wavfile2hashes: read", len(hashes), "hashes from", filename)
return hashes
########### functions to link to actual hash table index database #######
def ingest(self, hashtable, filename):
""" Read an audio file and add it to the database
:params:
hashtable : HashTable object
the hash table to add to
filename : str
name of the soundfile to add
:returns:
dur : float
the duration of the track
nhashes : int
the number of hashes it mapped into
"""
#sr = 11025
#print("ingest: sr=",sr)
#d, sr = librosa.load(filename, sr=sr)
# librosa.load on mp3 files prepends 396 samples compared
# to Matlab audioread ??
#hashes = landmarks2hashes(peaks2landmarks(find_peaks(d, sr,
# density=density,
# n_fft=n_fft,
# n_hop=n_hop)))
hashes = self.wavfile2hashes(filename)
hashtable.store(filename, hashes)
#return (len(d)/float(sr), len(hashes))
#return (np.max(hashes, axis=0)[0]*n_hop/float(sr), len(hashes))
# soundfiledur is set up in wavfile2hashes, use result here
return self.soundfiledur, len(hashes)
HASH_FMT = '<2i'
HASH_MAGIC = 'audfprinthashV00' # 16 chars, FWIW
PEAK_FMT = '<2i'
PEAK_MAGIC = 'audfprintpeakV00' # 16 chars, FWIW
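# For reference, a sketch of the resulting file layout (example values only):
# each file is the 16-byte magic string followed by one little-endian int32
# pair per entry, e.g. hashes_save('x.afpt', [(3, 164483)]) writes
# b'audfprinthashV00' + struct.pack('<2i', 3, 164483), i.e. 16 + 8 bytes.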
def hashes_save(hashfilename, hashes):
""" Write out a list of (time, hash) pairs as 32 bit ints """
with open(hashfilename, 'wb') as f:
f.write(HASH_MAGIC)
for time_, hash_ in hashes:
f.write(struct.pack(HASH_FMT, time_, hash_))
def hashes_load(hashfilename):
""" Read back a set of hashes written by hashes_save """
hashes = []
fmtsize = struct.calcsize(HASH_FMT)
with open(hashfilename, 'rb') as f:
magic = f.read(len(HASH_MAGIC))
if magic != HASH_MAGIC:
raise IOError('%s is not a hash file (magic %s)'
% (hashfilename, magic))
data = f.read(fmtsize)
while data is not None and len(data) == fmtsize:
hashes.append(struct.unpack(HASH_FMT, data))
data = f.read(fmtsize)
return hashes
def peaks_save(peakfilename, peaks):
""" Write out a list of (time, bin) pairs as 32 bit ints """
with open(peakfilename, 'wb') as f:
f.write(PEAK_MAGIC)
for time_, bin_ in peaks:
f.write(struct.pack(PEAK_FMT, time_, bin_))
def peaks_load(peakfilename):
""" Read back a set of (time, bin) pairs written by peaks_save """
peaks = []
fmtsize = struct.calcsize(PEAK_FMT)
with open(peakfilename, 'rb') as f:
magic = f.read(len(PEAK_MAGIC))
if magic != PEAK_MAGIC:
raise IOError('%s is not a peak file (magic %s)'
% (peakfilename, magic))
data = f.read(fmtsize)
while data is not None and len(data) == fmtsize:
peaks.append(struct.unpack(PEAK_FMT, data))
data = f.read(fmtsize)
return peaks
extract_features_analyzer = None
def extract_features(track_obj, *args, **kwargs):
""" Extract the audfprint fingerprint hashes for one file.
:params:
track_obj : object
Gordon's internal structure defining a track; we use
track_obj.fn_audio to find the actual audio file.
:returns:
hashes : list of (int, int)
The times (in frames) and hashes analyzed from the audio file.
"""
global extract_features_analyzer
    if extract_features_analyzer is None:
extract_features_analyzer = Analyzer()
density = None
n_fft = None
n_hop = None
sr = None
if "density" in kwargs:
density = kwargs["density"]
if "n_fft" in kwargs:
n_fft = kwargs["n_fft"]
if "n_hop" in kwargs:
n_hop = kwargs["n_hop"]
if "sr" in kwargs:
sr = kwargs["sr"]
extract_features_analyzer.density = density
extract_features_analyzer.n_fft = n_fft
extract_features_analyzer.n_hop = n_hop
extract_features_analyzer.target_sr = sr
return extract_features_analyzer.wavfile2hashes(track_obj.fn_audio)
g2h_analyzer = None
def glob2hashtable(pattern, density=20.0):
""" Build a hash table from the files matching a glob pattern """
global g2h_analyzer
    if g2h_analyzer is None:
g2h_analyzer = Analyzer(density=density)
ht = hash_table.HashTable()
filelist = glob.glob(pattern)
initticks = time.clock()
totdur = 0.0
tothashes = 0
for ix, file_ in enumerate(filelist):
print(time.ctime(), "ingesting #", ix, ":", file_, "...")
dur, nhash = g2h_analyzer.ingest(ht, file_)
totdur += dur
tothashes += nhash
elapsedtime = time.clock() - initticks
print("Added", tothashes, "(", tothashes/float(totdur), "hashes/sec) at ",
elapsedtime/totdur, "x RT")
return ht
def local_tester():
test_fn = '/Users/dpwe/Downloads/carol11k.wav'
test_ht = hash_table.HashTable()
test_analyzer = Analyzer()
test_analyzer.ingest(test_ht, test_fn)
test_ht.save('httest.pklz')
if __name__ == "__main__":
local_tester()
|
import superimport
import numpy as np
import scipy.sparse
import matplotlib.pyplot as plt
import pyprobml_utils as pml
from matplotlib import colors as mcolors
def demo(priorVar, plot_num):
np.random.seed(1)
colors = dict(mcolors.BASE_COLORS, **mcolors.CSS4_COLORS)
N = 10 # number of interior observed points
D = 150 # number of points we evaluate function at
xs = np.linspace(0, 1, D)
allNdx = np.arange(0, D, 1)
perm = np.random.permutation(D)
obsNdx = perm[: N]
obsNdx = np.concatenate((np.array([0]), obsNdx, np.array([D - 1])))
Nobs = len(obsNdx)
hidNdx = np.setdiff1d(allNdx, obsNdx)
Nhid = len(hidNdx)
xobs = np.random.randn(Nobs)
obsNoiseVar = 1
y = xobs + np.sqrt(obsNoiseVar) * np.random.randn(Nobs)
L = (0.5 * scipy.sparse.diags([-1, 2, -1],
[0, 1, 2], (D - 2, D))).toarray()
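    # L is the (D-2) x D second-difference operator; for example with D = 5 it is
    #   [[-0.5  1.  -0.5  0.   0. ]
    #    [ 0.  -0.5  1.  -0.5  0. ]
    #    [ 0.   0.  -0.5  1.  -0.5]]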
Lambda = 1 / priorVar
L = L * Lambda
L1 = L[:, hidNdx]
L2 = L[:, obsNdx]
B11 = np.dot(np.transpose(L1), L1)
B12 = np.dot(np.transpose(L1), L2)
B21 = np.transpose(B12)
mu = np.zeros(D)
mu[hidNdx] = -np.dot(np.dot(np.linalg.inv(B11), B12), xobs)
mu[obsNdx] = xobs
inverseB11 = np.linalg.inv(B11)
Sigma = np.zeros((D, D))
# https://stackoverflow.com/questions/22927181/selecting-specific-rows-and-columns-from-numpy-array/22927889#22927889
Sigma[hidNdx[:, None], hidNdx] = inverseB11
plt.figure()
plt.plot(obsNdx, xobs, 'bo', markersize=10)
plt.plot(allNdx, mu, 'r-')
S2 = np.diag(Sigma)
upper = (mu + 2 * np.sqrt(S2))
lower = (mu - 2 * np.sqrt(S2))
plt.fill_between(allNdx, lower, upper, alpha=0.2)
for i in range(0, 3):
fs = np.random.multivariate_normal(mu, Sigma)
plt.plot(allNdx, fs, 'k-', alpha=0.7)
plt.title(f'prior variance {priorVar:0.2f}')
pml.savefig(f'gaussian_interpolation_1d_{plot_num}.pdf')
priorVars = [0.01, 0.1]
for i, priorVar in enumerate(priorVars):
demo(priorVar, i)
plt.show()
|
import urllib
class RecaptchaMixin(object):
"""RecaptchaMixin
You must define some options for this mixin. All information
can be found at http://www.google.com/recaptcha
A basic example::
from tornado.options import define
from tornado.web import RequestHandler, asynchronous
define('recaptcha_key', 'key')
define('recaptcha_secret', 'secret')
define('recaptcha_theme', 'clean')
class SignupHandler(RequestHandler, RecaptchaMixin):
def get(self):
self.write('<form method="post" action="">')
self.write(self.xsrf_form_html())
self.write(self.recaptcha_render())
self.write('<button type="submit">Submit</button>')
self.write('</form>')
@asynchronous
def post(self):
self.recaptcha_validate(self._on_validate)
def _on_validate(self, response):
if response:
self.write('success')
self.finish()
return
self.write('failed')
self.finish()
"""
RECAPTCHA_VERIFY_URL = "http://www.google.com/recaptcha/api/verify"
def recaptcha_render(self):
if not self.settings['use_recaptcha']:
return ''
token = self._recaptcha_token()
html = (
'<div id="recaptcha_div"></div>'
'<script type="text/javascript" '
'src="https://www.google.com/recaptcha/api/js/recaptcha_ajax.js">'
'</script><script type="text/javascript">'
'Recaptcha.create("%(key)s", "recaptcha_div", '
'{theme: "%(theme)s",callback:Recaptcha.focus_response_field});'
'</script>'
)
return html % token
def recaptcha_validate(self):
if not self.settings['use_recaptcha']:
return
token = self._recaptcha_token()
challenge = self.get_argument('recaptcha_challenge_field', None)
response = self.get_argument('recaptcha_response_field', None)
post_args = {
'privatekey': token['secret'],
'remoteip': self.request.remote_ip,
'challenge': challenge,
'response': response
}
body = urllib.urlopen(self.RECAPTCHA_VERIFY_URL,
urllib.urlencode(post_args)).read()
verify, message = body.split()
if verify != 'true':
self.flash('Are you human?')
self.redirect('/')
def _recaptcha_token(self):
token = dict(
key=self.settings['recaptcha_key'],
secret=self.settings['recaptcha_secret'],
theme=self.settings['recaptcha_theme'],
)
return token
|
from django.conf.urls import patterns, include, url
from django.contrib import admin
admin.autodiscover()
from statusupdater import views
urlpatterns = patterns('',
url(r'^admin/', include(admin.site.urls)),
url(r'^login', views.login),
url(r'^get_code', views.get_code),
url(r'^hook/(?P<hook_id>[^/]+)/$', views.hook),
url(r'^$', views.index),
url(r'^view', views.status_links),
)
|
from setuptools import setup, find_packages
from codecs import open
from os import path
here = path.abspath(path.dirname(__file__))
with open(path.join(here, 'README.rst'), encoding='utf-8') as f:
long_description = f.read()
setup(
name='gateway',
version='0.0.1',
description='gateway',
long_description=long_description,
url='https://github.com/pap/simplebank',
author='Simplebank Engineering',
author_email='engineering@simplebank.book',
license='MIT',
    classifiers=[
        'Development Status :: 3 - Alpha',
        'Environment :: Web Environment',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: Apache Software License',
        'Operating System :: MacOS :: MacOS X',
        'Operating System :: POSIX',
        'Programming Language :: Python',
        'Programming Language :: Python :: 3',
        'Topic :: Internet',
        'Topic :: Software Development :: Libraries :: Python Modules',
    ],
keywords='microservices gateway',
packages=find_packages(exclude=['contrib', 'docs', 'tests']),
install_requires=[
'nameko>=2.6.0',
'logstash_formatter>=0.5.16',
'statsd>=3.2.1',
"circuitbreaker>=1.0.1",
'gutter>=0.5.0',
'request-id>=0.2.1',
'nameko-sentry>=0.0.5',
'pyopenssl>=19.1.0',
],
# List additional groups of dependencies here (e.g. development
# dependencies). You can install these using the following syntax,
# for example:
# $ pip install -e .[dev,test]
extras_require={
'dev': ['check-manifest'],
'test': ['coverage'],
},
)
|
from hyperstream import TimeInterval, MIN_DATE
from hyperstream.tool import MultiOutputTool
from hyperstream.stream import StreamMetaInstance, AssetStream
import logging
from copy import deepcopy
class SplitterFromStream(MultiOutputTool):
"""
This version of the splitter assumes that the mapping exists as the last element in a (asset) stream
    From version 0.0.2 onwards it supports element=None (the default value), in which case each document is assumed to
    be a dict with keys corresponding to plate values; the respective values are then written into the corresponding
    streams.
"""
def __init__(self, element=None):
super(SplitterFromStream, self).__init__(element=element)
def _execute(self, source, splitting_stream, interval, meta_data_id, output_plate_values):
if splitting_stream is None:
raise ValueError("Splitting stream required for this tool")
if isinstance(splitting_stream, AssetStream):
time_interval = TimeInterval(MIN_DATE, interval.end)
splitter = splitting_stream.window(time_interval, force_calculation=True).last()
else:
splitter = splitting_stream.window(interval, force_calculation=True).last()
if not splitter:
logging.debug("No assets found for source {} and splitter {}"
.format(source.stream_id, splitting_stream.stream_id))
return
mapping = splitter.value
for timestamp, value in source.window(interval, force_calculation=True):
if self.element is None:
for plate_value, sub_value in value.items():
if plate_value in mapping.keys():
yield StreamMetaInstance((timestamp, sub_value), (meta_data_id, plate_value))
else:
logging.error("Unexpected splitting value {}".format(plate_value))
else:
if self.element not in value:
logging.debug("Mapping element {} not in instance".format(self.element))
continue
value = deepcopy(value)
meta_data = str(value.pop(self.element))
if meta_data not in mapping:
logging.warn("Unknown value {} for meta data {}".format(meta_data, self.element))
continue
plate_value = mapping[meta_data]
yield StreamMetaInstance((timestamp, value), (meta_data_id, plate_value))
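# An illustrative sketch of the element=None mode described in the class
# docstring (stream names, plate values and readings below are made up):
#   splitter last value: {"kitchen": ..., "lounge": ...}   # keys are plate values
#   source document:     (t, {"kitchen": 21.5, "lounge": 19.0})
#   yields:              StreamMetaInstance((t, 21.5), (meta_data_id, "kitchen"))
#                        StreamMetaInstance((t, 19.0), (meta_data_id, "lounge"))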
|
from kombu import Connection
from kombu import Exchange, Queue, pools
from microservices.utils import get_logger
from microservices.helpers.logs import InstanceLogger
import six
_logger = get_logger(__file__)
class _exchange(object):
"""Exchange helper"""
def __init__(self, client, name, routing_key=None, logger=None):
"""Initialization
:param client: instance of client
:type client: Client
:param name: name of exchange
:type name: str
:param routing_key: routing key to queue
:type routing_key: str or None
"""
self.client = client
self.name = name
self.routing_key = routing_key
if logger is None:
logger = _logger # pragma: no cover
self.logger = logger
        self.logger.debug('Exchange "%s" built, routing_key: %s', self.name,
                          self.routing_key if self.routing_key is not None else '')
def publish(self, message, routing_key=None):
"""Publish message to exchange
:param message: message for publishing
:type message: any serializable object
:param routing_key: routing key for queue
:return: None
"""
if routing_key is None:
routing_key = self.routing_key
return self.client.publish_to_exchange(self.name, message=message, routing_key=routing_key)
class _queue(object):
"""Queue helper"""
def __init__(self, client, name, logger=None):
"""Initialization
:param client: instance of client
:type client: Client
:param name: name of queue
:type name: str
"""
self.client = client
self.name = name
if logger is None:
logger = _logger # pragma: no cover
self.logger = logger
self.logger.debug('Queue "%s" built', self.name)
def publish(self, message):
"""Publish message to queue
:param message: message for publishing
:type message: any serializable object
:return: None
"""
return self.client.publish_to_queue(self.name, message=message)
@six.python_2_unicode_compatible
class Client(object):
"""Client for queue brokers, kombu based"""
default_connection = 'amqp:///'
def __init__(self, connection='amqp:///', name=None, logger=None, limit=None):
"""Initialization of Client instance
:param connection: connection for broker
:type connection: str, None, kombu.connections.Connection, dict
"""
self.connection = self._get_connection(connection)
self.exchanges = {}
if name is None:
try:
name = '<client: {}>'.format(self.connection.as_uri())
except: # pragma: no cover
# Errors with filesystem transport
name = '<client: {}>'.format(self.connection.transport_cls)
if logger is None:
logger = get_logger(__name__)
self.logger = InstanceLogger(self, logger)
self.name = name
self.logger.debug('%s built', self.name)
if limit is None:
# Set limit as global kombu limit.
limit = pools.get_limit()
self.limit = limit
self.connections = pools.Connections(self.limit)
def __str__(self):
return self.name
def _get_connection(self, connection):
"""Create connection strategy
:param connection: connection for broker
:type connection: str, None, kombu.connections.Connection, dict
:return: instance of kombu.connections.Connection
:rtype: Connection
"""
if not connection:
connection = self.default_connection # pragma: no cover
if isinstance(connection, str):
connection = {'hostname': connection}
if isinstance(connection, dict):
connection = Connection(**connection)
return connection
def declare_exchange(self, name, type='direct', queues=None, **options):
"""Create or update exchange
:param name: name of exchange
:type name: str
:param type: type of exchange - direct, fanout, topic, match
:type type: str
:param queues: list of queues with routing keys: [[queue_name, routing_key], [queue_name, routing_key], ...]
:type queues: list, None or tuple
:param options: additional options for Exchange creation
"""
if queues is None:
queues = [] # pragma: no cover
with self.connections[self.connection].acquire() as conn:
exchange = Exchange(name, type=type, channel=conn, **options)
exchange.declare()
self.exchanges[name] = exchange
for q_name, routing_key in queues:
queue = Queue(name=q_name, channel=conn)
queue.declare()
queue.bind_to(exchange=name, routing_key=routing_key)
                self.logger.debug('Queue "%s" with routing_key "%s" was bound to exchange "%s"', q_name,
                                  routing_key if routing_key else q_name, name)
def delete_exchange(self, name):
"""Delete exchange by name
:param name: name of exchange
:type name: str
"""
with self.connections[self.connection].acquire() as conn:
exchange = self.exchanges.pop(name, Exchange(name, channel=conn))
exchange.delete()
self.logger.debug('Exchange "%s" was deleted', name)
def purge_queue(self, name):
"""Remove all messages from queue
:param name: name of queue
:type name: str
"""
        with self.connections[self.connection].acquire() as conn:
Queue(name=name, channel=conn).purge()
self.logger.debug('Queue "%s" was purged', name)
def delete_queue(self, name):
"""Delete queue by name
:param name: name of queue
:type name: str
"""
with self.connections[self.connection].acquire() as conn:
Queue(name=name, channel=conn).delete()
self.logger.debug('Queue "%s" was deleted', name)
def exchange(self, name, routing_key=None):
"""Create exchange instance for simple publishing
:param name: name of exchange
:type name: str
:param routing_key: routing key
:type routing_key: str
:return: _exchange
"""
return _exchange(self, name, routing_key=routing_key, logger=self.logger)
def queue(self, name):
"""Create queue instance for simple publishing
:param name: name of queue
:type name: str
:return: _queue
"""
return _queue(self, name, logger=self.logger)
def publish_to_exchange(self, name, routing_key, message, **properties):
"""Publish message to exchange
:param name: name of exchange
:type name: str
:param routing_key: routing key
:type routing_key: str
:param message: payload for publishing
:type message: any serializable object
:param properties: additional properties for Producer.publish()
"""
with self.connections[self.connection].acquire() as conn:
producer = conn.Producer()
result = producer.publish(message, exchange=self.exchanges[name], routing_key=routing_key, **properties)
self.logger.info('Message (len: %s) was published to exchange "%s" with routing_key "%s"', len(message),
name,
routing_key if routing_key else '')
return result
def publish_to_queue(self, name, message, **properties):
"""Publish message to queue
:param name: name of queue
:type name: str
:param message: payload for publishing
:type message: any serializable object
:param properties: additional properties for Producer publish
"""
with self.connections[self.connection].acquire() as conn:
producer = conn.Producer()
result = producer.publish(message, routing_key=name, **properties)
self.logger.info('Message (len: %s) was published to queue "%s"', len(message), name)
return result
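# A minimal usage sketch (the broker URL, exchange and queue names below are
# assumptions for illustration, not part of this module):
#
#   client = Client('amqp://guest:guest@localhost//')
#   client.declare_exchange('events', type='direct',
#                           queues=[['events.audit', 'audit']])
#   client.exchange('events', routing_key='audit').publish({'action': 'login'})
#   client.queue('events.audit').publish({'action': 'logout'})
#   client.purge_queue('events.audit')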
|
'''
The four adjacent digits in the 1000-digit number that
have the greatest product are 9 × 9 × 8 × 9 = 5832.
73167176531330624919225119674426574742355349194934
96983520312774506326239578318016984801869478851843
85861560789112949495459501737958331952853208805511
12540698747158523863050715693290963295227443043557
66896648950445244523161731856403098711121722383113
62229893423380308135336276614282806444486645238749
30358907296290491560440772390713810515859307960866
70172427121883998797908792274921901699720888093776
65727333001053367881220235421809751254540594752243
52584907711670556013604839586446706324415722155397
53697817977846174064955149290862569321978468622482
83972241375657056057490261407972968652414535100474
82166370484403199890008895243450658541227588666881
16427171479924442928230863465674813919123162824586
17866458359124566529476545682848912883142607690042
24219022671055626321111109370544217506941658960408
07198403850962455444362981230987879927244284909188
84580156166097919133875499200524063689912560717606
05886116467109405077541002256983155200055935729725
71636269561882670428252483600823257530420752963450
Find the thirteen adjacent digits in the 1000-digit number that
have the greatest product. What is the value of this product?
'''
import timeit
num_str = "73167176531330624919225119674426574742355349194934" \
"96983520312774506326239578318016984801869478851843" \
"85861560789112949495459501737958331952853208805511" \
"12540698747158523863050715693290963295227443043557" \
"66896648950445244523161731856403098711121722383113" \
"62229893423380308135336276614282806444486645238749" \
"30358907296290491560440772390713810515859307960866" \
"70172427121883998797908792274921901699720888093776" \
"65727333001053367881220235421809751254540594752243" \
"52584907711670556013604839586446706324415722155397" \
"53697817977846174064955149290862569321978468622482" \
"83972241375657056057490261407972968652414535100474" \
"82166370484403199890008895243450658541227588666881" \
"16427171479924442928230863465674813919123162824586" \
"17866458359124566529476545682848912883142607690042" \
"24219022671055626321111109370544217506941658960408" \
"07198403850962455444362981230987879927244284909188" \
"84580156166097919133875499200524063689912560717606" \
"05886116467109405077541002256983155200055935729725" \
"71636269561882670428252483600823257530420752963450"
def loop(digits, adj):
    p = 0
    for i in range(len(digits) - adj + 1):
        t = 1
        if '0' in digits[i:i+adj]: continue  # 0 multiplication means 0
        if '1' in digits[i:i+adj]: continue  # ad-hoc heuristic for speed improvement
        for j in range(0, adj):
            t *= int(digits[i+j])
        if t > p: p = t
    return p
if __name__ == '__main__':
print loop(num_str, 13)
print timeit.Timer('problem_008.loop(problem_008.num_str, 13)',
'import problem_008').timeit(10)
|
import logging
try:
from urlparse import urlparse
except ImportError:
# python 3
from urllib.parse import urlparse
from django.conf import settings
from django.views.decorators.cache import never_cache
from django.utils.decorators import method_decorator
from django.views.decorators.csrf import csrf_protect
from django.utils.encoding import iri_to_uri
from django.utils.six.moves.urllib.parse import urljoin
from social.apps.django_app.utils import psa, STORAGE
from social.strategies.utils import get_strategy
from social.utils import user_is_authenticated
from social.apps.django_app.views import _do_login as social_auth_login
from social.exceptions import AuthException
from rest_framework.generics import GenericAPIView
from rest_framework.response import Response
from rest_framework import status
from requests.exceptions import HTTPError
from .serializers import (SocialAuthInputSerializer, UserSerializer,
TokenSerializer, UserTokenSerializer)
l = logging.getLogger(__name__)
REDIRECT_URI = getattr(settings, 'REST_SOCIAL_OAUTH_REDIRECT_URI', '/')
DOMAIN_FROM_ORIGIN = getattr(settings, 'REST_SOCIAL_DOMAIN_FROM_ORIGIN', True)
def load_strategy(request=None):
return get_strategy('rest_social_auth.strategy.DRFStrategy', STORAGE, request)
@psa(REDIRECT_URI, load_strategy=load_strategy)
def register_by_auth_token(request, backend, *args, **kwargs):
user = request.user
redirect_uri = kwargs.pop('manual_redirect_uri', None)
if redirect_uri:
request.backend.redirect_uri = redirect_uri
elif DOMAIN_FROM_ORIGIN:
origin = request.strategy.request.META.get('HTTP_ORIGIN')
if origin:
relative_path = urlparse(request.backend.redirect_uri).path
url = urlparse(origin)
origin_scheme_host = "%s://%s" % (url.scheme, url.netloc)
location = urljoin(origin_scheme_host, relative_path)
request.backend.redirect_uri = iri_to_uri(location)
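            # For illustration (values assumed): with HTTP_ORIGIN set to
            # 'https://app.example.com' and a configured redirect_uri of
            # 'http://localhost:8000/auth/complete/', the final redirect_uri
            # becomes 'https://app.example.com/auth/complete/'.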
is_authenticated = user_is_authenticated(user)
user = is_authenticated and user or None
# skip checking state by setting following params to False
# it is responsibility of front-end to check state
# TODO: maybe create an additional resource, where front-end will
# store the state before making a call to oauth provider
# so server can save it in session and consequently check it before
# sending request to acquire access token.
# In case of token authentication we need a way to store an anonymous
# session to do it.
request.backend.REDIRECT_STATE = False
request.backend.STATE_PARAMETER = False
user = request.backend.complete(user=user, *args, **kwargs)
return user
class BaseSocialAuthView(GenericAPIView):
"""
View will login or signin (create) the user from social oauth2.0 provider.
**Input** (default serializer_class_in):
{
"provider": "facebook",
"code": "AQBPBBTjbdnehj51"
}
+ optional
"redirect_uri": "/relative/or/absolute/redirect/uri"
**Output**:
user data in serializer_class format
"""
serializer_class_in = SocialAuthInputSerializer
serializer_class = None
def get_serializer_class_in(self):
assert self.serializer_class_in is not None, (
"'%s' should either include a `serializer_class_in` attribute, "
"or override the `get_serializer_class()` method."
% self.__class__.__name__
)
return self.serializer_class_in
def get_serializer_in(self, *args, **kwargs):
"""
Return the serializer instance that should be used for validating and
deserializing input, and for serializing output.
"""
serializer_class = self.get_serializer_class_in()
kwargs['context'] = self.get_serializer_context()
return serializer_class(*args, **kwargs)
@method_decorator(never_cache)
def post(self, request, *args, **kwargs):
serializer_in = self.get_serializer_in(data=request.data)
serializer_in.is_valid(raise_exception=True)
self.set_input_data(request, serializer_in.validated_data.copy())
try:
user = self.get_object()
except (AuthException, HTTPError) as e:
l.exception(e)
return self.respond_error(e)
resp_data = self.get_serializer(instance=user)
self.do_login(request.backend, user)
return Response(resp_data.data)
def get_object(self):
provider = self.request.auth_data.pop('provider')
manual_redirect_uri = self.request.auth_data.pop('redirect_uri', None)
manual_redirect_uri = self.get_redirect_uri(manual_redirect_uri)
return register_by_auth_token(self.request, provider,
manual_redirect_uri=manual_redirect_uri)
def do_login(self, backend, user):
"""
Do login action here.
For example in case of session authentication store the session in
cookies.
"""
def set_input_data(self, request, auth_data):
"""
auth_data will be used used as request_data in strategy
"""
request.auth_data = auth_data
def get_redirect_uri(self, manual_redirect_uri):
if not manual_redirect_uri:
manual_redirect_uri = getattr(settings,
'REST_SOCIAL_OAUTH_ABSOLUTE_REDIRECT_URI', None)
return manual_redirect_uri
def respond_error(self, error):
return Response(status=status.HTTP_400_BAD_REQUEST)
class SocialSessionAuthView(BaseSocialAuthView):
serializer_class = UserSerializer
def do_login(self, backend, user):
social_auth_login(backend, user, user.social_user)
@method_decorator(csrf_protect) # just to be sure csrf is not disabled
def post(self, request, *args, **kwargs):
return super(SocialSessionAuthView, self).post(request, *args, **kwargs)
class SocialTokenOnlyAuthView(BaseSocialAuthView):
serializer_class = TokenSerializer
class SocialTokenUserAuthView(BaseSocialAuthView):
serializer_class = UserTokenSerializer
|
from django.conf import settings
from django.core.management.base import NoArgsCommand
from django_cas.models import SessionServiceTicket
class Command(NoArgsCommand):
help = "Purges CAS session - service ticket mappings not matching any session."
def handle_noargs(self, **options):
"""Purges Session Service Tickets with non-existing session keys."""
        verbose = options.get('verbosity') in ['2', '3']
session_engine = __import__(name=settings.SESSION_ENGINE, fromlist=['SessionStore'])
SessionStore = getattr(session_engine, 'SessionStore')
s = SessionStore()
for sst in SessionServiceTicket.objects.all():
if not s.exists(sst.session_key):
if verbose:
print "deleting session service ticket for session: " + sst.session_key
sst.delete()
|
"""
A simple GEGL snippet with the Gobject automatic introspection method.
This inverts the colors of a PNG file.
BUG: GEGL has been built without introspection on Debian 8.
See https://github.com/jsbueno/python-gegl/issues/2
See: http://linuxfr.org/news/gegl-0-3-0-et-babl-0-1-12-sont-de-sortie
http://gegl.org/operations.html#Gegl
http://gegl.org/operations.html#GEGL%20operations
https://github.com/jsbueno/python-gegl/blob/master/snippets.py
https://github.com/jsbueno/python-gegl
Debian dependencies: libgegl-dev (?)
"""
import argparse
from gi.repository import Gegl as gegl
def main():
# Parse options
parser = argparse.ArgumentParser(description='An argparse snippet.')
parser.add_argument("--infile", "-i", help="the input file", required=True, metavar="STRING")
parser.add_argument("--outfile", "-o", help="the output file", required=True, metavar="STRING")
args = parser.parse_args()
infile = args.infile
outfile = args.outfile
# GEGL ######################################
gegl.init([])
#print(gegl.list_operations())
# Make nodes
node1 = gegl.Node()
node2 = gegl.Node() # png-load
node3 = gegl.Node() # invert
node4 = gegl.Node() # png-save
# Set properties
node2.set_property("operation", "gegl:png-load")
node2.set_property("path", infile)
node3.set_property("operation", "gegl:invert")
node4.set_property("operation", "gegl:png-save")
node4.set_property("path", outfile)
# Make the graph
node1.add_child(node2)
node1.add_child(node3)
node1.add_child(node4)
node2.connect_to("output", node3, "input")
node3.connect_to("output", node4, "input")
# Process
node4.process()
if __name__ == '__main__':
main()
|
import sys
import traceback
import unittest
from cutoff import CutoffTest
from mfcc import MFCCTest
import signal_comparison
from sound_file import SoundFileTest
def run_tests():
unittest.main()
if __name__ == "__main__":
try:
file_names = ["recordings//" + str + ".wav" for str in
["ja1", "ja2", "ja3", "nein1", "nein2", "nein3"]]
metrics = signal_comparison.compare_files(file_names, True)
# nodes, edges = visualization.build_graph(metrics)
# visualization.print_graph(nodes, edges)
except Exception as e:
traceback.print_exc(file=sys.stdout)
print(repr(e))
|
import os, re, os.path, shutil
from behave import *
def get_stream(context, stream):
assert stream in ['stderr', 'stdout'], "Unknown output stream {}".format(stream)
return getattr(context.output, stream)
def get_env_path(context, file_):
return os.path.join(context.env.cwd, file_)
def get_data_file_path(file_):
dir_ = os.path.dirname(os.path.abspath(__file__))
return os.path.join(dir_, '..', '..', 'verification', 'data', file_)
def assert_not_empty(obj):
assert len(obj) > 0
def assert_file_exists(file_):
assert os.path.isfile(file_), "The file \"{}\" does not exist.".format(file_)
def assert_file_not_empty(file_):
with open(file_, 'r') as f:
assert_not_empty(f.read().strip())
def remove_warnings(string_):
return re.sub("^WARNING:.+\n", "", string_)
@given(u'I create the directory "{directory}"')
def step_impl(context, directory):
os.makedirs(get_env_path(context, directory))
@given(u'I create the file "{file_}" with the contents')
def step_impl(context, file_):
with open(get_env_path(context, file_), 'w') as f:
f.write(context.text)
@given(u'I copy the example data files')
def step_impl(context):
for row in context.table.rows:
shutil.copy(get_data_file_path(row['source']),
get_env_path(context, row['dest']))
@given(u'I copy the example data directories')
def step_impl(context):
for row in context.table.rows:
shutil.copytree(get_data_file_path(row['source']),
get_env_path(context, row['dest']))
@when(u'I run the command')
def step_impl(context):
context.output = context.env.run(
"bash -c '{}'".format(os.path.expandvars(context.text)),
expect_error = True,
expect_stderr = True)
@then(u'the {stream} should be empty')
def step_impl(context, stream):
output = get_stream(context, stream)
assert output == "",\
"The {} should be empty but contains:\n\n{}".format(stream, output)
@then(u'excluding warnings the {stream} should be empty')
def step_impl(context, stream):
output = get_stream(context, stream)
assert remove_warnings(output) == "",\
"The {} should be empty but contains:\n\n{}".format(stream, output)
@then(u'the exit code should be {code}')
def step_impl(context, code):
returned = context.output.returncode
assert returned == int(code),\
"Process should return exit code {} but was {}".format(code, returned)
@then(u'the {stream} should contain')
def step_impl(context, stream):
output = get_stream(context, stream)
assert context.text in output
@then(u'the {stream} should equal')
def step_impl(context, stream):
output = get_stream(context, stream)
assert context.text == output
@then(u'excluding warnings the {stream} should equal')
def step_impl(context, stream):
output = get_stream(context, stream)
assert context.text == remove_warnings(output)
@then(u'the {stream} should match /{regexp}/')
def step_impl(context, stream, regexp):
output = get_stream(context, stream)
assert re.match(regexp, output) != None,\
"Regular expression {} not found in:\n'{}'".format(regexp, output)
@then(u'the directory "{}" should not exist')
def step_impl(context, dir_):
assert not os.path.isdir(dir_),\
"The directory \"{}\" should not exist.".format(dir_)
@then(u'the file "{}" should exist')
def step_impl(context, file_):
assert_file_exists(get_env_path(context, file_))
@then(u'the file "{}" should not be empty')
def step_impl(context, file_):
assert_file_not_empty(get_env_path(context, file_))
@then(u'the following files should exist and not be empty')
def step_impl(context):
for row in context.table.rows:
file_ = get_env_path(context, row['file'])
assert_file_exists(file_)
assert_file_not_empty(file_)
|
from flask import Flask, render_template, request
import trafficlights.controller as controller
import trafficlights.poller as poller
from trafficlights.updaters.teamcity_updater import TeamCityUpdater
from trafficlights.updaters.flash_updater import FlashUpdater
import os
import pwd
import logging
from logging.handlers import RotatingFileHandler
app = Flask(__name__)
def username():
return pwd.getpwuid(os.geteuid()).pw_name
def log_file_path():
return '/var/tmp/' + username() + 'trafficlights.log'
log_file = log_file_path()
file_handler = RotatingFileHandler(log_file, maxBytes=100000, backupCount=3)
file_handler.setLevel(logging.DEBUG)
app.logger.addHandler(file_handler)
app.logger.setLevel(logging.DEBUG)
app.logger.info('')
app.logger.info('-------------------------------------------------------')
app.logger.info('Starting trafficlights website')
app.logger.info('Running as user ' + username())
try:
def poweroff():
for i in range(lights.num_indicators):
lights.set_indicator(i, controller.Controller.BOTH)
os.system('sudo poweroff')
lights = controller.Controller(controller.FULLSIZE_V1, app.logger)
if lights.num_inputs > 0:
lights.add_input_response(0, poweroff)
app.logger.info('Creating updaters')
teamcity_updater = TeamCityUpdater(lights, app.logger)
flash_updater = FlashUpdater(lights, app.logger, enable_lights=False)
app.logger.debug('Starting poller')
poller = poller.Poller(lights, [teamcity_updater, flash_updater], app.logger)
poller.start()
import trafficlights.views.index
import trafficlights.views.admin
import trafficlights.views.logs
import trafficlights.views.teamcity
except Exception as ex:
app.logger.exception(ex)
raise
|
import traceback
import json
from rq.timeouts import JobTimeoutException
import smtplib
import quopri
from email.parser import Parser
import frappe
from frappe import _, safe_encode, task
from frappe.model.document import Document
from frappe.email.queue import get_unsubcribed_url
from frappe.email.email_body import add_attachment
from frappe.utils import cint
from email.policy import SMTPUTF8
MAX_RETRY_COUNT = 3
class EmailQueue(Document):
DOCTYPE = 'Email Queue'
def set_recipients(self, recipients):
self.set("recipients", [])
for r in recipients:
self.append("recipients", {"recipient":r, "status":"Not Sent"})
def on_trash(self):
self.prevent_email_queue_delete()
def prevent_email_queue_delete(self):
if frappe.session.user != 'Administrator':
frappe.throw(_('Only Administrator can delete Email Queue'))
def get_duplicate(self, recipients):
values = self.as_dict()
del values['name']
duplicate = frappe.get_doc(values)
duplicate.set_recipients(recipients)
return duplicate
@classmethod
def find(cls, name):
return frappe.get_doc(cls.DOCTYPE, name)
@classmethod
def find_one_by_filters(cls, **kwargs):
name = frappe.db.get_value(cls.DOCTYPE, kwargs)
return cls.find(name) if name else None
def update_db(self, commit=False, **kwargs):
frappe.db.set_value(self.DOCTYPE, self.name, kwargs)
if commit:
frappe.db.commit()
def update_status(self, status, commit=False, **kwargs):
self.update_db(status = status, commit = commit, **kwargs)
if self.communication:
communication_doc = frappe.get_doc('Communication', self.communication)
communication_doc.set_delivery_status(commit=commit)
@property
def cc(self):
return (self.show_as_cc and self.show_as_cc.split(",")) or []
@property
def to(self):
return [r.recipient for r in self.recipients if r.recipient not in self.cc]
@property
def attachments_list(self):
return json.loads(self.attachments) if self.attachments else []
def get_email_account(self):
from frappe.email.doctype.email_account.email_account import EmailAccount
if self.email_account:
return frappe.get_doc('Email Account', self.email_account)
return EmailAccount.find_outgoing(
match_by_email = self.sender, match_by_doctype = self.reference_doctype)
def is_to_be_sent(self):
return self.status in ['Not Sent','Partially Sent']
def can_send_now(self):
hold_queue = (cint(frappe.defaults.get_defaults().get("hold_queue"))==1)
if frappe.are_emails_muted() or not self.is_to_be_sent() or hold_queue:
return False
return True
def send(self, is_background_task=False):
""" Send emails to recipients.
"""
if not self.can_send_now():
frappe.db.rollback()
return
with SendMailContext(self, is_background_task) as ctx:
message = None
for recipient in self.recipients:
if not recipient.is_mail_to_be_sent():
continue
message = ctx.build_message(recipient.recipient)
if not frappe.flags.in_test:
ctx.smtp_session.sendmail(from_addr=self.sender, to_addrs=recipient.recipient, msg=message)
ctx.add_to_sent_list(recipient)
if frappe.flags.in_test:
frappe.flags.sent_mail = message
return
if ctx.email_account_doc.append_emails_to_sent_folder and ctx.sent_to:
ctx.email_account_doc.append_email_to_sent_folder(message)
@task(queue = 'short')
def send_mail(email_queue_name, is_background_task=False):
"""This is equalent to EmqilQueue.send.
This provides a way to make sending mail as a background job.
"""
record = EmailQueue.find(email_queue_name)
record.send(is_background_task=is_background_task)
class SendMailContext:
def __init__(self, queue_doc: Document, is_background_task: bool = False):
self.queue_doc = queue_doc
self.is_background_task = is_background_task
self.email_account_doc = queue_doc.get_email_account()
self.smtp_server = self.email_account_doc.get_smtp_server()
self.sent_to = [rec.recipient for rec in self.queue_doc.recipients if rec.is_main_sent()]
def __enter__(self):
self.queue_doc.update_status(status='Sending', commit=True)
return self
def __exit__(self, exc_type, exc_val, exc_tb):
exceptions = [
smtplib.SMTPServerDisconnected,
smtplib.SMTPAuthenticationError,
smtplib.SMTPRecipientsRefused,
smtplib.SMTPConnectError,
smtplib.SMTPHeloError,
JobTimeoutException
]
self.smtp_server.quit()
self.log_exception(exc_type, exc_val, exc_tb)
if exc_type in exceptions:
email_status = (self.sent_to and 'Partially Sent') or 'Not Sent'
self.queue_doc.update_status(status = email_status, commit = True)
elif exc_type:
if self.queue_doc.retry < MAX_RETRY_COUNT:
update_fields = {'status': 'Not Sent', 'retry': self.queue_doc.retry + 1}
else:
update_fields = {'status': (self.sent_to and 'Partially Errored') or 'Error'}
self.queue_doc.update_status(**update_fields, commit = True)
else:
email_status = self.is_mail_sent_to_all() and 'Sent'
email_status = email_status or (self.sent_to and 'Partially Sent') or 'Not Sent'
self.queue_doc.update_status(status = email_status, commit = True)
def log_exception(self, exc_type, exc_val, exc_tb):
if exc_type:
traceback_string = "".join(traceback.format_tb(exc_tb))
traceback_string += f"\n Queue Name: {self.queue_doc.name}"
if self.is_background_task:
frappe.log_error(title = 'frappe.email.queue.flush', message = traceback_string)
else:
frappe.log_error(message = traceback_string)
@property
def smtp_session(self):
if frappe.flags.in_test:
return
return self.smtp_server.session
def add_to_sent_list(self, recipient):
# Update recipient status
recipient.update_db(status='Sent', commit=True)
self.sent_to.append(recipient.recipient)
def is_mail_sent_to_all(self):
return sorted(self.sent_to) == sorted([rec.recipient for rec in self.queue_doc.recipients])
def get_message_object(self, message):
return Parser(policy=SMTPUTF8).parsestr(message)
def message_placeholder(self, placeholder_key):
map = {
'tracker': '<!--email open check-->',
'unsubscribe_url': '<!--unsubscribe url-->',
'cc': '<!--cc message-->',
'recipient': '<!--recipient-->',
}
return map.get(placeholder_key)
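	# For illustration (address assumed): '<!--recipient-->' in a queued message
	# is replaced per recipient by build_message() below, e.g. with
	# 'jane@example.com' (unless expose_recipients == "header"); the tracker and
	# unsubscribe placeholders are filled in by get_tracker_str() and
	# get_unsubscribe_str().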
def build_message(self, recipient_email):
"""Build message specific to the recipient.
"""
message = self.queue_doc.message
if not message:
return ""
message = message.replace(self.message_placeholder('tracker'), self.get_tracker_str())
message = message.replace(self.message_placeholder('unsubscribe_url'),
self.get_unsubscribe_str(recipient_email))
message = message.replace(self.message_placeholder('cc'), self.get_receivers_str())
message = message.replace(self.message_placeholder('recipient'),
self.get_receipient_str(recipient_email))
message = self.include_attachments(message)
return message
def get_tracker_str(self):
tracker_url_html = \
'<img src="https://{}/api/method/frappe.core.doctype.communication.email.mark_email_as_seen?name={}"/>'
message = ''
if frappe.conf.use_ssl and self.email_account_doc.track_email_status:
message = quopri.encodestring(
tracker_url_html.format(frappe.local.site, self.queue_doc.communication).encode()
).decode()
return message
def get_unsubscribe_str(self, recipient_email):
unsubscribe_url = ''
if self.queue_doc.add_unsubscribe_link and self.queue_doc.reference_doctype:
doctype, doc_name = self.queue_doc.reference_doctype, self.queue_doc.reference_name
unsubscribe_url = get_unsubcribed_url(doctype, doc_name, recipient_email,
self.queue_doc.unsubscribe_method, self.queue_doc.unsubscribe_param)
return quopri.encodestring(unsubscribe_url.encode()).decode()
def get_receivers_str(self):
message = ''
if self.queue_doc.expose_recipients == "footer":
to_str = ', '.join(self.queue_doc.to)
cc_str = ', '.join(self.queue_doc.cc)
message = f"This email was sent to {to_str}"
message = message + f" and copied to {cc_str}" if cc_str else message
return message
def get_receipient_str(self, recipient_email):
message = ''
if self.queue_doc.expose_recipients != "header":
message = recipient_email
return message
def include_attachments(self, message):
message_obj = self.get_message_object(message)
attachments = self.queue_doc.attachments_list
for attachment in attachments:
if attachment.get('fcontent'):
continue
fid = attachment.get("fid")
if fid:
_file = frappe.get_doc("File", fid)
fcontent = _file.get_content()
attachment.update({
'fname': _file.file_name,
'fcontent': fcontent,
'parent': message_obj
})
attachment.pop("fid", None)
add_attachment(**attachment)
elif attachment.get("print_format_attachment") == 1:
attachment.pop("print_format_attachment", None)
print_format_file = frappe.attach_print(**attachment)
print_format_file.update({"parent": message_obj})
add_attachment(**print_format_file)
return safe_encode(message_obj.as_string())
@frappe.whitelist()
def retry_sending(name):
doc = frappe.get_doc("Email Queue", name)
if doc and (doc.status == "Error" or doc.status == "Partially Errored"):
doc.status = "Not Sent"
for d in doc.recipients:
if d.status != 'Sent':
d.status = 'Not Sent'
doc.save(ignore_permissions=True)
@frappe.whitelist()
def send_now(name):
record = EmailQueue.find(name)
if record:
record.send()
def on_doctype_update():
"""Add index in `tabCommunication` for `(reference_doctype, reference_name)`"""
frappe.db.add_index('Email Queue', ('status', 'send_after', 'priority', 'creation'), 'index_bulk_flush')
|
"""
Package for magical effects
"""
from .damage import DamageEffect
from .damagemodifier import DamageModifier
from .effectscollection import EffectsCollection
from .effect import Effect, EffectHandle
from .heal import Heal
from .movementmode import MovementModeModifier
from .poison import Poison
|
from pyramid.config import Configurator
from pyramid.authentication import AuthTktAuthenticationPolicy
from pyramid.authorization import ACLAuthorizationPolicy
from sqlalchemy import engine_from_config
from .models import (
DBSession,
Base,
)
import os
def main(global_config, **settings):
""" This function returns a Pyramid WSGI application."""
database_url = os.environ.get('DATABASE_URL', None)
if database_url is not None:
settings['sqlalchemy.url'] = database_url
engine = engine_from_config(settings, 'sqlalchemy.')
DBSession.configure(bind=engine)
Base.metadata.bind = engine
auth_secret = os.environ.get('AUTH_SECRET', 'supersecret')
authn_policy = AuthTktAuthenticationPolicy(auth_secret)
authz_policy = ACLAuthorizationPolicy()
config = Configurator(settings=settings,
root_factory='learning_journal.security.MyRoot')
config.set_authentication_policy(authn_policy)
config.set_authorization_policy(authz_policy)
config.include('pyramid_jinja2')
config.add_static_view('static', 'static', cache_max_age=3600)
config.add_route('home', '/')
config.add_route('article', '/article/{article_id}')
config.add_route('add_entry', '/add_entry')
config.add_route('edit_entry', '/edit_entry/{article_id}')
config.add_route('delete_entry', '/delete_entry/{article_id}')
config.scan()
return config.make_wsgi_app()
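# A Pyramid app like this is usually served through a PasteDeploy .ini file and
# `pserve`; a minimal, illustrative configuration (the egg name, database URL and
# server section are assumptions, not taken from this project) might look like:
#
#   [app:main]
#   use = egg:learning_journal
#   sqlalchemy.url = sqlite:///learning_journal.sqlite
#
#   [server:main]
#   use = egg:waitress#main
#   listen = 0.0.0.0:6543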
|
class TinydealPipeline(object):
def process_item(self, item, spider):
return item
|
# Merge the two files line by line: for each pair of corresponding lines, write the
# newline-stripped line of commons.py immediately followed by the raw line of blabla.py.
with open('commons.py') as c, open('blabla.py') as b, open('commons2.py', 'w') as o:
    for x, y in zip((line.strip('\n') for line in c), b):
        o.write(x + y)
|
import csv as csvreader
import pdb
import time
import sys
import logging
""" SERP_mat_conditioner is a simple python script that takes in a comma delimited input
file which contains parameters and options and outputs the appropriate material
composition in atomic percent inside of a SERPENT file that is also specified in the
input file. A sample input file is given below
<<
< Free/Preserved , [number of header lines] , [Material Name] , [Density Option] , [Temperature in K]
, [ logger option] >
< [Path_to_SERPENT_input_file.txt] , [Mid Part of New File Name] >
< [ Ratio of group n] , [Ratio of group n + 1] , [ ....N] >
< [ Number of carrier salt species ] , [ Z of n species ] , [Ratio of n species ] , [ Z of N ] , [ R of N] >
< [ Num of salt species ] , [ Z of non host ] , [ Density of species with Z and host ] .... [ Nth species ] >
< [ele],[iso],[grp],[cth],[htc],[pct],[ptt],[mof],[atf] #These are the columns in the input files below which
#correspond to the above categories >
< [ Comments ] >
< [ele] , [iso] , [grp] , [htc] , [cth] , [pct] , [ptt]
>>
[logger option] sets the logger default level. 0 for all, > 10 for just info and crits
[Material Name] is the exact material name used in SERPENT
[Density Option] -1 indicates a desire to calculate density using the density array (row 4) a
positive number will be taken as the input density
[Ratio of group n] in the 2nd row is for the preserved option where a spent fuel ratio is
desired to be preserved
[ele] is the Z number of each isotope
[iso] is the A number of each isotope
[grp] is the salt calculation group (determines how it is handled)
1 = Salt Constituent, think Na in NaCl
2 = Salt Primary Carrier, think Cl in NaCl
3...N = all other groups, generally assumed to be fuel
[htc] is the number of group 2 atoms per that element in the salt
[cth] is the number of that element atoms per group 2 atom
[pct] is the input atomic or weight percentage (as a percent, so 100,
not 1, for 100%) within the group. Unless it is group 1
in which case it is the atomic percentage of that element's isotope within that element
[ptt] where -1 indicates a weight percentage and 1 indicates an atomic percentage for input
option [pct]
"""
def trunc( f , n ):
    ''' Truncate a float f to at most n decimal places, without rounding or padding '''
slen = len( '%.*f' % ( n , f ) )
return str( f )[:slen]
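# A couple of quick examples of the behaviour above:
#   trunc(3.14159, 2) -> '3.14'
#   trunc(2.5, 4)     -> '2.5'   (shorter representations come back unchanged)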
dzeros = "00"
tzeros = "000"
inputfile = raw_input( "Please enter file name, local path only, of csv file to open \n" )
csvfile = open( inputfile, "r" )
reader = csvreader.reader( csvfile )
csvinput = []
for row in reader:
csvinput.append( row )
HostFileName = csvinput[ 1 ][ 0 ][ 0 : len( csvinput[ 1 ][ 0 ] ) ]
HostFile = open( HostFileName , "r" )
NameStartIndex = HostFileName.rfind("/") + 1
NameEndIndex = HostFileName.rfind(".")
BaseName = HostFileName[ NameStartIndex : NameEndIndex ]
NameExtension = csvinput[ 1 ][ 1 ][ 0 : len( csvinput[ 1 ][ 1 ] ) ]
NewFileName = BaseName + "_" + NameExtension + "_" + time.strftime( "%d_%m_%Y" ) \
+ ".txt"
LogFileName = BaseName + "_" + NameExtension + "_" + time.strftime( "%d_%m_%Y" ) \
+ "_log" + ".txt"
NewFile = open( str( NewFileName ) , "w" )
try:
LogLevel = int( csvinput[ 0 ][ 5 ] )
except:
sys.exit( "ERROR!!: Log level can not be cast as an integer!" )
logging.basicConfig( filename = LogFileName , format ="[%(levelname)8s] %(message)s" \
, filemode = 'w' , level = LogLevel )
logging.debug( "This is the debug level reporting in" )
logging.info( "This is the info level reporting in " )
logging.warning( "This is the warning level reporting in" )
logging.error( "This is the error level reporting in" )
logging.critical( "This is the critical level reporting in" )
ele = int( csvinput[ 5 ][ 0 ] )
iso = int( csvinput[ 5 ][ 1 ] )
grp = int( csvinput[ 5 ][ 2 ] )
cth = int( csvinput[ 5 ][ 3 ] )
htc = int( csvinput[ 5 ][ 4 ] )
pct = int( csvinput[ 5 ][ 5 ] )
ptt = int( csvinput[ 5 ][ 6 ] )
mof = int( csvinput[ 5 ][ 7 ] )
atf = int( csvinput[ 5 ][ 8 ] )
StartRow = int( csvinput[ 0 ][ 1 ] )
Temperature = csvinput[ 0 ][ 4 ]
if len( Temperature ) > 3:
Temp = Temperature[ 0 : 2 ]
else:
Temp = "0" + Temperature[ 0 : 1 ]
logging.info( "The temperature is " + str( Temperature ) )
logging.info( "The SERPENT tempreature library is " + str( Temp ) )
material = csvinput[ 0 ][ 2 ][ 0 : len( csvinput[ 0 ][ 2 ] ) ]
mat = "mat " + material
logging.info( "The material search pattern is " + str( mat ) )
CarrierComp = {}
for i in range( 1 , int( csvinput[ 3 ][ 0 ] ) * 2 , 2 ):
CarrierComp[ int( csvinput[ 3 ][ i ] ) ] = float( csvinput[ 3 ][ i + 1 ] ) * \
( 1.0 / 100 )
logging.debug( "The CarrierComp is " + str( CarrierComp ) )
FloatsInput=[]
csvinputrows = len( csvinput )
floatrows = csvinputrows - StartRow
for i in range( floatrows ):
FloatsInput.append( 0 )
for row in range( StartRow , csvinputrows ):
tmpArray=[]
for column in range( len( csvinput[ row ] ) ):
if len( csvinput[ row ][ column ] ) > 0:
tmpArray.append( float( csvinput[ row ][ column ] ) )
FloatsInput[ row - StartRow ] = tmpArray
if LogLevel <= 10:
logging.debug( "At initilization FloatsInput has the form")
for row in range( floatrows ):
logging.debug( FloatsInput[ row ] )
if csvinput[ 0 ][ 0 ] == "Free" or csvinput[ 0 ][ 0 ] == "free" \
or csvinput[ 0 ][ 0 ] == "FREE":
logging.info( "Running under the \"Free\" assumption" )
molcar = 1
for column in range( len( FloatsInput[ 0 ] ) ):
molcar -= FloatsInput[ 0 ][ column ] / 100
logging.debug( "molcar has the value " + str( molcar ) )
for row in range( 1 , floatrows ):
if FloatsInput[ row ][ ptt ] < 0:
if FloatsInput[ row ][ grp ] == 1:
sys.exit("ERROR!!: Carrier constituent fractions are not allowed as \
weight percents! Please correct to atomic percents as percentages \
of element type! Yes we know this is buggy but is a result of \
development occuring in stages")
componentType = FloatsInput[ row ][ grp ]
masstotal = 0
for i in range( 1 , floatrows ):
if FloatsInput[ i ][ grp ] == componentType:
masstotal += FloatsInput[ i ][ pct ] / FloatsInput[ i ][ iso ]
for i in range( 1, floatrows ):
if FloatsInput[ i ][ grp ] == componentType:
FloatsInput[ i ][ pct ] = 100 * FloatsInput[ i ][ pct ] / FloatsInput[ i ][ iso ] \
/ masstotal
FloatsInput[ i ][ ptt ] = 1
if FloatsInput[ row ][ grp ] == 1:
FloatsInput[ row ].append( molcar * FloatsInput[ row ][ pct ] * \
CarrierComp[ int( FloatsInput[ row ][ ele ] ) ] )
if FloatsInput[ row ][ grp ] > 2:
FloatsInput[ row ].append( FloatsInput[ 0 ][ int( FloatsInput[ row ][ grp ] ) - 3 ] )
if LogLevel <= 10:
logging.debug( "After modification for weight percentage and molar ratios" )
logging.debug( "FloatsInpput has the value seen below" )
for row in range( floatrows ):
logging.debug( FloatsInput [ row ] )
if float( csvinput[ 0 ][ 3 ] ) < 0:
logging.debug( "A density calculation request was made")
density = 0.0
DensityArray = {}
for i in range( 1 , int( csvinput[ 4 ][ 0 ] ) * 2 , 2 ):
DensityArray[ int( csvinput[ 4 ][ i ] ) ] = float( csvinput[ 4 ][ i + 1 ] )
logging.debug( "DensityArray has the value seen below" )
logging.debug( DensityArray )
for i in range( 1 , floatrows ):
if FloatsInput[ i ][ grp ] != 2:
density += DensityArray[ int( FloatsInput[ i ][ ele ] ) ] * \
FloatsInput[ i ][ mof ] * FloatsInput[ i ][ pct ] * ( 1.0 / 10000.0 )
density = str( density )
else:
density = csvinput[ 0 ][ 3 ]
logging.debug( "density has the value of " + str( density ) )
molsTotal = 0
for i in range( 1 , floatrows ):
if FloatsInput[ i ][ grp ] > 2:
molsTotal += FloatsInput[ 0 ][ int(FloatsInput[ i ][ grp ] ) - 3 ] * FloatsInput[ i ][ cth ] * \
FloatsInput[ i ][ pct ] / ( 100 )
if FloatsInput[ i ][ grp ] == 2:
SaltTotal = 0
for j in range( 1 , floatrows ):
if FloatsInput[ j ][ grp ] != 2:
molsTotal += FloatsInput[ j ][ htc ] * FloatsInput[ j ][ mof ] * \
FloatsInput[ i ][ pct ] * FloatsInput[ j ][ pct ] / (100 * 100 )
SaltTotal += FloatsInput[ j ][ htc ] * FloatsInput[ j ][ mof ] * \
FloatsInput[ i ][ pct ] * FloatsInput[ j ][ pct ] / (100 * 100 )
FloatsInput[ i ].append( SaltTotal )
if FloatsInput[ i ][ grp ] == 1:
molsTotal += FloatsInput[ i ][ cth ] * FloatsInput[ i ][ pct ] * \
FloatsInput[ i ][ mof ] / ( 100 )
for i in range( 1 , floatrows ):
FloatsInput[ i ].append( FloatsInput[ i ][ mof ] * FloatsInput[ i ][ pct ] \
* FloatsInput[ i ][ cth ]/ ( molsTotal ) )
logging.debug( "molsTotal has the value " + str( molsTotal ) )
if LogLevel <= 10:
logging.debug( "FloatsInput with atomic ratios is as seen below" )
for row in range( floatrows ):
logging.debug( FloatsInput[ row ] )
if csvinput[ 0 ][ 0 ] == "Preserved" or csvinput[ 0 ][ 0 ] == "preserved" \
or csvinput[ 0 ][ 0 ] == "PRESERVED":
logging.info( "Running under the \"Preserved\" assumption" )
    # initialize ratios array
ratio = []
for column in range( len( csvinput[ 2 ] ) ):
if len ( csvinput[ 2 ][ column ] ) > 0:
ratio.append( float( csvinput[ 2 ][ column ] ) )
logging.debug( "The ratio array has value seen below" )
logging.debug( ratio )
    # initialize multiplier array
#print " Displaying ratio array"
#print ratio
mult = []
for column in range( len( ratio ) ):
mult.append( FloatsInput[ 0 ][ column ] / ratio[ column ] )
logging.debug( "mult has value seen below" )
logging.debug( mult )
multiplier = min( mult )
logging.debug( "multiplier has the value " + str( multiplier ) )
for column in range( len( FloatsInput[ 0 ] ) ):
FloatsInput[ 0 ][ column ] = ratio[ column ] * multiplier
if LogLevel <= 10:
logging.debug( "After ratio modification FloatsInpput has value seen below" )
for row in range( floatrows ):
logging.debug( FloatsInput[ row ] )
molcar = 1
for column in range( len( FloatsInput[ 0 ] ) ):
molcar -= FloatsInput[ 0 ][ column ] / 100
logging.debug( "molcar has the value " + str( molcar ) )
for row in range( 1 , floatrows ):
if FloatsInput[ row ][ ptt ] < 0:
if FloatsInput[ row ][ grp ] == 1:
sys.exit("ERROR!!: Carrier constituent fractions are not allowed as \
weight percents! Please correct to atomic percents as percentages \
of element type! Yes we know this is buggy but is a result of \
development occuring in stages")
componentType = FloatsInput[ row ][ grp ]
masstotal = 0
for i in range( 1 , floatrows ):
if FloatsInput[ i ][ grp ] == componentType:
masstotal += FloatsInput[ i ][ pct ] / FloatsInput[ i ][ iso ]
for i in range( 1, floatrows ):
if FloatsInput[ i ][ grp ] == componentType:
FloatsInput[ i ][ pct ] = 100 * FloatsInput[ i ][ pct ] / FloatsInput[ i ][ iso ] \
/ masstotal
FloatsInput[ i ][ ptt ] = 1
if FloatsInput[ row ][ grp ] == 1:
FloatsInput[ row ].append( molcar * FloatsInput[ row ][ pct ] * \
CarrierComp[ int( FloatsInput[ row ][ ele ] ) ] )
if FloatsInput[ row ][ grp ] > 2:
FloatsInput[ row ].append( FloatsInput[ 0 ][ int( FloatsInput[ row ][ grp ] ) - 3 ] )
if LogLevel <= 10:
logging.debug( "After weight percentage and molar ratio mods FloatsInput has value")
for row in range( floatrows ):
logging.debug( FloatsInput[ row ] )
if float( csvinput[ 0 ][ 3 ] ) < 0:
logging.debug( "A density calculation request was made")
density = 0.0
DensityArray = {}
for i in range( 1 , int( csvinput[ 4 ][ 0 ] ) * 2 , 2 ):
DensityArray[ int( csvinput[ 4 ][ i ] ) ] = float( csvinput[ 4 ][ i + 1 ] )
logging.debug( "DensityArray has the value seen below" )
logging.debug( DensityArray)
for i in range( 1 , floatrows ):
if FloatsInput[ i ][ grp ] != 2:
density += DensityArray[ int( FloatsInput[ i ][ ele ] ) ] * \
FloatsInput[ i ][ mof ] * FloatsInput[ i ][ pct ] * ( 1.0 / 10000.0 )
density = str( density )
else:
density = csvinput[ 0 ][ 3 ]
logging.debug( "density has the value " + str( density ) )
molsTotal = 0
for i in range( 1 , floatrows ):
if FloatsInput[ i ][ grp ] > 2:
molsTotal += FloatsInput[ 0 ][ int(FloatsInput[ i ][ grp ] ) - 3 ] * FloatsInput[ i ][ cth ] * \
FloatsInput[ i ][ pct ] / ( 100 )
if FloatsInput[ i ][ grp ] == 2:
SaltTotal = 0
for j in range( 1 , floatrows ):
if FloatsInput[ j ][ grp ] != 2:
molsTotal += FloatsInput[ j ][ htc ] * FloatsInput[ j ][ mof ] * \
FloatsInput[ i ][ pct ] * FloatsInput[ j ][ pct ] / (100 * 100 )
SaltTotal += FloatsInput[ j ][ htc ] * FloatsInput[ j ][ mof ] * \
FloatsInput[ i ][ pct ] * FloatsInput[ j ][ pct ] / (100 * 100 )
FloatsInput[ i ].append( SaltTotal )
if FloatsInput[ i ][ grp ] == 1:
molsTotal += FloatsInput[ i ][ cth ] * FloatsInput[ i ][ pct ] * \
FloatsInput[ i ][ mof ] / ( 100 )
for i in range( 1 , floatrows ):
FloatsInput[ i ].append( FloatsInput[ i ][ mof ] * FloatsInput[ i ][ pct ] \
* FloatsInput[ i ][ cth ] / ( molsTotal ) )
logging.debug( "molsTotal has the value " + str( molsTotal ) )
if LogLevel <= 10:
logging.debug( "FloatsInput, after atomic ratio calc, has value seen below" )
for row in range( floatrows ):
logging.debug( FloatsInput[ row ] )
for row in range( 1 , floatrows ):
FloatsInput[ row ][ atf ] = "%.10f" % ( FloatsInput[ row ][ atf ] / 100.0 )
if LogLevel <= 10:
logging.debug( "After truncation to strings FloatsInput has the valeu" )
for row in range( floatrows ):
logging.debug( FloatsInput[ row ] )
NewFile.write( "% ------ Created on " + time.strftime( "%d-%m-%Y") + " at " + \
time.strftime( "%H:%M" ) + "\n" )
NewFile.write( "% ------ Comments:" + "\n" )
NewFile.write( "% ------ << \n")
if len( csvinput[ 6 ][ 0 ] ) > 0:
for i in range( 0 , len( csvinput[ 6 ][ 0 ] ) , 49 ):
NewFile.write( "% ------ < " + csvinput[ 6 ][ 0 ][ i : i + 49 ] + "\n" )
NewFile.write( "% ------ >> \n")
for line in HostFile:
if line.find( mat ) > -1:
OldString = line
Anchor = OldString.find( "rgb" )
NewString2 = OldString[ Anchor : ]
NewString = mat + " -" + density + " tmp " + csvinput[ 0 ][ 4 ] + " " + \
NewString2
NewFile.write( NewString )
for row in range( 1, floatrows ):
Z = str( int( FloatsInput[ row ][ ele ] ) )
#print Z
A = str( int( FloatsInput[ row ][ iso ] ) )
#print A
ModA = tzeros[ 0 : ( 3 - len ( A ) ) ] + A
Isotope = Z + ModA + "." + Temp + "c"
#print Isotope
NewFile.write( "{:<14}{}".format( Isotope , FloatsInput[ row ][ atf ] ) + "\n" )
else:
NewFile.write( line )
HostFile.close()
NewFile.close()
logging.info( "Code has finished running succesfully" )
logging.shutdown()
exit()
|
from __future__ import division, print_function, unicode_literals
import sys
import os
sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..'))
testinfo = "s, q"
tags = "TmxObjectLayer, get_in_region"
import pyglet
from pyglet.window import key
pyglet.resource.path.append(pyglet.resource.get_script_home())
pyglet.resource.reindex()
import cocos
from cocos import tiles, actions, layer
class DriveCar(actions.Driver):
def step(self, dt):
# handle input and move the car
self.target.rotation += (keyboard[key.RIGHT] - keyboard[key.LEFT]) * 150 * dt
self.target.acceleration = (keyboard[key.UP] - keyboard[key.DOWN]) * 400
if keyboard[key.SPACE]: self.target.speed = 0
super(DriveCar, self).step(dt)
scroller.set_focus(self.target.x, self.target.y)
# update wall colors
rect = self.target.get_rect()
overlapped = self.walls.get_in_region(rect.left, rect.bottom, rect.right, rect.top)
for obj in self.walls.objects:
color = (255, 0, 0) if obj in overlapped else (255, 255, 255)
if obj in self.walls._sprites:
self.walls._sprites[obj].color = color
description = """
TmxObjectLayer.get_in_region() test - move actor with arrows,
objects overlapped by the car will tint red
"""
def main():
global keyboard, walls, scroller
from cocos.director import director
director.init(width=800, height=600, autoscale=False)
print(description)
car_layer = layer.ScrollableLayer()
car = cocos.sprite.Sprite('car.png')
car_layer.add(car)
car.position = (200, 100)
car.max_forward_speed = 200
car.max_reverse_speed = -100
worker_action = car.do(DriveCar())
# add the map and the player sprite layer to a scrolling manager
scroller = layer.ScrollingManager()
walls = tiles.load('tmx_collision.tmx')['walls']
assert isinstance(walls, tiles.TmxObjectLayer)
worker_action.walls = walls
scroller.add(walls, z=0)
scroller.add(car_layer, z=1)
player_start = walls.find_cells(player_start=True)[0]
# extract the player_start, which is not a wall
walls.objects.remove(player_start)
# give car access to the walls so it can change colors
car.walls = walls
# construct the scene with a background layer color and the scrolling layers
platformer_scene = cocos.scene.Scene()
platformer_scene.add(layer.ColorLayer(100, 120, 150, 255), z=0)
platformer_scene.add(scroller, z=1)
# track keyboard presses
keyboard = key.KeyStateHandler()
director.window.push_handlers(keyboard)
# run the scene
director.run(platformer_scene)
if __name__ == '__main__':
main()
|
import subprocess, os
from conans.errors import ConanException
from conans.util.runners import check_output_runner
class PkgConfig(object):
@staticmethod
def _cmd_output(command):
return check_output_runner(command).strip()
def __init__(self, library, pkg_config_executable=None, static=False, msvc_syntax=False, variables=None,
print_errors=True):
"""
:param library: library (package) name, such as libastral
:param pkg_config_executable: specify custom pkg-config executable (e.g. for cross-compilation)
:param static: output libraries suitable for static linking (adds --static to pkg-config command line)
:param msvc_syntax: MSVC compatibility (adds --msvc-syntax to pkg-config command line)
:param variables: dictionary of pkg-config variables (passed as --define-variable=VARIABLENAME=VARIABLEVALUE)
:param print_errors: output error messages (adds --print-errors)
"""
self.library = library
self.pkg_config_executable = pkg_config_executable or os.getenv('PKG_CONFIG', 'pkg-config')
self.static = static
self.msvc_syntax = msvc_syntax
self.define_variables = variables
self.print_errors = print_errors
self._variables = dict()
self.info = dict()
def _parse_output(self, option):
command = [self.pkg_config_executable, '--' + option, self.library]
if self.static:
command.append('--static')
if self.msvc_syntax:
command.append('--msvc-syntax')
if self.print_errors:
command.append('--print-errors')
if self.define_variables:
for name, value in self.define_variables.items():
command.append('--define-variable=%s=%s' % (name, value))
try:
return self._cmd_output(command)
except subprocess.CalledProcessError as e:
raise ConanException('pkg-config command %s failed with error: %s' % (command, e))
def _get_option(self, option):
if option not in self.info:
self.info[option] = self._parse_output(option).split()
return self.info[option]
@property
def cflags(self):
return self._get_option('cflags')
@property
def cflags_only_I(self):
return self._get_option('cflags-only-I')
@property
def cflags_only_other(self):
return self._get_option('cflags-only-other')
@property
def libs(self):
return self._get_option('libs')
@property
def libs_only_L(self):
return self._get_option('libs-only-L')
@property
def libs_only_l(self):
return self._get_option('libs-only-l')
@property
def libs_only_other(self):
return self._get_option('libs-only-other')
@property
def provides(self):
return self._get_option('print-provides')
@property
def requires(self):
return self._get_option('print-requires')
@property
def requires_private(self):
return self._get_option('print-requires-private')
@property
def variables(self):
if not self._variables:
variable_names = self._parse_output('print-variables').split()
for name in variable_names:
self._variables[name] = self._parse_output('variable=%s' % name)
return self._variables
@property
def version(self):
return self._get_option('modversion')
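# Minimal usage sketch (the library name is hypothetical; it assumes a working
# pkg-config executable and a matching .pc file on the machine):
#
#   pc = PkgConfig('zlib')
#   pc.cflags          # e.g. ['-I/usr/include']
#   pc.libs_only_l     # e.g. ['-lz']
#   pc.variables       # dict of pkg-config variables such as 'prefix'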
|
from app.repositories.nn_training_result_repository import fetch_from_patch
class NnTrainingResultService:
def __init__(self, patch):
self._patch = patch
def get_nn_performance(self):
return fetch_from_patch(self._patch)
|
class Solution(object):
def solveSudoku(self, board):
"""
:type board: List[List[str]]
:rtype: void Do not return anything, modify board in-place instead.
"""
cols = [[False] * 9 for _ in range(9)]
rows = [[False] * 9 for _ in range(9)]
blocks = [[False] * 9 for _ in range(9)]
for i in range(0, 9):
for j in range(0, 9):
if board[i][j] == '.':
continue
val = int(board[i][j]) - 1
                k = (i // 3) * 3 + j // 3
cols[j][val] = True
rows[i][val] = True
blocks[k][val] = True
self.dfs(board, cols, rows, blocks)
def dfs(self, board, cols, rows, blocks):
for i in range(0, 9):
for j in range(0, 9):
if board[i][j] == '.':
                    k = (i // 3) * 3 + j // 3
for num in range(0, 9):
if (not rows[i][num]) and (not cols[j][num]) and (not blocks[k][num]):
rows[i][num] = cols[j][num] = blocks[k][num] = True
board[i][j] = str(num + 1)
if self.dfs(board, cols, rows, blocks):
return True
rows[i][num] = cols[j][num] = blocks[k][num] = False
board[i][j] = '.'
return False
return True
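# Usage sketch -- the board is a 9x9 list of single-character strings with '.' for
# blanks, and the puzzle below is only illustrative:
#
#   board = [list(row) for row in [
#       "53..7....", "6..195...", ".98....6.",
#       "8...6...3", "4..8.3..1", "7...2...6",
#       ".6....28.", "...419..5", "....8..79"]]
#   Solution().solveSudoku(board)   # board is completed in place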
|
from __future__ import unicode_literals
from django.db import migrations, models
import web_client.models
class Migration(migrations.Migration):
initial = True
dependencies = [
('web_client', '0002_delete_siteconfiguration'),
]
operations = [
migrations.CreateModel(
name='SiteConfiguration',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('site_name', models.CharField(default='ellugar.co', max_length=140)),
('default_description', models.CharField(default='description', max_length=140)),
('default_preview', models.ImageField(upload_to=web_client.models.imageLocation)),
('long_description', models.TextField(default='description')),
('mail', models.EmailField(default='arlefreak@gmail.com', max_length=254)),
('twitter', models.URLField(default='twitter')),
('github', models.URLField(default='github')),
('linkdn', models.URLField(default='linkdn')),
],
options={
'verbose_name': 'Site Configuration',
},
),
]
|
import logging
from flask import Blueprint, redirect, url_for, session, request, jsonify
from flask_oauthlib.client import OAuth
import requests
google_oauth_provider = Blueprint('google_oauth_provider', __name__)
from app import app
from . import user_access
from config.config import BaseConfig
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
oauth = OAuth(app.app)
google = oauth.remote_app(
'google',
consumer_key= BaseConfig.GOOGLE_ID,
consumer_secret= BaseConfig.GOOGLE_SECRET,
request_token_params={
'scope': 'email'
},
base_url='https://www.googleapis.com/oauth2/v1/',
request_token_url=None,
access_token_method='POST',
access_token_url='https://accounts.google.com/o/oauth2/token',
authorize_url='https://accounts.google.com/o/oauth2/auth',
)
verifyURL = 'https://www.googleapis.com/oauth2/v3/tokeninfo'
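# The tokeninfo endpoint returns the decoded ID-token claims as JSON; 'sub' (the stable
# Google account id) and 'email' used below are standard OpenID Connect claims.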
@google_oauth_provider.route('/google/verify', methods=['POST'])
def verify_user_id():
token = request.get_json().get('tokenId')
payload = {'id_token': token}
req = requests.get(verifyURL, payload)
data = req.json()
id = data['sub']
email = data['email']
user = user_access.get_user_secret('google', id)
user_by_mail = user_access.get_user_secret_by_mail('google', email)
    logger.debug('user ' + str(id) + ' verified')
if user:
# update user details
logger.debug('update user')
user_access.update_user('google', user.secret, data)
elif user_by_mail:
user = user_by_mail
logger.debug('update user')
user_access.update_user('google', user.secret, data)
else:
logger.debug('create user')
user = user_access.create_user('google', data, payload['id_token'])
return jsonify(id=user.id, secret=user.secret)
@google_oauth_provider.route('/google/login')
def login():
return google.authorize(callback=url_for('.authorized', _external=True))
@google_oauth_provider.route('/google/login/authorized')
def authorized():
resp = google.authorized_response()
if resp is None:
logger.error('google resp failed'+str(request))
return 'Access denied: reason=%s error=%s' % (
request.args['error_reason'],
request.args['error_description']
)
# check if user exists or create a new one
session['google_token'] = (resp['access_token'], '')
me = google.get('userinfo')
user = user_access.get_user_secret('google', me.data['id'])
user_by_mail = user_access.get_user_secret_by_mail('google', me.data['email'])
if user:
# update user details
secret = user.secret
user_access.update_user('google', secret, me.data)
elif user_by_mail:
user = user_by_mail
secret = user.secret
user_access.update_user('google', user.secret, me.data)
else:
secret = user_access.create_user('google',me.data, get_google_oauth_token()[0])
return jsonify({"secret": secret})
@google.tokengetter
def get_google_oauth_token():
return session.get('google_token')
|
from .mgrslib import *
|
import os, sys
from goldensixpacks import app
app.run(host='0.0.0.0')
|
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "happyteams.settings")
try:
from django.core.management import execute_from_command_line
except ImportError:
# The above import may fail for some other reason. Ensure that the
# issue is really that Django is missing to avoid masking other
# exceptions on Python 2.
try:
import django
except ImportError:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
)
raise
execute_from_command_line(sys.argv)
|
import utils
class plugin_singleton:
def __init__(self, parent, content, position, arguments):
[pos, class_name] = utils.getClassName(content, position)
self.__content = """
private:
%%classname%%();
%%classname%%(%%classname%% const&);
void operator = (%%classname%% const&);
public:
static %%classname%%& getInstance() {
static %%classname%% instance;
return instance;
}
private:
"""
self.__content = self.__content.replace('%%classname%%', class_name)
parent.toInsert.append([pos, self.__content])
constructor = utils.getConsructor(class_name, content)
if len(constructor) > 0: parent.toInsert.append([-1, constructor])
|
"""
<DefineSource>
@Date : Fri Nov 14 13:20:38 2014 \n
@Author : Erwan Ledoux \n\n
</DefineSource>
A Brianer
"""
import ShareYourSystem as SYS
BaseModuleStr="ShareYourSystem.Standards.Recorders.Simulater"
DecorationModuleStr="ShareYourSystem.Standards.Classors.Classer"
SYS.setSubModule(globals())
from ShareYourSystem.Standards.Recorders import Recorder
BrianConnectPrefixStr='Synaps'
@DecorationClass(**{
'ClassingSwitchMethodStrsList':['brian'],
'ClassingStructureVariable':[
('Clock','Clocks'),
('Population','Populations'),
('Trace','Traces'),
('Sample','Samples'),
('Event','Events'),
('Interactome','Interactomes'),
('Interaction','Interactions')
]
})
class BrianerClass(BaseClass):
def default_init(self,
_BrianingNeurongroupDict=None,
_BrianingSynapsesDict=None,
_BrianingConnectVariable=None,
_BrianingTraceDict=None,
_BrianingMoniterTuple=None,
_BrianingSpikesDict=None,
_BrianingPyplotDict=None,
_BrianingTimeQuantityStr='ms',
_BrianingPyplotBool=True,
_BrianingStepTimeFloat=0.1,
_BrianingDebugVariable=0,
_BrianingRecordBool=True,
_BrianedTimeQuantityVariable=None,
_BrianedNetworkVariable=None,
_BrianedNeurongroupVariable=None,
_BrianedSynapsesVariable=None,
_BrianedStateMonitorVariable=None,
_BrianedSpikeMonitorVariable=None,
_BrianedClockVariable=None,
_BrianedParentSingularStr=None,
_BrianedRecordKeyStrsList=None,
_BrianedTraceDeriveBrianersList=None,
_BrianedSynapsesDeriveBrianersList=None,
_BrianedStateDeriveBrianersList=None,
_BrianedSpikeDeriveBrianersList=None,
_BrianedParentNetworkDeriveBrianerVariable=None,
_BrianedParentPopulationDeriveBrianerVariable=None,
_BrianedParentInteractomeDeriveBrianerVariable=None,
_BrianedParentDeriveRecorderVariable=None,
**_KwargVariablesDict
):
#Call the parent __init__ method
BaseClass.__init__(self,**_KwargVariablesDict)
def do_brian(self):
#/#################/#
# Determine if it is an inside structure or the top
#
#debug
'''
self.debug(
[
'We brian here',
'First look for deeper teams in the structure',
]
)
'''
#Check
if self.ParentedTotalSingularListDict!=None and len(self.ParentedTotalSingularListDict)>0:
#debug
'''
self.debug(
[
'self.ParentedTotalSingularListDict.keys() is ',
str(self.ParentedTotalSingularListDict.keys())
]
)
'''
#get
self.BrianedParentSingularStr=self.ParentedTotalSingularListDict.keys()[0]
#debug
'''
self.debug(
[
'Ok',
('self.',self,[
'BrianedParentSingularStr'
])
]
)
'''
#/########################/#
# Network level
#
#Check
if (self.ParentDeriveTeamerVariable==None or 'Populations' in self.TeamDict or self.ParentDeriveTeamerVariable.TeamTagStr not in [
'Clocks',
'Traces',
'Samples',
'Events',
'Interactomes',
'Interactions'
]) and self.BrianedParentSingularStr!='Population':
#debug
'''
self.debug(
[
'It is a Network level',
'We set the brian network'
]
)
'''
#set
self.brianNetwork()
#/########################/#
# Special Network-Neurongroup level
#
#Check
if 'Populations' not in self.TeamDict:
#debug
'''
self.debug(
[
'It is a network with a one level pop',
'So set the neurongroup'
]
)
'''
#brianPopulation
self.brianPopulation()
#debug
'''
self.debug(
[
'We end to set the neuron group here'
]
)
'''
#/########################/#
# structure
#
#debug
'''
self.debug(
[
'We brian structure in all the brian children...',
]
)
'''
#structure
self.structure(
[
'Clocks',
'Populations',
'Traces',
'Events',
'Samples',
'Interactomes',
'Interactions'
],
'#all',
_ManagerCommandSetList=['brian']
)
#debug
'''
self.debug(
[
'Ok we have brian structured all the brian children...',
]
)
'''
else:
#debug
'''
self.debug(
[
'Ok we check if this parentsingular has a special method ',
('self.',self,[
'BrianedParentSingularStr'
])
]
)
'''
#set
BrianedMethodKeyStr='brian'+self.BrianedParentSingularStr
#Check
if hasattr(self,BrianedMethodKeyStr):
#/########################/#
# call the special brian<BrianedParentSingularStr> method
#
#debug
'''
self.debug(
[
'It is a '+self.BrianedParentSingularStr+' level',
'We brian<BrianedParentSingularStr>'
]
)
'''
#call
getattr(
self,
BrianedMethodKeyStr
)()
#debug
'''
self.debug(
[
'Ok we have setted brian'+self.BrianedParentSingularStr
]
)
'''
#debug
'''
self.debug(
[
'end of brian here'
]
)
'''
def brianNetwork(self):
#/####################/#
# init the Network
#
#maybe should import
import brian2
#set
self.BrianedNetworkVariable=brian2.Network()
#get
self.BrianedTimeQuantityVariable=getattr(
brian2,
self.BrianingTimeQuantityStr
)
#/####################/#
# init a simulation clock
#
#debug
'''
self.debug(
[
'We set a simulation clock at least'
]
)
'''
#Check
if 'Clocks' not in self.TeamDict:
ClocksDeriveManager=self.team('Clocks').TeamedValueVariable
else:
ClocksDeriveManager=self.TeamDict['Clocks']
#manage
if 'Simulation' not in ClocksDeriveManager.ManagementDict:
#debug
'''
self.debug(
[
'We init a simulation clock here'
]
)
'''
#manage
SimulationDeriveBrianer=ClocksDeriveManager.manage(
'Simulation',
{
'BrianingStepTimeFloat':self.BrianingStepTimeFloat
}
)
def brianClock(self):
#/####################/#
# Determine the parents
#
#get
self.BrianedParentNetworkDeriveBrianerVariable=self.ParentDeriveTeamerVariable.ParentDeriveTeamerVariable
#/####################/#
# Set the brian clock
#
#import
from brian2 import Clock
#debug
'''
self.debug(
[
'We set the brian clock',
('self.',self,['StructureTagStr'])
]
)
'''
#init
self.BrianedClockVariable=Clock(
dt=self.BrianingStepTimeFloat*self.BrianedParentNetworkDeriveBrianerVariable.BrianedTimeQuantityVariable,
#name=self.StructureTagStr
)
#debug
'''
self.debug(
[
'We have setted the clock',
('self.',self,[
'BrianedClockVariable'
])
]
)
'''
def brianPopulation(self):
#debug
'''
self.debug(
[
'It is a Neurongroup level, we set the Neurongroup',
'We adapt the shape of BrianingNeurongroupDict',
('self.',self,[
'BrianingNeurongroupDict',
'RecordingKeyVariable'
])
]
)
'''
#/########################/#
# Determine parents
#
#Check
if self.ParentDeriveTeamerVariable!=None:
#set the parent
self.BrianedParentNetworkDeriveBrianerVariable=self.ParentDeriveTeamerVariable.ParentDeriveTeamerVariable
else:
#set the parent
self.BrianedParentNetworkDeriveBrianerVariable=self
#/################/#
# Adapt the arg
#
#Check
if 'N' not in self.BrianingNeurongroupDict:
self.BrianingNeurongroupDict['N']=0
#Check
if 'model' not in self.BrianingNeurongroupDict:
self.BrianingNeurongroupDict['model']=''
#maybe should import
from brian2 import NeuronGroup
#debug
'''
self.debug(
[
('self.',self,[
'BrianingNeurongroupDict'
]),
'We now set the model system Neurongroup if N>0 and model!=""'
]
)
'''
#/################/#
# Set the brian neurongroup
#
#Check
if self.BrianingNeurongroupDict['N']>0 and self.BrianingNeurongroupDict['model']!="":
#init
self.BrianedNeurongroupVariable=NeuronGroup(
**dict(
self.BrianingNeurongroupDict,
**{
#'name':self.ParentedTotalPathStr.replace('/','_')+'_'+self.ManagementTagStr,
#'clock':self.BrianedParentNetworkDeriveBrianerVariable.TeamDict[
# 'Clocks'
#].ManagementDict['Simulation'].BrianedClockVariable
}
)
)
#debug
'''
self.debug(
[
'Ok we have setted the Neurongroup',
('self.',self,[
'BrianedNeurongroupVariable'
])
]
)
'''
#/##################/#
# Define a debug
#
#Check
		if self.BrianingDebugVariable>0:
#import
from brian2 import network_operation,ms
@network_operation(
				dt=self.BrianingDebugVariable*self.BrianedParentNetworkDeriveBrianerVariable.BrianedTimeQuantityVariable
)
def debugNeurongroup():
#init
PrintStr='At time t='+str(self.BrianedNeurongroupVariable.clock.t)+', \n'
PrintStr+='In the NeuronGroup '+self.BrianedNeurongroupVariable.name+' : \n'
#loop
for __KeyStr in self.BrianedNeurongroupVariable.equations._equations.keys():
#set
PrintStr+=__KeyStr+" is "+str(
getattr(
self.BrianedNeurongroupVariable,
__KeyStr
)
)+'\n'
#add
PrintStr+='\n'
#print
print PrintStr
#add
self.BrianedParentNetworkDeriveBrianerVariable.BrianedNetworkVariable.add(
debugNeurongroup
)
#Check
if self.BrianingNeurongroupDict['N']>0:
#/##################/#
# team States first all the brian variables
#
#get
self.BrianedRecordKeyStrsList=self.BrianedNeurongroupVariable.equations._equations.keys()
#Check
if len(self.BrianedRecordKeyStrsList)>0:
#debug
'''
self.debug(
[
'We simulate with neurongroup',
'adapt the initial conditions of all the brian variables',
'so first we team Traces and put Recorders inside or get it and mapSet'
]
)
'''
#Check
if 'Traces' not in self.TeamDict:
BrianedDeriveTraces=self.team(
'Traces'
).TeamedValueVariable
else:
BrianedDeriveTraces=self.TeamDict[
'Traces'
]
#debug
'''
self.debug(
[
'We set the tracers',
('self.',self,['BrianedRecordKeyStrsList'])
]
)
'''
#map
self.BrianedTraceDeriveBrianersList=map(
lambda __ManagementKeyStr,__RecordKeyStr:
BrianedDeriveTraces.manage(
__ManagementKeyStr,
{
'RecordingKeyVariable':getattr(
self.BrianedNeurongroupVariable,
__RecordKeyStr
),
'RecordKeyStr':__RecordKeyStr
}
).ManagedValueVariable
if __ManagementKeyStr not in BrianedDeriveTraces.ManagementDict
else BrianedDeriveTraces.ManagementDict[__ManagementKeyStr].mapSet(
{
'RecordingKeyVariable':getattr(
self.BrianedNeurongroupVariable,
__RecordKeyStr
),
'RecordKeyStr':__RecordKeyStr
}
),
map(
lambda __BrianedRecordKeyStr:
Recorder.RecordPrefixStr+__BrianedRecordKeyStr,
self.BrianedRecordKeyStrsList
),
self.BrianedRecordKeyStrsList
)
#/##################/#
# add in the net
#
#add
self.BrianedParentNetworkDeriveBrianerVariable.BrianedNetworkVariable.add(
self.BrianedNeurongroupVariable
)
"""
#/####################/#
# maybe view a draw plot
#
#call
self.viewPopulation()
"""
#debug
'''
self.debug(
[
'End of brianPopulation'
]
)
'''
def brianInteraction(self):
#/########################/#
# Postlet level
#
#debug
'''
self.debug(
[
'It is an Interaction level',
('self.',self,[
#'BrianingSynapsesDict'
]
)
]
)
'''
#/####################/#
# Set the parent
#
#Check
if self.ParentDeriveTeamerVariable.ParentDeriveTeamerVariable.BrianedParentSingularStr=='Interactome':
#debug
'''
self.debug(
[
'We are in a projectome structure'
]
)
'''
#set
self.BrianedParentInteractomeDeriveBrianerVariable=self.ParentDeriveTeamerVariable.ParentDeriveTeamerVariable
#get
self.BrianedParentPopulationDeriveBrianerVariable=self.BrianedParentInteractomeDeriveBrianerVariable.ParentDeriveTeamerVariable.ParentDeriveTeamerVariable
else:
#debug
'''
self.debug(
[
'There is no projectome structure'
]
)
'''
#get
self.BrianedParentPopulationDeriveBrianerVariable=self.ParentDeriveTeamerVariable.ParentDeriveTeamerVariable
#get
self.BrianedParentNetworkDeriveBrianerVariable=self.BrianedParentPopulationDeriveBrianerVariable.BrianedParentNetworkDeriveBrianerVariable
#/####################/#
# Set the ConnectedTo Variable
#
#debug
'''
self.debug(
[
'Check if we have to get the connected to variable',
('self.',self,['ConnectedToVariable'])
]
)
'''
#Check
if self.ConnectedToVariable==None:
#debug
'''
self.debug(
[
'We setConnection here'
]
)
'''
#setConnection
self.setConnection(
self.ManagementTagStr,
self,
self.BrianedParentPopulationDeriveBrianerVariable
)
#/####################/#
# Set the BrianedParentPopulationDeriveBrianerVariable
#
#debug
'''
self.debug(
[
'Do we have to make parent-brian the connected variable ?',
'self.ConnectedToVariable.BrianedNeurongroupVariable is ',
str(self.ConnectedToVariable.BrianedNeurongroupVariable)
]
)
'''
#Check
if self.ConnectedToVariable.BrianedNeurongroupVariable==None:
#parent brian
self.ConnectedToVariable.parent(
).brian(
)
#set
BrianedNameStr=self.BrianedParentPopulationDeriveBrianerVariable.StructureTagStr+'_To_'+self.ConnectedToVariable.StructureTagStr
#debug
'''
self.debug(
[
'We set the synapses',
'self.BrianedParentPopulationDeriveBrianerVariable.BrianedNeurongroupVariable is ',
str(self.BrianedParentPopulationDeriveBrianerVariable.BrianedNeurongroupVariable),
'self.ConnectedToVariable.BrianedNeurongroupVariable is ',
str(self.ConnectedToVariable.BrianedNeurongroupVariable),
'Maybe we have to make brian the post',
'BrianedNameStr is '+BrianedNameStr
]
)
'''
#import
from brian2 import Synapses
#init
self.BrianedSynapsesVariable=Synapses(
source=self.BrianedParentPopulationDeriveBrianerVariable.BrianedNeurongroupVariable,
target=self.ConnectedToVariable.BrianedNeurongroupVariable,
#name=BrianedNameStr.replace('/','_'),
**self.BrianingSynapsesDict
)
#/####################/#
# Connect options
#
#connect
if type(self.BrianingConnectVariable)==float:
#debug
'''
self.debug(
[
'we connect with a sparsity of ',
('self.',self,[
'BrianingConnectVariable'
])
]
)
'''
#connect
self.BrianedSynapsesVariable.connect(
True,
p=self.BrianingConnectVariable
)
#/####################/#
# add to the structure
#
#add
self.BrianedParentNetworkDeriveBrianerVariable.BrianedNetworkVariable.add(
self.BrianedSynapsesVariable
)
#/##################/#
# Define a debug
#
#Check
		if self.BrianingDebugVariable>0:
#import
from brian2 import network_operation,ms
@network_operation(
				dt=self.BrianingDebugVariable*self.BrianedParentNetworkDeriveBrianerVariable.BrianedTimeQuantityVariable
)
def debugSynapses():
#init
PrintStr='At time t='+str(self.BrianedSynapsesVariable.clock.t)+', \n'
PrintStr+='In the Synapses '+self.BrianedSynapsesVariable.name+' : \n'
#loop
for __KeyStr in self.BrianedSynapsesVariable.equations._equations.keys():
#set
PrintStr+=__KeyStr+" is "+str(
getattr(
self.BrianedSynapsesVariable,
__KeyStr
)
)+'\n'
#add
PrintStr+='\n'
#print
print PrintStr
#add
self.BrianedParentNetworkDeriveBrianerVariable.BrianedNetworkVariable.add(
debugSynapses
)
def brianTrace(self):
#debug
'''
self.debug(
[
'It is a Trace level, we set the Samples',
('self.',self,[
#'RecordingKeyVariable',
'RecordKeyStr'
])
]
)
'''
#/####################/#
# Set the parent
#
#get
self.BrianedParentPopulationDeriveBrianerVariable=self.ParentDeriveTeamerVariable.ParentDeriveTeamerVariable
#get
self.BrianedParentNetworkDeriveBrianerVariable=self.BrianedParentPopulationDeriveBrianerVariable.BrianedParentNetworkDeriveBrianerVariable
#Check
if self.BrianedParentPopulationDeriveBrianerVariable.BrianingNeurongroupDict['N']>0:
#/####################/#
# we record
#
#Check
if self.BrianingRecordBool:
#debug
'''
self.debug(
[
'We record here'
]
)
'''
#record
self.record()
#/###################/#
# Build the samples and maybe one default moniter
#
#debug
'''
self.debug(
[
'Look if we have samples here',
"'Samples' not in self.TeamDict is ",
str('Samples' not in self.TeamDict)
]
)
'''
#Check
if 'Samples' not in self.TeamDict:
BrianedSamplesDeriveManager=self.team(
'Samples'
).TeamedValueVariable
else:
BrianedSamplesDeriveManager=self.TeamDict[
'Samples'
]
#debug
'''
self.debug(
[
'Do we have to set a default moniter ?',
#'len(self.BrianedParentPopulationDeriveBrianerVariable.BrianedRecordKeyStrsList) is ',
#str(len(self.BrianedParentPopulationDeriveBrianerVariable.BrianedRecordKeyStrsList)),
'self.BrianedParentPopulationDeriveBrianerVariable.BrianedRecordKeyStrsList) is ',
str(self.BrianedParentPopulationDeriveBrianerVariable.BrianedRecordKeyStrsList),
]
)
'''
#Check
if len(self.BrianedParentPopulationDeriveBrianerVariable.BrianedRecordKeyStrsList)==1:
#debug
'''
self.debug(
[
'BrianedSamplesDeriveManager.ManagementDict.keys() is',
str(BrianedSamplesDeriveManager.ManagementDict.keys())
]
)
'''
#Check
if len(BrianedSamplesDeriveManager.ManagementDict)==0 or (
len(BrianedSamplesDeriveManager.ManagementDict)==1 and 'Default' in BrianedSamplesDeriveManager.ManagementDict
):
#debug
'''
self.debug(
[
'There is just one variable that we sample',
'we manage and make it brian'
]
)
'''
#manage
BrianedDefaultBrianer=BrianedSamplesDeriveManager.manage(
'Default',
).ManagedValueVariable
#Check
if BrianedDefaultBrianer.RecordingLabelVariable==None:
#Check
if self.BrianedParentPopulationDeriveBrianerVariable.RecordingLabelVariable!=None:
#get
BrianedDefaultBrianer.RecordingLabelVariable=self.BrianedParentPopulationDeriveBrianerVariable.RecordingLabelVariable
else:
#set the record labels
BrianedDefaultBrianer.RecordingLabelVariable=[0] if self.BrianedParentPopulationDeriveBrianerVariable.BrianingNeurongroupDict[
'N']>0 else []
#brian
BrianedDefaultBrianer.parent(
).brian(
)
def brianSample(self):
#debug
'''
self.debug(
[
'It is a Sample State Moniter level',
('self.',self,[
'RecordingLabelVariable',
])
]
)
'''
#/####################/#
# Set the parent
#
#get
self.BrianedParentDeriveRecorderVariable=self.ParentDeriveTeamerVariable.ParentDeriveTeamerVariable
#get
self.BrianedParentPopulationDeriveBrianerVariable=self.BrianedParentDeriveRecorderVariable.BrianedParentPopulationDeriveBrianerVariable
#get
self.BrianedParentNetworkDeriveBrianerVariable=self.BrianedParentPopulationDeriveBrianerVariable.BrianedParentNetworkDeriveBrianerVariable
#Check
if self.BrianedParentPopulationDeriveBrianerVariable.BrianedNeurongroupVariable!=None:
#/####################/#
# Set the brian monitor
#
#debug
'''
self.debug(
[
'We set the state monitor',
('self.',self,[
#'BrianedParentPopulationDeriveBrianerVariable'
]),
#'self.BrianedParentDeriveRecorderVariable.RecordKeyStr is ',
#str(self.BrianedParentDeriveRecorderVariable.RecordKeyStr)
'self.ParentedTotalManagementOrderedDict.keys() is ',
str(self.ParentedTotalManagementOrderedDict.keys())
]
)
'''
#import
from brian2 import StateMonitor
#init
self.BrianedStateMonitorVariable=StateMonitor(
self.BrianedParentPopulationDeriveBrianerVariable.BrianedNeurongroupVariable,
self.BrianedParentDeriveRecorderVariable.RecordKeyStr,
self.RecordingLabelVariable,
)
#debug
'''
self.debug(
[
'Ok we have setted the monitor',
('self.',self,['BrianedStateMonitorVariable']),
'Now we add to the structure',
'self.BrianedParentPopulationDeriveBrianerVariable.BrianedNeurongroupVariable is ',
str(self.BrianedParentPopulationDeriveBrianerVariable.BrianedNeurongroupVariable)
]
)
'''
#/####################/#
# add to the structure
#
#debug
'''
self.debug(
[
'We add to the structure'
]
)
'''
#add
self.BrianedParentNetworkDeriveBrianerVariable.BrianedNetworkVariable.add(
self.BrianedStateMonitorVariable
)
"""
#/####################/#
# maybe view a draw plot
#
#call
self.viewSample()
"""
def brianEvent(self):
#debug
'''
self.debug(
[
'It is a Spike Moniter level',
('self.',self,[
])
]
)
'''
#/####################/#
# Set the BrianedParentPopulationDeriveBrianerVariable
#
#get
self.BrianedParentPopulationDeriveBrianerVariable=self.ParentDeriveTeamerVariable.ParentDeriveTeamerVariable
#get
self.BrianedParentNetworkDeriveBrianerVariable=self.BrianedParentPopulationDeriveBrianerVariable.BrianedParentNetworkDeriveBrianerVariable
#Check
if self.BrianedParentPopulationDeriveBrianerVariable.BrianedNeurongroupVariable!=None:
#/####################/#
# Set the brian monitor
#
#debug
'''
self.debug(
[
'We set the spike monitor'
]
)
'''
#import
from brian2 import SpikeMonitor
#init
self.BrianedSpikeMonitorVariable=SpikeMonitor(
self.BrianedParentPopulationDeriveBrianerVariable.BrianedNeurongroupVariable,
)
#debug
'''
self.debug(
[
'Ok we have setted the monitor',
('self.',self,['BrianedSpikeMonitorVariable']),
'Now we add to the structure',
'self.BrianedParentPopulationDeriveBrianerVariable.BrianedNetworkVariable is ',
str(self.BrianedParentPopulationDeriveBrianerVariable.BrianedNetworkVariable)
]
)
'''
#/####################/#
# add to the structure
#
#add
self.BrianedParentNetworkDeriveBrianerVariable.BrianedNetworkVariable.add(
self.BrianedSpikeMonitorVariable
)
"""
#/####################/#
# maybe view a draw plot
#
#call
self.viewEvent()
"""
def mimic_view(self):
#/########################/#
# Network level
#
#Check
if (self.ParentDeriveTeamerVariable==None or 'Populations' in self.TeamDict or self.ParentDeriveTeamerVariable.TeamTagStr not in [
'Clocks',
'Traces',
'Samples',
'Events',
'Interactomes',
'Interactions'
]) and self.BrianedParentSingularStr!='Population':
#debug
'''
self.debug(
[
'It is a Network level',
'We sructure view'
]
)
'''
#/########################/#
# structure
#
#debug
'''
self.debug(
[
'We view structure in all the brian children...',
]
)
'''
#structure
self.structure(
[
'Clocks',
'Populations',
'Traces',
'Events',
'Samples',
'Interactomes',
'Interactions'
],
'#all',
_ManagerCommandSetList=['view']
)
#debug
'''
self.debug(
[
'Ok we have view structured all the brian children...',
]
)
'''
else:
#debug
'''
self.debug(
[
'Ok we check if this parentsingular has a special method ',
('self.',self,[
'BrianedParentSingularStr'
])
]
)
'''
#set
BrianedMethodKeyStr='view'+self.BrianedParentSingularStr
#Check
if hasattr(self,BrianedMethodKeyStr):
#/########################/#
# call the special view<BrianedParentSingularStr> method
#
#debug
'''
self.debug(
[
'It is a '+self.BrianedParentSingularStr+' level',
'We view<BrianedParentSingularStr>'
]
)
'''
#call
getattr(
self,
BrianedMethodKeyStr
)()
#debug
'''
self.debug(
[
'Ok we have setted view'+self.BrianedParentSingularStr
]
)
'''
def viewPopulation(self):
#debug
'''
self.debug(
[
'We complete a view so first fill the draw'
]
)
'''
#Check
if 'Charts' not in self.TeamDict:
BrianedChartsDeriveTeamer=self.team(
'Charts'
).TeamedValueVariable
else:
BrianedChartsDeriveTeamer=self.TeamDict['Charts']
def viewSample(self):
#debug
self.debug(
[
'We complete a view so first fill the draw',
('self.',self,[
'RecordingLabelVariable',
'ViewedLegendLabelStr'
])
]
)
#/##################/#
# Determine the way of labeling the variable names
#
#set
if self.ViewedLegendLabelStr=="":
#set
self.ViewedLegendLabelStr='$'+self.BrianedParentDeriveRecorderVariable.RecordKeyStr
'''
self.ViewedLegendLabelStr+='_{'+str(
#self.BrianedParentPopulationDeriveBrianerVariable.BrianedNeurongroupVariable.name
#self.BrianedParentPopulationDeriveBrianerVariable.BrianedNeurongroupVariable.
).replace('_','/')+'}'
'''
#debug
self.debug(
[
'We have specified the legend label',
('self.',self,[
'ViewedLegendLabelStr'
])
]
)
#set
self.PyplotingDrawVariable=map(
lambda __IndexInt:
(
'plot',
{
'#liarg:#map@get':[
'#IdGetStr.BrianedStateMonitorVariable.t',
'>>SYS.IdDict[#IdStr].BrianedStateMonitorVariable.'+self.BrianedParentDeriveRecorderVariable.RecordKeyStr+'['+str(
__IndexInt)+',:]'
],
'#kwarg':dict(
{
'label':self.ViewedLegendLabelStr+'^{'+str(__IndexInt)+'}$',
'linestyle':'-',
'color':'b'
},
**self.BrianingPyplotDict
)
}
),
self.RecordingLabelVariable
)
#/####################/#
# maybe set for the Chart
#
#init
self.PyplotingChartVariable=[]
#/####################/#
# maybe set the X Chart also
#
#scale
self.ViewingXVariable=self.BrianedStateMonitorVariable.t[:]
#set
self.ViewingXLabelStr='$t\ ('+str(
(1./self.ViewingXScaleFloat)*self.BrianedStateMonitorVariable.t.unit
).split('.')[-1]+')$'
"""
#set
SampleTagStr=self.BrianedParentPopulationDeriveBrianerVariable.BrianedNeurongroupVariable.name+'t'
#join
XlimLiargStr="".join([
">>SYS.set(SYS,'"+SampleTagStr+"LimFloatsArray',",
"[SYS.IdDict[#IdStr].ViewingXVariable.min(),",
"SYS.IdDict[#IdStr].ViewingXVariable.max()]",
').'+SampleTagStr+"LimFloatsArray"
])
#join
XticksLiargStr="".join([
">>SYS.set(SYS,'"+SampleTagStr+"TickFloatsArray',",
"map(lambda __Float:float('%.2f'%__Float),",
"SYS.getTickFloatsArray(",
'SYS.'+SampleTagStr+"LimFloatsArray,3",
")))."+SampleTagStr+"TickFloatsArray"
])
XtickLabelLiargStr="".join([
">>SYS.set(SYS,'"+SampleTagStr+"TickStrsArray',",
"map(lambda __Float:'$'+str(__Float)+'$',",
"SYS."+SampleTagStr+"TickFloatsArray))."+SampleTagStr+"TickStrsArray"
])
#debug
'''
self.debug(
[
'Xself.ViewedLabelStr is ',
Xself.ViewedLabelStr,
'XlimLiargStr is',
XlimLiargStr,
'XticksLiargStr is ',
XticksLiargStr,
'XtickLabelLiargStr is ',
XtickLabelLiargStr
]
)
'''
#set
self.PyplotingChartVariable+=[
(
'set_xlabel',self.ViewingXLabelStr
),
(
'set_xlim',{
'#liarg:#map@get':[XlimLiargStr]
}
),
(
'set_xticks',{
'#liarg:#map@get':[XticksLiargStr]
}
),
(
'set_xticklabels',{
'#liarg:#map@get':[XtickLabelLiargStr]
}
)
]
"""
#/####################/#
# maybe set the Y Chart also
#
#debug
self.debug(
[
'We set the y axis',
'self.BrianedParentDeriveRecorderVariable.RecordedTraceFloatsArray is ',
str(self.BrianedParentDeriveRecorderVariable.RecordedTraceFloatsArray)
]
)
#alias
self.ViewingYVariable=getattr(
self.BrianedStateMonitorVariable,
self.BrianedParentDeriveRecorderVariable.RecordKeyStr
)
#set
self.ViewingYLabelStr='$'+self.BrianedParentDeriveRecorderVariable.RecordKeyStr
"""
self.ViewingYLabelStr+='_{'+str(
self.BrianedParentPopulationDeriveBrianerVariable.BrianedNeurongroupVariable.name
).replace('_','/')+'}(t)\ ('+str(
self.BrianedParentDeriveRecorderVariable.RecordedTraceFloatsArray.unit
)+')'
"""
self.ViewingYLabelStr+='\ ('+str(
(1./self.ViewingYScaleFloat)*self.ViewingYVariable
).split(' ')[-1]+')'
self.ViewingYLabelStr+='$'
#/################/#
# call the base view method
#
#debug
self.debug(
[
'Now we call the view'
]
)
#call
BaseClass.view(self)
"""
#set
SampleTagStr=self.BrianedParentPopulationDeriveBrianerVariable.BrianedNeurongroupVariable.name+self.BrianedParentDeriveRecorderVariable.RecordKeyStr
#join
YlimLiargStr="".join([
">>SYS.set(SYS,'"+SampleTagStr+"LimFloatsArray',",
"[SYS.IdDict[#IdStr].BrianedStateMonitorVariable."+self.BrianedParentDeriveRecorderVariable.RecordKeyStr+".min(),",
"SYS.IdDict[#IdStr].BrianedStateMonitorVariable."+self.BrianedParentDeriveRecorderVariable.RecordKeyStr+".max()]",
').'+SampleTagStr+"LimFloatsArray"
])
#join
YticksLiargStr="".join([
">>SYS.set(SYS,'"+SampleTagStr+"TickFloatsArray',",
"map(lambda __Float:float('%.2f'%__Float),",
"SYS.getTickFloatsArray(",
'SYS.'+SampleTagStr+"LimFloatsArray,3",
")))."+SampleTagStr+"TickFloatsArray"
])
YtickLabelLiargStr="".join([
">>SYS.set(SYS,'"+SampleTagStr+"TickStrsArray',",
"map(lambda __Float:'$'+str(__Float)+'$',",
"SYS."+SampleTagStr+"TickFloatsArray))."+SampleTagStr+"TickStrsArray"
])
#debug
'''
self.debug(
[
'Yself.ViewedLabelStr is ',
Yself.ViewedLabelStr,
'YlimLiargStr is',
YlimLiargStr,
'YticksLiargStr is ',
YticksLiargStr,
'YtickLabelLiargStr is ',
YtickLabelLiargStr
]
)
'''
#set
self.PyplotingChartVariable+=[
(
'set_ylabel',self.ViewingYLabelStr
),
(
'set_ylim',{
'#liarg:#map@get':[YlimLiargStr]
}
),
(
'set_yticks',{
'#liarg:#map@get':[YticksLiargStr]
}
),
(
'set_yticklabels',{
'#liarg:#map@get':[YtickLabelLiargStr]
}
)
]
"""
#/####################/#
# maybe set global Chart also
#
self.PyplotingChartVariable+=[
(
'tick_params',{
'#kwarg':{
'length':10,
'width':5,
'which':'major'
}
}
),
(
'tick_params',{
'#kwarg':{
'length':5,
'width':2,
'which':'minor'
}
}
),
('xaxis.set_ticks_position',
{
'#liarg':['bottom']
}
),
('yaxis.set_ticks_position',
{
'#liarg':['left']
}
),
('legend',{
'#liarg':[],
'#kwarg':{
'fontsize':10,
'shadow':True,
'fancybox':True,
'ncol':max(1,len(
getattr(
self.BrianedStateMonitorVariable,
self.BrianedParentDeriveRecorderVariable.RecordKeyStr
)
)/2),
'loc':2,
'bbox_to_anchor':(1.05, 1)
}
})
]
#/####################/#
# maybe replace Chart also
#
#debug
'''
self.debug(
[
'Before replace',
('self.',self,[
'PyplotingDrawVariable',
'PyplotingChartVariable'
])
]
)
'''
#mapReplace
[
self.PyplotingDrawVariable,
self.PyplotingChartVariable
]=map(
lambda __Variable:
SYS.replace(
__Variable,
{
'#IdStr':str(self.PrintIdInt),
'#IdGetStr':"#id:"+str(self.PrintIdInt)
},
self
)
if __Variable!=None
else None,
map(
lambda __KeyStr:
getattr(
self,
__KeyStr
),
[
'PyplotingDrawVariable',
'PyplotingChartVariable'
]
)
)
#debug
'''
self.debug(
[
'After replace',
('self.',self,[
#'PyplotingDrawVariable',
'PyplotingChartVariable'
])
]
)
'''
#/####################/#
# Update maybe the
# parent neuron group
#debug
self.debug(
[
'Maybe we also update the view in the parent population'
]
)
#Check
if 'Charts' in self.BrianedParentPopulationDeriveBrianerVariable.TeamDict:
#get
BrianedChartsDeriveManager=self.BrianedParentPopulationDeriveBrianerVariable.TeamDict[
'Charts'
]
#manage
BrianedChartDerivePyploter=BrianedChartsDeriveManager.manage(
self.BrianedParentDeriveRecorderVariable.ManagementTagStr
).ManagedValueVariable
#debug
'''
self.debug(
[
'We update in the parent neurongroup chart',
'BrianedChartDerivePyploter is ',
SYS._str(BrianedChartDerivePyploter),
('self.',self,[])
]
)
'''
#team
BrianedDrawDeriveManager=BrianedChartDerivePyploter.team(
'Draws'
).TeamedValueVariable
#manage
BrianedDrawDeriveManager.manage(
str(self.ManagementIndexInt),
{
'PyplotingDrawVariable':self.PyplotingDrawVariable
}
)
def viewEvent(self):
#debug
'''
self.debug(
[
'We complete a view so first fill the draw'
]
)
'''
#set
self.ViewedLabelStr='$'+self.ManagementTagStr+'_{'+str(
self.BrianedParentPopulationDeriveBrianerVariable.BrianedNeurongroupVariable.name
).replace('_','/')+'}'
#set
self.PyplotingDrawVariable=[
(
'plot',
{
'#liarg:#map@get':[
'#IdGetStr.BrianedSpikeMonitorVariable.t',
'>>SYS.IdDict[#IdStr].BrianedSpikeMonitorVariable.i'
],
'#kwarg':dict(
{
'label':self.ViewedLabelStr,
'linestyle':'',
'marker':'.',
'color':'b'
},
**self.BrianingPyplotDict
)
}
)
]
#/####################/#
# maybe replace Chart also
#
#debug
'''
self.debug(
[
'Before replace',
('self.',self,[
'PyplotingDrawVariable',
'PyplotingChartVariable'
])
]
)
'''
#mapReplace
[
self.PyplotingDrawVariable,
self.PyplotingChartVariable
]=map(
lambda __Variable:
SYS.replace(
__Variable,
{
'#IdStr':str(self.PrintIdInt),
'#IdGetStr':"#id:"+str(self.PrintIdInt)
},
self
)
if __Variable!=None
else None,
map(
lambda __KeyStr:
getattr(
self,
__KeyStr
),
[
'PyplotingDrawVariable',
'PyplotingChartVariable'
]
)
)
#debug
'''
self.debug(
[
'After replace',
('self.',self,[
#'PyplotingDrawVariable',
'PyplotingChartVariable'
])
]
)
'''
#/####################/#
# Update maybe the
# parent neuron group
#get
BrianedChartDeriveManager=self.BrianedParentPopulationDeriveBrianerVariable.TeamDict[
'Charts'
]
#manage
BrianedChartDerivePyploter=BrianedChartDeriveManager.manage(
self.ManagementTagStr
).ManagedValueVariable
#debug
'''
self.debug(
[
'We update in the parent neurongroup chart',
'BrianedChartDerivePyploter is ',
SYS._str(BrianedChartDerivePyploter),
('self.',self,[])
]
)
'''
#team
BrianedDrawDeriveManager=BrianedChartDerivePyploter.team(
'Draws'
).TeamedValueVariable
#manage
BrianedDrawDeriveManager.manage(
str(self.ManagementIndexInt),
{
'PyplotingDrawVariable':self.PyplotingDrawVariable
}
)
def mimic_simulate(self):
#parent method
BaseClass.simulate(self)
#debug
'''
self.debug('We start simulate in brian')
'''
#run with the brian method
self.BrianedNetworkVariable.run(
self.SimulatingStopTimeFloat*self.BrianedTimeQuantityVariable
)
#debug
'''
self.debug('We stop running in brian')
'''
def mimic_record(self):
#base method
BaseClass.record(self)
#debug
'''
self.debug(
[
'We have traced, alias the init in the brian object',
('self.',self,[
'RecordedTraceFloatsArray',
'RecordedInitFloatsArray'
])
]
)
'''
#alias
self.RecordedTraceFloatsArray[:]=self.RecordedInitFloatsArray*self.RecordedTraceFloatsArray.unit
#debug
'''
self.debug(
[
('self.',self,['RecordedTraceFloatsArray'])
]
)
'''
def mimic__print(self,**_KwargVariablesDict):
#/##################/#
# Modify the printing Variable
#
#Check
if self.PrintingSelfBool:
#/##################/#
# Remove the brian objects that are non setted
#
#map
map(
lambda __KeyStr:
self.forcePrint(
[__KeyStr],
'BrianerClass'
)
if getattr(self.PrintingCopyVariable,__KeyStr)!=None
else None,
[
'BrianedNetworkVariable',
'BrianedNeurongroupVariable',
'BrianedSynapsesVariable',
'BrianedStateMonitorVariable',
'BrianedSpikeMonitorVariable',
'BrianedClockVariable'
]
)
#/##################/#
# Call the base method
#
#call
BaseClass._print(self,**_KwargVariablesDict)
BrianerClass.PrintingClassSkipKeyStrsList.extend(
[
'BrianingNeurongroupDict',
'BrianingSynapsesDict',
'BrianingConnectVariable',
'BrianingTraceDict',
'BrianingMoniterTuple',
'BrianingSpikesDict',
'BrianingPyplotDict',
'BrianingTimeQuantityStr',
'BrianingPyplotBool',
'BrianingStepTimeFloat',
		'BrianingDebugVariable',
		'BrianingRecordBool',
'BrianedTimeQuantityVariable',
'BrianedNetworkVariable',
'BrianedNeurongroupVariable',
'BrianedSynapsesVariable',
'BrianedStateMonitorVariable',
'BrianedSpikeMonitorVariable',
'BrianedClockVariable',
'BrianedRecordKeyStrsList',
'BrianedTraceDeriveBrianersList',
'BrianedSynapsesDeriveBrianersList',
'BrianedStateDeriveBrianersList',
'BrianedSpikeDeriveBrianersList',
'BrianedParentSingularStr',
'BrianedParentNetworkDeriveBrianerVariable',
'BrianedParentPopulationDeriveBrianerVariable',
'BrianedParentInteractomeDeriveBrianerVariable',
'BrianedParentDeriveRecorderVariable'
]
)
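# --- Hedged illustration (added by the editor): the '#IdStr' placeholder substitution
# performed by SYS.replace above, sketched at the string level only. SYS.replace itself
# walks nested structures; this helper is not part of the SYS API.
def _replace_id_markers(_Str, _PrintIdInt):
	return _Str.replace(
		'#IdGetStr', '#id:' + str(_PrintIdInt)
	).replace(
		'#IdStr', str(_PrintIdInt)
	)
# e.g. _replace_id_markers('>>SYS.IdDict[#IdStr].BrianedSpikeMonitorVariable.i', 12)
# -> '>>SYS.IdDict[12].BrianedSpikeMonitorVariable.i'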
|
from math import sqrt
class Position:
    """A simple 2D vector with a handful of geometric helpers."""
    def __init__(self, x, y):
        self.x = x
        self.y = y
    def length(self):
        # Euclidean norm of the vector.
        return sqrt(self.x**2 + self.y**2)
    def substract(self, vector):
        # Component-wise difference (note: the name is a misspelling of "subtract").
        return Position(self.x - vector.x, self.y - vector.y)
    def multiply(self, magnitude):
        # Scale the vector by a scalar factor.
        return Position(self.x * magnitude, self.y * magnitude)
    def scalarMultiplyByVector(self, vector):
        # Dot (scalar) product with another vector.
        return self.x * vector.x + self.y * vector.y
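# --- Hedged usage sketch (added by the editor, not part of the original module).
# Arbitrary example values; the printed results are only illustrative.
if __name__ == '__main__':
    a = Position(3, 4)
    b = Position(1, 2)
    print(a.length())                        # 5.0
    print(a.substract(b).length())           # sqrt(2**2 + 2**2) ~= 2.83
    print(a.multiply(2).x, a.multiply(2).y)  # 6 8
    print(a.scalarMultiplyByVector(b))       # 3*1 + 4*2 = 11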
|
from dp_tornado.engine.controller import Controller
class FlushallController(Controller):
def get(self):
self.model.tests.model_test.cache_test.flushall_redis()
self.finish('done')
|
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "SudokuSolver.settings")
from django.core.management import execute_from_command_line
execute_from_command_line(sys.argv)
|
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('blog', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='Article',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=150, verbose_name='\u6807\u9898')),
('body', models.TextField(verbose_name='\u6b63\u6587')),
('date', models.DateTimeField(verbose_name='\u53d1\u5e03\u65f6\u95f4')),
],
),
migrations.CreateModel(
name='Category',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=20, verbose_name='\u540d\u79f0')),
],
),
migrations.CreateModel(
name='Tag',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=20, verbose_name='\u540d\u79f0')),
],
),
migrations.AddField(
model_name='article',
name='category',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='blog.Category', verbose_name='\u5206\u7c7b'),
),
migrations.AddField(
model_name='article',
name='tags',
field=models.ManyToManyField(blank=True, to='blog.Tag', verbose_name='\u6807\u7b7e'),
),
]
|
from django.conf.urls import patterns, url
urlpatterns = patterns(
'stores.views',
url(r'^(?P<slug>[\w-]+)/$', 'store', name='store'),
url(r'^(?P<store_slug>[\w-]+)/(?P<slug>[\w-]+)/$',
'product', name='product'),
url(r'^(?P<store_slug>[\w-]+)/(?P<slug>[\w-]+)/add/$',
'add_to_cart', name='add_to_cart'),
url(r'^(?P<store_slug>[\w-]+)/(?P<slug>[\w-]+)/update/$',
'update_cart', name='update_cart'),
)
|
from __future__ import with_statement
from fabric.api import run, cd
from journeyman.buildrunner.registry import registry
def fetch_pip_dependencies(build_runner, **kwargs):
# Move into the source/repo directory
with cd(build_runner.build_src):
# Get files from config
files = kwargs.get('lines', [])
for req_file in files:
# do the pip install dance for every requirements file
output = run('pip -E .. install -r %s' % req_file)
# if anything else than 0 returned, abort.
if output.return_code != 0:
return False, output.return_code
# Done!
return True, 0
registry.add_step('fetch_pip_dependencies', fetch_pip_dependencies)
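# --- Hedged usage note (added by the editor). The step reads its requirements files
# from kwargs['lines']; a hypothetical build configuration might invoke it as:
#   fetch_pip_dependencies(build_runner, lines=['requirements.txt', 'requirements-dev.txt'])
# Each listed file is installed with `pip -E .. install -r <file>` inside build_runner.build_src.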
|
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import numpy as np
import tensorflow as tf
from resnet.data import cifar_input
class CIFAR100Dataset():
def __init__(self,
folder,
split,
num_fold=10,
fold_id=0,
data_aug=False,
whiten=False,
div255=False):
self.split = split
self.data = cifar_input.read_CIFAR100(folder)
num_ex = 50000
self.split_idx = np.arange(num_ex)
rnd = np.random.RandomState(0)
rnd.shuffle(self.split_idx)
num_valid = int(np.ceil(num_ex / num_fold))
valid_start = fold_id * num_valid
valid_end = min((fold_id + 1) * num_valid, num_ex)
self.valid_split_idx = self.split_idx[valid_start:valid_end]
self.train_split_idx = np.concatenate(
[self.split_idx[:valid_start], self.split_idx[valid_end:]])
if data_aug or whiten:
with tf.device("/cpu:0"):
self.inp_preproc, self.out_preproc = cifar_input.cifar_tf_preprocess(
random_crop=data_aug, random_flip=data_aug, whiten=whiten)
self.session = tf.Session()
self.data_aug = data_aug
self.whiten = whiten
self.div255 = div255
    if div255 and whiten:
      # `log` is never defined in this module; raise instead so misuse fails loudly.
      raise ValueError("Applying both /255 and whitening is not recommended.")
def get_size(self):
if self.split == "train":
return 50000
elif self.split == "traintrain":
return 45000
elif self.split == "trainval":
return 5000
else:
return 10000
def get_batch_idx(self, idx):
if self.split == "train":
result = {
"img": self.data["train_img"][idx],
"label": self.data["train_label"][idx]
}
elif self.split == "traintrain":
result = {
"img": self.data["train_img"][self.train_split_idx[idx]],
"label": self.data["train_label"][self.train_split_idx[idx]]
}
elif self.split == "trainval":
result = {
"img": self.data["train_img"][self.valid_split_idx[idx]],
"label": self.data["train_label"][self.valid_split_idx[idx]]
}
else:
result = {
"img": self.data["test_img"][idx],
"label": self.data["test_label"][idx]
}
if self.data_aug or self.whiten:
img = np.zeros(result["img"].shape)
for ii in range(len(idx)):
img[ii] = self.session.run(
self.out_preproc, feed_dict={self.inp_preproc: result["img"][ii]})
result["img"] = img
if self.div255:
result["img"] = result["img"] / 255.0
return result
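# --- Hedged illustration (added by the editor): the train/validation fold split used
# in __init__ above, reproduced standalone on a toy index range. Only numpy is needed;
# the sizes are arbitrary.
def _demo_fold_split(num_ex=20, num_fold=10, fold_id=0):
  idx = np.arange(num_ex)
  rnd = np.random.RandomState(0)
  rnd.shuffle(idx)
  num_valid = int(np.ceil(num_ex / num_fold))
  valid_start = fold_id * num_valid
  valid_end = min((fold_id + 1) * num_valid, num_ex)
  valid_idx = idx[valid_start:valid_end]
  train_idx = np.concatenate([idx[:valid_start], idx[valid_end:]])
  return train_idx, valid_idx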
|
import socket
import argparse
import traceback
import configuration
import tuner
import sys
class streamJit:
def __init__(self, port):
self.port = port
self.program = "streamApp"
def listen(self):
try:
			print 'listening for client at local port %d'%self.port
server_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
server_socket.bind(("localhost", self.port))
server_socket.listen(1)
client_socket, address = server_socket.accept()
print "Got a connection from ", address
self.socket = client_socket
			self.file = client_socket.makefile("rb") # map the socket to a file for performance and convenience.
server_socket.close()
except Exception, e:
print "Exception occured : %s"%e
traceback.print_exc()
data = raw_input ( "Press Keyboard to exit..." )
def run(self):
while 1:
data = self.recvmsg()
			if ( data == 'exit\n'):
				print data, "Received exit message, exiting."
				break
elif ( data == 'program\n'):
self.program = self.file.readline()
elif ( data == 'confg\n' ):
print "Config received."
cfgString = self.file.readline()
try:
cfg = configuration.getConfiguration(cfgString)
argv = ['--program', self.program, '--test-limit', '500']
tuner.start(argv, cfg, self)
				except Exception, e:
					print "Exception occurred : %s"%e
					traceback.print_exc()
					data = raw_input ( "Press Enter to exit..." )
					break
else:
print "###Invalid data received. Please check...:" , data
def sendmsg(self, msg):
if not msg.endswith("\n"):
msg = msg + "\n"
self.socket.send(msg)
def recvmsg(self):
data = self.file.readline()
if not data:
print "Socket closed...."
			data = raw_input ( "Press Enter to exit..." )
self.close()
sys.exit(1)
else:
return data
def close(self):
self.socket.close()
print "Socket has been closed successfully"
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('port', help='TCP port number', type=int)
args = parser.parse_args()
s = streamJit(args.port)
s.listen()
s.run()
s.close()
|
import re
import unicodedata
from flask_sqlalchemy.model import camel_to_snake_case
from .decorators import was_decorated_without_parenthesis
from .mail import send_mail
def slugify(string):
string = re.sub(r'[^\w\s-]', '',
unicodedata.normalize('NFKD', string.strip()))
return re.sub(r'[-\s]+', '-', string).lower()
def title_case(string):
return camel_to_snake_case(string).replace('_', ' ').title()
def pluralize(name):
if name.endswith('y'):
# right replace 'y' with 'ies'
return 'ies'.join(name.rsplit('y', 1))
elif name.endswith('s'):
return f'{name}es'
return f'{name}s'
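# --- Hedged usage notes (added by the editor); expected results for the helpers above.
# title_case() is omitted because it depends on flask_sqlalchemy's camel_to_snake_case.
#   slugify('Hello, World!')  -> 'hello-world'
#   pluralize('category')     -> 'categories'
#   pluralize('bus')          -> 'buses'
#   pluralize('tag')          -> 'tags'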
|
from __future__ import annotations
import argparse
import os.path
import re
from typing import Sequence
def main(argv: Sequence[str] | None = None) -> int:
parser = argparse.ArgumentParser()
parser.add_argument('filenames', nargs='*')
parser.add_argument(
'--django', default=False, action='store_true',
help='Use Django-style test naming pattern (test*.py)',
)
args = parser.parse_args(argv)
retcode = 0
test_name_pattern = r'test.*\.py' if args.django else r'.*_test\.py'
for filename in args.filenames:
base = os.path.basename(filename)
if (
not re.match(test_name_pattern, base) and
not base == '__init__.py' and
not base == 'conftest.py'
):
retcode = 1
print(f'{filename} does not match pattern "{test_name_pattern}"')
return retcode
if __name__ == '__main__':
raise SystemExit(main())
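# --- Hedged usage note (added by the editor); the script name below is hypothetical.
#   python check_test_naming.py foo_test.py            -> exit 0 (matches '.*_test\.py')
#   python check_test_naming.py tests/foo.py           -> exit 1 and prints the mismatch
#   python check_test_naming.py --django test_foo.py   -> exit 0 (matches 'test.*\.py')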
|
from django.db import models, migrations
import django.utils.timezone
import uuid
class Migration(migrations.Migration):
dependencies = [
]
operations = [
migrations.CreateModel(
name='Event',
fields=[
('created', models.DateTimeField(auto_now_add=True, editable=False)),
('modified', models.DateTimeField(auto_now=True, editable=False)),
('id', models.UUIDField(default=uuid.uuid4, serialize=False, editable=False, primary_key=True)),
('name', models.CharField(max_length=100, verbose_name='name')),
('description', models.TextField(verbose_name='description')),
('start_date', models.DateTimeField(verbose_name='start date')),
('end_date', models.DateTimeField(verbose_name='end date', blank=True)),
('homepage', models.URLField(verbose_name='homepage', blank=True)),
('is_published', models.BooleanField(default=False, verbose_name='is published')),
('auth_token', models.CharField(max_length=100)),
('owner_email', models.EmailField(help_text='Email address of the submitter.', max_length=256, verbose_name="owner's email address")),
],
options={
'ordering': ('-start_date',),
'verbose_name': 'event',
'verbose_name_plural': 'events',
},
),
]
|
import sys, os, time
from shutil import copyfile
from docutils.core import publish_parts
sys.path.insert(0, os.path.abspath('..'))
sys.path.append(os.path.abspath('.'))
sys.path.append(os.path.abspath('html'))
def rst2html(input, output):
"""
Create html file from rst file.
    :param input: Path to rst source file
    :type input: str
    :param output: Path to html output file
    :type output: str
    """
    path = os.path.abspath(input)
    with open(path, 'r') as source:
        rst = source.read()
    html = publish_parts(rst, writer_name='html')
    body = html['html_body']
    with open(output, 'w') as target:
        target.write(body)
return body
extensions = ['sphinx.ext.intersphinx', 'sphinx.ext.extlinks']
templates_path = ['html']
source_suffix = '.rst'
readme = rst2html('../README.txt', 'html/intro.html')
copyfile('../CHANGES.txt', 'changelog.rst')
from plasma.version import version
project = 'Plasma'
url = 'http://plasmads.org'
description = 'Flex Messaging support for Python'
copyright = "Copyright © %s The <a href='%s'>%s</a> Project. All rights reserved." % (
time.strftime('%Y'), url, project)
version = str(version)
release = version[:3]
today_fmt = '%B %d, %Y'
exclude_trees = ['_build', 'tutorials/examples']
add_function_parentheses = True
add_module_names = True
pygments_style = 'trac'
html_theme = 'beam'
html_theme_path = ['themes']
html_title = '%s - %s' % (project, description)
html_logo = None
html_last_updated_fmt = '%b %d, %Y'
html_sidebars = {
'toc': 'sidebartoc.html'
}
html_additional_pages = {
'index': 'indexcontent.html',
'tutorials/index': 'tutorials.html',
}
html_index = 'indexcontent.html'
html_use_modindex = True
html_copy_source = True
html_use_opensearch = 'http://plasmads.org'
htmlhelp_basename = 'plasma' + release.replace('.', '')
html_split_index = True
intersphinx_mapping = {'python': ('http://docs.python.org', None)}
linkcheck_ignore = [r'http://localhost:\d+/']
trac_url = 'http://dev.plasmads.org'
extlinks = {'ticket': (trac_url + '/ticket/%s', '#')}
|
import sys
import traceback
if sys.version_info[0] == 3:
def B(value):
return value.encode('ascii')
else:
def B(value):
return value
kAEInternetSuite=B('gurl')
kAEISGetURL=B('gurl')
kCoreEventClass=B('aevt')
kAEOpenApplication=B('oapp')
kAEOpenDocuments=B('odoc')
keyDirectObject=B('----')
typeAEList=B('list')
typeChar=B('TEXT')
typeAlias=B('alis')
highLevelEventMask=1024
kHighLevelEvent=23
import ctypes
from Carbon import AE
from Carbon import Evt
from Carbon import File
carbon = ctypes.CDLL('/System/Library/Carbon.framework/Carbon')
print carbon.RunCurrentEventLoop
def _get_argvemulator():
"""argvemulator - create sys.argv from OSA events. Used by applets that
want unix-style arguments.
"""
class ArgvCollector:
"""A minimal FrameWork.Application-like class"""
def __init__(self):
self.quitting = 0
AE.AEInstallEventHandler(kCoreEventClass, kAEOpenApplication,
self.__runapp)
AE.AEInstallEventHandler(kCoreEventClass, kAEOpenDocuments,
self.__openfiles)
AE.AEInstallEventHandler(kAEInternetSuite, kAEISGetURL,
self.__geturl)
# The definition of kAEInternetSuite seems to be wrong,
            # the line below ensures that the code will work anyway.
AE.AEInstallEventHandler('GURL', 'GURL', self.__geturl)
def close(self):
AE.AERemoveEventHandler(kCoreEventClass, kAEOpenApplication)
AE.AERemoveEventHandler(kCoreEventClass, kAEOpenDocuments)
AE.AERemoveEventHandler(kAEInternetSuite, kAEISGetURL)
AE.AERemoveEventHandler('GURL', 'GURL')
def mainloop(self, mask = highLevelEventMask, timeout = 1*60):
# Note: this is not the right way to run an event loop in OSX or
# even "recent" versions of MacOS9. This is however code that has
# proven itself.
# Remove the funny -psn_xxx_xxx argument
if len(sys.argv) > 1 and sys.argv[1][:4] == '-psn':
del sys.argv[1]
stoptime = Evt.TickCount() + timeout
while not self.quitting and Evt.TickCount() < stoptime:
self._dooneevent(mask, timeout)
if not self.quitting:
print "argvemulator: timeout waiting for arguments"
self.close()
def _dooneevent(self, mask = highLevelEventMask, timeout = 1*60):
got, event = Evt.WaitNextEvent(mask, timeout)
if got:
self._lowlevelhandler(event)
def _lowlevelhandler(self, event):
what, message, when, where, modifiers = event
h, v = where
if what == kHighLevelEvent:
try:
AE.AEProcessAppleEvent(event)
except AE.Error, err:
msg = "High Level Event: %r %r" % (hex(message),
hex(h | (v<<16)))
print 'AE error: ', err
print 'in', msg
traceback.print_exc()
return
else:
print "Unhandled event:", event
def _quit(self):
self.quitting = 1
def __runapp(self, requestevent, replyevent):
self._quit()
def __openfiles(self, requestevent, replyevent):
try:
listdesc = requestevent.AEGetParamDesc(keyDirectObject,
typeAEList)
for i in range(listdesc.AECountItems()):
aliasdesc = listdesc.AEGetNthDesc(i+1, typeAlias)[1]
alias = File.Alias(rawdata=aliasdesc.data)
fsref = alias.FSResolveAlias(None)[0]
pathname = fsref.as_pathname()
sys.argv.append(pathname)
except Exception, e:
print "argvemulator.py warning: can't unpack an open document event"
import traceback
traceback.print_exc()
self._quit()
def __geturl(self, requestevent, replyevent):
try:
listdesc = requestevent.AEGetParamDesc(keyDirectObject, typeAEList)
for i in range(listdesc.AECountItems()):
desc = listdesc.AEGetNthDesc(i+1, typeChar)[1]
#url = desc.data.decode('utf8')
url = desc.data
sys.argv.append(url)
except Exception, e:
print "argvemulator.py warning: can't unpack a GetURL event"
import traceback
traceback.print_exc()
self._quit()
return ArgvCollector()
def _argv_emulation():
import sys
# only use if started by LaunchServices
for arg in sys.argv[1:]:
if arg.startswith('-psn'):
_get_argvemulator().mainloop()
break
_argv_emulation()
def _chdir_resource():
import os
os.chdir(os.environ['RESOURCEPATH'])
_chdir_resource()
def _disable_linecache():
import linecache
def fake_getline(*args, **kwargs):
return ''
linecache.orig_getline = linecache.getline
linecache.getline = fake_getline
_disable_linecache()
def _run(*scripts):
global __file__
import os, sys, site
sys.frozen = 'macosx_app'
base = os.environ['RESOURCEPATH']
site.addsitedir(base)
site.addsitedir(os.path.join(base, 'Python', 'site-packages'))
if not scripts:
import __main__
for script in scripts:
path = os.path.join(base, script)
sys.argv[0] = __file__ = path
execfile(path, globals(), globals())
_run('RJC-1000.py')
|
import bleach
from django.contrib.contenttypes.models import ContentType
from django.core import urlresolvers
from django.db import models
from django.utils.formats import date_format
from django.utils.translation import ugettext as _
from django.utils import timezone
from django_countries.fields import CountryField
class ClanMemberManager(models.Manager):
"""
This manager returns ClanMember objects whose is_member property is set to True
"""
def get_queryset(self):
return super(ClanMemberManager, self).get_queryset().filter(is_member=True)
class ClanMember(models.Model):
"""Model for clan member"""
    # If a player's score equals this value, they are a clan member but unranked.
SCORE_UNRANKED = -1
RACE_TERRAN = 'TERRAN'
RACE_ZERG = 'ZERG'
RACE_PROTOSS = 'PROTOSS'
RACE_RANDOM = 'RANDOM'
    # Number of days after a member is added during which their status is protected from being changed by management jobs.
STATUS_PROTECTION_DAYS = 3
name = models.CharField('player name', max_length=50, help_text=_(
'In the Battle.net URL http://eu.battle.net/sc2/en/profile/3568824/1/pundurs/, '
'<i>pundurs</i> is the player name.'
))
bnet_id = models.IntegerField('battle.net ID', help_text=_(
'In the Battle.net URL http://eu.battle.net/sc2/en/profile/3568824/1/pundurs/, '
'<i>3568824</i> is the Battle.net ID.'
))
region = models.IntegerField('region', default=1, help_text=_(
'In the Battle.net URL http://eu.battle.net/sc2/en/profile/3568824/1/pundurs/, '
'<i>1</i> is the region. It\'s <i>1</i> for most accounts.'
))
ladder_name = models.CharField('division', max_length=50, editable=False, blank=True)
ladder_id = models.IntegerField('division id', editable=False, default=0)
country = CountryField(blank_label=_('(select country)'), blank=True)
league = models.CharField('league', editable=False, max_length=12, choices=(
('BRONZE', _('Bronze')),
('SILVER', _('Silver')),
('GOLD', _('Gold')),
('PLATINUM', _('Platinum')),
('DIAMOND', _('Diamond')),
('MASTER', _('Masters')),
('GRANDMASTER', _('Grandmasters')),
))
race = models.CharField('race', editable=False, max_length=10, choices=(
(RACE_TERRAN, _('Terran')),
(RACE_ZERG, _('Zerg')),
(RACE_PROTOSS, _('Protoss')),
(RACE_RANDOM, _('Random')),
))
skype_id = models.CharField('skype', max_length=100, blank=True)
last_game = models.DateField('last game', auto_now_add=True, editable=False)
wins = models.IntegerField('wins', editable=False, default=0)
losses = models.IntegerField('losses', editable=False, default=0)
score = models.IntegerField('ladder points', editable=False, default=SCORE_UNRANKED)
rank = models.IntegerField('rank in division', editable=False, default=100)
join_date = models.DateField('date joined', auto_now_add=True)
practice_register = models.CharField('practice register', max_length=1, choices=(
('Y', _('Yes')),
('N', _('No (No excuse)')),
('E', _('Explained')),
), default='Y')
strikes = models.IntegerField('amount of strikes', default=0)
notes = models.TextField('notes', blank=True, help_text=_(
'You can use HTML. Allowed tags - %s.<br> Links will be made clickable automatically.' % ', '.join(['<' + tag + '>' for tag in bleach.ALLOWED_TAGS])
))
twitch_username = models.CharField('Twitch username', max_length=100, blank=True)
is_member = models.BooleanField('is clan member', default=True, help_text=_(
'Uncheck this instead of deleting the member from the database, so you don\'t lose the extra data you added '
'which you might need again in case the member re-joins the clan.'
))
# Member won't be affected by sync with Nios.kr.
name_locked = models.BooleanField('is name locked', default=False, help_text=_(
'If this is enabled, the player\'s name won\'t be changed by sync jobs.<br><br>Example use case: Nios.kr lists '
'a member with an old name, but you want to specify the new one, so the stats sync works.'
))
membership_status_locked = models.BooleanField('is membership status locked', default=False, help_text=_(
'If this is enabled, the player\'s status won\'t be changed by sync jobs.<br><br>Example use case: Nios.kr '
'lists a member who actually isn\'t in the clan anymore, but keeps getting listed as one.<br> '
'<strong>NOTE:</strong> You most likely won\'t need to use this if you add a member manually, since '
'their status is locked for ' + str(STATUS_PROTECTION_DAYS) + ' days allowing Nios.kr to catch up!'
))
is_staff = models.BooleanField('is staff', default=False, help_text=_(
'Used for informative purposes only.'
))
rankedftw_teamid = models.IntegerField(_('RankedFTW team id'), default=0, editable=False)
objects = models.Manager()
clanmembers = ClanMemberManager()
@property
def is_unranked(self):
return self.score == self.SCORE_UNRANKED
@property
def winrate(self):
if self.wins == 0 and self.losses == 0:
return 0
return round(100 / (self.wins + self.losses) * self.wins, 2)
@property
def is_winrate_positive(self):
return self.winrate > 50
@property
def total_games(self):
return self.wins + self.losses
@property
def days_since_last_game(self):
return (timezone.now().date() - self.last_game).days
@property
def bnet_profile_url(self):
return 'http://eu.battle.net/sc2/en/profile/{}/{}/{}/'.format(self.bnet_id, self.region, self.name)
@property
def bnet_ladder_url(self):
return 'http://eu.battle.net/sc2/en/profile/{}/{}/{}/ladder/{}#current-rank'.format(
self.bnet_id, self.region, self.name, self.ladder_id
)
@property
def rankedftw_url(self):
if not self.rankedftw_teamid:
return ''
return 'http://www.rankedftw.com/ladder/lotv/1v1/ladder-rank/?team={}'.format(
self.rankedftw_teamid
)
@property
def rankedftw_graph_url(self):
if not self.rankedftw_teamid:
return ''
return 'http://www.rankedftw.com/team/{}/'.format(
self.rankedftw_teamid
)
@property
def twitch_url(self):
if not self.twitch_username:
return ''
return 'http://twitch.tv/{}/'.format(self.twitch_username)
@property
def get_admin_url(self):
# Get admin URL for specific object, AdminSite independent.
content_type = ContentType.objects.get_for_model(self.__class__)
return urlresolvers.reverse(
"admin:%s_%s_change" % (content_type.app_label, content_type.model), args=(self.id,)
)
@property
def bleached_notes(self):
linkified = bleach.linkify(self.notes)
return bleach.clean(linkified)
def __str__(self):
return self.name
def serialize(self):
"""
Returns JSON serializable dict of instance
"""
return dict(
name=self.name,
bnet_id=self.bnet_id,
region=self.region,
ranked=not self.is_unranked,
ladder_name=self.ladder_name,
ladder_id=self.ladder_id,
country=self.get_country_display(),
league=self.get_league_display(),
race=self.get_race_display(),
last_game=date_format(self.last_game, 'SHORT_DATE_FORMAT'),
wins=self.wins,
losses=self.losses,
score=self.score,
rank=self.rank,
join_date=date_format(self.join_date, 'SHORT_DATE_FORMAT'),
winrate=self.winrate,
total_games=self.total_games,
bnet_profile_url=self.bnet_profile_url,
twitch_username=self.twitch_username,
twitch_url=self.twitch_url,
rankedftw_url=self.rankedftw_url,
rankedftw_graph_url=self.rankedftw_graph_url
)
class Meta:
verbose_name = 'player'
verbose_name_plural = 'players'
class SyncLog(models.Model):
""" A log entry for sync events triggered by management jobs"""
# Identifiers for actions
CLAN_MEMBER_SYNC = 'CM'
CLAN_MEMBER_DETAIL_SYNC = 'CMD'
action = models.CharField('identifier', max_length=3, choices=(
(CLAN_MEMBER_SYNC, _('Clan member sync')),
(CLAN_MEMBER_DETAIL_SYNC, _('Clan member detail sync')),
))
time = models.DateTimeField('time', auto_now_add=True)
notes = models.TextField('notes', blank=True)
success = models.BooleanField('was operation successful?', default=False)
class Meta:
verbose_name = 'sync event'
verbose_name_plural = 'sync events'
def __str__(self):
return self.get_action_display()
class ClanWar(models.Model):
date = models.DateTimeField(_('Date and time'), help_text=_(u'The date and time have to be entered in UTC!'))
opponent_name = models.CharField(_('Opponent'), max_length=50)
game_channel = models.CharField(_('In game channel'), blank=True, max_length=50)
players = models.ManyToManyField(
ClanMember, verbose_name=_('Players'), through='ClanWarPlayer'
)
notes = models.TextField(_('Notes'), blank=True)
class Meta:
verbose_name = 'clan war'
verbose_name_plural = 'clan wars'
ordering = ('-date',)
def __str__(self):
return '{} - {}'.format(date_format(self.date, 'SHORT_DATETIME_FORMAT'), self.opponent_name)
@property
def get_admin_url(self):
# Get admin URL for specific object, AdminSite independent.
content_type = ContentType.objects.get_for_model(self.__class__)
return urlresolvers.reverse(
"admin:%s_%s_change" % (content_type.app_label, content_type.model), args=(self.id,)
)
def serialize(self):
"""
Returns JSON serializable dict of instance
"""
return dict(
id=self.id,
datetime=date_format(self.date, 'SHORT_DATETIME_FORMAT'),
datetime_timestamp=self.date.timestamp(),
opponent=self.opponent_name,
ingame_channel=self.game_channel,
players=[p.serialize() for p in self.players.all()],
notes=self.notes
)
class ClanWarPlayer(models.Model):
player = models.ForeignKey(ClanMember, verbose_name=_('Player'), limit_choices_to={'is_member': True})
clanwar = models.ForeignKey(ClanWar, verbose_name=_('Clan war'))
class Meta:
verbose_name = _('Clan war player')
verbose_name_plural = _('Clan war players')
unique_together = ('clanwar', 'player')
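# --- Hedged illustration (added by the editor): ClanMember.winrate computes the win
# percentage as 100 / (wins + losses) * wins, rounded to two decimals, e.g.
#   wins=7, losses=3 -> round(100 / 10 * 7, 2) == 70.0
# and returns 0 when no games have been played.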
|
import os
import sys
use_verbose_logging = 'verbose' in sys.argv
def sh(cmd):
if use_verbose_logging:
print(cmd)
return os.system(cmd)
def compile_test(test):
sh('gcc -std=gnu99 -Wall -Wextra -I../include -ffreestanding -nostdlib -c {test}.c -o {test}.o'.format(test=test))
def link_test(test):
sh('ld main.o {test}.o ../../build/libc_host/libnorbyc.a -o {test}.bin'.format(test=test))
def run_test(test):
print('{test}... '.format(test=test), end='')
return os.system('./{test}.bin'.format(test=test))
def cleanup_after_test(test):
for extension in ['.o', '.bin']:
os.remove(test + extension)
tests = [item[:-2] for item in os.listdir('.') if item.endswith('.c') and os.path.isfile(item)]
print('\033[1m+-------------------------------------+')
print( '| Starting Norby C Library Test Suite |')
print( '+-------------------------------------+\033[0m')
sh('nasm main.asm -felf64 -o main.o')
failure_count = 0
for test in tests:
compile_test(test)
link_test(test)
if run_test(test) != 0:
failure_count += 1
print('\033[91mFAILURE!\033[0m')
else:
print('\033[92mSUCCESS!\033[0m')
cleanup_after_test(test)
sys.exit(failure_count)
|