code
stringlengths 1
199k
|
|---|
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
'metadata_version': '1.1'}
DOCUMENTATION = '''
---
module: fortios_vpn_ipsec_phase1
short_description: Configure VPN remote gateway in Fortinet's FortiOS and FortiGate.
description:
- This module is able to configure a FortiGate or FortiOS (FOS) device by allowing the
user to set and modify vpn_ipsec feature and phase1 category.
Examples include all parameters and values need to be adjusted to datasources before usage.
Tested with FOS v6.0.5
version_added: "2.8"
author:
- Miguel Angel Munoz (@mamunozgonzalez)
- Nicolas Thomas (@thomnico)
notes:
- Requires fortiosapi library developed by Fortinet
- Run as a local_action in your playbook
requirements:
- fortiosapi>=0.9.8
options:
host:
description:
- FortiOS or FortiGate IP address.
type: str
required: false
username:
description:
- FortiOS or FortiGate username.
type: str
required: false
password:
description:
- FortiOS or FortiGate password.
type: str
default: ""
vdom:
description:
- Virtual domain, among those defined previously. A vdom is a
virtual instance of the FortiGate that can be configured and
used as a different unit.
type: str
default: root
https:
description:
- Indicates if the requests towards FortiGate must use HTTPS protocol.
type: bool
default: true
ssl_verify:
description:
- Ensures FortiGate certificate must be verified by a proper CA.
type: bool
default: true
version_added: 2.9
state:
description:
- Indicates whether to create or remove the object.
This attribute was present already in previous version in a deeper level.
It has been moved out to this outer level.
type: str
required: false
choices:
- present
- absent
version_added: 2.9
vpn_ipsec_phase1:
description:
- Configure VPN remote gateway.
default: null
type: dict
suboptions:
state:
description:
- B(Deprecated)
- Starting with Ansible 2.9 we recommend using the top-level 'state' parameter.
- HORIZONTALLINE
- Indicates whether to create or remove the object.
type: str
required: false
choices:
- present
- absent
acct_verify:
description:
- Enable/disable verification of RADIUS accounting record.
type: str
choices:
- enable
- disable
add_gw_route:
description:
- Enable/disable automatically add a route to the remote gateway.
type: str
choices:
- enable
- disable
add_route:
description:
- Enable/disable control addition of a route to peer destination selector.
type: str
choices:
- disable
- enable
assign_ip:
description:
- Enable/disable assignment of IP to IPsec interface via configuration method.
type: str
choices:
- disable
- enable
assign_ip_from:
description:
- Method by which the IP address will be assigned.
type: str
choices:
- range
- usrgrp
- dhcp
- name
authmethod:
description:
- Authentication method.
type: str
choices:
- psk
- signature
authmethod_remote:
description:
- Authentication method (remote side).
type: str
choices:
- psk
- signature
authpasswd:
description:
- XAuth password (max 35 characters).
type: str
authusr:
description:
- XAuth user name.
type: str
authusrgrp:
description:
- Authentication user group. Source user.group.name.
type: str
auto_negotiate:
description:
- Enable/disable automatic initiation of IKE SA negotiation.
type: str
choices:
- enable
- disable
backup_gateway:
description:
- Instruct unity clients about the backup gateway address(es).
type: list
suboptions:
address:
description:
- Address of backup gateway.
required: true
type: str
banner:
description:
- Message that unity client should display after connecting.
type: str
cert_id_validation:
description:
- Enable/disable cross validation of peer ID and the identity in the peer's certificate as specified in RFC 4945.
type: str
choices:
- enable
- disable
certificate:
description:
- Names of up to 4 signed personal certificates.
type: list
suboptions:
name:
description:
- Certificate name. Source vpn.certificate.local.name.
required: true
type: str
childless_ike:
description:
- Enable/disable childless IKEv2 initiation (RFC 6023).
type: str
choices:
- enable
- disable
client_auto_negotiate:
description:
- Enable/disable allowing the VPN client to bring up the tunnel when there is no traffic.
type: str
choices:
- disable
- enable
client_keep_alive:
description:
- Enable/disable allowing the VPN client to keep the tunnel up when there is no traffic.
type: str
choices:
- disable
- enable
comments:
description:
- Comment.
type: str
dhgrp:
description:
- DH group.
type: str
choices:
- 1
- 2
- 5
- 14
- 15
- 16
- 17
- 18
- 19
- 20
- 21
- 27
- 28
- 29
- 30
- 31
digital_signature_auth:
description:
- Enable/disable IKEv2 Digital Signature Authentication (RFC 7427).
type: str
choices:
- enable
- disable
distance:
description:
- Distance for routes added by IKE (1 - 255).
type: int
dns_mode:
description:
- DNS server mode.
type: str
choices:
- manual
- auto
domain:
description:
- Instruct unity clients about the default DNS domain.
type: str
dpd:
description:
- Dead Peer Detection mode.
type: str
choices:
- disable
- on-idle
- on-demand
dpd_retrycount:
description:
- Number of DPD retry attempts.
type: int
dpd_retryinterval:
description:
- DPD retry interval.
type: str
eap:
description:
- Enable/disable IKEv2 EAP authentication.
type: str
choices:
- enable
- disable
eap_identity:
description:
- IKEv2 EAP peer identity type.
type: str
choices:
- use-id-payload
- send-request
enforce_unique_id:
description:
- Enable/disable peer ID uniqueness check.
type: str
choices:
- disable
- keep-new
- keep-old
forticlient_enforcement:
description:
- Enable/disable FortiClient enforcement.
type: str
choices:
- enable
- disable
fragmentation:
description:
- Enable/disable fragment IKE message on re-transmission.
type: str
choices:
- enable
- disable
fragmentation_mtu:
description:
- IKE fragmentation MTU (500 - 16000).
type: int
group_authentication:
description:
- Enable/disable IKEv2 IDi group authentication.
type: str
choices:
- enable
- disable
group_authentication_secret:
description:
- Password for IKEv2 IDi group authentication. (ASCII string or hexadecimal indicated by a leading 0x.)
type: str
ha_sync_esp_seqno:
description:
- Enable/disable sequence number jump ahead for IPsec HA.
type: str
choices:
- enable
- disable
idle_timeout:
description:
- Enable/disable IPsec tunnel idle timeout.
type: str
choices:
- enable
- disable
idle_timeoutinterval:
description:
- IPsec tunnel idle timeout in minutes (5 - 43200).
type: int
ike_version:
description:
- IKE protocol version.
type: str
choices:
- 1
- 2
include_local_lan:
description:
- Enable/disable allow local LAN access on unity clients.
type: str
choices:
- disable
- enable
interface:
description:
- Local physical, aggregate, or VLAN outgoing interface. Source system.interface.name.
type: str
ipv4_dns_server1:
description:
- IPv4 DNS server 1.
type: str
ipv4_dns_server2:
description:
- IPv4 DNS server 2.
type: str
ipv4_dns_server3:
description:
- IPv4 DNS server 3.
type: str
ipv4_end_ip:
description:
- End of IPv4 range.
type: str
ipv4_exclude_range:
description:
- Configuration Method IPv4 exclude ranges.
type: list
suboptions:
end_ip:
description:
- End of IPv4 exclusive range.
type: str
id:
description:
- ID.
required: true
type: int
start_ip:
description:
- Start of IPv4 exclusive range.
type: str
ipv4_name:
description:
- IPv4 address name. Source firewall.address.name firewall.addrgrp.name.
type: str
ipv4_netmask:
description:
- IPv4 Netmask.
type: str
ipv4_split_exclude:
description:
- IPv4 subnets that should not be sent over the IPsec tunnel. Source firewall.address.name firewall.addrgrp.name.
type: str
ipv4_split_include:
description:
- IPv4 split-include subnets. Source firewall.address.name firewall.addrgrp.name.
type: str
ipv4_start_ip:
description:
- Start of IPv4 range.
type: str
ipv4_wins_server1:
description:
- WINS server 1.
type: str
ipv4_wins_server2:
description:
- WINS server 2.
type: str
ipv6_dns_server1:
description:
- IPv6 DNS server 1.
type: str
ipv6_dns_server2:
description:
- IPv6 DNS server 2.
type: str
ipv6_dns_server3:
description:
- IPv6 DNS server 3.
type: str
ipv6_end_ip:
description:
- End of IPv6 range.
type: str
ipv6_exclude_range:
description:
- Configuration method IPv6 exclude ranges.
type: list
suboptions:
end_ip:
description:
- End of IPv6 exclusive range.
type: str
id:
description:
- ID.
required: true
type: int
start_ip:
description:
- Start of IPv6 exclusive range.
type: str
ipv6_name:
description:
- IPv6 address name. Source firewall.address6.name firewall.addrgrp6.name.
type: str
ipv6_prefix:
description:
- IPv6 prefix.
type: int
ipv6_split_exclude:
description:
- IPv6 subnets that should not be sent over the IPsec tunnel. Source firewall.address6.name firewall.addrgrp6.name.
type: str
ipv6_split_include:
description:
- IPv6 split-include subnets. Source firewall.address6.name firewall.addrgrp6.name.
type: str
ipv6_start_ip:
description:
- Start of IPv6 range.
type: str
keepalive:
description:
- NAT-T keep alive interval.
type: int
keylife:
description:
- Time to wait in seconds before phase 1 encryption key expires.
type: int
local_gw:
description:
- Local VPN gateway.
type: str
localid:
description:
- Local ID.
type: str
localid_type:
description:
- Local ID type.
type: str
choices:
- auto
- fqdn
- user-fqdn
- keyid
- address
- asn1dn
mesh_selector_type:
description:
- Add selectors containing subsets of the configuration depending on traffic.
type: str
choices:
- disable
- subnet
- host
mode:
description:
- ID protection mode used to establish a secure channel.
type: str
choices:
- aggressive
- main
mode_cfg:
description:
- Enable/disable configuration method.
type: str
choices:
- disable
- enable
name:
description:
- IPsec remote gateway name.
required: true
type: str
nattraversal:
description:
- Enable/disable NAT traversal.
type: str
choices:
- enable
- disable
- forced
negotiate_timeout:
description:
- IKE SA negotiation timeout in seconds (1 - 300).
type: int
peer:
description:
- Accept this peer certificate. Source user.peer.name.
type: str
peergrp:
description:
- Accept this peer certificate group. Source user.peergrp.name.
type: str
peerid:
description:
- Accept this peer identity.
type: str
peertype:
description:
- Accept this peer type.
type: str
choices:
- any
- one
- dialup
- peer
- peergrp
ppk:
description:
- Enable/disable IKEv2 Postquantum Preshared Key (PPK).
type: str
choices:
- disable
- allow
- require
ppk_identity:
description:
- IKEv2 Postquantum Preshared Key Identity.
type: str
ppk_secret:
description:
- IKEv2 Postquantum Preshared Key (ASCII string or hexadecimal encoded with a leading 0x).
type: str
priority:
description:
- Priority for routes added by IKE (0 - 4294967295).
type: int
proposal:
description:
- Phase1 proposal.
type: str
choices:
- des-md5
- des-sha1
- des-sha256
- des-sha384
- des-sha512
psksecret:
description:
- Pre-shared secret for PSK authentication (ASCII string or hexadecimal encoded with a leading 0x).
type: str
psksecret_remote:
description:
- Pre-shared secret for remote side PSK authentication (ASCII string or hexadecimal encoded with a leading 0x).
type: str
reauth:
description:
- Enable/disable re-authentication upon IKE SA lifetime expiration.
type: str
choices:
- disable
- enable
rekey:
description:
- Enable/disable phase1 rekey.
type: str
choices:
- enable
- disable
remote_gw:
description:
- Remote VPN gateway.
type: str
remotegw_ddns:
description:
- Domain name of remote gateway (eg. name.DDNS.com).
type: str
rsa_signature_format:
description:
- Digital Signature Authentication RSA signature format.
type: str
choices:
- pkcs1
- pss
save_password:
description:
- Enable/disable saving XAuth username and password on VPN clients.
type: str
choices:
- disable
- enable
send_cert_chain:
description:
- Enable/disable sending certificate chain.
type: str
choices:
- enable
- disable
signature_hash_alg:
description:
- Digital Signature Authentication hash algorithms.
type: str
choices:
- sha1
- sha2-256
- sha2-384
- sha2-512
split_include_service:
description:
- Split-include services. Source firewall.service.group.name firewall.service.custom.name.
type: str
suite_b:
description:
- Use Suite-B.
type: str
choices:
- disable
- suite-b-gcm-128
- suite-b-gcm-256
type:
description:
- Remote gateway type.
type: str
choices:
- static
- dynamic
- ddns
unity_support:
description:
- Enable/disable support for Cisco UNITY Configuration Method extensions.
type: str
choices:
- disable
- enable
usrgrp:
description:
- User group name for dialup peers. Source user.group.name.
type: str
wizard_type:
description:
- GUI VPN Wizard Type.
type: str
choices:
- custom
- dialup-forticlient
- dialup-ios
- dialup-android
- dialup-windows
- dialup-cisco
- static-fortigate
- dialup-fortigate
- static-cisco
- dialup-cisco-fw
xauthtype:
description:
- XAuth type.
type: str
choices:
- disable
- client
- pap
- chap
- auto
'''
EXAMPLES = '''
- hosts: localhost
vars:
host: "192.168.122.40"
username: "admin"
password: ""
vdom: "root"
ssl_verify: "False"
tasks:
- name: Configure VPN remote gateway.
fortios_vpn_ipsec_phase1:
host: "{{ host }}"
username: "{{ username }}"
password: "{{ password }}"
vdom: "{{ vdom }}"
https: "False"
state: "present"
vpn_ipsec_phase1:
acct_verify: "enable"
add_gw_route: "enable"
add_route: "disable"
assign_ip: "disable"
assign_ip_from: "range"
authmethod: "psk"
authmethod_remote: "psk"
authpasswd: "<your_own_value>"
authusr: "<your_own_value>"
authusrgrp: "<your_own_value> (source user.group.name)"
auto_negotiate: "enable"
backup_gateway:
-
address: "<your_own_value>"
banner: "<your_own_value>"
cert_id_validation: "enable"
certificate:
-
name: "default_name_19 (source vpn.certificate.local.name)"
childless_ike: "enable"
client_auto_negotiate: "disable"
client_keep_alive: "disable"
comments: "<your_own_value>"
dhgrp: "1"
digital_signature_auth: "enable"
distance: "26"
dns_mode: "manual"
domain: "<your_own_value>"
dpd: "disable"
dpd_retrycount: "30"
dpd_retryinterval: "<your_own_value>"
eap: "enable"
eap_identity: "use-id-payload"
enforce_unique_id: "disable"
forticlient_enforcement: "enable"
fragmentation: "enable"
fragmentation_mtu: "37"
group_authentication: "enable"
group_authentication_secret: "<your_own_value>"
ha_sync_esp_seqno: "enable"
idle_timeout: "enable"
idle_timeoutinterval: "42"
ike_version: "1"
include_local_lan: "disable"
interface: "<your_own_value> (source system.interface.name)"
ipv4_dns_server1: "<your_own_value>"
ipv4_dns_server2: "<your_own_value>"
ipv4_dns_server3: "<your_own_value>"
ipv4_end_ip: "<your_own_value>"
ipv4_exclude_range:
-
end_ip: "<your_own_value>"
id: "52"
start_ip: "<your_own_value>"
ipv4_name: "<your_own_value> (source firewall.address.name firewall.addrgrp.name)"
ipv4_netmask: "<your_own_value>"
ipv4_split_exclude: "<your_own_value> (source firewall.address.name firewall.addrgrp.name)"
ipv4_split_include: "<your_own_value> (source firewall.address.name firewall.addrgrp.name)"
ipv4_start_ip: "<your_own_value>"
ipv4_wins_server1: "<your_own_value>"
ipv4_wins_server2: "<your_own_value>"
ipv6_dns_server1: "<your_own_value>"
ipv6_dns_server2: "<your_own_value>"
ipv6_dns_server3: "<your_own_value>"
ipv6_end_ip: "<your_own_value>"
ipv6_exclude_range:
-
end_ip: "<your_own_value>"
id: "67"
start_ip: "<your_own_value>"
ipv6_name: "<your_own_value> (source firewall.address6.name firewall.addrgrp6.name)"
ipv6_prefix: "70"
ipv6_split_exclude: "<your_own_value> (source firewall.address6.name firewall.addrgrp6.name)"
ipv6_split_include: "<your_own_value> (source firewall.address6.name firewall.addrgrp6.name)"
ipv6_start_ip: "<your_own_value>"
keepalive: "74"
keylife: "75"
local_gw: "<your_own_value>"
localid: "<your_own_value>"
localid_type: "auto"
mesh_selector_type: "disable"
mode: "aggressive"
mode_cfg: "disable"
name: "default_name_82"
nattraversal: "enable"
negotiate_timeout: "84"
peer: "<your_own_value> (source user.peer.name)"
peergrp: "<your_own_value> (source user.peergrp.name)"
peerid: "<your_own_value>"
peertype: "any"
ppk: "disable"
ppk_identity: "<your_own_value>"
ppk_secret: "<your_own_value>"
priority: "92"
proposal: "des-md5"
psksecret: "<your_own_value>"
psksecret_remote: "<your_own_value>"
reauth: "disable"
rekey: "enable"
remote_gw: "<your_own_value>"
remotegw_ddns: "<your_own_value>"
rsa_signature_format: "pkcs1"
save_password: "disable"
send_cert_chain: "enable"
signature_hash_alg: "sha1"
split_include_service: "<your_own_value> (source firewall.service.group.name firewall.service.custom.name)"
suite_b: "disable"
type: "static"
unity_support: "disable"
usrgrp: "<your_own_value> (source user.group.name)"
wizard_type: "custom"
xauthtype: "disable"
'''
RETURN = '''
build:
description: Build number of the fortigate image
returned: always
type: str
sample: '1547'
http_method:
description: Last method used to provision the content into FortiGate
returned: always
type: str
sample: 'PUT'
http_status:
description: Last result given by FortiGate on last operation applied
returned: always
type: str
sample: "200"
mkey:
description: Master key (id) used in the last call to FortiGate
returned: success
type: str
sample: "id"
name:
description: Name of the table used to fulfill the request
returned: always
type: str
sample: "urlfilter"
path:
description: Path of the table used to fulfill the request
returned: always
type: str
sample: "webfilter"
revision:
description: Internal revision number
returned: always
type: str
sample: "17.0.2.10658"
serial:
description: Serial number of the unit
returned: always
type: str
sample: "FGVMEVYYQT3AB5352"
status:
description: Indication of the operation's result
returned: always
type: str
sample: "success"
vdom:
description: Virtual domain used
returned: always
type: str
sample: "root"
version:
description: Version of the FortiGate
returned: always
type: str
sample: "v5.6.3"
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.connection import Connection
from ansible.module_utils.network.fortios.fortios import FortiOSHandler
from ansible.module_utils.network.fortimanager.common import FAIL_SOCKET_MSG
def login(data, fos):
    """Open a session on the FortiGate via the legacy fortiosapi client.

    Connection parameters (host, username, password, ssl_verify and the
    optional https flag) are read from *data*; the authenticated session
    is established on the *fos* handler passed in.
    """
    fos.debug('on')
    # HTTPS stays enabled unless the caller explicitly turned it off.
    use_https = data.get('https', True)
    fos.https('on' if use_https else 'off')
    fos.login(data['host'], data['username'], data['password'],
              verify=data['ssl_verify'])
def filter_vpn_ipsec_phase1_data(json):
    """Return a copy of *json* restricted to the known vpn.ipsec phase1
    options, silently dropping unknown keys and keys whose value is None.
    """
    option_list = ['acct_verify', 'add_gw_route', 'add_route',
                   'assign_ip', 'assign_ip_from', 'authmethod',
                   'authmethod_remote', 'authpasswd', 'authusr',
                   'authusrgrp', 'auto_negotiate', 'backup_gateway',
                   'banner', 'cert_id_validation', 'certificate',
                   'childless_ike', 'client_auto_negotiate', 'client_keep_alive',
                   'comments', 'dhgrp', 'digital_signature_auth',
                   'distance', 'dns_mode', 'domain',
                   'dpd', 'dpd_retrycount', 'dpd_retryinterval',
                   'eap', 'eap_identity', 'enforce_unique_id',
                   'forticlient_enforcement', 'fragmentation', 'fragmentation_mtu',
                   'group_authentication', 'group_authentication_secret', 'ha_sync_esp_seqno',
                   'idle_timeout', 'idle_timeoutinterval', 'ike_version',
                   'include_local_lan', 'interface', 'ipv4_dns_server1',
                   'ipv4_dns_server2', 'ipv4_dns_server3', 'ipv4_end_ip',
                   'ipv4_exclude_range', 'ipv4_name', 'ipv4_netmask',
                   'ipv4_split_exclude', 'ipv4_split_include', 'ipv4_start_ip',
                   'ipv4_wins_server1', 'ipv4_wins_server2', 'ipv6_dns_server1',
                   'ipv6_dns_server2', 'ipv6_dns_server3', 'ipv6_end_ip',
                   'ipv6_exclude_range', 'ipv6_name', 'ipv6_prefix',
                   'ipv6_split_exclude', 'ipv6_split_include', 'ipv6_start_ip',
                   'keepalive', 'keylife', 'local_gw',
                   'localid', 'localid_type', 'mesh_selector_type',
                   'mode', 'mode_cfg', 'name',
                   'nattraversal', 'negotiate_timeout', 'peer',
                   'peergrp', 'peerid', 'peertype',
                   'ppk', 'ppk_identity', 'ppk_secret',
                   'priority', 'proposal', 'psksecret',
                   'psksecret_remote', 'reauth', 'rekey',
                   'remote_gw', 'remotegw_ddns', 'rsa_signature_format',
                   'save_password', 'send_cert_chain', 'signature_hash_alg',
                   'split_include_service', 'suite_b', 'type',
                   'unity_support', 'usrgrp', 'wizard_type',
                   'xauthtype']
    # Keep only recognised options that carry an actual value.
    return {option: json[option] for option in option_list
            if option in json and json[option] is not None}
def underscore_to_hyphen(data):
    """Recursively rewrite dict keys, replacing '_' with '-'.

    Lists are converted element-by-element in place (the same list object
    is returned); dicts are rebuilt with converted keys; any other value
    is returned unchanged. FortiOS expects hyphenated attribute names,
    while Ansible option names use underscores.
    """
    if isinstance(data, list):
        for index, element in enumerate(data):
            data[index] = underscore_to_hyphen(element)
        return data
    if isinstance(data, dict):
        return {key.replace('_', '-'): underscore_to_hyphen(value)
                for key, value in data.items()}
    return data
def vpn_ipsec_phase1(data, fos):
    """Create/update or delete a vpn.ipsec phase1 object on the FortiGate.

    The desired state is taken from the top-level 'state' parameter when
    set, falling back to the deprecated per-object 'state' suboption.

    :param data: module parameters; expects 'vdom' and 'vpn_ipsec_phase1'
    :param fos: FortiOS handler exposing set() and delete()
    :returns: the raw FortiGate response dict (or None if no valid state
        was supplied, matching the original fall-through behaviour)
    """
    vdom = data['vdom']
    if 'state' in data and data['state']:
        state = data['state']
    # Fixed operand order: the dict must be checked for truthiness BEFORE
    # applying the 'in' operator to it, otherwise a None value for
    # 'vpn_ipsec_phase1' raises "argument of type 'NoneType' is not iterable".
    elif data['vpn_ipsec_phase1'] and 'state' in data['vpn_ipsec_phase1']:
        state = data['vpn_ipsec_phase1']['state']
    else:
        state = True
    vpn_ipsec_phase1_data = data['vpn_ipsec_phase1']
    # Strip unknown/None options, then convert key style for FortiOS.
    filtered_data = underscore_to_hyphen(
        filter_vpn_ipsec_phase1_data(vpn_ipsec_phase1_data))
    if state == "present":
        return fos.set('vpn.ipsec',
                       'phase1',
                       data=filtered_data,
                       vdom=vdom)
    elif state == "absent":
        # 'name' is the mkey (required option), so it is always present.
        return fos.delete('vpn.ipsec',
                          'phase1',
                          mkey=filtered_data['name'],
                          vdom=vdom)
def is_successful_status(status):
    """Return True when a FortiGate response indicates success.

    A DELETE that came back 404 also counts as success: the object being
    absent is exactly the desired outcome of the delete.
    """
    if status['status'] == "success":
        return True
    return status['http_method'] == "DELETE" and status['http_status'] == 404
def fortios_vpn_ipsec(data, fos):
    """Dispatch to the vpn.ipsec phase1 endpoint and normalise the result.

    Returns an (is_error, has_changed, response) triple for the caller.
    """
    # NOTE(review): if 'vpn_ipsec_phase1' is empty/None, resp is unbound
    # and a NameError follows -- preserved from the original code.
    if data['vpn_ipsec_phase1']:
        resp = vpn_ipsec_phase1(data, fos)
    is_error = not is_successful_status(resp)
    has_changed = resp['status'] == "success"
    return is_error, has_changed, resp
def main():
    """Module entry point.

    Builds the Ansible argument spec, connects to the FortiGate either
    over HTTPAPI (socket) or via the legacy fortiosapi client, applies
    the requested vpn.ipsec phase1 configuration and reports the result.
    """
    # Argument spec; mirrors the DOCUMENTATION block above.
    fields = {
        "host": {"required": False, "type": "str"},
        "username": {"required": False, "type": "str"},
        "password": {"required": False, "type": "str", "default": "", "no_log": True},
        "vdom": {"required": False, "type": "str", "default": "root"},
        "https": {"required": False, "type": "bool", "default": True},
        "ssl_verify": {"required": False, "type": "bool", "default": True},
        "state": {"required": False, "type": "str",
                  "choices": ["present", "absent"]},
        "vpn_ipsec_phase1": {
            "required": False, "type": "dict", "default": None,
            "options": {
                # Deprecated per-object state; superseded by top-level 'state'.
                "state": {"required": False, "type": "str",
                          "choices": ["present", "absent"]},
                "acct_verify": {"required": False, "type": "str",
                                "choices": ["enable", "disable"]},
                "add_gw_route": {"required": False, "type": "str",
                                 "choices": ["enable", "disable"]},
                "add_route": {"required": False, "type": "str",
                              "choices": ["disable", "enable"]},
                "assign_ip": {"required": False, "type": "str",
                              "choices": ["disable", "enable"]},
                "assign_ip_from": {"required": False, "type": "str",
                                   "choices": ["range", "usrgrp", "dhcp",
                                               "name"]},
                "authmethod": {"required": False, "type": "str",
                               "choices": ["psk", "signature"]},
                "authmethod_remote": {"required": False, "type": "str",
                                      "choices": ["psk", "signature"]},
                "authpasswd": {"required": False, "type": "str"},
                "authusr": {"required": False, "type": "str"},
                "authusrgrp": {"required": False, "type": "str"},
                "auto_negotiate": {"required": False, "type": "str",
                                   "choices": ["enable", "disable"]},
                "backup_gateway": {"required": False, "type": "list",
                                   "options": {
                                       "address": {"required": True, "type": "str"}
                                   }},
                "banner": {"required": False, "type": "str"},
                "cert_id_validation": {"required": False, "type": "str",
                                       "choices": ["enable", "disable"]},
                "certificate": {"required": False, "type": "list",
                                "options": {
                                    "name": {"required": True, "type": "str"}
                                }},
                "childless_ike": {"required": False, "type": "str",
                                  "choices": ["enable", "disable"]},
                "client_auto_negotiate": {"required": False, "type": "str",
                                          "choices": ["disable", "enable"]},
                "client_keep_alive": {"required": False, "type": "str",
                                      "choices": ["disable", "enable"]},
                "comments": {"required": False, "type": "str"},
                "dhgrp": {"required": False, "type": "str",
                          "choices": ["1", "2", "5",
                                      "14", "15", "16",
                                      "17", "18", "19",
                                      "20", "21", "27",
                                      "28", "29", "30",
                                      "31"]},
                "digital_signature_auth": {"required": False, "type": "str",
                                           "choices": ["enable", "disable"]},
                "distance": {"required": False, "type": "int"},
                "dns_mode": {"required": False, "type": "str",
                             "choices": ["manual", "auto"]},
                "domain": {"required": False, "type": "str"},
                "dpd": {"required": False, "type": "str",
                        "choices": ["disable", "on-idle", "on-demand"]},
                "dpd_retrycount": {"required": False, "type": "int"},
                "dpd_retryinterval": {"required": False, "type": "str"},
                "eap": {"required": False, "type": "str",
                        "choices": ["enable", "disable"]},
                "eap_identity": {"required": False, "type": "str",
                                 "choices": ["use-id-payload", "send-request"]},
                "enforce_unique_id": {"required": False, "type": "str",
                                      "choices": ["disable", "keep-new", "keep-old"]},
                "forticlient_enforcement": {"required": False, "type": "str",
                                            "choices": ["enable", "disable"]},
                "fragmentation": {"required": False, "type": "str",
                                  "choices": ["enable", "disable"]},
                "fragmentation_mtu": {"required": False, "type": "int"},
                "group_authentication": {"required": False, "type": "str",
                                         "choices": ["enable", "disable"]},
                "group_authentication_secret": {"required": False, "type": "str"},
                "ha_sync_esp_seqno": {"required": False, "type": "str",
                                      "choices": ["enable", "disable"]},
                "idle_timeout": {"required": False, "type": "str",
                                 "choices": ["enable", "disable"]},
                "idle_timeoutinterval": {"required": False, "type": "int"},
                "ike_version": {"required": False, "type": "str",
                                "choices": ["1", "2"]},
                "include_local_lan": {"required": False, "type": "str",
                                      "choices": ["disable", "enable"]},
                "interface": {"required": False, "type": "str"},
                "ipv4_dns_server1": {"required": False, "type": "str"},
                "ipv4_dns_server2": {"required": False, "type": "str"},
                "ipv4_dns_server3": {"required": False, "type": "str"},
                "ipv4_end_ip": {"required": False, "type": "str"},
                "ipv4_exclude_range": {"required": False, "type": "list",
                                       "options": {
                                           "end_ip": {"required": False, "type": "str"},
                                           "id": {"required": True, "type": "int"},
                                           "start_ip": {"required": False, "type": "str"}
                                       }},
                "ipv4_name": {"required": False, "type": "str"},
                "ipv4_netmask": {"required": False, "type": "str"},
                "ipv4_split_exclude": {"required": False, "type": "str"},
                "ipv4_split_include": {"required": False, "type": "str"},
                "ipv4_start_ip": {"required": False, "type": "str"},
                "ipv4_wins_server1": {"required": False, "type": "str"},
                "ipv4_wins_server2": {"required": False, "type": "str"},
                "ipv6_dns_server1": {"required": False, "type": "str"},
                "ipv6_dns_server2": {"required": False, "type": "str"},
                "ipv6_dns_server3": {"required": False, "type": "str"},
                "ipv6_end_ip": {"required": False, "type": "str"},
                "ipv6_exclude_range": {"required": False, "type": "list",
                                       "options": {
                                           "end_ip": {"required": False, "type": "str"},
                                           "id": {"required": True, "type": "int"},
                                           "start_ip": {"required": False, "type": "str"}
                                       }},
                "ipv6_name": {"required": False, "type": "str"},
                "ipv6_prefix": {"required": False, "type": "int"},
                "ipv6_split_exclude": {"required": False, "type": "str"},
                "ipv6_split_include": {"required": False, "type": "str"},
                "ipv6_start_ip": {"required": False, "type": "str"},
                "keepalive": {"required": False, "type": "int"},
                "keylife": {"required": False, "type": "int"},
                "local_gw": {"required": False, "type": "str"},
                "localid": {"required": False, "type": "str"},
                "localid_type": {"required": False, "type": "str",
                                 "choices": ["auto", "fqdn", "user-fqdn",
                                             "keyid", "address", "asn1dn"]},
                "mesh_selector_type": {"required": False, "type": "str",
                                       "choices": ["disable", "subnet", "host"]},
                "mode": {"required": False, "type": "str",
                         "choices": ["aggressive", "main"]},
                "mode_cfg": {"required": False, "type": "str",
                             "choices": ["disable", "enable"]},
                # 'name' is the mkey used to identify the phase1 object.
                "name": {"required": True, "type": "str"},
                "nattraversal": {"required": False, "type": "str",
                                 "choices": ["enable", "disable", "forced"]},
                "negotiate_timeout": {"required": False, "type": "int"},
                "peer": {"required": False, "type": "str"},
                "peergrp": {"required": False, "type": "str"},
                "peerid": {"required": False, "type": "str"},
                "peertype": {"required": False, "type": "str",
                             "choices": ["any", "one", "dialup",
                                         "peer", "peergrp"]},
                "ppk": {"required": False, "type": "str",
                        "choices": ["disable", "allow", "require"]},
                "ppk_identity": {"required": False, "type": "str"},
                "ppk_secret": {"required": False, "type": "str"},
                "priority": {"required": False, "type": "int"},
                "proposal": {"required": False, "type": "str",
                             "choices": ["des-md5", "des-sha1", "des-sha256",
                                         "des-sha384", "des-sha512"]},
                "psksecret": {"required": False, "type": "str"},
                "psksecret_remote": {"required": False, "type": "str"},
                "reauth": {"required": False, "type": "str",
                           "choices": ["disable", "enable"]},
                "rekey": {"required": False, "type": "str",
                          "choices": ["enable", "disable"]},
                "remote_gw": {"required": False, "type": "str"},
                "remotegw_ddns": {"required": False, "type": "str"},
                "rsa_signature_format": {"required": False, "type": "str",
                                         "choices": ["pkcs1", "pss"]},
                "save_password": {"required": False, "type": "str",
                                  "choices": ["disable", "enable"]},
                "send_cert_chain": {"required": False, "type": "str",
                                    "choices": ["enable", "disable"]},
                "signature_hash_alg": {"required": False, "type": "str",
                                       "choices": ["sha1", "sha2-256", "sha2-384",
                                                   "sha2-512"]},
                "split_include_service": {"required": False, "type": "str"},
                "suite_b": {"required": False, "type": "str",
                            "choices": ["disable", "suite-b-gcm-128", "suite-b-gcm-256"]},
                "type": {"required": False, "type": "str",
                         "choices": ["static", "dynamic", "ddns"]},
                "unity_support": {"required": False, "type": "str",
                                  "choices": ["disable", "enable"]},
                "usrgrp": {"required": False, "type": "str"},
                "wizard_type": {"required": False, "type": "str",
                                "choices": ["custom", "dialup-forticlient", "dialup-ios",
                                            "dialup-android", "dialup-windows", "dialup-cisco",
                                            "static-fortigate", "dialup-fortigate", "static-cisco",
                                            "dialup-cisco-fw"]},
                "xauthtype": {"required": False, "type": "str",
                              "choices": ["disable", "client", "pap",
                                          "chap", "auto"]}
            }
        }
    }
    module = AnsibleModule(argument_spec=fields,
                           supports_check_mode=False)
    # legacy_mode refers to using fortiosapi instead of HTTPAPI
    legacy_mode = 'host' in module.params and module.params['host'] is not None and \
        'username' in module.params and module.params['username'] is not None and \
        'password' in module.params and module.params['password'] is not None
    if not legacy_mode:
        # HTTPAPI path: reuse the persistent connection socket Ansible opened.
        if module._socket_path:
            connection = Connection(module._socket_path)
            fos = FortiOSHandler(connection)
            is_error, has_changed, result = fortios_vpn_ipsec(module.params, fos)
        else:
            module.fail_json(**FAIL_SOCKET_MSG)
    else:
        # Legacy path: import fortiosapi lazily so the module still loads
        # (and can fail gracefully) when the library is not installed.
        try:
            from fortiosapi import FortiOSAPI
        except ImportError:
            module.fail_json(msg="fortiosapi module is required")
        fos = FortiOSAPI()
        login(module.params, fos)
        is_error, has_changed, result = fortios_vpn_ipsec(module.params, fos)
        fos.logout()
    if not is_error:
        module.exit_json(changed=has_changed, meta=result)
    else:
        module.fail_json(msg="Error in repo", meta=result)
# Standard script entry point: Ansible executes the module as __main__.
if __name__ == '__main__':
    main()
|
"""
Unit tests for the function
:func:`iris.analysis.cartography.rotate_winds`.
"""
from __future__ import (absolute_import, division, print_function)
from six.moves import (filter, input, map, range, zip) # noqa
import iris.tests as tests
import numpy as np
import numpy.ma as ma
import cartopy.crs as ccrs
from iris.analysis.cartography import rotate_winds, unrotate_pole
from iris.cube import Cube
from iris.coords import DimCoord, AuxCoord
import iris.coord_systems
def uv_cubes(x=None, y=None):
    """Return u, v cubes with a grid in a rotated pole CRS."""
    crs = iris.coord_systems.RotatedGeogCS(grid_north_pole_latitude=37.5,
                                           grid_north_pole_longitude=177.5)
    # Default grid if the caller did not supply coordinate points.
    x = np.linspace(311.9, 391.1, 6) if x is None else x
    y = np.linspace(-23.6, 24.8, 5) if y is None else y
    xx, yy = np.meshgrid(x, y)
    u_data = 10 * (2 * np.cos(2 * np.deg2rad(xx) + 3 * np.deg2rad(yy + 30)) ** 2)
    v_data = 20 * np.cos(6 * np.deg2rad(xx))
    lon = DimCoord(x, standard_name='grid_longitude', units='degrees',
                   coord_system=crs)
    lat = DimCoord(y, standard_name='grid_latitude', units='degrees',
                   coord_system=crs)
    cubes = []
    for name, data in (('x_wind', u_data), ('y_wind', v_data)):
        cube = Cube(data, standard_name=name, units='m/s')
        cube.add_dim_coord(lat.copy(), 0)
        cube.add_dim_coord(lon.copy(), 1)
        cubes.append(cube)
    return tuple(cubes)
def uv_cubes_3d(ref_cube, n_realization=3):
    """
    Return 3d u, v cubes with a grid in a rotated pole CRS taken from
    the provided 2d cube, by adding a realization dimension coordinate
    bound to the zeroth dimension.
    """
    lat = ref_cube.coord('grid_latitude')
    lon = ref_cube.coord('grid_longitude')
    x2d, y2d = np.meshgrid(lon.points, lat.points)
    u_data = 10 * (2 * np.cos(2 * np.deg2rad(x2d) + 3 * np.deg2rad(y2d + 30)) ** 2)
    v_data = 20 * np.cos(6 * np.deg2rad(x2d))
    # Scale each slice by a distinct factor so realizations differ.
    scale = np.arange(1, n_realization + 1).reshape(n_realization, 1, 1)
    u_data = scale * u_data
    v_data = scale * v_data
    realization = DimCoord(np.arange(n_realization), 'realization')
    cubes = []
    for name, data in (('x_wind', u_data), ('y_wind', v_data)):
        cube = Cube(data, standard_name=name, units='m/s')
        cube.add_dim_coord(realization.copy(), 0)
        cube.add_dim_coord(lat.copy(), 1)
        cube.add_dim_coord(lon.copy(), 2)
        cubes.append(cube)
    return tuple(cubes)
class TestPrerequisites(tests.IrisTest):
    """Check that rotate_winds rejects inputs violating its preconditions."""

    def test_different_coord_systems(self):
        # u and v must share a coordinate system.
        u, v = uv_cubes()
        v.coord('grid_latitude').coord_system = iris.coord_systems.GeogCS(1)
        with self.assertRaisesRegexp(
                ValueError, 'Coordinates differ between u and v cubes'):
            rotate_winds(u, v, iris.coord_systems.OSGB())

    def test_different_xy_coord_systems(self):
        # Within one cube, x and y coords must share a coordinate system.
        u, v = uv_cubes()
        u.coord('grid_latitude').coord_system = iris.coord_systems.GeogCS(1)
        v.coord('grid_latitude').coord_system = iris.coord_systems.GeogCS(1)
        with self.assertRaisesRegexp(
                ValueError,
                'Coordinate systems of x and y coordinates differ'):
            rotate_winds(u, v, iris.coord_systems.OSGB())

    def test_different_shape(self):
        # u and v data arrays must have the same shape.
        x = np.linspace(311.9, 391.1, 6)
        y = np.linspace(-23.6, 24.8, 5)
        u, _ = uv_cubes(x, y)
        _, v = uv_cubes(x[:-1], y)
        with self.assertRaisesRegexp(ValueError, 'same shape'):
            rotate_winds(u, v, iris.coord_systems.OSGB())

    def test_xy_dimensionality(self):
        # x and y coords must have matching dimensionality (here 2d vs 1d).
        u, v = uv_cubes()
        # Replace 1d lat with 2d lat.
        x = u.coord('grid_longitude').points
        y = u.coord('grid_latitude').points
        x2d, y2d = np.meshgrid(x, y)
        lat_2d = AuxCoord(y2d, 'grid_latitude', units='degrees',
                          coord_system=u.coord('grid_latitude').coord_system)
        for cube in (u, v):
            cube.remove_coord('grid_latitude')
            cube.add_aux_coord(lat_2d.copy(), (0, 1))
        with self.assertRaisesRegexp(
                ValueError,
                'x and y coordinates must have the same number of dimensions'):
            rotate_winds(u, v, iris.coord_systems.OSGB())

    def test_dim_mapping(self):
        # u and v must map their coords onto the same cube dimensions.
        x = np.linspace(311.9, 391.1, 3)
        y = np.linspace(-23.6, 24.8, 3)
        u, v = uv_cubes(x, y)
        v.transpose()
        with self.assertRaisesRegexp(ValueError, 'Dimension mapping'):
            rotate_winds(u, v, iris.coord_systems.OSGB())
class TestAnalyticComparison(tests.IrisTest):
    """Compare rotate_winds results against an analytic reference solution."""

    @staticmethod
    def _unrotate_equation(rotated_lons, rotated_lats,
                           rotated_us, rotated_vs, pole_lon, pole_lat):
        # Perform a rotated-pole 'unrotate winds' transformation on arrays of
        # rotated-lat, rotated-lon, u and v.
        # This can be defined as an analytic function : cf. UMDP015
        # Work out the rotation angles.
        lambda_angle = np.radians(pole_lon - 180.0)
        phi_angle = np.radians(90.0 - pole_lat)
        # Get the locations in true lats+lons.
        trueLongitude, trueLatitude = unrotate_pole(rotated_lons,
                                                    rotated_lats,
                                                    pole_lon,
                                                    pole_lat)
        # Calculate inter-coordinate rotation coefficients.
        cos_rot = (np.cos(np.radians(rotated_lons)) *
                   np.cos(np.radians(trueLongitude) - lambda_angle) +
                   np.sin(np.radians(rotated_lons)) *
                   np.sin(np.radians(trueLongitude) - lambda_angle) *
                   np.cos(phi_angle))
        sin_rot = -((np.sin(np.radians(trueLongitude) - lambda_angle) *
                     np.sin(phi_angle)) /
                    np.cos(np.radians(rotated_lats)))
        # Matrix-multiply to rotate the vectors.
        u_true = rotated_us * cos_rot - rotated_vs * sin_rot
        v_true = rotated_vs * cos_rot + rotated_us * sin_rot
        return u_true, v_true

    def _check_rotated_to_true(self, u_rot, v_rot, target_cs, **kwds):
        # Compare the numeric rotate_winds result against the analytic
        # reference; **kwds are tolerance args for assertArrayAllClose.
        # Run test calculation (numeric).
        u_true, v_true = rotate_winds(u_rot, v_rot, target_cs)
        # Perform same calculation via the reference method (equations).
        cs_rot = u_rot.coord('grid_longitude').coord_system
        pole_lat = cs_rot.grid_north_pole_latitude
        pole_lon = cs_rot.grid_north_pole_longitude
        rotated_lons = u_rot.coord('grid_longitude').points
        rotated_lats = u_rot.coord('grid_latitude').points
        rotated_lons_2d, rotated_lats_2d = np.meshgrid(
            rotated_lons, rotated_lats)
        rotated_u, rotated_v = u_rot.data, v_rot.data
        u_ref, v_ref = self._unrotate_equation(rotated_lons_2d,
                                               rotated_lats_2d,
                                               rotated_u, rotated_v,
                                               pole_lon, pole_lat)
        # Check that all the numerical results are within given tolerances.
        self.assertArrayAllClose(u_true.data, u_ref, **kwds)
        self.assertArrayAllClose(v_true.data, v_ref, **kwds)

    def test_rotated_to_true__small(self):
        # Check for a small field with varying data.
        target_cs = iris.coord_systems.GeogCS(6371229)
        u_rot, v_rot = uv_cubes()
        self._check_rotated_to_true(u_rot, v_rot, target_cs,
                                    rtol=1e-5, atol=0.0005)

    def test_rotated_to_true_global(self):
        # Check for global fields with various constant wind values
        # - constant in the rotated pole system, that is.
        # We expect less accuracy where this gets close to the true poles.
        target_cs = iris.coord_systems.GeogCS(6371229)
        u_rot, v_rot = uv_cubes(x=np.arange(0, 360.0, 15),
                                y=np.arange(-89, 89, 10))
        for vector in ((1, 0), (0, 1), (1, 1), (-3, -1.5)):
            u_rot.data[...] = vector[0]
            v_rot.data[...] = vector[1]
            self._check_rotated_to_true(u_rot, v_rot, target_cs,
                                        rtol=5e-4, atol=5e-4,
                                        err_msg='vector={}'.format(vector))
class TestRotatedToOSGB(tests.IrisTest):
    """Tests for transforming rotated-pole winds to the OSGB projection."""

    # Define some coordinate ranges for the uv_cubes 'standard' RotatedPole
    # system, that exceed the OSGB margins, but not by "too much".
    _rp_x_min, _rp_x_max = -5.0, 5.0
    _rp_y_min, _rp_y_max = -5.0, 15.0

    def _uv_cubes_limited_extent(self):
        # Make test cubes suitable for transforming to OSGB, as the standard
        # 'uv_cubes' result goes too far outside, leading to errors.
        x = np.linspace(self._rp_x_min, self._rp_x_max, 6)
        y = np.linspace(self._rp_y_min, self._rp_y_max, 5)
        return uv_cubes(x=x, y=y)

    def test_name(self):
        # Result cubes are renamed with a 'transformed_' prefix.
        u, v = self._uv_cubes_limited_extent()
        u.rename('bob')
        v.rename('alice')
        ut, vt = rotate_winds(u, v, iris.coord_systems.OSGB())
        self.assertEqual(ut.name(), 'transformed_' + u.name())
        self.assertEqual(vt.name(), 'transformed_' + v.name())

    def test_new_coords(self):
        # 2d projection coords are added, matching a cartopy reference
        # transformation of the grid points.
        u, v = self._uv_cubes_limited_extent()
        x = u.coord('grid_longitude').points
        y = u.coord('grid_latitude').points
        x2d, y2d = np.meshgrid(x, y)
        src_crs = ccrs.RotatedPole(pole_longitude=177.5, pole_latitude=37.5)
        tgt_crs = ccrs.OSGB()
        xyz_tran = tgt_crs.transform_points(src_crs, x2d, y2d)
        ut, vt = rotate_winds(u, v, iris.coord_systems.OSGB())
        points = xyz_tran[..., 0].reshape(x2d.shape)
        expected_x = AuxCoord(points,
                              standard_name='projection_x_coordinate',
                              units='m',
                              coord_system=iris.coord_systems.OSGB())
        self.assertEqual(ut.coord('projection_x_coordinate'), expected_x)
        self.assertEqual(vt.coord('projection_x_coordinate'), expected_x)
        points = xyz_tran[..., 1].reshape(y2d.shape)
        expected_y = AuxCoord(points,
                              standard_name='projection_y_coordinate',
                              units='m',
                              coord_system=iris.coord_systems.OSGB())
        self.assertEqual(ut.coord('projection_y_coordinate'), expected_y)
        self.assertEqual(vt.coord('projection_y_coordinate'), expected_y)

    def test_new_coords_transposed(self):
        u, v = self._uv_cubes_limited_extent()
        # Transpose cubes so that cube is in xy order rather than the
        # typical yx order of meshgrid.
        u.transpose()
        v.transpose()
        x = u.coord('grid_longitude').points
        y = u.coord('grid_latitude').points
        x2d, y2d = np.meshgrid(x, y)
        src_crs = ccrs.RotatedPole(pole_longitude=177.5, pole_latitude=37.5)
        tgt_crs = ccrs.OSGB()
        xyz_tran = tgt_crs.transform_points(src_crs, x2d, y2d)
        ut, vt = rotate_winds(u, v, iris.coord_systems.OSGB())
        points = xyz_tran[..., 0].reshape(x2d.shape)
        expected_x = AuxCoord(points,
                              standard_name='projection_x_coordinate',
                              units='m',
                              coord_system=iris.coord_systems.OSGB())
        self.assertEqual(ut.coord('projection_x_coordinate'), expected_x)
        self.assertEqual(vt.coord('projection_x_coordinate'), expected_x)
        points = xyz_tran[..., 1].reshape(y2d.shape)
        expected_y = AuxCoord(points,
                              standard_name='projection_y_coordinate',
                              units='m',
                              coord_system=iris.coord_systems.OSGB())
        self.assertEqual(ut.coord('projection_y_coordinate'), expected_y)
        self.assertEqual(vt.coord('projection_y_coordinate'), expected_y)
        # Check dim mapping for 2d coords is yx.
        expected_dims = (u.coord_dims('grid_latitude') +
                         u.coord_dims('grid_longitude'))
        self.assertEqual(ut.coord_dims('projection_x_coordinate'),
                         expected_dims)
        self.assertEqual(ut.coord_dims('projection_y_coordinate'),
                         expected_dims)
        self.assertEqual(vt.coord_dims('projection_x_coordinate'),
                         expected_dims)
        self.assertEqual(vt.coord_dims('projection_y_coordinate'),
                         expected_dims)

    def test_orig_coords(self):
        # The original grid coords are preserved on the results.
        u, v = self._uv_cubes_limited_extent()
        ut, vt = rotate_winds(u, v, iris.coord_systems.OSGB())
        self.assertEqual(u.coord('grid_latitude'), ut.coord('grid_latitude'))
        self.assertEqual(v.coord('grid_latitude'), vt.coord('grid_latitude'))
        self.assertEqual(u.coord('grid_longitude'), ut.coord('grid_longitude'))
        self.assertEqual(v.coord('grid_longitude'), vt.coord('grid_longitude'))

    def test_magnitude_preservation(self):
        # Rotation should (approximately) preserve wind-vector magnitude.
        u, v = self._uv_cubes_limited_extent()
        ut, vt = rotate_winds(u, v, iris.coord_systems.OSGB())
        orig_sq_mag = u.data**2 + v.data**2
        res_sq_mag = ut.data**2 + vt.data**2
        self.assertArrayAllClose(orig_sq_mag, res_sq_mag, rtol=5e-4)

    def test_data_values(self):
        u, v = self._uv_cubes_limited_extent()
        # Slice out 4 points that lie in and outside OSGB extent.
        u = u[1:3, 3:5]
        v = v[1:3, 3:5]
        ut, vt = rotate_winds(u, v, iris.coord_systems.OSGB())
        # Values precalculated and checked.
        expected_ut_data = np.array([[0.16285514, 0.35323639],
                                     [1.82650698, 2.62455840]])
        expected_vt_data = np.array([[19.88979966, 19.01921346],
                                     [19.88018847, 19.01424281]])
        # Compare u and v data values against previously calculated values.
        self.assertArrayAllClose(ut.data, expected_ut_data, rtol=1e-5)
        self.assertArrayAllClose(vt.data, expected_vt_data, rtol=1e-5)

    def test_nd_data(self):
        # (second return value is unused here)
        u2d, y2d = self._uv_cubes_limited_extent()
        u, v = uv_cubes_3d(u2d)
        u = u[:, 1:3, 3:5]
        v = v[:, 1:3, 3:5]
        ut, vt = rotate_winds(u, v, iris.coord_systems.OSGB())
        # Values precalculated and checked (as test_data_values above),
        # then scaled by factor [1, 2, 3] along 0th dim (see uv_cubes_3d()).
        expected_ut_data = np.array([[0.16285514, 0.35323639],
                                     [1.82650698, 2.62455840]])
        expected_vt_data = np.array([[19.88979966, 19.01921346],
                                     [19.88018847, 19.01424281]])
        factor = np.array([1, 2, 3]).reshape(3, 1, 1)
        expected_ut_data = factor * expected_ut_data
        expected_vt_data = factor * expected_vt_data
        # Compare u and v data values against previously calculated values.
        self.assertArrayAlmostEqual(ut.data, expected_ut_data)
        self.assertArrayAlmostEqual(vt.data, expected_vt_data)

    def test_transposed(self):
        # Test case where the coordinates are not ordered yx in the cube.
        u, v = self._uv_cubes_limited_extent()
        # Slice out 4 points that lie in and outside OSGB extent.
        u = u[1:3, 3:5]
        v = v[1:3, 3:5]
        # Transpose cubes (in-place)
        u.transpose()
        v.transpose()
        ut, vt = rotate_winds(u, v, iris.coord_systems.OSGB())
        # Values precalculated and checked.
        expected_ut_data = np.array([[0.16285514, 0.35323639],
                                     [1.82650698, 2.62455840]]).T
        expected_vt_data = np.array([[19.88979966, 19.01921346],
                                     [19.88018847, 19.01424281]]).T
        # Compare u and v data values against previously calculated values.
        self.assertArrayAllClose(ut.data, expected_ut_data, rtol=1e-5)
        self.assertArrayAllClose(vt.data, expected_vt_data, rtol=1e-5)
class TestMasking(tests.IrisTest):
    """Check masking of cells where the transform loses too much accuracy."""

    def test_rotated_to_osgb(self):
        # Rotated Pole data with large extent.
        x = np.linspace(311.9, 391.1, 10)
        y = np.linspace(-23.6, 24.8, 8)
        u, v = uv_cubes(x, y)
        ut, vt = rotate_winds(u, v, iris.coord_systems.OSGB())
        # Ensure cells with discrepancies in magnitude are masked.
        self.assertTrue(ma.isMaskedArray(ut.data))
        self.assertTrue(ma.isMaskedArray(vt.data))
        # Snapshot of mask with fixed tolerance of atol=2e-3
        # NOTE: use the builtin `bool` dtype -- the `np.bool` alias was
        # deprecated in NumPy 1.20 and removed in NumPy 1.24.
        expected_mask = np.array([[1, 1, 1, 0, 0, 0, 0, 0, 0, 1],
                                  [1, 1, 1, 0, 0, 0, 0, 0, 0, 1],
                                  [1, 1, 1, 1, 0, 0, 0, 0, 1, 1],
                                  [1, 1, 1, 1, 0, 0, 0, 0, 1, 1],
                                  [1, 1, 1, 1, 0, 0, 0, 0, 1, 1],
                                  [1, 1, 1, 1, 1, 0, 0, 1, 1, 1],
                                  [1, 1, 1, 1, 1, 0, 0, 1, 1, 1],
                                  [1, 1, 1, 1, 1, 0, 0, 1, 1, 1]], bool)
        self.assertArrayEqual(expected_mask, ut.data.mask)
        self.assertArrayEqual(expected_mask, vt.data.mask)
        # Check unmasked values have sufficiently small error in mag.
        expected_mag = np.sqrt(u.data**2 + v.data**2)
        # Use underlying data to ignore mask in calculation.
        res_mag = np.sqrt(ut.data.data**2 + vt.data.data**2)
        # Calculate percentage error (note there are no zero magnitudes
        # so we can divide safely).
        anom = 100.0 * np.abs(res_mag - expected_mag) / expected_mag
        self.assertTrue(anom[~ut.data.mask].max() < 0.1)

    def test_rotated_to_unrotated(self):
        # Sufficiently accurate so that no mask is introduced.
        u, v = uv_cubes()
        ut, vt = rotate_winds(u, v, iris.coord_systems.GeogCS(6371229))
        self.assertFalse(ma.isMaskedArray(ut.data))
        self.assertFalse(ma.isMaskedArray(vt.data))
class TestRoundTrip(tests.IrisTest):
    """Rotate to true-lat-lon and back, checking data and coord recovery."""

    def test_rotated_to_unrotated(self):
        # Check ability to use 2d coords as input.
        u, v = uv_cubes()
        ut, vt = rotate_winds(u, v, iris.coord_systems.GeogCS(6371229))
        # Remove grid lat and lon, leaving 2d projection coords.
        ut.remove_coord('grid_latitude')
        vt.remove_coord('grid_latitude')
        ut.remove_coord('grid_longitude')
        vt.remove_coord('grid_longitude')
        # Change back.
        orig_cs = u.coord('grid_latitude').coord_system
        res_u, res_v = rotate_winds(ut, vt, orig_cs)
        # Check data values - limited accuracy due to numerical approx.
        self.assertArrayAlmostEqual(res_u.data, u.data, decimal=3)
        self.assertArrayAlmostEqual(res_v.data, v.data, decimal=3)
        # Check coords locations.
        x2d, y2d = np.meshgrid(u.coord('grid_longitude').points,
                               u.coord('grid_latitude').points)
        # Shift longitude from 0 to 360 -> -180 to 180.
        x2d = np.where(x2d > 180, x2d - 360, x2d)
        res_x = res_u.coord('projection_x_coordinate',
                            coord_system=orig_cs).points
        res_y = res_u.coord('projection_y_coordinate',
                            coord_system=orig_cs).points
        self.assertArrayAlmostEqual(res_x, x2d)
        self.assertArrayAlmostEqual(res_y, y2d)
        res_x = res_v.coord('projection_x_coordinate',
                            coord_system=orig_cs).points
        res_y = res_v.coord('projection_y_coordinate',
                            coord_system=orig_cs).points
        self.assertArrayAlmostEqual(res_x, x2d)
        self.assertArrayAlmostEqual(res_y, y2d)
# Run the iris test suite when executed directly.
if __name__ == "__main__":
    tests.main()
|
import math
# Mean Earth radius in metres, and common unit-conversion factors.
EARTH_RADIUS = 6367009
METERS_PER_DEGREE = 111319.0
FEET_PER_METER = 3.2808399


def geographic_distance(loc1, loc2):
    """
    Calculate the great circle distance between two points
    on the earth (specified in decimal degrees)
    """
    # Convert decimal degrees to radians.
    lat1 = math.radians(loc1.latitude)
    lon1 = math.radians(loc1.longitude)
    lat2 = math.radians(loc2.latitude)
    lon2 = math.radians(loc2.longitude)
    # Haversine formula.
    half_dlat = (lat2 - lat1) / 2
    half_dlon = (lon2 - lon1) / 2
    hav = (math.sin(half_dlat) ** 2 +
           math.cos(lat1) * math.cos(lat2) * math.sin(half_dlon) ** 2)
    central_angle = 2 * math.asin(math.sqrt(hav))
    return EARTH_RADIUS * central_angle
|
"""Support for Roku API emulation."""
import voluptuous as vol
from homeassistant import config_entries, util
from homeassistant.const import CONF_NAME
import homeassistant.helpers.config_validation as cv
from .binding import EmulatedRoku
from .config_flow import configured_servers
from .const import (
CONF_ADVERTISE_IP, CONF_ADVERTISE_PORT, CONF_HOST_IP, CONF_LISTEN_PORT,
CONF_SERVERS, CONF_UPNP_BIND_MULTICAST, DOMAIN)
# Schema for one emulated Roku server instance: name and listen port are
# required; bind/advertise addresses are optional overrides.
SERVER_CONFIG_SCHEMA = vol.Schema({
    vol.Required(CONF_NAME): cv.string,
    vol.Required(CONF_LISTEN_PORT): cv.port,
    vol.Optional(CONF_HOST_IP): cv.string,
    vol.Optional(CONF_ADVERTISE_IP): cv.string,
    vol.Optional(CONF_ADVERTISE_PORT): cv.port,
    vol.Optional(CONF_UPNP_BIND_MULTICAST): cv.boolean
})

# Component-level schema: a list of server definitions under the domain key.
CONFIG_SCHEMA = vol.Schema({
    DOMAIN: vol.Schema({
        vol.Required(CONF_SERVERS):
            vol.All(cv.ensure_list, [SERVER_CONFIG_SCHEMA]),
    }),
}, extra=vol.ALLOW_EXTRA)
async def async_setup(hass, config):
    """Set up the emulated roku component."""
    conf = config.get(DOMAIN)
    if conf is None:
        # Nothing configured via YAML; config entries may still exist.
        return True
    existing = configured_servers(hass)
    # Import only servers that do not already have a config entry.
    new_entries = (entry for entry in conf[CONF_SERVERS]
                   if entry[CONF_NAME] not in existing)
    for entry in new_entries:
        hass.async_create_task(hass.config_entries.flow.async_init(
            DOMAIN,
            context={'source': config_entries.SOURCE_IMPORT},
            data=entry))
    return True
async def async_setup_entry(hass, config_entry):
    """Set up an emulated roku server from a config entry."""
    conf = config_entry.data
    hass.data.setdefault(DOMAIN, {})
    name = conf[CONF_NAME]
    # Fall back to the detected local IP when no bind address is configured.
    host_ip = conf.get(CONF_HOST_IP) or util.get_local_ip()
    server = EmulatedRoku(
        hass, name, host_ip, conf[CONF_LISTEN_PORT],
        conf.get(CONF_ADVERTISE_IP), conf.get(CONF_ADVERTISE_PORT),
        conf.get(CONF_UPNP_BIND_MULTICAST))
    hass.data[DOMAIN][name] = server
    return await server.setup()
async def async_unload_entry(hass, entry):
    """Unload a config entry."""
    # Drop the server from shared state, then shut it down.
    server = hass.data[DOMAIN].pop(entry.data[CONF_NAME])
    return await server.unload()
|
import time
import pytest
import logging
from repair_tests.repair_test import BaseRepairTest
since = pytest.mark.since
logger = logging.getLogger(__name__)
# JVM system properties tuning stream-deserialization buffer/spill sizes,
# used when testing against legacy sstable formats.
LEGACY_SSTABLES_JVM_ARGS = ["-Dcassandra.streamdes.initial_mem_buffer_size=1",
                            "-Dcassandra.streamdes.max_mem_buffer_size=5",
                            "-Dcassandra.streamdes.max_spill_file_size=16"]
@pytest.mark.upgrade_test
@since('3.0', max_version='3.99')
class TestUpgradeRepair(BaseRepairTest):
    """Verify repair works after upgrading a cluster from 2.2 to 3.x."""

    @since('3.0', max_version='3.99')
    def test_repair_after_upgrade(self):
        """
        @jira_ticket CASSANDRA-10990
        """
        default_install_dir = self.cluster.get_install_dir()
        cluster = self.cluster
        logger.debug("Setting version to 2.2.5")
        cluster.set_install_dir(version="2.2.5")
        self._populate_cluster()
        self._do_upgrade(default_install_dir)
        self._repair_and_verify(True)

    def _do_upgrade(self, default_install_dir):
        """Rolling-upgrade every node in the cluster to the current version."""
        cluster = self.cluster
        for node in cluster.nodelist():
            logger.debug("Upgrading %s to current version" % node.name)
            if node.is_running():
                node.flush()
                time.sleep(1)
                node.stop(wait_other_notice=True)
            node.set_install_dir(install_dir=default_install_dir)
            node.start(wait_other_notice=True, wait_for_binary_proto=True)
            # Connect to wait until the node is accepting CQL; the returned
            # session itself is not needed (was previously bound to an
            # unused local).
            self.patient_cql_connection(node)
        cluster.set_install_dir(default_install_dir)
|
from nova import exception
from nova import flags
from nova.openstack.common import log as logging
from nova import utils
from nova.virt.xenapi import driver as xenapi_conn
from nova.virt.xenapi import volumeops
import nova.volume.driver
LOG = logging.getLogger(__name__)
FLAGS = flags.FLAGS
class XenSMDriver(nova.volume.driver.VolumeDriver):
    """Volume driver backed by the Xen Storage Manager (SM) via xapi.

    Requires the compute connection type to be 'xenapi'. Volumes live in
    storage repositories (SRs) described by backend rows in the nova db.
    """

    def _convert_config_params(self, conf_str):
        # Parse a whitespace-separated "key=value" string into a dict.
        params = dict([item.split("=") for item in conf_str.split()])
        return params

    def _get_introduce_sr_keys(self, params):
        # Build the key list passed along for an SR introduce; 'name_label'
        # is handled separately, so it is dropped from params here (note:
        # this mutates the caller's dict).
        # NOTE: relies on Python 2 semantics where dict.keys() is a list.
        if 'name_label' in params:
            del params['name_label']
        keys = params.keys()
        keys.append('sr_type')
        return keys

    def _create_storage_repo(self, context, backend_ref):
        """Either creates or introduces SR on host
        depending on whether it exists in xapi db."""
        params = self._convert_config_params(backend_ref['config_params'])
        # Use the configured label if given, else derive one from the id.
        if 'name_label' in params:
            label = params['name_label']
            del params['name_label']
        else:
            label = 'SR-' + str(backend_ref['id'])

        params['sr_type'] = backend_ref['sr_type']

        if backend_ref['sr_uuid'] is None:
            # run the sr create command
            try:
                LOG.debug(_('SR name = %s') % label)
                LOG.debug(_('Params: %s') % str(params))
                sr_uuid = self._volumeops.create_sr(label, params)
            # update sr_uuid and created in db
            except Exception as ex:
                LOG.debug(_("Failed to create sr %s...continuing") %
                          str(backend_ref['id']))
                msg = _('Create failed')
                raise exception.VolumeBackendAPIException(data=msg)

            LOG.debug(_('SR UUID of new SR is: %s') % sr_uuid)
            try:
                self.db.sm_backend_conf_update(context,
                                               backend_ref['id'],
                                               dict(sr_uuid=sr_uuid))
            except Exception as ex:
                LOG.exception(ex)
                msg = _("Failed to update db")
                raise exception.VolumeBackendAPIException(data=msg)

        else:
            # sr introduce, if not already done
            try:
                self._volumeops.introduce_sr(backend_ref['sr_uuid'], label,
                                             params)
            except Exception as ex:
                LOG.exception(ex)
                LOG.debug(_("Failed to introduce sr %s...continuing")
                          % str(backend_ref['id']))

    def _create_storage_repos(self, context):
        """Create/Introduce storage repositories at start."""
        backends = self.db.sm_backend_conf_get_all(context)
        for backend in backends:
            try:
                self._create_storage_repo(context, backend)
            except Exception as ex:
                LOG.exception(ex)
                msg = _('Failed to reach backend %d') % backend['id']
                raise exception.VolumeBackendAPIException(data=msg)

    def __init__(self, *args, **kwargs):
        """Connect to the hypervisor."""

        # This driver leverages Xen storage manager, and hence requires
        # hypervisor to be Xen
        if FLAGS.connection_type != 'xenapi':
            msg = _('XenSMDriver requires xenapi connection')
            raise exception.VolumeBackendAPIException(data=msg)

        url = FLAGS.xenapi_connection_url
        username = FLAGS.xenapi_connection_username
        password = FLAGS.xenapi_connection_password
        try:
            session = xenapi_conn.XenAPISession(url, username, password)
            self._volumeops = volumeops.VolumeOps(session)
        except Exception as ex:
            LOG.exception(ex)
            msg = _("Failed to initiate session")
            raise exception.VolumeBackendAPIException(data=msg)

        super(XenSMDriver, self).__init__(execute=utils.execute,
                                          sync_exec=utils.execute,
                                          *args, **kwargs)

    def do_setup(self, ctxt):
        """Setup includes creating or introducing storage repos
           existing in the database and destroying deleted ones."""

        # TODO(renukaapte) purge storage repos
        self.ctxt = ctxt
        self._create_storage_repos(ctxt)

    def create_volume(self, volume):
        """Creates a logical volume. Can optionally return a Dictionary of
        changes to the volume object to be persisted."""

        # For now the scheduling logic will be to try to fit the volume in
        # the first available backend.
        # TODO(renukaapte) better scheduling once APIs are in place
        sm_vol_rec = None
        backends = self.db.sm_backend_conf_get_all(self.ctxt)
        for backend in backends:
            # Ensure that storage repo exists, if not create.
            # This needs to be done because if nova compute and
            # volume are both running on this host, then, as a
            # part of detach_volume, compute could potentially forget SR
            self._create_storage_repo(self.ctxt, backend)
            sm_vol_rec = self._volumeops.create_volume_for_sm(volume,
                                                              backend['sr_uuid'])
            if sm_vol_rec:
                LOG.debug(_('Volume will be created in backend - %d')
                          % backend['id'])
                break

        if sm_vol_rec:
            # Update db
            sm_vol_rec['id'] = volume['id']
            sm_vol_rec['backend_id'] = backend['id']
            try:
                self.db.sm_volume_create(self.ctxt, sm_vol_rec)
            except Exception as ex:
                LOG.exception(ex)
                msg = _("Failed to update volume in db")
                raise exception.VolumeBackendAPIException(data=msg)
        else:
            msg = _('Unable to create volume')
            raise exception.VolumeBackendAPIException(data=msg)

    def delete_volume(self, volume):
        """Delete the VDI backing this volume and its db record."""
        vol_rec = self.db.sm_volume_get(self.ctxt, volume['id'])
        if not vol_rec:
            # NOTE(review): message args are not %-interpolated here --
            # confirm intended.
            raise exception.NotFound(_("Volume %s does not exist"),
                                     volume['id'])
        try:
            # If compute runs on this node, detach could have disconnected SR
            backend_ref = self.db.sm_backend_conf_get(self.ctxt,
                                                      vol_rec['backend_id'])
            self._create_storage_repo(self.ctxt, backend_ref)
            self._volumeops.delete_volume_for_sm(vol_rec['vdi_uuid'])
        except Exception as ex:
            LOG.exception(ex)
            msg = _("Failed to delete vdi")
            raise exception.VolumeBackendAPIException(data=msg)

        try:
            self.db.sm_volume_delete(self.ctxt, volume['id'])
        except Exception as ex:
            LOG.exception(ex)
            msg = _("Failed to delete volume in db")
            raise exception.VolumeBackendAPIException(data=msg)

    def local_path(self, volume):
        # The volume id doubles as its local identifier.
        return str(volume['id'])

    def undiscover_volume(self, volume):
        """Undiscover volume on a remote host."""
        pass

    def discover_volume(self, context, volume):
        """Discover volume on a remote host; returns the volume id."""
        return str(volume['id'])

    def check_for_setup_error(self):
        # No additional setup validation beyond __init__.
        pass

    def create_export(self, context, volume):
        """Exports the volume."""
        pass

    def remove_export(self, context, volume):
        """Removes an export for a logical volume."""
        pass

    def ensure_export(self, context, volume):
        """Safely, synchronously recreates an export for a logical volume."""
        pass

    def initialize_connection(self, volume, connector):
        """Return connection info ('xensm' type) for attaching this volume."""
        try:
            xensm_properties = dict(self.db.sm_volume_get(self.ctxt,
                                                          volume['id']))
        except Exception as ex:
            LOG.exception(ex)
            msg = _("Failed to find volume in db")
            raise exception.VolumeBackendAPIException(data=msg)

        # Keep the volume id key consistent with what ISCSI driver calls it
        xensm_properties['volume_id'] = xensm_properties['id']
        del xensm_properties['id']

        try:
            backend_conf = self.db.sm_backend_conf_get(self.ctxt,
                                                       xensm_properties['backend_id'])
        except Exception as ex:
            LOG.exception(ex)
            msg = _("Failed to find backend in db")
            raise exception.VolumeBackendAPIException(data=msg)

        params = self._convert_config_params(backend_conf['config_params'])

        xensm_properties['flavor_id'] = backend_conf['flavor_id']
        xensm_properties['sr_uuid'] = backend_conf['sr_uuid']
        xensm_properties['sr_type'] = backend_conf['sr_type']
        xensm_properties.update(params)
        _introduce_sr_keys = self._get_introduce_sr_keys(params)
        xensm_properties['introduce_sr_keys'] = _introduce_sr_keys
        return {
            'driver_volume_type': 'xensm',
            'data': xensm_properties
        }

    def terminate_connection(self, volume, connector):
        """No per-connection teardown is required for xensm volumes."""
        pass
|
"""Support for Hydrawise cloud switches."""
import logging
import voluptuous as vol
from homeassistant.components.switch import PLATFORM_SCHEMA, SwitchDevice
from homeassistant.const import CONF_MONITORED_CONDITIONS
import homeassistant.helpers.config_validation as cv
from . import (
ALLOWED_WATERING_TIME, CONF_WATERING_TIME, DATA_HYDRAWISE,
DEFAULT_WATERING_TIME, DEVICE_MAP, DEVICE_MAP_INDEX, SWITCHES,
HydrawiseEntity)
_LOGGER = logging.getLogger(__name__)
# Platform config: which conditions become switches, plus the default
# manual-watering duration (validated against the allowed set).
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
    vol.Optional(CONF_MONITORED_CONDITIONS, default=SWITCHES):
        vol.All(cv.ensure_list, [vol.In(SWITCHES)]),
    vol.Optional(CONF_WATERING_TIME, default=DEFAULT_WATERING_TIME):
        vol.All(vol.In(ALLOWED_WATERING_TIME)),
})
def setup_platform(hass, config, add_entities, discovery_info=None):
    """Set up a sensor for a Hydrawise device."""
    hydrawise = hass.data[DATA_HYDRAWISE].data
    default_watering_timer = config.get(CONF_WATERING_TIME)
    # One switch entity per (monitored condition, zone) pair.
    entities = [
        HydrawiseSwitch(default_watering_timer, zone, sensor_type)
        for sensor_type in config.get(CONF_MONITORED_CONDITIONS)
        for zone in hydrawise.relays
    ]
    add_entities(entities, True)
class HydrawiseSwitch(HydrawiseEntity, SwitchDevice):
    """A switch implementation for Hydrawise device."""

    def __init__(self, default_watering_timer, *args):
        """Initialize a switch for Hydrawise device."""
        super().__init__(*args)
        # Duration used when manually starting a watering run.
        self._default_watering_timer = default_watering_timer

    @property
    def is_on(self):
        """Return true if device is on."""
        return self._state

    def turn_on(self, **kwargs):
        """Turn the device on."""
        # NOTE(review): zone index appears to be self.data['relay'] - 1,
        # i.e. relay numbers look 1-based -- confirm against the API.
        if self._sensor_type == 'manual_watering':
            self.hass.data[DATA_HYDRAWISE].data.run_zone(
                self._default_watering_timer, (self.data['relay']-1))
        elif self._sensor_type == 'auto_watering':
            # Suspending for 0 re-enables automatic watering.
            self.hass.data[DATA_HYDRAWISE].data.suspend_zone(
                0, (self.data['relay']-1))

    def turn_off(self, **kwargs):
        """Turn the device off."""
        if self._sensor_type == 'manual_watering':
            # Running for 0 stops the current manual run.
            self.hass.data[DATA_HYDRAWISE].data.run_zone(
                0, (self.data['relay']-1))
        elif self._sensor_type == 'auto_watering':
            # presumably 365 means "suspend for 365 days" -- verify.
            self.hass.data[DATA_HYDRAWISE].data.suspend_zone(
                365, (self.data['relay']-1))

    def update(self):
        """Update device state."""
        mydata = self.hass.data[DATA_HYDRAWISE].data
        _LOGGER.debug("Updating Hydrawise switch: %s", self._name)
        if self._sensor_type == 'manual_watering':
            # On when this zone's relay is the one currently running.
            if not mydata.running:
                self._state = False
            else:
                self._state = int(
                    mydata.running[0]['relay']) == self.data['relay']
        elif self._sensor_type == 'auto_watering':
            # On when the matching relay is not suspended; state is left
            # unchanged if no matching relay is found.
            for relay in mydata.relays:
                if relay['relay'] == self.data['relay']:
                    if relay.get('suspended') is not None:
                        self._state = False
                    else:
                        self._state = True
                    break

    @property
    def icon(self):
        """Return the icon to use in the frontend, if any."""
        return DEVICE_MAP[self._sensor_type][
            DEVICE_MAP_INDEX.index('ICON_INDEX')]
|
"""Index for object_folders
Revision ID: 1efacad0fff5
Revises: 4d7ce1eaddf2
Create Date: 2014-09-12 21:11:35.908034
"""
# Alembic revision identifiers used by the migration environment.
revision = '1efacad0fff5'
down_revision = '4d7ce1eaddf2'
from alembic import op
import sqlalchemy as sa
def upgrade():
    """Add a composite index on (folderable_type, folderable_id)."""
    # Removed the dead `pass` that followed the statement.
    op.create_index('ix_folderable_id_type', 'object_folders',
                    ['folderable_type', 'folderable_id'])
def downgrade():
    """Drop the (folderable_type, folderable_id) composite index."""
    # Removed the dead `pass` that followed the statement.
    op.drop_index('ix_folderable_id_type', table_name='object_folders')
|
"""Base test class for nbconvert"""
import io
import os
import glob
import shlex
import shutil
import sys
import unittest
import nbconvert
from subprocess import Popen, PIPE
import nose.tools as nt
from nbformat import v4, write
from testpath.tempdir import TemporaryWorkingDirectory
from ipython_genutils.py3compat import string_types, bytes_to_str
class TestsBase(unittest.TestCase):
    """Base tests class. Contains useful fuzzy comparison and nbconvert
    functions."""

    def fuzzy_compare(self, a, b, newlines_are_spaces=True, tabs_are_spaces=True,
                      fuzzy_spacing=True, ignore_spaces=False,
                      ignore_newlines=False, case_sensitive=False, leave_padding=False):
        """
        Performs a fuzzy comparison of two strings. A fuzzy comparison is a
        comparison that ignores insignificant differences in the two comparands.
        The significance of certain differences can be specified via the keyword
        parameters of this method.
        """
        def normalize(text):
            # Apply the requested normalizations, in a fixed order, to one
            # comparand.
            if not leave_padding:
                text = text.strip()
            if ignore_newlines:
                text = text.replace('\n', '')
            if newlines_are_spaces:
                text = text.replace('\n', ' ')
            if tabs_are_spaces:
                text = text.replace('\t', ' ')
            if ignore_spaces:
                text = text.replace(' ', '')
            if fuzzy_spacing:
                text = self.recursive_replace(text, '  ', ' ')
            if not case_sensitive:
                text = text.lower()
            return text

        self.assertEqual(normalize(a), normalize(b))

    def recursive_replace(self, text, search, replacement):
        """
        Performs a recursive replacement operation. Replaces all instances
        of a search string in a text string with a replacement string until
        the search string no longer exists. Recursion is needed because the
        replacement string may generate additional search strings.

        For example:
        Replace "ii" with "i" in the string "Hiiii" yields "Hii";
        another replacement yields "Hi" (the desired output).

        Parameters
        ----------
        text : string
            Text to replace in.
        search : string
            String to search for within "text"
        replacement : string
            String to replace "search" with
        """
        result = text
        while search in result:
            result = result.replace(search, replacement)
        return result

    def create_temp_cwd(self, copy_filenames=None):
        """Create a temporary working directory, optionally seeded with
        test files, and return its handler."""
        temp_dir = TemporaryWorkingDirectory()
        if copy_filenames is not None:
            self.copy_files_to(copy_filenames, dest=temp_dir.name)
        return temp_dir

    def create_empty_notebook(self, path):
        """Write a new, empty v4 notebook file at *path*."""
        notebook = v4.new_notebook()
        with io.open(path, 'w', encoding='utf-8') as f:
            write(notebook, f, 4)

    def copy_files_to(self, copy_filenames, dest='.'):
        """Copy test files matching the given glob patterns into *dest*."""
        if not os.path.isdir(dest):
            os.makedirs(dest)
        files_path = self._get_files_path()
        for pattern in copy_filenames:
            matches = glob.glob(os.path.join(files_path, pattern))
            assert matches
            for source in matches:
                target = os.path.join(dest, os.path.basename(source))
                shutil.copyfile(source, target)

    def _get_files_path(self):
        """Return the absolute path of this module's test 'files' directory."""
        # Relative package path of this module, minus leading package and
        # module name, plus a trailing 'files' component.
        names = self.__module__.split('.')[1:-1]
        names.append('files')
        return os.path.join(os.path.dirname(nbconvert.__file__), *names)

    def nbconvert(self, parameters, ignore_return_code=False, stdin=None):
        """
        Run nbconvert as a shell command, listening for both Errors and
        non-zero return codes. Returns the tuple (stdout, stderr) of
        output produced during the nbconvert run.

        Parameters
        ----------
        parameters : str, list(str)
            List of parameters to pass to IPython.
        ignore_return_code : optional bool (default False)
            Unless set, raise OSError on a non-zero return code.
        """
        if isinstance(parameters, string_types):
            parameters = shlex.split(parameters)
        cmd = [sys.executable, '-m', 'nbconvert'] + parameters
        proc = Popen(cmd, stdout=PIPE, stderr=PIPE, stdin=PIPE)
        stdout, stderr = proc.communicate(input=stdin)
        if proc.returncode != 0 and not ignore_return_code:
            raise OSError(bytes_to_str(stderr))
        return stdout.decode('utf8', 'replace'), stderr.decode('utf8', 'replace')
def assert_big_text_equal(a, b, chunk_size=80):
    """assert that large strings are equal

    Compares chunk by chunk so the failure message zooms in on the first
    differing chunk instead of dumping both blobs, then fails on any
    length mismatch, showing the trailing extra text.
    """
    for offset in range(0, len(a), chunk_size):
        piece_a = a[offset:offset + chunk_size]
        piece_b = b[offset:offset + chunk_size]
        nt.assert_equal(piece_a, piece_b, "[offset: %i]\n%r != \n%r" % (
            offset, piece_a, piece_b))
    if len(a) > len(b):
        nt.fail("Length doesn't match (%i > %i). Extra text:\n%r" % (
            len(a), len(b), a[len(b):]
        ))
    elif len(a) < len(b):
        nt.fail("Length doesn't match (%i < %i). Extra text:\n%r" % (
            len(a), len(b), b[len(a):]
        ))
|
from sqlalchemy import *
from sqlalchemy.test import *
class FoundRowsTest(TestBase, AssertsExecutionResults):
    """tests rowcount functionality"""
    # Only run on dialects whose DBAPI reports accurate rowcounts.
    __requires__ = ('sane_rowcount', )
    @classmethod
    def setup_class(cls):
        # Create the shared employees table once for the whole class.
        global employees_table, metadata
        metadata = MetaData(testing.db)
        employees_table = Table('employees', metadata,
            Column('employee_id', Integer,
                Sequence('employee_id_seq', optional=True),
                primary_key=True),
            Column('name', String(50)),
            Column('department', String(1)),
        )
        metadata.create_all()
    def setup(self):
        # Re-insert a fresh, known data set before every test:
        # three rows each in departments A, B and C.
        global data
        data = [ ('Angela', 'A'),
                 ('Andrew', 'A'),
                 ('Anand', 'A'),
                 ('Bob', 'B'),
                 ('Bobette', 'B'),
                 ('Buffy', 'B'),
                 ('Charlie', 'C'),
                 ('Cynthia', 'C'),
                 ('Chris', 'C') ]
        i = employees_table.insert()
        i.execute(*[{'name':n, 'department':d} for n, d in data])
    def teardown(self):
        # Delete every row so the next test starts clean.
        employees_table.delete().execute()
    @classmethod
    def teardown_class(cls):
        metadata.drop_all()
    def testbasic(self):
        # Sanity check: a plain SELECT returns every inserted row.
        s = employees_table.select()
        r = s.execute().fetchall()
        assert len(r) == len(data)
    def test_update_rowcount1(self):
        # WHERE matches 3, 3 rows changed
        department = employees_table.c.department
        r = employees_table.update(department=='C').execute(department='Z')
        print "expecting 3, dialect reports %s" % r.rowcount
        assert r.rowcount == 3
    def test_update_rowcount2(self):
        # WHERE matches 3, 0 rows changed
        # NOTE(review): even though no value changes, rowcount is still
        # expected to be 3 — 'sane_rowcount' dialects report matched
        # rows, not modified rows. Confirm for the target DBAPI.
        department = employees_table.c.department
        r = employees_table.update(department=='C').execute(department='C')
        print "expecting 3, dialect reports %s" % r.rowcount
        assert r.rowcount == 3
    def test_delete_rowcount(self):
        # WHERE matches 3, 3 rows deleted
        department = employees_table.c.department
        r = employees_table.delete(department=='C').execute()
        print "expecting 3, dialect reports %s" % r.rowcount
        assert r.rowcount == 3
|
from datetime import datetime
from decimal import Decimal
from django.contrib.sites.models import Site
from django.db import models
from django.utils.translation import ugettext_lazy as _
from l10n.utils import moneyfmt
from payment.modules.giftcertificate.utils import generate_certificate_code
from payment.utils import get_processor_by_key
from product.models import Product
from satchmo_store.contact.models import Contact
from satchmo_store.shop.models import Order
import logging
# Order-variable name under which the gift certificate code is stored.
GIFTCODE_KEY = 'GIFTCODE'
log = logging.getLogger('giftcertificate.models')
# Marks this module as providing a Satchmo product type.
SATCHMO_PRODUCT = True
def get_product_types():
    """Return the names of the product subtypes this module provides."""
    return ("GiftcertificateProduct",)
class GiftCertificateManager(models.Manager):
    """Manager adding lookup of the certificate referenced by an order."""
    def from_order(self, order):
        """Return the valid GiftCertificate whose code is stored in the
        order's GIFTCODE variable, or raise GiftCertificate.DoesNotExist."""
        code = order.get_variable(GIFTCODE_KEY, "")
        log.debug("GiftCert.from_order code=%s", code)
        if not code:
            raise GiftCertificate.DoesNotExist()
        return GiftCertificate.objects.get(code__exact=code.value,
                                           valid__exact=True, site=order.site)
class GiftCertificate(models.Model):
    """A Gift Cert which holds value."""
    # Defaults to the current site on first save() when left blank.
    site = models.ForeignKey(Site, null=True, blank=True, verbose_name=_('Site'))
    order = models.ForeignKey(Order, null=True, blank=True, related_name="giftcertificates", verbose_name=_('Order'))
    # Auto-generated on first save() when left blank.
    code = models.CharField(_('Certificate Code'), max_length=100,
        blank=True, null=True)
    purchased_by = models.ForeignKey(Contact, verbose_name=_('Purchased by'),
        blank=True, null=True, related_name='giftcertificates_purchased')
    # Stamped automatically on first save().
    date_added = models.DateField(_("Date added"), null=True, blank=True)
    valid = models.BooleanField(_('Valid'), default=True)
    message = models.CharField(_('Message'), blank=True, null=True, max_length=255)
    recipient_email = models.EmailField(_("Email"), blank=True, max_length=75)
    start_balance = models.DecimalField(_("Starting Balance"), decimal_places=2,
        max_digits=8)
    objects = GiftCertificateManager()
    def balance(self):
        # Remaining value: starting balance minus the sum of all usages.
        b = Decimal(self.start_balance)
        for usage in self.usages.all():
            log.info('usage: %s' % usage)
            b = b - Decimal(usage.balance_used)
        return b
    balance = property(balance)
    def apply_to_order(self, order):
        """Apply up to the full amount of the balance of this cert to the order.
        Returns new balance.
        """
        # Never apply more than the order still owes or the cert holds.
        amount = min(order.balance, self.balance)
        log.info('applying %s from giftcert #%i [%s] to order #%i [%s]',
            moneyfmt(amount),
            self.id,
            moneyfmt(self.balance),
            order.id,
            moneyfmt(order.balance))
        processor = get_processor_by_key('PAYMENT_GIFTCERTIFICATE')
        orderpayment = processor.record_payment(order=order, amount=amount)
        # NOTE(review): 'orderpayment' is a plain attribute, not a model
        # field, so this assignment is not persisted — confirm intended.
        self.orderpayment = orderpayment
        return self.use(amount, orderpayment=orderpayment)
    def use(self, amount, notes="", orderpayment=None):
        """Use some amount of the gift cert, returning the current balance."""
        u = GiftCertificateUsage(notes=notes, balance_used = amount,
            orderpayment=orderpayment, giftcertificate=self)
        u.save()
        return self.balance
    def save(self, **kwargs):
        # Populate date, code and site defaults on first save only.
        if not self.pk:
            self.date_added = datetime.now()
            if not self.code:
                self.code = generate_certificate_code()
            if not self.site:
                self.site = Site.objects.get_current()
        super(GiftCertificate, self).save(**kwargs)
    def __unicode__(self):
        sb = moneyfmt(self.start_balance)
        b = moneyfmt(self.balance)
        return u"Gift Cert: %s/%s" % (sb, b)
    class Meta:
        verbose_name = _("Gift Certificate")
        verbose_name_plural = _("Gift Certificates")
class GiftCertificateUsage(models.Model):
    """Any usage of a Gift Cert is logged with one of these objects."""
    # Stamped automatically on first save().
    usage_date = models.DateField(_("Date of usage"), null=True, blank=True)
    notes = models.TextField(_('Notes'), blank=True, null=True)
    balance_used = models.DecimalField(_("Amount Used"), decimal_places=2,
        max_digits=8, )
    orderpayment = models.ForeignKey('shop.OrderPayment', null=True, verbose_name=_('Order Payment'))
    used_by = models.ForeignKey(Contact, verbose_name=_('Used by'),
        blank=True, null=True, related_name='giftcertificates_used')
    # Reverse accessor 'usages' is summed by GiftCertificate.balance.
    giftcertificate = models.ForeignKey(GiftCertificate, related_name='usages')
    def __unicode__(self):
        return u"GiftCertificateUsage: %s" % self.balance_used
    def save(self, **kwargs):
        # Stamp the usage date on first save only.
        if not self.pk:
            self.usage_date = datetime.now()
        super(GiftCertificateUsage, self).save(**kwargs)
class GiftCertificateProduct(models.Model):
    """
    The product model for a Gift Certificate
    """
    product = models.OneToOneField(Product, verbose_name=_('Product'), primary_key=True)
    # Gift certificates are virtual goods: never shipped, never discounted.
    is_shippable = False
    discountable = False
    def __unicode__(self):
        return u"GiftCertificateProduct: %s" % self.product.name
    def _get_subtype(self):
        # Subtype name used by Satchmo's product subtype machinery.
        return 'GiftCertificateProduct'
    def order_success(self, order, order_item):
        """Create a GiftCertificate when an order containing one succeeds."""
        log.debug("Order success called, creating gift certs on order: %s", order)
        # Pull the recipient email and personal message from the order
        # item details captured at purchase time.
        message = ""
        email = ""
        for detl in order_item.orderitemdetail_set.all():
            if detl.name == "email":
                email = detl.value
            elif detl.name == "message":
                message = detl.value
        price=order_item.line_item_price
        log.debug("Creating gc for %s", price)
        gc = GiftCertificate(
            order = order,
            start_balance= price,
            purchased_by = order.contact,
            valid=True,
            message=message,
            recipient_email=email
        )
        gc.save()
    def save(self, **kwargs):
        # Drop the product's memoized '_sub_types' attribute (presumably a
        # subtype cache — confirm) so it is recomputed after this save.
        if hasattr(self.product,'_sub_types'):
            del self.product._sub_types
        super(GiftCertificateProduct, self).save(**kwargs)
    class Meta:
        verbose_name = _("Gift certificate product")
        verbose_name_plural = _("Gift certificate products")
# Satchmo payment-module registration: importing config registers this
# module's settings; the flag marks it as providing a payment processor.
import config
PAYMENT_PROCESSOR=True
|
import unittest
import os
import IECore
import IECoreRI
class TeapotProcedural( IECore.ParameterisedProcedural ) :
	"""A minimal procedural that renders a RenderMan teapot and records
	which of its hooks (bound / state / render) have been invoked."""
	def __init__( self ) :
		IECore.ParameterisedProcedural.__init__( self )
		# Flags checked by the tests to see which hooks actually ran.
		self.renderStateCalled = False
		self.renderCalled = False
		self.boundCalled = False
	def doBound( self, args ) :
		self.boundCalled = True
		# A unit-ish box centred on the origin.
		return IECore.Box3f( IECore.V3f( -1 ), IECore.V3f( 1 ) )
	def doRenderState( self, renderer, args ) :
		self.renderStateCalled = True
		renderer.setAttribute( "ri:visibility:diffuse", IECore.BoolData( 1 ) )
	def doRender( self, renderer, args ) :
		self.renderCalled = True
		renderer.geometry( "teapot", {}, {} )
class ParameterisedProceduralTest( IECoreRI.TestCase ) :
	"""Checks that ParameterisedProcedural.render() emits the expected RIB
	depending on the inAttributeBlock/withState/withGeometry/immediateGeometry
	flags, and that the matching hooks are (not) invoked."""
	def checkContents( self, fileName, expectedElements, unexpectedElements ) :
		"""Assert each string in expectedElements occurs in the file, in
		order, and that no string in unexpectedElements occurs anywhere."""
		l = file( fileName ).readlines()
		# Forward-only scan: each expected element must appear at or after
		# the line where the previous one was found.
		lineIndex = 0
		for expected in expectedElements :
			found = False
			for i in range( lineIndex, len( l ) ) :
				if expected in l[i] :
					lineIndex = i
					found = True
					break
			self.assert_( found )
		for e in unexpectedElements :
			for ll in l :
				self.assert_( e not in ll )
	def testNormalCall( self ) :
		# Default render(): attribute block + state + geometry, and all
		# three hooks fire.
		r = IECoreRI.Renderer( "test/IECoreRI/output/testParameterisedProcedural.rib" )
		r.worldBegin()
		t = TeapotProcedural()
		t.render( r )
		r.worldEnd()
		self.checkContents(
			"test/IECoreRI/output/testParameterisedProcedural.rib",
			[
				"AttributeBegin",
				"Attribute \"visibility\" \"int diffuse\" [ 1 ]",
				"Geometry \"teapot\"",
				"AttributeEnd",
			],
			[]
		)
		self.assertEqual( t.renderStateCalled, True )
		self.assertEqual( t.boundCalled, True )
		self.assertEqual( t.renderCalled, True )
	def testStateOnly( self ) :
		# withState only: the attribute state is emitted, with no geometry
		# and no enclosing attribute block; only the state hook fires.
		r = IECoreRI.Renderer( "test/IECoreRI/output/testParameterisedProcedural.rib" )
		r.worldBegin()
		t = TeapotProcedural()
		t.render( r, inAttributeBlock=False, withState=True, withGeometry=False )
		r.worldEnd()
		self.checkContents(
			"test/IECoreRI/output/testParameterisedProcedural.rib",
			[
				"Attribute \"visibility\" \"int diffuse\" [ 1 ]",
			],
			[
				"AttributeBegin",
				"Geometry \"teapot\"",
				"AttributeEnd",
			],
		)
		self.assertEqual( t.renderStateCalled, True )
		self.assertEqual( t.boundCalled, False )
		self.assertEqual( t.renderCalled, False )
	def testImmediateGeometryOnly( self ) :
		# immediateGeometry: geometry is emitted directly (doBound is not
		# needed), with no state and no attribute block.
		r = IECoreRI.Renderer( "test/IECoreRI/output/testParameterisedProcedural.rib" )
		r.worldBegin()
		t = TeapotProcedural()
		t.render( r, inAttributeBlock=False, withState=False, withGeometry=True, immediateGeometry=True )
		r.worldEnd()
		self.checkContents(
			"test/IECoreRI/output/testParameterisedProcedural.rib",
			[
				"Geometry \"teapot\"",
			],
			[
				"AttributeBegin",
				"Attribute \"visibility\" \"int diffuse\" [ 1 ]",
				"AttributeEnd",
			],
		)
		self.assertEqual( t.renderStateCalled, False )
		self.assertEqual( t.boundCalled, False )
		self.assertEqual( t.renderCalled, True )
# Run the tests when this file is executed directly.
if __name__ == "__main__":
	unittest.main()
|
import dask.dataframe as dd
import pandas.util.testing as tm
import pandas as pd
from dask.dataframe.shuffle import shuffle
import partd
from dask.async import get_sync
# Module-level fixture: a three-partition dask DataFrame over columns
# a/b with divisions (0, 4, 9, 9), plus its computed pandas counterpart.
dsk = {('x', 0): pd.DataFrame({'a': [1, 2, 3], 'b': [1, 4, 7]},
       index=[0, 1, 3]),
       ('x', 1): pd.DataFrame({'a': [4, 5, 6], 'b': [2, 5, 8]},
       index=[5, 6, 8]),
       ('x', 2): pd.DataFrame({'a': [7, 8, 9], 'b': [3, 6, 9]},
       index=[9, 9, 9])}
d = dd.DataFrame(dsk, 'x', ['a', 'b'], [0, 4, 9, 9])
full = d.compute()
def test_shuffle():
    """Shuffling on a column yields disjoint partitions and a
    deterministic task name."""
    shuffled = shuffle(d, d.b, npartitions=2)
    assert isinstance(shuffled, dd.DataFrame)
    assert shuffled.npartitions == 2
    part0 = get_sync(shuffled.dask, (shuffled._name, 0))
    part1 = get_sync(shuffled.dask, (shuffled._name, 1))
    # No value of column b may land in both output partitions.
    assert not (set(part0.b) & set(part1.b))  # disjoint
    # Identical inputs tokenize to the same graph key.
    assert shuffle(d, d.b, npartitions=2)._name == shuffle(d, d.b, npartitions=2)._name
def test_default_partitions():
    """Without npartitions, shuffle keeps the input's partition count."""
    shuffled = shuffle(d, d.b)
    assert shuffled.npartitions == d.npartitions
def test_index_with_non_series():
    """A column name and the Series itself shuffle identically."""
    by_series = shuffle(d, d.b).compute()
    by_name = shuffle(d, 'b').compute()
    tm.assert_frame_equal(by_series, by_name)
def test_index_with_dataframe():
    """DataFrame, list-of-columns and column-name keys all shuffle the
    same rows (compared order-insensitively)."""
    results = [sorted(shuffle(d, key).compute().values.tolist())
               for key in (d[['b']], ['b'], 'b')]
    assert results[0] == results[1] == results[2]
def test_shuffle_from_one_partition_to_one_other():
    """Row count is preserved when re-sharding a single-partition frame."""
    frame = pd.DataFrame({'x': [1, 2, 3]})
    source = dd.from_pandas(frame, 1)
    for npartitions in (1, 2):
        shuffled = shuffle(source, 'x', npartitions)
        assert len(source.compute(get=get_sync)) == len(shuffled.compute(get=get_sync))
|
import argparse
import os
import subprocess
import mongoengine as me
import rmc.html_snapshots.utils as utils
import rmc.shared.constants as c
def crawl_page(url):
    """Render *url* with phantomjs and return the resulting HTML bytes."""
    command = [
        'phantomjs',
        '--disk-cache=true',
        os.path.join(utils.FILE_DIR, 'phantom-server.js'),
        url,
    ]
    # check_output raises CalledProcessError on a non-zero exit status.
    return subprocess.check_output(command)
def generate_snapshots(base_url, overwrite=False):
urls = utils.generate_urls()
for url in urls:
# For urls that end with a trailing slash, create them
# as the index page of a subdirectory
if url and url[0] == '/':
url = url[1:]
if not url:
file_path = 'index'
file_url = ''
elif url[-1] == '/':
file_path = url + 'index'
file_url = url
else:
file_path = url
file_url = url
file_path = os.path.join(utils.HTML_DIR, file_path)
if os.path.isdir(file_path):
print 'Cannot have file_path that is directory: %s' % file_path
if not overwrite and os.path.isfile(file_path):
continue
full_url = os.path.join(base_url, file_url)
rendered_html = crawl_page(full_url)
print 'Writing: %s' % url
utils.write(file_path, rendered_html)
if __name__ == "__main__":
    # CLI entry point: parse arguments, open the DB connection, crawl.
    parser = argparse.ArgumentParser(
        description='Process snapshotting arguments')
    parser.add_argument('base_url', type=str)
    parser.add_argument('--overwrite', dest='overwrite', action='store_true')
    args = parser.parse_args()
    # Connect to MongoDB before crawling — presumably required by
    # utils.generate_urls(); confirm against that module.
    me.connect(c.MONGO_DB_RMC, host=c.MONGO_HOST, port=c.MONGO_PORT)
    generate_snapshots(args.base_url, args.overwrite)
|
from sympy import symbols, Symbol, exp, log, pi, Rational, S
from sympy.codegen.cfunctions import (
expm1, log1p, exp2, log2, fma, log10, Sqrt, Cbrt, hypot
)
from sympy.core.function import expand_log
def test_expm1():
    """expm1: evaluation, rewriting, numeric precision, assumptions, diff."""
    # Eval
    assert expm1(0) == 0
    x = Symbol('x', real=True, finite=True)
    # Expand and rewrite
    assert expm1(x).expand(func=True) - exp(x) == -1
    assert expm1(x).rewrite('tractable') - exp(x) == -1
    assert expm1(x).rewrite('exp') - exp(x) == -1
    # Precision: the naive exp(x) - 1 loses the tiny 5e-21 term to
    # cancellation at default precision, while expm1 retains it.
    assert not ((exp(1e-10).evalf() - 1) - 1e-10 - 5e-21) < 1e-22  # for comparison
    assert abs(expm1(1e-10).evalf() - 1e-10 - 5e-21) < 1e-22
    # Properties
    assert expm1(x).is_real
    assert expm1(x).is_finite
    # Diff
    assert expm1(42*x).diff(x) - 42*exp(42*x) == 0
    assert expm1(42*x).diff(x) - expm1(42*x).expand(func=True).diff(x) == 0
def test_log1p():
    """log1p: evaluation, rewriting, numeric precision, assumptions, diff."""
    # Eval
    assert log1p(0) == 0
    d = S(10)
    assert expand_log(log1p(d**-1000) - log(d**1000 + 1) + log(d**1000)) == 0
    x = Symbol('x', real=True, finite=True)
    # Expand and rewrite
    assert log1p(x).expand(func=True) - log(x + 1) == 0
    assert log1p(x).rewrite('tractable') - log(x + 1) == 0
    assert log1p(x).rewrite('log') - log(x + 1) == 0
    # Precision: log(1 + eps) evaluated naively loses eps entirely,
    # while log1p keeps it.
    assert not abs(log(1e-99 + 1).evalf() - 1e-99) < 1e-100  # for comparison
    assert abs(expand_log(log1p(1e-99)).evalf() - 1e-99) < 1e-100
    # Properties (assumption propagation through log(1 + x))
    assert log1p(-2**(-S(1)/2)).is_real
    assert not log1p(-1).is_finite
    assert log1p(pi).is_finite
    assert not log1p(x).is_positive
    assert log1p(Symbol('y', positive=True)).is_positive
    assert not log1p(x).is_zero
    assert log1p(Symbol('z', zero=True)).is_zero
    assert not log1p(x).is_nonnegative
    assert log1p(Symbol('o', nonnegative=True)).is_nonnegative
    # Diff
    assert log1p(42*x).diff(x) - 42/(42*x + 1) == 0
    assert log1p(42*x).diff(x) - log1p(42*x).expand(func=True).diff(x) == 0
def test_exp2():
    """exp2: evaluation, expansion to 2**x, and differentiation."""
    # Eval
    assert exp2(2) == 4
    x = Symbol('x', real=True, finite=True)
    # Expand
    assert exp2(x).expand(func=True) - 2**x == 0
    # Diff
    assert exp2(42*x).diff(x) - 42*exp2(42*x)*log(2) == 0
    # BUG FIX: this previously compared exp2(42*x).diff(x) with itself
    # (vacuously true). Compare against the expanded form's derivative,
    # mirroring every other test in this module.
    assert exp2(42*x).diff(x) - exp2(42*x).expand(func=True).diff(x) == 0
def test_log2():
    """log2 stays unevaluated symbolically but simplifies exact powers."""
    # Eval
    assert log2(8) == 3
    assert log2(pi) != log(pi)/log(2)  # log2 should *save* (CPU) instructions
    x = Symbol('x', real=True, finite=True)
    assert log2(x) != log(x)/log(2)
    assert log2(2**x) == x
    # Expand
    assert log2(x).expand(func=True) - log(x)/log(2) == 0
    # Diff: the chain rule cancels the 42 factor.
    derivative = log2(42*x).diff()
    assert derivative - 1/(log(2)*x) == 0
    assert derivative - log2(42*x).expand(func=True).diff(x) == 0
def test_fma():
    """fma(x, y, z) expands to x*y + z and differentiates accordingly."""
    x, y, z = symbols('x y z')
    # Expand
    assert fma(x, y, z).expand(func=True) - x*y - z == 0
    expr = fma(17*x, 42*y, 101*z)
    expanded = expr.expand(func=True)
    # Derivatives of the unexpanded form agree with the expanded form.
    for sym in (x, y, z):
        assert expr.diff(sym) - expanded.diff(sym) == 0
    # And match the hand-computed partials.
    assert expr.diff(x) - 17*42*y == 0
    assert expr.diff(y) - 17*42*x == 0
    assert expr.diff(z) - 101 == 0
def test_log10():
    """log10 expands to log(x)/log(10) with the matching derivative."""
    x = Symbol('x')
    # Expand
    assert log10(x).expand(func=True) - log(x)/log(10) == 0
    # Diff: the chain rule cancels the 42 factor.
    derivative = log10(42*x).diff(x)
    assert derivative - 1/(log(10)*x) == 0
    assert derivative - log10(42*x).expand(func=True).diff(x) == 0
def test_Cbrt():
    """Cbrt expands to x**(1/3) with the matching derivative."""
    x = Symbol('x')
    # Expand
    assert Cbrt(x).expand(func=True) - x**Rational(1, 3) == 0
    # Diff
    derivative = Cbrt(42*x).diff(x)
    assert derivative - 42*(42*x)**(Rational(1, 3) - 1)/3 == 0
    assert derivative - Cbrt(42*x).expand(func=True).diff(x) == 0
def test_Sqrt():
    """Sqrt expands to x**(1/2) with the matching derivative."""
    x = Symbol('x')
    # Expand
    assert Sqrt(x).expand(func=True) - x**Rational(1, 2) == 0
    # Diff
    derivative = Sqrt(42*x).diff(x)
    assert derivative - 42*(42*x)**(Rational(1, 2) - 1)/2 == 0
    assert derivative - Sqrt(42*x).expand(func=True).diff(x) == 0
def test_hypot():
    """hypot expands to sqrt(x**2 + y**2) with matching partials."""
    x, y = symbols('x y')
    # Expand
    assert hypot(x, y).expand(func=True) - (x**2 + y**2)**Rational(1, 2) == 0
    expr = hypot(17*x, 42*y)
    expanded = expr.expand(func=True)
    dx = expr.diff(x).expand(func=True)
    dy = expr.diff(y).expand(func=True)
    # Partials of the unexpanded form agree with the expanded form...
    assert dx - expanded.diff(x) == 0
    assert dy - expanded.diff(y) == 0
    # ...and with the hand-computed expressions.
    assert dx - 2*17*17*x*((17*x)**2 + (42*y)**2)**Rational(-1, 2)/2 == 0
    assert dy - 2*42*42*y*((17*x)**2 + (42*y)**2)**Rational(-1, 2)/2 == 0
|
from django import template
from django.template import resolve_variable, NodeList
from django.contrib.auth.models import Group
# Library instance through which this module's template tags register.
register = template.Library()
@register.tag()
def ifusergroup(parser, token):
    """Render the enclosed block only when the logged-in user belongs to
    one of the named groups. Requires the Django auth contrib app and
    middleware.

    Usage: {% ifusergroup Admins %} ... {% endifusergroup %}, or
           {% ifusergroup Admins|Group1|"Group 2" %} ... {% endifusergroup %}, or
           {% ifusergroup Admins %} ... {% else %} ... {% endifusergroup %}
    """
    try:
        _, group = token.split_contents()
    except ValueError:
        raise template.TemplateSyntaxError("Tag 'ifusergroup' requires 1 argument.")
    # Collect the true branch up to the optional {% else %}.
    branch_true = parser.parse(('else', 'endifusergroup'))
    token = parser.next_token()
    if token.contents == 'else':
        branch_false = parser.parse(('endifusergroup',))
        parser.delete_first_token()
    else:
        branch_false = NodeList()
    return GroupCheckNode(group, branch_true, branch_false)
class GroupCheckNode(template.Node):
    """Node rendering one of two branches based on group membership.

    *group* is the raw tag argument: pipe-separated group names, each
    optionally wrapped in double quotes.
    """
    def __init__(self, group, nodelist_true, nodelist_false):
        self.group = group
        self.nodelist_true = nodelist_true
        self.nodelist_false = nodelist_false
    def render(self, context):
        user = resolve_variable('user', context)
        # Anonymous users can never be in a group.
        if not user.is_authenticated():
            return self.nodelist_false.render(context)
        for name in self.group.split("|"):
            # Strip surrounding double quotes from quoted group names.
            if name.startswith('"') and name.endswith('"'):
                name = name[1:-1]
            try:
                if Group.objects.get(name=name) in user.groups.all():
                    return self.nodelist_true.render(context)
            except Group.DoesNotExist:
                # Unknown group names are simply skipped.
                pass
        return self.nodelist_false.render(context)
|
from machinekit import hal
from machinekit import rtapi as rt
from machinekit import config as c
from fdm.config import base
def hardware_read():
    """Attach the input-side HAL functions to the servo thread.

    The tuple order is preserved because addf order determines the
    functions' execution order within the thread.
    """
    for func in ('hpg.capture-position', 'bb_gpio.read'):
        hal.addf(func, 'servo-thread')
def hardware_write():
    """Attach the output-side HAL functions to the servo thread.

    The tuple order is preserved because addf order determines the
    functions' execution order within the thread.
    """
    for func in ('hpg.update', 'bb_gpio.write'):
        hal.addf(func, 'servo-thread')
def init_hardware():
    """Load the low-level realtime drivers and user-space HAL components
    (GPIO, PRU step/PWM generator, BBB temperature reader + watchdog)."""
    watchList = []
    # load low-level drivers
    rt.loadrt('hal_bb_gpio', output_pins='816,822,823,824,825,826,914,923,925', input_pins='807,808,809,810,817,911,913')
    prubin = '%s/%s' % (c.Config().EMC2_RTLIB_DIR, c.find('PRUCONF', 'PRUBIN'))
    rt.loadrt(c.find('PRUCONF', 'DRIVER'),
              pru=0, num_stepgens=6, num_pwmgens=6,
              prucode=prubin, halname='hpg')
    # Python user-mode HAL module to read ADC value and generate a thermostat output for PWM
    defaultThermistor = 'semitec_103GT_2'
    hal.loadusr('hal_temp_bbb',
                name='temp',
                interval=0.05,
                filter_size=1,
                cape_board='CRAMPS',
                channels='04:%s,05:%s,02:%s,03:%s'
                % (c.find('HBP', 'THERMISTOR', defaultThermistor),
                   c.find('EXTRUDER_0', 'THERMISTOR', defaultThermistor),
                   c.find('EXTRUDER_1', 'THERMISTOR', defaultThermistor),
                   c.find('EXTRUDER_2', 'THERMISTOR', defaultThermistor)),
                wait_name='temp')
    # Watch the temp component and wire it into the e-stop watchdog chain.
    watchList.append(['temp', 0.1])
    base.usrcomp_status('temp', 'temp-hw', thread='servo-thread')
    base.usrcomp_watchdog(watchList, 'estop-reset', thread='servo-thread',
                          errorSignal='watchdog-error')
def setup_hardware(thread):
    """Wire PWM generators, GPIO, ADC channels and step generators to the
    signals used by the rest of the configuration.

    The *thread* argument is accepted for interface consistency with the
    sibling config functions but is not used in this body.
    """
    # PWM
    hal.Pin('hpg.pwmgen.00.pwm_period').set(10000000)  # 100Hz
    hal.Pin('hpg.pwmgen.00.out.00.pin').set(811)
    hal.Pin('hpg.pwmgen.00.out.01.pin').set(915)
    hal.Pin('hpg.pwmgen.00.out.02.pin').set(927)
    hal.Pin('hpg.pwmgen.00.out.03.pin').set(921)
    hal.Pin('hpg.pwmgen.00.out.04.pin').set(941)
    hal.Pin('hpg.pwmgen.00.out.05.pin').set(922)
    # HBP (heated bed) on PWM output 00, always enabled.
    hal.Pin('hpg.pwmgen.00.out.00.enable').set(True)
    hal.Pin('hpg.pwmgen.00.out.00.value').link('hbp-temp-pwm')
    # configure extruders: heaters on PWM outputs 01-03, always enabled.
    for n in range(0, 3):
        hal.Pin('hpg.pwmgen.00.out.%02i.enable' % (n + 1)).set(True)
        hal.Pin('hpg.pwmgen.00.out.%02i.value' % (n + 1)).link('e%i-temp-pwm' % n)
    # configure fans: PWM outputs 04-05, enable controlled per-fan by signal.
    for n in range(0, 2):
        hal.Pin('hpg.pwmgen.00.out.%02i.enable' % (n + 4)).link('f%i-pwm-enable' % n)
        hal.Pin('hpg.pwmgen.00.out.%02i.value' % (n + 4)).link('f%i-pwm' % n)
        hal.Signal('f%i-pwm-enable' % n).set(True)
    # configure leds
    # none
    # GPIO: endstop inputs for the X/Y/Z axes.
    hal.Pin('bb_gpio.p8.in-08').link('limit-0-home')  # X
    hal.Pin('bb_gpio.p8.in-07').link('limit-0-max')  # X
    hal.Pin('bb_gpio.p8.in-10').link('limit-1-home')  # Y
    hal.Pin('bb_gpio.p8.in-09').link('limit-1-max')  # Y
    hal.Pin('bb_gpio.p9.in-13').link('limit-2-home')  # Z
    hal.Pin('bb_gpio.p9.in-11').link('limit-2-max')  # Z
    # probe ...
    # Adjust as needed for your switch polarity
    hal.Pin('bb_gpio.p8.in-08.invert').set(True)
    hal.Pin('bb_gpio.p8.in-07.invert').set(True)
    hal.Pin('bb_gpio.p8.in-10.invert').set(True)
    hal.Pin('bb_gpio.p8.in-09.invert').set(True)
    hal.Pin('bb_gpio.p9.in-13.invert').set(True)
    hal.Pin('bb_gpio.p9.in-11.invert').set(True)
    # ADC: thermistor channels for bed and the three extruders.
    hal.Pin('temp.ch-04.value').link('hbp-temp-meas')
    hal.Pin('temp.ch-05.value').link('e0-temp-meas')
    hal.Pin('temp.ch-02.value').link('e1-temp-meas')
    hal.Pin('temp.ch-03.value').link('e2-temp-meas')
    # Stepper: step/dir header pins for the six step generators.
    hal.Pin('hpg.stepgen.00.steppin').set(813)
    hal.Pin('hpg.stepgen.00.dirpin').set(812)
    hal.Pin('hpg.stepgen.01.steppin').set(815)
    hal.Pin('hpg.stepgen.01.dirpin').set(814)
    hal.Pin('hpg.stepgen.02.steppin').set(819)
    hal.Pin('hpg.stepgen.02.dirpin').set(818)
    hal.Pin('hpg.stepgen.03.steppin').set(916)
    hal.Pin('hpg.stepgen.03.dirpin').set(912)
    hal.Pin('hpg.stepgen.04.steppin').set(917)
    hal.Pin('hpg.stepgen.04.dirpin').set(918)
    hal.Pin('hpg.stepgen.05.steppin').set(924)
    hal.Pin('hpg.stepgen.05.dirpin').set(926)
    # machine power
    hal.Pin('bb_gpio.p9.out-23').link('emcmot-0-enable')
    #hal.Pin('bb_gpio.p9.out-23.invert').set(True)
    # Monitor estop input from hardware
    hal.Pin('bb_gpio.p8.in-17').link('estop-in')
    hal.Pin('bb_gpio.p8.in-17.invert').set(True)
    # drive estop-sw
    hal.Pin('bb_gpio.p8.out-26').link('estop-out')
    hal.Pin('bb_gpio.p8.out-26.invert').set(True)
    # Tie machine power signal to the Parport Cape LED
    # Feel free to tie any other signal you like to the LED
    hal.Pin('bb_gpio.p9.out-25').link('emcmot-0-enable')
    # hal.Pin('bb_gpio.p9.out-25.invert').set(True)
    # link emcmot.xx.enable to stepper driver enable signals
    hal.Pin('bb_gpio.p9.out-14').link('emcmot-0-enable')
    hal.Pin('bb_gpio.p9.out-14.invert').set(True)
|
"""
Copyright 2009 55 Minutes (http://www.55minutes.com)
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import time
test_timestamp = time.strftime('%a %Y-%m-%d %H:%M %Z')
TOP = """\
<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Strict//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-strict.dtd">
<html xmlns="http://www.w3.org/1999/xhtml" xml:lang="en" lang="en">
<head>
<meta http-equiv="Content-type" content="text/html;charset=UTF-8" />
<title>Test coverage report: %(title)s</title>
<style type="text/css" media="screen">
body
{
font-family: "Lucida Sans Unicode", "Lucida Grande", sans-serif;
font-size: 13px;
}
#content-header
{
margin-left: 50px;
}
#content-header h1
{
font-size: 18px;
margin-bottom: 0;
}
#content-header p
{
font-size: 13px;
margin: 0;
color: #909090;
}
#result-list
{
margin: 0 50px;
}
#result-list ul
{
padding-left: 13px;
list-style-position: inside;
}
</style>
</head>
<body>
"""
CONTENT_HEADER = """\
<div id="content-header">
<h1>Test Coverage Report: %(title)s</h1>"""
CONTENT_HEADER += "<p>Generated: %(test_timestamp)s</p>" %vars()
CONTENT_HEADER += "</div>"
CONTENT_BODY = """\
<div id="result-list">
<p>%(long_desc)s</p>
<ul>
%(exception_list)s
</ul>
Back to <a href="index.html">index</a>.
</div>
"""
EXCEPTION_LINE = "<li>%(module_name)s</li>"
BOTTOM = """\
</body>
</html>
"""
|
import sublime
import collections
# Vim-style variable names holding the leader key sequences.
VAR_MAP_LEADER = 'mapleader'
VAR_MAP_LOCAL_LEADER = 'maplocalleader'
# Maps special key-name strings (as typed in mappings) to variable names.
_SPECIAL_STRINGS = {
    '<leader>': VAR_MAP_LEADER,
    '<localleader>': VAR_MAP_LOCAL_LEADER,
}
# Fallback values used when a variable has not been set this session.
_DEFAULTS = {
    VAR_MAP_LEADER: '\\',
    VAR_MAP_LOCAL_LEADER: '\\'
}
# Session-local variable store (not persisted to settings; see set_()).
_VARIABLES = {
}
def expand_keys(seq):
    '''Replaces well-known variables in key names with their corresponding
    values.

    Returns *seq* unchanged when it does not start with a known variable.
    '''
    leader = var_name = None
    # TODO(guillermooo): Can these variables appear in the middle of a
    # sequence instead of at the beginning only?
    if seq.lower().startswith('<leader>'):
        var_name = '<leader>'
        leader = _VARIABLES.get('mapleader', _DEFAULTS.get('mapleader'))
    if seq.lower().startswith('<localleader>'):
        # BUG FIX: this branch previously assigned to unused names
        # ('var' and 'local_leader'), so <localleader> was never
        # actually expanded and always fell into the TypeError path.
        var_name = '<localleader>'
        leader = _VARIABLES.get('maplocalleader',
                                _DEFAULTS.get('maplocalleader'))
    try:
        return leader + seq[len(var_name):]
    except TypeError:
        # No known variable prefix: leader/var_name are still None.
        return seq
def is_key_name(name):
    """Return True if *name* (case-insensitively) is a special key name."""
    lowered = name.lower()
    return lowered in _SPECIAL_STRINGS
def get(name):
    """Look up a variable, translating special key names (e.g. <leader>)
    to their variable names and falling back to the session defaults."""
    key = name.lower()
    key = _SPECIAL_STRINGS.get(key, key)
    return _VARIABLES.get(key, _DEFAULTS.get(key))
def set_(name, value):
    """Store *value* under *name* for the current session only."""
    # TODO(guillermooo): Set vars in settings.
    _VARIABLES.update({name: value})
class Variables(object):
    '''Stores variables during the current Sublime Text session.
    Meant to be used as a descriptor with `State`.
    '''
    def __get__(self, instance, owner):
        # Capture the owning state's settings and view so they remain
        # reachable from this descriptor, then hand back the descriptor.
        self.settings = instance.settings
        self.view = instance.view
        return self
    def get(self, name):
        """Delegate to the module-level get()."""
        return get(name)
    def set(self, name, value):
        """Delegate to the module-level set_()."""
        return set_(name, value)
|
import time
from osv import fields, osv
from tools.translate import _
class account_move_line_reconcile(osv.osv_memory):
    """
    Account move line reconcile wizard, it checks for the write off the reconcile entry or directly reconcile.
    """
    _name = 'account.move.line.reconcile'
    _description = 'Account move line reconcile'
    # Read-only summary fields shown to the user before reconciling.
    _columns = {
        'trans_nbr': fields.integer('# of Transaction', readonly=True),
        'credit': fields.float('Credit amount', readonly=True),
        'debit': fields.float('Debit amount', readonly=True),
        'writeoff': fields.float('Write-Off amount', readonly=True),
    }
    def default_get(self, cr, uid, fields, context=None):
        # Pre-populate the wizard with totals computed from the move
        # lines selected in the client (context['active_ids']).
        res = super(account_move_line_reconcile, self).default_get(cr, uid, fields, context=context)
        data = self.trans_rec_get(cr, uid, context['active_ids'], context)
        if 'trans_nbr' in fields:
            res.update({'trans_nbr':data['trans_nbr']})
        if 'credit' in fields:
            res.update({'credit':data['credit']})
        if 'debit' in fields:
            res.update({'debit':data['debit']})
        if 'writeoff' in fields:
            res.update({'writeoff':data['writeoff']})
        return res
    def trans_rec_get(self, cr, uid, ids, context=None):
        # Sum debit/credit over the selected, not-yet-reconciled lines;
        # 'writeoff' is the remaining difference (debit - credit).
        account_move_line_obj = self.pool.get('account.move.line')
        if context is None:
            context = {}
        credit = debit = 0
        account_id = False
        count = 0
        for line in account_move_line_obj.browse(cr, uid, context['active_ids'], context=context):
            # NOTE(review): both clauses test the same field, so the
            # second is redundant — possibly one was meant to check
            # reconcile_partial_id. Confirm before changing.
            if not line.reconcile_id and not line.reconcile_id.id:
                count += 1
                credit += line.credit
                debit += line.debit
                account_id = line.account_id.id
        return {'trans_nbr': count, 'account_id': account_id, 'credit': credit, 'debit': debit, 'writeoff': debit - credit}
    def trans_rec_addendum_writeoff(self, cr, uid, ids, context=None):
        # Delegate: open the write-off wizard form.
        return self.pool.get('account.move.line.reconcile.writeoff').trans_rec_addendum(cr, uid, ids, context)
    def trans_rec_reconcile_partial_reconcile(self, cr, uid, ids, context=None):
        # Delegate: perform a partial reconciliation.
        return self.pool.get('account.move.line.reconcile.writeoff').trans_rec_reconcile_partial(cr, uid, ids, context)
    def trans_rec_reconcile_full(self, cr, uid, ids, context=None):
        # Fully reconcile the selected lines in the period containing
        # today's date, then close the wizard window.
        account_move_line_obj = self.pool.get('account.move.line')
        period_obj = self.pool.get('account.period')
        date = False
        period_id = False
        journal_id= False
        account_id = False
        if context is None:
            context = {}
        date = time.strftime('%Y-%m-%d')
        ids = period_obj.find(cr, uid, dt=date, context=context)
        if ids:
            period_id = ids[0]
        #stop the reconciliation process by partner (manual reconciliation) only if there is nothing more to reconcile for this partner
        if 'active_ids' in context and context['active_ids']:
            tmp_ml_id = account_move_line_obj.browse(cr, uid, context['active_ids'], context)[0]
            partner_id = tmp_ml_id.partner_id and tmp_ml_id.partner_id.id or False
            debit_ml_ids = account_move_line_obj.search(cr, uid, [('partner_id', '=', partner_id), ('account_id.reconcile', '=', True), ('reconcile_id', '=', False), ('debit', '>', 0)], context=context)
            credit_ml_ids = account_move_line_obj.search(cr, uid, [('partner_id', '=', partner_id), ('account_id.reconcile', '=', True), ('reconcile_id', '=', False), ('credit', '>', 0)], context=context)
            for ml_id in context['active_ids']:
                if ml_id in debit_ml_ids:
                    debit_ml_ids.remove(ml_id)
                if ml_id in credit_ml_ids:
                    credit_ml_ids.remove(ml_id)
            if not debit_ml_ids and credit_ml_ids:
                context.update({'stop_reconcile': True})
        account_move_line_obj.reconcile(cr, uid, context['active_ids'], 'manual', account_id,
                period_id, journal_id, context=context)
        return {'type': 'ir.actions.act_window_close'}
account_move_line_reconcile()
class account_move_line_reconcile_writeoff(osv.osv_memory):
    """
    It opens the write off wizard form, in that user can define the journal, account, analytic account for reconcile
    """
    _name = 'account.move.line.reconcile.writeoff'
    _description = 'Account move line reconcile (writeoff)'
    _columns = {
        'journal_id': fields.many2one('account.journal','Write-Off Journal', required=True),
        'writeoff_acc_id': fields.many2one('account.account','Write-Off account', required=True),
        'date_p': fields.date('Date'),
        'comment': fields.char('Comment', size= 64, required=True),
        'analytic_id': fields.many2one('account.analytic.account', 'Analytic Account', domain=[('parent_id', '!=', False)]),
    }
    # Write-off date defaults to today; comment to a fixed label.
    _defaults = {
        'date_p': lambda *a: time.strftime('%Y-%m-%d'),
        'comment': 'Write-off',
    }
    def trans_rec_addendum(self, cr, uid, ids, context=None):
        # Open this wizard's own form view in a new (popup) window.
        mod_obj = self.pool.get('ir.model.data')
        if context is None:
            context = {}
        model_data_ids = mod_obj.search(cr, uid,[('model','=','ir.ui.view'),('name','=','account_move_line_reconcile_writeoff')], context=context)
        resource_id = mod_obj.read(cr, uid, model_data_ids, fields=['res_id'], context=context)[0]['res_id']
        return {
            'name': _('Reconcile Writeoff'),
            'context': context,
            'view_type': 'form',
            'view_mode': 'form',
            'res_model': 'account.move.line.reconcile.writeoff',
            'views': [(resource_id,'form')],
            'type': 'ir.actions.act_window',
            'target': 'new',
        }
    def trans_rec_reconcile_partial(self, cr, uid, ids, context=None):
        # Partially reconcile the selected move lines, then close.
        account_move_line_obj = self.pool.get('account.move.line')
        if context is None:
            context = {}
        account_move_line_obj.reconcile_partial(cr, uid, context['active_ids'], 'manual', context=context)
        return {'type': 'ir.actions.act_window_close'}
    def trans_rec_reconcile(self, cr, uid, ids, context=None):
        # Reconcile with a write-off entry built from the wizard values
        # (journal, account, date, comment, optional analytic account).
        account_move_line_obj = self.pool.get('account.move.line')
        period_obj = self.pool.get('account.period')
        if context is None:
            context = {}
        data = self.read(cr, uid, ids,context=context)[0]
        account_id = data['writeoff_acc_id'][0]
        context['date_p'] = data['date_p']
        journal_id = data['journal_id'][0]
        context['comment'] = data['comment']
        if data['analytic_id']:
            context['analytic_id'] = data['analytic_id'][0]
        if context['date_p']:
            date = context['date_p']
        # NOTE(review): if 'date_p' were ever falsy, 'date' (and below,
        # possibly 'period_id') would be unbound here and raise NameError;
        # in practice the field defaults to today's date.
        ids = period_obj.find(cr, uid, dt=date, context=context)
        if ids:
            period_id = ids[0]
        context.update({'stop_reconcile': True})
        account_move_line_obj.reconcile(cr, uid, context['active_ids'], 'manual', account_id,
                period_id, journal_id, context=context)
        return {'type': 'ir.actions.act_window_close'}
account_move_line_reconcile_writeoff()
|
"""Provides the constants needed for component."""
from typing import Final
SUPPORT_ALARM_ARM_HOME: Final = 1
SUPPORT_ALARM_ARM_AWAY: Final = 2
SUPPORT_ALARM_ARM_NIGHT: Final = 4
SUPPORT_ALARM_TRIGGER: Final = 8
SUPPORT_ALARM_ARM_CUSTOM_BYPASS: Final = 16
SUPPORT_ALARM_ARM_VACATION: Final = 32
CONDITION_TRIGGERED: Final = "is_triggered"
CONDITION_DISARMED: Final = "is_disarmed"
CONDITION_ARMED_HOME: Final = "is_armed_home"
CONDITION_ARMED_AWAY: Final = "is_armed_away"
CONDITION_ARMED_NIGHT: Final = "is_armed_night"
CONDITION_ARMED_VACATION: Final = "is_armed_vacation"
CONDITION_ARMED_CUSTOM_BYPASS: Final = "is_armed_custom_bypass"
|
import time
from os import listdir, unlink
from os.path import join as path_join
from unittest import main
from uuid import uuid4
from swiftclient import client
from swift.common import direct_client
from swift.common.exceptions import ClientException
from swift.common.utils import hash_path, readconf
from swift.obj.diskfile import write_metadata, read_metadata, get_data_dir
from test.probe.common import ReplProbeTest
# Number of times to re-list an object directory before giving up.
RETRIES = 5
def get_data_file_path(obj_dir):
    """Return the path of the newest file in *obj_dir*, or None if empty.

    The directory listing is retried because a PUT can report success
    while a replica's on-disk state is still settling.
    """
    files = []
    # We might need to try a few times if a request hasn't yet settled. For
    # instance, a PUT can return success when just 2 of 3 nodes has completed.
    # ``range`` replaces the Python-2-only ``xrange`` (NameError on py3);
    # the iteration count is tiny, so eager range costs nothing on py2.
    for attempt in range(RETRIES + 1):
        try:
            files = sorted(listdir(obj_dir), reverse=True)
            break
        except Exception:
            if attempt < RETRIES:
                time.sleep(1)
            else:
                raise
    # The listing is sorted in reverse, so the first entry is returned;
    # implicitly returns None when the directory is empty.
    for filename in files:
        return path_join(obj_dir, filename)
class TestObjectFailures(ReplProbeTest):
    """Probe tests asserting that objects whose on-disk data has been
    corrupted (bad ETag, truncated to zero bytes) end up quarantined by
    the object server and are subsequently reported as 404.
    """
    def _setup_data_file(self, container, obj, data):
        """Create *container*/*obj* holding *data* and locate one replica's
        data file on disk; returns (node, partition, data_file_path)."""
        client.put_container(self.url, self.token, container,
                             headers={'X-Storage-Policy':
                                      self.policy.name})
        client.put_object(self.url, self.token, container, obj, data)
        # get_object returns (headers, body); [-1] is the body.
        odata = client.get_object(self.url, self.token, container, obj)[-1]
        self.assertEquals(odata, data)
        opart, onodes = self.object_ring.get_nodes(
            self.account, container, obj)
        onode = onodes[0]
        # NOTE(review): assumes object servers listen on ports
        # 6000 + 10 * node_id, and that '/' is integer division
        # (Python 2) — confirm target interpreter.
        node_id = (onode['port'] - 6000) / 10
        device = onode['device']
        hash_str = hash_path(self.account, container, obj)
        obj_server_conf = readconf(self.configs['object-server'][node_id])
        devices = obj_server_conf['app:object-server']['devices']
        obj_dir = '%s/%s/%s/%s/%s/%s/' % (devices, device,
                                          get_data_dir(self.policy),
                                          opart, hash_str[-3:], hash_str)
        data_file = get_data_file_path(obj_dir)
        return onode, opart, data_file
    def run_quarantine(self):
        """Corrupt an object's ETag; a direct GET serves the data once,
        after which the quarantined object must 404."""
        container = 'container-%s' % uuid4()
        obj = 'object-%s' % uuid4()
        onode, opart, data_file = self._setup_data_file(container, obj,
                                                        'VERIFY')
        metadata = read_metadata(data_file)
        metadata['ETag'] = 'badetag'
        write_metadata(data_file, metadata)
        odata = direct_client.direct_get_object(
            onode, opart, self.account, container, obj, headers={
                'X-Backend-Storage-Policy-Index': self.policy.idx})[-1]
        self.assertEquals(odata, 'VERIFY')
        try:
            direct_client.direct_get_object(
                onode, opart, self.account, container, obj, headers={
                    'X-Backend-Storage-Policy-Index': self.policy.idx})
            raise Exception("Did not quarantine object")
        except ClientException as err:
            self.assertEquals(err.http_status, 404)
    def run_quarantine_range_etag(self):
        """Corrupt the ETag; ranged GETs still succeed, then a subsequent
        full GET must 404 once the object has been quarantined."""
        container = 'container-range-%s' % uuid4()
        obj = 'object-range-%s' % uuid4()
        onode, opart, data_file = self._setup_data_file(container, obj,
                                                        'RANGE')
        metadata = read_metadata(data_file)
        metadata['ETag'] = 'badetag'
        write_metadata(data_file, metadata)
        base_headers = {'X-Backend-Storage-Policy-Index': self.policy.idx}
        for header, result in [({'Range': 'bytes=0-2'}, 'RAN'),
                               ({'Range': 'bytes=1-11'}, 'ANGE'),
                               ({'Range': 'bytes=0-11'}, 'RANGE')]:
            req_headers = base_headers.copy()
            req_headers.update(header)
            odata = direct_client.direct_get_object(
                onode, opart, self.account, container, obj,
                headers=req_headers)[-1]
            self.assertEquals(odata, result)
        try:
            direct_client.direct_get_object(
                onode, opart, self.account, container, obj, headers={
                    'X-Backend-Storage-Policy-Index': self.policy.idx})
            raise Exception("Did not quarantine object")
        except ClientException as err:
            self.assertEquals(err.http_status, 404)
    def run_quarantine_zero_byte_get(self):
        """Replace the data file with an empty one carrying the original
        metadata; a direct GET must quarantine it and return 404."""
        container = 'container-zbyte-%s' % uuid4()
        obj = 'object-zbyte-%s' % uuid4()
        onode, opart, data_file = self._setup_data_file(container, obj, 'DATA')
        metadata = read_metadata(data_file)
        unlink(data_file)
        # Recreate the file empty but with the original metadata attached.
        with open(data_file, 'w') as fpointer:
            write_metadata(fpointer, metadata)
        try:
            direct_client.direct_get_object(
                onode, opart, self.account, container, obj, conn_timeout=1,
                response_timeout=1, headers={'X-Backend-Storage-Policy-Index':
                                             self.policy.idx})
            raise Exception("Did not quarantine object")
        except ClientException as err:
            self.assertEquals(err.http_status, 404)
    def run_quarantine_zero_byte_head(self):
        """Same as the zero-byte GET variant, triggered via HEAD."""
        container = 'container-zbyte-%s' % uuid4()
        obj = 'object-zbyte-%s' % uuid4()
        onode, opart, data_file = self._setup_data_file(container, obj, 'DATA')
        metadata = read_metadata(data_file)
        unlink(data_file)
        with open(data_file, 'w') as fpointer:
            write_metadata(fpointer, metadata)
        try:
            direct_client.direct_head_object(
                onode, opart, self.account, container, obj, conn_timeout=1,
                response_timeout=1, headers={'X-Backend-Storage-Policy-Index':
                                             self.policy.idx})
            raise Exception("Did not quarantine object")
        except ClientException as err:
            self.assertEquals(err.http_status, 404)
    def run_quarantine_zero_byte_post(self):
        """Same as the zero-byte GET variant, triggered via POST."""
        container = 'container-zbyte-%s' % uuid4()
        obj = 'object-zbyte-%s' % uuid4()
        onode, opart, data_file = self._setup_data_file(container, obj, 'DATA')
        metadata = read_metadata(data_file)
        unlink(data_file)
        with open(data_file, 'w') as fpointer:
            write_metadata(fpointer, metadata)
        try:
            headers = {'X-Object-Meta-1': 'One', 'X-Object-Meta-Two': 'Two',
                       'X-Backend-Storage-Policy-Index': self.policy.idx}
            direct_client.direct_post_object(
                onode, opart, self.account,
                container, obj,
                headers=headers,
                conn_timeout=1,
                response_timeout=1)
            raise Exception("Did not quarantine object")
        except ClientException as err:
            self.assertEquals(err.http_status, 404)
    def test_runner(self):
        # Single unittest entry point: each scenario builds its own
        # container/object fixtures, so they are run back to back.
        self.run_quarantine()
        self.run_quarantine_range_etag()
        self.run_quarantine_zero_byte_get()
        self.run_quarantine_zero_byte_head()
        self.run_quarantine_zero_byte_post()
# Allow running this probe-test module directly via unittest's CLI.
if __name__ == '__main__':
    main()
|
from __future__ import print_function
import numpy as np
from scipy import linalg, optimize
from ..base import BaseEstimator, RegressorMixin
from ..metrics.pairwise import manhattan_distances
from ..utils import check_random_state, check_array, check_X_y
from ..utils.validation import check_is_fitted
from . import regression_models as regression
from . import correlation_models as correlation
from ..utils import deprecated
# Double-precision machine epsilon; the default nugget is a small multiple
# of this (10 * MACHINE_EPSILON) for numerical robustness.
MACHINE_EPSILON = np.finfo(np.double).eps
@deprecated("l1_cross_distances was deprecated in version 0.18 "
"and will be removed in 0.20.")
def l1_cross_distances(X):
"""
Computes the nonzero componentwise L1 cross-distances between the vectors
in X.
Parameters
----------
X: array_like
An array with shape (n_samples, n_features)
Returns
-------
D: array with shape (n_samples * (n_samples - 1) / 2, n_features)
The array of componentwise L1 cross-distances.
ij: arrays with shape (n_samples * (n_samples - 1) / 2, 2)
The indices i and j of the vectors in X associated to the cross-
distances in D: D[k] = np.abs(X[ij[k, 0]] - Y[ij[k, 1]]).
"""
X = check_array(X)
n_samples, n_features = X.shape
n_nonzero_cross_dist = n_samples * (n_samples - 1) // 2
ij = np.zeros((n_nonzero_cross_dist, 2), dtype=np.int)
D = np.zeros((n_nonzero_cross_dist, n_features))
ll_1 = 0
for k in range(n_samples - 1):
ll_0 = ll_1
ll_1 = ll_0 + n_samples - k - 1
ij[ll_0:ll_1, 0] = k
ij[ll_0:ll_1, 1] = np.arange(k + 1, n_samples)
D[ll_0:ll_1] = np.abs(X[k] - X[(k + 1):n_samples])
return D, ij
@deprecated("GaussianProcess was deprecated in version 0.18 and will be "
"removed in 0.20. Use the GaussianProcessRegressor instead.")
class GaussianProcess(BaseEstimator, RegressorMixin):
"""The legacy Gaussian Process model class.
Note that this class was deprecated in version 0.18 and will be
removed in 0.20. Use the GaussianProcessRegressor instead.
Read more in the :ref:`User Guide <gaussian_process>`.
Parameters
----------
regr : string or callable, optional
A regression function returning an array of outputs of the linear
regression functional basis. The number of observations n_samples
should be greater than the size p of this basis.
Default assumes a simple constant regression trend.
Available built-in regression models are::
'constant', 'linear', 'quadratic'
corr : string or callable, optional
A stationary autocorrelation function returning the autocorrelation
between two points x and x'.
Default assumes a squared-exponential autocorrelation model.
Built-in correlation models are::
'absolute_exponential', 'squared_exponential',
'generalized_exponential', 'cubic', 'linear'
beta0 : double array_like, optional
The regression weight vector to perform Ordinary Kriging (OK).
Default assumes Universal Kriging (UK) so that the vector beta of
regression weights is estimated using the maximum likelihood
principle.
storage_mode : string, optional
A string specifying whether the Cholesky decomposition of the
correlation matrix should be stored in the class (storage_mode =
'full') or not (storage_mode = 'light').
Default assumes storage_mode = 'full', so that the
Cholesky decomposition of the correlation matrix is stored.
This might be a useful parameter when one is not interested in the
MSE and only plan to estimate the BLUP, for which the correlation
matrix is not required.
verbose : boolean, optional
A boolean specifying the verbose level.
Default is verbose = False.
theta0 : double array_like, optional
An array with shape (n_features, ) or (1, ).
The parameters in the autocorrelation model.
If thetaL and thetaU are also specified, theta0 is considered as
the starting point for the maximum likelihood estimation of the
best set of parameters.
Default assumes isotropic autocorrelation model with theta0 = 1e-1.
thetaL : double array_like, optional
An array with shape matching theta0's.
Lower bound on the autocorrelation parameters for maximum
likelihood estimation.
Default is None, so that it skips maximum likelihood estimation and
it uses theta0.
thetaU : double array_like, optional
An array with shape matching theta0's.
Upper bound on the autocorrelation parameters for maximum
likelihood estimation.
Default is None, so that it skips maximum likelihood estimation and
it uses theta0.
normalize : boolean, optional
Input X and observations y are centered and reduced wrt
means and standard deviations estimated from the n_samples
observations provided.
Default is normalize = True so that data is normalized to ease
maximum likelihood estimation.
nugget : double or ndarray, optional
Introduce a nugget effect to allow smooth predictions from noisy
data. If nugget is an ndarray, it must be the same length as the
number of data points used for the fit.
The nugget is added to the diagonal of the assumed training covariance;
in this way it acts as a Tikhonov regularization in the problem. In
the special case of the squared exponential correlation function, the
nugget mathematically represents the variance of the input values.
Default assumes a nugget close to machine precision for the sake of
robustness (nugget = 10. * MACHINE_EPSILON).
optimizer : string, optional
A string specifying the optimization algorithm to be used.
Default uses 'fmin_cobyla' algorithm from scipy.optimize.
Available optimizers are::
'fmin_cobyla', 'Welch'
        'Welch' optimizer is due to Welch et al., see reference [WBSWM1992]_.
It consists in iterating over several one-dimensional optimizations
instead of running one single multi-dimensional optimization.
random_start : int, optional
The number of times the Maximum Likelihood Estimation should be
performed from a random starting point.
The first MLE always uses the specified starting point (theta0),
the next starting points are picked at random according to an
exponential distribution (log-uniform on [thetaL, thetaU]).
Default does not use random starting point (random_start = 1).
random_state: integer or numpy.RandomState, optional
The generator used to shuffle the sequence of coordinates of theta in
the Welch optimizer. If an integer is given, it fixes the seed.
Defaults to the global numpy random number generator.
Attributes
----------
theta_ : array
Specified theta OR the best set of autocorrelation parameters (the \
sought maximizer of the reduced likelihood function).
reduced_likelihood_function_value_ : array
The optimal reduced likelihood function value.
Examples
--------
>>> import numpy as np
>>> from sklearn.gaussian_process import GaussianProcess
>>> X = np.array([[1., 3., 5., 6., 7., 8.]]).T
>>> y = (X * np.sin(X)).ravel()
>>> gp = GaussianProcess(theta0=0.1, thetaL=.001, thetaU=1.)
>>> gp.fit(X, y) # doctest: +ELLIPSIS
GaussianProcess(beta0=None...
...
Notes
-----
The presentation implementation is based on a translation of the DACE
Matlab toolbox, see reference [NLNS2002]_.
References
----------
.. [NLNS2002] `H.B. Nielsen, S.N. Lophaven, H. B. Nielsen and J.
Sondergaard. DACE - A MATLAB Kriging Toolbox.` (2002)
http://imedea.uib-csic.es/master/cambioglobal/Modulo_V_cod101615/Lab/lab_maps/krigging/DACE-krigingsoft/dace/dace.pdf
.. [WBSWM1992] `W.J. Welch, R.J. Buck, J. Sacks, H.P. Wynn, T.J. Mitchell,
and M.D. Morris (1992). Screening, predicting, and computer
experiments. Technometrics, 34(1) 15--25.`
http://www.jstor.org/stable/1269548
"""
_regression_types = {
'constant': regression.constant,
'linear': regression.linear,
'quadratic': regression.quadratic}
_correlation_types = {
'absolute_exponential': correlation.absolute_exponential,
'squared_exponential': correlation.squared_exponential,
'generalized_exponential': correlation.generalized_exponential,
'cubic': correlation.cubic,
'linear': correlation.linear}
_optimizer_types = [
'fmin_cobyla',
'Welch']
    def __init__(self, regr='constant', corr='squared_exponential', beta0=None,
                 storage_mode='full', verbose=False, theta0=1e-1,
                 thetaL=None, thetaU=None, optimizer='fmin_cobyla',
                 random_start=1, normalize=True,
                 nugget=10. * MACHINE_EPSILON, random_state=None):
        """Store the constructor parameters verbatim.

        Per scikit-learn convention no validation happens here; parameters
        are checked and coerced by ``_check_params`` when ``fit`` runs.
        """
        self.regr = regr
        self.corr = corr
        self.beta0 = beta0
        self.storage_mode = storage_mode
        self.verbose = verbose
        self.theta0 = theta0
        self.thetaL = thetaL
        self.thetaU = thetaU
        self.normalize = normalize
        self.nugget = nugget
        self.optimizer = optimizer
        self.random_start = random_start
        self.random_state = random_state
    def fit(self, X, y):
        """
        The Gaussian Process model fitting method.
        Parameters
        ----------
        X : double array_like
            An array with shape (n_samples, n_features) with the input at which
            observations were made.
        y : double array_like
            An array with shape (n_samples, ) or shape (n_samples, n_targets)
            with the observations of the output to be predicted.
        Returns
        -------
        gp : self
            A fitted Gaussian Process model object awaiting data to perform
            predictions.
        """
        # Run input checks
        self._check_params()
        self.random_state = check_random_state(self.random_state)
        # Force data to 2D numpy.array
        X, y = check_X_y(X, y, multi_output=True, y_numeric=True)
        # Remember the original dimensionality of y so predict() can return
        # a 1-D array when a 1-D target was supplied.
        self.y_ndim_ = y.ndim
        if y.ndim == 1:
            y = y[:, np.newaxis]
        # Check shapes of DOE & observations
        n_samples, n_features = X.shape
        _, n_targets = y.shape
        # Run input checks again now that n_samples is known, so an array
        # nugget can be validated against the sample count
        self._check_params(n_samples)
        # Normalize data or don't
        if self.normalize:
            X_mean = np.mean(X, axis=0)
            X_std = np.std(X, axis=0)
            y_mean = np.mean(y, axis=0)
            y_std = np.std(y, axis=0)
            # Guard against zero-variance columns to avoid division by zero.
            X_std[X_std == 0.] = 1.
            y_std[y_std == 0.] = 1.
            # center and scale X if necessary
            X = (X - X_mean) / X_std
            y = (y - y_mean) / y_std
        else:
            X_mean = np.zeros(1)
            X_std = np.ones(1)
            y_mean = np.zeros(1)
            y_std = np.ones(1)
        # Calculate matrix of distances D between samples
        D, ij = l1_cross_distances(X)
        if (np.min(np.sum(D, axis=1)) == 0.
                and self.corr != correlation.pure_nugget):
            raise Exception("Multiple input features cannot have the same"
                            " target value.")
        # Regression matrix and parameters
        F = self.regr(X)
        n_samples_F = F.shape[0]
        if F.ndim > 1:
            p = F.shape[1]
        else:
            p = 1
        if n_samples_F != n_samples:
            raise Exception("Number of rows in F and X do not match. Most "
                            "likely something is going wrong with the "
                            "regression model.")
        if p > n_samples_F:
            raise Exception(("Ordinary least squares problem is undetermined "
                             "n_samples=%d must be greater than the "
                             "regression model size p=%d.") % (n_samples, p))
        if self.beta0 is not None:
            if self.beta0.shape[0] != p:
                raise Exception("Shapes of beta0 and F do not match.")
        # Set attributes (training data and normalization statistics are
        # kept on the estimator for use by predict())
        self.X = X
        self.y = y
        self.D = D
        self.ij = ij
        self.F = F
        self.X_mean, self.X_std = X_mean, X_std
        self.y_mean, self.y_std = y_mean, y_std
        # Determine Gaussian Process model parameters
        if self.thetaL is not None and self.thetaU is not None:
            # Maximum Likelihood Estimation of the parameters
            if self.verbose:
                print("Performing Maximum Likelihood Estimation of the "
                      "autocorrelation parameters...")
            self.theta_, self.reduced_likelihood_function_value_, par = \
                self._arg_max_reduced_likelihood_function()
            if np.isinf(self.reduced_likelihood_function_value_):
                raise Exception("Bad parameter region. "
                                "Try increasing upper bound")
        else:
            # Given parameters
            if self.verbose:
                print("Given autocorrelation parameters. "
                      "Computing Gaussian Process model parameters...")
            self.theta_ = self.theta0
            self.reduced_likelihood_function_value_, par = \
                self.reduced_likelihood_function()
            if np.isinf(self.reduced_likelihood_function_value_):
                raise Exception("Bad point. Try increasing theta0.")
        self.beta = par['beta']
        self.gamma = par['gamma']
        self.sigma2 = par['sigma2']
        self.C = par['C']
        self.Ft = par['Ft']
        self.G = par['G']
        if self.storage_mode == 'light':
            # Delete heavy data (it will be computed again if required)
            # (it is required only when MSE is wanted in self.predict)
            if self.verbose:
                print("Light storage mode specified. "
                      "Flushing autocorrelation matrix...")
            self.D = None
            self.ij = None
            self.F = None
            self.C = None
            self.Ft = None
            self.G = None
        return self
def predict(self, X, eval_MSE=False, batch_size=None):
"""
This function evaluates the Gaussian Process model at x.
Parameters
----------
X : array_like
An array with shape (n_eval, n_features) giving the point(s) at
which the prediction(s) should be made.
eval_MSE : boolean, optional
A boolean specifying whether the Mean Squared Error should be
evaluated or not.
Default assumes evalMSE = False and evaluates only the BLUP (mean
prediction).
batch_size : integer, optional
An integer giving the maximum number of points that can be
evaluated simultaneously (depending on the available memory).
Default is None so that all given points are evaluated at the same
time.
Returns
-------
y : array_like, shape (n_samples, ) or (n_samples, n_targets)
An array with shape (n_eval, ) if the Gaussian Process was trained
on an array of shape (n_samples, ) or an array with shape
(n_eval, n_targets) if the Gaussian Process was trained on an array
of shape (n_samples, n_targets) with the Best Linear Unbiased
Prediction at x.
MSE : array_like, optional (if eval_MSE == True)
An array with shape (n_eval, ) or (n_eval, n_targets) as with y,
with the Mean Squared Error at x.
"""
check_is_fitted(self, "X")
# Check input shapes
X = check_array(X)
n_eval, _ = X.shape
n_samples, n_features = self.X.shape
n_samples_y, n_targets = self.y.shape
# Run input checks
self._check_params(n_samples)
if X.shape[1] != n_features:
raise ValueError(("The number of features in X (X.shape[1] = %d) "
"should match the number of features used "
"for fit() "
"which is %d.") % (X.shape[1], n_features))
if batch_size is None:
# No memory management
# (evaluates all given points in a single batch run)
# Normalize input
X = (X - self.X_mean) / self.X_std
# Initialize output
y = np.zeros(n_eval)
if eval_MSE:
MSE = np.zeros(n_eval)
# Get pairwise componentwise L1-distances to the input training set
dx = manhattan_distances(X, Y=self.X, sum_over_features=False)
# Get regression function and correlation
f = self.regr(X)
r = self.corr(self.theta_, dx).reshape(n_eval, n_samples)
# Scaled predictor
y_ = np.dot(f, self.beta) + np.dot(r, self.gamma)
# Predictor
y = (self.y_mean + self.y_std * y_).reshape(n_eval, n_targets)
if self.y_ndim_ == 1:
y = y.ravel()
# Mean Squared Error
if eval_MSE:
C = self.C
if C is None:
# Light storage mode (need to recompute C, F, Ft and G)
if self.verbose:
print("This GaussianProcess used 'light' storage mode "
"at instantiation. Need to recompute "
"autocorrelation matrix...")
reduced_likelihood_function_value, par = \
self.reduced_likelihood_function()
self.C = par['C']
self.Ft = par['Ft']
self.G = par['G']
rt = linalg.solve_triangular(self.C, r.T, lower=True)
if self.beta0 is None:
# Universal Kriging
u = linalg.solve_triangular(self.G.T,
np.dot(self.Ft.T, rt) - f.T,
lower=True)
else:
# Ordinary Kriging
u = np.zeros((n_targets, n_eval))
MSE = np.dot(self.sigma2.reshape(n_targets, 1),
(1. - (rt ** 2.).sum(axis=0)
+ (u ** 2.).sum(axis=0))[np.newaxis, :])
MSE = np.sqrt((MSE ** 2.).sum(axis=0) / n_targets)
# Mean Squared Error might be slightly negative depending on
# machine precision: force to zero!
MSE[MSE < 0.] = 0.
if self.y_ndim_ == 1:
MSE = MSE.ravel()
return y, MSE
else:
return y
else:
# Memory management
if type(batch_size) is not int or batch_size <= 0:
raise Exception("batch_size must be a positive integer")
if eval_MSE:
y, MSE = np.zeros(n_eval), np.zeros(n_eval)
for k in range(max(1, n_eval / batch_size)):
batch_from = k * batch_size
batch_to = min([(k + 1) * batch_size + 1, n_eval + 1])
y[batch_from:batch_to], MSE[batch_from:batch_to] = \
self.predict(X[batch_from:batch_to],
eval_MSE=eval_MSE, batch_size=None)
return y, MSE
else:
y = np.zeros(n_eval)
for k in range(max(1, n_eval / batch_size)):
batch_from = k * batch_size
batch_to = min([(k + 1) * batch_size + 1, n_eval + 1])
y[batch_from:batch_to] = \
self.predict(X[batch_from:batch_to],
eval_MSE=eval_MSE, batch_size=None)
return y
def reduced_likelihood_function(self, theta=None):
"""
This function determines the BLUP parameters and evaluates the reduced
likelihood function for the given autocorrelation parameters theta.
Maximizing this function wrt the autocorrelation parameters theta is
equivalent to maximizing the likelihood of the assumed joint Gaussian
distribution of the observations y evaluated onto the design of
experiments X.
Parameters
----------
theta : array_like, optional
An array containing the autocorrelation parameters at which the
Gaussian Process model parameters should be determined.
Default uses the built-in autocorrelation parameters
(ie ``theta = self.theta_``).
Returns
-------
reduced_likelihood_function_value : double
The value of the reduced likelihood function associated to the
given autocorrelation parameters theta.
par : dict
A dictionary containing the requested Gaussian Process model
parameters:
sigma2
Gaussian Process variance.
beta
Generalized least-squares regression weights for
Universal Kriging or given beta0 for Ordinary
Kriging.
gamma
Gaussian Process weights.
C
Cholesky decomposition of the correlation matrix [R].
Ft
Solution of the linear equation system : [R] x Ft = F
G
QR decomposition of the matrix Ft.
"""
check_is_fitted(self, "X")
if theta is None:
# Use built-in autocorrelation parameters
theta = self.theta_
# Initialize output
reduced_likelihood_function_value = - np.inf
par = {}
# Retrieve data
n_samples = self.X.shape[0]
D = self.D
ij = self.ij
F = self.F
if D is None:
# Light storage mode (need to recompute D, ij and F)
D, ij = l1_cross_distances(self.X)
if (np.min(np.sum(D, axis=1)) == 0.
and self.corr != correlation.pure_nugget):
raise Exception("Multiple X are not allowed")
F = self.regr(self.X)
# Set up R
r = self.corr(theta, D)
R = np.eye(n_samples) * (1. + self.nugget)
R[ij[:, 0], ij[:, 1]] = r
R[ij[:, 1], ij[:, 0]] = r
# Cholesky decomposition of R
try:
C = linalg.cholesky(R, lower=True)
except linalg.LinAlgError:
return reduced_likelihood_function_value, par
# Get generalized least squares solution
Ft = linalg.solve_triangular(C, F, lower=True)
try:
Q, G = linalg.qr(Ft, econ=True)
except:
#/usr/lib/python2.6/dist-packages/scipy/linalg/decomp.py:1177:
# DeprecationWarning: qr econ argument will be removed after scipy
# 0.7. The economy transform will then be available through the
# mode='economic' argument.
Q, G = linalg.qr(Ft, mode='economic')
sv = linalg.svd(G, compute_uv=False)
rcondG = sv[-1] / sv[0]
if rcondG < 1e-10:
# Check F
sv = linalg.svd(F, compute_uv=False)
condF = sv[0] / sv[-1]
if condF > 1e15:
raise Exception("F is too ill conditioned. Poor combination "
"of regression model and observations.")
else:
# Ft is too ill conditioned, get out (try different theta)
return reduced_likelihood_function_value, par
Yt = linalg.solve_triangular(C, self.y, lower=True)
if self.beta0 is None:
# Universal Kriging
beta = linalg.solve_triangular(G, np.dot(Q.T, Yt))
else:
# Ordinary Kriging
beta = np.array(self.beta0)
rho = Yt - np.dot(Ft, beta)
sigma2 = (rho ** 2.).sum(axis=0) / n_samples
# The determinant of R is equal to the squared product of the diagonal
# elements of its Cholesky decomposition C
detR = (np.diag(C) ** (2. / n_samples)).prod()
# Compute/Organize output
reduced_likelihood_function_value = - sigma2.sum() * detR
par['sigma2'] = sigma2 * self.y_std ** 2.
par['beta'] = beta
par['gamma'] = linalg.solve_triangular(C.T, rho)
par['C'] = C
par['Ft'] = Ft
par['G'] = G
return reduced_likelihood_function_value, par
    def _arg_max_reduced_likelihood_function(self):
        """
        This function estimates the autocorrelation parameters theta as the
        maximizer of the reduced likelihood function.
        (Minimization of the opposite reduced likelihood function is used for
        convenience)
        Parameters
        ----------
        self : All parameters are stored in the Gaussian Process model object.
        Returns
        -------
        optimal_theta : array_like
            The best set of autocorrelation parameters (the sought maximizer of
            the reduced likelihood function).
        optimal_reduced_likelihood_function_value : double
            The optimal reduced likelihood function value.
        optimal_par : dict
            The BLUP parameters associated to thetaOpt.
        """
        # Initialize output (placeholders are replaced on the first
        # optimization run below)
        best_optimal_theta = []
        best_optimal_rlf_value = []
        best_optimal_par = []
        if self.verbose:
            print("The chosen optimizer is: " + str(self.optimizer))
            if self.random_start > 1:
                print(str(self.random_start) + " random starts are required.")
        percent_completed = 0.
        # Force optimizer to fmin_cobyla if the model is meant to be isotropic
        if self.optimizer == 'Welch' and self.theta0.size == 1:
            self.optimizer = 'fmin_cobyla'
        if self.optimizer == 'fmin_cobyla':
            # COBYLA minimizes, so negate the likelihood; the search runs in
            # log10 space so the box constraints are scale-free.
            def minus_reduced_likelihood_function(log10t):
                return - self.reduced_likelihood_function(
                    theta=10. ** log10t)[0]
            # Box bounds expressed as COBYLA inequality constraints; the
            # ``i=i`` default freezes the loop index in each closure
            # (avoiding the late-binding pitfall).
            constraints = []
            for i in range(self.theta0.size):
                constraints.append(lambda log10t, i=i:
                                   log10t[i] - np.log10(self.thetaL[0, i]))
                constraints.append(lambda log10t, i=i:
                                   np.log10(self.thetaU[0, i]) - log10t[i])
            for k in range(self.random_start):
                if k == 0:
                    # Use specified starting point as first guess
                    theta0 = self.theta0
                else:
                    # Generate a random starting point log10-uniformly
                    # distributed between bounds
                    log10theta0 = (np.log10(self.thetaL)
                                   + self.random_state.rand(*self.theta0.shape)
                                   * np.log10(self.thetaU / self.thetaL))
                    theta0 = 10. ** log10theta0
                # Run Cobyla
                try:
                    log10_optimal_theta = \
                        optimize.fmin_cobyla(minus_reduced_likelihood_function,
                                             np.log10(theta0).ravel(), constraints,
                                             iprint=0)
                except ValueError as ve:
                    print("Optimization failed. Try increasing the ``nugget``")
                    raise ve
                optimal_theta = 10. ** log10_optimal_theta
                optimal_rlf_value, optimal_par = \
                    self.reduced_likelihood_function(theta=optimal_theta)
                # Compare the new optimizer to the best previous one
                if k > 0:
                    if optimal_rlf_value > best_optimal_rlf_value:
                        best_optimal_rlf_value = optimal_rlf_value
                        best_optimal_par = optimal_par
                        best_optimal_theta = optimal_theta
                else:
                    best_optimal_rlf_value = optimal_rlf_value
                    best_optimal_par = optimal_par
                    best_optimal_theta = optimal_theta
                if self.verbose and self.random_start > 1:
                    # NOTE(review): integer division (Python 2) appears to be
                    # assumed here; under Python 3 this compares/prints float
                    # percentages — confirm intended interpreter.
                    if (20 * k) / self.random_start > percent_completed:
                        percent_completed = (20 * k) / self.random_start
                        print("%s completed" % (5 * percent_completed))
            optimal_rlf_value = best_optimal_rlf_value
            optimal_par = best_optimal_par
            optimal_theta = best_optimal_theta
        elif self.optimizer == 'Welch':
            # Backup of the given attributes: this branch temporarily mutates
            # them (and self.optimizer/verbose/corr) and restores them below.
            theta0, thetaL, thetaU = self.theta0, self.thetaL, self.thetaU
            corr = self.corr
            verbose = self.verbose
            # This will iterate over fmin_cobyla optimizer
            self.optimizer = 'fmin_cobyla'
            self.verbose = False
            # Initialize under isotropy assumption
            if verbose:
                print("Initialize under isotropy assumption...")
            self.theta0 = check_array(self.theta0.min())
            self.thetaL = check_array(self.thetaL.min())
            self.thetaU = check_array(self.thetaU.max())
            theta_iso, optimal_rlf_value_iso, par_iso = \
                self._arg_max_reduced_likelihood_function()
            optimal_theta = theta_iso + np.zeros(theta0.shape)
            # Iterate over all dimensions of theta allowing for anisotropy
            if verbose:
                print("Now improving allowing for anisotropy...")
            for i in self.random_state.permutation(theta0.size):
                if verbose:
                    print("Proceeding along dimension %d..." % (i + 1))
                self.theta0 = check_array(theta_iso)
                self.thetaL = check_array(thetaL[0, i])
                self.thetaU = check_array(thetaU[0, i])
                # One-dimensional cut of the correlation model: every other
                # component of theta is held at its current optimum.
                def corr_cut(t, d):
                    return corr(check_array(np.hstack([optimal_theta[0][0:i],
                                                       t[0],
                                                       optimal_theta[0][(i +
                                                                         1)::]])),
                                d)
                self.corr = corr_cut
                optimal_theta[0, i], optimal_rlf_value, optimal_par = \
                    self._arg_max_reduced_likelihood_function()
            # Restore the given attributes
            self.theta0, self.thetaL, self.thetaU = theta0, thetaL, thetaU
            self.corr = corr
            self.optimizer = 'Welch'
            self.verbose = verbose
        else:
            raise NotImplementedError("This optimizer ('%s') is not "
                                      "implemented yet. Please contribute!"
                                      % self.optimizer)
        return optimal_theta, optimal_rlf_value, optimal_par
def _check_params(self, n_samples=None):
# Check regression model
if not callable(self.regr):
if self.regr in self._regression_types:
self.regr = self._regression_types[self.regr]
else:
raise ValueError("regr should be one of %s or callable, "
"%s was given."
% (self._regression_types.keys(), self.regr))
# Check regression weights if given (Ordinary Kriging)
if self.beta0 is not None:
self.beta0 = np.atleast_2d(self.beta0)
if self.beta0.shape[1] != 1:
# Force to column vector
self.beta0 = self.beta0.T
# Check correlation model
if not callable(self.corr):
if self.corr in self._correlation_types:
self.corr = self._correlation_types[self.corr]
else:
raise ValueError("corr should be one of %s or callable, "
"%s was given."
% (self._correlation_types.keys(), self.corr))
# Check storage mode
if self.storage_mode != 'full' and self.storage_mode != 'light':
raise ValueError("Storage mode should either be 'full' or "
"'light', %s was given." % self.storage_mode)
# Check correlation parameters
self.theta0 = np.atleast_2d(self.theta0)
lth = self.theta0.size
if self.thetaL is not None and self.thetaU is not None:
self.thetaL = np.atleast_2d(self.thetaL)
self.thetaU = np.atleast_2d(self.thetaU)
if self.thetaL.size != lth or self.thetaU.size != lth:
raise ValueError("theta0, thetaL and thetaU must have the "
"same length.")
if np.any(self.thetaL <= 0) or np.any(self.thetaU < self.thetaL):
raise ValueError("The bounds must satisfy O < thetaL <= "
"thetaU.")
elif self.thetaL is None and self.thetaU is None:
if np.any(self.theta0 <= 0):
raise ValueError("theta0 must be strictly positive.")
elif self.thetaL is None or self.thetaU is None:
raise ValueError("thetaL and thetaU should either be both or "
"neither specified.")
# Force verbose type to bool
self.verbose = bool(self.verbose)
# Force normalize type to bool
self.normalize = bool(self.normalize)
# Check nugget value
self.nugget = np.asarray(self.nugget)
if np.any(self.nugget) < 0.:
raise ValueError("nugget must be positive or zero.")
if (n_samples is not None
and self.nugget.shape not in [(), (n_samples,)]):
raise ValueError("nugget must be either a scalar "
"or array of length n_samples.")
# Check optimizer
if self.optimizer not in self._optimizer_types:
raise ValueError("optimizer should be one of %s"
% self._optimizer_types)
# Force random_start type to int
self.random_start = int(self.random_start)
|
import contextlib
import importlib
import os
from os import path
import pkgutil
import shutil
import sys
import tempfile
import threading
import unittest
from six import moves
from grpc.beta import implementations
from grpc.beta import interfaces
from grpc.framework.foundation import future
from grpc.framework.interfaces.face import face
from grpc_tools import protoc
from tests.unit.framework.common import test_constants
# Directory names (relative to the per-test temporary directory) used to stage
# the .proto sources and to receive the protoc-generated Python modules.
_RELATIVE_PROTO_PATH = 'relative_proto_path'
_RELATIVE_PYTHON_OUT = 'relative_python_out'
# Path components, relative to the proto-path root, of each .proto file this
# test compiles.  The nested directories exercise package layouts of varying
# depth (note the extra 'r' level under 'requests').
_PROTO_FILES_PATH_COMPONENTS = (
    (
        'beta_grpc_plugin_test',
        'payload',
        'test_payload.proto',
    ),
    (
        'beta_grpc_plugin_test',
        'requests',
        'r',
        'test_requests.proto',
    ),
    (
        'beta_grpc_plugin_test',
        'responses',
        'test_responses.proto',
    ),
    (
        'beta_grpc_plugin_test',
        'service',
        'test_service.proto',
    ),
)
# Fully-qualified names of the generated modules, importable once the
# python_out tree has been packagified and placed on sys.path.
_PAYLOAD_PB2 = 'beta_grpc_plugin_test.payload.test_payload_pb2'
_REQUESTS_PB2 = 'beta_grpc_plugin_test.requests.r.test_requests_pb2'
_RESPONSES_PB2 = 'beta_grpc_plugin_test.responses.test_responses_pb2'
_SERVICE_PB2 = 'beta_grpc_plugin_test.service.test_service_pb2'
# Names of generated servicer/stub classes and factory functions, looked up on
# the generated service module via getattr.
SERVICER_IDENTIFIER = 'BetaTestServiceServicer'
STUB_IDENTIFIER = 'BetaTestServiceStub'
SERVER_FACTORY_IDENTIFIER = 'beta_create_TestService_server'
STUB_FACTORY_IDENTIFIER = 'beta_create_TestService_stub'
@contextlib.contextmanager
def _system_path(path_insertion):
old_system_path = sys.path[:]
sys.path = sys.path[0:1] + path_insertion + sys.path[1:]
yield
sys.path = old_system_path
def _create_directory_tree(root, path_components_sequence):
created = set()
for path_components in path_components_sequence:
thus_far = ''
for path_component in path_components:
relative_path = path.join(thus_far, path_component)
if relative_path not in created:
os.makedirs(path.join(root, relative_path))
created.add(relative_path)
thus_far = path.join(thus_far, path_component)
def _massage_proto_content(raw_proto_content):
imports_substituted = raw_proto_content.replace(
b'import "tests/protoc_plugin/protos/',
b'import "beta_grpc_plugin_test/')
package_statement_substituted = imports_substituted.replace(
b'package grpc_protoc_plugin;', b'package beta_grpc_protoc_plugin;')
return package_statement_substituted
def _packagify(directory):
for subdirectory, _, _ in os.walk(directory):
init_file_name = path.join(subdirectory, '__init__.py')
with open(init_file_name, 'wb') as init_file:
init_file.write(b'')
class _ServicerMethods(object):
def __init__(self, payload_pb2, responses_pb2):
self._condition = threading.Condition()
self._paused = False
self._fail = False
self._payload_pb2 = payload_pb2
self._responses_pb2 = responses_pb2
@contextlib.contextmanager
def pause(self): # pylint: disable=invalid-name
with self._condition:
self._paused = True
yield
with self._condition:
self._paused = False
self._condition.notify_all()
@contextlib.contextmanager
def fail(self): # pylint: disable=invalid-name
with self._condition:
self._fail = True
yield
with self._condition:
self._fail = False
def _control(self): # pylint: disable=invalid-name
with self._condition:
if self._fail:
raise ValueError()
while self._paused:
self._condition.wait()
def UnaryCall(self, request, unused_rpc_context):
response = self._responses_pb2.SimpleResponse()
response.payload.payload_type = self._payload_pb2.COMPRESSABLE
response.payload.payload_compressable = 'a' * request.response_size
self._control()
return response
def StreamingOutputCall(self, request, unused_rpc_context):
for parameter in request.response_parameters:
response = self._responses_pb2.StreamingOutputCallResponse()
response.payload.payload_type = self._payload_pb2.COMPRESSABLE
response.payload.payload_compressable = 'a' * parameter.size
self._control()
yield response
def StreamingInputCall(self, request_iter, unused_rpc_context):
response = self._responses_pb2.StreamingInputCallResponse()
aggregated_payload_size = 0
for request in request_iter:
aggregated_payload_size += len(request.payload.payload_compressable)
response.aggregated_payload_size = aggregated_payload_size
self._control()
return response
def FullDuplexCall(self, request_iter, unused_rpc_context):
for request in request_iter:
for parameter in request.response_parameters:
response = self._responses_pb2.StreamingOutputCallResponse()
response.payload.payload_type = self._payload_pb2.COMPRESSABLE
response.payload.payload_compressable = 'a' * parameter.size
self._control()
yield response
def HalfDuplexCall(self, request_iter, unused_rpc_context):
responses = []
for request in request_iter:
for parameter in request.response_parameters:
response = self._responses_pb2.StreamingOutputCallResponse()
response.payload.payload_type = self._payload_pb2.COMPRESSABLE
response.payload.payload_compressable = 'a' * parameter.size
self._control()
responses.append(response)
for response in responses:
yield response
@contextlib.contextmanager
def _CreateService(payload_pb2, responses_pb2, service_pb2):
    """Provides a servicer backend and a stub.

    The servicer is just the implementation of the actual servicer passed to the
    face player of the python RPC implementation; the two are detached.

    Args:
      payload_pb2: The payload_pb2 module generated by this test.
      responses_pb2: The responses_pb2 module generated by this test.
      service_pb2: The service_pb2 module generated by this test.

    Yields:
      A (servicer_methods, stub) pair where servicer_methods is the back-end of
      the service bound to the stub and stub is the stub on which to invoke
      RPCs.
    """
    servicer_methods = _ServicerMethods(payload_pb2, responses_pb2)

    class Servicer(getattr(service_pb2, SERVICER_IDENTIFIER)):
        # Delegates every RPC to the shared _ServicerMethods instance so the
        # test can reach pause()/fail() on the back-end while the stub is live.

        def UnaryCall(self, request, context):
            return servicer_methods.UnaryCall(request, context)

        def StreamingOutputCall(self, request, context):
            return servicer_methods.StreamingOutputCall(request, context)

        def StreamingInputCall(self, request_iter, context):
            return servicer_methods.StreamingInputCall(request_iter, context)

        def FullDuplexCall(self, request_iter, context):
            return servicer_methods.FullDuplexCall(request_iter, context)

        def HalfDuplexCall(self, request_iter, context):
            return servicer_methods.HalfDuplexCall(request_iter, context)

    servicer = Servicer()
    server = getattr(service_pb2, SERVER_FACTORY_IDENTIFIER)(servicer)
    port = server.add_insecure_port('[::]:0')
    server.start()
    channel = implementations.insecure_channel('localhost', port)
    stub = getattr(service_pb2, STUB_FACTORY_IDENTIFIER)(channel)
    try:
        yield servicer_methods, stub
    finally:
        # Stop the server even when the test body raises so the port and
        # worker threads are not leaked across test cases.
        server.stop(0)
@contextlib.contextmanager
def _CreateIncompleteService(service_pb2):
    """Provides a servicer backend that fails to implement methods and its stub.

    The servicer is just the implementation of the actual servicer passed to the
    face player of the python RPC implementation; the two are detached.

    Args:
      service_pb2: The service_pb2 module generated by this test.

    Yields:
      A (None, stub) pair: no back-end methods are implemented, and stub is
      the stub on which to invoke RPCs (which should fail as unimplemented).
    """

    class Servicer(getattr(service_pb2, SERVICER_IDENTIFIER)):
        # Deliberately overrides nothing so every RPC hits the base class's
        # unimplemented behavior.
        pass

    servicer = Servicer()
    server = getattr(service_pb2, SERVER_FACTORY_IDENTIFIER)(servicer)
    port = server.add_insecure_port('[::]:0')
    server.start()
    channel = implementations.insecure_channel('localhost', port)
    stub = getattr(service_pb2, STUB_FACTORY_IDENTIFIER)(channel)
    try:
        yield None, stub
    finally:
        # Always stop the server, even if the consuming test raises.
        server.stop(0)
def _streaming_input_request_iterator(payload_pb2, requests_pb2):
for _ in range(3):
request = requests_pb2.StreamingInputCallRequest()
request.payload.payload_type = payload_pb2.COMPRESSABLE
request.payload.payload_compressable = 'a'
yield request
def _streaming_output_request(requests_pb2):
request = requests_pb2.StreamingOutputCallRequest()
sizes = [1, 2, 3]
request.response_parameters.add(size=sizes[0], interval_us=0)
request.response_parameters.add(size=sizes[1], interval_us=0)
request.response_parameters.add(size=sizes[2], interval_us=0)
return request
def _full_duplex_request_iterator(requests_pb2):
request = requests_pb2.StreamingOutputCallRequest()
request.response_parameters.add(size=1, interval_us=0)
yield request
request = requests_pb2.StreamingOutputCallRequest()
request.response_parameters.add(size=2, interval_us=0)
request.response_parameters.add(size=3, interval_us=0)
yield request
class PythonPluginTest(unittest.TestCase):
    """Test case for the gRPC Python protoc-plugin.
    While reading these tests, remember that the futures API
    (`stub.method.future()`) only gives futures for the *response-unary*
    methods and does not exist for response-streaming methods.
    """
    def setUp(self):
        # Stage a private copy of the .proto sources in a fresh temporary
        # directory so each test compiles its own tree; tearDown removes it.
        self._directory = tempfile.mkdtemp(dir='.')
        self._proto_path = path.join(self._directory, _RELATIVE_PROTO_PATH)
        self._python_out = path.join(self._directory, _RELATIVE_PYTHON_OUT)
        os.makedirs(self._proto_path)
        os.makedirs(self._python_out)
        directories_path_components = {
            proto_file_path_components[:-1]
            for proto_file_path_components in _PROTO_FILES_PATH_COMPONENTS
        }
        _create_directory_tree(self._proto_path, directories_path_components)
        self._proto_file_names = set()
        for proto_file_path_components in _PROTO_FILES_PATH_COMPONENTS:
            # Read the checked-in proto, rewrite its import/package
            # statements, and write it into the staged tree.
            raw_proto_content = pkgutil.get_data(
                'tests.protoc_plugin.protos',
                path.join(*proto_file_path_components[1:]))
            massaged_proto_content = _massage_proto_content(raw_proto_content)
            proto_file_name = path.join(self._proto_path,
                                        *proto_file_path_components)
            with open(proto_file_name, 'wb') as proto_file:
                proto_file.write(massaged_proto_content)
            self._proto_file_names.add(proto_file_name)
    def tearDown(self):
        shutil.rmtree(self._directory)
    def _protoc(self):
        # Compile the staged protos with the beta (grpc_1_0) plugin, then
        # import the generated modules with python_out on sys.path.
        # The leading '' stands in for argv[0] expected by protoc.main.
        args = [
            '',
            '--proto_path={}'.format(self._proto_path),
            '--python_out={}'.format(self._python_out),
            '--grpc_python_out=grpc_1_0:{}'.format(self._python_out),
        ] + list(self._proto_file_names)
        protoc_exit_code = protoc.main(args)
        self.assertEqual(0, protoc_exit_code)
        _packagify(self._python_out)
        with _system_path([self._python_out]):
            self._payload_pb2 = importlib.import_module(_PAYLOAD_PB2)
            self._requests_pb2 = importlib.import_module(_REQUESTS_PB2)
            self._responses_pb2 = importlib.import_module(_RESPONSES_PB2)
            self._service_pb2 = importlib.import_module(_SERVICE_PB2)
    def testImportAttributes(self):
        self._protoc()
        # check that we can access the generated module and its members.
        self.assertIsNotNone(
            getattr(self._service_pb2, SERVICER_IDENTIFIER, None))
        self.assertIsNotNone(getattr(self._service_pb2, STUB_IDENTIFIER, None))
        self.assertIsNotNone(
            getattr(self._service_pb2, SERVER_FACTORY_IDENTIFIER, None))
        self.assertIsNotNone(
            getattr(self._service_pb2, STUB_FACTORY_IDENTIFIER, None))
    def testUpDown(self):
        # Server/stub come up and tear down cleanly without issuing RPCs.
        self._protoc()
        with _CreateService(self._payload_pb2, self._responses_pb2,
                            self._service_pb2):
            self._requests_pb2.SimpleRequest(response_size=13)
    def testIncompleteServicer(self):
        # Calls against a servicer with no implemented methods must surface
        # UNIMPLEMENTED to the client.
        self._protoc()
        with _CreateIncompleteService(self._service_pb2) as (_, stub):
            request = self._requests_pb2.SimpleRequest(response_size=13)
            try:
                stub.UnaryCall(request, test_constants.LONG_TIMEOUT)
            except face.AbortionError as error:
                self.assertEqual(interfaces.StatusCode.UNIMPLEMENTED,
                                 error.code)
    def testUnaryCall(self):
        self._protoc()
        with _CreateService(self._payload_pb2, self._responses_pb2,
                            self._service_pb2) as (methods, stub):
            request = self._requests_pb2.SimpleRequest(response_size=13)
            response = stub.UnaryCall(request, test_constants.LONG_TIMEOUT)
            expected_response = methods.UnaryCall(request, 'not a real context!')
            self.assertEqual(expected_response, response)
    def testUnaryCallFuture(self):
        self._protoc()
        with _CreateService(self._payload_pb2, self._responses_pb2,
                            self._service_pb2) as (methods, stub):
            request = self._requests_pb2.SimpleRequest(response_size=13)
            # Check that the call does not block waiting for the server to respond.
            with methods.pause():
                response_future = stub.UnaryCall.future(
                    request, test_constants.LONG_TIMEOUT)
            response = response_future.result()
            expected_response = methods.UnaryCall(request, 'not a real RpcContext!')
            self.assertEqual(expected_response, response)
    def testUnaryCallFutureExpired(self):
        # A paused server must cause a short-deadline future to expire.
        self._protoc()
        with _CreateService(self._payload_pb2, self._responses_pb2,
                            self._service_pb2) as (methods, stub):
            request = self._requests_pb2.SimpleRequest(response_size=13)
            with methods.pause():
                response_future = stub.UnaryCall.future(
                    request, test_constants.SHORT_TIMEOUT)
                with self.assertRaises(face.ExpirationError):
                    response_future.result()
    def testUnaryCallFutureCancelled(self):
        self._protoc()
        with _CreateService(self._payload_pb2, self._responses_pb2,
                            self._service_pb2) as (methods, stub):
            request = self._requests_pb2.SimpleRequest(response_size=13)
            with methods.pause():
                response_future = stub.UnaryCall.future(request, 1)
                response_future.cancel()
                self.assertTrue(response_future.cancelled())
    def testUnaryCallFutureFailed(self):
        self._protoc()
        with _CreateService(self._payload_pb2, self._responses_pb2,
                            self._service_pb2) as (methods, stub):
            request = self._requests_pb2.SimpleRequest(response_size=13)
            with methods.fail():
                response_future = stub.UnaryCall.future(
                    request, test_constants.LONG_TIMEOUT)
                self.assertIsNotNone(response_future.exception())
    def testStreamingOutputCall(self):
        self._protoc()
        with _CreateService(self._payload_pb2, self._responses_pb2,
                            self._service_pb2) as (methods, stub):
            request = _streaming_output_request(self._requests_pb2)
            responses = stub.StreamingOutputCall(request,
                                                 test_constants.LONG_TIMEOUT)
            expected_responses = methods.StreamingOutputCall(
                request, 'not a real RpcContext!')
            for expected_response, response in moves.zip_longest(
                    expected_responses, responses):
                self.assertEqual(expected_response, response)
    def testStreamingOutputCallExpired(self):
        self._protoc()
        with _CreateService(self._payload_pb2, self._responses_pb2,
                            self._service_pb2) as (methods, stub):
            request = _streaming_output_request(self._requests_pb2)
            with methods.pause():
                responses = stub.StreamingOutputCall(
                    request, test_constants.SHORT_TIMEOUT)
                with self.assertRaises(face.ExpirationError):
                    list(responses)
    def testStreamingOutputCallCancelled(self):
        self._protoc()
        with _CreateService(self._payload_pb2, self._responses_pb2,
                            self._service_pb2) as (methods, stub):
            request = _streaming_output_request(self._requests_pb2)
            responses = stub.StreamingOutputCall(request,
                                                 test_constants.LONG_TIMEOUT)
            next(responses)
            responses.cancel()
            with self.assertRaises(face.CancellationError):
                next(responses)
    def testStreamingOutputCallFailed(self):
        self._protoc()
        with _CreateService(self._payload_pb2, self._responses_pb2,
                            self._service_pb2) as (methods, stub):
            request = _streaming_output_request(self._requests_pb2)
            with methods.fail():
                responses = stub.StreamingOutputCall(request, 1)
                self.assertIsNotNone(responses)
                with self.assertRaises(face.RemoteError):
                    next(responses)
    def testStreamingInputCall(self):
        self._protoc()
        with _CreateService(self._payload_pb2, self._responses_pb2,
                            self._service_pb2) as (methods, stub):
            response = stub.StreamingInputCall(
                _streaming_input_request_iterator(self._payload_pb2,
                                                  self._requests_pb2),
                test_constants.LONG_TIMEOUT)
            expected_response = methods.StreamingInputCall(
                _streaming_input_request_iterator(self._payload_pb2,
                                                  self._requests_pb2),
                'not a real RpcContext!')
            self.assertEqual(expected_response, response)
    def testStreamingInputCallFuture(self):
        self._protoc()
        with _CreateService(self._payload_pb2, self._responses_pb2,
                            self._service_pb2) as (methods, stub):
            with methods.pause():
                response_future = stub.StreamingInputCall.future(
                    _streaming_input_request_iterator(self._payload_pb2,
                                                      self._requests_pb2),
                    test_constants.LONG_TIMEOUT)
            response = response_future.result()
            expected_response = methods.StreamingInputCall(
                _streaming_input_request_iterator(self._payload_pb2,
                                                  self._requests_pb2),
                'not a real RpcContext!')
            self.assertEqual(expected_response, response)
    def testStreamingInputCallFutureExpired(self):
        self._protoc()
        with _CreateService(self._payload_pb2, self._responses_pb2,
                            self._service_pb2) as (methods, stub):
            with methods.pause():
                response_future = stub.StreamingInputCall.future(
                    _streaming_input_request_iterator(self._payload_pb2,
                                                      self._requests_pb2),
                    test_constants.SHORT_TIMEOUT)
                with self.assertRaises(face.ExpirationError):
                    response_future.result()
                self.assertIsInstance(response_future.exception(),
                                      face.ExpirationError)
    def testStreamingInputCallFutureCancelled(self):
        self._protoc()
        with _CreateService(self._payload_pb2, self._responses_pb2,
                            self._service_pb2) as (methods, stub):
            with methods.pause():
                response_future = stub.StreamingInputCall.future(
                    _streaming_input_request_iterator(self._payload_pb2,
                                                      self._requests_pb2),
                    test_constants.LONG_TIMEOUT)
                response_future.cancel()
                self.assertTrue(response_future.cancelled())
            with self.assertRaises(future.CancelledError):
                response_future.result()
    def testStreamingInputCallFutureFailed(self):
        self._protoc()
        with _CreateService(self._payload_pb2, self._responses_pb2,
                            self._service_pb2) as (methods, stub):
            with methods.fail():
                response_future = stub.StreamingInputCall.future(
                    _streaming_input_request_iterator(self._payload_pb2,
                                                      self._requests_pb2),
                    test_constants.LONG_TIMEOUT)
                self.assertIsNotNone(response_future.exception())
    def testFullDuplexCall(self):
        self._protoc()
        with _CreateService(self._payload_pb2, self._responses_pb2,
                            self._service_pb2) as (methods, stub):
            responses = stub.FullDuplexCall(
                _full_duplex_request_iterator(self._requests_pb2),
                test_constants.LONG_TIMEOUT)
            expected_responses = methods.FullDuplexCall(
                _full_duplex_request_iterator(self._requests_pb2),
                'not a real RpcContext!')
            for expected_response, response in moves.zip_longest(
                    expected_responses, responses):
                self.assertEqual(expected_response, response)
    def testFullDuplexCallExpired(self):
        self._protoc()
        request_iterator = _full_duplex_request_iterator(self._requests_pb2)
        with _CreateService(self._payload_pb2, self._responses_pb2,
                            self._service_pb2) as (methods, stub):
            with methods.pause():
                responses = stub.FullDuplexCall(request_iterator,
                                                test_constants.SHORT_TIMEOUT)
                with self.assertRaises(face.ExpirationError):
                    list(responses)
    def testFullDuplexCallCancelled(self):
        self._protoc()
        with _CreateService(self._payload_pb2, self._responses_pb2,
                            self._service_pb2) as (methods, stub):
            request_iterator = _full_duplex_request_iterator(self._requests_pb2)
            responses = stub.FullDuplexCall(request_iterator,
                                            test_constants.LONG_TIMEOUT)
            next(responses)
            responses.cancel()
            with self.assertRaises(face.CancellationError):
                next(responses)
    def testFullDuplexCallFailed(self):
        self._protoc()
        request_iterator = _full_duplex_request_iterator(self._requests_pb2)
        with _CreateService(self._payload_pb2, self._responses_pb2,
                            self._service_pb2) as (methods, stub):
            with methods.fail():
                responses = stub.FullDuplexCall(request_iterator,
                                                test_constants.LONG_TIMEOUT)
                self.assertIsNotNone(responses)
                with self.assertRaises(face.RemoteError):
                    next(responses)
    def testHalfDuplexCall(self):
        self._protoc()
        with _CreateService(self._payload_pb2, self._responses_pb2,
                            self._service_pb2) as (methods, stub):
            def half_duplex_request_iterator():
                request = self._requests_pb2.StreamingOutputCallRequest()
                request.response_parameters.add(size=1, interval_us=0)
                yield request
                request = self._requests_pb2.StreamingOutputCallRequest()
                request.response_parameters.add(size=2, interval_us=0)
                request.response_parameters.add(size=3, interval_us=0)
                yield request
            responses = stub.HalfDuplexCall(half_duplex_request_iterator(),
                                            test_constants.LONG_TIMEOUT)
            expected_responses = methods.HalfDuplexCall(
                half_duplex_request_iterator(), 'not a real RpcContext!')
            for check in moves.zip_longest(expected_responses, responses):
                expected_response, response = check
                self.assertEqual(expected_response, response)
    def testHalfDuplexCallWedged(self):
        self._protoc()
        condition = threading.Condition()
        wait_cell = [False]
        @contextlib.contextmanager
        def wait():  # pylint: disable=invalid-name
            # Where's Python 3's 'nonlocal' statement when you need it?
            with condition:
                wait_cell[0] = True
            yield
            with condition:
                wait_cell[0] = False
                condition.notify_all()
        def half_duplex_request_iterator():
            # Emits one request, then blocks while the wait() context is
            # active so the request stream never completes in time.
            request = self._requests_pb2.StreamingOutputCallRequest()
            request.response_parameters.add(size=1, interval_us=0)
            yield request
            with condition:
                while wait_cell[0]:
                    condition.wait()
        with _CreateService(self._payload_pb2, self._responses_pb2,
                            self._service_pb2) as (methods, stub):
            with wait():
                responses = stub.HalfDuplexCall(half_duplex_request_iterator(),
                                                test_constants.SHORT_TIMEOUT)
                # half-duplex waits for the client to send all info
                with self.assertRaises(face.ExpirationError):
                    next(responses)
if __name__ == '__main__':
    # verbosity=2 prints each test method's name as it runs.
    unittest.main(verbosity=2)
|
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {
'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'
}
DOCUMENTATION = r'''
---
module: vmware_host_firewall_manager
short_description: Manage firewall configurations on an ESXi host
description:
- This module can be used to manage firewall configurations on an ESXi host when ESXi hostname or Cluster name is given.
version_added: '2.5'
author:
- Abhijeet Kasurde (@Akasurde)
notes:
- Tested on vSphere 6.5
requirements:
- python >= 2.6
- PyVmomi
options:
cluster_name:
description:
- Name of the cluster.
- Firewall settings are applied to every ESXi host system in given cluster.
- If C(esxi_hostname) is not given, this parameter is required.
esxi_hostname:
description:
- ESXi hostname.
- Firewall settings are applied to this ESXi host system.
- If C(cluster_name) is not given, this parameter is required.
rules:
description:
- A list of Rule set which needs to be managed.
- Each member of list is rule set name and state to be set the rule.
- Both rule name and rule state are required parameters.
- Please see examples for more information.
default: []
extends_documentation_fragment: vmware.documentation
'''
EXAMPLES = r'''
- name: Enable vvold rule set for all ESXi Host in given Cluster
vmware_host_firewall_manager:
hostname: '{{ vcenter_hostname }}'
username: '{{ vcenter_username }}'
password: '{{ vcenter_password }}'
cluster_name: cluster_name
rules:
- name: vvold
enabled: True
delegate_to: localhost
- name: Enable vvold rule set for an ESXi Host
vmware_host_firewall_manager:
hostname: '{{ vcenter_hostname }}'
username: '{{ vcenter_username }}'
password: '{{ vcenter_password }}'
esxi_hostname: '{{ esxi_hostname }}'
rules:
- name: vvold
enabled: True
delegate_to: localhost
- name: Manage multiple rule set for an ESXi Host
vmware_host_firewall_manager:
hostname: '{{ vcenter_hostname }}'
username: '{{ vcenter_username }}'
password: '{{ vcenter_password }}'
esxi_hostname: '{{ esxi_hostname }}'
rules:
- name: vvold
enabled: True
- name: CIMHttpServer
enabled: False
delegate_to: localhost
'''
RETURN = r'''
rule_set_state:
description:
- dict with hostname as key and dict with firewall rule set facts as value
returned: success
type: dict
sample: {
"rule_set_state": {
"localhost.localdomain": {
"CIMHttpServer": {
"current_state": true,
"desired_state": true,
"previous_state": true
},
"vvold": {
"current_state": true,
"desired_state": true,
"previous_state": true
}
}
}
}
'''
try:
from pyVmomi import vim
except ImportError:
pass
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.vmware import vmware_argument_spec, PyVmomi
from ansible.module_utils._text import to_native
class VmwareFirewallManager(PyVmomi):
    """Apply desired firewall rule-set states to the selected ESXi hosts.

    Hosts are selected via the module's cluster_name/esxi_hostname
    parameters; the desired rule-set states come from the 'rules' parameter.
    """

    def __init__(self, module):
        super(VmwareFirewallManager, self).__init__(module)
        cluster_name = self.params.get('cluster_name', None)
        esxi_host_name = self.params.get('esxi_hostname', None)
        # NOTE(review): 'options' is not declared in the argument spec, so
        # this always resolves to the empty-dict default — confirm before
        # removing.
        self.options = self.params.get('options', dict())
        self.hosts = self.get_all_host_objs(cluster_name=cluster_name, esxi_host_name=esxi_host_name)
        self.firewall_facts = dict()
        self.rule_options = self.module.params.get("rules")
        self.gather_rule_set()

    def gather_rule_set(self):
        """Record each host's current rule-set states in self.firewall_facts,
        keyed by host name, then rule-set key, as {'enabled': bool}."""
        for host in self.hosts:
            self.firewall_facts[host.name] = {}
            firewall_system = host.configManager.firewallSystem
            if firewall_system:
                for rule_set_obj in firewall_system.firewallInfo.ruleset:
                    temp_rule_dict = dict()
                    temp_rule_dict['enabled'] = rule_set_obj.enabled
                    self.firewall_facts[host.name][rule_set_obj.key] = temp_rule_dict

    def ensure(self):
        """Reconfigure each host's rule sets to match the requested states
        and exit the module with per-host, per-rule state facts."""
        fw_change_list = []
        results = dict(changed=False, rule_set_state=dict())
        for host in self.hosts:
            firewall_system = host.configManager.firewallSystem
            if firewall_system is None:
                # Host exposes no firewall manager; nothing to manage there.
                continue
            results['rule_set_state'][host.name] = dict()
            for rule_option in self.rule_options:
                rule_name = rule_option.get('name', None)
                if rule_name is None:
                    self.module.fail_json(msg="Please specify rule.name for rule set"
                                              " as it is required parameter.")
                if rule_name not in self.firewall_facts[host.name]:
                    self.module.fail_json(msg="rule named '%s' wasn't found." % rule_name)
                rule_enabled = rule_option.get('enabled', None)
                if rule_enabled is None:
                    self.module.fail_json(msg="Please specify rules.enabled for rule set"
                                              " %s as it is required parameter." % rule_name)
                current_rule_state = self.firewall_facts[host.name][rule_name]['enabled']
                if current_rule_state != rule_enabled:
                    try:
                        if rule_enabled:
                            firewall_system.EnableRuleset(id=rule_name)
                        else:
                            firewall_system.DisableRuleset(id=rule_name)
                        fw_change_list.append(True)
                    except vim.fault.NotFound as not_found:
                        self.module.fail_json(msg="Failed to enable rule set %s as"
                                                  " rule set id is unknown : %s" % (rule_name,
                                                                                    to_native(not_found.msg)))
                    except vim.fault.HostConfigFault as host_config_fault:
                        self.module.fail_json(msg="Failed to enable rule set %s as an internal"
                                                  " error happened while reconfiguring"
                                                  " rule set : %s" % (rule_name,
                                                                      to_native(host_config_fault.msg)))
                results['rule_set_state'][host.name][rule_name] = dict(current_state=rule_enabled,
                                                                       previous_state=current_rule_state,
                                                                       desired_state=rule_enabled,
                                                                       )
        if any(fw_change_list):
            results['changed'] = True
        self.module.exit_json(**results)
def main():
    """Module entry point: build the argument spec, construct the manager
    (which gathers current rule-set facts), and apply the desired states."""
    argument_spec = vmware_argument_spec()
    extra_options = dict(
        cluster_name=dict(type='str', required=False),
        esxi_hostname=dict(type='str', required=False),
        rules=dict(type='list', default=list(), required=False),
    )
    argument_spec.update(extra_options)
    # At least one way of selecting hosts must be supplied.
    module = AnsibleModule(
        argument_spec=argument_spec,
        required_one_of=[
            ['cluster_name', 'esxi_hostname'],
        ]
    )
    manager = VmwareFirewallManager(module)
    manager.ensure()
if __name__ == "__main__":
    # Standard Ansible module entry point.
    main()
|
from lettuce import world, step
from common import *
from terrain.steps import reload_the_page
from selenium.common.exceptions import (
InvalidElementStateException, WebDriverException)
from nose.tools import assert_in, assert_not_in, assert_equal, assert_not_equal # pylint: disable=E0611
@step(u'I am viewing the grading settings')
def view_grading_settings(step):
    """Navigate from the course page to Settings -> Grading."""
    world.click_course_settings()
    world.css_click('li.nav-course-settings-grading a')
@step(u'I add "([^"]*)" new grade')
def add_grade(step, many):
    """Click the 'new grade' button the requested number of times."""
    for _ in range(int(many)):
        world.css_click('.new-grade-button')
@step(u'I delete a grade')
def delete_grade(step):
    """Remove a grade range by clicking its remove button via JavaScript."""
    # Clicking through webdriver proved unreliable here — presumably because
    # the remove button only appears on hover (note the mouseover in the
    # abandoned attempt below) — TODO confirm before reviving it.
    #grade_css = 'li.grade-specific-bar > a.remove-button'
    #range_css = '.grade-specific-bar'
    #world.css_find(range_css)[1].mouseover()
    #world.css_click(grade_css)
    world.browser.execute_script('document.getElementsByClassName("remove-button")[0].click()')
@step(u'I see I now have "([^"]*)" grades$')
def view_grade_slider(step, how_many):
    """Assert the page shows exactly the expected number of grade bars."""
    found_grades = world.css_find('.grade-specific-bar')
    assert_equal(len(found_grades), int(how_many))
@step(u'I move a grading section')
def move_grade_slider(step):
    """Drag the first grade-range resize handle 100px to the right."""
    handle = world.css_find('.ui-resizable-e').first
    handle.action_chains.drag_and_drop_by_offset(
        handle._element, 100, 0).perform()
@step(u'I see that the grade range has changed')
def confirm_change(step):
    """Assert that no grade range still reads the default '0-50'."""
    range_css = '.range'
    # Iterate by index because css_html addresses elements by index.
    count = len(world.css_find(range_css))
    for index in range(count):
        assert_not_equal(world.css_html(range_css, index=index), '0-50')
@step(u'I change assignment type "([^"]*)" to "([^"]*)"$')
def change_assignment_name(step, old_name, new_name):
    """Replace the name of the assignment type currently called *old_name*."""
    name_id = '#course-grading-assignment-name'
    index = get_type_index(old_name)
    field = world.css_find(name_id)[index]
    assert_not_equal(index, -1)
    # Clear the field one character at a time, then type the new name.
    for _ in old_name:
        field._element.send_keys(Keys.END, Keys.BACK_SPACE)
    field._element.send_keys(new_name)
@step(u'I go back to the main course page')
def main_course_page(step):
    """Visit the course outline page for the scenario's course."""
    course = world.scenario_dict['COURSE']
    course_name = course.display_name.replace(' ', '_')
    main_page_link = '/course/{org}.{number}.{name}/branch/draft/block/{name}'.format(
        org=course.org,
        number=course.number,
        name=course_name
    )
    world.visit(main_page_link)
    assert_in('Course Outline', world.css_text('h1.page-header'))
@step(u'I do( not)? see the assignment name "([^"]*)"$')
def see_assignment_name(step, do_not, name):
    """Assert the assignment menu does (or does not) list the given name."""
    assignment_menu_css = 'ul.menu > li > a'
    # The menu may take a moment to redraw; first assert it exists at all.
    assert_true(
        world.css_find(assignment_menu_css),
        msg="Could not find assignment menu"
    )
    menu_items = world.css_find(assignment_menu_css)
    listed_names = [item.html for item in menu_items]
    if do_not:
        assert_not_in(name, listed_names)
    else:
        assert_in(name, listed_names)
@step(u'I delete the assignment type "([^"]*)"$')
def delete_assignment_type(step, to_delete):
    """Click the remove button belonging to the named assignment type."""
    world.css_click('.remove-grading-data', index=get_type_index(to_delete))
@step(u'I add a new assignment type "([^"]*)"$')
def add_assignment_type(step, new_name):
    """Add an assignment type and fill in its name."""
    world.css_click('.add-grading-data')
    name_fields = world.css_find('#course-grading-assignment-name')
    # The freshly added type is the last name field on the page.
    name_fields[-1]._element.send_keys(new_name)
@step(u'I set the assignment weight to "([^"]*)"$')
def set_weight(step, weight):
    """Overwrite the weight of the most recently added assignment type."""
    weight_id = '#course-grading-assignment-gradeweight'
    weight_field = world.css_find(weight_id)[-1]
    # Clear the existing value one character at a time before typing.
    for _ in world.css_value(weight_id, -1):
        weight_field._element.send_keys(Keys.END, Keys.BACK_SPACE)
    weight_field._element.send_keys(weight)
@step(u'the assignment weight is displayed as "([^"]*)"$')
def verify_weight(step, weight):
    """Assert the last assignment type's weight field shows *weight*."""
    assert_equal(
        world.css_value('#course-grading-assignment-gradeweight', -1), weight)
@step(u'I have populated the course')
def populate_course(step):
    """Create a section and a subsection so the course is non-empty."""
    for phrase in ('I have added a new section', 'I have added a new subsection'):
        step.given(phrase)
@step(u'I do not see the changes persisted on refresh$')
def changes_not_persisted(step):
    """After a reload, the first assignment name is back to the default."""
    reload_the_page(step)
    assert_equal(world.css_value('#course-grading-assignment-name'), 'Homework')
@step(u'I see the assignment type "(.*)"$')
def i_see_the_assignment_type(_step, name):
    """Assert some assignment-type name field has the given value."""
    found_fields = world.css_find('#course-grading-assignment-name')
    assert_in(name, [element['value'] for element in found_fields])
@step(u'I change the highest grade range to "(.*)"$')
def change_grade_range(_step, range_name):
    """Rename the top (first) grade range."""
    world.css_find('span.letter-grade').first.value = range_name
@step(u'I see the highest grade range is "(.*)"$')
def i_see_highest_grade_range(_step, range_name):
    """Assert the top grade range carries the expected name."""
    top_grade = world.css_find('span.letter-grade').first
    assert_equal(top_grade.value, range_name)
@step(u'I cannot edit the "Fail" grade range$')
def cannot_edit_fail(_step):
    """The bottom grade range is read-only: editing it must be rejected."""
    range_css = 'span.letter-grade'
    ranges = world.css_find(range_css)
    assert_equal(len(ranges), 2)
    assert_not_equal(ranges.last.value, 'Failure')
    # Attempting the edit should raise; swallow the expected exception.
    try:
        ranges.last.value = 'Failure'
    except InvalidElementStateException:
        pass  # Editing a read-only element fails, as intended.
    # Re-query and verify that nothing has changed.
    ranges = world.css_find(range_css)
    assert_equal(len(ranges), 2)
    assert_not_equal(ranges.last.value, 'Failure')
@step(u'I change the grace period to "(.*)"$')
def i_change_grace_period(_step, grace_period):
    """Set the course grace period once the field has finished loading."""
    grace_period_css = '#course-grading-graceperiod'
    field = world.css_find(grace_period_css).first
    # The JavaScript can take a moment to populate the field; typing before
    # that happens yields an invalid value (e.g. "00:0048:00") which
    # prevents saving.
    assert_true(world.css_has_value(grace_period_css, "00:00"))
    field.value = grace_period
@step(u'I see the grace period is "(.*)"$')
def the_grace_period_is(_step, grace_period):
    """Wait until the grace-period field displays the expected value."""
    selector = '#course-grading-graceperiod'
    # The field starts at the default 00:00, so poll until it updates.
    world.wait_for(lambda _: world.css_has_value(selector, grace_period))
def get_type_index(name):
    """Return the index of the assignment type named `name`, or -1 if absent."""
    selector = '#course-grading-assignment-name'
    total = len(world.css_find(selector))
    for idx in range(total):
        if world.css_value(selector, index=idx) == name:
            return idx
    return -1
|
"""Tests for tensorflow.ops.reverse_sequence_op."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import itertools
import sys
import numpy as np
from tensorflow.python.client import session
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.platform import benchmark
from tensorflow.python.platform import test
class WhereOpTest(test.TestCase):
  """Tests for array_ops.where: extracting the indices of True/nonzero
  entries, and the three-argument elementwise-select form."""

  def _testWhere(self, x, truth, expected_err_re=None):
    """Run array_ops.where(x) and compare the result against `truth`.

    Args:
      x: numpy input array (bool or numeric; nonzero counts as True).
      truth: expected int64 index array of shape [num_true, x.ndim].
      expected_err_re: if not None, a regex the raised op error must match.
    """
    with self.cached_session(use_gpu=True):
      ans = array_ops.where(x)
      # The static shape only pins the rank: [num_true (unknown), ndim].
      self.assertEqual([None, x.ndim], ans.get_shape().as_list())
      if expected_err_re is None:
        tf_ans = self.evaluate(ans)
        self.assertAllClose(tf_ans, truth, atol=1e-10)
      else:
        with self.assertRaisesOpError(expected_err_re):
          self.evaluate(ans)

  def testWrongNumbers(self):
    """The three-argument form rejects a missing x or y branch."""
    with self.session(use_gpu=True):
      with self.assertRaises(ValueError):
        array_ops.where([False, True], [1, 2], None)
      with self.assertRaises(ValueError):
        array_ops.where([False, True], None, [1, 2])

  @test_util.run_deprecated_v1
  def testBasicVec(self):
    x = np.asarray([True, False])
    truth = np.asarray([[0]], dtype=np.int64)
    self._testWhere(x, truth)

    x = np.asarray([False, True, False])
    truth = np.asarray([[1]], dtype=np.int64)
    self._testWhere(x, truth)

    x = np.asarray([False, False, True, False, True])
    truth = np.asarray([[2], [4]], dtype=np.int64)
    self._testWhere(x, truth)

  @test_util.run_deprecated_v1
  def testRandomVec(self):
    x = np.random.rand(1000000) > 0.5
    truth = np.vstack([np.where(x)[0].astype(np.int64)]).T
    self._testWhere(x, truth)

  @test_util.run_deprecated_v1
  def testBasicMat(self):
    x = np.asarray([[True, False], [True, False]])
    # Ensure RowMajor mode
    truth = np.asarray([[0, 0], [1, 0]], dtype=np.int64)
    self._testWhere(x, truth)

  @test_util.run_deprecated_v1
  def testBasic3Tensor(self):
    x = np.asarray([[[True, False], [True, False]],
                    [[False, True], [False, True]],
                    [[False, False], [False, True]]])
    # Ensure RowMajor mode
    truth = np.asarray(
        [[0, 0, 0], [0, 1, 0], [1, 0, 1], [1, 1, 1], [2, 1, 1]],
        dtype=np.int64)
    self._testWhere(x, truth)

  def _testRandom(self, dtype, expected_err_re=None):
    """Compare against np.where on a random sign pattern cast to `dtype`."""
    shape = [127, 33, 53]
    # Fix: a complex random array was previously built here and then
    # immediately overwritten by the next line -- dead work, removed.
    x = (np.random.randn(*shape) > 0).astype(dtype)
    truth = np.where(np.abs(x) > 0)  # Tuples of indices by axis.
    truth = np.vstack(truth).T  # Convert to [num_true, indices].
    self._testWhere(x, truth, expected_err_re)

  @test_util.run_deprecated_v1
  def testRandomBool(self):
    # np.bool_ is the canonical bool dtype; the np.bool alias was
    # deprecated in NumPy 1.20 and removed in 1.24. Same dtype semantics.
    self._testRandom(np.bool_)

  @test_util.run_deprecated_v1
  def testRandomInt32(self):
    self._testRandom(np.int32)

  @test_util.run_deprecated_v1
  def testRandomInt64(self):
    self._testRandom(np.int64)

  @test_util.run_deprecated_v1
  def testRandomFloat(self):
    self._testRandom(np.float32)

  @test_util.run_deprecated_v1
  def testRandomDouble(self):
    self._testRandom(np.float64)

  @test_util.run_deprecated_v1
  def testRandomComplex64(self):
    self._testRandom(np.complex64)

  @test_util.run_deprecated_v1
  def testRandomComplex128(self):
    self._testRandom(np.complex128)

  @test_util.run_deprecated_v1
  def testRandomUint8(self):
    self._testRandom(np.uint8)

  @test_util.run_deprecated_v1
  def testRandomInt8(self):
    self._testRandom(np.int8)

  @test_util.run_deprecated_v1
  def testRandomInt16(self):
    self._testRandom(np.int16)

  @test_util.run_deprecated_v1
  def testThreeArgument(self):
    """where(cond, x, y) matches np.where on a small integer matrix."""
    x = np.array([[-2, 3, -1], [1, -3, -3]])
    np_val = np.where(x > 0, x * x, -x)
    with self.session(use_gpu=True):
      tf_val = array_ops.where(constant_op.constant(x) > 0, x * x, -x).eval()
    self.assertAllEqual(tf_val, np_val)

  @test_util.run_deprecated_v1
  def testBatchSelect(self):
    """A vector condition broadcasts across the rows of matrix operands."""
    x = np.array([[-2, 3, -1] * 64, [1, -3, -3] * 64] * 8192)  # [16384, 192]
    c_mat = np.array([[False] * 192, [True] * 192] * 8192)  # [16384, 192]
    c_vec = np.array([False, True] * 8192)  # [16384]
    np_val = np.where(c_mat, x * x, -x)
    with self.session(use_gpu=True):
      tf_val = array_ops.where(c_vec, x * x, -x).eval()
    self.assertAllEqual(tf_val, np_val)
class WhereBenchmark(test.Benchmark):
  """Microbenchmarks for tf.where: index extraction and batched select."""

  def _report(self, name, wall_time, gb_processed):
    # Emit one result line in a shared format for both benchmarks.
    throughput = gb_processed / wall_time
    print("Benchmark: %s \t wall_time: %0.03g s \t "
          "Throughput: %0.03g GB/s" % (name, wall_time, throughput))
    sys.stdout.flush()

  def benchmarkWhere(self):
    """Time index extraction over boolean masks of varying size/density."""
    configs = itertools.product(
        [10],                                     # rows
        [10, 100, 1000, 10000, 100000, 1000000],  # columns
        [0.01, 0.5, 0.99],                        # fraction of True entries
        [False, True])                            # CPU / GPU
    for (m, n, p, use_gpu) in configs:
      name = "m_%d_n_%d_p_%g_use_gpu_%s" % (m, n, p, use_gpu)
      device = "/%s:0" % ("gpu" if use_gpu else "cpu")
      with ops.Graph().as_default():
        with ops.device(device):
          x = random_ops.random_uniform((m, n), dtype=dtypes.float32) <= p
          v = resource_variable_ops.ResourceVariable(x)
          op = array_ops.where(v)
          with session.Session(config=benchmark.benchmark_config()) as sess:
            v.initializer.run()
            r = self.run_op_benchmark(sess, op, min_iters=100, name=name)
            gb_processed_input = m * n / 1.0e9
            # approximate size of output: m*n*p int64s for each axis.
            gb_processed_output = 2 * 8 * m * n * p / 1.0e9
            self._report(name, r["wall_time"],
                         gb_processed_input + gb_processed_output)

  def benchmarkBatchSelect(self):
    """Time vector-cond select over [m, n] operands on CPU and GPU."""
    for (m, n, use_gpu) in itertools.product([1000, 10000, 100000],
                                             [10, 100, 1000], [False, True]):
      name = "m_%d_n_%d_use_gpu_%s" % (m, n, use_gpu)
      device = "/%s:0" % ("gpu" if use_gpu else "cpu")
      with ops.Graph().as_default():
        with ops.device(device):
          x_gen = random_ops.random_uniform([m, n], dtype=dtypes.float32)
          y_gen = random_ops.random_uniform([m, n], dtype=dtypes.float32)
          c_gen = random_ops.random_uniform([m], dtype=dtypes.float32) <= 0.5
          x = resource_variable_ops.ResourceVariable(x_gen)
          y = resource_variable_ops.ResourceVariable(y_gen)
          c = resource_variable_ops.ResourceVariable(c_gen)
          op = array_ops.where(c, x, y)
          with session.Session(config=benchmark.benchmark_config()) as sess:
            x.initializer.run()
            y.initializer.run()
            c.initializer.run()
            r = self.run_op_benchmark(sess, op, min_iters=100, name=name)
            # approximate size of output: m*n*2 floats for each axis.
            self._report(name, r["wall_time"], m * n * 8 / 1.0e9)
# Standard test entry point: run all test cases and benchmarks in this file.
if __name__ == "__main__":
  test.main()
|
import sys, imp
from . import model, ffiplatform
class VCPythonEngine(object):
    """Verification engine that emits a hand-written CPython extension module.

    Used by cffi's verify() machinery: write_source_to_f() generates C source
    for a module whose init function registers Python->C wrapper functions and
    two "chained lists" of setup functions (one runnable at import time, one
    delayed until _cffi_setup() provides the <ctype> objects); load_library()
    then imports the compiled module and builds the FFILibrary object.
    """
    # key distinguishing this engine flavour in the generated module name
    _class_key = 'x'
    # this engine produces a real importable Python extension module
    _gen_python_module = True
    def __init__(self, verifier):
        self.verifier = verifier
        self.ffi = verifier.ffi
        # structs whose layout must be cross-checked after loading
        self._struct_pending_verification = {}
        # maps generated wrapper functions to their FunctionPtrType
        self._types_of_builtin_functions = {}
    def patch_extension_kwds(self, kwds):
        # No extra distutils Extension keywords are needed for this engine.
        pass
    def find_module(self, module_name, path, so_suffixes):
        """Locate an already-compiled extension; return its filename or None."""
        try:
            f, filename, descr = imp.find_module(module_name, path)
        except ImportError:
            return None
        if f is not None:
            f.close()
        # Note that after a setuptools installation, there are both .py
        # and .so files with the same basename. The code here relies on
        # imp.find_module() locating the .so in priority.
        if descr[0] not in so_suffixes:
            return None
        return filename
    def collect_types(self):
        """Assign an index to every <ctype> the generated C code will need."""
        self._typesdict = {}
        self._generate("collecttype")
    def _prnt(self, what=''):
        # Write one line of generated C source to the output file.
        self._f.write(what + '\n')
    def _gettypenum(self, type):
        # a KeyError here is a bug. please report it! :-)
        return self._typesdict[type]
    def _do_collect_type(self, tp):
        # Primitive types other than 'long double' are converted inline in C
        # and need no <ctype> object at runtime.
        if ((not isinstance(tp, model.PrimitiveType)
             or tp.name == 'long double')
                and tp not in self._typesdict):
            num = len(self._typesdict)
            self._typesdict[tp] = num
    def write_source_to_f(self):
        """Write the complete C source of the extension module to self._f."""
        self.collect_types()
        #
        # The new module will have a _cffi_setup() function that receives
        # objects from the ffi world, and that calls some setup code in
        # the module. This setup code is split in several independent
        # functions, e.g. one per constant. The functions are "chained"
        # by ending in a tail call to each other.
        #
        # This is further split in two chained lists, depending on if we
        # can do it at import-time or if we must wait for _cffi_setup() to
        # provide us with the <ctype> objects. This is needed because we
        # need the values of the enum constants in order to build the
        # <ctype 'enum'> that we may have to pass to _cffi_setup().
        #
        # The following two 'chained_list_constants' items contains
        # the head of these two chained lists, as a string that gives the
        # call to do, if any.
        self._chained_list_constants = ['((void)lib,0)', '((void)lib,0)']
        #
        prnt = self._prnt
        # first paste some standard set of lines that are mostly '#define'
        prnt(cffimod_header)
        prnt()
        # then paste the C source given by the user, verbatim.
        prnt(self.verifier.preamble)
        prnt()
        #
        # call generate_cpy_xxx_decl(), for every xxx found from
        # ffi._parser._declarations. This generates all the functions.
        self._generate("decl")
        #
        # implement the function _cffi_setup_custom() as calling the
        # head of the chained list.
        self._generate_setup_custom()
        prnt()
        #
        # produce the method table, including the entries for the
        # generated Python->C function wrappers, which are done
        # by generate_cpy_function_method().
        prnt('static PyMethodDef _cffi_methods[] = {')
        self._generate("method")
        prnt(' {"_cffi_setup", _cffi_setup, METH_VARARGS, NULL},')
        prnt(' {NULL, NULL, 0, NULL} /* Sentinel */')
        prnt('};')
        prnt()
        #
        # standard init.
        modname = self.verifier.get_module_name()
        constants = self._chained_list_constants[False]
        prnt('#if PY_MAJOR_VERSION >= 3')
        prnt()
        prnt('static struct PyModuleDef _cffi_module_def = {')
        prnt(' PyModuleDef_HEAD_INIT,')
        prnt(' "%s",' % modname)
        prnt(' NULL,')
        prnt(' -1,')
        prnt(' _cffi_methods,')
        prnt(' NULL, NULL, NULL, NULL')
        prnt('};')
        prnt()
        prnt('PyMODINIT_FUNC')
        prnt('PyInit_%s(void)' % modname)
        prnt('{')
        prnt(' PyObject *lib;')
        prnt(' lib = PyModule_Create(&_cffi_module_def);')
        prnt(' if (lib == NULL)')
        prnt(' return NULL;')
        prnt(' if (%s < 0 || _cffi_init() < 0) {' % (constants,))
        prnt(' Py_DECREF(lib);')
        prnt(' return NULL;')
        prnt(' }')
        prnt(' return lib;')
        prnt('}')
        prnt()
        prnt('#else')
        prnt()
        prnt('PyMODINIT_FUNC')
        prnt('init%s(void)' % modname)
        prnt('{')
        prnt(' PyObject *lib;')
        prnt(' lib = Py_InitModule("%s", _cffi_methods);' % modname)
        prnt(' if (lib == NULL)')
        prnt(' return;')
        prnt(' if (%s < 0 || _cffi_init() < 0)' % (constants,))
        prnt(' return;')
        prnt(' return;')
        prnt('}')
        prnt()
        prnt('#endif')
    def load_library(self, flags=None):
        """Import the compiled extension module and build the FFILibrary.

        'flags' are optional dlopen() flags applied while importing.
        Raises ffiplatform.VerificationError if the import fails or a
        struct layout disagrees with the C compiler's.
        """
        # XXX review all usages of 'self' here!
        # import it as a new extension module
        imp.acquire_lock()
        try:
            if hasattr(sys, "getdlopenflags"):
                previous_flags = sys.getdlopenflags()
            try:
                if hasattr(sys, "setdlopenflags") and flags is not None:
                    sys.setdlopenflags(flags)
                module = imp.load_dynamic(self.verifier.get_module_name(),
                                          self.verifier.modulefilename)
            except ImportError as e:
                error = "importing %r: %s" % (self.verifier.modulefilename, e)
                raise ffiplatform.VerificationError(error)
            finally:
                # NOTE(review): assumes get/setdlopenflags exist together;
                # 'previous_flags' would be unbound otherwise -- true on
                # all known platforms.
                if hasattr(sys, "setdlopenflags"):
                    sys.setdlopenflags(previous_flags)
        finally:
            imp.release_lock()
        #
        # call loading_cpy_struct() to get the struct layout inferred by
        # the C compiler
        self._load(module, 'loading')
        #
        # the C code will need the <ctype> objects. Collect them in
        # order in a list.
        revmapping = dict([(value, key)
                           for (key, value) in self._typesdict.items()])
        lst = [revmapping[i] for i in range(len(revmapping))]
        lst = list(map(self.ffi._get_cached_btype, lst))
        #
        # build the FFILibrary class and instance and call _cffi_setup().
        # this will set up some fields like '_cffi_types', and only then
        # it will invoke the chained list of functions that will really
        # build (notably) the constant objects, as <cdata> if they are
        # pointers, and store them as attributes on the 'library' object.
        class FFILibrary(object):
            _cffi_python_module = module
            _cffi_ffi = self.ffi
            _cffi_dir = []
            def __dir__(self):
                return FFILibrary._cffi_dir + list(self.__dict__)
        library = FFILibrary()
        if module._cffi_setup(lst, ffiplatform.VerificationError, library):
            import warnings
            warnings.warn("reimporting %r might overwrite older definitions"
                          % (self.verifier.get_module_name()))
        #
        # finally, call the loaded_cpy_xxx() functions. This will perform
        # the final adjustments, like copying the Python->C wrapper
        # functions from the module to the 'library' object, and setting
        # up the FFILibrary class with properties for the global C variables.
        self._load(module, 'loaded', library=library)
        module._cffi_original_ffi = self.ffi
        module._cffi_types_of_builtin_funcs = self._types_of_builtin_functions
        return library
    def _get_declarations(self):
        # Deterministic (sorted) iteration order over all parsed declarations.
        return sorted(self.ffi._parser._declarations.items())
    def _generate(self, step_name):
        """Dispatch each declaration to '_generate_cpy_<kind>_<step_name>'."""
        for name, tp in self._get_declarations():
            kind, realname = name.split(' ', 1)
            try:
                method = getattr(self, '_generate_cpy_%s_%s' % (kind,
                                                                step_name))
            except AttributeError:
                raise ffiplatform.VerificationError(
                    "not implemented in verify(): %r" % name)
            try:
                method(tp, realname)
            except Exception as e:
                model.attach_exception_info(e, name)
                raise
    def _load(self, module, step_name, **kwds):
        """Dispatch each declaration to '_<step_name>_cpy_<kind>'."""
        for name, tp in self._get_declarations():
            kind, realname = name.split(' ', 1)
            method = getattr(self, '_%s_cpy_%s' % (step_name, kind))
            try:
                method(tp, realname, module, **kwds)
            except Exception as e:
                model.attach_exception_info(e, name)
                raise
    def _generate_nothing(self, tp, name):
        # Placeholder for declaration kinds that emit no code in a given step.
        pass
    def _loaded_noop(self, tp, name, module, **kwds):
        # Placeholder for declaration kinds with nothing to do at load time.
        pass
    # ----------
    def _convert_funcarg_to_c(self, tp, fromvar, tovar, errcode):
        """Emit C code converting PyObject 'fromvar' into C variable 'tovar'.

        'errcode' is the C statement to execute on conversion failure
        (typically 'return NULL').
        """
        extraarg = ''
        if isinstance(tp, model.PrimitiveType):
            if tp.is_integer_type() and tp.name != '_Bool':
                converter = '_cffi_to_c_int'
                extraarg = ', %s' % tp.name
            else:
                converter = '(%s)_cffi_to_c_%s' % (tp.get_c_name(''),
                                                   tp.name.replace(' ', '_'))
            errvalue = '-1'
        #
        elif isinstance(tp, model.PointerType):
            self._convert_funcarg_to_c_ptr_or_array(tp, fromvar,
                                                    tovar, errcode)
            return
        #
        elif isinstance(tp, (model.StructOrUnion, model.EnumType)):
            # a struct (not a struct pointer) as a function argument
            self._prnt(' if (_cffi_to_c((char *)&%s, _cffi_type(%d), %s) < 0)'
                       % (tovar, self._gettypenum(tp), fromvar))
            self._prnt(' %s;' % errcode)
            return
        #
        elif isinstance(tp, model.FunctionPtrType):
            converter = '(%s)_cffi_to_c_pointer' % tp.get_c_name('')
            extraarg = ', _cffi_type(%d)' % self._gettypenum(tp)
            errvalue = 'NULL'
        #
        else:
            raise NotImplementedError(tp)
        #
        self._prnt(' %s = %s(%s%s);' % (tovar, converter, fromvar, extraarg))
        self._prnt(' if (%s == (%s)%s && PyErr_Occurred())' % (
            tovar, tp.get_c_name(''), errvalue))
        self._prnt(' %s;' % errcode)
    def _extra_local_variables(self, tp, localvars):
        # Pointer arguments need a 'datasize' local for the conversion below.
        if isinstance(tp, model.PointerType):
            localvars.add('Py_ssize_t datasize')
    def _convert_funcarg_to_c_ptr_or_array(self, tp, fromvar, tovar, errcode):
        """Emit C code converting 'fromvar' to pointer/array argument 'tovar'.

        Uses alloca() to hold a temporary array when the Python object is
        not already backed by suitable memory.
        """
        self._prnt(' datasize = _cffi_prepare_pointer_call_argument(')
        self._prnt(' _cffi_type(%d), %s, (char **)&%s);' % (
            self._gettypenum(tp), fromvar, tovar))
        self._prnt(' if (datasize != 0) {')
        self._prnt(' if (datasize < 0)')
        self._prnt(' %s;' % errcode)
        self._prnt(' %s = alloca((size_t)datasize);' % (tovar,))
        self._prnt(' memset((void *)%s, 0, (size_t)datasize);' % (tovar,))
        self._prnt(' if (_cffi_convert_array_from_object('
                   '(char *)%s, _cffi_type(%d), %s) < 0)' % (
            tovar, self._gettypenum(tp), fromvar))
        self._prnt(' %s;' % errcode)
        self._prnt(' }')
    def _convert_expr_from_c(self, tp, var, context):
        """Return a C expression building a PyObject from C value 'var'."""
        if isinstance(tp, model.PrimitiveType):
            if tp.is_integer_type():
                return '_cffi_from_c_int(%s, %s)' % (var, tp.name)
            elif tp.name != 'long double':
                return '_cffi_from_c_%s(%s)' % (tp.name.replace(' ', '_'), var)
            else:
                return '_cffi_from_c_deref((char *)&%s, _cffi_type(%d))' % (
                    var, self._gettypenum(tp))
        elif isinstance(tp, (model.PointerType, model.FunctionPtrType)):
            return '_cffi_from_c_pointer((char *)%s, _cffi_type(%d))' % (
                var, self._gettypenum(tp))
        elif isinstance(tp, model.ArrayType):
            return '_cffi_from_c_pointer((char *)%s, _cffi_type(%d))' % (
                var, self._gettypenum(model.PointerType(tp.item)))
        elif isinstance(tp, model.StructType):
            if tp.fldnames is None:
                raise TypeError("'%s' is used as %s, but is opaque" % (
                    tp._get_c_name(), context))
            return '_cffi_from_c_struct((char *)&%s, _cffi_type(%d))' % (
                var, self._gettypenum(tp))
        elif isinstance(tp, model.EnumType):
            return '_cffi_from_c_deref((char *)&%s, _cffi_type(%d))' % (
                var, self._gettypenum(tp))
        else:
            raise NotImplementedError(tp)
    # ----------
    # typedefs: generates no code so far
    _generate_cpy_typedef_collecttype = _generate_nothing
    _generate_cpy_typedef_decl = _generate_nothing
    _generate_cpy_typedef_method = _generate_nothing
    _loading_cpy_typedef = _loaded_noop
    _loaded_cpy_typedef = _loaded_noop
    # ----------
    # function declarations
    def _generate_cpy_function_collecttype(self, tp, name):
        assert isinstance(tp, model.FunctionPtrType)
        if tp.ellipsis:
            self._do_collect_type(tp)
        else:
            # don't call _do_collect_type(tp) in this common case,
            # otherwise test_autofilled_struct_as_argument fails
            for type in tp.args:
                self._do_collect_type(type)
            self._do_collect_type(tp.result)
    def _generate_cpy_function_decl(self, tp, name):
        """Emit the CPython wrapper function '_cffi_f_<name>'."""
        assert isinstance(tp, model.FunctionPtrType)
        if tp.ellipsis:
            # cannot support vararg functions better than this: check for its
            # exact type (including the fixed arguments), and build it as a
            # constant function pointer (no CPython wrapper)
            self._generate_cpy_const(False, name, tp)
            return
        prnt = self._prnt
        numargs = len(tp.args)
        # Pick the argument style matching the METH_* flag chosen later.
        if numargs == 0:
            argname = 'noarg'
        elif numargs == 1:
            argname = 'arg0'
        else:
            argname = 'args'
        prnt('static PyObject *')
        prnt('_cffi_f_%s(PyObject *self, PyObject *%s)' % (name, argname))
        prnt('{')
        #
        context = 'argument of %s' % name
        for i, type in enumerate(tp.args):
            prnt(' %s;' % type.get_c_name(' x%d' % i, context))
        #
        localvars = set()
        for type in tp.args:
            self._extra_local_variables(type, localvars)
        for decl in localvars:
            prnt(' %s;' % (decl,))
        #
        if not isinstance(tp.result, model.VoidType):
            result_code = 'result = '
            context = 'result of %s' % name
            prnt(' %s;' % tp.result.get_c_name(' result', context))
        else:
            result_code = ''
        #
        if len(tp.args) > 1:
            rng = range(len(tp.args))
            for i in rng:
                prnt(' PyObject *arg%d;' % i)
            prnt()
            prnt(' if (!PyArg_ParseTuple(args, "%s:%s", %s))' % (
                'O' * numargs, name, ', '.join(['&arg%d' % i for i in rng])))
            prnt(' return NULL;')
        prnt()
        #
        for i, type in enumerate(tp.args):
            self._convert_funcarg_to_c(type, 'arg%d' % i, 'x%d' % i,
                                       'return NULL')
            prnt()
        #
        # Release the GIL around the actual C call; save/restore errno.
        prnt(' Py_BEGIN_ALLOW_THREADS')
        prnt(' _cffi_restore_errno();')
        prnt(' { %s%s(%s); }' % (
            result_code, name,
            ', '.join(['x%d' % i for i in range(len(tp.args))])))
        prnt(' _cffi_save_errno();')
        prnt(' Py_END_ALLOW_THREADS')
        prnt()
        #
        prnt(' (void)self; /* unused */')
        if numargs == 0:
            prnt(' (void)noarg; /* unused */')
        if result_code:
            prnt(' return %s;' %
                 self._convert_expr_from_c(tp.result, 'result', 'result type'))
        else:
            prnt(' Py_INCREF(Py_None);')
            prnt(' return Py_None;')
        prnt('}')
        prnt()
    def _generate_cpy_function_method(self, tp, name):
        """Add the PyMethodDef table entry for the generated wrapper."""
        if tp.ellipsis:
            return
        numargs = len(tp.args)
        if numargs == 0:
            meth = 'METH_NOARGS'
        elif numargs == 1:
            meth = 'METH_O'
        else:
            meth = 'METH_VARARGS'
        self._prnt(' {"%s", _cffi_f_%s, %s, NULL},' % (name, name, meth))
    _loading_cpy_function = _loaded_noop
    def _loaded_cpy_function(self, tp, name, module, library):
        # Copy the built-in wrapper onto 'library' and remember its C type.
        if tp.ellipsis:
            return
        func = getattr(module, name)
        setattr(library, name, func)
        self._types_of_builtin_functions[func] = tp
    # ----------
    # named structs
    _generate_cpy_struct_collecttype = _generate_nothing
    def _generate_cpy_struct_decl(self, tp, name):
        assert name == tp.name
        self._generate_struct_or_union_decl(tp, 'struct', name)
    def _generate_cpy_struct_method(self, tp, name):
        self._generate_struct_or_union_method(tp, 'struct', name)
    def _loading_cpy_struct(self, tp, name, module):
        self._loading_struct_or_union(tp, 'struct', name, module)
    def _loaded_cpy_struct(self, tp, name, module, **kwds):
        self._loaded_struct_or_union(tp)
    _generate_cpy_union_collecttype = _generate_nothing
    def _generate_cpy_union_decl(self, tp, name):
        assert name == tp.name
        self._generate_struct_or_union_decl(tp, 'union', name)
    def _generate_cpy_union_method(self, tp, name):
        self._generate_struct_or_union_method(tp, 'union', name)
    def _loading_cpy_union(self, tp, name, module):
        self._loading_struct_or_union(tp, 'union', name, module)
    def _loaded_cpy_union(self, tp, name, module, **kwds):
        self._loaded_struct_or_union(tp)
    def _generate_struct_or_union_decl(self, tp, prefix, name):
        """Emit the compile-time check function and the layout function.

        The check function produces compiler warnings/errors if the user's
        declared field types disagree with the real ones; the layout
        function reports sizeof/offsetof values back to Python.
        """
        if tp.fldnames is None:
            return # nothing to do with opaque structs
        checkfuncname = '_cffi_check_%s_%s' % (prefix, name)
        layoutfuncname = '_cffi_layout_%s_%s' % (prefix, name)
        cname = ('%s %s' % (prefix, name)).strip()
        #
        prnt = self._prnt
        prnt('static void %s(%s *p)' % (checkfuncname, cname))
        prnt('{')
        prnt(' /* only to generate compile-time warnings or errors */')
        prnt(' (void)p;')
        for fname, ftype, fbitsize in tp.enumfields():
            if (isinstance(ftype, model.PrimitiveType)
                and ftype.is_integer_type()) or fbitsize >= 0:
                # accept all integers, but complain on float or double
                prnt(' (void)((p->%s) << 1);' % fname)
            else:
                # only accept exactly the type declared.
                try:
                    prnt(' { %s = &p->%s; (void)tmp; }' % (
                        ftype.get_c_name('*tmp', 'field %r'%fname), fname))
                except ffiplatform.VerificationError as e:
                    prnt(' /* %s */' % str(e)) # cannot verify it, ignore
        prnt('}')
        prnt('static PyObject *')
        prnt('%s(PyObject *self, PyObject *noarg)' % (layoutfuncname,))
        prnt('{')
        prnt(' struct _cffi_aligncheck { char x; %s y; };' % cname)
        prnt(' static Py_ssize_t nums[] = {')
        prnt(' sizeof(%s),' % cname)
        prnt(' offsetof(struct _cffi_aligncheck, y),')
        for fname, ftype, fbitsize in tp.enumfields():
            if fbitsize >= 0:
                continue # xxx ignore fbitsize for now
            prnt(' offsetof(%s, %s),' % (cname, fname))
            if isinstance(ftype, model.ArrayType) and ftype.length is None:
                prnt(' 0, /* %s */' % ftype._get_c_name())
            else:
                prnt(' sizeof(((%s *)0)->%s),' % (cname, fname))
        prnt(' -1')
        prnt(' };')
        prnt(' (void)self; /* unused */')
        prnt(' (void)noarg; /* unused */')
        prnt(' return _cffi_get_struct_layout(nums);')
        prnt(' /* the next line is not executed, but compiled */')
        prnt(' %s(0);' % (checkfuncname,))
        prnt('}')
        prnt()
    def _generate_struct_or_union_method(self, tp, prefix, name):
        # Register the layout function in the module's method table.
        if tp.fldnames is None:
            return # nothing to do with opaque structs
        layoutfuncname = '_cffi_layout_%s_%s' % (prefix, name)
        self._prnt(' {"%s", %s, METH_NOARGS, NULL},' % (layoutfuncname,
                                                        layoutfuncname))
    def _loading_struct_or_union(self, tp, prefix, name, module):
        """Call the generated layout function and record or apply its result."""
        if tp.fldnames is None:
            return # nothing to do with opaque structs
        layoutfuncname = '_cffi_layout_%s_%s' % (prefix, name)
        #
        function = getattr(module, layoutfuncname)
        layout = function()
        if isinstance(tp, model.StructOrUnion) and tp.partial:
            # use the function()'s sizes and offsets to guide the
            # layout of the struct
            totalsize = layout[0]
            totalalignment = layout[1]
            fieldofs = layout[2::2]
            fieldsize = layout[3::2]
            tp.force_flatten()
            assert len(fieldofs) == len(fieldsize) == len(tp.fldnames)
            tp.fixedlayout = fieldofs, fieldsize, totalsize, totalalignment
        else:
            cname = ('%s %s' % (prefix, name)).strip()
            self._struct_pending_verification[tp] = layout, cname
    def _loaded_struct_or_union(self, tp):
        """Cross-check the C compiler's layout against cffi's own computation."""
        if tp.fldnames is None:
            return # nothing to do with opaque structs
        self.ffi._get_cached_btype(tp) # force 'fixedlayout' to be considered
        if tp in self._struct_pending_verification:
            # check that the layout sizes and offsets match the real ones
            def check(realvalue, expectedvalue, msg):
                if realvalue != expectedvalue:
                    raise ffiplatform.VerificationError(
                        "%s (we have %d, but C compiler says %d)"
                        % (msg, expectedvalue, realvalue))
            ffi = self.ffi
            BStruct = ffi._get_cached_btype(tp)
            layout, cname = self._struct_pending_verification.pop(tp)
            check(layout[0], ffi.sizeof(BStruct), "wrong total size")
            check(layout[1], ffi.alignof(BStruct), "wrong total alignment")
            i = 2
            for fname, ftype, fbitsize in tp.enumfields():
                if fbitsize >= 0:
                    continue # xxx ignore fbitsize for now
                check(layout[i], ffi.offsetof(BStruct, fname),
                      "wrong offset for field %r" % (fname,))
                if layout[i+1] != 0:
                    BField = ffi._get_cached_btype(ftype)
                    check(layout[i+1], ffi.sizeof(BField),
                          "wrong size for field %r" % (fname,))
                i += 2
            assert i == len(layout)
    # ----------
    # 'anonymous' declarations. These are produced for anonymous structs
    # or unions; the 'name' is obtained by a typedef.
    _generate_cpy_anonymous_collecttype = _generate_nothing
    def _generate_cpy_anonymous_decl(self, tp, name):
        if isinstance(tp, model.EnumType):
            self._generate_cpy_enum_decl(tp, name, '')
        else:
            self._generate_struct_or_union_decl(tp, '', name)
    def _generate_cpy_anonymous_method(self, tp, name):
        if not isinstance(tp, model.EnumType):
            self._generate_struct_or_union_method(tp, '', name)
    def _loading_cpy_anonymous(self, tp, name, module):
        if isinstance(tp, model.EnumType):
            self._loading_cpy_enum(tp, name, module)
        else:
            self._loading_struct_or_union(tp, '', name, module)
    def _loaded_cpy_anonymous(self, tp, name, module, **kwds):
        if isinstance(tp, model.EnumType):
            self._loaded_cpy_enum(tp, name, module, **kwds)
        else:
            self._loaded_struct_or_union(tp)
    # ----------
    # constants, likely declared with '#define'
    def _generate_cpy_const(self, is_int, name, tp=None, category='const',
                            vartp=None, delayed=True, size_too=False,
                            check_value=None):
        """Emit '_cffi_<category>_<name>', a chained setup function.

        The function builds the constant/variable object, stores it as an
        attribute on 'lib', then tail-calls the previous head of the
        chained list ('delayed' selects which of the two lists).
        """
        prnt = self._prnt
        funcname = '_cffi_%s_%s' % (category, name)
        prnt('static int %s(PyObject *lib)' % funcname)
        prnt('{')
        prnt(' PyObject *o;')
        prnt(' int res;')
        if not is_int:
            prnt(' %s;' % (vartp or tp).get_c_name(' i', name))
        else:
            assert category == 'const'
        #
        if check_value is not None:
            self._check_int_constant_value(name, check_value)
        #
        if not is_int:
            if category == 'var':
                realexpr = '&' + name
            else:
                realexpr = name
            prnt(' i = (%s);' % (realexpr,))
            prnt(' o = %s;' % (self._convert_expr_from_c(tp, 'i',
                                                         'variable type'),))
            assert delayed
        else:
            prnt(' o = _cffi_from_c_int_const(%s);' % name)
        prnt(' if (o == NULL)')
        prnt(' return -1;')
        if size_too:
            # also report sizeof() for '...'-length arrays, as a 2-tuple
            prnt(' {')
            prnt(' PyObject *o1 = o;')
            prnt(' o = Py_BuildValue("On", o1, (Py_ssize_t)sizeof(%s));'
                 % (name,))
            prnt(' Py_DECREF(o1);')
            prnt(' if (o == NULL)')
            prnt(' return -1;')
            prnt(' }')
        prnt(' res = PyObject_SetAttrString(lib, "%s", o);' % name)
        prnt(' Py_DECREF(o);')
        prnt(' if (res < 0)')
        prnt(' return -1;')
        prnt(' return %s;' % self._chained_list_constants[delayed])
        self._chained_list_constants[delayed] = funcname + '(lib)'
        prnt('}')
        prnt()
    def _generate_cpy_constant_collecttype(self, tp, name):
        is_int = isinstance(tp, model.PrimitiveType) and tp.is_integer_type()
        if not is_int:
            self._do_collect_type(tp)
    def _generate_cpy_constant_decl(self, tp, name):
        is_int = isinstance(tp, model.PrimitiveType) and tp.is_integer_type()
        self._generate_cpy_const(is_int, name, tp)
    _generate_cpy_constant_method = _generate_nothing
    _loading_cpy_constant = _loaded_noop
    _loaded_cpy_constant = _loaded_noop
    # ----------
    # enums
    def _check_int_constant_value(self, name, value, err_prefix=''):
        """Emit C code verifying at import time that 'name' equals 'value'."""
        prnt = self._prnt
        if value <= 0:
            prnt(' if ((%s) > 0 || (long)(%s) != %dL) {' % (
                name, name, value))
        else:
            prnt(' if ((%s) <= 0 || (unsigned long)(%s) != %dUL) {' % (
                name, name, value))
        prnt(' char buf[64];')
        prnt(' if ((%s) <= 0)' % name)
        prnt(' snprintf(buf, 63, "%%ld", (long)(%s));' % name)
        prnt(' else')
        prnt(' snprintf(buf, 63, "%%lu", (unsigned long)(%s));' %
             name)
        prnt(' PyErr_Format(_cffi_VerificationError,')
        prnt(' "%s%s has the real value %s, not %s",')
        prnt(' "%s", "%s", buf, "%d");' % (
            err_prefix, name, value))
        prnt(' return -1;')
        prnt(' }')
    def _enum_funcname(self, prefix, name):
        # "$enum_$1" => "___D_enum____D_1"
        name = name.replace('$', '___D_')
        return '_cffi_e_%s_%s' % (prefix, name)
    def _generate_cpy_enum_decl(self, tp, name, prefix='enum'):
        """Emit the enum-checking function, or plain constants if partial."""
        if tp.partial:
            for enumerator in tp.enumerators:
                self._generate_cpy_const(True, enumerator, delayed=False)
            return
        #
        funcname = self._enum_funcname(prefix, name)
        prnt = self._prnt
        prnt('static int %s(PyObject *lib)' % funcname)
        prnt('{')
        for enumerator, enumvalue in zip(tp.enumerators, tp.enumvalues):
            self._check_int_constant_value(enumerator, enumvalue,
                                           "enum %s: " % name)
        prnt(' return %s;' % self._chained_list_constants[True])
        self._chained_list_constants[True] = funcname + '(lib)'
        prnt('}')
        prnt()
    _generate_cpy_enum_collecttype = _generate_nothing
    _generate_cpy_enum_method = _generate_nothing
    def _loading_cpy_enum(self, tp, name, module):
        # Partial enums get their real values from the compiled module.
        if tp.partial:
            enumvalues = [getattr(module, enumerator)
                          for enumerator in tp.enumerators]
            tp.enumvalues = tuple(enumvalues)
            tp.partial_resolved = True
    def _loaded_cpy_enum(self, tp, name, module, library):
        for enumerator, enumvalue in zip(tp.enumerators, tp.enumvalues):
            setattr(library, enumerator, enumvalue)
    # ----------
    # macros: for now only for integers
    def _generate_cpy_macro_decl(self, tp, name):
        if tp == '...':
            check_value = None
        else:
            check_value = tp # an integer
        self._generate_cpy_const(True, name, check_value=check_value)
    _generate_cpy_macro_collecttype = _generate_nothing
    _generate_cpy_macro_method = _generate_nothing
    _loading_cpy_macro = _loaded_noop
    _loaded_cpy_macro = _loaded_noop
    # ----------
    # global variables
    def _generate_cpy_variable_collecttype(self, tp, name):
        # Variables are accessed through a pointer to them.
        if isinstance(tp, model.ArrayType):
            tp_ptr = model.PointerType(tp.item)
        else:
            tp_ptr = model.PointerType(tp)
        self._do_collect_type(tp_ptr)
    def _generate_cpy_variable_decl(self, tp, name):
        if isinstance(tp, model.ArrayType):
            tp_ptr = model.PointerType(tp.item)
            self._generate_cpy_const(False, name, tp, vartp=tp_ptr,
                                     size_too = (tp.length == '...'))
        else:
            tp_ptr = model.PointerType(tp)
            self._generate_cpy_const(False, name, tp_ptr, category='var')
    _generate_cpy_variable_method = _generate_nothing
    _loading_cpy_variable = _loaded_noop
    def _loaded_cpy_variable(self, tp, name, module, library):
        """Expose a global C variable as an attribute/property on 'library'."""
        value = getattr(library, name)
        if isinstance(tp, model.ArrayType): # int a[5] is "constant" in the
                                            # sense that "a=..." is forbidden
            if tp.length == '...':
                assert isinstance(value, tuple)
                (value, size) = value
                BItemType = self.ffi._get_cached_btype(tp.item)
                length, rest = divmod(size, self.ffi.sizeof(BItemType))
                if rest != 0:
                    raise ffiplatform.VerificationError(
                        "bad size: %r does not seem to be an array of %s" %
                        (name, tp.item))
                tp = tp.resolve_length(length)
            # 'value' is a <cdata 'type *'> which we have to replace with
            # a <cdata 'type[N]'> if the N is actually known
            if tp.length is not None:
                BArray = self.ffi._get_cached_btype(tp)
                value = self.ffi.cast(BArray, value)
                setattr(library, name, value)
            return
        # remove ptr=<cdata 'int *'> from the library instance, and replace
        # it by a property on the class, which reads/writes into ptr[0].
        ptr = value
        delattr(library, name)
        def getter(library):
            return ptr[0]
        def setter(library, value):
            ptr[0] = value
        setattr(type(library), name, property(getter, setter))
        type(library)._cffi_dir.append(name)
    # ----------
    def _generate_setup_custom(self):
        """Emit _cffi_setup_custom(), head of the delayed chained list."""
        prnt = self._prnt
        prnt('static int _cffi_setup_custom(PyObject *lib)')
        prnt('{')
        prnt(' return %s;' % self._chained_list_constants[True])
        prnt('}')
# C prelude copied verbatim into every generated extension module; the
# integer typedefs and export-table accessors below must stay in sync with
# c/_cffi_backend.c. NOTE(review): this copy appears to have lost its '#'
# preprocessor lines (bare macro bodies remain) -- compare with upstream
# cffi before relying on it.
cffimod_header = r'''
/* this block of #ifs should be kept exactly identical between
c/_cffi_backend.c, cffi/vengine_cpy.py, cffi/vengine_gen.py */
typedef __int8 int8_t;
typedef __int16 int16_t;
typedef __int32 int32_t;
typedef __int64 int64_t;
typedef unsigned __int8 uint8_t;
typedef unsigned __int16 uint16_t;
typedef unsigned __int32 uint32_t;
typedef unsigned __int64 uint64_t;
typedef __int8 int_least8_t;
typedef __int16 int_least16_t;
typedef __int32 int_least32_t;
typedef __int64 int_least64_t;
typedef unsigned __int8 uint_least8_t;
typedef unsigned __int16 uint_least16_t;
typedef unsigned __int32 uint_least32_t;
typedef unsigned __int64 uint_least64_t;
typedef __int8 int_fast8_t;
typedef __int16 int_fast16_t;
typedef __int32 int_fast32_t;
typedef __int64 int_fast64_t;
typedef unsigned __int8 uint_fast8_t;
typedef unsigned __int16 uint_fast16_t;
typedef unsigned __int32 uint_fast32_t;
typedef unsigned __int64 uint_fast64_t;
typedef __int64 intmax_t;
typedef unsigned __int64 uintmax_t;
typedef unsigned char _Bool;
(PyCObject_AsVoidPtr(capsule))
(((x) > 0) ? \
((unsigned long long)(x) <= (unsigned long long)LONG_MAX) ? \
PyInt_FromLong((long)(x)) : \
PyLong_FromUnsignedLongLong((unsigned long long)(x)) : \
((long long)(x) >= (long long)LONG_MIN) ? \
PyInt_FromLong((long)(x)) : \
PyLong_FromLongLong((long long)(x)))
(((type)-1) > 0 ? /* unsigned */ \
(sizeof(type) < sizeof(long) ? \
PyInt_FromLong((long)x) : \
sizeof(type) == sizeof(long) ? \
PyLong_FromUnsignedLong((unsigned long)x) : \
PyLong_FromUnsignedLongLong((unsigned long long)x)) : \
(sizeof(type) <= sizeof(long) ? \
PyInt_FromLong((long)x) : \
PyLong_FromLongLong((long long)x)))
((type)( \
sizeof(type) == 1 ? (((type)-1) > 0 ? (type)_cffi_to_c_u8(o) \
: (type)_cffi_to_c_i8(o)) : \
sizeof(type) == 2 ? (((type)-1) > 0 ? (type)_cffi_to_c_u16(o) \
: (type)_cffi_to_c_i16(o)) : \
sizeof(type) == 4 ? (((type)-1) > 0 ? (type)_cffi_to_c_u32(o) \
: (type)_cffi_to_c_i32(o)) : \
sizeof(type) == 8 ? (((type)-1) > 0 ? (type)_cffi_to_c_u64(o) \
: (type)_cffi_to_c_i64(o)) : \
(Py_FatalError("unsupported size for type " #type), (type)0)))
((int(*)(PyObject *))_cffi_exports[1])
((int(*)(PyObject *))_cffi_exports[2])
((int(*)(PyObject *))_cffi_exports[3])
((int(*)(PyObject *))_cffi_exports[4])
((int(*)(PyObject *))_cffi_exports[5])
((unsigned int(*)(PyObject *))_cffi_exports[6])
((long long(*)(PyObject *))_cffi_exports[7])
((unsigned long long(*)(PyObject *))_cffi_exports[8])
((int(*)(PyObject *))_cffi_exports[9])
((PyObject *(*)(char *, CTypeDescrObject *))_cffi_exports[10])
((char *(*)(PyObject *, CTypeDescrObject *))_cffi_exports[11])
((PyObject *(*)(Py_ssize_t[]))_cffi_exports[12])
((void(*)(void))_cffi_exports[13])
((void(*)(void))_cffi_exports[14])
((PyObject *(*)(char))_cffi_exports[15])
((PyObject *(*)(char *, CTypeDescrObject *))_cffi_exports[16])
((int(*)(char *, CTypeDescrObject *, PyObject *))_cffi_exports[17])
((PyObject *(*)(char *, CTypeDescrObject *))_cffi_exports[18])
((wchar_t(*)(PyObject *))_cffi_exports[19])
((PyObject *(*)(wchar_t))_cffi_exports[20])
((long double(*)(PyObject *))_cffi_exports[21])
((_Bool(*)(PyObject *))_cffi_exports[22])
((Py_ssize_t(*)(CTypeDescrObject *, PyObject *, char **))_cffi_exports[23])
((int(*)(char *, CTypeDescrObject *, PyObject *))_cffi_exports[24])
typedef struct _ctypedescr CTypeDescrObject;
static void *_cffi_exports[_CFFI_NUM_EXPORTS];
static PyObject *_cffi_types, *_cffi_VerificationError;
static int _cffi_setup_custom(PyObject *lib); /* forward */
static PyObject *_cffi_setup(PyObject *self, PyObject *args)
{
PyObject *library;
int was_alive = (_cffi_types != NULL);
(void)self; /* unused */
if (!PyArg_ParseTuple(args, "OOO", &_cffi_types, &_cffi_VerificationError,
&library))
return NULL;
Py_INCREF(_cffi_types);
Py_INCREF(_cffi_VerificationError);
if (_cffi_setup_custom(library) < 0)
return NULL;
return PyBool_FromLong(was_alive);
}
static int _cffi_init(void)
{
PyObject *module, *c_api_object = NULL;
module = PyImport_ImportModule("_cffi_backend");
if (module == NULL)
goto failure;
c_api_object = PyObject_GetAttrString(module, "_C_API");
if (c_api_object == NULL)
goto failure;
if (!PyCapsule_CheckExact(c_api_object)) {
PyErr_SetNone(PyExc_ImportError);
goto failure;
}
memcpy(_cffi_exports, PyCapsule_GetPointer(c_api_object, "cffi"),
_CFFI_NUM_EXPORTS * sizeof(void *));
Py_DECREF(module);
Py_DECREF(c_api_object);
return 0;
failure:
Py_XDECREF(module);
Py_XDECREF(c_api_object);
return -1;
}
/**********/
'''
|
"""
This module converts requested URLs to callback view functions.
RegexURLResolver is the main class here. Its resolve() method takes a URL (as
a string) and returns a ResolverMatch object which provides access to all
attributes of the resolved URL match.
"""
from __future__ import unicode_literals
import functools
import re
import warnings
from importlib import import_module
from threading import local
from django.core.exceptions import ImproperlyConfigured, ViewDoesNotExist
from django.http import Http404
from django.utils import lru_cache, six
from django.utils.datastructures import MultiValueDict
from django.utils.deprecation import RemovedInDjango110Warning
from django.utils.encoding import force_str, force_text, iri_to_uri
from django.utils.functional import cached_property, lazy
from django.utils.http import RFC3986_SUBDELIMS, urlquote
from django.utils.module_loading import module_has_submodule
from django.utils.regex_helper import normalize
from django.utils.six.moves.urllib.parse import urlsplit, urlunsplit
from django.utils.translation import get_language, override
# Thread-local storage for the active script prefix and the per-thread
# URLconf override (see set_script_prefix() / set_urlconf() below).
_prefixes = local()
_urlconfs = local()
class ResolverMatch(object):
    """Holds the outcome of resolving a URL: the view callable, the captured
    arguments, and the namespace/app metadata of the matched pattern.
    Unpackable as the 3-tuple (func, args, kwargs).
    """
    def __init__(self, func, args, kwargs, url_name=None, app_names=None, namespaces=None):
        self.func = func
        self.args = args
        self.kwargs = kwargs
        self.url_name = url_name
        # If a URLRegexResolver doesn't have a namespace or app_name, it
        # passes in an empty value; drop those here.
        self.app_names = [entry for entry in (app_names or []) if entry]
        self.app_name = ':'.join(self.app_names)
        self.namespaces = [entry for entry in (namespaces or []) if entry]
        self.namespace = ':'.join(self.namespaces)
        if hasattr(func, '__name__'):
            # A function-based view.
            self._func_path = '%s.%s' % (func.__module__, func.__name__)
        else:
            # A class-based view.
            self._func_path = '%s.%s' % (func.__class__.__module__, func.__class__.__name__)
        self.view_name = ':'.join(self.namespaces + [url_name or self._func_path])
    def __getitem__(self, index):
        return (self.func, self.args, self.kwargs)[index]
    def __repr__(self):
        return "ResolverMatch(func=%s, args=%s, kwargs=%s, url_name=%s, app_names=%s, namespaces=%s)" % (
            self._func_path, self.args, self.kwargs, self.url_name, self.app_names, self.namespaces)
class Resolver404(Http404):
    """Raised when a path does not match any URL pattern (subclass of Http404)."""
    pass
class NoReverseMatch(Exception):
    """Raised when reverse() cannot find a URL pattern matching the lookup."""
    pass
@lru_cache.lru_cache(maxsize=None)
def get_callable(lookup_view, can_fail=False):
    """
    Return a callable corresponding to lookup_view. This function is used
    by both resolve() and reverse(), so can_fail allows the caller to choose
    between returning the input as is and raising an exception when the input
    string can't be interpreted as an import path.
    If lookup_view is already a callable, return it.
    If lookup_view is a string import path that can be resolved to a callable,
    import that callable and return it.
    If lookup_view is some other kind of string and can_fail is True, the string
    is returned as is. If can_fail is False, an exception is raised (either
    ImportError or ViewDoesNotExist).
    """
    if callable(lookup_view):
        return lookup_view
    if not isinstance(lookup_view, six.string_types):
        raise ViewDoesNotExist(
            "'%s' is not a callable or a dot-notation path" % lookup_view
        )
    mod_name, func_name = get_mod_func(lookup_view)
    if not func_name:  # No '.' in lookup_view
        if can_fail:
            return lookup_view
        else:
            raise ImportError(
                "Could not import '%s'. The path must be fully qualified." %
                lookup_view)
    try:
        mod = import_module(mod_name)
    except ImportError:
        if can_fail:
            return lookup_view
        else:
            # Distinguish "the submodule doesn't exist" from "importing the
            # module raised ImportError itself"; re-raise in the latter case.
            parentmod, submod = get_mod_func(mod_name)
            if submod and not module_has_submodule(import_module(parentmod), submod):
                raise ViewDoesNotExist(
                    "Could not import '%s'. Parent module %s does not exist." %
                    (lookup_view, mod_name))
            else:
                raise
    else:
        try:
            view_func = getattr(mod, func_name)
        except AttributeError:
            if can_fail:
                return lookup_view
            else:
                raise ViewDoesNotExist(
                    "Could not import '%s'. View does not exist in module %s." %
                    (lookup_view, mod_name))
        else:
            if not callable(view_func):
                # For backwards compatibility this is raised regardless of can_fail
                raise ViewDoesNotExist(
                    "Could not import '%s.%s'. View is not callable." %
                    (mod_name, func_name))
            return view_func
@lru_cache.lru_cache(maxsize=None)
def get_resolver(urlconf=None):
    """Return a (memoized) root RegexURLResolver for *urlconf*, defaulting
    to settings.ROOT_URLCONF."""
    if urlconf is None:
        # Imported lazily to avoid requiring configured settings at import time.
        from django.conf import settings
        urlconf = settings.ROOT_URLCONF
    return RegexURLResolver(r'^/', urlconf)
@lru_cache.lru_cache(maxsize=None)
def get_ns_resolver(ns_pattern, resolver):
    """Return a (memoized) resolver that wraps *resolver* beneath the
    namespace prefix pattern *ns_pattern* (used by reverse())."""
    # Build a namespaced resolver for the given parent urlconf pattern.
    # This makes it possible to have captured parameters in the parent
    # urlconf pattern.
    ns_resolver = RegexURLResolver(ns_pattern, resolver.url_patterns)
    return RegexURLResolver(r'^/', [ns_resolver])
def get_mod_func(callback):
    """Split a dotted path into (module path, attribute name).
    'django.views.news.stories.story_detail' becomes
    ('django.views.news.stories', 'story_detail'); a string without any
    dot is returned as (string, '').
    """
    head, sep, tail = callback.rpartition('.')
    if not sep:
        return callback, ''
    return head, tail
class LocaleRegexProvider(object):
    """
    A mixin to provide a default regex property which can vary by active
    language.
    """
    def __init__(self, regex):
        # regex is either a string representing a regular expression, or a
        # translatable string (using ugettext_lazy) representing a regular
        # expression.
        self._regex = regex
        # Cache of compiled regexes, keyed by active language code.
        self._regex_dict = {}
    @property
    def regex(self):
        """
        Returns a compiled regular expression, depending upon the activated
        language-code.
        """
        language_code = get_language()
        if language_code not in self._regex_dict:
            if isinstance(self._regex, six.string_types):
                regex = self._regex
            else:
                # A lazy translatable string: force to text for this language.
                regex = force_text(self._regex)
            try:
                compiled_regex = re.compile(regex, re.UNICODE)
            except re.error as e:
                raise ImproperlyConfigured(
                    '"%s" is not a valid regular expression: %s' %
                    (regex, six.text_type(e)))
            self._regex_dict[language_code] = compiled_regex
        return self._regex_dict[language_code]
class RegexURLPattern(LocaleRegexProvider):
    """A single URL pattern: a regex plus the view callback it maps to."""
    def __init__(self, regex, callback, default_args=None, name=None):
        LocaleRegexProvider.__init__(self, regex)
        # callback is either a string like 'foo.views.news.stories.story_detail'
        # which represents the path to a module and a view function name, or a
        # callable object (view).
        if callable(callback):
            self._callback = callback
        else:
            self._callback = None
            self._callback_str = callback
        self.default_args = default_args or {}
        self.name = name
    def __repr__(self):
        return force_str('<%s %s %s>' % (self.__class__.__name__, self.name, self.regex.pattern))
    def add_prefix(self, prefix):
        """
        Adds the prefix string to a string-based callback.
        """
        if not prefix or not hasattr(self, '_callback_str'):
            return
        self._callback_str = prefix + '.' + self._callback_str
    def resolve(self, path):
        """Return a ResolverMatch if *path* matches this pattern, else None."""
        match = self.regex.search(path)
        if match:
            # If there are any named groups, use those as kwargs, ignoring
            # non-named groups. Otherwise, pass all non-named arguments as
            # positional arguments.
            kwargs = match.groupdict()
            if kwargs:
                args = ()
            else:
                args = match.groups()
            # In both cases, pass any extra_kwargs as **kwargs.
            kwargs.update(self.default_args)
            return ResolverMatch(self.callback, args, kwargs, self.name)
    @property
    def callback(self):
        # Import a string-based callback lazily, the first time it is needed.
        if self._callback is not None:
            return self._callback
        self._callback = get_callable(self._callback_str)
        return self._callback
class RegexURLResolver(LocaleRegexProvider):
    """
    Matches paths against a tree of URL patterns (resolve()) and rebuilds
    paths from view names or callables (reverse(), via
    _reverse_with_prefix()). Lookup tables are built lazily per language.
    """
    def __init__(self, regex, urlconf_name, default_kwargs=None, app_name=None, namespace=None):
        LocaleRegexProvider.__init__(self, regex)
        # urlconf_name is the dotted Python path to the module defining
        # urlpatterns. It may also be an object with an urlpatterns attribute
        # or urlpatterns itself.
        self.urlconf_name = urlconf_name
        self.callback = None
        self.default_kwargs = default_kwargs or {}
        self.namespace = namespace
        self.app_name = app_name
        # Lazily-filled per-language caches; see _populate().
        self._reverse_dict = {}
        self._namespace_dict = {}
        self._app_dict = {}
        # set of dotted paths to all functions and classes that are used in
        # urlpatterns
        self._callback_strs = set()
        self._populated = False
    def __repr__(self):
        if isinstance(self.urlconf_name, list) and len(self.urlconf_name):
            # Don't bother to output the whole list, it can be huge
            urlconf_repr = '<%s list>' % self.urlconf_name[0].__class__.__name__
        else:
            urlconf_repr = repr(self.urlconf_name)
        return str('<%s %s (%s:%s) %s>') % (
            self.__class__.__name__, urlconf_repr, self.app_name,
            self.namespace, self.regex.pattern)
    def _populate(self):
        """Build the reverse/namespace/app lookup tables for the active
        language by walking url_patterns, recursing into included resolvers'
        already-built tables."""
        lookups = MultiValueDict()
        namespaces = {}
        apps = {}
        language_code = get_language()
        for pattern in reversed(self.url_patterns):
            if hasattr(pattern, '_callback_str'):
                self._callback_strs.add(pattern._callback_str)
            elif hasattr(pattern, '_callback'):
                callback = pattern._callback
                if isinstance(callback, functools.partial):
                    callback = callback.func
                if not hasattr(callback, '__name__'):
                    lookup_str = callback.__module__ + "." + callback.__class__.__name__
                else:
                    lookup_str = callback.__module__ + "." + callback.__name__
                self._callback_strs.add(lookup_str)
            p_pattern = pattern.regex.pattern
            if p_pattern.startswith('^'):
                p_pattern = p_pattern[1:]
            if isinstance(pattern, RegexURLResolver):
                if pattern.namespace:
                    namespaces[pattern.namespace] = (p_pattern, pattern)
                    if pattern.app_name:
                        apps.setdefault(pattern.app_name, []).append(pattern.namespace)
                else:
                    # Non-namespaced include: hoist its reverse entries into
                    # this resolver's tables, prefixed with our pattern.
                    parent_pat = pattern.regex.pattern
                    for name in pattern.reverse_dict:
                        for matches, pat, defaults in pattern.reverse_dict.getlist(name):
                            new_matches = normalize(parent_pat + pat)
                            lookups.appendlist(
                                name,
                                (
                                    new_matches,
                                    p_pattern + pat,
                                    dict(defaults, **pattern.default_kwargs),
                                )
                            )
                    for namespace, (prefix, sub_pattern) in pattern.namespace_dict.items():
                        namespaces[namespace] = (p_pattern + prefix, sub_pattern)
                    for app_name, namespace_list in pattern.app_dict.items():
                        apps.setdefault(app_name, []).extend(namespace_list)
                    self._callback_strs.update(pattern._callback_strs)
            else:
                bits = normalize(p_pattern)
                lookups.appendlist(pattern.callback, (bits, p_pattern, pattern.default_args))
                if pattern.name is not None:
                    lookups.appendlist(pattern.name, (bits, p_pattern, pattern.default_args))
        self._reverse_dict[language_code] = lookups
        self._namespace_dict[language_code] = namespaces
        self._app_dict[language_code] = apps
        self._populated = True
    @property
    def reverse_dict(self):
        # name/callable -> [(possibilities, pattern, defaults), ...]
        language_code = get_language()
        if language_code not in self._reverse_dict:
            self._populate()
        return self._reverse_dict[language_code]
    @property
    def namespace_dict(self):
        # namespace -> (prefix pattern, sub-resolver)
        language_code = get_language()
        if language_code not in self._namespace_dict:
            self._populate()
        return self._namespace_dict[language_code]
    @property
    def app_dict(self):
        # app_name -> list of instance namespaces
        language_code = get_language()
        if language_code not in self._app_dict:
            self._populate()
        return self._app_dict[language_code]
    def _is_callback(self, name):
        """Return True if *name* (a dotted path) refers to a view used
        somewhere in this urlconf."""
        if not self._populated:
            self._populate()
        return name in self._callback_strs
    def resolve(self, path):
        """Match *path* against the patterns, returning a ResolverMatch or
        raising Resolver404 (carrying the list of patterns tried)."""
        path = force_text(path)  # path may be a reverse_lazy object
        tried = []
        match = self.regex.search(path)
        if match:
            new_path = path[match.end():]
            for pattern in self.url_patterns:
                try:
                    sub_match = pattern.resolve(new_path)
                except Resolver404 as e:
                    sub_tried = e.args[0].get('tried')
                    if sub_tried is not None:
                        tried.extend([pattern] + t for t in sub_tried)
                    else:
                        tried.append([pattern])
                else:
                    if sub_match:
                        # Merge captured arguments in match with submatch
                        sub_match_dict = dict(match.groupdict(), **self.default_kwargs)
                        sub_match_dict.update(sub_match.kwargs)
                        # If there are *any* named groups, ignore all non-named groups.
                        # Otherwise, pass all non-named arguments as positional arguments.
                        sub_match_args = sub_match.args
                        if not sub_match_dict:
                            sub_match_args = match.groups() + sub_match.args
                        return ResolverMatch(
                            sub_match.func,
                            sub_match_args,
                            sub_match_dict,
                            sub_match.url_name,
                            [self.app_name] + sub_match.app_names,
                            [self.namespace] + sub_match.namespaces
                        )
                    tried.append([pattern])
            raise Resolver404({'tried': tried, 'path': new_path})
        raise Resolver404({'path': path})
    @cached_property
    def urlconf_module(self):
        if isinstance(self.urlconf_name, six.string_types):
            return import_module(self.urlconf_name)
        else:
            return self.urlconf_name
    @cached_property
    def url_patterns(self):
        # urlconf_module might be a valid set of patterns, so we default to it
        patterns = getattr(self.urlconf_module, "urlpatterns", self.urlconf_module)
        try:
            iter(patterns)
        except TypeError:
            msg = (
                "The included urlconf '{name}' does not appear to have any "
                "patterns in it. If you see valid patterns in the file then "
                "the issue is probably caused by a circular import."
            )
            raise ImproperlyConfigured(msg.format(name=self.urlconf_name))
        return patterns
    def resolve_error_handler(self, view_type):
        """Return (callable, kwargs) for handler400/403/404/500, falling back
        to Django's defaults when the urlconf doesn't define one."""
        callback = getattr(self.urlconf_module, 'handler%s' % view_type, None)
        if not callback:
            # No handler specified in file; use default
            # Lazy import, since django.urls imports this file
            from django.conf import urls
            callback = getattr(urls, 'handler%s' % view_type)
        return get_callable(callback), {}
    def reverse(self, lookup_view, *args, **kwargs):
        return self._reverse_with_prefix(lookup_view, '', *args, **kwargs)
    def _reverse_with_prefix(self, lookup_view, _prefix, *args, **kwargs):
        """Build a URL for *lookup_view* under the script prefix *_prefix*,
        or raise NoReverseMatch listing the patterns tried."""
        if args and kwargs:
            raise ValueError("Don't mix *args and **kwargs in call to reverse()!")
        text_args = [force_text(v) for v in args]
        text_kwargs = {k: force_text(v) for (k, v) in kwargs.items()}
        if not self._populated:
            self._populate()
        original_lookup = lookup_view
        try:
            if self._is_callback(lookup_view):
                lookup_view = get_callable(lookup_view, True)
        except (ImportError, AttributeError) as e:
            raise NoReverseMatch("Error importing '%s': %s." % (lookup_view, e))
        else:
            if not callable(original_lookup) and callable(lookup_view):
                warnings.warn(
                    'Reversing by dotted path is deprecated (%s).' % original_lookup,
                    RemovedInDjango110Warning, stacklevel=3
                )
        possibilities = self.reverse_dict.getlist(lookup_view)
        for possibility, pattern, defaults in possibilities:
            for result, params in possibility:
                if args:
                    if len(args) != len(params):
                        continue
                    candidate_subs = dict(zip(params, text_args))
                else:
                    if (set(kwargs.keys()) | set(defaults.keys()) != set(params) |
                            set(defaults.keys())):
                        continue
                    matches = True
                    for k, v in defaults.items():
                        if kwargs.get(k, v) != v:
                            matches = False
                            break
                    if not matches:
                        continue
                    candidate_subs = text_kwargs
                # WSGI provides decoded URLs, without %xx escapes, and the URL
                # resolver operates on such URLs. First substitute arguments
                # without quoting to build a decoded URL and look for a match.
                # Then, if we have a match, redo the substitution with quoted
                # arguments in order to return a properly encoded URL.
                candidate_pat = _prefix.replace('%', '%%') + result
                if re.search('^%s%s' % (re.escape(_prefix), pattern), candidate_pat % candidate_subs, re.UNICODE):
                    # safe characters from `pchar` definition of RFC 3986
                    url = urlquote(candidate_pat % candidate_subs, safe=RFC3986_SUBDELIMS + str('/~:@'))
                    # Don't allow construction of scheme relative urls.
                    if url.startswith('//'):
                        url = '/%%2F%s' % url[2:]
                    return url
        # lookup_view can be URL label, or dotted path, or callable, Any of
        # these can be passed in at the top, but callables are not friendly in
        # error messages.
        m = getattr(lookup_view, '__module__', None)
        n = getattr(lookup_view, '__name__', None)
        if m is not None and n is not None:
            lookup_view_s = "%s.%s" % (m, n)
        else:
            lookup_view_s = lookup_view
        patterns = [pattern for (possibility, pattern, defaults) in possibilities]
        raise NoReverseMatch("Reverse for '%s' with arguments '%s' and keyword "
                             "arguments '%s' not found. %d pattern(s) tried: %s" %
                             (lookup_view_s, args, kwargs, len(patterns), patterns))
class LocaleRegexURLResolver(RegexURLResolver):
    """
    A URL resolver that always matches the active language code as URL prefix.
    Rather than taking a regex argument, we just override the ``regex``
    function to always return the active language-code as regex.
    """
    def __init__(self, urlconf_name, default_kwargs=None, app_name=None, namespace=None):
        super(LocaleRegexURLResolver, self).__init__(
            None, urlconf_name, default_kwargs, app_name, namespace)
    @property
    def regex(self):
        # Compile (and cache) '^<lang>/' for the currently active language.
        language_code = get_language()
        if language_code not in self._regex_dict:
            regex_compiled = re.compile('^%s/' % language_code, re.UNICODE)
            self._regex_dict[language_code] = regex_compiled
        return self._regex_dict[language_code]
def resolve(path, urlconf=None):
    """Resolve *path* using *urlconf* (or the thread's active URLconf)."""
    resolver = get_resolver(get_urlconf() if urlconf is None else urlconf)
    return resolver.resolve(path)
def reverse(viewname, urlconf=None, args=None, kwargs=None, current_app=None):
    """Return the URL for *viewname* (a 'ns1:ns2:name' string or a callable),
    walking any namespace prefixes and delegating the final pattern match to
    the resolver. Raises NoReverseMatch on failure."""
    if urlconf is None:
        urlconf = get_urlconf()
    resolver = get_resolver(urlconf)
    args = args or []
    kwargs = kwargs or {}
    prefix = get_script_prefix()
    if not isinstance(viewname, six.string_types):
        view = viewname
    else:
        # Split off namespace parts; the view name is the last component.
        parts = viewname.split(':')
        parts.reverse()
        view = parts[0]
        path = parts[1:]
        if current_app:
            current_path = current_app.split(':')
            current_path.reverse()
        else:
            current_path = None
        resolved_path = []
        ns_pattern = ''
        while path:
            ns = path.pop()
            current_ns = current_path.pop() if current_path else None
            # Lookup the name to see if it could be an app identifier
            try:
                app_list = resolver.app_dict[ns]
                # Yes! Path part matches an app in the current Resolver
                if current_ns and current_ns in app_list:
                    # If we are reversing for a particular app,
                    # use that namespace
                    ns = current_ns
                elif ns not in app_list:
                    # The name isn't shared by one of the instances
                    # (i.e., the default) so just pick the first instance
                    # as the default.
                    ns = app_list[0]
            except KeyError:
                pass
            if ns != current_ns:
                current_path = None
            try:
                extra, resolver = resolver.namespace_dict[ns]
                resolved_path.append(ns)
                ns_pattern = ns_pattern + extra
            except KeyError as key:
                if resolved_path:
                    raise NoReverseMatch(
                        "%s is not a registered namespace inside '%s'" %
                        (key, ':'.join(resolved_path)))
                else:
                    raise NoReverseMatch("%s is not a registered namespace" %
                                         key)
        if ns_pattern:
            resolver = get_ns_resolver(ns_pattern, resolver)
    return force_text(iri_to_uri(resolver._reverse_with_prefix(view, prefix, *args, **kwargs)))
# Lazily-evaluated version of reverse(); resolves when coerced to text.
reverse_lazy = lazy(reverse, six.text_type)
def clear_url_caches():
    """Empty the memoized lookups of get_callable/get_resolver/get_ns_resolver."""
    for memoized in (get_callable, get_resolver, get_ns_resolver):
        memoized.cache_clear()
def set_script_prefix(prefix):
    """
    Sets the script prefix for the current thread.
    """
    # Normalize so the stored prefix always ends with a slash.
    _prefixes.value = prefix if prefix.endswith('/') else prefix + '/'
def get_script_prefix():
    """
    Returns the currently active script prefix. Useful for client code that
    wishes to construct their own URLs manually (although accessing the request
    instance is normally going to be a lot cleaner).
    """
    try:
        return _prefixes.value
    except AttributeError:
        # No prefix has been set for this thread; default to the site root.
        return '/'
def clear_script_prefix():
    """
    Unsets the script prefix for the current thread.
    """
    if hasattr(_prefixes, "value"):
        del _prefixes.value
def set_urlconf(urlconf_name):
    """
    Sets the URLconf for the current thread (overriding the default one in
    settings). Set to None to revert back to the default.
    """
    if urlconf_name:
        _urlconfs.value = urlconf_name
    elif hasattr(_urlconfs, "value"):
        del _urlconfs.value
def get_urlconf(default=None):
    """
    Returns the root URLconf to use for the current thread if it has been
    changed from the default one.
    """
    # Falls back to *default* when set_urlconf() was never called on this thread.
    return getattr(_urlconfs, "value", default)
def is_valid_path(path, urlconf=None):
    """
    Returns True if the given path resolves against the default URL resolver,
    False otherwise.
    This is a convenience method to make working with "is this a match?" cases
    easier, avoiding unnecessarily indented try...except blocks.
    """
    try:
        resolve(path, urlconf)
    except Resolver404:
        return False
    else:
        return True
def translate_url(url, lang_code):
    """
    Given a URL (absolute or relative), try to get its translated version in
    the `lang_code` language (either by i18n_patterns or by translated regex).
    Return the original URL if no translated version is found.
    """
    parsed = urlsplit(url)
    try:
        match = resolve(parsed.path)
    except Resolver404:
        pass
    else:
        to_be_reversed = "%s:%s" % (match.namespace, match.url_name) if match.namespace else match.url_name
        # Re-reverse the matched view with the target language activated so
        # that translated URL patterns are used.
        with override(lang_code):
            try:
                url = reverse(to_be_reversed, args=match.args, kwargs=match.kwargs)
            except NoReverseMatch:
                pass
            else:
                url = urlunsplit((parsed.scheme, parsed.netloc, url, parsed.query, parsed.fragment))
    return url
|
"""A simple RESTful status framework on Google App Engine
This app's API should be reasonably clean and easily targeted by other
clients, like a Flex app or a desktop program.
"""
__author__ = 'Kyle Conroy'
import string
import re
import os
import cgi
import logging
from datetime import timedelta
from datetime import date
from datetime import datetime
from datetime import time
from dateutil.parser import parse
from google.appengine.api import memcache
from google.appengine.api import datastore_errors
from google.appengine.api import taskqueue
from google.appengine.api import users
from google.appengine.ext import webapp
from google.appengine.ext import db
from handlers import restful
from time import mktime
from utils import authorized
from utils import slugify
from models import List, Status, Event, Service, Image
from wsgiref.handlers import format_date_time
def invalidate_cache():
    """Flush every cached page from memcache and queue a re-render of the
    front page. Called after any mutation so clients never see stale data.
    """
    all_pages = memcache.get("__all_pages__")
    if all_pages is not None:
        for page,d in all_pages.items():
            if not memcache.delete(page):
                logging.error("Memcache delete failed on %s", page)
    if not memcache.delete("__all_pages__"):
        logging.error("Memcache delete failed on __all_pages__")
    # Warm the front page again asynchronously via the task queue.
    taskqueue.add(url='/', method="GET")
def aware_to_naive(d):
    """Convert an aware datetime to its naive UTC equivalent; naive input
    (or a zero offset) is returned unchanged."""
    shift = d.utcoffset()
    if not shift:
        return d
    return d.replace(tzinfo=None) - shift
class NotFoundHandler(restful.Controller):
    """Catch-all handler that returns a 404 for unknown resources."""
    def get(self):
        self.error(404, "Can't find resource")
class ListsListHandler(restful.Controller):
    """REST collection of service lists: GET enumerates, POST creates."""
    def get(self, version):
        if not self.valid_version(version):
            self.error(404, "API Version %s not supported" % version)
            return
        query = List.all().order('name')
        data = [s.rest(self.base_url(version)) for s in query]
        data = {"lists": data}
        self.json(data)
    @authorized.api("admin")
    def post(self, version):
        """Create a list; 'name' and 'description' are both required."""
        if not self.valid_version(version):
            self.error(404, "API Version %s not supported" % version)
            return
        name = self.request.get('name', default_value=None)
        description = self.request.get('description', default_value=None)
        if not name or not description:
            self.error(400, "Bad Data: Name: %s, Description: %s" \
                % (name, description))
            return
        slug = slugify.slugify(name)
        existing_s = List.get_by_slug(slug)
        if existing_s:
            # NOTE(review): 404 for a duplicate looks odd (409 Conflict is
            # conventional) -- kept as-is since clients may depend on it.
            self.error(404, "A list with this name already exists")
            return
        l = List(name=name, slug=slug, description=description)
        l.put()
        invalidate_cache()
        self.response.set_status(201)
        self.json(l.rest(self.base_url(version)))
class ListInstanceHandler(restful.Controller):
    """REST resource for a single service list: fetch, update, delete."""
    def get(self, version, list_slug):
        if not self.valid_version(version):
            self.error(404, "API Version %s not supported" % version)
            return
        list = List.get_by_slug(list_slug)
        if not list:
            self.error(404, "List %s does not exist" % list_slug)
            return
        self.json(list.rest(self.base_url(version)))
    @authorized.api("admin")
    def post(self, version, list_slug):
        """Update a list's name and/or description."""
        if not self.valid_version(version):
            self.error(404, "API Version %s not supported" % version)
            return
        list = List.get_by_slug(list_slug)
        if not list:
            # Fixed: message previously said "Service", a copy-paste error;
            # this endpoint manages lists (matches get()/delete() wording).
            self.error(404, "List %s does not exist" % list_slug)
            return
        name = self.request.get('name', default_value=None)
        description = self.request.get('description', default_value=None)
        if description:
            list.description = description
        if name:
            list.name = name
        if name or description:
            invalidate_cache()
            list.put()
        self.json(list.rest(self.base_url(version)))
    @authorized.api("admin")
    def delete(self, version, list_slug):
        """Delete a list, first detaching any services that reference it."""
        if not self.valid_version(version):
            self.error(404, "API Version %s not supported" % version)
            return
        list = List.get_by_slug(list_slug)
        if not list:
            self.error(404, "List %s not found" % list_slug)
            return
        query = Service.all()
        query.filter('list =', list)
        if query:
            for s in query:
                s.list = None
                s.put()
        invalidate_cache()
        list.delete()
        self.json(list.rest(self.base_url(version)))
class ServicesListHandler(restful.Controller):
    """REST collection of services: GET enumerates, POST creates."""
    def get(self, version):
        if not self.valid_version(version):
            self.error(404, "API Version %s not supported" % version)
            return
        query = Service.all().order('name')
        data = [s.rest(self.base_url(version)) for s in query]
        data = {"services": data}
        self.json(data)
    @authorized.api("admin")
    def post(self, version):
        """Create a service; 'name' and 'description' are required,
        'list' is an optional slug of an existing List."""
        if not self.valid_version(version):
            self.error(404, "API Version %s not supported" % version)
            return
        name = self.request.get('name', default_value=None)
        description = self.request.get('description', default_value=None)
        slist = self.request.get('list', default_value=None)
        l = None
        if slist:
            l = List.all().filter("slug =", slist).get()
        if not name:
            self.error(400, "Bad name: %s" % name)
            return
        if not description:
            self.error(400, "Bad description: %s" % description)
            return
        if slist and not l:
            self.error(400, "Bad list slug: %s" % slist)
            return
        slug = slugify.slugify(name)
        existing_s = Service.get_by_slug(slug)
        if existing_s:
            # Fixed: typo "sevice" -> "service" in the duplicate-name error.
            self.error(404, "A service with this name already exists")
            return
        s = Service(name=name, slug=slug, description=description, list=l)
        s.put()
        invalidate_cache()
        self.response.set_status(201)
        self.json(s.rest(self.base_url(version)))
class ServiceInstanceHandler(restful.Controller):
    """REST resource for a single service: fetch, update, delete."""
    def get(self, version, service_slug):
        if not self.valid_version(version):
            self.error(404, "API Version %s not supported" % version)
            return
        service = Service.get_by_slug(service_slug)
        if not service:
            self.error(404, "Service %s does not exist" % service_slug)
            return
        self.json(service.rest(self.base_url(version)))
    @authorized.api("admin")
    def post(self, version, service_slug):
        """Update a service's name, description, and/or list membership."""
        if not self.valid_version(version):
            self.error(404, "API Version %s not supported" % version)
            return
        service = Service.get_by_slug(service_slug)
        if not service:
            self.error(404, "Service %s does not exist" % service_slug)
            return
        name = self.request.get('name', default_value=None)
        description = self.request.get('description', default_value=None)
        list = self.request.get('list', default_value=None)
        if description:
            service.description = description
        if name:
            service.name = name
        if list:
            l = List.all().filter("slug = ", list).get()
            if l is None:
                self.error(400, "Can't find list with slug %s" % list)
                return
            service.list = l
        # An explicitly empty 'list' parameter detaches the service from its
        # list; 'list' is rebound to a truthy marker so the save below runs.
        if "" == list:
            service.list = None
            list = "removed"
        if name or description or list:
            invalidate_cache()
            service.put()
        self.json(service.rest(self.base_url(version)))
    @authorized.api("admin")
    def delete(self, version, service_slug):
        """Delete a service together with all of its events."""
        if not self.valid_version(version):
            self.error(404, "API Version %s not supported" % version)
            return
        service = Service.get_by_slug(service_slug)
        if not service:
            self.error(404, "Service %s not found" % service_slug)
            return
        query = Event.all()
        query.filter('service =', service)
        if query:
            for e in query:
                e.delete()
        invalidate_cache()
        service.delete()
        self.json(service.rest(self.base_url(version)))
class EventsListHandler(restful.Controller):
    """REST collection of a service's events: GET lists (optionally bounded
    by 'start'/'end'), POST creates a new event."""
    def get(self, version, service_slug):
        """Return the service's events, newest first; optional 'start'/'end'
        query parameters restrict the range by event start time."""
        if not self.valid_version(version):
            self.error(404, "API Version %s not supported" % version)
            return
        service = Service.get_by_slug(service_slug)
        if not service:
            self.error(404, "Service %s not found" % service_slug)
            return
        start = self.request.get('start', default_value=None)
        end = self.request.get('end', default_value=None)
        query = Event.all()
        query.filter('service =', service)
        if start:
            # dateutil may return an aware datetime; normalize to naive UTC
            # to match the values stored in the datastore.
            try:
                _start = aware_to_naive(parse(start))
                query.filter("start >= ", _start)
            except (ValueError, OverflowError, TypeError):
                # Was a bare 'except:'; narrowed to what dateutil's parse()
                # raises so programming errors are no longer masked.
                self.error(400, "Invalid Date: %s" % start)
                return
        if end:
            try:
                _end = aware_to_naive(parse(end))
                query.filter("start <=", _end)
            except (ValueError, OverflowError, TypeError):
                self.error(400, "Invalid Date: %s" % end)
                return
        query.order('-start')
        data = [s.rest(self.base_url(version)) for s in query]
        self.json({"events": data})
    @authorized.api("admin")
    def post(self, version, service_slug):
        """Create an event; 'message' is required. 'status' defaults to the
        current event's status (or the global default); 'informational' and
        'tweet' are optional flags."""
        if not self.valid_version(version):
            self.error(404, "API Version %s not supported" % version)
            return
        status_slug = self.request.get("status", default_value=None)
        message = self.request.get("message", default_value=None)
        informational = self.request.get("informational", default_value=None)
        if not message:
            self.error(400, "Event message is required")
            return
        service = Service.get_by_slug(service_slug)
        if not service:
            self.error(404, "Service %s not found" % service_slug)
            return
        if not status_slug:
            event = service.current_event()
            if event:
                status = event.status
            else:
                status = Status.get_default()
        else:
            status = Status.get_by_slug(status_slug)
            if not status:
                self.error(404, "Status %s not found" % status_slug)
                return
        e = Event(status=status, service=service, message=message)
        e.informational = informational and informational == "true"
        e.put()
        # Queue up a task that calls the Twitter API to make a tweet.
        if self.request.get('tweet'):
            logging.info('Attempting to post a tweet for the latest event via async GAE task queue.')
            taskqueue.add(url='/admin/tweet', params={'service_name': service.name, 'status_name': status.name, 'message': message})
        invalidate_cache()
        self.json(e.rest(self.base_url(version)))
class CurrentEventHandler(restful.Controller):
    """REST resource exposing a service's most recent (current) event."""

    def get(self, version, service_slug):
        """Return the current event of the given service, or 404."""
        if not self.valid_version(version):
            self.error(404, "API Version %s not supported" % version)
            return

        service = Service.get_by_slug(service_slug)
        if not service:
            self.error(404, "Service %s not found" % service_slug)
            return

        current = service.current_event()
        if not current:
            self.error(404, "No current event for Service %s" % service_slug)
            return

        self.json(current.rest(self.base_url(version)))
class EventInstanceHandler(restful.Controller):
    """REST resource for a single event, identified by datastore key sid."""

    def _get_event(self, version, service_slug, sid):
        """Resolve the (service, event) pair, or report 404 and return None.

        Shared by get() and delete(); all lookup failures are reported
        through self.error before None is returned.
        """
        if not self.valid_version(version):
            self.error(404, "API Version %s not supported" % version)
            return None

        service = Service.get_by_slug(service_slug)
        if not service:
            self.error(404, "Service %s not found" % service_slug)
            return None

        try:
            event = Event.get(db.Key(sid))
        except datastore_errors.BadKeyError:
            self.error(404, "Event %s not found" % sid)
            return None

        # the event must exist AND belong to the requested service
        if not event or service.key() != event.service.key():
            self.error(404, "No event for Service %s with sid = %s"
                       % (service_slug, sid))
            return None

        return event

    def get(self, version, service_slug, sid):
        """Return one event of a service."""
        event = self._get_event(version, service_slug, sid)
        if event is None:
            return
        self.json(event.rest(self.base_url(version)))

    @authorized.api("admin")
    def delete(self, version, service_slug, sid):
        """Delete one event of a service."""
        event = self._get_event(version, service_slug, sid)
        if event is None:
            return

        event.delete()
        invalidate_cache()

        # Why not JSON?
        # NOTE(review): self.success (not self.json) is kept for backward
        # compatibility with existing clients of this endpoint.
        self.success(event.rest(self.base_url(version)))
class StatusesListHandler(restful.Controller):
    """REST collection of all statuses."""

    def get(self, version):
        """List every status, ordered by name."""
        if not self.valid_version(version):
            self.error(404, "API Version %s not supported" % version)
            return

        statuses = [s.rest(self.base_url(version))
                    for s in Status.all().order('name')]
        self.json({"statuses": statuses})

    @authorized.api("admin")
    def post(self, version):
        """Create a status from 'name', 'description', 'image' and an
        optional 'default' flag."""
        if not self.valid_version(version):
            self.error(404, "API Version %s not supported" % version)
            return

        name = self.request.get('name', default_value=None)
        description = self.request.get('description', default_value=None)
        image_slug = self.request.get('image', default_value=None)
        default = self.request.get('default', default_value="false")

        if default not in ["true", "false"]:
            self.error(400, "Default must be true or false")
            return

        if not name or not description or not image_slug:
            self.error(400, "Bad Data")
            return

        slug = slugify.slugify(name)
        existing = Status.get_by_slug(slug)
        image = Image.get_by_slug(image_slug)

        if existing is not None:
            self.error(400, "A Status with the slug %s already exists" % slug)
            return

        if image is None:
            msg = "An Image with the slug %s doesn't exist" % image_slug
            self.error(400, msg)
            return

        # Reset default status
        if default == "true":
            for stat in Status.all().filter("default", True):
                stat.default = False
                stat.put()

        status = Status(name=name, slug=slug, description=description,
                        image=image.path, default=(default == "true"))
        status.put()

        invalidate_cache()
        self.response.set_status(201)
        self.json(status.rest(self.base_url(version)))
class StatusInstanceHandler(restful.Controller):
    """REST resource for a single status."""

    def get(self, version, status_slug):
        """Return one status by slug."""
        if not self.valid_version(version):
            self.error(404, "API Version %s not supported" % version)
            return

        status = Status.get_by_slug(status_slug)
        if not status:
            self.error(404, "No status with the slug %s found" % status_slug)
            return

        self.json(status.rest(self.base_url(version)))

    @authorized.api("admin")
    def post(self, version, status_slug):
        """Partially update a status (name, image, description, default)."""
        if not self.valid_version(version):
            self.error(404, "API Version %s not supported" % version)
            return

        status = Status.get_by_slug(status_slug)
        if not status:
            self.error(404, "No status with the slug %s found" % status_slug)
            return

        name = self.request.get('name', default_value=None)
        image_slug = self.request.get('image', default_value=None)
        image = None
        default = self.request.get('default', default_value=None)
        description = self.request.get('description', default_value=None)

        if image_slug is not None:
            image = Image.get_by_slug(image_slug)
            if image is None:
                self.error(400, "An Image with the "
                           "slug %s doesn't exist" % image_slug)
                return
            status.image = image.path

        if description is not None:
            status.description = description

        if default is not None and default in ["false", "true"]:
            # Reset default status
            if default == "true":
                for stat in Status.all().filter("default", True):
                    stat.default = False
                    stat.put()
            status.default = default == "true"

        if name is not None:
            status.name = name

        # Persist only when something actually changed.
        if description or name or image or default:
            status.put()
            invalidate_cache()

        self.json(status.rest(self.base_url(version)))

    @authorized.api("admin")
    def delete(self, version, status_slug):
        """Delete a status and the events that reference it."""
        if not self.valid_version(version):
            self.error(404, "API Version %s not supported" % version)
            return

        status = Status.get_by_slug(status_slug)
        if not status:
            self.error(404, "Status %s not found" % status_slug)
            return

        # We may want to think more about this
        # NOTE(review): fetch(1000) caps deletion; events beyond the first
        # 1000 would linger with a dangling status reference.
        events = Event.all().filter('status =', status).fetch(1000)
        for event in events:
            event.delete()

        status.delete()
        # Fix: every other delete handler invalidates the cache; this one
        # previously left stale cached data behind after deleting a status.
        invalidate_cache()
        self.json(status.rest(self.base_url(version)))
class LevelListHandler(restful.Controller):
    """Read-only list of the severity levels the API understands."""

    def get(self, version):
        """Return the fixed set of severity level names."""
        if not self.valid_version(version):
            self.error(404, "API Version %s not supported" % version)
            return

        levels = ["NORMAL", "WARNING", "ERROR", "CRITICAL"]
        self.json({"levels": levels})
class ImagesListHandler(restful.Controller):
    """Read-only list of all icon images known to the dashboard."""

    def get(self, version):
        """Return every stored image with its absolute URL."""
        if not self.valid_version(version):
            self.error(404, "API Version %s not supported" % version)
            return

        host = self.request.headers.get('host', 'nohost')
        images = [
            {
                "url": "http://" + host + "/images/" + img.path,
                "icon_set": img.icon_set,
                "name": img.slug,
            }
            for img in Image.all().fetch(1000)
        ]
        self.json({"images": images})
|
"""Precompute the polynomials for the asymptotic expansion of the
generalized exponential integral.
Sources
-------
[1] NIST, Digital Library of Mathematical Functions,
http://dlmf.nist.gov/8.20#ii
"""
from __future__ import division, print_function, absolute_import
import os
import warnings
try:
# Can remove when sympy #11255 is resolved; see
# https://github.com/sympy/sympy/issues/11255
with warnings.catch_warnings():
warnings.simplefilter("ignore", DeprecationWarning)
import sympy
from sympy import Poly
x = sympy.symbols('x')
except ImportError:
pass
def generate_A(K):
    """Return the polynomials A_0 .. A_K of the asymptotic expansion.

    Uses the recurrence A_{k+1} = (1 - 2kx) A_k + x(x + 1) A_k'
    with A_0 = 1 (DLMF 8.20(ii)).
    """
    result = [Poly(1, x)]
    for k in range(K):
        prev = result[-1]
        nxt = Poly(1 - 2*k*x, x) * prev + Poly(x*(x + 1)) * prev.diff()
        result.append(nxt)
    return result
WARNING = """\
/* This file was automatically generated by _precompute/expn_asy.py.
* Do not edit it manually!
*/
"""
def main():
    """Generate ../cephes/expn.h with the expansion polynomials A_k.

    Writes the coefficient arrays and their degrees, first to expn.h.new
    and then renames it into place so a failed run leaves the old header
    intact.
    """
    print(__doc__)
    fn = os.path.join('..', 'cephes', 'expn.h')

    K = 12
    A = generate_A(K)

    with open(fn + '.new', 'w') as f:
        f.write(WARNING)
        f.write("#define nA {}\n".format(len(A)))
        for k, Ak in enumerate(A):
            # Fix: the comprehension variable was named "x", shadowing the
            # module-level sympy symbol x within this expression.
            tmp = ', '.join([str(coef.evalf(18)) for coef in Ak.coeffs()])
            f.write("double A{}[] = {{{}}};\n".format(k, tmp))
        tmp = ", ".join(["A{}".format(k) for k in range(K + 1)])
        f.write("double *A[] = {{{}}};\n".format(tmp))
        tmp = ", ".join([str(Ak.degree()) for Ak in A])
        f.write("int Adegs[] = {{{}}};\n".format(tmp))
    os.rename(fn + '.new', fn)


if __name__ == "__main__":
    main()
|
'''
test_qgscomposerlabel.py
--------------------------------------
Date : Oct 2012
Copyright : (C) 2012 by Dr. Hugo Mercier
email : hugo dot mercier at oslandia dot com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************/
'''
import qgis
import unittest
from utilities import getQgisTestApp, unitTestDataPath
from PyQt4.QtCore import QFileInfo, QDate, QDateTime
from qgis.core import QgsVectorLayer, QgsMapLayerRegistry, QgsMapRenderer, QgsComposition, QgsComposerLabel, QgsFeatureRequest, QgsFeature, QgsExpression
QGISAPP, CANVAS, IFACE, PARENT = getQgisTestApp()
class TestQgsComposerLabel(unittest.TestCase):
    def testCase(self):
        """Build a composition holding one label over the france_parts
        layer and run all label-evaluation sub-tests against it."""
        TEST_DATA_DIR = unitTestDataPath()
        vectorFileInfo = QFileInfo( TEST_DATA_DIR + "/france_parts.shp")
        mVectorLayer = QgsVectorLayer( vectorFileInfo.filePath(), vectorFileInfo.completeBaseName(), "ogr" )
        QgsMapLayerRegistry.instance().addMapLayers( [mVectorLayer] )

        # create composition with composer map
        mMapRenderer = QgsMapRenderer()
        layerStringList = []
        layerStringList.append( mVectorLayer.id() )
        mMapRenderer.setLayerSet( layerStringList )
        mMapRenderer.setProjectionsEnabled( False )
        mComposition = QgsComposition( mMapRenderer )
        mComposition.setPaperSize( 297, 210 )
        mLabel = QgsComposerLabel( mComposition )
        mComposition.addComposerLabel( mLabel )

        self.evaluation_test( mComposition, mLabel )
        self.feature_evaluation_test( mComposition, mLabel, mVectorLayer )
        self.page_evaluation_test( mComposition, mLabel, mVectorLayer )

    def evaluation_test( self, mComposition, mLabel ):
        """Check $CURRENT_DATE substitution and [% %] expression
        evaluation without an associated feature."""
        # $CURRENT_DATE evaluation
        mLabel.setText( "__$CURRENT_DATE__" )
        assert mLabel.displayText() == ( "__" + QDate.currentDate().toString() + "__" )
        # $CURRENT_DATE() evaluation
        mLabel.setText( "__$CURRENT_DATE(dd)(ok)__" )
        expected = "__" + QDateTime.currentDateTime().toString( "dd" ) + "(ok)__"
        assert mLabel.displayText() == expected
        # $CURRENT_DATE() evaluation (inside an expression)
        mLabel.setText( "__[%$CURRENT_DATE(dd) + 1%](ok)__" )
        dd = QDate.currentDate().day()
        expected = "__%d(ok)__" % (dd+1)
        assert mLabel.displayText() == expected
        # expression evaluation (without associated feature)
        mLabel.setText( "__[%\"NAME_1\"%][%21*2%]__" )
        assert mLabel.displayText() == "__[NAME_1]42__"

    def feature_evaluation_test( self, mComposition, mLabel, mVectorLayer ):
        """Check expression evaluation against successive features and
        with additional local ("special") variables."""
        provider = mVectorLayer.dataProvider()
        fi = provider.getFeatures( QgsFeatureRequest() )
        feat = QgsFeature()
        fi.nextFeature( feat )
        mLabel.setExpressionContext( feat, mVectorLayer )
        mLabel.setText( "[%\"NAME_1\"||'_ok'%]")
        assert mLabel.displayText() == "Basse-Normandie_ok"
        fi.nextFeature( feat )
        mLabel.setExpressionContext( feat, mVectorLayer )
        assert mLabel.displayText() == "Bretagne_ok"
        # evaluation with local variables
        locs = { "$test" : "OK" }
        mLabel.setExpressionContext( feat, mVectorLayer, locs )
        mLabel.setText( "[%\"NAME_1\"||$test%]" )
        assert mLabel.displayText() == "BretagneOK"

    def page_evaluation_test( self, mComposition, mLabel, mVectorLayer ):
        """Check $page/$numpages evaluation and special-column handling."""
        mComposition.setNumPages( 2 )
        mLabel.setText( "[%$page||'/'||$numpages%]" )
        assert mLabel.displayText() == "1/2"
        # move to the second page and re-evaluate
        mLabel.setItemPosition( 0, 320 )
        assert mLabel.displayText() == "2/2"
        # use setSpecialColumn
        mLabel.setText( "[%$var1 + 1%]" )
        QgsExpression.setSpecialColumn( "$var1", 41 )
        assert mLabel.displayText() == "42"
        QgsExpression.setSpecialColumn( "$var1", 99 )
        assert mLabel.displayText() == "100"
        QgsExpression.unsetSpecialColumn( "$var1" )
        assert mLabel.displayText() == "[%$var1 + 1%]"
# Allow running this test module directly (outside the QGIS test runner).
if __name__ == '__main__':
    unittest.main()
|
from __future__ import print_function
import lammps
import ctypes
import traceback
import numpy as np
class LAMMPSFix(object):
    """Base class for LAMMPS fixes implemented in Python.

    Wraps the running LAMMPS instance handed over via ``ptr`` and records
    which atom group the fix applies to.
    """

    def __init__(self, ptr, group_name="all"):
        self.group_name = group_name
        self.lmp = lammps.lammps(ptr=ptr)
class LAMMPSFixMove(LAMMPSFix):
    """Skeleton for fixes that move atoms; subclasses override the hooks
    they need. All hooks default to doing nothing."""

    def __init__(self, ptr, group_name="all"):
        super(LAMMPSFixMove, self).__init__(ptr, group_name)

    def init(self):
        """Called once before the run starts."""
        pass

    def initial_integrate(self, vflag):
        """First half-step of the integration."""
        pass

    def final_integrate(self):
        """Second half-step of the integration."""
        pass

    def initial_integrate_respa(self, vflag, ilevel, iloop):
        """r-RESPA variant of initial_integrate."""
        pass

    def final_integrate_respa(self, ilevel, iloop):
        """r-RESPA variant of final_integrate."""
        pass

    def reset_dt(self):
        """Called when the timestep size changes."""
        pass
class NVE(LAMMPSFixMove):
    """ Python implementation of fix/nve """

    def __init__(self, ptr, group_name="all"):
        # Fix: group_name was previously dropped (super() called without it),
        # so a caller-supplied group silently degraded to "all" and the
        # assert below could never fire.
        super(NVE, self).__init__(ptr, group_name)
        assert(self.group_name == "all")

    def init(self):
        """Cache timestep-derived integration constants."""
        dt = self.lmp.extract_global("dt", 1)
        ftm2v = self.lmp.extract_global("ftm2v", 1)
        self.ntypes = self.lmp.extract_global("ntypes", 0)
        self.dtv = dt
        self.dtf = 0.5 * dt * ftm2v

    def initial_integrate(self, vflag):
        """First velocity-Verlet half-step: v += dtf*f/m, then x += dtv*v."""
        nlocal = self.lmp.extract_global("nlocal", 0)
        mass = self.lmp.numpy.extract_atom_darray("mass", self.ntypes+1)
        atype = self.lmp.numpy.extract_atom_iarray("type", nlocal)
        x = self.lmp.numpy.extract_atom_darray("x", nlocal, dim=3)
        v = self.lmp.numpy.extract_atom_darray("v", nlocal, dim=3)
        f = self.lmp.numpy.extract_atom_darray("f", nlocal, dim=3)

        for i in range(x.shape[0]):
            # per-atom factor dtf/m, mass looked up by atom type
            dtfm = self.dtf / mass[int(atype[i])]
            v[i,:] += dtfm * f[i,:]
            x[i,:] += self.dtv * v[i,:]

    def final_integrate(self):
        """Second velocity-Verlet half-step: v += dtf*f/m."""
        nlocal = self.lmp.extract_global("nlocal", 0)
        mass = self.lmp.numpy.extract_atom_darray("mass", self.ntypes+1)
        atype = self.lmp.numpy.extract_atom_iarray("type", nlocal)
        v = self.lmp.numpy.extract_atom_darray("v", nlocal, dim=3)
        f = self.lmp.numpy.extract_atom_darray("f", nlocal, dim=3)

        for i in range(v.shape[0]):
            dtfm = self.dtf / mass[int(atype[i])]
            v[i,:] += dtfm * f[i,:]
class NVE_Opt(LAMMPSFixMove):
    """ Performance-optimized Python implementation of fix/nve """

    def __init__(self, ptr, group_name="all"):
        # Fix: forward group_name instead of silently dropping it.
        super(NVE_Opt, self).__init__(ptr, group_name)
        assert(self.group_name == "all")

    def init(self):
        """Cache integration constants and the per-type mass array."""
        dt = self.lmp.extract_global("dt", 1)
        ftm2v = self.lmp.extract_global("ftm2v", 1)
        self.ntypes = self.lmp.extract_global("ntypes", 0)
        self.dtv = dt
        self.dtf = 0.5 * dt * ftm2v
        self.mass = self.lmp.numpy.extract_atom_darray("mass", self.ntypes+1)

    def initial_integrate(self, vflag):
        """Vectorized first velocity-Verlet half-step."""
        nlocal = self.lmp.extract_global("nlocal", 0)
        atype = self.lmp.numpy.extract_atom_iarray("type", nlocal)
        x = self.lmp.numpy.extract_atom_darray("x", nlocal, dim=3)
        v = self.lmp.numpy.extract_atom_darray("v", nlocal, dim=3)
        f = self.lmp.numpy.extract_atom_darray("f", nlocal, dim=3)

        # per-atom dtf/m, computed once for all atoms
        dtfm = self.dtf / np.take(self.mass, atype)
        # Fix: reshape() returns a new array; the original discarded its
        # result, leaving the statement a no-op.
        dtfm = dtfm.reshape((nlocal, 1))

        for d in range(x.shape[1]):
            v[:,d] += dtfm[:,0] * f[:,d]
            x[:,d] += self.dtv * v[:,d]

    def final_integrate(self):
        """Vectorized second velocity-Verlet half-step."""
        nlocal = self.lmp.extract_global("nlocal", 0)
        atype = self.lmp.numpy.extract_atom_iarray("type", nlocal)
        v = self.lmp.numpy.extract_atom_darray("v", nlocal, dim=3)
        f = self.lmp.numpy.extract_atom_darray("f", nlocal, dim=3)

        # Fix: the original re-extracted "mass" only to overwrite the local
        # with self.mass two lines later; the redundant call is removed.
        dtfm = self.dtf / np.take(self.mass, atype)
        dtfm = dtfm.reshape((nlocal, 1))

        for d in range(v.shape[1]):
            v[:,d] += dtfm[:,0] * f[:,d]
|
from sqlalchemy import exc as sa_exc
from sqlalchemy import func
from sqlalchemy.orm import exc as orm_exc
from neutron.common import exceptions as n_exc
import neutron.db.api as db
from neutron.db import models_v2
from neutron.db import securitygroups_db as sg_db
from neutron.extensions import securitygroup as ext_sg
from neutron import manager
from neutron.openstack.common import log as logging
from neutron.plugins.ryu.db import models_v2 as ryu_models_v2
LOG = logging.getLogger(__name__)
def network_all_tenant_list():
    """Return every network row, across all tenants."""
    return db.get_session().query(models_v2.Network).all()
def get_port_from_device(port_id):
    """Build a port dict, including security-group info, for one port id.

    Returns None when no such port exists.
    """
    LOG.debug(_("get_port_from_device() called:port_id=%s"), port_id)
    session = db.get_session()

    binding_port_col = sg_db.SecurityGroupPortBinding.port_id
    rows = (session.query(models_v2.Port,
                          sg_db.SecurityGroupPortBinding.security_group_id)
            .outerjoin(sg_db.SecurityGroupPortBinding,
                       models_v2.Port.id == binding_port_col)
            .filter(models_v2.Port.id == port_id)
            .all())
    if not rows:
        return None

    # every row carries the same Port; the second column varies per binding
    port = rows[0][0]
    plugin = manager.NeutronManager.get_plugin()
    port_dict = plugin._make_port_dict(port)
    port_dict[ext_sg.SECURITYGROUPS] = [
        sg_id for _port, sg_id in rows if sg_id]
    port_dict['security_group_rules'] = []
    port_dict['security_group_source_groups'] = []
    port_dict['fixed_ips'] = [ip['ip_address'] for ip in port['fixed_ips']]
    return port_dict
class TunnelKey(object):
    """Allocator for per-network tunnel keys (GRE/VXLAN ids).

    The last key handed out is persisted in the TunnelKeyLast table;
    allocation scans forward from it and wraps back to key_min once the
    top of the range is reached.
    """
    # VLAN: 12 bits
    # GRE, VXLAN: 24bits
    # TODO(yamahata): STT: 64bits
    _KEY_MIN_HARD = 1
    _KEY_MAX_HARD = 0xffffffff

    def __init__(self, key_min=_KEY_MIN_HARD, key_max=_KEY_MAX_HARD):
        self.key_min = key_min
        self.key_max = key_max

        if (key_min < self._KEY_MIN_HARD or key_max > self._KEY_MAX_HARD or
                key_min > key_max):
            raise ValueError(_('Invalid tunnel key options '
                               'tunnel_key_min: %(key_min)d '
                               'tunnel_key_max: %(key_max)d. '
                               'Using default value') % {'key_min': key_min,
                                                         'key_max': key_max})

    def _last_key(self, session):
        """Return the single TunnelKeyLast row, creating or repairing it."""
        try:
            return session.query(ryu_models_v2.TunnelKeyLast).one()
        except orm_exc.MultipleResultsFound:
            # Repair path: several rows exist; collapse them to one.
            # NOTE(review): session.query(func.max(...)) yields a Query
            # object, not a scalar -- the comparison below looks like it
            # needs .scalar(); confirm before relying on this branch.
            max_key = session.query(
                func.max(ryu_models_v2.TunnelKeyLast.last_key))
            if max_key > self.key_max:
                max_key = self.key_min
            session.query(ryu_models_v2.TunnelKeyLast).delete()
            last_key = ryu_models_v2.TunnelKeyLast(last_key=max_key)
        except orm_exc.NoResultFound:
            # First allocation ever: start scanning from the bottom.
            last_key = ryu_models_v2.TunnelKeyLast(last_key=self.key_min)

        session.add(last_key)
        session.flush()
        return session.query(ryu_models_v2.TunnelKeyLast).one()

    def _find_key(self, session, last_key):
        """Try to find unused tunnel key.

        Trying to find unused tunnel key in TunnelKey table starting
        from last_key + 1.
        When all keys are used, raise sqlalchemy.orm.exc.NoResultFound
        """
        # key 0 is used for special meanings. So don't allocate 0.

        # sqlite doesn't support
        # '(select order by limit) union all (select order by limit) '
        # 'order by limit'
        # So do it manually
        # new_key = session.query("new_key").from_statement(
        #     # If last_key + 1 isn't used, it's the result
        #     'SELECT new_key '
        #     'FROM (SELECT :last_key + 1 AS new_key) q1 '
        #     'WHERE NOT EXISTS '
        #     '(SELECT 1 FROM tunnelkeys WHERE tunnel_key = :last_key + 1) '
        #
        #     'UNION ALL '
        #
        #     # if last_key + 1 used,
        #     # find the least unused key from last_key + 1
        #     '(SELECT t.tunnel_key + 1 AS new_key '
        #     'FROM tunnelkeys t '
        #     'WHERE NOT EXISTS '
        #     '(SELECT 1 FROM tunnelkeys ti '
        #     ' WHERE ti.tunnel_key = t.tunnel_key + 1) '
        #     'AND t.tunnel_key >= :last_key '
        #     'ORDER BY new_key LIMIT 1) '
        #
        #     'ORDER BY new_key LIMIT 1'
        # ).params(last_key=last_key).one()
        try:
            new_key = session.query("new_key").from_statement(
                # If last_key + 1 isn't used, it's the result
                'SELECT new_key '
                'FROM (SELECT :last_key + 1 AS new_key) q1 '
                'WHERE NOT EXISTS '
                '(SELECT 1 FROM tunnelkeys WHERE tunnel_key = :last_key + 1) '
            ).params(last_key=last_key).one()
        except orm_exc.NoResultFound:
            new_key = session.query("new_key").from_statement(
                # if last_key + 1 used,
                # find the least unused key from last_key + 1
                '(SELECT t.tunnel_key + 1 AS new_key '
                'FROM tunnelkeys t '
                'WHERE NOT EXISTS '
                '(SELECT 1 FROM tunnelkeys ti '
                ' WHERE ti.tunnel_key = t.tunnel_key + 1) '
                'AND t.tunnel_key >= :last_key '
                'ORDER BY new_key LIMIT 1) '
            ).params(last_key=last_key).one()

        new_key = new_key[0]  # the result is tuple.
        LOG.debug(_("last_key %(last_key)s new_key %(new_key)s"),
                  {'last_key': last_key, 'new_key': new_key})
        if new_key > self.key_max:
            LOG.debug(_("No key found"))
            raise orm_exc.NoResultFound()
        return new_key

    def _allocate(self, session, network_id):
        """Pick the next free key and record it for network_id."""
        last_key = self._last_key(session)
        try:
            new_key = self._find_key(session, last_key.last_key)
        except orm_exc.NoResultFound:
            # ran past key_max: restart the scan from the bottom of the range
            new_key = self._find_key(session, self.key_min)
        tunnel_key = ryu_models_v2.TunnelKey(network_id=network_id,
                                             tunnel_key=new_key)
        last_key.last_key = new_key
        session.add(tunnel_key)
        return new_key

    _TRANSACTION_RETRY_MAX = 16

    def allocate(self, session, network_id):
        """Allocate a key for network_id, retrying on transaction races."""
        count = 0
        while True:
            session.begin(subtransactions=True)
            try:
                new_key = self._allocate(session, network_id)
                session.commit()
                break
            except sa_exc.SQLAlchemyError:
                session.rollback()

            count += 1
            if count > self._TRANSACTION_RETRY_MAX:
                # if this happens too often, increase _TRANSACTION_RETRY_MAX
                LOG.warn(_("Transaction retry exhausted (%d). "
                           "Abandoned tunnel key allocation."), count)
                raise n_exc.ResourceExhausted()

        return new_key

    def delete(self, session, network_id):
        """Release the key allocated to network_id (if any)."""
        session.query(ryu_models_v2.TunnelKey).filter_by(
            network_id=network_id).delete()
        session.flush()

    def all_list(self):
        """Return every TunnelKey row."""
        session = db.get_session()
        return session.query(ryu_models_v2.TunnelKey).all()
def set_port_status(session, port_id, status):
    """Set the status column of the port identified by port_id.

    Raises n_exc.PortNotFound when the port does not exist.
    """
    try:
        port = (session.query(models_v2.Port)
                .filter_by(id=port_id)
                .one())
        port['status'] = status
        session.merge(port)
        session.flush()
    except orm_exc.NoResultFound:
        raise n_exc.PortNotFound(port_id=port_id)
|
# Public names exported by the mock library.
__all__ = (
    'Mock',
    'MagicMock',
    'mocksignature',
    'patch',
    'sentinel',
    'DEFAULT',
    'ANY',
    'call',
    'create_autospec',
    'FILTER_DIR',
    'NonCallableMock',
    'NonCallableMagicMock',
)


__version__ = '0.8.0'


import pprint
import sys

try:
    import inspect
except ImportError:
    # for alternative platforms that
    # may not have inspect
    inspect = None

try:
    from functools import wraps
except ImportError:
    # Python 2.4 compatibility
    def wraps(original):
        # Minimal stand-in for functools.wraps: copies identity metadata only.
        def inner(f):
            f.__name__ = original.__name__
            f.__doc__ = original.__doc__
            f.__module__ = original.__module__
            return f
        return inner

try:
    unicode
except NameError:
    # Python 3
    basestring = unicode = str

try:
    long
except NameError:
    # Python 3
    long = int

try:
    BaseException
except NameError:
    # Python 2.4 compatibility
    BaseException = Exception

try:
    next
except NameError:
    # Python < 2.6: emulate the next() builtin via the .next() method.
    def next(obj):
        return obj.next()


# Exception base classes recognised by side_effect handling.
BaseExceptions = (BaseException,)
if 'java' in sys.platform:
    # jython
    import java
    BaseExceptions = (BaseException, java.lang.Throwable)

try:
    _isidentifier = str.isidentifier
except AttributeError:
    # Python 2.X
    import keyword
    import re
    regex = re.compile(r'^[a-z_][a-z0-9_]*$', re.I)
    def _isidentifier(string):
        # An identifier matches the pattern and is not a reserved keyword.
        if string in keyword.kwlist:
            return False
        return regex.match(string)


# True when running under Python 3.
inPy3k = sys.version_info[0] == 3

# Keep a reference to the builtin super (mocks override __class__).
_super = super

# Name of the bound-method "self" attribute; differs between Py2 and Py3.
self = 'im_self'
builtin = '__builtin__'
if inPy3k:
    self = '__self__'
    builtin = 'builtins'

# When True, dir() on mocks filters out machinery attributes.
FILTER_DIR = True
def _is_instance_mock(obj):
    """Return True when obj is an instance of any mock class.

    isinstance() can't be used because mocks override __class__; the real
    type is inspected instead. NonCallableMock is the root of all mocks.
    """
    return issubclass(type(obj), NonCallableMock)
def _is_exception(obj):
    """True for exception instances and for exception classes."""
    if isinstance(obj, BaseExceptions):
        return True
    return isinstance(obj, ClassTypes) and issubclass(obj, BaseExceptions)
class _slotted(object):
    # Throwaway class used only to obtain the type of a slot descriptor.
    __slots__ = ['a']


# Descriptor types needing special treatment when speccing:
# slot (member) descriptors and properties.
DescriptorTypes = (
    type(_slotted.a),
    property,
)
def _getsignature(func, skipfirst):
    """Return (signature-string, func) for a callable.

    Used by mocksignature. Classes are introspected through __init__,
    other non-function callables through __call__. Raises ImportError
    when the inspect module is unavailable on this platform.
    """
    if inspect is None:
        raise ImportError('inspect module not available')

    if inspect.isclass(func):
        func = func.__init__
        # will have a self arg
        skipfirst = True
    elif not (inspect.ismethod(func) or inspect.isfunction(func)):
        # callable object: introspect its __call__ method instead
        func = func.__call__

    regargs, varargs, varkwargs, defaults = inspect.getargspec(func)

    # instance methods need to lose the self argument
    if getattr(func, self, None) is not None:
        regargs = regargs[1:]

    # "_mock_" is reserved: the generated wrapper binds the mock to it
    _msg = ("_mock_ is a reserved argument name, can't mock signatures using "
            "_mock_")
    assert '_mock_' not in regargs, _msg
    if varargs is not None:
        assert '_mock_' not in varargs, _msg
    if varkwargs is not None:
        assert '_mock_' not in varkwargs, _msg
    if skipfirst:
        regargs = regargs[1:]

    # formatvalue="" strips default values so the string works both as a
    # parameter list and as an argument list.
    signature = inspect.formatargspec(regargs, varargs, varkwargs, defaults,
                                      formatvalue=lambda value: "")
    return signature[1:-1], func
def _getsignature2(func, skipfirst, instance=False):
    """Like _getsignature, but returns None instead of failing for
    objects whose signature cannot be introspected (e.g. C functions)."""
    if inspect is None:
        raise ImportError('inspect module not available')

    if isinstance(func, ClassTypes) and not instance:
        try:
            func = func.__init__
        except AttributeError:
            return
        skipfirst = True
    elif not isinstance(func, FunctionTypes):
        # for classes where instance is True we end up here too
        try:
            func = func.__call__
        except AttributeError:
            return

    try:
        regargs, varargs, varkwargs, defaults = inspect.getargspec(func)
    except TypeError:
        # C function / method, possibly inherited object().__init__
        return

    # instance methods and classmethods need to lose the self argument
    if getattr(func, self, None) is not None:
        regargs = regargs[1:]
    if skipfirst:
        # this condition and the above one are never both True - why?
        regargs = regargs[1:]

    signature = inspect.formatargspec(regargs, varargs, varkwargs, defaults,
                                      formatvalue=lambda value: "")
    return signature[1:-1], func
def _check_signature(func, mock, skipfirst, instance=False):
    """Attach a signature-checking function to the mock's class.

    The generated checksig raises TypeError on mismatched arguments
    before the mock records the call. Does nothing when func is not
    callable or its signature cannot be introspected.
    """
    if not _callable(func):
        return

    result = _getsignature2(func, skipfirst, instance)
    if result is None:
        return
    signature, func = result

    # can't use self because "self" is common as an argument name
    # unfortunately even not in the first place
    src = "lambda _mock_self, %s: None" % signature
    checksig = eval(src, {})
    _copy_func_details(func, checksig)
    type(mock)._mock_check_sig = checksig
def _copy_func_details(func, funcopy):
    """Copy identity metadata (name, doc, module, defaults) onto funcopy."""
    funcopy.__name__ = func.__name__
    funcopy.__doc__ = func.__doc__
    funcopy.__module__ = func.__module__
    if inPy3k:
        funcopy.__defaults__ = func.__defaults__
        funcopy.__kwdefaults__ = func.__kwdefaults__
    else:
        funcopy.func_defaults = func.func_defaults
def _callable(obj):
    """True when obj is a class or has a __call__ attribute."""
    if isinstance(obj, ClassTypes):
        return True
    return getattr(obj, '__call__', None) is not None
def _is_list(obj):
# checks for list or tuples
# XXXX badly named!
return type(obj) in (list, tuple)
def _instance_callable(obj):
    """Given an object, return True if the object is callable.
    For classes, return True if instances would be callable."""
    if not isinstance(obj, ClassTypes):
        # already an instance
        return getattr(obj, '__call__', None) is not None

    # uses __bases__ instead of __mro__ so that we work with old style classes
    if obj.__dict__.get('__call__') is not None:
        return True
    return any(_instance_callable(base) for base in obj.__bases__)
def _set_signature(mock, original, instance=False):
    # creates a function with signature (*args, **kwargs) that delegates to a
    # mock. It still does signature checking by calling a lambda with the same
    # signature as the original. This is effectively mocksignature2.
    if not _callable(original):
        return

    skipfirst = isinstance(original, ClassTypes)
    result = _getsignature2(original, skipfirst, instance)
    if result is None:
        # was a C function (e.g. object().__init__ ) that can't be mocked
        return
    signature, func = result

    # checksig only validates arguments; its body never runs real code
    src = "lambda %s: None" % signature
    context = {'_mock_': mock}
    checksig = eval(src, context)
    _copy_func_details(func, checksig)

    name = original.__name__
    if not _isidentifier(name):
        # e.g. "<lambda>" is not a valid name for a generated def
        name = 'funcopy'
    context = {'checksig': checksig, 'mock': mock}
    src = """def %s(*args, **kwargs):
    checksig(*args, **kwargs)
    return mock(*args, **kwargs)""" % name
    exec (src, context)
    funcopy = context[name]
    _setup_func(funcopy, mock)
    return funcopy
def mocksignature(func, mock=None, skipfirst=False):
    """
    mocksignature(func, mock=None, skipfirst=False)

    Create a new function with the same signature as `func` that delegates
    to `mock`. If `skipfirst` is True the first argument is skipped, useful
    for methods where `self` needs to be omitted from the new function.

    If you don't pass in a `mock` then one will be created for you.

    The mock is set as the `mock` attribute of the returned function for easy
    access.

    Functions returned by `mocksignature` have many of the same attributes
    and assert methods as a mock object.

    `mocksignature` can also be used with classes. It copies the signature of
    the `__init__` method.

    When used with callable objects (instances) it copies the signature of the
    `__call__` method.
    """
    if mock is None:
        mock = Mock()
    signature, func = _getsignature(func, skipfirst)
    # the generated lambda forwards its arguments verbatim to the mock,
    # which enforces the original signature at call time
    src = "lambda %(signature)s: _mock_(%(signature)s)" % {
        'signature': signature
    }

    funcopy = eval(src, dict(_mock_=mock))
    _copy_func_details(func, funcopy)
    _setup_func(funcopy, mock)
    return funcopy
def _setup_func(funcopy, mock):
    """Give funcopy (a mocksignature wrapper) mock-like attributes and
    assert methods that delegate to ``mock``."""
    funcopy.mock = mock

    # can't use isinstance with mocks
    if not _is_instance_mock(mock):
        return

    def assert_called_with(*args, **kwargs):
        return mock.assert_called_with(*args, **kwargs)
    def assert_called_once_with(*args, **kwargs):
        return mock.assert_called_once_with(*args, **kwargs)
    def assert_has_calls(*args, **kwargs):
        return mock.assert_has_calls(*args, **kwargs)
    def assert_any_call(*args, **kwargs):
        return mock.assert_any_call(*args, **kwargs)
    def reset_mock():
        # reset both the wrapper's bookkeeping and the underlying mock
        funcopy.method_calls = _CallList()
        funcopy.mock_calls = _CallList()
        mock.reset_mock()
        ret = funcopy.return_value
        if _is_instance_mock(ret) and not ret is mock:
            ret.reset_mock()

    funcopy.called = False
    funcopy.call_count = 0
    funcopy.call_args = None
    funcopy.call_args_list = _CallList()
    funcopy.method_calls = _CallList()
    funcopy.mock_calls = _CallList()

    funcopy.return_value = mock.return_value
    funcopy.side_effect = mock.side_effect
    funcopy._mock_children = mock._mock_children

    funcopy.assert_called_with = assert_called_with
    funcopy.assert_called_once_with = assert_called_once_with
    funcopy.assert_has_calls = assert_has_calls
    funcopy.assert_any_call = assert_any_call
    funcopy.reset_mock = reset_mock

    # let the mock find its signature wrapper again
    mock._mock_signature = funcopy
def _is_magic(name):
return '__%s__' % name[2:-2] == name
class _SentinelObject(object):
"A unique, named, sentinel object."
def __init__(self, name):
self.name = name
def __repr__(self):
return 'sentinel.%s' % self.name
class _Sentinel(object):
    """Access attributes to return a named object, usable as a sentinel."""

    def __init__(self):
        # cache mapping attribute name -> sentinel, so repeated access
        # always yields the identical object
        self._sentinels = {}

    def __getattr__(self, name):
        if name == '__bases__':
            # Without this help(mock) raises an exception
            raise AttributeError
        if name not in self._sentinels:
            self._sentinels[name] = _SentinelObject(name)
        return self._sentinels[name]
# Singleton through which all named sentinels are accessed.
sentinel = _Sentinel()

# Marker meaning "no value supplied", used throughout mock.
DEFAULT = sentinel.DEFAULT


class OldStyleClass:
    # Throwaway old-style class (Python 2) used to capture its type below.
    pass

# The type of old-style classes (classobj on Python 2; type on Python 3).
ClassType = type(OldStyleClass)
def _copy(value):
if type(value) in (dict, list, tuple, set):
return type(value)(value)
return value
# Types treated as "classes": plain type on Python 3, plus old-style
# classes on Python 2.
ClassTypes = (type,)
if not inPy3k:
    ClassTypes = (type, ClassType)

# Attribute names that may be assigned directly on a mock without being
# routed through the child-mock machinery.
_allowed_names = set(
    [
        'return_value', '_mock_return_value', 'side_effect',
        '_mock_side_effect', '_mock_parent', '_mock_new_parent',
        '_mock_name', '_mock_new_name'
    ]
)
def _mock_signature_property(name):
    """Build a property that delegates to the mock's signature wrapper.

    When a mock has a _mock_signature (set by mocksignature), reads and
    writes of the named attribute are forwarded to that function object;
    otherwise the value lives on the mock as '_mock_<name>'.
    """
    _allowed_names.add(name)
    _the_name = '_mock_' + name
    # name/_the_name are bound as defaults so each closure keeps its own copy
    def _get(self, name=name, _the_name=_the_name):
        sig = self._mock_signature
        if sig is None:
            return getattr(self, _the_name)
        return getattr(sig, name)
    def _set(self, value, name=name, _the_name=_the_name):
        sig = self._mock_signature
        if sig is None:
            self.__dict__[_the_name] = value
        else:
            setattr(sig, name, value)

    return property(_get, _set)
class _CallList(list):
def __contains__(self, value):
if not isinstance(value, list):
return list.__contains__(self, value)
len_value = len(value)
len_self = len(self)
if len_value > len_self:
return False
for i in range(0, len_self - len_value + 1):
sub_list = self[i:i+len_value]
if sub_list == value:
return True
return False
def __repr__(self):
return pprint.pformat(list(self))
def _check_and_set_parent(parent, value, name, new_name):
    """Adopt `value` as a child mock of `parent` when it is an unclaimed mock.

    Returns True when the parent/name attributes were set, False when
    `value` is not a mock, already has a name or parent, or is an
    ancestor of `parent` (a mock must never become its own child).
    """
    if not _is_instance_mock(value):
        return False
    already_claimed = (
        value._mock_name or value._mock_new_name or
        value._mock_parent is not None or
        value._mock_new_parent is not None
    )
    if already_claimed:
        return False
    # setting a mock (value) as a child or return value of itself
    # should not modify the mock
    ancestor = parent
    while ancestor is not None:
        if ancestor is value:
            return False
        ancestor = ancestor._mock_new_parent
    if new_name:
        value._mock_new_parent = parent
        value._mock_new_name = new_name
    if name:
        value._mock_parent = parent
        value._mock_name = name
    return True
class Base(object):
    """Common base for mock classes: default return value / side effect."""
    _mock_return_value = DEFAULT
    _mock_side_effect = None
    def __init__(self, *args, **kwargs):
        # Terminates the cooperative-super chain: swallow all arguments.
        pass
class NonCallableMock(Base):
    """A non-callable version of `Mock`"""
    def __new__(cls, *args, **kw):
        # every instance has its own class
        # so we can create magic methods on the
        # class without stomping on other mocks
        new = type(cls.__name__, (cls,), {'__doc__': cls.__doc__})
        instance = object.__new__(new)
        return instance
    def __init__(
        self, spec=None, wraps=None, name=None, spec_set=None,
        parent=None, _spec_state=None, _new_name='', _new_parent=None,
        **kwargs
    ):
        if _new_parent is None:
            _new_parent = parent
        # Write through __dict__ directly: __setattr__ is overridden below
        # and would treat these as user attributes.
        __dict__ = self.__dict__
        __dict__['_mock_parent'] = parent
        __dict__['_mock_name'] = name
        __dict__['_mock_new_name'] = _new_name
        __dict__['_mock_new_parent'] = _new_parent
        if spec_set is not None:
            # spec_set implies spec
            spec = spec_set
            spec_set = True
        self._mock_add_spec(spec, spec_set)
        __dict__['_mock_children'] = {}
        __dict__['_mock_wraps'] = wraps
        __dict__['_mock_signature'] = None
        __dict__['_mock_called'] = False
        __dict__['_mock_call_args'] = None
        __dict__['_mock_call_count'] = 0
        __dict__['_mock_call_args_list'] = _CallList()
        __dict__['_mock_mock_calls'] = _CallList()
        __dict__['method_calls'] = _CallList()
        if kwargs:
            self.configure_mock(**kwargs)
        _super(NonCallableMock, self).__init__(
            spec, wraps, name, spec_set, parent,
            _spec_state
        )
    def attach_mock(self, mock, attribute):
        """
        Attach a mock as an attribute of this one, replacing its name and
        parent. Calls to the attached mock will be recorded in the
        `method_calls` and `mock_calls` attributes of this one."""
        # Clear any previous ownership so _check_and_set_parent (via
        # setattr below) will adopt the mock.
        mock._mock_parent = None
        mock._mock_new_parent = None
        mock._mock_name = ''
        mock._mock_new_name = None
        setattr(self, attribute, mock)
    def mock_add_spec(self, spec, spec_set=False):
        """Add a spec to a mock. `spec` can either be an object or a
        list of strings. Only attributes on the `spec` can be fetched as
        attributes from the mock.
        If `spec_set` is True then only attributes on the spec can be set."""
        self._mock_add_spec(spec, spec_set)
    def _mock_add_spec(self, spec, spec_set):
        # Normalise `spec` into a list of allowed attribute names, recording
        # the spec's class (if any) so the __class__ property can report it.
        _spec_class = None
        if spec is not None and not _is_list(spec):
            if isinstance(spec, ClassTypes):
                _spec_class = spec
            else:
                _spec_class = _get_class(spec)
            spec = dir(spec)
        __dict__ = self.__dict__
        __dict__['_spec_class'] = _spec_class
        __dict__['_spec_set'] = spec_set
        __dict__['_mock_methods'] = spec
    def __get_return_value(self):
        # The return value mock is created lazily on first access and then
        # cached via the property setter.
        ret = self._mock_return_value
        if self._mock_signature is not None:
            ret = self._mock_signature.return_value
        if ret is DEFAULT:
            ret = self._get_child_mock(
                _new_parent=self, _new_name='()'
            )
            self.return_value = ret
        return ret
    def __set_return_value(self, value):
        if self._mock_signature is not None:
            self._mock_signature.return_value = value
        else:
            self._mock_return_value = value
            _check_and_set_parent(self, value, None, '()')
    __return_value_doc = "The value to be returned when the mock is called."
    return_value = property(__get_return_value, __set_return_value,
                            __return_value_doc)
    @property
    def __class__(self):
        # Reporting the spec's class makes isinstance() checks pass for
        # specced mocks.
        if self._spec_class is None:
            return type(self)
        return self._spec_class
    called = _mock_signature_property('called')
    call_count = _mock_signature_property('call_count')
    call_args = _mock_signature_property('call_args')
    call_args_list = _mock_signature_property('call_args_list')
    mock_calls = _mock_signature_property('mock_calls')
    def __get_side_effect(self):
        sig = self._mock_signature
        if sig is None:
            return self._mock_side_effect
        return sig.side_effect
    def __set_side_effect(self, value):
        # Plain iterables are turned into iterators so each call consumes
        # the next value.
        value = _try_iter(value)
        sig = self._mock_signature
        if sig is None:
            self._mock_side_effect = value
        else:
            sig.side_effect = value
    side_effect = property(__get_side_effect, __set_side_effect)
    def reset_mock(self):
        "Restore the mock object to its initial state."
        self.called = False
        self.call_args = None
        self.call_count = 0
        self.mock_calls = _CallList()
        self.call_args_list = _CallList()
        self.method_calls = _CallList()
        for child in self._mock_children.values():
            child.reset_mock()
        ret = self._mock_return_value
        if _is_instance_mock(ret) and ret is not self:
            ret.reset_mock()
    def configure_mock(self, **kwargs):
        """Set attributes on the mock through keyword arguments.
        Attributes plus return values and side effects can be set on child
        mocks using standard dot notation and unpacking a dictionary in the
        method call:
        >>> attrs = {'method.return_value': 3, 'other.side_effect': KeyError}
        >>> mock.configure_mock(**attrs)"""
        for arg, val in sorted(kwargs.items(),
                               # we sort on the number of dots so that
                               # attributes are set before we set attributes on
                               # attributes
                               key=lambda entry: entry[0].count('.')):
            args = arg.split('.')
            final = args.pop()
            obj = self
            for entry in args:
                obj = getattr(obj, entry)
            setattr(obj, final, val)
    def __getattr__(self, name):
        if name == '_mock_methods':
            raise AttributeError(name)
        elif self._mock_methods is not None:
            if name not in self._mock_methods or name in _all_magics:
                raise AttributeError("Mock object has no attribute %r" % name)
        elif _is_magic(name):
            raise AttributeError(name)
        # Child mocks are created lazily on first access and then cached.
        result = self._mock_children.get(name)
        if result is None:
            wraps = None
            if self._mock_wraps is not None:
                # XXXX should we get the attribute without triggering code
                # execution?
                wraps = getattr(self._mock_wraps, name)
            result = self._get_child_mock(
                parent=self, name=name, wraps=wraps, _new_name=name,
                _new_parent=self
            )
            self._mock_children[name] = result
        elif isinstance(result, _SpecState):
            # A deferred autospec placeholder: realise it now.
            result = create_autospec(
                result.spec, result.spec_set, result.instance,
                result.parent, result.name
            )
            self._mock_children[name] = result
        return result
    def __repr__(self):
        # Build the dotted access path for this mock by walking the
        # _mock_new_parent chain, guarding against cycles.
        _name_list = [self._mock_new_name]
        _parent = self._mock_new_parent
        last = self
        dot = '.'
        if _name_list == ['()']:
            dot = ''
        seen = set()
        while _parent is not None:
            last = _parent
            _name_list.append(_parent._mock_new_name + dot)
            dot = '.'
            if _parent._mock_new_name == '()':
                dot = ''
            _parent = _parent._mock_new_parent
            # use ids here so as not to call __hash__ on the mocks
            if id(_parent) in seen:
                break
            seen.add(id(_parent))
        _name_list = list(reversed(_name_list))
        _first = last._mock_name or 'mock'
        if len(_name_list) > 1:
            if _name_list[1] not in ('()', '().'):
                _first += '.'
        _name_list[0] = _first
        name = ''.join(_name_list)
        name_string = ''
        if name not in ('mock', 'mock.'):
            name_string = ' name=%r' % name
        spec_string = ''
        if self._spec_class is not None:
            spec_string = ' spec=%r'
            if self._spec_set:
                spec_string = ' spec_set=%r'
            spec_string = spec_string % self._spec_class.__name__
        return "<%s%s%s id='%s'>" % (
            type(self).__name__,
            name_string,
            spec_string,
            id(self)
        )
    def __dir__(self):
        """Filter the output of `dir(mock)` to only useful members.
        XXXX
        """
        extras = self._mock_methods or []
        from_type = dir(type(self))
        from_dict = list(self.__dict__)
        if FILTER_DIR:
            from_type = [e for e in from_type if not e.startswith('_')]
            from_dict = [e for e in from_dict if not e.startswith('_') or
                         _is_magic(e)]
        return sorted(set(extras + from_type + from_dict +
                          list(self._mock_children)))
    def __setattr__(self, name, value):
        """Enforce spec_set restrictions and route magic-method assignment
        onto the per-instance class created in __new__."""
        if name in _allowed_names:
            # property setters go through here
            return object.__setattr__(self, name, value)
        elif (self._spec_set and self._mock_methods is not None and
              name not in self._mock_methods and
              name not in self.__dict__):
            raise AttributeError("Mock object has no attribute '%s'" % name)
        elif name in _unsupported_magics:
            msg = 'Attempting to set unsupported magic method %r.' % name
            raise AttributeError(msg)
        elif name in _all_magics:
            if self._mock_methods is not None and name not in self._mock_methods:
                raise AttributeError("Mock object has no attribute '%s'" % name)
            if not _is_instance_mock(value):
                # a plain callable: install it on the class wrapped in a
                # signature-checking mock
                setattr(type(self), name, _get_method(name, value))
                original = value
                real = lambda *args, **kw: original(self, *args, **kw)
                value = mocksignature(value, real, skipfirst=True)
            else:
                # only set _new_name and not name so that mock_calls is tracked
                # but not method calls
                _check_and_set_parent(self, value, None, name)
                setattr(type(self), name, value)
        else:
            if _check_and_set_parent(self, value, name, name):
                self._mock_children[name] = value
        return object.__setattr__(self, name, value)
    def __delattr__(self, name):
        if name in _all_magics and name in type(self).__dict__:
            delattr(type(self), name)
            if name not in self.__dict__:
                # for magic methods that are still MagicProxy objects and
                # not set on the instance itself
                return
        return object.__delattr__(self, name)
    def _format_mock_call_signature(self, args, kwargs):
        """Render this mock's name plus (args, kwargs) as a call string."""
        name = self._mock_name or 'mock'
        return _format_call_signature(name, args, kwargs)
    def _format_mock_failure_message(self, args, kwargs):
        message = 'Expected call: %s\nActual call: %s'
        expected_string = self._format_mock_call_signature(args, kwargs)
        call_args = self.call_args
        # call_args may be a 3-tuple (name, args, kwargs); drop the name
        if len(call_args) == 3:
            call_args = call_args[1:]
        actual_string = self._format_mock_call_signature(*call_args)
        return message % (expected_string, actual_string)
    def assert_called_with(_mock_self, *args, **kwargs):
        """assert that the mock was called with the specified arguments.
        Raises an AssertionError if the args and keyword args passed in are
        different to the last call to the mock."""
        self = _mock_self
        if self.call_args is None:
            expected = self._format_mock_call_signature(args, kwargs)
            raise AssertionError('Expected call: %s\nNot called' % (expected,))
        if self.call_args != (args, kwargs):
            msg = self._format_mock_failure_message(args, kwargs)
            raise AssertionError(msg)
    def assert_called_once_with(_mock_self, *args, **kwargs):
        """assert that the mock was called exactly once and with the specified
        arguments."""
        self = _mock_self
        if not self.call_count == 1:
            msg = ("Expected to be called once. Called %s times." %
                   self.call_count)
            raise AssertionError(msg)
        return self.assert_called_with(*args, **kwargs)
    def assert_has_calls(self, calls, any_order=False):
        """assert the mock has been called with the specified calls.
        The `mock_calls` list is checked for the calls.
        If `any_order` is False (the default) then the calls must be
        sequential. There can be extra calls before or after the
        specified calls.
        If `any_order` is True then the calls can be in any order, but
        they must all appear in `mock_calls`."""
        if not any_order:
            # _CallList.__contains__ does contiguous-sublist matching here
            if calls not in self.mock_calls:
                raise AssertionError(
                    'Calls not found.\nExpected: %r\n'
                    'Actual: %r' % (calls, self.mock_calls)
                )
            return
        all_calls = list(self.mock_calls)
        not_found = []
        for kall in calls:
            try:
                all_calls.remove(kall)
            except ValueError:
                not_found.append(kall)
        if not_found:
            raise AssertionError(
                '%r not all found in call list' % (tuple(not_found),)
            )
    def assert_any_call(self, *args, **kwargs):
        """assert the mock has been called with the specified arguments.
        The assert passes if the mock has *ever* been called, unlike
        `assert_called_with` and `assert_called_once_with` that only pass if
        the call is the most recent one."""
        kall = call(*args, **kwargs)
        if kall not in self.call_args_list:
            expected_string = self._format_mock_call_signature(args, kwargs)
            raise AssertionError(
                '%s call not found' % expected_string
            )
    def _get_child_mock(self, **kw):
        """Create the child mocks for attributes and return value.
        By default child mocks will be the same type as the parent.
        Subclasses of Mock may want to override this to customize the way
        child mocks are made.
        For non-callable mocks the callable variant will be used (rather than
        any custom subclass)."""
        _type = type(self)
        if not issubclass(_type, CallableMixin):
            if issubclass(_type, NonCallableMagicMock):
                klass = MagicMock
            elif issubclass(_type, NonCallableMock) :
                klass = Mock
        else:
            # callable parent: reuse its class (skipping the per-instance
            # subclass created in __new__)
            klass = _type.__mro__[1]
        return klass(**kw)
def _try_iter(obj):
    """Turn an iterable side_effect into an iterator; pass others through.

    None, exceptions and callables are returned untouched — they have
    their own meanings as side effects.
    """
    if obj is None or _is_exception(obj) or _callable(obj):
        return obj
    try:
        return iter(obj)
    except TypeError:
        # XXXX backwards compatibility
        # but this will blow up on first call - so maybe we should fail early?
        return obj
class CallableMixin(Base):
    """Mixin that makes a mock callable: records calls on this mock and all
    its ancestors, and produces the result from side_effect / return_value /
    the wrapped object."""
    def __init__(self, spec=None, side_effect=None, return_value=DEFAULT,
                 wraps=None, name=None, spec_set=None, parent=None,
                 _spec_state=None, _new_name='', _new_parent=None, **kwargs):
        self.__dict__['_mock_return_value'] = return_value
        _super(CallableMixin, self).__init__(
            spec, wraps, name, spec_set, parent,
            _spec_state, _new_name, _new_parent, **kwargs
        )
        # set via the property so iterables are wrapped by _try_iter
        self.side_effect = side_effect
    def _mock_check_sig(self, *args, **kwargs):
        # stub method that can be replaced with one with a specific signature
        pass
    def __call__(_mock_self, *args, **kwargs):
        # can't use self in-case a function / method we are mocking uses self
        # in the signature
        _mock_self._mock_check_sig(*args, **kwargs)
        return _mock_self._mock_call(*args, **kwargs)
    def _mock_call(_mock_self, *args, **kwargs):
        """Record the call here and on every ancestor, then compute the
        return value (side_effect first, then wraps, then return_value)."""
        self = _mock_self
        self.called = True
        self.call_count += 1
        self.call_args = _Call((args, kwargs), two=True)
        self.call_args_list.append(_Call((args, kwargs), two=True))
        _new_name = self._mock_new_name
        _new_parent = self._mock_new_parent
        self.mock_calls.append(_Call(('', args, kwargs)))
        seen = set()
        skip_next_dot = _new_name == '()'
        do_method_calls = self._mock_parent is not None
        name = self._mock_name
        # Walk up the parent chain, extending the dotted call name at each
        # level and appending to each ancestor's mock_calls / method_calls.
        while _new_parent is not None:
            this_mock_call = _Call((_new_name, args, kwargs))
            if _new_parent._mock_new_name:
                dot = '.'
                if skip_next_dot:
                    dot = ''
                skip_next_dot = False
                if _new_parent._mock_new_name == '()':
                    # no dot after a call: mock().method, not mock().
                    skip_next_dot = True
                _new_name = _new_parent._mock_new_name + dot + _new_name
            if do_method_calls:
                if _new_name == name:
                    this_method_call = this_mock_call
                else:
                    this_method_call = _Call((name, args, kwargs))
                _new_parent.method_calls.append(this_method_call)
                do_method_calls = _new_parent._mock_parent is not None
                if do_method_calls:
                    name = _new_parent._mock_name + '.' + name
            _new_parent.mock_calls.append(this_mock_call)
            _new_parent = _new_parent._mock_new_parent
            # use ids here so as not to call __hash__ on the mocks
            _new_parent_id = id(_new_parent)
            if _new_parent_id in seen:
                break
            seen.add(_new_parent_id)
        ret_val = DEFAULT
        effect = self.side_effect
        if effect is not None:
            if _is_exception(effect):
                raise effect
            if not _callable(effect):
                # iterator side_effect: yield the next value
                return next(effect)
            ret_val = effect(*args, **kwargs)
            if ret_val is DEFAULT:
                ret_val = self.return_value
        if (self._mock_wraps is not None and
            self._mock_return_value is DEFAULT):
            return self._mock_wraps(*args, **kwargs)
        if ret_val is DEFAULT:
            ret_val = self.return_value
        return ret_val
class Mock(CallableMixin, NonCallableMock):
    """
    Create a new `Mock` object. `Mock` takes several optional arguments
    that specify the behaviour of the Mock object:
    * `spec`: This can be either a list of strings or an existing object (a
    class or instance) that acts as the specification for the mock object. If
    you pass in an object then a list of strings is formed by calling dir on
    the object (excluding unsupported magic attributes and methods). Accessing
    any attribute not in this list will raise an `AttributeError`.
    If `spec` is an object (rather than a list of strings) then
    `mock.__class__` returns the class of the spec object. This allows mocks
    to pass `isinstance` tests.
    * `spec_set`: A stricter variant of `spec`. If used, attempting to *set*
    or get an attribute on the mock that isn't on the object passed as
    `spec_set` will raise an `AttributeError`.
    * `side_effect`: A function to be called whenever the Mock is called. See
    the `side_effect` attribute. Useful for raising exceptions or
    dynamically changing return values. The function is called with the same
    arguments as the mock, and unless it returns `DEFAULT`, the return
    value of this function is used as the return value.
    Alternatively `side_effect` can be an exception class or instance. In
    this case the exception will be raised when the mock is called.
    If `side_effect` is an iterable then each call to the mock will return
    the next value from the iterable.
    * `return_value`: The value returned when the mock is called. By default
    this is a new Mock (created on first access). See the
    `return_value` attribute.
    * `wraps`: Item for the mock object to wrap. If `wraps` is not None
    then calling the Mock will pass the call through to the wrapped object
    (returning the real result and ignoring `return_value`). Attribute
    access on the mock will return a Mock object that wraps the corresponding
    attribute of the wrapped object (so attempting to access an attribute that
    doesn't exist will raise an `AttributeError`).
    If the mock has an explicit `return_value` set then calls are not passed
    to the wrapped object and the `return_value` is returned instead.
    * `name`: If the mock has a name then it will be used in the repr of the
    mock. This can be useful for debugging. The name is propagated to child
    mocks.
    Mocks can also be called with arbitrary keyword arguments. These will be
    used to set attributes on the mock after it is created.
    """
    # All behaviour comes from CallableMixin (calling) and NonCallableMock
    # (attribute handling); this class only ties the two together.
def _dot_lookup(thing, comp, import_path):
try:
return getattr(thing, comp)
except AttributeError:
__import__(import_path)
return getattr(thing, comp)
def _importer(target):
components = target.split('.')
import_path = components.pop(0)
thing = __import__(import_path)
for comp in components:
import_path += ".%s" % comp
thing = _dot_lookup(thing, comp, import_path)
return thing
def _is_started(patcher):
    """Return True once the patcher's __enter__ has run."""
    # XXXX horrible
    # `is_local` is only ever set by _patch.__enter__, so its mere
    # presence marks an active patcher.
    return hasattr(patcher, 'is_local')
class _patch(object):
    """Implementation behind `patch`, `patch.object` and `patch.multiple`.

    Usable as a function decorator, class decorator or context manager;
    __enter__/__exit__ are also exposed as start/stop.
    """
    # set by patch.multiple: the keyword name the created mock is passed as
    attribute_name = None
    def __init__(
        self, getter, attribute, new, spec, create,
        mocksignature, spec_set, autospec, new_callable, kwargs
    ):
        # `new_callable` is mutually exclusive with both `new` and `autospec`
        if new_callable is not None:
            if new is not DEFAULT:
                raise ValueError(
                    "Cannot use 'new' and 'new_callable' together"
                )
            if autospec is not False:
                raise ValueError(
                    "Cannot use 'autospec' and 'new_callable' together"
                )
        self.getter = getter
        self.attribute = attribute
        self.new = new
        self.new_callable = new_callable
        self.spec = spec
        self.create = create
        self.has_local = False
        self.mocksignature = mocksignature
        self.spec_set = spec_set
        self.autospec = autospec
        self.kwargs = kwargs
        self.additional_patchers = []
    def copy(self):
        """Return an independent patcher with the same configuration
        (used when decorating several methods of a class)."""
        patcher = _patch(
            self.getter, self.attribute, self.new, self.spec,
            self.create, self.mocksignature, self.spec_set,
            self.autospec, self.new_callable, self.kwargs
        )
        patcher.attribute_name = self.attribute_name
        patcher.additional_patchers = [
            p.copy() for p in self.additional_patchers
        ]
        return patcher
    def __call__(self, func):
        if isinstance(func, ClassTypes):
            return self.decorate_class(func)
        return self.decorate_callable(func)
    def decorate_class(self, klass):
        # Wrap every callable attribute whose name matches TEST_PREFIX with
        # a fresh copy of this patcher.
        for attr in dir(klass):
            if not attr.startswith(patch.TEST_PREFIX):
                continue
            attr_value = getattr(klass, attr)
            if not hasattr(attr_value, "__call__"):
                continue
            patcher = self.copy()
            setattr(klass, attr, patcher(attr_value))
        return klass
    def decorate_callable(self, func):
        # An already-patched function just accumulates this patcher.
        if hasattr(func, 'patchings'):
            func.patchings.append(self)
            return func
        @wraps(func)
        def patched(*args, **keywargs):
            # don't use a with here (backwards compatibility with Python 2.4)
            extra_args = []
            entered_patchers = []
            # can't use try...except...finally because of Python 2.4
            # compatibility
            try:
                try:
                    for patching in patched.patchings:
                        arg = patching.__enter__()
                        entered_patchers.append(patching)
                        if patching.attribute_name is not None:
                            keywargs.update(arg)
                        elif patching.new is DEFAULT:
                            extra_args.append(arg)
                    args += tuple(extra_args)
                    return func(*args, **keywargs)
                except:
                    if (patching not in entered_patchers and
                        _is_started(patching)):
                        # the patcher may have been started, but an exception
                        # raised whilst entering one of its additional_patchers
                        entered_patchers.append(patching)
                    # re-raise the exception
                    raise
            finally:
                for patching in reversed(entered_patchers):
                    patching.__exit__()
        patched.patchings = [self]
        if hasattr(func, 'func_code'):
            # not in Python 3
            patched.compat_co_firstlineno = getattr(
                func, "compat_co_firstlineno",
                func.func_code.co_firstlineno
            )
        return patched
    def get_original(self):
        """Return (original value, was-it-defined-locally) for the target
        attribute; `local` distinguishes inherited/proxied attributes."""
        target = self.getter()
        name = self.attribute
        original = DEFAULT
        local = False
        try:
            original = target.__dict__[name]
        except (AttributeError, KeyError):
            original = getattr(target, name, DEFAULT)
        else:
            local = True
        if not self.create and original is DEFAULT:
            raise AttributeError(
                "%s does not have the attribute %r" % (target, name)
            )
        return original, local
    def __enter__(self):
        """Perform the patch."""
        new, spec, spec_set = self.new, self.spec, self.spec_set
        autospec, kwargs = self.autospec, self.kwargs
        new_callable = self.new_callable
        self.target = self.getter()
        original, local = self.get_original()
        if new is DEFAULT and autospec is False:
            inherit = False
            # == True deliberately: spec=True means "spec from the original",
            # anything else is treated as the spec object itself
            if spec_set == True:
                spec_set = original
            elif spec == True:
                # set spec to the object we are replacing
                spec = original
            if (spec or spec_set) is not None:
                if isinstance(original, ClassTypes):
                    # If we're patching out a class and there is a spec
                    inherit = True
            Klass = MagicMock
            _kwargs = {}
            if new_callable is not None:
                Klass = new_callable
            elif (spec or spec_set) is not None:
                if not _callable(spec or spec_set):
                    Klass = NonCallableMagicMock
            if spec is not None:
                _kwargs['spec'] = spec
            if spec_set is not None:
                _kwargs['spec_set'] = spec_set
            # add a name to mocks
            if (isinstance(Klass, type) and
                issubclass(Klass, NonCallableMock) and self.attribute):
                _kwargs['name'] = self.attribute
            _kwargs.update(kwargs)
            new = Klass(**_kwargs)
            if inherit and _is_instance_mock(new):
                # we can only tell if the instance should be callable if the
                # spec is not a list
                if (not _is_list(spec or spec_set) and not
                    _instance_callable(spec or spec_set)):
                    Klass = NonCallableMagicMock
                _kwargs.pop('name')
                new.return_value = Klass(_new_parent=new, _new_name='()',
                                         **_kwargs)
        elif autospec is not False:
            # spec is ignored, new *must* be default, spec_set is treated
            # as a boolean. Should we check spec is not None and that spec_set
            # is a bool? mocksignature should also not be used. Should we
            # check this?
            if new is not DEFAULT:
                raise TypeError(
                    "autospec creates the mock for you. Can't specify "
                    "autospec and new."
                )
            spec_set = bool(spec_set)
            if autospec is True:
                autospec = original
            new = create_autospec(autospec, spec_set=spec_set,
                                  _name=self.attribute, **kwargs)
        elif kwargs:
            # can't set keyword args when we aren't creating the mock
            # XXXX If new is a Mock we could call new.configure_mock(**kwargs)
            raise TypeError("Can't pass kwargs to a mock we aren't creating")
        new_attr = new
        if self.mocksignature:
            new_attr = mocksignature(original, new)
        self.temp_original = original
        self.is_local = local
        setattr(self.target, self.attribute, new_attr)
        if self.attribute_name is not None:
            # patch.multiple: also enter the sibling patchers and collect
            # the created mocks keyed by attribute name
            extra_args = {}
            if self.new is DEFAULT:
                extra_args[self.attribute_name] = new
            for patching in self.additional_patchers:
                arg = patching.__enter__()
                if patching.new is DEFAULT:
                    extra_args.update(arg)
            return extra_args
        return new
    def __exit__(self, *_):
        """Undo the patch."""
        if not _is_started(self):
            raise RuntimeError('stop called on unstarted patcher')
        if self.is_local and self.temp_original is not DEFAULT:
            setattr(self.target, self.attribute, self.temp_original)
        else:
            delattr(self.target, self.attribute)
            if not self.create and not hasattr(self.target, self.attribute):
                # needed for proxy objects like django settings
                setattr(self.target, self.attribute, self.temp_original)
        del self.temp_original
        del self.is_local
        del self.target
        for patcher in reversed(self.additional_patchers):
            if _is_started(patcher):
                patcher.__exit__()
    start = __enter__
    stop = __exit__
def _get_target(target):
try:
target, attribute = target.rsplit('.', 1)
except (TypeError, ValueError):
raise TypeError("Need a valid target to patch. You supplied: %r" %
(target,))
getter = lambda: _importer(target)
return getter, attribute
def _patch_object(
    target, attribute, new=DEFAULT, spec=None,
    create=False, mocksignature=False, spec_set=None, autospec=False,
    new_callable=None, **kwargs
):
    """
    patch.object(target, attribute, new=DEFAULT, spec=None, create=False,
                 mocksignature=False, spec_set=None, autospec=False,
                 new_callable=None, **kwargs)

    Patch the named member (`attribute`) on an object (`target`) with a mock
    object.

    `patch.object` can be used as a decorator, class decorator or a context
    manager. The arguments `new`, `spec`, `create`, `mocksignature`,
    `spec_set`, `autospec` and `new_callable` have the same meaning as for
    `patch`, and, like `patch`, arbitrary keyword arguments configure the
    mock it creates.

    When used as a class decorator `patch.object` honours `patch.TEST_PREFIX`
    for choosing which methods to wrap.
    """
    # the target is already in hand, so the getter simply returns it
    return _patch(
        lambda: target, attribute, new, spec, create, mocksignature,
        spec_set, autospec, new_callable, kwargs
    )
def _patch_multiple(target, spec=None, create=False,
                    mocksignature=False, spec_set=None, autospec=False,
                    new_callable=None, **kwargs):
    """Perform multiple patches on one target in a single call.

    `target` may be the object itself or a string naming it (fetched by
    importing). Each keyword argument names an attribute to patch and gives
    its replacement::

        with patch.multiple(settings, FIRST_PATCH='one', SECOND_PATCH='two'):
            ...

    Pass `DEFAULT` as a value to have `patch.multiple` create the mock for
    you; created mocks are passed into a decorated function by keyword, and
    returned as a dictionary when used as a context manager.

    `patch.multiple` can be used as a decorator, class decorator or a
    context manager. The arguments `spec`, `spec_set`, `create`,
    `mocksignature`, `autospec` and `new_callable` have the same meaning as
    for `patch` and apply to *all* patches done here. When used as a class
    decorator `patch.multiple` honours `patch.TEST_PREFIX` for choosing
    which methods to wrap.
    """
    if type(target) in (unicode, str):
        getter = lambda: _importer(target)
    else:
        getter = lambda: target
    if not kwargs:
        raise ValueError(
            'Must supply at least one keyword argument with patch.multiple'
        )
    # wrap in a list for Python 3, where items() is a view
    attr_pairs = list(kwargs.items())
    first_attr, first_new = attr_pairs[0]
    # the first pair becomes the primary patcher; the rest ride along as
    # additional_patchers entered/exited with it
    patcher = _patch(
        getter, first_attr, first_new, spec, create, mocksignature, spec_set,
        autospec, new_callable, {}
    )
    patcher.attribute_name = first_attr
    for attr_name, replacement in attr_pairs[1:]:
        extra_patcher = _patch(
            getter, attr_name, replacement, spec, create, mocksignature,
            spec_set, autospec, new_callable, {}
        )
        extra_patcher.attribute_name = attr_name
        patcher.additional_patchers.append(extra_patcher)
    return patcher
def patch(
        target, new=DEFAULT, spec=None, create=False,
        mocksignature=False, spec_set=None, autospec=False,
        new_callable=None, **kwargs
    ):
    """
    `patch` acts as a function decorator, class decorator or a context
    manager. Inside the body of the function or with statement, the `target`
    (specified in the form `'package.module.ClassName'`) is patched
    with a `new` object. When the function/with statement exits the patch is
    undone.
    The `target` is imported and the specified attribute patched with the new
    object, so it must be importable from the environment you are calling the
    decorator from. The target is imported when the decorated function is
    executed, not at decoration time.
    If `new` is omitted, then a new `MagicMock` is created and passed in as an
    extra argument to the decorated function.
    The `spec` and `spec_set` keyword arguments are passed to the `MagicMock`
    if patch is creating one for you.
    In addition you can pass `spec=True` or `spec_set=True`, which causes
    patch to pass in the object being mocked as the spec/spec_set object.
    `new_callable` allows you to specify a different class, or callable object,
    that will be called to create the `new` object. By default `MagicMock` is
    used.
    A more powerful form of `spec` is `autospec`. If you set `autospec=True`
    then the mock will be created with a spec from the object being replaced.
    All attributes of the mock will also have the spec of the corresponding
    attribute of the object being replaced. Methods and functions being mocked
    will have their arguments checked and will raise a `TypeError` if they are
    called with the wrong signature (similar to `mocksignature`). For mocks
    replacing a class, their return value (the 'instance') will have the same
    spec as the class.
    Instead of `autospec=True` you can pass `autospec=some_object` to use an
    arbitrary object as the spec instead of the one being replaced.
    If `mocksignature` is True then the patch will be done with a function
    created by mocking the one being replaced. If the object being replaced is
    a class then the signature of `__init__` will be copied. If the object
    being replaced is a callable object then the signature of `__call__` will
    be copied.
    By default `patch` will fail to replace attributes that don't exist. If
    you pass in `create=True`, and the attribute doesn't exist, patch will
    create the attribute for you when the patched function is called, and
    delete it again afterwards. This is useful for writing tests against
    attributes that your production code creates at runtime. It is off by
    default because it can be dangerous. With it switched on you can write
    passing tests against APIs that don't actually exist!
    Patch can be used as a `TestCase` class decorator. It works by
    decorating each test method in the class. This reduces the boilerplate
    code when your test methods share a common patchings set. `patch` finds
    tests by looking for method names that start with `patch.TEST_PREFIX`.
    By default this is `test`, which matches the way `unittest` finds tests.
    You can specify an alternative prefix by setting `patch.TEST_PREFIX`.
    Patch can be used as a context manager, with the with statement. Here the
    patching applies to the indented block after the with statement. If you
    use "as" then the patched object will be bound to the name after the
    "as"; very useful if `patch` is creating a mock object for you.
    `patch` takes arbitrary keyword arguments. These will be passed to
    the `Mock` (or `new_callable`) on construction.
    `patch.dict(...)`, `patch.multiple(...)` and `patch.object(...)` are
    available for alternate use-cases.
    """
    getter, attribute = _get_target(target)
    return _patch(
        getter, attribute, new, spec, create, mocksignature,
        spec_set, autospec, new_callable, kwargs
    )
class _patch_dict(object):
    """
    Patch a dictionary, or dictionary like object, and restore the dictionary
    to its original state after the test.
    `in_dict` can be a dictionary or a mapping like container. If it is a
    mapping then it must at least support getting, setting and deleting items
    plus iterating over keys.
    `in_dict` can also be a string specifying the name of the dictionary, which
    will then be fetched by importing it.
    `values` can be a dictionary of values to set in the dictionary. `values`
    can also be an iterable of `(key, value)` pairs.
    If `clear` is True then the dictionary will be cleared before the new
    values are set.
    `patch.dict` can also be called with arbitrary keyword arguments to set
    values in the dictionary::
    with patch.dict('sys.modules', mymodule=Mock(), other_module=Mock()):
    ...
    `patch.dict` can be used as a context manager, decorator or class
    decorator. When used as a class decorator `patch.dict` honours
    `patch.TEST_PREFIX` for choosing which methods to wrap.
    """
    def __init__(self, in_dict, values=(), clear=False, **kwargs):
        # `basestring` is the Python 2 spelling; a str target is imported
        if isinstance(in_dict, basestring):
            in_dict = _importer(in_dict)
        self.in_dict = in_dict
        # support any argument supported by dict(...) constructor
        self.values = dict(values)
        self.values.update(kwargs)
        self.clear = clear
        self._original = None
    def __call__(self, f):
        if isinstance(f, ClassTypes):
            return self.decorate_class(f)
        @wraps(f)
        def _inner(*args, **kw):
            self._patch_dict()
            try:
                return f(*args, **kw)
            finally:
                self._unpatch_dict()
        return _inner
    def decorate_class(self, klass):
        # Wrap each callable test method in a fresh _patch_dict decorator.
        for attr in dir(klass):
            attr_value = getattr(klass, attr)
            if (attr.startswith(patch.TEST_PREFIX) and
                hasattr(attr_value, "__call__")):
                decorator = _patch_dict(self.in_dict, self.values, self.clear)
                decorated = decorator(attr_value)
                setattr(klass, attr, decorated)
        return klass
    def __enter__(self):
        """Patch the dict."""
        self._patch_dict()
    def _patch_dict(self):
        # Snapshot the original contents, then (optionally clear and) apply
        # the new values.
        values = self.values
        in_dict = self.in_dict
        clear = self.clear
        try:
            original = in_dict.copy()
        except AttributeError:
            # dict like object with no copy method
            # must support iteration over keys
            original = {}
            for key in in_dict:
                original[key] = in_dict[key]
        self._original = original
        if clear:
            _clear_dict(in_dict)
        try:
            in_dict.update(values)
        except AttributeError:
            # dict like object with no update method
            for key in values:
                in_dict[key] = values[key]
    def _unpatch_dict(self):
        # Restore the snapshot taken by _patch_dict.
        in_dict = self.in_dict
        original = self._original
        _clear_dict(in_dict)
        try:
            in_dict.update(original)
        except AttributeError:
            for key in original:
                in_dict[key] = original[key]
    def __exit__(self, *args):
        """Unpatch the dict."""
        self._unpatch_dict()
        return False
    start = __enter__
    stop = __exit__
def _clear_dict(in_dict):
try:
in_dict.clear()
except AttributeError:
keys = list(in_dict)
for key in keys:
del in_dict[key]
# Expose the alternate patchers as attributes of `patch` so callers can use
# patch.object(...), patch.dict(...) and patch.multiple(...).
patch.object = _patch_object
patch.dict = _patch_dict
patch.multiple = _patch_multiple
# Methods whose names start with this prefix are wrapped by class decorators.
patch.TEST_PREFIX = 'test'
# Whitespace-separated names of the magic methods MagicMock implements by
# default (without the surrounding double underscores).
magic_methods = (
    "lt le gt ge eq ne "
    "getitem setitem delitem "
    "len contains iter "
    "hash str sizeof "
    "enter exit "
    "divmod neg pos abs invert "
    "complex int float index "
    "trunc floor ceil "
)
# Binary numeric operators; the in-place (i*) and reflected (r*) variants
# are derived from this list.
numerics = "add sub mul div floordiv mod lshift rshift and xor or pow "
inplace = ' '.join('i%s' % n for n in numerics.split())
right = ' '.join('r%s' % n for n in numerics.split())
# Version-specific extras (e.g. `nonzero`/`unicode` only exist on Python 2).
extra = ''
if inPy3k:
    extra = 'bool next '
else:
    extra = 'unicode long nonzero oct hex truediv rtruediv '
# Magic methods that exist but get no default implementation from MagicMock;
# they are only configured when a spec explicitly requires them.
_non_defaults = set('__%s__' % method for method in [
    'cmp', 'getslice', 'setslice', 'coerce', 'subclasses',
    'format', 'get', 'set', 'delete', 'reversed',
    'missing', 'reduce', 'reduce_ex', 'getinitargs',
    'getnewargs', 'getstate', 'setstate', 'getformat',
    'setformat', 'repr', 'dir'
])
def _get_method(name, func):
"Turns a callable object (like a mock) into a real function"
def method(self, *args, **kw):
return func(self, *args, **kw)
method.__name__ = name
return method
# Full set of dunder names MagicMock knows how to provide by default.
_magics = set(
    '__%s__' % method for method in
    ' '.join([magic_methods, numerics, inplace, right, extra]).split()
)
_all_magics = _magics | _non_defaults
# Magic methods that mocking cannot sensibly support.
# BUG FIX: the original list was missing a comma after '__prepare__', so
# implicit string concatenation produced the single bogus entry
# '__prepare____instancecheck__' and neither real name was actually listed.
_unsupported_magics = set([
    '__getattr__', '__setattr__',
    '__init__', '__new__', '__prepare__',
    '__instancecheck__', '__subclasscheck__',
    '__del__'
])
# Magic methods whose default return value must be computed from the mock
# itself (delegating to the plain object implementation).
_calculate_return_value = {
    '__hash__': lambda self: object.__hash__(self),
    '__str__': lambda self: object.__str__(self),
    '__sizeof__': lambda self: object.__sizeof__(self),
    '__unicode__': lambda self: unicode(object.__str__(self)),
}
# Fixed, type-correct default return values for magic methods.
# NOTE: `long` only exists on Python 2; this table is evaluated at import
# time, so the module as written targets a py2/py3 compat layer that
# defines `long` elsewhere.
_return_values = {
    '__int__': 1,
    '__contains__': False,
    '__len__': 0,
    '__exit__': False,
    '__complex__': 1j,
    '__float__': 1.0,
    '__bool__': True,
    '__nonzero__': True,
    '__oct__': '1',
    '__hex__': '0x1',
    '__long__': long(1),
    '__index__': 1,
}
def _get_eq(self):
    """Build the default ``__eq__`` for mock ``self``.

    A return value configured on the mock's ``__eq__`` wins; otherwise the
    comparison falls back to identity.
    """
    def __eq__(other):
        configured = self.__eq__._mock_return_value
        if configured is not DEFAULT:
            return configured
        return self is other
    return __eq__
def _get_ne(self):
    """Build the default ``__ne__`` for mock ``self``.

    A return value configured on the mock's ``__ne__`` wins; otherwise the
    comparison falls back to identity (mirroring ``_get_eq``).
    """
    def __ne__(other):
        ret_val = self.__ne__._mock_return_value
        if ret_val is not DEFAULT:
            # BUG FIX: the original returned the DEFAULT sentinel itself
            # here instead of the value the user configured.
            return ret_val
        return self is not other
    return __ne__
def _get_iter(self):
    """Build the default ``__iter__`` for mock ``self``.

    Yields nothing unless a return value has been configured on the mock's
    ``__iter__``, in which case that value is iterated.
    """
    def __iter__():
        configured = self.__iter__._mock_return_value
        if configured is DEFAULT:
            return iter([])
        # iter() on something that is already an iterator returns it
        # unchanged, so a configured iterator is handed back as-is.
        return iter(configured)
    return __iter__
# Magic methods whose defaults are implemented as side-effect factories:
# they need access to the mock instance at call time, not a fixed value.
_side_effect_methods = {
    '__eq__': _get_eq,
    '__ne__': _get_ne,
    '__iter__': _get_iter,
}
def _set_return_value(mock, method, name):
    """Give magic-method mock ``method`` (attached to ``mock``) a sensible
    default: a fixed value, a value computed from the mock, or a side
    effect, depending on which table ``name`` appears in."""
    fixed = _return_values.get(name, DEFAULT)
    if fixed is not DEFAULT:
        method.return_value = fixed
        return
    calculator = _calculate_return_value.get(name)
    if calculator is not None:
        try:
            value = calculator(mock)
        except AttributeError:
            # XXXX why do we return AttributeError here?
            # set it as a side_effect instead?
            value = AttributeError(name)
        method.return_value = value
        return
    effect_factory = _side_effect_methods.get(name)
    if effect_factory is not None:
        method.side_effect = effect_factory(mock)
class MagicMixin(object):
    # Mixin that installs (and prunes) default magic-method support on a
    # mock class after normal Mock initialisation.
    def __init__(self, *args, **kw):
        _super(MagicMixin, self).__init__(*args, **kw)
        self._mock_set_magics()
    def _mock_set_magics(self):
        # Decide which magic methods this mock should expose; a spec
        # (self._mock_methods) restricts the set to what the spec defines.
        these_magics = _magics
        if self._mock_methods is not None:
            these_magics = _magics.intersection(self._mock_methods)
        remove_magics = set()
        remove_magics = _magics - these_magics
        for entry in remove_magics:
            if entry in type(self).__dict__:
                # remove unneeded magic methods
                delattr(self, entry)
        # don't overwrite existing attributes if called a second time
        these_magics = these_magics - set(type(self).__dict__)
        _type = type(self)
        for entry in these_magics:
            # MagicProxy creates the real child mock lazily on first use.
            setattr(_type, entry, MagicProxy(entry, self))
class NonCallableMagicMock(MagicMixin, NonCallableMock):
    """A version of `MagicMock` that isn't callable."""
    def mock_add_spec(self, spec, spec_set=False):
        """Add a spec to a mock. `spec` can either be an object or a
        list of strings. Only attributes on the `spec` can be fetched as
        attributes from the mock.
        If `spec_set` is True then only attributes on the spec can be set."""
        self._mock_add_spec(spec, spec_set)
        # Re-sync the magic methods with the (possibly narrower) new spec.
        self._mock_set_magics()
class MagicMock(MagicMixin, Mock):
    """
    MagicMock is a subclass of Mock with default implementations
    of most of the magic methods. You can use MagicMock without having to
    configure the magic methods yourself.
    If you use the `spec` or `spec_set` arguments then *only* magic
    methods that exist in the spec will be created.
    Attributes and the return value of a `MagicMock` will also be `MagicMocks`.
    """
    def mock_add_spec(self, spec, spec_set=False):
        """Add a spec to a mock. `spec` can either be an object or a
        list of strings. Only attributes on the `spec` can be fetched as
        attributes from the mock.
        If `spec_set` is True then only attributes on the spec can be set."""
        self._mock_add_spec(spec, spec_set)
        # Re-sync the magic methods with the (possibly narrower) new spec.
        self._mock_set_magics()
class MagicProxy(object):
    # Placeholder installed on a mock's type for each supported magic
    # method; the real child mock is created lazily on first access/call.
    def __init__(self, name, parent):
        self.name = name
        self.parent = parent
    def __call__(self, *args, **kwargs):
        m = self.create_mock()
        return m(*args, **kwargs)
    def create_mock(self):
        # Build the child mock, attach it to the parent (replacing this
        # proxy for future lookups) and give it the default return
        # value/side effect for this magic method.
        entry = self.name
        parent = self.parent
        m = parent._get_child_mock(name=entry, _new_name=entry,
                                   _new_parent=parent)
        setattr(parent, entry, m)
        _set_return_value(parent, m, entry)
        return m
    def __get__(self, obj, _type=None):
        # Descriptor protocol: attribute access also materialises the mock.
        return self.create_mock()
class _ANY(object):
    "A helper object that compares equal to everything."
    def __eq__(self, other):
        return True
    def __ne__(self, other):
        return False
    def __repr__(self):
        return '<ANY>'
# Public singleton: compares equal to any value, useful in call assertions.
ANY = _ANY()
def _format_call_signature(name, args, kwargs):
message = '%s(%%s)' % name
formatted_args = ''
args_string = ', '.join([repr(arg) for arg in args])
kwargs_string = ', '.join([
'%s=%r' % (key, value) for key, value in kwargs.items()
])
if args_string:
formatted_args = args_string
if kwargs_string:
if formatted_args:
formatted_args += ', '
formatted_args += kwargs_string
return message % formatted_args
class _Call(tuple):
    """
    A tuple for holding the results of a call to a mock, either in the form
    `(args, kwargs)` or `(name, args, kwargs)`.
    If args or kwargs are empty then a call tuple will compare equal to
    a tuple without those values. This makes comparisons less verbose::
        _Call(('name', (), {})) == ('name',)
        _Call(('name', (1,), {})) == ('name', (1,))
        _Call(((), {'a': 'b'})) == ({'a': 'b'},)
    The `_Call` object provides a useful shortcut for comparing with call::
        _Call(((1, 2), {'a': 3})) == call(1, 2, a=3)
        _Call(('foo', (1, 2), {'a': 3})) == call.foo(1, 2, a=3)
    If the _Call has no name then it will match any name.
    """
    def __new__(cls, value=(), name=None, parent=None, two=False,
                from_kall=True):
        # NOTE: the `name` parameter is consumed by __init__; __new__
        # derives the tuple contents (name, args, kwargs) purely from
        # the shape of `value`.
        name = ''
        args = ()
        kwargs = {}
        _len = len(value)
        if _len == 3:
            name, args, kwargs = value
        elif _len == 2:
            # Ambiguous two-element form: distinguish (name, args),
            # (name, kwargs) and (args, kwargs) by element types.
            first, second = value
            if isinstance(first, basestring):
                name = first
                if isinstance(second, tuple):
                    args = second
                else:
                    kwargs = second
            else:
                args, kwargs = first, second
        elif _len == 1:
            value, = value
            if isinstance(value, basestring):
                name = value
            elif isinstance(value, tuple):
                args = value
            else:
                kwargs = value
        if two:
            # The two-tuple form drops the name: just (args, kwargs).
            return tuple.__new__(cls, (args, kwargs))
        return tuple.__new__(cls, (name, args, kwargs))
    def __init__(self, value=(), name=None, parent=None, two=False,
                 from_kall=True):
        self.name = name
        self.parent = parent
        self.from_kall = from_kall
    def __eq__(self, other):
        # Loose comparison: a missing name/args/kwargs on either side
        # matches anything (see the class docstring).
        if other is ANY:
            return True
        try:
            len_other = len(other)
        except TypeError:
            return False
        self_name = ''
        if len(self) == 2:
            self_args, self_kwargs = self
        else:
            self_name, self_args, self_kwargs = self
        other_name = ''
        if len_other == 0:
            other_args, other_kwargs = (), {}
        elif len_other == 3:
            other_name, other_args, other_kwargs = other
        elif len_other == 1:
            value, = other
            if isinstance(value, tuple):
                other_args = value
                other_kwargs = {}
            elif isinstance(value, basestring):
                other_name = value
                other_args, other_kwargs = (), {}
            else:
                other_args = ()
                other_kwargs = value
        else:
            # len 2
            # could be (name, args) or (name, kwargs) or (args, kwargs)
            first, second = other
            if isinstance(first, basestring):
                other_name = first
                if isinstance(second, tuple):
                    other_args, other_kwargs = second, {}
                else:
                    other_args, other_kwargs = (), second
            else:
                other_args, other_kwargs = first, second
        if self_name and other_name != self_name:
            return False
        # this order is important for ANY to work!
        return (other_args, other_kwargs) == (self_args, self_kwargs)
    def __ne__(self, other):
        return not self.__eq__(other)
    def __call__(self, *args, **kwargs):
        # Calling a call object records a nested call, e.g. call.foo(1).
        if self.name is None:
            return _Call(('', args, kwargs), name='()')
        name = self.name + '()'
        return _Call((self.name, args, kwargs), name=name, parent=self)
    def __getattr__(self, attr):
        # Attribute access builds dotted call names, e.g. call.foo.bar.
        if self.name is None:
            return _Call(name=attr, from_kall=False)
        name = '%s.%s' % (self.name, attr)
        return _Call(name=name, parent=self, from_kall=False)
    def __repr__(self):
        if not self.from_kall:
            name = self.name or 'call'
            if name.startswith('()'):
                name = 'call%s' % name
            return name
        if len(self) == 2:
            name = 'call'
            args, kwargs = self
        else:
            name, args, kwargs = self
        if not name:
            name = 'call'
        elif not name.startswith('()'):
            name = 'call.%s' % name
        else:
            name = 'call%s' % name
        return _format_call_signature(name, args, kwargs)
    def call_list(self):
        """For a call object that represents multiple calls, `call_list`
        returns a list of all the intermediate calls as well as the
        final call."""
        vals = []
        thing = self
        while thing is not None:
            if thing.from_kall:
                vals.append(thing)
            thing = thing.parent
        return _CallList(reversed(vals))
# Singleton used to build expected calls: call(1, 2), call.method(x), etc.
call = _Call(from_kall=False)
def create_autospec(spec, spec_set=False, instance=False, _parent=None,
                    _name=None, **kwargs):
    """Create a mock object using another object as a spec. Attributes on the
    mock will use the corresponding attribute on the `spec` object as their
    spec.
    Functions or methods being mocked will have their arguments checked in a
    similar way to `mocksignature` to check that they are called with the
    correct signature.
    If `spec_set` is True then attempting to set attributes that don't exist
    on the spec object will raise an `AttributeError`.
    If a class is used as a spec then the return value of the mock (the
    instance of the class) will have the same spec. You can use a class as the
    spec for an instance object by passing `instance=True`. The returned mock
    will only be callable if instances of the mock are callable.
    `create_autospec` also takes arbitrary keyword arguments that are passed to
    the constructor of the created mock."""
    if _is_list(spec):
        # can't pass a list instance to the mock constructor as it will be
        # interpreted as a list of strings
        spec = type(spec)
    is_type = isinstance(spec, ClassTypes)
    _kwargs = {'spec': spec}
    if spec_set:
        _kwargs = {'spec_set': spec}
    elif spec is None:
        # None we mock with a normal mock without a spec
        _kwargs = {}
    _kwargs.update(kwargs)
    # Choose the mock class: specs that aren't callable get a
    # non-callable mock.
    Klass = MagicMock
    if type(spec) in DescriptorTypes:
        # descriptors don't have a spec
        # because we don't know what type they return
        _kwargs = {}
    elif not _callable(spec):
        Klass = NonCallableMagicMock
    elif is_type and instance and not _instance_callable(spec):
        Klass = NonCallableMagicMock
    _new_name = _name
    if _parent is None:
        # for a top level object no _new_name should be set
        _new_name = ''
    mock = Klass(parent=_parent, _new_parent=_parent, _new_name=_new_name,
                 name=_name, **_kwargs)
    if isinstance(spec, FunctionTypes):
        # should only happen at the top level because we don't
        # recurse for functions
        mock = _set_signature(mock, spec)
    else:
        _check_signature(spec, mock, is_type, instance)
    if _parent is not None and not instance:
        _parent._mock_children[_name] = mock
    if is_type and not instance and 'return_value' not in kwargs:
        # XXXX could give a name to the return_value mock?
        mock.return_value = create_autospec(spec, spec_set, instance=True,
                                            _name='()', _parent=mock)
    # Recursively spec every public attribute of the spec object.
    for entry in dir(spec):
        if _is_magic(entry):
            # MagicMock already does the useful magic methods for us
            continue
        if isinstance(spec, FunctionTypes) and entry in FunctionAttributes:
            # allow a mock to actually be a function from mocksignature
            continue
        # XXXX do we need a better way of getting attributes without
        # triggering code execution (?) Probably not - we need the actual
        # object to mock it so we would rather trigger a property than mock
        # the property descriptor. Likewise we want to mock out dynamically
        # provided attributes.
        # XXXX what about attributes that raise exceptions on being fetched
        # we could be resilient against it, or catch and propagate the
        # exception when the attribute is fetched from the mock
        original = getattr(spec, entry)
        kwargs = {'spec': original}
        if spec_set:
            kwargs = {'spec_set': original}
        if not isinstance(original, FunctionTypes):
            # Non-function attributes are created lazily via _SpecState.
            new = _SpecState(original, spec_set, mock, entry, instance)
            mock._mock_children[entry] = new
        else:
            parent = mock
            if isinstance(spec, FunctionTypes):
                parent = mock.mock
            new = MagicMock(parent=parent, name=entry, _new_name=entry,
                            _new_parent=parent, **kwargs)
            mock._mock_children[entry] = new
            skipfirst = _must_skip(spec, entry, is_type)
            _check_signature(original, new, skipfirst=skipfirst)
        # so functions created with mocksignature become instance attributes,
        # *plus* their underlying mock exists in _mock_children of the parent
        # mock. Adding to _mock_children may be unnecessary where we are also
        # setting as an instance attribute?
        if isinstance(new, FunctionTypes):
            setattr(mock, entry, new)
    return mock
def _must_skip(spec, entry, is_type):
    """Decide whether the implicit first argument (self/cls) should be
    skipped when checking the signature of attribute `entry` on `spec`."""
    if not isinstance(spec, ClassTypes):
        if entry in getattr(spec, '__dict__', {}):
            # instance attribute - shouldn't skip
            return False
        # can't use type because of old style classes
        spec = spec.__class__
    if not hasattr(spec, '__mro__'):
        # old style class: can't have descriptors anyway
        return is_type
    for klass in spec.__mro__:
        result = klass.__dict__.get(entry, DEFAULT)
        if result is DEFAULT:
            continue
        if isinstance(result, (staticmethod, classmethod)):
            # static/class methods take no implicit first argument
            return False
        return is_type
    # shouldn't get here unless function is a dynamically provided attribute
    # XXXX untested behaviour
    return is_type
def _get_class(obj):
try:
return obj.__class__
except AttributeError:
# in Python 2, _sre.SRE_Pattern objects have no __class__
return type(obj)
class _SpecState(object):
def __init__(self, spec, spec_set=False, parent=None,
name=None, ids=None, instance=False):
self.spec = spec
self.ids = ids
self.spec_set = spec_set
self.parent = parent
self.instance = instance
self.name = name
# The concrete function/method types that need signature handling.
FunctionTypes = (
    # python function
    type(create_autospec),
    # instance method
    type(ANY.__eq__),
    # unbound method
    type(_ANY.__eq__),
)
# Python 2 function attributes that a mock standing in for a real function
# is allowed to carry (skipped when auto-speccing).
FunctionAttributes = set([
    'func_closure',
    'func_code',
    'func_defaults',
    'func_dict',
    'func_doc',
    'func_globals',
    'func_name',
])
|
try:
import ovirtsdk4 as sdk
import ovirtsdk4.types as otypes
except ImportError:
pass
from ansible.module_utils.ovirt import *
# Ansible metadata describing the maturity and support level of this module.
ANSIBLE_METADATA = {'status': ['preview'],
                    'supported_by': 'community',
                    'version': '1.0'}
DOCUMENTATION = '''
---
module: ovirt_host_pm
short_description: Module to manage power management of hosts in oVirt
version_added: "2.3"
author: "Ondra Machacek (@machacekondra)"
description:
- "Module to manage power management of hosts in oVirt."
options:
name:
description:
            - "Name of the host to manage."
required: true
aliases: ['host']
state:
description:
- "Should the host be present/absent."
choices: ['present', 'absent']
default: present
address:
description:
- "Address of the power management interface."
username:
description:
- "Username to be used to connect to power management interface."
password:
description:
- "Password of the user specified in C(username) parameter."
type:
description:
- "Type of the power management. oVirt predefined values are I(drac5), I(ipmilan), I(rsa),
I(bladecenter), I(alom), I(apc), I(apc_snmp), I(eps), I(wti), I(rsb), I(cisco_ucs),
I(drac7), I(hpblade), I(ilo), I(ilo2), I(ilo3), I(ilo4), I(ilo_ssh),
but user can have defined custom type."
port:
description:
- "Power management interface port."
slot:
description:
- "Power management slot."
options:
description:
- "Dictionary of additional fence agent options."
- "Additional information about options can be found at U(https://fedorahosted.org/cluster/wiki/FenceArguments)."
encrypt_options:
description:
- "If (true) options will be encrypted when send to agent."
aliases: ['encrypt']
order:
description:
- "Integer value specifying, by default it's added at the end."
extends_documentation_fragment: ovirt
'''
EXAMPLES = '''
- ovirt_host_pm:
name: myhost
address: 1.2.3.4
options:
myoption1: x
myoption2: y
username: admin
password: admin
port: 3333
type: ipmilan
- ovirt_host_pm:
state: absent
name: myhost
address: 1.2.3.4
type: ipmilan
'''
RETURN = '''
id:
description: ID of the agent which is managed
returned: On success if agent is found.
type: str
sample: 7de90f31-222c-436c-a1ca-7e655bd5b60c
agent:
description: "Dictionary of all the agent attributes. Agent attributes can be found on your oVirt instance
at following url: https://ovirt.example.com/ovirt-engine/api/model#types/agent."
returned: On success if agent is found.
'''
class HostModule(BaseModule):
    # Thin wrapper used only to flip the host's power-management flag on.
    def build_entity(self):
        """Build a Host entity with power management enabled."""
        return otypes.Host(
            power_management=otypes.PowerManagement(
                enabled=True,
            ),
        )
    def update_check(self, entity):
        """Return True when power management is already enabled on `entity`
        (i.e. no update is required)."""
        return equal(True, entity.power_management.enabled)
class HostPmModule(BaseModule):
    # Maps the Ansible module parameters onto an oVirt fence Agent entity.
    def build_entity(self):
        """Build an otypes.Agent from the module parameters."""
        return otypes.Agent(
            address=self._module.params['address'],
            encrypt_options=self._module.params['encrypt_options'],
            # The options dict becomes a list of Option(name, value) pairs;
            # None when no options were supplied.
            options=[
                otypes.Option(
                    name=name,
                    value=value,
                ) for name, value in self._module.params['options'].items()
            ] if self._module.params['options'] else None,
            password=self._module.params['password'],
            port=self._module.params['port'],
            type=self._module.params['type'],
            username=self._module.params['username'],
            order=self._module.params.get('order', 100),
        )
    def update_check(self, entity):
        """Return True when the existing agent already matches every
        requested parameter (i.e. no update is required)."""
        return (
            equal(self._module.params.get('address'), entity.address) and
            equal(self._module.params.get('encrypt_options'), entity.encrypt_options) and
            equal(self._module.params.get('password'), entity.password) and
            equal(self._module.params.get('username'), entity.username) and
            equal(self._module.params.get('port'), entity.port) and
            equal(self._module.params.get('type'), entity.type)
        )
def main():
    """Entry point: manage the power-management (fence) agent of an oVirt host."""
    argument_spec = ovirt_full_argument_spec(
        state=dict(
            choices=['present', 'absent'],
            default='present',
        ),
        name=dict(default=None, required=True, aliases=['host']),
        address=dict(default=None),
        username=dict(default=None),
        password=dict(default=None),
        type=dict(default=None),
        port=dict(default=None, type='int'),
        slot=dict(default=None),
        options=dict(default=None, type='dict'),
        encrypt_options=dict(default=None, type='bool', aliases=['encrypt']),
    )
    module = AnsibleModule(
        argument_spec=argument_spec,
        supports_check_mode=True,
    )
    check_sdk(module)
    # BUG FIX: initialise before the try so the finally block cannot hit a
    # NameError (masking the real error) when create_connection() fails.
    connection = None
    try:
        connection = create_connection(module.params.pop('auth'))
        hosts_service = connection.system_service().hosts_service()
        host = search_by_name(hosts_service, module.params['name'])
        # BUG FIX: fail with a clear message instead of an AttributeError
        # on `host.id` when the host does not exist.
        if host is None:
            raise Exception(
                "Host '%s' was not found." % module.params['name']
            )
        fence_agents_service = hosts_service.host_service(host.id).fence_agents_service()
        host_pm_module = HostPmModule(
            connection=connection,
            module=module,
            service=fence_agents_service,
        )
        host_module = HostModule(
            connection=connection,
            module=module,
            service=hosts_service,
        )
        state = module.params['state']
        if state == 'present':
            agent = host_pm_module.search_entity(
                search_params={
                    'address': module.params['address'],
                    'type': module.params['type'],
                }
            )
            ret = host_pm_module.create(entity=agent)
            # Enable Power Management, if it's not enabled:
            host_module.create(entity=host)
        elif state == 'absent':
            agent = host_pm_module.search_entity(
                search_params={
                    'address': module.params['address'],
                    'type': module.params['type'],
                }
            )
            ret = host_pm_module.remove(entity=agent)
        module.exit_json(**ret)
    except Exception as e:
        module.fail_json(msg=str(e))
    finally:
        # `connection` stays None when create_connection itself failed.
        if connection is not None:
            connection.close(logout=False)
from ansible.module_utils.basic import *
# Run the module entry point only when executed directly by Ansible.
if __name__ == "__main__":
    main()
|
"""Command-line tools for authenticating via OAuth 2.0
Do the OAuth 2.0 Web Server dance for a command line application. Stores the
generated credentials in a common file that is used by other example apps in
the same directory.
"""
from __future__ import print_function
__author__ = 'jcgregorio@google.com (Joe Gregorio)'
__all__ = ['argparser', 'run_flow', 'run', 'message_if_missing']
import logging
import socket
import sys
import webbrowser
from six.moves import BaseHTTPServer
from six.moves import urllib
from oauth2client import client
from oauth2client import util
_CLIENT_SECRETS_MESSAGE = """WARNING: Please configure OAuth 2.0
To make this sample run you will need to populate the client_secrets.json file
found at:
%s
with information from the APIs Console <https://code.google.com/apis/console>.
"""
def _CreateArgumentParser():
try:
import argparse
except ImportError:
return None
parser = argparse.ArgumentParser(add_help=False)
parser.add_argument('--auth_host_name', default='localhost',
help='Hostname when running a local web server.')
parser.add_argument('--noauth_local_webserver', action='store_true',
default=False, help='Do not run a local web server.')
parser.add_argument('--auth_host_port', default=[8080, 8090], type=int,
nargs='*', help='Port web server should listen on.')
parser.add_argument('--logging_level', default='ERROR',
choices=['DEBUG', 'INFO', 'WARNING', 'ERROR', 'CRITICAL'],
help='Set the logging level of detail.')
return parser
argparser = _CreateArgumentParser()
class ClientRedirectServer(BaseHTTPServer.HTTPServer):
  """A server to handle OAuth 2.0 redirects back to localhost.
  Waits for a single request and parses the query parameters
  into query_params and then stops serving.
  """
  # Class-level default; overwritten per-instance by ClientRedirectHandler.
  query_params = {}
class ClientRedirectHandler(BaseHTTPServer.BaseHTTPRequestHandler):
  """A handler for OAuth 2.0 redirects back to localhost.
  Waits for a single request and parses the query parameters
  into the servers query_params and then stops serving.
  """
  def do_GET(self):
    """Handle a GET request.
    Parses the query parameters and prints a message
    if the flow has completed. Note that we can't detect
    if an error occurred.
    """
    self.send_response(200)
    self.send_header("Content-type", "text/html")
    self.end_headers()
    # Everything after the first '?' is the OAuth redirect's query string.
    query = self.path.split('?', 1)[-1]
    query = dict(urllib.parse.parse_qsl(query))
    self.server.query_params = query
    # NOTE(review): writing str (not bytes) to wfile assumes Python 2 --
    # confirm before running this under Python 3.
    self.wfile.write("<html><head><title>Authentication Status</title></head>")
    self.wfile.write("<body><p>The authentication flow has completed.</p>")
    self.wfile.write("</body></html>")
  def log_message(self, format, *args):
    """Do not log messages to stdout while running as command line program."""
@util.positional(3)
def run_flow(flow, storage, flags, http=None):
  """Core code for a command-line application.
  The run() function is called from your application and runs through all the
  steps to obtain credentials. It takes a Flow argument and attempts to open an
  authorization server page in the user's default web browser. The server asks
  the user to grant your application access to the user's data. If the user
  grants access, the run() function returns new credentials. The new credentials
  are also stored in the Storage argument, which updates the file associated
  with the Storage object.
  It presumes it is run from a command-line application and supports the
  following flags:
    --auth_host_name: Host name to use when running a local web server
      to handle redirects during OAuth authorization.
      (default: 'localhost')
    --auth_host_port: Port to use when running a local web server to handle
      redirects during OAuth authorization;
      repeat this option to specify a list of values
      (default: '[8080, 8090]')
      (an integer)
    --[no]auth_local_webserver: Run a local web server to handle redirects
      during OAuth authorization.
      (default: 'true')
  The tools module defines an ArgumentParser that already contains the flag
  definitions that run() requires. You can pass that ArgumentParser to your
  ArgumentParser constructor:
    parser = argparse.ArgumentParser(description=__doc__,
        formatter_class=argparse.RawDescriptionHelpFormatter,
        parents=[tools.argparser])
    flags = parser.parse_args(argv)
  Args:
    flow: Flow, an OAuth 2.0 Flow to step through.
    storage: Storage, a Storage to store the credential in.
    flags: argparse.ArgumentParser, the command-line flags.
    http: An instance of httplib2.Http.request
        or something that acts like it.
  Returns:
    Credentials, the obtained credential.
  """
  logging.getLogger().setLevel(getattr(logging, flags.logging_level))
  if not flags.noauth_local_webserver:
    # Try each candidate port in turn until one can be bound.
    success = False
    port_number = 0
    for port in flags.auth_host_port:
      port_number = port
      try:
        httpd = ClientRedirectServer((flags.auth_host_name, port),
                                     ClientRedirectHandler)
      except socket.error:
        pass
      else:
        success = True
        break
    flags.noauth_local_webserver = not success
    if not success:
      # BUG FIX: the message previously said "port 9090" although the
      # default fallback port (see --auth_host_port) is 8090.
      print('Failed to start a local webserver listening on either port 8080')
      print('or port 8090. Please check your firewall settings and locally')
      print('running programs that may be blocking or using those ports.')
      print()
      print('Falling back to --noauth_local_webserver and continuing with')
      print('authorization.')
      print()
  if not flags.noauth_local_webserver:
    oauth_callback = 'http://%s:%s/' % (flags.auth_host_name, port_number)
  else:
    # Out-of-band flow: the user pastes the code manually.
    oauth_callback = client.OOB_CALLBACK_URN
  flow.redirect_uri = oauth_callback
  authorize_url = flow.step1_get_authorize_url()
  if not flags.noauth_local_webserver:
    webbrowser.open(authorize_url, new=1, autoraise=True)
    print('Your browser has been opened to visit:')
    print()
    print('    ' + authorize_url)
    print()
    print('If your browser is on a different machine then exit and re-run this')
    print('application with the command-line parameter ')
    print()
    print('  --noauth_local_webserver')
    print()
  else:
    print('Go to the following link in your browser:')
    print()
    print('    ' + authorize_url)
    print()
  code = None
  if not flags.noauth_local_webserver:
    # Block until the OAuth provider redirects back to us.
    httpd.handle_request()
    if 'error' in httpd.query_params:
      sys.exit('Authentication request was rejected.')
    if 'code' in httpd.query_params:
      code = httpd.query_params['code']
    else:
      print('Failed to find "code" in the query parameters of the redirect.')
      sys.exit('Try running with --noauth_local_webserver.')
  else:
    code = raw_input('Enter verification code: ').strip()
  try:
    credential = flow.step2_exchange(code, http=http)
  except client.FlowExchangeError as e:
    sys.exit('Authentication has failed: %s' % e)
  storage.put(credential)
  credential.set_store(storage)
  print('Authentication successful.')
  return credential
def message_if_missing(filename):
  """Return a user-facing warning that the client-secrets file at
  `filename` is missing, including instructions on how to populate it."""
  return _CLIENT_SECRETS_MESSAGE % filename
# Keep the legacy `run()` entry point working where the old gflags-based
# implementation is available; otherwise expose a stub that directs
# callers to run_flow().
try:
  from oauth2client.old_run import run
  from oauth2client.old_run import FLAGS
except ImportError:
  def run(*args, **kwargs):
    """Stub for the removed gflags-based run(); always raises."""
    # FIX: corrected the "preferrably" typo in the user-facing message.
    raise NotImplementedError(
        'The gflags library must be installed to use tools.run(). '
        'Please install gflags or preferably switch to using '
        'tools.run_flow().')
|
import os
from chainer import serializers
from chainer import utils
def save_and_load(src, dst, filename, saver, loader):
    """Saves ``src`` and loads it to ``dst`` using a de/serializer.
    This function simply runs a serialization and deserialization to check if
    the serialization code is correctly implemented. The save and load are
    done within a temporary directory.
    Args:
        src: An object to save from.
        dst: An object to load into.
        filename (str): File name used during the save/load.
        saver (callable): Function that saves the source object.
        loader (callable): Function that loads the file into the destination
            object.
    """
    # The temporary directory (and the file inside it) is removed when the
    # context manager exits, so nothing is left on disk after the check.
    with utils.tempdir() as tempdir:
        path = os.path.join(tempdir, filename)
        saver(path, src)
        loader(path, dst)
def save_and_load_npz(src, dst):
    """Saves ``src`` to an NPZ file and loads it to ``dst``.
    This is a short cut of :func:`save_and_load` using NPZ de/serializers.
    Args:
        src: An object to save.
        dst: An object to load to.
    """
    # Round-trip through the NumPy NPZ serializer pair.
    save_and_load(src, dst, 'tmp.npz',
                  serializers.save_npz, serializers.load_npz)
def save_and_load_hdf5(src, dst):
    """Saves ``src`` to an HDF5 file and loads it to ``dst``.
    This is a short cut of :func:`save_and_load` using HDF5 de/serializers.
    Args:
        src: An object to save.
        dst: An object to load to.
    """
    # Round-trip through the HDF5 serializer pair (requires h5py).
    save_and_load(src, dst, 'tmp.h5',
                  serializers.save_hdf5, serializers.load_hdf5)
|
import urllib
def basic_authentication(username=None, password=None, protocol="http"):
    """Build the URL of the HTTP Basic-auth test endpoint.

    NOTE(review): `username` and `password` are currently unused -- the
    query dict stays empty; confirm whether they were meant to be
    forwarded as query parameters to the endpoint.
    """
    from .fixtures import server_config, url
    build_url = url(server_config())
    query = {}
    return build_url("/webdriver/tests/support/authentication.py",
                     query=urllib.urlencode(query),
                     protocol=protocol)
def main(request, response):
    """wptserve handler: accept the user/password pair, otherwise answer
    with a 401 Basic-auth challenge."""
    credentials = (request.auth.username, request.auth.password)
    if credentials == ("user", "password"):
        return "Authentication done"
    # The realm can be overridden through the ?realm= query parameter.
    if "realm" in request.GET:
        realm = request.GET.first("realm")
    else:
        realm = "test"
    challenge = 'Basic realm="' + realm + '"'
    return ((401, "Unauthorized"),
            [("WWW-Authenticate", challenge)],
            "Please login with credentials 'user' and 'password'")
|
"""Visual Studio user preferences file writer."""
import common
import os
import re
import socket # for gethostname
import xml.dom
import xml_fix
def _FindCommandInPath(command):
"""If there are no slashes in the command given, this function
searches the PATH env to find the given command, and converts it
to an absolute path. We have to do this because MSVS is looking
for an actual file to launch a debugger on, not just a command
line. Note that this happens at GYP time, so anything needing to
be built needs to have a full path."""
if '/' in command or '\\' in command:
# If the command already has path elements (either relative or
# absolute), then assume it is constructed properly.
return command
else:
# Search through the path list and find an existing file that
# we can access.
paths = os.environ.get('PATH','').split(os.pathsep)
for path in paths:
item = os.path.join(path, command)
if os.path.isfile(item) and os.access(item, os.X_OK):
return item
return command
def _QuoteWin32CommandLineArgs(args):
new_args = []
for arg in args:
# Replace all double-quotes with double-double-quotes to escape
# them for cmd shell, and then quote the whole thing if there
# are any.
if arg.find('"') != -1:
arg = '""'.join(arg.split('"'))
arg = '"%s"' % arg
# Otherwise, if there are any spaces, quote the whole arg.
elif re.search(r'[ \t\n]', arg):
arg = '"%s"' % arg
new_args.append(arg)
return new_args
class Writer(object):
"""Visual Studio XML user user file writer."""
  def __init__(self, user_file_path, version):
    """Initializes the user file.
    Args:
      user_file_path: Path to the user file.
      version: Version info object; its ProjectVersion() is written to the
          root element when the document is created.
    """
    self.user_file_path = user_file_path
    self.version = version
    # XML document; built lazily by Create().
    self.doc = None
  def Create(self, name):
    """Creates the user file document.
    Replaces any previously created document and prepares the
    'Configurations' section that AddConfig() appends to.
    Args:
      name: Name of the user file.
    """
    self.name = name
    # Create XML doc
    xml_impl = xml.dom.getDOMImplementation()
    self.doc = xml_impl.createDocument(None, 'VisualStudioUserFile', None)
    # Add attributes to root element
    self.n_root = self.doc.documentElement
    self.n_root.setAttribute('Version', self.version.ProjectVersion())
    self.n_root.setAttribute('Name', self.name)
    # Add configurations section
    self.n_configs = self.doc.createElement('Configurations')
    self.n_root.appendChild(self.n_configs)
def _AddConfigToNode(self, parent, config_type, config_name):
"""Adds a configuration to the parent node.
Args:
parent: Destination node.
config_type: Type of configuration node.
config_name: Configuration name.
"""
# Add configuration node and its attributes
n_config = self.doc.createElement(config_type)
n_config.setAttribute('Name', config_name)
parent.appendChild(n_config)
def AddConfig(self, name):
"""Adds a configuration to the project.
Args:
name: Configuration name.
"""
self._AddConfigToNode(self.n_configs, 'Configuration', name)
def AddDebugSettings(self, config_name, command, environment = {},
working_directory=""):
"""Adds a DebugSettings node to the user file for a particular config.
Args:
command: command line to run. First element in the list is the
executable. All elements of the command will be quoted if
necessary.
working_directory: other files which may trigger the rule. (optional)
"""
command = _QuoteWin32CommandLineArgs(command)
n_cmd = self.doc.createElement('DebugSettings')
abs_command = _FindCommandInPath(command[0])
n_cmd.setAttribute('Command', abs_command)
n_cmd.setAttribute('WorkingDirectory', working_directory)
n_cmd.setAttribute('CommandArguments', " ".join(command[1:]))
n_cmd.setAttribute('RemoteMachine', socket.gethostname())
if environment and isinstance(environment, dict):
n_cmd.setAttribute('Environment',
" ".join(['%s="%s"' % (key, val)
for (key,val) in environment.iteritems()]))
else:
n_cmd.setAttribute('Environment', '')
n_cmd.setAttribute('EnvironmentMerge', 'true')
# Currently these are all "dummy" values that we're just setting
# in the default manner that MSVS does it. We could use some of
# these to add additional capabilities, I suppose, but they might
# not have parity with other platforms then.
n_cmd.setAttribute('Attach', 'false')
n_cmd.setAttribute('DebuggerType', '3') # 'auto' debugger
n_cmd.setAttribute('Remote', '1')
n_cmd.setAttribute('RemoteCommand', '')
n_cmd.setAttribute('HttpUrl', '')
n_cmd.setAttribute('PDBPath', '')
n_cmd.setAttribute('SQLDebugging', '')
n_cmd.setAttribute('DebuggerFlavor', '0')
n_cmd.setAttribute('MPIRunCommand', '')
n_cmd.setAttribute('MPIRunArguments', '')
n_cmd.setAttribute('MPIRunWorkingDirectory', '')
n_cmd.setAttribute('ApplicationCommand', '')
n_cmd.setAttribute('ApplicationArguments', '')
n_cmd.setAttribute('ShimCommand', '')
n_cmd.setAttribute('MPIAcceptMode', '')
n_cmd.setAttribute('MPIAcceptFilter', '')
# Find the config, and add it if it doesn't exist.
found = False
for config in self.n_configs.childNodes:
if config.getAttribute("Name") == config_name:
found = True
if not found:
self.AddConfig(config_name)
# Add the DebugSettings onto the appropriate config.
for config in self.n_configs.childNodes:
if config.getAttribute("Name") == config_name:
config.appendChild(n_cmd)
break
def Write(self, writer=common.WriteOnDiff):
"""Writes the user file."""
f = writer(self.user_file_path)
self.doc.writexml(f, encoding='Windows-1252', addindent=' ', newl='\r\n')
f.close()
|
"""
Test that two targets with the same name generates an error.
"""
import os
import sys
import TestGyp
import TestCmd
test = TestGyp.TestGyp()
stderr = ('gyp: Duplicate target definitions for '
'.*duplicate_targets.gyp:foo#target\n')
test.run_gyp('duplicate_targets.gyp', status=1, stderr=stderr,
match=TestCmd.match_re)
stderr = ('.*: Unable to find targets in build file .*missing_targets.gyp.*')
test.run_gyp('missing_targets.gyp', status=1, stderr=stderr,
match=TestCmd.match_re_dotall)
stderr = ('gyp: rule bar exists in duplicate, target '
'.*duplicate_rule.gyp:foo#target\n')
test.run_gyp('duplicate_rule.gyp', status=1, stderr=stderr,
match=TestCmd.match_re)
stderr = ("gyp: Key 'targets' repeated at level 1 with key path '' while "
"reading .*duplicate_node.gyp.*")
test.run_gyp('duplicate_node.gyp', '--check', status=1, stderr=stderr,
match=TestCmd.match_re_dotall)
stderr = (".*target0.*target1.*target2.*target0.*")
test.run_gyp('dependency_cycle.gyp', status=1, stderr=stderr,
match=TestCmd.match_re_dotall)
stderr = (".*file_cycle0.*file_cycle1.*file_cycle0.*")
test.run_gyp('file_cycle0.gyp', status=1, stderr=stderr,
match=TestCmd.match_re_dotall)
stderr = 'gyp: Duplicate basenames in sources section, see list above\n'
test.run_gyp('duplicate_basenames.gyp', status=1, stderr=stderr)
if ((test.format == 'make' and sys.platform == 'darwin') or
(test.format == 'msvs' and
int(os.environ.get('GYP_MSVS_VERSION', 2010)) < 2010)):
stderr = 'gyp: Duplicate basenames in sources section, see list above\n'
test.run_gyp('duplicate_basenames.gyp', '--no-duplicate-basename-check',
status=1, stderr=stderr)
else:
test.run_gyp('duplicate_basenames.gyp', '--no-duplicate-basename-check')
stderr = ("gyp: Dependency '.*missing_dep.gyp:missing.gyp#target' not found "
"while trying to load target .*missing_dep.gyp:foo#target\n")
test.run_gyp('missing_dep.gyp', status=1, stderr=stderr,
match=TestCmd.match_re)
test.pass_test()
|
from functools import partial
from rebulk.pattern import StringPattern
from ..validators import chars_before, chars_after, chars_surround, validators
# Characters treated as separators by the validators under test.
chars = ' _.'
# Validator: the character before the match must be a separator (matches
# at the very start of the input are also accepted -- see test_left_chars).
left = partial(chars_before, chars)
# Validator: the character after the match must be a separator (matches
# at the very end of the input are also accepted -- see test_right_chars).
right = partial(chars_after, chars)
# Validator: the separator rule must hold on both sides of the match.
surrounding = partial(chars_surround, chars)
def test_left_chars():
    """A match is valid only when preceded by a separator or at the start."""
    def count(data):
        return len(list(StringPattern("word", validator=left).matches(data)))

    assert count("xxxwordxxx") == 0
    assert count("xxx_wordxxx") == 1
    assert count("wordxxx") == 1
def test_right_chars():
    """A match is valid only when followed by a separator or at the end."""
    def count(data):
        return len(list(StringPattern("word", validator=right).matches(data)))

    assert count("xxxwordxxx") == 0
    assert count("xxxword.xxx") == 1
    assert count("xxxword") == 1
def test_surrounding_chars():
    """A match is valid only when both sides satisfy the separator rule."""
    def count(data):
        return len(list(StringPattern("word", validator=surrounding).matches(data)))

    assert count("xxxword xxx") == 0
    assert count("xxx.wordxxx") == 0
    assert count("xxx word_xxx") == 1
    assert count("word") == 1
def test_chain():
    """Chaining left and right with validators() behaves like surrounding."""
    chained = validators(left, right)

    def count(data):
        return len(list(StringPattern("word", validator=chained).matches(data)))

    assert count("xxxword xxx") == 0
    assert count("xxx.wordxxx") == 0
    assert count("xxx word_xxx") == 1
    assert count("word") == 1
|
"""
Copyright (C) 2012 University of Dundee & Open Microscopy Environment.
All Rights Reserved.
Copyright 2013 Glencoe Software, Inc. All rights reserved.
Use is subject to license terms supplied in LICENSE.txt
pytest fixtures used as defined in conftest.py:
- gatewaywrapper
"""
import omero
from omero.rtypes import rstring
from omero.cmd import State, ERR, OK
from omero.callbacks import CmdCallbackI
# Permission strings passed to createGroup(perms=...) below
# (OMERO permission syntax; see the OMERO permissions documentation).
PRIVATE = 'rw----'
READONLY = 'rwr---'
COLLAB = 'rwrw--'
def doChange(gateway, obj_type, obj_ids, group_id, container_id=None,
             test_should_pass=True, return_complete=True):
    """
    Performs the change-group action, waits on completion and checks that
    the outcome (success or error) matches the expectation.
    """
    handle = gateway.chgrpObjects(obj_type, obj_ids, group_id, container_id)
    if not return_complete:
        # Caller wants the raw process handle without waiting.
        return handle
    callback = CmdCallbackI(gateway.c, handle)
    try:
        # Poll until the server reports a response (at most 10 rounds).
        for _ in range(10):
            callback.loop(20, 500)
            if handle.getResponse() is not None:
                break
        assert handle.getResponse() is not None
        handle.getStatus()
        rsp = handle.getResponse()
        if test_should_pass:
            assert not isinstance(rsp, ERR), \
                "Found ERR when test_should_pass==true: %s (%s) params=%s" \
                % (rsp.category, rsp.name, rsp.parameters)
            assert State.FAILURE not in handle.getStatus().flags
        else:
            assert not isinstance(rsp, OK), \
                "Found OK when test_should_pass==false: %s" % rsp
            assert State.FAILURE in handle.getStatus().flags
        return rsp
    finally:
        callback.close(True)
def testImageChgrp(gatewaywrapper):
    """
    Create a new group with the User as member. Test move the Image to new
    group.
    """
    gatewaywrapper.loginAsAuthor()
    image = gatewaywrapper.createTestImage()
    ctx = gatewaywrapper.gateway.getAdminService().getEventContext()
    uuid = ctx.sessionUuid
    # Group creation needs admin rights; the author is added as a member.
    gatewaywrapper.loginAsAdmin()
    gid = gatewaywrapper.gateway.createGroup(
        "chgrp-test-%s" % uuid, member_Ids=[ctx.userId], perms=COLLAB)
    gatewaywrapper.loginAsAuthor()
    assert gatewaywrapper.gateway.getObject("Image", image.id) is not None
    # Do the Chgrp
    doChange(gatewaywrapper.gateway, "Image", [image.getId()], gid)
    # Image should no-longer be available in current group
    assert gatewaywrapper.gateway.getObject("Image", image.id) is None, \
        "Image should not be available in original group"
    # Switch to new group - confirm that image is there.
    gatewaywrapper.gateway.setGroupForSession(gid)
    img = gatewaywrapper.gateway.getObject("Image", image.id)
    assert img is not None, "Image should be available in new group"
    assert img.getDetails().getGroup().id == gid, \
        "Image group.id should match new group"
def testDatasetChgrp(gatewaywrapper):
    """
    Create a new group with the User as member. Test move the Dataset/Image to
    new group.
    """
    gatewaywrapper.loginAsAuthor()
    dataset = gatewaywrapper.createPDTree(dataset="testDatasetChgrp")
    image = gatewaywrapper.createTestImage(dataset=dataset)
    ctx = gatewaywrapper.gateway.getAdminService().getEventContext()
    uuid = ctx.sessionUuid
    # Group creation needs admin rights; the author is added as a member.
    gatewaywrapper.loginAsAdmin()
    gid = gatewaywrapper.gateway.createGroup(
        "chgrp-test-%s" % uuid, member_Ids=[ctx.userId], perms=PRIVATE)
    gatewaywrapper.loginAsAuthor()
    assert gatewaywrapper.gateway.getObject("Image", image.id) is not None
    # Do the Chgrp; moving the Dataset should also move the contained Image.
    doChange(gatewaywrapper.gateway, "Dataset", [dataset.id], gid)
    # Dataset should no-longer be available in current group
    assert gatewaywrapper.gateway.getObject("Dataset", dataset.id) is None, \
        "Dataset should not be available in original group"
    # Switch to new group - confirm that Dataset, Image is there.
    gatewaywrapper.gateway.setGroupForSession(gid)
    ds = gatewaywrapper.gateway.getObject("Dataset", dataset.id)
    assert ds is not None, "Dataset should be available in new group"
    img = gatewaywrapper.gateway.getObject("Image", image.id)
    assert img is not None, "Image should be available in new group"
    assert img.getDetails().getGroup().id == gid, \
        "Image group.id should match new group"
def testPDIChgrp(gatewaywrapper):
    """
    Create a new group with the User as member. Test move the
    Project/Dataset/Image to new group.

    The move is reverted in the finally block so later tests see the
    original group contents.
    """
    gatewaywrapper.loginAsAuthor()
    link = gatewaywrapper.createPDTree(project="testPDIChgrp",
                                       dataset="testPDIChgrp")
    dataset = link.getChild()  # DatasetWrapper
    # omero.model.ProjectI - link.getParent() overwritten - returns None
    project = link.parent
    image = gatewaywrapper.createTestImage(dataset=dataset)
    # Remember the original group so the move can be undone below.
    grp = project.details.group
    ctx = gatewaywrapper.gateway.getAdminService().getEventContext()
    uuid = ctx.sessionUuid
    gatewaywrapper.loginAsAdmin()
    gid = gatewaywrapper.gateway.createGroup(
        "chgrp-test-%s" % uuid, member_Ids=[ctx.userId], perms=COLLAB)
    gatewaywrapper.loginAsAuthor()
    assert gatewaywrapper.gateway.getObject("Image", image.id) is not None
    try:
        # Do the Chgrp; moving the Project should cascade to its Dataset
        # and Image.
        doChange(gatewaywrapper.gateway, "Project", [project.id.val], gid)
        # Image should no-longer be available in current group
        assert gatewaywrapper.gateway.getObject("Image", image.id) is None, \
            "Image should not be available in original group"
        # Switch to new group - confirm that Project, Dataset, Image is there.
        gatewaywrapper.gateway.setGroupForSession(gid)
        prj = gatewaywrapper.gateway.getObject("Project", project.id.val)
        assert prj is not None, "Project should be available in new group"
        ds = gatewaywrapper.gateway.getObject("Dataset", dataset.id)
        assert ds is not None, "Dataset should be available in new group"
        img = gatewaywrapper.gateway.getObject("Image", image.id)
        assert img is not None, "Image should be available in new group"
        assert img.getDetails().getGroup().id == gid, \
            "Image group.id should match new group"
    finally:
        # Change it all back
        gatewaywrapper.loginAsAuthor()
        # Do the Chgrp
        doChange(gatewaywrapper.gateway, "Project", [project.id.val],
                 grp.id.val)
        # Image should again be available in current group
        assert gatewaywrapper.gateway.getObject("Image", image.id) \
            is not None, "Image should be available in original group"
def testTwoDatasetsChgrpToProject(gatewaywrapper):
    """
    Create a new group with the User as member. Image has 2 Dataset Parents.
    Test move one Dataset to new group. Image does not move. Move 2nd Dataset
    - Image moves.
    """
    gatewaywrapper.loginAsAuthor()
    dataset = gatewaywrapper.createPDTree(
        dataset="testTwoDatasetsChgrpToProject")
    image = gatewaywrapper.createTestImage(dataset=dataset)
    orig_gid = dataset.details.group.id.val
    new_ds = gatewaywrapper.createPDTree(
        dataset="testTwoDatasetsChgrp-parent2")
    # Link the Image to a second Dataset so it has two parents.
    update = gatewaywrapper.gateway.getUpdateService()
    link = omero.model.DatasetImageLinkI()
    link.setParent(omero.model.DatasetI(new_ds.id, False))
    link.setChild(omero.model.ImageI(image.id, False))
    update.saveObject(link)
    ctx = gatewaywrapper.gateway.getAdminService().getEventContext()
    uuid = ctx.sessionUuid
    gatewaywrapper.loginAsAdmin()
    gid = gatewaywrapper.gateway.createGroup("chgrp-test-%s" % uuid,
                                             member_Ids=[ctx.userId])
    gatewaywrapper.loginAsAuthor()
    assert gatewaywrapper.gateway.getObject("Dataset", dataset.id) is not None
    # create Project in destination group
    gatewaywrapper.gateway.setGroupForSession(gid)
    p = omero.model.ProjectI()
    p.name = rstring("testTwoDatasetsChgrpToProject")
    p = gatewaywrapper.gateway.getUpdateService().saveAndReturnObject(p)
    assert p.details.group.id.val == gid, \
        "Project should be created in target group"
    gatewaywrapper.gateway.setGroupForSession(orig_gid)  # switch back
    # Do the Chgrp with one of the parents
    doChange(gatewaywrapper.gateway, "Dataset", [new_ds.id], gid)
    # Dataset should no-longer be available in current group
    assert gatewaywrapper.gateway.getObject("Dataset", new_ds.id) is None, \
        "Dataset should not be available in original group"
    assert gatewaywrapper.gateway.getObject("Dataset", dataset.getId()) \
        is not None, "Other Dataset should still be in original group"
    # But Image should (it still has a parent in the original group).
    img = gatewaywrapper.gateway.getObject("Image", image.id)
    assert img is not None, \
        "Image should still be available in original group"
    # Do the Chgrp with the OTHER parent
    # switch BEFORE doChange to allow Project link Save
    gatewaywrapper.gateway.setGroupForSession(gid)
    doChange(gatewaywrapper.gateway, "Dataset", [dataset.id], gid,
             container_id=p.id.val)
    # Confirm that Dataset AND Image is now in new group
    ctx = gatewaywrapper.gateway.getAdminService().getEventContext()
    ds = gatewaywrapper.gateway.getObject("Dataset", dataset.id)
    projects = list(ds.listParents())
    assert len(projects) == 1, \
        "Dataset should have one parent Project in new group"
    assert projects[0].getId() == p.id.val, \
        "Check Dataset parent is Project created above"
    assert ds is not None, "Dataset should now be available in new group"
    assert ds.getDetails().getGroup().id == gid, \
        "Dataset group.id should match new group"
    img = gatewaywrapper.gateway.getObject("Image", image.id)
    assert img is not None, "Image should now be available in new group"
    assert img.getDetails().getGroup().id == gid, \
        "Image group.id should match new group"
def testMultiDatasetDoAll(gatewaywrapper):
    """
    Need to enable chgrp independently of EventContext group being the
    destination group.
    Other tests that do not set omero.group require this for DoAll Save to
    work.
    """
    gatewaywrapper.loginAsAuthor()
    ctx = gatewaywrapper.gateway.getAdminService().getEventContext()
    uuid = ctx.sessionUuid
    update = gatewaywrapper.gateway.getUpdateService()
    # Create two Datasets in the author's current (original) group.
    new_ds = omero.model.DatasetI()
    new_ds.name = rstring("testMultiDatasetDoAll")
    new_ds = update.saveAndReturnObject(new_ds)
    new_ds2 = omero.model.DatasetI()
    new_ds2.name = rstring("testMultiDatasetDoAll2")
    new_ds2 = update.saveAndReturnObject(new_ds2)
    # new group
    gatewaywrapper.loginAsAdmin()
    gid = gatewaywrapper.gateway.createGroup(
        "testMultiDatasetDoAll-%s" % uuid, member_Ids=[ctx.userId])
    gatewaywrapper.loginAsAuthor()
    # create Project in new group
    gatewaywrapper.gateway.SERVICE_OPTS.setOmeroGroup(gid)
    p = omero.model.ProjectI()
    p.name = rstring("testMultiChgrp")
    p = gatewaywrapper.gateway.getUpdateService().saveAndReturnObject(
        p, gatewaywrapper.gateway.SERVICE_OPTS)
    assert p.details.group.id.val == gid, \
        "Project should be created in target group"
    # Test that this works whichever group you're in
    gatewaywrapper.gateway.SERVICE_OPTS.setOmeroGroup(ctx.groupId)
    dsIds = [new_ds.id.val, new_ds2.id.val]
    # Chgrp both Datasets at once, linking them into the new Project.
    doChange(gatewaywrapper.gateway, "Dataset", dsIds, gid,
             container_id=p.id.val)
    # Check all objects in destination group
    # we can get objects from either group... (-1 means 'all groups')
    gatewaywrapper.gateway.SERVICE_OPTS.setOmeroGroup(-1)
    p = gatewaywrapper.gateway.getObject("Project", p.id.val)
    datasets = list(p.listChildren())
    assert len(datasets) == 2, "Project should have 2 new Datasets"
    for d in datasets:
        assert d.details.group.id.val == gid, "Dataset should be in new group"
        assert d.getId() in dsIds, "Checking Datasets by ID"
|
"""
Stubouts, mocks and fixtures for the test suite
"""
import uuid
from nova.compute import task_states
from nova.compute import vm_states
from nova import db
from nova import utils
def get_fake_instance_data(name, project_id, user_id):
    """Return a dict resembling an instance record for tests.

    A fresh random uuid is generated on every call; the flavor is a
    fixed 'm1.tiny'-style dict.
    """
    flavor = {'name': 'm1.tiny',
              'memory_mb': 512,
              'vcpus': 1,
              'root_gb': 1024,
              'flavorid': 1,
              'rxtx_factor': 1}
    return {'name': name,
            'id': 1,
            'uuid': str(uuid.uuid4()),
            'project_id': project_id,
            'user_id': user_id,
            'image_ref': "1",
            'kernel_id': "1",
            'ramdisk_id': "1",
            'mac_address': "de:ad:be:ef:be:ef",
            'instance_type': flavor}
def get_fake_image_data(project_id, user_id):
    """Return a dict resembling an image record for tests."""
    return dict(name='image1',
                id=1,
                project_id=project_id,
                user_id=user_id,
                image_ref="1",
                kernel_id="1",
                ramdisk_id="1",
                mac_address="de:ad:be:ef:be:ef",
                instance_type='m1.tiny')
def get_fake_volume_info_data(target_portal, volume_id):
    """Return a fake iSCSI connection_info dict for tests.

    Args:
        target_portal: address of the iSCSI target portal.
        volume_id: volume identifier (string); embedded in the target IQN
            and returned under 'volume_id'.
    """
    return {
        'driver_volume_type': 'iscsi',
        'data': {
            # Bug fix: 'volume_id' was hard-coded to 1 while the IQN used
            # the volume_id argument; now the argument is propagated,
            # consistent with get_fake_block_device_info below.
            'volume_id': volume_id,
            'target_iqn': 'iqn.2010-10.org.openstack:volume-' + volume_id,
            'target_portal': target_portal,
            'target_lun': 1,
            'auth_method': 'CHAP',
        }
    }
def get_fake_block_device_info(target_portal, volume_id):
    """Return a fake block_device_info structure with one iSCSI volume."""
    iqn = 'iqn.2010-10.org.openstack:volume-' + volume_id
    connection_info = {'driver_volume_type': 'iscsi',
                       'data': {'target_lun': 1,
                                'volume_id': volume_id,
                                'target_iqn': iqn,
                                'target_portal': target_portal,
                                'target_discovered': False}}
    mapping = {'connection_info': connection_info,
               'mount_device': 'vda',
               'delete_on_termination': False}
    return {'block_device_mapping': [mapping],
            'root_device_name': None,
            'ephemerals': [],
            'swap': None}
def stub_out_db_instance_api(stubs):
    """Stubs out the db API for creating Instances."""
    # Flavor definitions served by the fake flavor lookups below.
    INSTANCE_TYPES = {
        'm1.tiny': dict(memory_mb=512, vcpus=1, root_gb=0, flavorid=1),
        'm1.small': dict(memory_mb=2048, vcpus=1, root_gb=20, flavorid=2),
        'm1.medium': dict(memory_mb=4096, vcpus=2, root_gb=40, flavorid=3),
        'm1.large': dict(memory_mb=8192, vcpus=4, root_gb=80, flavorid=4),
        'm1.xlarge': dict(memory_mb=16384, vcpus=8, root_gb=160, flavorid=5)}
    class FakeModel(object):
        """Stubs out for model.

        Wraps a plain dict and exposes it via attribute, item and .get()
        access, mimicking a db model object.
        """
        def __init__(self, values):
            self.values = values
        def get(self, key, default=None):
            # dict.get-like lookup on the wrapped values.
            if key in self.values:
                return self.values[key]
            else:
                return default
        def __getattr__(self, name):
            # Attribute access falls through to the wrapped dict; unknown
            # names raise KeyError (not AttributeError).
            return self.values[name]
        def __getitem__(self, key):
            return self.get(key)
        def __setitem__(self, key, value):
            self.values[key] = value
        def __str__(self):
            return str(self.values)
    def fake_instance_create(context, values):
        """Stubs out the db.instance_create method."""
        # Silently returns None when no instance_type is supplied.
        if 'instance_type' not in values:
            return
        instance_type = values['instance_type']
        base_options = {
            'name': values['name'],
            'id': values['id'],
            'uuid': str(uuid.uuid4()),
            'reservation_id': utils.generate_uid('r'),
            'image_ref': values['image_ref'],
            'kernel_id': values['kernel_id'],
            'ramdisk_id': values['ramdisk_id'],
            'vm_state': vm_states.BUILDING,
            'task_state': task_states.SCHEDULING,
            'user_id': values['user_id'],
            'project_id': values['project_id'],
            'instance_type': instance_type,
            'memory_mb': instance_type['memory_mb'],
            'vcpus': instance_type['vcpus'],
            'mac_addresses': [{'address': values['mac_address']}],
            'root_gb': instance_type['root_gb'],
        }
        return FakeModel(base_options)
    def fake_instance_type_get_all(context, inactive=0, filters=None):
        return INSTANCE_TYPES.values()
    def fake_instance_type_get_by_name(context, name):
        return INSTANCE_TYPES[name]
    def fake_block_device_mapping_get_all_by_instance(context, instance_uuid):
        return {}
    # Install the fakes; note the db flavor_* names map onto the
    # instance_type_* fakes defined above.
    stubs.Set(db, 'instance_create', fake_instance_create)
    stubs.Set(db, 'flavor_get_all', fake_instance_type_get_all)
    stubs.Set(db, 'flavor_get_by_name', fake_instance_type_get_by_name)
    stubs.Set(db, 'block_device_mapping_get_all_by_instance',
              fake_block_device_mapping_get_all_by_instance)
|
from django.conf.urls import url
from wagtail.documents.views import serve
# Document-serving URLs: serve a document by id + filename, plus the
# password-prompt view (serve.authenticate_with_password) used for
# documents that require authentication.
urlpatterns = [
    url(r'^(\d+)/(.*)$', serve.serve, name='wagtaildocs_serve'),
    url(r'^authenticate_with_password/(\d+)/$', serve.authenticate_with_password,
        name='wagtaildocs_authenticate_with_password'),
]
|
from __future__ import division
__author__ = "Jesse Stombaugh"
__copyright__ = "Copyright 2011, The QIIME project"
__credits__ = ["Jesse Stombaugh"]
__license__ = "GPL"
__version__ = "1.9.1-dev"
__maintainer__ = "Jesse Stombaugh"
__email__ = "jesse.stombaugh@colorado.edu"
from qiime.util import parse_command_line_parameters, make_option
from cogent.parse.tree import DndParser
from cogent.core.tree import PhyloNode
from qiime.clean_raxml_parsimony_tree import decorate_numtips, decorate_depth,\
get_insert_dict, drop_duplicate_nodes
# Supported values for the -s/--scoring_method option:
# 'depth' keeps the deepest replicate; 'numtips' keeps the replicate with
# the fewest siblings (see the usage examples below).
scoring_methods = ['depth', 'numtips']
# Standard QIIME script metadata consumed by parse_command_line_parameters.
script_info = {}
script_info['brief_description'] = "Remove duplicate tips from Raxml Tree"
script_info[
    'script_description'] = "This script allows the user to remove specific duplicate tips from a Raxml tree."
script_info['script_usage'] = []
script_info['script_usage'].append(
    ("Example (depth):",
     "For this case the user can pass in input Raxml tree, duplicate tips, and define an output filepath. When using the depth option, only the deepest replicate is kept. ",
     " %prog -i raxml_v730_final_placement.tre -t 6 -o raxml_v730_final_placement_depth.tre"))
script_info['script_usage'].append(
    ("Example (numtips):",
     "For this case the user can pass in input Raxml tree, duplicate tips, and define an output filepath. When using the numtips option, the replicate with the fewest siblings is kept. ",
     " %prog -i raxml_v730_final_placement.tre -t 6 -o raxml_v730_final_placement_numtips.tre -s numtips"))
script_info['output_description'] = ""
script_info['required_options'] = [
    make_option(
        '-i',
        '--input_tree',
        type="existing_filepath",
        help='the input raxml parsimony tree'),
    make_option(
        '-t',
        '--tips_to_keep',
        type="string",
        help='the input tips to score and retain (comma-separated list)'),
    make_option(
        '-o',
        '--output_fp',
        type="new_filepath",
        help='the output filepath'),
]
script_info['optional_options'] = [
    make_option(
        '-s',
        '--scoring_method',
        type="choice",
        help='the scoring method either depth or numtips [default: %default]',
        default='depth',
        choices=scoring_methods),
]
script_info['version'] = __version__
def main():
    """Load a RAxML parsimony tree, score the requested duplicate tips,
    drop the lower-scoring replicates and write the cleaned tree.
    """
    option_parser, opts, args =\
        parse_command_line_parameters(**script_info)
    # get options
    tree_fp = opts.input_tree
    tips_to_keep = opts.tips_to_keep.split(',')
    scoring_method = opts.scoring_method
    # load tree ('U' = universal-newline mode, matching original behavior).
    # Fixed: file handles were previously never closed; 'with' guarantees
    # closure even if parsing or serialization raises.
    with open(tree_fp, 'U') as tree_f:
        tree = DndParser(tree_f, constructor=PhyloNode)
    # decorate measurements onto tree (either by depth or by number of
    # children)
    if scoring_method == 'depth':
        tree2 = decorate_depth(tree)
    elif scoring_method == 'numtips':
        tree2 = decorate_numtips(tree)
    # get the nodes for the inserted sequences
    nodes_dict = get_insert_dict(tree2, set(tips_to_keep))
    # remove nodes accordingly
    final_tree = drop_duplicate_nodes(tree2, nodes_dict)
    # final_tree.nameUnnamedNodes()
    # write out the resulting tree
    with open(opts.output_fp, 'w') as open_outpath:
        open_outpath.write(final_tree.getNewick(with_distances=True))
if __name__ == "__main__":
main()
|
from django.utils.translation import ugettext_lazy as _
from horizon import exceptions
from horizon import tabs
from openstack_dashboard.api import base
from openstack_dashboard.api import cinder
from openstack_dashboard.api import keystone
from openstack_dashboard.api import neutron
from openstack_dashboard.api import nova
from openstack_dashboard.dashboards.admin.info import constants
from openstack_dashboard.dashboards.admin.info import tables
class ServicesTab(tabs.TableTab):
    """Tab listing the entries of the user's service catalog."""
    table_classes = (tables.ServicesTable,)
    name = _("Services")
    slug = "services"
    template_name = constants.INFO_DETAIL_TEMPLATE_NAME

    def get_services_data(self):
        request = self.tab_group.request
        catalog = request.user.service_catalog
        region = request.user.services_region
        # Tag each catalog entry with its index so table rows have an id.
        for index, entry in enumerate(catalog):
            entry['id'] = index
        return [keystone.Service(entry, region) for entry in catalog]
class NovaServicesTab(tabs.TableTab):
    """Tab listing nova (compute) services; requires compute permission."""
    table_classes = (tables.NovaServicesTable,)
    name = _("Compute Services")
    slug = "nova_services"
    template_name = constants.INFO_DETAIL_TEMPLATE_NAME
    permissions = ('openstack.services.compute',)

    def get_nova_services_data(self):
        try:
            return nova.service_list(self.tab_group.request)
        except Exception:
            # Report the failure and fall back to an empty listing.
            msg = _('Unable to get nova services list.')
            exceptions.check_message(["Connection", "refused"], msg)
            exceptions.handle(self.request, msg)
            return []
class CinderServicesTab(tabs.TableTab):
    """Tab listing cinder (block storage) services; requires volume permission."""
    table_classes = (tables.CinderServicesTable,)
    name = _("Block Storage Services")
    slug = "cinder_services"
    template_name = constants.INFO_DETAIL_TEMPLATE_NAME
    permissions = ('openstack.services.volume',)

    def get_cinder_services_data(self):
        try:
            return cinder.service_list(self.tab_group.request)
        except Exception:
            # Report the failure and fall back to an empty listing.
            msg = _('Unable to get cinder services list.')
            exceptions.check_message(["Connection", "refused"], msg)
            exceptions.handle(self.request, msg)
            return []
class NetworkAgentsTab(tabs.TableTab):
    """Tab listing neutron network agents (needs the 'agent' extension)."""
    table_classes = (tables.NetworkAgentsTable,)
    name = _("Network Agents")
    slug = "network_agents"
    template_name = constants.INFO_DETAIL_TEMPLATE_NAME

    def allowed(self, request):
        # Show the tab only when the network service is enabled AND the
        # agent extension is supported; hide it (and report) on failure.
        try:
            enabled = base.is_service_enabled(request, 'network')
            return enabled and neutron.is_extension_supported(request, 'agent')
        except Exception:
            exceptions.handle(request, _('Unable to get network agents info.'))
            return False

    def get_network_agents_data(self):
        try:
            return neutron.agent_list(self.tab_group.request)
        except Exception:
            # Report the failure and fall back to an empty listing.
            msg = _('Unable to get network agents list.')
            exceptions.check_message(["Connection", "refused"], msg)
            exceptions.handle(self.request, msg)
            return []
class SystemInfoTabs(tabs.TabGroup):
    """Tab group for the admin System Information panel."""
    slug = "system_info"
    tabs = (ServicesTab, NovaServicesTab, CinderServicesTab,
            NetworkAgentsTab)
    # NOTE(review): 'sticky' appears to keep the selected tab across
    # requests (horizon TabGroup option) -- confirm against horizon docs.
    sticky = True
|
import collections
import logging
import time
from mock import MagicMock, patch
from . import unittest
from kafka import KafkaClient, SimpleProducer
from kafka.common import (
AsyncProducerQueueFull, FailedPayloadsError, NotLeaderForPartitionError,
ProduceResponse, RetryOptions, TopicAndPartition
)
from kafka.producer.base import Producer, _send_upstream
from kafka.protocol import CODEC_NONE
import threading
try:
from queue import Empty, Queue
except ImportError:
from Queue import Empty, Queue
try:
xrange
except NameError:
xrange = range
class TestKafkaProducer(unittest.TestCase):
    """Unit tests for the synchronous Producer / SimpleProducer API."""
    def test_producer_message_types(self):
        # Only bytes payloads are accepted; any other type must raise
        # TypeError before anything is sent.
        producer = Producer(MagicMock())
        topic = b"test-topic"
        partition = 0
        bad_data_types = (u'你怎么样?', 12, ['a', 'list'], ('a', 'tuple'), {'a': 'dict'})
        for m in bad_data_types:
            with self.assertRaises(TypeError):
                logging.debug("attempting to send message of type %s", type(m))
                producer.send_messages(topic, partition, m)
        good_data_types = (b'a string!',)
        for m in good_data_types:
            # This should not raise an exception
            producer.send_messages(topic, partition, m)
    def test_topic_message_types(self):
        # SimpleProducer should route a send to the underlying client.
        client = MagicMock()
        def partitions(topic):
            return [0, 1]
        client.get_partition_ids_for_topic = partitions
        producer = SimpleProducer(client, random_start=False)
        topic = b"test-topic"
        producer.send_messages(topic, b'hi')
        assert client.send_produce_request.called
    @patch('kafka.producer.base._send_upstream')
    def test_producer_async_queue_overfilled(self, mock):
        # Overfilling the bounded async queue must raise
        # AsyncProducerQueueFull while keeping the messages that did fit.
        # NOTE(review): 'async' is a Python-2-era kwarg; it is a reserved
        # word from Python 3.7 on, so this test file only runs on older
        # interpreters / kafka-python versions.
        queue_size = 2
        producer = Producer(MagicMock(), async=True,
                            async_queue_maxsize=queue_size)
        topic = b'test-topic'
        partition = 0
        message = b'test-message'
        with self.assertRaises(AsyncProducerQueueFull):
            message_list = [message] * (queue_size + 1)
            producer.send_messages(topic, partition, *message_list)
        self.assertEqual(producer.queue.qsize(), queue_size)
        # Drain the queue so nothing leaks into other tests.
        for _ in xrange(producer.queue.qsize()):
            producer.queue.get()
    def test_producer_sync_fail_on_error(self):
        # sync_fail_on_error=False returns the error object to the caller;
        # sync_fail_on_error=True raises it instead.
        error = FailedPayloadsError('failure')
        with patch.object(KafkaClient, 'load_metadata_for_topics'):
            with patch.object(KafkaClient, 'get_partition_ids_for_topic', return_value=[0, 1]):
                with patch.object(KafkaClient, '_send_broker_aware_request', return_value = [error]):
                    client = KafkaClient(MagicMock())
                    producer = SimpleProducer(client, async=False, sync_fail_on_error=False)
                    # This should not raise
                    (response,) = producer.send_messages('foobar', b'test message')
                    self.assertEqual(response, error)
                    producer = SimpleProducer(client, async=False, sync_fail_on_error=True)
                    with self.assertRaises(FailedPayloadsError):
                        producer.send_messages('foobar', b'test message')
class TestKafkaProducerSendUpstream(unittest.TestCase):
    """Tests for the background _send_upstream loop (batching and retries)."""
    def setUp(self):
        self.client = MagicMock()
        self.queue = Queue()
    def _run_process(self, retries_limit=3, sleep_timeout=1):
        # run _send_upstream process with the queue
        stop_event = threading.Event()
        retry_options = RetryOptions(limit=retries_limit,
                                     backoff_ms=50,
                                     retry_on_timeouts=False)
        self.thread = threading.Thread(
            target=_send_upstream,
            args=(self.queue, self.client, CODEC_NONE,
                  0.3, # batch time (seconds)
                  3, # batch length
                  Producer.ACK_AFTER_LOCAL_WRITE,
                  Producer.DEFAULT_ACK_TIMEOUT,
                  retry_options,
                  stop_event))
        self.thread.daemon = True
        self.thread.start()
        # Give the worker time to drain the queue, then signal it to stop.
        time.sleep(sleep_timeout)
        stop_event.set()
    def test_wo_retries(self):
        # lets create a queue and add 10 messages for 1 partition
        for i in range(10):
            self.queue.put((TopicAndPartition("test", 0), "msg %i", "key %i"))
        self._run_process()
        # the queue should be void at the end of the test
        self.assertEqual(self.queue.empty(), True)
        # there should be 4 non-void calls:
        # 3 batches of 3 msgs each + 1 batch of 1 message
        self.assertEqual(self.client.send_produce_request.call_count, 4)
    def test_first_send_failed(self):
        # lets create a queue and add 10 messages for 10 different partitions
        # to show how retries should work ideally
        for i in range(10):
            self.queue.put((TopicAndPartition("test", i), "msg %i", "key %i"))
        # Mock offsets counter for closure
        offsets = collections.defaultdict(lambda: collections.defaultdict(lambda: 0))
        self.client.is_first_time = True
        def send_side_effect(reqs, *args, **kwargs):
            # First call fails every request; subsequent calls succeed and
            # advance the per-partition offsets.
            if self.client.is_first_time:
                self.client.is_first_time = False
                return [FailedPayloadsError(req) for req in reqs]
            responses = []
            for req in reqs:
                offset = offsets[req.topic][req.partition]
                offsets[req.topic][req.partition] += len(req.messages)
                responses.append(
                    ProduceResponse(req.topic, req.partition, 0, offset)
                )
            return responses
        self.client.send_produce_request.side_effect = send_side_effect
        self._run_process(2)
        # the queue should be void at the end of the test
        self.assertEqual(self.queue.empty(), True)
        # there should be 5 non-void calls: 1st failed batch of 3 msgs
        # plus 3 batches of 3 msgs each + 1 batch of 1 message
        self.assertEqual(self.client.send_produce_request.call_count, 5)
    def test_with_limited_retries(self):
        # lets create a queue and add 10 messages for 10 different partitions
        # to show how retries should work ideally
        for i in range(10):
            self.queue.put((TopicAndPartition("test", i), "msg %i" % i, "key %i" % i))
        def send_side_effect(reqs, *args, **kwargs):
            # Always fail, so every batch exhausts its retry budget.
            return [FailedPayloadsError(req) for req in reqs]
        self.client.send_produce_request.side_effect = send_side_effect
        self._run_process(3, 3)
        # the queue should be void at the end of the test
        self.assertEqual(self.queue.empty(), True)
        # there should be 16 non-void calls:
        # 3 initial batches of 3 msgs each + 1 initial batch of 1 msg +
        # 3 retries of the batches above = (1 + 3 retries) * 4 batches = 16
        self.assertEqual(self.client.send_produce_request.call_count, 16)
    def test_async_producer_not_leader(self):
        for i in range(10):
            self.queue.put((TopicAndPartition("test", i), "msg %i", "key %i"))
        # Mock offsets counter for closure
        offsets = collections.defaultdict(lambda: collections.defaultdict(lambda: 0))
        self.client.is_first_time = True
        def send_side_effect(reqs, *args, **kwargs):
            # First call answers every request with a NotLeaderForPartition
            # error code (triggering a retry); later calls succeed.
            if self.client.is_first_time:
                self.client.is_first_time = False
                return [ProduceResponse(req.topic, req.partition,
                                        NotLeaderForPartitionError.errno, -1)
                        for req in reqs]
            responses = []
            for req in reqs:
                offset = offsets[req.topic][req.partition]
                offsets[req.topic][req.partition] += len(req.messages)
                responses.append(
                    ProduceResponse(req.topic, req.partition, 0, offset)
                )
            return responses
        self.client.send_produce_request.side_effect = send_side_effect
        self._run_process(2)
        # the queue should be void at the end of the test
        self.assertEqual(self.queue.empty(), True)
        # there should be 5 non-void calls: 1st failed batch of 3 msgs
        # + 3 batches of 3 msgs each + 1 batch of 1 msg = 1 + 3 + 1 = 5
        self.assertEqual(self.client.send_produce_request.call_count, 5)
    def tearDown(self):
        # Drain anything left in the queue between tests.
        for _ in xrange(self.queue.qsize()):
            self.queue.get()
|
""" Additional extras go here.
"""
|
""" Modules dependency graph. """
import os, sys, imp
from os.path import join as opj
import itertools
import zipimport
import openerp
import openerp.osv as osv
import openerp.tools as tools
import openerp.tools.osutil as osutil
from openerp.tools.safe_eval import safe_eval as eval
from openerp.tools.translate import _
import zipfile
import openerp.release as release
import re
import base64
from zipfile import PyZipFile, ZIP_DEFLATED
from cStringIO import StringIO
import logging
_logger = logging.getLogger(__name__)
class Graph(dict):
    """ Modules dependency graph.
    The graph is a mapping from module name to Nodes.
    """
    def add_node(self, name, info):
        # Attach the new node under its deepest dependency so that the
        # node's depth reflects the longest dependency chain below it.
        max_depth, father = 0, None
        for n in [Node(x, self, None) for x in info['depends']]:
            if n.depth >= max_depth:
                father = n
                max_depth = n.depth
        if father:
            return father.add_child(name, info)
        else:
            # No dependencies: the node becomes a root (depth 0).
            return Node(name, self, info)
    def update_from_db(self, cr):
        # Overlay installation state from ir_module_module onto each node.
        if not len(self):
            return
        # update the graph with values from the database (if exist)
        ## First, we set the default values for each package in graph
        # NOTE(review): dict.fromkeys shares ONE default dict between all
        # keys; safe here only because the default is never mutated in place
        # (db rows replace the whole value below).
        additional_data = dict.fromkeys(self.keys(), {'id': 0, 'state': 'uninstalled', 'dbdemo': False, 'installed_version': None})
        ## Then we get the values from the database
        cr.execute('SELECT name, id, state, demo AS dbdemo, latest_version AS installed_version'
                   ' FROM ir_module_module'
                   ' WHERE name IN %s',(tuple(additional_data),)
                   )
        ## and we update the default values with values from the database
        additional_data.update(dict([(x.pop('name'), x) for x in cr.dictfetchall()]))
        for package in self.values():
            for k, v in additional_data[package.name].items():
                setattr(package, k, v)
    def add_module(self, cr, module, force=None):
        # Convenience wrapper around add_modules for a single module.
        self.add_modules(cr, [module], force)
    def add_modules(self, cr, module_list, force=None):
        # Load each module's descriptor, then insert the installable ones
        # into the graph in dependency order. Returns the number of nodes
        # actually added.
        if force is None:
            force = []
        packages = []
        len_graph = len(self)
        # force additional dependencies for the upgrade process if given
        # in config file
        forced_deps = tools.config.get_misc('openupgrade', 'force_deps', '{}')
        forced_deps = tools.config.get_misc('openupgrade',
                                            'force_deps_' + release.version,
                                            forced_deps)
        forced_deps = tools.safe_eval.safe_eval(forced_deps)
        for module in module_list:
            # This will raise an exception if no/unreadable descriptor file.
            # NOTE The call to load_information_from_description_file is already
            # done by db.initialize, so it is possible to not do it again here.
            info = openerp.modules.module.load_information_from_description_file(module)
            if info and info['installable']:
                info['depends'].extend(forced_deps.get(module, []))
                packages.append((module, info)) # TODO directly a dict, like in get_modules_with_version
            else:
                _logger.warning('module %s: not installable, skipped', module)
        dependencies = dict([(p, info['depends']) for p, info in packages])
        current, later = set([p for p, info in packages]), set()
        # Sweep the pending list repeatedly, inserting any package whose
        # dependencies are all present; stop when a full sweep makes no
        # progress (current == later means everything left is blocked).
        while packages and current > later:
            package, info = packages[0]
            deps = info['depends']
            # if all dependencies of 'package' are already in the graph, add 'package' in the graph
            if reduce(lambda x, y: x and y in self, deps, True):
                if not package in current:
                    packages.pop(0)
                    continue
                later.clear()
                current.remove(package)
                node = self.add_node(package, info)
                node.data = info
                for kind in ('init', 'demo', 'update'):
                    if package in tools.config[kind] or 'all' in tools.config[kind] or kind in force:
                        setattr(node, kind, True)
            else:
                later.add(package)
                packages.append((package, info))
            packages.pop(0)
        self.update_from_db(cr)
        for package in later:
            unmet_deps = filter(lambda p: p not in self, dependencies[package])
            _logger.error('module %s: Unmet dependencies: %s', package, ', '.join(unmet_deps))
        result = len(self) - len_graph
        if result != len(module_list):
            _logger.warning('Some modules were not loaded.')
        return result
    def __iter__(self):
        # Yield nodes level by level (breadth-first by dependency depth).
        level = 0
        done = set(self.keys())
        while done:
            level_modules = sorted((name, module) for name, module in self.items() if module.depth==level)
            for name, module in level_modules:
                done.remove(name)
                yield module
            level += 1
class Singleton(object):
    """Registry-backed construction: one instance per name per graph.

    Constructing with a name already present in *graph* returns the
    existing instance unchanged; otherwise a fresh instance is created,
    given its name/info, and recorded in the graph.
    """
    def __new__(cls, name, graph, info):
        if name not in graph:
            obj = object.__new__(cls)
            obj.name = name
            obj.info = info
            graph[name] = obj
        return graph[name]
class Node(Singleton):
    """ One module in the modules dependency graph.
    Node acts as a per-module singleton. A node is constructed via
    Graph.add_module() or Graph.add_modules(). Some of its fields are from
    ir_module_module (setted by Graph.update_from_db()).
    """
    def __init__(self, name, graph, info):
        # __init__ runs even when Singleton.__new__ returned an existing
        # instance, so only initialise attributes that are not already set.
        self.graph = graph
        if not hasattr(self, 'children'):
            self.children = []
        if not hasattr(self, 'depth'):
            self.depth = 0
    def add_child(self, name, info):
        # Create (or fetch) the child node and hang it under this node.
        node = Node(name, self.graph, info)
        node.depth = self.depth + 1
        if node not in self.children:
            self.children.append(node)
        # Children inherit this node's init/update/demo markers.
        for attr in ('init', 'update', 'demo'):
            if hasattr(self, attr):
                setattr(node, attr, True)
        # Python 2 cmp-style sort keeps children ordered by name.
        self.children.sort(lambda x, y: cmp(x.name, y.name))
        return node
    def __setattr__(self, name, value):
        # Flag and depth assignments propagate recursively to all children.
        super(Singleton, self).__setattr__(name, value)
        if name in ('init', 'update', 'demo'):
            tools.config[name][self.name] = 1
            for child in self.children:
                setattr(child, name, value)
        if name == 'depth':
            for child in self.children:
                setattr(child, name, value + 1)
    def __iter__(self):
        # Traversal over all descendants (children, then their subtrees).
        return itertools.chain(iter(self.children), *map(iter, self.children))
    def __str__(self):
        return self._pprint()
    def _pprint(self, depth=0):
        # Indented tree rendering of this node and its descendants.
        s = '%s\n' % self.name
        for c in self.children:
            s += '%s`-> %s' % (' ' * depth, c._pprint(depth+1))
        return s
|
from boto.mashups.interactive import interactive_shell
import boto
import os
import time
import shutil
import StringIO
import paramiko
import socket
import subprocess
class SSHClient(object):
def __init__(self, server,
host_key_file='~/.ssh/known_hosts',
uname='root', timeout=None, ssh_pwd=None):
self.server = server
self.host_key_file = host_key_file
self.uname = uname
self._timeout = timeout
self._pkey = paramiko.RSAKey.from_private_key_file(server.ssh_key_file,
password=ssh_pwd)
self._ssh_client = paramiko.SSHClient()
self._ssh_client.load_system_host_keys()
self._ssh_client.load_host_keys(os.path.expanduser(host_key_file))
self._ssh_client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
self.connect()
def connect(self, num_retries=5):
retry = 0
while retry < num_retries:
try:
self._ssh_client.connect(self.server.hostname,
username=self.uname,
pkey=self._pkey,
timeout=self._timeout)
return
except socket.error, (value, message):
if value in (51, 61, 111):
print 'SSH Connection refused, will retry in 5 seconds'
time.sleep(5)
retry += 1
else:
raise
except paramiko.BadHostKeyException:
print "%s has an entry in ~/.ssh/known_hosts and it doesn't match" % self.server.hostname
print 'Edit that file to remove the entry and then hit return to try again'
raw_input('Hit Enter when ready')
retry += 1
except EOFError:
print 'Unexpected Error from SSH Connection, retry in 5 seconds'
time.sleep(5)
retry += 1
print 'Could not establish SSH connection'
def open_sftp(self):
return self._ssh_client.open_sftp()
def get_file(self, src, dst):
sftp_client = self.open_sftp()
sftp_client.get(src, dst)
def put_file(self, src, dst):
sftp_client = self.open_sftp()
sftp_client.put(src, dst)
def open(self, filename, mode='r', bufsize=-1):
"""
Open a file on the remote system and return a file-like object.
"""
sftp_client = self.open_sftp()
return sftp_client.open(filename, mode, bufsize)
def listdir(self, path):
sftp_client = self.open_sftp()
return sftp_client.listdir(path)
def isdir(self, path):
status = self.run('[ -d %s ] || echo "FALSE"' % path)
if status[1].startswith('FALSE'):
return 0
return 1
def exists(self, path):
status = self.run('[ -a %s ] || echo "FALSE"' % path)
if status[1].startswith('FALSE'):
return 0
return 1
def shell(self):
"""
Start an interactive shell session on the remote host.
"""
channel = self._ssh_client.invoke_shell()
interactive_shell(channel)
def run(self, command):
"""
Execute a command on the remote host. Return a tuple containing
an integer status and two strings, the first containing stdout
and the second containing stderr from the command.
"""
boto.log.debug('running:%s on %s' % (command, self.server.instance_id))
status = 0
try:
t = self._ssh_client.exec_command(command)
except paramiko.SSHException:
status = 1
std_out = t[1].read()
std_err = t[2].read()
t[0].close()
t[1].close()
t[2].close()
boto.log.debug('stdout: %s' % std_out)
boto.log.debug('stderr: %s' % std_err)
return (status, std_out, std_err)
def run_pty(self, command):
"""
Execute a command on the remote host with a pseudo-terminal.
Returns a string containing the output of the command.
"""
boto.log.debug('running:%s on %s' % (command, self.server.instance_id))
channel = self._ssh_client.get_transport().open_session()
channel.get_pty()
channel.exec_command(command)
return channel
def close(self):
transport = self._ssh_client.get_transport()
transport.close()
self.server.reset_cmdshell()
class LocalClient(object):
    """Client exposing the same file/command interface as SSHClient, but
    operating on the local machine instead of over SSH."""
    def __init__(self, server, host_key_file=None, uname='root'):
        self.server = server
        self.host_key_file = host_key_file
        self.uname = uname
    def get_file(self, src, dst):
        """Copy local file *src* to *dst* ("download" locally)."""
        shutil.copyfile(src, dst)
    def put_file(self, src, dst):
        """Copy local file *src* to *dst* ("upload" locally)."""
        shutil.copyfile(src, dst)
    def listdir(self, path):
        """List the entries of local directory *path*."""
        return os.listdir(path)
    def isdir(self, path):
        """True when *path* is a local directory."""
        return os.path.isdir(path)
    def exists(self, path):
        """True when *path* exists locally."""
        return os.path.exists(path)
    def shell(self):
        raise NotImplementedError('shell not supported with LocalClient')
    def run(self, command=None):
        """
        Run a shell command locally; return (returncode, combined output).

        Accepts the command as an argument to match SSHClient.run. Falls
        back to reading a ``self.command`` attribute for legacy callers
        (the original signature took no argument and read that attribute,
        which __init__ never sets -- a latent AttributeError).
        """
        if command is None:
            command = self.command
        boto.log.info('running:%s' % command)
        log_fp = StringIO.StringIO()
        process = subprocess.Popen(command, shell=True, stdin=subprocess.PIPE,
                                   stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        # Poll until the child exits, then collect both output streams.
        while process.poll() is None:
            time.sleep(1)
        t = process.communicate()
        log_fp.write(t[0])
        log_fp.write(t[1])
        boto.log.info(log_fp.getvalue())
        boto.log.info('output: %s' % log_fp.getvalue())
        return (process.returncode, log_fp.getvalue())
    def close(self):
        """Nothing to release for a local client."""
        pass
class FakeServer(object):
    """
    Minimal stand-in for a :class:`boto.manage.server.Server`, carrying
    just the attributes SSHClient reads: hostname, instance_id and
    ssh_key_file.
    """
    def __init__(self, instance, ssh_key_file):
        self.instance = instance
        self.ssh_key_file = ssh_key_file
        self.hostname = instance.dns_name
        self.instance_id = instance.id
def start(server):
    """Return a LocalClient when this process runs on the target instance
    itself, otherwise an SSHClient connected to it."""
    local_instance_id = boto.config.get('Instance', 'instance-id', None)
    if local_instance_id == server.instance_id:
        return LocalClient(server)
    return SSHClient(server)
def sshclient_from_instance(instance, ssh_key_file,
                            host_key_file='~/.ssh/known_hosts',
                            user_name='root', ssh_pwd=None):
    """
    Create and return an SSHClient object given an
    instance object.

    :type instance: :class`boto.ec2.instance.Instance` object
    :param instance: The instance object.
    :type ssh_key_file: str
    :param ssh_key_file: A path to the private key file used
        to log into instance.
    :type host_key_file: str
    :param host_key_file: A path to the known_hosts file used
        by the SSH client. Defaults to ~/.ssh/known_hosts
    :type user_name: str
    :param user_name: The username to use when logging into
        the instance. Defaults to root.
    :type ssh_pwd: str
    :param ssh_pwd: The passphrase, if any, associated with
        private key.
    """
    server = FakeServer(instance, ssh_key_file)
    return SSHClient(server, host_key_file, user_name, ssh_pwd)
|
import time
from ajenti.com import *
from ajenti.ui import *
from ajenti.utils import shell, str_fsize
class BSDIfconfig(Plugin):
    """Network interface backend for FreeBSD, built on parsing
    ifconfig/netstat output."""
    platform = ['FreeBSD']

    def get_info(self, iface):
        """Build the UI panel describing one interface: up/down state,
        IPv4 address and traffic counters."""
        ui = UI.Container(
            UI.Formline(
                UI.HContainer(
                    UI.Image(file='/dl/network/%s.png'%('up' if iface.up else 'down')),
                    UI.Label(text=iface.name, bold=True)
                ),
                text='Interface',
            ),
            UI.Formline(
                UI.Label(text=self.get_ip(iface)),
                text='Address',
            ),
            UI.Formline(
                UI.Label(text='Up %s, down %s' % (
                    str_fsize(self.get_tx(iface)),
                    str_fsize(self.get_rx(iface)),
                )),
                text='Traffic',
            ),
        )
        return ui

    def get_tx(self, iface):
        """Transmitted byte count, or 0 when netstat output is unparsable."""
        s = shell('netstat -bI %s | grep -v Link | grep -v pkts'%iface.name)
        try:
            # Column 11 of `netstat -b` holds the output byte counter.
            # int() now sits inside the try: the original converted outside
            # it and crashed on a non-numeric token; the bare except is also
            # narrowed so KeyboardInterrupt/SystemExit pass through.
            return int(s.split()[10])
        except Exception:
            return 0

    def get_rx(self, iface):
        """Received byte count, or 0 when netstat output is unparsable."""
        s = shell('netstat -bI %s | grep -v Link | grep -v pkts'%iface.name)
        try:
            # Column 8 of `netstat -b` holds the input byte counter.
            return int(s.split()[7])
        except Exception:
            return 0

    def get_ip(self, iface):
        """First IPv4 address of the interface, '0.0.0.0' when none found."""
        s = shell('ifconfig %s | grep \'inet \''%iface.name)
        try:
            return s.split()[1]
        except Exception:
            return '0.0.0.0'

    def detect_dev_class(self, iface):
        """Classify an interface by its name: gifN -> tunnel, lo -> loopback,
        anything else -> ethernet."""
        if iface.name[:-1] == 'gif':
            return 'tunnel'
        if iface.name == 'lo':
            return 'loopback'
        return 'ethernet'

    def detect_iface_bits(self, iface):
        """Names of the UI plugin bits applicable to this interface."""
        r = ['bsd-basic']
        cls = self.detect_dev_class(iface)
        if iface.addressing == 'static':
            r.append('bsd-ipv4')
        if cls == 'tunnel':
            r.append('bsd-tunnel')
        return r

    def up(self, iface):
        """Bring the interface up; pause briefly so its state settles."""
        shell('ifconfig %s up' % iface.name)
        time.sleep(1)

    def down(self, iface):
        """Take the interface down; pause briefly so its state settles."""
        shell('ifconfig %s down' % iface.name)
        time.sleep(1)
|
"""Defines the available providers."""
__author__ = 'jason.stredwick@gmail.com (Jason Stredwick)'
class Provider(object):
    """Enumerates the storage providers available to the service."""
    # Datastore-backed provider.
    DATASTORE = 'datastore'
    # Issue-tracker-backed provider.
    ISSUETRACKER = 'issuetracker'
|
"""correct Vxlan Endpoint primary key
Revision ID: 4eba2f05c2f4
Revises: 884573acbf1c
Create Date: 2014-07-07 22:48:38.544323
"""
# Alembic revision identifiers used by the migration chain.
revision = '4eba2f05c2f4'
down_revision = '884573acbf1c'
from alembic import op
# Table and primary-key constraint being rebuilt by this migration.
TABLE_NAME = 'ml2_vxlan_endpoints'
PK_NAME = 'ml2_vxlan_endpoints_pkey'
def upgrade():
    """Rebuild the VXLAN endpoints primary key on ip_address alone
    (dropping udp_port from the key)."""
    op.drop_constraint(PK_NAME, TABLE_NAME, type_='primary')
    op.create_primary_key(PK_NAME, TABLE_NAME, cols=['ip_address'])
def downgrade():
    """Restore the composite (ip_address, udp_port) primary key."""
    op.drop_constraint(PK_NAME, TABLE_NAME, type_='primary')
    op.create_primary_key(PK_NAME, TABLE_NAME, cols=['ip_address', 'udp_port'])
|
from storer import Storer
import sys

# Smoke-test the Storer extension: initial value, round-trip, type check.
store = Storer()

# A fresh Storer must start at zero.
if store.get_value() != 0:
    print('Initial value incorrect.')
    sys.exit(1)

# Round-trip a value through the setter.
store.set_value(42)
if store.get_value() != 42:
    print('Setting value failed.')
    sys.exit(1)

# A non-numeric argument must raise TypeError.
try:
    store.set_value('not a number')
except TypeError:
    pass
else:
    print('Using wrong argument type did not fail.')
    sys.exit(1)
|
__version__ = "0.1"
from PIL import Image, ImageFile, _binary
i16 = _binary.i16le
def _accept(prefix):
return prefix[:4] in [b"DanM", b"LinS"]
class MspImageFile(ImageFile.ImageFile):
    """Image plugin for Windows MSP (Microsoft Paint) files."""
    format = "MSP"
    format_description = "Windows Paint"
    def _open(self):
        # Parse the fixed 32-byte MSP header and set mode/size/tile.
        # Header
        s = self.fp.read(32)
        if s[:4] not in [b"DanM", b"LinS"]:
            raise SyntaxError("not an MSP file")
        # Header checksum: the XOR of all sixteen 16-bit little-endian
        # words must be zero for a valid header.
        sum = 0
        for i in range(0, 32, 2):
            sum = sum ^ i16(s[i:i+2])
        if sum != 0:
            raise SyntaxError("bad MSP checksum")
        # MSP images are always 1-bit black and white.
        self.mode = "1"
        self.size = i16(s[4:]), i16(s[6:])
        if s[:4] == b"DanM":
            # "DanM" magic: raw uncompressed bits directly after the header.
            self.tile = [("raw", (0,0)+self.size, 32, ("1", 0, 1))]
        else:
            # "LinS" magic: compressed data preceded by a 2-byte-per-row
            # size table, hence the 32 + 2*height data offset.
            self.tile = [("msp", (0,0)+self.size, 32+2*self.size[1], None)]
# 16-bit little-endian writer used by _save when emitting header words.
o16 = _binary.o16le
def _save(im, fp, filename):
    """Save a mode "1" image as an MSP version 1 ("DanM") file."""
    if im.mode != "1":
        raise IOError("cannot write mode %s as MSP" % im.mode)
    # create MSP header (sixteen 16-bit words)
    header = [0] * 16
    header[0], header[1] = i16(b"Da"), i16(b"nM") # version 1
    header[2], header[3] = im.size
    header[4], header[5] = 1, 1
    header[6], header[7] = 1, 1
    header[8], header[9] = im.size
    # Checksum word: XOR of all header words, stored so the full header
    # XORs to zero (matching the check in MspImageFile._open).
    sum = 0
    for h in header:
        sum = sum ^ h
    header[12] = sum # FIXME: is this the right field?
    # header
    for h in header:
        fp.write(o16(h))
    # image body
    ImageFile._save(im, fp, [("raw", (0,0)+im.size, 32, ("1", 0, 1))])
# Hook the MSP codec and its file extension into PIL's plugin registry.
Image.register_open("MSP", MspImageFile, _accept)
Image.register_save("MSP", _save)
Image.register_extension("MSP", ".msp")
|
from .wiki import *
|
"""SymPy is a Python library for symbolic mathematics. It aims to become a
full-featured computer algebra system (CAS) while keeping the code as
simple as possible in order to be comprehensible and easily extensible.
SymPy is written entirely in Python and does not require any external
libraries, except optionally for plotting support.
See the webpage for more information and documentation:
http://sympy.org
"""
from __future__ import absolute_import, print_function
from sympy.release import __version__
import sys
if sys.version_info[0] == 2 and sys.version_info[1] < 6:
    raise ImportError("Python Version 2.6 or above is required for SymPy.")
else:  # Python 2.6+ or any Python 3 (the original comment said "Python 3")
    pass
    # Here we can also check for specific Python 3 versions, if needed
# sys was only needed for the version gate; keep the namespace clean.
del sys
def __sympy_debug():
    """Return the boolean value of the SYMPY_DEBUG environment variable.

    Only the exact strings 'True' and 'False' (default: 'False') are
    accepted; anything else raises RuntimeError.
    """
    # helper function so we don't import os globally
    import os
    debug_str = os.getenv('SYMPY_DEBUG', 'False')
    if debug_str in ('True', 'False'):
        # Compare directly instead of eval()-ing an environment string;
        # identical result for the two accepted values, no code execution.
        return debug_str == 'True'
    else:
        raise RuntimeError("unrecognized value for SYMPY_DEBUG: %s" %
                           debug_str)
# Evaluated once at import time; submodules read sympy.SYMPY_DEBUG.
SYMPY_DEBUG = __sympy_debug()
from .core import *
from .logic import *
from .assumptions import *
from .polys import *
from .series import *
from .functions import *
from .ntheory import *
from .concrete import *
from .simplify import *
from .sets import *
from .solvers import *
from .matrices import *
from .geometry import *
from .utilities import *
from .integrals import *
from .tensor import *
from .parsing import *
from .calculus import *
from .plotting import plot, textplot, plot_backends, plot_implicit
from .printing import pretty, pretty_print, pprint, pprint_use_unicode, \
pprint_try_use_unicode, print_gtk, print_tree, pager_print, TableForm
from .printing import ccode, fcode, jscode, mathematica_code, octave_code, \
latex, preview
from .printing import python, print_python, srepr, sstr, sstrrepr
from .interactive import init_session, init_printing
# evalf is brought into scope by `from .core import *`; build its dispatch
# table now that all the function classes above have been imported.
evalf._create_evalf_table()
|
# Prefer MicroPython's uarray module; fall back to CPython's array.
try:
    from uarray import array
except ImportError:
    try:
        from array import array
    except ImportError:
        # No array implementation available: report SKIP and stop the test.
        print("SKIP")
        raise SystemExit
# bytearray() of an array copies the array's raw machine representation.
print(bytearray(array('b', [1, 2])))
print(bytearray(array('h', [0x101, 0x202])))
|
from nova import db
from nova import objects
from nova.objects import base
from nova.objects import fields
class DNSDomain(base.NovaPersistentObject, base.NovaObject,
                base.NovaObjectDictCompat):
    """Versioned object wrapping one DNS domain database row."""
    # Version 1.0: Initial version
    VERSION = '1.0'
    fields = {
        'domain': fields.StringField(),
        'scope': fields.StringField(nullable=True),
        'availability_zone': fields.StringField(nullable=True),
        'project_id': fields.StringField(nullable=True),
    }
    @staticmethod
    def _from_db_object(context, vif, db_vif):
        # Copy every declared field from the DB row onto the object.
        # NOTE(review): the 'vif'/'db_vif' names look copy-pasted from the
        # VirtualInterface object; they really hold (domain, db_domain).
        for field in vif.fields:
            vif[field] = db_vif[field]
        vif._context = context
        vif.obj_reset_changes()
        return vif
    @base.remotable_classmethod
    def get_by_domain(cls, context, domain):
        # Returns the hydrated object, or implicitly None when no row matches.
        db_dnsd = db.dnsdomain_get(context, domain)
        if db_dnsd:
            return cls._from_db_object(context, cls(), db_dnsd)
    @base.remotable_classmethod
    def register_for_zone(cls, context, domain, zone):
        # Delegates straight to the DB API; no object state is touched.
        db.dnsdomain_register_for_zone(context, domain, zone)
    @base.remotable_classmethod
    def register_for_project(cls, context, domain, project):
        # Delegates straight to the DB API; no object state is touched.
        db.dnsdomain_register_for_project(context, domain, project)
    @base.remotable_classmethod
    def delete_by_domain(cls, context, domain):
        # Delegates straight to the DB API; no object state is touched.
        db.dnsdomain_unregister(context, domain)
class DNSDomainList(base.ObjectListBase, base.NovaObject):
    """Versioned list object holding DNSDomain entries."""
    # Version 1.0: Initial version
    VERSION = '1.0'
    fields = {
        'objects': fields.ListOfObjectsField('DNSDomain'),
    }
    # Version of the contained DNSDomain object for each list version.
    child_versions = {
        '1.0': '1.0',
    }
    @base.remotable_classmethod
    def get_all(cls, context):
        # Fetch every DNS domain row and wrap each one as a DNSDomain.
        db_domains = db.dnsdomain_get_all(context)
        return base.obj_make_list(context, cls(context), objects.DNSDomain,
                                  db_domains)
|
try:
from collections import OrderedDict
import json
except ImportError:
from ordereddict import OrderedDict
import simplejson as json
import itertools
import six
from csvkit import CSVKitWriter
def parse_object(obj, path=''):
    """
    Recursively flatten a parsed JSON structure into a dict mapping
    slash-delimited key paths to leaf values.
    Inspired by JSONPipe (https://github.com/dvxhouse/jsonpipe).
    """
    if isinstance(obj, dict):
        pairs = obj.items()
    elif isinstance(obj, (list, tuple)):
        pairs = enumerate(obj)
    else:
        # Leaf node: strip the trailing slash accumulated while recursing.
        return { path.strip('/'): obj }
    flattened = {}
    for child_key, child in pairs:
        flattened.update(parse_object(child, path + six.text_type(child_key) + '/'))
    return flattened
def ndjson2csv(f, key=None, **kwargs):
    """
    Convert a newline-delimited JSON document into CSV format.

    Each line of *f* must be a JSON object; the union of all top-level
    keys seen (in first-seen order) becomes the CSV header.

    :param f: file-like object yielding one JSON object per line.
    :param key: unused; kept for interface compatibility with the other
        json-to-csv converters (the original shadowed it with a loop
        variable, so it never had any effect).
    """
    first_line = f.readline()
    first_row = json.loads(first_line, object_pairs_hook=OrderedDict)
    # Re-chain the already-consumed first row in front of the rest.
    js = itertools.chain((first_row, ), (json.loads(l, object_pairs_hook=OrderedDict) for l in f))
    fields = []
    flat = []
    for obj in js:
        flat.append(parse_object(obj))
        # Distinct loop variable: the original reused 'key' here, silently
        # shadowing the parameter above.
        for field_name in obj.keys():
            if field_name not in fields:
                fields.append(field_name)
    o = six.StringIO()
    writer = CSVKitWriter(o)
    writer.writerow(fields)
    for item in flat:
        row = [item.get(field, None) for field in fields]
        writer.writerow(row)
    output = o.getvalue()
    o.close()
    return output
|
'''
This is a one-off command aimed at fixing a temporary problem encountered where input_state was added to
the same dict object in capa problems, so was accumulating. The fix is simply to remove input_state entry
from state for all problems in the affected date range.
'''
import json
import logging
from optparse import make_option
from django.core.management.base import BaseCommand, CommandError
from django.db import transaction
from courseware.models import StudentModule, StudentModuleHistory
LOG = logging.getLogger(__name__)
class Command(BaseCommand):
    '''
    The fix here is to remove the "input_state" entry in the StudentModule objects of any problems that
    contain them. No problem is yet making use of this, and the code should do the right thing if it's
    missing (by recreating an empty dict for its value).
    To narrow down the set of problems that might need fixing, the StudentModule
    objects to be checked is filtered down to those:
    created < '2013-03-29 16:30:00' (the problem must have been answered before the buggy code was reverted,
    on Prod and Edge)
    modified > '2013-03-28 22:00:00' (the problem must have been visited after the bug was introduced
    on Prod and Edge)
    state like '%input_state%' (the problem must have "input_state" set).
    This filtering is done on the production database replica, so that the larger select queries don't lock
    the real production database. The list of id values for Student Modules is written to a file, and the
    file is passed into this command. The sql file passed to mysql contains:
    select sm.id from courseware_studentmodule sm
    where sm.modified > "2013-03-28 22:00:00"
    and sm.created < "2013-03-29 16:30:00"
    and sm.state like "%input_state%"
    and sm.module_type = 'problem';
    '''
    # Running totals reported during and at the end of the run.  Declared on
    # the class; the += below rebinds them as instance attributes.
    num_visited = 0
    num_changed = 0
    num_hist_visited = 0
    num_hist_changed = 0
    option_list = BaseCommand.option_list + (
        make_option('--save',
                    action='store_true',
                    dest='save_changes',
                    default=False,
                    help='Persist the changes that were encountered. If not set, no changes are saved.'),
    )
    def fix_studentmodules_in_list(self, save_changes, idlist_path):
        '''Read in the list of StudentModule objects that might need fixing, and then fix each one'''
        # open file and read id values from it:
        for line in open(idlist_path, 'r'):
            student_module_id = line.strip()
            # skip the header, if present:
            if student_module_id == 'id':
                continue
            try:
                module = StudentModule.objects.get(id=student_module_id)
            except StudentModule.DoesNotExist:
                LOG.error(u"Unable to find student module with id = %s: skipping... ", student_module_id)
                continue
            self.remove_studentmodule_input_state(module, save_changes)
            # Also scrub every history row attached to this module.
            hist_modules = StudentModuleHistory.objects.filter(student_module_id=student_module_id)
            for hist_module in hist_modules:
                self.remove_studentmodulehistory_input_state(hist_module, save_changes)
            if self.num_visited % 1000 == 0:
                LOG.info(" Progress: updated {0} of {1} student modules".format(self.num_changed, self.num_visited))
                LOG.info(" Progress: updated {0} of {1} student history modules".format(self.num_hist_changed,
                                                                                       self.num_hist_visited))
    @transaction.autocommit
    def remove_studentmodule_input_state(self, module, save_changes):
        '''Remove the "input_state" entry from a StudentModule's state, if present'''
        module_state = module.state
        if module_state is None:
            # not likely, since we filter on it. But in general...
            LOG.info("No state found for {type} module {id} for student {student} in course {course_id}"
                     .format(type=module.module_type, id=module.module_state_key,
                             student=module.student.username, course_id=module.course_id))
            return
        state_dict = json.loads(module_state)
        self.num_visited += 1
        if 'input_state' not in state_dict:
            pass
        elif save_changes:
            # make the change and persist
            del state_dict['input_state']
            module.state = json.dumps(state_dict)
            module.save()
            self.num_changed += 1
        else:
            # don't make the change, but increment the count indicating the change would be made
            self.num_changed += 1
    @transaction.autocommit
    def remove_studentmodulehistory_input_state(self, module, save_changes):
        '''Remove the "input_state" entry from a StudentModuleHistory row's state, if present'''
        module_state = module.state
        if module_state is None:
            # not likely, since we filter on it. But in general...
            LOG.info("No state found for {type} module {id} for student {student} in course {course_id}"
                     .format(type=module.module_type, id=module.module_state_key,
                             student=module.student.username, course_id=module.course_id))
            return
        state_dict = json.loads(module_state)
        self.num_hist_visited += 1
        if 'input_state' not in state_dict:
            pass
        elif save_changes:
            # make the change and persist
            del state_dict['input_state']
            module.state = json.dumps(state_dict)
            module.save()
            self.num_hist_changed += 1
        else:
            # don't make the change, but increment the count indicating the change would be made
            self.num_hist_changed += 1
    def handle(self, *args, **options):
        '''Handle management command request'''
        if len(args) != 1:
            raise CommandError("missing idlist file")
        idlist_path = args[0]
        save_changes = options['save_changes']
        LOG.info("Starting run: reading from idlist file {0}; save_changes = {1}".format(idlist_path, save_changes))
        self.fix_studentmodules_in_list(save_changes, idlist_path)
        LOG.info("Finished run: updating {0} of {1} student modules".format(self.num_changed, self.num_visited))
        LOG.info("Finished run: updating {0} of {1} student history modules".format(self.num_hist_changed,
                                                                                    self.num_hist_visited))
|
import os
import re
import traceback
from ansible.module_utils.ansible_release import __version__
from ansible.module_utils.basic import missing_required_lib, env_fallback
from ansible.module_utils._text import to_native, to_text
from ansible.module_utils.cloud import CloudRetry
from ansible.module_utils.six import string_types, binary_type, text_type
from ansible.module_utils.common.dict_transformations import (
camel_dict_to_snake_dict, snake_dict_to_camel_dict,
_camel_to_snake, _snake_to_camel,
)
BOTO_IMP_ERR = None
try:
import boto
import boto.ec2 # boto does weird import stuff
HAS_BOTO = True
except ImportError:
BOTO_IMP_ERR = traceback.format_exc()
HAS_BOTO = False
BOTO3_IMP_ERR = None
try:
import boto3
import botocore
HAS_BOTO3 = True
except Exception:
BOTO3_IMP_ERR = traceback.format_exc()
HAS_BOTO3 = False
try:
# Although this is to allow Python 3 the ability to use the custom comparison as a key, Python 2.7 also
# uses this (and it works as expected). Python 2.6 will trigger the ImportError.
from functools import cmp_to_key
PY3_COMPARISON = True
except ImportError:
PY3_COMPARISON = False
class AnsibleAWSError(Exception):
    """Base exception for AWS-related failures raised by these utilities."""
def _botocore_exception_maybe():
    """
    Allow for boto3 not being installed when using these utils by wrapping
    botocore.exceptions instead of assigning from it directly.
    """
    return botocore.exceptions.ClientError if HAS_BOTO3 else type(None)
class AWSRetry(CloudRetry):
    """CloudRetry specialisation that retries botocore ClientErrors whose
    service error code indicates throttling or transient failure."""
    base_class = _botocore_exception_maybe()
    @staticmethod
    def status_code_from_exception(error):
        # botocore packs the service error code into the response dict.
        return error.response['Error']['Code']
    @staticmethod
    def found(response_code, catch_extra_error_codes=None):
        # Return True when response_code warrants a retry.
        # This list of failures is based on this API Reference
        # http://docs.aws.amazon.com/AWSEC2/latest/APIReference/errors-overview.html
        #
        # TooManyRequestsException comes from inside botocore when it
        # does retrys, unfortunately however it does not try long
        # enough to allow some services such as API Gateway to
        # complete configuration. At the moment of writing there is a
        # botocore/boto3 bug open to fix this.
        #
        # https://github.com/boto/boto3/issues/876 (and linked PRs etc)
        retry_on = [
            'RequestLimitExceeded', 'Unavailable', 'ServiceUnavailable',
            'InternalFailure', 'InternalError', 'TooManyRequestsException',
            'Throttling'
        ]
        if catch_extra_error_codes:
            retry_on.extend(catch_extra_error_codes)
        # NOTE(review): the dot is unescaped, so this matches ANY character
        # before 'NotFound' (e.g. 'SomethingNotFound'), not only codes of
        # the form 'Xyz.NotFound'. Confirm intent before tightening to r'\.'.
        not_found = re.compile(r'^\w+.NotFound')
        return response_code in retry_on or not_found.search(response_code)
def boto3_conn(module, conn_type=None, resource=None, region=None, endpoint=None, **params):
    """Build a boto3 client/resource for an Ansible module, converting
    connection/credential failures into module.fail_json calls instead of
    letting the exceptions propagate."""
    try:
        return _boto3_conn(conn_type=conn_type, resource=resource, region=region, endpoint=endpoint, **params)
    except ValueError as e:
        module.fail_json(msg="Couldn't connect to AWS: %s" % to_native(e))
    except (botocore.exceptions.ProfileNotFound, botocore.exceptions.PartialCredentialsError,
            botocore.exceptions.NoCredentialsError, botocore.exceptions.ConfigParseError) as e:
        module.fail_json(msg=to_native(e))
    except botocore.exceptions.NoRegionError as e:
        # A region is mandatory for boto3 sessions; give a targeted hint.
        module.fail_json(msg="The %s module requires a region and none was found in configuration, "
                         "environment variables or module parameters" % module._name)
def _boto3_conn(conn_type=None, resource=None, region=None, endpoint=None, **params):
    """Create a boto3 client, resource, or both for one AWS service.

    :param conn_type: 'client', 'resource' or 'both'.
    :param resource: boto3 service name (e.g. 'ec2', 's3').
    :param region: region name passed through to the session.
    :param endpoint: optional endpoint_url override.
    :raises ValueError: when conn_type is not one of the accepted values.
    """
    profile = params.pop('profile_name', None)
    if conn_type not in ['both', 'resource', 'client']:
        raise ValueError('There is an issue in the calling code. You '
                         'must specify either both, resource, or client to '
                         'the conn_type parameter in the boto3_conn function '
                         'call')
    # Tag every request with the Ansible version via user_agent_extra,
    # merging into any caller-provided botocore Config.
    if params.get('config'):
        config = params.pop('config')
        config.user_agent_extra = 'Ansible/{0}'.format(__version__)
    else:
        config = botocore.config.Config(
            user_agent_extra='Ansible/{0}'.format(__version__),
        )
    session = boto3.session.Session(
        profile_name=profile,
    )
    if conn_type == 'resource':
        return session.resource(resource, config=config, region_name=region, endpoint_url=endpoint, **params)
    elif conn_type == 'client':
        return session.client(resource, config=config, region_name=region, endpoint_url=endpoint, **params)
    else:
        # conn_type == 'both': pass config here too -- the original omitted
        # it on these two calls, silently dropping the user agent and any
        # caller-supplied Config for the 'both' path.
        client = session.client(resource, config=config, region_name=region, endpoint_url=endpoint, **params)
        resource = session.resource(resource, config=config, region_name=region, endpoint_url=endpoint, **params)
        return client, resource
# Backwards-compatible alias used by inventory scripts that call the
# session builder without an AnsibleModule.
boto3_inventory_conn = _boto3_conn
def boto_exception(err):
    """
    Extracts the error message from a boto exception.

    :param err: Exception from boto
    :return: Error message
    """
    if hasattr(err, 'error_message'):
        return err.error_message
    if hasattr(err, 'message'):
        return str(err.message) + ' ' + str(err) + ' - ' + str(type(err))
    # Fallback for exceptions carrying neither attribute.
    return '%s: %s' % (Exception, err)
def aws_common_argument_spec():
    """Argument-spec entries shared by every AWS module."""
    spec = dict()
    spec['debug_botocore_endpoint_logs'] = dict(
        fallback=(env_fallback, ['ANSIBLE_DEBUG_BOTOCORE_LOGS']),
        default=False, type='bool')
    spec['ec2_url'] = dict()
    # Credentials are marked no_log so they never reach the output.
    spec['aws_secret_key'] = dict(aliases=['ec2_secret_key', 'secret_key'], no_log=True)
    spec['aws_access_key'] = dict(aliases=['ec2_access_key', 'access_key'])
    spec['validate_certs'] = dict(default=True, type='bool')
    spec['security_token'] = dict(aliases=['access_token'], no_log=True)
    spec['profile'] = dict()
    return spec
def ec2_argument_spec():
    """Common AWS argument spec plus the EC2 ``region`` option."""
    spec = aws_common_argument_spec()
    spec['region'] = dict(aliases=['aws_region', 'ec2_region'])
    return spec
def get_aws_connection_info(module, boto3=False):
    """Resolve region, endpoint URL and credential kwargs for *module*.

    Each setting is looked up in the module parameters first, then in
    environment variables, then (where applicable) the boto/botocore
    configuration.  Returns a ``(region, ec2_url, boto_params)`` tuple;
    the keys in ``boto_params`` differ between boto2 and boto3 mode.

    NOTE: the ``boto3`` parameter shadows the ``boto3`` module inside
    this function; here it is a plain bool flag.
    """
    # Check module args for credentials, then check environment vars
    # access_key
    ec2_url = module.params.get('ec2_url')
    access_key = module.params.get('aws_access_key')
    secret_key = module.params.get('aws_secret_key')
    security_token = module.params.get('security_token')
    region = module.params.get('region')
    profile_name = module.params.get('profile')
    validate_certs = module.params.get('validate_certs')
    # Endpoint URL: AWS_URL wins over the legacy EC2_URL variable.
    if not ec2_url:
        if 'AWS_URL' in os.environ:
            ec2_url = os.environ['AWS_URL']
        elif 'EC2_URL' in os.environ:
            ec2_url = os.environ['EC2_URL']
    # Access key: environment variables first, then the boto2 config file.
    if not access_key:
        if os.environ.get('AWS_ACCESS_KEY_ID'):
            access_key = os.environ['AWS_ACCESS_KEY_ID']
        elif os.environ.get('AWS_ACCESS_KEY'):
            access_key = os.environ['AWS_ACCESS_KEY']
        elif os.environ.get('EC2_ACCESS_KEY'):
            access_key = os.environ['EC2_ACCESS_KEY']
        elif HAS_BOTO and boto.config.get('Credentials', 'aws_access_key_id'):
            access_key = boto.config.get('Credentials', 'aws_access_key_id')
        elif HAS_BOTO and boto.config.get('default', 'aws_access_key_id'):
            access_key = boto.config.get('default', 'aws_access_key_id')
        else:
            # in case access_key came in as empty string
            access_key = None
    # Secret key: same precedence as the access key above.
    if not secret_key:
        if os.environ.get('AWS_SECRET_ACCESS_KEY'):
            secret_key = os.environ['AWS_SECRET_ACCESS_KEY']
        elif os.environ.get('AWS_SECRET_KEY'):
            secret_key = os.environ['AWS_SECRET_KEY']
        elif os.environ.get('EC2_SECRET_KEY'):
            secret_key = os.environ['EC2_SECRET_KEY']
        elif HAS_BOTO and boto.config.get('Credentials', 'aws_secret_access_key'):
            secret_key = boto.config.get('Credentials', 'aws_secret_access_key')
        elif HAS_BOTO and boto.config.get('default', 'aws_secret_access_key'):
            secret_key = boto.config.get('default', 'aws_secret_access_key')
        else:
            # in case secret_key came in as empty string
            secret_key = None
    # Region: environment first, then the boto2 config file or the
    # botocore session configuration depending on the boto3 flag.
    if not region:
        if 'AWS_REGION' in os.environ:
            region = os.environ['AWS_REGION']
        elif 'AWS_DEFAULT_REGION' in os.environ:
            region = os.environ['AWS_DEFAULT_REGION']
        elif 'EC2_REGION' in os.environ:
            region = os.environ['EC2_REGION']
        else:
            if not boto3:
                if HAS_BOTO:
                    # boto.config.get returns None if config not found
                    region = boto.config.get('Boto', 'aws_region')
                    if not region:
                        region = boto.config.get('Boto', 'ec2_region')
                else:
                    module.fail_json(msg=missing_required_lib('boto'), exception=BOTO_IMP_ERR)
            elif HAS_BOTO3:
                # here we don't need to make an additional call, will default to 'us-east-1' if the below evaluates to None.
                try:
                    region = botocore.session.Session(profile=profile_name).get_config_variable('region')
                except botocore.exceptions.ProfileNotFound as e:
                    # An unknown profile simply means no configured region.
                    pass
            else:
                module.fail_json(msg=missing_required_lib('boto3'), exception=BOTO3_IMP_ERR)
    # Session/security token: several historic variable names are honored.
    if not security_token:
        if os.environ.get('AWS_SECURITY_TOKEN'):
            security_token = os.environ['AWS_SECURITY_TOKEN']
        elif os.environ.get('AWS_SESSION_TOKEN'):
            security_token = os.environ['AWS_SESSION_TOKEN']
        elif os.environ.get('EC2_SECURITY_TOKEN'):
            security_token = os.environ['EC2_SECURITY_TOKEN']
        elif HAS_BOTO and boto.config.get('Credentials', 'aws_security_token'):
            security_token = boto.config.get('Credentials', 'aws_security_token')
        elif HAS_BOTO and boto.config.get('default', 'aws_security_token'):
            security_token = boto.config.get('default', 'aws_security_token')
        else:
            # in case secret_token came in as empty string
            security_token = None
    # Assemble connection kwargs.  boto3 and boto2 use different parameter
    # names, and in boto3 mode a named profile replaces explicit credentials.
    if HAS_BOTO3 and boto3:
        boto_params = dict(aws_access_key_id=access_key,
                           aws_secret_access_key=secret_key,
                           aws_session_token=security_token)
        boto_params['verify'] = validate_certs
        if profile_name:
            boto_params = dict(aws_access_key_id=None, aws_secret_access_key=None, aws_session_token=None)
            boto_params['profile_name'] = profile_name
    else:
        boto_params = dict(aws_access_key_id=access_key,
                           aws_secret_access_key=secret_key,
                           security_token=security_token)
        # only set profile_name if passed as an argument
        if profile_name:
            boto_params['profile_name'] = profile_name
        boto_params['validate_certs'] = validate_certs
    # Ensure every value is text, not bytes.
    for param, value in boto_params.items():
        if isinstance(value, binary_type):
            boto_params[param] = text_type(value, 'utf-8', 'strict')
    return region, ec2_url, boto_params
def get_ec2_creds(module):
    ''' for compatibility mode with old modules that don't/can't yet
        use ec2_connect method '''
    region, ec2_url, boto_params = get_aws_connection_info(module)
    access_key = boto_params['aws_access_key_id']
    secret_key = boto_params['aws_secret_access_key']
    return ec2_url, access_key, secret_key, region
def boto_fix_security_token_in_profile(conn, profile_name):
    ''' monkey patch for boto issue boto/boto#2100 '''
    # boto stores named profiles under a "profile <name>" config section.
    section = 'profile ' + profile_name
    if boto.config.has_option(section, 'aws_security_token'):
        token = boto.config.get(section, 'aws_security_token')
        conn.provider.set_security_token(token)
    return conn
def connect_to_aws(aws_module, region, **params):
    """Connect a boto2 service module (e.g. ``boto.ec2``) to *region*."""
    try:
        conn = aws_module.connect_to_region(region, **params)
    except boto.provider.ProfileNotFoundError:
        raise AnsibleAWSError("Profile given for AWS was not found. Please fix and retry.")
    if not conn:
        # connect_to_region returns None for unknown regions; distinguish
        # "this boto doesn't know the region" from other failures.
        known = [aws_module_region.name for aws_module_region in aws_module.regions()]
        if region not in known:
            raise AnsibleAWSError("Region %s does not seem to be available for aws module %s. If the region definitely exists, you may need to upgrade "
                                  "boto or extend with endpoints_path" % (region, aws_module.__name__))
        raise AnsibleAWSError("Unknown problem connecting to region %s for aws module %s." % (region, aws_module.__name__))
    if params.get('profile_name'):
        conn = boto_fix_security_token_in_profile(conn, params['profile_name'])
    return conn
def ec2_connect(module):
    """ Return an ec2 connection"""
    region, ec2_url, boto_params = get_aws_connection_info(module)
    if region:
        # A region was resolved: connect to its regional endpoint.
        try:
            ec2 = connect_to_aws(boto.ec2, region, **boto_params)
        except (boto.exception.NoAuthHandlerFound, AnsibleAWSError, boto.provider.ProfileNotFoundError) as e:
            module.fail_json(msg=str(e))
    elif ec2_url:
        # No region: fall back to an explicitly configured endpoint URL.
        try:
            ec2 = boto.connect_ec2_endpoint(ec2_url, **boto_params)
        except (boto.exception.NoAuthHandlerFound, AnsibleAWSError, boto.provider.ProfileNotFoundError) as e:
            module.fail_json(msg=str(e))
    else:
        module.fail_json(msg="Either region or ec2_url must be specified")
    return ec2
def ansible_dict_to_boto3_filter_list(filters_dict):
    """ Convert an Ansible dict of filters to the list-of-dicts form boto3 expects
    Args:
        filters_dict (dict): Dict of AWS filters, e.g. {'some-aws-id': 'i-01234567'}
    Returns:
        List: List of AWS filters and their values, e.g.
        [
            {
                'Name': 'some-aws-id',
                'Values': [
                    'i-01234567',
                ]
            }
        ]
    """
    def as_values(value):
        # The API always wants a list of values; wrap bare strings.
        return [value] if isinstance(value, string_types) else value

    return [{'Name': name, 'Values': as_values(value)}
            for name, value in filters_dict.items()]
def boto3_tag_list_to_ansible_dict(tags_list, tag_name_key_name=None, tag_value_key_name=None):
    """ Convert a boto3 list of resource tags to a flat dict of key:value pairs
    Args:
        tags_list (list): List of dicts representing AWS tags.
        tag_name_key_name (str): Value to use as the key for all tag keys (useful because boto3 doesn't always use "Key")
        tag_value_key_name (str): Value to use as the key for all tag values (useful because boto3 doesn't always use "Value")
    Basic Usage:
        >>> boto3_tag_list_to_ansible_dict([{'Key': 'MyTagKey', 'Value': 'MyTagValue'}])
        {'MyTagKey': 'MyTagValue'}
    Returns:
        Dict: Dict of key:value pairs representing AWS tags
    """
    # Candidate (key-name, value-name) pairs to probe for; explicit names
    # win, otherwise try the two casings boto3 uses in the wild.
    if tag_name_key_name and tag_value_key_name:
        tag_candidates = {tag_name_key_name: tag_value_key_name}
    else:
        tag_candidates = {'key': 'value', 'Key': 'Value'}
    if not tags_list:
        return {}
    first = tags_list[0]
    for name_key, value_key in tag_candidates.items():
        if name_key in first and value_key in first:
            return {tag[name_key]: tag[value_key] for tag in tags_list}
    raise ValueError("Couldn't find tag key (candidates %s) in tag list %s" % (str(tag_candidates), str(tags_list)))
def ansible_dict_to_boto3_tag_list(tags_dict, tag_name_key_name='Key', tag_value_key_name='Value'):
    """ Convert a flat dict of key:value pairs representing AWS resource tags to a boto3 list of dicts
    Args:
        tags_dict (dict): Dict representing AWS resource tags.
        tag_name_key_name (str): Value to use as the key for all tag keys (useful because boto3 doesn't always use "Key")
        tag_value_key_name (str): Value to use as the key for all tag values (useful because boto3 doesn't always use "Value")
    Basic Usage:
        >>> ansible_dict_to_boto3_tag_list({'MyTagKey': 'MyTagValue'})
        [{'Key': 'MyTagKey', 'Value': 'MyTagValue'}]
    Returns:
        List: List of dicts containing tag keys and values
    """
    # Tag values are coerced to native strings on the way out.
    return [{tag_name_key_name: key, tag_value_key_name: to_native(value)}
            for key, value in tags_dict.items()]
def get_ec2_security_group_ids_from_names(sec_group_list, ec2_connection, vpc_id=None, boto3=True):
    """ Return list of security group IDs from security group names. Note that security group names are not unique
    across VPCs. If a name exists across multiple VPCs and no VPC ID is supplied, all matching IDs will be returned. This
    will probably lead to a boto exception if you attempt to assign both IDs to a resource so ensure you wrap the call in
    a try block
    """
    # boto3 returns plain dicts; boto2 returns objects with attributes.
    def get_sg_name(sg, boto3):
        if boto3:
            return sg['GroupName']
        else:
            return sg.name
    def get_sg_id(sg, boto3):
        if boto3:
            return sg['GroupId']
        else:
            return sg.id
    sec_group_id_list = []
    # Accept a single name as well as a list of names.
    if isinstance(sec_group_list, string_types):
        sec_group_list = [sec_group_list]
    # Get all security groups
    if boto3:
        if vpc_id:
            filters = [
                {
                    'Name': 'vpc-id',
                    'Values': [
                        vpc_id,
                    ]
                }
            ]
            all_sec_groups = ec2_connection.describe_security_groups(Filters=filters)['SecurityGroups']
        else:
            all_sec_groups = ec2_connection.describe_security_groups()['SecurityGroups']
    else:
        if vpc_id:
            filters = {'vpc-id': vpc_id}
            all_sec_groups = ec2_connection.get_all_security_groups(filters=filters)
        else:
            all_sec_groups = ec2_connection.get_all_security_groups()
    # Names that did not match any existing group.
    unmatched = set(sec_group_list).difference(str(get_sg_name(all_sg, boto3)) for all_sg in all_sec_groups)
    sec_group_name_list = list(set(sec_group_list) - set(unmatched))
    if len(unmatched) > 0:
        # If we have unmatched names that look like an ID, assume they are
        import re
        sec_group_id_list[:] = [sg for sg in unmatched if re.match('sg-[a-fA-F0-9]+$', sg)]
        still_unmatched = [sg for sg in unmatched if not re.match('sg-[a-fA-F0-9]+$', sg)]
        if len(still_unmatched) > 0:
            raise ValueError("The following group names are not valid: %s" % ', '.join(still_unmatched))
    # Translate the matched names to their IDs.
    sec_group_id_list += [str(get_sg_id(all_sg, boto3)) for all_sg in all_sec_groups if str(get_sg_name(all_sg, boto3)) in sec_group_name_list]
    return sec_group_id_list
def _hashable_policy(policy, policy_list):
    """
    Takes a policy and returns a list, the contents of which are all hashable and sorted.
    Example input policy:
        {'Version': '2012-10-17',
         'Statement': [{'Action': 's3:PutObjectAcl',
                        'Sid': 'AddCannedAcl2',
                        'Resource': 'arn:aws:s3:::test_policy/*',
                        'Effect': 'Allow',
                        'Principal': {'AWS': ['arn:aws:iam::XXXXXXXXXXXX:user/username1', 'arn:aws:iam::XXXXXXXXXXXX:user/username2']}
                       }]}
    Returned value:
        [('Statement',  ((('Action', (u's3:PutObjectAcl',)),
                          ('Effect', (u'Allow',)),
                          ('Principal', ('AWS', ((u'arn:aws:iam::XXXXXXXXXXXX:user/username1',), (u'arn:aws:iam::XXXXXXXXXXXX:user/username2',)))),
                          ('Resource', (u'arn:aws:s3:::test_policy/*',)), ('Sid', (u'AddCannedAcl2',)))),
         ('Version', (u'2012-10-17',)))]
    """
    # Lists: recurse into each element; tuples keep the result hashable.
    if isinstance(policy, list):
        for each in policy:
            tupleified = _hashable_policy(each, [])
            if isinstance(tupleified, list):
                tupleified = tuple(tupleified)
            policy_list.append(tupleified)
    elif isinstance(policy, string_types) or isinstance(policy, binary_type):
        policy = to_text(policy)
        # convert root account ARNs to just account IDs
        if policy.startswith('arn:aws:iam::') and policy.endswith(':root'):
            policy = policy.split(':')[4]
        return [policy]
    elif isinstance(policy, dict):
        # Dicts: recurse over keys in sorted order so output is deterministic.
        sorted_keys = list(policy.keys())
        sorted_keys.sort()
        for key in sorted_keys:
            tupleified = _hashable_policy(policy[key], [])
            if isinstance(tupleified, list):
                tupleified = tuple(tupleified)
            policy_list.append((key, tupleified))
    # ensure we aren't returning deeply nested structures of length 1
    if len(policy_list) == 1 and isinstance(policy_list[0], tuple):
        policy_list = policy_list[0]
    if isinstance(policy_list, list):
        if PY3_COMPARISON:
            # Python 3 can't order mixed types; py3cmp emulates Python 2.
            policy_list.sort(key=cmp_to_key(py3cmp))
        else:
            policy_list.sort()
    return policy_list
def py3cmp(a, b):
    """ Python 2 can sort lists of mixed types. Strings < tuples. Without this function this fails on Python 3."""
    try:
        # Classic three-way compare: 1, -1 or 0.
        return (a > b) - (a < b)
    except TypeError as e:
        # check to see if they're tuple-string
        # always say strings are less than tuples (to maintain compatibility with python2)
        str_ind = to_text(e).find('str')
        tup_ind = to_text(e).find('tuple')
        if -1 not in (str_ind, tup_ind):
            if str_ind < tup_ind:
                return -1
            elif tup_ind < str_ind:
                return 1
        raise
def compare_policies(current_policy, new_policy):
    """ Compares the existing policy and the updated policy
        Returns True if there is a difference between policies.
    """
    current = set(_hashable_policy(current_policy, []))
    updated = set(_hashable_policy(new_policy, []))
    return updated != current
def sort_json_policy_dict(policy_dict):
    """ Sort any lists in an IAM JSON policy so that comparison of two policies with identical values but
    different orders will return true
    Args:
        policy_dict (dict): Dict representing IAM JSON policy.
    Basic Usage:
        >>> sort_json_policy_dict({'Principle': {'AWS': ["31", "7", "14", "101"]}})
        {'Principle': {'AWS': ['101', '14', '31', '7']}}
    Returns:
        Dict: A copy of the policy as a Dict with every list sorted.  Lists of
        dicts are ordered by their sorted key/value pairs, since Python 3 does
        not allow `<` comparisons between dictionaries.
    """
    def _sorted_list(items):
        # Recursively normalize each entry, then sort the whole list.
        normalized = [
            sort_json_policy_dict(entry) if isinstance(entry, dict)
            else _sorted_list(entry) if isinstance(entry, list)
            else entry
            for entry in items
        ]
        normalized.sort(key=lambda x: sorted(x.items()) if isinstance(x, dict) else x)
        return normalized

    result = {}
    for key, value in policy_dict.items():
        if isinstance(value, dict):
            result[key] = sort_json_policy_dict(value)
        elif isinstance(value, list):
            result[key] = _sorted_list(value)
        else:
            result[key] = value
    return result
def map_complex_type(complex_type, type_map):
    """
    Allows to cast elements within a dictionary to a specific type
    Example of usage:
        DEPLOYMENT_CONFIGURATION_TYPE_MAP = {
            'maximum_percent': 'int',
            'minimum_healthy_percent': 'int'
        }
        deployment_configuration = map_complex_type(module.params['deployment_configuration'],
                                                    DEPLOYMENT_CONFIGURATION_TYPE_MAP)
    This ensures all keys within the root element are casted and valid integers
    """
    if complex_type is None:
        return
    mapped = type(complex_type)()
    if isinstance(complex_type, dict):
        for key, value in complex_type.items():
            if key not in type_map:
                # A key without mapping info: hand the input back untouched.
                return complex_type
            wanted = type_map[key]
            if isinstance(wanted, list):
                # A one-element list means "apply this type to each element".
                mapped[key] = map_complex_type(value, wanted[0])
            else:
                mapped[key] = map_complex_type(value, wanted)
    elif isinstance(complex_type, list):
        for item in complex_type:
            mapped.append(map_complex_type(item, type_map))
    elif type_map:
        # Leaf value: look the cast callable up by name among the builtins.
        return globals()['__builtins__'][type_map](complex_type)
    return mapped
def compare_aws_tags(current_tags_dict, new_tags_dict, purge_tags=True):
    """
    Compare two dicts of AWS tags. Dicts are expected to of been created using 'boto3_tag_list_to_ansible_dict' helper function.
    Two dicts are returned - the first is tags to be set, the second is any tags to remove. Since the AWS APIs differ
    these may not be able to be used out of the box.
    :param current_tags_dict:
    :param new_tags_dict:
    :param purge_tags:
    :return: tag_key_value_pairs_to_set: a dict of key value pairs that need to be set in AWS. If all tags are identical this dict will be empty
    :return: tag_keys_to_unset: a list of key names (type str) that need to be unset in AWS. If no tags need to be unset this list will be empty
    """
    # Keys present now but absent from the desired set are removed only
    # when purging is requested.
    tag_keys_to_unset = [key for key in current_tags_dict.keys()
                         if purge_tags and key not in new_tags_dict]
    # Desired keys whose (text) value differs from the current one.
    tag_key_value_pairs_to_set = {
        key: new_tags_dict[key]
        for key in set(new_tags_dict.keys()) - set(tag_keys_to_unset)
        if to_text(new_tags_dict[key]) != current_tags_dict.get(key)
    }
    return tag_key_value_pairs_to_set, tag_keys_to_unset
|
from robot.api.deco import keyword
def defined_twice():
    """Fixture: same keyword name as the @keyword('Defined twice') function below; raises ZeroDivisionError if run."""
    1/0
@keyword('Defined twice')
def this_time_using_custom_name():
    """Fixture: duplicates the 'Defined twice' keyword via a custom name; raises ZeroDivisionError if run."""
    2/0
def defined_thrice():
    """Fixture: first of three functions whose names differ only in case/underscores."""
    1/0
def definedThrice():
    """Fixture: second of three functions whose names differ only in case/underscores."""
    2/0
def Defined_Thrice():
    """Fixture: third of three functions whose names differ only in case/underscores."""
    3/0
@keyword('Embedded ${arguments} twice')
def embedded1(arg):
    """Fixture: embedded-argument keyword whose pattern overlaps embedded2's; raises ZeroDivisionError if run."""
    1/0
@keyword('Embedded ${arguments match} TWICE')
def embedded2(arg):
    """Fixture: embedded-argument keyword whose pattern overlaps embedded1's; raises ZeroDivisionError if run."""
    2/0
|
# Python 2 generator script: prints a module-definition (.def) export list
# for the crState* state-tracker library, driven by the APIspec file whose
# directory is given as the first command-line argument.
import sys
import apiutil
apiutil.CopyrightDef()
print """DESCRIPTION ""
EXPORTS
"""
# NOTE(review): 'keys' is never used below -- presumably kept for parity
# with sibling generator scripts; confirm before removing.
keys = apiutil.GetDispatchedFunctions(sys.argv[1]+"/APIspec.txt")
# One export per special-cased function in each category.
for func_name in apiutil.AllSpecials( 'state' ):
    print "crState%s" % func_name
for func_name in apiutil.AllSpecials( 'state_feedback' ):
    print "crStateFeedback%s" % func_name
for func_name in apiutil.AllSpecials( 'state_select' ):
    print "crStateSelect%s" % func_name
# Fixed set of always-exported entry points and data symbols.
print """crStateInit
crStateReadPixels
crStateGetChromiumParametervCR
crStateCreateContext
crStateCreateContextEx
crStateDestroyContext
crStateDiffContext
crStateSwitchContext
crStateMakeCurrent
crStateSetCurrent
crStateFlushFunc
crStateFlushArg
crStateDiffAPI
crStateSetCurrentPointers
crStateResetCurrentPointers
crStateCurrentRecover
crStateTransformUpdateTransform
crStateColorMaterialRecover
crStateError
crStateUpdateColorBits
crStateClientInit
crStateGetCurrent
crStateLimitsInit
crStateMergeExtensions
crStateRasterPosUpdate
crStateTextureCheckDirtyImages
crStateExtensionsInit
crStateSetExtensionString
crStateUseServerArrays
crStateUseServerArrayElements
crStateComputeVersion
crStateTransformXformPointMatrixf
crStateTransformXformPointMatrixd
crStateInitMatrixStack
crStateLoadMatrix
__currentBits
"""
|
from typing import Text
from zerver.lib.test_classes import WebhookTestCase
class HelloSignHookTests(WebhookTestCase):
    """Webhook tests for the HelloSign integration."""
    STREAM_NAME = 'hellosign'
    URL_TEMPLATE = "/api/v1/external/hellosign?stream={stream}&api_key={api_key}"
    FIXTURE_DIR_NAME = 'hellosign'
    def test_signatures_message(self):
        # type: () -> None
        """The 'signatures' fixture produces the expected subject and message."""
        expected_subject = "NDA with Acme Co."
        expected_message = ("The NDA with Acme Co. is awaiting the signature of "
                            "Jack and was just signed by Jill.")
        self.send_and_test_stream_message('signatures', expected_subject, expected_message,
                                          content_type="application/x-www-form-urlencoded")
    def get_body(self, fixture_name):
        # type: (Text) -> Text
        """Load the named JSON fixture from the hellosign fixture directory."""
        return self.fixture_data("hellosign", fixture_name, file_type="json")
|
from tornado.concurrent import Future
from tornado import gen
from tornado.httpclient import HTTPError
from tornado.log import gen_log
from tornado.testing import AsyncHTTPTestCase, gen_test, bind_unused_port, ExpectLog
from tornado.web import Application, RequestHandler
from tornado.websocket import WebSocketHandler, websocket_connect, WebSocketError
class EchoHandler(WebSocketHandler):
    """Websocket handler that echoes every message back to the client."""
    def initialize(self, close_future):
        # Future resolved in on_close so tests can wait for the server side.
        self.close_future = close_future
    def on_message(self, message):
        # Echo back, preserving the binary/text framing of the input.
        self.write_message(message, isinstance(message, bytes))
    def on_close(self):
        self.close_future.set_result(None)
class NonWebSocketHandler(RequestHandler):
    """Plain HTTP handler used to test websocket connects to non-ws endpoints."""
    def get(self):
        self.write('ok')
class WebSocketTest(AsyncHTTPTestCase):
    """End-to-end tests for websocket_connect against a local echo server."""
    def get_app(self):
        # close_future resolves once the server-side handler closes.
        self.close_future = Future()
        return Application([
            ('/echo', EchoHandler, dict(close_future=self.close_future)),
            ('/non_ws', NonWebSocketHandler),
        ])
    @gen_test
    def test_websocket_gen(self):
        """Round-trip a message using coroutine-style reads."""
        ws = yield websocket_connect(
            'ws://localhost:%d/echo' % self.get_http_port(),
            io_loop=self.io_loop)
        ws.write_message('hello')
        response = yield ws.read_message()
        self.assertEqual(response, 'hello')
    def test_websocket_callbacks(self):
        """Round-trip a message using callback-style connect and reads."""
        websocket_connect(
            'ws://localhost:%d/echo' % self.get_http_port(),
            io_loop=self.io_loop, callback=self.stop)
        ws = self.wait().result()
        ws.write_message('hello')
        ws.read_message(self.stop)
        response = self.wait().result()
        self.assertEqual(response, 'hello')
    @gen_test
    def test_websocket_http_fail(self):
        """Connecting to an unknown path raises HTTPError 404."""
        with self.assertRaises(HTTPError) as cm:
            yield websocket_connect(
                'ws://localhost:%d/notfound' % self.get_http_port(),
                io_loop=self.io_loop)
        self.assertEqual(cm.exception.code, 404)
    @gen_test
    def test_websocket_http_success(self):
        """Connecting to a non-websocket endpoint raises WebSocketError."""
        with self.assertRaises(WebSocketError):
            yield websocket_connect(
                'ws://localhost:%d/non_ws' % self.get_http_port(),
                io_loop=self.io_loop)
    @gen_test
    def test_websocket_network_fail(self):
        """Connecting to a closed port fails with error code 599."""
        sock, port = bind_unused_port()
        sock.close()
        with self.assertRaises(HTTPError) as cm:
            with ExpectLog(gen_log, ".*"):
                yield websocket_connect(
                    'ws://localhost:%d/' % port,
                    io_loop=self.io_loop,
                    connect_timeout=0.01)
        self.assertEqual(cm.exception.code, 599)
    @gen_test
    def test_websocket_close_buffered_data(self):
        """Closing the client stream with queued writes still closes the server side."""
        ws = yield websocket_connect(
            'ws://localhost:%d/echo' % self.get_http_port())
        ws.write_message('hello')
        ws.write_message('world')
        ws.stream.close()
        yield self.close_future
|
import logging
import os
import sys
import unittest
from telemetry.core import browser_options
from telemetry.core import discover
from telemetry.unittest import gtest_testrunner
from telemetry.unittest import options_for_unittests
def Discover(start_dir, top_level_dir=None, pattern='test*.py'):
  """Collect matching test modules under start_dir into one GTest-style suite.

  A module may provide its own suite() factory; otherwise its tests are
  loaded with the default unittest loader.  Empty sub-suites are dropped.
  """
  loader = unittest.defaultTestLoader
  loader.suiteClass = gtest_testrunner.GTestTestSuite
  subsuites = []
  modules = discover.DiscoverModules(start_dir, top_level_dir, pattern)
  for module in modules:
    if hasattr(module, 'suite'):
      new_suite = module.suite()
    else:
      new_suite = loader.loadTestsFromModule(module)
    if new_suite.countTestCases():
      subsuites.append(new_suite)
  return gtest_testrunner.GTestTestSuite(subsuites)
def FilterSuite(suite, predicate):
new_suite = suite.__class__()
for x in suite:
if isinstance(x, unittest.TestSuite):
subsuite = FilterSuite(x, predicate)
if subsuite.countTestCases() == 0:
continue
new_suite.addTest(subsuite)
continue
assert isinstance(x, unittest.TestCase)
if predicate(x):
new_suite.addTest(x)
return new_suite
def DiscoverAndRunTests(
    dir_name, args, top_level_dir,
    runner=None, run_disabled_tests=False):
  """Discover *_unittest.py tests under dir_name, filter them and run them.

  args is a list of name substrings used to select tests; an empty list
  selects everything.  Tests may also be skipped based on the browser
  type they require or a @DisabledTest decoration.
  """
  if not runner:
    runner = gtest_testrunner.GTestTestRunner(inner=True)
  suite = Discover(dir_name, top_level_dir, '*_unittest.py')
  def IsTestSelected(test):
    # Name filter: keep the test if any requested substring matches its id.
    if len(args) != 0:
      found = False
      for name in args:
        if name in test.id():
          found = True
      if not found:
        return False
    if hasattr(test, '_testMethodName'):
      method = getattr(test, test._testMethodName) # pylint: disable=W0212
      # Skip tests whose decorator restricts them to other browser types.
      if hasattr(method, '_requires_browser_types'):
        types = method._requires_browser_types # pylint: disable=W0212
        if options_for_unittests.GetBrowserType() not in types:
          logging.debug('Skipping test %s because it requires %s' %
                        (test.id(), types))
          return False
      # Honor @DisabledTest unless explicitly overridden.
      if hasattr(method, '_disabled_test'):
        if not run_disabled_tests:
          return False
    return True
  filtered_suite = FilterSuite(suite, IsTestSelected)
  test_result = runner.run(filtered_suite)
  return test_result
def Main(args, start_dir, top_level_dir, runner=None):
  """Unit test suite that collects all test cases for telemetry."""
  # NOTE(review): this file uses Python 2 syntax ('except X, ex' below) and
  # must be run with a Python 2 interpreter.
  # Add unittest_data to the path so we can import packages from it.
  unittest_data_dir = os.path.abspath(os.path.join(
      os.path.dirname(__file__), '..', '..', 'unittest_data'))
  sys.path.append(unittest_data_dir)
  default_options = browser_options.BrowserOptions()
  default_options.browser_type = 'any'
  parser = default_options.CreateParser('run_tests [options] [test names]')
  parser.add_option('--repeat-count', dest='run_test_repeat_count',
                    type='int', default=1,
                    help='Repeats each a provided number of times.')
  parser.add_option('-d', '--also-run-disabled-tests',
                    dest='run_disabled_tests',
                    action='store_true', default=False,
                    help='Also run tests decorated with @DisabledTest.')
  _, args = parser.parse_args(args)
  # Remember the current level so it can be restored after a quiet run.
  logging_level = logging.getLogger().getEffectiveLevel()
  if default_options.verbosity == 0:
    logging.getLogger().setLevel(logging.WARN)
  from telemetry.core import browser_finder
  try:
    browser_to_create = browser_finder.FindBrowser(default_options)
  except browser_finder.BrowserFinderException, ex:
    logging.error(str(ex))
    return 1
  # NOTE(review): '== None' would conventionally be 'is None'.
  if browser_to_create == None:
    logging.error('No browser found of type %s. Cannot run tests.',
                  default_options.browser_type)
    logging.error('Re-run with --browser=list to see available browser types.')
    return 1
  options_for_unittests.Set(default_options,
                            browser_to_create.browser_type)
  olddir = os.getcwd()
  try:
    os.chdir(top_level_dir)
    # Run the whole discovery/run cycle --repeat-count times; overall
    # success requires every repetition to succeed.
    success = True
    for _ in range(
        default_options.run_test_repeat_count): # pylint: disable=E1101
      success = success and DiscoverAndRunTests(
          start_dir, args, top_level_dir,
          runner, default_options.run_disabled_tests)
    if success:
      return 0
  finally:
    os.chdir(olddir)
    options_for_unittests.Set(None, None)
    if default_options.verbosity == 0:
      # Restore logging level.
      logging.getLogger().setLevel(logging_level)
  return 1
|
"""A Transform that parses serialized tensorflow.Example protos."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
from tensorflow.contrib.learn.python.learn.dataframe import transform
from tensorflow.python.ops import parsing_ops
class ExampleParser(transform.TensorFlowTransform):
  """A Transform that parses serialized `tensorflow.Example` protos."""
  def __init__(self, features):
    """Initialize `ExampleParser`.
    The `features` argument must be an object that can be converted to an
    `OrderedDict`. The keys should be strings and will be used to name the
    output. Values should be either `VarLenFeature` or `FixedLenFeature`. If
    `features` is a dict, it will be sorted by key.
    Args:
      features: An object that can be converted to an `OrderedDict` mapping
        column names to feature definitions.
    """
    super(ExampleParser, self).__init__()
    # Normalize to an OrderedDict so output column order is deterministic.
    if isinstance(features, dict):
      self._ordered_features = collections.OrderedDict(sorted(features.items(
      ), key=lambda f: f[0]))
    else:
      self._ordered_features = collections.OrderedDict(features)
  @property
  def name(self):
    # Human-readable transform name.
    return "ExampleParser"
  @property
  def input_valency(self):
    # This transform consumes exactly one input series.
    return 1
  @property
  def _output_names(self):
    # One output per configured feature, in feature order.
    return list(self._ordered_features.keys())
  @transform._parameter  # pylint: disable=protected-access
  def feature_definitions(self):
    return self._ordered_features
  def _apply_transform(self, input_tensors, **kwargs):
    # Delegate the actual proto parsing to tf's parse_example op.
    parsed_values = parsing_ops.parse_example(input_tensors[0],
                                              features=self._ordered_features)
    # pylint: disable=not-callable
    return self.return_type(**parsed_values)
|
"""
Basic CLexer Test
~~~~~~~~~~~~~~~~~
:copyright: Copyright 2006-2013 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import unittest
import os
from pygments.token import Text, Number
from pygments.lexers import CLexer
class CLexerTest(unittest.TestCase):
    """Checks that the C lexer classifies numeric literals correctly."""
    def setUp(self):
        self.lexer = CLexer()
    def testNumbers(self):
        code = '42 23.42 23. .42 023 0xdeadbeef 23e+42 42e-23'
        expected_types = [Number.Integer, Number.Float, Number.Float,
                          Number.Float, Number.Oct, Number.Hex,
                          Number.Float, Number.Float]
        # Interleave each (type, literal) pair with the separating space token.
        interleaved = [tok for pair in zip(expected_types, code.split())
                       for tok in (pair, (Text, ' '))]
        # The lexer emits a leading empty Text token and a trailing newline
        # instead of a final space.
        wanted = [(Text, '')] + interleaved[:-1] + [(Text, '\n')]
        self.assertEqual(list(self.lexer.get_tokens(code)), wanted)
|
from __future__ import unicode_literals
import errno
import os
import re
import socket
import sys
from datetime import datetime
from django.core.exceptions import ImproperlyConfigured
from django.core.management.base import BaseCommand, CommandError
from django.core.servers.basehttp import get_internal_wsgi_application, run
from django.db import DEFAULT_DB_ALIAS, connections
from django.db.migrations.executor import MigrationExecutor
from django.utils import autoreload, six
from django.utils.encoding import force_text, get_system_encoding
naiveip_re = re.compile(r"""^(?:
(?P<addr>
(?P<ipv4>\d{1,3}(?:\.\d{1,3}){3}) | # IPv4 address
(?P<ipv6>\[[a-fA-F0-9:]+\]) | # IPv6 address
(?P<fqdn>[a-zA-Z0-9-]+(?:\.[a-zA-Z0-9-]+)*) # FQDN
):)?(?P<port>\d+)$""", re.X)
DEFAULT_PORT = "8000"
class Command(BaseCommand):
help = "Starts a lightweight Web server for development."
# Validation is called explicitly each time the server is reloaded.
requires_system_checks = False
def add_arguments(self, parser):
parser.add_argument('addrport', nargs='?',
help='Optional port number, or ipaddr:port')
parser.add_argument('--ipv6', '-6', action='store_true', dest='use_ipv6', default=False,
help='Tells Django to use an IPv6 address.')
parser.add_argument('--nothreading', action='store_false', dest='use_threading', default=True,
help='Tells Django to NOT use threading.')
parser.add_argument('--noreload', action='store_false', dest='use_reloader', default=True,
help='Tells Django to NOT use the auto-reloader.')
def execute(self, *args, **options):
if options.get('no_color'):
# We rely on the environment because it's currently the only
# way to reach WSGIRequestHandler. This seems an acceptable
# compromise considering `runserver` runs indefinitely.
os.environ[str("DJANGO_COLORS")] = str("nocolor")
super(Command, self).execute(*args, **options)
def get_handler(self, *args, **options):
"""
Returns the default WSGI handler for the runner.
"""
return get_internal_wsgi_application()
    def handle(self, *args, **options):
        """Validate settings/options, resolve the address/port pair and serve."""
        from django.conf import settings
        if not settings.DEBUG and not settings.ALLOWED_HOSTS:
            raise CommandError('You must set settings.ALLOWED_HOSTS if DEBUG is False.')
        self.use_ipv6 = options.get('use_ipv6')
        if self.use_ipv6 and not socket.has_ipv6:
            raise CommandError('Your Python does not support IPv6.')
        self._raw_ipv6 = False
        if not options.get('addrport'):
            self.addr = ''
            self.port = DEFAULT_PORT
        else:
            # addrport may be "port", "ipv4:port", "[ipv6]:port" or "fqdn:port".
            m = re.match(naiveip_re, options['addrport'])
            if m is None:
                raise CommandError('"%s" is not a valid port number '
                                   'or address:port pair.' % options['addrport'])
            self.addr, _ipv4, _ipv6, _fqdn, self.port = m.groups()
            if not self.port.isdigit():
                raise CommandError("%r is not a valid port number." % self.port)
            if self.addr:
                if _ipv6:
                    # Strip the surrounding brackets from "[::1]"-style input.
                    self.addr = self.addr[1:-1]
                    self.use_ipv6 = True
                    self._raw_ipv6 = True
                elif self.use_ipv6 and not _fqdn:
                    raise CommandError('"%s" is not a valid IPv6 address.' % self.addr)
        if not self.addr:
            # Default to the loopback address of the selected IP family.
            self.addr = '::1' if self.use_ipv6 else '127.0.0.1'
            self._raw_ipv6 = bool(self.use_ipv6)
        self.run(**options)
def run(self, **options):
"""
Runs the server, using the autoreloader if needed
"""
use_reloader = options.get('use_reloader')
if use_reloader:
autoreload.main(self.inner_run, None, options)
else:
self.inner_run(None, **options)
    def inner_run(self, *args, **options):
        """Perform system checks, print the banner and run the WSGI server."""
        from django.conf import settings
        from django.utils import translation
        # If an exception was silenced in ManagementUtility.execute in order
        # to be raised in the child process, raise it now.
        autoreload.raise_last_exception()
        threading = options.get('use_threading')
        shutdown_message = options.get('shutdown_message', '')
        quit_command = 'CTRL-BREAK' if sys.platform == 'win32' else 'CONTROL-C'
        self.stdout.write("Performing system checks...\n\n")
        self.validate(display_num_errors=True)
        # Unapplied-migration detection is advisory only; a misconfigured
        # database should not prevent the server from starting.
        try:
            self.check_migrations()
        except ImproperlyConfigured:
            pass
        now = datetime.now().strftime('%B %d, %Y - %X')
        if six.PY2:
            now = now.decode(get_system_encoding())
        self.stdout.write((
            "%(started_at)s\n"
            "Django version %(version)s, using settings %(settings)r\n"
            "Starting development server at http://%(addr)s:%(port)s/\n"
            "Quit the server with %(quit_command)s.\n"
        ) % {
            "started_at": now,
            "version": self.get_version(),
            "settings": settings.SETTINGS_MODULE,
            "addr": '[%s]' % self.addr if self._raw_ipv6 else self.addr,
            "port": self.port,
            "quit_command": quit_command,
        })
        # django.core.management.base forces the locale to en-us. We should
        # set it up correctly for the first request (particularly important
        # in the "--noreload" case).
        translation.activate(settings.LANGUAGE_CODE)
        try:
            handler = self.get_handler(*args, **options)
            run(self.addr, int(self.port), handler,
                ipv6=self.use_ipv6, threading=threading)
        except socket.error as e:
            # Use helpful error messages instead of ugly tracebacks.
            ERRORS = {
                errno.EACCES: "You don't have permission to access that port.",
                errno.EADDRINUSE: "That port is already in use.",
                errno.EADDRNOTAVAIL: "That IP address can't be assigned-to.",
            }
            try:
                error_text = ERRORS[e.errno]
            except KeyError:
                error_text = force_text(e)
            self.stderr.write("Error: %s" % error_text)
            # Need to use an OS exit because sys.exit doesn't work in a thread
            os._exit(1)
        except KeyboardInterrupt:
            if shutdown_message:
                self.stdout.write(shutdown_message)
            sys.exit(0)
def check_migrations(self):
"""
Checks to see if the set of migrations on disk matches the
migrations in the database. Prints a warning if they don't match.
"""
executor = MigrationExecutor(connections[DEFAULT_DB_ALIAS])
plan = executor.migration_plan(executor.loader.graph.leaf_nodes())
if plan:
self.stdout.write(self.style.NOTICE(
"\nYou have unapplied migrations; your app may not work properly until they are applied."
))
self.stdout.write(self.style.NOTICE("Run 'python manage.py migrate' to apply them.\n"))
# Alias kept so existing imports of BaseRunserverCommand keep working.
BaseRunserverCommand = Command
|
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
    """Create a composite expression index over the 'sha256', 'archive' and
    'vault' keys of the osf_fileversion.metadata JSON column.

    The reverse operation simply drops the index.
    """

    dependencies = [
        ('osf', '0005_merge'),
    ]

    operations = [
        migrations.RunSQL(
            # Forward SQL: build the functional index.
            [
                """
                CREATE INDEX fileversion_metadata_sha_arch_vault_index
                ON osf_fileversion ((osf_fileversion.metadata -> 'sha256'), (osf_fileversion.metadata -> 'archive'), (
                osf_fileversion.metadata -> 'vault'));
            """
            ],
            # Reverse SQL: drop it again.
            [
                """
                DROP INDEX fileversion_metadata_sha_arch_vault_index;
            """
            ]
        )
    ]
|
from __future__ import absolute_import, division, print_function
from collections import namedtuple
from cryptography import utils
from cryptography.exceptions import InternalError
from cryptography.hazmat.backends.commoncrypto.ciphers import (
_CipherContext, _GCMCipherContext
)
from cryptography.hazmat.backends.commoncrypto.hashes import _HashContext
from cryptography.hazmat.backends.commoncrypto.hmac import _HMACContext
from cryptography.hazmat.backends.interfaces import (
CipherBackend, HMACBackend, HashBackend, PBKDF2HMACBackend
)
from cryptography.hazmat.bindings.commoncrypto.binding import Binding
from cryptography.hazmat.primitives.ciphers.algorithms import (
AES, ARC4, Blowfish, CAST5, TripleDES
)
from cryptography.hazmat.primitives.ciphers.modes import (
CBC, CFB, CFB8, CTR, ECB, GCM, OFB
)
# Bundle of CommonCrypto digest entry points for one hash algorithm;
# ``ctx`` is the cffi type string of the per-digest state struct.
HashMethods = namedtuple(
    "HashMethods", ["ctx", "hash_init", "hash_update", "hash_final"]
)
@utils.register_interface(CipherBackend)
@utils.register_interface(HashBackend)
@utils.register_interface(HMACBackend)
@utils.register_interface(PBKDF2HMACBackend)
class Backend(object):
    """
    CommonCrypto API wrapper.

    Implements the cipher, hash, HMAC and PBKDF2-HMAC backend interfaces on
    top of Apple's CommonCrypto library via cffi bindings.
    """
    name = "commoncrypto"

    def __init__(self):
        self._binding = Binding()
        self._ffi = self._binding.ffi
        self._lib = self._binding.lib
        # Maps (cipher class, mode class) -> (cipher const, mode const).
        self._cipher_registry = {}
        self._register_default_ciphers()
        # CommonCrypto digest state type and C entry points per algorithm.
        # Note SHA-224 shares the SHA-256 context struct and SHA-384 shares
        # the SHA-512 context struct.
        self._hash_mapping = {
            "md5": HashMethods(
                "CC_MD5_CTX *", self._lib.CC_MD5_Init,
                self._lib.CC_MD5_Update, self._lib.CC_MD5_Final
            ),
            "sha1": HashMethods(
                "CC_SHA1_CTX *", self._lib.CC_SHA1_Init,
                self._lib.CC_SHA1_Update, self._lib.CC_SHA1_Final
            ),
            "sha224": HashMethods(
                "CC_SHA256_CTX *", self._lib.CC_SHA224_Init,
                self._lib.CC_SHA224_Update, self._lib.CC_SHA224_Final
            ),
            "sha256": HashMethods(
                "CC_SHA256_CTX *", self._lib.CC_SHA256_Init,
                self._lib.CC_SHA256_Update, self._lib.CC_SHA256_Final
            ),
            "sha384": HashMethods(
                "CC_SHA512_CTX *", self._lib.CC_SHA384_Init,
                self._lib.CC_SHA384_Update, self._lib.CC_SHA384_Final
            ),
            "sha512": HashMethods(
                "CC_SHA512_CTX *", self._lib.CC_SHA512_Init,
                self._lib.CC_SHA512_Update, self._lib.CC_SHA512_Final
            ),
        }
        # Algorithm-name -> CommonCrypto HMAC algorithm constant.
        self._supported_hmac_algorithms = {
            "md5": self._lib.kCCHmacAlgMD5,
            "sha1": self._lib.kCCHmacAlgSHA1,
            "sha224": self._lib.kCCHmacAlgSHA224,
            "sha256": self._lib.kCCHmacAlgSHA256,
            "sha384": self._lib.kCCHmacAlgSHA384,
            "sha512": self._lib.kCCHmacAlgSHA512,
        }
        # Algorithm-name -> CommonCrypto PBKDF2 PRF constant (no MD5 here).
        self._supported_pbkdf2_hmac_algorithms = {
            "sha1": self._lib.kCCPRFHmacAlgSHA1,
            "sha224": self._lib.kCCPRFHmacAlgSHA224,
            "sha256": self._lib.kCCPRFHmacAlgSHA256,
            "sha384": self._lib.kCCPRFHmacAlgSHA384,
            "sha512": self._lib.kCCPRFHmacAlgSHA512,
        }

    def hash_supported(self, algorithm):
        """Return True when ``algorithm`` has a digest mapping above."""
        return algorithm.name in self._hash_mapping

    def hmac_supported(self, algorithm):
        """Return True when ``algorithm`` can be used for HMAC."""
        return algorithm.name in self._supported_hmac_algorithms

    def create_hash_ctx(self, algorithm):
        """Create a new hash context bound to this backend."""
        return _HashContext(self, algorithm)

    def create_hmac_ctx(self, key, algorithm):
        """Create a new HMAC context for ``key`` bound to this backend."""
        return _HMACContext(self, key, algorithm)

    def cipher_supported(self, cipher, mode):
        """Return True when the (cipher, mode) pair has been registered."""
        return (type(cipher), type(mode)) in self._cipher_registry

    def create_symmetric_encryption_ctx(self, cipher, mode):
        """Create an encryption context; GCM gets its dedicated wrapper."""
        if isinstance(mode, GCM):
            return _GCMCipherContext(
                self, cipher, mode, self._lib.kCCEncrypt
            )
        else:
            return _CipherContext(self, cipher, mode, self._lib.kCCEncrypt)

    def create_symmetric_decryption_ctx(self, cipher, mode):
        """Create a decryption context; GCM gets its dedicated wrapper."""
        if isinstance(mode, GCM):
            return _GCMCipherContext(
                self, cipher, mode, self._lib.kCCDecrypt
            )
        else:
            return _CipherContext(self, cipher, mode, self._lib.kCCDecrypt)

    def pbkdf2_hmac_supported(self, algorithm):
        """Return True when ``algorithm`` can serve as the PBKDF2 PRF."""
        return algorithm.name in self._supported_pbkdf2_hmac_algorithms

    def derive_pbkdf2_hmac(self, algorithm, length, salt, iterations,
                           key_material):
        """Derive ``length`` bytes with CCKeyDerivationPBKDF and return them.

        Raises via _check_cipher_response when the C call reports failure.
        """
        alg_enum = self._supported_pbkdf2_hmac_algorithms[algorithm.name]
        buf = self._ffi.new("char[]", length)
        res = self._lib.CCKeyDerivationPBKDF(
            self._lib.kCCPBKDF2,
            key_material,
            len(key_material),
            salt,
            len(salt),
            alg_enum,
            iterations,
            buf,
            length
        )
        self._check_cipher_response(res)
        return self._ffi.buffer(buf)[:]

    def _register_cipher_adapter(self, cipher_cls, cipher_const, mode_cls,
                                 mode_const):
        """Record the CommonCrypto constants for one (cipher, mode) pair."""
        if (cipher_cls, mode_cls) in self._cipher_registry:
            raise ValueError("Duplicate registration for: {0} {1}.".format(
                cipher_cls, mode_cls)
            )
        self._cipher_registry[cipher_cls, mode_cls] = (cipher_const,
                                                       mode_const)

    def _register_default_ciphers(self):
        """Register every supported cipher/mode combination."""
        for mode_cls, mode_const in [
            (CBC, self._lib.kCCModeCBC),
            (ECB, self._lib.kCCModeECB),
            (CFB, self._lib.kCCModeCFB),
            (CFB8, self._lib.kCCModeCFB8),
            (OFB, self._lib.kCCModeOFB),
            (CTR, self._lib.kCCModeCTR),
            (GCM, self._lib.kCCModeGCM),
        ]:
            self._register_cipher_adapter(
                AES,
                self._lib.kCCAlgorithmAES128,
                mode_cls,
                mode_const
            )
        for mode_cls, mode_const in [
            (CBC, self._lib.kCCModeCBC),
            (ECB, self._lib.kCCModeECB),
            (CFB, self._lib.kCCModeCFB),
            (CFB8, self._lib.kCCModeCFB8),
            (OFB, self._lib.kCCModeOFB),
        ]:
            self._register_cipher_adapter(
                TripleDES,
                self._lib.kCCAlgorithm3DES,
                mode_cls,
                mode_const
            )
        for mode_cls, mode_const in [
            (CBC, self._lib.kCCModeCBC),
            (ECB, self._lib.kCCModeECB),
            (CFB, self._lib.kCCModeCFB),
            (OFB, self._lib.kCCModeOFB)
        ]:
            self._register_cipher_adapter(
                Blowfish,
                self._lib.kCCAlgorithmBlowfish,
                mode_cls,
                mode_const
            )
        for mode_cls, mode_const in [
            (CBC, self._lib.kCCModeCBC),
            (ECB, self._lib.kCCModeECB),
            (CFB, self._lib.kCCModeCFB),
            (OFB, self._lib.kCCModeOFB),
            (CTR, self._lib.kCCModeCTR)
        ]:
            self._register_cipher_adapter(
                CAST5,
                self._lib.kCCAlgorithmCAST,
                mode_cls,
                mode_const
            )
        # ARC4 is a stream cipher: it takes no mode object (type(None)).
        self._register_cipher_adapter(
            ARC4,
            self._lib.kCCAlgorithmRC4,
            type(None),
            self._lib.kCCModeRC4
        )

    def _check_cipher_response(self, response):
        """Translate a CommonCrypto status code into a Python exception."""
        if response == self._lib.kCCSuccess:
            return
        elif response == self._lib.kCCAlignmentError:
            # This error is not currently triggered due to a bug filed as
            # rdar://15589470
            raise ValueError(
                "The length of the provided data is not a multiple of "
                "the block length."
            )
        else:
            raise InternalError(
                "The backend returned an unknown error, consider filing a bug."
                " Code: {0}.".format(response),
                response
            )

    def _release_cipher_ctx(self, ctx):
        """
        Called by the garbage collector and used to safely dereference and
        release the context.
        """
        if ctx[0] != self._ffi.NULL:
            res = self._lib.CCCryptorRelease(ctx[0])
            self._check_cipher_response(res)
            ctx[0] = self._ffi.NULL
# Shared module-level Backend instance.
backend = Backend()
|
import os
import platform
import sys
from logging.handlers import SysLogHandler
LOG_LEVELS = ['DEBUG', 'INFO', 'WARNING', 'ERROR', 'CRITICAL']
def get_logger_config(log_dir,
                      logging_env="no_env",
                      tracking_filename="tracking.log",
                      edx_filename="edx.log",
                      dev_env=False,
                      syslog_addr=None,
                      debug=False,
                      local_loglevel='INFO',
                      console_loglevel=None,
                      service_variant=None):
    """
    Build and return the logging config dictionary; assign the result to the
    LOGGING setting. Returning a dict (instead of configuring logging
    directly) keeps the function safe to call repeatedly while settings are
    being extended.

    With dev_env=True logs are written to rotating files under ``log_dir``
    (using ``tracking_filename`` / ``edx_filename``); otherwise both the
    'local' and 'tracking' handlers go through the local rsyslogd socket and
    the filenames are ignored.
    """
    # Normalize requested levels, reverting to sane defaults on bad input.
    valid_levels = ('DEBUG', 'INFO', 'WARNING', 'ERROR', 'CRITICAL')
    if local_loglevel not in valid_levels:
        local_loglevel = 'INFO'
    if console_loglevel is None or console_loglevel not in valid_levels:
        console_loglevel = 'DEBUG' if debug else 'INFO'

    # A blank service variant avoids logging to a sub directory when
    # SERVICE_VARIANT is not set.
    service_variant = '' if service_variant is None else service_variant

    hostname = platform.node().split(".")[0]
    syslog_format = (
        "[service_variant={service_variant}]"
        "[%(name)s][env:{logging_env}] %(levelname)s "
        "[{hostname} %(process)d] [%(filename)s:%(lineno)d] "
        "- %(message)s"
    ).format(service_variant=service_variant,
             logging_env=logging_env,
             hostname=hostname)

    if debug:
        handlers = ['console', 'local']
    else:
        handlers = ['console', 'syslogger-remote', 'local']

    logger_config = {
        'version': 1,
        'disable_existing_loggers': False,
        'formatters': {
            'standard': {
                'format': '%(asctime)s %(levelname)s %(process)d '
                          '[%(name)s] %(filename)s:%(lineno)d - %(message)s',
            },
            'syslog_format': {'format': syslog_format},
            'raw': {'format': '%(message)s'},
        },
        'handlers': {
            'console': {
                'level': console_loglevel,
                'class': 'logging.StreamHandler',
                'formatter': 'standard',
                'stream': sys.stderr,
            },
            'syslogger-remote': {
                'level': 'INFO',
                'class': 'logging.handlers.SysLogHandler',
                'address': syslog_addr,
                'formatter': 'syslog_format',
            },
            'newrelic': {
                'level': 'ERROR',
                'class': 'lms.lib.newrelic_logging.NewRelicHandler',
                'formatter': 'raw',
            }
        },
        'loggers': {
            'tracking': {
                'handlers': ['tracking'],
                'level': 'DEBUG',
                'propagate': False,
            },
            '': {
                'handlers': handlers,
                'level': 'DEBUG',
                'propagate': False
            },
        }
    }

    if dev_env:
        # Development: drop both log streams into rotating files in log_dir.
        logger_config['handlers']['local'] = {
            'class': 'logging.handlers.RotatingFileHandler',
            'level': local_loglevel,
            'formatter': 'standard',
            'filename': os.path.join(log_dir, edx_filename),
            'maxBytes': 1024 * 1024 * 2,
            'backupCount': 5,
        }
        logger_config['handlers']['tracking'] = {
            'level': 'DEBUG',
            'class': 'logging.handlers.RotatingFileHandler',
            'filename': os.path.join(log_dir, tracking_filename),
            'formatter': 'raw',
            'maxBytes': 1024 * 1024 * 2,
            'backupCount': 5,
        }
    else:
        # Production: cap the root logger at INFO and hand both streams to
        # the local rsyslogd socket.
        logger_config['loggers']['']['level'] = 'INFO'
        logger_config['handlers']['local'] = {
            'level': local_loglevel,
            'class': 'logging.handlers.SysLogHandler',
            'address': '/dev/log',
            'formatter': 'syslog_format',
            'facility': SysLogHandler.LOG_LOCAL0,
        }
        logger_config['handlers']['tracking'] = {
            'level': 'DEBUG',
            'class': 'logging.handlers.SysLogHandler',
            'address': '/dev/log',
            'facility': SysLogHandler.LOG_LOCAL1,
            'formatter': 'raw',
        }

    return logger_config
|
from nose.tools import ok_
from nose.tools import eq_
import networkx as nx
from networkx.algorithms.approximation import min_weighted_dominating_set
from networkx.algorithms.approximation import min_edge_dominating_set
class TestMinWeightDominatingSet:
    """Tests for the dominating-set approximation algorithms."""

    def _check_edge_dominating_set(self, graph, dom_set):
        """Assert every edge of ``graph`` is in ``dom_set`` or shares an
        endpoint with some edge in ``dom_set``.

        Fixes two issues with the previous inline checks: the logic was
        copy-pasted twice, and it only compared endpoint ``u`` against the
        dominating edges (ignoring ``v``), which could fail on edges that
        are dominated only through their second endpoint.
        """
        for edge in graph.edges_iter():
            if edge in dom_set:
                continue
            u, v = edge
            found = any(u in dom_edge or v in dom_edge for dom_edge in dom_set)
            ok_(found, "Non adjacent edge found!")

    def test_min_weighted_dominating_set(self):
        graph = nx.Graph()
        graph.add_edge(1, 2)
        graph.add_edge(1, 5)
        graph.add_edge(2, 3)
        graph.add_edge(2, 5)
        graph.add_edge(3, 4)
        graph.add_edge(3, 6)
        graph.add_edge(5, 6)
        vertices = set([1, 2, 3, 4, 5, 6])
        # due to ties, this might be hard to test tight bounds
        dom_set = min_weighted_dominating_set(graph)
        # Every vertex outside the set must have a neighbor inside it.
        for vertex in vertices - dom_set:
            neighbors = set(graph.neighbors(vertex))
            ok_(len(neighbors & dom_set) > 0, "Non dominating set found!")

    def test_star_graph(self):
        """Tests that an approximate dominating set for the star graph,
        even when the center node does not have the smallest integer
        label, gives just the center node.
        For more information, see #1527.
        """
        # Create a star graph in which the center node has the highest
        # label instead of the lowest.
        G = nx.star_graph(10)
        G = nx.relabel_nodes(G, {0: 9, 9: 0})
        eq_(min_weighted_dominating_set(G), {9})

    def test_min_edge_dominating_set(self):
        graph = nx.path_graph(5)
        self._check_edge_dominating_set(graph, min_edge_dominating_set(graph))
        graph = nx.complete_graph(10)
        self._check_edge_dominating_set(graph, min_edge_dominating_set(graph))
|
"""
Tests for file field behavior, and specifically #639, in which Model.save()
gets called *again* for each FileField. This test will fail if calling a
ModelForm's save() method causes Model.save() to be called more than once.
"""
from __future__ import absolute_import
import os
import shutil
from django.core.files.uploadedfile import SimpleUploadedFile
from django.utils import unittest
from .models import Photo, PhotoForm, temp_storage_dir
class Bug639Test(unittest.TestCase):

    def testBug639(self):
        """
        Simulate a file upload and check how many times Model.save() gets
        called.
        """
        # Grab an image for testing. Read it inside a context manager so the
        # file handle is closed deterministically instead of being leaked
        # until garbage collection.
        filename = os.path.join(os.path.dirname(__file__), "test.jpg")
        with open(filename, "rb") as fp:
            img = fp.read()
        # Fake a POST QueryDict and FILES MultiValueDict.
        data = {'title': 'Testing'}
        files = {"image": SimpleUploadedFile('test.jpg', img, 'image/jpeg')}
        form = PhotoForm(data=data, files=files)
        p = form.save()
        # Check the savecount stored on the object (see the model).
        self.assertEqual(p._savecount, 1)

    def tearDown(self):
        """
        Make sure to delete the "uploaded" file to avoid clogging /tmp.
        """
        p = Photo.objects.get()
        p.image.delete(save=False)
        shutil.rmtree(temp_storage_dir)
|
"""Generic thread tests.
Meant to be used by dummy_thread and thread. To allow for different modules
to be used, test_main() can be called with the module to use as the thread
implementation as its sole argument.
"""
import dummy_thread as _thread
import time
import Queue
import random
import unittest
from test import test_support
DELAY = 0 # Set > 0 when testing a module other than dummy_thread, such as
# the 'thread' module.
class LockTests(unittest.TestCase):
    """Test lock objects.

    By default these run against the dummy (single-threaded) implementation;
    DELAY stays 0 unless test_main() installs a real thread module.
    """

    def setUp(self):
        # Create a lock
        self.lock = _thread.allocate_lock()

    def test_initlock(self):
        #Make sure locks start locked
        self.failUnless(not self.lock.locked(),
                        "Lock object is not initialized unlocked.")

    def test_release(self):
        # Test self.lock.release()
        self.lock.acquire()
        self.lock.release()
        self.failUnless(not self.lock.locked(),
                        "Lock object did not release properly.")

    def test_improper_release(self):
        #Make sure release of an unlocked thread raises _thread.error
        self.failUnlessRaises(_thread.error, self.lock.release)

    def test_cond_acquire_success(self):
        #Make sure the conditional acquiring of the lock works.
        self.failUnless(self.lock.acquire(0),
                        "Conditional acquiring of the lock failed.")

    def test_cond_acquire_fail(self):
        #Test acquiring locked lock returns False
        self.lock.acquire(0)
        self.failUnless(not self.lock.acquire(0),
                        "Conditional acquiring of a locked lock incorrectly "
                        "succeeded.")

    def test_uncond_acquire_success(self):
        #Make sure unconditional acquiring of a lock works.
        self.lock.acquire()
        self.failUnless(self.lock.locked(),
                        "Uncondional locking failed.")

    def test_uncond_acquire_return_val(self):
        #Make sure that an unconditional locking returns True.
        self.failUnless(self.lock.acquire(1) is True,
                        "Unconditional locking did not return True.")

    def test_uncond_acquire_blocking(self):
        #Make sure that unconditional acquiring of a locked lock blocks.
        def delay_unlock(to_unlock, delay):
            """Hold on to lock for a set amount of time before unlocking."""
            time.sleep(delay)
            to_unlock.release()

        self.lock.acquire()
        start_time = int(time.time())
        # The helper thread releases the lock after DELAY seconds, which
        # unblocks the second acquire() below.
        _thread.start_new_thread(delay_unlock,(self.lock, DELAY))
        if test_support.verbose:
            print
            print "*** Waiting for thread to release the lock "\
                  "(approx. %s sec.) ***" % DELAY
        self.lock.acquire()
        end_time = int(time.time())
        if test_support.verbose:
            print "done"
        self.failUnless((end_time - start_time) >= DELAY,
                        "Blocking by unconditional acquiring failed.")
class MiscTests(unittest.TestCase):
    """Miscellaneous tests."""

    def test_exit(self):
        #Make sure _thread.exit() raises SystemExit
        self.failUnlessRaises(SystemExit, _thread.exit)

    def test_ident(self):
        #Test sanity of _thread.get_ident()
        self.failUnless(isinstance(_thread.get_ident(), int),
                        "_thread.get_ident() returned a non-integer")
        self.failUnless(_thread.get_ident() != 0,
                        "_thread.get_ident() returned 0")

    def test_LockType(self):
        #Make sure _thread.LockType is the same type as _thread.allocate_locke()
        self.failUnless(isinstance(_thread.allocate_lock(), _thread.LockType),
                        "_thread.LockType is not an instance of what is "
                        "returned by _thread.allocate_lock()")

    def test_interrupt_main(self):
        #Calling start_new_thread with a function that executes interrupt_main
        # should raise KeyboardInterrupt upon completion.
        # NOTE(review): with dummy_thread the target presumably runs
        # synchronously, so the interrupt surfaces here -- confirm if a real
        # thread module is substituted.
        def call_interrupt():
            _thread.interrupt_main()

        self.failUnlessRaises(KeyboardInterrupt, _thread.start_new_thread,
                              call_interrupt, tuple())

    def test_interrupt_in_main(self):
        # Make sure that if interrupt_main is called in main threat that
        # KeyboardInterrupt is raised instantly.
        self.failUnlessRaises(KeyboardInterrupt, _thread.interrupt_main)
class ThreadTests(unittest.TestCase):
    """Test thread creation."""

    def test_arg_passing(self):
        #Make sure that parameter passing works.
        def arg_tester(queue, arg1=False, arg2=False):
            """Use to test _thread.start_new_thread() passes args properly."""
            queue.put((arg1, arg2))

        testing_queue = Queue.Queue(1)
        # Positional args via a tuple.
        _thread.start_new_thread(arg_tester, (testing_queue, True, True))
        result = testing_queue.get()
        self.failUnless(result[0] and result[1],
                        "Argument passing for thread creation using tuple failed")
        # Keyword args only.
        _thread.start_new_thread(arg_tester, tuple(), {'queue':testing_queue,
                                                       'arg1':True, 'arg2':True})
        result = testing_queue.get()
        self.failUnless(result[0] and result[1],
                        "Argument passing for thread creation using kwargs failed")
        # Mix of positional and keyword args.
        _thread.start_new_thread(arg_tester, (testing_queue, True), {'arg2':True})
        result = testing_queue.get()
        self.failUnless(result[0] and result[1],
                        "Argument passing for thread creation using both tuple"
                        " and kwargs failed")

    def test_multi_creation(self):
        #Make sure multiple threads can be created.
        def queue_mark(queue, delay):
            """Wait for ``delay`` seconds and then put something into ``queue``"""
            time.sleep(delay)
            queue.put(_thread.get_ident())

        thread_count = 5
        testing_queue = Queue.Queue(thread_count)
        if test_support.verbose:
            print
            print "*** Testing multiple thread creation "\
                  "(will take approx. %s to %s sec.) ***" % (DELAY, thread_count)
        for count in xrange(thread_count):
            if DELAY:
                local_delay = round(random.random(), 1)
            else:
                local_delay = 0
            _thread.start_new_thread(queue_mark,
                                     (testing_queue, local_delay))
        # Give the spawned threads time to finish before counting results.
        time.sleep(DELAY)
        if test_support.verbose:
            print 'done'
        self.failUnless(testing_queue.qsize() == thread_count,
                        "Not all %s threads executed properly after %s sec." %
                        (thread_count, DELAY))
def test_main(imported_module=None):
    """Run the lock/misc/thread test cases.

    When ``imported_module`` is given it replaces the module-global
    ``_thread`` implementation, and DELAY is raised to 2 seconds so real
    threads have time to run.
    """
    global _thread, DELAY
    if imported_module:
        _thread = imported_module
        DELAY = 2
    if test_support.verbose:
        print
        print "*** Using %s as _thread module ***" % _thread
    test_support.run_unittest(LockTests, MiscTests, ThreadTests)

if __name__ == '__main__':
    test_main()
|
# Formula-interface aliases: each lowercase name is bound to the matching
# model class's ``from_formula`` constructor, so models can be built from a
# formula string instead of design matrices.
from statsmodels.regression.linear_model import GLS
gls = GLS.from_formula

from statsmodels.regression.linear_model import WLS
wls = WLS.from_formula

from statsmodels.regression.linear_model import OLS
ols = OLS.from_formula

from statsmodels.regression.linear_model import GLSAR
glsar = GLSAR.from_formula

from statsmodels.regression.mixed_linear_model import MixedLM
mixedlm = MixedLM.from_formula

from statsmodels.genmod.generalized_linear_model import GLM
glm = GLM.from_formula

from statsmodels.robust.robust_linear_model import RLM
rlm = RLM.from_formula

from statsmodels.discrete.discrete_model import MNLogit
mnlogit = MNLogit.from_formula

from statsmodels.discrete.discrete_model import Logit
logit = Logit.from_formula

from statsmodels.discrete.discrete_model import Probit
probit = Probit.from_formula

from statsmodels.discrete.discrete_model import Poisson
poisson = Poisson.from_formula

from statsmodels.discrete.discrete_model import NegativeBinomial
negativebinomial = NegativeBinomial.from_formula

from statsmodels.regression.quantile_regression import QuantReg
quantreg = QuantReg.from_formula

from statsmodels.duration.hazard_regression import PHReg
phreg = PHReg.from_formula

# The three GEE variants share one import statement.
from statsmodels.genmod.generalized_estimating_equations import (GEE,
    OrdinalGEE, NominalGEE)
gee = GEE.from_formula
ordinal_gee = OrdinalGEE.from_formula
nominal_gee = NominalGEE.from_formula
|
from nova.api.openstack import extensions
from nova.api.openstack import wsgi
from nova import network
ALIAS = 'os-floating-ip-pools'
authorize = extensions.os_compute_authorizer(ALIAS)
def _translate_floating_ip_view(pool_name):
return {
'name': pool_name,
}
def _translate_floating_ip_pools_view(pools):
    """Wrap each pool name's view in the 'floating_ip_pools' envelope."""
    views = [_translate_floating_ip_view(name) for name in pools]
    return {'floating_ip_pools': views}
class FloatingIPPoolsController(wsgi.Controller):
    """The Floating IP Pool API controller for the OpenStack API."""

    def __init__(self):
        # Policy is enforced at this API layer via authorize() in index(),
        # so the duplicate check inside the network API is skipped.
        self.network_api = network.API(skip_policy_check=True)
        super(FloatingIPPoolsController, self).__init__()

    @extensions.expected_errors(())
    def index(self, req):
        """Return a list of pools."""
        context = req.environ['nova.context']
        authorize(context)
        pools = self.network_api.get_floating_ip_pools(context)
        return _translate_floating_ip_pools_view(pools)
class FloatingIpPools(extensions.V21APIExtensionBase):
    """Floating IPs support."""

    name = "FloatingIpPools"
    alias = ALIAS
    version = 1

    def get_resources(self):
        """Expose the floating IP pools controller as a v2.1 resource."""
        controller = FloatingIPPoolsController()
        return [extensions.ResourceExtension(ALIAS, controller)]

    def get_controller_extensions(self):
        """Required abstract hook of V21APIExtensionBase; this extension
        adds no controller extensions, so return an empty list."""
        return []
|
import os

# Target architecture / core for the EFM32 build.
ARCH = 'arm'
CPU = 'cortex-m3'

# Cross toolchain selection; may be overridden via the RTT_CC env var.
CROSS_TOOL = 'gcc'
if os.getenv('RTT_CC'):
    CROSS_TOOL = os.getenv('RTT_CC')

if CROSS_TOOL == 'gcc':
    PLATFORM = 'gcc'
    # Raw strings are required: in the previous non-raw literals the '\b'
    # of '...\bin' was interpreted as a backspace escape, corrupting the
    # default toolchain path.
    EXEC_PATH = r'C:\Program Files (x86)\CodeSourcery\Sourcery G++ Lite\bin'
    #EXEC_PATH = r'C:\Program Files (x86)\yagarto\bin'
elif CROSS_TOOL == 'keil':
    # print() calls behave identically under Python 2 and 3 here.
    print('================ERROR============================')
    print('Not support keil yet!')
    print('=================================================')
    exit(0)
elif CROSS_TOOL == 'iar':
    print('================ERROR============================')
    print('Not support iar yet!')
    print('=================================================')
    exit(0)

# Allow the toolchain location to be overridden from the environment.
if os.getenv('RTT_EXEC_PATH'):
    EXEC_PATH = os.getenv('RTT_EXEC_PATH')

BUILD = 'debug'

# Board selection drives the EFM32 family / part number / LCD settings.
EFM32_BOARD = 'EFM32GG_DK3750'
if EFM32_BOARD == 'EFM32_G8XX_STK':
    EFM32_FAMILY = 'Gecko'
    EFM32_TYPE = 'EFM32G890F128'
    EFM32_LCD = 'none'
elif EFM32_BOARD == 'EFM32_GXXX_DK':
    EFM32_FAMILY = 'Gecko'
    EFM32_TYPE = 'EFM32G290F128'
    EFM32_LCD = 'none'
elif EFM32_BOARD == 'EFM32GG_DK3750':
    EFM32_FAMILY = 'Giant Gecko'
    EFM32_TYPE = 'EFM32GG990F1024'
    EFM32_LCD = 'LCD_DIRECT'

if PLATFORM == 'gcc':
    # toolchains
    PREFIX = 'arm-none-eabi-'
    CC = PREFIX + 'gcc'
    AS = PREFIX + 'gcc'
    AR = PREFIX + 'ar'
    LINK = PREFIX + 'gcc'
    TARGET_EXT = 'axf'
    SIZE = PREFIX + 'size'
    OBJDUMP = PREFIX + 'objdump'
    OBJCPY = PREFIX + 'objcopy'

    DEVICE = ' -mcpu=cortex-m3 -mthumb -ffunction-sections -fdata-sections'
    CFLAGS = DEVICE
    AFLAGS = ' -c' + DEVICE + ' -x assembler-with-cpp'
    LFLAGS = DEVICE + ' -Wl,--gc-sections,-Map=rtthread-efm32.map,-cref,-u,__cs3_reset -T'
    # Pick the linker script matching the selected board's flash layout.
    if EFM32_BOARD == 'EFM32_G8XX_STK' or EFM32_BOARD == 'EFM32_GXXX_DK':
        LFLAGS += ' efm32g_rom.ld'
    elif EFM32_BOARD == 'EFM32GG_DK3750':
        LFLAGS += ' efm32gg_rom.ld'

    CPATH = ''
    LPATH = ''

    if BUILD == 'debug':
        CFLAGS += ' -O0 -gdwarf-2'
        AFLAGS += ' -gdwarf-2'
    else:
        CFLAGS += ' -O2'

    POST_ACTION = OBJCPY + ' -O binary $TARGET rtthread.bin\n' + SIZE + ' $TARGET \n'
|
"""engine.SCons.Platform.darwin
Platform-specific initialization for Mac OS X systems.
There normally shouldn't be any need to import this module directly. It
will usually be imported through the generic SCons.Platform.Platform()
selection method.
"""
__revision__ = "src/engine/SCons/Platform/darwin.py 5023 2010/06/14 22:05:46 scons"
import posix
def generate(env):
    """Initialize Mac OS X construction variables on top of the POSIX
    defaults, then override the shared-library suffix and extend PATH with
    the Fink bin directory."""
    posix.generate(env)
    env['SHLIBSUFFIX'] = '.dylib'
    env['ENV']['PATH'] += ':/sw/bin'
|
"""Example: statsmodels.OLS
"""
from statsmodels.datasets.longley import load
import statsmodels.api as sm
from statsmodels.iolib.table import SimpleTable, default_txt_fmt
import numpy as np
# Fit OLS on the Longley data, then refit while dropping one explanatory
# variable at a time to show parameter sensitivity.
data = load()
data_orig = (data.endog.copy(), data.exog.copy())

# rescale: 0 = none; >0 demeans; >1 also standardizes; 3 additionally
# rescales the estimated parameters back at the end.
rescale = 0
rescale_ratio = data.endog.std() / data.exog.std(0)
if rescale > 0:
    # rescaling
    data.endog -= data.endog.mean()
    data.exog -= data.exog.mean(0)
    if rescale > 1:
        data.endog /= data.endog.std()
        data.exog /= data.exog.std(0)
data.exog = sm.tools.add_constant(data.exog, prepend=False)
ols_model = sm.OLS(data.endog, data.exog)
ols_results = ols_model.fit()

# One column per fit: column 0 uses all regressors, column i+1 drops the
# i-th regressor; NaN marks each dropped coefficient.
resparams = np.nan * np.ones((7, 7))
res = sm.OLS(data.endog, data.exog).fit()
resparams[:, 0] = res.params

# list() is required: on Python 3 ``range(7)`` is not a list, so the
# ``del ind[i]`` below would raise TypeError on a bare range object.
indall = list(range(7))
for i in range(6):
    ind = indall[:]
    del ind[i]
    res = sm.OLS(data.endog, data.exog[:, ind]).fit()
    resparams[ind, i + 1] = res.params
if rescale == 1:
    pass
if rescale == 3:
    resparams[:-1, :] *= rescale_ratio[:, None]

# Render the coefficient table, replacing NaNs with a centered dash.
txt_fmt1 = default_txt_fmt
numformat = '%10.4f'
txt_fmt1 = dict(data_fmts=[numformat])
rowstubs = data.names[1:] + ['const']
headers = ['all'] + ['drop %s' % name for name in data.names[1:]]
tabl = SimpleTable(resparams, headers, rowstubs, txt_fmt=txt_fmt1)
nanstring = numformat % np.nan
nn = len(nanstring)
nanrep = ' ' * (nn - 1)
nanrep = nanrep[:nn // 2] + '-' + nanrep[nn // 2:]
print('Longley data - sensitivity to dropping an explanatory variable')
print(str(tabl).replace(nanstring, nanrep))
|
import optparse
import os
import subprocess
import sys
from util import build_utils
def DoGcc(options):
    """Preprocess the template with the host gcc and write the result.

    Ensures the output directory exists, then runs gcc in
    preprocess-only mode over ``options.template``.
    """
    build_utils.MakeDirectory(os.path.dirname(options.output))
    cmd = ['gcc']               # invoke host gcc.
    cmd += ['-E']               # stop after preprocessing.
    cmd += ['-D', 'ANDROID']    # Specify ANDROID define for pre-processor.
    cmd += ['-x', 'c-header']   # treat sources as C header files
    cmd += ['-P']               # disable line markers, i.e. '#line 309'
    cmd += ['-I', options.include_path]
    cmd += ['-o', options.output]
    cmd.append(options.template)
    build_utils.CheckCallDie(cmd)
def main(argv):
    """Parse command-line flags, run the gcc preprocessing step, and touch
    the stamp file on success.

    ``argv`` is the full argument vector (argv[0] is the program name).
    """
    parser = optparse.OptionParser()
    parser.add_option('--include-path', help='Include path for gcc.')
    parser.add_option('--template', help='Path to template.')
    parser.add_option('--output', help='Path for generated file.')
    parser.add_option('--stamp', help='Path to touch on success.')
    # TODO(newt): remove this once http://crbug.com/177552 is fixed in ninja.
    parser.add_option('--ignore', help='Ignored.')
    # Parse the argv actually passed in instead of silently re-reading
    # sys.argv, so the parameter is honored (identical behavior for the
    # main(sys.argv) call below).
    options, _ = parser.parse_args(argv[1:])

    DoGcc(options)

    if options.stamp:
        build_utils.Touch(options.stamp)


if __name__ == '__main__':
    sys.exit(main(sys.argv))
|
from framework.routing import Rule, json_renderer
from website.addons.github import views
# JSON routes for the GitHub addon: settings, archive downloads, webhook
# callback and OAuth management. All rules are mounted under /api/v1.
settings_routes = {
    'rules': [
        # Configuration
        Rule(
            [
                '/project/<pid>/github/settings/',
                '/project/<pid>/node/<nid>/github/settings/',
            ],
            'post',
            views.config.github_set_config,
            json_renderer,
        ),
        # Repo archive downloads; the same view handles both formats via
        # the 'archive' keyword argument.
        Rule(
            [
                '/project/<pid>/github/tarball/',
                '/project/<pid>/node/<nid>/github/tarball/',
            ],
            'get',
            views.crud.github_download_starball,
            json_renderer,
            {'archive': 'tar'},
            endpoint_suffix='__tar',
        ),
        Rule(
            [
                '/project/<pid>/github/zipball/',
                '/project/<pid>/node/<nid>/github/zipball/',
            ],
            'get',
            views.crud.github_download_starball,
            json_renderer,
            {'archive': 'zip'},
            endpoint_suffix='__zip',
        ),
        # Webhook callback posted by GitHub.
        Rule(
            [
                '/project/<pid>/github/hook/',
                '/project/<pid>/node/<nid>/github/hook/',
            ],
            'post',
            views.hooks.github_hook_callback,
            json_renderer,
        ),
        # OAuth: User
        Rule(
            '/settings/github/oauth/',
            'get',
            views.auth.github_oauth_start,
            json_renderer,
            endpoint_suffix='__user',
        ),
        Rule(
            '/settings/github/oauth/',
            'delete',
            views.auth.github_oauth_delete_user,
            json_renderer,
        ),
        # OAuth: Node
        Rule(
            [
                '/project/<pid>/github/oauth/',
                '/project/<pid>/node/<nid>/github/oauth/',
            ],
            'get',
            views.auth.github_oauth_start,
            json_renderer,
        ),
        Rule(
            [
                '/project/<pid>/github/user_auth/',
                '/project/<pid>/node/<nid>/github/user_auth/',
            ],
            'post',
            views.auth.github_add_user_auth,
            json_renderer,
        ),
        Rule(
            [
                '/project/<pid>/github/oauth/',
                '/project/<pid>/node/<nid>/github/oauth/',
                '/project/<pid>/github/config/',
                '/project/<pid>/node/<nid>/github/config/'
            ],
            'delete',
            views.auth.github_oauth_deauthorize_node,
            json_renderer,
        ),
        # OAuth: General
        Rule(
            [
                '/addons/github/callback/<uid>/',
                '/addons/github/callback/<uid>/<nid>/',
            ],
            'get',
            views.auth.github_oauth_callback,
            json_renderer,
        ),
    ],
    'prefix': '/api/v1',
}
# Additional JSON API routes: repository creation and the file-grid
# (HGrid) root listing.
api_routes = {
    'rules': [
        Rule(
            '/github/repo/create/',
            'post',
            views.repos.github_create_repo,
            json_renderer,
        ),
        Rule(
            [
                '/project/<pid>/github/hgrid/root/',
                '/project/<pid>/node/<nid>/github/hgrid/root/',
            ],
            'get',
            views.hgrid.github_root_folder_public,
            json_renderer,
        ),
    ],
    'prefix': '/api/v1'
}
|
class PythonVarArgsConstructor:
    """Stores one mandatory constructor argument plus any number of
    variable arguments."""

    def __init__(self, mandatory, *varargs):
        self.mandatory = mandatory
        self.varargs = varargs

    def get_args(self):
        """Return the mandatory argument and the varargs joined by spaces."""
        joined = ' '.join(self.varargs)
        return self.mandatory, joined
|
from __future__ import print_function, division
from sympy.core.numbers import nan
from .function import Function
class Mod(Function):
    """Represents a modulo operation on symbolic expressions.
    Receives two arguments, dividend p and divisor q.
    The convention used is the same as Python's: the remainder always has the
    same sign as the divisor.
    Examples
    ========
    >>> from sympy.abc import x, y
    >>> x**2 % y
    Mod(x**2, y)
    >>> _.subs({x: 5, y: 6})
    1
    """

    @classmethod
    def eval(cls, p, q):
        # Automatic simplification hook: return a simplified expression, or
        # None to leave Mod(p, q) unevaluated.
        from sympy.core.add import Add
        from sympy.core.mul import Mul
        from sympy.core.singleton import S
        from sympy.core.exprtools import gcd_terms
        from sympy.polys.polytools import gcd

        def doit(p, q):
            """Try to return p % q if both are numbers or +/-p is known
            to be less than or equal q.
            """
            if p.is_infinite or q.is_infinite or p is nan or q is nan:
                return nan
            # Exact multiples / unit divisor reduce to zero.
            if (p == q or p == -q or
                    p.is_Pow and p.exp.is_Integer and p.base == q or
                    p.is_integer and q == 1):
                return S.Zero
            if q.is_Number:
                if p.is_Number:
                    return (p % q)
                if q == 2:
                    # Parity shortcut for Mod(p, 2).
                    if p.is_even:
                        return S.Zero
                    elif p.is_odd:
                        return S.One
            # by ratio
            r = p/q
            try:
                d = int(r)
            except TypeError:
                pass
            else:
                if type(d) is int:
                    rv = p - d*q
                    # Python convention: remainder takes the sign of q.
                    if (rv*q < 0) == True:
                        rv += q
                    return rv
            # by difference
            d = p - q
            if d.is_negative:
                if q.is_negative:
                    return d
                elif q.is_positive:
                    return p

        rv = doit(p, q)
        if rv is not None:
            return rv
        # denest
        if p.func is cls:
            # easy
            qinner = p.args[1]
            if qinner == q:
                return p
            # XXX other possibilities?
        # extract gcd; any further simplification should be done by the user
        G = gcd(p, q)
        if G != 1:
            p, q = [
                gcd_terms(i/G, clear=False, fraction=False) for i in (p, q)]
        pwas, qwas = p, q
        # simplify terms
        # (x + y + 2) % x -> Mod(y + 2, x)
        if p.is_Add:
            args = []
            for i in p.args:
                a = cls(i, q)
                # Keep the original term if wrapping it did not reduce
                # the number of Mod nodes.
                if a.count(cls) > i.count(cls):
                    args.append(i)
                else:
                    args.append(a)
            if args != list(p.args):
                p = Add(*args)
        else:
            # handle coefficients if they are not Rational
            # since those are not handled by factor_terms
            # e.g. Mod(.6*x, .3*y) -> 0.3*Mod(2*x, y)
            cp, p = p.as_coeff_Mul()
            cq, q = q.as_coeff_Mul()
            ok = False
            if not cp.is_Rational or not cq.is_Rational:
                r = cp % cq
                if r == 0:
                    G *= cq
                    p *= int(cp/cq)
                    ok = True
            if not ok:
                p = cp*p
                q = cq*q
        # simple -1 extraction
        if p.could_extract_minus_sign() and q.could_extract_minus_sign():
            G, p, q = [-i for i in (G, p, q)]
        # check again to see if p and q can now be handled as numbers
        rv = doit(p, q)
        if rv is not None:
            return rv*G
        # put 1.0 from G on inside
        if G.is_Float and G == 1:
            p *= G
            return cls(p, q, evaluate=False)
        elif G.is_Mul and G.args[0].is_Float and G.args[0] == 1:
            p = G.args[0]*p
            G = Mul._from_args(G.args[1:])
        # Only re-evaluate if extraction above changed p or q.
        return G*cls(p, q, evaluate=(p, q) != (pwas, qwas))

    def _eval_is_integer(self):
        # Mod of two integers is an integer provided the divisor is nonzero.
        from sympy.core.logic import fuzzy_and, fuzzy_not
        p, q = self.args
        if fuzzy_and([p.is_integer, q.is_integer, fuzzy_not(q.is_zero)]):
            return True

    def _eval_is_nonnegative(self):
        # Result carries the sign of the divisor (Python convention).
        if self.args[1].is_positive:
            return True

    def _eval_is_nonpositive(self):
        if self.args[1].is_negative:
            return True
|
"""
Unit tests for preference APIs.
"""
import datetime
import ddt
import unittest
from mock import patch
from pytz import UTC
from django.conf import settings
from django.contrib.auth.models import User
from django.test import TestCase
from django.test.utils import override_settings
from dateutil.parser import parse as parse_datetime
from student.tests.factories import UserFactory
from xmodule.modulestore.tests.django_utils import ModuleStoreTestCase
from xmodule.modulestore.tests.factories import CourseFactory
from ...accounts.api import create_account
from ...errors import UserNotFound, UserNotAuthorized, PreferenceValidationError, PreferenceUpdateError
from ...models import UserProfile, UserOrgTag
from ...preferences.api import (
get_user_preference, get_user_preferences, set_user_preference, update_user_preferences, delete_user_preference,
update_email_opt_in
)
@unittest.skipUnless(settings.ROOT_URLCONF == 'lms.urls', 'Account APIs are only supported in LMS')
class TestPreferenceAPI(TestCase):
    """
    These tests specifically cover the parts of the API methods that are not covered by test_views.py.
    This includes the specific types of error raised, and default behavior when optional arguments
    are not specified.
    """
    password = "test"

    def setUp(self):
        super(TestPreferenceAPI, self).setUp()
        self.user = UserFactory.create(password=self.password)
        self.different_user = UserFactory.create(password=self.password)
        self.staff_user = UserFactory(is_staff=True, password=self.password)
        # Create a real user, then point its (unsaved) username at a name that
        # does not exist in the database so lookups with it raise UserNotFound.
        self.no_such_user = UserFactory.create(password=self.password)
        self.no_such_user.username = "no_such_user"
        self.test_preference_key = "test_key"
        self.test_preference_value = "test_value"
        set_user_preference(self.user, self.test_preference_key, self.test_preference_value)

    def test_get_user_preference(self):
        """
        Verifies the basic behavior of get_user_preference.
        """
        self.assertEqual(
            get_user_preference(self.user, self.test_preference_key),
            self.test_preference_value
        )
        # Staff may read another user's preference via the username argument.
        self.assertEqual(
            get_user_preference(self.staff_user, self.test_preference_key, username=self.user.username),
            self.test_preference_value
        )

    def test_get_user_preference_errors(self):
        """
        Verifies that get_user_preference returns appropriate errors.
        """
        with self.assertRaises(UserNotFound):
            get_user_preference(self.user, self.test_preference_key, username="no_such_user")
        with self.assertRaises(UserNotFound):
            get_user_preference(self.no_such_user, self.test_preference_key)
        with self.assertRaises(UserNotAuthorized):
            get_user_preference(self.different_user, self.test_preference_key, username=self.user.username)

    def test_get_user_preferences(self):
        """
        Verifies the basic behavior of get_user_preferences.
        """
        expected_user_preferences = {
            self.test_preference_key: self.test_preference_value,
        }
        self.assertEqual(get_user_preferences(self.user), expected_user_preferences)
        self.assertEqual(get_user_preferences(self.staff_user, username=self.user.username), expected_user_preferences)

    def test_get_user_preferences_errors(self):
        """
        Verifies that get_user_preferences returns appropriate errors.
        """
        with self.assertRaises(UserNotFound):
            get_user_preferences(self.user, username="no_such_user")
        with self.assertRaises(UserNotFound):
            get_user_preferences(self.no_such_user)
        with self.assertRaises(UserNotAuthorized):
            get_user_preferences(self.different_user, username=self.user.username)

    def test_set_user_preference(self):
        """
        Verifies the basic behavior of set_user_preference.
        """
        # Non-ASCII keys/values must round-trip unchanged.
        test_key = u'ⓟⓡⓔⓕⓔⓡⓔⓝⓒⓔ_ⓚⓔⓨ'
        test_value = u'ǝnןɐʌ_ǝɔuǝɹǝɟǝɹd'
        set_user_preference(self.user, test_key, test_value)
        self.assertEqual(get_user_preference(self.user, test_key), test_value)
        set_user_preference(self.user, test_key, "new_value", username=self.user.username)
        self.assertEqual(get_user_preference(self.user, test_key), "new_value")

    @patch('openedx.core.djangoapps.user_api.models.UserPreference.save')
    def test_set_user_preference_errors(self, user_preference_save):
        """
        Verifies that set_user_preference returns appropriate errors.
        """
        with self.assertRaises(UserNotFound):
            set_user_preference(self.user, self.test_preference_key, "new_value", username="no_such_user")
        with self.assertRaises(UserNotFound):
            set_user_preference(self.no_such_user, self.test_preference_key, "new_value")
        with self.assertRaises(UserNotAuthorized):
            set_user_preference(self.staff_user, self.test_preference_key, "new_value", username=self.user.username)
        with self.assertRaises(UserNotAuthorized):
            set_user_preference(self.different_user, self.test_preference_key, "new_value", username=self.user.username)
        # Keys longer than the 255-character column limit must be rejected.
        too_long_key = "x" * 256
        with self.assertRaises(PreferenceValidationError) as context_manager:
            set_user_preference(self.user, too_long_key, "new_value")
        errors = context_manager.exception.preference_errors
        self.assertEqual(len(errors.keys()), 1)
        self.assertEqual(
            errors[too_long_key],
            {
                "developer_message": get_expected_validation_developer_message(too_long_key, "new_value"),
                "user_message": get_expected_key_error_user_message(too_long_key, "new_value"),
            }
        )
        # Empty and whitespace-only values are invalid.
        for empty_value in (None, "", "   "):
            with self.assertRaises(PreferenceValidationError) as context_manager:
                set_user_preference(self.user, self.test_preference_key, empty_value)
            errors = context_manager.exception.preference_errors
            self.assertEqual(len(errors.keys()), 1)
            self.assertEqual(
                errors[self.test_preference_key],
                {
                    "developer_message": get_empty_preference_message(self.test_preference_key),
                    "user_message": get_empty_preference_message(self.test_preference_key),
                }
            )
        # A failing model save must surface as PreferenceUpdateError.
        user_preference_save.side_effect = [Exception, None]
        with self.assertRaises(PreferenceUpdateError) as context_manager:
            set_user_preference(self.user, u"new_key_ȻħȺɍłɇs", u"new_value_ȻħȺɍłɇs")
        self.assertEqual(
            context_manager.exception.developer_message,
            u"Save failed for user preference 'new_key_ȻħȺɍłɇs' with value 'new_value_ȻħȺɍłɇs': "
        )
        self.assertEqual(
            context_manager.exception.user_message,
            u"Save failed for user preference 'new_key_ȻħȺɍłɇs' with value 'new_value_ȻħȺɍłɇs'."
        )

    def test_update_user_preferences(self):
        """
        Verifies the basic behavior of update_user_preferences.
        """
        expected_user_preferences = {
            self.test_preference_key: "new_value",
        }
        # Bug fix: this test previously called set_user_preference twice, so
        # update_user_preferences itself was never exercised here (and
        # expected_user_preferences was unused).
        update_user_preferences(self.user, expected_user_preferences)
        self.assertEqual(
            get_user_preference(self.user, self.test_preference_key),
            "new_value"
        )
        update_user_preferences(self.user, expected_user_preferences, username=self.user.username)
        self.assertEqual(
            get_user_preference(self.user, self.test_preference_key),
            "new_value"
        )

    @patch('openedx.core.djangoapps.user_api.models.UserPreference.delete')
    @patch('openedx.core.djangoapps.user_api.models.UserPreference.save')
    def test_update_user_preferences_errors(self, user_preference_save, user_preference_delete):
        """
        Verifies that update_user_preferences returns appropriate errors.
        """
        update_data = {
            self.test_preference_key: "new_value"
        }
        with self.assertRaises(UserNotFound):
            update_user_preferences(self.user, update_data, username="no_such_user")
        with self.assertRaises(UserNotFound):
            update_user_preferences(self.no_such_user, update_data)
        with self.assertRaises(UserNotAuthorized):
            update_user_preferences(self.staff_user, update_data, username=self.user.username)
        with self.assertRaises(UserNotAuthorized):
            update_user_preferences(self.different_user, update_data, username=self.user.username)
        too_long_key = "x" * 256
        with self.assertRaises(PreferenceValidationError) as context_manager:
            update_user_preferences(self.user, {too_long_key: "new_value"})
        errors = context_manager.exception.preference_errors
        self.assertEqual(len(errors.keys()), 1)
        self.assertEqual(
            errors[too_long_key],
            {
                "developer_message": get_expected_validation_developer_message(too_long_key, "new_value"),
                "user_message": get_expected_key_error_user_message(too_long_key, "new_value"),
            }
        )
        for empty_value in ("", "   "):
            with self.assertRaises(PreferenceValidationError) as context_manager:
                update_user_preferences(self.user, {self.test_preference_key: empty_value})
            errors = context_manager.exception.preference_errors
            self.assertEqual(len(errors.keys()), 1)
            self.assertEqual(
                errors[self.test_preference_key],
                {
                    "developer_message": get_empty_preference_message(self.test_preference_key),
                    "user_message": get_empty_preference_message(self.test_preference_key),
                }
            )
        # A failing save during bulk update surfaces as PreferenceUpdateError.
        user_preference_save.side_effect = [Exception, None]
        with self.assertRaises(PreferenceUpdateError) as context_manager:
            update_user_preferences(self.user, {self.test_preference_key: "new_value"})
        self.assertEqual(
            context_manager.exception.developer_message,
            u"Save failed for user preference 'test_key' with value 'new_value': "
        )
        self.assertEqual(
            context_manager.exception.user_message,
            u"Save failed for user preference 'test_key' with value 'new_value'."
        )
        # A None value requests deletion; a failing delete surfaces similarly.
        user_preference_delete.side_effect = [Exception, None]
        with self.assertRaises(PreferenceUpdateError) as context_manager:
            update_user_preferences(self.user, {self.test_preference_key: None})
        self.assertEqual(
            context_manager.exception.developer_message,
            u"Delete failed for user preference 'test_key': "
        )
        self.assertEqual(
            context_manager.exception.user_message,
            u"Delete failed for user preference 'test_key'."
        )

    def test_delete_user_preference(self):
        """
        Verifies the basic behavior of delete_user_preference.
        """
        self.assertTrue(delete_user_preference(self.user, self.test_preference_key))
        set_user_preference(self.user, self.test_preference_key, self.test_preference_value)
        self.assertTrue(delete_user_preference(self.user, self.test_preference_key, username=self.user.username))
        # Deleting a missing key is not an error; it just returns False.
        self.assertFalse(delete_user_preference(self.user, "no_such_key"))

    @patch('openedx.core.djangoapps.user_api.models.UserPreference.delete')
    def test_delete_user_preference_errors(self, user_preference_delete):
        """
        Verifies that delete_user_preference returns appropriate errors.
        """
        with self.assertRaises(UserNotFound):
            delete_user_preference(self.user, self.test_preference_key, username="no_such_user")
        with self.assertRaises(UserNotFound):
            delete_user_preference(self.no_such_user, self.test_preference_key)
        with self.assertRaises(UserNotAuthorized):
            delete_user_preference(self.staff_user, self.test_preference_key, username=self.user.username)
        with self.assertRaises(UserNotAuthorized):
            delete_user_preference(self.different_user, self.test_preference_key, username=self.user.username)
        user_preference_delete.side_effect = [Exception, None]
        with self.assertRaises(PreferenceUpdateError) as context_manager:
            delete_user_preference(self.user, self.test_preference_key)
        self.assertEqual(
            context_manager.exception.developer_message,
            u"Delete failed for user preference 'test_key': "
        )
        self.assertEqual(
            context_manager.exception.user_message,
            u"Delete failed for user preference 'test_key'."
        )
@ddt.ddt
class UpdateEmailOptInTests(ModuleStoreTestCase):
    """
    Tests for update_email_opt_in, which records a user's email opt-in
    choice for a course's org, subject to a minimum-age requirement.
    """
    USERNAME = u'frank-underwood'
    PASSWORD = u'ṕáśśẃőŕd'
    EMAIL = u'frank+underwood@example.com'

    @ddt.data(
        # Check that a 27 year old can opt-in
        (27, True, u"True"),
        # Check that a 32-year old can opt-out
        (32, False, u"False"),
        # Check that someone 14 years old can opt-in
        (14, True, u"True"),
        # Check that someone 13 years old cannot opt-in (must have turned 13 before this year)
        (13, True, u"False"),
        # Check that someone 12 years old cannot opt-in
        (12, True, u"False")
    )
    @ddt.unpack
    @override_settings(EMAIL_OPTIN_MINIMUM_AGE=13)
    def test_update_email_optin(self, age, option, expected_result):
        """Verify opt-in/opt-out behavior for ages around the minimum."""
        # Create the course and account.
        course = CourseFactory.create()
        create_account(self.USERNAME, self.PASSWORD, self.EMAIL)
        # Set year of birth
        user = User.objects.get(username=self.USERNAME)
        profile = UserProfile.objects.get(user=user)
        # Consistency fix: use a UTC-aware "now" like test_change_email_optin
        # does; the original mixed naive and aware datetimes between the two.
        year_of_birth = datetime.datetime.now(UTC).year - age  # pylint: disable=maybe-no-member
        profile.year_of_birth = year_of_birth
        profile.save()
        update_email_opt_in(user, course.id.org, option)
        result_obj = UserOrgTag.objects.get(user=user, org=course.id.org, key='email-optin')
        self.assertEqual(result_obj.value, expected_result)

    def test_update_email_optin_no_age_set(self):
        """The API still works (treats the user as old enough) if no age is set."""
        # Create the course and account.
        course = CourseFactory.create()
        create_account(self.USERNAME, self.PASSWORD, self.EMAIL)
        user = User.objects.get(username=self.USERNAME)
        update_email_opt_in(user, course.id.org, True)
        result_obj = UserOrgTag.objects.get(user=user, org=course.id.org, key='email-optin')
        self.assertEqual(result_obj.value, u"True")

    def test_update_email_optin_anonymous_user(self):
        """Verify that the API raises an exception for a user with no profile."""
        course = CourseFactory.create()
        no_profile_user, __ = User.objects.get_or_create(username="no_profile_user", password=self.PASSWORD)
        with self.assertRaises(UserNotFound):
            update_email_opt_in(no_profile_user, course.id.org, True)

    @ddt.data(
        # Check that a 27 year old can opt-in, then out.
        (27, True, False, u"False"),
        # Check that a 32-year old can opt-out, then in.
        (32, False, True, u"True"),
        # Check that someone 13 years old can opt-in, then out.
        (13, True, False, u"False"),
        # Check that someone 12 years old cannot opt-in, then explicitly out.
        (12, True, False, u"False")
    )
    @ddt.unpack
    @override_settings(EMAIL_OPTIN_MINIMUM_AGE=13)
    def test_change_email_optin(self, age, option, second_option, expected_result):
        """Verify that the second opt-in/opt-out choice overwrites the first."""
        # Create the course and account.
        course = CourseFactory.create()
        create_account(self.USERNAME, self.PASSWORD, self.EMAIL)
        # Set year of birth
        user = User.objects.get(username=self.USERNAME)
        profile = UserProfile.objects.get(user=user)
        year_of_birth = datetime.datetime.now(UTC).year - age  # pylint: disable=maybe-no-member
        profile.year_of_birth = year_of_birth
        profile.save()
        update_email_opt_in(user, course.id.org, option)
        update_email_opt_in(user, course.id.org, second_option)
        result_obj = UserOrgTag.objects.get(user=user, org=course.id.org, key='email-optin')
        self.assertEqual(result_obj.value, expected_result)

    def _assert_is_datetime(self, timestamp):
        """Return True when |timestamp| parses as an ISO datetime, else False."""
        if not timestamp:
            return False
        try:
            parse_datetime(timestamp)
        except ValueError:
            return False
        else:
            return True
def get_expected_validation_developer_message(preference_key, preference_value):
    """
    Returns the expected developer message raised when a preference key
    fails validation (here: exceeds the 255-character limit).
    """
    # The error payload mirrors what Django model validation reports.
    validation_error = {
        "key": [u"Ensure this value has at most 255 characters (it has 256)."]
    }
    return u"Value '{preference_value}' not valid for preference '{preference_key}': {error}".format(
        preference_key=preference_key,
        preference_value=preference_value,
        error=validation_error,
    )
def get_expected_key_error_user_message(preference_key, preference_value):
    """
    Returns the expected user-facing message for an invalid preference key.
    Note: preference_value is accepted for signature parity but not used.
    """
    template = u"Invalid user preference key '{preference_key}'."
    return template.format(preference_key=preference_key)
def get_empty_preference_message(preference_key):
    """
    Returns the validation message shown when a preference is set to an
    empty (or whitespace-only) value.
    """
    template = "Preference '{preference_key}' cannot be set to an empty value."
    return template.format(preference_key=preference_key)
|
# Callback template instantiations for ns-3 Python bindings; each row is
# [return type, arg1, ..., arg9], padded to nine arguments with 'ns3::empty'.
# NOTE(review): this looks like generated binding data — edit the generator,
# not this list, if signatures change (TODO confirm provenance).
callback_classes = [
    ['bool', 'ns3::Ptr<ns3::NetDevice>', 'ns3::Ptr<ns3::Packet const>', 'unsigned short', 'ns3::Address const&', 'ns3::Address const&', 'ns3::NetDevice::PacketType', 'ns3::empty', 'ns3::empty', 'ns3::empty'],
    ['bool', 'ns3::Ptr<ns3::NetDevice>', 'ns3::Ptr<ns3::Packet const>', 'unsigned short', 'ns3::Address const&', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty'],
    ['bool', 'ns3::Ptr<ns3::Packet>', 'ns3::Address const&', 'ns3::Address const&', 'unsigned short', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty'],
    ['void', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty'],
    ['void', 'ns3::Ptr<ns3::NetDevice>', 'ns3::Ptr<ns3::Packet const>', 'unsigned short', 'ns3::Address const&', 'ns3::Address const&', 'ns3::NetDevice::PacketType', 'ns3::empty', 'ns3::empty', 'ns3::empty'],
    ['unsigned char', 'ns3::Ptr<ns3::QueueItem>', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty'],
]
|
from __future__ import unicode_literals
from frappe.model import update_users_report_view_settings
from erpnext.patches.v4_0.fields_to_be_renamed import rename_map
def execute():
    """Re-apply users' saved report-view settings after the v4.0 field renames."""
    # rename_map maps each doctype to a list of (old_fieldname, new_fieldname)
    # pairs; propagate every rename into stored report view settings.
    for doctype, renamed_fields in rename_map.items():
        for field_pair in renamed_fields:
            update_users_report_view_settings(doctype, field_pair[0], field_pair[1])
|
"""Given the output of -t commands from a ninja build for a gyp and GN generated
build, report on differences between the command lines."""
import os
import shlex
import subprocess
import sys
# Run from the source tree root (three levels up from this script) so the
# relative out/ directories used below resolve correctly.
os.chdir(os.path.join(os.path.dirname(__file__), '..', '..', '..'))
# Running total of flag differences found; incremented by CompareLists.
g_total_differences = 0
def FindAndRemoveArgWithValue(command_line, argname):
    """Given a command line as a list, remove and return the value of an option
    that takes a value as a separate entry.
    Modifies |command_line| in place.
    """
    # EAFP: look the option up directly and bail on absence.
    try:
        position = command_line.index(argname)
    except ValueError:
        return ''
    value = command_line[position + 1]
    # Drop both the option and its value from the list.
    del command_line[position:position + 2]
    return value
def MergeSpacedArgs(command_line, argname):
    """Combine all arguments |argname| with their values, separated by a space."""
    merged = []
    index = 0
    while index < len(command_line):
        current = command_line[index]
        if current == argname:
            # Fold the following entry into this one, separated by a space.
            current = current + ' ' + command_line[index + 1]
            index += 1
        merged.append(current)
        index += 1
    return merged
def NormalizeSymbolArguments(command_line):
    """Normalize -g arguments.
    If there's no -g args, it's equivalent to -g0. -g2 is equivalent to -g.
    Modifies |command_line| in place.
    """
    # Strip -g0 if there's no symbols.
    have_some_symbols = False
    for x in command_line:
        if x.startswith('-g') and x != '-g0':
            have_some_symbols = True
    if not have_some_symbols and '-g0' in command_line:
        command_line.remove('-g0')
    # Rename -g2 to -g.
    if '-g2' in command_line:
        # Bug fix: the original called the undefined name index(); locate
        # '-g2' with the list's own index() method instead.
        command_line[command_line.index('-g2')] = '-g'
def GetFlags(lines):
    """Turn a list of command lines into a semi-structured dict.

    Returns a dict keyed by source file name; each value is a dict with the
    output/dep names and the flags bucketed into defines, include dirs,
    -f flags, warnings, and everything else.
    """
    flags_by_output = {}
    for line in lines:
        # TODO(scottmg): Hacky way of getting only cc for now.
        if 'clang' not in line:
            continue
        # Drop the compiler executable itself; keep only its arguments.
        command_line = shlex.split(line.strip())[1:]
        output_name = FindAndRemoveArgWithValue(command_line, '-o')
        dep_name = FindAndRemoveArgWithValue(command_line, '-MF')
        NormalizeSymbolArguments(command_line)
        # Keep -Xclang together with its payload for stable comparison.
        command_line = MergeSpacedArgs(command_line, '-Xclang')
        defines = [x for x in command_line if x.startswith('-D')]
        include_dirs = [x for x in command_line if x.startswith('-I')]
        dash_f = [x for x in command_line if x.startswith('-f')]
        warnings = [x for x in command_line if x.startswith('-W')]
        cc_file = [x for x in command_line if x.endswith('.cc') or
                   x.endswith('.c') or
                   x.endswith('.cpp')]
        # Expect exactly one source file per compile command.
        if len(cc_file) != 1:
            print 'Skipping %s' % command_line
            continue
        assert len(cc_file) == 1
        # Everything that did not fall into a bucket above.
        others = [x for x in command_line if x not in defines and \
                  x not in include_dirs and \
                  x not in dash_f and \
                  x not in warnings and \
                  x not in cc_file]
        # Filter for libFindBadConstructs.so having a relative path in one and
        # absolute path in the other.
        others_filtered = []
        for x in others:
            if x.startswith('-Xclang ') and x.endswith('libFindBadConstructs.so'):
                others_filtered.append(
                    '-Xclang ' +
                    os.path.join(os.getcwd(),
                                 os.path.normpath(
                                     os.path.join('out/gn_flags', x.split(' ', 1)[1]))))
            elif x.startswith('-B'):
                # Same normalization for -B (compiler helper search path).
                others_filtered.append(
                    '-B' +
                    os.path.join(os.getcwd(),
                                 os.path.normpath(os.path.join('out/gn_flags', x[2:]))))
            else:
                others_filtered.append(x)
        others = others_filtered
        # Buckets are sorted so list comparison is order-insensitive.
        flags_by_output[cc_file[0]] = {
            'output': output_name,
            'depname': dep_name,
            'defines': sorted(defines),
            'include_dirs': sorted(include_dirs),  # TODO(scottmg): This is wrong.
            'dash_f': sorted(dash_f),
            'warnings': sorted(warnings),
            'other': sorted(others),
        }
    return flags_by_output
def CompareLists(gyp, gn, name, dont_care_gyp=None, dont_care_gn=None):
    """Return a report of any differences between gyp and gn lists, ignoring
    anything in |dont_care_{gyp|gn}| respectively."""
    global g_total_differences
    dont_care_gyp = dont_care_gyp or []
    dont_care_gn = dont_care_gn or []
    output = ''
    # Identical lists: nothing to report.
    if gyp[name] == gn[name]:
        return output
    gyp_set = set(gyp[name])
    gn_set = set(gn[name])
    # Symmetric difference, minus the entries the caller doesn't care about.
    missing_in_gyp = (gyp_set - gn_set) - set(dont_care_gyp)
    missing_in_gn = (gn_set - gyp_set) - set(dont_care_gn)
    if missing_in_gyp or missing_in_gn:
        output += ' %s differ:\n' % name
    if missing_in_gyp:
        output += ' In gyp, but not in GN:\n %s' % '\n '.join(
            sorted(missing_in_gyp)) + '\n'
        g_total_differences += len(missing_in_gyp)
    if missing_in_gn:
        output += ' In GN, but not in gyp:\n %s' % '\n '.join(
            sorted(missing_in_gn)) + '\n\n'
        g_total_differences += len(missing_in_gn)
    return output
def Run(command_line):
    """Run |command_line| as a subprocess and return stdout. Raises on error."""
    # shell=True: callers pass complete shell command strings, not argv lists.
    return subprocess.check_output(command_line, shell=True)
def main():
    """Regenerate both builds, extract compile flags for the given target(s),
    and print a per-file report of gyp vs. GN flag differences.

    Usage: one target name (compared against itself across generators) or
    two names (gyp target, then GN target). Returns a process exit code.
    """
    if len(sys.argv) != 2 and len(sys.argv) != 3:
        print 'usage: %s gyp_target gn_target' % __file__
        print '   or: %s target' % __file__
        return 1
    if len(sys.argv) == 2:
        # Single-target form: use the same name for both generators.
        sys.argv.append(sys.argv[1])
    print >>sys.stderr, 'Regenerating...'
    # Currently only Release, non-component.
    Run('gn gen out/gn_flags --args="is_debug=false is_component_build=false"')
    # Clear GYP_DEFINES so the gyp run is not influenced by the environment.
    os.environ.pop('GYP_DEFINES', None)
    Run('python build/gyp_chromium -Goutput_dir=out_gyp_flags -Gconfig=Release')
    # 'ninja -t commands' dumps the full compile command lines per target.
    gn = Run('ninja -C out/gn_flags -t commands %s' % sys.argv[2])
    gyp = Run('ninja -C out_gyp_flags/Release -t commands %s' % sys.argv[1])
    all_gyp_flags = GetFlags(gyp.splitlines())
    all_gn_flags = GetFlags(gn.splitlines())
    gyp_files = set(all_gyp_flags.keys())
    gn_files = set(all_gn_flags.keys())
    different_source_list = gyp_files != gn_files
    if different_source_list:
        print 'Different set of sources files:'
        print ' In gyp, not in GN:\n %s' % '\n '.join(
            sorted(gyp_files - gn_files))
        print ' In GN, not in gyp:\n %s' % '\n '.join(
            sorted(gn_files - gyp_files))
        print '\nNote that flags will only be compared for files in both sets.\n'
    # Only files present in both builds can be meaningfully compared.
    file_list = gyp_files & gn_files
    files_with_given_differences = {}
    for filename in sorted(file_list):
        gyp_flags = all_gyp_flags[filename]
        gn_flags = all_gn_flags[filename]
        differences = CompareLists(gyp_flags, gn_flags, 'dash_f')
        differences += CompareLists(gyp_flags, gn_flags, 'defines')
        differences += CompareLists(gyp_flags, gn_flags, 'include_dirs')
        differences += CompareLists(gyp_flags, gn_flags, 'warnings', dont_care_gn=[
            # More conservative warnings in GN we consider to be OK.
            '-Wendif-labels',
            '-Wextra',
            '-Wsign-compare',
            ])
        differences += CompareLists(gyp_flags, gn_flags, 'other')
        if differences:
            # Group files that share an identical diff so it prints once.
            files_with_given_differences.setdefault(differences, []).append(filename)
    for diff, files in files_with_given_differences.iteritems():
        print '\n'.join(sorted(files))
        print diff
    print 'Total differences:', g_total_differences
    # TODO(scottmg): Return failure on difference once we're closer to identical.
    return 0
|
"""
Test the QgsSettings class
Run with: ctest -V -R PyQgsSettings
.. note:: This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
"""
import os
import tempfile
from qgis.core import QgsSettings, QgsTolerance, QgsMapLayerProxyModel
from qgis.testing import start_app, unittest
from qgis.PyQt.QtCore import QSettings, QVariant
from pathlib import Path
__author__ = 'Alessandro Pasotti'
__date__ = '02/02/2017'
__copyright__ = 'Copyright 2017, The QGIS Project'
# Spin up the QGIS application once for the whole test module.
start_app()
class TestQgsSettings(unittest.TestCase):
cnt = 0
    def setUp(self):
        """Create a fresh global-defaults file and a per-test QgsSettings store."""
        # Distinct application name per test keeps user settings isolated.
        self.cnt += 1
        h, path = tempfile.mkstemp('.ini')
        Path(path).touch()
        # NOTE(review): the global settings path appears to need setting before
        # QgsSettings is constructed so the defaults file is picked up — confirm.
        assert QgsSettings.setGlobalSettingsPath(path)
        self.settings = QgsSettings('testqgissettings', 'testqgissettings%s' % self.cnt)
        self.globalsettings = QSettings(self.settings.globalSettingsPath(), QSettings.IniFormat)
        self.globalsettings.sync()
        assert os.path.exists(self.globalsettings.fileName())
def tearDown(self):
settings_file = self.settings.fileName()
settings_default_file = self.settings.globalSettingsPath()
del(self.settings)
try:
os.unlink(settings_file)
except:
pass
try:
os.unlink(settings_default_file)
except:
pass
    def addToDefaults(self, key, value):
        """Write a single key/value into the global defaults file and flush it."""
        self.globalsettings.setValue(key, value)
        self.globalsettings.sync()
def addArrayToDefaults(self, prefix, key, values):
defaults = QSettings(self.settings.globalSettingsPath(), QSettings.IniFormat) # NOQA
self.globalsettings.beginWriteArray(prefix)
i = 0
for v in values:
self.globalsettings.setArrayIndex(i)
self.globalsettings.setValue(key, v)
i += 1
self.globalsettings.endArray()
self.globalsettings.sync()
def addGroupToDefaults(self, prefix, kvp):
defaults = QSettings(self.settings.globalSettingsPath(), QSettings.IniFormat) # NOQA
self.globalsettings.beginGroup(prefix)
for k, v in kvp.items():
self.globalsettings.setValue(k, v)
self.globalsettings.endGroup()
self.globalsettings.sync()
    def test_basic_functionality(self):
        """Missing keys fall back to the supplied default; set values round-trip."""
        self.assertEqual(self.settings.value('testqgissettings/doesnotexists', 'notexist'), 'notexist')
        self.settings.setValue('testqgissettings/name', 'qgisrocks')
        self.settings.sync()
        self.assertEqual(self.settings.value('testqgissettings/name'), 'qgisrocks')
    def test_defaults(self):
        """A value present only in the global defaults file is visible through QgsSettings."""
        self.assertIsNone(self.settings.value('testqgissettings/name'))
        self.addToDefaults('testqgissettings/name', 'qgisrocks')
        self.assertEqual(self.settings.value('testqgissettings/name'), 'qgisrocks')
    def test_allkeys(self):
        """allKeys() merges keys from the user store and the global defaults."""
        self.assertEqual(self.settings.allKeys(), [])
        self.addToDefaults('testqgissettings/name', 'qgisrocks')
        self.addToDefaults('testqgissettings/name2', 'qgisrocks2')
        self.settings.setValue('nepoti/eman', 'osaple')
        # Two default keys plus one user key.
        self.assertEqual(3, len(self.settings.allKeys()))
        self.assertIn('testqgissettings/name', self.settings.allKeys())
        self.assertIn('nepoti/eman', self.settings.allKeys())
        self.assertEqual('qgisrocks', self.settings.value('testqgissettings/name'))
        self.assertEqual('qgisrocks2', self.settings.value('testqgissettings/name2'))
        self.assertEqual('qgisrocks', self.globalsettings.value('testqgissettings/name'))
        self.assertEqual('osaple', self.settings.value('nepoti/eman'))
        self.assertEqual(3, len(self.settings.allKeys()))
        # The user-only key must not leak into the global defaults store.
        self.assertEqual(2, len(self.globalsettings.allKeys()))
    def test_precedence_simple(self):
        """A user-set value overrides the same key from the global defaults."""
        self.assertEqual(self.settings.allKeys(), [])
        self.addToDefaults('testqgissettings/names/name1', 'qgisrocks1')
        self.settings.setValue('testqgissettings/names/name1', 'qgisrocks-1')
        self.assertEqual(self.settings.value('testqgissettings/names/name1'), 'qgisrocks-1')
    def test_precedence_group(self):
        """Test if user can override a group value"""
        self.assertEqual(self.settings.allKeys(), [])
        self.addGroupToDefaults('connections-xyz', {
            'OSM': 'http://a.tile.openstreetmap.org/{z}/{x}/{y}.png',
            'OSM-b': 'http://b.tile.openstreetmap.org/{z}/{x}/{y}.png',
        })
        # Defaults are visible inside the group.
        self.settings.beginGroup('connections-xyz')
        self.assertEqual(self.settings.value('OSM'), 'http://a.tile.openstreetmap.org/{z}/{x}/{y}.png')
        self.assertEqual(self.settings.value('OSM-b'), 'http://b.tile.openstreetmap.org/{z}/{x}/{y}.png')
        self.settings.endGroup()
        # Override edit
        self.settings.beginGroup('connections-xyz')
        self.settings.setValue('OSM', 'http://c.tile.openstreetmap.org/{z}/{x}/{y}.png')
        self.settings.endGroup()
        # Check it again!
        self.settings.beginGroup('connections-xyz')
        self.assertEqual(self.settings.value('OSM'), 'http://c.tile.openstreetmap.org/{z}/{x}/{y}.png')
        self.assertEqual(self.settings.value('OSM-b'), 'http://b.tile.openstreetmap.org/{z}/{x}/{y}.png')
        self.settings.endGroup()
        # Override remove: the global value will be resumed!!!
        self.settings.beginGroup('connections-xyz')
        self.settings.remove('OSM')
        self.settings.endGroup()
        # Check it again!
        self.settings.beginGroup('connections-xyz')
        self.assertEqual(self.settings.value('OSM'), 'http://a.tile.openstreetmap.org/{z}/{x}/{y}.png')
        self.assertEqual(self.settings.value('OSM-b'), 'http://b.tile.openstreetmap.org/{z}/{x}/{y}.png')
        self.settings.endGroup()
        # Override remove: store a blank!
        self.settings.beginGroup('connections-xyz')
        self.settings.setValue('OSM', '')
        self.settings.endGroup()
        # Check it again!
        # An empty string is a real override, unlike a removal.
        self.settings.beginGroup('connections-xyz')
        self.assertEqual(self.settings.value('OSM'), '')
        self.assertEqual(self.settings.value('OSM-b'), 'http://b.tile.openstreetmap.org/{z}/{x}/{y}.png')
        self.settings.endGroup()
        # Override remove: store a None: will resume the global setting!
        self.settings.beginGroup('connections-xyz')
        self.settings.setValue('OSM', None)
        self.settings.endGroup()
        # Check it again!
        self.settings.beginGroup('connections-xyz')
        self.assertEqual(self.settings.value('OSM'), 'http://a.tile.openstreetmap.org/{z}/{x}/{y}.png')
        self.assertEqual(self.settings.value('OSM-b'), 'http://b.tile.openstreetmap.org/{z}/{x}/{y}.png')
        self.settings.endGroup()
    def test_uft8(self):
        """Non-ASCII keys and values round-trip through defaults and overrides.

        NOTE(review): method name has a typo (uft8 -> utf8); left unchanged to
        avoid altering the test's discovered name.
        """
        self.assertEqual(self.settings.allKeys(), [])
        self.addToDefaults('testqgissettings/names/namèé↓1', 'qgisrocks↓1')
        self.assertEqual(self.settings.value('testqgissettings/names/namèé↓1'), 'qgisrocks↓1')
        self.settings.setValue('testqgissettings/names/namèé↓2', 'qgisrocks↓2')
        self.assertEqual(self.settings.value('testqgissettings/names/namèé↓2'), 'qgisrocks↓2')
        self.settings.setValue('testqgissettings/names/namèé↓1', 'qgisrocks↓-1')
        self.assertEqual(self.settings.value('testqgissettings/names/namèé↓1'), 'qgisrocks↓-1')
def test_groups(self):
    """Group navigation (beginGroup/endGroup) sees both local and global-default keys."""
    self.assertEqual(self.settings.allKeys(), [])

    self.addToDefaults('testqgissettings/names/name1', 'qgisrocks1')
    self.addToDefaults('testqgissettings/names/name2', 'qgisrocks2')
    self.addToDefaults('testqgissettings/names/name3', 'qgisrocks3')
    self.addToDefaults('testqgissettings/name', 'qgisrocks')

    self.settings.beginGroup('testqgissettings')
    self.assertEqual(self.settings.group(), 'testqgissettings')
    # Only the global defaults contribute 'names' at this point
    self.assertEqual(['names'], self.settings.childGroups())
    # Writing a local key creates a new child group alongside the global one
    self.settings.setValue('surnames/name1', 'qgisrocks-1')
    self.assertEqual(['surnames', 'names'], self.settings.childGroups())

    self.settings.setValue('names/name1', 'qgisrocks-1')
    self.assertEqual('qgisrocks-1', self.settings.value('names/name1'))
    self.settings.endGroup()
    self.assertEqual(self.settings.group(), '')

    # Nested group path entered in a single beginGroup call
    self.settings.beginGroup('testqgissettings/names')
    self.assertEqual(self.settings.group(), 'testqgissettings/names')
    self.settings.setValue('name4', 'qgisrocks-4')
    # childKeys merges local and global keys
    keys = sorted(self.settings.childKeys())
    self.assertEqual(keys, ['name1', 'name2', 'name3', 'name4'])
    self.settings.endGroup()
    self.assertEqual(self.settings.group(), '')

    # Values written inside a group are readable via their full path
    self.assertEqual('qgisrocks-1', self.settings.value('testqgissettings/names/name1'))
    self.assertEqual('qgisrocks-4', self.settings.value('testqgissettings/names/name4'))
def test_global_groups(self):
    """globalChildGroups() reports only groups that exist in the global defaults."""
    self.assertEqual(self.settings.allKeys(), [])
    self.assertEqual(self.globalsettings.allKeys(), [])

    self.addToDefaults('testqgissettings/foo/first', 'qgis')
    self.addToDefaults('testqgissettings/foo/last', 'rocks')

    self.settings.beginGroup('testqgissettings')
    self.assertEqual(self.settings.group(), 'testqgissettings')
    # 'foo' comes from the global defaults and shows up in both views
    self.assertEqual(['foo'], self.settings.childGroups())
    self.assertEqual(['foo'], self.settings.globalChildGroups())
    self.settings.endGroup()
    self.assertEqual(self.settings.group(), '')

    self.settings.setValue('testqgissettings/bar/first', 'qgis')
    self.settings.setValue('testqgissettings/bar/last', 'rocks')

    self.settings.beginGroup('testqgissettings')
    # Local 'bar' merges into childGroups but must NOT appear as a global group
    self.assertEqual(sorted(['bar', 'foo']), sorted(self.settings.childGroups()))
    self.assertEqual(['foo'], self.settings.globalChildGroups())
    self.settings.endGroup()

    # Removing the group from the global store makes it disappear from the global view
    self.globalsettings.remove('testqgissettings/foo')

    self.settings.beginGroup('testqgissettings')
    self.assertEqual(['bar'], self.settings.childGroups())
    self.assertEqual([], self.settings.globalChildGroups())
    self.settings.endGroup()
def test_group_section(self):
    """beginGroup with a section= prefixes the group with the section name (e.g. 'core/')."""
    # Test group by using Section
    self.settings.beginGroup('firstgroup', section=QgsSettings.Core)
    self.assertEqual(self.settings.group(), 'core/firstgroup')
    self.assertEqual([], self.settings.childGroups())
    self.settings.setValue('key', 'value')
    self.settings.setValue('key2/subkey1', 'subvalue1')
    self.settings.setValue('key2/subkey2', 'subvalue2')
    self.settings.setValue('key3', 'value3')

    # allKeys is recursive, childKeys/childGroups are not
    self.assertEqual(['key', 'key2/subkey1', 'key2/subkey2', 'key3'], self.settings.allKeys())
    self.assertEqual(['key', 'key3'], self.settings.childKeys())
    self.assertEqual(['key2'], self.settings.childGroups())
    self.settings.endGroup()
    self.assertEqual(self.settings.group(), '')

    # Set value by writing the group manually
    self.settings.setValue('firstgroup/key4', 'value4', section=QgsSettings.Core)

    # Checking the value that have been set
    self.assertEqual(self.settings.value('firstgroup/key', section=QgsSettings.Core), 'value')
    self.assertEqual(self.settings.value('firstgroup/key2/subkey1', section=QgsSettings.Core), 'subvalue1')
    self.assertEqual(self.settings.value('firstgroup/key2/subkey2', section=QgsSettings.Core), 'subvalue2')
    self.assertEqual(self.settings.value('firstgroup/key3', section=QgsSettings.Core), 'value3')
    self.assertEqual(self.settings.value('firstgroup/key4', section=QgsSettings.Core), 'value4')

    # Clean up firstgroup
    self.settings.remove('firstgroup', section=QgsSettings.Core)
def test_array(self):
    """Arrays stored in the global defaults are readable through beginReadArray()."""
    self.assertEqual(self.settings.allKeys(), [])
    self.addArrayToDefaults('testqgissettings', 'key', ['qgisrocks1', 'qgisrocks2', 'qgisrocks3'])
    # QSettings array layout: <prefix>/<1-based index>/<key> plus a 'size' entry
    self.assertEqual(self.settings.allKeys(), ['testqgissettings/1/key', 'testqgissettings/2/key', 'testqgissettings/3/key', 'testqgissettings/size'])
    self.assertEqual(self.globalsettings.allKeys(), ['testqgissettings/1/key', 'testqgissettings/2/key', 'testqgissettings/3/key', 'testqgissettings/size'])

    self.assertEqual(3, self.globalsettings.beginReadArray('testqgissettings'))
    self.globalsettings.endArray()
    self.assertEqual(3, self.settings.beginReadArray('testqgissettings'))

    # Iterate the array through the settings (not global) object
    values = []
    for i in range(3):
        self.settings.setArrayIndex(i)
        values.append(self.settings.value("key"))

    self.assertEqual(values, ['qgisrocks1', 'qgisrocks2', 'qgisrocks3'])
def test_array_overrides(self):
    """Test if an array completely shadows the global one"""
    self.assertEqual(self.settings.allKeys(), [])
    self.addArrayToDefaults('testqgissettings', 'key', ['qgisrocks1', 'qgisrocks2', 'qgisrocks3'])
    self.assertEqual(self.settings.allKeys(), ['testqgissettings/1/key', 'testqgissettings/2/key', 'testqgissettings/3/key', 'testqgissettings/size'])
    self.assertEqual(self.globalsettings.allKeys(), ['testqgissettings/1/key', 'testqgissettings/2/key', 'testqgissettings/3/key', 'testqgissettings/size'])

    self.assertEqual(3, self.globalsettings.beginReadArray('testqgissettings'))
    self.globalsettings.endArray()
    self.assertEqual(3, self.settings.beginReadArray('testqgissettings'))

    # Now override: write a SHORTER local array over the 3-element global one
    self.settings.beginWriteArray('testqgissettings')
    self.settings.setArrayIndex(0)
    self.settings.setValue('key', 'myqgisrocksmore1')
    self.settings.setArrayIndex(1)
    self.settings.setValue('key', 'myqgisrocksmore2')
    self.settings.endArray()

    # Check it: the local array fully shadows the global one, including its size (2, not 3)
    self.assertEqual(2, self.settings.beginReadArray('testqgissettings'))
    values = []
    for i in range(2):
        self.settings.setArrayIndex(i)
        values.append(self.settings.value("key"))

    self.assertEqual(values, ['myqgisrocksmore1', 'myqgisrocksmore2'])
def test_section_getters_setters(self):
    """Values set with a section= argument are namespaced under that section's prefix
    and readable back both by full path and by section-aware getters."""
    self.assertEqual(self.settings.allKeys(), [])

    self.settings.setValue('key1', 'core1', section=QgsSettings.Core)
    self.settings.setValue('key2', 'core2', section=QgsSettings.Core)

    self.settings.setValue('key1', 'server1', section=QgsSettings.Server)
    self.settings.setValue('key2', 'server2', section=QgsSettings.Server)

    self.settings.setValue('key1', 'gui1', section=QgsSettings.Gui)
    # FIX: was passed positionally ('key2', 'gui2', QgsSettings.Gui); use the
    # section= keyword for consistency with every other call in this test
    self.settings.setValue('key2', 'gui2', section=QgsSettings.Gui)

    self.settings.setValue('key1', 'plugins1', section=QgsSettings.Plugins)
    self.settings.setValue('key2', 'plugins2', section=QgsSettings.Plugins)

    self.settings.setValue('key1', 'misc1', section=QgsSettings.Misc)
    self.settings.setValue('key2', 'misc2', section=QgsSettings.Misc)

    self.settings.setValue('key1', 'auth1', section=QgsSettings.Auth)
    self.settings.setValue('key2', 'auth2', section=QgsSettings.Auth)

    self.settings.setValue('key1', 'app1', section=QgsSettings.App)
    self.settings.setValue('key2', 'app2', section=QgsSettings.App)

    self.settings.setValue('key1', 'provider1', section=QgsSettings.Providers)
    self.settings.setValue('key2', 'provider2', section=QgsSettings.Providers)

    # This is an overwrite of previous setting and it is intentional
    self.settings.setValue('key1', 'auth1', section=QgsSettings.Auth)
    self.settings.setValue('key2', 'auth2', section=QgsSettings.Auth)

    # Test that the values are namespaced
    self.assertEqual(self.settings.value('core/key1'), 'core1')
    self.assertEqual(self.settings.value('core/key2'), 'core2')

    self.assertEqual(self.settings.value('server/key1'), 'server1')
    self.assertEqual(self.settings.value('server/key2'), 'server2')

    self.assertEqual(self.settings.value('gui/key1'), 'gui1')
    self.assertEqual(self.settings.value('gui/key2'), 'gui2')

    self.assertEqual(self.settings.value('plugins/key1'), 'plugins1')
    self.assertEqual(self.settings.value('plugins/key2'), 'plugins2')

    self.assertEqual(self.settings.value('misc/key1'), 'misc1')
    self.assertEqual(self.settings.value('misc/key2'), 'misc2')

    # Test getters
    self.assertEqual(self.settings.value('key1', None, section=QgsSettings.Core), 'core1')
    self.assertEqual(self.settings.value('key2', None, section=QgsSettings.Core), 'core2')

    self.assertEqual(self.settings.value('key1', None, section=QgsSettings.Server), 'server1')
    self.assertEqual(self.settings.value('key2', None, section=QgsSettings.Server), 'server2')

    self.assertEqual(self.settings.value('key1', None, section=QgsSettings.Gui), 'gui1')
    self.assertEqual(self.settings.value('key2', None, section=QgsSettings.Gui), 'gui2')

    self.assertEqual(self.settings.value('key1', None, section=QgsSettings.Plugins), 'plugins1')
    self.assertEqual(self.settings.value('key2', None, section=QgsSettings.Plugins), 'plugins2')

    self.assertEqual(self.settings.value('key1', None, section=QgsSettings.Misc), 'misc1')
    self.assertEqual(self.settings.value('key2', None, section=QgsSettings.Misc), 'misc2')

    self.assertEqual(self.settings.value('key1', None, section=QgsSettings.Auth), 'auth1')
    self.assertEqual(self.settings.value('key2', None, section=QgsSettings.Auth), 'auth2')

    self.assertEqual(self.settings.value('key1', None, section=QgsSettings.App), 'app1')
    self.assertEqual(self.settings.value('key2', None, section=QgsSettings.App), 'app2')

    self.assertEqual(self.settings.value('key1', None, section=QgsSettings.Providers), 'provider1')
    self.assertEqual(self.settings.value('key2', None, section=QgsSettings.Providers), 'provider2')

    # Test default values on Section getter
    self.assertEqual(self.settings.value('key_not_exist', 'misc_not_exist', section=QgsSettings.Misc), 'misc_not_exist')
def test_contains(self):
    """contains() is True for keys from the global defaults as well as local keys."""
    self.assertEqual(self.settings.allKeys(), [])
    self.addToDefaults('testqgissettings/name', 'qgisrocks1')
    self.addToDefaults('testqgissettings/name2', 'qgisrocks2')

    # Keys existing only in the global defaults still register as contained
    self.assertTrue(self.settings.contains('testqgissettings/name'))
    self.assertTrue(self.settings.contains('testqgissettings/name2'))

    # Locally written keys are contained too
    self.settings.setValue('testqgissettings/name3', 'qgisrocks3')
    self.assertTrue(self.settings.contains('testqgissettings/name3'))
def test_remove(self):
    """remove() deletes a key (plain and with an explicit Section)."""
    self.settings.setValue('testQgisSettings/temp', True)
    self.assertEqual(self.settings.value('testQgisSettings/temp'), True)
    self.settings.remove('testQgisSettings/temp')
    # FIX: key was misspelled 'testqQgisSettings/temp', which made this assertion
    # pass trivially (a never-written key is always None) even if remove() failed
    self.assertEqual(self.settings.value('testQgisSettings/temp'), None)

    # Test remove by using Section
    self.settings.setValue('testQgisSettings/tempSection', True, section=QgsSettings.Core)
    self.assertEqual(self.settings.value('testQgisSettings/tempSection', section=QgsSettings.Core), True)
    # FIX: removed/checked '.../temp' instead of the '.../tempSection' key actually
    # written above, so the section-aware remove() path was never really tested
    self.settings.remove('testQgisSettings/tempSection', section=QgsSettings.Core)
    self.assertEqual(self.settings.value('testQgisSettings/tempSection', section=QgsSettings.Core), None)
def test_enumValue(self):
    """enumValue() parses a stored enum name and falls back to the default on garbage."""
    self.settings.setValue('enum', 'LayerUnits')
    self.assertEqual(self.settings.enumValue('enum', QgsTolerance.Pixels), QgsTolerance.LayerUnits)
    # An unknown name yields the supplied default
    self.settings.setValue('enum', 'dummy_setting')
    self.assertEqual(self.settings.enumValue('enum', QgsTolerance.Pixels), QgsTolerance.Pixels)
    # The returned object is the enum type, not a plain string/int
    self.assertEqual(type(self.settings.enumValue('enum', QgsTolerance.Pixels)), QgsTolerance.UnitType)
def test_setEnumValue(self):
    """setEnumValue() stores an enum member that enumValue() can read back."""
    self.settings.setValue('enum', 'LayerUnits')
    self.assertEqual(self.settings.enumValue('enum', QgsTolerance.Pixels), QgsTolerance.LayerUnits)
    # Overwrite via the typed setter and confirm the round-trip
    self.settings.setEnumValue('enum', QgsTolerance.Pixels)
    self.assertEqual(self.settings.enumValue('enum', QgsTolerance.Pixels), QgsTolerance.Pixels)
def test_flagValue(self):
    """flagValue() parses a stored '|'-joined flag string and falls back on garbage."""
    pointAndLine = QgsMapLayerProxyModel.Filters(QgsMapLayerProxyModel.PointLayer | QgsMapLayerProxyModel.LineLayer)
    pointAndPolygon = QgsMapLayerProxyModel.Filters(QgsMapLayerProxyModel.PointLayer | QgsMapLayerProxyModel.PolygonLayer)

    self.settings.setValue('flag', 'PointLayer|PolygonLayer')
    self.assertEqual(self.settings.flagValue('flag', pointAndLine), pointAndPolygon)

    # An unparseable value yields the supplied default
    self.settings.setValue('flag', 'dummy_setting')
    self.assertEqual(self.settings.flagValue('flag', pointAndLine), pointAndLine)
    # FIX: read key 'enum' (never set in this test) instead of 'flag', so the type
    # check only ever exercised the default-fallback path
    self.assertEqual(type(self.settings.flagValue('flag', pointAndLine)), QgsMapLayerProxyModel.Filters)
def test_overwriteDefaultValues(self):
    """Test that unchanged values are not stored"""
    self.globalsettings.setValue('a_value_with_default', 'a value')
    self.globalsettings.setValue('an_invalid_value', QVariant())

    self.assertEqual(self.settings.value('a_value_with_default'), 'a value')
    self.assertEqual(self.settings.value('an_invalid_value'), QVariant())

    # Now, set them with the same current value
    self.settings.setValue('a_value_with_default', 'a value')
    self.settings.setValue('an_invalid_value', QVariant())

    # Check: values equal to the global default must NOT be written to the local file
    pure_settings = QSettings(self.settings.fileName(), QSettings.IniFormat)
    self.assertFalse('a_value_with_default' in pure_settings.allKeys())
    self.assertFalse('an_invalid_value' in pure_settings.allKeys())

    # Set a changed value
    self.settings.setValue('a_value_with_default', 'a new value')
    self.settings.setValue('an_invalid_value', 'valid value')

    # Check
    self.assertTrue('a_value_with_default' in pure_settings.allKeys())
    self.assertTrue('an_invalid_value' in pure_settings.allKeys())

    self.assertEqual(self.settings.value('a_value_with_default'), 'a new value')
    self.assertEqual(self.settings.value('an_invalid_value'), 'valid value')

    # Re-set to original values
    self.settings.setValue('a_value_with_default', 'a value')
    self.settings.setValue('an_invalid_value', QVariant())

    self.assertEqual(self.settings.value('a_value_with_default'), 'a value')
    self.assertEqual(self.settings.value('an_invalid_value'), QVariant())

    # Check if they are gone
    # FIX: the final assertions read assertFalse(key not in ...), i.e. they asserted
    # the keys were STILL PRESENT, contradicting both this comment and the symmetric
    # "not stored" checks above; corrected to assert absence
    pure_settings = QSettings(self.settings.fileName(), QSettings.IniFormat)
    self.assertFalse('a_value_with_default' in pure_settings.allKeys())
    self.assertFalse('an_invalid_value' in pure_settings.allKeys())
# Allow running this test module directly (outside the QGIS test runner)
if __name__ == '__main__':
    unittest.main()
|
from gi.repository import Gtk, Gdk, Notify
import gettext
import popupmenu
from autokey.configmanager import *
from autokey import common
# Probe for the optional AppIndicator3 GObject bindings; when absent we fall
# back to the classic Gtk.StatusIcon-based Notifier (see get_notifier()).
HAVE_APPINDICATOR = False
try:
    from gi.repository import AppIndicator3
    HAVE_APPINDICATOR = True
except ImportError:
    pass

# Installs the _() translation function into builtins for this module
gettext.install("autokey")

TOOLTIP_RUNNING = _("AutoKey - running")
TOOLTIP_PAUSED = _("AutoKey - paused")
def get_notifier(app):
    """Return the tray-notifier implementation appropriate for this desktop.

    Uses the AppIndicator-based notifier when the AppIndicator3 bindings were
    importable at module load time, otherwise the Gtk.StatusIcon-based one.
    """
    notifier_cls = IndicatorNotifier if HAVE_APPINDICATOR else Notifier
    return notifier_cls(app)
class Notifier:
    """
    Encapsulates all functionality related to the notification icon, notifications, and tray menu.
    """

    def __init__(self, autokeyApp):
        # Initialise libnotify once for the whole application
        Notify.init("AutoKey")
        self.app = autokeyApp
        self.configManager = autokeyApp.service.configManager

        self.icon = Gtk.StatusIcon.new_from_icon_name(ConfigManager.SETTINGS[NOTIFICATION_ICON])
        self.update_tool_tip()
        # Right-click opens the tray menu; left-click opens the main window
        self.icon.connect("popup_menu", self.on_popup_menu)
        self.icon.connect("activate", self.on_show_configure)
        # Holds a "View script error" menu item while a script error is pending
        self.errorItem = None

        self.update_visible_status()

    def update_visible_status(self):
        # Show or hide the tray icon according to the user's setting
        if ConfigManager.SETTINGS[SHOW_TRAY_ICON]:
            self.icon.set_visible(True)
        else:
            self.icon.set_visible(False)

    def update_tool_tip(self):
        # Tooltip reflects whether the expansion service is currently running
        if ConfigManager.SETTINGS[SHOW_TRAY_ICON]:
            if ConfigManager.SETTINGS[SERVICE_RUNNING]:
                self.icon.set_tooltip_text(TOOLTIP_RUNNING)
            else:
                self.icon.set_tooltip_text(TOOLTIP_PAUSED)

    def hide_icon(self):
        # Hide the tray icon (e.g. on shutdown)
        self.icon.set_visible(False)

    def rebuild_menu(self):
        # No-op: the StatusIcon menu is rebuilt on demand in on_popup_menu()
        pass

    # Signal Handlers ----

    def on_popup_menu(self, status_icon, button, activate_time, data=None):
        # Build the tray menu fresh on every right-click so it reflects
        # the current folders/items and service state.

        # Main Menu items
        enableMenuItem = Gtk.CheckMenuItem(_("Enable Expansions"))
        enableMenuItem.set_active(self.app.service.is_running())
        enableMenuItem.set_sensitive(not self.app.serviceDisabled)

        configureMenuItem = Gtk.ImageMenuItem(_("Show Main Window"))
        configureMenuItem.set_image(Gtk.Image.new_from_stock(Gtk.STOCK_PREFERENCES, Gtk.IconSize.MENU))

        removeMenuItem = Gtk.ImageMenuItem(_("Remove icon"))
        removeMenuItem.set_image(Gtk.Image.new_from_stock(Gtk.STOCK_CLOSE, Gtk.IconSize.MENU))

        quitMenuItem = Gtk.ImageMenuItem.new_from_stock(Gtk.STOCK_QUIT, None)

        # Menu signals
        enableMenuItem.connect("toggled", self.on_enable_toggled)
        configureMenuItem.connect("activate", self.on_show_configure)
        removeMenuItem.connect("activate", self.on_remove_icon)
        quitMenuItem.connect("activate", self.on_destroy_and_exit)

        # Get phrase folders to add to main menu
        folders = []
        items = []

        for folder in self.configManager.allFolders:
            if folder.showInTrayMenu:
                folders.append(folder)

        for item in self.configManager.allItems:
            if item.showInTrayMenu:
                items.append(item)

        # Construct main menu
        menu = popupmenu.PopupMenu(self.app.service, folders, items, False)
        if len(items) > 0:
            menu.append(Gtk.SeparatorMenuItem())
        menu.append(enableMenuItem)
        # Only present while a script error is pending (set by notify_error)
        if self.errorItem is not None:
            menu.append(self.errorItem)
        menu.append(configureMenuItem)
        menu.append(removeMenuItem)
        menu.append(quitMenuItem)
        menu.show_all()

        menu.popup(None, None, None, None, button, activate_time)

    def on_enable_toggled(self, widget, data=None):
        # Checkbox state drives pausing/unpausing the expansion service
        if widget.active:
            self.app.unpause_service()
        else:
            self.app.pause_service()

    def on_show_configure(self, widget, data=None):
        self.app.show_configure()

    def on_remove_icon(self, widget, data=None):
        # Hide the icon and persist the preference
        self.icon.set_visible(False)
        ConfigManager.SETTINGS[SHOW_TRAY_ICON] = False

    def on_destroy_and_exit(self, widget, data=None):
        self.app.shutdown()

    def notify_error(self, message):
        # Pop a desktop notification and switch the tray icon to the error icon;
        # the "View script error" item is added to the next-built menu.
        self.show_notify(message, Gtk.STOCK_DIALOG_ERROR)
        self.errorItem = Gtk.MenuItem(_("View script error"))
        self.errorItem.connect("activate", self.on_show_error)
        self.icon.set_from_icon_name(common.ICON_FILE_NOTIFICATION_ERROR)

    def on_show_error(self, widget, data=None):
        # Show the error, then clear the pending-error state and restore the icon
        self.app.show_script_error()
        self.errorItem = None
        self.icon.set_from_icon_name(ConfigManager.SETTINGS[NOTIFICATION_ICON])

    def show_notify(self, message, iconName):
        # Called from worker threads, hence the Gdk threads_enter/leave guard
        Gdk.threads_enter()
        n = Notify.Notification.new("AutoKey", message, iconName)
        n.set_urgency(Notify.Urgency.LOW)
        if ConfigManager.SETTINGS[SHOW_TRAY_ICON]:
            n.attach_to_status_icon(self.icon)
        n.show()
        Gdk.threads_leave()
class IndicatorNotifier:
    """AppIndicator3-based equivalent of Notifier for desktops without a system tray."""

    def __init__(self, autokeyApp):
        # Initialise libnotify once for the whole application
        Notify.init("AutoKey")
        self.app = autokeyApp
        self.configManager = autokeyApp.service.configManager

        self.indicator = AppIndicator3.Indicator.new("AutoKey", ConfigManager.SETTINGS[NOTIFICATION_ICON],
                                                     AppIndicator3.IndicatorCategory.APPLICATION_STATUS)
        # Icon shown while the indicator is in ATTENTION status (script error)
        self.indicator.set_attention_icon(common.ICON_FILE_NOTIFICATION_ERROR)
        self.update_visible_status()
        self.rebuild_menu()

    def update_visible_status(self):
        # ACTIVE shows the indicator, PASSIVE hides it
        if ConfigManager.SETTINGS[SHOW_TRAY_ICON]:
            self.indicator.set_status(AppIndicator3.IndicatorStatus.ACTIVE)
        else:
            self.indicator.set_status(AppIndicator3.IndicatorStatus.PASSIVE)

    def hide_icon(self):
        self.indicator.set_status(AppIndicator3.IndicatorStatus.PASSIVE)

    def rebuild_menu(self):
        # Unlike Notifier, the indicator menu is built once and kept on self;
        # the error item stays in the menu but is hidden until an error occurs.

        # Main Menu items
        self.errorItem = Gtk.MenuItem(_("View script error"))

        enableMenuItem = Gtk.CheckMenuItem(_("Enable Expansions"))
        enableMenuItem.set_active(self.app.service.is_running())
        enableMenuItem.set_sensitive(not self.app.serviceDisabled)

        configureMenuItem = Gtk.ImageMenuItem(_("Show Main Window"))
        configureMenuItem.set_image(Gtk.Image.new_from_stock(Gtk.STOCK_PREFERENCES, Gtk.IconSize.MENU))

        removeMenuItem = Gtk.ImageMenuItem(_("Remove icon"))
        removeMenuItem.set_image(Gtk.Image.new_from_stock(Gtk.STOCK_CLOSE, Gtk.IconSize.MENU))

        quitMenuItem = Gtk.ImageMenuItem.new_from_stock(Gtk.STOCK_QUIT, None)

        # Menu signals
        enableMenuItem.connect("toggled", self.on_enable_toggled)
        configureMenuItem.connect("activate", self.on_show_configure)
        removeMenuItem.connect("activate", self.on_remove_icon)
        quitMenuItem.connect("activate", self.on_destroy_and_exit)
        self.errorItem.connect("activate", self.on_show_error)

        # Get phrase folders to add to main menu
        folders = []
        items = []

        for folder in self.configManager.allFolders:
            if folder.showInTrayMenu:
                folders.append(folder)

        for item in self.configManager.allItems:
            if item.showInTrayMenu:
                items.append(item)

        # Construct main menu
        self.menu = popupmenu.PopupMenu(self.app.service, folders, items, False)
        if len(items) > 0:
            self.menu.append(Gtk.SeparatorMenuItem())
        self.menu.append(self.errorItem)
        self.menu.append(enableMenuItem)
        self.menu.append(configureMenuItem)
        self.menu.append(removeMenuItem)
        self.menu.append(quitMenuItem)
        self.menu.show_all()
        # Must come after show_all(): hidden until notify_error() reveals it
        self.errorItem.hide()
        self.indicator.set_menu(self.menu)

    def notify_error(self, message):
        # Pop a notification, reveal the error menu item, and flip the
        # indicator to ATTENTION so the error icon is displayed
        self.show_notify(message, Gtk.STOCK_DIALOG_ERROR)
        self.errorItem.show()
        self.indicator.set_status(AppIndicator3.IndicatorStatus.ATTENTION)

    def show_notify(self, message, iconName):
        # Called from worker threads, hence the Gdk threads_enter/leave guard
        Gdk.threads_enter()
        n = Notify.Notification.new("AutoKey", message, iconName)
        n.set_urgency(Notify.Urgency.LOW)
        n.show()
        Gdk.threads_leave()

    def update_tool_tip(self):
        # Indicators have no tooltip; method exists to match the Notifier interface
        pass

    def on_show_error(self, widget, data=None):
        # Show the error, hide the menu entry again, and restore normal status
        self.app.show_script_error()
        self.errorItem.hide()
        self.update_visible_status()

    def on_enable_toggled(self, widget, data=None):
        # Checkbox state drives pausing/unpausing the expansion service
        if widget.active:
            self.app.unpause_service()
        else:
            self.app.pause_service()

    def on_show_configure(self, widget, data=None):
        self.app.show_configure()

    def on_remove_icon(self, widget, data=None):
        # Hide the indicator and persist the preference
        self.indicator.set_status(AppIndicator3.IndicatorStatus.PASSIVE)
        ConfigManager.SETTINGS[SHOW_TRAY_ICON] = False

    def on_destroy_and_exit(self, widget, data=None):
        self.app.shutdown()
class UnityLauncher(IndicatorNotifier):
    """IndicatorNotifier variant that additionally populates the Unity launcher
    quicklist with "Enable Expansions" and "Show Main Window" entries.

    Unity/Dbusmenu bindings are imported lazily inside the methods so that this
    module still loads on desktops without them.
    """

    SHOW_ITEM_STRING = _("Add to quicklist/notification menu")

    def __getQuickItem(self, label):
        # Build a visible Dbusmenu item carrying the given label.
        # FIX: Dbusmenu was only imported locally inside rebuild_menu(), so this
        # method previously raised NameError; import it here as well.
        from gi.repository import Dbusmenu
        item = Dbusmenu.Menuitem.new()
        item.property_set(Dbusmenu.MENUITEM_PROP_LABEL, label)
        item.property_set_bool(Dbusmenu.MENUITEM_PROP_VISIBLE, True)
        return item

    def rebuild_menu(self):
        IndicatorNotifier.rebuild_menu(self)
        # FIX: the ImportError guard was commented out and leftover debug prints
        # referenced the un-imported 'threading' module; restore the guard and
        # drop the debug output.
        try:
            from gi.repository import Unity, Dbusmenu
        except ImportError:
            # Unity bindings unavailable: keep the plain indicator menu only
            return

        self.launcher = Unity.LauncherEntry.get_for_desktop_id("autokey-gtk.desktop")

        # Main Menu items
        enableMenuItem = self.__getQuickItem(_("Enable Expansions"))
        enableMenuItem.property_set(Dbusmenu.MENUITEM_PROP_TOGGLE_TYPE, Dbusmenu.MENUITEM_TOGGLE_CHECK)
        # Checked state mirrors whether the expansion service is running
        enableMenuItem.property_set_int(Dbusmenu.MENUITEM_PROP_TOGGLE_STATE, int(self.app.service.is_running()))
        enableMenuItem.property_set_bool(Dbusmenu.MENUITEM_PROP_ENABLED, not self.app.serviceDisabled)

        configureMenuItem = self.__getQuickItem(_("Show Main Window"))

        # Menu signals
        enableMenuItem.connect("item-activated", self.on_ql_enable_toggled, None)
        configureMenuItem.connect("item-activated", self.on_show_configure, None)

        # Construct the quicklist and attach it to the launcher entry
        quicklist = Dbusmenu.Menuitem.new()
        quicklist.child_append(enableMenuItem)
        quicklist.child_append(configureMenuItem)
        self.launcher.set_property("quicklist", quicklist)

    def on_ql_enable_toggled(self, menuitem, data=None):
        # FIX: constants were referenced via the undefined name 'Menuitem';
        # they live on the Dbusmenu module.
        from gi.repository import Dbusmenu
        if menuitem.property_get_int(Dbusmenu.MENUITEM_PROP_TOGGLE_STATE) == Dbusmenu.MENUITEM_TOGGLE_STATE_CHECKED:
            self.app.unpause_service()
        else:
            self.app.pause_service()
|
"""
Tests for L{twisted.application.strports}.
"""
from twisted.trial.unittest import TestCase
from twisted.application import strports
from twisted.application import internet
from twisted.internet.test.test_endpoints import ParserTestCase
from twisted.internet.protocol import Factory
from twisted.internet.endpoints import TCP4ServerEndpoint, UNIXServerEndpoint
class DeprecatedParseTestCase(ParserTestCase):
    """
    L{strports.parse} is deprecated.  It's an alias for a method that is now
    private in L{twisted.internet.endpoints}.
    """

    def parse(self, *a, **kw):
        """
        Call the deprecated L{strports.parse} and verify that exactly one
        deprecation warning with the expected message was emitted, then
        return the parse result so inherited assertions can inspect it.
        """
        result = strports.parse(*a, **kw)
        warnings = self.flushWarnings([self.parse])
        self.assertEquals(len(warnings), 1)
        self.assertEquals(
            warnings[0]['message'],
            "twisted.application.strports.parse was deprecated "
            "in Twisted 10.2.0: in favor of twisted.internet.endpoints.serverFromString")
        return result

    def test_simpleNumeric(self):
        """
        Base numeric ports should be parsed as TCP.
        """
        self.assertEquals(self.parse('80', self.f),
                          ('TCP', (80, self.f), {'interface':'', 'backlog':50}))

    def test_allKeywords(self):
        """
        A collection of keyword arguments with no prefixed type, like 'port=80',
        will be parsed as keyword arguments to 'tcp'.
        """
        self.assertEquals(self.parse('port=80', self.f),
                          ('TCP', (80, self.f), {'interface':'', 'backlog':50}))
class ServiceTestCase(TestCase):
    """
    Tests for L{strports.service}.
    """

    def test_service(self):
        """
        L{strports.service} returns a L{StreamServerEndpointService}
        constructed with an endpoint produced from
        L{endpoint.serverFromString}, using the same syntax.
        """
        reactor = object() # the cake is a lie
        aFactory = Factory()
        aGoodPort = 1337
        svc = strports.service(
            'tcp:'+str(aGoodPort), aFactory, reactor=reactor)
        self.assertIsInstance(svc, internet.StreamServerEndpointService)

        # See twisted.application.test.test_internet.TestEndpointService.
        # test_synchronousRaiseRaisesSynchronously
        self.assertEquals(svc._raiseSynchronously, True)
        self.assertIsInstance(svc.endpoint, TCP4ServerEndpoint)
        # Maybe we should implement equality for endpoints.
        self.assertEquals(svc.endpoint._port, aGoodPort)
        self.assertIdentical(svc.factory, aFactory)
        self.assertIdentical(svc.endpoint._reactor, reactor)

    def test_serviceDefaultReactor(self):
        """
        L{strports.service} will use the default reactor when none is provided
        as an argument.
        """
        from twisted.internet import reactor as globalReactor
        aService = strports.service("tcp:80", None)
        self.assertIdentical(aService.endpoint._reactor, globalReactor)

    def test_serviceDeprecatedDefault(self):
        """
        L{strports.service} still accepts a 'default' argument, which will
        affect the parsing of 'default' (i.e. 'not containing a colon')
        endpoint descriptions, but this behavior is deprecated.
        """
        # Passing an explicit default type must work but emit a deprecation
        svc = strports.service("8080", None, "unix")
        self.assertIsInstance(svc.endpoint, UNIXServerEndpoint)
        warnings = self.flushWarnings([self.test_serviceDeprecatedDefault])
        self.assertEquals(warnings[0]['category'], DeprecationWarning)
        self.assertEquals(
            warnings[0]['message'],
            "The 'default' parameter was deprecated in Twisted 10.2.0. "
            "Use qualified endpoint descriptions; for example, 'tcp:8080'.")
        self.assertEquals(len(warnings), 1)

        # Almost the same case, but slightly tricky - explicitly passing the old
        # default value, None, also must trigger a deprecation warning.
        svc = strports.service("tcp:8080", None, None)
        self.assertIsInstance(svc.endpoint, TCP4ServerEndpoint)
        warnings = self.flushWarnings([self.test_serviceDeprecatedDefault])
        self.assertEquals(warnings[0]['category'], DeprecationWarning)
        self.assertEquals(
            warnings[0]['message'],
            "The 'default' parameter was deprecated in Twisted 10.2.0.")
        self.assertEquals(len(warnings), 1)

    def test_serviceDeprecatedUnqualified(self):
        """
        Unqualified strport descriptions, i.e. "8080", are deprecated.
        """
        svc = strports.service("8080", None)
        self.assertIsInstance(svc.endpoint, TCP4ServerEndpoint)
        warnings = self.flushWarnings(
            [self.test_serviceDeprecatedUnqualified])
        self.assertEquals(warnings[0]['category'], DeprecationWarning)
        self.assertEquals(
            warnings[0]['message'],
            "Unqualified strport description passed to 'service'."
            "Use qualified endpoint descriptions; for example, 'tcp:8080'.")
        self.assertEquals(len(warnings), 1)
|
import glob
import os
import pickle
import platform
import select
import shlex
import subprocess
import traceback
from ansible.module_utils.six import PY2, b
from ansible.module_utils._text import to_bytes, to_text
def sysv_is_enabled(name):
    '''
    This function will check if the service name supplied
    is enabled in any of the sysv runlevels

    :arg name: name of the service to test for
    '''
    # An S??-prefixed symlink in any /etc/rc?.d directory means "enabled"
    start_links = glob.glob('/etc/rc?.d/S??%s' % name)
    return len(start_links) > 0
def get_sysv_script(name):
    '''
    This function will return the expected path for an init script
    corresponding to the service name supplied.

    :arg name: name or path of the service to test for
    '''
    # Absolute paths are returned untouched; bare names map into /etc/init.d
    return name if name.startswith('/') else '/etc/init.d/%s' % name
def sysv_exists(name):
    '''
    This function will return True or False depending on
    the existence of an init script corresponding to the service name supplied.

    :arg name: name of the service to test for
    '''
    script_path = get_sysv_script(name)
    return os.path.exists(script_path)
def fail_if_missing(module, found, service, msg=''):
    '''
    This function will return an error or exit gracefully depending on check mode status
    and if the service is missing or not.

    :arg module: is an AnsibleModule object, used for it's utility methods
    :arg found: boolean indicating if services was found or not
    :arg service: name of service
    :kw msg: extra info to append to error/success msg when missing
    '''
    if found:
        return
    # In check mode a missing service is assumed to appear during the real run
    if module.check_mode:
        module.exit_json(msg="Service %s not found on %s, assuming it will exist on full run" % (service, msg), changed=True)
    else:
        module.fail_json(msg='Could not find the requested service %s: %s' % (service, msg))
def daemonize(module, cmd):
    '''
    Execute a command while detaching as a daemon, returns rc, stdout, and stderr.

    :arg module: is an AnsibleModule object, used for it's utility methods
    :arg cmd: is a list or string representing the command and options to run

    This is complex because daemonization is hard for people.
    What we do is daemonize a part of this module, the daemon runs the command,
    picks up the return code and output, and returns it to the main process.
    '''
    # init some vars
    chunk = 4096  # FIXME: pass in as arg?
    errors = 'surrogate_or_strict'

    # start it!
    try:
        # Pipe carries the pickled (rc, stdout, stderr) from daemon back to parent
        pipe = os.pipe()
        pid = os.fork()
    except OSError:
        module.fail_json(msg="Error while attempting to fork: %s", exception=traceback.format_exc())

    # we don't do any locking as this should be a unique module/process
    if pid == 0:
        os.close(pipe[0])

        # Set stdin/stdout/stderr to /dev/null
        fd = os.open(os.devnull, os.O_RDWR)

        # clone stdin/out/err
        for num in range(3):
            if fd != num:
                os.dup2(fd, num)

        # close otherwise
        if fd not in range(3):
            os.close(fd)

        # Make us a daemon (double fork + setsid detaches from the controlling tty)
        pid = os.fork()

        # end if not in child
        if pid > 0:
            os._exit(0)

        # get new process session and detach
        sid = os.setsid()
        if sid == -1:
            module.fail_json(msg="Unable to detach session while daemonizing")

        # avoid possible problems with cwd being removed
        os.chdir("/")

        pid = os.fork()
        if pid > 0:
            os._exit(0)

        # if command is string deal with py2 vs py3 conversions for shlex
        if not isinstance(cmd, list):
            if PY2:
                cmd = shlex.split(to_bytes(cmd, errors=errors))
            else:
                cmd = shlex.split(to_text(cmd, errors=errors))

        # make sure we always use byte strings
        run_cmd = []
        for c in cmd:
            run_cmd.append(to_bytes(c, errors=errors))

        # execute the command in forked process; the child must not inherit the
        # write end of the result pipe, hence the preexec close
        p = subprocess.Popen(run_cmd, shell=False, stdout=subprocess.PIPE, stderr=subprocess.PIPE, preexec_fn=lambda: os.close(pipe[1]))
        fds = [p.stdout, p.stderr]

        # loop reading output till its done
        # BUG FIX: was `p.sterr`, which raised AttributeError and prevented any
        # daemonized command output from ever being collected
        output = {p.stdout: b(""), p.stderr: b("")}
        while fds:
            rfd, wfd, efd = select.select(fds, [], fds, 1)
            if (rfd + wfd + efd) or p.poll():
                for out in fds:
                    if out in rfd:
                        data = os.read(out.fileno(), chunk)
                        if not data:
                            # EOF on this stream; stop selecting on it
                            fds.remove(out)
                        output[out] += b(data)

        # even after fds close, we might want to wait for pid to die
        p.wait()

        # Return a pickled data of parent
        return_data = pickle.dumps([p.returncode, to_text(output[p.stdout]), to_text(output[p.stderr])], protocol=pickle.HIGHEST_PROTOCOL)
        os.write(pipe[1], to_bytes(return_data, errors=errors))

        # clean up
        os.close(pipe[1])
        os._exit(0)

    elif pid == -1:
        module.fail_json(msg="Unable to fork, no exception thrown, probably due to lack of resources, check logs.")

    else:
        # in parent
        os.close(pipe[1])
        os.waitpid(pid, 0)

        # Grab response data after child finishes
        return_data = b("")
        while True:
            rfd, wfd, efd = select.select([pipe[0]], [], [pipe[0]])
            if pipe[0] in rfd:
                data = os.read(pipe[0], chunk)
                if not data:
                    break
                return_data += b(data)

        # Note: no need to specify encoding on py3 as this module sends the
        # pickle to itself (thus same python interpreter so we aren't mixing
        # py2 and py3)
        return pickle.loads(to_bytes(return_data, errors=errors))
def check_ps(module, pattern):
    """Return True when *pattern* appears in any line of ``ps`` output.

    Solaris uses SysV-style flags (``-ef``); every other platform gets
    BSD-style ``auxww``.  A non-zero exit status from ps is treated as
    "process not found".
    """
    psflags = '-ef' if platform.system() == 'SunOS' else 'auxww'
    # Locate the ps binary (required=True makes module fail if missing).
    psbin = module.get_bin_path('ps', True)
    rc, out, err = module.run_command('%s %s' % (psbin, psflags))
    if rc != 0:
        return False
    return any(pattern in line for line in out.split('\n'))
|
"""Fixer for intern().
intern(s) -> sys.intern(s)"""
from .. import pytree
from .. import fixer_base
from ..fixer_util import Name, Attr, touch_import
class FixIntern(fixer_base.BaseFix):
    """Rewrite ``intern(s)`` calls as ``sys.intern(s)`` and ensure ``sys``
    is imported in the module being fixed."""
    BM_compatible = True
    order = "pre"
    PATTERN = """
    power< 'intern'
    trailer< lpar='('
    ( not(arglist | argument<any '=' any>) obj=any
    | obj=arglist<(not argument<any '=' any>) any ','> )
    rpar=')' >
    after=any*
    >
    """
    def transform(self, node, results):
        syms = self.syms
        # Clone the matched argument; wrap a single expression in an
        # arglist node so the rebuilt call always has a uniform shape.
        matched = results["obj"].clone()
        if matched.type == syms.arglist:
            call_args = matched.clone()
        else:
            call_args = pytree.Node(syms.arglist, [matched.clone()])
        trailing = results["after"]
        if trailing:
            trailing = [child.clone() for child in trailing]
        call = pytree.Node(syms.trailer,
                           [results["lpar"].clone(),
                            call_args,
                            results["rpar"].clone()])
        replacement = pytree.Node(syms.power,
                                  Attr(Name(u"sys"), Name(u"intern")) +
                                  [call] + trailing)
        # Preserve the whitespace/comments that preceded the old call.
        replacement.prefix = node.prefix
        # Make sure ``sys`` is imported at the top of the module.
        touch_import(None, u'sys', node)
        return replacement
|
"""
.. warn:: This module_util is currently internal implementation.
We want to evaluate this code for stability and API suitability before
making backwards compatibility guarantees. The API may change between
releases. Do not use this unless you are willing to port your module code.
"""
import codecs
from ansible.module_utils.six import PY3, text_type, binary_type
# Detect once, at import time, whether the 'surrogateescape' codec error
# handler is available (always on Python 3; on Python 2 only if a backport
# was registered before this module is imported).
try:
    codecs.lookup_error('surrogateescape')
    HAS_SURROGATEESCAPE = True
except LookupError:
    HAS_SURROGATEESCAPE = False
# Names of the composite error strategies implemented by to_bytes()/to_text()
# (plus None, meaning "use the default strategy").  The docstrings document
# 'surrogate_or_replace', but that name was missing from this set, so passing
# it leaked the unknown handler name straight into str.encode()/bytes.decode()
# and raised LookupError.  'surrogate_or_escape' is retained only for
# backward compatibility with callers of the old (misspelled) name.
_COMPOSED_ERROR_HANDLERS = frozenset((None, 'surrogate_or_escape',
                                      'surrogate_or_replace',
                                      'surrogate_or_strict',
                                      'surrogate_then_replace'))
def to_bytes(obj, encoding='utf-8', errors=None, nonstring='simplerepr'):
    """Make sure that a string is a byte string
    :arg obj: An object to make sure is a byte string. In most cases this
        will be either a text string or a byte string. However, with
        ``nonstring='simplerepr'``, this can be used as a traceback-free
        version of ``str(obj)``.
    :kwarg encoding: The encoding to use to transform from a text string to
        a byte string. Defaults to using 'utf-8'.
    :kwarg errors: The error handler to use if the text string is not
        encodable using the specified encoding. Any valid `codecs error
        handler <https://docs.python.org/2/library/codecs.html#codec-base-classes>`_
        may be specified. There are three additional error strategies
        specifically aimed at helping people to port code. The first two are:
        :surrogate_or_strict: Will use ``surrogateescape`` if it is a valid
            handler, otherwise it will use ``strict``
        :surrogate_or_replace: Will use ``surrogateescape`` if it is a valid
            handler, otherwise it will use ``replace``.
        Because ``surrogateescape`` was added in Python3 this usually means that
        Python3 will use ``surrogateescape`` and Python2 will use the fallback
        error handler. Note that the code checks for ``surrogateescape`` when the
        module is imported. If you have a backport of ``surrogateescape`` for
        Python2, be sure to register the error handler prior to importing this
        module.
        The last error handler is:
        :surrogate_then_replace: Will use ``surrogateescape`` if it is a valid
            handler. If encoding with ``surrogateescape`` would traceback,
            surrogates are first replaced with a replacement characters
            and then the string is encoded using ``replace`` (which replaces
            the rest of the nonencodable bytes). If ``surrogateescape`` is
            not present it will simply use ``replace``. (Added in Ansible 2.3)
            This strategy is designed to never traceback when it attempts
            to encode a string.
        The default until Ansible-2.2 was ``surrogate_or_replace``
        From Ansible-2.3 onwards, the default is ``surrogate_then_replace``.
    :kwarg nonstring: The strategy to use if a nonstring is specified in
        ``obj``. Default is 'simplerepr'. Valid values are:
        :simplerepr: The default. This takes the ``str`` of the object and
            then returns the bytes version of that string.
        :empty: Return an empty byte string
        :passthru: Return the object passed in
        :strict: Raise a :exc:`TypeError`
    :returns: Typically this returns a byte string. If a nonstring object is
        passed in this may be a different type depending on the strategy
        specified by nonstring. This will never return a text string.
    .. note:: If passed a byte string, this function does not check that the
        string is valid in the specified encoding. If it's important that the
        byte string is in the specified encoding do::
        encoded_string = to_bytes(to_text(input_string, 'latin-1'), 'utf-8')
    .. version_changed:: 2.3
        Added the ``surrogate_then_replace`` error handler and made it the default error handler.
    """
    if isinstance(obj, binary_type):
        # Already bytes: returned untouched (no re-encoding or validation).
        return obj
    # We're given a text string
    # If it has surrogates, we know because it will decode
    original_errors = errors  # remember the caller's strategy; needed for the fallback below
    if errors in _COMPOSED_ERROR_HANDLERS:
        # Map the composite strategy names onto real codec error handlers.
        if HAS_SURROGATEESCAPE:
            errors = 'surrogateescape'
        elif errors == 'surrogate_or_strict':
            errors = 'strict'
        else:
            errors = 'replace'
    if isinstance(obj, text_type):
        try:
            # Try this first as it's the fastest
            return obj.encode(encoding, errors)
        except UnicodeEncodeError:
            if original_errors in (None, 'surrogate_then_replace'):
                # Slow but works: round-trip through utf-8 so lone surrogates
                # become replacement characters, then encode with 'replace'.
                return_string = obj.encode('utf-8', 'surrogateescape')
                return_string = return_string.decode('utf-8', 'replace')
                return return_string.encode(encoding, 'replace')
            raise
    # Note: We do these last even though we have to call to_bytes again on the
    # value because we're optimizing the common case
    if nonstring == 'simplerepr':
        try:
            value = str(obj)
        except UnicodeError:
            try:
                value = repr(obj)
            except UnicodeError:
                # Giving up
                return to_bytes('')
    elif nonstring == 'passthru':
        return obj
    elif nonstring == 'empty':
        # python2.4 doesn't have b''
        return to_bytes('')
    elif nonstring == 'strict':
        raise TypeError('obj must be a string type')
    else:
        raise TypeError('Invalid value %s for to_bytes\' nonstring parameter' % nonstring)
    # Recurse so the stringified nonstring value goes through the text path.
    return to_bytes(value, encoding, errors)
def to_text(obj, encoding='utf-8', errors=None, nonstring='simplerepr'):
    """Make sure that a string is a text string
    :arg obj: An object to make sure is a text string. In most cases this
        will be either a text string or a byte string. However, with
        ``nonstring='simplerepr'``, this can be used as a traceback-free
        version of ``str(obj)``.
    :kwarg encoding: The encoding to use to transform from a byte string to
        a text string. Defaults to using 'utf-8'.
    :kwarg errors: The error handler to use if the byte string is not
        decodable using the specified encoding. Any valid `codecs error
        handler <https://docs.python.org/2/library/codecs.html#codec-base-classes>`_
        may be specified. We support three additional error strategies
        specifically aimed at helping people to port code:
        :surrogate_or_strict: Will use surrogateescape if it is a valid
            handler, otherwise it will use strict
        :surrogate_or_replace: Will use surrogateescape if it is a valid
            handler, otherwise it will use replace.
        :surrogate_then_replace: Does the same as surrogate_or_replace but
            `was added for symmetry with the error handlers in
            :func:`ansible.module_utils._text.to_bytes` (Added in Ansible 2.3)
        Because surrogateescape was added in Python3 this usually means that
        Python3 will use `surrogateescape` and Python2 will use the fallback
        error handler. Note that the code checks for surrogateescape when the
        module is imported. If you have a backport of `surrogateescape` for
        python2, be sure to register the error handler prior to importing this
        module.
        The default until Ansible-2.2 was `surrogate_or_replace`
        In Ansible-2.3 this defaults to `surrogate_then_replace` for symmetry
        with :func:`ansible.module_utils._text.to_bytes` .
    :kwarg nonstring: The strategy to use if a nonstring is specified in
        ``obj``. Default is 'simplerepr'. Valid values are:
        :simplerepr: The default. This takes the ``str`` of the object and
            then returns the text version of that string.
        :empty: Return an empty text string
        :passthru: Return the object passed in
        :strict: Raise a :exc:`TypeError`
    :returns: Typically this returns a text string. If a nonstring object is
        passed in this may be a different type depending on the strategy
        specified by nonstring. This will never return a byte string.
    From Ansible-2.3 onwards, the default is `surrogate_then_replace`.
    .. version_changed:: 2.3
        Added the surrogate_then_replace error handler and made it the default error handler.
    """
    if isinstance(obj, text_type):
        # Already text: nothing to do.
        return obj
    if errors in _COMPOSED_ERROR_HANDLERS:
        # Map the composite strategy names onto real codec error handlers.
        if HAS_SURROGATEESCAPE:
            errors = 'surrogateescape'
        elif errors == 'surrogate_or_strict':
            errors = 'strict'
        else:
            errors = 'replace'
    if isinstance(obj, binary_type):
        # Note: We don't need special handling for surrogate_then_replace
        # because all bytes will either be made into surrogates or are valid
        # to decode.
        return obj.decode(encoding, errors)
    # Note: We do these last even though we have to call to_text again on the
    # value because we're optimizing the common case
    if nonstring == 'simplerepr':
        try:
            value = str(obj)
        except UnicodeError:
            try:
                value = repr(obj)
            except UnicodeError:
                # Giving up
                return u''
    elif nonstring == 'passthru':
        return obj
    elif nonstring == 'empty':
        return u''
    elif nonstring == 'strict':
        raise TypeError('obj must be a string type')
    else:
        raise TypeError('Invalid value %s for to_text\'s nonstring parameter' % nonstring)
    # Recurse so the stringified nonstring value goes through the bytes path.
    return to_text(value, encoding, errors)
# The "native" str type is text on Python 3 and bytes on Python 2; bind
# to_native to whichever converter produces that type.
to_native = to_text if PY3 else to_bytes
|
import frappe
def execute():
    """Patch: add a ``device`` column to tabSessions when it is missing."""
    existing_columns = frappe.db.get_table_columns("Sessions")
    if "device" in existing_columns:
        return
    frappe.db.sql("alter table tabSessions add column `device` varchar(255) default 'desktop'")
|
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from ansible.compat.tests import unittest
from ansible.compat.tests.mock import patch, MagicMock
from ansible.errors import AnsibleError, AnsibleParserError
from ansible.executor.play_iterator import HostState, PlayIterator
from ansible.playbook import Playbook
from ansible.playbook.task import Task
from ansible.playbook.play_context import PlayContext
from units.mock.loader import DictDataLoader
from units.mock.path import mock_unfrackpath_noop
class TestPlayIterator(unittest.TestCase):
    def setUp(self):
        """No shared fixtures; each test builds its own mocks/loaders."""
        pass
    def tearDown(self):
        """Nothing to clean up (see setUp)."""
        pass
def test_host_state(self):
hs = HostState(blocks=[x for x in range(0, 10)])
hs.tasks_child_state = HostState(blocks=[0])
hs.rescue_child_state = HostState(blocks=[1])
hs.always_child_state = HostState(blocks=[2])
hs.__repr__()
hs.run_state = 100
hs.__repr__()
hs.fail_state = 15
hs.__repr__()
for i in range(0, 10):
hs.cur_block = i
self.assertEqual(hs.get_current_block(), i)
new_hs = hs.copy()
    @patch('ansible.playbook.role.definition.unfrackpath', mock_unfrackpath_noop)
    def test_play_iterator(self):
        """Walk a full play (pre_tasks, a role with nested blocks and an
        include, block/rescue/always at play level, post_tasks) and assert
        the iterator yields every task in the expected order, and that a
        failure handled by a rescue block does not mark the host failed."""
        #import epdb; epdb.st()
        fake_loader = DictDataLoader({
            "test_play.yml": """
            - hosts: all
              gather_facts: false
              roles:
              - test_role
              pre_tasks:
              - debug: msg="this is a pre_task"
              tasks:
              - debug: msg="this is a regular task"
              - block:
                - debug: msg="this is a block task"
                - block:
                  - debug: msg="this is a sub-block in a block"
                rescue:
                - debug: msg="this is a rescue task"
                - block:
                  - debug: msg="this is a sub-block in a rescue"
                always:
                - debug: msg="this is an always task"
                - block:
                  - debug: msg="this is a sub-block in an always"
              post_tasks:
              - debug: msg="this is a post_task"
            """,
            '/etc/ansible/roles/test_role/tasks/main.yml': """
            - name: role task
              debug: msg="this is a role task"
            - block:
              - name: role block task
                debug: msg="inside block in role"
              always:
              - name: role always task
                debug: msg="always task in block in role"
            - include: foo.yml
            - name: role task after include
              debug: msg="after include in role"
            - block:
              - name: starting role nested block 1
                debug:
              - block:
                - name: role nested block 1 task 1
                  debug:
                - name: role nested block 1 task 2
                  debug:
                - name: role nested block 1 task 3
                  debug:
              - name: end of role nested block 1
                debug:
              - name: starting role nested block 2
                debug:
              - block:
                - name: role nested block 2 task 1
                  debug:
                - name: role nested block 2 task 2
                  debug:
                - name: role nested block 2 task 3
                  debug:
              - name: end of role nested block 2
                debug:
            """,
            '/etc/ansible/roles/test_role/tasks/foo.yml': """
            - name: role included task
              debug: msg="this is task in an include from a role"
            """
        })
        mock_var_manager = MagicMock()
        mock_var_manager._fact_cache = dict()
        mock_var_manager.get_vars.return_value = dict()
        p = Playbook.load('test_play.yml', loader=fake_loader, variable_manager=mock_var_manager)
        hosts = []
        for i in range(0, 10):
            host = MagicMock()
            host.name = host.get_name.return_value = 'host%02d' % i
            hosts.append(host)
        # seed an (empty) fact-cache entry for the first host
        mock_var_manager._fact_cache['host00'] = dict()
        inventory = MagicMock()
        inventory.get_hosts.return_value = hosts
        inventory.filter_hosts.return_value = hosts
        play_context = PlayContext(play=p._entries[0])
        itr = PlayIterator(
            inventory=inventory,
            play=p._entries[0],
            play_context=play_context,
            variable_manager=mock_var_manager,
            all_vars=dict(),
        )
        # look up an original task
        target_task = p._entries[0].tasks[0].block[0]
        task_copy = target_task.copy(exclude_parent=True)
        found_task = itr.get_original_task(hosts[0], task_copy)
        self.assertEqual(target_task, found_task)
        bad_task = Task()
        found_task = itr.get_original_task(hosts[0], bad_task)
        self.assertIsNone(found_task)
        # pre task
        (host_state, task) = itr.get_next_task_for_host(hosts[0])
        self.assertIsNotNone(task)
        self.assertEqual(task.action, 'debug')
        # implicit meta: flush_handlers
        (host_state, task) = itr.get_next_task_for_host(hosts[0])
        self.assertIsNotNone(task)
        self.assertEqual(task.action, 'meta')
        # role task
        (host_state, task) = itr.get_next_task_for_host(hosts[0])
        self.assertIsNotNone(task)
        self.assertEqual(task.action, 'debug')
        self.assertEqual(task.name, "role task")
        self.assertIsNotNone(task._role)
        # role block task
        (host_state, task) = itr.get_next_task_for_host(hosts[0])
        self.assertIsNotNone(task)
        self.assertEqual(task.name, "role block task")
        self.assertIsNotNone(task._role)
        # role block always task
        (host_state, task) = itr.get_next_task_for_host(hosts[0])
        self.assertIsNotNone(task)
        self.assertEqual(task.name, "role always task")
        self.assertIsNotNone(task._role)
        # role include task (disabled; kept for reference)
        #(host_state, task) = itr.get_next_task_for_host(hosts[0])
        #self.assertIsNotNone(task)
        #self.assertEqual(task.action, 'debug')
        #self.assertEqual(task.name, "role included task")
        #self.assertIsNotNone(task._role)
        # role task after include
        (host_state, task) = itr.get_next_task_for_host(hosts[0])
        self.assertIsNotNone(task)
        self.assertEqual(task.name, "role task after include")
        self.assertIsNotNone(task._role)
        # role nested block tasks
        (host_state, task) = itr.get_next_task_for_host(hosts[0])
        self.assertIsNotNone(task)
        self.assertEqual(task.name, "starting role nested block 1")
        self.assertIsNotNone(task._role)
        (host_state, task) = itr.get_next_task_for_host(hosts[0])
        self.assertIsNotNone(task)
        self.assertEqual(task.name, "role nested block 1 task 1")
        self.assertIsNotNone(task._role)
        (host_state, task) = itr.get_next_task_for_host(hosts[0])
        self.assertIsNotNone(task)
        self.assertEqual(task.name, "role nested block 1 task 2")
        self.assertIsNotNone(task._role)
        (host_state, task) = itr.get_next_task_for_host(hosts[0])
        self.assertIsNotNone(task)
        self.assertEqual(task.name, "role nested block 1 task 3")
        self.assertIsNotNone(task._role)
        (host_state, task) = itr.get_next_task_for_host(hosts[0])
        self.assertIsNotNone(task)
        self.assertEqual(task.name, "end of role nested block 1")
        self.assertIsNotNone(task._role)
        (host_state, task) = itr.get_next_task_for_host(hosts[0])
        self.assertIsNotNone(task)
        self.assertEqual(task.name, "starting role nested block 2")
        self.assertIsNotNone(task._role)
        (host_state, task) = itr.get_next_task_for_host(hosts[0])
        self.assertIsNotNone(task)
        self.assertEqual(task.name, "role nested block 2 task 1")
        self.assertIsNotNone(task._role)
        (host_state, task) = itr.get_next_task_for_host(hosts[0])
        self.assertIsNotNone(task)
        self.assertEqual(task.name, "role nested block 2 task 2")
        self.assertIsNotNone(task._role)
        (host_state, task) = itr.get_next_task_for_host(hosts[0])
        self.assertIsNotNone(task)
        self.assertEqual(task.name, "role nested block 2 task 3")
        self.assertIsNotNone(task._role)
        (host_state, task) = itr.get_next_task_for_host(hosts[0])
        self.assertIsNotNone(task)
        self.assertEqual(task.name, "end of role nested block 2")
        self.assertIsNotNone(task._role)
        # regular play task
        (host_state, task) = itr.get_next_task_for_host(hosts[0])
        self.assertIsNotNone(task)
        self.assertEqual(task.action, 'debug')
        self.assertIsNone(task._role)
        # block task
        (host_state, task) = itr.get_next_task_for_host(hosts[0])
        self.assertIsNotNone(task)
        self.assertEqual(task.action, 'debug')
        self.assertEqual(task.args, dict(msg="this is a block task"))
        # sub-block task
        (host_state, task) = itr.get_next_task_for_host(hosts[0])
        self.assertIsNotNone(task)
        self.assertEqual(task.action, 'debug')
        self.assertEqual(task.args, dict(msg="this is a sub-block in a block"))
        # mark the host failed
        itr.mark_host_failed(hosts[0])
        # block rescue task
        (host_state, task) = itr.get_next_task_for_host(hosts[0])
        self.assertIsNotNone(task)
        self.assertEqual(task.action, 'debug')
        self.assertEqual(task.args, dict(msg="this is a rescue task"))
        # sub-block rescue task
        (host_state, task) = itr.get_next_task_for_host(hosts[0])
        self.assertIsNotNone(task)
        self.assertEqual(task.action, 'debug')
        self.assertEqual(task.args, dict(msg="this is a sub-block in a rescue"))
        # block always task
        (host_state, task) = itr.get_next_task_for_host(hosts[0])
        self.assertIsNotNone(task)
        self.assertEqual(task.action, 'debug')
        self.assertEqual(task.args, dict(msg="this is an always task"))
        # sub-block always task
        (host_state, task) = itr.get_next_task_for_host(hosts[0])
        self.assertIsNotNone(task)
        self.assertEqual(task.action, 'debug')
        self.assertEqual(task.args, dict(msg="this is a sub-block in an always"))
        # implicit meta: flush_handlers
        (host_state, task) = itr.get_next_task_for_host(hosts[0])
        self.assertIsNotNone(task)
        self.assertEqual(task.action, 'meta')
        # post task
        (host_state, task) = itr.get_next_task_for_host(hosts[0])
        self.assertIsNotNone(task)
        self.assertEqual(task.action, 'debug')
        # implicit meta: flush_handlers
        (host_state, task) = itr.get_next_task_for_host(hosts[0])
        self.assertIsNotNone(task)
        self.assertEqual(task.action, 'meta')
        # end of iteration
        (host_state, task) = itr.get_next_task_for_host(hosts[0])
        self.assertIsNone(task)
        # host 0 shouldn't be in the failed hosts, as the error
        # was handled by a rescue block
        failed_hosts = itr.get_failed_hosts()
        self.assertNotIn(hosts[0], failed_hosts)
    def test_play_iterator_nested_blocks(self):
        """Deeply nested block/rescue/always structures must still yield the
        inner debug tasks in order, and failing the host mid-block must route
        iteration through the rescue and always branches."""
        fake_loader = DictDataLoader({
            "test_play.yml": """
            - hosts: all
              gather_facts: false
              tasks:
              - block:
                - block:
                  - block:
                    - block:
                      - block:
                        - debug: msg="this is the first task"
                        - ping:
                      rescue:
                      - block:
                        - block:
                          - block:
                            - block:
                              - debug: msg="this is the rescue task"
                  always:
                  - block:
                    - block:
                      - block:
                        - block:
                          - debug: msg="this is the always task"
            """,
        })
        mock_var_manager = MagicMock()
        mock_var_manager._fact_cache = dict()
        mock_var_manager.get_vars.return_value = dict()
        p = Playbook.load('test_play.yml', loader=fake_loader, variable_manager=mock_var_manager)
        hosts = []
        for i in range(0, 10):
            host = MagicMock()
            host.name = host.get_name.return_value = 'host%02d' % i
            hosts.append(host)
        inventory = MagicMock()
        inventory.get_hosts.return_value = hosts
        inventory.filter_hosts.return_value = hosts
        play_context = PlayContext(play=p._entries[0])
        itr = PlayIterator(
            inventory=inventory,
            play=p._entries[0],
            play_context=play_context,
            variable_manager=mock_var_manager,
            all_vars=dict(),
        )
        # implicit meta: flush_handlers
        (host_state, task) = itr.get_next_task_for_host(hosts[0])
        self.assertIsNotNone(task)
        self.assertEqual(task.action, 'meta')
        self.assertEqual(task.args, dict(_raw_params='flush_handlers'))
        # get the first task
        (host_state, task) = itr.get_next_task_for_host(hosts[0])
        self.assertIsNotNone(task)
        self.assertEqual(task.action, 'debug')
        self.assertEqual(task.args, dict(msg='this is the first task'))
        # fail the host
        itr.mark_host_failed(hosts[0])
        # get the rescue task
        (host_state, task) = itr.get_next_task_for_host(hosts[0])
        self.assertIsNotNone(task)
        self.assertEqual(task.action, 'debug')
        self.assertEqual(task.args, dict(msg='this is the rescue task'))
        # get the always task
        (host_state, task) = itr.get_next_task_for_host(hosts[0])
        self.assertIsNotNone(task)
        self.assertEqual(task.action, 'debug')
        self.assertEqual(task.args, dict(msg='this is the always task'))
        # implicit meta: flush_handlers
        (host_state, task) = itr.get_next_task_for_host(hosts[0])
        self.assertIsNotNone(task)
        self.assertEqual(task.action, 'meta')
        self.assertEqual(task.args, dict(_raw_params='flush_handlers'))
        # implicit meta: flush_handlers
        (host_state, task) = itr.get_next_task_for_host(hosts[0])
        self.assertIsNotNone(task)
        self.assertEqual(task.action, 'meta')
        self.assertEqual(task.args, dict(_raw_params='flush_handlers'))
        # end of iteration
        (host_state, task) = itr.get_next_task_for_host(hosts[0])
        self.assertIsNone(task)
    def test_play_iterator_add_tasks(self):
        """Exercise the public add_tasks() wrapper and then the internal
        _insert_tasks_into_state() helper directly: empty task list, failed
        state, insertion into a rescue branch, and a regular insertion."""
        fake_loader = DictDataLoader({
            'test_play.yml': """
            - hosts: all
              gather_facts: no
              tasks:
              - debug: msg="dummy task"
            """,
        })
        mock_var_manager = MagicMock()
        mock_var_manager._fact_cache = dict()
        mock_var_manager.get_vars.return_value = dict()
        p = Playbook.load('test_play.yml', loader=fake_loader, variable_manager=mock_var_manager)
        hosts = []
        for i in range(0, 10):
            host = MagicMock()
            host.name = host.get_name.return_value = 'host%02d' % i
            hosts.append(host)
        inventory = MagicMock()
        inventory.get_hosts.return_value = hosts
        inventory.filter_hosts.return_value = hosts
        play_context = PlayContext(play=p._entries[0])
        itr = PlayIterator(
            inventory=inventory,
            play=p._entries[0],
            play_context=play_context,
            variable_manager=mock_var_manager,
            all_vars=dict(),
        )
        # test the high-level add_tasks() method
        s = HostState(blocks=[0,1,2])
        itr._insert_tasks_into_state = MagicMock(return_value=s)
        itr.add_tasks(hosts[0], [MagicMock(), MagicMock(), MagicMock()])
        self.assertEqual(itr._host_states[hosts[0].name], s)
        # now actually test the lower-level method that does the work
        itr = PlayIterator(
            inventory=inventory,
            play=p._entries[0],
            play_context=play_context,
            variable_manager=mock_var_manager,
            all_vars=dict(),
        )
        # iterate past first task
        _, task = itr.get_next_task_for_host(hosts[0])
        while(task and task.action != 'debug'):
            _, task = itr.get_next_task_for_host(hosts[0])
        if task is None:
            raise Exception("iterated past end of play while looking for place to insert tasks")
        # get the current host state and copy it so we can mutate it
        s = itr.get_host_state(hosts[0])
        s_copy = s.copy()
        # assert with an empty task list, or if we're in a failed state, we simply return the state as-is
        res_state = itr._insert_tasks_into_state(s_copy, task_list=[])
        self.assertEqual(res_state, s_copy)
        s_copy.fail_state = itr.FAILED_TASKS
        res_state = itr._insert_tasks_into_state(s_copy, task_list=[MagicMock()])
        self.assertEqual(res_state, s_copy)
        # but if we've failed with a rescue/always block
        mock_task = MagicMock()
        s_copy.run_state = itr.ITERATING_RESCUE
        res_state = itr._insert_tasks_into_state(s_copy, task_list=[mock_task])
        self.assertEqual(res_state, s_copy)
        self.assertIn(mock_task, res_state._blocks[res_state.cur_block].rescue)
        itr._host_states[hosts[0].name] = res_state
        (next_state, next_task) = itr.get_next_task_for_host(hosts[0], peek=True)
        self.assertEqual(next_task, mock_task)
        itr._host_states[hosts[0].name] = s
        # test a regular insertion
        s_copy = s.copy()
        res_state = itr._insert_tasks_into_state(s_copy, task_list=[MagicMock()])
|
import sys
import os
import unittest
import cStringIO
import warnings
import re
try:
import json
except ImportError:
import simplejson as json
from support import html5lib_test_files
from html5lib.tokenizer import HTMLTokenizer
from html5lib import constants
class TokenizerTestParser(object):
    """Drive HTMLTokenizer over a test input and record the emitted tokens
    in the list-based format used by the html5lib tokenizer test files.
    (Python 2 code: relies on dict.iteritems and u'' literals.)
    """
    def __init__(self, initialState, lastStartTag=None):
        # Store the tokenizer *class*; a fresh instance is built per parse().
        self.tokenizer = HTMLTokenizer
        self._state = initialState
        self._lastStartTag = lastStartTag
    def parse(self, stream, encoding=None, innerHTML=False):
        """Tokenize *stream* and return the collected output tokens."""
        tokenizer = self.tokenizer(stream, encoding)
        self.outputTokens = []
        # Force the tokenizer to start in the state named by the test case.
        tokenizer.state = getattr(tokenizer, self._state)
        if self._lastStartTag is not None:
            tokenizer.currentToken = {"type": "startTag",
                                      "name":self._lastStartTag}
        # Invert tokenTypes (number -> name) so each emitted token can be
        # dispatched to the matching process<Name> method below.
        types = dict((v,k) for k,v in constants.tokenTypes.iteritems())
        for token in tokenizer:
            getattr(self, 'process%s' % types[token["type"]])(token)
        return self.outputTokens
    def processDoctype(self, token):
        self.outputTokens.append([u"DOCTYPE", token["name"], token["publicId"],
                                  token["systemId"], token["correct"]])
    def processStartTag(self, token):
        # Attributes arrive as a list of pairs; reversing before dict() makes
        # the *first* occurrence of a duplicate attribute win.
        self.outputTokens.append([u"StartTag", token["name"],
                                  dict(token["data"][::-1]), token["selfClosing"]])
    def processEmptyTag(self, token):
        # A self-closing tag on a non-void element is itself a parse error.
        if token["name"] not in constants.voidElements:
            self.outputTokens.append(u"ParseError")
        self.outputTokens.append([u"StartTag", token["name"], dict(token["data"][::-1])])
    def processEndTag(self, token):
        self.outputTokens.append([u"EndTag", token["name"],
                                  token["selfClosing"]])
    def processComment(self, token):
        self.outputTokens.append([u"Comment", token["data"]])
    def processSpaceCharacters(self, token):
        self.outputTokens.append([u"Character", token["data"]])
        # After the first call, rebind this method on the instance so later
        # space-character tokens go straight to processCharacters.
        self.processSpaceCharacters = self.processCharacters
    def processCharacters(self, token):
        self.outputTokens.append([u"Character", token["data"]])
    def processEOF(self, token):
        # EOF produces no output token.
        pass
    def processParseError(self, token):
        self.outputTokens.append([u"ParseError", token["data"]])
def concatenateCharacterTokens(tokens):
    """Merge runs of adjacent Character tokens into single tokens.

    Tokens are appended by reference, so merging extends the data field of
    the previously appended token in place.
    """
    merged = []
    for token in tokens:
        is_character = "ParseError" not in token and token[0] == "Character"
        previous_is_character = (merged and
                                 "ParseError" not in merged[-1] and
                                 merged[-1][0] == "Character")
        if is_character and previous_is_character:
            merged[-1][1] += token[1]
        else:
            merged.append(token)
    return merged
def normalizeTokens(tokens):
    """Collapse every ParseError token to the bare ``ParseError`` marker.

    The list is updated in place and also returned for convenience.
    """
    # TODO: convert tests to reflect arrays
    tokens[:] = [u'ParseError' if token[0] == u'ParseError' else token
                 for token in tokens]
    return tokens
def tokensMatch(expectedTokens, receivedTokens, ignoreErrorOrder,
                ignoreErrors=False):
    """Test whether the test has passed or failed

    If the ignoreErrorOrder flag is set to true we don't test the relative
    positions of parse errors and non parse errors.

    Note: when the expected tokens omit the selfClosing flag, the flag is
    popped off the *received* StartTag/EndTag tokens in place.
    """
    # Only compare the selfClosing flag when the expected tokens carry it.
    checkSelfClosing = False
    for token in expectedTokens:
        if (token[0] == "StartTag" and len(token) == 4
            or token[0] == "EndTag" and len(token) == 3):
            checkSelfClosing = True
            break
    if not checkSelfClosing:
        for token in receivedTokens:
            if token[0] == "StartTag" or token[0] == "EndTag":
                token.pop()
    if not ignoreErrorOrder and not ignoreErrors:
        return expectedTokens == receivedTokens
    else:
        # Sort the tokens into two groups; non-parse errors and parse errors.
        # Pair the labels with their token lists explicitly: the old code
        # zipped tokens.keys() with the lists, silently depending on dict
        # key iteration order (unspecified on Python 2 / pre-3.7).
        tokens = {"expected": [[], []], "received": [[], []]}
        for tokenType, tokenList in (("expected", expectedTokens),
                                     ("received", receivedTokens)):
            for token in tokenList:
                if token != "ParseError":
                    tokens[tokenType][0].append(token)
                else:
                    if not ignoreErrors:
                        tokens[tokenType][1].append(token)
        return tokens["expected"] == tokens["received"]
def unescape_test(test):
    """Undo the extra escaping of a ``doubleEscaped`` tokenizer test in place.

    Decodes the input string, each token's data, and each token's attribute
    names/values with unicode-escape, and returns the mutated test dict.
    """
    def decode(inp):
        return inp.decode("unicode-escape")

    test["input"] = decode(test["input"])
    for token in test["output"]:
        if token == "ParseError":
            # Bare parse-error markers carry no data to decode.
            continue
        else:
            token[1] = decode(token[1])
            if len(token) > 2:
                # Rebuild the attribute dict instead of mutating it while
                # iterating: the old code iterated the dict directly (which
                # yields keys only, so the key/value unpacking failed) and
                # deleted entries mid-iteration.
                token[2] = dict((decode(key), decode(value))
                                for key, value in token[2].items())
    return test
def runTokenizerTest(test):
    """Run a single tokenizer test case and assert its token stream matches
    the expected output, with a readable message on failure."""
    #XXX - move this out into the setup function
    #concatenate all consecutive character tokens into a single token
    if 'doubleEscaped' in test:
        test = unescape_test(test)
    expected = concatenateCharacterTokens(test['output'])
    if 'lastStartTag' not in test:
        test['lastStartTag'] = None
    # Capture anything printed while tokenizing so it doesn't pollute the
    # test runner's output.
    outBuffer = cStringIO.StringIO()
    stdout = sys.stdout
    sys.stdout = outBuffer
    try:
        parser = TokenizerTestParser(test['initialState'],
                                     test['lastStartTag'])
        tokens = parser.parse(test['input'])
    finally:
        # Always restore stdout: the old code leaked the redirection (even on
        # success), silencing all subsequent output for the whole process.
        sys.stdout = stdout
    tokens = concatenateCharacterTokens(tokens)
    received = normalizeTokens(tokens)
    errorMsg = u"\n".join(["\n\nInitial state:",
                           test['initialState'],
                           "\nInput:", unicode(test['input']),
                           "\nExpected:", unicode(expected),
                           "\nreceived:", unicode(tokens)])
    errorMsg = errorMsg.encode("utf-8")
    ignoreErrorOrder = test.get('ignoreErrorOrder', False)
    assert tokensMatch(expected, received, ignoreErrorOrder), errorMsg
def _upperFirstGroup(match):
    """re.sub callback: return the captured character uppercased."""
    return match.group(1).upper()
# Pre-bound sub() for "non-word run followed by a word character".
_camelizeSub = re.compile(r"\W+(\w)").sub
def capitalize(s):
    """Camel-case a tokenizer state name, e.g. "Data state" -> "dataState"."""
    return _camelizeSub(_upperFirstGroup, s.lower())
def test_tokenizer():
    """Yield (runTokenizerTest, test) pairs for every tokenizer test file,
    once per initial tokenizer state the test declares."""
    for filename in html5lib_test_files('tokenizer', '*.test'):
        # Use open() in a context manager instead of the Python-2-only
        # file() builtin, so the handle is closed deterministically.
        with open(filename) as test_file:
            tests = json.load(test_file)
        if 'tests' in tests:
            for index, test in enumerate(tests['tests']):
                if 'initialStates' not in test:
                    test["initialStates"] = ["Data state"]
                for initialState in test["initialStates"]:
                    test["initialState"] = capitalize(initialState)
                    yield runTokenizerTest, test
|
__all__ = ['Counter', 'deque', 'defaultdict', 'namedtuple', 'OrderedDict']
from _abcoll import *
import _abcoll
__all__ += _abcoll.__all__
from _collections import deque, defaultdict
from operator import itemgetter as _itemgetter
from keyword import iskeyword as _iskeyword
import sys as _sys
import heapq as _heapq
from itertools import repeat as _repeat, chain as _chain, starmap as _starmap
try:
from thread import get_ident as _get_ident
except ImportError:
from dummy_thread import get_ident as _get_ident
class OrderedDict(dict):
'Dictionary that remembers insertion order'
# An inherited dict maps keys to values.
# The inherited dict provides __getitem__, __len__, __contains__, and get.
# The remaining methods are order-aware.
# Big-O running times for all methods are the same as regular dictionaries.
# The internal self.__map dict maps keys to links in a doubly linked list.
# The circular doubly linked list starts and ends with a sentinel element.
# The sentinel element never gets deleted (this simplifies the algorithm).
# Each link is stored as a list of length three: [PREV, NEXT, KEY].
    def __init__(self, *args, **kwds):
        '''Initialize an ordered dictionary. The signature is the same as
        regular dictionaries, but keyword arguments are not recommended because
        their insertion order is arbitrary.
        '''
        if len(args) > 1:
            raise TypeError('expected at most 1 arguments, got %d' % len(args))
        try:
            # Re-initialising an existing instance must not discard the
            # current order, so only build the sentinel on the first call.
            self.__root
        except AttributeError:
            self.__root = root = [] # sentinel node
            root[:] = [root, root, None]
            self.__map = {}
        self.__update(*args, **kwds)
    def __setitem__(self, key, value, PREV=0, NEXT=1, dict_setitem=dict.__setitem__):
        'od.__setitem__(i, y) <==> od[i]=y'
        # Setting a new item creates a new link at the end of the linked list,
        # and the inherited dictionary is updated with the new key/value pair.
        # PREV/NEXT and dict_setitem are bound as default args purely as a
        # CPython speed optimisation (locals beat globals/attribute lookups).
        if key not in self:
            root = self.__root
            last = root[PREV]
            last[NEXT] = root[PREV] = self.__map[key] = [last, root, key]
        dict_setitem(self, key, value)
    def __delitem__(self, key, PREV=0, NEXT=1, dict_delitem=dict.__delitem__):
        'od.__delitem__(y) <==> del od[y]'
        # Deleting an existing item uses self.__map to find the link which gets
        # removed by updating the links in the predecessor and successor nodes.
        # (PREV/NEXT/dict_delitem are default-arg bindings for speed.)
        dict_delitem(self, key)
        link_prev, link_next, key = self.__map.pop(key)
        link_prev[NEXT] = link_next
        link_next[PREV] = link_prev
def __iter__(self):
'od.__iter__() <==> iter(od)'
# Traverse the linked list in order.
NEXT, KEY = 1, 2
root = self.__root
curr = root[NEXT]
while curr is not root:
yield curr[KEY]
curr = curr[NEXT]
def __reversed__(self):
'od.__reversed__() <==> reversed(od)'
# Traverse the linked list in reverse order.
PREV, KEY = 0, 2
root = self.__root
curr = root[PREV]
while curr is not root:
yield curr[KEY]
curr = curr[PREV]
    def clear(self):
        'od.clear() -> None. Remove all items from od.'
        # Break the inter-node links first so reference counting can reclaim
        # the link lists even if something else still references the map.
        for node in self.__map.itervalues():
            del node[:]
        root = self.__root
        root[:] = [root, root, None]
        self.__map.clear()
        dict.clear(self)
# -- the following methods do not depend on the internal structure --
def keys(self):
'od.keys() -> list of keys in od'
return list(self)
def values(self):
'od.values() -> list of values in od'
return [self[key] for key in self]
def items(self):
'od.items() -> list of (key, value) pairs in od'
return [(key, self[key]) for key in self]
def iterkeys(self):
'od.iterkeys() -> an iterator over the keys in od'
return iter(self)
def itervalues(self):
'od.itervalues -> an iterator over the values in od'
for k in self:
yield self[k]
def iteritems(self):
'od.iteritems -> an iterator over the (key, value) pairs in od'
for k in self:
yield (k, self[k])
update = MutableMapping.update
__update = update # let subclasses override update without breaking __init__
__marker = object()
def pop(self, key, default=__marker):
'''od.pop(k[,d]) -> v, remove specified key and return the corresponding
value. If key is not found, d is returned if given, otherwise KeyError
is raised.
'''
if key in self:
result = self[key]
del self[key]
return result
if default is self.__marker:
raise KeyError(key)
return default
def setdefault(self, key, default=None):
'od.setdefault(k[,d]) -> od.get(k,d), also set od[k]=d if k not in od'
if key in self:
return self[key]
self[key] = default
return default
def popitem(self, last=True):
'''od.popitem() -> (k, v), return and remove a (key, value) pair.
Pairs are returned in LIFO order if last is true or FIFO order if false.
'''
if not self:
raise KeyError('dictionary is empty')
key = next(reversed(self) if last else iter(self))
value = self.pop(key)
return key, value
    def __repr__(self, _repr_running={}):
        'od.__repr__() <==> repr(od)'
        # _repr_running is a deliberately shared mutable default: a registry
        # of (object id, thread id) keys used to detect recursive repr calls
        # when an OrderedDict contains itself, directly or indirectly.
        call_key = id(self), _get_ident()
        if call_key in _repr_running:
            # Already rendering this object on this thread: cut the cycle.
            return '...'
        _repr_running[call_key] = 1
        try:
            if not self:
                return '%s()' % (self.__class__.__name__,)
            return '%s(%r)' % (self.__class__.__name__, self.items())
        finally:
            # Always drop the guard entry, even if rendering items raised.
            del _repr_running[call_key]
def __reduce__(self):
'Return state information for pickling'
items = [[k, self[k]] for k in self]
inst_dict = vars(self).copy()
for k in vars(OrderedDict()):
inst_dict.pop(k, None)
if inst_dict:
return (self.__class__, (items,), inst_dict)
return self.__class__, (items,)
def copy(self):
'od.copy() -> a shallow copy of od'
return self.__class__(self)
@classmethod
def fromkeys(cls, iterable, value=None):
'''OD.fromkeys(S[, v]) -> New ordered dictionary with keys from S.
If not specified, the value defaults to None.
'''
self = cls()
for key in iterable:
self[key] = value
return self
def __eq__(self, other):
'''od.__eq__(y) <==> od==y. Comparison to another OD is order-sensitive
while comparison to a regular mapping is order-insensitive.
'''
if isinstance(other, OrderedDict):
return len(self)==len(other) and self.items() == other.items()
return dict.__eq__(self, other)
def __ne__(self, other):
'od.__ne__(y) <==> od!=y'
return not self == other
# -- the following methods support python 3.x style dictionary views --
def viewkeys(self):
"od.viewkeys() -> a set-like object providing a view on od's keys"
return KeysView(self)
def viewvalues(self):
"od.viewvalues() -> an object providing a view on od's values"
return ValuesView(self)
def viewitems(self):
"od.viewitems() -> a set-like object providing a view on od's items"
return ItemsView(self)
def namedtuple(typename, field_names, verbose=False, rename=False):
"""Returns a new subclass of tuple with named fields.
>>> Point = namedtuple('Point', 'x y')
>>> Point.__doc__ # docstring for the new class
'Point(x, y)'
>>> p = Point(11, y=22) # instantiate with positional args or keywords
>>> p[0] + p[1] # indexable like a plain tuple
33
>>> x, y = p # unpack like a regular tuple
>>> x, y
(11, 22)
>>> p.x + p.y # fields also accessable by name
33
>>> d = p._asdict() # convert to a dictionary
>>> d['x']
11
>>> Point(**d) # convert from a dictionary
Point(x=11, y=22)
>>> p._replace(x=100) # _replace() is like str.replace() but targets named fields
Point(x=100, y=22)
"""
# Parse and validate the field names. Validation serves two purposes,
# generating informative error messages and preventing template injection attacks.
if isinstance(field_names, basestring):
field_names = field_names.replace(',', ' ').split() # names separated by whitespace and/or commas
field_names = tuple(map(str, field_names))
if rename:
names = list(field_names)
seen = set()
for i, name in enumerate(names):
if (not all(c.isalnum() or c=='_' for c in name) or _iskeyword(name)
or not name or name[0].isdigit() or name.startswith('_')
or name in seen):
names[i] = '_%d' % i
seen.add(name)
field_names = tuple(names)
for name in (typename,) + field_names:
if not all(c.isalnum() or c=='_' for c in name):
raise ValueError('Type names and field names can only contain alphanumeric characters and underscores: %r' % name)
if _iskeyword(name):
raise ValueError('Type names and field names cannot be a keyword: %r' % name)
if name[0].isdigit():
raise ValueError('Type names and field names cannot start with a number: %r' % name)
seen_names = set()
for name in field_names:
if name.startswith('_') and not rename:
raise ValueError('Field names cannot start with an underscore: %r' % name)
if name in seen_names:
raise ValueError('Encountered duplicate field name: %r' % name)
seen_names.add(name)
# Create and fill-in the class template
numfields = len(field_names)
argtxt = repr(field_names).replace("'", "")[1:-1] # tuple repr without parens or quotes
reprtxt = ', '.join('%s=%%r' % name for name in field_names)
template = '''class %(typename)s(tuple):
'%(typename)s(%(argtxt)s)' \n
__slots__ = () \n
_fields = %(field_names)r \n
def __new__(_cls, %(argtxt)s):
'Create new instance of %(typename)s(%(argtxt)s)'
return _tuple.__new__(_cls, (%(argtxt)s)) \n
@classmethod
def _make(cls, iterable, new=tuple.__new__, len=len):
'Make a new %(typename)s object from a sequence or iterable'
result = new(cls, iterable)
if len(result) != %(numfields)d:
raise TypeError('Expected %(numfields)d arguments, got %%d' %% len(result))
return result \n
def __repr__(self):
'Return a nicely formatted representation string'
return '%(typename)s(%(reprtxt)s)' %% self \n
def _asdict(self):
'Return a new OrderedDict which maps field names to their values'
return OrderedDict(zip(self._fields, self)) \n
def _replace(_self, **kwds):
'Return a new %(typename)s object replacing specified fields with new values'
result = _self._make(map(kwds.pop, %(field_names)r, _self))
if kwds:
raise ValueError('Got unexpected field names: %%r' %% kwds.keys())
return result \n
def __getnewargs__(self):
'Return self as a plain tuple. Used by copy and pickle.'
return tuple(self) \n\n''' % locals()
for i, name in enumerate(field_names):
template += " %s = _property(_itemgetter(%d), doc='Alias for field number %d')\n" % (name, i, i)
if verbose:
print template
# Execute the template string in a temporary namespace and
# support tracing utilities by setting a value for frame.f_globals['__name__']
namespace = dict(_itemgetter=_itemgetter, __name__='namedtuple_%s' % typename,
OrderedDict=OrderedDict, _property=property, _tuple=tuple)
try:
exec template in namespace
except SyntaxError, e:
raise SyntaxError(e.message + ':\n' + template)
result = namespace[typename]
# For pickling to work, the __module__ variable needs to be set to the frame
# where the named tuple is created. Bypass this step in enviroments where
# sys._getframe is not defined (Jython for example) or sys._getframe is not
# defined for arguments greater than 0 (IronPython).
try:
result.__module__ = _sys._getframe(1).f_globals.get('__name__', '__main__')
except (AttributeError, ValueError):
pass
return result
class Counter(dict):
    '''Dict subclass for counting hashable items.  Sometimes called a bag
    or multiset.  Elements are stored as dictionary keys and their counts
    are stored as dictionary values.
    >>> c = Counter('abcdeabcdabcaba')  # count elements from a string
    >>> c.most_common(3)                # three most common elements
    [('a', 5), ('b', 4), ('c', 3)]
    >>> sorted(c)                       # list all unique elements
    ['a', 'b', 'c', 'd', 'e']
    >>> ''.join(sorted(c.elements()))   # list elements with repetitions
    'aaaaabbbbcccdde'
    >>> sum(c.values())                 # total of all counts
    15
    >>> c['a']                          # count of letter 'a'
    5
    >>> for elem in 'shazam':           # update counts from an iterable
    ...     c[elem] += 1                # by adding 1 to each element's count
    >>> c['a']                          # now there are seven 'a'
    7
    >>> del c['b']                      # remove all 'b'
    >>> c['b']                          # now there are zero 'b'
    0
    >>> d = Counter('simsalabim')       # make another counter
    >>> c.update(d)                     # add in the second counter
    >>> c['a']                          # now there are nine 'a'
    9
    >>> c.clear()                       # empty the counter
    >>> c
    Counter()
    Note:  If a count is set to zero or reduced to zero, it will remain
    in the counter until the entry is deleted or the counter is cleared:
    >>> c = Counter('aaabbc')
    >>> c['b'] -= 2                     # reduce the count of 'b' by two
    >>> c.most_common()                 # 'b' is still in, but its count is zero
    [('a', 3), ('c', 1), ('b', 0)]
    '''
    # References:
    #   http://en.wikipedia.org/wiki/Multiset
    #   http://www.gnu.org/software/smalltalk/manual-base/html_node/Bag.html
    #   http://www.demo2s.com/Tutorial/Cpp/0380__set-multiset/Catalog0380__set-multiset.htm
    #   http://code.activestate.com/recipes/259174/
    #   Knuth, TAOCP Vol. II section 4.6.3

    def __init__(self, iterable=None, **kwds):
        '''Create a new, empty Counter object.  And if given, count elements
        from an input iterable.  Or, initialize the count from another mapping
        of elements to their counts.
        >>> c = Counter()                 # a new, empty counter
        >>> c = Counter('gallahad')       # a new counter from an iterable
        >>> c = Counter({'a': 4, 'b': 2}) # a new counter from a mapping
        >>> c = Counter(a=4, b=2)         # a new counter from keyword args
        '''
        super(Counter, self).__init__()
        self.update(iterable, **kwds)

    def __missing__(self, key):
        'The count of elements not in the Counter is zero.'
        # Needed so that self[missing_item] does not raise KeyError
        return 0

    def most_common(self, n=None):
        '''List the n most common elements and their counts from the most
        common to the least.  If n is None, then list all element counts.
        >>> Counter('abcdeabcdabcaba').most_common(3)
        [('a', 5), ('b', 4), ('c', 3)]
        '''
        # Emulate Bag.sortedByCount from Smalltalk.
        # Uses .items() rather than the Python2-only .iteritems(): identical
        # results, consistent with subtract() below, and keeps the class
        # usable under Python 3.
        if n is None:
            return sorted(self.items(), key=_itemgetter(1), reverse=True)
        return _heapq.nlargest(n, self.items(), key=_itemgetter(1))

    def elements(self):
        '''Iterator over elements repeating each as many times as its count.
        >>> c = Counter('ABCABC')
        >>> sorted(c.elements())
        ['A', 'A', 'B', 'B', 'C', 'C']
        # Knuth's example for prime factors of 1836:  2**2 * 3**3 * 17**1
        >>> prime_factors = Counter({2: 2, 3: 3, 17: 1})
        >>> product = 1
        >>> for factor in prime_factors.elements():     # loop over factors
        ...     product *= factor                       # and multiply them
        >>> product
        1836
        Note, if an element's count has been set to zero or is a negative
        number, elements() will ignore it.
        '''
        # Emulate Bag.do from Smalltalk and Multiset.begin from C++.
        # repeat(elem, count) yields nothing for count <= 0, which is how
        # zero/negative counts are skipped.  (.items() for Py2/Py3 parity.)
        return _chain.from_iterable(_starmap(_repeat, self.items()))

    # Override dict methods where necessary

    @classmethod
    def fromkeys(cls, iterable, v=None):
        # There is no equivalent method for counters because setting v=1
        # means that no element can have a count greater than one.
        raise NotImplementedError(
            'Counter.fromkeys() is undefined. Use Counter(iterable) instead.')

    def update(self, iterable=None, **kwds):
        '''Like dict.update() but add counts instead of replacing them.
        Source can be an iterable, a dictionary, or another Counter instance.
        >>> c = Counter('which')
        >>> c.update('witch')           # add elements from another iterable
        >>> d = Counter('watch')
        >>> c.update(d)                 # add elements from another counter
        >>> c['h']                      # four 'h' in which, witch, and watch
        4
        '''
        # The regular dict.update() operation makes no sense here because the
        # replace behavior results in the some of original untouched counts
        # being mixed-in with all of the other counts for a mismash that
        # doesn't have a straight-forward interpretation in most counting
        # contexts.  Instead, we implement straight-addition.  Both the inputs
        # and outputs are allowed to contain zero and negative counts.
        if iterable is not None:
            if isinstance(iterable, Mapping):
                if self:
                    self_get = self.get
                    # .items() (not Py2-only .iteritems()) -- matches
                    # subtract() and works under Python 3 as well.
                    for elem, count in iterable.items():
                        self[elem] = self_get(elem, 0) + count
                else:
                    super(Counter, self).update(iterable) # fast path when counter is empty
            else:
                self_get = self.get
                for elem in iterable:
                    self[elem] = self_get(elem, 0) + 1
        if kwds:
            self.update(kwds)

    def subtract(self, iterable=None, **kwds):
        '''Like dict.update() but subtracts counts instead of replacing them.
        Counts can be reduced below zero.  Both the inputs and outputs are
        allowed to contain zero and negative counts.
        Source can be an iterable, a dictionary, or another Counter instance.
        >>> c = Counter('which')
        >>> c.subtract('witch')             # subtract elements from another iterable
        >>> c.subtract(Counter('watch'))    # subtract elements from another counter
        >>> c['h']                          # 2 in which, minus 1 in witch, minus 1 in watch
        0
        >>> c['w']                          # 1 in which, minus 1 in witch, minus 1 in watch
        -1
        '''
        if iterable is not None:
            self_get = self.get
            if isinstance(iterable, Mapping):
                for elem, count in iterable.items():
                    self[elem] = self_get(elem, 0) - count
            else:
                for elem in iterable:
                    self[elem] = self_get(elem, 0) - 1
        if kwds:
            self.subtract(kwds)

    def copy(self):
        'Return a shallow copy.'
        return self.__class__(self)

    def __reduce__(self):
        # Pickle as (class, plain-dict-of-counts).
        return self.__class__, (dict(self),)

    def __delitem__(self, elem):
        'Like dict.__delitem__() but does not raise KeyError for missing values.'
        if elem in self:
            super(Counter, self).__delitem__(elem)

    def __repr__(self):
        if not self:
            return '%s()' % self.__class__.__name__
        items = ', '.join(map('%r: %r'.__mod__, self.most_common()))
        return '%s({%s})' % (self.__class__.__name__, items)

    # Multiset-style mathematical operations discussed in:
    #       Knuth TAOCP Volume II section 4.6.3 exercise 19
    #       and at http://en.wikipedia.org/wiki/Multiset
    #
    # Outputs guaranteed to only include positive counts.
    #
    # To strip negative and zero counts, add-in an empty counter:
    #       c += Counter()

    def __add__(self, other):
        '''Add counts from two counters.
        >>> Counter('abbb') + Counter('bcc')
        Counter({'b': 4, 'c': 2, 'a': 1})
        '''
        if not isinstance(other, Counter):
            return NotImplemented
        result = Counter()
        for elem, count in self.items():
            newcount = count + other[elem]
            if newcount > 0:
                result[elem] = newcount
        # Elements only present in the other counter still contribute.
        for elem, count in other.items():
            if elem not in self and count > 0:
                result[elem] = count
        return result

    def __sub__(self, other):
        ''' Subtract count, but keep only results with positive counts.
        >>> Counter('abbbc') - Counter('bccd')
        Counter({'b': 2, 'a': 1})
        '''
        if not isinstance(other, Counter):
            return NotImplemented
        result = Counter()
        for elem, count in self.items():
            newcount = count - other[elem]
            if newcount > 0:
                result[elem] = newcount
        # A negative count on the right alone yields a positive difference.
        for elem, count in other.items():
            if elem not in self and count < 0:
                result[elem] = 0 - count
        return result

    def __or__(self, other):
        '''Union is the maximum of value in either of the input counters.
        >>> Counter('abbb') | Counter('bcc')
        Counter({'b': 3, 'c': 2, 'a': 1})
        '''
        if not isinstance(other, Counter):
            return NotImplemented
        result = Counter()
        for elem, count in self.items():
            other_count = other[elem]
            newcount = other_count if count < other_count else count
            if newcount > 0:
                result[elem] = newcount
        for elem, count in other.items():
            if elem not in self and count > 0:
                result[elem] = count
        return result

    def __and__(self, other):
        ''' Intersection is the minimum of corresponding counts.
        >>> Counter('abbb') & Counter('bcc')
        Counter({'b': 1})
        '''
        if not isinstance(other, Counter):
            return NotImplemented
        result = Counter()
        # Only elements of self can appear; a key missing from other has
        # count 0 and is filtered by the positivity check.
        for elem, count in self.items():
            other_count = other[elem]
            newcount = count if count < other_count else other_count
            if newcount > 0:
                result[elem] = newcount
        return result
if __name__ == '__main__':
    # Self-test / demonstration harness.  Python 2 only: print statements
    # and the cPickle module (renamed pickle in Python 3).
    # verify that instances can be pickled
    from cPickle import loads, dumps
    Point = namedtuple('Point', 'x, y', True)
    p = Point(x=10, y=20)
    assert p == loads(dumps(p))
    # test and demonstrate ability to override methods
    class Point(namedtuple('Point', 'x y')):
        __slots__ = ()
        @property
        def hypot(self):
            return (self.x ** 2 + self.y ** 2) ** 0.5
        def __str__(self):
            return 'Point: x=%6.3f y=%6.3f hypot=%6.3f' % (self.x, self.y, self.hypot)
    for p in Point(3, 4), Point(14, 5/7.):
        print p
    # faster variants that skip _make/_replace validation entirely
    class Point(namedtuple('Point', 'x y')):
        'Point class with optimized _make() and _replace() without error-checking'
        __slots__ = ()
        _make = classmethod(tuple.__new__)
        def _replace(self, _map=map, **kwds):
            return self._make(_map(kwds.get, ('x', 'y'), self))
    print Point(11, 22)._replace(x=100)
    # derive a wider record type by extending an existing _fields tuple
    Point3D = namedtuple('Point3D', Point._fields + ('z',))
    print Point3D.__doc__
    # finally, run the doctests embedded in the docstrings above
    import doctest
    TestResults = namedtuple('TestResults', 'failed attempted')
    print TestResults(*doctest.testmod())
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.