| repo_name (string, len 6–100) | path (string, len 4–294) | copies (string, len 1–5) | size (string, len 4–6) | content (string, len 606–896k) | license (15 classes) |
|---|---|---|---|---|---|
albertjan/pypyjs | website/js/pypy.js-0.2.0/lib/modules/test/test_rlcompleter.py | 122 | 2848 | from test import test_support as support
import unittest
import __builtin__ as builtins
import rlcompleter
class CompleteMe(object):
    """Minimal object with one completable attribute, for Completer tests."""
    # A single non-callable attribute: attr_matches should return it
    # without the trailing '(' that callables get.
    spam = 1
class TestRlcompleter(unittest.TestCase):
    """Tests for rlcompleter.Completer global/attribute name matching."""

    def setUp(self):
        # One completer bound (lazily) to builtins, one bound to an
        # explicit custom namespace dict.
        self.stdcompleter = rlcompleter.Completer()
        self.completer = rlcompleter.Completer(dict(spam=int,
                                                    egg=str,
                                                    CompleteMe=CompleteMe))

        # forces stdcompleter to bind builtins namespace
        self.stdcompleter.complete('', 0)

    def test_namespace(self):
        """Completer requires a real dict namespace (subclasses OK)."""
        class A(dict):
            pass
        class B(list):
            pass

        self.assertTrue(self.stdcompleter.use_main_ns)
        self.assertFalse(self.completer.use_main_ns)
        self.assertFalse(rlcompleter.Completer(A()).use_main_ns)
        self.assertRaises(TypeError, rlcompleter.Completer, B((1,)))

    def test_global_matches(self):
        """global_matches filters by prefix and appends '(' to callables."""
        # test with builtins namespace
        self.assertEqual(sorted(self.stdcompleter.global_matches('di')),
                         [x+'(' for x in dir(builtins) if x.startswith('di')])
        self.assertEqual(sorted(self.stdcompleter.global_matches('st')),
                         [x+'(' for x in dir(builtins) if x.startswith('st')])
        self.assertEqual(self.stdcompleter.global_matches('akaksajadhak'), [])

        # test with a customized namespace
        self.assertEqual(self.completer.global_matches('CompleteM'),
                         ['CompleteMe('])
        self.assertEqual(self.completer.global_matches('eg'),
                         ['egg('])
        # XXX: see issue5256
        self.assertEqual(self.completer.global_matches('CompleteM'),
                         ['CompleteMe('])

    def test_attr_matches(self):
        """attr_matches resolves dotted expressions within the namespace."""
        # test with builtins namespace
        self.assertEqual(self.stdcompleter.attr_matches('str.s'),
                         ['str.{}('.format(x) for x in dir(str)
                          if x.startswith('s')])
        self.assertEqual(self.stdcompleter.attr_matches('tuple.foospamegg'), [])

        # test with a customized namespace; 'spam' is not callable, so no '('
        self.assertEqual(self.completer.attr_matches('CompleteMe.sp'),
                         ['CompleteMe.spam'])
        self.assertEqual(self.completer.attr_matches('Completeme.egg'), [])

        CompleteMe.me = CompleteMe
        self.assertEqual(self.completer.attr_matches('CompleteMe.me.me.sp'),
                         ['CompleteMe.me.me.spam'])
        self.assertEqual(self.completer.attr_matches('egg.s'),
                         ['egg.{}('.format(x) for x in dir(str)
                          if x.startswith('s')])
def test_main():
    """Run the TestRlcompleter suite through test_support's runner."""
    support.run_unittest(TestRlcompleter)

if __name__ == '__main__':
    test_main()
| mit |
grilo/ansible-1 | lib/ansible/modules/network/nxos/nxos_vlan.py | 21 | 11538 | #!/usr/bin/python
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'network'}
DOCUMENTATION = '''
---
module: nxos_vlan
extends_documentation_fragment: nxos
version_added: "2.1"
short_description: Manages VLAN resources and attributes.
description:
- Manages VLAN configurations on NX-OS switches.
author: Jason Edelman (@jedelman8)
options:
vlan_id:
description:
- Single VLAN ID.
required: false
default: null
vlan_range:
description:
- Range of VLANs such as 2-10 or 2,5,10-15, etc.
required: false
default: null
name:
description:
- Name of VLAN.
required: false
default: null
vlan_state:
description:
- Manage the vlan operational state of the VLAN
(equivalent to state {active | suspend} command.
required: false
default: active
choices: ['active','suspend']
admin_state:
description:
- Manage the VLAN administrative state of the VLAN equivalent
to shut/no shut in VLAN config mode.
required: false
default: up
choices: ['up','down']
mapped_vni:
description:
- The Virtual Network Identifier (VNI) ID that is mapped to the
VLAN. Valid values are integer and keyword 'default'.
required: false
default: null
version_added: "2.2"
state:
description:
- Manage the state of the resource.
required: false
default: present
choices: ['present','absent']
mode:
description:
- Set VLAN mode to classical ethernet or fabricpath.
required: false
default: null
choices: ['ce','fabricpath']
version_added: "2.4"
'''
EXAMPLES = '''
- name: Ensure a range of VLANs are not present on the switch
nxos_vlan:
vlan_range: "2-10,20,50,55-60,100-150"
state: absent
transport: nxapi
- name: Ensure VLAN 50 exists with the name WEB and is in the shutdown state
nxos_vlan:
vlan_id: 50
admin_state: down
name: WEB
transport: nxapi
- name: Ensure VLAN is NOT on the device
nxos_vlan:
vlan_id: 50
state: absent
transport: nxapi
'''
RETURN = '''
commands:
description: Set of command strings to send to the remote device
returned: always
type: list
sample: ["vlan 20", "vlan 55", "vn-segment 5000"]
'''
import re
from ansible.module_utils.nxos import get_config, load_config, run_commands
from ansible.module_utils.nxos import nxos_argument_spec
from ansible.module_utils.nxos import check_args as nxos_check_args
from ansible.module_utils.basic import AnsibleModule
def vlan_range_to_list(vlans):
    """Expand a VLAN range string into a list of VLAN ID strings.

    Accepts comma-separated tokens where each token is either a single ID
    ("10") or an inclusive range ("2-5"), e.g. "2-10,20,50".  The literal
    token 'none' stops processing.  Returns [] for a falsy input.
    """
    result = []
    if vlans:
        for part in vlans.split(','):
            # Fix: tolerate spaces after commas ("2, 5"); previously the
            # raw token (" 5") was appended, yielding an invalid VLAN ID.
            part = part.strip()
            if part == 'none':
                break
            if '-' in part:
                start, end = part.split('-')
                start, end = int(start), int(end)
                result.extend([str(i) for i in range(start, end + 1)])
            else:
                result.append(part)
    return result
def numerical_sort(iterable):
    """Sort strings of digits (VLAN IDs) numerically, returning strings.

    Sorting on the integer value avoids lexicographic ordering
    ('10' < '2').  Output values are normalized through int() exactly as
    the previous two-loop implementation did (so '007' becomes '7').
    """
    return [str(vlan) for vlan in sorted(int(v) for v in iterable)]
def build_commands(vlans, state):
    """Translate a list of VLAN IDs into config commands for ``state``.

    'present' yields "vlan <id>" lines, 'absent' yields "no vlan <id>"
    lines; any other state yields an empty list.
    """
    if state == 'present':
        template = 'vlan {0}'
    elif state == 'absent':
        template = 'no vlan {0}'
    else:
        return []
    return [template.format(vlan_id) for vlan_id in vlans]
def get_vlan_config_commands(vlan, vid):
    """Build the CLI command list that realizes ``vlan`` settings for ``vid``.

    ``vlan`` maps module argument names to desired values; the result is
    wrapped in "vlan <vid>" ... "exit".
    """
    # Translate module-facing admin_state values back into CLI keywords.
    reverse_value_map = {
        "admin_state": {
            "down": "shutdown",
            "up": "no shutdown"
        }
    }
    if vlan.get('admin_state'):
        # apply value map when making change to the admin state
        # note: would need to be a loop or more in depth check if
        # value map has more than 1 key
        vlan = apply_value_map(reverse_value_map, vlan)

    # One command template per supported parameter.
    vlan_args = {
        'name': 'name {0}',
        'vlan_state': 'state {0}',
        'admin_state': '{0}',
        'mode': 'mode {0}',
        'mapped_vni': 'vn-segment {0}'
    }

    commands = []
    for param in vlan:
        value = vlan.get(param)
        if param == 'mapped_vni' and value == 'default':
            # 'default' means: remove any existing VNI mapping.
            command = 'no vn-segment'
        else:
            command = vlan_args.get(param).format(value)
        if command:
            commands.append(command)

    commands.insert(0, 'vlan ' + vid)
    commands.append('exit')
    return commands
def get_list_of_vlans(module):
    """Return every VLAN ID currently configured on the device, as strings."""
    body = run_commands(module, ['show vlan | json'])[0]
    vlan_table = body.get('TABLE_vlanbrief')['ROW_vlanbrief']
    if not isinstance(vlan_table, list):
        # A non-list row means a single-entry table: only VLAN 1 exists.
        return ['1']
    return [str(row['vlanshowbr-vlanid-utf']) for row in vlan_table]
def get_vni(vlanid, module):
    """Return the VNI mapped to ``vlanid`` from the running config.

    Returns '' when the VLAN has no vn-segment line (or no config section).
    """
    flags = str('all | section vlan.{0}'.format(vlanid)).split(' ')
    body = get_config(module, flags=flags)
    if body and 'vn-segment' in body:
        match = re.search(r'(?:vn-segment\s)(?P<value>.*)$', body, re.M)
        return match.group('value')
    return ''
def get_vlan(vlanid, module):
    """Get instance of VLAN as a dictionary
    """
    command = 'show vlan id %s | json' % vlanid
    try:
        body = run_commands(module, [command])[0]
        vlan_table = body['TABLE_vlanbriefid']['ROW_vlanbriefid']
        mtu_table = body['TABLE_mtuinfoid']['ROW_mtuinfoid']
    except (TypeError, IndexError, KeyError):
        # Missing/malformed payload is treated as "VLAN not present".
        return {}

    # Rename NX-OS JSON keys to this module's argument names
    # (apply_key_map also stringifies the values).
    key_map = {
        "vlanshowbr-vlanid-utf": "vlan_id",
        "vlanshowbr-vlanname": "name",
        "vlanshowbr-vlanstate": "vlan_state",
        "vlanshowbr-shutstate": "admin_state"
    }
    vlan = apply_key_map(key_map, vlan_table)
    vlan['mode'] = mtu_table['vlanshowinfo-vlanmode']

    # Normalize the device's wording to this module's choice values.
    value_map = {
        "admin_state": {
            "shutdown": "down",
            "noshutdown": "up"
        },
        "mode": {
            "fabricpath-vlan": "fabricpath",
            "ce-vlan": "ce"
        }
    }
    vlan = apply_value_map(value_map, vlan)
    vlan['mapped_vni'] = get_vni(vlanid, module)
    return vlan
def apply_key_map(key_map, table):
    """Return a copy of ``table`` with keys renamed via ``key_map``.

    Values are converted to str; entries whose key is absent from
    ``key_map`` (or maps to a falsy name) are dropped.
    """
    renamed = {}
    for old_key, raw_value in table.items():
        mapped_key = key_map.get(old_key)
        if mapped_key:
            renamed[mapped_key] = str(raw_value)
    return renamed
def apply_value_map(value_map, resource):
    """Translate resource values through ``value_map``, in place.

    For each key in ``value_map``, resource[key] is replaced by its mapped
    equivalent.  Fix: keys missing from ``resource``, or values with no
    mapping entry, are now left untouched instead of raising KeyError
    (previously ``value[resource.get(key)]`` blew up on None/unknown
    values, e.g. an unexpected device mode string).
    """
    for key, mapping in value_map.items():
        current = resource.get(key)
        if current in mapping:
            resource[key] = mapping[current]
    return resource
def check_args(module, warnings):
    """Run the shared nxos argument checks and warn on deprecated params."""
    nxos_check_args(module, warnings)

    # These were deprecated in Ansible 2.4; any supplied value is ignored.
    deprecated = ('include_defaults', 'config', 'save')
    for key in deprecated:
        if module.params[key] is not None:
            warnings.append('argument %s is no longer supported, ignoring value' % key)
def main():
    """Module entry point: diff proposed VLAN state against the device and apply."""
    argument_spec = dict(
        vlan_id=dict(required=False, type='str'),
        vlan_range=dict(required=False),
        name=dict(required=False),
        vlan_state=dict(choices=['active', 'suspend'], required=False),
        mapped_vni=dict(required=False, type='str'),
        state=dict(choices=['present', 'absent'], default='present', required=False),
        admin_state=dict(choices=['up', 'down'], required=False),
        mode=dict(choices=['ce', 'fabricpath'], required=False),

        # Deprecated in Ansible 2.4
        include_defaults=dict(default=False),
        config=dict(),
        save=dict(type='bool', default=False)
    )

    argument_spec.update(nxos_argument_spec)

    module = AnsibleModule(argument_spec=argument_spec,
                           mutually_exclusive=[['vlan_range', 'name'],
                                               ['vlan_id', 'vlan_range']],
                           supports_check_mode=True)

    warnings = list()
    check_args(module, warnings)
    results = dict(changed=False, warnings=warnings)

    vlan_range = module.params['vlan_range']
    vlan_id = module.params['vlan_id']
    name = module.params['name']
    vlan_state = module.params['vlan_state']
    admin_state = module.params['admin_state']
    mapped_vni = module.params['mapped_vni']
    state = module.params['state']
    mode = module.params['mode']

    if vlan_id:
        if not vlan_id.isdigit():
            module.fail_json(msg='vlan_id must be a valid VLAN ID')

    # Only explicitly-supplied attributes participate in the diff.
    args = dict(name=name, vlan_state=vlan_state,
                admin_state=admin_state, mapped_vni=mapped_vni, mode=mode)
    proposed = dict((k, v) for k, v in args.items() if v is not None)

    proposed_vlans_list = vlan_range_to_list(vlan_id or vlan_range)
    existing_vlans_list = get_list_of_vlans(module)
    commands = []
    existing = {}

    if vlan_range:
        # Range mode: only VLAN existence is managed, not attributes.
        if state == 'present':
            # These are all of the VLANs being proposed that don't
            # already exist on the switch
            vlans_delta = numerical_sort(
                set(proposed_vlans_list).difference(existing_vlans_list))
            commands = build_commands(vlans_delta, state)
        elif state == 'absent':
            # VLANs that are common between what is being proposed and
            # what is on the switch
            vlans_common = numerical_sort(
                set(proposed_vlans_list).intersection(existing_vlans_list))
            commands = build_commands(vlans_common, state)
    else:
        # Single-VLAN mode: diff proposed attributes against current state.
        existing = get_vlan(vlan_id, module)
        if state == 'absent' and existing:
            commands = ['no vlan ' + vlan_id]
        elif state == 'present':
            # mapped_vni '0' on the device already means "no mapping".
            if (existing.get('mapped_vni') == '0' and
                    proposed.get('mapped_vni') == 'default'):
                proposed.pop('mapped_vni')
            delta = dict(set(proposed.items()).difference(existing.items()))
            if delta or not existing:
                commands = get_vlan_config_commands(delta, vlan_id)

    if commands:
        if existing.get('mapped_vni'):
            # An existing VNI mapping must be removed before it can be
            # changed (or before the VLAN itself can be deleted).
            if (existing.get('mapped_vni') != proposed.get('mapped_vni') and
                    existing.get('mapped_vni') != '0' and proposed.get('mapped_vni') != 'default'):
                if state == 'absent':
                    commands = ['vlan ' + vlan_id, 'no vn-segment', 'no vlan ' + vlan_id]
                else:
                    commands.insert(1, 'no vn-segment')
        if module.check_mode:
            module.exit_json(changed=True, commands=commands)
        else:
            load_config(module, commands)
            results['changed'] = True

    results['commands'] = commands
    module.exit_json(**results)
# Standard Ansible module entry point when executed directly.
if __name__ == '__main__':
    main()
| gpl-3.0 |
mattsmart/biomodels | oncogenesis_dynamics/firstpassage.py | 1 | 15435 | import matplotlib.pyplot as plt
import numpy as np
import time
from os import sep
from multiprocessing import Pool, cpu_count
from constants import OUTPUT_DIR, PARAMS_ID, PARAMS_ID_INV, COLOURS_DARK_BLUE
from data_io import read_varying_mean_sd_fpt_and_params, collect_fpt_mean_stats_and_params, read_fpt_and_params,\
write_fpt_and_params
from formulae import stoch_gillespie, stoch_tauleap_lowmem, stoch_tauleap, get_physical_fp_stable_and_not, map_init_name_to_init_cond
from params import Params
from presets import presets
from plotting import plot_table_params
def get_fpt(ensemble, init_cond, params, num_steps=1000000, establish_switch=False, brief=True):
    """Sample first-passage (or establishment) times over ``ensemble`` runs.

    brief=True uses the low-memory tau-leaping simulator; otherwise a full
    Gillespie trajectory is generated and its final time is recorded.
    Returns a numpy array of length ``ensemble``.
    (Python 2 code: uses xrange and print statements.)
    """
    # TODO could pass simmethod tau or gillespie to params and parse here
    # The simulator stops either at first passage or at establishment,
    # depending on which flag is set (they are mutually exclusive here).
    if establish_switch:
        fpt_flag = False
        establish_flag = True
    else:
        fpt_flag = True
        establish_flag = False
    fp_times = np.zeros(ensemble)
    for i in xrange(ensemble):
        if brief:
            species_end, times_end = stoch_tauleap_lowmem(init_cond, num_steps, params, fpt_flag=fpt_flag,
                                                          establish_flag=establish_flag)
        else:
            species, times = stoch_gillespie(init_cond, num_steps, params, fpt_flag=fpt_flag,
                                             establish_flag=establish_flag)
            times_end = times[-1]
            # plotting
            #plt.plot(times, species)
            #plt.show()
        fp_times[i] = times_end
        if establish_switch:
            print "establish time is", fp_times[i]
    return fp_times
def get_mean_fpt(init_cond, params, samplesize=32, establish_switch=False):
    """Mean first-passage time over ``samplesize`` stochastic runs."""
    passage_times = get_fpt(samplesize, init_cond, params,
                            establish_switch=establish_switch)
    return np.mean(passage_times)
def wrapper_get_fpt(fn_args_dict):
    """Unpack an {'args': ..., 'kwargs': ...} dict and call get_fpt.

    Multiprocessing shim: re-seeds numpy's RNG so forked workers do not
    share identical random streams.
    """
    np.random.seed()  # TODO double check that this fixes cluster RNG issues
    args = fn_args_dict['args']
    kwargs = fn_args_dict['kwargs']
    if kwargs is None:
        return get_fpt(*args)
    return get_fpt(*args, **kwargs)
def fast_fp_times(ensemble, init_cond, params, num_processes, num_steps='default', establish_switch=False):
    """Collect ``ensemble`` first-passage times split across worker processes.

    ``ensemble`` must divide evenly by ``num_processes``; each worker runs
    an equal sub-ensemble via wrapper_get_fpt.  Returns a flat numpy array.
    (Python 2 code: print statements, xrange, integer division.)
    """
    if num_steps == 'default':
        kwargs_dict = {'num_steps': 1000000, 'establish_switch': establish_switch}
    else:
        kwargs_dict = {'num_steps': num_steps, 'establish_switch': establish_switch}
    fn_args_dict = [0]*num_processes
    print "NUM_PROCESSES:", num_processes
    assert ensemble % num_processes == 0
    for i in xrange(num_processes):
        subensemble = ensemble / num_processes
        print "process:", i, "job size:", subensemble, "runs"
        fn_args_dict[i] = {'args': (subensemble, init_cond, params),
                           'kwargs': kwargs_dict}
    t0 = time.time()
    pool = Pool(num_processes)
    results = pool.map(wrapper_get_fpt, fn_args_dict)
    pool.close()
    pool.join()
    print "TIMER:", time.time() - t0
    # Stitch each worker's sub-ensemble back into one flat array.
    fp_times = np.zeros(ensemble)
    for i, result in enumerate(results):
        fp_times[i*subensemble:(i+1)*subensemble] = result
    return fp_times
def fast_mean_fpt_varying(param_vary_name, param_vary_values, params, num_processes, init_name="x_all", samplesize=30, establish_switch=False):
    """Mean and SD of first-passage times while sweeping one parameter.

    Returns two lists (means, standard deviations) aligned with
    ``param_vary_values``.
    """
    assert samplesize % num_processes == 0
    means = []
    sds = []
    for sweep_value in param_vary_values:
        swept_params = params.mod_copy({param_vary_name: sweep_value})
        # NOTE(review): init cond is built from the *unswept* params here,
        # as in the original — confirm this is intended for sweeps over N.
        init_cond = map_init_name_to_init_cond(params, init_name)
        samples = fast_fp_times(samplesize, init_cond, swept_params, num_processes,
                                establish_switch=establish_switch)
        means.append(np.mean(samples))
        sds.append(np.std(samples))
    return means, sds
def fpt_histogram(fpt_list, params, figname_mod="", flag_show=False, flag_norm=True, flag_xlog10=False, flag_ylog10=False, fs=12):
    """Plot a histogram of first-passage times; saves a PDF and returns the axes."""
    ensemble_size = len(fpt_list)
    bins = np.linspace(np.min(fpt_list), np.max(fpt_list), 50)  #50)
    #bins = np.arange(0, 3*1e4, 50)  # to plot against FSP
    # normalize
    if flag_norm:
        y_label = 'Probability'
        weights = np.ones_like(fpt_list) / ensemble_size
    else:
        y_label = 'Frequency'
        weights = np.ones_like(fpt_list)
    # prep fig before axes mod
    fig = plt.figure(figsize=(8,6), dpi=120)
    ax = plt.gca()
    # mod axes (log); note log-x replaces the linear bins with log-spaced ones
    if flag_xlog10:
        ax.set_xscale("log", nonposx='clip')
        max_log = np.ceil(np.max(np.log10(fpt_list)))  # TODO check this matches multihist
        bins = np.logspace(0.1, max_log, 100)
    if flag_ylog10:
        ax.set_yscale("log", nonposx='clip')
    # plot: filled histogram plus a step outline drawn on top
    plt.hist(fpt_list, bins=bins, alpha=0.6, weights=weights)
    plt.hist(fpt_list, histtype='step', bins=bins, alpha=0.6, label=None, weights=weights, edgecolor='k', linewidth=0.5,
             fill=False)
    # draw mean line
    #plt.axvline(np.mean(fpt_list), color='k', linestyle='dashed', linewidth=2)
    # labels
    plt.title('First-passage time histogram (%d runs) - %s' % (ensemble_size, params.system), fontsize=fs)
    ax.set_xlabel('First-passage time (cell division timescale)', fontsize=fs)
    ax.set_ylabel(y_label, fontsize=fs)
    ax.tick_params(labelsize=fs)
    # plt.locator_params(axis='x', nbins=4)
    #plt.legend(loc='upper right', fontsize=fs)
    # create table of params
    plot_table_params(ax, params)
    # save and show
    plt_save = "fpt_histogram" + figname_mod
    plt.savefig(OUTPUT_DIR + sep + plt_save + '.pdf', bbox_inches='tight')
    if flag_show:
        plt.show()
    return ax
def fpt_histogram_multi(multi_fpt_list, labels, figname_mod="", fs=12, bin_linspace=80, colours=COLOURS_DARK_BLUE,
                        figsize=(8,6), ec='k', lw=0.5, flag_norm=False, flag_show=False, flag_xlog10=False,
                        flag_ylog10=False, flag_disjoint=False):
    """Overlay first-passage-time histograms from several ensembles; saves a PDF.

    Lists of unequal length are truncated in place to the shortest one.
    (Python 2 code: print statement, xrange.)
    """
    # resize fpt lists if not all same size (to the min size)
    fpt_lengths = [len(fpt) for fpt in multi_fpt_list]
    ensemble_size = np.min(fpt_lengths)
    # cleanup data to same size
    # NOTE: 'fpt_lengths - ensemble_size' works because ensemble_size is a
    # numpy scalar (broadcast subtraction), not plain list arithmetic.
    if sum(fpt_lengths - ensemble_size) > 0:
        print "Resizing multi_fpt_list elements:", fpt_lengths, "to the min size of:", ensemble_size
        for idx in xrange(len(fpt_lengths)):
            multi_fpt_list[idx] = multi_fpt_list[idx][:ensemble_size]
    bins = np.linspace(np.min(multi_fpt_list), np.max(multi_fpt_list), bin_linspace)
    # normalize
    if flag_norm:
        y_label = 'Probability'
        weights = np.ones_like(multi_fpt_list) / ensemble_size
    else:
        y_label = 'Frequency'
        weights = np.ones_like(multi_fpt_list)
    # prep fig before axes mod
    fig = plt.figure(figsize=figsize, dpi=120)
    ax = plt.gca()
    # mod axes (log)
    if flag_xlog10:
        ax.set_xscale("log", nonposx='clip')
        max_log = np.ceil(np.max(np.log10(multi_fpt_list)))
        bins = np.logspace(0.1, max_log, 100)
    if flag_ylog10:
        ax.set_yscale("log", nonposx='clip')
    # plot calls: either side-by-side bars, or overlaid translucent hists
    if flag_disjoint:
        plt.hist(multi_fpt_list, bins=bins, color=colours, label=labels, weights=weights, edgecolor=ec, linewidth=lw)
    else:
        for idx, fpt_list in enumerate(multi_fpt_list):
            plt.hist(fpt_list, bins=bins, alpha=0.6, color=colours[idx], label=labels[idx],
                     weights=weights[idx,:])
            plt.hist(fpt_list, histtype='step', bins=bins, alpha=0.6, color=colours[idx],
                     label=None,weights=weights[idx,:], edgecolor=ec, linewidth=lw, fill=False)
    # labels
    plt.title('First-passage time histogram (%d runs)' % (ensemble_size), fontsize=fs)
    ax.set_xlabel('First-passage time (cell division timescale)', fontsize=fs)
    ax.set_ylabel(y_label, fontsize=fs)
    plt.legend(loc='upper right', fontsize=fs)
    ax.tick_params(labelsize=fs)
    # plt.locator_params(axis='x', nbins=4)
    # save and show
    plt_save = "fpt_multihistogram" + figname_mod
    fig.savefig(OUTPUT_DIR + sep + plt_save + '.pdf', bbox_inches='tight')
    if flag_show:
        plt.show()
def plot_mean_fpt_varying(mean_fpt_varying, sd_fpt_varying, param_vary_name, param_set, params, samplesize, SEM_flag=True, show_flag=False, figname_mod=""):
    """Log-log errorbar plot of mean FPT vs a swept parameter; saves a PNG.

    SEM_flag=True converts the SDs to standard errors of the mean.
    Returns the matplotlib axes.  (Python 2 code: print, xrange.)
    """
    if SEM_flag:
        sd_fpt_varying = sd_fpt_varying / np.sqrt(samplesize)  # s.d. from CLT since sample mean is approx N(mu, sd**2/n)
    plt.errorbar(param_set, mean_fpt_varying, yerr=sd_fpt_varying, label="sim")
    plt.title("Mean FP Time, %s varying (sample=%d)" % (param_vary_name, samplesize))
    ax = plt.gca()
    ax.set_xlabel(param_vary_name)
    ax.set_ylabel('Mean FP time')
    # log options (hardcoded on; echo the data to stdout for inspection)
    for i in xrange(len(mean_fpt_varying)):
        print i, param_set[i], mean_fpt_varying[i], sd_fpt_varying[i]
    flag_xlog10 = True
    flag_ylog10 = True
    if flag_xlog10:
        ax.set_xscale("log", nonposx='clip')
        #ax.set_xlim([0.8*1e2, 1*1e7])
    if flag_ylog10:
        ax.set_yscale("log", nonposx='clip')
        #ax.set_ylim([0.8*1e2, 3*1e5])
    # create table of params
    plot_table_params(ax, params)
    plt_save = "mean_fpt_varying" + figname_mod
    plt.savefig(OUTPUT_DIR + sep + plt_save + '.png', bbox_inches='tight')
    if show_flag:
        plt.show()
    return ax
if __name__ == "__main__":
    # SCRIPT FLAGS: enable exactly the workflow sections to execute below.
    run_compute_fpt = False
    run_read_fpt = False
    run_generate_hist_multi = False
    run_load_hist_multi = False
    run_collect = False  # NOTE(review): flag currently unused by any section
    run_means_read_and_plot = False
    run_means_collect_and_plot = True

    # SCRIPT PARAMETERS
    establish_switch = True
    brief = True
    num_steps = 1000000  # default 1000000
    ensemble = 1  # default 100

    # DYNAMICS PARAMETERS
    params = presets('preset_xyz_constant')  # preset_xyz_constant, preset_xyz_constant_fast, valley_2hit

    # OTHER PARAMETERS: start all N individuals in state 0
    init_cond = np.zeros(params.numstates, dtype=int)
    init_cond[0] = int(params.N)

    # PLOTTING
    FS = 16
    EC = 'k'
    LW = 0.5
    FIGSIZE=(8,6)

    # Run one ensemble, write it to disk, and plot its histogram.
    if run_compute_fpt:
        fp_times = get_fpt(ensemble, init_cond, params, num_steps=num_steps, establish_switch=establish_switch, brief=brief)
        write_fpt_and_params(fp_times, params)
        fpt_histogram(fp_times, params, flag_show=True, figname_mod="XZ_model_withFeedback_mu1e-1")

    # Reload two previously saved ensembles from disk.
    if run_read_fpt:
        dbdir = OUTPUT_DIR
        dbdir_100 = dbdir + sep + "fpt_mean" + sep + "100_c95"
        fp_times_xyz_100, params_a = read_fpt_and_params(dbdir_100)
        dbdir_10k = dbdir + sep + "fpt_mean" + sep + "10k_c95"
        fp_times_xyz_10k, params_b = read_fpt_and_params(dbdir_10k)

    # Generate ensembles for several values of N and overlay their histograms.
    if run_generate_hist_multi:
        ensemble = 21
        num_proc = cpu_count() - 1
        param_vary_id = "N"
        param_idx = PARAMS_ID_INV[param_vary_id]
        param_vary_values = [1e2, 1e3, 1e4]
        param_vary_labels = ['A', 'B', 'C']
        params_ensemble = [params.params_list[:] for _ in param_vary_values]
        multi_fpt = np.zeros((len(param_vary_values), ensemble))
        multi_fpt_labels = ['label' for _ in param_vary_values]
        for idx, param_val in enumerate(param_vary_values):
            param_val_string = "%s=%.3f" % (param_vary_id, param_val)
            params_step = params.mod_copy({param_vary_id: param_val})
            #fp_times = get_fpt(ensemble, init_cond, params_set[idx], num_steps=num_steps)
            fp_times = fast_fp_times(ensemble, init_cond, params_step, num_proc, establish_switch=establish_switch)
            write_fpt_and_params(fp_times, params_step, filename="fpt_multi", filename_mod=param_val_string)
            multi_fpt[idx,:] = np.array(fp_times)
            multi_fpt_labels[idx] = "%s (%s)" % (param_vary_labels[idx], param_val_string)
        fpt_histogram_multi(multi_fpt, multi_fpt_labels, flag_show=True, flag_ylog10=False)

    # Load three saved ensembles (different c values) and plot comparisons.
    if run_load_hist_multi:
        flag_norm = True
        dbdir = OUTPUT_DIR + sep + "may25_100"
        #dbdir_c80 = dbdir + "fpt_feedback_z_ens1040_c0.80_params"
        c80_header = "fpt_feedback_z_ens1040_c80_N100"
        c88_header = "fpt_feedback_z_ens1040_c88_N100"
        c95_header = "fpt_feedback_z_ens1040_c95_N100"
        fp_times_xyz_c80, params_a = read_fpt_and_params(dbdir, "%s_data.txt" % c80_header, "%s_params.csv" % c80_header)
        fp_times_xyz_c88, params_b = read_fpt_and_params(dbdir, "%s_data.txt" % c88_header, "%s_params.csv" % c88_header)
        fp_times_xyz_c95, params_c = read_fpt_and_params(dbdir, "%s_data.txt" % c95_header, "%s_params.csv" % c95_header)
        fpt_histogram(fp_times_xyz_c88, params_b, flag_ylog10=False, figname_mod="_xyz_feedbackz_N10k_c88_may25")
        plt.close('all')
        fpt_histogram(fp_times_xyz_c88, params_b, flag_ylog10=True, figname_mod="_xyz_feedbackz_N10k_c88_may25_logy")
        plt.close('all')
        multi_fpt = [fp_times_xyz_c80, fp_times_xyz_c88, fp_times_xyz_c95]
        labels = ("c=0.80 (Region I)", "c=0.88 (Region IV)", "c=0.95 (Region III)")
        fpt_histogram_multi(multi_fpt, labels, flag_show=True, flag_ylog10=False, flag_norm=flag_norm, fs=FS, ec=EC, lw=LW, figsize=FIGSIZE)
        fpt_histogram_multi(multi_fpt, labels, flag_show=True, flag_ylog10=True, flag_norm=flag_norm, fs=FS, ec=EC, lw=LW, figsize=FIGSIZE)
        fpt_histogram_multi(multi_fpt, labels, flag_show=True, flag_ylog10=True, flag_norm=False, fs=FS, ec=EC, lw=LW, figsize=FIGSIZE, flag_disjoint=True)

    # Read previously collected mean/SD stats and plot mean FPT vs N.
    if run_means_read_and_plot:
        datafile = OUTPUT_DIR + sep + "fpt_stats_collected_mean_sd_varying_N.txt"
        paramfile = OUTPUT_DIR + sep + "fpt_stats_collected_mean_sd_varying_N_params.csv"
        samplesize=48
        mean_fpt_varying, sd_fpt_varying, param_to_vary, param_set, params = \
            read_varying_mean_sd_fpt_and_params(datafile, paramfile)
        plt_axis = plot_mean_fpt_varying(mean_fpt_varying, sd_fpt_varying, param_to_vary, param_set, params, samplesize,
                                         SEM_flag=True, show_flag=True, figname_mod="_%s_n%d" % (param_to_vary, samplesize))
        # Disabled theory-comparison overlay kept for reference:
        """
        mu = params.mu
        mixed_fp_zinf_at_N = [0.0]*len(param_set)
        for idx, N in enumerate(param_set):
            params_at_N = params.mod_copy( {'N': N} )
            fps = get_physical_and_stable_fp(params_at_N)
            assert len(fps) == 1
            mixed_fp_zinf_at_N[idx] = fps[0][2]
        plt_axis.plot(param_set, [1/(mu*n) for n in param_set], '-o', label="(mu*N)^-1")
        plt_axis.plot(param_set, [1/(mu*zinf) for zinf in mixed_fp_zinf_at_N], '-o', label="(mu*z_inf)^-1")
        plt_axis.set_yscale("log", nonposx='clip')
        plt_axis.set_xscale("log", nonposx='clip')
        plt_axis.legend()
        plt.savefig(OUTPUT_DIR + sep + "theorycompare_loglog" + '.png', bbox_inches='tight')
        plt.show()
        """

    # Collect raw run files into mean/SD stats, then plot them.
    if run_means_collect_and_plot:
        dbdir = OUTPUT_DIR + sep + "tocollect" + sep + "runset_june17_FPT_cvary_44_ens240"
        datafile, paramfile = collect_fpt_mean_stats_and_params(dbdir)
        samplesize=240
        mean_fpt_varying, sd_fpt_varying, param_to_vary, param_set, params = \
            read_varying_mean_sd_fpt_and_params(datafile, paramfile)
        plot_mean_fpt_varying(mean_fpt_varying, sd_fpt_varying, param_to_vary, param_set, params, samplesize,
                              SEM_flag=True, show_flag=True, figname_mod="_%s_n%d" % (param_to_vary, samplesize))
| mit |
michalliu/OpenWrt-Firefly-Libraries | staging_dir/target-mipsel_1004kc+dsp_uClibc-0.9.33.2/usr/lib/python3.4/test/test_collections.py | 8 | 63844 | """Unit tests for collections.py."""
import unittest, doctest, operator
from test.support import TESTFN, forget, unlink
import inspect
from test import support
from collections import namedtuple, Counter, OrderedDict, _count_elements
from test import mapping_tests
import pickle, copy
from random import randrange, shuffle
import keyword
import re
import sys
from collections import UserDict
from collections import ChainMap
from collections.abc import Hashable, Iterable, Iterator
from collections.abc import Sized, Container, Callable
from collections.abc import Set, MutableSet
from collections.abc import Mapping, MutableMapping, KeysView, ItemsView
from collections.abc import Sequence, MutableSequence
from collections.abc import ByteString
################################################################################
### ChainMap (helper class for configparser and the string module)
################################################################################
class TestChainMap(unittest.TestCase):
    """Tests for collections.ChainMap layered-mapping behavior."""

    def test_basics(self):
        """Child maps shadow parents; copies/pickles preserve the chain."""
        c = ChainMap()
        c['a'] = 1
        c['b'] = 2
        d = c.new_child()
        d['b'] = 20
        d['c'] = 30
        self.assertEqual(d.maps, [{'b':20, 'c':30}, {'a':1, 'b':2}])  # check internal state
        self.assertEqual(d.items(), dict(a=1, b=20, c=30).items())    # check items/iter/getitem
        self.assertEqual(len(d), 3)                                   # check len
        for key in 'abc':                                             # check contains
            self.assertIn(key, d)
        for k, v in dict(a=1, b=20, c=30, z=100).items():             # check get
            self.assertEqual(d.get(k, 100), v)

        del d['b']                                                    # unmask a value
        self.assertEqual(d.maps, [{'c':30}, {'a':1, 'b':2}])          # check internal state
        self.assertEqual(d.items(), dict(a=1, b=2, c=30).items())     # check items/iter/getitem
        self.assertEqual(len(d), 3)                                   # check len
        for key in 'abc':                                             # check contains
            self.assertIn(key, d)
        for k, v in dict(a=1, b=2, c=30, z=100).items():              # check get
            self.assertEqual(d.get(k, 100), v)
        self.assertIn(repr(d), [                                      # check repr
            type(d).__name__ + "({'c': 30}, {'a': 1, 'b': 2})",
            type(d).__name__ + "({'c': 30}, {'b': 2, 'a': 1})"
        ])

        for e in d.copy(), copy.copy(d):                              # check shallow copies
            self.assertEqual(d, e)
            self.assertEqual(d.maps, e.maps)
            self.assertIsNot(d, e)
            # shallow copy: first map is copied, the rest are shared
            self.assertIsNot(d.maps[0], e.maps[0])
            for m1, m2 in zip(d.maps[1:], e.maps[1:]):
                self.assertIs(m1, m2)

        # check deep copies
        for proto in range(pickle.HIGHEST_PROTOCOL + 1):
            e = pickle.loads(pickle.dumps(d, proto))
            self.assertEqual(d, e)
            self.assertEqual(d.maps, e.maps)
            self.assertIsNot(d, e)
            for m1, m2 in zip(d.maps, e.maps):
                self.assertIsNot(m1, m2, e)
        for e in [copy.deepcopy(d),
                  eval(repr(d))
                 ]:
            self.assertEqual(d, e)
            self.assertEqual(d.maps, e.maps)
            self.assertIsNot(d, e)
            for m1, m2 in zip(d.maps, e.maps):
                self.assertIsNot(m1, m2, e)

        f = d.new_child()
        f['b'] = 5
        self.assertEqual(f.maps, [{'b': 5}, {'c':30}, {'a':1, 'b':2}])
        self.assertEqual(f.parents.maps, [{'c':30}, {'a':1, 'b':2}])   # check parents
        self.assertEqual(f['b'], 5)                                    # find first in chain
        self.assertEqual(f.parents['b'], 2)                            # look beyond maps[0]

    def test_contructor(self):
        """Default construction gives one empty dict; one arg gives one map."""
        self.assertEqual(ChainMap().maps, [{}])          # no-args --> one new dict
        self.assertEqual(ChainMap({1:2}).maps, [{1:2}])  # 1 arg --> list

    def test_bool(self):
        """A ChainMap is truthy iff any layer is non-empty."""
        self.assertFalse(ChainMap())
        self.assertFalse(ChainMap({}, {}))
        self.assertTrue(ChainMap({1:2}, {}))
        self.assertTrue(ChainMap({}, {1:2}))

    def test_missing(self):
        """__missing__ hooks into item access but not get/pop defaults."""
        class DefaultChainMap(ChainMap):
            def __missing__(self, key):
                return 999
        d = DefaultChainMap(dict(a=1, b=2), dict(b=20, c=30))
        for k, v in dict(a=1, b=2, c=30, d=999).items():
            self.assertEqual(d[k], v)              # check __getitem__ w/missing
        for k, v in dict(a=1, b=2, c=30, d=77).items():
            self.assertEqual(d.get(k, 77), v)      # check get() w/ missing
        for k, v in dict(a=True, b=True, c=True, d=False).items():
            self.assertEqual(k in d, v)            # check __contains__ w/missing
        self.assertEqual(d.pop('a', 1001), 1, d)
        self.assertEqual(d.pop('a', 1002), 1002)   # check pop() w/missing
        self.assertEqual(d.popitem(), ('b', 2))    # check popitem() w/missing
        with self.assertRaises(KeyError):
            d.popitem()

    def test_dict_coercion(self):
        """dict(chainmap) flattens the chain with child values winning."""
        d = ChainMap(dict(a=1, b=2), dict(b=20, c=30))
        self.assertEqual(dict(d), dict(a=1, b=2, c=30))
        self.assertEqual(dict(d.items()), dict(a=1, b=2, c=30))

    def test_new_child(self):
        'Tests for changes for issue #16613.'
        c = ChainMap()
        c['a'] = 1
        c['b'] = 2
        m = {'b':20, 'c': 30}
        d = c.new_child(m)
        self.assertEqual(d.maps, [{'b':20, 'c':30}, {'a':1, 'b':2}])  # check internal state
        self.assertIs(m, d.maps[0])

        # Use a different map than a dict
        class lowerdict(dict):
            def __getitem__(self, key):
                if isinstance(key, str):
                    key = key.lower()
                return dict.__getitem__(self, key)
            def __contains__(self, key):
                if isinstance(key, str):
                    key = key.lower()
                return dict.__contains__(self, key)

        c = ChainMap()
        c['a'] = 1
        c['b'] = 2
        m = lowerdict(b=20, c=30)
        d = c.new_child(m)
        self.assertIs(m, d.maps[0])
        for key in 'abc':                                             # check contains
            self.assertIn(key, d)
        for k, v in dict(a=1, B=20, C=30, z=100).items():             # check get
            self.assertEqual(d.get(k, 100), v)
################################################################################
### Named Tuples
################################################################################
# Defined at module level (not inside a test) so pickle can locate the class.
TestNT = namedtuple('TestNT', 'x y z')  # type used for pickle tests
class TestNamedTuple(unittest.TestCase):
    def test_factory(self):
        """Validate generated class attributes and typename/fieldname checks."""
        Point = namedtuple('Point', 'x y')
        self.assertEqual(Point.__name__, 'Point')
        self.assertEqual(Point.__slots__, ())
        self.assertEqual(Point.__module__, __name__)
        self.assertEqual(Point.__getitem__, tuple.__getitem__)
        self.assertEqual(Point._fields, ('x', 'y'))
        self.assertIn('class Point(tuple)', Point._source)

        self.assertRaises(ValueError, namedtuple, 'abc%', 'efg ghi')       # type has non-alpha char
        self.assertRaises(ValueError, namedtuple, 'class', 'efg ghi')      # type has keyword
        self.assertRaises(ValueError, namedtuple, '9abc', 'efg ghi')       # type starts with digit

        self.assertRaises(ValueError, namedtuple, 'abc', 'efg g%hi')       # field with non-alpha char
        self.assertRaises(ValueError, namedtuple, 'abc', 'abc class')      # field has keyword
        self.assertRaises(ValueError, namedtuple, 'abc', '8efg 9ghi')      # field starts with digit
        self.assertRaises(ValueError, namedtuple, 'abc', '_efg ghi')       # field with leading underscore
        self.assertRaises(ValueError, namedtuple, 'abc', 'efg efg ghi')    # duplicate field

        namedtuple('Point0', 'x1 y2')   # Verify that numbers are allowed in names
        namedtuple('_', 'a b c')        # Test leading underscores in a typename

        nt = namedtuple('nt', 'the quick brown fox')  # check unicode input
        self.assertNotIn("u'", repr(nt._fields))
        nt = namedtuple('nt', ('the', 'quick'))       # check unicode input
        self.assertNotIn("u'", repr(nt._fields))

        self.assertRaises(TypeError, Point._make, [11])          # catch too few args
        self.assertRaises(TypeError, Point._make, [11, 22, 33])  # catch too many args
@unittest.skipIf(sys.flags.optimize >= 2,
"Docstrings are omitted with -O2 and above")
def test_factory_doc_attr(self):
Point = namedtuple('Point', 'x y')
self.assertEqual(Point.__doc__, 'Point(x, y)')
def test_name_fixer(self):
for spec, renamed in [
[('efg', 'g%hi'), ('efg', '_1')], # field with non-alpha char
[('abc', 'class'), ('abc', '_1')], # field has keyword
[('8efg', '9ghi'), ('_0', '_1')], # field starts with digit
[('abc', '_efg'), ('abc', '_1')], # field with leading underscore
[('abc', 'efg', 'efg', 'ghi'), ('abc', 'efg', '_2', 'ghi')], # duplicate field
[('abc', '', 'x'), ('abc', '_1', 'x')], # fieldname is a space
]:
self.assertEqual(namedtuple('NT', spec, rename=True)._fields, renamed)
def test_instance(self):
Point = namedtuple('Point', 'x y')
p = Point(11, 22)
self.assertEqual(p, Point(x=11, y=22))
self.assertEqual(p, Point(11, y=22))
self.assertEqual(p, Point(y=22, x=11))
self.assertEqual(p, Point(*(11, 22)))
self.assertEqual(p, Point(**dict(x=11, y=22)))
self.assertRaises(TypeError, Point, 1) # too few args
self.assertRaises(TypeError, Point, 1, 2, 3) # too many args
self.assertRaises(TypeError, eval, 'Point(XXX=1, y=2)', locals()) # wrong keyword argument
self.assertRaises(TypeError, eval, 'Point(x=1)', locals()) # missing keyword argument
self.assertEqual(repr(p), 'Point(x=11, y=22)')
self.assertNotIn('__weakref__', dir(p))
self.assertEqual(p, Point._make([11, 22])) # test _make classmethod
self.assertEqual(p._fields, ('x', 'y')) # test _fields attribute
self.assertEqual(p._replace(x=1), (1, 22)) # test _replace method
self.assertEqual(p._asdict(), dict(x=11, y=22)) # test _asdict method
self.assertEqual(vars(p), p._asdict()) # verify that vars() works
try:
p._replace(x=1, error=2)
except ValueError:
pass
else:
self._fail('Did not detect an incorrect fieldname')
# verify that field string can have commas
Point = namedtuple('Point', 'x, y')
p = Point(x=11, y=22)
self.assertEqual(repr(p), 'Point(x=11, y=22)')
# verify that fieldspec can be a non-string sequence
Point = namedtuple('Point', ('x', 'y'))
p = Point(x=11, y=22)
self.assertEqual(repr(p), 'Point(x=11, y=22)')
def test_tupleness(self):
Point = namedtuple('Point', 'x y')
p = Point(11, 22)
self.assertIsInstance(p, tuple)
self.assertEqual(p, (11, 22)) # matches a real tuple
self.assertEqual(tuple(p), (11, 22)) # coercable to a real tuple
self.assertEqual(list(p), [11, 22]) # coercable to a list
self.assertEqual(max(p), 22) # iterable
self.assertEqual(max(*p), 22) # star-able
x, y = p
self.assertEqual(p, (x, y)) # unpacks like a tuple
self.assertEqual((p[0], p[1]), (11, 22)) # indexable like a tuple
self.assertRaises(IndexError, p.__getitem__, 3)
self.assertEqual(p.x, x)
self.assertEqual(p.y, y)
self.assertRaises(AttributeError, eval, 'p.z', locals())
def test_odd_sizes(self):
Zero = namedtuple('Zero', '')
self.assertEqual(Zero(), ())
self.assertEqual(Zero._make([]), ())
self.assertEqual(repr(Zero()), 'Zero()')
self.assertEqual(Zero()._asdict(), {})
self.assertEqual(Zero()._fields, ())
Dot = namedtuple('Dot', 'd')
self.assertEqual(Dot(1), (1,))
self.assertEqual(Dot._make([1]), (1,))
self.assertEqual(Dot(1).d, 1)
self.assertEqual(repr(Dot(1)), 'Dot(d=1)')
self.assertEqual(Dot(1)._asdict(), {'d':1})
self.assertEqual(Dot(1)._replace(d=999), (999,))
self.assertEqual(Dot(1)._fields, ('d',))
# n = 5000
n = 254 # SyntaxError: more than 255 arguments:
import string, random
names = list(set(''.join([random.choice(string.ascii_letters)
for j in range(10)]) for i in range(n)))
n = len(names)
Big = namedtuple('Big', names)
b = Big(*range(n))
self.assertEqual(b, tuple(range(n)))
self.assertEqual(Big._make(range(n)), tuple(range(n)))
for pos, name in enumerate(names):
self.assertEqual(getattr(b, name), pos)
repr(b) # make sure repr() doesn't blow-up
d = b._asdict()
d_expected = dict(zip(names, range(n)))
self.assertEqual(d, d_expected)
b2 = b._replace(**dict([(names[1], 999),(names[-5], 42)]))
b2_expected = list(range(n))
b2_expected[1] = 999
b2_expected[-5] = 42
self.assertEqual(b2, tuple(b2_expected))
self.assertEqual(b._fields, tuple(names))
def test_pickle(self):
p = TestNT(x=10, y=20, z=30)
for module in (pickle,):
loads = getattr(module, 'loads')
dumps = getattr(module, 'dumps')
for protocol in range(-1, module.HIGHEST_PROTOCOL + 1):
q = loads(dumps(p, protocol))
self.assertEqual(p, q)
self.assertEqual(p._fields, q._fields)
self.assertNotIn(b'OrderedDict', dumps(p, protocol))
def test_copy(self):
p = TestNT(x=10, y=20, z=30)
for copier in copy.copy, copy.deepcopy:
q = copier(p)
self.assertEqual(p, q)
self.assertEqual(p._fields, q._fields)
def test_name_conflicts(self):
# Some names like "self", "cls", "tuple", "itemgetter", and "property"
# failed when used as field names. Test to make sure these now work.
T = namedtuple('T', 'itemgetter property self cls tuple')
t = T(1, 2, 3, 4, 5)
self.assertEqual(t, (1,2,3,4,5))
newt = t._replace(itemgetter=10, property=20, self=30, cls=40, tuple=50)
self.assertEqual(newt, (10,20,30,40,50))
# Broader test of all interesting names in a template
with support.captured_stdout() as template:
T = namedtuple('T', 'x', verbose=True)
words = set(re.findall('[A-Za-z]+', template.getvalue()))
words -= set(keyword.kwlist)
T = namedtuple('T', words)
# test __new__
values = tuple(range(len(words)))
t = T(*values)
self.assertEqual(t, values)
t = T(**dict(zip(T._fields, values)))
self.assertEqual(t, values)
# test _make
t = T._make(values)
self.assertEqual(t, values)
# exercise __repr__
repr(t)
# test _asdict
self.assertEqual(t._asdict(), dict(zip(T._fields, values)))
# test _replace
t = T._make(values)
newvalues = tuple(v*10 for v in values)
newt = t._replace(**dict(zip(T._fields, newvalues)))
self.assertEqual(newt, newvalues)
# test _fields
self.assertEqual(T._fields, tuple(words))
# test __getnewargs__
self.assertEqual(t.__getnewargs__(), values)
def test_repr(self):
with support.captured_stdout() as template:
A = namedtuple('A', 'x', verbose=True)
self.assertEqual(repr(A(1)), 'A(x=1)')
# repr should show the name of the subclass
class B(A):
pass
self.assertEqual(repr(B(1)), 'B(x=1)')
def test_source(self):
# verify that _source can be run through exec()
tmp = namedtuple('NTColor', 'red green blue')
globals().pop('NTColor', None) # remove artifacts from other tests
exec(tmp._source, globals())
self.assertIn('NTColor', globals())
c = NTColor(10, 20, 30)
self.assertEqual((c.red, c.green, c.blue), (10, 20, 30))
self.assertEqual(NTColor._fields, ('red', 'green', 'blue'))
globals().pop('NTColor', None) # clean-up after this test
################################################################################
### Abstract Base Classes
################################################################################
class ABCTestCase(unittest.TestCase):
    """Shared helper assertions for the ABC test classes below."""
    def validate_abstract_methods(self, abc, *names):
        """Assert *abc* is instantiable exactly when every name in *names*
        is overridden by the subclass."""
        def _stub(self, *args):
            return 0
        complete = {name: _stub for name in names}
        # With every abstract method stubbed out, instantiation succeeds.
        type('C', (abc,), complete)()
        # Removing any single required method must make instantiation fail.
        for missing in names:
            partial = {n: fn for n, fn in complete.items() if n != missing}
            incomplete = type('C', (abc,), partial)
            self.assertRaises(TypeError, incomplete, missing)
    def validate_isinstance(self, abc, name):
        """Assert that duck-typed isinstance/issubclass checks against *abc*
        key off the presence of the single method *name*."""
        # A plain (unhashable) class gains ABC membership once the method exists.
        with_method = type('C', (object,), {'__hash__': None})
        setattr(with_method, name, lambda s, *args: 0)
        self.assertIsInstance(with_method(), abc)
        self.assertTrue(issubclass(with_method, abc))
        # The same class without the method is not a member.
        without_method = type('C', (object,), {'__hash__': None})
        self.assertNotIsInstance(without_method(), abc)
        self.assertFalse(issubclass(without_method, abc))
    def validate_comparison(self, instance):
        """Assert that comparison and set-arithmetic operators on *instance*
        fall back to the right-hand operand's (reflected) methods."""
        class RightSide:
            """Records whether any of its comparison hooks was invoked."""
            def __init__(self):
                self.right_side = False
            def _record(self, other):
                self.right_side = True
                return True
            __eq__ = __ne__ = _record
            __lt__ = __gt__ = __le__ = __ge__ = _record
            __ror__ = __rand__ = __rxor__ = __rsub__ = _record
        for op in ('lt', 'gt', 'le', 'ge', 'ne', 'or', 'and', 'xor', 'sub'):
            dunder = '__' + op + '__'
            if not hasattr(instance, dunder):
                continue
            other = RightSide()
            getattr(operator, dunder)(instance, other)
            self.assertTrue(other.right_side,'Right side not called for %s.%s'
                            % (type(instance), dunder))
class TestOneTrickPonyABCs(ABCTestCase):
    """Tests for the single-method ABCs: Hashable, Iterable, Iterator,
    Sized, Container, and Callable.  Each test checks negative samples,
    positive samples, direct subclassing, and duck-typed registration."""
    def test_Hashable(self):
        """Hashable membership is keyed off the presence of __hash__."""
        # Check some non-hashables
        non_samples = [bytearray(), list(), set(), dict()]
        for x in non_samples:
            self.assertNotIsInstance(x, Hashable)
            self.assertFalse(issubclass(type(x), Hashable), repr(type(x)))
        # Check some hashables
        samples = [None,
                   int(), float(), complex(),
                   str(),
                   tuple(), frozenset(),
                   int, list, object, type, bytes()
                   ]
        for x in samples:
            self.assertIsInstance(x, Hashable)
            self.assertTrue(issubclass(type(x), Hashable), repr(type(x)))
        self.assertRaises(TypeError, Hashable)
        # Check direct subclassing
        class H(Hashable):
            def __hash__(self):
                return super().__hash__()
        self.assertEqual(hash(H()), 0)
        self.assertFalse(issubclass(int, H))
        self.validate_abstract_methods(Hashable, '__hash__')
        self.validate_isinstance(Hashable, '__hash__')
    def test_Iterable(self):
        """Iterable membership is keyed off the presence of __iter__."""
        # Check some non-iterables
        non_samples = [None, 42, 3.14, 1j]
        for x in non_samples:
            self.assertNotIsInstance(x, Iterable)
            self.assertFalse(issubclass(type(x), Iterable), repr(type(x)))
        # Check some iterables
        samples = [bytes(), str(),
                   tuple(), list(), set(), frozenset(), dict(),
                   dict().keys(), dict().items(), dict().values(),
                   (lambda: (yield))(),
                   (x for x in []),
                   ]
        for x in samples:
            self.assertIsInstance(x, Iterable)
            self.assertTrue(issubclass(type(x), Iterable), repr(type(x)))
        # Check direct subclassing
        class I(Iterable):
            def __iter__(self):
                return super().__iter__()
        self.assertEqual(list(I()), [])
        self.assertFalse(issubclass(str, I))
        self.validate_abstract_methods(Iterable, '__iter__')
        self.validate_isinstance(Iterable, '__iter__')
    def test_Iterator(self):
        """Iterator requires both __next__ and __iter__ (issue 10565)."""
        non_samples = [None, 42, 3.14, 1j, b"", "", (), [], {}, set()]
        for x in non_samples:
            self.assertNotIsInstance(x, Iterator)
            self.assertFalse(issubclass(type(x), Iterator), repr(type(x)))
        samples = [iter(bytes()), iter(str()),
                   iter(tuple()), iter(list()), iter(dict()),
                   iter(set()), iter(frozenset()),
                   iter(dict().keys()), iter(dict().items()),
                   iter(dict().values()),
                   (lambda: (yield))(),
                   (x for x in []),
                   ]
        for x in samples:
            self.assertIsInstance(x, Iterator)
            self.assertTrue(issubclass(type(x), Iterator), repr(type(x)))
        self.validate_abstract_methods(Iterator, '__next__', '__iter__')
        # Issue 10565
        class NextOnly:
            def __next__(self):
                yield 1
                raise StopIteration
        self.assertNotIsInstance(NextOnly(), Iterator)
    def test_Sized(self):
        """Sized membership is keyed off the presence of __len__."""
        non_samples = [None, 42, 3.14, 1j,
                       (lambda: (yield))(),
                       (x for x in []),
                       ]
        for x in non_samples:
            self.assertNotIsInstance(x, Sized)
            self.assertFalse(issubclass(type(x), Sized), repr(type(x)))
        samples = [bytes(), str(),
                   tuple(), list(), set(), frozenset(), dict(),
                   dict().keys(), dict().items(), dict().values(),
                   ]
        for x in samples:
            self.assertIsInstance(x, Sized)
            self.assertTrue(issubclass(type(x), Sized), repr(type(x)))
        self.validate_abstract_methods(Sized, '__len__')
        self.validate_isinstance(Sized, '__len__')
    def test_Container(self):
        """Container membership is keyed off the presence of __contains__."""
        non_samples = [None, 42, 3.14, 1j,
                       (lambda: (yield))(),
                       (x for x in []),
                       ]
        for x in non_samples:
            self.assertNotIsInstance(x, Container)
            self.assertFalse(issubclass(type(x), Container), repr(type(x)))
        samples = [bytes(), str(),
                   tuple(), list(), set(), frozenset(), dict(),
                   dict().keys(), dict().items(),
                   ]
        for x in samples:
            self.assertIsInstance(x, Container)
            self.assertTrue(issubclass(type(x), Container), repr(type(x)))
        self.validate_abstract_methods(Container, '__contains__')
        self.validate_isinstance(Container, '__contains__')
    def test_Callable(self):
        """Callable membership is keyed off the presence of __call__."""
        non_samples = [None, 42, 3.14, 1j,
                       "", b"", (), [], {}, set(),
                       (lambda: (yield))(),
                       (x for x in []),
                       ]
        for x in non_samples:
            self.assertNotIsInstance(x, Callable)
            self.assertFalse(issubclass(type(x), Callable), repr(type(x)))
        samples = [lambda: None,
                   type, int, object,
                   len,
                   list.append, [].append,
                   ]
        for x in samples:
            self.assertIsInstance(x, Callable)
            self.assertTrue(issubclass(type(x), Callable), repr(type(x)))
        self.validate_abstract_methods(Callable, '__call__')
        self.validate_isinstance(Callable, '__call__')
    def test_direct_subclassing(self):
        """A direct subclass of each ABC is a subclass; int is not."""
        for B in Hashable, Iterable, Iterator, Sized, Container, Callable:
            class C(B):
                pass
            self.assertTrue(issubclass(C, B))
            self.assertFalse(issubclass(int, C))
    def test_registration(self):
        """register() turns an unrelated class into a virtual subclass."""
        for B in Hashable, Iterable, Iterator, Sized, Container, Callable:
            class C:
                __hash__ = None  # Make sure it isn't hashable by default
            self.assertFalse(issubclass(C, B), B.__name__)
            B.register(C)
            self.assertTrue(issubclass(C, B))
class WithSet(MutableSet):
    """Minimal concrete MutableSet backed by a real set; used below to
    exercise the MutableSet mixin methods on a non-builtin class."""
    def __init__(self, it=()):
        # Delegate all storage to an ordinary set.
        self.data = set(it)
    def __len__(self):
        return self.data.__len__()
    def __iter__(self):
        return self.data.__iter__()
    def __contains__(self, item):
        return self.data.__contains__(item)
    def add(self, item):
        self.data.add(item)
    def discard(self, item):
        self.data.discard(item)
class TestCollectionABCs(ABCTestCase):
    """Tests for the container ABCs (Set, MutableSet, Mapping,
    MutableMapping, Sequence, ByteString, MutableSequence)."""
    # XXX For now, we only test some virtual inheritance properties.
    # We should also test the proper behavior of the collection ABCs
    # as real base classes or mix-in classes.
    def test_Set(self):
        """set/frozenset are Sets; a minimal Set defers comparisons correctly."""
        for sample in [set, frozenset]:
            self.assertIsInstance(sample(), Set)
            self.assertTrue(issubclass(sample, Set))
        self.validate_abstract_methods(Set, '__contains__', '__iter__', '__len__')
        class MySet(Set):
            def __contains__(self, x):
                return False
            def __len__(self):
                return 0
            def __iter__(self):
                return iter([])
        self.validate_comparison(MySet())
    def test_hash_Set(self):
        """The _hash() mixin gives equal sets equal hashes."""
        class OneTwoThreeSet(Set):
            def __init__(self):
                self.contents = [1, 2, 3]
            def __contains__(self, x):
                return x in self.contents
            def __len__(self):
                return len(self.contents)
            def __iter__(self):
                return iter(self.contents)
            def __hash__(self):
                return self._hash()
        a, b = OneTwoThreeSet(), OneTwoThreeSet()
        self.assertTrue(hash(a) == hash(b))
    def test_MutableSet(self):
        """set is a MutableSet; frozenset is not."""
        self.assertIsInstance(set(), MutableSet)
        self.assertTrue(issubclass(set, MutableSet))
        self.assertNotIsInstance(frozenset(), MutableSet)
        self.assertFalse(issubclass(frozenset, MutableSet))
        self.validate_abstract_methods(MutableSet, '__contains__', '__iter__', '__len__',
            'add', 'discard')
    def test_issue_5647(self):
        # MutableSet.__iand__ mutated the set during iteration
        s = WithSet('abcd')
        s &= WithSet('cdef')            # This used to fail
        self.assertEqual(set(s), set('cd'))
    def test_issue_4920(self):
        # MutableSet.pop() method did not work
        class MySet(MutableSet):
            __slots__=['__s']
            def __init__(self,items=None):
                if items is None:
                    items=[]
                self.__s=set(items)
            def __contains__(self,v):
                return v in self.__s
            def __iter__(self):
                return iter(self.__s)
            def __len__(self):
                return len(self.__s)
            def add(self,v):
                result=v not in self.__s
                self.__s.add(v)
                return result
            def discard(self,v):
                result=v in self.__s
                self.__s.discard(v)
                return result
            def __repr__(self):
                return "MySet(%s)" % repr(list(self))
        s = MySet([5,43,2,1])
        # NOTE(review): relies on CPython's set iteration order for small
        # ints putting 1 first — implementation-specific; confirm if ported.
        self.assertEqual(s.pop(), 1)
    def test_issue8750(self):
        """In-place set ops applied to the set itself must not corrupt it."""
        empty = WithSet()
        full = WithSet(range(10))
        s = WithSet(full)
        s -= s
        self.assertEqual(s, empty)
        s = WithSet(full)
        s ^= s
        self.assertEqual(s, empty)
        s = WithSet(full)
        s &= s
        self.assertEqual(s, full)
        s |= s
        self.assertEqual(s, full)
    def test_issue16373(self):
        # Recursion error comparing comparable and noncomparable
        # Set instances
        class MyComparableSet(Set):
            def __contains__(self, x):
                return False
            def __len__(self):
                return 0
            def __iter__(self):
                return iter([])
        class MyNonComparableSet(Set):
            def __contains__(self, x):
                return False
            def __len__(self):
                return 0
            def __iter__(self):
                return iter([])
            def __le__(self, x):
                return NotImplemented
            def __lt__(self, x):
                return NotImplemented
        cs = MyComparableSet()
        ncs = MyNonComparableSet()
        self.assertFalse(ncs < cs)
        self.assertTrue(ncs <= cs)
        self.assertFalse(ncs > cs)
        self.assertTrue(ncs >= cs)
    def assertSameSet(self, s1, s2):
        # coerce both to a real set then check equality
        self.assertSetEqual(set(s1), set(s2))
    def test_Set_interoperability_with_real_sets(self):
        # Issue: 8743
        class ListSet(Set):
            def __init__(self, elements=()):
                self.data = []
                for elem in elements:
                    if elem not in self.data:
                        self.data.append(elem)
            def __contains__(self, elem):
                return elem in self.data
            def __iter__(self):
                return iter(self.data)
            def __len__(self):
                return len(self.data)
            def __repr__(self):
                return 'Set({!r})'.format(self.data)
        # r* are real sets, f* are ListSet fakes, l* are plain lists
        # (lists must be rejected by the ordering comparisons).
        r1 = set('abc')
        r2 = set('bcd')
        r3 = set('abcde')
        f1 = ListSet('abc')
        f2 = ListSet('bcd')
        f3 = ListSet('abcde')
        l1 = list('abccba')
        l2 = list('bcddcb')
        l3 = list('abcdeedcba')
        target = r1 & r2
        self.assertSameSet(f1 & f2, target)
        self.assertSameSet(f1 & r2, target)
        self.assertSameSet(r2 & f1, target)
        self.assertSameSet(f1 & l2, target)
        target = r1 | r2
        self.assertSameSet(f1 | f2, target)
        self.assertSameSet(f1 | r2, target)
        self.assertSameSet(r2 | f1, target)
        self.assertSameSet(f1 | l2, target)
        fwd_target = r1 - r2
        rev_target = r2 - r1
        self.assertSameSet(f1 - f2, fwd_target)
        self.assertSameSet(f2 - f1, rev_target)
        self.assertSameSet(f1 - r2, fwd_target)
        self.assertSameSet(f2 - r1, rev_target)
        self.assertSameSet(r1 - f2, fwd_target)
        self.assertSameSet(r2 - f1, rev_target)
        self.assertSameSet(f1 - l2, fwd_target)
        self.assertSameSet(f2 - l1, rev_target)
        target = r1 ^ r2
        self.assertSameSet(f1 ^ f2, target)
        self.assertSameSet(f1 ^ r2, target)
        self.assertSameSet(r2 ^ f1, target)
        self.assertSameSet(f1 ^ l2, target)
        # Don't change the following to use assertLess or other
        # "more specific" unittest assertions.  The current
        # assertTrue/assertFalse style makes the pattern of test
        # case combinations clear and allows us to know for sure
        # the exact operator being invoked.
        # proper subset
        self.assertTrue(f1 < f3)
        self.assertFalse(f1 < f1)
        self.assertFalse(f1 < f2)
        self.assertTrue(r1 < f3)
        self.assertFalse(r1 < f1)
        self.assertFalse(r1 < f2)
        self.assertTrue(r1 < r3)
        self.assertFalse(r1 < r1)
        self.assertFalse(r1 < r2)
        with self.assertRaises(TypeError):
            f1 < l3
        with self.assertRaises(TypeError):
            f1 < l1
        with self.assertRaises(TypeError):
            f1 < l2
        # any subset
        self.assertTrue(f1 <= f3)
        self.assertTrue(f1 <= f1)
        self.assertFalse(f1 <= f2)
        self.assertTrue(r1 <= f3)
        self.assertTrue(r1 <= f1)
        self.assertFalse(r1 <= f2)
        self.assertTrue(r1 <= r3)
        self.assertTrue(r1 <= r1)
        self.assertFalse(r1 <= r2)
        with self.assertRaises(TypeError):
            f1 <= l3
        with self.assertRaises(TypeError):
            f1 <= l1
        with self.assertRaises(TypeError):
            f1 <= l2
        # proper superset
        self.assertTrue(f3 > f1)
        self.assertFalse(f1 > f1)
        self.assertFalse(f2 > f1)
        self.assertTrue(r3 > r1)
        self.assertFalse(f1 > r1)
        self.assertFalse(f2 > r1)
        self.assertTrue(r3 > r1)
        self.assertFalse(r1 > r1)
        self.assertFalse(r2 > r1)
        with self.assertRaises(TypeError):
            f1 > l3
        with self.assertRaises(TypeError):
            f1 > l1
        with self.assertRaises(TypeError):
            f1 > l2
        # any superset
        self.assertTrue(f3 >= f1)
        self.assertTrue(f1 >= f1)
        self.assertFalse(f2 >= f1)
        self.assertTrue(r3 >= r1)
        self.assertTrue(f1 >= r1)
        self.assertFalse(f2 >= r1)
        self.assertTrue(r3 >= r1)
        self.assertTrue(r1 >= r1)
        self.assertFalse(r2 >= r1)
        with self.assertRaises(TypeError):
            f1 >= l3
        with self.assertRaises(TypeError):
            f1 >=l1
        with self.assertRaises(TypeError):
            f1 >= l2
        # equality
        self.assertTrue(f1 == f1)
        self.assertTrue(r1 == f1)
        self.assertTrue(f1 == r1)
        self.assertFalse(f1 == f3)
        self.assertFalse(r1 == f3)
        self.assertFalse(f1 == r3)
        self.assertFalse(f1 == l3)
        self.assertFalse(f1 == l1)
        self.assertFalse(f1 == l2)
        # inequality
        self.assertFalse(f1 != f1)
        self.assertFalse(r1 != f1)
        self.assertFalse(f1 != r1)
        self.assertTrue(f1 != f3)
        self.assertTrue(r1 != f3)
        self.assertTrue(f1 != r3)
        self.assertTrue(f1 != l3)
        self.assertTrue(f1 != l1)
        self.assertTrue(f1 != l2)
    def test_Mapping(self):
        """dict is a Mapping; a minimal Mapping defers comparisons correctly."""
        for sample in [dict]:
            self.assertIsInstance(sample(), Mapping)
            self.assertTrue(issubclass(sample, Mapping))
        self.validate_abstract_methods(Mapping, '__contains__', '__iter__', '__len__',
            '__getitem__')
        class MyMapping(Mapping):
            def __len__(self):
                return 0
            def __getitem__(self, i):
                raise IndexError
            def __iter__(self):
                return iter(())
        self.validate_comparison(MyMapping())
    def test_MutableMapping(self):
        """dict is a MutableMapping with the expected abstract methods."""
        for sample in [dict]:
            self.assertIsInstance(sample(), MutableMapping)
            self.assertTrue(issubclass(sample, MutableMapping))
        self.validate_abstract_methods(MutableMapping, '__contains__', '__iter__', '__len__',
            '__getitem__', '__setitem__', '__delitem__')
    def test_MutableMapping_subclass(self):
        # Test issue 9214
        mymap = UserDict()
        mymap['red'] = 5
        self.assertIsInstance(mymap.keys(), Set)
        self.assertIsInstance(mymap.keys(), KeysView)
        self.assertIsInstance(mymap.items(), Set)
        self.assertIsInstance(mymap.items(), ItemsView)
        mymap = UserDict()
        mymap['red'] = 5
        z = mymap.keys() | {'orange'}
        self.assertIsInstance(z, set)
        list(z)
        mymap['blue'] = 7               # Shouldn't affect 'z'
        self.assertEqual(sorted(z), ['orange', 'red'])
        mymap = UserDict()
        mymap['red'] = 5
        z = mymap.items() | {('orange', 3)}
        self.assertIsInstance(z, set)
        list(z)
        mymap['blue'] = 7               # Shouldn't affect 'z'
        self.assertEqual(sorted(z), [('orange', 3), ('red', 5)])
    def test_Sequence(self):
        """tuple/list/bytes/str/range/memoryview are all Sequences."""
        for sample in [tuple, list, bytes, str]:
            self.assertIsInstance(sample(), Sequence)
            self.assertTrue(issubclass(sample, Sequence))
        self.assertIsInstance(range(10), Sequence)
        self.assertTrue(issubclass(range, Sequence))
        self.assertIsInstance(memoryview(b""), Sequence)
        self.assertTrue(issubclass(memoryview, Sequence))
        self.assertTrue(issubclass(str, Sequence))
        self.validate_abstract_methods(Sequence, '__contains__', '__iter__', '__len__',
            '__getitem__')
    def test_ByteString(self):
        """bytes/bytearray are ByteStrings; str, list, tuple, memoryview are not."""
        for sample in [bytes, bytearray]:
            self.assertIsInstance(sample(), ByteString)
            self.assertTrue(issubclass(sample, ByteString))
        for sample in [str, list, tuple]:
            self.assertNotIsInstance(sample(), ByteString)
            self.assertFalse(issubclass(sample, ByteString))
        self.assertNotIsInstance(memoryview(b""), ByteString)
        self.assertFalse(issubclass(memoryview, ByteString))
    def test_MutableSequence(self):
        """list/bytearray are MutableSequences; immutable sequences are not."""
        for sample in [tuple, str, bytes]:
            self.assertNotIsInstance(sample(), MutableSequence)
            self.assertFalse(issubclass(sample, MutableSequence))
        for sample in [list, bytearray]:
            self.assertIsInstance(sample(), MutableSequence)
            self.assertTrue(issubclass(sample, MutableSequence))
        self.assertFalse(issubclass(str, MutableSequence))
        self.validate_abstract_methods(MutableSequence, '__contains__', '__iter__',
            '__len__', '__getitem__', '__setitem__', '__delitem__', 'insert')
    def test_MutableSequence_mixins(self):
        # Test the mixins of MutableSequence by creating a minimal concrete
        # class inherited from it.
        class MutableSequenceSubclass(MutableSequence):
            def __init__(self):
                self.lst = []
            def __setitem__(self, index, value):
                self.lst[index] = value
            def __getitem__(self, index):
                return self.lst[index]
            def __len__(self):
                return len(self.lst)
            def __delitem__(self, index):
                del self.lst[index]
            def insert(self, index, value):
                self.lst.insert(index, value)
        mss = MutableSequenceSubclass()
        mss.append(0)
        mss.extend((1, 2, 3, 4))
        self.assertEqual(len(mss), 5)
        self.assertEqual(mss[3], 3)
        mss.reverse()
        self.assertEqual(mss[3], 1)
        mss.pop()
        self.assertEqual(len(mss), 4)
        mss.remove(3)
        self.assertEqual(len(mss), 3)
        mss += (10, 20, 30)
        self.assertEqual(len(mss), 6)
        self.assertEqual(mss[-1], 30)
        mss.clear()
        self.assertEqual(len(mss), 0)
################################################################################
### Counter
################################################################################
class CounterSubclassWithSetItem(Counter):
    """Counter subclass that records whether __setitem__ was ever invoked.

    Used to verify that element counting dispatches to the slow (pure
    Python) path when __setitem__ is overridden."""
    def __init__(self, *args, **kwds):
        self.called = False
        super().__init__(*args, **kwds)
    def __setitem__(self, key, value):
        self.called = True
        super().__setitem__(key, value)
class CounterSubclassWithGet(Counter):
    """Counter subclass that records whether get() was ever invoked.

    Used to verify that element counting dispatches to the slow (pure
    Python) path when get() is overridden."""
    def __init__(self, *args, **kwds):
        self.called = False
        super().__init__(*args, **kwds)
    def get(self, key, default):
        self.called = True
        return super().get(key, default)
class TestCounter(unittest.TestCase):
    """Tests for collections.Counter: basic mapping behavior, multiset
    arithmetic, subtraction, copying, and the _count_elements helper."""
    def test_basics(self):
        """Core Counter behavior: counting, missing keys, elements(), update()."""
        c = Counter('abcaba')
        self.assertEqual(c, Counter({'a':3 , 'b': 2, 'c': 1}))
        self.assertEqual(c, Counter(a=3, b=2, c=1))
        self.assertIsInstance(c, dict)
        self.assertIsInstance(c, Mapping)
        self.assertTrue(issubclass(Counter, dict))
        self.assertTrue(issubclass(Counter, Mapping))
        self.assertEqual(len(c), 3)
        self.assertEqual(sum(c.values()), 6)
        self.assertEqual(sorted(c.values()), [1, 2, 3])
        self.assertEqual(sorted(c.keys()), ['a', 'b', 'c'])
        self.assertEqual(sorted(c), ['a', 'b', 'c'])
        self.assertEqual(sorted(c.items()),
                         [('a', 3), ('b', 2), ('c', 1)])
        self.assertEqual(c['b'], 2)
        self.assertEqual(c['z'], 0)     # missing keys count as zero, not KeyError
        self.assertEqual(c.__contains__('c'), True)
        self.assertEqual(c.__contains__('z'), False)
        self.assertEqual(c.get('b', 10), 2)
        self.assertEqual(c.get('z', 10), 10)
        self.assertEqual(c, dict(a=3, b=2, c=1))
        self.assertEqual(repr(c), "Counter({'a': 3, 'b': 2, 'c': 1})")
        self.assertEqual(c.most_common(), [('a', 3), ('b', 2), ('c', 1)])
        for i in range(5):
            self.assertEqual(c.most_common(i),
                             [('a', 3), ('b', 2), ('c', 1)][:i])
        self.assertEqual(''.join(sorted(c.elements())), 'aaabbc')
        c['a'] += 1         # increment an existing value
        c['b'] -= 2         # sub existing value to zero
        del c['c']          # remove an entry
        del c['c']          # make sure that del doesn't raise KeyError
        c['d'] -= 2         # sub from a missing value
        c['e'] = -5         # directly assign a missing value
        c['f'] += 4         # add to a missing value
        self.assertEqual(c, dict(a=4, b=0, d=-2, e=-5, f=4))
        self.assertEqual(''.join(sorted(c.elements())), 'aaaaffff')
        self.assertEqual(c.pop('f'), 4)
        self.assertNotIn('f', c)
        for i in range(3):
            elem, cnt = c.popitem()
            self.assertNotIn(elem, c)
        c.clear()
        self.assertEqual(c, {})
        self.assertEqual(repr(c), 'Counter()')
        self.assertRaises(NotImplementedError, Counter.fromkeys, 'abc')
        self.assertRaises(TypeError, hash, c)
        c.update(dict(a=5, b=3))
        c.update(c=1)
        c.update(Counter('a' * 50 + 'b' * 30))
        c.update()          # test case with no args
        c.__init__('a' * 500 + 'b' * 300)
        c.__init__('cdc')
        c.__init__()
        self.assertEqual(c, dict(a=555, b=333, c=3, d=1))
        self.assertEqual(c.setdefault('d', 5), 1)
        self.assertEqual(c['d'], 1)
        self.assertEqual(c.setdefault('e', 5), 5)
        self.assertEqual(c['e'], 5)
    def test_init(self):
        """Keyword names like 'self'/'iterable' must not be treated specially."""
        self.assertEqual(list(Counter(self=42).items()), [('self', 42)])
        self.assertEqual(list(Counter(iterable=42).items()), [('iterable', 42)])
        self.assertEqual(list(Counter(iterable=None).items()), [('iterable', None)])
        self.assertRaises(TypeError, Counter, 42)
        self.assertRaises(TypeError, Counter, (), ())
        self.assertRaises(TypeError, Counter.__init__)
    def test_update(self):
        """Same keyword-name checks for update()."""
        c = Counter()
        c.update(self=42)
        self.assertEqual(list(c.items()), [('self', 42)])
        c = Counter()
        c.update(iterable=42)
        self.assertEqual(list(c.items()), [('iterable', 42)])
        c = Counter()
        c.update(iterable=None)
        self.assertEqual(list(c.items()), [('iterable', None)])
        self.assertRaises(TypeError, Counter().update, 42)
        self.assertRaises(TypeError, Counter().update, {}, {})
        self.assertRaises(TypeError, Counter.update)
    def test_copying(self):
        # Check that counters are copyable, deepcopyable, picklable, and
        # have a repr/eval round-trip
        words = Counter('which witch had which witches wrist watch'.split())
        def check(dup):
            msg = "\ncopy: %s\nwords: %s" % (dup, words)
            self.assertIsNot(dup, words, msg)
            self.assertEqual(dup, words)
        check(words.copy())
        check(copy.copy(words))
        check(copy.deepcopy(words))
        for proto in range(pickle.HIGHEST_PROTOCOL + 1):
            with self.subTest(proto=proto):
                check(pickle.loads(pickle.dumps(words, proto)))
        check(eval(repr(words)))
        update_test = Counter()
        update_test.update(words)
        check(update_test)
        check(Counter(words))
    def test_copy_subclass(self):
        """copy() on a subclass preserves the subclass type."""
        class MyCounter(Counter):
            pass
        c = MyCounter('slartibartfast')
        d = c.copy()
        self.assertEqual(d, c)
        self.assertEqual(len(d), len(c))
        self.assertEqual(type(d), type(c))
    def test_conversions(self):
        # Convert to: set, list, dict
        s = 'she sells sea shells by the sea shore'
        self.assertEqual(sorted(Counter(s).elements()), sorted(s))
        self.assertEqual(sorted(Counter(s)), sorted(set(s)))
        self.assertEqual(dict(Counter(s)), dict(Counter(s).items()))
        self.assertEqual(set(Counter(s)), set(s))
    def test_invariant_for_the_in_operator(self):
        """Every iterated element tests as a member."""
        c = Counter(a=10, b=-2, c=0)
        for elem in c:
            self.assertTrue(elem in c)
            self.assertIn(elem, c)
    def test_multiset_operations(self):
        """+, -, |, & agree with the per-element formulas and drop non-positives."""
        # Verify that adding a zero counter will strip zeros and negatives
        c = Counter(a=10, b=-2, c=0) + Counter()
        self.assertEqual(dict(c), dict(a=10))
        elements = 'abcd'
        for i in range(1000):
            # test random pairs of multisets
            p = Counter(dict((elem, randrange(-2,4)) for elem in elements))
            p.update(e=1, f=-1, g=0)
            q = Counter(dict((elem, randrange(-2,4)) for elem in elements))
            q.update(h=1, i=-1, j=0)
            for counterop, numberop in [
                (Counter.__add__, lambda x, y: max(0, x+y)),
                (Counter.__sub__, lambda x, y: max(0, x-y)),
                (Counter.__or__, lambda x, y: max(0,x,y)),
                (Counter.__and__, lambda x, y: max(0, min(x,y))),
            ]:
                result = counterop(p, q)
                for x in elements:
                    self.assertEqual(numberop(p[x], q[x]), result[x],
                                     (counterop, x, p, q))
                # verify that results exclude non-positive counts
                self.assertTrue(x>0 for x in result.values())
        elements = 'abcdef'
        for i in range(100):
            # verify that random multisets with no repeats are exactly like sets
            p = Counter(dict((elem, randrange(0, 2)) for elem in elements))
            q = Counter(dict((elem, randrange(0, 2)) for elem in elements))
            for counterop, setop in [
                (Counter.__sub__, set.__sub__),
                (Counter.__or__, set.__or__),
                (Counter.__and__, set.__and__),
            ]:
                counter_result = counterop(p, q)
                set_result = setop(set(p.elements()), set(q.elements()))
                self.assertEqual(counter_result, dict.fromkeys(set_result, 1))
    def test_inplace_operations(self):
        """In-place ops give the same result as their binary forms, in place."""
        elements = 'abcd'
        for i in range(1000):
            # test random pairs of multisets
            p = Counter(dict((elem, randrange(-2,4)) for elem in elements))
            p.update(e=1, f=-1, g=0)
            q = Counter(dict((elem, randrange(-2,4)) for elem in elements))
            q.update(h=1, i=-1, j=0)
            for inplace_op, regular_op in [
                (Counter.__iadd__, Counter.__add__),
                (Counter.__isub__, Counter.__sub__),
                (Counter.__ior__, Counter.__or__),
                (Counter.__iand__, Counter.__and__),
            ]:
                c = p.copy()
                c_id = id(c)
                regular_result = regular_op(c, q)
                inplace_result = inplace_op(c, q)
                self.assertEqual(inplace_result, regular_result)
                # in-place ops must mutate and return the same object
                self.assertEqual(id(inplace_result), c_id)
    def test_subtract(self):
        """subtract() allows results to go negative, unlike __sub__."""
        c = Counter(a=-5, b=0, c=5, d=10, e=15,g=40)
        c.subtract(a=1, b=2, c=-3, d=10, e=20, f=30, h=-50)
        self.assertEqual(c, Counter(a=-6, b=-2, c=8, d=0, e=-5, f=-30, g=40, h=50))
        c = Counter(a=-5, b=0, c=5, d=10, e=15,g=40)
        c.subtract(Counter(a=1, b=2, c=-3, d=10, e=20, f=30, h=-50))
        self.assertEqual(c, Counter(a=-6, b=-2, c=8, d=0, e=-5, f=-30, g=40, h=50))
        c = Counter('aaabbcd')
        c.subtract('aaaabbcce')
        self.assertEqual(c, Counter(a=-1, b=0, c=-1, d=1, e=-1))
        c = Counter()
        c.subtract(self=42)
        self.assertEqual(list(c.items()), [('self', -42)])
        c = Counter()
        c.subtract(iterable=42)
        self.assertEqual(list(c.items()), [('iterable', -42)])
        self.assertRaises(TypeError, Counter().subtract, 42)
        self.assertRaises(TypeError, Counter().subtract, {}, {})
        self.assertRaises(TypeError, Counter.subtract)
    def test_unary(self):
        """+c keeps positive counts; -c negates and keeps the positives."""
        c = Counter(a=-5, b=0, c=5, d=10, e=15,g=40)
        self.assertEqual(dict(+c), dict(c=5, d=10, e=15, g=40))
        self.assertEqual(dict(-c), dict(a=5))
    def test_repr_nonsortable(self):
        """repr() must not fail when counts are not mutually orderable."""
        c = Counter(a=2, b=None)
        r = repr(c)
        self.assertIn("'a': 2", r)
        self.assertIn("'b': None", r)
    def test_helper_function(self):
        # two paths, one for real dicts and one for other mappings
        elems = list('abracadabra')
        d = dict()
        _count_elements(d, elems)
        self.assertEqual(d, {'a': 5, 'r': 2, 'b': 2, 'c': 1, 'd': 1})
        m = OrderedDict()
        _count_elements(m, elems)
        self.assertEqual(m,
              OrderedDict([('a', 5), ('b', 2), ('r', 2), ('c', 1), ('d', 1)]))
        # test fidelity to the pure python version
        c = CounterSubclassWithSetItem('abracadabra')
        self.assertTrue(c.called)
        self.assertEqual(dict(c), {'a': 5, 'b': 2, 'c': 1, 'd': 1, 'r':2 })
        c = CounterSubclassWithGet('abracadabra')
        self.assertTrue(c.called)
        self.assertEqual(dict(c), {'a': 5, 'b': 2, 'c': 1, 'd': 1, 'r':2 })
################################################################################
### OrderedDict
################################################################################
class TestOrderedDict(unittest.TestCase):
    """Behavioral tests for OrderedDict: construction, mutation, ordering
    guarantees, copying/pickling, repr round-trips and subclass hooks."""

    def test_init(self):
        with self.assertRaises(TypeError):
            OrderedDict([('a', 1), ('b', 2)], None)                                 # too many args
        pairs = [('a', 1), ('b', 2), ('c', 3), ('d', 4), ('e', 5)]
        self.assertEqual(sorted(OrderedDict(dict(pairs)).items()), pairs)           # dict input
        self.assertEqual(sorted(OrderedDict(**dict(pairs)).items()), pairs)         # kwds input
        self.assertEqual(list(OrderedDict(pairs).items()), pairs)                   # pairs input
        self.assertEqual(list(OrderedDict([('a', 1), ('b', 2), ('c', 9), ('d', 4)],
                                          c=3, e=5).items()), pairs)                # mixed input
        # make sure no positional args conflict with possible kwdargs
        self.assertEqual(list(OrderedDict(self=42).items()), [('self', 42)])
        self.assertEqual(list(OrderedDict(other=42).items()), [('other', 42)])
        self.assertRaises(TypeError, OrderedDict, 42)
        self.assertRaises(TypeError, OrderedDict, (), ())
        self.assertRaises(TypeError, OrderedDict.__init__)
        # Make sure that direct calls to __init__ do not clear previous contents
        d = OrderedDict([('a', 1), ('b', 2), ('c', 3), ('d', 44), ('e', 55)])
        d.__init__([('e', 5), ('f', 6)], g=7, d=4)
        self.assertEqual(list(d.items()),
            [('a', 1), ('b', 2), ('c', 3), ('d', 4), ('e', 5), ('f', 6), ('g', 7)])

    def test_update(self):
        with self.assertRaises(TypeError):
            OrderedDict().update([('a', 1), ('b', 2)], None)                        # too many args
        pairs = [('a', 1), ('b', 2), ('c', 3), ('d', 4), ('e', 5)]
        od = OrderedDict()
        od.update(dict(pairs))
        self.assertEqual(sorted(od.items()), pairs)                                 # dict input
        od = OrderedDict()
        od.update(**dict(pairs))
        self.assertEqual(sorted(od.items()), pairs)                                 # kwds input
        od = OrderedDict()
        od.update(pairs)
        self.assertEqual(list(od.items()), pairs)                                   # pairs input
        od = OrderedDict()
        od.update([('a', 1), ('b', 2), ('c', 9), ('d', 4)], c=3, e=5)
        self.assertEqual(list(od.items()), pairs)                                   # mixed input
        # Issue 9137: Named argument called 'other' or 'self'
        # shouldn't be treated specially.
        od = OrderedDict()
        od.update(self=23)
        self.assertEqual(list(od.items()), [('self', 23)])
        od = OrderedDict()
        od.update(other={})
        self.assertEqual(list(od.items()), [('other', {})])
        od = OrderedDict()
        od.update(red=5, blue=6, other=7, self=8)
        self.assertEqual(sorted(list(od.items())),
                         [('blue', 6), ('other', 7), ('red', 5), ('self', 8)])
        # Make sure that direct calls to update do not clear previous contents
        # add that updates items are not moved to the end
        d = OrderedDict([('a', 1), ('b', 2), ('c', 3), ('d', 44), ('e', 55)])
        d.update([('e', 5), ('f', 6)], g=7, d=4)
        self.assertEqual(list(d.items()),
            [('a', 1), ('b', 2), ('c', 3), ('d', 4), ('e', 5), ('f', 6), ('g', 7)])
        self.assertRaises(TypeError, OrderedDict().update, 42)
        self.assertRaises(TypeError, OrderedDict().update, (), ())
        self.assertRaises(TypeError, OrderedDict.update)

    def test_abc(self):
        # OrderedDict must register as (and subclass) MutableMapping.
        self.assertIsInstance(OrderedDict(), MutableMapping)
        self.assertTrue(issubclass(OrderedDict, MutableMapping))

    def test_clear(self):
        pairs = [('c', 1), ('b', 2), ('a', 3), ('d', 4), ('e', 5), ('f', 6)]
        shuffle(pairs)
        od = OrderedDict(pairs)
        self.assertEqual(len(od), len(pairs))
        od.clear()
        self.assertEqual(len(od), 0)

    def test_delitem(self):
        pairs = [('c', 1), ('b', 2), ('a', 3), ('d', 4), ('e', 5), ('f', 6)]
        od = OrderedDict(pairs)
        del od['a']
        self.assertNotIn('a', od)
        with self.assertRaises(KeyError):
            del od['a']
        # Remaining items keep their original relative order.
        self.assertEqual(list(od.items()), pairs[:2] + pairs[3:])

    def test_setitem(self):
        od = OrderedDict([('d', 1), ('b', 2), ('c', 3), ('a', 4), ('e', 5)])
        od['c'] = 10           # existing element keeps its position
        od['f'] = 20           # new element is appended
        self.assertEqual(list(od.items()),
                         [('d', 1), ('b', 2), ('c', 10), ('a', 4), ('e', 5), ('f', 20)])

    def test_iterators(self):
        pairs = [('c', 1), ('b', 2), ('a', 3), ('d', 4), ('e', 5), ('f', 6)]
        shuffle(pairs)
        od = OrderedDict(pairs)
        self.assertEqual(list(od), [t[0] for t in pairs])
        self.assertEqual(list(od.keys()), [t[0] for t in pairs])
        self.assertEqual(list(od.values()), [t[1] for t in pairs])
        self.assertEqual(list(od.items()), pairs)
        self.assertEqual(list(reversed(od)),
                         [t[0] for t in reversed(pairs)])

    def test_popitem(self):
        pairs = [('c', 1), ('b', 2), ('a', 3), ('d', 4), ('e', 5), ('f', 6)]
        shuffle(pairs)
        od = OrderedDict(pairs)
        # popitem() is LIFO by default.
        while pairs:
            self.assertEqual(od.popitem(), pairs.pop())
        with self.assertRaises(KeyError):
            od.popitem()
        self.assertEqual(len(od), 0)

    def test_pop(self):
        pairs = [('c', 1), ('b', 2), ('a', 3), ('d', 4), ('e', 5), ('f', 6)]
        shuffle(pairs)
        od = OrderedDict(pairs)
        shuffle(pairs)
        while pairs:
            k, v = pairs.pop()
            self.assertEqual(od.pop(k), v)
        with self.assertRaises(KeyError):
            od.pop('xyz')
        self.assertEqual(len(od), 0)
        self.assertEqual(od.pop(k, 12345), 12345)
        # make sure pop still works when __missing__ is defined
        class Missing(OrderedDict):
            def __missing__(self, key):
                return 0
        m = Missing(a=1)
        self.assertEqual(m.pop('b', 5), 5)
        self.assertEqual(m.pop('a', 6), 1)
        self.assertEqual(m.pop('a', 6), 6)
        with self.assertRaises(KeyError):
            m.pop('a')

    def test_equality(self):
        pairs = [('c', 1), ('b', 2), ('a', 3), ('d', 4), ('e', 5), ('f', 6)]
        shuffle(pairs)
        od1 = OrderedDict(pairs)
        od2 = OrderedDict(pairs)
        self.assertEqual(od1, od2)          # same order implies equality
        pairs = pairs[2:] + pairs[:2]
        od2 = OrderedDict(pairs)
        self.assertNotEqual(od1, od2)       # different order implies inequality
        # comparison to regular dict is not order sensitive
        self.assertEqual(od1, dict(od2))
        self.assertEqual(dict(od2), od1)
        # different length implied inequality
        self.assertNotEqual(od1, OrderedDict(pairs[:-1]))

    def test_copying(self):
        # Check that ordered dicts are copyable, deepcopyable, picklable,
        # and have a repr/eval round-trip
        pairs = [('c', 1), ('b', 2), ('a', 3), ('d', 4), ('e', 5), ('f', 6)]
        od = OrderedDict(pairs)
        def check(dup):
            msg = "\ncopy: %s\nod: %s" % (dup, od)
            self.assertIsNot(dup, od, msg)
            self.assertEqual(dup, od)
        check(od.copy())
        check(copy.copy(od))
        check(copy.deepcopy(od))
        for proto in range(pickle.HIGHEST_PROTOCOL + 1):
            with self.subTest(proto=proto):
                check(pickle.loads(pickle.dumps(od, proto)))
        check(eval(repr(od)))
        update_test = OrderedDict()
        update_test.update(od)
        check(update_test)
        check(OrderedDict(od))

    def test_yaml_linkage(self):
        # Verify that __reduce__ is setup in a way that supports PyYAML's dump() feature.
        # In yaml, lists are native but tuples are not.
        pairs = [('c', 1), ('b', 2), ('a', 3), ('d', 4), ('e', 5), ('f', 6)]
        od = OrderedDict(pairs)
        # yaml.dump(od) -->
        # '!!python/object/apply:__main__.OrderedDict\n- - [a, 1]\n  - [b, 2]\n'
        self.assertTrue(all(type(pair)==list for pair in od.__reduce__()[1]))

    def test_reduce_not_too_fat(self):
        # do not save instance dictionary if not needed
        pairs = [('c', 1), ('b', 2), ('a', 3), ('d', 4), ('e', 5), ('f', 6)]
        od = OrderedDict(pairs)
        self.assertIsNone(od.__reduce__()[2])
        od.x = 10
        self.assertIsNotNone(od.__reduce__()[2])

    def test_pickle_recursive(self):
        # A self-referencing OrderedDict must survive a pickle round-trip.
        od = OrderedDict()
        od[1] = od
        for proto in range(-1, pickle.HIGHEST_PROTOCOL + 1):
            dup = pickle.loads(pickle.dumps(od, proto))
            self.assertIsNot(dup, od)
            self.assertEqual(list(dup.keys()), [1])
            self.assertIs(dup[1], dup)

    def test_repr(self):
        od = OrderedDict([('c', 1), ('b', 2), ('a', 3), ('d', 4), ('e', 5), ('f', 6)])
        self.assertEqual(repr(od),
            "OrderedDict([('c', 1), ('b', 2), ('a', 3), ('d', 4), ('e', 5), ('f', 6)])")
        self.assertEqual(eval(repr(od)), od)
        self.assertEqual(repr(OrderedDict()), "OrderedDict()")

    def test_repr_recursive(self):
        # See issue #9826
        od = OrderedDict.fromkeys('abc')
        od['x'] = od
        self.assertEqual(repr(od),
            "OrderedDict([('a', None), ('b', None), ('c', None), ('x', ...)])")

    def test_setdefault(self):
        pairs = [('c', 1), ('b', 2), ('a', 3), ('d', 4), ('e', 5), ('f', 6)]
        shuffle(pairs)
        od = OrderedDict(pairs)
        pair_order = list(od.items())
        self.assertEqual(od.setdefault('a', 10), 3)
        # make sure order didn't change
        self.assertEqual(list(od.items()), pair_order)
        self.assertEqual(od.setdefault('x', 10), 10)
        # make sure 'x' is added to the end
        self.assertEqual(list(od.items())[-1], ('x', 10))
        # make sure setdefault still works when __missing__ is defined
        class Missing(OrderedDict):
            def __missing__(self, key):
                return 0
        self.assertEqual(Missing().setdefault(5, 9), 9)

    def test_reinsert(self):
        # Given insert a, insert b, delete a, re-insert a,
        # verify that a is now later than b.
        od = OrderedDict()
        od['a'] = 1
        od['b'] = 2
        del od['a']
        od['a'] = 1
        self.assertEqual(list(od.items()), [('b', 2), ('a', 1)])

    def test_move_to_end(self):
        od = OrderedDict.fromkeys('abcde')
        self.assertEqual(list(od), list('abcde'))
        od.move_to_end('c')
        self.assertEqual(list(od), list('abdec'))
        od.move_to_end('c', 0)          # last=0: move to the front
        self.assertEqual(list(od), list('cabde'))
        od.move_to_end('c', 0)          # already first: no-op
        self.assertEqual(list(od), list('cabde'))
        od.move_to_end('e')             # already last: no-op
        self.assertEqual(list(od), list('cabde'))
        with self.assertRaises(KeyError):
            od.move_to_end('x')

    def test_sizeof(self):
        # Wimpy test: Just verify the reported size is larger than a regular dict
        d = dict(a=1)
        od = OrderedDict(**d)
        self.assertGreater(sys.getsizeof(od), sys.getsizeof(d))

    def test_override_update(self):
        # Verify that subclasses can override update() without breaking __init__()
        class MyOD(OrderedDict):
            def update(self, *args, **kwds):
                raise Exception()
        items = [('a', 1), ('c', 3), ('b', 2)]
        self.assertEqual(list(MyOD(items).items()), items)
class GeneralMappingTests(mapping_tests.BasicTestMappingProtocol):
    """Run the generic mapping-protocol test battery against OrderedDict."""
    type2test = OrderedDict

    def test_popitem(self):
        # Override: an empty OrderedDict must raise KeyError from popitem().
        d = self._empty_mapping()
        self.assertRaises(KeyError, d.popitem)
class MyOrderedDict(OrderedDict):
    """Trivial subclass used to exercise the mapping protocol on subclasses."""
    pass
class SubclassMappingTests(mapping_tests.BasicTestMappingProtocol):
    """Same mapping-protocol battery, but against an OrderedDict subclass."""
    type2test = MyOrderedDict

    def test_popitem(self):
        # Override: an empty mapping must raise KeyError from popitem().
        d = self._empty_mapping()
        self.assertRaises(KeyError, d.popitem)
################################################################################
### Run tests
################################################################################
import doctest, collections
def test_main(verbose=None):
    """Run all unittest suites in this module plus the collections doctests."""
    NamedTupleDocs = doctest.DocTestSuite(module=collections)
    test_classes = [TestNamedTuple, NamedTupleDocs, TestOneTrickPonyABCs,
                    TestCollectionABCs, TestCounter, TestChainMap,
                    TestOrderedDict, GeneralMappingTests, SubclassMappingTests]
    support.run_unittest(*test_classes)
    support.run_doctest(collections, verbose)

if __name__ == "__main__":
    test_main(verbose=True)
| gpl-2.0 |
zeroc0d3/docker-lab | nodejs/rootfs/usr/lib/python2.7/dist-packages/powerline/lib/watcher/uv.py | 34 | 4854 | # vim:fileencoding=utf-8:noet
from __future__ import (unicode_literals, division, absolute_import, print_function)
import os
from collections import defaultdict
from threading import RLock
from functools import partial
from threading import Thread
from errno import ENOENT
from powerline.lib.path import realpath
from powerline.lib.encoding import get_preferred_file_name_encoding
class UvNotFound(NotImplementedError):
    """Raised when the optional pyuv dependency cannot be imported."""
    pass
pyuv = None
pyuv_version_info = None
def import_pyuv():
    """Lazily import pyuv into the module globals.

    Raises UvNotFound if pyuv is not installed.  Also records the parsed
    version tuple, which is used to pick the matching FSEvent API.
    """
    global pyuv
    global pyuv_version_info
    if not pyuv:
        try:
            pyuv = __import__('pyuv')
        except ImportError:
            raise UvNotFound
        else:
            # e.g. '1.2.0' -> (1, 2, 0)
            pyuv_version_info = tuple((int(c) for c in pyuv.__version__.split('.')))
class UvThread(Thread):
    """Daemon thread that runs a pyuv event loop until asked to stop."""
    daemon = True

    def __init__(self, loop):
        self.uv_loop = loop
        # The Async handle lets other threads wake the loop thread to
        # request a shutdown (see join()).
        self.async_handle = pyuv.Async(loop, self._async_cb)
        super(UvThread, self).__init__()

    def _async_cb(self, handle):
        # Runs inside the loop thread: stop the loop and release the handle.
        self.uv_loop.stop()
        self.async_handle.close()

    def run(self):
        self.uv_loop.run()

    def join(self):
        # Signal the loop to stop, then wait for the thread to finish.
        self.async_handle.send()
        return super(UvThread, self).join()
_uv_thread = None  # module-level singleton UvThread shared by all watchers

def start_uv_thread():
    """Start the shared uv loop thread on first use and return its loop."""
    global _uv_thread
    if _uv_thread is None:
        loop = pyuv.Loop()
        _uv_thread = UvThread(loop)
        _uv_thread.start()
    return _uv_thread.uv_loop
def normpath(path, fenc):
    """Resolve *path* through symlinks and return it as a text string.

    Bytes results from realpath() are decoded with *fenc*, the preferred
    file-name encoding; text results are returned unchanged.
    """
    resolved = realpath(path)
    if isinstance(resolved, bytes):
        return resolved.decode(fenc)
    return resolved
class UvWatcher(object):
    """Base class managing pyuv FSEvent handles keyed by normalized path.

    Subclasses implement _record_event() and _stopped_watching() to react
    to file-system events.  All access to self.watches is serialized by
    self.lock because callbacks fire on the shared uv loop thread.
    """

    def __init__(self):
        import_pyuv()
        # path -> pyuv.fs.FSEvent handle
        self.watches = {}
        self.lock = RLock()
        self.loop = start_uv_thread()
        self.fenc = get_preferred_file_name_encoding()
        # The FSEvent constructor signature changed in pyuv 1.0.
        if pyuv_version_info >= (1, 0):
            self._start_watch = self._start_watch_1_x
        else:
            self._start_watch = self._start_watch_0_x

    def _start_watch_1_x(self, path):
        # pyuv >= 1.0: create the handle, then start it explicitly.
        handle = pyuv.fs.FSEvent(self.loop)
        handle.start(path, 0, partial(self._record_event, path))
        self.watches[path] = handle

    def _start_watch_0_x(self, path):
        # pyuv < 1.0: the constructor starts the watch directly.
        self.watches[path] = pyuv.fs.FSEvent(
            self.loop,
            path,
            partial(self._record_event, path),
            pyuv.fs.UV_CHANGE | pyuv.fs.UV_RENAME
        )

    def watch(self, path):
        """Begin watching *path*; OSError(ENOENT) if it does not exist."""
        path = normpath(path, self.fenc)
        with self.lock:
            if path not in self.watches:
                try:
                    self._start_watch(path)
                except pyuv.error.FSEventError as e:
                    code = e.args[0]
                    if code == pyuv.errno.UV_ENOENT:
                        # Translate the libuv error into a standard OSError.
                        raise OSError(ENOENT, 'No such file or directory: ' + path)
                    else:
                        raise

    def unwatch(self, path):
        """Stop watching *path*; silently ignore paths not being watched."""
        path = normpath(path, self.fenc)
        with self.lock:
            try:
                watch = self.watches.pop(path)
            except KeyError:
                return
            watch.close(partial(self._stopped_watching, path))

    def is_watching(self, path):
        with self.lock:
            return normpath(path, self.fenc) in self.watches

    def __del__(self):
        # Close any outstanding handles.  Guard against partially
        # constructed instances: self.lock may not exist if __init__
        # failed before assigning it.
        try:
            lock = self.lock
        except AttributeError:
            pass
        else:
            with lock:
                while self.watches:
                    path, watch = self.watches.popitem()
                    watch.close(partial(self._stopped_watching, path))
class UvFileWatcher(UvWatcher):
    """Watch individual files, queueing change events per path."""

    def __init__(self):
        super(UvFileWatcher, self).__init__()
        # path -> list of pending event masks, consumed by __call__().
        self.events = defaultdict(list)

    def _record_event(self, path, fsevent_handle, filename, events, error):
        with self.lock:
            self.events[path].append(events)
            # BUGFIX: test the rename bit with bitwise AND.  The original
            # used `events | pyuv.fs.UV_RENAME`, which is always truthy,
            # so the existence check below ran on every event.
            if events & pyuv.fs.UV_RENAME:
                if not os.path.exists(path):
                    # The file was removed or renamed away: drop its watch.
                    self.watches.pop(path).close()

    def _stopped_watching(self, path, *args):
        self.events.pop(path, None)

    def __call__(self, path):
        """Return True if *path* changed since the last call.

        A path queried for the first time is added to the watch set and
        reported as modified once, so callers pick up the initial state.
        """
        path = normpath(path, self.fenc)
        with self.lock:
            events = self.events.pop(path, None)
            if events:
                return True
            if path not in self.watches:
                self.watch(path)
                return True
            return False
class UvTreeWatcher(UvWatcher):
    """Recursively watch a directory tree, exposing a one-shot modified flag."""
    is_dummy = False

    def __init__(self, basedir, ignore_event=None):
        super(UvTreeWatcher, self).__init__()
        self.ignore_event = ignore_event or (lambda path, name: False)
        self.basedir = normpath(basedir, self.fenc)
        # Report the tree as modified on the first query.
        self.modified = True
        self.watch_directory(self.basedir)

    def watch_directory(self, path):
        """Watch *path* and every subdirectory beneath it."""
        for root, dirs, files in os.walk(normpath(path, self.fenc)):
            self.watch_one_directory(root)

    def watch_one_directory(self, dirname):
        try:
            self.watch(dirname)
        except OSError:
            # Directory vanished between discovery and watch: ignore.
            pass

    def _stopped_watching(self, path, *args):
        pass

    def _record_event(self, path, fsevent_handle, filename, events, error):
        if not self.ignore_event(path, filename):
            self.modified = True
            if events == pyuv.fs.UV_CHANGE | pyuv.fs.UV_RENAME:
                # Stat changes to watched directory are UV_CHANGE|UV_RENAME. It
                # is weird.
                pass
            elif events & pyuv.fs.UV_RENAME:
                # BUGFIX: test the rename bit with bitwise AND.  The original
                # `events | UV_RENAME` was always truthy, which made the
                # else-branch below (watching new subdirectories) dead code.
                if not os.path.isdir(path):
                    self.unwatch(path)
            else:
                # NOTE(review): per the original comment, mkdir/rmdir events
                # arrive here; filename may be None for some events, so guard
                # before joining.
                if filename:
                    full_name = os.path.join(path, filename)
                    if os.path.isdir(full_name):
                        # For some reason mkdir and rmdir both fall into this
                        # category
                        self.watch_directory(full_name)

    def __call__(self):
        # Return the modified flag and atomically reset it (missing
        # attribute reads as False).
        return self.__dict__.pop('modified', False)
| mit |
popazerty/e2_sh4 | lib/python/Components/ParentalControlList.py | 37 | 2159 | from MenuList import MenuList
from Components.ParentalControl import IMG_WHITESERVICE, IMG_WHITEBOUQUET, IMG_BLACKSERVICE, IMG_BLACKBOUQUET
from Tools.Directories import SCOPE_ACTIVE_SKIN, resolveFilename
from enigma import eListboxPythonMultiContent, gFont, RT_HALIGN_LEFT
from Tools.LoadPixmap import LoadPixmap
#Now there is a list of pictures instead of one...
entryPicture = {IMG_BLACKSERVICE: LoadPixmap(resolveFilename(SCOPE_ACTIVE_SKIN, "icons/lock.png")),
IMG_BLACKBOUQUET: LoadPixmap(resolveFilename(SCOPE_ACTIVE_SKIN, "icons/lockBouquet.png")),
IMG_WHITESERVICE: LoadPixmap(resolveFilename(SCOPE_ACTIVE_SKIN, "icons/unlock.png")),
IMG_WHITEBOUQUET: LoadPixmap(resolveFilename(SCOPE_ACTIVE_SKIN, "icons/unlockBouquet.png"))}
def ParentalControlEntryComponent(service, name, protectionType):
    """Build one multi-content list row for a service or bouquet.

    protectionType is a (locked, image_key) pair; the lock icon shown is
    selected by the image key, not by the locked flag.
    """
    locked = protectionType[0]
    image_key = protectionType[1]
    row = [
        (service, name, locked),
        (eListboxPythonMultiContent.TYPE_TEXT, 80, 5, 300, 50, 0, RT_HALIGN_LEFT, name),
    ]
    if image_key != "":
        row.append((eListboxPythonMultiContent.TYPE_PIXMAP_ALPHATEST, 0, 0, 32, 32, entryPicture[image_key]))
    return row
class ParentalControlList(MenuList):
    """MenuList of services/bouquets with their parental-control state."""

    def __init__(self, list, enableWrapAround = False):
        MenuList.__init__(self, list, enableWrapAround, eListboxPythonMultiContent)
        self.l.setFont(0, gFont("Regular", 20))
        self.l.setItemHeight(32)

    def toggleSelectedLock(self):
        # Toggle protection of the highlighted entry, then rebuild its row
        # so the icon reflects the new protection type.
        from Components.ParentalControl import parentalControl
        print "self.l.getCurrentSelection():", self.l.getCurrentSelection()
        print "self.l.getCurrentSelectionIndex():", self.l.getCurrentSelectionIndex()
        curSel = self.l.getCurrentSelection()
        # curSel[0] is the (service, name, locked) tuple of the entry.
        if curSel[0][2]:
            parentalControl.unProtectService(self.l.getCurrentSelection()[0][0])
        else:
            parentalControl.protectService(self.l.getCurrentSelection()[0][0])
        #Instead of just negating the locked- flag, now I call the getProtectionType every time...
        self.list[self.l.getCurrentSelectionIndex()] = ParentalControlEntryComponent(curSel[0][0], curSel[0][1], parentalControl.getProtectionType(curSel[0][0]))
        self.l.setList(self.list)
| gpl-2.0 |
its-lab/MoniTutor-Tunnel | start_resultwriter.py | 1 | 2246 | import argparse
import logging
import signal
import time
from server.icinga2_resultwriter import IcingaResultWriter as ResultWriter
import sys
import os
from utils import daemonize
from utils import configure_logging
from utils import get_logger
# Command-line configuration for the result writer daemon.
parser = argparse.ArgumentParser(description="MoniTunnel server")
parser.add_argument("-a", "--rabbit-mq-host", default="localhost", help="Address of the rabbit-mq server")
parser.add_argument("-v", "--verbose", action="count", help="Increase verbosity. -vvvvv == DEBUG")
parser.add_argument("-l", "--logging", action="store_true", help="Write messages to syslog instead of stdout. Increase verbosity of logs with -v")
parser.add_argument("-t", "--task-exchange", default="task_exchange", help="Name of the task exchange")
parser.add_argument("-r", "--result-exchange", default="result_exchange", help="Name of the result exchange")
parser.add_argument("-d", "--daemonize", action="store_true", help="Start as daemon")
parser.add_argument("-i", "--icinga-path", default="/var/run/icinga2/cmd/icinga2.cmd", help="Absolut path to icinga2.cmd file")
config = vars(parser.parse_args())

# The writer consumes check results from the result exchange and feeds
# them into the Icinga2 command pipe.
result_writer = ResultWriter(config["rabbit_mq_host"],
                             config["result_exchange"],
                             config["task_exchange"],
                             config["icinga_path"])

logger = get_logger(config["verbose"])
configure_logging(logger, config["logging"])
def signal_handler(signum, frame):
    """Shut the ResultWriter thread down cleanly on termination signals.

    NOTE(review): this logs through the root ``logging`` module while the
    module configures a dedicated ``logger`` above — confirm which is
    intended.
    """
    logging.warn("SIGNAL " + str(signum) + " received! Frame: " + str(frame))
    logging.debug("Stop ResultWriter thread")
    result_writer.stop()
    logging.debug("Wait for ResultWriter thread to join")
    result_writer.join()
    logging.debug("ResultWriter thread joined")
    if config["daemonize"]:
        # Remove the pidfile written by daemonize() before exiting.
        os.remove("/var/run/monitunnel.pid")
    sys.exit(0)
if "__main__" == __name__:
    if config["daemonize"]:
        daemonize()
    # Install handlers so the writer thread is joined before exit.
    signal.signal(signal.SIGINT, signal_handler)
    signal.signal(signal.SIGALRM, signal_handler)
    signal.signal(signal.SIGHUP, signal_handler)
    signal.signal(signal.SIGTERM, signal_handler)
    logging.debug("Start Icinga ResultWriter Thread")
    result_writer.start()
    # Idle loop: all work happens in the writer thread; signals break us out.
    run = True
    while run:
        time.sleep(1)
| gpl-3.0 |
jamesblunt/edx-platform | lms/djangoapps/edxnotes/views.py | 72 | 3809 | """
Views related to EdxNotes.
"""
import json
import logging
from django.contrib.auth.decorators import login_required
from django.http import HttpResponse, HttpResponseBadRequest, Http404
from django.conf import settings
from django.core.urlresolvers import reverse
from edxmako.shortcuts import render_to_response
from opaque_keys.edx.keys import CourseKey
from courseware.courses import get_course_with_access
from courseware.model_data import FieldDataCache
from courseware.module_render import get_module_for_descriptor
from util.json_request import JsonResponse, JsonResponseBadRequest
from edxnotes.exceptions import EdxNotesParseError, EdxNotesServiceUnavailable
from edxnotes.helpers import (
get_notes,
get_id_token,
is_feature_enabled,
search,
get_course_position,
)
log = logging.getLogger(__name__)
@login_required
def edxnotes(request, course_id):
    """
    Displays the EdxNotes page.

    Raises Http404 when the feature is disabled for the course or the
    notes service is unavailable.
    """
    course_key = CourseKey.from_string(course_id)
    course = get_course_with_access(request.user, "load", course_key)

    if not is_feature_enabled(course):
        raise Http404

    try:
        notes = get_notes(request.user, course)
    except EdxNotesServiceUnavailable:
        raise Http404

    context = {
        "course": course,
        "search_endpoint": reverse("search_notes", kwargs={"course_id": course_id}),
        "notes": notes,
        "debug": json.dumps(settings.DEBUG),
        'position': None,
    }

    if not notes:
        # No notes yet: resolve the user's last position in the course so
        # the template can link back into the courseware.
        field_data_cache = FieldDataCache.cache_for_descriptor_descendents(
            course.id, request.user, course, depth=2
        )
        course_module = get_module_for_descriptor(
            request.user, request, course, field_data_cache, course_key, course=course
        )
        position = get_course_position(course_module)
        if position:
            context.update({
                'position': position,
            })

    return render_to_response("edxnotes/edxnotes.html", context)
@login_required
def search_notes(request, course_id):
    """
    Handles search requests.

    Expects a ``text`` GET parameter; returns the raw search payload, a
    400 when the parameter is missing, or a 500 JSON error when the notes
    service fails.
    """
    course_key = CourseKey.from_string(course_id)
    course = get_course_with_access(request.user, "load", course_key)

    if not is_feature_enabled(course):
        raise Http404

    if "text" not in request.GET:
        return HttpResponseBadRequest()

    query_string = request.GET["text"]
    try:
        search_results = search(request.user, course, query_string)
    except (EdxNotesParseError, EdxNotesServiceUnavailable) as err:
        return JsonResponseBadRequest({"error": err.message}, status=500)

    return HttpResponse(search_results)
# pylint: disable=unused-argument
@login_required
def get_token(request, course_id):
    """
    Get JWT ID-Token, in case you need new one.

    Returns the token as plain text for the authenticated user.
    """
    return HttpResponse(get_id_token(request.user), content_type='text/plain')
@login_required
def edxnotes_visibility(request, course_id):
    """
    Handle ajax call from "Show notes" checkbox.

    Persists the boolean ``visibility`` field from the JSON request body
    on the course module; malformed bodies get a 400 response.
    """
    course_key = CourseKey.from_string(course_id)
    course = get_course_with_access(request.user, "load", course_key)
    field_data_cache = FieldDataCache([course], course_key, request.user)
    course_module = get_module_for_descriptor(
        request.user, request, course, field_data_cache, course_key, course=course
    )

    if not is_feature_enabled(course):
        raise Http404

    try:
        visibility = json.loads(request.body)["visibility"]
        course_module.edxnotes_visibility = visibility
        course_module.save()
        return JsonResponse(status=200)
    except (ValueError, KeyError):
        # ValueError: body is not valid JSON; KeyError: no "visibility" field.
        log.warning(
            "Could not decode request body as JSON and find a boolean visibility field: '%s'", request.body
        )
        return JsonResponseBadRequest()
| agpl-3.0 |
edcast-inc/edx-platform-edcast | lms/djangoapps/course_wiki/utils.py | 204 | 3623 | """
Utility functions for course_wiki.
"""
from django.core.exceptions import ObjectDoesNotExist
from xmodule import modulestore
import courseware
def user_is_article_course_staff(user, article):
    """
    The root of a course wiki is /<course_number>. This means in case there
    are two courses which have the same course_number they will end up with
    the same course wiki root e.g. MITx/Phy101/Spring and HarvardX/Phy101/Fall
    will share /Phy101.

    This looks at the course wiki root of the article and returns True if
    the user has staff access on any course that maps to that wiki root
    (e.g. staff on MITx/Phy101/Spring for root /Phy101).
    """
    wiki_slug = article_course_wiki_root_slug(article)
    if wiki_slug is None:
        return False
    return _has_wiki_staff_access(user, wiki_slug, modulestore.django.modulestore())
def _has_wiki_staff_access(user, wiki_slug, modstore):
    """Returns whether the user has staff access to the wiki represented by wiki_slug"""
    course_keys = modstore.get_courses_for_wiki(wiki_slug)

    # The wiki requires slugs to contain a non-digit, so a purely numerical
    # course number gets '_' appended to form its wiki root.  Slug '202_'
    # can therefore belong to course '202_' or '202'; consider both.
    if wiki_slug.endswith('_') and slug_is_numerical(wiki_slug[:-1]):
        course_keys.extend(modstore.get_courses_for_wiki(wiki_slug[:-1]))

    return any(
        courseware.access.has_access(user, 'staff', modstore.get_course(course_key), course_key)
        for course_key in course_keys
    )
def slug_is_numerical(slug):
    """Return True when *slug* can be parsed as a number (int or float)."""
    try:
        float(slug)
        return True
    except ValueError:
        return False
def course_wiki_slug(course):
    """Returns the slug for the course wiki root."""
    slug = course.wiki_slug
    # django-wiki expects article slugs to be non-numerical; append an
    # underscore when the course number is purely numerical.
    return slug + "_" if slug_is_numerical(slug) else slug
def article_course_wiki_root_slug(article):
    """
    We assume the second level ancestor is the course wiki root. Examples:
    / returns None
    /Phy101 returns 'Phy101'
    /Phy101/Mechanics returns 'Phy101'
    /Chem101/Metals/Iron returns 'Chem101'

    Note that someone can create an article /random-article/sub-article on the
    wiki. In this case this function will return 'random-article' even
    if no course with that course number exists.
    """
    try:
        urlpath = article.urlpath_set.get()
    except ObjectDoesNotExist:
        return None

    # cached_ancestors of /Phy101/Mechanics/Acceleration/ is a list of
    # URLPaths ['Root', 'Phy101', 'Mechanics'].
    ancestors = urlpath.cached_ancestors
    if not ancestors:
        # The article is the wiki root itself.
        return None
    if len(ancestors) == 1:
        # The article is a course wiki root.
        return urlpath.slug
    # The article lives inside a course wiki; its root is the second ancestor.
    return ancestors[1].slug
| agpl-3.0 |
neilalbrock/xhtml2pdf | ez_setup.py | 61 | 9429 | #!python
"""Bootstrap setuptools installation
If you want to use setuptools in your package's setup.py, just include this
file in the same directory with it, and add this to the top of your setup.py::
from ez_setup import use_setuptools
use_setuptools()
If you want to require a specific version of setuptools, set a download
mirror, or use an alternate download directory, you can do so by supplying
the appropriate options to ``use_setuptools()``.
This file can also be run as a script to install or upgrade setuptools.
"""
import sys
DEFAULT_VERSION = "0.6c9"
DEFAULT_URL = "http://pypi.python.org/packages/%s/s/setuptools/" % sys.version[:3]
md5_data = {
'setuptools-0.6b1-py2.3.egg': '8822caf901250d848b996b7f25c6e6ca',
'setuptools-0.6b1-py2.4.egg': 'b79a8a403e4502fbb85ee3f1941735cb',
'setuptools-0.6b2-py2.3.egg': '5657759d8a6d8fc44070a9d07272d99b',
'setuptools-0.6b2-py2.4.egg': '4996a8d169d2be661fa32a6e52e4f82a',
'setuptools-0.6b3-py2.3.egg': 'bb31c0fc7399a63579975cad9f5a0618',
'setuptools-0.6b3-py2.4.egg': '38a8c6b3d6ecd22247f179f7da669fac',
'setuptools-0.6b4-py2.3.egg': '62045a24ed4e1ebc77fe039aa4e6f7e5',
'setuptools-0.6b4-py2.4.egg': '4cb2a185d228dacffb2d17f103b3b1c4',
'setuptools-0.6c1-py2.3.egg': 'b3f2b5539d65cb7f74ad79127f1a908c',
'setuptools-0.6c1-py2.4.egg': 'b45adeda0667d2d2ffe14009364f2a4b',
'setuptools-0.6c2-py2.3.egg': 'f0064bf6aa2b7d0f3ba0b43f20817c27',
'setuptools-0.6c2-py2.4.egg': '616192eec35f47e8ea16cd6a122b7277',
'setuptools-0.6c3-py2.3.egg': 'f181fa125dfe85a259c9cd6f1d7b78fa',
'setuptools-0.6c3-py2.4.egg': 'e0ed74682c998bfb73bf803a50e7b71e',
'setuptools-0.6c3-py2.5.egg': 'abef16fdd61955514841c7c6bd98965e',
'setuptools-0.6c4-py2.3.egg': 'b0b9131acab32022bfac7f44c5d7971f',
'setuptools-0.6c4-py2.4.egg': '2a1f9656d4fbf3c97bf946c0a124e6e2',
'setuptools-0.6c4-py2.5.egg': '8f5a052e32cdb9c72bcf4b5526f28afc',
'setuptools-0.6c5-py2.3.egg': 'ee9fd80965da04f2f3e6b3576e9d8167',
'setuptools-0.6c5-py2.4.egg': 'afe2adf1c01701ee841761f5bcd8aa64',
'setuptools-0.6c5-py2.5.egg': 'a8d3f61494ccaa8714dfed37bccd3d5d',
'setuptools-0.6c6-py2.3.egg': '35686b78116a668847237b69d549ec20',
'setuptools-0.6c6-py2.4.egg': '3c56af57be3225019260a644430065ab',
'setuptools-0.6c6-py2.5.egg': 'b2f8a7520709a5b34f80946de5f02f53',
'setuptools-0.6c7-py2.3.egg': '209fdf9adc3a615e5115b725658e13e2',
'setuptools-0.6c7-py2.4.egg': '5a8f954807d46a0fb67cf1f26c55a82e',
'setuptools-0.6c7-py2.5.egg': '45d2ad28f9750e7434111fde831e8372',
'setuptools-0.6c8-py2.3.egg': '50759d29b349db8cfd807ba8303f1902',
'setuptools-0.6c8-py2.4.egg': 'cba38d74f7d483c06e9daa6070cce6de',
'setuptools-0.6c8-py2.5.egg': '1721747ee329dc150590a58b3e1ac95b',
}
import sys, os
try: from hashlib import md5
except ImportError: from md5 import md5
def _validate_md5(egg_name, data):
    # Check the downloaded egg against the built-in MD5 table and abort on
    # mismatch.  NOTE(review): MD5 only guards against corruption here, not
    # against tampering.
    if egg_name in md5_data:
        digest = md5(data).hexdigest()
        if digest != md5_data[egg_name]:
            print >>sys.stderr, (
                "md5 validation of %s failed!  (Possible download problem?)"
                % egg_name
            )
            sys.exit(2)
    return data
def use_setuptools(
    version=DEFAULT_VERSION, download_base=DEFAULT_URL, to_dir=os.curdir,
    download_delay=15
):
    """Automatically find/download setuptools and make it available on sys.path

    `version` should be a valid setuptools version number that is available
    as an egg for download under the `download_base` URL (which should end with
    a '/'). `to_dir` is the directory where setuptools will be downloaded, if
    it is not already available.  If `download_delay` is specified, it should
    be the number of seconds that will be paused before initiating a download,
    should one be required.  If an older version of setuptools is installed,
    this routine will print a message to ``sys.stderr`` and raise SystemExit in
    an attempt to abort the calling script.
    """
    was_imported = 'pkg_resources' in sys.modules or 'setuptools' in sys.modules
    def do_download():
        # Fetch the egg, put it on sys.path and tell setuptools to
        # bootstrap-install from it.
        egg = download_setuptools(version, download_base, to_dir, download_delay)
        sys.path.insert(0, egg)
        import setuptools; setuptools.bootstrap_install_from = egg
    try:
        import pkg_resources
    except ImportError:
        return do_download()
    try:
        pkg_resources.require("setuptools>="+version); return
    except pkg_resources.VersionConflict, e:
        if was_imported:
            # Too late to swap the already-imported setuptools: bail out.
            print >>sys.stderr, (
            "The required version of setuptools (>=%s) is not available, and\n"
            "can't be installed while this script is running. Please install\n"
            " a more recent version first, using 'easy_install -U setuptools'."
            "\n\n(Currently using %r)"
            ) % (version, e.args[0])
            sys.exit(2)
        else:
            # Forget the stale pkg_resources so the fresh egg wins on reimport.
            del pkg_resources, sys.modules['pkg_resources']   # reload ok
            return do_download()
    except pkg_resources.DistributionNotFound:
        return do_download()
def download_setuptools(
    version=DEFAULT_VERSION, download_base=DEFAULT_URL, to_dir=os.curdir,
    delay = 15
):
    """Download setuptools from a specified location and return its filename

    `version` should be a valid setuptools version number that is available
    as an egg for download under the `download_base` URL (which should end
    with a '/'). `to_dir` is the directory where the egg will be downloaded.
    `delay` is the number of seconds to pause before an actual download attempt.
    """
    import urllib2, shutil
    egg_name = "setuptools-%s-py%s.egg" % (version,sys.version[:3])
    url = download_base + egg_name
    saveto = os.path.join(to_dir, egg_name)
    src = dst = None
    if not os.path.exists(saveto):  # Avoid repeated downloads
        try:
            from distutils import log
            if delay:
                # Warn the user and give a grace period before hitting the net.
                log.warn("""
---------------------------------------------------------------------------
This script requires setuptools version %s to run (even to display
help).  I will attempt to download it for you (from
%s), but
you may need to enable firewall access for this script first.
I will start the download in %d seconds.

(Note: if this machine does not have network access, please obtain the file

   %s

and place it in this directory before rerunning this script.)
---------------------------------------------------------------------------""",
                    version, download_base, delay, url
                ); from time import sleep; sleep(delay)
            log.warn("Downloading %s", url)
            src = urllib2.urlopen(url)
            # Read/write all in one block, so we don't create a corrupt file
            # if the download is interrupted.
            data = _validate_md5(egg_name, src.read())
            dst = open(saveto,"wb"); dst.write(data)
        finally:
            if src: src.close()
            if dst: dst.close()
    return os.path.realpath(saveto)
def main(argv, version=DEFAULT_VERSION):
    """Install or upgrade setuptools and EasyInstall.

    Three cases: setuptools is absent (bootstrap it from a downloaded egg),
    an obsolete 0.0.1 install is present (refuse and exit), or it is present
    but may not satisfy the requested version (upgrade via easy_install).
    """
    try:
        import setuptools
    except ImportError:
        # No setuptools at all: download the egg, run easy_install out of it,
        # then delete the egg again whatever happens.
        egg = None
        try:
            egg = download_setuptools(version, delay=0)
            sys.path.insert(0,egg)
            from setuptools.command.easy_install import main
            return main(list(argv)+[egg]) # we're done here
        finally:
            if egg and os.path.exists(egg):
                os.unlink(egg)
    else:
        if setuptools.__version__ == '0.0.1':
            print >>sys.stderr, (
            "You have an obsolete version of setuptools installed. Please\n"
            "remove it from your system entirely before rerunning this script."
            )
            sys.exit(2)

    # setuptools importable: check it satisfies the requested version.
    req = "setuptools>="+version
    import pkg_resources
    try:
        pkg_resources.require(req)
    except pkg_resources.VersionConflict:
        # Installed version is too old; upgrade in place with easy_install.
        try:
            from setuptools.command.easy_install import main
        except ImportError:
            from easy_install import main
        main(list(argv)+[download_setuptools(delay=0)])
        sys.exit(0) # try to force an exit
    else:
        if argv:
            from setuptools.command.easy_install import main
            main(argv)
        else:
            print "Setuptools version",version,"or greater has been installed."
            print '(Run "ez_setup.py -U setuptools" to reinstall or upgrade.)'
def update_md5(filenames):
    """Update our built-in md5 registry.

    Re-hashes every file in `filenames`, records the digests in the
    module-level ``md5_data`` dict, then rewrites the ``md5_data = {...}``
    literal inside this very script so the digests persist.

    Exits with status 2 if the md5_data literal cannot be located.
    """
    import re
    for name in filenames:
        base = os.path.basename(name)
        # try/finally so a read error can't leak the handle.
        f = open(name, 'rb')
        try:
            md5_data[base] = md5(f.read()).hexdigest()
        finally:
            f.close()
    # Render the registry as sorted "key: digest," lines for stable diffs.
    data = [" %r: %r,\n" % it for it in md5_data.items()]
    data.sort()
    repl = "".join(data)
    import inspect
    srcfile = inspect.getsourcefile(sys.modules[__name__])
    f = open(srcfile, 'rb')
    try:
        src = f.read()
    finally:
        f.close()
    # Raw string: the pattern is matched against the literal source text.
    match = re.search(r"\nmd5_data = {\n([^}]+)}", src)
    if not match:
        print >>sys.stderr, "Internal error!"
        sys.exit(2)
    # Splice the freshly generated body into the md5_data literal.
    src = src[:match.start(1)] + repl + src[match.end(1):]
    # Write back in binary mode so the bytes we read are the bytes we write
    # (text mode would translate newlines on Windows and corrupt the file).
    f = open(srcfile, 'wb')
    try:
        f.write(src)
    finally:
        f.close()
if __name__=='__main__':
    # "--md5update FILE..." refreshes the embedded digest registry;
    # any other invocation performs the normal install/upgrade.
    if len(sys.argv)>2 and sys.argv[1]=='--md5update':
        update_md5(sys.argv[2:])
    else:
        main(sys.argv[1:])
| apache-2.0 |
endlessm/chromium-browser | tools/cr/cr/targets/target.py | 10 | 4752 | # Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Module to hold the Target plugin."""
from __future__ import print_function
import operator
import re
import cr
import cr.base.context
# Configuration contributed by this module: the target used when the
# user does not name one on the command line.
DEFAULT = cr.Config.From(
    CR_DEFAULT_TARGET='chrome',
)
class Target(cr.base.context.Context, cr.AutoExport):
  """Base class for implementing cr targets.

  A target is something that can be built and run.
  """

  # The default base priority
  PRIORITY = 0
  # The default pattern used to try to detect whether a target is a test and
  # should use the test runner.
  TEST_PATTERN = re.compile('tests?$')
  # The special "test type" that means it's not a test.
  NOT_A_TEST = 'no'
  # The default choice for the type of test when it can't be determined.
  NORMAL_TEST = 'gtest'
  INSTRUMENTATION_TEST = 'instrumentation'
  # TODO(iancottrell): support the other test types
  TEST_TYPES = [NOT_A_TEST, NORMAL_TEST, INSTRUMENTATION_TEST]

  def __init__(self, target_name):
    super(Target, self).__init__(target_name)
    # The name alone decides whether this looks like a test target.
    test_type = None
    if self.TEST_PATTERN.search(target_name):
      test_type = self.NORMAL_TEST
    config = cr.Config('DEFAULTS').From(
        CR_TARGET=target_name,
        CR_TARGET_NAME='{CR_TARGET}',
        CR_BUILD_TARGET=cr.Config.Optional(
            '{CR_TARGET}{CR_TARGET_SUFFIX}', '{CR_TARGET}'),
        CR_RUN_ARGUMENTS='',
        CR_TEST_TYPE=test_type,
        CR_RUN_DEPENDENCIES=[],
    )
    self._data = cr.context.data
    self.AddChildren(config, cr.context)
    if hasattr(self, 'CONFIG'):
      self.AddChild(self.CONFIG)
    if not self.valid:
      # Fall back to the bare target name when the suffixed build target
      # is unknown to the builder.
      self.Set(CR_TARGET_SUFFIX='')
    self.test_type = self.Find('CR_TEST_TYPE')
    self.target_name = self.Find('CR_TARGET_NAME')

  def GetRunDependencies(self):
    # Targets listed in CR_RUN_DEPENDENCIES must also be available when this
    # target runs.  NOTE(review): under Python 3 map() is lazy; this file
    # appears to target Python 2 where it returns a list — confirm before
    # porting.
    return map(Target.CreateTarget, self.Get('CR_RUN_DEPENDENCIES'))

  @property
  def build_target(self):
    # The name handed to the underlying build system.
    return self.Get('CR_BUILD_TARGET')

  @property
  def valid(self):
    # A target is valid iff the builder recognises its build target.
    return cr.Builder.IsTarget(self.build_target)

  @property
  def is_test(self):
    return self.test_type and self.test_type != self.NOT_A_TEST

  @classmethod
  def AddArguments(cls, command, parser, allow_multiple=False):
    """Attach the positional target argument(s) to an argparse parser."""
    nargs = '?'
    help_string = 'The target to {0}'
    if allow_multiple:
      nargs = '*'
      help_string = 'The target(s) to {0}'
    parser.add_argument(
        '_targets', metavar='target',
        help=help_string.format(command.name),
        nargs=nargs
    )

  @classmethod
  def AllTargets(cls):
    """Yield this class and, recursively, every subclass."""
    yield cls
    for child in cls.__subclasses__():
      for t in child.AllTargets():
        yield t

  @classmethod
  def CreateTarget(cls, target_name):
    """Attempts to build a target by name.

    This searches the set of installed targets in priority order to see if any
    of them are willing to handle the supplied name.
    If a target cannot be found, the program will be aborted.

    Args:
      target_name: The name of the target we are searching for.

    Returns:
      The target that matched.
    """
    # Highest PRIORITY first, so specific handlers beat the generic one.
    target_clses = sorted(
        cls.AllTargets(),
        key=operator.attrgetter('PRIORITY'),
        reverse=True
    )
    for handler in target_clses:
      target = handler.Build(target_name)
      if target:
        if not target.valid:
          print('Invalid target {0} as {1}'.format(target_name,
              target.build_target))
          # Offer close matches before giving up.
          guesses = cr.Builder.GuessTargets(target_name)
          if guesses:
            print('Did you mean {0}?'
                .format(', '.join(guesses[:-1]) + ' or ' +
                    guesses[-1] if len(guesses) > 1 else guesses[0]))
          exit(1)
        return target
    print('Unknown target {0}'.format(target_name))
    exit(1)

  @classmethod
  def GetTargets(cls):
    """Resolve the targets named on the command line (or the default)."""
    target_names = getattr(cr.context.args, '_targets', None)
    if not target_names:
      target_names = [cr.context.Get('CR_DEFAULT_TARGET')]
    elif hasattr(target_names, 'swapcase'):
      # deal with the single target case
      target_names = [target_names]
    return [cls.CreateTarget(target_name)
            for target_name in target_names]

  @classmethod
  def Build(cls, target_name):
    # Base behaviour: accept any name; subclasses may return None to decline.
    return cls(target_name)
class NamedTarget(Target):
  """A base class for explicit named targets.

  Only matches a target if the name is an exact match.
  Up its priority to come ahead of general purpose rule matches.
  """
  # Subclasses set NAME to the exact target name they claim.
  NAME = None
  PRIORITY = Target.PRIORITY + 1

  @classmethod
  def Build(cls, target_name):
    # Guard with AttributeError so a subclass without a usable NAME simply
    # declines to match rather than crashing target resolution.
    try:
      if target_name == cls.NAME:
        return cls(target_name)
    except AttributeError:
      pass
    return None
| bsd-3-clause |
RobinQuetin/CAIRIS-web | cairis/cairis/RiskScatterDialog.py | 1 | 1238 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import wx
import armid
from RiskScatterPanel import RiskScatterPanel
class RiskScatterDialog(wx.Dialog):
  """Resizable dialog that hosts the risk scatter-plot panel."""
  def __init__(self,parent):
    wx.Dialog.__init__(self,parent,-1,'View Risk Scatter',style=wx.DEFAULT_DIALOG_STYLE|wx.MAXIMIZE_BOX|wx.THICK_FRAME|wx.RESIZE_BORDER,size=(600,525))
    # The panel does all the drawing; give it the whole client area so it
    # grows with the (resizable) dialog.
    mainSizer = wx.BoxSizer(wx.VERTICAL)
    self.panel = RiskScatterPanel(self)
    mainSizer.Add(self.panel,1,wx.EXPAND)
    self.SetSizer(mainSizer)
| apache-2.0 |
YongseopKim/crosswalk-test-suite | webapi/tct-testconfig/inst.xpk.py | 357 | 6759 | #!/usr/bin/env python
import os
import shutil
import glob
import time
import sys
import subprocess
import string
from optparse import OptionParser, make_option
SCRIPT_DIR = os.path.dirname(os.path.abspath(__file__))
PKG_NAME = os.path.basename(SCRIPT_DIR)
PARAMETERS = None
#XW_ENV = "export DBUS_SESSION_BUS_ADDRESS=unix:path=/run/user/5000/dbus/user_bus_socket"
SRC_DIR = ""
PKG_SRC_DIR = ""
def doCMD(cmd):
# Do not need handle timeout in this short script, let tool do it
print "-->> \"%s\"" % cmd
output = []
cmd_return_code = 1
cmd_proc = subprocess.Popen(
cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, shell=True)
while True:
output_line = cmd_proc.stdout.readline().strip("\r\n")
cmd_return_code = cmd_proc.poll()
if output_line == '' and cmd_return_code != None:
break
sys.stdout.write("%s\n" % output_line)
sys.stdout.flush()
output.append(output_line)
return (cmd_return_code, output)
def updateCMD(cmd=None):
    """Wrap pkgcmd invocations so they run as the target user with the
    session D-Bus address exported; all other commands pass through."""
    if "pkgcmd" not in cmd:
        return cmd
    return "su - %s -c '%s;%s'" % (PARAMETERS.user, XW_ENV, cmd)
def getUSERID():
    """Ask the device for the numeric uid of PARAMETERS.user.

    Returns the (return_code, output_lines) tuple from doCMD; the uid is
    the first output line on success.
    """
    if PARAMETERS.mode == "SDB":
        cmd = "sdb -s %s shell id -u %s" % (
            PARAMETERS.device, PARAMETERS.user)
    else:
        cmd = "ssh %s \"id -u %s\"" % (
            PARAMETERS.device, PARAMETERS.user )
    return doCMD(cmd)
def getPKGID(pkg_name=None):
    """Return the pkgid under which `pkg_name` is installed, or None.

    Scans "pkgcmd -l" output on the device for the line listing
    "[pkg_name]" and extracts the bracketed id that follows the "pkgid"
    token on that line.
    """
    if PARAMETERS.mode == "SDB":
        cmd = "sdb -s %s shell %s" % (
            PARAMETERS.device, updateCMD('pkgcmd -l'))
    else:
        cmd = "ssh %s \"%s\"" % (
            PARAMETERS.device, updateCMD('pkgcmd -l'))
    (return_code, output) = doCMD(cmd)
    if return_code != 0:
        return None
    test_pkg_id = None
    for line in output:
        if line.find("[" + pkg_name + "]") != -1:
            # The token after the literal "pkgid" holds "[<id>]".
            pkgidIndex = line.split().index("pkgid")
            test_pkg_id = line.split()[pkgidIndex+1].strip("[]")
            break
    return test_pkg_id
def doRemoteCMD(cmd=None):
    """Execute `cmd` on the target device (sdb shell or ssh, per mode)."""
    wrapped = updateCMD(cmd)
    if PARAMETERS.mode == "SDB":
        remote = "sdb -s %s shell %s" % (PARAMETERS.device, wrapped)
    else:
        remote = "ssh %s \"%s\"" % (PARAMETERS.device, wrapped)
    return doCMD(remote)
def doRemoteCopy(src=None, dest=None):
    """Copy `src` to `dest` on the device; return True on success.

    Callers in this script (instPKGs) treat a falsy return as a failed
    copy ("if not doRemoteCopy(...): action_status = False"), so success
    must map to True.  The original returned True when the push command
    FAILED (return code != 0); that inversion is fixed here.
    """
    if PARAMETERS.mode == "SDB":
        cmd_prefix = "sdb -s %s push" % PARAMETERS.device
        cmd = "%s %s %s" % (cmd_prefix, src, dest)
    else:
        cmd = "scp -r %s %s:/%s" % (src, PARAMETERS.device, dest)
    (return_code, output) = doCMD(cmd)
    # Flush the device filesystem before the caller installs from it.
    doRemoteCMD("sync")
    # Zero exit status from sdb/scp means the copy succeeded.
    return return_code == 0
def uninstPKGs():
    """Uninstall every .xpk package found under this script's directory
    from the device, then remove the pushed payload directory.

    Returns True only if every uninstall (and the cleanup) succeeded.
    """
    action_status = True
    for root, dirs, files in os.walk(SCRIPT_DIR):
        # Media fixtures are not packages; skip that subtree.
        if root.endswith("mediasrc"):
            continue
        for file in files:
            if file.endswith(".xpk"):
                pkg_id = getPKGID(os.path.basename(os.path.splitext(file)[0]))
                if not pkg_id:
                    # Not installed (or lookup failed): count as failure but
                    # keep going so the remaining packages are still removed.
                    action_status = False
                    continue
                (return_code, output) = doRemoteCMD(
                    "pkgcmd -u -t xpk -q -n %s" % pkg_id)
                # pkgcmd reports errors in its output, not its exit code.
                for line in output:
                    if "Failure" in line:
                        action_status = False
                        break
    # Remove the content directory instPKGs created on the device.
    (return_code, output) = doRemoteCMD(
        "rm -rf %s" % PKG_SRC_DIR)
    if return_code != 0:
        action_status = False
    return action_status
def instPKGs():
    """Push and install every .xpk package under this script's directory.

    Returns True only if every push and install succeeded.
    """
    action_status = True
    (return_code, output) = doRemoteCMD(
        "mkdir -p %s" % PKG_SRC_DIR)
    if return_code != 0:
        action_status = False
    for root, dirs, files in os.walk(SCRIPT_DIR):
        # Media fixtures are not packages; skip that subtree.
        if root.endswith("mediasrc"):
            continue
        for file in files:
            if file.endswith(".xpk"):
                if not doRemoteCopy(os.path.join(root, file), "%s/%s" % (SRC_DIR, file)):
                    action_status = False
                (return_code, output) = doRemoteCMD(
                    "pkgcmd -i -t xpk -q -p %s/%s" % (SRC_DIR, file))
                # The pushed package is no longer needed once installed.
                doRemoteCMD("rm -rf %s/%s" % (SRC_DIR, file))
                # pkgcmd reports errors in its output, not its exit code.
                for line in output:
                    if "Failure" in line:
                        action_status = False
                        break

    # Do some special copy/delete... steps
    # (template placeholder kept verbatim; suites that need extra fixtures
    # uncomment and adapt it)
    '''
    (return_code, output) = doRemoteCMD(
        "mkdir -p %s/tests" % PKG_SRC_DIR)
    if return_code != 0:
        action_status = False
    if not doRemoteCopy("specname/tests", "%s/tests" % PKG_SRC_DIR):
        action_status = False
    '''

    return action_status
def main():
    """Parse options, locate a target device, resolve the app user's uid,
    then install (default / -i) or uninstall (-u) the test packages."""
    try:
        usage = "usage: inst.py -i"
        opts_parser = OptionParser(usage=usage)
        opts_parser.add_option(
            "-m", dest="mode", action="store", help="Specify mode")
        opts_parser.add_option(
            "-s", dest="device", action="store", help="Specify device")
        opts_parser.add_option(
            "-i", dest="binstpkg", action="store_true", help="Install package")
        opts_parser.add_option(
            "-u", dest="buninstpkg", action="store_true", help="Uninstall package")
        opts_parser.add_option(
            "-a", dest="user", action="store", help="User name")
        global PARAMETERS
        (PARAMETERS, args) = opts_parser.parse_args()
    except Exception, e:
        print "Got wrong option: %s, exit ..." % e
        sys.exit(1)

    if not PARAMETERS.user:
        PARAMETERS.user = "app"
    # Derived device-side paths for the pushed test payload.
    global SRC_DIR, PKG_SRC_DIR
    SRC_DIR = "/home/%s/content" % PARAMETERS.user
    PKG_SRC_DIR = "%s/tct/opt/%s" % (SRC_DIR, PKG_NAME)
    if not PARAMETERS.mode:
        PARAMETERS.mode = "SDB"

    if PARAMETERS.mode == "SDB":
        if not PARAMETERS.device:
            # Auto-pick the first device reported by "sdb devices".
            (return_code, output) = doCMD("sdb devices")
            for line in output:
                if str.find(line, "\tdevice") != -1:
                    PARAMETERS.device = line.split("\t")[0]
                    break
    else:
        # Anything other than SDB is treated as SSH.
        PARAMETERS.mode = "SSH"

    if not PARAMETERS.device:
        print "No device provided"
        sys.exit(1)

    # The session D-Bus address is keyed on the user's uid; pkgcmd needs it.
    user_info = getUSERID()
    re_code = user_info[0]
    if re_code == 0 :
        global XW_ENV
        userid = user_info[1][0]
        XW_ENV = "export DBUS_SESSION_BUS_ADDRESS=unix:path=/run/user/%s/dbus/user_bus_socket"%str(userid)
    else:
        print "[Error] cmd commands error : %s"%str(user_info[1])
        sys.exit(1)
    if PARAMETERS.binstpkg and PARAMETERS.buninstpkg:
        print "-i and -u are conflict"
        sys.exit(1)

    if PARAMETERS.buninstpkg:
        if not uninstPKGs():
            sys.exit(1)
    else:
        if not instPKGs():
            sys.exit(1)
if __name__ == "__main__":
    # main() exits non-zero itself on failure; reaching here means success.
    main()
    sys.exit(0)
| bsd-3-clause |
JeyZeta/Dangerous | Dangerous/Golismero/thirdparty_libs/nltk/sem/boxer.py | 12 | 46491 | # Natural Language Toolkit: Interface to Boxer
# <http://svn.ask.it.usyd.edu.au/trac/candc/wiki/boxer>
#
# Author: Dan Garrette <dhgarrette@gmail.com>
#
# Copyright (C) 2001-2012 NLTK Project
# URL: <http://www.nltk.org/>
# For license information, see LICENSE.TXT
"""
An interface to Boxer.
Usage:
Set the environment variable CANDCHOME to the bin directory of your CandC installation.
The models directory should be in the CandC root directory.
For example:
/path/to/candc/
bin/
candc
boxer
models/
boxer/
"""
import os
import re
import operator
import subprocess
from optparse import OptionParser
import tempfile
from nltk.internals import Counter, find_binary
from nltk.sem.logic import (ExpectedMoreTokensException, ParseException,
UnexpectedTokenException, Variable)
from nltk.sem.drt import (DRS, DrtApplicationExpression, DrtEqualityExpression,
DrtNegatedExpression, DrtOrExpression, DrtParser,
DrtProposition, DrtTokens, DrtVariableExpression)
class Boxer(object):
    """
    This class is an interface to Johan Bos's program Boxer, a wide-coverage
    semantic parser that produces Discourse Representation Structures (DRSs).

    Sentences are first parsed with the C&C parser, whose output is fed to
    the ``boxer`` binary; Boxer's Prolog output is then parsed back into
    python objects.
    """

    def __init__(self, boxer_drs_interpreter=None, elimeq=False, bin_dir=None, verbose=False):
        """
        :param boxer_drs_interpreter: A class that converts from the
            ``AbstractBoxerDrs`` object hierarchy to a different object.  The
            default is ``NltkDrtBoxerDrsInterpreter``, which converts to the NLTK
            DRT hierarchy.
        :param elimeq: When set to true, Boxer removes all equalities from the
            DRSs and discourse referents standing in the equality relation are
            unified, but only if this can be done in a meaning-preserving manner.
        """
        if boxer_drs_interpreter is None:
            boxer_drs_interpreter = NltkDrtBoxerDrsInterpreter()
        self._boxer_drs_interpreter = boxer_drs_interpreter
        self._elimeq = elimeq
        self.set_bin_dir(bin_dir, verbose)

    def set_bin_dir(self, bin_dir, verbose=False):
        """Locate the candc and boxer binaries and the C&C models directory."""
        self._candc_bin = self._find_binary('candc', bin_dir, verbose)
        # The models directory is assumed to sit next to the bin directory
        # ([:-5] strips the trailing "candc" from the binary path).
        self._candc_models_path = os.path.normpath(os.path.join(self._candc_bin[:-5], '../models'))
        self._boxer_bin = self._find_binary('boxer', bin_dir, verbose)

    def interpret(self, input, discourse_id=None, question=False, verbose=False):
        """
        Use Boxer to give a first order representation.

        :param input: str Input sentence to parse
        :param discourse_id: str An identifier to be inserted to each occurrence-indexed predicate.
        :param question: bool Use the question-parsing models
        :return: ``drt.AbstractDrs``
        """
        if discourse_id is not None:
            discourse_ids = [discourse_id]
        else:
            discourse_ids = None
        # Single sentence == a one-discourse batch with one sentence in it.
        d, = self.batch_interpret_multisentence([[input]], discourse_ids, question, verbose)
        if not d:
            raise Exception('Unable to interpret: "%s"' % input)
        return d

    def interpret_multisentence(self, input, discourse_id=None, question=False, verbose=False):
        """
        Use Boxer to give a first order representation.

        :param input: list of str Input sentences to parse as a single discourse
        :param discourse_id: str An identifier to be inserted to each occurrence-indexed predicate.
        :param question: bool Use the question-parsing models
        :return: ``drt.AbstractDrs``
        """
        if discourse_id is not None:
            discourse_ids = [discourse_id]
        else:
            discourse_ids = None
        d, = self.batch_interpret_multisentence([input], discourse_ids, question, verbose)
        if not d:
            raise Exception('Unable to interpret: "%s"' % input)
        return d

    def batch_interpret(self, inputs, discourse_ids=None, question=False, verbose=False):
        """
        Use Boxer to give a first order representation.

        :param inputs: list of str Input sentences to parse as individual discourses
        :param discourse_ids: list of str Identifiers to be inserted to each occurrence-indexed predicate.
        :param question: bool Use the question-parsing models
        :return: list of ``drt.AbstractDrs``
        """
        return self.batch_interpret_multisentence([[input] for input in inputs], discourse_ids, question, verbose)

    def batch_interpret_multisentence(self, inputs, discourse_ids=None, question=False, verbose=False):
        """
        Use Boxer to give a first order representation.

        :param inputs: list of list of str Input discourses to parse
        :param discourse_ids: list of str Identifiers to be inserted to each occurrence-indexed predicate.
        :param question: bool Use the question-parsing models
        :return: list of ``drt.AbstractDrs``
        """
        if discourse_ids is not None:
            assert len(inputs) == len(discourse_ids)
            assert reduce(operator.and_, (id is not None for id in discourse_ids))
            use_disc_id = True
        else:
            # No ids supplied: number the discourses and don't embed the ids.
            discourse_ids = map(str, xrange(len(inputs)))
            use_disc_id = False

        candc_out = self._call_candc(inputs, discourse_ids, question, verbose=verbose)
        boxer_out = self._call_boxer(candc_out, verbose=verbose)

#        if 'ERROR: input file contains no ccg/2 terms.' in boxer_out:
#            raise UnparseableInputException('Could not parse with candc: "%s"' % input_str)

        drs_dict = self._parse_to_drs_dict(boxer_out, use_disc_id)
        # Discourses Boxer could not handle map to None.
        return [drs_dict.get(id, None) for id in discourse_ids]

    def _call_candc(self, inputs, discourse_ids, question, verbose=False):
        """
        Call the ``candc`` binary with the given input.

        :param inputs: list of list of str Input discourses to parse
        :param discourse_ids: list of str Identifiers to be inserted to each occurrence-indexed predicate.
        :param question: bool select the question models instead of the boxer models
        :return: stdout
        """
        args = ['--models', os.path.join(self._candc_models_path, ['boxer','questions'][question]),
                '--candc-printer', 'boxer']
        # Each discourse is prefixed with a <META> line carrying its id.
        return self._call('\n'.join(sum((["<META>'%s'" % id] + d for d,id in zip(inputs,discourse_ids)), [])), self._candc_bin, args, verbose)

    def _call_boxer(self, candc_out, verbose=False):
        """
        Call the ``boxer`` binary with the given input.

        :param candc_out: str output from C&C parser
        :return: stdout
        """
        # Boxer reads from a file, so stage the C&C output in a temp file.
        f = None
        try:
            fd, temp_filename = tempfile.mkstemp(prefix='boxer-', suffix='.in', text=True)
            f = os.fdopen(fd, 'w')
            f.write(candc_out)
        finally:
            if f: f.close()

        args = ['--box', 'false',
                '--semantics', 'drs',
                '--flat', 'false',
                '--resolve', 'true',
                '--elimeq', ['false','true'][self._elimeq],
                '--format', 'prolog',
                '--instantiate', 'true',
                '--input', temp_filename]

        stdout = self._call(None, self._boxer_bin, args, verbose)
        os.remove(temp_filename)
        return stdout

    def _find_binary(self, name, bin_dir, verbose=False):
        # Resolved via explicit dir, CANDCHOME, or the system path.
        return find_binary(name,
            path_to_bin=bin_dir,
            env_vars=['CANDCHOME'],
            url='http://svn.ask.it.usyd.edu.au/trac/candc/',
            binary_names=[name, name + '.exe'],
            verbose=verbose)

    def _call(self, input_str, binary, args=[], verbose=False):
        """
        Call the binary with the given input.

        :param input_str: A string whose contents are used as stdin.
        :param binary: The location of the binary to call
        :param args: A list of command-line arguments.
        :return: stdout
        """
        if verbose:
            print 'Calling:', binary
            print 'Args:', args
            print 'Input:', input_str
            print 'Command:', binary + ' ' + ' '.join(args)

        # Call via a subprocess.  NOTE(review): the stdin case goes through
        # "echo ... | binary" with shell=True, so input_str is not shell-safe.
        if input_str is None:
            cmd = [binary] + args
            p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        else:
            cmd = 'echo "%s" | %s %s' % (input_str, binary, ' '.join(args))
            p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)
        stdout, stderr = p.communicate()

        if verbose:
            print 'Return code:', p.returncode
            if stdout: print 'stdout:\n', stdout, '\n'
            if stderr: print 'stderr:\n', stderr, '\n'
        if p.returncode != 0:
            raise Exception('ERROR CALLING: %s %s\nReturncode: %d\n%s' % (binary, ' '.join(args), p.returncode, stderr))

        return stdout

    def _parse_to_drs_dict(self, boxer_out, use_disc_id):
        """Scan Boxer's Prolog output for id(...)/sem(...) records and map
        each discourse id to its interpreted DRS."""
        lines = boxer_out.split('\n')
        drs_dict = {}
        i = 0
        while i < len(lines):
            line = lines[i]
            if line.startswith('id('):
                comma_idx = line.index(',')
                discourse_id = line[3:comma_idx]
                # Strip the Prolog quoting around the id, if present.
                if discourse_id[0] == "'" and discourse_id[-1] == "'":
                    discourse_id = discourse_id[1:-1]
                drs_id = line[comma_idx+1:line.index(')')]
                i += 1
                line = lines[i]
                assert line.startswith('sem(%s,' % drs_id)
                # The DRS term sits a fixed number of lines below sem(...).
                i += 4
                line = lines[i]
                assert line.endswith(').')
                drs_input = line[:-2].strip()
                parsed = self._parse_drs(drs_input, discourse_id, use_disc_id)
                drs_dict[discourse_id] = self._boxer_drs_interpreter.interpret(parsed)
            i += 1
        return drs_dict

    def _parse_drs(self, drs_string, discourse_id, use_disc_id):
        return BoxerOutputDrsParser([None,discourse_id][use_disc_id]).parse(drs_string)
class BoxerOutputDrsParser(DrtParser):
    """Parser for the Prolog DRS terms emitted by Boxer.

    Most ``_handle_*`` methods return a callback taking
    ``(sent_index, word_indices)`` rather than a finished condition, so
    that one condition can be instantiated once per sentence it spans.
    """

    def __init__(self, discourse_id=None):
        """
        This class is used to parse the Prolog DRS output from Boxer into a
        hierarchy of python objects.
        """
        DrtParser.__init__(self)
        self.discourse_id = discourse_id
        self.sentence_id_offset = None
        self.quote_chars = [("'", "'", "\\", False)]
        self._label_counter = None

    def parse(self, data, signature=None):
        # Fresh label counter per parse; labels start at 0.
        self._label_counter = Counter(-1)
        return DrtParser.parse(self, data, signature)

    def get_all_symbols(self):
        return ['(', ')', ',', '[', ']',':']

    def handle(self, tok, context):
        return self.handle_drs(tok)

    def attempt_adjuncts(self, expression, context):
        return expression

    def parse_condition(self, indices):
        """
        Parse a DRS condition

        :return: list of ``AbstractDrs``
        """
        tok = self.token()
        accum = self.handle_condition(tok, indices)
        if accum is None:
            raise UnexpectedTokenException(tok)
        return accum

    def handle_drs(self, tok):
        if tok == 'drs':
            return self.parse_drs()
        elif tok in ['merge', 'smerge']:
            # Merges carry no indices of their own.
            return self._handle_binary_expression(self._make_merge_expression)(None, [])

    def handle_condition(self, tok, indices):
        """
        Handle a DRS condition

        :param indices: list of int
        :return: list of ``AbstractDrs``
        """
        if tok == 'not':
            return [self._handle_not()]

        if tok == 'or':
            conds = [self._handle_binary_expression(self._make_or_expression)]
        elif tok == 'imp':
            conds = [self._handle_binary_expression(self._make_imp_expression)]
        elif tok == 'eq':
            conds = [self._handle_eq()]
        elif tok == 'prop':
            conds = [self._handle_prop()]
        elif tok == 'pred':
            conds = [self._handle_pred()]
        elif tok == 'named':
            conds = [self._handle_named()]
        elif tok == 'rel':
            conds = [self._handle_rel()]
        elif tok == 'timex':
            conds = self._handle_timex()
        elif tok == 'card':
            conds = [self._handle_card()]
        elif tok == 'whq':
            conds = [self._handle_whq()]
        else:
            conds = []

        # Instantiate every condition callback once per (sentence, words)
        # pair covered by the indices, and flatten.
        return sum([[cond(sent_index, word_indices) for cond in conds]
                    for sent_index, word_indices in self._sent_and_word_indices(indices)], [])

    def _handle_not(self):
        self.assertToken(self.token(), '(')
        drs = self.parse_Expression(None)
        self.assertToken(self.token(), ')')
        return BoxerNot(drs)

    def _handle_pred(self):
        #pred(_G3943, dog, n, 0)
        self.assertToken(self.token(), '(')
        variable = self.parse_variable()
        self.assertToken(self.token(), ',')
        name = self.token()
        self.assertToken(self.token(), ',')
        pos = self.token()
        self.assertToken(self.token(), ',')
        sense = int(self.token())
        self.assertToken(self.token(), ')')

        def _handle_pred_f(sent_index, word_indices):
            return BoxerPred(self.discourse_id, sent_index, word_indices, variable, name, pos, sense)
        return _handle_pred_f

    def _handle_named(self):
        #named(x0, john, per, 0)
        self.assertToken(self.token(), '(')
        variable = self.parse_variable()
        self.assertToken(self.token(), ',')
        name = self.token()
        self.assertToken(self.token(), ',')
        type = self.token()
        self.assertToken(self.token(), ',')
        sense = int(self.token())
        self.assertToken(self.token(), ')')
        return lambda sent_index, word_indices: BoxerNamed(self.discourse_id, sent_index, word_indices, variable, name, type, sense)

    def _handle_rel(self):
        #rel(_G3993, _G3943, agent, 0)
        self.assertToken(self.token(), '(')
        var1 = self.parse_variable()
        self.assertToken(self.token(), ',')
        var2 = self.parse_variable()
        self.assertToken(self.token(), ',')
        rel = self.token()
        self.assertToken(self.token(), ',')
        sense = int(self.token())
        self.assertToken(self.token(), ')')
        return lambda sent_index, word_indices: BoxerRel(self.discourse_id, sent_index, word_indices, var1, var2, rel, sense)

    def _handle_timex(self):
        #timex(_G18322, date([]: (+), []:'XXXX', [1004]:'04', []:'XX'))
        self.assertToken(self.token(), '(')
        arg = self.parse_variable()
        self.assertToken(self.token(), ',')
        new_conds = self._handle_time_expression(arg)
        self.assertToken(self.token(), ')')
        return new_conds

    def _handle_time_expression(self, arg):
        #date([]: (+), []:'XXXX', [1004]:'04', []:'XX')
        tok = self.token()
        self.assertToken(self.token(), '(')
        if tok == 'date':
            conds = self._handle_date(arg)
        elif tok == 'time':
            conds = self._handle_time(arg)
        else:
            return None
        self.assertToken(self.token(), ')')
        # A predicate for the expression type itself, plus one per component.
        return [lambda sent_index, word_indices: BoxerPred(self.discourse_id, sent_index, word_indices, arg, tok, 'n', 0)] + \
               [lambda sent_index, word_indices: cond for cond in conds]

    def _handle_date(self, arg):
        #[]: (+), []:'XXXX', [1004]:'04', []:'XX'
        # Each date component becomes its own predicate; 'X'-placeholders
        # mean the component is unknown and is skipped.
        conds = []
        (sent_index, word_indices), = self._sent_and_word_indices(self._parse_index_list())
        self.assertToken(self.token(), '(')
        pol = self.token()
        self.assertToken(self.token(), ')')
        conds.append(BoxerPred(self.discourse_id, sent_index, word_indices, arg, 'date_pol_%s' % (pol), 'a', 0))
        self.assertToken(self.token(), ',')

        (sent_index, word_indices), = self._sent_and_word_indices(self._parse_index_list())
        year = self.token()
        if year != 'XXXX':
            year = year.replace(':', '_')
            conds.append(BoxerPred(self.discourse_id, sent_index, word_indices, arg, 'date_year_%s' % (year), 'a', 0))
        self.assertToken(self.token(), ',')

        (sent_index, word_indices), = self._sent_and_word_indices(self._parse_index_list())
        month = self.token()
        if month != 'XX':
            conds.append(BoxerPred(self.discourse_id, sent_index, word_indices, arg, 'date_month_%s' % (month), 'a', 0))
        self.assertToken(self.token(), ',')

        (sent_index, word_indices), = self._sent_and_word_indices(self._parse_index_list())
        day = self.token()
        if day != 'XX':
            conds.append(BoxerPred(self.discourse_id, sent_index, word_indices, arg, 'date_day_%s' % (day), 'a', 0))

        return conds

    def _handle_time(self, arg):
        #time([1018]:'18', []:'XX', []:'XX')
        conds = []
        self._parse_index_list()
        hour = self.token()
        if hour != 'XX':
            conds.append(self._make_atom('r_hour_2',arg,hour))
        self.assertToken(self.token(), ',')

        self._parse_index_list()
        min = self.token()
        if min != 'XX':
            conds.append(self._make_atom('r_min_2',arg,min))
        self.assertToken(self.token(), ',')

        self._parse_index_list()
        sec = self.token()
        if sec != 'XX':
            conds.append(self._make_atom('r_sec_2',arg,sec))

        return conds

    def _handle_card(self):
        #card(_G18535, 28, ge)
        self.assertToken(self.token(), '(')
        variable = self.parse_variable()
        self.assertToken(self.token(), ',')
        value = self.token()
        self.assertToken(self.token(), ',')
        type = self.token()
        self.assertToken(self.token(), ')')
        return lambda sent_index, word_indices: BoxerCard(self.discourse_id, sent_index, word_indices, variable, value, type)

    def _handle_prop(self):
        #prop(_G15949, drs(...))
        self.assertToken(self.token(), '(')
        variable = self.parse_variable()
        self.assertToken(self.token(), ',')
        drs = self.parse_Expression(None)
        self.assertToken(self.token(), ')')
        return lambda sent_index, word_indices: BoxerProp(self.discourse_id, sent_index, word_indices, variable, drs)

    def _parse_index_list(self):
        #[1001,1002]:
        indices = []
        self.assertToken(self.token(), '[')
        while self.token(0) != ']':
            indices.append(self.parse_index())
            if self.token(0) == ',':
                self.token() #swallow ','
        self.token() #swallow ']'
        self.assertToken(self.token(), ':')
        return indices

    def parse_drs(self):
        #drs([[1001]:_G3943],
        #    [[1002]:pred(_G3943, dog, n, 0)]
        #   )
        label = self._label_counter.get()
        self.assertToken(self.token(), '(')
        # First list: the discourse referents (their indices are discarded).
        self.assertToken(self.token(), '[')
        refs = set()
        while self.token(0) != ']':
            indices = self._parse_index_list()
            refs.add(self.parse_variable())
            if self.token(0) == ',':
                self.token() #swallow ','
        self.token() #swallow ']'
        self.assertToken(self.token(), ',')
        # Second list: the conditions, each prefixed by its token indices.
        self.assertToken(self.token(), '[')
        conds = []
        while self.token(0) != ']':
            indices = self._parse_index_list()
            conds.extend(self.parse_condition(indices))
            if self.token(0) == ',':
                self.token() #swallow ','
        self.token() #swallow ']'
        self.assertToken(self.token(), ')')
        return BoxerDrs(label, list(refs), conds)

    def _handle_binary_expression(self, make_callback):
        self.assertToken(self.token(), '(')
        drs1 = self.parse_Expression(None)
        self.assertToken(self.token(), ',')
        drs2 = self.parse_Expression(None)
        self.assertToken(self.token(), ')')
        return lambda sent_index, word_indices: make_callback(sent_index, word_indices, drs1, drs2)

    def _handle_eq(self):
        self.assertToken(self.token(), '(')
        var1 = self.parse_variable()
        self.assertToken(self.token(), ',')
        var2 = self.parse_variable()
        self.assertToken(self.token(), ')')
        return lambda sent_index, word_indices: BoxerEq(self.discourse_id, sent_index, word_indices, var1, var2)

    def _handle_whq(self):
        self.assertToken(self.token(), '(')
        # First element: the answer-type descriptors for the question.
        self.assertToken(self.token(), '[')
        ans_types = []
        while self.token(0) != ']':
            cat = self.token()
            self.assertToken(self.token(), ':')
            if cat == 'des':
                ans_types.append(self.token())
            elif cat == 'num':
                # Numeric answers record both 'number' and the subtype
                # ('cou' is normalised to 'count').
                ans_types.append('number')
                typ = self.token()
                if typ == 'cou':
                    ans_types.append('count')
                else:
                    ans_types.append(typ)
            else:
                ans_types.append(self.token())
        self.token() #swallow the ']'

        self.assertToken(self.token(), ',')
        d1 = self.parse_Expression(None)
        self.assertToken(self.token(), ',')
        ref = self.parse_variable()
        self.assertToken(self.token(), ',')
        d2 = self.parse_Expression(None)
        self.assertToken(self.token(), ')')
        return lambda sent_index, word_indices: BoxerWhq(self.discourse_id, sent_index, word_indices, ans_types, d1, ref, d2)

    def _make_merge_expression(self, sent_index, word_indices, drs1, drs2):
        # A merge simply pools referents and conditions under drs1's label.
        return BoxerDrs(drs1.label, drs1.refs + drs2.refs, drs1.conds + drs2.conds)

    def _make_or_expression(self, sent_index, word_indices, drs1, drs2):
        return BoxerOr(self.discourse_id, sent_index, word_indices, drs1, drs2)

    def _make_imp_expression(self, sent_index, word_indices, drs1, drs2):
        # Implication is encoded as a DRS with a consequent.
        return BoxerDrs(drs1.label, drs1.refs, drs1.conds, drs2)

    def parse_variable(self):
        # Variables look like e12 (events) or x3 (individuals); only the
        # numeric part is kept.
        var = self.token()
        assert re.match('^[ex]\d+$', var), var
        return int(var[1:])

    def parse_index(self):
        return int(self.token())

    def _sent_and_word_indices(self, indices):
        """
        :return: list of (sent_index, word_indices) tuples
        """
        # Boxer encodes a token position as sent*1000 + word (both 1-based);
        # negative indices carry no sentence information.
        sent_indices = set((i / 1000)-1 for i in indices if i>=0)
        if sent_indices:
            pairs = []
            for sent_index in sent_indices:
                word_indices = [(i % 1000)-1 for i in indices if sent_index == (i / 1000)-1]
                pairs.append((sent_index, word_indices))
            return pairs
        else:
            word_indices = [(i % 1000)-1 for i in indices]
            return [(None, word_indices)]
class BoxerDrsParser(DrtParser):
    """
    Reparse the str form of subclasses of ``AbstractBoxerDrs``.

    Each recognized head token ('drs', 'pred', 'rel', ...) is parsed into
    the corresponding ``Boxer*`` object by ``handle``.
    """
    def __init__(self, discourse_id=None):
        DrtParser.__init__(self)
        # When given, overrides any discourse id found in the input.
        self.discourse_id = discourse_id

    def get_all_symbols(self):
        return [DrtTokens.OPEN, DrtTokens.CLOSE, DrtTokens.COMMA, DrtTokens.OPEN_BRACKET, DrtTokens.CLOSE_BRACKET]

    def attempt_adjuncts(self, expression, context):
        return expression

    def _disc_id(self):
        """Consume the discourse-id token; the id supplied to the
        constructor, when present, takes precedence over the parsed one.
        (The token must be consumed in either case.)"""
        tok = self.token()
        return self.discourse_id if self.discourse_id is not None else tok

    def handle(self, tok, context):
        """Dispatch on the head token *tok* and parse one condition.

        Raises ParseException (wrapping the original error) on any
        failure; falls through to an assertion for unknown tokens.
        """
        try:
            if tok == 'drs':
                self.assertNextToken(DrtTokens.OPEN)
                label = int(self.token())
                self.assertNextToken(DrtTokens.COMMA)
                refs = map(int, self.handle_refs())
                self.assertNextToken(DrtTokens.COMMA)
                conds = self.handle_conds(None)
                self.assertNextToken(DrtTokens.CLOSE)
                return BoxerDrs(label, refs, conds)
            elif tok == 'pred':
                self.assertNextToken(DrtTokens.OPEN)
                disc_id = self._disc_id()
                self.assertNextToken(DrtTokens.COMMA)
                sent_id = self.nullableIntToken()
                self.assertNextToken(DrtTokens.COMMA)
                word_ids = map(int, self.handle_refs())
                self.assertNextToken(DrtTokens.COMMA)
                variable = int(self.token())
                self.assertNextToken(DrtTokens.COMMA)
                name = self.token()
                self.assertNextToken(DrtTokens.COMMA)
                pos = self.token()
                self.assertNextToken(DrtTokens.COMMA)
                sense = int(self.token())
                self.assertNextToken(DrtTokens.CLOSE)
                return BoxerPred(disc_id, sent_id, word_ids, variable, name, pos, sense)
            elif tok == 'named':
                self.assertNextToken(DrtTokens.OPEN)
                disc_id = self._disc_id()
                self.assertNextToken(DrtTokens.COMMA)
                sent_id = int(self.token())
                self.assertNextToken(DrtTokens.COMMA)
                word_ids = map(int, self.handle_refs())
                self.assertNextToken(DrtTokens.COMMA)
                variable = int(self.token())
                self.assertNextToken(DrtTokens.COMMA)
                name = self.token()
                self.assertNextToken(DrtTokens.COMMA)
                type = self.token()
                self.assertNextToken(DrtTokens.COMMA)
                sense = int(self.token())
                self.assertNextToken(DrtTokens.CLOSE)
                return BoxerNamed(disc_id, sent_id, word_ids, variable, name, type, sense)
            elif tok == 'rel':
                self.assertNextToken(DrtTokens.OPEN)
                disc_id = self._disc_id()
                self.assertNextToken(DrtTokens.COMMA)
                sent_id = self.nullableIntToken()
                self.assertNextToken(DrtTokens.COMMA)
                word_ids = map(int, self.handle_refs())
                self.assertNextToken(DrtTokens.COMMA)
                var1 = int(self.token())
                self.assertNextToken(DrtTokens.COMMA)
                var2 = int(self.token())
                self.assertNextToken(DrtTokens.COMMA)
                rel = self.token()
                self.assertNextToken(DrtTokens.COMMA)
                sense = int(self.token())
                self.assertNextToken(DrtTokens.CLOSE)
                return BoxerRel(disc_id, sent_id, word_ids, var1, var2, rel, sense)
            elif tok == 'prop':
                self.assertNextToken(DrtTokens.OPEN)
                disc_id = self._disc_id()
                self.assertNextToken(DrtTokens.COMMA)
                sent_id = int(self.token())
                self.assertNextToken(DrtTokens.COMMA)
                word_ids = map(int, self.handle_refs())
                self.assertNextToken(DrtTokens.COMMA)
                variable = int(self.token())
                self.assertNextToken(DrtTokens.COMMA)
                drs = self.parse_Expression(None)
                self.assertNextToken(DrtTokens.CLOSE)
                return BoxerProp(disc_id, sent_id, word_ids, variable, drs)
            elif tok == 'not':
                self.assertNextToken(DrtTokens.OPEN)
                drs = self.parse_Expression(None)
                self.assertNextToken(DrtTokens.CLOSE)
                return BoxerNot(drs)
            elif tok == 'imp':
                self.assertNextToken(DrtTokens.OPEN)
                drs1 = self.parse_Expression(None)
                self.assertNextToken(DrtTokens.COMMA)
                drs2 = self.parse_Expression(None)
                self.assertNextToken(DrtTokens.CLOSE)
                # An implication is a DRS with the consequent attached.
                return BoxerDrs(drs1.label, drs1.refs, drs1.conds, drs2)
            elif tok == 'or':
                self.assertNextToken(DrtTokens.OPEN)
                disc_id = self._disc_id()
                self.assertNextToken(DrtTokens.COMMA)
                sent_id = self.nullableIntToken()
                self.assertNextToken(DrtTokens.COMMA)
                word_ids = map(int, self.handle_refs())
                self.assertNextToken(DrtTokens.COMMA)
                drs1 = self.parse_Expression(None)
                self.assertNextToken(DrtTokens.COMMA)
                drs2 = self.parse_Expression(None)
                self.assertNextToken(DrtTokens.CLOSE)
                return BoxerOr(disc_id, sent_id, word_ids, drs1, drs2)
            elif tok == 'eq':
                self.assertNextToken(DrtTokens.OPEN)
                disc_id = self._disc_id()
                self.assertNextToken(DrtTokens.COMMA)
                sent_id = self.nullableIntToken()
                self.assertNextToken(DrtTokens.COMMA)
                word_ids = map(int, self.handle_refs())
                self.assertNextToken(DrtTokens.COMMA)
                var1 = int(self.token())
                self.assertNextToken(DrtTokens.COMMA)
                var2 = int(self.token())
                self.assertNextToken(DrtTokens.CLOSE)
                return BoxerEq(disc_id, sent_id, word_ids, var1, var2)
            elif tok == 'card':
                self.assertNextToken(DrtTokens.OPEN)
                disc_id = self._disc_id()
                self.assertNextToken(DrtTokens.COMMA)
                sent_id = self.nullableIntToken()
                self.assertNextToken(DrtTokens.COMMA)
                word_ids = map(int, self.handle_refs())
                self.assertNextToken(DrtTokens.COMMA)
                var = int(self.token())
                self.assertNextToken(DrtTokens.COMMA)
                value = self.token()
                self.assertNextToken(DrtTokens.COMMA)
                type = self.token()
                self.assertNextToken(DrtTokens.CLOSE)
                return BoxerCard(disc_id, sent_id, word_ids, var, value, type)
            elif tok == 'whq':
                self.assertNextToken(DrtTokens.OPEN)
                disc_id = self._disc_id()
                self.assertNextToken(DrtTokens.COMMA)
                sent_id = self.nullableIntToken()
                self.assertNextToken(DrtTokens.COMMA)
                word_ids = map(int, self.handle_refs())
                self.assertNextToken(DrtTokens.COMMA)
                ans_types = self.handle_refs()
                self.assertNextToken(DrtTokens.COMMA)
                drs1 = self.parse_Expression(None)
                self.assertNextToken(DrtTokens.COMMA)
                var = int(self.token())
                self.assertNextToken(DrtTokens.COMMA)
                drs2 = self.parse_Expression(None)
                self.assertNextToken(DrtTokens.CLOSE)
                return BoxerWhq(disc_id, sent_id, word_ids, ans_types, drs1, var, drs2)
        except Exception as e:
            # 'except X as e' works on Python 2.6+ and Python 3; the old
            # 'except Exception, e' form was a Python-3 syntax error.
            raise ParseException(self._currentIndex, str(e))
        assert False, repr(tok)

    def nullableIntToken(self):
        """Return the next token as an int, or None for the token 'None'."""
        t = self.token()
        # The old form ``[None, int(t)][t != 'None']`` built the list
        # eagerly, so int('None') raised ValueError before indexing.
        return None if t == 'None' else int(t)

    def get_next_token_variable(self, description):
        try:
            return self.token()
        except ExpectedMoreTokensException as e:
            raise ExpectedMoreTokensException(e.index, 'Variable expected.')
class AbstractBoxerDrs(object):
    """Common behaviour shared by every node of Boxer's DRS representation."""

    def variables(self):
        """
        :return: (set<variables>, set<events>, set<propositions>)
        """
        variables, events, propositions = self._variables()
        # Events and propositions are reported in their own sets, so they
        # are subtracted from the plain-variable set; anything that is
        # both an event and a proposition counts as an event.
        return (variables - (events | propositions),
                events,
                propositions - events)

    def variable_types(self):
        """Map every variable to 'z' (individual), 'e' (event) or 'p'
        (proposition)."""
        return dict((v, t)
                    for t, vs in zip(('z', 'e', 'p'), self.variables())
                    for v in vs)

    def _variables(self):
        """
        :return: (set<variables>, set<events>, set<propositions>)
        """
        return (set(), set(), set())

    def atoms(self):
        return set()

    def clean(self):
        return self

    def _clean_name(self, name):
        # Normalise characters that are illegal in logic identifiers.
        return name.replace('-', '_').replace("'", "_")

    def renumber_sentences(self, f):
        return self

    def __hash__(self):
        return hash(str(self))
class BoxerDrs(AbstractBoxerDrs):
    """A DRS box: a label, discourse referents and conditions, plus an
    optional consequent (which makes the box an implication)."""

    def __init__(self, label, refs, conds, consequent=None):
        AbstractBoxerDrs.__init__(self)
        self.label = label
        self.refs = refs
        self.conds = conds
        self.consequent = consequent

    def _variables(self):
        """Union the (variables, events, propositions) triples of every
        condition and, when present, the consequent."""
        variables = (set(), set(), set())
        for cond in self.conds:
            for s, v in zip(variables, cond._variables()):
                s.update(v)
        if self.consequent is not None:
            for s, v in zip(variables, self.consequent._variables()):
                s.update(v)
        return variables

    def atoms(self):
        # Explicit union instead of reduce(): equivalent, but has no
        # empty-sequence pitfall and no dependence on a reduce builtin.
        atoms = set()
        for cond in self.conds:
            atoms |= cond.atoms()
        if self.consequent is not None:
            atoms.update(self.consequent.atoms())
        return atoms

    def clean(self):
        # 'is not None' (rather than truthiness) for consistency with
        # __repr__ and _variables; instances are always truthy anyway.
        consequent = self.consequent.clean() if self.consequent is not None else None
        return BoxerDrs(self.label, self.refs,
                        [c.clean() for c in self.conds], consequent)

    def renumber_sentences(self, f):
        consequent = self.consequent.renumber_sentences(f) if self.consequent is not None else None
        return BoxerDrs(self.label, self.refs,
                        [c.renumber_sentences(f) for c in self.conds],
                        consequent)

    def __repr__(self):
        s = 'drs(%s, [%s], [%s])' % (self.label,
                                     ', '.join(map(str, self.refs)),
                                     ', '.join(map(str, self.conds)))
        if self.consequent is not None:
            s = 'imp(%s, %s)' % (s, self.consequent)
        return s

    def __eq__(self, other):
        # all() short-circuits and, unlike reduce(operator.and_, ...)
        # with no initial value, does not raise TypeError when both
        # condition lists are empty.
        return self.__class__ == other.__class__ and \
               self.label == other.label and \
               self.refs == other.refs and \
               len(self.conds) == len(other.conds) and \
               all(c1 == c2 for c1, c2 in zip(self.conds, other.conds)) and \
               self.consequent == other.consequent
class BoxerNot(AbstractBoxerDrs):
    """Negation of a single embedded DRS."""

    def __init__(self, drs):
        AbstractBoxerDrs.__init__(self)
        self.drs = drs

    def _variables(self):
        return self.drs._variables()

    def atoms(self):
        return self.drs.atoms()

    def clean(self):
        return BoxerNot(self.drs.clean())

    def renumber_sentences(self, f):
        return BoxerNot(self.drs.renumber_sentences(f))

    def __repr__(self):
        return 'not(%s)' % (self.drs,)

    def __eq__(self, other):
        return self.__class__ == other.__class__ and self.drs == other.drs
class BoxerIndexed(AbstractBoxerDrs):
def __init__(self, discourse_id, sent_index, word_indices):
AbstractBoxerDrs.__init__(self)
self.discourse_id = discourse_id
self.sent_index = sent_index
self.word_indices = word_indices
def atoms(self):
return set([self])
def __eq__(self, other):
return self.__class__ == other.__class__ and \
self.discourse_id == other.discourse_id and \
self.sent_index == other.sent_index and \
self.word_indices == other.word_indices and \
reduce(operator.and_, (s==o for s,o in zip(self, other)))
def __repr__(self):
s = '%s(%s, %s, [%s]' % (self._pred(), self.discourse_id, self.sent_index, ', '.join(map(str, self.word_indices)))
for v in self:
s += ', %s' % v
return s + ')'
class BoxerPred(BoxerIndexed):
    """A one-place predicate ``name`` (with POS tag and word sense)
    applied to variable *var*."""

    def __init__(self, discourse_id, sent_index, word_indices, var, name, pos, sense):
        BoxerIndexed.__init__(self, discourse_id, sent_index, word_indices)
        self.var = var
        self.name = name
        self.pos = pos
        self.sense = sense

    def _variables(self):
        return (set([self.var]), set(), set())

    def change_var(self, var):
        """Return a copy of this predicate applied to *var* instead."""
        return BoxerPred(self.discourse_id, self.sent_index,
                         self.word_indices, var, self.name, self.pos,
                         self.sense)

    def clean(self):
        """Return a copy with the predicate name normalised."""
        return BoxerPred(self.discourse_id, self.sent_index,
                         self.word_indices, self.var,
                         self._clean_name(self.name), self.pos, self.sense)

    def renumber_sentences(self, f):
        new_sent_index = f(self.sent_index)
        return BoxerPred(self.discourse_id, new_sent_index,
                         self.word_indices, self.var, self.name, self.pos,
                         self.sense)

    def __iter__(self):
        return iter((self.var, self.name, self.pos, self.sense))

    def _pred(self):
        return 'pred'
class BoxerNamed(BoxerIndexed):
    """A named-entity condition: entity *name* of class *type* for
    variable *var*."""

    def __init__(self, discourse_id, sent_index, word_indices, var, name, type, sense):
        BoxerIndexed.__init__(self, discourse_id, sent_index, word_indices)
        self.var = var
        self.name = name
        self.type = type
        self.sense = sense

    def _variables(self):
        return (set([self.var]), set(), set())

    def change_var(self, var):
        """Return a copy of this condition applied to *var* instead."""
        return BoxerNamed(self.discourse_id, self.sent_index,
                          self.word_indices, var, self.name, self.type,
                          self.sense)

    def clean(self):
        """Return a copy with the entity name normalised."""
        return BoxerNamed(self.discourse_id, self.sent_index,
                          self.word_indices, self.var,
                          self._clean_name(self.name), self.type,
                          self.sense)

    def renumber_sentences(self, f):
        return BoxerNamed(self.discourse_id, f(self.sent_index),
                          self.word_indices, self.var, self.name,
                          self.type, self.sense)

    def __iter__(self):
        return iter((self.var, self.name, self.type, self.sense))

    def _pred(self):
        return 'named'
class BoxerRel(BoxerIndexed):
    """A two-place relation *rel* between variables *var1* and *var2*."""

    def __init__(self, discourse_id, sent_index, word_indices, var1, var2, rel, sense):
        BoxerIndexed.__init__(self, discourse_id, sent_index, word_indices)
        self.var1 = var1
        self.var2 = var2
        self.rel = rel
        self.sense = sense

    def _variables(self):
        return (set([self.var1, self.var2]), set(), set())

    def clean(self):
        """Return a copy with the relation name normalised."""
        return BoxerRel(self.discourse_id, self.sent_index,
                        self.word_indices, self.var1, self.var2,
                        self._clean_name(self.rel), self.sense)

    def renumber_sentences(self, f):
        return BoxerRel(self.discourse_id, f(self.sent_index),
                        self.word_indices, self.var1, self.var2, self.rel,
                        self.sense)

    def __iter__(self):
        return iter((self.var1, self.var2, self.rel, self.sense))

    def _pred(self):
        return 'rel'
class BoxerProp(BoxerIndexed):
    """A proposition condition: variable *var* labels the embedded DRS
    *drs*."""

    def __init__(self, discourse_id, sent_index, word_indices, var, drs):
        BoxerIndexed.__init__(self, discourse_id, sent_index, word_indices)
        self.var = var
        self.drs = drs

    def _variables(self):
        # *var* is a proposition variable; merge in whatever the embedded
        # DRS contributes.
        own = (set(), set(), set([self.var]))
        return tuple(map(operator.or_, own, self.drs._variables()))

    def referenced_labels(self):
        return set([self.drs])

    def atoms(self):
        return self.drs.atoms()

    def clean(self):
        return BoxerProp(self.discourse_id, self.sent_index,
                         self.word_indices, self.var, self.drs.clean())

    def renumber_sentences(self, f):
        return BoxerProp(self.discourse_id, f(self.sent_index),
                         self.word_indices, self.var,
                         self.drs.renumber_sentences(f))

    def __iter__(self):
        return iter((self.var, self.drs))

    def _pred(self):
        return 'prop'
class BoxerEq(BoxerIndexed):
    """Equality between two variables."""

    def __init__(self, discourse_id, sent_index, word_indices, var1, var2):
        BoxerIndexed.__init__(self, discourse_id, sent_index, word_indices)
        self.var1 = var1
        self.var2 = var2

    def _variables(self):
        return (set([self.var1, self.var2]), set(), set())

    def atoms(self):
        # Equalities are structural, not atomic predicates, so they
        # deliberately override BoxerIndexed.atoms with the empty set.
        return set()

    def renumber_sentences(self, f):
        return BoxerEq(self.discourse_id, f(self.sent_index),
                       self.word_indices, self.var1, self.var2)

    def __iter__(self):
        return iter((self.var1, self.var2))

    def _pred(self):
        return 'eq'
class BoxerCard(BoxerIndexed):
    """A cardinality condition: variable *var* has count *value* with
    comparison *type* (e.g. 'eq')."""

    def __init__(self, discourse_id, sent_index, word_indices, var, value, type):
        BoxerIndexed.__init__(self, discourse_id, sent_index, word_indices)
        self.var = var
        self.value = value
        self.type = type

    def _variables(self):
        return (set([self.var]), set(), set())

    def renumber_sentences(self, f):
        return BoxerCard(self.discourse_id, f(self.sent_index),
                         self.word_indices, self.var, self.value,
                         self.type)

    def __iter__(self):
        return iter((self.var, self.value, self.type))

    def _pred(self):
        return 'card'
class BoxerOr(BoxerIndexed):
    """Disjunction of two embedded DRSs."""

    def __init__(self, discourse_id, sent_index, word_indices, drs1, drs2):
        BoxerIndexed.__init__(self, discourse_id, sent_index, word_indices)
        self.drs1 = drs1
        self.drs2 = drs2

    def _variables(self):
        return tuple(map(operator.or_,
                         self.drs1._variables(),
                         self.drs2._variables()))

    def atoms(self):
        return self.drs1.atoms() | self.drs2.atoms()

    def clean(self):
        return BoxerOr(self.discourse_id, self.sent_index,
                       self.word_indices, self.drs1.clean(),
                       self.drs2.clean())

    def renumber_sentences(self, f):
        # NOTE(review): unlike clean(), the sub-DRSs are not renumbered
        # here — confirm this asymmetry is intentional.
        return BoxerOr(self.discourse_id, f(self.sent_index),
                       self.word_indices, self.drs1, self.drs2)

    def __iter__(self):
        return iter((self.drs1, self.drs2))

    def _pred(self):
        return 'or'
class BoxerWhq(BoxerIndexed):
    """A WH-question condition: answer types, a restrictor DRS, the
    queried variable and a body DRS."""

    def __init__(self, discourse_id, sent_index, word_indices, ans_types, drs1, variable, drs2):
        BoxerIndexed.__init__(self, discourse_id, sent_index, word_indices)
        self.ans_types = ans_types
        self.drs1 = drs1
        self.variable = variable
        self.drs2 = drs2

    def _variables(self):
        # BUG FIX: the previous code mapped the binary operator.or_ over
        # THREE iterables, which raises TypeError the moment it is
        # called. Union the three triples element-wise instead.
        own = (set([self.variable]), set(), set())
        return tuple(a | b | c
                     for a, b, c in zip(own,
                                        self.drs1._variables(),
                                        self.drs2._variables()))

    def atoms(self):
        return self.drs1.atoms() | self.drs2.atoms()

    def clean(self):
        return BoxerWhq(self.discourse_id, self.sent_index, self.word_indices, self.ans_types, self.drs1.clean(), self.variable, self.drs2.clean())

    def renumber_sentences(self, f):
        return BoxerWhq(self.discourse_id, f(self.sent_index), self.word_indices, self.ans_types, self.drs1, self.variable, self.drs2)

    def __iter__(self):
        return iter(('['+','.join(self.ans_types)+']', self.drs1, self.variable, self.drs2))

    def _pred(self):
        return 'whq'
class PassthroughBoxerDrsInterpreter(object):
    """Interpreter that returns the Boxer DRS unchanged."""

    def interpret(self, ex):
        """Identity: hand *ex* straight back."""
        return ex
class NltkDrtBoxerDrsInterpreter(object):
    """Convert an ``AbstractBoxerDrs`` tree into NLTK DRT expressions."""

    def __init__(self, occur_index=False):
        # When True, predicate names are suffixed with their discourse,
        # sentence and word provenance (see _add_occur_indexing).
        self._occur_index = occur_index

    def interpret(self, ex):
        """
        :param ex: ``AbstractBoxerDrs``
        :return: ``AbstractDrs``
        """
        if isinstance(ex, BoxerDrs):
            # list(...) keeps this correct under Python 3, where map()
            # returns a lazy iterator; under Python 2 it is a no-op copy.
            drs = DRS([Variable('x%d' % r) for r in ex.refs],
                      list(map(self.interpret, ex.conds)))
            if ex.label is not None:
                drs.label = Variable('x%d' % ex.label)
            if ex.consequent is not None:
                drs.consequent = self.interpret(ex.consequent)
            return drs
        elif isinstance(ex, BoxerNot):
            return DrtNegatedExpression(self.interpret(ex.drs))
        elif isinstance(ex, BoxerPred):
            pred = self._add_occur_indexing('%s_%s' % (ex.pos, ex.name), ex)
            return self._make_atom(pred, 'x%d' % ex.var)
        elif isinstance(ex, BoxerNamed):
            pred = self._add_occur_indexing('ne_%s_%s' % (ex.type, ex.name), ex)
            return self._make_atom(pred, 'x%d' % ex.var)
        elif isinstance(ex, BoxerRel):
            pred = self._add_occur_indexing('%s' % (ex.rel), ex)
            return self._make_atom(pred, 'x%d' % ex.var1, 'x%d' % ex.var2)
        elif isinstance(ex, BoxerProp):
            return DrtProposition(Variable('x%d' % ex.var), self.interpret(ex.drs))
        elif isinstance(ex, BoxerEq):
            return DrtEqualityExpression(DrtVariableExpression(Variable('x%d' % ex.var1)),
                                         DrtVariableExpression(Variable('x%d' % ex.var2)))
        elif isinstance(ex, BoxerCard):
            pred = self._add_occur_indexing('card_%s_%s' % (ex.type, ex.value), ex)
            return self._make_atom(pred, 'x%d' % ex.var)
        elif isinstance(ex, BoxerOr):
            return DrtOrExpression(self.interpret(ex.drs1), self.interpret(ex.drs2))
        elif isinstance(ex, BoxerWhq):
            # A WH-question merges the referents and conditions of both
            # sub-DRSs into a single box.
            drs1 = self.interpret(ex.drs1)
            drs2 = self.interpret(ex.drs2)
            return DRS(drs1.refs + drs2.refs, drs1.conds + drs2.conds)
        assert False, '%s: %s' % (ex.__class__.__name__, ex)

    def _make_atom(self, pred, *args):
        """Build ``pred(arg1)(arg2)...`` as nested DRT applications."""
        accum = DrtVariableExpression(Variable(pred))
        for arg in args:
            accum = DrtApplicationExpression(
                accum, DrtVariableExpression(Variable(arg)))
        return accum

    def _add_occur_indexing(self, base, ex):
        """Optionally append '_<discourse>_s<sent>_w<word>' provenance."""
        if self._occur_index and ex.sent_index is not None:
            if ex.discourse_id:
                base += '_%s' % ex.discourse_id
            base += '_s%s' % ex.sent_index
            # The smallest word index stands in for the whole span.
            base += '_w%s' % sorted(ex.word_indices)[0]
        return base
class UnparseableInputException(Exception):
    """Raised when Boxer's output cannot be parsed."""
if __name__ == '__main__':
    # Command-line driver: parse one text argument into a DRS and print it.
    opts = OptionParser("usage: %prog TEXT [options]")
    opts.add_option("--verbose", "-v", help="display verbose logs", action="store_true", default=False, dest="verbose")
    opts.add_option("--fol", "-f", help="output FOL", action="store_true", default=False, dest="fol")
    opts.add_option("--question", "-q", help="input is a question", action="store_true", default=False, dest="question")
    opts.add_option("--occur", "-o", help="occurrence index", action="store_true", default=False, dest="occur_index")
    (options, args) = opts.parse_args()

    if len(args) != 1:
        opts.error("incorrect number of arguments")

    interpreter = NltkDrtBoxerDrsInterpreter(occur_index=options.occur_index)
    # NB: r'\n' splits on the literal two characters backslash-n, so
    # several sentences can be passed inside a single shell argument.
    drs = Boxer(interpreter).interpret_multisentence(args[0].split(r'\n'), question=options.question, verbose=options.verbose)
    if drs is None:
        # Single-argument print(...) is valid on both Python 2 and 3;
        # the old 'print None' statement was Python-2-only syntax.
        print(None)
    else:
        drs = drs.simplify().eliminate_equality()
        if options.fol:
            print(drs.fol().normalize())
        else:
            drs.normalize().pprint()
| mit |
chemelnucfin/tensorflow | tensorflow/python/keras/preprocessing/image_test.py | 9 | 13832 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for image preprocessing utils."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import shutil
import tempfile
import numpy as np
from tensorflow.python import keras
from tensorflow.python.platform import test
try:
import PIL # pylint:disable=g-import-not-at-top
except ImportError:
PIL = None
def _generate_test_images():
  """Create two lists of eight random 20x20 PIL images (RGB, grayscale)."""
  width = height = 20
  rgb_images = []
  gray_images = []
  for _ in range(8):
    # Per-pixel bias/variance shared by the RGB and grayscale image so
    # both land in a similar intensity range. (RNG call order matches
    # the original implementation exactly.)
    bias = np.random.rand(width, height, 1) * 64
    variance = np.random.rand(width, height, 1) * (255 - 64)
    rgb_pixels = np.random.rand(width, height, 3) * variance + bias
    rgb_images.append(
        keras.preprocessing.image.array_to_img(rgb_pixels, scale=False))
    gray_pixels = np.random.rand(width, height, 1) * variance + bias
    gray_images.append(
        keras.preprocessing.image.array_to_img(gray_pixels, scale=False))
  return [rgb_images, gray_images]
class TestImage(test.TestCase):
  """Tests for `keras.preprocessing.image`: ImageDataGenerator,
  directory iterators and the array/img conversion utilities.

  Tests silently return (rather than skip) when PIL is unavailable.
  """

  def test_image_data_generator(self):
    """Fit a generator with every augmentation enabled and flow batches."""
    if PIL is None:
      return  # Skip test if PIL is not available.

    for test_images in _generate_test_images():
      img_list = []
      for im in test_images:
        # [None, ...] adds the leading batch axis expected by vstack.
        img_list.append(keras.preprocessing.image.img_to_array(im)[None, ...])

      images = np.vstack(img_list)
      generator = keras.preprocessing.image.ImageDataGenerator(
          featurewise_center=True,
          samplewise_center=True,
          featurewise_std_normalization=True,
          samplewise_std_normalization=True,
          zca_whitening=True,
          rotation_range=90.,
          width_shift_range=0.1,
          height_shift_range=0.1,
          shear_range=0.5,
          zoom_range=0.2,
          channel_shift_range=0.,
          brightness_range=(1, 5),
          fill_mode='nearest',
          cval=0.5,
          horizontal_flip=True,
          vertical_flip=True)
      # Basic test before fit
      x = np.random.random((32, 10, 10, 3))
      generator.flow(x)

      # Fit
      generator.fit(images, augment=True)

      # Augmentation must preserve the per-image shape.
      for x, _ in generator.flow(
          images,
          np.arange(images.shape[0]),
          shuffle=True):
        self.assertEqual(x.shape[1:], images.shape[1:])
        break

  def test_image_data_generator_with_split_value_error(self):
    """validation_split outside [0, 1) must be rejected."""
    with self.assertRaises(ValueError):
      keras.preprocessing.image.ImageDataGenerator(validation_split=5)

  def test_image_data_generator_invalid_data(self):
    """fit/flow/config must reject malformed inputs."""
    generator = keras.preprocessing.image.ImageDataGenerator(
        featurewise_center=True,
        samplewise_center=True,
        featurewise_std_normalization=True,
        samplewise_std_normalization=True,
        zca_whitening=True,
        data_format='channels_last')

    # Test fit with invalid data
    with self.assertRaises(ValueError):
      x = np.random.random((3, 10, 10))
      generator.fit(x)

    # Test flow with invalid data
    with self.assertRaises(ValueError):
      generator.flow(np.arange(5))

    # Invalid number of channels: will work but raise a warning
    x = np.random.random((32, 10, 10, 5))
    generator.flow(x)

    with self.assertRaises(ValueError):
      generator = keras.preprocessing.image.ImageDataGenerator(
          data_format='unknown')

    # A degenerate (min == max) zoom range is still accepted.
    generator = keras.preprocessing.image.ImageDataGenerator(
        zoom_range=(2, 2))

  def test_image_data_generator_fit(self):
    """fit() must accept grayscale and RGB data in both data formats."""
    generator = keras.preprocessing.image.ImageDataGenerator(
        featurewise_center=True,
        samplewise_center=True,
        featurewise_std_normalization=True,
        samplewise_std_normalization=True,
        zca_whitening=True,
        data_format='channels_last')
    # Test grayscale
    x = np.random.random((32, 10, 10, 1))
    generator.fit(x)
    # Test RBG
    x = np.random.random((32, 10, 10, 3))
    generator.fit(x)
    generator = keras.preprocessing.image.ImageDataGenerator(
        featurewise_center=True,
        samplewise_center=True,
        featurewise_std_normalization=True,
        samplewise_std_normalization=True,
        zca_whitening=True,
        data_format='channels_first')
    # Test grayscale
    x = np.random.random((32, 1, 10, 10))
    generator.fit(x)
    # Test RBG
    x = np.random.random((32, 3, 10, 10))
    generator.fit(x)

  def test_directory_iterator(self):
    """flow_from_directory: class discovery, file counts and Sequence API."""
    if PIL is None:
      return  # Skip test if PIL is not available.

    num_classes = 2

    temp_dir = self.get_temp_dir()
    self.addCleanup(shutil.rmtree, temp_dir)

    # create folders and subfolders
    paths = []
    for cl in range(num_classes):
      class_directory = 'class-{}'.format(cl)
      classpaths = [
          class_directory, os.path.join(class_directory, 'subfolder-1'),
          os.path.join(class_directory, 'subfolder-2'), os.path.join(
              class_directory, 'subfolder-1', 'sub-subfolder')
      ]
      for path in classpaths:
        os.mkdir(os.path.join(temp_dir, path))
      paths.append(classpaths)

    # save the images in the paths
    count = 0
    filenames = []
    for test_images in _generate_test_images():
      for im in test_images:
        # rotate image class
        im_class = count % num_classes
        # rotate subfolders
        classpaths = paths[im_class]
        filename = os.path.join(classpaths[count % len(classpaths)],
                                'image-{}.jpg'.format(count))
        filenames.append(filename)
        im.save(os.path.join(temp_dir, filename))
        count += 1

    # Test image loading util
    fname = os.path.join(temp_dir, filenames[0])
    _ = keras.preprocessing.image.load_img(fname)
    _ = keras.preprocessing.image.load_img(fname, grayscale=True)
    _ = keras.preprocessing.image.load_img(fname, target_size=(10, 10))
    _ = keras.preprocessing.image.load_img(fname, target_size=(10, 10),
                                           interpolation='bilinear')

    # create iterator
    generator = keras.preprocessing.image.ImageDataGenerator()
    dir_iterator = generator.flow_from_directory(temp_dir)

    # check number of classes and images
    self.assertEqual(len(dir_iterator.class_indices), num_classes)
    self.assertEqual(len(dir_iterator.classes), count)
    self.assertEqual(set(dir_iterator.filenames), set(filenames))

    def preprocessing_function(x):
      """This will fail if not provided by a Numpy array.
      Note: This is made to enforce backward compatibility.

      Args:
          x: A numpy array.

      Returns:
          An array of zeros with the same shape as the given array.
      """
      self.assertEqual(x.shape, (26, 26, 3))
      self.assertIs(type(x), np.ndarray)
      return np.zeros_like(x)

    # Test usage as Sequence
    generator = keras.preprocessing.image.ImageDataGenerator(
        preprocessing_function=preprocessing_function)
    dir_seq = generator.flow_from_directory(
        str(temp_dir),
        target_size=(26, 26),
        color_mode='rgb',
        batch_size=3,
        class_mode='categorical')
    # 16 images at batch_size 3 -> 6 batches (last one partial).
    self.assertEqual(len(dir_seq), count // 3 + 1)
    x1, y1 = dir_seq[1]
    self.assertEqual(x1.shape, (3, 26, 26, 3))
    self.assertEqual(y1.shape, (3, num_classes))
    x1, y1 = dir_seq[5]
    # preprocessing_function above zeroes every batch.
    self.assertTrue((x1 == 0).all())

  def directory_iterator_with_validation_split_test_helper(
      self, validation_split):
    """Shared body: check training/validation subset sizes for a split."""
    if PIL is None:
      return  # Skip test if PIL is not available.

    num_classes = 2
    tmp_folder = tempfile.mkdtemp(prefix='test_images')

    # create folders and subfolders
    paths = []
    for cl in range(num_classes):
      class_directory = 'class-{}'.format(cl)
      classpaths = [
          class_directory,
          os.path.join(class_directory, 'subfolder-1'),
          os.path.join(class_directory, 'subfolder-2'),
          os.path.join(class_directory, 'subfolder-1', 'sub-subfolder')
      ]
      for path in classpaths:
        os.mkdir(os.path.join(tmp_folder, path))
      paths.append(classpaths)

    # save the images in the paths
    count = 0
    filenames = []
    for test_images in _generate_test_images():
      for im in test_images:
        # rotate image class
        im_class = count % num_classes
        # rotate subfolders
        classpaths = paths[im_class]
        filename = os.path.join(classpaths[count % len(classpaths)],
                                'image-{}.jpg'.format(count))
        filenames.append(filename)
        im.save(os.path.join(tmp_folder, filename))
        count += 1

    # create iterator
    generator = keras.preprocessing.image.ImageDataGenerator(
        validation_split=validation_split)

    # Only 'training'/'validation' are valid subset names.
    with self.assertRaises(ValueError):
      generator.flow_from_directory(tmp_folder, subset='foo')

    num_validation = int(count * validation_split)
    num_training = count - num_validation
    train_iterator = generator.flow_from_directory(
        tmp_folder, subset='training')
    self.assertEqual(train_iterator.samples, num_training)
    valid_iterator = generator.flow_from_directory(
        tmp_folder, subset='validation')
    self.assertEqual(valid_iterator.samples, num_validation)

    # check number of classes and images
    self.assertEqual(len(train_iterator.class_indices), num_classes)
    self.assertEqual(len(train_iterator.classes), num_training)
    self.assertEqual(
        len(set(train_iterator.filenames) & set(filenames)), num_training)

    shutil.rmtree(tmp_folder)

  def test_directory_iterator_with_validation_split_25_percent(self):
    self.directory_iterator_with_validation_split_test_helper(0.25)

  def test_directory_iterator_with_validation_split_40_percent(self):
    self.directory_iterator_with_validation_split_test_helper(0.40)

  def test_directory_iterator_with_validation_split_50_percent(self):
    self.directory_iterator_with_validation_split_test_helper(0.50)

  def test_img_utils(self):
    """array_to_img/img_to_array round-trips in both data formats."""
    if PIL is None:
      return  # Skip test if PIL is not available.

    height, width = 10, 8

    # Test channels_first data format
    x = np.random.random((3, height, width))
    img = keras.preprocessing.image.array_to_img(
        x, data_format='channels_first')
    # PIL reports size as (width, height).
    self.assertEqual(img.size, (width, height))
    x = keras.preprocessing.image.img_to_array(
        img, data_format='channels_first')
    self.assertEqual(x.shape, (3, height, width))
    # Test 2D
    x = np.random.random((1, height, width))
    img = keras.preprocessing.image.array_to_img(
        x, data_format='channels_first')
    self.assertEqual(img.size, (width, height))
    x = keras.preprocessing.image.img_to_array(
        img, data_format='channels_first')
    self.assertEqual(x.shape, (1, height, width))

    # Test channels_last data format
    x = np.random.random((height, width, 3))
    img = keras.preprocessing.image.array_to_img(x, data_format='channels_last')
    self.assertEqual(img.size, (width, height))
    x = keras.preprocessing.image.img_to_array(img, data_format='channels_last')
    self.assertEqual(x.shape, (height, width, 3))
    # Test 2D
    x = np.random.random((height, width, 1))
    img = keras.preprocessing.image.array_to_img(x, data_format='channels_last')
    self.assertEqual(img.size, (width, height))
    x = keras.preprocessing.image.img_to_array(img, data_format='channels_last')
    self.assertEqual(x.shape, (height, width, 1))

  def test_batch_standardize(self):
    """standardize() must accept a whole batch, not just single images."""
    if PIL is None:
      return  # Skip test if PIL is not available.

    # ImageDataGenerator.standardize should work on batches
    for test_images in _generate_test_images():
      img_list = []
      for im in test_images:
        img_list.append(keras.preprocessing.image.img_to_array(im)[None, ...])

      images = np.vstack(img_list)
      generator = keras.preprocessing.image.ImageDataGenerator(
          featurewise_center=True,
          samplewise_center=True,
          featurewise_std_normalization=True,
          samplewise_std_normalization=True,
          zca_whitening=True,
          rotation_range=90.,
          width_shift_range=0.1,
          height_shift_range=0.1,
          shear_range=0.5,
          zoom_range=0.2,
          channel_shift_range=0.,
          brightness_range=(1, 5),
          fill_mode='nearest',
          cval=0.5,
          horizontal_flip=True,
          vertical_flip=True)
      generator.fit(images, augment=True)

      transformed = np.copy(images)
      for i, im in enumerate(transformed):
        transformed[i] = generator.random_transform(im)
      transformed = generator.standardize(transformed)

  def test_img_transforms(self):
    """Smoke-test the stand-alone random transform functions."""
    x = np.random.random((3, 200, 200))
    _ = keras.preprocessing.image.random_rotation(x, 20)
    _ = keras.preprocessing.image.random_shift(x, 0.2, 0.2)
    _ = keras.preprocessing.image.random_shear(x, 2.)
    _ = keras.preprocessing.image.random_zoom(x, (0.5, 0.5))
    _ = keras.preprocessing.image.apply_channel_shift(x, 2, 2)
    _ = keras.preprocessing.image.apply_affine_transform(x, 2)
    # zoom_range must have exactly two elements.
    with self.assertRaises(ValueError):
      keras.preprocessing.image.random_zoom(x, (0, 0, 0))
    _ = keras.preprocessing.image.random_channel_shift(x, 2.)
# Run the TensorFlow test runner when executed as a script.
if __name__ == '__main__':
  test.main()
| apache-2.0 |
alexus37/AugmentedRealityChess | pythonAnimations/pyOpenGLChess/engineDirectory/oglc-env/lib/python2.7/site-packages/pip/_vendor/html5lib/treebuilders/_base.py | 915 | 13711 | from __future__ import absolute_import, division, unicode_literals
from pip._vendor.six import text_type
from ..constants import scopingElements, tableInsertModeElements, namespaces
# The scope markers are inserted when entering object elements,
# marquees, table cells, and table captions, and are used to prevent formatting
# from "leaking" into tables, object elements, and marquees.
Marker = None  # sentinel stored in the active-formatting-elements list

# Maps a list-type name (as passed to the tree builder's scope checks) to
# (element set, boolean). The element set delimits the scope; the flag is
# True only for "select" — presumably it inverts the membership test so
# that all elements EXCEPT those listed terminate the scope (confirm
# against the elementInScope implementation).
listElementsMap = {
    None: (frozenset(scopingElements), False),
    "button": (frozenset(scopingElements | set([(namespaces["html"], "button")])), False),
    "list": (frozenset(scopingElements | set([(namespaces["html"], "ol"),
                                              (namespaces["html"], "ul")])), False),
    "table": (frozenset([(namespaces["html"], "html"),
                         (namespaces["html"], "table")]), False),
    "select": (frozenset([(namespaces["html"], "optgroup"),
                          (namespaces["html"], "option")]), True)
}
class Node(object):
    """Abstract tree node.

    Attributes:
        name       - tag name associated with the node
        parent     - parent node, or None for the document node
        value      - payload for text and comment nodes
        attributes - dict of attribute name -> value
        childNodes - child nodes (all elements, possibly other types)
        _flags     - list of miscellaneous flags set on the node
    """
    def __init__(self, name):
        """Create a detached node with tag name *name*."""
        self.name = name
        self.parent = None
        self.value = None
        self.attributes = {}
        self.childNodes = []
        self._flags = []

    def __str__(self):
        rendered = ["%s=\"%s\"" % (attr, val)
                    for attr, val in self.attributes.items()]
        if rendered:
            return "<%s %s>" % (self.name, " ".join(rendered))
        return "<%s>" % (self.name)

    def __repr__(self):
        return "<%s>" % (self.name)

    def appendChild(self, node):
        """Append *node* to this node's children. (Subclass hook.)"""
        raise NotImplementedError

    def insertText(self, data, insertBefore=None):
        """Insert text *data* into this node, before *insertBefore* if
        given, otherwise at the end. (Subclass hook.)"""
        raise NotImplementedError

    def insertBefore(self, node, refNode):
        """Insert *node* as a child directly before *refNode*; raises
        ValueError if *refNode* is not a child. (Subclass hook.)"""
        raise NotImplementedError

    def removeChild(self, node):
        """Remove *node* from this node's children. (Subclass hook.)"""
        raise NotImplementedError

    def reparentChildren(self, newParent):
        """Move every child of this node onto *newParent*.

        Implemented via appendChild so trees that store text outside of
        nodes still move it correctly.
        """
        # XXX - should this method be made more general?
        for child in self.childNodes:
            newParent.appendChild(child)
        self.childNodes = []

    def cloneNode(self):
        """Return a shallow copy: same name and attributes, no parent or
        children. (Subclass hook.)"""
        raise NotImplementedError

    def hasContent(self):
        """True when the node has children or text. (Subclass hook.)"""
        raise NotImplementedError
class ActiveFormattingElements(list):
    """List of active formatting elements implementing the HTML5
    "Noah's Ark" clause: at most three equal entries may exist between
    scope markers; appending a fourth evicts the matching entry at
    which the count reached three.
    """

    def append(self, node):
        equalCount = 0
        # Markers (None) are exempt from the Noah's Ark check.
        if node != Marker:
            # Scan backwards, stopping at the most recent marker.
            for element in self[::-1]:
                if element == Marker:
                    break
                if self.nodesEqual(element, node):
                    equalCount += 1
                if equalCount == 3:
                    # Three equal entries already present: remove the
                    # one we stopped on (the furthest-back of the three).
                    self.remove(element)
                    break
        list.append(self, node)

    def nodesEqual(self, node1, node2):
        # Nodes are "equal" when both the (namespace, name) tuple and
        # the full attribute dict match.
        if not node1.nameTuple == node2.nameTuple:
            return False

        if not node1.attributes == node2.attributes:
            return False

        return True
class TreeBuilder(object):
    """Base treebuilder implementation

    documentClass - the class to use for the bottommost node of a document
    elementClass - the class to use for HTML Elements
    commentClass - the class to use for comments
    doctypeClass - the class to use for doctypes

    Subclasses supply the four node classes below; this base class
    implements the tree-construction bookkeeping from the HTML5 spec
    (open-element stack, active formatting elements, foster parenting).
    """

    # Document class
    documentClass = None

    # The class to use for creating a node
    elementClass = None

    # The class to use for creating comments
    commentClass = None

    # The class to use for creating doctypes
    doctypeClass = None

    # Fragment class
    fragmentClass = None

    def __init__(self, namespaceHTMLElements):
        # With namespacing enabled, elements default to the XHTML
        # namespace as required by the HTML5 parsing algorithm.
        if namespaceHTMLElements:
            self.defaultNamespace = "http://www.w3.org/1999/xhtml"
        else:
            self.defaultNamespace = None
        self.reset()

    def reset(self):
        """Reset all parser state so the builder can be reused."""
        self.openElements = []
        self.activeFormattingElements = ActiveFormattingElements()

        # XXX - rename these to headElement, formElement
        self.headPointer = None
        self.formPointer = None

        # Property: also selects insertElementNormal as the inserter.
        self.insertFromTable = False

        self.document = self.documentClass()

    def elementInScope(self, target, variant=None):
        """Return True if target is in scope on the open-element stack.

        variant selects the scope definition from listElementsMap
        (None, "button", "list", "table" or "select").
        """
        # If we pass a node in we match that. if we pass a string
        # match any node with that name
        exactNode = hasattr(target, "nameTuple")

        listElements, invert = listElementsMap[variant]

        # Walk from the innermost open element outwards.
        for node in reversed(self.openElements):
            if (node.name == target and not exactNode or
                    node == target and exactNode):
                return True
            elif (invert ^ (node.nameTuple in listElements)):
                # Hit a scope boundary before finding the target.
                return False

        assert False  # We should never reach this point

    def reconstructActiveFormattingElements(self):
        # Within this algorithm the order of steps described in the
        # specification is not quite the same as the order of steps in the
        # code. It should still do the same though.

        # Step 1: stop the algorithm when there's nothing to do.
        if not self.activeFormattingElements:
            return

        # Step 2 and step 3: we start with the last element. So i is -1.
        i = len(self.activeFormattingElements) - 1
        entry = self.activeFormattingElements[i]
        if entry == Marker or entry in self.openElements:
            return

        # Step 6
        # Rewind to the most recent entry that is a marker or already
        # open; everything after it needs to be recreated.
        while entry != Marker and entry not in self.openElements:
            if i == 0:
                # This will be reset to 0 below
                i = -1
                break
            i -= 1
            # Step 5: let entry be one earlier in the list.
            entry = self.activeFormattingElements[i]

        while True:
            # Step 7
            i += 1

            # Step 8
            entry = self.activeFormattingElements[i]
            clone = entry.cloneNode()  # Mainly to get a new copy of the attributes

            # Step 9: re-open a fresh element for this formatting entry.
            element = self.insertElement({"type": "StartTag",
                                          "name": clone.name,
                                          "namespace": clone.namespace,
                                          "data": clone.attributes})

            # Step 10: the list now references the newly created element.
            self.activeFormattingElements[i] = element

            # Step 11
            if element == self.activeFormattingElements[-1]:
                break

    def clearActiveFormattingElements(self):
        """Pop formatting entries up to and including the last marker."""
        entry = self.activeFormattingElements.pop()
        while self.activeFormattingElements and entry != Marker:
            entry = self.activeFormattingElements.pop()

    def elementInActiveFormattingElements(self, name):
        """Check if an element exists between the end of the active
        formatting elements and the last marker. If it does, return it, else
        return false"""

        for item in self.activeFormattingElements[::-1]:
            # Check for Marker first because if it's a Marker it doesn't have a
            # name attribute.
            if item == Marker:
                break
            elif item.name == name:
                return item
        return False

    def insertRoot(self, token):
        """Create the root element from token and attach it to the document."""
        element = self.createElement(token)
        self.openElements.append(element)
        self.document.appendChild(element)

    def insertDoctype(self, token):
        """Append a doctype node built from token to the document."""
        name = token["name"]
        publicId = token["publicId"]
        systemId = token["systemId"]

        doctype = self.doctypeClass(name, publicId, systemId)
        self.document.appendChild(doctype)

    def insertComment(self, token, parent=None):
        """Append a comment node; defaults to the current open element."""
        if parent is None:
            parent = self.openElements[-1]
        parent.appendChild(self.commentClass(token["data"]))

    def createElement(self, token):
        """Create an element but don't insert it anywhere"""
        name = token["name"]
        namespace = token.get("namespace", self.defaultNamespace)
        element = self.elementClass(name, namespace)
        element.attributes = token["data"]
        return element

    def _getInsertFromTable(self):
        return self._insertFromTable

    def _setInsertFromTable(self, value):
        """Switch the function used to insert an element from the
        normal one to the misnested table one and back again"""
        self._insertFromTable = value
        if value:
            self.insertElement = self.insertElementTable
        else:
            self.insertElement = self.insertElementNormal

    insertFromTable = property(_getInsertFromTable, _setInsertFromTable)

    def insertElementNormal(self, token):
        name = token["name"]
        assert isinstance(name, text_type), "Element %s not unicode" % name
        namespace = token.get("namespace", self.defaultNamespace)
        element = self.elementClass(name, namespace)
        element.attributes = token["data"]
        # Append to the current node and push onto the open stack.
        self.openElements[-1].appendChild(element)
        self.openElements.append(element)
        return element

    def insertElementTable(self, token):
        """Create an element and insert it into the tree"""
        element = self.createElement(token)
        if self.openElements[-1].name not in tableInsertModeElements:
            return self.insertElementNormal(token)
        else:
            # We should be in the InTable mode. This means we want to do
            # special magic element rearranging (foster parenting).
            parent, insertBefore = self.getTableMisnestedNodePosition()
            if insertBefore is None:
                parent.appendChild(element)
            else:
                parent.insertBefore(element, insertBefore)
            self.openElements.append(element)
        return element

    def insertText(self, data, parent=None):
        """Insert text data."""
        if parent is None:
            parent = self.openElements[-1]

        if (not self.insertFromTable or (self.insertFromTable and
                                         self.openElements[-1].name
                                         not in tableInsertModeElements)):
            parent.insertText(data)
        else:
            # We should be in the InTable mode. This means we want to do
            # special magic element rearranging
            parent, insertBefore = self.getTableMisnestedNodePosition()
            parent.insertText(data, insertBefore)

    def getTableMisnestedNodePosition(self):
        """Get the foster parent element, and sibling to insert before
        (or None) when inserting a misnested table node"""
        # The foster parent element is the one which comes before the most
        # recently opened table element
        # XXX - this is really inelegant
        lastTable = None
        fosterParent = None
        insertBefore = None
        for elm in self.openElements[::-1]:
            if elm.name == "table":
                lastTable = elm
                break
        if lastTable:
            # XXX - we should really check that this parent is actually a
            # node here
            if lastTable.parent:
                fosterParent = lastTable.parent
                insertBefore = lastTable
            else:
                # Detached table: insert into whatever is just below it
                # on the open-element stack.
                fosterParent = self.openElements[
                    self.openElements.index(lastTable) - 1]
        else:
            fosterParent = self.openElements[0]
        return fosterParent, insertBefore

    def generateImpliedEndTags(self, exclude=None):
        """Pop elements whose end tags may be implied, recursively,
        stopping at (and keeping) any element named exclude."""
        name = self.openElements[-1].name
        # XXX td, th and tr are not actually needed
        if (name in frozenset(("dd", "dt", "li", "option", "optgroup", "p", "rp", "rt"))
                and name != exclude):
            self.openElements.pop()
            # XXX This is not entirely what the specification says. We should
            # investigate it more closely.
            self.generateImpliedEndTags(exclude)

    def getDocument(self):
        "Return the final tree"
        return self.document

    def getFragment(self):
        "Return the final fragment"
        # assert self.innerHTML
        fragment = self.fragmentClass()
        self.openElements[0].reparentChildren(fragment)
        return fragment

    def testSerializer(self, node):
        """Serialize the subtree of node in the format required by unit tests

        node - the node from which to start serializing"""
        raise NotImplementedError
| mit |
datacommonsorg/website | server/routes/api/browser.py | 1 | 6125 | # Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Graph browser related handlers."""
import flask
import json
from cache import cache
import services.datacommons as dc
from flask import Response
from flask import request
import routes.api.place as place_api
import logging
# Blueprint for all graph-browser API endpoints, mounted at /api/browser.
bp = flask.Blueprint('api.browser', __name__, url_prefix='/api/browser')

# Sentinel values used when an observation has no measurementMethod /
# observationPeriod, so plain string comparison still works in
# get_observation_id().
NO_MMETHOD_KEY = 'no_mmethod'
NO_OBSPERIOD_KEY = 'no_obsPeriod'
@bp.route('/triples/<path:dcid>')
@cache.memoize(timeout=3600 * 24)  # Cache for one day.
def triple_api(dcid):
    """Returns all the triples given a node dcid.

    Note: @bp.route must be the outermost decorator. Flask registers the
    exact function the route decorator receives, so placing @cache.memoize
    above @bp.route registers the *uncached* function and the cache never
    takes effect (the file's other endpoints already use this order).
    """
    return json.dumps(dc.get_triples([dcid]).get(dcid, []))
@bp.route('/propvals/<path:prop>/<path:dcid>')
@cache.memoize(timeout=3600 * 24)  # Cache for one day.
def get_property_value(dcid, prop):
    """Returns the property values for a given node dcid and property label.

    @bp.route is outermost so Flask registers the memoized function;
    the reverse order would register the uncached view.
    """
    response = dc.fetch_data('/node/property-values', {
        'dcids': [dcid],
        'property': prop,
    },
                             compress=False,
                             post=False)
    result = {
        "property": prop,
        # Values for this dcid; empty dict when the node has none.
        "values": response.get(dcid, {}),
    }
    return Response(json.dumps(result), 200, mimetype='application/json')
@bp.route('/proplabels/<path:dcid>')
@cache.memoize(timeout=3600 * 24)  # Cache for one day.
def get_property_labels(dcid):
    """Returns all property labels given a node dcid.

    @bp.route is outermost so Flask registers the memoized function.
    """
    labels = dc.get_property_labels([dcid]).get(dcid, {})
    return Response(json.dumps(labels), 200, mimetype='application/json')
def get_sparql_query(place_id, stat_var_id, date):
    """Build the SPARQL query that finds StatVarObservation dcids.

    place_id: dcid of the observed place.
    stat_var_id: dcid of the statistical variable measured.
    date: observation date; when falsy, the date is selected as an extra
        output column (?obsDate) instead of being used as a constraint.
    """
    date_triple = "?svObservation observationDate ?obsDate ."
    date_selector = " ?obsDate"
    if date:
        date_triple = f'?svObservation observationDate "{date}" .'
        date_selector = ""
    sparql_query = f"""
    SELECT ?dcid ?mmethod ?obsPeriod{date_selector}
    WHERE {{
        ?svObservation typeOf StatVarObservation .
        ?svObservation variableMeasured {stat_var_id} .
        ?svObservation observationAbout {place_id} .
        ?svObservation dcid ?dcid .
        ?svObservation measurementMethod ?mmethod .
        ?svObservation observationPeriod ?obsPeriod .
        {date_triple}
    }}
    """
    return sparql_query
@bp.route('/observation-id')
@cache.cached(timeout=3600 * 24, query_string=True)  # Cache for one day.
def get_observation_id():
    """Returns the observation node dcid for a combination of predicates:
    observedNodeLocation, statisticalVariable, date, measurementMethod
    (optional), observationPeriod (optional).

    @bp.route is outermost so Flask registers the *cached* view; with
    @cache.cached applied above @bp.route the uncached function would be
    registered and the cache would never fire.
    """
    place_id = request.args.get("place")
    if not place_id:
        return Response(json.dumps("error: must provide a place field"),
                        400,
                        mimetype='application/json')
    stat_var_id = request.args.get("statVar")
    if not stat_var_id:
        return Response(json.dumps("error: must provide a statVar field"),
                        400,
                        mimetype='application/json')
    date = request.args.get("date", "")
    if not date:
        return Response(json.dumps("error: must provide a date field"),
                        400,
                        mimetype='application/json')
    request_mmethod = request.args.get("measurementMethod", NO_MMETHOD_KEY)
    request_obsPeriod = request.args.get("obsPeriod", NO_OBSPERIOD_KEY)
    sparql_query = get_sparql_query(place_id, stat_var_id, date)
    result = ""
    (_, rows) = dc.query(sparql_query)
    for row in rows:
        cells = row.get('cells', [])
        # A date is always supplied here, so the query selects exactly
        # three columns: dcid, measurement method, observation period.
        if len(cells) != 3:
            continue
        dcid = cells[0].get('value', '')
        mmethod = cells[1].get('value', NO_MMETHOD_KEY)
        obsPeriod = cells[2].get('value', NO_OBSPERIOD_KEY)
        if mmethod == request_mmethod and obsPeriod == request_obsPeriod:
            result = dcid
            break
    return Response(json.dumps(result), 200, mimetype='application/json')
@bp.route('/statvar/search')
@cache.cached(timeout=3600 * 24, query_string=True)
def search_statvar():
    """Gets the statvars and statvar groups that match the tokens in the
    query string, optionally restricted to a list of places.
    """
    matches = dc.search_statvar(request.args.get("query"),
                                request.args.getlist("places"))
    return Response(json.dumps(matches), 200, mimetype='application/json')
@bp.route('/statvar/group')
@cache.cached(timeout=3600 * 24, query_string=True)
def get_statvar_group():
    """Gets the stat var group node information.

    Retrieves the adjacent nodes — child stat vars, child stat var groups
    and parent stat var groups — for the given stat var group node.
    """
    payload = dc.get_statvar_group(request.args.get("stat_var_group"),
                                   request.args.getlist("places"))
    return Response(json.dumps(payload), 200, mimetype='application/json')
@bp.route('/statvar/path')
@cache.cached(timeout=3600 * 24, query_string=True)
def get_statvar_path():
    """Gets the path of a stat var to the root of the hierarchy."""
    statvar_id = request.args.get("id")
    path_result = dc.get_statvar_path(statvar_id)
    return Response(json.dumps(path_result), 200,
                    mimetype='application/json')
@bp.route('/num_stat_vars/<path:dcid>')
@cache.memoize(timeout=3600 * 24)  # Cache for one day.
def get_num_statvars(dcid):
    """Returns the number of stat vars for a dcid.

    @bp.route is outermost so Flask registers the memoized function;
    the previous order registered the uncached view.
    """
    statsvars = place_api.statsvars(dcid)
    num_statvars = len(statsvars)
    return Response(json.dumps(num_statvars), 200, mimetype='application/json')
jimi-c/ansible | test/units/modules/network/nxos/test_nxos_config.py | 11 | 8373 | # (c) 2016 Red Hat Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from ansible.compat.tests.mock import patch, MagicMock
from ansible.modules.network.nxos import nxos_config
from ansible.plugins.cliconf.nxos import Cliconf
from .nxos_module import TestNxosModule, load_fixture, set_module_args
class TestNxosConfigModule(TestNxosModule):
    """Unit tests for the nxos_config Ansible module.

    All device access is mocked out in setUp(); each test drives the
    module through set_module_args()/execute_module() and inspects the
    commands the module would have pushed to the device.
    """

    module = nxos_config

    def setUp(self):
        super(TestNxosConfigModule, self).setUp()

        # Patch the module-level helpers so no real connection is made.
        self.mock_get_config = patch('ansible.modules.network.nxos.nxos_config.get_config')
        self.get_config = self.mock_get_config.start()

        self.mock_load_config = patch('ansible.modules.network.nxos.nxos_config.load_config')
        self.load_config = self.mock_load_config.start()

        self.mock_save_config = patch('ansible.modules.network.nxos.nxos_config.save_config')
        self.save_config = self.mock_save_config.start()

        self.mock_get_connection = patch('ansible.modules.network.nxos.nxos_config.get_connection')
        self.get_connection = self.mock_get_connection.start()

        self.conn = self.get_connection()
        self.conn.edit_config = MagicMock()

        self.mock_run_commands = patch('ansible.modules.network.nxos.nxos_config.run_commands')
        self.run_commands = self.mock_run_commands.start()

        # Real Cliconf instance: used to compute genuine config diffs
        # that the mocked connection then returns.
        self.cliconf_obj = Cliconf(MagicMock())
        self.running_config = load_fixture('nxos_config', 'config.cfg')

    def tearDown(self):
        super(TestNxosConfigModule, self).tearDown()
        self.mock_get_config.stop()
        self.mock_load_config.stop()
        self.mock_run_commands.stop()
        self.mock_get_connection.stop()

    def load_fixtures(self, commands=None, device=''):
        # Every test starts from the same canned running-config.
        self.get_config.return_value = load_fixture('nxos_config', 'config.cfg')
        self.load_config.return_value = None

    def test_nxos_config_no_change(self):
        # Config line already present: the diff is empty, nothing changes.
        lines = ['hostname localhost']
        args = dict(lines=lines)
        self.conn.get_diff = MagicMock(return_value=self.cliconf_obj.get_diff('\n'.join(lines), self.running_config))
        set_module_args(args)
        result = self.execute_module()

    def test_nxos_config_src(self):
        src = load_fixture('nxos_config', 'candidate.cfg')
        args = dict(src=src)
        self.conn.get_diff = MagicMock(return_value=self.cliconf_obj.get_diff(src, self.running_config))
        set_module_args(args)
        result = self.execute_module(changed=True)
        config = ['hostname switch01', 'interface Ethernet1',
                  'description test interface', 'no shutdown', 'ip routing']
        self.assertEqual(sorted(config), sorted(result['commands']), result['commands'])

    def test_nxos_config_replace_src(self):
        set_module_args(dict(replace_src='bootflash:config', replace='config'))
        self.conn.get_diff = MagicMock(return_value=self.cliconf_obj.get_diff(self.running_config, self.running_config, diff_replace='config'))
        result = self.execute_module(changed=True)
        self.assertEqual(result['commands'], ['config replace bootflash:config'])

    def test_nxos_config_lines(self):
        # Only the line missing from the running config is pushed.
        lines = ['hostname switch01', 'ip domain-name eng.ansible.com']
        args = dict(lines=lines)
        self.conn.get_diff = MagicMock(return_value=self.cliconf_obj.get_diff('\n'.join(lines), self.running_config))
        set_module_args(args)
        result = self.execute_module(changed=True)
        config = ['hostname switch01']
        self.assertEqual(sorted(config), sorted(result['commands']), result['commands'])

    def test_nxos_config_before(self):
        lines = ['hostname switch01', 'ip domain-name eng.ansible.com']
        args = dict(lines=lines,
                    before=['before command'])
        self.conn.get_diff = MagicMock(return_value=self.cliconf_obj.get_diff('\n'.join(lines), self.running_config))
        set_module_args(args)
        result = self.execute_module(changed=True)
        config = ['before command', 'hostname switch01']
        self.assertEqual(sorted(config), sorted(result['commands']), result['commands'])
        # 'before' commands must be emitted first.
        self.assertEqual('before command', result['commands'][0])

    def test_nxos_config_after(self):
        lines = ['hostname switch01', 'ip domain-name eng.ansible.com']
        args = dict(lines=lines,
                    after=['after command'])
        self.conn.get_diff = MagicMock(return_value=self.cliconf_obj.get_diff('\n'.join(lines), self.running_config))
        set_module_args(args)
        result = self.execute_module(changed=True)
        config = ['after command', 'hostname switch01']
        self.assertEqual(sorted(config), sorted(result['commands']), result['commands'])
        # 'after' commands must be emitted last.
        self.assertEqual('after command', result['commands'][-1])

    def test_nxos_config_parents(self):
        lines = ['ip address 1.2.3.4/5', 'no shutdown']
        parents = ['interface Ethernet10']
        args = dict(lines=lines, parents=parents)
        self.conn.get_diff = MagicMock(return_value=self.cliconf_obj.get_diff('\n'.join(parents + lines), self.running_config, path=parents))
        set_module_args(args)
        result = self.execute_module(changed=True)
        config = ['interface Ethernet10', 'ip address 1.2.3.4/5', 'no shutdown']
        self.assertEqual(config, result['commands'], result['commands'])

    # The following tests verify mutually-exclusive / required argument
    # combinations are rejected.

    def test_nxos_config_src_and_lines_fails(self):
        args = dict(src='foo', lines='foo')
        set_module_args(args)
        result = self.execute_module(failed=True)

    def test_nxos_config_src_and_parents_fails(self):
        args = dict(src='foo', parents='foo')
        set_module_args(args)
        result = self.execute_module(failed=True)

    def test_nxos_config_match_exact_requires_lines(self):
        args = dict(match='exact')
        set_module_args(args)
        result = self.execute_module(failed=True)

    def test_nxos_config_match_strict_requires_lines(self):
        args = dict(match='strict')
        set_module_args(args)
        result = self.execute_module(failed=True)

    def test_nxos_config_replace_block_requires_lines(self):
        args = dict(replace='block')
        set_module_args(args)
        result = self.execute_module(failed=True)

    def test_nxos_config_replace_config_requires_src(self):
        args = dict(replace='config')
        set_module_args(args)
        result = self.execute_module(failed=True)

    def test_nxos_config_backup_returns__backup__(self):
        args = dict(backup=True)
        set_module_args(args)
        result = self.execute_module()
        self.assertIn('__backup__', result)

    def test_nxos_config_save_always(self):
        # save_when=always saves even when nothing changed.
        args = dict(save_when='always')
        set_module_args(args)
        self.execute_module()
        self.assertEqual(self.save_config.call_count, 1)
        self.assertEqual(self.get_config.call_count, 0)
        self.assertEqual(self.load_config.call_count, 0)

    def test_nxos_config_save_changed_true(self):
        args = dict(save_when='changed', lines=['hostname foo', 'interface GigabitEthernet0/0', 'no ip address'])
        set_module_args(args)
        self.execute_module(changed=True)
        self.assertEqual(self.save_config.call_count, 1)
        self.assertEqual(self.get_config.call_count, 1)
        self.assertEqual(self.load_config.call_count, 1)

    def test_nxos_config_save_changed_false(self):
        # No diff => no save, no config fetch, no load.
        args = dict(save_when='changed')
        set_module_args(args)
        self.execute_module()
        self.assertEqual(self.save_config.call_count, 0)
        self.assertEqual(self.get_config.call_count, 0)
        self.assertEqual(self.load_config.call_count, 0)
| gpl-3.0 |
provaleks/o8 | openerp/addons/base/module/wizard/__init__.py | 365 | 1250 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import base_module_update
import base_language_install
import base_import_language
import base_module_upgrade
import base_module_configuration
import base_export_language
import base_update_translations
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
stevehof/location-ninja | lib/flask_admin/tools.py | 7 | 2422 | import sys
import traceback
# Python 3 compatibility
from ._compat import reduce
def import_module(name, required=True):
    """
    Import module by name.

    :param name:
        Module name
    :param required:
        If `True` (default), a missing module raises ImportError;
        if `False`, a missing module yields `None`.  An ImportError
        raised from code *inside* the module always propagates.
    """
    try:
        __import__(name, globals(), locals(), [])
    except ImportError:
        # Re-raise unless the caller tolerates a missing module and the
        # error really means "module not found" (not a nested failure).
        if required or not module_not_found():
            raise
        return None
    return sys.modules[name]
def import_attribute(name):
    """
    Import attribute using string reference.

    :param name:
        Dotted string reference of the form ``"pkg.module.attr"``.

    Raises ImportError or AttributeError if module or attribute do not exist.

    Example::

        import_attribute('a.b.c.foo')
    """
    module_path, attr_name = name.rsplit('.', 1)
    # fromlist forces __import__ to return the leaf module, not the
    # top-level package.
    module = __import__(module_path, globals(), locals(), [attr_name])
    return getattr(module, attr_name)
def module_not_found(additional_depth=0):
    """
    Return True when the ImportError currently being handled means the
    imported module itself is missing, as opposed to an ImportError
    raised by code *inside* that module (which produces a deeper
    traceback).

    :param additional_depth:
        extra call-stack depth between the failing import and this
        check — e.g. pass 1 if the import ran inside a helper function
        called from the ``except`` block's frame.
    """
    tb = sys.exc_info()[2]
    # A direct "module does not exist" failure leaves exactly one
    # traceback frame (plus any explicitly declared extra depth).
    return len(traceback.extract_tb(tb)) <= 1 + additional_depth
def rec_getattr(obj, attr, default=None):
    """
    Recursive getattr.

    :param attr:
        Dot delimited attribute name, e.g. ``'a.b.c'``
    :param default:
        Value returned when any link in the chain is missing

    Example::

        rec_getattr(obj, 'a.b.c')
    """
    current = obj
    try:
        for part in attr.split('.'):
            current = getattr(current, part)
    except AttributeError:
        return default
    return current
def get_dict_attr(obj, attr, default=None):
    """
    Get attribute of the object without triggering its __getattr__ or
    any descriptor protocol: the raw ``__dict__`` of the instance and
    then of each class in its MRO is searched directly.

    :param obj:
        Object
    :param attr:
        Attribute name
    :param default:
        Default value if attribute was not found
    """
    for candidate in [obj] + obj.__class__.mro():
        if attr in candidate.__dict__:
            return candidate.__dict__[attr]
    return default
| gpl-3.0 |
sayplastic/numword | numword/numword_es.py | 2 | 5123 | # coding: utf-8
#This file is part of numword. The COPYRIGHT file at the top level of
#this repository contains the full copyright notices and license terms.
'''
numword for ES
'''
from numword_eu import NumWordEU
#TODO correct orthographics
#TODO error messages
class NumWordES(NumWordEU):
    '''
    NumWord ES: spell numbers in Spanish.

    Orthography of the word tables is corrected per the module TODO:
    "veinte"-series numerals (veintiuno..veintinueve) and acute accents
    (é, ó) replace the previously misspelled forms with grave accents.
    '''
    def __init__(self):
        super(NumWordES, self).__init__()
        self.gender_stem = ''

    #TODO Is this sufficient??
    def _set_high_numwords(self, high):
        '''
        Set high numwords (millón, billón, ...)
        '''
        max_val = 3 + 6*len(high)
        for word, i in zip(high, range(max_val, 3, -6)):
            self.cards[10**(i - 3)] = word + u"millón"[-5:]

    def _setup(self):
        '''
        Set up the Spanish word tables
        '''
        lows = [u"cuatr", u"tr", u"b", u"m"]
        self.high_numwords = self._gen_high_numwords([], [], lows)
        self.negword = u"menos "
        self.pointword = u"punto"
        self.errmsg_nonnum = u"Only numbers may be converted to words."
        self.errmsg_toobig = u"Number is too large to convert to words."
        self.gender_stem = u"o"
        self.exclude_title = [u"y", u"menos", u"punto"]
        self.mid_numwords = [(1000, u"mil"), (100, u"cien"), (90, u"noventa"),
            (80, u"ochenta"), (70, u"setenta"), (60, u"sesenta"),
            (50, u"cincuenta"), (40, u"cuarenta")]
        # Corrected spellings (previously e.g. "vientinueve", "viente").
        self.low_numwords = [u"veintinueve", u"veintiocho", u"veintisiete",
            u"veintiséis", u"veinticinco", u"veinticuatro", u"veintitrés",
            u"veintidós", u"veintiuno", u"veinte", u"diecinueve",
            u"dieciocho", u"diecisiete", u"dieciséis", u"quince",
            u"catorce", u"trece", u"doce", u"once", u"diez", u"nueve",
            u"ocho", u"siete", u"seis", u"cinco", u"cuatro", u"tres",
            u"dos", u"uno", u"cero"]
        # Ordinal stems; gender suffix (o/a) is appended in ordinal().
        self.ords = {
            1: u"primer",
            2: u"segund",
            3: u"tercer",
            4: u"cuart",
            5: u"quint",
            6: u"sext",
            7: u"séptim",
            8: u"octav",
            9: u"noven",
            10 : u"décim",
            }

    def _merge(self, curr, next):
        '''
        Merge two (text, number) pairs according to Spanish grammar
        '''
        ctext, cnum, ntext, nnum = curr + next

        if cnum == 1:
            if nnum < 1000000:
                return next
            # "un millón", not "uno millón"
            ctext = u"un"
        elif cnum == 100:
            # "cien" -> "ciento"/"cienta" when followed by a smaller number
            ctext += u"t" + self.gender_stem

        if nnum < cnum:
            if cnum < 100:
                # tens and units joined with "y": "cuarenta y dos"
                return (u"%s y %s"%(ctext, ntext), cnum + nnum)
            return (u"%s %s"%(ctext, ntext), cnum + nnum)
        elif (not nnum % 1000000) and cnum > 1:
            # Pluralize millón/billón/...: strip the two chars "ón" and
            # append "ones" ("millón" -> "millones").  The previous
            # [:-3] slice dropped an extra letter ("milones").
            ntext = ntext[:-2] + u"ones"

        if nnum == 100:
            # Irregular hundreds: quinientos, setecientos, novecientos.
            if cnum == 5:
                ctext = u"quinien"
                ntext = u""
            elif cnum == 7:
                ctext = u"sete"
            elif cnum == 9:
                ctext = u"nove"
            ntext += u"t" + self.gender_stem + u"s"
        else:
            ntext = u" " + ntext

        return (ctext + ntext, cnum * nnum)

    def ordinal(self, value):
        '''
        Convert to ordinal
        '''
        self._verify_ordinal(value)
        try:
            return self.ords[value] + self.gender_stem
        except KeyError:
            # No dedicated ordinal word: fall back to the cardinal.
            return self.cardinal(value)

    def ordinal_number(self, value):
        '''
        Convert to ordinal number
        '''
        self._verify_ordinal(value)
        # TODO(review): should honour gender (° vs ª) — confirm.
        return u"%s°" % value

    def currency(self, val, longval=True, old=False):
        '''
        Convert to currency; ``old`` selects pesos/pesetas wording.
        '''
        self.precision = 2
        if old:
            return self._split(val, hightxt=u"peso/s", lowtxt=u"peseta/s",
                split_precision=0, jointxt=u"y", longval=longval)
        return super(NumWordES, self).currency(val, jointxt=u"y",
            longval=longval)
# Shared converter instance backing the module-level helper functions below.
_NW = NumWordES()


def cardinal(value):
    '''
    Convert ``value`` to its Spanish cardinal spelling
    '''
    return _NW.cardinal(value)


def ordinal(value):
    '''
    Convert ``value`` to its Spanish ordinal spelling
    '''
    return _NW.ordinal(value)


def ordinal_number(value):
    '''
    Convert ``value`` to an ordinal number string (e.g. u"5°")
    '''
    return _NW.ordinal_number(value)


def currency(value, longval=True, old=False):
    '''
    Convert ``value`` to spelled-out currency; ``old`` selects
    pesos/pesetas wording
    '''
    return _NW.currency(value, longval=longval, old=old)


def year(value, longval=True):
    '''
    Convert ``value`` to a spelled-out year
    '''
    return _NW.year(value, longval=longval)
def main():
    '''
    Main: spell a list of representative values (exercises the
    converter's self-test helper on units, tens, irregular hundreds,
    thousands, millions, negatives, floats and a very large integer).
    '''
    for val in [ 1, 11, 12, 21, 31, 33, 71, 80, 81, 91, 99, 100, 101, 102, 120, 155,
            180, 300, 308, 832, 1000, 1001, 1061, 1100, 1120, 1500, 1701, 1800,
            2000, 2010, 2099, 2171, 3000, 8280, 8291, 150000, 500000, 1000000,
            2000000, 2000001, -21212121211221211111, -2.121212, -1.0000100,
            1325325436067876801768700107601001012212132143210473207540327057320957032975032975093275093275093270957329057320975093272950730]:
        _NW.test(val)
| lgpl-2.1 |
offbye/paparazzi | sw/airborne/test/stabilization/setup.py | 83 | 2143 | from distutils.core import setup, Extension
from Cython.Build import cythonize
from distutils.extension import Extension
import numpy

from os import path, getenv

# if PAPARAZZI_SRC not set, then assume the tree containing this
# file is a reasonable substitute
pprz_src = getenv("PAPARAZZI_SRC", path.normpath(path.join(path.dirname(path.abspath(__file__)), '../../../../')))
pprz_airborne = path.join(pprz_src, "sw/airborne")

# Include paths shared by every extension: this dir, paparazzi public
# headers, the airborne sources and numpy's C headers.
common_inc_dirs = ["./", path.join(pprz_src, "sw/include"), pprz_airborne, numpy.get_include()]

# Standalone math wrappers (float and fixed-point algebra).
algebra_float = Extension("algebra_float", sources=['algebra_float.pyx', path.join(pprz_airborne, 'math/pprz_algebra_float.c')],
                          include_dirs=common_inc_dirs)

algebra_int = Extension("algebra_int", sources=['algebra_int.pyx', path.join(pprz_airborne, 'math/pprz_trig_int.c'), path.join(pprz_airborne, 'math/pprz_algebra_int.c')],
                        include_dirs=common_inc_dirs)

# Attitude-reference wrappers additionally need the rotorcraft firmware
# headers and a -D flag selecting the float/int attitude representation.
includedirs = common_inc_dirs + [path.join(pprz_airborne, "firmwares/rotorcraft")]

ext_quat_float = Extension("ref_quat_float",
                           sources=['ref_quat_float.pyx', path.join(pprz_airborne, 'math/pprz_algebra_float.c'),
                                    path.join(pprz_airborne, "firmwares/rotorcraft/stabilization/stabilization_attitude_ref_quat_float.c")],
                           include_dirs=includedirs,
                           extra_compile_args=["-std=c99", "-DSTABILIZATION_ATTITUDE_TYPE_FLOAT"])

ext_quat_int = Extension("ref_quat_int",
                         sources=['ref_quat_int.pyx',
                                  path.join(pprz_airborne, 'math/pprz_trig_int.c'),
                                  path.join(pprz_airborne, 'math/pprz_algebra_int.c'),
                                  path.join(pprz_airborne, "firmwares/rotorcraft/stabilization/stabilization_attitude_ref_quat_int.c")],
                         include_dirs=includedirs,
                         extra_compile_args=["-std=c99", "-DSTABILIZATION_ATTITUDE_TYPE_INT"])

extensions = [algebra_float, algebra_int, ext_quat_float, ext_quat_int]

setup(
    ext_modules=cythonize(extensions)
)
| gpl-2.0 |
simgunz/anki | pylib/anki/importing/anki2.py | 1 | 17394 | # Copyright: Ankitects Pty Ltd and contributors
# License: GNU AGPL, version 3 or later; http://www.gnu.org/licenses/agpl.html
import os
import unicodedata
from typing import Any, Dict, List, Optional, Tuple
from anki.collection import Collection
from anki.consts import *
from anki.decks import DeckManager
from anki.importing.base import Importer
from anki.lang import TR
from anki.utils import intTime, joinFields, splitFields, stripHTMLMedia
# Column indices into rows returned by "select * from notes" — presumably
# (id=0, guid=1, mid=2, mod=3) per the Anki notes schema; confirm against
# the collection schema.
GUID = 1
MID = 2
MOD = 3
class Anki2Importer(Importer):
    """Import notes/cards/decks/media from another .anki2/.anki21 collection.

    Works directly on raw sqlite rows: note and card rows are treated as
    mutable lists indexed by column position (see GUID/MID/MOD above).
    """
    needMapper = False
    deckPrefix: Optional[str] = None
    allowUpdate = True
    src: Collection
    dst: Collection
    def __init__(self, col: Collection, file: str) -> None:
        super().__init__(col, file)
        # set later, defined here for typechecking
        self._decks: Dict[int, int] = {}
        self.source_needs_upgrade = False
    def run(self, media: None = None) -> None:
        """Open the source collection, import everything, then close it."""
        self._prepareFiles()
        if media is not None:
            # Anki1 importer has provided us with a custom media folder
            self.src.media._dir = media
        try:
            self._import()
        finally:
            # never persist changes to the source collection
            self.src.close(save=False, downgrade=False)
    def _prepareFiles(self) -> None:
        # Open source, decide whether its scheduling data must be upgraded
        # to the v2 scheduler before cards can be imported.
        importingV2 = self.file.endswith(".anki21")
        self.source_needs_upgrade = False
        self.dst = self.col
        self.src = Collection(self.file)
        if not importingV2 and self.col.schedVer() != 1:
            # any scheduling included?
            if self.src.db.scalar("select 1 from cards where queue != 0 limit 1"):
                self.source_needs_upgrade = True
        elif importingV2 and self.col.schedVer() == 1:
            raise Exception("must upgrade to new scheduler to import this file")
    def _import(self) -> None:
        # Orchestrates the full import; order matters (models before notes,
        # notes before cards).
        self._decks = {}
        if self.deckPrefix:
            id = self.dst.decks.id(self.deckPrefix)
            self.dst.decks.select(id)
        self._prepareTS()
        self._prepareModels()
        self._importNotes()
        self._importCards()
        self._importStaticMedia()
        self._postImport()
        self.dst.optimize()
    # Notes
    ######################################################################
    def _logNoteRow(self, action: str, noteRow: List[str]) -> None:
        # noteRow[6] is the fields column; \x1f separates individual fields.
        self.log.append(
            "[%s] %s" % (action, stripHTMLMedia(noteRow[6].replace("\x1f", ", ")))
        )
    def _importNotes(self) -> None:
        """Copy notes from src to dst, de-duplicating by guid."""
        # build guid -> (id,mod,mid) hash & map of existing note ids
        self._notes: Dict[str, Tuple[int, int, int]] = {}
        existing = {}
        for id, guid, mod, mid in self.dst.db.execute(
            "select id, guid, mod, mid from notes"
        ):
            self._notes[guid] = (id, mod, mid)
            existing[id] = True
        # we ignore updates to changed schemas. we need to note the ignored
        # guids, so we avoid importing invalid cards
        self._ignoredGuids: Dict[str, bool] = {}
        # iterate over source collection
        add = []
        update = []
        dirty = []
        usn = self.dst.usn()
        dupesIdentical = []
        dupesIgnored = []
        total = 0
        for note in self.src.db.execute("select * from notes"):
            total += 1
            # turn the db result into a mutable list
            note = list(note)
            shouldAdd = self._uniquifyNote(note)
            if shouldAdd:
                # ensure id is unique
                while note[0] in existing:
                    note[0] += 999
                existing[note[0]] = True
                # bump usn
                note[4] = usn
                # update media references in case of dupes
                note[6] = self._mungeMedia(note[MID], note[6])
                add.append(note)
                dirty.append(note[0])
                # note we have the added the guid
                self._notes[note[GUID]] = (note[0], note[3], note[MID])
            else:
                # a duplicate or changed schema - safe to update?
                if self.allowUpdate:
                    oldNid, oldMod, oldMid = self._notes[note[GUID]]
                    # will update if incoming note more recent
                    if oldMod < note[MOD]:
                        # safe if note types identical
                        if oldMid == note[MID]:
                            # incoming note should use existing id
                            note[0] = oldNid
                            note[4] = usn
                            note[6] = self._mungeMedia(note[MID], note[6])
                            update.append(note)
                            dirty.append(note[0])
                        else:
                            dupesIgnored.append(note)
                            self._ignoredGuids[note[GUID]] = True
                else:
                    dupesIdentical.append(note)
        # summary lines first, then the per-note detail lines
        self.log.append(self.dst.tr(TR.IMPORTING_NOTES_FOUND_IN_FILE, val=total))
        if dupesIgnored:
            self.log.append(
                self.dst.tr(
                    TR.IMPORTING_NOTES_THAT_COULD_NOT_BE_IMPORTED, val=len(dupesIgnored)
                )
            )
        if update:
            self.log.append(
                self.dst.tr(
                    TR.IMPORTING_NOTES_UPDATED_AS_FILE_HAD_NEWER, val=len(update)
                )
            )
        if add:
            self.log.append(
                self.dst.tr(TR.IMPORTING_NOTES_ADDED_FROM_FILE, val=len(add))
            )
        if dupesIdentical:
            self.log.append(
                self.dst.tr(
                    TR.IMPORTING_NOTES_SKIPPED_AS_THEYRE_ALREADY_IN,
                    val=len(dupesIdentical),
                )
            )
        self.log.append("")
        if dupesIgnored:
            for row in dupesIgnored:
                self._logNoteRow(self.dst.tr(TR.IMPORTING_SKIPPED), row)
        if update:
            for row in update:
                self._logNoteRow(self.dst.tr(TR.IMPORTING_UPDATED), row)
        if add:
            for row in add:
                self._logNoteRow(self.dst.tr(TR.ADDING_ADDED), row)
        if dupesIdentical:
            for row in dupesIdentical:
                self._logNoteRow(self.dst.tr(TR.IMPORTING_IDENTICAL), row)
        # export info for calling code
        self.dupes = len(dupesIdentical)
        self.added = len(add)
        self.updated = len(update)
        # add to col
        self.dst.db.executemany(
            "insert or replace into notes values (?,?,?,?,?,?,?,?,?,?,?)", add
        )
        self.dst.db.executemany(
            "insert or replace into notes values (?,?,?,?,?,?,?,?,?,?,?)", update
        )
        self.dst.updateFieldCache(dirty)
    # determine if note is a duplicate, and adjust mid and/or guid as required
    # returns true if note should be added
    def _uniquifyNote(self, note: List[Any]) -> bool:
        origGuid = note[GUID]
        srcMid = note[MID]
        dstMid = self._mid(srcMid)
        # duplicate schemas?
        if srcMid == dstMid:
            return origGuid not in self._notes
        # differing schemas and note doesn't exist?
        note[MID] = dstMid
        if origGuid not in self._notes:
            return True
        # schema changed; don't import
        self._ignoredGuids[origGuid] = True
        return False
    # Models
    ######################################################################
    # Models in the two decks may share an ID but not a schema, so we need to
    # compare the field & template signature rather than just rely on ID. If
    # the schemas don't match, we increment the mid and try again, creating a
    # new model if necessary.
    def _prepareModels(self) -> None:
        "Prepare index of schema hashes."
        self._modelMap: Dict[int, int] = {}
    def _mid(self, srcMid: int) -> Any:
        "Return local id for remote MID."
        # already processed this mid?
        if srcMid in self._modelMap:
            return self._modelMap[srcMid]
        mid = srcMid
        srcModel = self.src.models.get(srcMid)
        srcScm = self.src.models.scmhash(srcModel)
        while True:
            # missing from target col?
            if not self.dst.models.have(mid):
                # copy it over
                model = srcModel.copy()
                model["id"] = mid
                model["usn"] = self.col.usn()
                self.dst.models.update(model)
                break
            # there's an existing model; do the schemas match?
            dstModel = self.dst.models.get(mid)
            dstScm = self.dst.models.scmhash(dstModel)
            if srcScm == dstScm:
                # copy styling changes over if newer
                if srcModel["mod"] > dstModel["mod"]:
                    model = srcModel.copy()
                    model["id"] = mid
                    model["usn"] = self.col.usn()
                    self.dst.models.update(model)
                break
            # as they don't match, try next id
            mid += 1
        # save map and return new mid
        self._modelMap[srcMid] = mid
        return mid
    # Decks
    ######################################################################
    def _did(self, did: int) -> Any:
        "Given did in src col, return local id."
        # already converted?
        if did in self._decks:
            return self._decks[did]
        # get the name in src
        g = self.src.decks.get(did)
        name = g["name"]
        # if there's a prefix, replace the top level deck
        if self.deckPrefix:
            tmpname = "::".join(DeckManager.path(name)[1:])
            name = self.deckPrefix
            if tmpname:
                name += f"::{tmpname}"
        # manually create any parents so we can pull in descriptions
        head = ""
        for parent in DeckManager.immediate_parent_path(name):
            if head:
                head += "::"
            head += parent
            # recursion terminates because parents are shorter names
            idInSrc = self.src.decks.id(head)
            self._did(idInSrc)
        # if target is a filtered deck, we'll need a new deck name
        deck = self.dst.decks.byName(name)
        if deck and deck["dyn"]:
            name = "%s %d" % (name, intTime())
        # create in local
        newid = self.dst.decks.id(name)
        # pull conf over
        if "conf" in g and g["conf"] != 1:
            conf = self.src.decks.get_config(g["conf"])
            self.dst.decks.save(conf)
            self.dst.decks.update_config(conf)
            g2 = self.dst.decks.get(newid)
            g2["conf"] = g["conf"]
            self.dst.decks.save(g2)
        # save desc
        deck = self.dst.decks.get(newid)
        deck["desc"] = g["desc"]
        self.dst.decks.save(deck)
        # add to deck map and return
        self._decks[did] = newid
        return newid
    # Cards
    ######################################################################
    def _importCards(self) -> None:
        """Copy cards and their review logs, remapping ids and due dates."""
        if self.source_needs_upgrade:
            self.src.upgrade_to_v2_scheduler()
        # build map of (guid, ord) -> cid and used id cache
        self._cards: Dict[Tuple[str, int], int] = {}
        existing = {}
        for guid, ord, cid in self.dst.db.execute(
            "select f.guid, c.ord, c.id from cards c, notes f " "where c.nid = f.id"
        ):
            existing[cid] = True
            self._cards[(guid, ord)] = cid
        # loop through src
        cards = []
        revlog = []
        cnt = 0
        usn = self.dst.usn()
        # day offset between the two collections' scheduling epochs
        aheadBy = self.src.sched.today - self.dst.sched.today
        for card in self.src.db.execute(
            "select f.guid, f.mid, c.* from cards c, notes f " "where c.nid = f.id"
        ):
            guid = card[0]
            if guid in self._ignoredGuids:
                continue
            # does the card's note exist in dst col?
            if guid not in self._notes:
                continue
            # does the card already exist in the dst col?
            ord = card[5]
            if (guid, ord) in self._cards:
                # fixme: in future, could update if newer mod time
                continue
            # doesn't exist. strip off note info, and save src id for later
            card = list(card[2:])
            scid = card[0]
            # ensure the card id is unique
            while card[0] in existing:
                card[0] += 999
            existing[card[0]] = True
            # update cid, nid, etc
            card[1] = self._notes[guid][0]
            card[2] = self._did(card[2])
            card[4] = intTime()
            card[5] = usn
            # review cards have a due date relative to collection
            if (
                card[7] in (QUEUE_TYPE_REV, QUEUE_TYPE_DAY_LEARN_RELEARN)
                or card[6] == CARD_TYPE_REV
            ):
                card[8] -= aheadBy
            # odue needs updating too
            if card[14]:
                card[14] -= aheadBy
            # if odid true, convert card from filtered to normal
            if card[15]:
                # odid
                card[15] = 0
                # odue
                card[8] = card[14]
                card[14] = 0
                # queue
                if card[6] == CARD_TYPE_LRN:  # type
                    card[7] = QUEUE_TYPE_NEW
                else:
                    card[7] = card[6]
                # type
                if card[6] == CARD_TYPE_LRN:
                    card[6] = CARD_TYPE_NEW
            cards.append(card)
            # we need to import revlog, rewriting card ids and bumping usn
            for rev in self.src.db.execute("select * from revlog where cid = ?", scid):
                rev = list(rev)
                rev[1] = card[0]
                rev[2] = self.dst.usn()
                revlog.append(rev)
            cnt += 1
        # apply
        self.dst.db.executemany(
            """
insert or ignore into cards values (?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?)""",
            cards,
        )
        self.dst.db.executemany(
            """
insert or ignore into revlog values (?,?,?,?,?,?,?,?,?)""",
            revlog,
        )
    # Media
    ######################################################################
    # note: this func only applies to imports of .anki2. for .apkg files, the
    # apkg importer does the copying
    def _importStaticMedia(self) -> None:
        # Import any '_foo' prefixed media files regardless of whether
        # they're used on notes or not
        dir = self.src.media.dir()
        if not os.path.exists(dir):
            return
        for fname in os.listdir(dir):
            if fname.startswith("_") and not self.dst.media.have(fname):
                self._writeDstMedia(fname, self._srcMediaData(fname))
    def _mediaData(self, fname: str, dir: Optional[str] = None) -> bytes:
        # Read a media file's raw bytes; empty bytes if unreadable/missing.
        if not dir:
            dir = self.src.media.dir()
        path = os.path.join(dir, fname)
        try:
            with open(path, "rb") as f:
                return f.read()
        except OSError:
            return b""
    def _srcMediaData(self, fname: str) -> bytes:
        "Data for FNAME in src collection."
        return self._mediaData(fname, self.src.media.dir())
    def _dstMediaData(self, fname: str) -> bytes:
        "Data for FNAME in dst collection."
        return self._mediaData(fname, self.dst.media.dir())
    def _writeDstMedia(self, fname: str, data: bytes) -> None:
        # NFC-normalize the filename so it matches the media db's form.
        path = os.path.join(self.dst.media.dir(), unicodedata.normalize("NFC", fname))
        try:
            with open(path, "wb") as f:
                f.write(data)
        except OSError:
            # the user likely used subdirectories
            pass
    def _mungeMedia(self, mid: int, fieldsStr: str) -> str:
        """Rewrite media references in note fields, deduping name clashes."""
        fields = splitFields(fieldsStr)
        def repl(match):
            fname = match.group("fname")
            srcData = self._srcMediaData(fname)
            dstData = self._dstMediaData(fname)
            if not srcData:
                # file was not in source, ignore
                return match.group(0)
            # if model-local file exists from a previous import, use that
            name, ext = os.path.splitext(fname)
            lname = f"{name}_{mid}{ext}"
            if self.dst.media.have(lname):
                return match.group(0).replace(fname, lname)
            # if missing or the same, pass unmodified
            elif not dstData or srcData == dstData:
                # need to copy?
                if not dstData:
                    self._writeDstMedia(fname, srcData)
                return match.group(0)
            # exists but does not match, so we need to dedupe
            self._writeDstMedia(lname, srcData)
            return match.group(0).replace(fname, lname)
        for i in range(len(fields)):
            fields[i] = self.dst.media.transformNames(fields[i], repl)
        return joinFields(fields)
    # Post-import cleanup
    ######################################################################
    def _postImport(self) -> None:
        for did in list(self._decks.values()):
            self.col.sched.maybeRandomizeDeck(did)
        # make sure new position is correct
        self.dst.conf["nextPos"] = (
            self.dst.db.scalar("select max(due)+1 from cards where type = 0") or 0
        )
        self.dst.save()
| agpl-3.0 |
AlbertoPeon/invenio | modules/websubmit/lib/functions/Report_Number_Generation.py | 25 | 10904 | ## This file is part of Invenio.
## Copyright (C) 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011 CERN.
##
## Invenio is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 2 of the
## License, or (at your option) any later version.
##
## Invenio is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with Invenio; if not, write to the Free Software Foundation, Inc.,
## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""Description: function Report_Number_Generation
This function creates a reference for the submitted
document and saves it in the specified file.
Author: T.Baron"""
__revision__ = "$Id$"
import os
import re
import time
import fcntl
import errno
from invenio.config import CFG_WEBSUBMIT_COUNTERSDIR
from invenio.websubmit_config import InvenioWebSubmitFunctionError
from invenio.shellutils import mymkdir
def Report_Number_Generation(parameters, curdir, form, user_info=None):
    """
    This function creates a reference for the submitted
    document and saves it in the specified 'edsrn' file.
    After generating the reference, also sets the global variable 'rn'
    containing this reference.
    Parameters:
       * edsrn: name of the file in which the reference is saved
       * autorngen: one of "A", "N", "Y"
                "A": The reference is the submission number
                "N": The reference is taken from a file [edsrn]
                "Y": The reference is generated
       * rnin: name of the file containing the category
       * counterpath: path to the counter file note you can use:
                <PA>yy</PA> to include year
                <PA>categ</PA> to include category of the
                submission
                <PA>file[re]:name_of_file[regular expression to match]</PA> first line of file
                generated by submission, matching [re]
                <PA>file*[re]:name_of_file [regular expression to match]</PA> all the lines of
                a file genereated during submission, matching [re]
                separated by - (dash) char .
       * rnformat: format for the generated reference. You can use:
                <PA>yy</PA> to include year
                <PA>categ</PA> to include category of the
                submission
                <PA>file[re]:name_of_file[regular expression to match]</PA> first line of file
                generated by submission, matching [re]
                <PA>file*[re]:name_of_file [regular expression to match]</PA> all the lines of
                a file genereated during submission, matching [re]
                separated by - (dash) char .
       * yeargen: if "AUTO", current year, else the year is
                  extracted from the file [yeargen]
       * nblength: the number of digits for the report
                   number. Eg: '3' for XXX-YYYY-025 or '4'
                   for XXX-YYYY-0025. If more are needed
                   (all available digits have been used),
                   the length is automatically
                   extended. Choose 1 to never have leading
                   zeros. Default length: 3.
       * initialvalue: Initial value for the counter, 0 by default
    """
    # NOTE: legacy Python 2 code; communicates with the rest of WebSubmit
    # through module-level globals (notably 'rn' and 'access').
    global doctype, access, act, dir, rn
    # The program must automatically generate the report number
    # What is the length of the generated report number?
    nb_length = 3
    if parameters.has_key('nblength') and parameters['nblength'].isdigit():
        nb_length = int(parameters['nblength'])
    # Generate Year
    if parameters['autorngen'] == "Y":
        if parameters['yeargen'] == "AUTO":
            # Current year is used
            yy = time.strftime("%Y")
        else :
            # If yeargen != auto then the contents of the file named 'yeargen' is used
            # Assumes file uses DD-MM-YYYY format
            fp = open("%s/%s" % (curdir, parameters['yeargen']), "r")
            mydate = fp.read()
            fp.close()
            # strip the first six characters (DD-MM-) to keep the year
            yy = re.sub("^......", "", mydate)
        # Evaluate category - Category is read from the file named 'rnin
        if os.path.isfile("%s/%s" % (curdir,parameters['rnin'])):
            fp = open("%s/%s" % (curdir,parameters['rnin']), "r")
            category = fp.read()
            category = category.replace("\n", "")
        else:
            category = ""
        def get_pa_tag_content(pa_content):
            """Get content for <PA>XXX</PA>.
            @param pa_content: MatchObject for <PA>(.*)</PA>.
            return: if pa_content=yy => 4 digits year
                    if pa_content=categ =>category
                    if pa_content=file[re]:a_file => first line of file a_file matching re
                    if pa_content=file*p[re]:a_file => all lines of file a_file, matching re,
                    separated by - (dash) char.
            """
            pa_content=pa_content.groupdict()['content']
            sep = '-'
            out = ''
            if pa_content=='yy':
                out = yy
            elif pa_content=='categ':
                out = category
            elif pa_content.startswith('file'):
                filename = ""
                with_regexp = 0
                regexp = ""
                if "[" in pa_content:
                    with_regexp = 1
                    split_index_start = pa_content.find("[")
                    split_index_stop = pa_content.rfind("]")
                    regexp = pa_content[split_index_start+1:split_index_stop]
                    filename = pa_content[split_index_stop+2:]#]:
                else :
                    filename = pa_content.split(":")[1]
                if os.path.exists(os.path.join(curdir, filename)):
                    fp = open(os.path.join(curdir, filename), 'r')
                    if pa_content[:5]=="file*":
                        out = sep.join(map(lambda x: re.split(regexp,x.strip())[-1], fp.readlines()))
                    else:
                        out = re.split(regexp, fp.readline().strip())[-1]
                    fp.close()
            return out
        # Expand the <PA>...</PA> placeholders in both templates.
        counter_path = re.sub('<PA>(?P<content>[^<]*)</PA>',
                              get_pa_tag_content,
                              parameters['counterpath'])
        counter_path = counter_path.replace(" ", "")
        counter_path = counter_path.replace("\n", "")
        rn_format = re.sub('<PA>(?P<content>[^<]*)</PA>',
                           get_pa_tag_content,
                           parameters['rnformat'])
        # Check if the report number does not already exists
        if os.path.exists("%s/%s" % (curdir, parameters['edsrn'])):
            fp = open("%s/%s" % (curdir, parameters['edsrn']), "r")
            oldrn = fp.read()
            fp.close()
            # "???" in the stored value marks a placeholder, not a real number
            if oldrn != "" and not re.search("\?\?\?", oldrn):
                rn = oldrn
                return ""
        # What is the initial value, if any, of the generated report number?
        initial_value = 0
        if parameters.has_key('initialvalue') and parameters['initialvalue'].isdigit():
            initial_value = int(parameters['initialvalue'])-1
        # create it
        rn = create_reference(counter_path, rn_format, nb_length, initial_value)
        # strip every flavour of line terminator (octal escapes included)
        rn = rn.replace("\n", "")
        rn = rn.replace("\r", "")
        rn = rn.replace("\015", "")
        rn = rn.replace("\013", "")
        rn = rn.replace("\012", "")
        # The file edsrn is created in the submission directory, and it stores the report number
        fp = open("%s/%s" % (curdir, parameters['edsrn']), "w")
        fp.write(rn)
        fp.close()
    # The report number is just read from a specified file
    elif parameters['autorngen'] == "N":
        fp = open("%s/%s" % (curdir, parameters['edsrn']), "r")
        rn = fp.read()
        fp.close()
    # Some documents are really annoying and insist on a totally different way of doing things
    # This code is for documents which have the access number in the report
    # number (instead of using a counter)
    elif parameters['autorngen'] == "A":
        rn = parameters['rnformat'].replace("<PA>access</PA>", access)
        # The file accessno/edsrn is created, and it stores the report number
        fp = open("%s/%s" % (curdir, parameters['edsrn']), "w")
        fp.write(rn)
        fp.close()
    return ""
def create_reference(counter_path, ref_format, nb_length=3, initial_value=0):
    """From the counter-file for this document submission, get the next
    reference number and create the reference.

    The counter file is guarded with fcntl advisory locks so concurrent
    submissions cannot hand out the same number.
    """
    ## Does the WebSubmit CFG_WEBSUBMIT_COUNTERSDIR directory exist? Create it if not.
    full_path = os.path.split(os.path.join(CFG_WEBSUBMIT_COUNTERSDIR, counter_path))[0]
    try:
        mymkdir(full_path)
    except:
        ## Unable to create the CFG_WEBSUBMIT_COUNTERSDIR Dir.
        msg = "File System: Cannot create counters directory %s" % full_path
        raise InvenioWebSubmitFunctionError(msg)
    counter = os.path.join(CFG_WEBSUBMIT_COUNTERSDIR, counter_path)
    ## Now, if the counter-file itself doesn't exist, create it:
    if not os.path.exists(counter):
        # unbuffered append mode so the initial value hits disk immediately
        fp = open(counter, "a+", 0)
        try:
            fcntl.lockf(fp, fcntl.LOCK_EX | fcntl.LOCK_NB)
        except IOError, err:
            ## See: http://docs.python.org/library/fcntl.html#fcntl.lockf
            ## This might mean that some other process is already creating
            ## the file, so no need to initialized as well.
            if err.errno not in (errno.EACCES, errno.EAGAIN):
                raise
        else:
            try:
                if not fp.read():
                    fp.write(str(initial_value))
            finally:
                fp.flush()
                fcntl.lockf(fp, fcntl.LOCK_UN)
        fp.close()
    # Re-open read/write, take a blocking lock, and atomically bump the counter.
    fp = open(counter, "r+", 0)
    fcntl.lockf(fp, fcntl.LOCK_EX)
    try:
        id_value = fp.read()
        if id_value.strip() == '':
            id_value = initial_value
        else:
            id_value = int(id_value)
        id_value += 1
        fp.seek(0)
        fp.write(str(id_value))
        ## create final value
        reference = ("%s-%0" + str(nb_length) + "d") % (ref_format,id_value)
        ## Return the report number prelude with the id_value concatenated on at the end
        return reference
    finally:
        fp.flush()
        fcntl.lockf(fp, fcntl.LOCK_UN)
        fp.close()
| gpl-2.0 |
xuegang/gpdb | gpMgmt/bin/gppylib/test/unit/test_unit_leaked_schema_dropper.py | 39 | 2184 | from mock import *
from gp_unittest import *
from gpcheckcat_modules.leaked_schema_dropper import LeakedSchemaDropper
class LeakedSchemaDropperTestCase(GpTestCase):
    """Unit tests for LeakedSchemaDropper using a mocked DB connection."""
    def setUp(self):
        # The connection mock only allows .query(); by default it reports
        # two leaked schemas, one containing characters that need quoting.
        self.db_connection = Mock(spec=['query'])
        two_leaked_schemas = Mock()
        two_leaked_schemas.getresult.return_value = [
            ('fake_leak_1', 'something_else'),
            ('some"test"special_#;character--schema', 'something_else')
        ]
        self.db_connection.query.return_value = two_leaked_schemas
        self.subject = LeakedSchemaDropper()
    def test_drop_leaked_schemas__returns_a_list_of_leaked_schemas(self):
        self.assertEqual(self.subject.drop_leaked_schemas(self.db_connection), ['fake_leak_1', 'some"test"special_#;character--schema'])
    def test_drop_leaked_schemas__when_there_are_no_leaked_schemas__returns_an_empty_list(self):
        no_leaked_schemas = Mock()
        no_leaked_schemas.getresult.return_value = []
        self.db_connection.query.return_value = no_leaked_schemas
        self.assertEqual(self.subject.drop_leaked_schemas(self.db_connection), [])
    def test_drop_leaked_schemas__when_query_returns_null_schema__returns_an_empty_list(self):
        # A NULL schema name in the result row must be filtered out.
        null_leaked_schema = Mock()
        null_leaked_schema.getresult.return_value = [(None, 'something_else')]
        self.db_connection.query.return_value = null_leaked_schema
        self.assertEqual(self.subject.drop_leaked_schemas(self.db_connection), [])
    def test_drop_leaked_schemas__when_query_returns_null__returns_an_empty_list(self):
        self.db_connection.query.return_value = None
        self.assertEqual(self.subject.drop_leaked_schemas(self.db_connection), [])
    def test_drop_leaked_schemas__drops_orphaned_and_leaked_schemas(self):
        # Schema names must be double-quoted, with embedded quotes doubled.
        self.subject.drop_leaked_schemas(self.db_connection)
        drop_query_expected_list = [call("DROP SCHEMA IF EXISTS \"fake_leak_1\" CASCADE;"),
                                    call("DROP SCHEMA IF EXISTS \"some\"\"test\"\"special_#;character--schema\" CASCADE;")]
        self.db_connection.query.assert_has_calls(drop_query_expected_list)
# Allow running this test module directly.
if __name__ == '__main__':
    run_tests()
| apache-2.0 |
fangxingli/hue | desktop/core/ext-py/Django-1.6.10/django/db/models/fields/related.py | 47 | 72699 | from operator import attrgetter
from django.db import connection, connections, router
from django.db.backends import util
from django.db.models import signals, get_model
from django.db.models.fields import (AutoField, Field, IntegerField,
PositiveIntegerField, PositiveSmallIntegerField, FieldDoesNotExist)
from django.db.models.related import RelatedObject, PathInfo
from django.db.models.query import QuerySet
from django.db.models.deletion import CASCADE
from django.utils.encoding import smart_text
from django.utils import six
from django.utils.deprecation import RenameMethodsBase
from django.utils.translation import ugettext_lazy as _
from django.utils.functional import curry, cached_property
from django.core import exceptions
from django import forms
# Sentinel string used to declare a self-referential relation, e.g.
# ForeignKey("self").
RECURSIVE_RELATIONSHIP_CONSTANT = 'self'
# Maps (app_label, model_name) -> list of (cls, field, operation) tuples
# for relations whose target model hasn't been loaded yet.
pending_lookups = {}
def add_lazy_relation(cls, field, relation, operation):
    """
    Adds a lookup on ``cls`` when a related field is defined using a string,
    i.e.::
        class MyModel(Model):
            fk = ForeignKey("AnotherModel")
    This string can be:
        * RECURSIVE_RELATIONSHIP_CONSTANT (i.e. "self") to indicate a recursive
          relation.
        * The name of a model (i.e "AnotherModel") to indicate another model in
          the same app.
        * An app-label and model name (i.e. "someapp.AnotherModel") to indicate
          another model in a different app.
    If the other model hasn't yet been loaded -- almost a given if you're using
    lazy relationships -- then the relation won't be set up until the
    class_prepared signal fires at the end of model initialization.
    operation is the work that must be performed once the relation can be resolved.
    """
    # Check for recursive relations
    if relation == RECURSIVE_RELATIONSHIP_CONSTANT:
        app_label = cls._meta.app_label
        model_name = cls.__name__
    else:
        # Look for an "app.Model" relation
        if isinstance(relation, six.string_types):
            try:
                app_label, model_name = relation.split(".")
            except ValueError:
                # If we can't split, assume a model in current app
                app_label = cls._meta.app_label
                model_name = relation
        else:
            # it's actually a model class
            app_label = relation._meta.app_label
            model_name = relation._meta.object_name
    # Try to look up the related model, and if it's already loaded resolve the
    # string right away. If get_model returns None, it means that the related
    # model isn't loaded yet, so we need to pend the relation until the class
    # is prepared.
    model = get_model(app_label, model_name,
                      seed_cache=False, only_installed=False)
    if model:
        operation(field, model, cls)
    else:
        # Queue for later; do_pending_lookups drains this on class_prepared.
        key = (app_label, model_name)
        value = (cls, field, operation)
        pending_lookups.setdefault(key, []).append(value)
def do_pending_lookups(sender, **kwargs):
    """
    Resolve lazy relations that were waiting on the sending model.

    Connected to the class_prepared signal: pops every queued
    (cls, field, operation) entry for this model and runs it.
    """
    lookup_key = (sender._meta.app_label, sender.__name__)
    queued = pending_lookups.pop(lookup_key, [])
    for model_cls, rel_field, resolve in queued:
        resolve(rel_field, sender, model_cls)
# Resolve queued lazy relations as soon as each model class is prepared.
signals.class_prepared.connect(do_pending_lookups)
#HACK
class RelatedField(Field):
    """Base class shared by all relational fields (FK, O2O, M2M)."""
    def db_type(self, connection):
        '''By default related field will not have a column
        as it relates columns to another table'''
        return None
    def contribute_to_class(self, cls, name, virtual_only=False):
        # Attach the field to the model class and wire up the relation,
        # deferring via add_lazy_relation when the target is a string or
        # its primary key isn't known yet.
        sup = super(RelatedField, self)
        # Store the opts for related_query_name()
        self.opts = cls._meta
        if hasattr(sup, 'contribute_to_class'):
            sup.contribute_to_class(cls, name, virtual_only=virtual_only)
        if not cls._meta.abstract and self.rel.related_name:
            # Interpolate '%(class)s' / '%(app_label)s' placeholders.
            related_name = self.rel.related_name % {
                'class': cls.__name__.lower(),
                'app_label': cls._meta.app_label.lower()
            }
            self.rel.related_name = related_name
        other = self.rel.to
        if isinstance(other, six.string_types) or other._meta.pk is None:
            def resolve_related_class(field, model, cls):
                field.rel.to = model
                field.do_related_class(model, cls)
            add_lazy_relation(cls, self, other, resolve_related_class)
        else:
            self.do_related_class(other, cls)
    def set_attributes_from_rel(self):
        # Derive a default name/verbose_name from the target model.
        self.name = self.name or (self.rel.to._meta.model_name + '_' + self.rel.to._meta.pk.name)
        if self.verbose_name is None:
            self.verbose_name = self.rel.to._meta.verbose_name
        self.rel.set_field_name()
    def do_related_class(self, other, cls):
        # Finish relation setup once the target model class is available.
        self.set_attributes_from_rel()
        self.related = RelatedObject(other, cls, self)
        if not cls._meta.abstract:
            self.contribute_to_related_class(other, self.related)
    def related_query_name(self):
        # This method defines the name that can be used to identify this
        # related object in a table-spanning query. It uses the lower-cased
        # object_name by default, but this can be overridden with the
        # "related_name" option.
        return self.rel.related_query_name or self.rel.related_name or self.opts.model_name
class RenameRelatedObjectDescriptorMethods(RenameMethodsBase):
    """Metaclass that aliases the old get_query_set-style method names to the
    new get_queryset-style names, emitting PendingDeprecationWarning."""
    renamed_methods = (
        ('get_query_set', 'get_queryset', PendingDeprecationWarning),
        ('get_prefetch_query_set', 'get_prefetch_queryset', PendingDeprecationWarning),
    )
class SingleRelatedObjectDescriptor(six.with_metaclass(RenameRelatedObjectDescriptorMethods)):
    # This class provides the functionality that makes the related-object
    # managers available as attributes on a model class, for fields that have
    # a single "remote" value, on the class pointed to by a related field.
    # In the example "place.restaurant", the restaurant attribute is a
    # SingleRelatedObjectDescriptor instance.
    def __init__(self, related):
        self.related = related
        self.cache_name = related.get_cache_name()
    def is_cached(self, instance):
        # True if the related object has already been fetched and cached.
        return hasattr(instance, self.cache_name)
    def get_queryset(self, **db_hints):
        db = router.db_for_read(self.related.model, **db_hints)
        return self.related.model._base_manager.using(db)
    def get_prefetch_queryset(self, instances):
        # Batch-fetch related objects for prefetch_related().
        rel_obj_attr = attrgetter(self.related.field.attname)
        instance_attr = lambda obj: obj._get_pk_val()
        instances_dict = dict((instance_attr(inst), inst) for inst in instances)
        query = {'%s__in' % self.related.field.name: instances}
        qs = self.get_queryset(instance=instances[0]).filter(**query)
        # Since we're going to assign directly in the cache,
        # we must manage the reverse relation cache manually.
        rel_obj_cache_name = self.related.field.get_cache_name()
        for rel_obj in qs:
            instance = instances_dict[rel_obj_attr(rel_obj)]
            setattr(rel_obj, rel_obj_cache_name, instance)
        return qs, rel_obj_attr, instance_attr, True, self.cache_name
    def __get__(self, instance, instance_type=None):
        # Accessing on the class returns the descriptor itself.
        if instance is None:
            return self
        try:
            rel_obj = getattr(instance, self.cache_name)
        except AttributeError:
            related_pk = instance._get_pk_val()
            if related_pk is None:
                rel_obj = None
            else:
                params = {}
                for lh_field, rh_field in self.related.field.related_fields:
                    params['%s__%s' % (self.related.field.name, rh_field.name)] = getattr(instance, rh_field.attname)
                try:
                    rel_obj = self.get_queryset(instance=instance).get(**params)
                except self.related.model.DoesNotExist:
                    rel_obj = None
                else:
                    # seed the reverse cache on the fetched object too
                    setattr(rel_obj, self.related.field.get_cache_name(), instance)
            setattr(instance, self.cache_name, rel_obj)
        if rel_obj is None:
            raise self.related.model.DoesNotExist("%s has no %s." % (
                instance.__class__.__name__,
                self.related.get_accessor_name()))
        else:
            return rel_obj
    def __set__(self, instance, value):
        # The similarity of the code below to the code in
        # ReverseSingleRelatedObjectDescriptor is annoying, but there's a bunch
        # of small differences that would make a common base class convoluted.
        # If null=True, we can assign null here, but otherwise the value needs
        # to be an instance of the related class.
        if value is None and self.related.field.null == False:
            raise ValueError('Cannot assign None: "%s.%s" does not allow null values.' %
                                    (instance._meta.object_name, self.related.get_accessor_name()))
        elif value is not None and not isinstance(value, self.related.model):
            raise ValueError('Cannot assign "%r": "%s.%s" must be a "%s" instance.' %
                                    (value, instance._meta.object_name,
                                     self.related.get_accessor_name(), self.related.opts.object_name))
        elif value is not None:
            # Keep the two instances on compatible databases, consulting the
            # router when both sides already have a db assigned.
            if instance._state.db is None:
                instance._state.db = router.db_for_write(instance.__class__, instance=value)
            elif value._state.db is None:
                value._state.db = router.db_for_write(value.__class__, instance=instance)
            elif value._state.db is not None and instance._state.db is not None:
                if not router.allow_relation(value, instance):
                    raise ValueError('Cannot assign "%r": the current database router prevents this relation.' % value)
        related_pk = tuple([getattr(instance, field.attname) for field in self.related.field.foreign_related_fields])
        if None in related_pk:
            raise ValueError('Cannot assign "%r": "%s" instance isn\'t saved in the database.' %
                            (value, instance._meta.object_name))
        # Set the value of the related field to the value of the related object's related field
        for index, field in enumerate(self.related.field.local_related_fields):
            setattr(value, field.attname, related_pk[index])
        # Since we already know what the related object is, seed the related
        # object caches now, too. This avoids another db hit if you get the
        # object you just set.
        setattr(instance, self.cache_name, value)
        setattr(value, self.related.field.get_cache_name(), instance)
class ReverseSingleRelatedObjectDescriptor(six.with_metaclass(RenameRelatedObjectDescriptorMethods)):
    # This class provides the functionality that makes the related-object
    # managers available as attributes on a model class, for fields that have
    # a single "remote" value, on the class that defines the related field.
    # In the example "choice.poll", the poll attribute is a
    # ReverseSingleRelatedObjectDescriptor instance.
    def __init__(self, field_with_rel):
        self.field = field_with_rel
        self.cache_name = self.field.get_cache_name()
    def is_cached(self, instance):
        """Return True if the related object is already cached on ``instance``."""
        return hasattr(instance, self.cache_name)
    def get_queryset(self, **db_hints):
        """Return a queryset for the target model, routed using ``db_hints``."""
        db = router.db_for_read(self.field.rel.to, **db_hints)
        rel_mgr = self.field.rel.to._default_manager
        # If the related manager indicates that it should be used for
        # related fields, respect that.
        if getattr(rel_mgr, 'use_for_related_fields', False):
            return rel_mgr.using(db)
        else:
            return QuerySet(self.field.rel.to).using(db)
    def get_prefetch_queryset(self, instances):
        """Build the queryset and bookkeeping data used by prefetch_related()."""
        rel_obj_attr = self.field.get_foreign_related_value
        instance_attr = self.field.get_local_related_value
        instances_dict = dict((instance_attr(inst), inst) for inst in instances)
        related_field = self.field.foreign_related_fields[0]
        # FIXME: This will need to be revisited when we introduce support for
        # composite fields. In the meantime we take this practical approach to
        # solve a regression on 1.6 when the reverse manager in hidden
        # (related_name ends with a '+'). Refs #21410.
        # The check for len(...) == 1 is a special case that allows the query
        # to be join-less and smaller. Refs #21760.
        if self.field.rel.is_hidden() or len(self.field.foreign_related_fields) == 1:
            query = {'%s__in' % related_field.name: set(instance_attr(inst)[0] for inst in instances)}
        else:
            query = {'%s__in' % self.field.related_query_name(): instances}
        qs = self.get_queryset(instance=instances[0]).filter(**query)
        # Since we're going to assign directly in the cache,
        # we must manage the reverse relation cache manually.
        if not self.field.rel.multiple:
            rel_obj_cache_name = self.field.related.get_cache_name()
            for rel_obj in qs:
                instance = instances_dict[rel_obj_attr(rel_obj)]
                setattr(rel_obj, rel_obj_cache_name, instance)
        return qs, rel_obj_attr, instance_attr, True, self.cache_name
    def __get__(self, instance, instance_type=None):
        """Fetch (and cache) the related object; raise DoesNotExist for a
        missing value on a non-nullable field."""
        if instance is None:
            return self
        try:
            rel_obj = getattr(instance, self.cache_name)
        except AttributeError:
            val = self.field.get_local_related_value(instance)
            if None in val:
                rel_obj = None
            else:
                params = dict(
                    (rh_field.attname, getattr(instance, lh_field.attname))
                    for lh_field, rh_field in self.field.related_fields)
                qs = self.get_queryset(instance=instance)
                extra_filter = self.field.get_extra_descriptor_filter(instance)
                if isinstance(extra_filter, dict):
                    params.update(extra_filter)
                    qs = qs.filter(**params)
                else:
                    qs = qs.filter(extra_filter, **params)
                # Assuming the database enforces foreign keys, this won't fail.
                rel_obj = qs.get()
                if not self.field.rel.multiple:
                    setattr(rel_obj, self.field.related.get_cache_name(), instance)
            setattr(instance, self.cache_name, rel_obj)
        if rel_obj is None and not self.field.null:
            raise self.field.rel.to.DoesNotExist(
                "%s has no %s." % (self.field.model.__name__, self.field.name))
        else:
            return rel_obj
    def __set__(self, instance, value):
        # If null=True, we can assign null here, but otherwise the value needs
        # to be an instance of the related class.
        if value is None and not self.field.null:
            raise ValueError('Cannot assign None: "%s.%s" does not allow null values.' %
                             (instance._meta.object_name, self.field.name))
        elif value is not None and not isinstance(value, self.field.rel.to):
            raise ValueError('Cannot assign "%r": "%s.%s" must be a "%s" instance.' %
                             (value, instance._meta.object_name,
                              self.field.name, self.field.rel.to._meta.object_name))
        elif value is not None:
            # Make sure both objects end up on (or are routed to) compatible
            # databases before relating them.
            if instance._state.db is None:
                instance._state.db = router.db_for_write(instance.__class__, instance=value)
            elif value._state.db is None:
                value._state.db = router.db_for_write(value.__class__, instance=instance)
            elif value._state.db is not None and instance._state.db is not None:
                if not router.allow_relation(value, instance):
                    raise ValueError('Cannot assign "%r": the current database router prevents this relation.' % value)
        # If we're setting the value of a OneToOneField to None, we need to clear
        # out the cache on any old related object. Otherwise, deleting the
        # previously-related object will also cause this object to be deleted,
        # which is wrong.
        if value is None:
            # Look up the previously-related object, which may still be available
            # since we've not yet cleared out the related field.
            # Use the cache directly, instead of the accessor; if we haven't
            # populated the cache, then we don't care - we're only accessing
            # the object to invalidate the accessor cache, so there's no
            # need to populate the cache just to expire it again.
            related = getattr(instance, self.cache_name, None)
            # If we've got an old related object, we need to clear out its
            # cache. This cache also might not exist if the related object
            # hasn't been accessed yet.
            if related is not None:
                setattr(related, self.field.related.get_cache_name(), None)
        # Set the value of the related field
        for lh_field, rh_field in self.field.related_fields:
            try:
                setattr(instance, lh_field.attname, getattr(value, rh_field.attname))
            except AttributeError:
                setattr(instance, lh_field.attname, None)
        # Since we already know what the related object is, seed the related
        # object caches now, too. This avoids another db hit if you get the
        # object you just set.
        setattr(instance, self.cache_name, value)
        if value is not None and not self.field.rel.multiple:
            setattr(value, self.field.related.get_cache_name(), instance)
class ForeignRelatedObjectsDescriptor(object):
    # This class provides the functionality that makes the related-object
    # managers available as attributes on a model class, for fields that have
    # multiple "remote" values and have a ForeignKey pointed at them by
    # some other model. In the example "poll.choice_set", the choice_set
    # attribute is a ForeignRelatedObjectsDescriptor instance.
    def __init__(self, related):
        self.related = related # RelatedObject instance
    def __get__(self, instance, instance_type=None):
        # Class access returns the descriptor itself; instance access returns
        # a manager bound to that instance.
        if instance is None:
            return self
        return self.related_manager_cls(instance)
    def __set__(self, instance, value):
        manager = self.__get__(instance)
        # If the foreign key can support nulls, then completely clear the related set.
        # Otherwise, just move the named objects into the set.
        if self.related.field.null:
            manager.clear()
        manager.add(*value)
    @cached_property
    def related_manager_cls(self):
        # Dynamically create a class that subclasses the related model's default
        # manager. Built once per descriptor (cached_property); rel_field and
        # rel_model are captured by closure in the methods below.
        superclass = self.related.model._default_manager.__class__
        rel_field = self.related.field
        rel_model = self.related.model
        class RelatedManager(superclass):
            def __init__(self, instance):
                super(RelatedManager, self).__init__()
                self.instance = instance
                self.core_filters= {'%s__exact' % rel_field.name: instance}
                self.model = rel_model
            def get_queryset(self):
                # Prefer results already loaded by prefetch_related(), if any.
                try:
                    return self.instance._prefetched_objects_cache[rel_field.related_query_name()]
                except (AttributeError, KeyError):
                    db = self._db or router.db_for_read(self.model, instance=self.instance)
                    qs = super(RelatedManager, self).get_queryset().using(db).filter(**self.core_filters)
                    empty_strings_as_null = connections[db].features.interprets_empty_strings_as_nulls
                    # If any referenced value on the instance is NULL (or ''
                    # on backends that treat '' as NULL), nothing can match.
                    for field in rel_field.foreign_related_fields:
                        val = getattr(self.instance, field.attname)
                        if val is None or (val == '' and empty_strings_as_null):
                            return qs.none()
                    qs._known_related_objects = {rel_field: {self.instance.pk: self.instance}}
                    return qs
            def get_prefetch_queryset(self, instances):
                rel_obj_attr = rel_field.get_local_related_value
                instance_attr = rel_field.get_foreign_related_value
                instances_dict = dict((instance_attr(inst), inst) for inst in instances)
                db = self._db or router.db_for_read(self.model, instance=instances[0])
                query = {'%s__in' % rel_field.name: instances}
                qs = super(RelatedManager, self).get_queryset().using(db).filter(**query)
                # Since we just bypassed this class' get_queryset(), we must manage
                # the reverse relation manually.
                for rel_obj in qs:
                    instance = instances_dict[rel_obj_attr(rel_obj)]
                    setattr(rel_obj, rel_field.name, instance)
                cache_name = rel_field.related_query_name()
                return qs, rel_obj_attr, instance_attr, False, cache_name
            def add(self, *objs):
                # Point each object's FK at our instance and save it.
                for obj in objs:
                    if not isinstance(obj, self.model):
                        raise TypeError("'%s' instance expected, got %r" % (self.model._meta.object_name, obj))
                    setattr(obj, rel_field.name, self.instance)
                    obj.save()
            add.alters_data = True
            def create(self, **kwargs):
                kwargs[rel_field.name] = self.instance
                db = router.db_for_write(self.model, instance=self.instance)
                return super(RelatedManager, self.db_manager(db)).create(**kwargs)
            create.alters_data = True
            def get_or_create(self, **kwargs):
                # Update kwargs with the related object that this
                # ForeignRelatedObjectsDescriptor knows about.
                kwargs[rel_field.name] = self.instance
                db = router.db_for_write(self.model, instance=self.instance)
                return super(RelatedManager, self.db_manager(db)).get_or_create(**kwargs)
            get_or_create.alters_data = True
            # remove() and clear() are only provided if the ForeignKey can have a value of null.
            if rel_field.null:
                def remove(self, *objs):
                    val = rel_field.get_foreign_related_value(self.instance)
                    for obj in objs:
                        # Is obj actually part of this descriptor set?
                        if rel_field.get_local_related_value(obj) == val:
                            setattr(obj, rel_field.name, None)
                            obj.save()
                        else:
                            raise rel_field.rel.to.DoesNotExist("%r is not related to %r." % (obj, self.instance))
                remove.alters_data = True
                def clear(self):
                    self.update(**{rel_field.name: None})
                clear.alters_data = True
        return RelatedManager
def create_many_related_manager(superclass, rel):
    """Creates a manager that subclasses 'superclass' (which is a Manager)
    and adds behavior for many-to-many related objects."""
    class ManyRelatedManager(superclass):
        def __init__(self, model=None, query_field_name=None, instance=None, symmetrical=None,
                     source_field_name=None, target_field_name=None, reverse=False,
                     through=None, prefetch_cache_name=None):
            super(ManyRelatedManager, self).__init__()
            self.model = model
            self.query_field_name = query_field_name
            source_field = through._meta.get_field(source_field_name)
            source_related_fields = source_field.related_fields
            self.core_filters = {}
            for lh_field, rh_field in source_related_fields:
                self.core_filters['%s__%s' % (query_field_name, rh_field.name)] = getattr(instance, rh_field.attname)
            self.instance = instance
            self.symmetrical = symmetrical
            self.source_field = source_field
            self.source_field_name = source_field_name
            self.target_field_name = target_field_name
            self.reverse = reverse
            self.through = through
            self.prefetch_cache_name = prefetch_cache_name
            self.related_val = source_field.get_foreign_related_value(instance)
            # Used for single column related auto created models
            self._fk_val = self.related_val[0]
            if None in self.related_val:
                raise ValueError('"%r" needs to have a value for field "%s" before '
                                 'this many-to-many relationship can be used.' %
                                 (instance, source_field_name))
            # Even if this relation is not to pk, we require still pk value.
            # The wish is that the instance has been already saved to DB,
            # although having a pk value isn't a guarantee of that.
            if instance.pk is None:
                raise ValueError("%r instance needs to have a primary key value before "
                                 "a many-to-many relationship can be used." %
                                 instance.__class__.__name__)
        def _get_fk_val(self, obj, field_name):
            """
            Returns the correct value for this relationship's foreign key. This
            might be something else than pk value when to_field is used.
            """
            fk = self.through._meta.get_field(field_name)
            if fk.rel.field_name and fk.rel.field_name != fk.rel.to._meta.pk.attname:
                attname = fk.rel.get_related_field().get_attname()
                return fk.get_prep_lookup('exact', getattr(obj, attname))
            else:
                return obj.pk
        def get_queryset(self):
            # Prefer results already loaded by prefetch_related(), if any.
            try:
                return self.instance._prefetched_objects_cache[self.prefetch_cache_name]
            except (AttributeError, KeyError):
                db = self._db or router.db_for_read(self.instance.__class__, instance=self.instance)
                return super(ManyRelatedManager, self).get_queryset().using(db)._next_is_sticky().filter(**self.core_filters)
        def get_prefetch_queryset(self, instances):
            instance = instances[0]
            db = self._db or router.db_for_read(instance.__class__, instance=instance)
            query = {'%s__in' % self.query_field_name: instances}
            qs = super(ManyRelatedManager, self).get_queryset().using(db)._next_is_sticky().filter(**query)
            # M2M: need to annotate the query in order to get the primary model
            # that the secondary model was actually related to. We know that
            # there will already be a join on the join table, so we can just add
            # the select.
            # For non-autocreated 'through' models, can't assume we are
            # dealing with PK values.
            fk = self.through._meta.get_field(self.source_field_name)
            join_table = self.through._meta.db_table
            connection = connections[db]
            qn = connection.ops.quote_name
            qs = qs.extra(select=dict(
                ('_prefetch_related_val_%s' % f.attname,
                '%s.%s' % (qn(join_table), qn(f.column))) for f in fk.local_related_fields))
            return (qs,
                    lambda result: tuple([getattr(result, '_prefetch_related_val_%s' % f.attname) for f in fk.local_related_fields]),
                    lambda inst: tuple([getattr(inst, f.attname) for f in fk.foreign_related_fields]),
                    False,
                    self.prefetch_cache_name)
        # If the ManyToMany relation has an intermediary model,
        # the add and remove methods do not exist.
        if rel.through._meta.auto_created:
            def add(self, *objs):
                self._add_items(self.source_field_name, self.target_field_name, *objs)
                # If this is a symmetrical m2m relation to self, add the mirror entry in the m2m table
                if self.symmetrical:
                    self._add_items(self.target_field_name, self.source_field_name, *objs)
            add.alters_data = True
            def remove(self, *objs):
                self._remove_items(self.source_field_name, self.target_field_name, *objs)
                # If this is a symmetrical m2m relation to self, remove the mirror entry in the m2m table
                if self.symmetrical:
                    self._remove_items(self.target_field_name, self.source_field_name, *objs)
            remove.alters_data = True
        def clear(self):
            self._clear_items(self.source_field_name)
            # If this is a symmetrical m2m relation to self, clear the mirror entry in the m2m table
            if self.symmetrical:
                self._clear_items(self.target_field_name)
        clear.alters_data = True
        def create(self, **kwargs):
            # This check needs to be done here, since we can't later remove this
            # from the method lookup table, as we do with add and remove.
            if not self.through._meta.auto_created:
                opts = self.through._meta
                raise AttributeError("Cannot use create() on a ManyToManyField which specifies an intermediary model. Use %s.%s's Manager instead." % (opts.app_label, opts.object_name))
            db = router.db_for_write(self.instance.__class__, instance=self.instance)
            new_obj = super(ManyRelatedManager, self.db_manager(db)).create(**kwargs)
            self.add(new_obj)
            return new_obj
        create.alters_data = True
        def get_or_create(self, **kwargs):
            db = router.db_for_write(self.instance.__class__, instance=self.instance)
            obj, created = \
                super(ManyRelatedManager, self.db_manager(db)).get_or_create(**kwargs)
            # We only need to add() if created because if we got an object back
            # from get() then the relationship already exists.
            if created:
                self.add(obj)
            return obj, created
        get_or_create.alters_data = True
        def _add_items(self, source_field_name, target_field_name, *objs):
            # source_field_name: the PK fieldname in join table for the source object
            # target_field_name: the PK fieldname in join table for the target object
            # *objs - objects to add. Either object instances, or primary keys of object instances.
            # If there aren't any objects, there is nothing to do.
            from django.db.models import Model
            if objs:
                new_ids = set()
                for obj in objs:
                    if isinstance(obj, self.model):
                        if not router.allow_relation(obj, self.instance):
                            raise ValueError('Cannot add "%r": instance is on database "%s", value is on database "%s"' %
                                             (obj, self.instance._state.db, obj._state.db))
                        fk_val = self._get_fk_val(obj, target_field_name)
                        if fk_val is None:
                            raise ValueError('Cannot add "%r": the value for field "%s" is None' %
                                             (obj, target_field_name))
                        # Reuse the value computed above instead of calling
                        # _get_fk_val() a second time for the same object.
                        new_ids.add(fk_val)
                    elif isinstance(obj, Model):
                        raise TypeError("'%s' instance expected, got %r" % (self.model._meta.object_name, obj))
                    else:
                        new_ids.add(obj)
                db = router.db_for_write(self.through, instance=self.instance)
                # Filter out ids that are already present in the join table.
                vals = self.through._default_manager.using(db).values_list(target_field_name, flat=True)
                vals = vals.filter(**{
                    source_field_name: self._fk_val,
                    '%s__in' % target_field_name: new_ids,
                })
                new_ids = new_ids - set(vals)
                if self.reverse or source_field_name == self.source_field_name:
                    # Don't send the signal when we are inserting the
                    # duplicate data row for symmetrical reverse entries.
                    signals.m2m_changed.send(sender=self.through, action='pre_add',
                        instance=self.instance, reverse=self.reverse,
                        model=self.model, pk_set=new_ids, using=db)
                # Add the ones that aren't there already
                self.through._default_manager.using(db).bulk_create([
                    self.through(**{
                        '%s_id' % source_field_name: self._fk_val,
                        '%s_id' % target_field_name: obj_id,
                    })
                    for obj_id in new_ids
                ])
                if self.reverse or source_field_name == self.source_field_name:
                    # Don't send the signal when we are inserting the
                    # duplicate data row for symmetrical reverse entries.
                    signals.m2m_changed.send(sender=self.through, action='post_add',
                        instance=self.instance, reverse=self.reverse,
                        model=self.model, pk_set=new_ids, using=db)
        def _remove_items(self, source_field_name, target_field_name, *objs):
            # source_field_name: the PK colname in join table for the source object
            # target_field_name: the PK colname in join table for the target object
            # *objs - objects to remove
            # If there aren't any objects, there is nothing to do.
            if objs:
                # Check that all the objects are of the right type
                old_ids = set()
                for obj in objs:
                    if isinstance(obj, self.model):
                        old_ids.add(self._get_fk_val(obj, target_field_name))
                    else:
                        old_ids.add(obj)
                # Work out what DB we're operating on
                db = router.db_for_write(self.through, instance=self.instance)
                # Send a signal to the other end if need be.
                if self.reverse or source_field_name == self.source_field_name:
                    # Don't send the signal when we are deleting the
                    # duplicate data row for symmetrical reverse entries.
                    signals.m2m_changed.send(sender=self.through, action="pre_remove",
                        instance=self.instance, reverse=self.reverse,
                        model=self.model, pk_set=old_ids, using=db)
                # Remove the specified objects from the join table
                self.through._default_manager.using(db).filter(**{
                    source_field_name: self._fk_val,
                    '%s__in' % target_field_name: old_ids
                }).delete()
                if self.reverse or source_field_name == self.source_field_name:
                    # Don't send the signal when we are deleting the
                    # duplicate data row for symmetrical reverse entries.
                    signals.m2m_changed.send(sender=self.through, action="post_remove",
                        instance=self.instance, reverse=self.reverse,
                        model=self.model, pk_set=old_ids, using=db)
        def _clear_items(self, source_field_name):
            db = router.db_for_write(self.through, instance=self.instance)
            # source_field_name: the PK colname in join table for the source object
            if self.reverse or source_field_name == self.source_field_name:
                # Don't send the signal when we are clearing the
                # duplicate data rows for symmetrical reverse entries.
                signals.m2m_changed.send(sender=self.through, action="pre_clear",
                    instance=self.instance, reverse=self.reverse,
                    model=self.model, pk_set=None, using=db)
            self.through._default_manager.using(db).filter(**{
                source_field_name: self.related_val
            }).delete()
            if self.reverse or source_field_name == self.source_field_name:
                # Don't send the signal when we are clearing the
                # duplicate data rows for symmetrical reverse entries.
                signals.m2m_changed.send(sender=self.through, action="post_clear",
                    instance=self.instance, reverse=self.reverse,
                    model=self.model, pk_set=None, using=db)
    return ManyRelatedManager
class ManyRelatedObjectsDescriptor(object):
    # Accessor for the "reverse" side of a many-to-many relation: it lives on
    # a model that is pointed *at* by a ManyToManyField defined on some other
    # model. In the example "publication.article_set", the article_set
    # attribute is a ManyRelatedObjectsDescriptor instance.
    def __init__(self, related):
        # ``related`` is the RelatedObject describing the m2m link.
        self.related = related
    @cached_property
    def related_manager_cls(self):
        # Build, once per descriptor, a manager class derived from the
        # related model's default manager.
        manager_base = self.related.model._default_manager.__class__
        return create_many_related_manager(manager_base, self.related.field.rel)
    def __get__(self, instance, instance_type=None):
        if instance is None:
            return self
        field = self.related.field
        return self.related_manager_cls(
            model=self.related.model,
            query_field_name=field.name,
            prefetch_cache_name=field.related_query_name(),
            instance=instance,
            symmetrical=False,
            source_field_name=field.m2m_reverse_field_name(),
            target_field_name=field.m2m_field_name(),
            reverse=True,
            through=field.rel.through,
        )
    def __set__(self, instance, value):
        through = self.related.field.rel.through
        if not through._meta.auto_created:
            opts = through._meta
            raise AttributeError("Cannot set values on a ManyToManyField which specifies an intermediary model. Use %s.%s's Manager instead." % (opts.app_label, opts.object_name))
        manager = self.__get__(instance)
        manager.clear()
        manager.add(*value)
class ReverseManyRelatedObjectsDescriptor(object):
    # Accessor for the "forward" side of a many-to-many relation: it lives on
    # the model that defines the ManyToManyField itself. In the example
    # "article.publications", the publications attribute is a
    # ReverseManyRelatedObjectsDescriptor instance.
    def __init__(self, m2m_field):
        self.field = m2m_field
    @property
    def through(self):
        # Convenience access to the intermediate model
        # (e.g. Book.authors.through) for inlines, etc. A property so the
        # fully resolved model is always returned.
        return self.field.rel.through
    @cached_property
    def related_manager_cls(self):
        # Build, once per descriptor, a manager class derived from the
        # target model's default manager.
        manager_base = self.field.rel.to._default_manager.__class__
        return create_many_related_manager(manager_base, self.field.rel)
    def __get__(self, instance, instance_type=None):
        if instance is None:
            return self
        field = self.field
        return self.related_manager_cls(
            model=field.rel.to,
            query_field_name=field.related_query_name(),
            prefetch_cache_name=field.name,
            instance=instance,
            symmetrical=field.rel.symmetrical,
            source_field_name=field.m2m_field_name(),
            target_field_name=field.m2m_reverse_field_name(),
            reverse=False,
            through=field.rel.through,
        )
    def __set__(self, instance, value):
        through = self.field.rel.through
        if not through._meta.auto_created:
            opts = through._meta
            raise AttributeError("Cannot set values on a ManyToManyField which specifies an intermediary model. Use %s.%s's Manager instead." % (opts.app_label, opts.object_name))
        manager = self.__get__(instance)
        # clear() can change expected output of 'value' queryset, we force evaluation
        # of queryset before clear; ticket #19816
        value = tuple(value)
        manager.clear()
        manager.add(*value)
class ForeignObjectRel(object):
    """Metadata describing the reverse side of a ForeignObject relation."""
    def __init__(self, field, to, related_name=None, limit_choices_to=None,
                 parent_link=False, on_delete=None, related_query_name=None):
        try:
            to._meta
        except AttributeError:
            # to._meta doesn't exist, so it must be RECURSIVE_RELATIONSHIP_CONSTANT
            assert isinstance(to, six.string_types), "'to' must be either a model, a model name or the string %r" % RECURSIVE_RELATIONSHIP_CONSTANT
        self.field = field
        self.to = to
        self.related_name = related_name
        self.related_query_name = related_query_name
        if limit_choices_to is None:
            limit_choices_to = {}
        self.limit_choices_to = limit_choices_to
        self.multiple = True
        self.parent_link = parent_link
        self.on_delete = on_delete
    def is_hidden(self):
        "Should the related object be hidden?"
        # A related_name ending in '+' marks the reverse accessor as hidden.
        name = self.related_name
        return name and name.endswith('+')
    def get_joining_columns(self):
        # The reverse relation joins on the field's columns, swapped.
        return self.field.get_reverse_joining_columns()
    def get_extra_restriction(self, where_class, alias, related_alias):
        # Delegate to the field with the aliases swapped, since this object
        # represents the opposite direction of the relation.
        return self.field.get_extra_restriction(where_class, related_alias, alias)
    def set_field_name(self):
        """
        Sets the related field's name, this is not available until later stages
        of app loading, so set_field_name is called from
        set_attributes_from_rel()
        """
        # By default foreign object doesn't relate to any remote field (for
        # example custom multicolumn joins currently have no remote field).
        self.field_name = None
class ManyToOneRel(ForeignObjectRel):
    """Relation metadata for a many-to-one (ForeignKey-style) link."""
    def __init__(self, field, to, field_name, related_name=None, limit_choices_to=None,
                 parent_link=False, on_delete=None, related_query_name=None):
        super(ManyToOneRel, self).__init__(
            field, to, related_name=related_name,
            limit_choices_to=limit_choices_to, parent_link=parent_link,
            on_delete=on_delete, related_query_name=related_query_name)
        # Name of the field on ``to`` that this relation is tied to.
        self.field_name = field_name
    def get_related_field(self):
        """
        Returns the Field in the 'to' object to which this relationship is
        tied.
        """
        info = self.to._meta.get_field_by_name(self.field_name)
        # info[2] is the "direct" flag; a non-direct result means there is no
        # actual field by that name on the target model.
        if not info[2]:
            raise FieldDoesNotExist("No related field named '%s'" %
                                    self.field_name)
        return info[0]
    def set_field_name(self):
        # Fall back to the target model's primary key when no explicit
        # field name was provided.
        if not self.field_name:
            self.field_name = self.to._meta.pk.name
class OneToOneRel(ManyToOneRel):
    """A ManyToOneRel whose reverse side holds at most one object."""
    def __init__(self, field, to, field_name, related_name=None, limit_choices_to=None,
                 parent_link=False, on_delete=None, related_query_name=None):
        super(OneToOneRel, self).__init__(
            field, to, field_name, related_name=related_name,
            limit_choices_to=limit_choices_to, parent_link=parent_link,
            on_delete=on_delete, related_query_name=related_query_name)
        # Unlike a plain foreign key, a one-to-one relation is single-valued
        # on both sides.
        self.multiple = False
class ManyToManyRel(object):
    """Relation metadata for a ManyToManyField."""
    def __init__(self, to, related_name=None, limit_choices_to=None,
                 symmetrical=True, through=None, db_constraint=True, related_query_name=None):
        # An explicit intermediate model manages its own rows, so disabling
        # the database constraint alongside one is contradictory.
        if through and not db_constraint:
            raise ValueError("Can't supply a through model and db_constraint=False")
        self.to = to
        self.related_name = related_name
        self.related_query_name = related_query_name
        self.limit_choices_to = {} if limit_choices_to is None else limit_choices_to
        self.symmetrical = symmetrical
        self.multiple = True
        self.through = through
        self.db_constraint = db_constraint
    def is_hidden(self):
        "Should the related object be hidden?"
        # A related_name ending in '+' marks the reverse accessor as hidden.
        name = self.related_name
        return name and name[-1] == '+'
    def get_related_field(self):
        """
        Returns the field in the to' object to which this relationship is tied
        (this is always the primary key on the target model). Provided for
        symmetry with ManyToOneRel.
        """
        return self.to._meta.pk
class ForeignObject(RelatedField):
requires_unique_target = True
generate_reverse_relation = True
def __init__(self, to, from_fields, to_fields, **kwargs):
self.from_fields = from_fields
self.to_fields = to_fields
if 'rel' not in kwargs:
kwargs['rel'] = ForeignObjectRel(
self, to,
related_name=kwargs.pop('related_name', None),
related_query_name=kwargs.pop('related_query_name', None),
limit_choices_to=kwargs.pop('limit_choices_to', None),
parent_link=kwargs.pop('parent_link', False),
on_delete=kwargs.pop('on_delete', CASCADE),
)
kwargs['verbose_name'] = kwargs.get('verbose_name', None)
super(ForeignObject, self).__init__(**kwargs)
def resolve_related_fields(self):
if len(self.from_fields) < 1 or len(self.from_fields) != len(self.to_fields):
raise ValueError('Foreign Object from and to fields must be the same non-zero length')
related_fields = []
for index in range(len(self.from_fields)):
from_field_name = self.from_fields[index]
to_field_name = self.to_fields[index]
from_field = (self if from_field_name == 'self'
else self.opts.get_field_by_name(from_field_name)[0])
to_field = (self.rel.to._meta.pk if to_field_name is None
else self.rel.to._meta.get_field_by_name(to_field_name)[0])
related_fields.append((from_field, to_field))
return related_fields
@property
def related_fields(self):
if not hasattr(self, '_related_fields'):
self._related_fields = self.resolve_related_fields()
return self._related_fields
@property
def reverse_related_fields(self):
return [(rhs_field, lhs_field) for lhs_field, rhs_field in self.related_fields]
@property
def local_related_fields(self):
return tuple([lhs_field for lhs_field, rhs_field in self.related_fields])
@property
def foreign_related_fields(self):
return tuple([rhs_field for lhs_field, rhs_field in self.related_fields])
    def get_local_related_value(self, instance):
        # Values of this relation's local (source) fields on ``instance``.
        return self.get_instance_value_for_fields(instance, self.local_related_fields)
    def get_foreign_related_value(self, instance):
        # Values of this relation's remote (target) fields on ``instance``.
        return self.get_instance_value_for_fields(instance, self.foreign_related_fields)
@staticmethod
def get_instance_value_for_fields(instance, fields):
ret = []
for field in fields:
# Gotcha: in some cases (like fixture loading) a model can have
# different values in parent_ptr_id and parent's id. So, use
# instance.pk (that is, parent_ptr_id) when asked for instance.id.
if field.primary_key:
ret.append(instance.pk)
else:
ret.append(getattr(instance, field.attname))
return tuple(ret)
def get_attname_column(self):
attname, column = super(ForeignObject, self).get_attname_column()
return attname, None
def get_joining_columns(self, reverse_join=False):
source = self.reverse_related_fields if reverse_join else self.related_fields
return tuple([(lhs_field.column, rhs_field.column) for lhs_field, rhs_field in source])
    def get_reverse_joining_columns(self):
        # Joining columns as seen from the remote model's side.
        return self.get_joining_columns(reverse_join=True)
    def get_extra_descriptor_filter(self, instance):
        """
        Returns an extra filter condition for related object fetching when
        user does 'instance.fieldname', that is the extra filter is used in
        the descriptor of the field.

        The filter should be either a dict usable in .filter(**kwargs) call or
        a Q-object. The condition will be ANDed together with the relation's
        joining columns.

        A parallel method is get_extra_restriction() which is used in
        JOIN and subquery conditions.
        """
        # Base implementation adds no extra filtering; subclasses override.
        return {}
    def get_extra_restriction(self, where_class, alias, related_alias):
        """
        Returns a pair condition used for joining and subquery pushdown. The
        condition is something that responds to as_sql(qn, connection) method.

        Note that currently referring both the 'alias' and 'related_alias'
        will not work in some conditions, like subquery pushdown.

        A parallel method is get_extra_descriptor_filter() which is used in
        instance.fieldname related object fetching.
        """
        # Base implementation adds no extra restriction; subclasses override.
        return None
    def get_path_info(self):
        """
        Get path from this field to the related model.
        """
        opts = self.rel.to._meta
        from_opts = self.model._meta
        # Single-step path; the final two flags mark m2m=False, direct=True.
        return [PathInfo(from_opts, opts, self.foreign_related_fields, self, False, True)]
    def get_reverse_path_info(self):
        """
        Get path from the related model to this field's model.
        """
        opts = self.model._meta
        from_opts = self.rel.to._meta
        # The reverse step targets our pk; it is multi-valued unless the
        # field is unique.
        pathinfos = [PathInfo(from_opts, opts, (opts.pk,), self.rel, not self.unique, False)]
        return pathinfos
    def get_lookup_constraint(self, constraint_class, alias, targets, sources, lookup_type,
                              raw_value):
        """
        Build a WHERE constraint tree for a lookup against this (possibly
        multi-column) relation.

        ``targets``/``sources`` are the parallel remote/local field lists,
        ``alias`` the table alias to constrain, and ``raw_value`` the
        user-supplied lookup value (a model instance, tuple, queryset, or
        plain value).
        """
        from django.db.models.sql.where import SubqueryConstraint, Constraint, AND, OR
        root_constraint = constraint_class()
        assert len(targets) == len(sources)
        def get_normalized_value(value):
            # Normalize a lookup value into a tuple of per-source values.
            from django.db.models import Model
            if isinstance(value, Model):
                value_list = []
                for source in sources:
                    # Account for one-to-one relations when sent a different model
                    while not isinstance(value, source.model) and source.rel:
                        source = source.rel.to._meta.get_field(source.rel.field_name)
                    value_list.append(getattr(value, source.attname))
                return tuple(value_list)
            elif not isinstance(value, tuple):
                return (value,)
            return value
        is_multicolumn = len(self.related_fields) > 1
        if (hasattr(raw_value, '_as_sql') or
                hasattr(raw_value, 'get_compiler')):
            # Queryset/compilable value: push it down as a subquery.
            root_constraint.add(SubqueryConstraint(alias, [target.column for target in targets],
                                                   [source.name for source in sources], raw_value),
                                AND)
        elif lookup_type == 'isnull':
            root_constraint.add(
                (Constraint(alias, targets[0].column, targets[0]), lookup_type, raw_value), AND)
        elif (lookup_type == 'exact' or (lookup_type in ['gt', 'lt', 'gte', 'lte']
                                         and not is_multicolumn)):
            # One per-column condition, ANDed together.
            value = get_normalized_value(raw_value)
            for index, source in enumerate(sources):
                root_constraint.add(
                    (Constraint(alias, targets[index].column, sources[index]), lookup_type,
                     value[index]), AND)
        elif lookup_type in ['range', 'in'] and not is_multicolumn:
            # Single-column case: unwrap the normalized 1-tuples.
            values = [get_normalized_value(value) for value in raw_value]
            value = [val[0] for val in values]
            root_constraint.add(
                (Constraint(alias, targets[0].column, sources[0]), lookup_type, value), AND)
        elif lookup_type == 'in':
            # Multi-column 'in': OR together one ANDed exact-match group per
            # candidate value (single-column 'in' is handled above).
            values = [get_normalized_value(value) for value in raw_value]
            for value in values:
                value_constraint = constraint_class()
                for index, target in enumerate(targets):
                    value_constraint.add(
                        (Constraint(alias, target.column, sources[index]), 'exact', value[index]),
                        AND)
                root_constraint.add(value_constraint, OR)
        else:
            # Note: multi-column 'range' also falls through to here.
            raise TypeError('Related Field got invalid lookup: %s' % lookup_type)
        return root_constraint
@property
def attnames(self):
    """Tuple of attribute names for the local side of the relation."""
    return tuple(field.attname for field in self.local_related_fields)
def get_defaults(self):
    """Tuple of default values, one per local related field."""
    return tuple(field.get_default() for field in self.local_related_fields)
def contribute_to_class(self, cls, name, virtual_only=False):
    # Register the field on the model class as usual, then install the
    # forward descriptor so attribute access loads the related object.
    super(ForeignObject, self).contribute_to_class(cls, name, virtual_only=virtual_only)
    setattr(cls, self.name, ReverseSingleRelatedObjectDescriptor(self))
def contribute_to_related_class(self, cls, related):
    # Internal FK's - i.e., those with a related name ending with '+' -
    # and swapped models don't get a related descriptor.
    if not self.rel.is_hidden() and not related.model._meta.swapped:
        setattr(cls, related.get_accessor_name(), ForeignRelatedObjectsDescriptor(related))
    # Record limit_choices_to so related lookups (e.g. admin raw-id
    # widgets) can be validated against it.
    if self.rel.limit_choices_to:
        cls._meta.related_fkey_lookups.append(self.rel.limit_choices_to)
class ForeignKey(ForeignObject):
    """
    A many-to-one relation: each row of the model declaring the field
    references at most one row of ``to`` (a model class, a lazy model name,
    or 'self' for recursive relations).
    """
    empty_strings_allowed = False
    default_error_messages = {
        'invalid': _('%(model)s instance with pk %(pk)r does not exist.')
    }
    description = _("Foreign Key (type determined by related field)")

    def __init__(self, to, to_field=None, rel_class=ManyToOneRel,
                 db_constraint=True, **kwargs):
        """
        ``to_field`` defaults to the target's primary key; ``db_constraint``
        controls whether a real FK constraint is created in the database.
        """
        try:
            to_name = to._meta.object_name.lower()
        except AttributeError:  # to._meta doesn't exist, so it must be RECURSIVE_RELATIONSHIP_CONSTANT
            assert isinstance(to, six.string_types), "%s(%r) is invalid. First parameter to ForeignKey must be either a model, a model name, or the string %r" % (self.__class__.__name__, to, RECURSIVE_RELATIONSHIP_CONSTANT)
        else:
            assert not to._meta.abstract, "%s cannot define a relation with abstract class %s" % (self.__class__.__name__, to._meta.object_name)
            # For backwards compatibility purposes, we need to *try* and set
            # the to_field during FK construction. It won't be guaranteed to
            # be correct until contribute_to_class is called. Refs #12190.
            to_field = to_field or (to._meta.pk and to._meta.pk.name)
        # FK columns are indexed by default.
        if 'db_index' not in kwargs:
            kwargs['db_index'] = True
        self.db_constraint = db_constraint
        kwargs['rel'] = rel_class(
            self, to, to_field,
            related_name=kwargs.pop('related_name', None),
            related_query_name=kwargs.pop('related_query_name', None),
            limit_choices_to=kwargs.pop('limit_choices_to', None),
            parent_link=kwargs.pop('parent_link', False),
            on_delete=kwargs.pop('on_delete', CASCADE),
        )
        super(ForeignKey, self).__init__(to, ['self'], [to_field], **kwargs)

    @property
    def related_field(self):
        """The single field on the target model this FK points at."""
        return self.foreign_related_fields[0]

    def get_reverse_path_info(self):
        """
        Get path from the related model to this field's model.
        """
        opts = self.model._meta
        from_opts = self.rel.to._meta
        pathinfos = [PathInfo(from_opts, opts, (opts.pk,), self.rel, not self.unique, False)]
        return pathinfos

    def validate(self, value, model_instance):
        """Validate that ``value`` references an existing related row."""
        # Parent links of multi-table inheritance are managed internally.
        if self.rel.parent_link:
            return
        super(ForeignKey, self).validate(value, model_instance)
        if value is None:
            return
        using = router.db_for_read(model_instance.__class__, instance=model_instance)
        qs = self.rel.to._default_manager.using(using).filter(
            **{self.rel.field_name: value}
        )
        qs = qs.complex_filter(self.rel.limit_choices_to)
        if not qs.exists():
            raise exceptions.ValidationError(
                self.error_messages['invalid'],
                code='invalid',
                params={'model': self.rel.to._meta.verbose_name, 'pk': value},
            )

    def get_attname(self):
        # The raw database attribute is '<name>_id'.
        return '%s_id' % self.name

    def get_attname_column(self):
        attname = self.get_attname()
        column = self.db_column or attname
        return attname, column

    def get_validator_unique_lookup_type(self):
        return '%s__%s__exact' % (self.name, self.related_field.name)

    def get_default(self):
        "Here we check if the default value is an object and return the to_field if so."
        field_default = super(ForeignKey, self).get_default()
        if isinstance(field_default, self.rel.to):
            return getattr(field_default, self.related_field.attname)
        return field_default

    def get_db_prep_save(self, value, connection):
        """Prepare ``value`` for the database; empty values are saved as NULL."""
        # Fixed: identity comparison (``is None``) instead of ``== None`` —
        # equality can be hijacked by a custom __eq__ (PEP 8 E711).
        if value == '' or value is None:
            return None
        else:
            return self.related_field.get_db_prep_save(value,
                                                       connection=connection)

    def value_to_string(self, obj):
        """Serialize the FK value for fixtures/forms."""
        if not obj:
            # In required many-to-one fields with only one available choice,
            # select that one available choice. Note: For SelectFields
            # we have to check that the length of choices is *2*, not 1,
            # because SelectFields always have an initial "blank" value.
            if not self.blank and self.choices:
                choice_list = self.get_choices_default()
                if len(choice_list) == 2:
                    return smart_text(choice_list[1][0])
        return super(ForeignKey, self).value_to_string(obj)

    def contribute_to_related_class(self, cls, related):
        super(ForeignKey, self).contribute_to_related_class(cls, related)
        # Lazily-specified targets may not have known their to_field yet.
        if self.rel.field_name is None:
            self.rel.field_name = cls._meta.pk.name

    def formfield(self, **kwargs):
        """Default form field: a ModelChoiceField over the related queryset."""
        db = kwargs.pop('using', None)
        if isinstance(self.rel.to, six.string_types):
            raise ValueError("Cannot create form field for %r yet, because "
                             "its related model %r has not been loaded yet" %
                             (self.name, self.rel.to))
        defaults = {
            'form_class': forms.ModelChoiceField,
            'queryset': self.rel.to._default_manager.using(db).complex_filter(self.rel.limit_choices_to),
            'to_field_name': self.rel.field_name,
        }
        defaults.update(kwargs)
        return super(ForeignKey, self).formfield(**defaults)

    def db_type(self, connection):
        # The database column type of a ForeignKey is the column type
        # of the field to which it points. An exception is if the ForeignKey
        # points to an AutoField/PositiveIntegerField/PositiveSmallIntegerField,
        # in which case the column type is simply that of an IntegerField.
        # If the database needs similar types for key fields however, the only
        # thing we can do is making AutoField an IntegerField.
        rel_field = self.related_field
        if (isinstance(rel_field, AutoField) or
                (not connection.features.related_fields_match_type and
                 isinstance(rel_field, (PositiveIntegerField,
                                        PositiveSmallIntegerField)))):
            return IntegerField().db_type(connection=connection)
        return rel_field.db_type(connection=connection)
class OneToOneField(ForeignKey):
    """
    A OneToOneField is essentially the same as a ForeignKey, with the exception
    that always carries a "unique" constraint with it and the reverse relation
    always returns the object pointed to (since there will only ever be one),
    rather than returning a list.
    """
    description = _("One-to-one relationship")

    def __init__(self, to, to_field=None, **kwargs):
        # Force the unique constraint that distinguishes O2O from plain FK.
        kwargs['unique'] = True
        super(OneToOneField, self).__init__(to, to_field, OneToOneRel, **kwargs)

    def contribute_to_related_class(self, cls, related):
        # Unlike ForeignKey, the reverse accessor yields a single object.
        setattr(cls, related.get_accessor_name(),
                SingleRelatedObjectDescriptor(related))

    def formfield(self, **kwargs):
        # Parent links (multi-table inheritance) are internal; no form field.
        if self.rel.parent_link:
            return None
        return super(OneToOneField, self).formfield(**kwargs)

    def save_form_data(self, instance, data):
        # Accept either a related model instance or a raw pk value.
        if isinstance(data, self.rel.to):
            setattr(instance, self.name, data)
        else:
            setattr(instance, self.attname, data)
def create_many_to_many_intermediary_model(field, klass):
    """
    Build the auto-generated "through" model for ``field`` (a
    ManyToManyField declared on ``klass``) when no explicit intermediary
    model was supplied.
    """
    from django.db import models
    managed = True
    if isinstance(field.rel.to, six.string_types) and field.rel.to != RECURSIVE_RELATIONSHIP_CONSTANT:
        # Lazy (string) target: resolve 'managed' once the model is loaded.
        to_model = field.rel.to
        to = to_model.split('.')[-1]

        def set_managed(field, model, cls):
            field.rel.through._meta.managed = model._meta.managed or cls._meta.managed
        add_lazy_relation(klass, field, to_model, set_managed)
    elif isinstance(field.rel.to, six.string_types):
        # Recursive relation ('self').
        to = klass._meta.object_name
        to_model = klass
        managed = klass._meta.managed
    else:
        to = field.rel.to._meta.object_name
        to_model = field.rel.to
        managed = klass._meta.managed or to_model._meta.managed
    name = '%s_%s' % (klass._meta.object_name, field.name)
    if field.rel.to == RECURSIVE_RELATIONSHIP_CONSTANT or to == klass._meta.object_name:
        # Self-referential m2m: disambiguate the two FK attribute names.
        from_ = 'from_%s' % to.lower()
        to = 'to_%s' % to.lower()
    else:
        from_ = klass._meta.model_name
        to = to.lower()
    meta = type('Meta', (object,), {
        'db_table': field._get_m2m_db_table(klass._meta),
        'managed': managed,
        'auto_created': klass,
        'app_label': klass._meta.app_label,
        'db_tablespace': klass._meta.db_tablespace,
        'unique_together': (from_, to),
        'verbose_name': '%(from)s-%(to)s relationship' % {'from': from_, 'to': to},
        'verbose_name_plural': '%(from)s-%(to)s relationships' % {'from': from_, 'to': to},
    })
    # Construct and return the new class.
    return type(str(name), (models.Model,), {
        'Meta': meta,
        '__module__': klass.__module__,
        from_: models.ForeignKey(klass, related_name='%s+' % name, db_tablespace=field.db_tablespace, db_constraint=field.rel.db_constraint),
        to: models.ForeignKey(to_model, related_name='%s+' % name, db_tablespace=field.db_tablespace, db_constraint=field.rel.db_constraint)
    })
class ManyToManyField(RelatedField):
    """
    Relate two models through an intermediary table, either auto-created
    (see create_many_to_many_intermediary_model) or explicit ('through').
    """
    description = _("Many-to-many relationship")

    def __init__(self, to, db_constraint=True, **kwargs):
        try:
            assert not to._meta.abstract, "%s cannot define a relation with abstract class %s" % (self.__class__.__name__, to._meta.object_name)
        except AttributeError:  # to._meta doesn't exist, so it must be RECURSIVE_RELATIONSHIP_CONSTANT
            assert isinstance(to, six.string_types), "%s(%r) is invalid. First parameter to ManyToManyField must be either a model, a model name, or the string %r" % (self.__class__.__name__, to, RECURSIVE_RELATIONSHIP_CONSTANT)
            # Python 2.6 and earlier require dictionary keys to be of str type,
            # not unicode and class names must be ASCII (in Python 2.x), so we
            # forcibly coerce it here (breaks early if there's a problem).
            to = str(to)
        kwargs['verbose_name'] = kwargs.get('verbose_name', None)
        kwargs['rel'] = ManyToManyRel(to,
            related_name=kwargs.pop('related_name', None),
            related_query_name=kwargs.pop('related_query_name', None),
            limit_choices_to=kwargs.pop('limit_choices_to', None),
            symmetrical=kwargs.pop('symmetrical', to == RECURSIVE_RELATIONSHIP_CONSTANT),
            through=kwargs.pop('through', None),
            db_constraint=db_constraint,
        )
        self.db_table = kwargs.pop('db_table', None)
        if kwargs['rel'].through is not None:
            assert self.db_table is None, "Cannot specify a db_table if an intermediary model is used."
        super(ManyToManyField, self).__init__(**kwargs)

    def _get_path_info(self, direct=False):
        """
        Called by both direct and indirect m2m traversal.
        """
        pathinfos = []
        int_model = self.rel.through
        linkfield1 = int_model._meta.get_field_by_name(self.m2m_field_name())[0]
        linkfield2 = int_model._meta.get_field_by_name(self.m2m_reverse_field_name())[0]
        if direct:
            # owner -> through -> target
            join1infos = linkfield1.get_reverse_path_info()
            join2infos = linkfield2.get_path_info()
        else:
            # target -> through -> owner
            join1infos = linkfield2.get_reverse_path_info()
            join2infos = linkfield1.get_path_info()
        pathinfos.extend(join1infos)
        pathinfos.extend(join2infos)
        return pathinfos

    def get_path_info(self):
        return self._get_path_info(direct=True)

    def get_reverse_path_info(self):
        return self._get_path_info(direct=False)

    def get_choices_default(self):
        # No "blank" choice: an empty m2m selection is just an empty set.
        return Field.get_choices(self, include_blank=False)

    def _get_m2m_db_table(self, opts):
        "Function that can be curried to provide the m2m table name for this relation"
        if self.rel.through is not None:
            return self.rel.through._meta.db_table
        elif self.db_table:
            return self.db_table
        else:
            # Auto-generated name, truncated to the backend's identifier limit.
            return util.truncate_name('%s_%s' % (opts.db_table, self.name),
                                      connection.ops.max_name_length())

    def _get_m2m_attr(self, related, attr):
        "Function that can be curried to provide the source accessor or DB column name for the m2m table"
        cache_attr = '_m2m_%s_cache' % attr
        if hasattr(self, cache_attr):
            return getattr(self, cache_attr)
        for f in self.rel.through._meta.fields:
            if hasattr(f, 'rel') and f.rel and f.rel.to == related.model:
                setattr(self, cache_attr, getattr(f, attr))
                return getattr(self, cache_attr)

    def _get_m2m_reverse_attr(self, related, attr):
        "Function that can be curried to provide the related accessor or DB column name for the m2m table"
        cache_attr = '_m2m_reverse_%s_cache' % attr
        if hasattr(self, cache_attr):
            return getattr(self, cache_attr)
        found = False
        for f in self.rel.through._meta.fields:
            if hasattr(f, 'rel') and f.rel and f.rel.to == related.parent_model:
                if related.model == related.parent_model:
                    # If this is an m2m-intermediate to self,
                    # the first foreign key you find will be
                    # the source column. Keep searching for
                    # the second foreign key.
                    if found:
                        setattr(self, cache_attr, getattr(f, attr))
                        break
                    else:
                        found = True
                else:
                    setattr(self, cache_attr, getattr(f, attr))
                    break
        return getattr(self, cache_attr)

    def value_to_string(self, obj):
        """Serialize as the list of related primary keys (for fixtures)."""
        data = ''
        if obj:
            qs = getattr(obj, self.name).all()
            data = [instance._get_pk_val() for instance in qs]
        else:
            # In required many-to-many fields with only one available choice,
            # select that one available choice.
            if not self.blank:
                choices_list = self.get_choices_default()
                if len(choices_list) == 1:
                    data = [choices_list[0][0]]
        return smart_text(data)

    def contribute_to_class(self, cls, name):
        # To support multiple relations to self, it's useful to have a non-None
        # related name on symmetrical relations for internal reasons. The
        # concept doesn't make a lot of sense externally ("you want me to
        # specify *what* on my non-reversible relation?!"), so we set it up
        # automatically. The funky name reduces the chance of an accidental
        # clash.
        if self.rel.symmetrical and (self.rel.to == "self" or self.rel.to == cls._meta.object_name):
            self.rel.related_name = "%s_rel_+" % name
        super(ManyToManyField, self).contribute_to_class(cls, name)
        # The intermediate m2m model is not auto created if:
        #  1) There is a manually specified intermediate, or
        #  2) The class owning the m2m field is abstract.
        #  3) The class owning the m2m field has been swapped out.
        if not self.rel.through and not cls._meta.abstract and not cls._meta.swapped:
            self.rel.through = create_many_to_many_intermediary_model(self, cls)
        # Add the descriptor for the m2m relation
        setattr(cls, self.name, ReverseManyRelatedObjectsDescriptor(self))
        # Set up the accessor for the m2m table name for the relation
        self.m2m_db_table = curry(self._get_m2m_db_table, cls._meta)
        # Populate some necessary rel arguments so that cross-app relations
        # work correctly.
        if isinstance(self.rel.through, six.string_types):
            def resolve_through_model(field, model, cls):
                field.rel.through = model
            add_lazy_relation(cls, self, self.rel.through, resolve_through_model)

    def contribute_to_related_class(self, cls, related):
        # Internal M2Ms (i.e., those with a related name ending with '+')
        # and swapped models don't get a related descriptor.
        if not self.rel.is_hidden() and not related.model._meta.swapped:
            setattr(cls, related.get_accessor_name(), ManyRelatedObjectsDescriptor(related))
        # Set up the accessors for the column names on the m2m table
        self.m2m_column_name = curry(self._get_m2m_attr, related, 'column')
        self.m2m_reverse_name = curry(self._get_m2m_reverse_attr, related, 'column')
        self.m2m_field_name = curry(self._get_m2m_attr, related, 'name')
        self.m2m_reverse_field_name = curry(self._get_m2m_reverse_attr, related, 'name')
        get_m2m_rel = curry(self._get_m2m_attr, related, 'rel')
        self.m2m_target_field_name = lambda: get_m2m_rel().field_name
        get_m2m_reverse_rel = curry(self._get_m2m_reverse_attr, related, 'rel')
        self.m2m_reverse_target_field_name = lambda: get_m2m_reverse_rel().field_name

    def set_attributes_from_rel(self):
        # Unlike FK, an m2m field doesn't copy name/verbose_name from the rel.
        pass

    def value_from_object(self, obj):
        "Returns the value of this field in the given model instance."
        return getattr(obj, self.attname).all()

    def save_form_data(self, instance, data):
        setattr(instance, self.attname, data)

    def formfield(self, **kwargs):
        """Default form field: a ModelMultipleChoiceField over the related queryset."""
        db = kwargs.pop('using', None)
        defaults = {
            'form_class': forms.ModelMultipleChoiceField,
            'queryset': self.rel.to._default_manager.using(db).complex_filter(self.rel.limit_choices_to)
        }
        defaults.update(kwargs)
        # If initial is passed in, it's a list of related objects, but the
        # MultipleChoiceField takes a list of IDs.
        if defaults.get('initial') is not None:
            initial = defaults['initial']
            if callable(initial):
                initial = initial()
            defaults['initial'] = [i._get_pk_val() for i in initial]
        return super(ManyToManyField, self).formfield(**defaults)
| apache-2.0 |
theworldbright/mainsite | aspc/events/models.py | 2 | 7699 | from django.db import models
from aspc.events.backends.facebook import FacebookBackend
from aspc.events.backends.collegiatelink import CollegiateLinkBackend
from django.template.defaultfilters import truncatewords
from django.core.exceptions import ObjectDoesNotExist
from datetime import datetime, timedelta
import time
import logging
import json
from aspc.events.exceptions import InvalidEventException, EventAlreadyExistsException
# Module-level logger for the events app.
logger = logging.getLogger(__name__)
# Shared max_length used by every CharField on these models.
CHARFIELD_MAX_LENGTH = 255
class Event(models.Model):
    """A campus event, submitted manually or imported from Facebook/CollegiateLink."""
    name = models.CharField(max_length=CHARFIELD_MAX_LENGTH)
    start = models.DateTimeField()
    end = models.DateTimeField(null=True, blank=True)
    location = models.CharField(max_length=CHARFIELD_MAX_LENGTH)
    description = models.TextField()
    host = models.CharField(max_length=CHARFIELD_MAX_LENGTH)
    # External source URL, if the event was imported.
    url = models.CharField(max_length=CHARFIELD_MAX_LENGTH, null=True, blank=True)
    # Moderation state; only 'approved' events are shown publicly.
    status = models.CharField(max_length=CHARFIELD_MAX_LENGTH, choices=(('pending', 'Pending'), ('approved', 'Approved'), ('denied', 'Denied')), default='pending')

    def __unicode__(self):
        return self.name

    def get_status_display_colored(self):
        """Return the status as colorable HTML for the admin changelist.

        Fixed: the original opened a <div> but closed with </span>,
        producing mismatched markup; the closing tag now matches.
        """
        return '<div class="eventstatus {0}">{1}</div>'.format(self.status, self.get_status_display())
    get_status_display_colored.allow_tags = True
    get_status_display_colored.admin_order_field = 'status'
    get_status_display_colored.short_description = "Status"

    class Meta:
        ordering = ('start', 'name', 'end')
        verbose_name_plural = "Events"
class FacebookEventPage(models.Model):
    """A Facebook page whose events are scraped into Event records."""
    name = models.CharField(max_length=CHARFIELD_MAX_LENGTH)
    url = models.CharField(max_length=CHARFIELD_MAX_LENGTH)
    # Facebook's numeric page id, stored as text.
    page_id = models.CharField(max_length=CHARFIELD_MAX_LENGTH)

    def __unicode__(self):
        return self.name
class FacebookEventPageController(object):
    """Stateless helpers to register Facebook pages and scrape their events."""

    def __unicode__(self):
        # NOTE(review): instances never get a ``name`` attribute, so calling
        # this would raise AttributeError — confirm whether it is dead code.
        return self.name

    @staticmethod
    def new_facebook_event_page(data):
        """Create or refresh a FacebookEventPage from a submitted page URL."""
        page_data = FacebookBackend().get_page_data(data['page_url'])
        # Updates an existing event page or adds a new one to the database
        # get_or_create returns an object and a boolean value specifying whether a new object was created or not
        event_page, is_new = FacebookEventPage.objects.get_or_create(name=page_data['name'])
        for key, value in page_data.items():
            setattr(event_page, key, value)
        event_page.save()
        # TODO:
        # Don't scrape on creation of a new page, because the user doesn't want to wait around for that
        # Instead, schedule a task to scrape from the new page in the background
        # FacebookEventPageController().scrape_page_events(event_page)
        return event_page

    @staticmethod
    def scrape_page_events(event_page):
        """Import every event listed on ``event_page`` into the Event table."""
        event_page_event_ids = FacebookBackend().get_page_event_ids(event_page.page_id)
        for event_id in event_page_event_ids:
            # Mimics raw data being passed via GET so we can reuse the new_event method
            normalized_event_data = {
                'event_source': 'facebook',
                'event_url': 'http://facebook.com/events/' + event_id
            }
            try:
                EventController().new_event(normalized_event_data)
            except (InvalidEventException, EventAlreadyExistsException):
                pass # No need to be concerned if a page has malformed or past events... not our problem, we just won't import them

    @staticmethod
    def facebook_event_pages():
        """Return all registered FacebookEventPage records."""
        return FacebookEventPage.objects.all()
class EventController(object):
    """Stateless create/import/query helpers for Event records."""

    def __unicode__(self):
        # NOTE(review): instances never get a ``name`` attribute, so calling
        # this would raise AttributeError — confirm whether it is dead code.
        return self.name

    @staticmethod
    def new_event(data):
        """
        Create or update an Event from scraped (facebook) or manual form data.

        Raises EventAlreadyExistsException for duplicates; returns the saved
        Event, or False when ``event_source`` is unrecognized.
        """
        event_data = {}
        if data['event_source'] == 'facebook':
            event_data = FacebookBackend().get_event_data(data['event_url'])
            # Checks if an event with the same name or URL already exists
            if Event.objects.filter(url=event_data.get('url', '')):
                raise EventAlreadyExistsException('Event with name "' + event_data['name'] + '" has already been submitted.')
        elif data['event_source'] == 'manual':
            event_data = data
            # Checks if an event with the same name or URL already exists
            if Event.objects.filter(name=event_data['name']):
                raise EventAlreadyExistsException('Event with name "' + event_data['name'] + '" already exists.')
            elif Event.objects.exclude(url='').filter(url=event_data.get('url', '')):
                raise EventAlreadyExistsException('Event with external URL "' + event_data['url'] + '" already exists.')
            # Manual submissions arrive as naive 'YYYY-MM-DDTHH:MM' strings.
            event_data['start'] = datetime.strptime(event_data['start'], '%Y-%m-%dT%H:%M')
            if 'end' in event_data and event_data['end'] != '':
                event_data['end'] = datetime.strptime(event_data['end'], '%Y-%m-%dT%H:%M')
            del event_data['event_source']
        else: # If corrupted data or erroneous POST request, do nothing
            return False
        # Updates an existing event or adds a new one to the database
        # get_or_create returns an object and a boolean value specifying whether a new object was created or not
        event, is_new = Event.objects.get_or_create(name=event_data['name'], defaults={'start': datetime.today(), 'status': 'pending'})
        for key, value in event_data.items():
            setattr(event, key, value)
        event.save()
        return event

    @staticmethod
    def fetch_collegiatelink_events():
        """Import/refresh events from CollegiateLink; return the raw data."""
        events_data = CollegiateLinkBackend().get_events_data()
        for event_data in events_data:
            # Updates an existing event or adds a new one to the database
            # get_or_create returns an object and a boolean value specifying whether a new object was created or not
            event, is_new = Event.objects.get_or_create(name=event_data['name'], defaults={'start': datetime.today(), 'status': 'pending'})
            for key, value in event_data.items():
                setattr(event, key, value)
            event.save()
        return events_data

    # GET methods invoked by views
    @staticmethod
    def all_events():
        return Event.objects.all()

    @staticmethod
    def approved_events():
        # Only approved events are publicly visible.
        return Event.objects.all().filter(status='approved')

    @staticmethod
    def event_with_id(event_id):
        """Return the approved Event with ``event_id``, or None."""
        try:
            event = (EventController.approved_events()).get(id=event_id)
        except ObjectDoesNotExist:
            return None
        else:
            return event

    @staticmethod
    def todays_events():
        """Approved events that start today (server-local date)."""
        try:
            event = (EventController.approved_events()).filter(start__year=datetime.today().year, start__month=datetime.today().month, start__day=datetime.today().day)
        except ObjectDoesNotExist:
            return None
        else:
            return event

    @staticmethod
    def weeks_events():
        """Approved events in the current week (week starts on Sunday)."""
        try:
            start_week = datetime.today() - timedelta(days=datetime.today().weekday() + 1)
            event = (EventController.approved_events()).filter(start__range=[start_week, start_week + timedelta(7)])
        except ObjectDoesNotExist:
            return None
        else:
            return event
class EventHelper(object):
    """Presentation helpers for lists of Event instances."""

    def __unicode__(self):
        # NOTE(review): instances never get a ``name`` attribute, so calling
        # this would raise AttributeError — confirm whether it is dead code.
        return self.name

    @staticmethod
    def earliest_event_time(event_list):
        """Return the earliest start time-of-day among the events, or None."""
        start_times = [event.start.time() for event in event_list]
        # Idiom fix: was ``if bool(len(start_times)):`` — an empty list is
        # already falsy, so test it directly.
        if start_times:
            return min(start_times)
        else:
            return None

    @staticmethod
    def latest_event_time(event_list):
        """Return the latest start time-of-day among the events, or None."""
        start_times = [event.start.time() for event in event_list]
        if start_times:
            return max(start_times)
        else:
            return None

    @staticmethod
    def events_to_json(event_list):
        """Serialize events to a fullCalendar-style JS array literal.

        NOTE: keys are intentionally unquoted (a JS object literal, not
        strict JSON); the output format is preserved exactly because the
        frontend consumes it as-is.
        """
        parsed_events = []
        for event in event_list:
            s = '{'
            s += 'title: ' + json.dumps(event.name) + ', '
            s += 'start: ' + str(time.mktime(event.start.timetuple())) + ', '
            s += 'end: ' + (str(time.mktime(event.end.timetuple())) if event.end else 'null') + ', '
            s += 'location: ' + (json.dumps(event.location) if event.location else 'null') + ', '
            s += 'description: ' + (json.dumps(event.description) if event.description else 'null') + ', '
            s += 'url: "/events/event/' + str(event.id) + '", '
            s += 'allDay: false'
            s += '}'
            parsed_events.append(s)
        return '[' + ', '.join(parsed_events) + ']'
Sing-Li/go-buildpack | builds/runtimes/python-2.7.6/lib/python2.7/test/test_generators.py | 72 | 50768 | tutorial_tests = """
Let's try a simple generator:
>>> def f():
... yield 1
... yield 2
>>> for i in f():
... print i
1
2
>>> g = f()
>>> g.next()
1
>>> g.next()
2
"Falling off the end" stops the generator:
>>> g.next()
Traceback (most recent call last):
File "<stdin>", line 1, in ?
File "<stdin>", line 2, in g
StopIteration
"return" also stops the generator:
>>> def f():
... yield 1
... return
... yield 2 # never reached
...
>>> g = f()
>>> g.next()
1
>>> g.next()
Traceback (most recent call last):
File "<stdin>", line 1, in ?
File "<stdin>", line 3, in f
StopIteration
>>> g.next() # once stopped, can't be resumed
Traceback (most recent call last):
File "<stdin>", line 1, in ?
StopIteration
"raise StopIteration" stops the generator too:
>>> def f():
... yield 1
... raise StopIteration
... yield 2 # never reached
...
>>> g = f()
>>> g.next()
1
>>> g.next()
Traceback (most recent call last):
File "<stdin>", line 1, in ?
StopIteration
>>> g.next()
Traceback (most recent call last):
File "<stdin>", line 1, in ?
StopIteration
However, they are not exactly equivalent:
>>> def g1():
... try:
... return
... except:
... yield 1
...
>>> list(g1())
[]
>>> def g2():
... try:
... raise StopIteration
... except:
... yield 42
>>> print list(g2())
[42]
This may be surprising at first:
>>> def g3():
... try:
... return
... finally:
... yield 1
...
>>> list(g3())
[1]
Let's create an alternate range() function implemented as a generator:
>>> def yrange(n):
... for i in range(n):
... yield i
...
>>> list(yrange(5))
[0, 1, 2, 3, 4]
Generators always return to the most recent caller:
>>> def creator():
... r = yrange(5)
... print "creator", r.next()
... return r
...
>>> def caller():
... r = creator()
... for i in r:
... print "caller", i
...
>>> caller()
creator 0
caller 1
caller 2
caller 3
caller 4
Generators can call other generators:
>>> def zrange(n):
... for i in yrange(n):
... yield i
...
>>> list(zrange(5))
[0, 1, 2, 3, 4]
"""
# The examples from PEP 255.
pep_tests = """
Specification: Yield
Restriction: A generator cannot be resumed while it is actively
running:
>>> def g():
... i = me.next()
... yield i
>>> me = g()
>>> me.next()
Traceback (most recent call last):
...
File "<string>", line 2, in g
ValueError: generator already executing
Specification: Return
Note that return isn't always equivalent to raising StopIteration: the
difference lies in how enclosing try/except constructs are treated.
For example,
>>> def f1():
... try:
... return
... except:
... yield 1
>>> print list(f1())
[]
because, as in any function, return simply exits, but
>>> def f2():
... try:
... raise StopIteration
... except:
... yield 42
>>> print list(f2())
[42]
because StopIteration is captured by a bare "except", as is any
exception.
Specification: Generators and Exception Propagation
>>> def f():
... return 1//0
>>> def g():
... yield f() # the zero division exception propagates
... yield 42 # and we'll never get here
>>> k = g()
>>> k.next()
Traceback (most recent call last):
File "<stdin>", line 1, in ?
File "<stdin>", line 2, in g
File "<stdin>", line 2, in f
ZeroDivisionError: integer division or modulo by zero
>>> k.next() # and the generator cannot be resumed
Traceback (most recent call last):
File "<stdin>", line 1, in ?
StopIteration
>>>
Specification: Try/Except/Finally
>>> def f():
... try:
... yield 1
... try:
... yield 2
... 1//0
... yield 3 # never get here
... except ZeroDivisionError:
... yield 4
... yield 5
... raise
... except:
... yield 6
... yield 7 # the "raise" above stops this
... except:
... yield 8
... yield 9
... try:
... x = 12
... finally:
... yield 10
... yield 11
>>> print list(f())
[1, 2, 4, 5, 8, 9, 10, 11]
>>>
Guido's binary tree example.
>>> # A binary tree class.
>>> class Tree:
...
... def __init__(self, label, left=None, right=None):
... self.label = label
... self.left = left
... self.right = right
...
... def __repr__(self, level=0, indent=" "):
... s = level*indent + repr(self.label)
... if self.left:
... s = s + "\\n" + self.left.__repr__(level+1, indent)
... if self.right:
... s = s + "\\n" + self.right.__repr__(level+1, indent)
... return s
...
... def __iter__(self):
... return inorder(self)
>>> # Create a Tree from a list.
>>> def tree(list):
... n = len(list)
... if n == 0:
... return []
... i = n // 2
... return Tree(list[i], tree(list[:i]), tree(list[i+1:]))
>>> # Show it off: create a tree.
>>> t = tree("ABCDEFGHIJKLMNOPQRSTUVWXYZ")
>>> # A recursive generator that generates Tree labels in in-order.
>>> def inorder(t):
... if t:
... for x in inorder(t.left):
... yield x
... yield t.label
... for x in inorder(t.right):
... yield x
>>> # Show it off: create a tree.
>>> t = tree("ABCDEFGHIJKLMNOPQRSTUVWXYZ")
>>> # Print the nodes of the tree in in-order.
>>> for x in t:
... print x,
A B C D E F G H I J K L M N O P Q R S T U V W X Y Z
>>> # A non-recursive generator.
>>> def inorder(node):
... stack = []
... while node:
... while node.left:
... stack.append(node)
... node = node.left
... yield node.label
... while not node.right:
... try:
... node = stack.pop()
... except IndexError:
... return
... yield node.label
... node = node.right
>>> # Exercise the non-recursive generator.
>>> for x in t:
... print x,
A B C D E F G H I J K L M N O P Q R S T U V W X Y Z
"""
# Examples from Iterator-List and Python-Dev and c.l.py.
email_tests = """
The difference between yielding None and returning it.
>>> def g():
... for i in range(3):
... yield None
... yield None
... return
>>> list(g())
[None, None, None, None]
Ensure that explicitly raising StopIteration acts like any other exception
in try/except, not like a return.
>>> def g():
... yield 1
... try:
... raise StopIteration
... except:
... yield 2
... yield 3
>>> list(g())
[1, 2, 3]
Next one was posted to c.l.py.
>>> def gcomb(x, k):
... "Generate all combinations of k elements from list x."
...
... if k > len(x):
... return
... if k == 0:
... yield []
... else:
... first, rest = x[0], x[1:]
... # A combination does or doesn't contain first.
... # If it does, the remainder is a k-1 comb of rest.
... for c in gcomb(rest, k-1):
... c.insert(0, first)
... yield c
... # If it doesn't contain first, it's a k comb of rest.
... for c in gcomb(rest, k):
... yield c
>>> seq = range(1, 5)
>>> for k in range(len(seq) + 2):
... print "%d-combs of %s:" % (k, seq)
... for c in gcomb(seq, k):
... print " ", c
0-combs of [1, 2, 3, 4]:
[]
1-combs of [1, 2, 3, 4]:
[1]
[2]
[3]
[4]
2-combs of [1, 2, 3, 4]:
[1, 2]
[1, 3]
[1, 4]
[2, 3]
[2, 4]
[3, 4]
3-combs of [1, 2, 3, 4]:
[1, 2, 3]
[1, 2, 4]
[1, 3, 4]
[2, 3, 4]
4-combs of [1, 2, 3, 4]:
[1, 2, 3, 4]
5-combs of [1, 2, 3, 4]:
From the Iterators list, about the types of these things.
>>> def g():
... yield 1
...
>>> type(g)
<type 'function'>
>>> i = g()
>>> type(i)
<type 'generator'>
>>> [s for s in dir(i) if not s.startswith('_')]
['close', 'gi_code', 'gi_frame', 'gi_running', 'next', 'send', 'throw']
>>> from test.test_support import HAVE_DOCSTRINGS
>>> print(i.next.__doc__ if HAVE_DOCSTRINGS else 'x.next() -> the next value, or raise StopIteration')
x.next() -> the next value, or raise StopIteration
>>> iter(i) is i
True
>>> import types
>>> isinstance(i, types.GeneratorType)
True
And more, added later.
>>> i.gi_running
0
>>> type(i.gi_frame)
<type 'frame'>
>>> i.gi_running = 42
Traceback (most recent call last):
...
TypeError: readonly attribute
>>> def g():
... yield me.gi_running
>>> me = g()
>>> me.gi_running
0
>>> me.next()
1
>>> me.gi_running
0
A clever union-find implementation from c.l.py, due to David Eppstein.
Sent: Friday, June 29, 2001 12:16 PM
To: python-list@python.org
Subject: Re: PEP 255: Simple Generators
>>> class disjointSet:
... def __init__(self, name):
... self.name = name
... self.parent = None
... self.generator = self.generate()
...
... def generate(self):
... while not self.parent:
... yield self
... for x in self.parent.generator:
... yield x
...
... def find(self):
... return self.generator.next()
...
... def union(self, parent):
... if self.parent:
... raise ValueError("Sorry, I'm not a root!")
... self.parent = parent
...
... def __str__(self):
... return self.name
>>> names = "ABCDEFGHIJKLM"
>>> sets = [disjointSet(name) for name in names]
>>> roots = sets[:]
>>> import random
>>> gen = random.WichmannHill(42)
>>> while 1:
... for s in sets:
... print "%s->%s" % (s, s.find()),
... print
... if len(roots) > 1:
... s1 = gen.choice(roots)
... roots.remove(s1)
... s2 = gen.choice(roots)
... s1.union(s2)
... print "merged", s1, "into", s2
... else:
... break
A->A B->B C->C D->D E->E F->F G->G H->H I->I J->J K->K L->L M->M
merged D into G
A->A B->B C->C D->G E->E F->F G->G H->H I->I J->J K->K L->L M->M
merged C into F
A->A B->B C->F D->G E->E F->F G->G H->H I->I J->J K->K L->L M->M
merged L into A
A->A B->B C->F D->G E->E F->F G->G H->H I->I J->J K->K L->A M->M
merged H into E
A->A B->B C->F D->G E->E F->F G->G H->E I->I J->J K->K L->A M->M
merged B into E
A->A B->E C->F D->G E->E F->F G->G H->E I->I J->J K->K L->A M->M
merged J into G
A->A B->E C->F D->G E->E F->F G->G H->E I->I J->G K->K L->A M->M
merged E into G
A->A B->G C->F D->G E->G F->F G->G H->G I->I J->G K->K L->A M->M
merged M into G
A->A B->G C->F D->G E->G F->F G->G H->G I->I J->G K->K L->A M->G
merged I into K
A->A B->G C->F D->G E->G F->F G->G H->G I->K J->G K->K L->A M->G
merged K into A
A->A B->G C->F D->G E->G F->F G->G H->G I->A J->G K->A L->A M->G
merged F into A
A->A B->G C->A D->G E->G F->A G->G H->G I->A J->G K->A L->A M->G
merged A into G
A->G B->G C->G D->G E->G F->G G->G H->G I->G J->G K->G L->G M->G
"""
# Emacs turd '
# Fun tests (for sufficiently warped notions of "fun").
fun_tests = """
Build up to a recursive Sieve of Eratosthenes generator.
>>> def firstn(g, n):
... return [g.next() for i in range(n)]
>>> def intsfrom(i):
... while 1:
... yield i
... i += 1
>>> firstn(intsfrom(5), 7)
[5, 6, 7, 8, 9, 10, 11]
>>> def exclude_multiples(n, ints):
... for i in ints:
... if i % n:
... yield i
>>> firstn(exclude_multiples(3, intsfrom(1)), 6)
[1, 2, 4, 5, 7, 8]
>>> def sieve(ints):
... prime = ints.next()
... yield prime
... not_divisible_by_prime = exclude_multiples(prime, ints)
... for p in sieve(not_divisible_by_prime):
... yield p
>>> primes = sieve(intsfrom(2))
>>> firstn(primes, 20)
[2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41, 43, 47, 53, 59, 61, 67, 71]
Another famous problem: generate all integers of the form
2**i * 3**j * 5**k
in increasing order, where i,j,k >= 0. Trickier than it may look at first!
Try writing it without generators, and correctly, and without generating
3 internal results for each result output.
>>> def times(n, g):
... for i in g:
... yield n * i
>>> firstn(times(10, intsfrom(1)), 10)
[10, 20, 30, 40, 50, 60, 70, 80, 90, 100]
>>> def merge(g, h):
... ng = g.next()
... nh = h.next()
... while 1:
... if ng < nh:
... yield ng
... ng = g.next()
... elif ng > nh:
... yield nh
... nh = h.next()
... else:
... yield ng
... ng = g.next()
... nh = h.next()
The following works, but is doing a whale of a lot of redundant work --
it's not clear how to get the internal uses of m235 to share a single
generator. Note that me_times2 (etc) each need to see every element in the
result sequence. So this is an example where lazy lists are more natural
(you can look at the head of a lazy list any number of times).
>>> def m235():
... yield 1
... me_times2 = times(2, m235())
... me_times3 = times(3, m235())
... me_times5 = times(5, m235())
... for i in merge(merge(me_times2,
... me_times3),
... me_times5):
... yield i
Don't print "too many" of these -- the implementation above is extremely
inefficient: each call of m235() leads to 3 recursive calls, and in
turn each of those 3 more, and so on, and so on, until we've descended
enough levels to satisfy the print stmts. Very odd: when I printed 5
lines of results below, this managed to screw up Win98's malloc in "the
usual" way, i.e. the heap grew over 4Mb so Win98 started fragmenting
address space, and it *looked* like a very slow leak.
>>> result = m235()
>>> for i in range(3):
... print firstn(result, 15)
[1, 2, 3, 4, 5, 6, 8, 9, 10, 12, 15, 16, 18, 20, 24]
[25, 27, 30, 32, 36, 40, 45, 48, 50, 54, 60, 64, 72, 75, 80]
[81, 90, 96, 100, 108, 120, 125, 128, 135, 144, 150, 160, 162, 180, 192]
Heh. Here's one way to get a shared list, complete with an excruciating
namespace renaming trick. The *pretty* part is that the times() and merge()
functions can be reused as-is, because they only assume their stream
arguments are iterable -- a LazyList is the same as a generator to times().
>>> class LazyList:
... def __init__(self, g):
... self.sofar = []
... self.fetch = g.next
...
... def __getitem__(self, i):
... sofar, fetch = self.sofar, self.fetch
... while i >= len(sofar):
... sofar.append(fetch())
... return sofar[i]
>>> def m235():
... yield 1
... # Gack: m235 below actually refers to a LazyList.
... me_times2 = times(2, m235)
... me_times3 = times(3, m235)
... me_times5 = times(5, m235)
... for i in merge(merge(me_times2,
... me_times3),
... me_times5):
... yield i
Print as many of these as you like -- *this* implementation is memory-
efficient.
>>> m235 = LazyList(m235())
>>> for i in range(5):
... print [m235[j] for j in range(15*i, 15*(i+1))]
[1, 2, 3, 4, 5, 6, 8, 9, 10, 12, 15, 16, 18, 20, 24]
[25, 27, 30, 32, 36, 40, 45, 48, 50, 54, 60, 64, 72, 75, 80]
[81, 90, 96, 100, 108, 120, 125, 128, 135, 144, 150, 160, 162, 180, 192]
[200, 216, 225, 240, 243, 250, 256, 270, 288, 300, 320, 324, 360, 375, 384]
[400, 405, 432, 450, 480, 486, 500, 512, 540, 576, 600, 625, 640, 648, 675]
Ye olde Fibonacci generator, LazyList style.
>>> def fibgen(a, b):
...
... def sum(g, h):
... while 1:
... yield g.next() + h.next()
...
... def tail(g):
... g.next() # throw first away
... for x in g:
... yield x
...
... yield a
... yield b
... for s in sum(iter(fib),
... tail(iter(fib))):
... yield s
>>> fib = LazyList(fibgen(1, 2))
>>> firstn(iter(fib), 17)
[1, 2, 3, 5, 8, 13, 21, 34, 55, 89, 144, 233, 377, 610, 987, 1597, 2584]
Running after your tail with itertools.tee (new in version 2.4)
The algorithms "m235" (Hamming) and Fibonacci presented above are both
examples of a whole family of FP (functional programming) algorithms
where a function produces and returns a list while the production algorithm
suppose the list as already produced by recursively calling itself.
For these algorithms to work, they must:
- produce at least a first element without presupposing the existence of
the rest of the list
- produce their elements in a lazy manner
To work efficiently, the beginning of the list must not be recomputed over
and over again. This is ensured in most FP languages as a built-in feature.
In python, we have to explicitly maintain a list of already computed results
and abandon genuine recursivity.
This is what had been attempted above with the LazyList class. One problem
with that class is that it keeps a list of all of the generated results and
therefore continually grows. This partially defeats the goal of the generator
concept, viz. produce the results only as needed instead of producing them
all and thereby wasting memory.
Thanks to itertools.tee, it is now clear "how to get the internal uses of
m235 to share a single generator".
>>> from itertools import tee
>>> def m235():
... def _m235():
... yield 1
... for n in merge(times(2, m2),
... merge(times(3, m3),
... times(5, m5))):
... yield n
... m1 = _m235()
... m2, m3, m5, mRes = tee(m1, 4)
... return mRes
>>> it = m235()
>>> for i in range(5):
... print firstn(it, 15)
[1, 2, 3, 4, 5, 6, 8, 9, 10, 12, 15, 16, 18, 20, 24]
[25, 27, 30, 32, 36, 40, 45, 48, 50, 54, 60, 64, 72, 75, 80]
[81, 90, 96, 100, 108, 120, 125, 128, 135, 144, 150, 160, 162, 180, 192]
[200, 216, 225, 240, 243, 250, 256, 270, 288, 300, 320, 324, 360, 375, 384]
[400, 405, 432, 450, 480, 486, 500, 512, 540, 576, 600, 625, 640, 648, 675]
The "tee" function does just what we want. It internally keeps a generated
result for as long as it has not been "consumed" from all of the duplicated
iterators, whereupon it is deleted. You can therefore print the hamming
sequence during hours without increasing memory usage, or very little.
The beauty of it is that recursive running-after-their-tail FP algorithms
are quite straightforwardly expressed with this Python idiom.
Ye olde Fibonacci generator, tee style.
>>> def fib():
...
... def _isum(g, h):
... while 1:
... yield g.next() + h.next()
...
... def _fib():
... yield 1
... yield 2
... fibTail.next() # throw first away
... for res in _isum(fibHead, fibTail):
... yield res
...
... realfib = _fib()
... fibHead, fibTail, fibRes = tee(realfib, 3)
... return fibRes
>>> firstn(fib(), 17)
[1, 2, 3, 5, 8, 13, 21, 34, 55, 89, 144, 233, 377, 610, 987, 1597, 2584]
"""
# syntax_tests mostly provokes SyntaxErrors. Also fiddling with #if 0
# hackery.
syntax_tests = """
>>> def f():
... return 22
... yield 1
Traceback (most recent call last):
..
SyntaxError: 'return' with argument inside generator (<doctest test.test_generators.__test__.syntax[0]>, line 3)
>>> def f():
... yield 1
... return 22
Traceback (most recent call last):
..
SyntaxError: 'return' with argument inside generator (<doctest test.test_generators.__test__.syntax[1]>, line 3)
"return None" is not the same as "return" in a generator:
>>> def f():
... yield 1
... return None
Traceback (most recent call last):
..
SyntaxError: 'return' with argument inside generator (<doctest test.test_generators.__test__.syntax[2]>, line 3)
These are fine:
>>> def f():
... yield 1
... return
>>> def f():
... try:
... yield 1
... finally:
... pass
>>> def f():
... try:
... try:
... 1//0
... except ZeroDivisionError:
... yield 666
... except:
... pass
... finally:
... pass
>>> def f():
... try:
... try:
... yield 12
... 1//0
... except ZeroDivisionError:
... yield 666
... except:
... try:
... x = 12
... finally:
... yield 12
... except:
... return
>>> list(f())
[12, 666]
>>> def f():
... yield
>>> type(f())
<type 'generator'>
>>> def f():
... if 0:
... yield
>>> type(f())
<type 'generator'>
>>> def f():
... if 0:
... yield 1
>>> type(f())
<type 'generator'>
>>> def f():
... if "":
... yield None
>>> type(f())
<type 'generator'>
>>> def f():
... return
... try:
... if x==4:
... pass
... elif 0:
... try:
... 1//0
... except SyntaxError:
... pass
... else:
... if 0:
... while 12:
... x += 1
... yield 2 # don't blink
... f(a, b, c, d, e)
... else:
... pass
... except:
... x = 1
... return
>>> type(f())
<type 'generator'>
>>> def f():
... if 0:
... def g():
... yield 1
...
>>> type(f())
<type 'NoneType'>
>>> def f():
... if 0:
... class C:
... def __init__(self):
... yield 1
... def f(self):
... yield 2
>>> type(f())
<type 'NoneType'>
>>> def f():
... if 0:
... return
... if 0:
... yield 2
>>> type(f())
<type 'generator'>
>>> def f():
... if 0:
... lambda x: x # shouldn't trigger here
... return # or here
... def f(i):
... return 2*i # or here
... if 0:
... return 3 # but *this* sucks (line 8)
... if 0:
... yield 2 # because it's a generator (line 10)
Traceback (most recent call last):
SyntaxError: 'return' with argument inside generator (<doctest test.test_generators.__test__.syntax[24]>, line 10)
This one caused a crash (see SF bug 567538):
>>> def f():
... for i in range(3):
... try:
... continue
... finally:
... yield i
...
>>> g = f()
>>> print g.next()
0
>>> print g.next()
1
>>> print g.next()
2
>>> print g.next()
Traceback (most recent call last):
StopIteration
Test the gi_code attribute
>>> def f():
... yield 5
...
>>> g = f()
>>> g.gi_code is f.func_code
True
>>> g.next()
5
>>> g.next()
Traceback (most recent call last):
StopIteration
>>> g.gi_code is f.func_code
True
Test the __name__ attribute and the repr()
>>> def f():
... yield 5
...
>>> g = f()
>>> g.__name__
'f'
>>> repr(g) # doctest: +ELLIPSIS
'<generator object f at ...>'
Lambdas shouldn't have their usual return behavior.
>>> x = lambda: (yield 1)
>>> list(x())
[1]
>>> x = lambda: ((yield 1), (yield 2))
>>> list(x())
[1, 2]
"""
# conjoin is a simple backtracking generator, named in honor of Icon's
# "conjunction" control structure. Pass a list of no-argument functions
# that return iterable objects. Easiest to explain by example: assume the
# function list [x, y, z] is passed. Then conjoin acts like:
#
# def g():
# values = [None] * 3
# for values[0] in x():
# for values[1] in y():
# for values[2] in z():
# yield values
#
# So some 3-lists of values *may* be generated, each time we successfully
# get into the innermost loop. If an iterator fails (is exhausted) before
# then, it "backtracks" to get the next value from the nearest enclosing
# iterator (the one "to the left"), and starts all over again at the next
# slot (pumps a fresh iterator). Of course this is most useful when the
# iterators have side-effects, so that which values *can* be generated at
# each slot depend on the values iterated at previous slots.
def simple_conjoin(gs):
    """Yield every element of the cross product of the iterables in gs.

    gs is a list of no-argument callables, each returning an iterable.
    Each yielded element is the *same* shared list, with one slot per
    callable; callers who want to keep a result must copy it.
    """
    slots = [None] * len(gs)

    def descend(depth):
        # Past the last slot: a complete assignment is ready to hand out.
        if depth >= len(gs):
            yield slots
            return
        # Otherwise fill this slot with each candidate in turn and recurse.
        for candidate in gs[depth]():
            slots[depth] = candidate
            for result in descend(depth + 1):
                yield result

    for result in descend(0):
        yield result
# That works fine, but recursing a level and checking i against len(gs) for
# each item produced is inefficient. By doing manual loop unrolling across
# generator boundaries, it's possible to eliminate most of that overhead.
# This isn't worth the bother *in general* for generators, but conjoin() is
# a core building block for some CPU-intensive generator applications.
def conjoin(gs):
    """Backtracking cross-product generator (see comments above).

    Behaves exactly like simple_conjoin() -- yields the same shared
    `values` list for each cross-product element -- but unrolls the
    recursion three loop nests at a time to cut per-element overhead.
    """
    n = len(gs)
    values = [None] * n

    # Do one loop nest at time recursively, until the # of loop nests
    # remaining is divisible by 3.

    def gen(i):
        if i >= n:
            # All slots filled: hand out the (shared) result list.
            yield values

        elif (n-i) % 3:
            # Peel off a single loop nest so the remainder is a multiple
            # of 3, then continue recursively.
            ip1 = i+1
            for values[i] in gs[i]():
                for x in gen(ip1):
                    yield x

        else:
            for x in _gen3(i):
                yield x

    # Do three loop nests at a time, recursing only if at least three more
    # remain.  Don't call directly:  this is an internal optimization for
    # gen's use.

    def _gen3(i):
        assert i < n and (n-i) % 3 == 0
        ip1, ip2, ip3 = i+1, i+2, i+3

        g, g1, g2 = gs[i : ip3]

        if ip3 >= n:
            # These are the last three, so we can yield values directly.
            for values[i] in g():
                for values[ip1] in g1():
                    for values[ip2] in g2():
                        yield values

        else:
            # At least 6 loop nests remain; peel off 3 and recurse for the
            # rest.
            for values[i] in g():
                for values[ip1] in g1():
                    for values[ip2] in g2():
                        for x in _gen3(ip3):
                            yield x

    for x in gen(0):
        yield x
# And one more approach: For backtracking apps like the Knight's Tour
# solver below, the number of backtracking levels can be enormous (one
# level per square, for the Knight's Tour, so that e.g. a 100x100 board
# needs 10,000 levels). In such cases Python is likely to run out of
# stack space due to recursion. So here's a recursion-free version of
# conjoin too.
# NOTE WELL: This allows large problems to be solved with only trivial
# demands on stack space. Without explicitly resumable generators, this is
# much harder to achieve. OTOH, this is much slower (up to a factor of 2)
# than the fancy unrolled recursive conjoin.
def flat_conjoin(gs):  # rename to conjoin to run tests with this instead
    """Iterative (recursion-free) equivalent of conjoin().

    Yields the same shared `values` list for each cross-product element,
    but keeps an explicit stack of iterators instead of recursing, so
    arbitrarily many backtracking levels need only trivial Python stack
    space.

    Uses iter()/next() instead of the Python-2-only `.next` bound-method
    attribute, so it runs unchanged on Python 2.6+ and Python 3.
    """
    n = len(gs)
    values = [None] * n
    iters  = [None] * n
    _StopIteration = StopIteration  # make local because caught a *lot*
    i = 0
    while 1:
        # Descend: pump a fresh iterator into each remaining slot.
        try:
            while i < n:
                it = iters[i] = iter(gs[i]())
                values[i] = next(it)
                i += 1
        except _StopIteration:
            # Some slot was exhausted immediately; fall through to backtrack.
            pass
        else:
            assert i == n
            yield values

        # Backtrack until an older iterator can be resumed.
        i -= 1
        while i >= 0:
            try:
                values[i] = next(iters[i])
                # Success!  Start fresh at next level.
                i += 1
                break
            except _StopIteration:
                # Continue backtracking.
                i -= 1
        else:
            assert i < 0
            break
# A conjoin-based N-Queens solver.
class Queens:
    """Generate all solutions to the N-Queens problem via conjoin()."""

    def __init__(self, n):
        # Board size; exactly one queen goes in each of the n rows.
        self.n = n
        rangen = range(n)

        # Assign a unique int to each column and diagonal.
        # columns:  n of those, range(n).
        # NW-SE diagonals: 2n-1 of these, i-j unique and invariant along
        # each, smallest i-j is 0-(n-1) = 1-n, so add n-1 to shift to 0-
        # based.
        # NE-SW diagonals: 2n-1 of these, i+j unique and invariant along
        # each, smallest i+j is 0, largest is 2n-2.

        # For each square, compute a bit vector of the columns and
        # diagonals it covers, and for each row compute a function that
        # generates the possibilities for the columns in that row.
        self.rowgenerators = []
        for i in rangen:
            rowuses = [(1L << j) |                  # column ordinal
                       (1L << (n + i-j + n-1)) |    # NW-SE ordinal
                       (1L << (n + 2*n-1 + i+j))    # NE-SW ordinal
                       for j in rangen]

            def rowgen(rowuses=rowuses):
                # Yield each still-legal column j for this row, claiming
                # its column/diagonal bits while exploring and releasing
                # them again on backtrack.
                for j in rangen:
                    uses = rowuses[j]
                    if uses & self.used == 0:
                        self.used |= uses
                        yield j
                        self.used &= ~uses

            self.rowgenerators.append(rowgen)

    # Generate solutions.
    def solve(self):
        """Yield each solution as a list mapping row index -> column index."""
        self.used = 0
        for row2col in conjoin(self.rowgenerators):
            yield row2col

    def printsolution(self, row2col):
        """Print a solution (as produced by solve()) as an ASCII board."""
        n = self.n
        assert n == len(row2col)
        sep = "+" + "-+" * n
        print sep
        for i in range(n):
            squares = [" " for j in range(n)]
            squares[row2col[i]] = "Q"
            print "|" + "|".join(squares) + "|"
        print sep
# A conjoin-based Knight's Tour solver. This is pretty sophisticated
# (e.g., when used with flat_conjoin above, and passing hard=1 to the
# constructor, a 200x200 Knight's Tour was found quickly -- note that we're
# creating 10s of thousands of generators then!), and is lengthy.
class Knights:
    """Generate closed Knight's Tours on an m x n board via conjoin().

    Squares are numbered 0 .. m*n-1 (see coords2index()).  One nested
    generator is built per move; conjoin() chains them into a
    backtracking search.  Pass hard=1 to use the stronger (but more
    expensive) move-ordering heuristic.
    """

    def __init__(self, m, n, hard=0):
        self.m, self.n = m, n

        # solve() will set up succs[i] to be a list of square #i's
        # successors.
        succs = self.succs = []

        # Remove i0 from each of its successor's successor lists, i.e.
        # successors can't go back to i0 again.  Return 0 if we can
        # detect this makes a solution impossible, else return 1.

        def remove_from_successors(i0, len=len):
            # If we remove all exits from a free square, we're dead:
            # even if we move to it next, we can't leave it again.
            # If we create a square with one exit, we must visit it next;
            # else somebody else will have to visit it, and since there's
            # only one adjacent, there won't be a way to leave it again.
            # Finally, if we create more than one free square with a
            # single exit, we can only move to one of them next, leaving
            # the other one a dead end.
            ne0 = ne1 = 0
            for i in succs[i0]:
                s = succs[i]
                s.remove(i0)
                e = len(s)
                if e == 0:
                    ne0 += 1
                elif e == 1:
                    ne1 += 1
            return ne0 == 0 and ne1 < 2

        # Put i0 back in each of its successor's successor lists.

        def add_to_successors(i0):
            for i in succs[i0]:
                succs[i].append(i0)

        # Generate the first move.
        def first():
            if m < 1 or n < 1:
                return

            # Since we're looking for a cycle, it doesn't matter where we
            # start.  Starting in a corner makes the 2nd move easy.
            corner = self.coords2index(0, 0)
            remove_from_successors(corner)
            self.lastij = corner
            yield corner
            add_to_successors(corner)

        # Generate the second moves.
        def second():
            corner = self.coords2index(0, 0)
            assert self.lastij == corner  # i.e., we started in the corner
            if m < 3 or n < 3:
                return
            assert len(succs[corner]) == 2
            assert self.coords2index(1, 2) in succs[corner]
            assert self.coords2index(2, 1) in succs[corner]
            # Only two choices.  Whichever we pick, the other must be the
            # square picked on move m*n, as it's the only way to get back
            # to (0, 0).  Save its index in self.final so that moves before
            # the last know it must be kept free.
            for i, j in (1, 2), (2, 1):
                this  = self.coords2index(i, j)
                final = self.coords2index(3-i, 3-j)
                self.final = final

                remove_from_successors(this)
                succs[final].append(corner)
                self.lastij = this
                yield this
                succs[final].remove(corner)
                add_to_successors(this)

        # Generate moves 3 thru m*n-1.
        def advance(len=len):
            # If some successor has only one exit, must take it.
            # Else favor successors with fewer exits.
            candidates = []
            for i in succs[self.lastij]:
                e = len(succs[i])
                assert e > 0, "else remove_from_successors() pruning flawed"
                if e == 1:
                    candidates = [(e, i)]
                    break
                candidates.append((e, i))
            else:
                candidates.sort()

            for e, i in candidates:
                if i != self.final:
                    if remove_from_successors(i):
                        self.lastij = i
                        yield i
                    add_to_successors(i)

        # Generate moves 3 thru m*n-1.  Alternative version using a
        # stronger (but more expensive) heuristic to order successors.
        # Since the # of backtracking levels is m*n, a poor move early on
        # can take eons to undo.  Smallest square board for which this
        # matters a lot is 52x52.
        def advance_hard(vmid=(m-1)/2.0, hmid=(n-1)/2.0, len=len):
            # If some successor has only one exit, must take it.
            # Else favor successors with fewer exits.
            # Break ties via max distance from board centerpoint (favor
            # corners and edges whenever possible).
            candidates = []
            for i in succs[self.lastij]:
                e = len(succs[i])
                assert e > 0, "else remove_from_successors() pruning flawed"
                if e == 1:
                    candidates = [(e, 0, i)]
                    break
                i1, j1 = self.index2coords(i)
                d = (i1 - vmid)**2 + (j1 - hmid)**2
                candidates.append((e, -d, i))
            else:
                candidates.sort()

            for e, d, i in candidates:
                if i != self.final:
                    if remove_from_successors(i):
                        self.lastij = i
                        yield i
                    add_to_successors(i)

        # Generate the last move.
        def last():
            assert self.final in succs[self.lastij]
            yield self.final

        if m*n < 4:
            self.squaregenerators = [first]
        else:
            self.squaregenerators = [first, second] + \
                [hard and advance_hard or advance] * (m*n - 3) + \
                [last]

    def coords2index(self, i, j):
        """Map board coordinates (i, j) to a flat square index."""
        assert 0 <= i < self.m
        assert 0 <= j < self.n
        return i * self.n + j

    def index2coords(self, index):
        """Inverse of coords2index()."""
        assert 0 <= index < self.m * self.n
        return divmod(index, self.n)

    def _init_board(self):
        # Rebuild succs[i]: the squares a knight can reach from square i.
        succs = self.succs
        del succs[:]
        m, n = self.m, self.n
        c2i = self.coords2index

        # The 8 knight moves, as (row, column) offsets.
        offsets = [( 1,  2), ( 2,  1), ( 2, -1), ( 1, -2),
                   (-1, -2), (-2, -1), (-2,  1), (-1,  2)]
        rangen = range(n)
        for i in range(m):
            for j in rangen:
                s = [c2i(i+io, j+jo) for io, jo in offsets
                                     if 0 <= i+io < m and
                                        0 <= j+jo < n]
                succs.append(s)

    # Generate solutions.
    def solve(self):
        """Yield each closed tour as a list of square indices in visit order."""
        self._init_board()
        for x in conjoin(self.squaregenerators):
            yield x

    def printsolution(self, x):
        """Print a tour (as produced by solve()) as an ASCII board."""
        m, n = self.m, self.n
        assert len(x) == m*n
        w = len(str(m*n))
        format = "%" + str(w) + "d"

        squares = [[None] * n for i in range(m)]
        k = 1
        for i in x:
            i1, j1 = self.index2coords(i)
            squares[i1][j1] = format % k
            k += 1

        sep = "+" + ("-" * w + "+") * n
        print sep
        for i in range(m):
            row = squares[i]
            print "|" + "|".join(row) + "|"
        print sep
conjoin_tests = """
Generate the 3-bit binary numbers in order. This illustrates dumbest-
possible use of conjoin, just to generate the full cross-product.
>>> for c in conjoin([lambda: iter((0, 1))] * 3):
... print c
[0, 0, 0]
[0, 0, 1]
[0, 1, 0]
[0, 1, 1]
[1, 0, 0]
[1, 0, 1]
[1, 1, 0]
[1, 1, 1]
For efficiency in typical backtracking apps, conjoin() yields the same list
object each time. So if you want to save away a full account of its
generated sequence, you need to copy its results.
>>> def gencopy(iterator):
... for x in iterator:
... yield x[:]
>>> for n in range(10):
... all = list(gencopy(conjoin([lambda: iter((0, 1))] * n)))
... print n, len(all), all[0] == [0] * n, all[-1] == [1] * n
0 1 True True
1 2 True True
2 4 True True
3 8 True True
4 16 True True
5 32 True True
6 64 True True
7 128 True True
8 256 True True
9 512 True True
And run an 8-queens solver.
>>> q = Queens(8)
>>> LIMIT = 2
>>> count = 0
>>> for row2col in q.solve():
... count += 1
... if count <= LIMIT:
... print "Solution", count
... q.printsolution(row2col)
Solution 1
+-+-+-+-+-+-+-+-+
|Q| | | | | | | |
+-+-+-+-+-+-+-+-+
| | | | |Q| | | |
+-+-+-+-+-+-+-+-+
| | | | | | | |Q|
+-+-+-+-+-+-+-+-+
| | | | | |Q| | |
+-+-+-+-+-+-+-+-+
| | |Q| | | | | |
+-+-+-+-+-+-+-+-+
| | | | | | |Q| |
+-+-+-+-+-+-+-+-+
| |Q| | | | | | |
+-+-+-+-+-+-+-+-+
| | | |Q| | | | |
+-+-+-+-+-+-+-+-+
Solution 2
+-+-+-+-+-+-+-+-+
|Q| | | | | | | |
+-+-+-+-+-+-+-+-+
| | | | | |Q| | |
+-+-+-+-+-+-+-+-+
| | | | | | | |Q|
+-+-+-+-+-+-+-+-+
| | |Q| | | | | |
+-+-+-+-+-+-+-+-+
| | | | | | |Q| |
+-+-+-+-+-+-+-+-+
| | | |Q| | | | |
+-+-+-+-+-+-+-+-+
| |Q| | | | | | |
+-+-+-+-+-+-+-+-+
| | | | |Q| | | |
+-+-+-+-+-+-+-+-+
>>> print count, "solutions in all."
92 solutions in all.
And run a Knight's Tour on a 10x10 board. Note that there are about
20,000 solutions even on a 6x6 board, so don't dare run this to exhaustion.
>>> k = Knights(10, 10)
>>> LIMIT = 2
>>> count = 0
>>> for x in k.solve():
... count += 1
... if count <= LIMIT:
... print "Solution", count
... k.printsolution(x)
... else:
... break
Solution 1
+---+---+---+---+---+---+---+---+---+---+
| 1| 58| 27| 34| 3| 40| 29| 10| 5| 8|
+---+---+---+---+---+---+---+---+---+---+
| 26| 35| 2| 57| 28| 33| 4| 7| 30| 11|
+---+---+---+---+---+---+---+---+---+---+
| 59|100| 73| 36| 41| 56| 39| 32| 9| 6|
+---+---+---+---+---+---+---+---+---+---+
| 74| 25| 60| 55| 72| 37| 42| 49| 12| 31|
+---+---+---+---+---+---+---+---+---+---+
| 61| 86| 99| 76| 63| 52| 47| 38| 43| 50|
+---+---+---+---+---+---+---+---+---+---+
| 24| 75| 62| 85| 54| 71| 64| 51| 48| 13|
+---+---+---+---+---+---+---+---+---+---+
| 87| 98| 91| 80| 77| 84| 53| 46| 65| 44|
+---+---+---+---+---+---+---+---+---+---+
| 90| 23| 88| 95| 70| 79| 68| 83| 14| 17|
+---+---+---+---+---+---+---+---+---+---+
| 97| 92| 21| 78| 81| 94| 19| 16| 45| 66|
+---+---+---+---+---+---+---+---+---+---+
| 22| 89| 96| 93| 20| 69| 82| 67| 18| 15|
+---+---+---+---+---+---+---+---+---+---+
Solution 2
+---+---+---+---+---+---+---+---+---+---+
| 1| 58| 27| 34| 3| 40| 29| 10| 5| 8|
+---+---+---+---+---+---+---+---+---+---+
| 26| 35| 2| 57| 28| 33| 4| 7| 30| 11|
+---+---+---+---+---+---+---+---+---+---+
| 59|100| 73| 36| 41| 56| 39| 32| 9| 6|
+---+---+---+---+---+---+---+---+---+---+
| 74| 25| 60| 55| 72| 37| 42| 49| 12| 31|
+---+---+---+---+---+---+---+---+---+---+
| 61| 86| 99| 76| 63| 52| 47| 38| 43| 50|
+---+---+---+---+---+---+---+---+---+---+
| 24| 75| 62| 85| 54| 71| 64| 51| 48| 13|
+---+---+---+---+---+---+---+---+---+---+
| 87| 98| 89| 80| 77| 84| 53| 46| 65| 44|
+---+---+---+---+---+---+---+---+---+---+
| 90| 23| 92| 95| 70| 79| 68| 83| 14| 17|
+---+---+---+---+---+---+---+---+---+---+
| 97| 88| 21| 78| 81| 94| 19| 16| 45| 66|
+---+---+---+---+---+---+---+---+---+---+
| 22| 91| 96| 93| 20| 69| 82| 67| 18| 15|
+---+---+---+---+---+---+---+---+---+---+
"""
weakref_tests = """\
Generators are weakly referencable:
>>> import weakref
>>> def gen():
... yield 'foo!'
...
>>> wr = weakref.ref(gen)
>>> wr() is gen
True
>>> p = weakref.proxy(gen)
Generator-iterators are weakly referencable as well:
>>> gi = gen()
>>> wr = weakref.ref(gi)
>>> wr() is gi
True
>>> p = weakref.proxy(gi)
>>> list(p)
['foo!']
"""
coroutine_tests = """\
Sending a value into a started generator:
>>> def f():
... print (yield 1)
... yield 2
>>> g = f()
>>> g.next()
1
>>> g.send(42)
42
2
Sending a value into a new generator produces a TypeError:
>>> f().send("foo")
Traceback (most recent call last):
...
TypeError: can't send non-None value to a just-started generator
Yield by itself yields None:
>>> def f(): yield
>>> list(f())
[None]
An obscene abuse of a yield expression within a generator expression:
>>> list((yield 21) for i in range(4))
[21, None, 21, None, 21, None, 21, None]
And a more sane, but still weird usage:
>>> def f(): list(i for i in [(yield 26)])
>>> type(f())
<type 'generator'>
A yield expression with augmented assignment.
>>> def coroutine(seq):
... count = 0
... while count < 200:
... count += yield
... seq.append(count)
>>> seq = []
>>> c = coroutine(seq)
>>> c.next()
>>> print seq
[]
>>> c.send(10)
>>> print seq
[10]
>>> c.send(10)
>>> print seq
[10, 20]
>>> c.send(10)
>>> print seq
[10, 20, 30]
Check some syntax errors for yield expressions:
>>> f=lambda: (yield 1),(yield 2)
Traceback (most recent call last):
...
File "<doctest test.test_generators.__test__.coroutine[21]>", line 1
SyntaxError: 'yield' outside function
>>> def f(): return lambda x=(yield): 1
Traceback (most recent call last):
...
SyntaxError: 'return' with argument inside generator (<doctest test.test_generators.__test__.coroutine[22]>, line 1)
>>> def f(): x = yield = y
Traceback (most recent call last):
...
File "<doctest test.test_generators.__test__.coroutine[23]>", line 1
SyntaxError: assignment to yield expression not possible
>>> def f(): (yield bar) = y
Traceback (most recent call last):
...
File "<doctest test.test_generators.__test__.coroutine[24]>", line 1
SyntaxError: can't assign to yield expression
>>> def f(): (yield bar) += y
Traceback (most recent call last):
...
File "<doctest test.test_generators.__test__.coroutine[25]>", line 1
SyntaxError: can't assign to yield expression
Now check some throw() conditions:
>>> def f():
... while True:
... try:
... print (yield)
... except ValueError,v:
... print "caught ValueError (%s)" % (v),
>>> import sys
>>> g = f()
>>> g.next()
>>> g.throw(ValueError) # type only
caught ValueError ()
>>> g.throw(ValueError("xyz")) # value only
caught ValueError (xyz)
>>> g.throw(ValueError, ValueError(1)) # value+matching type
caught ValueError (1)
>>> g.throw(ValueError, TypeError(1)) # mismatched type, rewrapped
caught ValueError (1)
>>> g.throw(ValueError, ValueError(1), None) # explicit None traceback
caught ValueError (1)
>>> g.throw(ValueError(1), "foo") # bad args
Traceback (most recent call last):
...
TypeError: instance exception may not have a separate value
>>> g.throw(ValueError, "foo", 23) # bad args
Traceback (most recent call last):
...
TypeError: throw() third argument must be a traceback object
>>> def throw(g,exc):
... try:
... raise exc
... except:
... g.throw(*sys.exc_info())
>>> throw(g,ValueError) # do it with traceback included
caught ValueError ()
>>> g.send(1)
1
>>> throw(g,TypeError) # terminate the generator
Traceback (most recent call last):
...
TypeError
>>> print g.gi_frame
None
>>> g.send(2)
Traceback (most recent call last):
...
StopIteration
>>> g.throw(ValueError,6) # throw on closed generator
Traceback (most recent call last):
...
ValueError: 6
>>> f().throw(ValueError,7) # throw on just-opened generator
Traceback (most recent call last):
...
ValueError: 7
>>> f().throw("abc") # throw on just-opened generator
Traceback (most recent call last):
...
TypeError: exceptions must be classes, or instances, not str
Now let's try closing a generator:
>>> def f():
... try: yield
... except GeneratorExit:
... print "exiting"
>>> g = f()
>>> g.next()
>>> g.close()
exiting
>>> g.close() # should be no-op now
>>> f().close() # close on just-opened generator should be fine
>>> def f(): yield # an even simpler generator
>>> f().close() # close before opening
>>> g = f()
>>> g.next()
>>> g.close() # close normally
And finalization:
>>> def f():
... try: yield
... finally:
... print "exiting"
>>> g = f()
>>> g.next()
>>> del g
exiting
>>> class context(object):
... def __enter__(self): pass
... def __exit__(self, *args): print 'exiting'
>>> def f():
... with context():
... yield
>>> g = f()
>>> g.next()
>>> del g
exiting
GeneratorExit is not caught by except Exception:
>>> def f():
... try: yield
... except Exception: print 'except'
... finally: print 'finally'
>>> g = f()
>>> g.next()
>>> del g
finally
Now let's try some ill-behaved generators:
>>> def f():
... try: yield
... except GeneratorExit:
... yield "foo!"
>>> g = f()
>>> g.next()
>>> g.close()
Traceback (most recent call last):
...
RuntimeError: generator ignored GeneratorExit
>>> g.close()
Our ill-behaved code should be invoked during GC:
>>> import sys, StringIO
>>> old, sys.stderr = sys.stderr, StringIO.StringIO()
>>> g = f()
>>> g.next()
>>> del g
>>> sys.stderr.getvalue().startswith(
... "Exception RuntimeError: 'generator ignored GeneratorExit' in "
... )
True
>>> sys.stderr = old
And errors thrown during closing should propagate:
>>> def f():
... try: yield
... except GeneratorExit:
... raise TypeError("fie!")
>>> g = f()
>>> g.next()
>>> g.close()
Traceback (most recent call last):
...
TypeError: fie!
Ensure that various yield expression constructs make their
enclosing function a generator:
>>> def f(): x += yield
>>> type(f())
<type 'generator'>
>>> def f(): x = yield
>>> type(f())
<type 'generator'>
>>> def f(): lambda x=(yield): 1
>>> type(f())
<type 'generator'>
>>> def f(): x=(i for i in (yield) if (yield))
>>> type(f())
<type 'generator'>
>>> def f(d): d[(yield "a")] = d[(yield "b")] = 27
>>> data = [1,2]
>>> g = f(data)
>>> type(g)
<type 'generator'>
>>> g.send(None)
'a'
>>> data
[1, 2]
>>> g.send(0)
'b'
>>> data
[27, 2]
>>> try: g.send(1)
... except StopIteration: pass
>>> data
[27, 27]
"""
refleaks_tests = """
Prior to adding cycle-GC support to itertools.tee, this code would leak
references. We add it to the standard suite so the routine refleak-tests
would trigger if it starts being uncleanable again.
>>> import itertools
>>> def leak():
... class gen:
... def __iter__(self):
... return self
... def next(self):
... return self.item
... g = gen()
... head, tail = itertools.tee(g)
... g.item = head
... return head
>>> it = leak()
Make sure to also test the involvement of the tee-internal teedataobject,
which stores returned items.
>>> item = it.next()
This test leaked at one point due to generator finalization/destruction.
It was copied from Lib/test/leakers/test_generator_cycle.py before the file
was removed.
>>> def leak():
... def gen():
... while True:
... yield g
... g = gen()
>>> leak()
This test isn't really generator related, but rather exception-in-cleanup
related. The coroutine tests (above) just happen to cause an exception in
the generator's __del__ (tp_del) method. We can also test for this
explicitly, without generators. We do have to redirect stderr to avoid
printing warnings and to doublecheck that we actually tested what we wanted
to test.
>>> import sys, StringIO
>>> old = sys.stderr
>>> try:
... sys.stderr = StringIO.StringIO()
... class Leaker:
... def __del__(self):
... raise RuntimeError
...
... l = Leaker()
... del l
... err = sys.stderr.getvalue().strip()
... err.startswith(
... "Exception RuntimeError: RuntimeError() in <"
... )
... err.endswith("> ignored")
... len(err.splitlines())
... finally:
... sys.stderr = old
True
True
1
These refleak tests should perhaps be in a testfile of their own,
test_generators just happened to be the test that drew these out.
"""
# Collect the big docstrings above into the module-level __test__ dict;
# doctest (driven by test_support.run_doctest() below) discovers and runs
# each entry as a named suite.
__test__ = {"tut":      tutorial_tests,
            "pep":      pep_tests,
            "email":    email_tests,
            "fun":      fun_tests,
            "syntax":   syntax_tests,
            "conjoin":  conjoin_tests,
            "weakref":  weakref_tests,
            "coroutine": coroutine_tests,
            "refleaks": refleaks_tests,
            }
# Magic test name that regrtest.py invokes *after* importing this module.
# This worms around a bootstrap problem.
# Note that doctest and regrtest both look in sys.argv for a "-v" argument,
# so this works as expected in both ways of running regrtest.
def test_main(verbose=None):
    """Run every doctest suite registered in __test__ via regrtest's runner."""
    from test import test_generators
    from test import test_support
    test_support.run_doctest(test_generators, verbose)


# This part isn't needed for regrtest, but it lets the file be run directly.
if __name__ == "__main__":
    test_main(1)
| mit |
kurli/blink-crosswalk | Tools/Scripts/webkitpy/layout_tests/port/mac.py | 1 | 4978 | # Copyright (C) 2010 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Chromium Mac implementation of the Port interface."""
import logging
import signal
from webkitpy.layout_tests.port import base
_log = logging.getLogger(__name__)
class MacPort(base.Port):
    """Chromium Mac implementation of the layout-test Port interface."""

    SUPPORTED_VERSIONS = ('snowleopard', 'lion', 'retina', 'mountainlion', 'mavericks')
    port_name = 'mac'

    # FIXME: We treat Retina (High-DPI) devices as if they are running
    # a different operating system version. This is lame and should be fixed.
    # Note that the retina versions fallback to the non-retina versions and so no
    # baselines are shared between retina versions; this keeps the fallback graph as a tree
    # and maximizes the number of baselines we can share that way.
    # We also currently only support Retina on 10.8; we need to either upgrade to 10.9 or support both.
    FALLBACK_PATHS = {}
    FALLBACK_PATHS['mavericks'] = ['mac']
    FALLBACK_PATHS['mountainlion'] = ['mac-mountainlion'] + FALLBACK_PATHS['mavericks']
    FALLBACK_PATHS['retina'] = ['mac-retina'] + FALLBACK_PATHS['mountainlion']
    FALLBACK_PATHS['lion'] = ['mac-lion'] + FALLBACK_PATHS['mountainlion']
    FALLBACK_PATHS['snowleopard'] = ['mac-snowleopard'] + FALLBACK_PATHS['lion']

    DEFAULT_BUILD_DIRECTORIES = ('xcodebuild', 'out')

    CONTENT_SHELL_NAME = 'Content Shell'

    BUILD_REQUIREMENTS_URL = 'https://code.google.com/p/chromium/wiki/MacBuildInstructions'

    @classmethod
    def determine_full_port_name(cls, host, options, port_name):
        """Return 'mac-<version>' for the generic 'mac' port name.

        'future' OS versions map to the newest supported release, and
        high-DPI machines map to the 'retina' pseudo-version.
        """
        if port_name.endswith('mac'):
            if host.platform.os_version in ('future',):
                version = 'mavericks'
            else:
                version = host.platform.os_version
            if host.platform.is_highdpi():
                version = 'retina'
            return port_name + '-' + version
        return port_name

    def __init__(self, host, port_name, **kwargs):
        super(MacPort, self).__init__(host, port_name, **kwargs)
        # port_name is always fully qualified here ('mac-<version>'), so the
        # suffix after 'mac-' is the OS version.
        self._version = port_name[port_name.index('mac-') + len('mac-'):]
        assert self._version in self.SUPPORTED_VERSIONS

    def _modules_to_search_for_symbols(self):
        return [self._build_path('ffmpegsumo.so')]

    def check_build(self, needs_http, printer):
        result = super(MacPort, self).check_build(needs_http, printer)
        if result:
            _log.error('For complete Mac build requirements, please see:')
            _log.error('')
            # Log BUILD_REQUIREMENTS_URL instead of a second literal so the
            # message cannot drift out of sync with the constant above (it
            # previously logged a stale http:// copy of the same URL).
            _log.error('    %s' % self.BUILD_REQUIREMENTS_URL)
        return result

    def operating_system(self):
        return 'mac'

    #
    # PROTECTED METHODS
    #

    def _wdiff_missing_message(self):
        return 'wdiff is not installed; please install from MacPorts or elsewhere'

    def path_to_apache(self):
        return '/usr/sbin/httpd'

    def path_to_apache_config_file(self):
        config_file_name = 'apache2-httpd-' + self._apache_version() + '.conf'
        return self._filesystem.join(self.layout_tests_dir(), 'http', 'conf', config_file_name)

    def _path_to_driver(self, configuration=None):
        # FIXME: make |configuration| happy with case-sensitive file systems.
        return self._build_path_with_configuration(configuration, self.driver_name() + '.app', 'Contents', 'MacOS', self.driver_name())

    def _path_to_helper(self):
        binary_name = 'layout_test_helper'
        return self._build_path(binary_name)

    def _path_to_wdiff(self):
        return 'wdiff'
| bsd-3-clause |
richardcs/ansible | lib/ansible/modules/windows/win_iis_webbinding.py | 7 | 3866 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2017, Noah Sparks <nsparks@outlook.com>
# Copyright: (c) 2017, Henrik Wallström <henrik@wallstroms.nu>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# Metadata consumed by Ansible's plugin loader and ansible-doc.
ANSIBLE_METADATA = {'metadata_version': '1.1',
                    'status': ['preview'],
                    'supported_by': 'community'}

DOCUMENTATION = r'''
---
module: win_iis_webbinding
version_added: "2.0"
short_description: Configures a IIS Web site binding
description:
- Creates, removes and configures a binding to an existing IIS Web site.
options:
  name:
    description:
    - Names of web site.
    required: yes
    aliases: [ website ]
  state:
    description:
    - State of the binding.
    choices: [ absent, present ]
    default: present
  port:
    description:
    - The port to bind to / use for the new site.
    default: 80
  ip:
    description:
    - The IP address to bind to / use for the new site.
    default: '*'
  host_header:
    description:
    - The host header to bind to / use for the new site.
    - If you are creating/removing a catch-all binding, omit this parameter rather than defining it as '*'.
  protocol:
    description:
    - The protocol to be used for the Web binding (usually HTTP, HTTPS, or FTP).
    default: http
  certificate_hash:
    description:
    - Certificate hash (thumbprint) for the SSL binding. The certificate hash is the unique identifier for the certificate.
  certificate_store_name:
    description:
    - Name of the certificate store where the certificate for the binding is located.
    default: my
  ssl_flags:
    description:
    - This parameter is only valid on Server 2012 and newer.
    - Primarily used for enabling and disabling server name indication (SNI).
    - Set to C(0) to disable SNI.
    - Set to C(1) to enable SNI.
    version_added: "2.5"
author:
- Noah Sparks (@nwsparks)
- Henrik Wallström (@henrikwallstrom)
'''

EXAMPLES = r'''
- name: Add a HTTP binding on port 9090
  win_iis_webbinding:
    name: Default Web Site
    port: 9090
    state: present

- name: Remove the HTTP binding on port 9090
  win_iis_webbinding:
    name: Default Web Site
    port: 9090
    state: absent

- name: Remove the default http binding
  win_iis_webbinding:
    name: Default Web Site
    port: 80
    ip: '*'
    state: absent

- name: Add a HTTPS binding
  win_iis_webbinding:
    name: Default Web Site
    protocol: https
    port: 443
    ip: 127.0.0.1
    certificate_hash: B0D0FA8408FC67B230338FCA584D03792DA73F4C
    state: present

- name: Add a HTTPS binding with host header and SNI enabled
  win_iis_webbinding:
    name: Default Web Site
    protocol: https
    port: 443
    host_header: test.com
    ssl_flags: 1
    certificate_hash: D1A3AF8988FD32D1A3AF8988FD323792DA73F4C
    state: present
'''

RETURN = r'''
website_state:
  description:
  - The state of the website being targeted
  - Can be helpful in case you accidentally cause a binding collision
    which can result in the targeted site being stopped
  returned: always
  type: string
  sample: "Started"
  version_added: "2.5"
operation_type:
  description:
  - The type of operation performed
  - Can be removed, updated, matched, or added
  returned: on success
  type: string
  sample: "removed"
  version_added: "2.5"
binding_info:
  description:
  - Information on the binding being manipulated
  returned: on success
  type: dictionary
  sample: |-
    "binding_info": {
      "bindingInformation": "127.0.0.1:443:",
      "certificateHash": "FF3910CE089397F1B5A77EB7BAFDD8F44CDE77DD",
      "certificateStoreName": "MY",
      "hostheader": "",
      "ip": "127.0.0.1",
      "port": 443,
      "protocol": "https",
      "sslFlags": "not supported"
    }
  version_added: "2.5"
'''
| gpl-3.0 |
OnroerendErfgoed/atramhasis | atramhasis/scripts/initializedb.py | 1 | 3979 | import os
import sys
from pyramid.paster import get_appsettings
from pyramid.paster import setup_logging
from pyramid.scripts.common import parse_vars
from skosprovider_sqlalchemy.models import ConceptScheme
from skosprovider_sqlalchemy.models import Label
from skosprovider_sqlalchemy.utils import import_provider
from sqlalchemy import engine_from_config
from sqlalchemy.orm import sessionmaker
def usage(argv):
    """Print a short usage message for this script and exit with status 1."""
    script_name = os.path.basename(argv[0])
    message = (
        'usage: %s <config_uri> [var=value]\n'
        '(example: "%s development.ini")' % (script_name, script_name)
    )
    print(message)
    sys.exit(1)
def main(argv=sys.argv):
    """
    Initialize the database named in the given config file and load the
    fixture concept schemes.

    The scheme ids below are fixed on purpose: other parts of the application
    refer to the schemes by id, so the fixtures are always imported in the
    same order with the same ids.
    """
    from fixtures.data import trees, geo
    from fixtures.styles_and_cultures import styles_and_cultures
    from fixtures.materials import materials
    from fixtures.eventtypes import eventtypes
    from fixtures.heritagetypes import heritagetypes
    from fixtures.periods import periods
    from fixtures.species import species
    if len(argv) < 2:
        usage(argv)
    config_uri = argv[1]
    options = parse_vars(argv[2:])
    setup_logging(config_uri)
    settings = get_appsettings(config_uri, options=options)
    engine = engine_from_config(settings, 'sqlalchemy.')
    db_session = sessionmaker(bind=engine)()
    # One row per concept scheme: (provider data, id, uri, Dutch label,
    # English label). Collapsing the eight near-identical import_provider
    # calls into data keeps the fixture list easy to extend.
    fixtures = [
        (trees, 1, 'urn:x-skosprovider:trees',
         u'Verschillende soorten bomen', u'Different types of trees'),
        (geo, 2, 'urn:x-skosprovider:geo',
         u'Geografie', u'Geography'),
        (styles_and_cultures, 3, 'https://id.erfgoed.net/thesauri/stijlen_en_culturen',
         u'Stijlen en Culturen', u'Styles and Cultures'),
        (materials, 4, 'https://id.erfgoed.net/thesauri/materialen',
         u'Materialen', u'Materials'),
        (eventtypes, 5, 'https://id.erfgoed.net/thesauri/gebeurtenistypes',
         u'Gebeurtenistypes', u'Event types'),
        (heritagetypes, 6, 'https://id.erfgoed.net/thesauri/erfgoedtypes',
         u'Erfgoedtypes', u'Heritage types'),
        (periods, 7, 'https://id.erfgoed.net/thesauri/dateringen',
         u'Dateringen', u'Periods'),
        (species, 8, 'https://id.erfgoed.net/thesauri/soorten',
         u'Soorten', u'Species'),
    ]
    for provider, scheme_id, uri, label_nl, label_en in fixtures:
        import_provider(
            provider,
            ConceptScheme(
                id=scheme_id,
                uri=uri,
                labels=[
                    Label(label_nl, u'prefLabel', u'nl'),
                    Label(label_en, u'prefLabel', u'en')
                ]
            ),
            db_session
        )
    db_session.commit()
    db_session.close()
    print('--atramhasis-db-initialized--')
| gpl-3.0 |
Quikling/gpdb | gpMgmt/bin/gprestore_filter.py | 4 | 20742 | #!/usr/bin/env python
from gppylib.gpparseopts import OptParser, OptChecker
from gppylib.operations.backup_utils import split_fqn, checkAndRemoveEnclosingDoubleQuote, removeEscapingDoubleQuoteInSQLString,\
escapeDoubleQuoteInSQLString
import re
import os
import sys
# Tokens used to recognize statements in the pg_dump output. Each *_start
# constant is the first character of the matching expression: the handlers
# compare line[0] against it as a cheap pre-filter before calling the more
# expensive startswith().
search_path_expr = 'SET search_path = '
set_start = 'S'
set_assignment = '='
len_search_path_expr = len(search_path_expr)
# COPY blocks carry the table data; a block ends with a '\.' line.
copy_expr = 'COPY '
copy_start = 'C'
copy_expr_end = 'FROM stdin;\n'
len_copy_expr = len(copy_expr)
copy_end_expr = '\\.'
copy_end_start = '\\'
set_expr = 'SET '
drop_start = 'D'
drop_expr = 'DROP '
drop_table_expr = 'DROP TABLE '
drop_external_table_expr = 'DROP EXTERNAL TABLE '
alter_table_only_expr = 'ALTER TABLE ONLY '
alter_table_expr = 'ALTER TABLE '
# pg_dump comment headers that announce each object, e.g.
# "-- Name: foo; Type: TABLE; Schema: public; Owner: gpadmin"
comment_start_expr = '--'
comment_expr = '-- Name: '
type_expr = '; Type: '
schema_expr = '; Schema: '
owner_expr = '; Owner: '
comment_data_expr_a = '-- Data: '
comment_data_expr_b = '-- Data for Name: '
begin_start = 'B'
begin_expr = 'BEGIN'
end_start = 'E'
end_expr = 'END'
def get_table_info(line, cur_comment_expr):
    """
    Extract (name, type, schema) from a pg_dump comment header line.

    The Type/Schema/Owner markers must each occur exactly once: if a
    schema/table/user/tablespace name happens to embed one of the marker
    strings (very unlikely, but possible), parsing would be ambiguous, so
    (None, None, None) is returned instead of guessing.

    line: contains the true (un-escaped) schema name, table name, and user name.
    """
    stripped = line.strip('\n')
    type_positions = find_all_expr_start(stripped, type_expr)
    schema_positions = find_all_expr_start(stripped, schema_expr)
    owner_positions = find_all_expr_start(stripped, owner_expr)
    unambiguous = (len(type_positions) == 1 and
                   len(schema_positions) == 1 and
                   len(owner_positions) == 1)
    if not unambiguous:
        return (None, None, None)
    name = stripped[len(cur_comment_expr):type_positions[0]]
    data_type = stripped[type_positions[0] + len(type_expr):schema_positions[0]]
    schema = stripped[schema_positions[0] + len(schema_expr):owner_positions[0]]
    return (name, data_type, schema)
def get_table_from_alter_table(line, alter_expr):
    """
    Parse an ALTER TABLE statement and return the table name (without any
    schema qualification).

    Fact: if the schema or table name contains special characters, it is
    already double quoted in the dump file; those quotes are used to locate
    the name boundaries.
    """
    dot_idx = line.find('.')
    last_quote_idx = line.rfind('"')
    qualified = dot_idx != -1       # schema.table form?
    quoted = last_quote_idx != -1   # any double-quoted name present?

    if not qualified:
        if not quoted:
            return line[len(alter_expr):].split()[0]
        # unqualified, quoted table name: skip the opening quote
        return line[len(alter_expr) + 1:last_quote_idx + 1]

    if not quoted:
        full_table_name = line[len(alter_expr):].split()[0]
    elif dot_idx < last_quote_idx:
        # the table name is the double-quoted part
        full_table_name = line[len(alter_expr):last_quote_idx + 1]
    else:
        # only the schema name is double quoted
        name_end_idx = line.find(' ', dot_idx)
        full_table_name = line[len(alter_expr):name_end_idx]
    _, table = split_fqn(full_table_name)
    return table
def find_all_expr_start(line, expr):
    """Return the start index of every (possibly overlapping) occurrence of expr in line."""
    # A lookahead pattern matches at each position without consuming text,
    # so overlapping occurrences are all reported.
    pattern = re.compile('(?=%s)' % expr)
    return [match.start() for match in pattern.finditer(line)]
class ParserState:
    """Mutable parsing state threaded through the line handlers."""
    def __init__(self):
        self.output = False  # whether the current line should be written to the output
        self.function_ddl = False  # inside a function's ddl statement; SET clauses there are excluded
        self.further_investigation_required = False
        # we need to set search_path to true after every ddl change due to the
        # fact that the schema "set search_path" may change on the next ddl command
        self.cast_func_schema = None  # schema remembered from the last CAST comment header
        self.change_cast_func_schema = False  # rewrite the schema of the next CREATE CAST's function
        self.in_block = False  # currently inside a BEGIN/END block
        self.line_buff = ''  # buffered comment header awaiting an ALTER TABLE decision
        self.schema = None  # schema from the most recent search_path line / comment header
def _handle_begin_end_block(state, line, _):
    """
    Pass through BEGIN/END statements and everything between them, tracking
    whether we are currently inside such a block. SET lines inside a
    function's DDL are not treated as block delimiters.
    Returns (handled, state, line).
    """
    if not state.function_ddl and line[0] == begin_start and line.startswith(begin_expr):
        state.in_block = True
        state.output = True
        return True, state, line
    if not state.function_ddl and line[0] == end_start and line.startswith(end_expr):
        state.in_block = False
        state.output = True
        return True, state, line
    if state.in_block:
        state.output = True
        return True, state, line
    return False, state, line
def _handle_change_schema(schema_to_replace, change_schema, line):
    """
    Replace schema_to_replace in line with change_schema. The replacement is
    double quoted because a change-schema name may contain special characters
    (including whitespace); when the original schema already appears quoted
    in the line, the quoted form is what gets replaced.
    """
    if not change_schema:
        return line
    replacement = escapeDoubleQuoteInSQLString(change_schema)
    quoted_original = '"' + schema_to_replace + '"'
    target = quoted_original if quoted_original in line else schema_to_replace
    return line.replace(target, replacement)
def _handle_set_statement(state, line, arguments):
    """
    Handle 'SET search_path = ...' lines: remember the current schema, decide
    whether subsequent objects in that schema are wanted, and rewrite the
    schema name when a remap is requested. Returns (handled, state, line).
    """
    if line[0] != set_start or not line.startswith(search_path_expr):
        return False, state, line
    state.further_investigation_required = False
    # The schema in the SET search_path line is already escaped in the dump.
    state.schema = extract_schema(line)
    unescaped = removeEscapingDoubleQuoteInSQLString(state.schema, False)
    if state.schema == "pg_catalog":
        state.output = True
    else:
        wanted = (bool(arguments.schemas and unescaped in arguments.schemas) or
                  bool(arguments.schemas_in_schema_file and
                       unescaped in arguments.schemas_in_schema_file))
        if wanted:
            line = _handle_change_schema(state.schema, arguments.change_schema_name, line)
            # Remember the schema in case a cast's function schema must be
            # rewritten later.
            state.cast_func_schema = state.schema
        state.output = wanted
    return True, state, line
def _handle_set_assignment(state, line, _):
    """
    Pass through generic 'SET x = y' statements. Requiring '=' in the line
    filters out dump lines such as 'SET SUBPARTITION TEMPLATE'; SET lines
    inside a function's DDL are not handled here. Returns (handled, state, line).
    """
    looks_like_set = line[0] == set_start and line.startswith(set_expr)
    if looks_like_set and set_assignment in line and not state.function_ddl:
        state.output = True
        return True, state, line
    return False, state, line
def _handle_expressions_in_comments(state, line, arguments):
    """
    Handle '-- Name: ...; Type: ...; Schema: ...' comment headers, which
    announce each DDL object in the dump, and decide whether the object that
    follows should be emitted based on the requested schemas/tables.
    Returns (handled, state, line).
    """
    schemas_in_table_file = arguments.schemas
    tables_in_table_file = arguments.tables
    schemas_in_schema_file = arguments.schemas_in_schema_file
    if line[:2] == comment_start_expr and line.startswith(comment_expr):
        # Parse the line using get_table_info for SCHEMA relation type as well:
        # if type is SCHEMA, the returned name is the schema's name, and the
        # returned schema field is represented by '-'.
        name, data_type, state.schema = get_table_info(line, comment_expr)
        state.output = False
        state.function_ddl = False
        if data_type in ['SCHEMA']:
            # Make sure that schemas are created before restoring the desired tables.
            state.output = check_valid_schema(name, schemas_in_table_file, schemas_in_schema_file)
        elif data_type in ['TABLE', 'EXTERNAL TABLE', 'VIEW', 'SEQUENCE']:
            state.further_investigation_required = False
            state.output = check_valid_relname(state.schema, name, tables_in_table_file, schemas_in_schema_file)
        elif data_type in ['CONSTRAINT']:
            # Which table the constraint belongs to is only known from the
            # ALTER TABLE that follows, so buffer the header and decide later.
            state.further_investigation_required = True
            if check_valid_schema(state.schema, schemas_in_table_file, schemas_in_schema_file):
                state.line_buff = line
        elif data_type in ['ACL']:
            relname_valid = check_valid_relname(state.schema, name, tables_in_table_file, schemas_in_schema_file)
            schema_valid = False
            if state.schema == "-":
                # ACL on a schema itself: 'name' holds the schema name.
                schema_valid = check_valid_schema(name, schemas_in_table_file, schemas_in_schema_file)
            state.output = relname_valid or schema_valid
        elif data_type in ['FUNCTION']:
            state.function_ddl = True
            state.output = check_valid_schema(state.schema, schemas_in_table_file, schemas_in_schema_file)
        elif data_type in ['CAST', 'PROCEDURAL LANGUAGE']:  # Restored to pg_catalog, so always filtered in
            state.output = True
            state.change_cast_func_schema = True  # When changing schemas, functions used in casts must reference the new schema
        return True, state, line
    return False, state, line
def _handle_data_expressions_in_comments(state, line, arguments):
    """
    Handle '-- Data: ...' / '-- Data for Name: ...' comment headers, which
    announce the COPY data section for a table, and decide whether that data
    should be emitted. Returns (handled, state, line).
    """
    tables_in_table_file = arguments.tables
    schemas_in_schema_file = arguments.schemas_in_schema_file
    if (line[:2] == comment_start_expr) and (line.startswith(comment_data_expr_a) or line.startswith(comment_data_expr_b)):
        state.further_investigation_required = False
        # The two header variants only differ in their prefix length.
        if line.startswith(comment_data_expr_a):
            name, data_type, state.schema = get_table_info(line, comment_data_expr_a)
        else:
            name, data_type, state.schema = get_table_info(line, comment_data_expr_b)
        if data_type == 'TABLE DATA':
            state.output = check_valid_relname(state.schema, name, tables_in_table_file, schemas_in_schema_file)
        else:
            state.output = False
        return True, state, line
    return False, state, line
def _handle_further_investigation(state, line, arguments):
    """
    Resolve lines deferred by the comment handler (e.g. a CONSTRAINT header):
    the owning table is only known from the ALTER TABLE statement that
    follows. When that table is wanted, the buffered comment header is
    flushed together with the current line. Returns (handled, state, line).
    """
    tables_in_table_file = arguments.tables
    schemas_in_schema_file = arguments.schemas_in_schema_file
    if state.further_investigation_required:
        if line.startswith(alter_table_expr):
            state.further_investigation_required = False
            # Get the full qualified table name with the correct split
            if line.startswith(alter_table_only_expr):
                tablename = get_table_from_alter_table(line, alter_table_only_expr)
            else:
                tablename = get_table_from_alter_table(line, alter_table_expr)
            tablename = checkAndRemoveEnclosingDoubleQuote(tablename)
            tablename = removeEscapingDoubleQuoteInSQLString(tablename, False)
            state.output = check_valid_relname(state.schema, tablename, tables_in_table_file, schemas_in_schema_file)
            if state.output:
                if state.line_buff:
                    # Emit the buffered comment header ahead of the ALTER TABLE.
                    line = state.line_buff + line
                    state.line_buff = ''
        return True, state, line
    return False, state, line
def _handle_cast_function_schema(state, line, arguments):
    """
    When a schema remap is active, rewrite the schema of the function named
    in a 'CREATE CAST ... WITH FUNCTION ...' statement, using the schema
    remembered when the cast's comment header was seen.
    Returns (handled, state, line).
    """
    if not state.change_cast_func_schema:
        return False, state, line
    if "CREATE CAST" in line and "WITH FUNCTION" in line:
        state.change_cast_func_schema = False
        line = _handle_change_schema(state.cast_func_schema, arguments.change_schema_name, line)
        state.cast_func_schema = None
    return True, state, line
def process_line(state, line, arguments):
    """
    Run a dump line through the handler chain; the first handler that claims
    the line wins. Handlers check line[0] before calling startswith() purely
    as a performance optimization. Returns (state, line).
    """
    handlers = (
        _handle_begin_end_block,
        _handle_set_statement,
        _handle_set_assignment,
        _handle_expressions_in_comments,
        _handle_data_expressions_in_comments,
        _handle_further_investigation,
        _handle_cast_function_schema,
    )
    for handler in handlers:
        handled, state, line = handler(state, line, arguments)
        if handled:
            return state, line
    # No handler claimed the line; any pending deferred decision is void.
    state.further_investigation_required = False
    return state, line
def process_schema(arguments, fdin, fdout):
    """Filter the schema (DDL) portion of a dump from fdin to fdout."""
    state = ParserState()
    for raw_line in fdin:
        state, filtered_line = process_line(state, raw_line, arguments)
        if state.output:
            fdout.write(filtered_line)
def check_valid_schema(schema, schemas_in_table_file, schemas_in_schema_file=None):
    """Return True if schema appears in either restore-scope collection."""
    from_schema_file = bool(schemas_in_schema_file) and schema in schemas_in_schema_file
    from_table_file = bool(schemas_in_table_file) and schema in schemas_in_table_file
    return from_schema_file or from_table_file
def check_valid_relname(schema, relname, tables_in_table_file, schemas_in_schema_file=None):
    """
    Return True if the relation may be restored: either its whole schema is
    selected (schema-level restore) or the (schema, relname) pair was
    explicitly requested.
    """
    if schemas_in_schema_file and schema in schemas_in_schema_file:
        return True
    if tables_in_table_file and (schema, relname) in tables_in_table_file:
        return True
    return False
def get_table_schema_set(filename):
    """
    Read the table file (true, un-escaped schema.table names, one per line)
    and return (set of schema names, set of (schema, table) tuples).
    Names are not stripped, because whitespace may be part of a name.
    """
    schemas = set()
    tables = set()
    with open(filename) as fd:
        for entry in fd.read().splitlines():
            schema, table = split_fqn(entry)
            tables.add((schema, table))
            schemas.add(schema)
    return (schemas, tables)
def extract_schema(line):
    """
    Return the schema name from a 'SET search_path = ...' line. The
    ', pg_catalog;' suffix is located with a reverse search so that a comma
    inside the schema name cannot confuse the split. Only the enclosing
    double quotes are removed, since a quote may be part of the name.
    """
    remainder = line[len_search_path_expr:]
    suffix_idx = remainder.rfind(", pg_catalog;")
    if suffix_idx >= 0:
        schema = remainder[:suffix_idx]
    elif line.strip() == "SET search_path = pg_catalog;":
        # The search_path may be just pg_catalog, as in the case of CASTs.
        schema = "pg_catalog"
    else:
        raise Exception('Failed to extract schema name from line %s' % line)
    return checkAndRemoveEnclosingDoubleQuote(schema)
def extract_table(line):
    """
    Return the table name from a COPY line. The end of the name is found by
    reverse-searching for ' (' (the start of the column list) and, failing
    that, for ' FROM', so that spaces inside the table name are preserved.
    Only the enclosing double quotes are removed, since a quote may be part
    of the name.
    """
    remainder = line[len_copy_expr:]
    for end_marker in (" (", " FROM"):
        idx = remainder.rfind(end_marker)
        if idx != -1:
            return checkAndRemoveEnclosingDoubleQuote(remainder[:idx])
    raise Exception('Failed to extract table name from line %s' % line)
def check_dropped_table(line, tables_in_table_file, schemas_in_schema_file, drop_table_expr):
    """
    Return True if the table named in a DROP TABLE line may be dropped:
    either its schema is part of a schema-level restore or the
    (schema, table) pair was explicitly requested.
    """
    fqn = line[len(drop_table_expr):].strip()[:-1]  # drop the trailing ';'
    (schema, table) = split_fqn(fqn)
    schema = removeEscapingDoubleQuoteInSQLString(checkAndRemoveEnclosingDoubleQuote(schema), False)
    table = removeEscapingDoubleQuoteInSQLString(checkAndRemoveEnclosingDoubleQuote(table), False)
    if schemas_in_schema_file and schema in schemas_in_schema_file:
        return True
    if (schema, table) in tables_in_table_file:
        return True
    return False
def process_data(arguments, fdin, fdout):
    """
    Filter the data (COPY) portion of a dump from fdin to fdout: keep only
    the COPY blocks of requested tables, and rewrite SET search_path lines
    when a schema remap is requested.
    """
    schemas_in_table_file = arguments.schemas
    tables_in_table_file = arguments.tables
    change_schema = arguments.change_schema_name
    schemas_in_schema_file = arguments.schemas_in_schema_file
    schema, table, schema_wo_escaping = None, None, None
    output = False
    #PYTHON PERFORMANCE IS TRICKY .... THIS CODE IS LIKE THIS BECAUSE ITS FAST
    for line in fdin:
        if (line[0] == set_start) and line.startswith(search_path_expr):
            # A new search_path line establishes the schema for the COPY
            # blocks that follow.
            schema = extract_schema(line)
            schema_wo_escaping = removeEscapingDoubleQuoteInSQLString(schema, False)
            if ((schemas_in_table_file and schema_wo_escaping in schemas_in_table_file) or
                (schemas_in_schema_file and schema_wo_escaping in schemas_in_schema_file)):
                if change_schema:
                    # change schema name can contain special chars including white space, double quote that.
                    # if original schema name is already quoted, replace it with the quoted change schema name
                    quoted_schema = '"' + schema + '"'
                    if quoted_schema in line:
                        line = line.replace(quoted_schema, escapeDoubleQuoteInSQLString(change_schema))
                    else:
                        line = line.replace(schema, escapeDoubleQuoteInSQLString(change_schema))
                else:
                    schema = schema_wo_escaping
                fdout.write(line)
        elif (line[0] == copy_start) and line.startswith(copy_expr) and line.endswith(copy_expr_end):
            # Start of a COPY ... FROM stdin; block: emit its rows only when
            # the table is part of the requested restore set.
            table = extract_table(line)
            table = removeEscapingDoubleQuoteInSQLString(table, False)
            if (schemas_in_schema_file and schema_wo_escaping in schemas_in_schema_file) or (tables_in_table_file and (schema_wo_escaping, table) in tables_in_table_file):
                output = True
        elif output and (line[0] == copy_end_start) and line.startswith(copy_end_expr):
            # '\.' terminates the COPY block.
            table = None
            output = False
            fdout.write(line)
        if output:
            fdout.write(line)
def get_schemas_in_schema_file(schema_level_restore_file=None):
    """
    Read the schema-level restore file and return the schema names it lists,
    one per line. Whitespace in the names is preserved, because it may be
    part of a schema name.
    """
    if not os.path.exists(schema_level_restore_file):
        raise Exception('schema level restore file path %s does not exist' % schema_level_restore_file)
    with open(schema_level_restore_file) as fr:
        return fr.read().splitlines()
def get_change_schema_name(change_schema_file):
    """
    Read the target schema name from change_schema_file. Only newlines are
    stripped, since a newline is one of the few characters that cannot be
    part of a schema or table name (other whitespace can be).
    """
    if not os.path.exists(change_schema_file):
        raise Exception('change schema file path %s does not exist' % change_schema_file)
    with open(change_schema_file) as fr:
        return fr.read().strip('\n')
class Arguments:
    """
    Container for the restore-filter options.

    schemas: set of schema names to restore (from the table file)
    tables: set of (schema, table) tuples to restore
    change_schema_name: schema name to restore into when remapping, else None
    schemas_in_schema_file: schemas whose tables are all restored, else None
    """
    def __init__(self, schemas_in_table_file=None, tables_in_table_file=None):
        self.change_schema_name = None
        self.schemas_in_schema_file = None
        self.schemas = schemas_in_table_file
        self.tables = tables_in_table_file
if __name__ == "__main__":
    # Command line: -t <table file> and/or -s <schema level file> select what
    # to restore; -c remaps into a different schema; -m filters the
    # master-only (DDL) dump instead of the data. The dump is read from stdin
    # and the filtered dump is written to stdout.
    parser = OptParser(option_class=OptChecker)
    parser.remove_option('-h')
    parser.add_option('-h', '-?', '--help', action='store_true')
    parser.add_option('-t', '--tablefile', type='string', default=None)
    parser.add_option('-m', '--master_only', action='store_true')
    parser.add_option('-c', '--change-schema-file', type='string', default=None)
    parser.add_option('-s', '--schema-level-file', type='string', default=None)
    (options, args) = parser.parse_args()
    if not (options.tablefile or options.schema_level_file):
        raise Exception('-t table file name or -s schema level file name must be specified')
    elif options.schema_level_file and options.change_schema_file:
        raise Exception('-s schema level file option can not be specified with -c change schema file option')
    schemas, tables = None, None
    if options.tablefile:
        (schemas, tables) = get_table_schema_set(options.tablefile)
    # NOTE: 'args' from parse_args() is intentionally discarded and rebound
    # to the Arguments container used by the filter functions.
    args = Arguments(schemas, tables)
    if options.change_schema_file:
        args.change_schema_name = get_change_schema_name(options.change_schema_file)
    if options.schema_level_file:
        args.schemas_in_schema_file = get_schemas_in_schema_file(options.schema_level_file)
    if options.master_only:
        process_schema(args, sys.stdin, sys.stdout)
    else:
        process_data(args, sys.stdin, sys.stdout)
| apache-2.0 |
sid88in/incubator-airflow | airflow/contrib/operators/vertica_to_hive.py | 17 | 5346 | # -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from builtins import chr
from collections import OrderedDict
import unicodecsv as csv
from tempfile import NamedTemporaryFile
from airflow.hooks.hive_hooks import HiveCliHook
from airflow.contrib.hooks.vertica_hook import VerticaHook
from airflow.models import BaseOperator
from airflow.utils.decorators import apply_defaults
class VerticaToHiveTransfer(BaseOperator):
    """
    Moves data from Vertica to Hive. The operator runs
    your query against Vertica, stores the file locally
    before loading it into a Hive table. If the ``create`` or
    ``recreate`` arguments are set to ``True``,
    a ``CREATE TABLE`` and ``DROP TABLE`` statements are generated.
    Hive data types are inferred from the cursor's metadata.
    Note that the table generated in Hive uses ``STORED AS textfile``
    which isn't the most efficient serialization format. If a
    large amount of data is loaded and/or if the table gets
    queried considerably, you may want to use this operator only to
    stage the data into a temporary table before loading it into its
    final destination using a ``HiveOperator``.

    :param sql: SQL query to execute against the Vertica database. (templated)
    :type sql: str
    :param hive_table: target Hive table, use dot notation to target a
        specific database. (templated)
    :type hive_table: str
    :param create: whether to create the table if it doesn't exist
    :type create: bool
    :param recreate: whether to drop and recreate the table at every execution
    :type recreate: bool
    :param partition: target partition as a dict of partition columns
        and values. (templated)
    :type partition: dict
    :param delimiter: field delimiter in the file
    :type delimiter: str
    :param vertica_conn_id: source Vertica connection
    :type vertica_conn_id: str
    :param hive_cli_conn_id: destination hive connection
    :type hive_cli_conn_id: str
    """

    template_fields = ('sql', 'partition', 'hive_table')
    template_ext = ('.sql',)
    ui_color = '#b4e0ff'

    @apply_defaults
    def __init__(
            self,
            sql,
            hive_table,
            create=True,
            recreate=False,
            partition=None,
            delimiter=chr(1),
            vertica_conn_id='vertica_default',
            hive_cli_conn_id='hive_cli_default',
            *args, **kwargs):
        super(VerticaToHiveTransfer, self).__init__(*args, **kwargs)
        self.sql = sql
        self.hive_table = hive_table
        # Assigned exactly once (a previous revision assigned it twice, the
        # first time without normalizing None to an empty dict).
        self.partition = partition or {}
        self.create = create
        self.recreate = recreate
        self.delimiter = str(delimiter)
        self.vertica_conn_id = vertica_conn_id
        self.hive_cli_conn_id = hive_cli_conn_id

    @classmethod
    def type_map(cls, vertica_type):
        """Map a Vertica column type code to a Hive type name (STRING by default).

        vertica-python's datatype module does not provide full type mapping
        access, so this is a manual table. Reference:
        https://github.com/uber/vertica-python/blob/master/vertica_python/vertica/column.py
        """
        d = {
            5: 'BOOLEAN',
            6: 'INT',
            7: 'FLOAT',
            8: 'STRING',
            9: 'STRING',
            16: 'FLOAT',
        }
        return d.get(vertica_type, 'STRING')

    def execute(self, context):
        """Dump the Vertica query result to a local temp file, then load it into Hive."""
        hive = HiveCliHook(hive_cli_conn_id=self.hive_cli_conn_id)
        vertica = VerticaHook(vertica_conn_id=self.vertica_conn_id)

        self.log.info("Dumping Vertica query results to local file")
        conn = vertica.get_conn()
        cursor = conn.cursor()
        cursor.execute(self.sql)
        with NamedTemporaryFile("w") as f:
            csv_writer = csv.writer(f, delimiter=self.delimiter, encoding='utf-8')
            field_dict = OrderedDict()
            col_count = 0
            for field in cursor.description:
                col_count += 1
                col_position = "Column{position}".format(position=col_count)
                # Unnamed result columns get a positional name so the
                # generated Hive DDL is valid.
                field_dict[col_position if field[0] == '' else field[0]] = \
                    self.type_map(field[1])
            csv_writer.writerows(cursor.iterate())
            f.flush()
            cursor.close()
            conn.close()
            self.log.info("Loading file into Hive")
            hive.load_file(
                f.name,
                self.hive_table,
                field_dict=field_dict,
                create=self.create,
                partition=self.partition,
                delimiter=self.delimiter,
                recreate=self.recreate)
| apache-2.0 |
lduarte1991/edx-platform | lms/djangoapps/shoppingcart/utils.py | 24 | 2849 | """
Utility methods for the Shopping Cart app
"""
from django.conf import settings
from pdfminer.converter import PDFPageAggregator
from pdfminer.layout import LAParams, LTFigure, LTTextBox, LTTextLine
from pdfminer.pdfdocument import PDFDocument
from pdfminer.pdfinterp import PDFPageInterpreter, PDFResourceManager
from pdfminer.pdfpage import PDFPage
from pdfminer.pdfparser import PDFParser
from openedx.core.djangoapps.site_configuration import helpers as configuration_helpers
def is_shopping_cart_enabled():
    """
    Check that every feature switch the shopping cart depends on is turned
    on, consulting site configuration first and falling back to settings.
    """
    paid_registration_enabled = configuration_helpers.get_value(
        'ENABLE_PAID_COURSE_REGISTRATION',
        settings.FEATURES.get('ENABLE_PAID_COURSE_REGISTRATION')
    )
    shopping_cart_enabled = configuration_helpers.get_value(
        'ENABLE_SHOPPING_CART',
        settings.FEATURES.get('ENABLE_SHOPPING_CART')
    )
    # Both switches must be on for the cart to be usable.
    return paid_registration_enabled and shopping_cart_enabled
def parse_pages(pdf_buffer, password):
    """
    Parse every page of the PDF held in *pdf_buffer* (unlocked with
    *password*) and return a list with one extracted-text string per page.
    """
    # Build the pdfminer pipeline: parser -> document -> interpreter/device.
    pdf_parser = PDFParser(pdf_buffer)
    document = PDFDocument(pdf_parser, password)
    resource_manager = PDFResourceManager()
    device = PDFPageAggregator(resource_manager, laparams=LAParams())
    interpreter = PDFPageInterpreter(resource_manager, device)

    pages_text = []  # one string of collected text per page of the doc
    for page in PDFPage.create_pages(document):
        # Processing a page fills the aggregator device with an LTPage
        # layout object whose children (LTTextBox, LTFigure, ...) hold text.
        interpreter.process_page(page)
        page_layout = device.get_result()
        pages_text.append(parse_lt_objects(page_layout._objs))  # pylint: disable=protected-access
    return pages_text
def parse_lt_objects(lt_objects):
    """
    Walk a list of pdfminer LT* layout objects and return the text they
    contain, one chunk per object, joined with newlines.
    """
    collected = []
    for lt_object in lt_objects:
        if isinstance(lt_object, (LTTextBox, LTTextLine)):
            # Text-bearing leaf objects: grab their text directly.
            collected.append(lt_object.get_text().encode('utf-8'))
        elif isinstance(lt_object, LTFigure):
            # Figures are containers of further LT* objects; recurse.
            collected.append(parse_lt_objects(lt_object._objs))  # pylint: disable=protected-access
    return '\n'.join(collected)
| agpl-3.0 |
ujdhesa/unisubs | apps/profiles/migrations/0005_auto__del_language.py | 6 | 4421 | # encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    """South schema migration: drop the standalone ``Language`` model.

    Language preference lives on ``Profile.preferred_language`` as a plain
    character field, so the separate ``profiles_language`` table is removed.
    """

    def forwards(self, orm):
        # Deleting model 'Language'
        db.delete_table('profiles_language')

    def backwards(self, orm):
        # Adding model 'Language' (recreate the table exactly as it was
        # before this migration, so the change can be rolled back).
        db.create_table('profiles_language', (
            ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('name', self.gf('django.db.models.fields.CharField')(max_length=255)),
        ))
        db.send_create_signal('profiles', ['Language'])

    # Frozen ORM snapshot used by South to build the ``orm`` object passed to
    # forwards()/backwards(). Auto-generated by South; do not edit by hand.
    models = {
        'auth.group': {
            'Meta': {'object_name': 'Group'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
            'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'blank': 'True'})
        },
        'auth.permission': {
            'Meta': {'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
            'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
        },
        'auth.user': {
            'Meta': {'object_name': 'User'},
            'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
            'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'blank': 'True'}),
            'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
            'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
            'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
            'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'blank': 'True'}),
            'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
        },
        'contenttypes.contenttype': {
            'Meta': {'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
            'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
        },
        'profiles.profile': {
            'Meta': {'object_name': 'Profile'},
            'homepage': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'picture': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'blank': 'True'}),
            'preferred_language': ('django.db.models.fields.CharField', [], {'max_length': '16', 'blank': 'True'}),
            'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'unique': 'True'}),
            'valid_email': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'})
        }
    }

    complete_apps = ['profiles']
| agpl-3.0 |
conejoninja/xbmc-seriesly | servers/ovfile.py | 1 | 3968 | # -*- coding: iso-8859-1 -*-
#------------------------------------------------------------
# seriesly - XBMC Plugin
# Conector para ovfile
# http://blog.tvalacarta.info/plugin-xbmc/seriesly/
#------------------------------------------------------------
import urlparse,urllib2,urllib,re
import os
from core import scrapertools
from core import logger
from core import config
from core import unpackerjs
def get_video_url( page_url , premium = False , user="" , password="", video_password="" ):
    """Resolve an ovfile page URL (or bare video id) into playable video URLs.

    Returns a list of [label, url] pairs. The extra parameters are part of
    the common connector signature and are unused here.
    NOTE(review): the no-match branch returns "" while the id-extraction
    failure returns [] — callers appear to treat both as falsy; confirm.
    """
    logger.info("[ovfile.py] url="+page_url)
    # Accept either a full URL or a bare id; normalize to the embed page URL.
    if page_url.startswith('http'):
        page_url = extract_id(page_url)
        if page_url=="":return []
    page_url = 'http://ovfile.com/embed-'+page_url+'-600x340.html'
    # Fetch the embed page once (cached).
    data = scrapertools.cache_page( page_url)
    # Extract the packed (obfuscated) javascript chunk after the player script tag.
    patron = "src='http://ovfile.com/player/swfobject.js'></script>[^<]+"
    patron +="<script type='text/javascript'>(.*?)</script>"
    matches = re.compile(patron,re.DOTALL).findall(data)
    #scrapertools.printMatches(matches)
    data = ""
    if len(matches)>0:
        data = matches[0]
        logger.info("[ovfile.py] bloque packed="+data)
    else:
        logger.info("[ovfile.py] no encuentra bloque packed="+data)
        return ""
    # Unpack the javascript (p.a.c.k.e.r-style obfuscation).
    descifrado = unpackerjs.unpackjs(data)
    descifrado = descifrado.replace("\\'","'")
    # Extract the media URL from the unpacked player setup code.
    logger.info("descifrado="+descifrado)
    # The player receives the stream as 'file','<url>'
    patron = "'file','([^']+)'"
    matches = re.compile(patron,re.DOTALL).findall(descifrado)
    scrapertools.printMatches(matches)
    video_urls = []
    if len(matches)>0:
        url = "%s?file=%s" %(matches[0],matches[0])
        # Label carries the file extension, e.g. ".flv [ovfile]".
        video_urls.append( ["."+matches[0].rsplit('.',1)[1]+" [ovfile]",url])
    for video_url in video_urls:
        logger.info("[ovfile.py] %s - %s" % (video_url[0],video_url[1]))
    return video_urls
# Encuentra vídeos de este servidor en el texto pasado
def find_videos(text):
    """Find ovfile video references in *text*.

    Scans for the three known URL shapes and returns a list of
    [title, url, server] entries, de-duplicated across all patterns while
    preserving the historical pattern order.
    """
    encontrados = set()
    devuelve = []
    # (regex, url prefix) pairs. The first pattern historically stored the
    # bare video id as the "url" (no prefix); kept for backward compatibility.
    patterns = [
        # http://www.peliculasaudiolatino.com/show/ovfile.php?url=3nzfd2cny8c1
        ('ovfile\.php\?url=([A-Z0-9a-z]+)', ''),
        # http://www.ovfile.com/qya0qmf3k502
        ('http://www.ovfile.com/([\w]+)', 'http://www.ovfile.com/'),
        # http://ovfile.com/embed-ijohcc1dvs5m-600x340.html
        ('http://ovfile.com/embed-([\w]+)-600x340.html', 'http://www.ovfile.com/'),
    ]
    for patronvideos, prefix in patterns:
        logger.info("[ovfile.py] find_videos #"+patronvideos+"#")
        for match in re.compile(patronvideos, re.DOTALL).findall(text):
            url = prefix + match
            if url not in encontrados:
                logger.info("  url="+url)
                devuelve.append(["[ovfile]", url, 'ovfile'])
                encontrados.add(url)
            else:
                logger.info("  url duplicada="+url)
    return devuelve
def extract_id(url):
    """Pull the ovfile video id out of *url*; empty string when absent."""
    id_pattern = 'ovfile\.com/([\w]+)'
    return get_match(url, id_pattern)
def get_match(data, regex):
    """Return capture group 1 of the first *regex* match in *data*, or ""."""
    found = re.search(regex, data)
    return found.group(1) if found is not None else ""
ATIX-AG/ansible | lib/ansible/modules/network/cloudengine/ce_netstream_aging.py | 43 | 18023 | #!/usr/bin/python
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = """
---
module: ce_netstream_aging
version_added: "2.4"
short_description: Manages timeout mode of NetStream on HUAWEI CloudEngine switches.
description:
- Manages timeout mode of NetStream on HUAWEI CloudEngine switches.
author: YangYang (@CloudEngine-Ansible)
options:
timeout_interval:
description:
- Netstream timeout interval.
If is active type the interval is 1-60.
If is inactive ,the interval is 5-600.
default: 30
type:
description:
- Specifies the packet type of netstream timeout active interval.
choices: ['ip', 'vxlan']
state:
description:
- Specify desired state of the resource.
choices: ['present', 'absent']
default: present
timeout_type:
description:
- Netstream timeout type.
choices: ['active', 'inactive', 'tcp-session', 'manual']
manual_slot:
description:
- Specifies the slot number of netstream manual timeout.
"""
EXAMPLES = '''
- name: netstream aging module test
hosts: cloudengine
connection: local
gather_facts: no
vars:
cli:
host: "{{ inventory_hostname }}"
port: "{{ ansible_ssh_port }}"
username: "{{ username }}"
password: "{{ password }}"
transport: cli
tasks:
- name: Configure netstream ip timeout active interval , the interval is 40 minutes.
ce_netstream_aging:
timeout_interval: 40
type: ip
timeout_type: active
state: present
provider: "{{ cli }}"
- name: Configure netstream vxlan timeout active interval , the interval is 40 minutes.
ce_netstream_aging:
timeout_interval: 40
type: vxlan
timeout_type: active
active_state: present
provider: "{{ cli }}"
- name: Delete netstream ip timeout active interval , set the ip timeout interval to 30 minutes.
ce_netstream_aging:
type: ip
timeout_type: active
state: absent
provider: "{{ cli }}"
- name: Delete netstream vxlan timeout active interval , set the vxlan timeout interval to 30 minutes.
ce_netstream_aging:
type: vxlan
timeout_type: active
state: absent
provider: "{{ cli }}"
- name: Enable netstream ip tcp session timeout.
ce_netstream_aging:
type: ip
timeout_type: tcp-session
state: present
provider: "{{ cli }}"
- name: Enable netstream vxlan tcp session timeout.
ce_netstream_aging:
type: vxlan
timeout_type: tcp-session
state: present
provider: "{{ cli }}"
- name: Disable netstream ip tcp session timeout.
ce_netstream_aging:
type: ip
timeout_type: tcp-session
state: absent
provider: "{{ cli }}"
- name: Disable netstream vxlan tcp session timeout.
ce_netstream_aging:
type: vxlan
timeout_type: tcp-session
state: absent
provider: "{{ cli }}"
'''
RETURN = '''
proposed:
description: k/v pairs of parameters passed into module
returned: verbose mode
type: dict
sample: {"timeout_interval": "40",
"type": "ip",
"state": "absent",
"timeout_type": active}
existing:
description: k/v pairs of existing configuration
returned: verbose mode
type: dict
sample: {"active_timeout": [
{
"ip": "40",
"vxlan": 30
}
],
"inactive_timeout": [
{
"ip": 30,
"vxlan": 30
}
],
"tcp_timeout": [
{
"ip": "disable",
"vxlan": "disable"
}
]}
end_state:
description: k/v pairs of configuration after module execution
returned: verbose mode
type: dict
sample: {"active_timeout": [
{
"ip": 30,
"vxlan": 30
}
],
"inactive_timeout": [
{
"ip": 30,
"vxlan": 30
}
],
"tcp_timeout": [
{
"ip": "disable",
"vxlan": "disable"
}
]}
updates:
description: commands sent to the device
returned: always
type: list
sample: ["undo netstream timeout ip active 40"]
changed:
description: check to see if a change was made on the device
returned: always
type: boolean
sample: true
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.network.cloudengine.ce import get_config, load_config
from ansible.module_utils.network.cloudengine.ce import ce_argument_spec
class NetStreamAging(object):
    """
    Manages netstream aging (timeout) configuration on CloudEngine switches.

    Reads the current timeout configuration, compares it with the requested
    state, issues the necessary CLI commands, and reports the resulting state.
    """

    def __init__(self, argument_spec):
        self.spec = argument_spec
        self.module = None
        self.init_module()

        # module input info
        self.timeout_interval = self.module.params['timeout_interval']
        self.type = self.module.params['type']
        self.state = self.module.params['state']
        self.timeout_type = self.module.params['timeout_type']
        self.manual_slot = self.module.params['manual_slot']

        # host info
        self.host = self.module.params['host']
        self.username = self.module.params['username']
        self.port = self.module.params['port']

        # state
        self.changed = False
        self.updates_cmd = list()
        self.commands = list()
        self.results = dict()
        self.proposed = dict()
        self.existing = dict()
        self.end_state = dict()

        # existing/end_state each hold one dict per timeout category,
        # keyed by packet type ('ip'/'vxlan').
        self.existing["active_timeout"] = list()
        self.existing["inactive_timeout"] = list()
        self.existing["tcp_timeout"] = list()
        self.end_state["active_timeout"] = list()
        self.end_state["inactive_timeout"] = list()
        self.end_state["tcp_timeout"] = list()
        self.active_changed = False
        self.inactive_changed = False
        self.tcp_changed = False

    def init_module(self):
        """Instantiate the AnsibleModule (check mode supported)."""
        self.module = AnsibleModule(argument_spec=self.spec, supports_check_mode=True)

    def cli_load_config(self, commands):
        """Push the accumulated CLI commands unless running in check mode."""
        if not self.module.check_mode:
            load_config(self.module, commands)

    def cli_add_command(self, command, undo=False):
        """Add a command to self.commands (and self.updates_cmd for reporting).

        'quit'/'return' are never prefixed with 'undo' and are not reported
        as configuration updates.
        """
        if undo and command.lower() not in ["quit", "return"]:
            cmd = "undo " + command
        else:
            cmd = command
        self.commands.append(cmd)
        if command.lower() not in ["quit", "return"]:
            self.updates_cmd.append(cmd)

    def _read_timeout_config(self):
        """Parse the device's current netstream timeout configuration.

        Returns a (active, inactive, tcp) tuple of dicts keyed by packet
        type. Defaults are 30 minutes active / 30 seconds inactive /
        tcp-session 'absent' when nothing is configured.
        """
        active_tmp = {"ip": "30", "vxlan": "30"}
        inactive_tmp = {"ip": "30", "vxlan": "30"}
        tcp_tmp = {"ip": "absent", "vxlan": "absent"}

        flags = [" | ignore-case include netstream timeout"]
        config = get_config(self.module, flags)
        if config:
            for config_mem in config.lstrip().split('\n'):
                # Lines look like:
                #   netstream timeout ip <active|inactive> <interval>
                #   netstream timeout ip tcp-session
                #   netstream timeout vxlan inner-ip <active|inactive> <interval>
                #   netstream timeout vxlan inner-ip tcp-session
                config_mem_list = config_mem.lstrip().split(' ')
                if len(config_mem_list) < 4:
                    # blank or malformed line; nothing to parse
                    continue
                if config_mem_list[2] == "ip":
                    if config_mem_list[3] == "active":
                        active_tmp["ip"] = config_mem_list[4]
                    if config_mem_list[3] == "inactive":
                        inactive_tmp["ip"] = config_mem_list[4]
                    if config_mem_list[3] == "tcp-session":
                        tcp_tmp["ip"] = "present"
                if config_mem_list[2] == "vxlan":
                    if config_mem_list[4] == "active":
                        active_tmp["vxlan"] = config_mem_list[5]
                    if config_mem_list[4] == "inactive":
                        inactive_tmp["vxlan"] = config_mem_list[5]
                    if config_mem_list[4] == "tcp-session":
                        tcp_tmp["vxlan"] = "present"
        return active_tmp, inactive_tmp, tcp_tmp

    def get_exist_timer_out_para(self):
        """Record the pre-change netstream timeout parameters."""
        active_tmp, inactive_tmp, tcp_tmp = self._read_timeout_config()
        self.existing["active_timeout"].append(active_tmp)
        self.existing["inactive_timeout"].append(inactive_tmp)
        self.existing["tcp_timeout"].append(tcp_tmp)

    def get_end_timer_out_para(self):
        """Record the post-change netstream timeout parameters.

        Bug fix: the display filter used to be overwritten with
        "| ignore-case include evpn-overlay enable", so the end state was
        parsed from the wrong configuration lines. Existing and end state
        now both grep for "netstream timeout" via the shared helper.
        """
        active_tmp, inactive_tmp, tcp_tmp = self._read_timeout_config()
        self.end_state["active_timeout"].append(active_tmp)
        self.end_state["inactive_timeout"].append(inactive_tmp)
        self.end_state["tcp_timeout"].append(tcp_tmp)

    def check_params(self):
        """Validate all input parameters, failing the module on bad input."""
        # interval check
        if not str(self.timeout_interval).isdigit():
            self.module.fail_json(
                msg='Error: Timeout interval should be numerical.')
        if self.timeout_type == "active":
            if int(self.timeout_interval) < 1 or int(self.timeout_interval) > 60:
                self.module.fail_json(
                    msg="Error: Active interval should between 1 - 60 minutes.")
        if self.timeout_type == "inactive":
            if int(self.timeout_interval) < 5 or int(self.timeout_interval) > 600:
                self.module.fail_json(
                    msg="Error: Inactive interval should between 5 - 600 seconds.")
        if self.timeout_type == "manual":
            if not self.manual_slot:
                self.module.fail_json(
                    msg="Error: If use manual timeout mode,slot number is needed.")
            if not str(self.manual_slot).isdigit():
                self.module.fail_json(
                    msg='Error: Slot number should be numerical.')

    def get_proposed(self):
        """Collect the proposed (requested) parameters for reporting."""
        if self.timeout_interval:
            self.proposed["timeout_interval"] = self.timeout_interval
        if self.timeout_type:
            self.proposed["timeout_type"] = self.timeout_type
        if self.type:
            self.proposed["type"] = self.type
        if self.state:
            self.proposed["state"] = self.state
        if self.manual_slot:
            self.proposed["manual_slot"] = self.manual_slot

    def get_existing(self):
        """Read the current state and decide which categories must change."""
        self.get_exist_timer_out_para()

        if self.timeout_type == "active":
            for active_tmp in self.existing["active_timeout"]:
                if self.state == "present":
                    if str(active_tmp[self.type]) != self.timeout_interval:
                        self.active_changed = True
                else:
                    # For absent: the specified interval must match what is
                    # configured (30 is the device default, meaning "unset").
                    if self.timeout_interval != "30":
                        if str(active_tmp[self.type]) != "30":
                            if str(active_tmp[self.type]) != self.timeout_interval:
                                self.module.fail_json(
                                    msg='Error: The specified active interval do not exist.')
                    if str(active_tmp[self.type]) != "30":
                        self.timeout_interval = active_tmp[self.type]
                        self.active_changed = True
        if self.timeout_type == "inactive":
            for inactive_tmp in self.existing["inactive_timeout"]:
                if self.state == "present":
                    if str(inactive_tmp[self.type]) != self.timeout_interval:
                        self.inactive_changed = True
                else:
                    if self.timeout_interval != "30":
                        if str(inactive_tmp[self.type]) != "30":
                            if str(inactive_tmp[self.type]) != self.timeout_interval:
                                self.module.fail_json(
                                    msg='Error: The specified inactive interval do not exist.')
                    if str(inactive_tmp[self.type]) != "30":
                        self.timeout_interval = inactive_tmp[self.type]
                        self.inactive_changed = True
        if self.timeout_type == "tcp-session":
            for tcp_tmp in self.existing["tcp_timeout"]:
                # tcp state is stored as 'present'/'absent', same domain as
                # self.state, so direct comparison detects a needed change.
                if str(tcp_tmp[self.type]) != self.state:
                    self.tcp_changed = True

    def operate_time_out(self):
        """Build and push the CLI commands implementing the requested change."""
        cmd = ""
        if self.timeout_type == "manual":
            # Manual mode flushes the cache for one slot; always executed.
            if self.type == "ip":
                self.cli_add_command("quit")
                cmd = "reset netstream cache ip slot %s" % self.manual_slot
                self.cli_add_command(cmd)
            elif self.type == "vxlan":
                self.cli_add_command("quit")
                cmd = "reset netstream cache vxlan inner-ip slot %s" % self.manual_slot
                self.cli_add_command(cmd)

        if not self.active_changed and not self.inactive_changed and not self.tcp_changed:
            if self.commands:
                self.cli_load_config(self.commands)
                self.changed = True
            return

        if self.active_changed or self.inactive_changed:
            if self.type == "ip":
                cmd = "netstream timeout ip %s %s" % (self.timeout_type, self.timeout_interval)
            elif self.type == "vxlan":
                cmd = "netstream timeout vxlan inner-ip %s %s" % (self.timeout_type, self.timeout_interval)
            if self.state == "absent":
                self.cli_add_command(cmd, undo=True)
            else:
                self.cli_add_command(cmd)
        if self.timeout_type == "tcp-session" and self.tcp_changed:
            if self.type == "ip":
                if self.state == "present":
                    cmd = "netstream timeout ip tcp-session"
                else:
                    cmd = "undo netstream timeout ip tcp-session"
            elif self.type == "vxlan":
                if self.state == "present":
                    cmd = "netstream timeout vxlan inner-ip tcp-session"
                else:
                    cmd = "undo netstream timeout vxlan inner-ip tcp-session"
            self.cli_add_command(cmd)
        if self.commands:
            self.cli_load_config(self.commands)
            self.changed = True

    def get_end_state(self):
        """Record the device state after the change for reporting."""
        self.get_end_timer_out_para()

    def work(self):
        """Main worker: validate, diff, apply, and report results."""
        self.check_params()
        self.get_existing()
        self.get_proposed()
        self.operate_time_out()
        self.get_end_state()

        self.results['changed'] = self.changed
        self.results['proposed'] = self.proposed
        self.results['existing'] = self.existing
        self.results['end_state'] = self.end_state
        if self.changed:
            self.results['updates'] = self.updates_cmd
        else:
            self.results['updates'] = list()

        self.module.exit_json(**self.results)
def main():
    """Module entry point: build the argument spec and run the worker."""
    argument_spec = dict(
        timeout_interval=dict(required=False, type='str', default='30'),
        type=dict(required=False, choices=['ip', 'vxlan']),
        state=dict(required=False, choices=['present', 'absent'], default='present'),
        timeout_type=dict(required=False, choices=['active', 'inactive', 'tcp-session', 'manual']),
        manual_slot=dict(required=False, type='str'),
    )
    # Merge in the common CloudEngine connection arguments.
    argument_spec.update(ce_argument_spec)
    netstream_aging = NetStreamAging(argument_spec)
    netstream_aging.work()


if __name__ == '__main__':
    main()
| gpl-3.0 |
indirectlylit/kolibri | kolibri/core/discovery/test/test_api.py | 2 | 4426 | from __future__ import absolute_import
from __future__ import print_function
from __future__ import unicode_literals
import mock
import requests
from django.core.urlresolvers import reverse
from rest_framework import status
from rest_framework.test import APITestCase
from .. import models
from ..utils.network import connections
from .helpers import info as mock_device_info
from .helpers import mock_request
from kolibri.core.auth.test.helpers import create_superuser
from kolibri.core.auth.test.helpers import DUMMY_PASSWORD
from kolibri.core.auth.test.helpers import provision_device
from kolibri.core.auth.test.test_api import FacilityFactory
from kolibri.core.auth.test.test_api import FacilityUserFactory
@mock.patch.object(requests.Session, "get", mock_request)
@mock.patch.object(connections, "check_connection_info", mock_device_info)
@mock.patch.object(connections, "check_if_port_open", lambda *a: True)
class NetworkLocationAPITestCase(APITestCase):
    """API tests for the static network-location endpoints.

    All outbound network activity is patched out at the class level: HTTP
    GETs are served by ``mock_request``, device-info checks return canned
    data, and every port is reported open, so these tests exercise only
    the API/permission layer.
    """

    @classmethod
    def setUpTestData(cls):
        # Shared fixtures: a provisioned device, one facility with a
        # superuser and a learner, and three pre-existing locations whose
        # hostnames the mocks treat as reachable-Kolibri, non-Kolibri,
        # and unreachable respectively.
        provision_device()
        cls.facility = FacilityFactory.create()
        cls.superuser = create_superuser(cls.facility)
        cls.learner = FacilityUserFactory(facility=cls.facility)
        cls.existing_happy_netloc = models.NetworkLocation.objects.create(
            base_url="https://kolibrihappyurl.qqq/"
        )
        cls.existing_nonkolibri_netloc = models.NetworkLocation.objects.create(
            base_url="https://nonkolibrihappyurl.qqq/"
        )
        cls.existing_sad_netloc = models.NetworkLocation.objects.create(
            base_url="https://sadurl.qqq/"
        )

    def login(self, user):
        # Helper: authenticate the test client as the given facility user.
        self.client.login(
            username=user.username, password=DUMMY_PASSWORD, facility=user.facility
        )

    def test_creating_good_address(self):
        # A reachable Kolibri host is accepted and its URL normalized.
        self.login(self.superuser)
        response = self.client.post(
            reverse("kolibri:core:staticnetworklocation-list"),
            data={"base_url": "kolibrihappyurl.qqq"},
            format="json",
        )
        self.assertEqual(response.status_code, status.HTTP_201_CREATED)
        self.assertEqual(response.data["base_url"], "https://kolibrihappyurl.qqq/")

    def test_creating_good_address_with_one_url_timing_out(self):
        # If port 80 times out, the fallback port (8080) is stored instead.
        self.login(self.superuser)
        response = self.client.post(
            reverse("kolibri:core:staticnetworklocation-list"),
            data={"base_url": "timeoutonport80url.qqq"},
            format="json",
        )
        self.assertEqual(response.status_code, status.HTTP_201_CREATED)
        self.assertEqual(
            response.data["base_url"], "http://timeoutonport80url.qqq:8080/"
        )

    def test_creating_bad_address(self):
        # A host that is not a Kolibri instance is rejected with a 400.
        self.login(self.superuser)
        response = self.client.post(
            reverse("kolibri:core:staticnetworklocation-list"),
            data={"base_url": "nonkolibrihappyurl.qqq"},
            format="json",
        )
        self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)

    def test_reading_network_location_list(self):
        # Superusers may list network locations.
        self.login(self.superuser)
        response = self.client.get(reverse("kolibri:core:staticnetworklocation-list"))
        self.assertEqual(response.status_code, status.HTTP_200_OK)

    def test_cannot_read_network_location_list_as_anon_user(self):
        # Anonymous users are forbidden from listing.
        response = self.client.get(reverse("kolibri:core:staticnetworklocation-list"))
        self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)

    def test_cannot_read_network_location_list_as_learner(self):
        # Learners are forbidden from listing.
        self.login(self.learner)
        response = self.client.get(reverse("kolibri:core:staticnetworklocation-list"))
        self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)

    def test_cannot_create_location_as_anon_user(self):
        # Anonymous users are forbidden from creating.
        response = self.client.post(
            reverse("kolibri:core:staticnetworklocation-list"),
            data={"base_url": "kolibrihappyurl.qqq"},
            format="json",
        )
        self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)

    def test_cannot_create_location_as_learner(self):
        # Learners are forbidden from creating.
        self.login(self.learner)
        response = self.client.post(
            reverse("kolibri:core:staticnetworklocation-list"),
            data={"base_url": "kolibrihappyurl.qqq"},
            format="json",
        )
        self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)
| mit |
shanemcd/ansible | lib/ansible/modules/packaging/os/pacman.py | 14 | 14647 | #!/usr/bin/python -tt
# -*- coding: utf-8 -*-
# (c) 2012, Afterburn <http://github.com/afterburn>
# (c) 2013, Aaron Bull Schaefer <aaron@elasticdog.com>
# (c) 2015, Indrajit Raychaudhuri <irc+code@indrajit.com>
#
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: pacman
short_description: Manage packages with I(pacman)
description:
- Manage packages with the I(pacman) package manager, which is used by
Arch Linux and its variants.
version_added: "1.0"
author:
- "Indrajit Raychaudhuri (@indrajitr)"
- "'Aaron Bull Schaefer (@elasticdog)' <aaron@elasticdog.com>"
- "Afterburn"
notes: []
requirements: []
options:
name:
description:
- Name of the package to install, upgrade, or remove.
required: false
default: null
aliases: [ 'pkg', 'package' ]
state:
description:
- Desired state of the package.
required: false
default: "present"
choices: ["present", "absent", "latest"]
recurse:
description:
- When removing a package, also remove its dependencies, provided
that they are not required by other packages and were not
explicitly installed by a user.
required: false
default: no
choices: ["yes", "no"]
version_added: "1.3"
force:
description:
- When removing package - force remove package, without any
checks. When update_cache - force redownload repo
databases.
required: false
default: no
choices: ["yes", "no"]
version_added: "2.0"
update_cache:
description:
- Whether or not to refresh the master package lists. This can be
run as part of a package installation or as a separate step.
required: false
default: no
choices: ["yes", "no"]
aliases: [ 'update-cache' ]
upgrade:
description:
- Whether or not to upgrade whole system
required: false
default: no
choices: ["yes", "no"]
version_added: "2.0"
'''
RETURN = '''
packages:
description: a list of packages that have been changed
returned: when upgrade is set to yes
type: list
sample: ['package', 'other-package']
'''
EXAMPLES = '''
# Install package foo
- pacman:
name: foo
state: present
# Upgrade package foo
- pacman:
name: foo
state: latest
update_cache: yes
# Remove packages foo and bar
- pacman:
name: foo,bar
state: absent
# Recursively remove package baz
- pacman:
name: baz
state: absent
recurse: yes
# Run the equivalent of "pacman -Sy" as a separate step
- pacman:
update_cache: yes
# Run the equivalent of "pacman -Su" as a separate step
- pacman:
upgrade: yes
# Run the equivalent of "pacman -Syu" as a separate step
- pacman:
update_cache: yes
upgrade: yes
# Run the equivalent of "pacman -Rdd", force remove package baz
- pacman:
name: baz
state: absent
force: yes
'''
import re
def get_version(pacman_output):
    """Take pacman -Qi or pacman -Si output and get the Version.

    Args:
        pacman_output: multi-line stdout of a ``pacman -Qi`` / ``pacman -Si``
            query.

    Returns:
        The version string, including any epoch prefix (e.g. ``1:2.3-1``),
        or None when no ``Version`` field is present.
    """
    for line in pacman_output.split('\n'):
        if 'Version' in line:
            # Split on the first ':' only, so epoch-qualified versions such
            # as "1:2.3-1" are returned intact (splitting on every ':' cut
            # the version off at the epoch separator).
            return line.split(':', 1)[1].strip()
    return None
def query_package(module, pacman_path, name, state="present"):
    """Query the package status in the local system and in the repository.

    For state == "present", returns a 3-tuple of booleans:
    (installed, up_to_date, remote_info_unavailable). For any other state
    the function returns None.
    """
    if state != "present":
        return
    local_rc, local_out, dummy = module.run_command(
        "%s -Qi %s" % (pacman_path, name), check_rc=False)
    if local_rc != 0:
        # Not installed locally, so nothing else to report.
        return False, False, False
    # Locally installed version (if the field is present).
    local_version = get_version(local_out)
    remote_rc, remote_out, dummy = module.run_command(
        "%s -Si %s" % (pacman_path, name), check_rc=False)
    # Version available in the configured repositories.
    remote_version = get_version(remote_out)
    if remote_rc == 0:
        # Installed; up-to-date exactly when local and repo versions agree.
        return True, local_version == remote_version, False
    # Installed, but the repository lookup failed; the final True flags
    # that remote information was unavailable.
    return True, True, True
def update_package_db(module, pacman_path):
    """Refreshes the pacman package databases (-Sy, or -Syy when forced).

    Returns True on success; calls module.fail_json on failure.
    """
    refresh_flags = "Syy" if module.params["force"] else "Sy"
    rc, dummy_stdout, dummy_stderr = module.run_command(
        "%s -%s" % (pacman_path, refresh_flags), check_rc=False)
    if rc == 0:
        return True
    module.fail_json(msg="could not update package db")
def upgrade(module, pacman_path):
    """Runs a full system upgrade (`pacman -Suq`) when updates are pending.

    Always exits the module through exit_json/fail_json; never returns
    normally.

    Args:
        module: AnsibleModule instance (provides run_command / exit_json /
            fail_json, check_mode and _diff flags).
        pacman_path: path to the pacman binary.
    """
    cmdupgrade = "%s -Suq --noconfirm" % (pacman_path)
    cmdneedrefresh = "%s -Qu" % (pacman_path)
    rc, stdout, stderr = module.run_command(cmdneedrefresh, check_rc=False)
    # Filter out empty lines instead of list.remove(''), which raised
    # ValueError whenever the output contained no empty line (e.g. stdout
    # without a trailing newline) and removed only the first one otherwise.
    data = [line for line in stdout.split('\n') if line]
    packages = []
    diff = {
        'before': '',
        'after': '',
    }
    if rc == 0:
        # `pacman -Qu` prints "name old-version -> new-version" per package.
        regex = re.compile(r'([\w-]+) ((?:\S+)-(?:\S+)) -> ((?:\S+)-(?:\S+))')
        for p in data:
            m = regex.search(p)
            if m is None:
                # Skip lines that do not look like an upgrade entry rather
                # than crashing on m.group(...) with an AttributeError.
                continue
            packages.append(m.group(1))
            if module._diff:
                diff['before'] += "%s-%s\n" % (m.group(1), m.group(2))
                diff['after'] += "%s-%s\n" % (m.group(1), m.group(3))
        if module.check_mode:
            module.exit_json(changed=True, msg="%s package(s) would be upgraded" % (len(data)), packages=packages, diff=diff)
        rc, stdout, stderr = module.run_command(cmdupgrade, check_rc=False)
        if rc == 0:
            module.exit_json(changed=True, msg='System upgraded', packages=packages, diff=diff)
        else:
            module.fail_json(msg="Could not upgrade")
    else:
        module.exit_json(changed=False, msg='Nothing to upgrade', packages=packages)
def remove_packages(module, pacman_path, packages):
    """Removes each installed package in `packages`.

    The pacman operation is picked from the module's recurse/force params
    (-R, -Rs, -Rdd or -Rdds). Always exits via exit_json/fail_json.
    """
    data = []
    diff = {
        'before': '',
        'after': '',
    }
    # Choose removal flags; when both recurse and force are set the
    # combined -Rdds wins over the individual assignments above it.
    if module.params["recurse"] or module.params["force"]:
        if module.params["recurse"]:
            args = "Rs"
        if module.params["force"]:
            args = "Rdd"
        if module.params["recurse"] and module.params["force"]:
            args = "Rdds"
    else:
        args = "R"
    remove_c = 0
    # Using a for loop in case of error, we can report the package that failed
    for package in packages:
        # Query the package first, to see if we even need to remove
        installed, updated, unknown = query_package(module, pacman_path, package)
        if not installed:
            continue
        cmd = "%s -%s %s --noconfirm --noprogressbar" % (pacman_path, args, package)
        rc, stdout, stderr = module.run_command(cmd, check_rc=False)
        if rc != 0:
            module.fail_json(msg="failed to remove %s" % (package))
        if module._diff:
            # NOTE(review): assumes the third output line lists the removed
            # packages ("... pkg/name-version ...") -- confirm against the
            # pacman output format in use.
            d = stdout.split('\n')[2].split(' ')[2:]
            for i, pkg in enumerate(d):
                # `d[i]` is stripped to the bare name; `pkg` keeps the
                # original versioned name, which is what the diff records.
                d[i] = re.sub('-[0-9].*$', '', d[i].split('/')[-1])
                diff['before'] += "%s\n" % pkg
            data.append('\n'.join(d))
        remove_c += 1
    if remove_c > 0:
        module.exit_json(changed=True, msg="removed %s package(s)" % remove_c, diff=diff)
    module.exit_json(changed=False, msg="package(s) already absent")
def install_packages(module, pacman_path, state, packages, package_files):
    """Installs packages from repositories (-S) and/or local files (-U).

    `package_files[i]` is either None (install `packages[i]` from a repo)
    or a path to a package archive. Always exits via exit_json/fail_json.
    """
    install_c = 0
    package_err = []
    message = ""
    data = []
    diff = {
        'before': '',
        'after': '',
    }
    to_install_repos = []
    to_install_files = []
    for i, package in enumerate(packages):
        # if the package is installed and state == present or state == latest and is up-to-date then skip
        installed, updated, latestError = query_package(module, pacman_path, package)
        if latestError and state == 'latest':
            # Remote version could not be fetched; remember it for the
            # final message but still try to install/refresh it below.
            package_err.append(package)
        if installed and (state == 'present' or (state == 'latest' and updated)):
            continue
        if package_files[i]:
            to_install_files.append(package_files[i])
        else:
            to_install_repos.append(package)
    if to_install_repos:
        cmd = "%s -S %s --noconfirm --noprogressbar --needed" % (pacman_path, " ".join(to_install_repos))
        rc, stdout, stderr = module.run_command(cmd, check_rc=False)
        if rc != 0:
            module.fail_json(msg="failed to install %s: %s" % (" ".join(to_install_repos), stderr))
        # NOTE(review): assumes the fourth output line lists the installed
        # packages -- confirm against the pacman output format in use.
        data = stdout.split('\n')[3].split(' ')[2:]
        data = [ i for i in data if i != '' ]
        for i, pkg in enumerate(data):
            # `data[i]` is stripped to the bare name; `pkg` keeps the
            # original versioned name, which is what the diff records.
            data[i] = re.sub('-[0-9].*$', '', data[i].split('/')[-1])
            if module._diff:
                diff['after'] += "%s\n" % pkg
        install_c += len(to_install_repos)
    if to_install_files:
        cmd = "%s -U %s --noconfirm --noprogressbar --needed" % (pacman_path, " ".join(to_install_files))
        rc, stdout, stderr = module.run_command(cmd, check_rc=False)
        if rc != 0:
            module.fail_json(msg="failed to install %s: %s" % (" ".join(to_install_files), stderr))
        data = stdout.split('\n')[3].split(' ')[2:]
        data = [ i for i in data if i != '' ]
        for i, pkg in enumerate(data):
            data[i] = re.sub('-[0-9].*$', '', data[i].split('/')[-1])
            if module._diff:
                diff['after'] += "%s\n" % pkg
        install_c += len(to_install_files)
    if state == 'latest' and len(package_err) > 0:
        message = "But could not ensure 'latest' state for %s package(s) as remote version could not be fetched." % (package_err)
    if install_c > 0:
        module.exit_json(changed=True, msg="installed %s package(s). %s" % (install_c, message), diff=diff)
    module.exit_json(changed=False, msg="package(s) already installed. %s" % (message), diff=diff)
def check_packages(module, pacman_path, packages, state):
    """Simulates a run: reports via exit_json which packages would change.

    Used in check mode; always exits through module.exit_json.
    """
    diff = {
        'before': '',
        'after': '',
        'before_header': '',
        'after_header': ''
    }
    would_be_changed = []
    for pkg in packages:
        installed, updated, unknown = query_package(module, pacman_path, pkg)
        needs_install = state in ["present", "latest"] and not installed
        needs_removal = state == "absent" and installed
        needs_update = state == "latest" and not updated
        if needs_install or needs_removal or needs_update:
            would_be_changed.append(pkg)
    if would_be_changed:
        # Report "removed" rather than "absent" in the message/diff.
        if state == "absent":
            state = "removed"
        if module._diff and state == 'removed':
            diff['before_header'] = 'removed'
            diff['before'] = '\n'.join(would_be_changed) + '\n'
        elif module._diff and state in ('present', 'latest'):
            diff['after_header'] = 'installed'
            diff['after'] = '\n'.join(would_be_changed) + '\n'
        module.exit_json(changed=True, msg="%s package(s) would be %s" % (
            len(would_be_changed), state), diff=diff)
    else:
        module.exit_json(changed=False, msg="package(s) already %s" % state, diff=diff)
def expand_package_groups(module, pacman_path, pkgs):
    """Replaces any pacman group names in `pkgs` with the group's members.

    Names pacman does not recognise as a group are passed through
    unchanged; empty entries are dropped.
    """
    expanded = []
    for candidate in pkgs:
        if not candidate:  # skip empty strings / None entries
            continue
        rc, stdout, dummy = module.run_command(
            "%s -Sgq %s" % (pacman_path, candidate), check_rc=False)
        if rc != 0:
            # Not a group: keep the name as a plain package.
            expanded.append(candidate)
            continue
        # A group was found matching the name, so expand it.
        members = (line.strip() for line in stdout.split('\n'))
        expanded.extend(member for member in members if member)
    return expanded
def main():
    """Module entry point.

    Parses parameters, normalizes `state`, then dispatches to the
    update/upgrade/install/remove helpers. Always exits through
    AnsibleModule exit_json/fail_json.
    """
    module = AnsibleModule(
        argument_spec = dict(
            name = dict(aliases=['pkg', 'package'], type='list'),
            state = dict(default='present', choices=['present', 'installed', "latest", 'absent', 'removed']),
            recurse = dict(default=False, type='bool'),
            force = dict(default=False, type='bool'),
            upgrade = dict(default=False, type='bool'),
            update_cache = dict(default=False, aliases=['update-cache'], type='bool')
        ),
        required_one_of = [['name', 'update_cache', 'upgrade']],
        supports_check_mode = True)
    pacman_path = module.get_bin_path('pacman', True)
    p = module.params
    # normalize the state parameter
    if p['state'] in ['present', 'installed']:
        p['state'] = 'present'
    elif p['state'] in ['absent', 'removed']:
        p['state'] = 'absent'
    if p["update_cache"] and not module.check_mode:
        update_package_db(module, pacman_path)
        if not (p['name'] or p['upgrade']):
            module.exit_json(changed=True, msg='Updated the package master lists')
    if p['update_cache'] and module.check_mode and not (p['name'] or p['upgrade']):
        module.exit_json(changed=True, msg='Would have updated the package cache')
    if p['upgrade']:
        upgrade(module, pacman_path)
    if p['name']:
        pkgs = expand_package_groups(module, pacman_path, p['name'])
        # pkg_files[i] is the archive path when pkgs[i] was given as a
        # package file, else None (install from the repositories).
        pkg_files = []
        for i, pkg in enumerate(pkgs):
            if not pkg: # avoid empty strings
                continue
            elif re.match(".*\.pkg\.tar(\.(gz|bz2|xz|lrz|lzo|Z))?$", pkg):
                # The package given is a filename, extract the raw pkg name from
                # it and store the filename
                pkg_files.append(pkg)
                pkgs[i] = re.sub('-[0-9].*$', '', pkgs[i].split('/')[-1])
            else:
                pkg_files.append(None)
        if module.check_mode:
            # check_packages() exits the module in every path, so the real
            # install/remove calls below only run outside check mode.
            check_packages(module, pacman_path, pkgs, p['state'])
        if p['state'] in ['present', 'latest']:
            install_packages(module, pacman_path, p['state'], pkgs, pkg_files)
        elif p['state'] == 'absent':
            remove_packages(module, pacman_path, pkgs)
# import module snippets
# NOTE: the wildcard import at the bottom follows the legacy Ansible
# module layout; AnsibleModule comes from this import.
from ansible.module_utils.basic import *
# Entry point when executed directly by Ansible.
if __name__ == "__main__":
    main()
| gpl-3.0 |
asimshankar/tensorflow | tensorflow/python/ops/resource_variable_ops.py | 2 | 60767 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Ops to use variables as resources."""
# pylint: disable=g-bad-name
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import contextlib
from tensorflow.core.framework import attr_value_pb2
from tensorflow.core.framework import variable_pb2
from tensorflow.python import pywrap_tensorflow
from tensorflow.python.eager import context
from tensorflow.python.eager import tape
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import cpp_shape_inference_pb2
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gen_array_ops
from tensorflow.python.ops import gen_resource_variable_ops
from tensorflow.python.ops import gen_state_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import variables
# go/tf-wildcard-import
# pylint: disable=wildcard-import
from tensorflow.python.ops.gen_resource_variable_ops import *
# pylint: enable=wildcard-import
from tensorflow.python.training.checkpointable import base as checkpointable
from tensorflow.python.util import compat
from tensorflow.python.util.deprecation import deprecated
def get_resource_handle_data(graph_op):
  """Fetches the serialized HandleData for a graph-mode resource tensor."""
  # pylint: disable=unidiomatic-typecheck
  assert type(graph_op) == ops.Tensor
  # pylint: disable=protected-access
  serialized = pywrap_tensorflow.GetHandleShapeAndType(
      graph_op.graph._c_graph, graph_op._as_tf_output())
  # pylint: enable=protected-access
  return (cpp_shape_inference_pb2.CppShapeInferenceResult.HandleData
          .FromString(compat.as_bytes(serialized)))
def eager_safe_variable_handle(shape, dtype, shared_name, name, graph_mode):
  """Creates a variable handle with information to do shape inference.

  Args:
    shape: shape of the variable.
    dtype: dtype of the variable's elements.
    shared_name: name of the underlying shared resource.
    name: op name for the handle op.
    graph_mode: True when building a graph; False under eager execution.

  Returns:
    A resource handle tensor whose `_handle_data` carries the shape/dtype
    needed by shape inference.

  Raises:
    ValueError: in eager mode, when a variable with `shared_name` already
      exists (resources follow object identity in the runtime).
  """
  container = ops.get_default_graph()._container # pylint: disable=protected-access
  if container is None:
    container = ""
  handle = gen_resource_variable_ops.var_handle_op(shape=shape, dtype=dtype,
                                                   shared_name=shared_name,
                                                   name=name,
                                                   container=container)
  if graph_mode:
    handle._handle_data = get_resource_handle_data(handle) # pylint: disable=protected-access
    return handle
  # We do not want two distinct ResourceVariable objects for the same
  # underlying resource in the runtime.
  # When in eager mode, explicitly ensure so here. When in graph mode, it's
  # ensured by always generating different variable names.
  exists = gen_resource_variable_ops.var_is_initialized_op(handle)
  if exists:
    raise ValueError("variable object with name '%s' already created. Use "
                     "get_variable() if reuse is desired." %
                     shared_name)
  # Build a throwaway graph-mode handle op purely to obtain handle data,
  # since GetHandleShapeAndType only works on graph tensors.
  with context.graph_mode(), ops.Graph().as_default() as graph:
    h = gen_resource_variable_ops.var_handle_op(shape=shape, dtype=dtype,
                                                shared_name=shared_name,
                                                name=name,
                                                container=container)
    # Tensor._handle_data contains information for the shape-inference code to
    # know the shape and dtype of the variable pointed to by a handle. Since
    # shape inference doesn't run in eager mode we copy this data here for when
    # the handle is captured by an eager mode function.
    # pylint: disable=protected-access
    handle._handle_data = get_resource_handle_data(h)
    # pylint: enable=protected-access
  # Clean up op->graph->op reference cycles.
  ops.dismantle_graph(graph)
  return handle
@contextlib.contextmanager
def _handle_graph(handle):
  """Enters `handle.graph` unless eager/function-building context applies."""
  # Note: might have an eager tensor but not be executing eagerly when building
  # functions.
  eager = context.executing_eagerly() or isinstance(handle, ops.EagerTensor)
  if eager or ops.has_default_graph():
    yield
    return
  with handle.graph.as_default():
    yield
class EagerResourceDeleter(object):
  """An object which cleans up a resource handle.
  An alternative to defining a __del__ method on an object. The intended use is
  that ResourceVariables or other objects with resource handles will maintain a
  single reference to this object. When the parent object is collected, this
  object will be too. Even if the parent object is part of a reference cycle,
  the cycle will be collectable.
  """
  def __init__(self, handle, handle_device):
    """Args:
      handle: resource handle `Tensor` to destroy when this object dies.
      handle_device: device string the resource lives on.
    Raises:
      ValueError: if `handle` is not a `Tensor`.
    """
    if not isinstance(handle, ops.Tensor):
      raise ValueError(
          ("Passed handle=%s to EagerResourceDeleter. Was expecting a handle "
           "Tensor." % (handle,)))
    self._handle = handle
    self._handle_device = handle_device
  def __del__(self):
    """Destroys the underlying resource; never raises (see comments)."""
    # Resources follow object-identity when executing eagerly, so it is safe to
    # delete the resource we have a handle to.
    try:
      # This resource was created in eager mode. However, this destructor may be
      # running in graph mode (especially during unit tests). To clean up
      # successfully, we switch back into eager mode temporarily.
      with context.eager_mode():
        with ops.device(self._handle_device):
          gen_resource_variable_ops.destroy_resource_op(
              self._handle, ignore_lookup_error=True)
    except TypeError:
      # Suppress some exceptions, mainly for the case when we're running on
      # module deletion. Things that can go wrong include the context module
      # already being unloaded, self._handle._handle_data no longer being
      # valid, and so on. Printing warnings in these cases is silly
      # (exceptions raised from __del__ are printed as warnings to stderr).
      pass # 'NoneType' object is not callable when the handle has been
      # partially unloaded.
    except AttributeError:
      pass # 'NoneType' object has no attribute 'eager_mode' when context has
      # been unloaded. Will catch other module unloads as well.
def shape_safe_assign_variable_handle(handle, shape, value, name=None):
  """Assigns `value` to `handle` after validating shape compatibility."""
  with _handle_graph(handle):
    tensor_value = ops.convert_to_tensor(value)
    # Reject values whose static shape is incompatible with the variable's.
    shape.assert_is_compatible_with(tensor_value.shape)
    return gen_resource_variable_ops.assign_variable_op(
        handle, tensor_value, name=name)
# TODO(apassos) make this be variables.Variable
class ResourceVariable(variables.RefVariable):
"""Variable based on resource handles.
See the [Variables How To](https://tensorflow.org/guide/variables)
for a high level overview.
A `ResourceVariable` allows you to maintain state across subsequent calls to
session.run.
The `ResourceVariable` constructor requires an initial value for the variable,
which can be a `Tensor` of any type and shape. The initial value defines the
type and shape of the variable. After construction, the type and shape of
the variable are fixed. The value can be changed using one of the assign
methods.
Just like any `Tensor`, variables created with
`tf.Variable(use_resource=True)` can be used as inputs for other Ops in the
graph. Additionally, all the operators overloaded for the `Tensor` class are
carried over to variables, so you can also add nodes to the graph by just
doing arithmetic on variables.
Unlike ref-based variable, a ResourceVariable has well-defined semantics. Each
usage of a ResourceVariable in a TensorFlow graph adds a read_value operation
to the graph. The Tensors returned by a read_value operation are guaranteed to
see all modifications to the value of the variable which happen in any
operation on which the read_value depends on (either directly, indirectly, or
via a control dependency) and guaranteed to not see any modification to the
value of the variable from operations that depend on the read_value operation.
Updates from operations that have no dependency relationship to the read_value
operation might or might not be visible to read_value.
For example, if there is more than one assignment to a ResourceVariable in
a single session.run call there is a well-defined value for each operation
which uses the variable's value if the assignments and the read are connected
by edges in the graph. Consider the following example, in which two writes
can cause tf.Variable and tf.ResourceVariable to behave differently:
```python
a = tf.Variable(1.0, use_resource=True)
a.initializer.run()
assign = a.assign(2.0)
with tf.control_dependencies([assign]):
b = a.read_value()
with tf.control_dependencies([b]):
other_assign = a.assign(3.0)
with tf.control_dependencies([other_assign]):
# Will print 2.0 because the value was read before other_assign ran. If
# `a` was a tf.Variable instead, 2.0 or 3.0 could be printed.
tf.Print(b, [b]).eval()
```
"""
  def __init__(self,
               initial_value=None,
               trainable=True,
               collections=None,
               validate_shape=True,
               caching_device=None,
               name=None,
               dtype=None,
               variable_def=None,
               import_scope=None,
               constraint=None):
    """Creates a variable.
    Args:
      initial_value: A `Tensor`, or Python object convertible to a `Tensor`,
        which is the initial value for the Variable. The initial value must have
        a shape specified unless `validate_shape` is set to False. Can also be a
        callable with no argument that returns the initial value when called.
        (Note that initializer functions from init_ops.py must first be bound
        to a shape before being used here.)
      trainable: If `True`, the default, also adds the variable to the graph
        collection `GraphKeys.TRAINABLE_VARIABLES`. This collection is used as
        the default list of variables to use by the `Optimizer` classes.
      collections: List of graph collections keys. The new variable is added to
        these collections. Defaults to `[GraphKeys.GLOBAL_VARIABLES]`.
      validate_shape: Ignored. Provided for compatibility with tf.Variable.
      caching_device: Optional device string or function describing where the
        Variable should be cached for reading. Defaults to the Variable's
        device. If not `None`, caches on another device. Typical use is to
        cache on the device where the Ops using the Variable reside, to
        deduplicate copying through `Switch` and other conditional statements.
      name: Optional name for the variable. Defaults to `'Variable'` and gets
        uniquified automatically.
      dtype: If set, initial_value will be converted to the given type.
        If None, either the datatype will be kept (if initial_value is
        a Tensor) or float32 will be used (if it is a Python object convertible
        to a Tensor).
      variable_def: `VariableDef` protocol buffer. If not None, recreates the
        `ResourceVariable` object with its contents. `variable_def` and other
        arguments (except for import_scope) are mutually exclusive.
      import_scope: Optional `string`. Name scope to add to the
        ResourceVariable. Only used when `variable_def` is provided.
      constraint: An optional projection function to be applied to the variable
        after being updated by an `Optimizer` (e.g. used to implement norm
        constraints or value constraints for layer weights). The function must
        take as input the unprojected Tensor representing the value of the
        variable and return the Tensor for the projected value
        (which must have the same shape). Constraints are not safe to
        use when doing asynchronous distributed training.
    Raises:
      ValueError: If the initial value is not specified, or does not have a
        shape and `validate_shape` is `True`.
    @compatibility(eager)
    When Eager Execution is enabled, the default for the `collections` argument
    is `None`, which signifies that this `Variable` will not be added to any
    collections.
    @end_compatibility
    """
    # Two mutually exclusive construction paths: deserialize from a
    # VariableDef proto, or build fresh ops from the given arguments.
    if variable_def:
      if initial_value is not None:
        raise ValueError("variable_def and initial_value are mutually "
                         "exclusive.")
      if context.executing_eagerly():
        raise ValueError("Creating ResourceVariable from variable_def is "
                         "not supported when eager execution is enabled.")
      self._init_from_proto(variable_def, import_scope=import_scope)
    else:
      self._init_from_args(
          initial_value=initial_value,
          trainable=trainable,
          collections=collections,
          validate_shape=validate_shape,
          caching_device=caching_device,
          name=name,
          dtype=dtype,
          constraint=constraint)
# pylint: disable=unused-argument
  def _init_from_args(self,
                      initial_value=None,
                      trainable=True,
                      collections=None,
                      validate_shape=True,
                      caching_device=None,
                      name=None,
                      dtype=None,
                      constraint=None):
    """Creates a variable.
    Args:
      initial_value: A `Tensor`, or Python object convertible to a `Tensor`,
        which is the initial value for the Variable. The initial value must have
        a shape specified unless `validate_shape` is set to False. Can also be a
        callable with no argument that returns the initial value when called.
        (Note that initializer functions from init_ops.py must first be bound
        to a shape before being used here.)
      trainable: If `True`, the default, also adds the variable to the graph
        collection `GraphKeys.TRAINABLE_VARIABLES`. This collection is used as
        the default list of variables to use by the `Optimizer` classes.
      collections: List of graph collections keys. The new variable is added to
        these collections. Defaults to `[GraphKeys.GLOBAL_VARIABLES]`.
      validate_shape: Ignored. Provided for compatibility with tf.Variable.
      caching_device: Optional device string or function describing where the
        Variable should be cached for reading. Defaults to the Variable's
        device. If not `None`, caches on another device. Typical use is to
        cache on the device where the Ops using the Variable reside, to
        deduplicate copying through `Switch` and other conditional statements.
      name: Optional name for the variable. Defaults to `'Variable'` and gets
        uniquified automatically.
      dtype: If set, initial_value will be converted to the given type.
        If None, either the datatype will be kept (if initial_value is
        a Tensor) or float32 will be used (if it is a Python object convertible
        to a Tensor).
      constraint: An optional projection function to be applied to the variable
        after being updated by an `Optimizer` (e.g. used to implement norm
        constraints or value constraints for layer weights). The function must
        take as input the unprojected Tensor representing the value of the
        variable and return the Tensor for the projected value
        (which must have the same shape). Constraints are not safe to
        use when doing asynchronous distributed training.
    Raises:
      ValueError: If the initial value is not specified, or does not have a
        shape and `validate_shape` is `True`.
    @compatibility(eager)
    When Eager Execution is enabled, variables are never added to collections.
    It is not implicitly added to the `GLOBAL_VARIABLES` or
    `TRAINABLE_VARIABLES` collections, and the `collections` argument is
    ignored.
    @end_compatibility
    """
    if initial_value is None:
      raise ValueError("initial_value must be specified.")
    init_from_fn = callable(initial_value)
    if isinstance(initial_value, ops.Tensor) and hasattr(
        initial_value, "graph") and initial_value.graph.building_function:
      raise ValueError("Tensor-typed variable initializers must either be "
                       "wrapped in an init_scope or callable "
                       "(e.g., `tf.Variable(lambda : "
                       "tf.truncated_normal([10, 40]))`) when building "
                       "functions. Please file a feature request if this "
                       "restriction inconveniences you.")
    if collections is None:
      collections = [ops.GraphKeys.GLOBAL_VARIABLES]
    if not isinstance(collections, (list, tuple, set)):
      raise ValueError(
          "collections argument to Variable constructor must be a list, tuple, "
          "or set. Got %s of type %s" % (collections, type(collections)))
    if constraint is not None and not callable(constraint):
      raise ValueError("The `constraint` argument must be a callable.")
    # Unwrap checkpoint-restoring initial values and remember the restore
    # uid so later restores can be ordered correctly.
    if isinstance(initial_value, checkpointable.CheckpointInitialValue):
      self._maybe_initialize_checkpointable()
      self._update_uid = initial_value.checkpoint_position.restore_uid
      initial_value = initial_value.wrapped_value
    self._trainable = trainable
    if trainable and ops.GraphKeys.TRAINABLE_VARIABLES not in collections:
      collections = list(collections) + [ops.GraphKeys.TRAINABLE_VARIABLES]
    self._save_slice_info = None
    # Store the graph key so optimizers know how to only retrieve variables from
    # this graph.
    self._graph_key = ops.get_default_graph()._graph_key # pylint: disable=protected-access
    with ops.init_scope():
      # Record the execution mode at creation time; it drives whether we
      # build initializer/read ops (graph) or assign immediately (eager).
      self._in_graph_mode = not context.executing_eagerly()
      with ops.name_scope(name, "Variable", []
                          if init_from_fn else [initial_value]) as name:
        # pylint: disable=protected-access
        handle_name = ops._name_from_scope_name(name)
        if self._in_graph_mode:
          shared_name = handle_name
        else:
          # When in eager mode use a uid for the shared_name, to prevent
          # accidental sharing.
          shared_name = "%s_%d" % (handle_name, ops.uid())
        # Use attr_scope and device(None) to simulate the behavior of
        # colocate_with when the variable we want to colocate with doesn't
        # yet exist.
        attr = attr_value_pb2.AttrValue(
            list=attr_value_pb2.AttrValue.ListValue(
                s=[compat.as_bytes("loc:@%s" % handle_name)]))
        with ops.get_default_graph()._attr_scope({"_class": attr}):
          with ops.name_scope("Initializer"), ops.device(None):
            initial_value = ops.convert_to_tensor(
                initial_value() if init_from_fn else initial_value,
                name="initial_value", dtype=dtype)
          self._handle = eager_safe_variable_handle(
              shape=initial_value.get_shape(),
              dtype=initial_value.dtype.base_dtype,
              shared_name=shared_name,
              name=name,
              graph_mode=self._in_graph_mode)
        self._shape = initial_value.shape
        # pylint: disable=protected-access
        if (self._in_graph_mode and initial_value is not None and
            initial_value.op._get_control_flow_context() is not None):
          raise ValueError(
              "Initializer for variable %s is from inside a control-flow "
              "construct, such as a loop or conditional. When creating a "
              "variable inside a loop or conditional, use a lambda as the "
              "initializer." % name)
        # pylint: enable=protected-access
        self._unique_id = shared_name
        self._initial_value = initial_value if self._in_graph_mode else None
        self._handle_name = handle_name + ":0"
        self._dtype = initial_value.dtype.base_dtype
        self._constraint = constraint
        if self._in_graph_mode:
          # Graph mode: build is-initialized, assign and read ops; the read
          # becomes this variable's graph element.
          with ops.name_scope("IsInitialized"):
            self._is_initialized_op = (
                gen_resource_variable_ops.var_is_initialized_op(self._handle))
          if initial_value is not None:
            with ops.name_scope("Assign") as n, ops.colocate_with(self._handle):
              self._initializer_op = (
                  gen_resource_variable_ops.assign_variable_op(
                      self._handle,
                      self._try_guard_against_uninitialized_dependencies(
                          initial_value),
                      name=n))
          with ops.name_scope("Read"), ops.colocate_with(self._handle):
            # Manually assign reads to the handle's device to avoid log
            # messages.
            with ops.device(self._handle.device):
              value = self._read_variable_op()
            self._graph_element = value
            if caching_device is not None:
              # Variables may be created in a tf.device() or ops.colocate_with()
              # context. At the same time, users would expect caching device to
              # be independent of this context, and/or would not expect the
              # current device context to be merged with the caching device
              # spec. Therefore we reset the colocation stack before creating
              # the cached value. Note that resetting the colocation stack will
              # also reset the device stack.
              with ops.colocate_with(None, ignore_existing=True):
                with ops.device(caching_device):
                  self._cached_value = array_ops.identity(value)
            else:
              self._cached_value = None
        else:
          # Eager mode: write the initial value into the resource right away.
          gen_resource_variable_ops.assign_variable_op(self._handle,
                                                       initial_value)
          self._is_initialized_op = None
          self._initializer_op = None
          self._graph_element = None
          if caching_device:
            with ops.device(caching_device):
              self._cached_value = self._read_variable_op()
          else:
            self._cached_value = None
        if not context.executing_eagerly():
          # Eager variables are only added to collections if they are part of an
          # eager variable store (otherwise in an interactive session they would
          # hog memory and cause OOM). This is done in ops/variable_scope.py.
          ops.add_to_collections(collections, self)
        elif ops.GraphKeys.GLOBAL_STEP in collections:
          ops.add_to_collections(ops.GraphKeys.GLOBAL_STEP, self)
    if not self._in_graph_mode:
      # After the handle has been created, set up a way to clean it up when
      # executing eagerly. We'll hold the only reference to the deleter, so that
      # when this object is garbage collected the deleter will be too. This
      # means ResourceVariables can be part of reference cycles without those
      # cycles being uncollectable, and means that no __del__ will be defined at
      # all in graph mode.
      self._handle_deleter = EagerResourceDeleter(
          handle=self._handle, handle_device=self._handle.device)
    self._cached_shape_as_list = None
  def _init_from_proto(self, variable_def, import_scope=None):
    """Initializes from `VariableDef` proto.

    Args:
      variable_def: `VariableDef` protocol buffer describing a resource
        variable serialized from an existing graph.
      import_scope: Optional name scope to prepend to names in the proto.

    Raises:
      ValueError: if the proto does not describe a resource variable.
    """
    # Note that init_from_proto is currently not supported in Eager mode.
    assert not context.executing_eagerly()
    self._in_graph_mode = True
    assert isinstance(variable_def, variable_pb2.VariableDef)
    if not variable_def.is_resource:
      raise ValueError("Trying to restore Variable as ResourceVariable.")
    # Create from variable_def.
    g = ops.get_default_graph()
    self._handle = g.as_graph_element(
        ops.prepend_name_scope(
            variable_def.variable_name, import_scope=import_scope))
    self._shape = tensor_shape.TensorShape(
        self._handle.op.get_attr("shape"))
    self._handle_name = self._handle.name
    self._unique_id = self._handle_name
    self._initializer_op = g.as_graph_element(
        ops.prepend_name_scope(
            variable_def.initializer_name, import_scope=import_scope))
    # Check whether initial_value_name exists for backwards compatibility.
    if (hasattr(variable_def, "initial_value_name") and
        variable_def.initial_value_name):
      self._initial_value = g.as_graph_element(
          ops.prepend_name_scope(variable_def.initial_value_name,
                                 import_scope=import_scope))
    else:
      self._initial_value = None
    self._trainable = getattr(variable_def, "trainable", True)
    if variable_def.snapshot_name:
      snapshot = g.as_graph_element(
          ops.prepend_name_scope(
              variable_def.snapshot_name, import_scope=import_scope))
      # A non-read snapshot is a cached value; the graph element is the
      # underlying ReadVariableOp feeding it.
      if snapshot.op.type != "ReadVariableOp":
        self._cached_value = snapshot
      else:
        self._cached_value = None
      # Walk producer inputs until the ReadVariableOp that feeds the
      # snapshot is found.
      while snapshot.op.type != "ReadVariableOp":
        snapshot = snapshot.op.inputs[0]
      self._graph_element = snapshot
    else:
      self._cached_value = None
      # Legacy case for protos without the snapshot name; assume it's the
      # following.
      self._graph_element = g.get_tensor_by_name(
          self._handle.op.name + "/Read/ReadVariableOp:0")
    if variable_def.HasField("save_slice_info_def"):
      self._save_slice_info = variables.Variable.SaveSliceInfo(
          save_slice_info_def=variable_def.save_slice_info_def,
          import_scope=import_scope)
    else:
      self._save_slice_info = None
    self._caching_device = None
    self._dtype = dtypes.as_dtype(self._handle.op.get_attr("dtype"))
    self._constraint = None
    self._cached_shape_as_list = None
@contextlib.contextmanager
def _assign_dependencies(self):
  """Makes assignments depend on the cached value, if any.

  This prevents undefined behavior with reads not ordered wrt writes.

  Yields:
    None.
  """
  if self._cached_value is None:
    # No snapshot to order against; assignments run unconstrained.
    yield
  else:
    with ops.control_dependencies([self._cached_value]):
      yield
def __nonzero__(self):
  # Python 2 truthiness hook; delegate to the Python 3 __bool__.
  return self.__bool__()
def __bool__(self):
  """Truthiness of the variable's current value (forces a read)."""
  return bool(self.read_value())
def __copy__(self):
  # Shallow copy deliberately aliases the same variable (shared handle).
  return self
def __deepcopy__(self, memo):
  """Creates an independent copy of this variable (eager mode only).

  The copy gets the current value as its initial value and a "_copy"
  suffix on the shared name; it is recorded in `memo` under this
  variable's unique id so repeated references deep-copy once.
  """
  if not context.executing_eagerly():
    raise NotImplementedError(
        "__deepcopy__() is only available when eager execution is enabled.")
  copied_variable = ResourceVariable(
      initial_value=self.read_value(),
      trainable=self._trainable,
      constraint=self._constraint,
      dtype=self._dtype,
      name=self._shared_name + "_copy")
  memo[self._unique_id] = copied_variable
  return copied_variable
@property
def dtype(self):
  """The `DType` of this variable's values."""
  return self._dtype
@property
def device(self):
  """The device this variable is on (taken from its handle tensor)."""
  return self._handle.device
@property
def graph(self):
  """The `Graph` of this variable (taken from its handle tensor)."""
  return self._handle.graph
@property
def name(self):
  """The name of the handle for this variable."""
  return self._handle_name
@property
def shape(self):
  """The static `TensorShape` of this variable."""
  return self._shape
def _shape_as_list(self):
  """Returns the shape as a list of ints, or None if the rank is unknown.

  The result is computed once and memoized in `_cached_shape_as_list`.
  """
  # Compare against None, not truthiness: a scalar variable's shape list
  # is [] (falsy), and a truthiness check would recompute it on every call.
  if self._cached_shape_as_list is not None:
    return self._cached_shape_as_list
  if self.shape.ndims is None:
    return None
  self._cached_shape_as_list = [dim.value for dim in self.shape.dims]
  return self._cached_shape_as_list
def _shape_tuple(self):
  """Returns the shape as a tuple of ints, or None if the rank is unknown."""
  as_list = self._shape_as_list()
  return None if as_list is None else tuple(as_list)
@property
def create(self):
  """The op responsible for initializing this variable.

  Graph-mode alias of `initializer`; raises in eager mode where there is
  no initializer op to return.
  """
  if not self._in_graph_mode:
    raise RuntimeError("Calling create is not supported when eager execution"
                       " is enabled.")
  return self._initializer_op
@property
def handle(self):
  """The resource handle tensor by which this variable can be accessed."""
  return self._handle
def value(self):
  """A cached operation which reads the value of this variable."""
  if self._cached_value is not None:
    return self._cached_value
  # Clear any colocation constraints from the surrounding scope, then
  # force the read onto the device holding the variable.
  with ops.colocate_with(None, ignore_existing=True):
    with ops.device(self._handle.device):
      return self._read_variable_op()
def _as_graph_element(self):
  """Conversion function for Graph.as_graph_element()."""
  return self._graph_element
@property
def initializer(self):
  """The op responsible for initializing this variable."""
  return self._initializer_op
@property
def initial_value(self):
  """Returns the Tensor used as the initial value for the variable.

  May be None for variables restored from old protos without an
  initial_value_name (see the from-proto constructor path).
  """
  if context.executing_eagerly():
    raise RuntimeError("initial_value not supported in EAGER mode.")
  return self._initial_value
@property
def constraint(self):
  """Returns the constraint function associated with this variable.

  Returns:
    The constraint function that was passed to the variable constructor.
    Can be `None` if no constraint was passed.
  """
  return self._constraint
@property
def op(self):
  """The op that produces this variable's handle."""
  return self._handle.op
def eval(self, session=None):
  """Evaluates and returns the value of this variable.

  Args:
    session: Optional `Session` to use; defaults to the current default
      session.
  """
  if context.executing_eagerly():
    raise RuntimeError("Trying to eval in EAGER mode")
  return self._graph_element.eval(session=session)
def numpy(self):
  """Returns the variable's current value as a NumPy array (eager only)."""
  if not context.executing_eagerly():
    raise NotImplementedError(
        "numpy() is only available when eager execution is enabled.")
  return self.read_value().numpy()
@deprecated(None, "Prefer Dataset.range instead.")
def count_up_to(self, limit):
  """Increments this variable until it reaches `limit`.

  When that Op is run it tries to increment the variable by `1`. If
  incrementing the variable would bring it above `limit` then the Op raises
  the exception `OutOfRangeError`.

  If no error is raised, the Op outputs the value of the variable before
  the increment.

  This is essentially a shortcut for
  `resource_count_up_to(self.handle, limit)`.

  Args:
    limit: value at which incrementing the variable raises an error.

  Returns:
    A `Tensor` that will hold the variable value before the increment. If no
    other Op modifies this variable, the values produced will all be
    distinct.
  """
  return gen_state_ops.resource_count_up_to(self.handle, limit=limit,
                                            T=self.dtype)
def _set_save_slice_info(self, save_slice_info):
  """Sets the slice info for this `ResourceVariable`.

  Args:
    save_slice_info: A `Variable.SaveSliceInfo` object.
  """
  self._save_slice_info = save_slice_info
def _get_save_slice_info(self):
  """Returns the `Variable.SaveSliceInfo` for this variable, or None."""
  return self._save_slice_info
def _read_variable_op(self):
  """Builds the raw ReadVariableOp and records it on the gradient tape."""
  if self.trainable:
    # Let the tape know a trainable variable was touched so gradients flow.
    tape.variable_accessed(self)
  result = gen_resource_variable_ops.read_variable_op(self._handle,
                                                      self._dtype)
  if not context.executing_eagerly():
    # Note that if a control flow context is active the input of the read op
    # might not actually be the handle. This line bypasses it.
    tape.record_operation(
        "ReadVariableOp", [result], [self._handle], lambda x: [x])
  return result
def read_value(self):
  """Constructs an op which reads the value of this variable.

  Should be used when there are multiple reads, or when it is desirable to
  read the value only after some condition is true.

  Returns:
    the read operation.
  """
  with ops.name_scope("Read"):
    # Ensure we read the variable in the same device as the handle.
    with ops.device(self._handle.device):
      value = self._read_variable_op()
  # Return an identity so it can get placed on whatever device the context
  # specifies instead of the device where the variable is.
  return array_ops.identity(value)
def sparse_read(self, indices, name=None):
  """Reads the value of this variable sparsely, using `gather`.

  Args:
    indices: Integer tensor of indices to gather along axis 0.
    name: Optional name for the gather op.

  Returns:
    A `Tensor` with the gathered rows (wrapped in an identity so it can be
    placed per the surrounding device context).
  """
  with ops.name_scope("Gather" if name is None else name) as name:
    if self.trainable:
      # Record the access so the tape can build gradients for the gather.
      tape.variable_accessed(self)
    value = gen_resource_variable_ops.resource_gather(
        self._handle, indices, dtype=self._dtype, name=name)
  return array_ops.identity(value)
def to_proto(self, export_scope=None):
  """Converts a `ResourceVariable` to a `VariableDef` protocol buffer.

  Args:
    export_scope: Optional `string`. Name scope to remove.

  Raises:
    RuntimeError: If run in EAGER mode.

  Returns:
    A `VariableDef` protocol buffer, or `None` if the `Variable` is not
    in the specified name scope.
  """
  if context.executing_eagerly():
    raise RuntimeError("to_proto not supported in EAGER mode.")
  if export_scope is None or self.handle.name.startswith(export_scope):
    var_def = variable_pb2.VariableDef()
    # All names are stripped of the export scope so the proto is portable.
    var_def.variable_name = ops.strip_name_scope(self.handle.name,
                                                 export_scope)
    if self._initial_value is not None:
      # This is inside an if-statement for backwards compatibility, since
      # self._initial_value might be None for variables constructed from old
      # protos.
      var_def.initial_value_name = ops.strip_name_scope(
          self._initial_value.name, export_scope)
    var_def.initializer_name = ops.strip_name_scope(self.initializer.name,
                                                    export_scope)
    if self._cached_value is not None:
      var_def.snapshot_name = ops.strip_name_scope(self._cached_value.name,
                                                   export_scope)
    else:
      # Store the graph_element here
      var_def.snapshot_name = ops.strip_name_scope(self._graph_element.name,
                                                   export_scope)
    var_def.is_resource = True
    var_def.trainable = self.trainable
    if self._save_slice_info:
      var_def.save_slice_info_def.MergeFrom(
          self._save_slice_info.to_proto(export_scope=export_scope))
    return var_def
  else:
    return None
@staticmethod
def from_proto(variable_def, import_scope=None):
  """Creates a `ResourceVariable` from a `VariableDef` proto (graph mode)."""
  if context.executing_eagerly():
    raise RuntimeError("from_proto not supported in EAGER mode.")
  return ResourceVariable(
      variable_def=variable_def, import_scope=import_scope)
def _ref(self):
  """Unsupported: resource variables have no ref-typed tensor."""
  raise NotImplementedError("ResourceVariable does not implement _ref()")
def set_shape(self, shape):
  """Unsupported on the base class (see `_UnreadVariable.set_shape`)."""
  raise NotImplementedError("ResourceVariable does not implement set_shape()")
__array_priority__ = 100
def is_initialized(self, name=None):
  """Checks whether a resource variable has been initialized.

  Outputs boolean scalar indicating whether the tensor has been initialized.

  Args:
    name: A name for the operation (optional).

  Returns:
    A `Tensor` of type `bool`.
  """
  return gen_resource_variable_ops.var_is_initialized_op(self.handle, name)
def assign_sub(self, delta, use_locking=None, name=None, read_value=True):
  """Subtracts a value from this variable.

  Args:
    delta: A `Tensor`. The value to subtract from this variable.
    use_locking: If `True`, use locking during the operation. Accepted for
      API compatibility; not referenced by this implementation.
    name: The name to use for the operation.
    read_value: A `bool`. Whether to read and return the new value of the
      variable or not.

  Returns:
    If `read_value` is `True`, this method will return the new value of the
    variable after the assignment has completed. Otherwise, when in graph mode
    it will return the `Operation` that does the assignment, and when in eager
    mode it will return `None`.
  """
  # TODO(apassos): this here and below is not atomic. Consider making it
  # atomic if there's a way to do so without a performance cost for those who
  # don't need it.
  with _handle_graph(self.handle), self._assign_dependencies():
    assign_sub_op = gen_resource_variable_ops.assign_sub_variable_op(
        self.handle, ops.convert_to_tensor(delta, dtype=self.dtype),
        name=name)
  if read_value:
    return self._lazy_read(assign_sub_op)
  return assign_sub_op
def assign_add(self, delta, use_locking=None, name=None, read_value=True):
  """Adds a value to this variable.

  Args:
    delta: A `Tensor`. The value to add to this variable.
    use_locking: If `True`, use locking during the operation. Accepted for
      API compatibility; not referenced by this implementation.
    name: The name to use for the operation.
    read_value: A `bool`. Whether to read and return the new value of the
      variable or not.

  Returns:
    If `read_value` is `True`, this method will return the new value of the
    variable after the assignment has completed. Otherwise, when in graph mode
    it will return the `Operation` that does the assignment, and when in eager
    mode it will return `None`.
  """
  with _handle_graph(self.handle), self._assign_dependencies():
    assign_add_op = gen_resource_variable_ops.assign_add_variable_op(
        self.handle, ops.convert_to_tensor(delta, dtype=self.dtype),
        name=name)
  if read_value:
    return self._lazy_read(assign_add_op)
  return assign_add_op
def _lazy_read(self, op):
  """Wraps a mutating `op` in an `_UnreadVariable` that reads on demand.

  The wrapper defers the actual ReadVariableOp until the result is used,
  while still depending on `op` so reads observe the write.
  """
  if self.trainable:
    tape.variable_accessed(self)
  return _UnreadVariable(
      handle=self._handle, dtype=self.dtype, shape=self._shape,
      in_graph_mode=self._in_graph_mode,
      deleter=self._handle_deleter if not self._in_graph_mode else None,
      parent_op=op, unique_id=self._unique_id)
def assign(self, value, use_locking=None, name=None, read_value=True):
  """Assigns a new value to this variable.

  Args:
    value: A `Tensor`. The new value for this variable.
    use_locking: If `True`, use locking during the assignment. Accepted for
      API compatibility; not referenced by this implementation.
    name: The name to use for the assignment.
    read_value: A `bool`. Whether to read and return the new value of the
      variable or not.

  Returns:
    If `read_value` is `True`, this method will return the new value of the
    variable after the assignment has completed. Otherwise, when in graph mode
    it will return the `Operation` that does the assignment, and when in eager
    mode it will return `None`.
  """
  # Note: not depending on the cached value here since this can used to
  # initialize the variable.
  with _handle_graph(self.handle):
    value_tensor = ops.convert_to_tensor(value, dtype=self.dtype)
    # Reject values whose static shape conflicts with the variable's.
    self._shape.assert_is_compatible_with(value_tensor.shape)
    assign_op = gen_resource_variable_ops.assign_variable_op(
        self.handle, value_tensor, name=name)
    if read_value:
      return self._lazy_read(assign_op)
  return assign_op
def __reduce__(self):
  # Pickle support: rebuild from the current value. Relies on numpy(),
  # which raises NotImplementedError outside eager mode, so pickling is
  # effectively eager-only. Trainability/name/constraint are not preserved.
  return (ResourceVariable, (self.numpy(),))
def scatter_sub(self, sparse_delta, use_locking=False, name=None):
  """Subtracts `IndexedSlices` from this variable.

  Args:
    sparse_delta: `IndexedSlices` to be subtracted from this variable.
    use_locking: If `True`, use locking during the operation. Accepted for
      API compatibility; not referenced by this implementation.
    name: the name of the operation.

  Returns:
    A `Tensor` that will hold the new value of this variable after
    the scattered subtraction has completed.

  Raises:
    ValueError: if `sparse_delta` is not an `IndexedSlices`.
  """
  if not isinstance(sparse_delta, ops.IndexedSlices):
    raise ValueError("sparse_delta is not IndexedSlices: %s" % sparse_delta)
  return self._lazy_read(gen_resource_variable_ops.resource_scatter_sub(
      self.handle, sparse_delta.indices,
      ops.convert_to_tensor(sparse_delta.values, self.dtype), name=name))
def scatter_add(self, sparse_delta, use_locking=False, name=None):
  """Adds `IndexedSlices` to this variable.

  Args:
    sparse_delta: `IndexedSlices` to be added to this variable.
    use_locking: If `True`, use locking during the operation. Accepted for
      API compatibility; not referenced by this implementation.
    name: the name of the operation.

  Returns:
    A `Tensor` that will hold the new value of this variable after
    the scattered addition has completed.

  Raises:
    ValueError: if `sparse_delta` is not an `IndexedSlices`.
  """
  if not isinstance(sparse_delta, ops.IndexedSlices):
    raise ValueError("sparse_delta is not IndexedSlices: %s" % sparse_delta)
  return self._lazy_read(gen_resource_variable_ops.resource_scatter_add(
      self.handle, sparse_delta.indices,
      ops.convert_to_tensor(sparse_delta.values, self.dtype), name=name))
def scatter_update(self, sparse_delta, use_locking=False, name=None):
  """Assigns `IndexedSlices` to this variable.

  Args:
    sparse_delta: `IndexedSlices` to be assigned to this variable.
    use_locking: If `True`, use locking during the operation. Accepted for
      API compatibility; not referenced by this implementation.
    name: the name of the operation.

  Returns:
    A `Tensor` that will hold the new value of this variable after
    the scattered assignment has completed.

  Raises:
    ValueError: if `sparse_delta` is not an `IndexedSlices`.
  """
  if not isinstance(sparse_delta, ops.IndexedSlices):
    raise ValueError("sparse_delta is not IndexedSlices: %s" % sparse_delta)
  return self._lazy_read(gen_resource_variable_ops.resource_scatter_update(
      self.handle, sparse_delta.indices,
      ops.convert_to_tensor(sparse_delta.values, self.dtype), name=name))
def scatter_nd_sub(self, indices, updates, name=None):
  """Applies sparse subtraction to individual values or slices in a Variable.

  `ref` is a `Tensor` with rank `P` and `indices` is a `Tensor` of rank `Q`.

  `indices` must be integer tensor, containing indices into `ref`.
  It must be shape `[d_0, ..., d_{Q-2}, K]` where `0 < K <= P`.

  The innermost dimension of `indices` (with length `K`) corresponds to
  indices into elements (if `K = P`) or slices (if `K < P`) along the `K`th
  dimension of `ref`.

  `updates` is `Tensor` of rank `Q-1+P-K` with shape:

  ```
  [d_0, ..., d_{Q-2}, ref.shape[K], ..., ref.shape[P-1]].
  ```

  For example, say we want to subtract 4 scattered elements from a rank-1
  tensor with 8 elements. In Python, that update would look like this:

  ```python
      ref = tf.Variable([1, 2, 3, 4, 5, 6, 7, 8])
      indices = tf.constant([[4], [3], [1] ,[7]])
      updates = tf.constant([9, 10, 11, 12])
      op = ref.scatter_nd_sub(indices, updates)
      with tf.Session() as sess:
        print(sess.run(op))
  ```

  The resulting update to ref would look like this:

      [1, -9, 3, -6, -6, 6, 7, -4]

  See `tf.scatter_nd` for more details about how to make updates to
  slices.

  Args:
    indices: The indices to be used in the operation.
    updates: The values to be used in the operation.
    name: the name of the operation.

  Returns:
    A `Tensor` that will hold the new value of this variable after
    the scattered subtraction has completed.
  """
  return self._lazy_read(gen_state_ops.resource_scatter_nd_sub(
      self.handle, indices, ops.convert_to_tensor(updates, self.dtype),
      name=name))
def scatter_nd_add(self, indices, updates, name=None):
  """Applies sparse addition to individual values or slices in a Variable.

  `ref` is a `Tensor` with rank `P` and `indices` is a `Tensor` of rank `Q`.

  `indices` must be integer tensor, containing indices into `ref`.
  It must be shape `[d_0, ..., d_{Q-2}, K]` where `0 < K <= P`.

  The innermost dimension of `indices` (with length `K`) corresponds to
  indices into elements (if `K = P`) or slices (if `K < P`) along the `K`th
  dimension of `ref`.

  `updates` is `Tensor` of rank `Q-1+P-K` with shape:

  ```
  [d_0, ..., d_{Q-2}, ref.shape[K], ..., ref.shape[P-1]].
  ```

  For example, say we want to add 4 scattered elements to a rank-1 tensor
  with 8 elements. In Python, that update would look like this:

  ```python
      ref = tf.Variable([1, 2, 3, 4, 5, 6, 7, 8])
      indices = tf.constant([[4], [3], [1] ,[7]])
      updates = tf.constant([9, 10, 11, 12])
      add = ref.scatter_nd_add(indices, updates)
      with tf.Session() as sess:
        print(sess.run(add))
  ```

  The resulting update to ref would look like this:

      [1, 13, 3, 14, 14, 6, 7, 20]

  See `tf.scatter_nd` for more details about how to make updates to
  slices.

  Args:
    indices: The indices to be used in the operation.
    updates: The values to be used in the operation.
    name: the name of the operation.

  Returns:
    A `Tensor` that will hold the new value of this variable after
    the scattered addition has completed.
  """
  return self._lazy_read(gen_state_ops.resource_scatter_nd_add(
      self.handle, indices, ops.convert_to_tensor(updates, self.dtype),
      name=name))
def scatter_nd_update(self, indices, updates, name=None):
  """Applies sparse assignment to individual values or slices in a Variable.

  `ref` is a `Tensor` with rank `P` and `indices` is a `Tensor` of rank `Q`.

  `indices` must be integer tensor, containing indices into `ref`.
  It must be shape `[d_0, ..., d_{Q-2}, K]` where `0 < K <= P`.

  The innermost dimension of `indices` (with length `K`) corresponds to
  indices into elements (if `K = P`) or slices (if `K < P`) along the `K`th
  dimension of `ref`.

  `updates` is `Tensor` of rank `Q-1+P-K` with shape:

  ```
  [d_0, ..., d_{Q-2}, ref.shape[K], ..., ref.shape[P-1]].
  ```

  For example, say we want to assign 4 scattered elements of a rank-1 tensor
  with 8 elements. In Python, that update would look like this:

  ```python
      ref = tf.Variable([1, 2, 3, 4, 5, 6, 7, 8])
      indices = tf.constant([[4], [3], [1] ,[7]])
      updates = tf.constant([9, 10, 11, 12])
      op = ref.scatter_nd_update(indices, updates)
      with tf.Session() as sess:
        print(sess.run(op))
  ```

  The resulting update to ref would look like this:

      [1, 11, 3, 10, 9, 6, 7, 12]

  See `tf.scatter_nd` for more details about how to make updates to
  slices.

  Args:
    indices: The indices to be used in the operation.
    updates: The values to be used in the operation.
    name: the name of the operation.

  Returns:
    A `Tensor` that will hold the new value of this variable after
    the scattered assignment has completed.
  """
  return self._lazy_read(gen_state_ops.resource_scatter_nd_update(
      self.handle, indices, ops.convert_to_tensor(updates, self.dtype),
      name=name))
def _strided_slice_assign(self, begin, end, strides, value, name, begin_mask,
                          end_mask, ellipsis_mask, new_axis_mask,
                          shrink_axis_mask):
  """Assigns `value` into the strided slice described by the masks.

  Backs slice-assignment syntax (`var[a:b] = value`); the mask arguments
  follow the strided-slice op's bit-mask encoding.
  """
  with _handle_graph(self.handle), self._assign_dependencies():
    return self._lazy_read(
        gen_array_ops.resource_strided_slice_assign(
            ref=self.handle,
            begin=begin,
            end=end,
            strides=strides,
            value=ops.convert_to_tensor(value, dtype=self.dtype),
            name=name,
            begin_mask=begin_mask,
            end_mask=end_mask,
            ellipsis_mask=ellipsis_mask,
            new_axis_mask=new_axis_mask,
            shrink_axis_mask=shrink_axis_mask))
def __int__(self):
  """Converts an integer variable's current value to a Python int.

  Uses .numpy(), so this is effectively eager-only.
  """
  if self.dtype != dtypes.int32 and self.dtype != dtypes.int64:
    raise TypeError("Non-integer variable can't be converted to integer.")
  return int(self.value().numpy())
def _dense_var_to_tensor(self, dtype=None, name=None, as_ref=False):
  """Tensor-conversion hook used by `ops.convert_to_tensor`.

  Returns NotImplemented for a mismatched dtype so other converters can
  be tried; the `name` argument is intentionally unused.
  """
  del name
  if dtype is not None and dtype != self.dtype:
    return NotImplemented
  if as_ref:
    # Hand back the read op's input (the value being read) for ref requests.
    return self.read_value().op.inputs[0]
  else:
    return self.value()
def __iadd__(self, unused_other):
  """In-place add is disallowed; see the error message for alternatives."""
  raise RuntimeError("Variable += value not supported. Use "
                     "variable.assign_add(value) to modify the variable "
                     "value and variable = variable + value to get a new "
                     "Tensor object.")
def __isub__(self, unused_other):
  """In-place subtract is disallowed; see the error message for alternatives."""
  raise RuntimeError("Variable -= value not supported. Use "
                     "variable.assign_sub(value) to modify the variable "
                     "value and variable = variable - value to get a new "
                     "Tensor object.")
def __imul__(self, unused_other):
  """In-place multiply is disallowed; see the error message for alternatives."""
  raise RuntimeError("Variable *= value not supported. Use "
                     "`var.assign(var * value)` to modify the variable or "
                     "`var = var * value` to get a new Tensor object.")
def __idiv__(self, unused_other):
  """In-place divide (Py2) is disallowed; see the error message."""
  raise RuntimeError("Variable /= value not supported. Use "
                     "`var.assign(var / value)` to modify the variable or "
                     "`var = var / value` to get a new Tensor object.")
def __itruediv__(self, unused_other):
  """In-place true-divide is disallowed; see the error message."""
  raise RuntimeError("Variable /= value not supported. Use "
                     "`var.assign(var / value)` to modify the variable or "
                     "`var = var / value` to get a new Tensor object.")
def __irealdiv__(self, unused_other):
  """In-place real-divide is disallowed; see the error message."""
  raise RuntimeError("Variable /= value not supported. Use "
                     "`var.assign(var / value)` to modify the variable or "
                     "`var = var / value` to get a new Tensor object.")
def __ipow__(self, unused_other):
  """In-place power is disallowed; see the error message."""
  raise RuntimeError("Variable **= value not supported. Use "
                     "`var.assign(var ** value)` to modify the variable or "
                     "`var = var ** value` to get a new Tensor object.")
# Register ResourceVariable with the eager C extension and with math_ops so
# both layers can special-case resource variables.
pywrap_tensorflow.TFE_Py_RegisterResourceVariableType(ResourceVariable)
math_ops._resource_variable_type = ResourceVariable  # pylint: disable=protected-access
def _dense_var_to_tensor(var, dtype=None, name=None, as_ref=False):
  """Module-level tensor-conversion shim delegating to the variable's hook."""
  return var._dense_var_to_tensor(dtype=dtype, name=name, as_ref=as_ref)  # pylint: disable=protected-access
class _UnreadVariable(ResourceVariable):
  """Represents a future for a read of a variable.

  Pretends to be the tensor if anyone looks.
  """

  def __init__(self, handle, dtype,  # pylint: disable=super-init-not-called
               shape, in_graph_mode, deleter, parent_op, unique_id):
    """Wraps `parent_op` (a write) so reads happen lazily, after the write.

    Args:
      handle: The resource handle of the underlying variable.
      dtype: The variable's dtype.
      shape: The variable's static shape.
      in_graph_mode: Whether the parent variable was built in graph mode.
      deleter: Handle deleter to keep alive (eager mode), or None.
      parent_op: The mutating op this read must depend on.
      unique_id: The parent variable's unique id.
    """
    # We do not call super init on purpose.
    self._trainable = False
    self._save_slice_info = None
    self._graph_key = ops.get_default_graph()._graph_key  # pylint: disable=protected-access
    self._in_graph_mode = in_graph_mode
    self._handle = handle
    self._shape = shape
    self._initial_value = None
    if isinstance(self._handle, ops.EagerTensor):
      # Eager handles have no stable graph name.
      self._handle_name = ""
    else:
      self._handle_name = self._handle.name
    self._unique_id = unique_id
    self._dtype = dtype
    self._constraint = None
    self._cached_value = None
    self._is_initialized_op = None
    self._initializer_op = None
    self._parent_op = parent_op
    if context.executing_eagerly():
      self._graph_element = None
    else:
      # Materialize the read op eagerly in graph mode so it exists in the
      # graph even if the result is never used.
      self._graph_element = self.read_value()
    self._handle_deleter = deleter

  @property
  def name(self):
    # In graph mode, report the parent (mutating) op's name.
    if self._in_graph_mode:
      return self._parent_op.name
    else:
      return "UnreadVariable"

  def value(self):
    return self._read_variable_op()

  def read_value(self):
    return self._read_variable_op()

  def _read_variable_op(self):
    # The control dependency orders this read after the parent write.
    with ops.control_dependencies([self._parent_op]):
      return gen_resource_variable_ops.read_variable_op(self._handle,
                                                        self._dtype)

  def set_shape(self, shape):
    """Overrides the base class: allows updating the static shape."""
    self._shape = shape
    self._cached_shape_as_list = None

  @property
  def op(self):
    """The op for this variable (the parent mutating op)."""
    return self._parent_op
# Allow _UnreadVariable to be used anywhere a dense tensor is expected.
ops.register_tensor_conversion_function(_UnreadVariable, _dense_var_to_tensor)
ops.register_dense_tensor_like_type(_UnreadVariable)
class _MixedPrecisionVariable(ResourceVariable):
  """Represents a variable that can return in desired dtype when read.

  In mixed precision training, it is usually desirable to use different dtypes
  for variables and computation. This class will be used to wrap created
  ResourceVariable when mixed precision training is enabled. It allows layers to
  perform computation in a different dtype than their variable dtypes, in order
  to achieve higher performance without causing quality loss.
  """

  def __init__(self, var, read_dtype):
    """Creates a MixedPrecisionVariable.

    Args:
      var: A ResourceVariable instance.
      read_dtype: A tf.DType, the returned dtype when read, default to None.
        Casting is performed if read_dtype is not None and differs from
        var.dtype.

    Returns:
      An MixedPrecisionVariable instance.

    Raises:
      ValueError: if var is not a ResourceVariable instance, or read_dtype is
        not a tf.DType instance.
    """
    # pylint: disable=super-init-not-called
    # We do not call super init on purpose.
    if not isinstance(var, ResourceVariable):
      raise ValueError("InvalidArgument: var must be a ResourceVariable type.")
    if not isinstance(read_dtype, dtypes.DType):
      raise ValueError("InvalidArgument: read_dtype must be a tf.DType type.")
    self._var = var
    # Mirror the wrapped variable's state so this object quacks like it.
    self._trainable = var.trainable
    self._save_slice_info = None
    self._graph_key = ops.get_default_graph()._graph_key  # pylint: disable=protected-access
    self._in_graph_mode = var._in_graph_mode  # pylint: disable=protected-access
    self._handle = var.handle
    self._shape = var.shape
    self._initial_value = None
    if isinstance(self.handle, ops.EagerTensor):
      self._handle_name = ""
    else:
      self._handle_name = self.handle.name
    self._unique_id = var._unique_id  # pylint: disable=protected-access
    self._dtype = var.dtype
    self._constraint = None
    self._cached_value = None
    self._is_initialized_op = var._is_initialized_op  # pylint: disable=protected-access
    self._initializer_op = var._initializer_op  # pylint: disable=protected-access
    # This needs to be set before read_value() is called.
    self._read_dtype = read_dtype
    if context.executing_eagerly():
      self._graph_element = None
    else:
      self._graph_element = self.read_value()
    self._handle_deleter = (
        var._handle_deleter if not self._in_graph_mode  # pylint: disable=protected-access
        else None)
    # pylint: enable=super-init-not-called

  @property
  def name(self):
    return self._var.name

  def value(self):
    return self._read_variable_op()

  def read_value(self):
    return self._read_variable_op()

  def _read_variable_op(self):
    # Read in the storage dtype, then cast to the requested read dtype.
    with ops.colocate_with(self._handle):
      res = gen_resource_variable_ops.read_variable_op(self._handle,
                                                       self._dtype)
      if self._read_dtype != self._dtype:
        return math_ops.cast(res, self._read_dtype)
      else:
        return res

  def set_shape(self, shape):
    """Overrides the base class: allows updating the static shape."""
    self._shape = shape
    self._cached_shape_as_list = None

  @property
  def op(self):
    """The op for this variable (delegates to the wrapped variable)."""
    return self._var.op

  @property
  def read_dtype(self):
    """The dtype of the returned tensor when reading the var."""
    return self._read_dtype

  def _dense_var_to_tensor(self, dtype=None, name=None, as_ref=False):
    # Only convertible to the read dtype, and never as a ref.
    del name
    dtype = dtype or self.read_dtype
    if dtype != self.read_dtype or as_ref:
      return NotImplemented
    else:
      res = self.value()
    return res

  def _should_act_as_resource_variable(self):
    """To pass resource_variable_ops.is_resource_variable check."""
    pass
# Register a conversion function which reads the value of the variable,
# allowing instances of the class to be used as tensors.

# Note: registering for Variable after ResourceVariable because inheritance will
# otherwise lead to the wrong behavior.
ops.register_tensor_conversion_function(ResourceVariable, _dense_var_to_tensor)
ops.register_tensor_conversion_function(
    variables.Variable, variables.Variable._TensorConversionFunction)  # pylint: disable=protected-access

# pylint: disable=protected-access
ops.register_dense_tensor_like_type(ResourceVariable)
@ops.RegisterGradient("ReadVariableOp")
def _ReadGrad(_, grad):
  """Gradient for read op: a read is an identity, so pass `grad` through."""
  return grad
def variable_shape(handle, out_type=dtypes.int32):
  """Returns the shape of the variable behind `handle` as a tensor.

  Prefers a constant built from the handle's static shape metadata; falls
  back to the runtime VariableShape op when the metadata is absent, the
  rank is unknown, or any dimension is unknown (-1).
  """
  if getattr(
      handle, "_handle_data", None) is None or not handle._handle_data.is_set:
    return gen_resource_variable_ops.variable_shape(handle, out_type=out_type)
  shape_proto = handle._handle_data.shape_and_type[0].shape
  if shape_proto.unknown_rank or any(x.size == -1 for x in shape_proto.dim):
    return gen_resource_variable_ops.variable_shape(handle, out_type=out_type)
  return constant_op.constant([x.size for x in shape_proto.dim], dtype=out_type)
@ops.RegisterGradient("ResourceGather")
def _GatherGrad(op, grad):
  """Gradient for gather op."""
  # Build appropriately shaped IndexedSlices
  handle = op.inputs[0]
  indices = op.inputs[1]
  params_shape = variable_shape(handle)
  size = array_ops.expand_dims(array_ops.size(indices), 0)
  # Gradient rows: one slice per gathered index, shaped like params[1:].
  values_shape = array_ops.concat([size, params_shape[1:]], 0)
  values = array_ops.reshape(grad, values_shape)
  indices = array_ops.reshape(indices, size)
  # No gradient for the indices input (second None position).
  return (ops.IndexedSlices(values, indices, params_shape), None)
def _to_proto_fn(v, export_scope=None):
  """Converts Variable and ResourceVariable to VariableDef for collections."""
  return v.to_proto(export_scope=export_scope)
def _from_proto_fn(v, import_scope=None):
  """Creates Variable or ResourceVariable from VariableDef as needed."""
  # Pick the class matching the proto's resource flag, then deserialize.
  factory = ResourceVariable if v.is_resource else variables.Variable
  return factory.from_proto(v, import_scope=import_scope)
# Serialize variables in the standard graph collections as VariableDef
# protos so they survive MetaGraph export/import.
ops.register_proto_function(
    ops.GraphKeys.GLOBAL_VARIABLES,
    proto_type=variable_pb2.VariableDef,
    to_proto=_to_proto_fn,
    from_proto=_from_proto_fn)
ops.register_proto_function(
    ops.GraphKeys.TRAINABLE_VARIABLES,
    proto_type=variable_pb2.VariableDef,
    to_proto=_to_proto_fn,
    from_proto=_from_proto_fn)
ops.register_proto_function(
    ops.GraphKeys.MOVING_AVERAGE_VARIABLES,
    proto_type=variable_pb2.VariableDef,
    to_proto=_to_proto_fn,
    from_proto=_from_proto_fn)
ops.register_proto_function(
    ops.GraphKeys.LOCAL_VARIABLES,
    proto_type=variable_pb2.VariableDef,
    to_proto=_to_proto_fn,
    from_proto=_from_proto_fn)
ops.register_proto_function(
    ops.GraphKeys.MODEL_VARIABLES,
    proto_type=variable_pb2.VariableDef,
    to_proto=_to_proto_fn,
    from_proto=_from_proto_fn)
ops.register_proto_function(
    ops.GraphKeys.GLOBAL_STEP,
    proto_type=variable_pb2.VariableDef,
    to_proto=_to_proto_fn,
    from_proto=_from_proto_fn)
def is_resource_variable(var):
  """Returns True if `var` is to be considered a ResourceVariable.

  Matches either actual ResourceVariable instances or duck-typed objects
  that define `_should_act_as_resource_variable`.
  """
  return isinstance(var, ResourceVariable) or hasattr(
      var, "_should_act_as_resource_variable")
def copy_to_graph_uninitialized(var):
  """Copies an existing variable to a new graph, with no initializer."""
  # Like ResourceVariable.__deepcopy__, but does not set an initializer on the
  # new variable.
  # pylint: disable=protected-access
  new_variable = ResourceVariable(
      # A placeholder stands in for the initial value so no initializer is
      # wired up in the new graph.
      initial_value=array_ops.placeholder(
          shape=var.shape, dtype=var.dtype,
          name="unused_initial_variable_value"),
      trainable=var.trainable,
      constraint=var._constraint,
      dtype=var.dtype,
      name=var._shared_name)
  new_variable._maybe_initialize_checkpointable()
  # pylint: enable=protected-access
  return new_variable
# These ops have no meaningful gradient.
ops.NotDifferentiable("VarIsInitializedOp")
ops.NotDifferentiable("VariableShape")
| apache-2.0 |
tsotetsi/textily-web | temba/schedules/migrations/0003_reset_1.py | 4 | 2581 | # -*- coding: utf-8 -*-
# Generated by Django 1.10.5 on 2017-01-06 22:38
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Initial migration: creates the Schedule model.

    Auto-generated by Django 1.10.5; do not edit field definitions by hand —
    generate a follow-up migration instead.
    """
    initial = True
    dependencies = [
        # Schedule has created_by/modified_by FKs to the (swappable) user model.
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]
    operations = [
        migrations.CreateModel(
            name='Schedule',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('is_active', models.BooleanField(default=True, help_text='Whether this item is active, use this instead of deleting')),
                ('created_on', models.DateTimeField(auto_now_add=True, help_text='When this item was originally created')),
                ('modified_on', models.DateTimeField(auto_now=True, help_text='When this item was last modified')),
                ('status', models.CharField(choices=[('U', 'Unscheduled'), ('S', 'Scheduled')], default='U', max_length=1)),
                ('repeat_hour_of_day', models.IntegerField(help_text='The hour of the day', null=True)),
                ('repeat_minute_of_hour', models.IntegerField(help_text='The minute of the hour', null=True)),
                ('repeat_day_of_month', models.IntegerField(help_text='The day of the month to repeat on', null=True)),
                ('repeat_period', models.CharField(choices=[('O', 'Never'), ('D', 'Daily'), ('W', 'Weekly'), ('M', 'Monthly')], help_text='When this schedule repeats', max_length=1, null=True)),
                ('repeat_days', models.IntegerField(blank=True, default=0, help_text='bit mask of days of the week', null=True)),
                ('last_fire', models.DateTimeField(blank=True, default=None, help_text='When this schedule last fired', null=True)),
                ('next_fire', models.DateTimeField(blank=True, default=None, help_text='When this schedule fires next', null=True)),
                ('created_by', models.ForeignKey(help_text='The user which originally created this item', on_delete=django.db.models.deletion.CASCADE, related_name='schedules_schedule_creations', to=settings.AUTH_USER_MODEL)),
                ('modified_by', models.ForeignKey(help_text='The user which last modified this item', on_delete=django.db.models.deletion.CASCADE, related_name='schedules_schedule_modifications', to=settings.AUTH_USER_MODEL)),
            ],
            options={
                'abstract': False,
            },
        ),
    ]
| agpl-3.0 |
HiroIshikawa/21playground | learning/venv/lib/python3.5/site-packages/pip/_vendor/requests/packages/chardet/gb2312prober.py | 2994 | 1681 | ######################## BEGIN LICENSE BLOCK ########################
# The Original Code is mozilla.org code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
from .mbcharsetprober import MultiByteCharSetProber
from .codingstatemachine import CodingStateMachine
from .chardistribution import GB2312DistributionAnalysis
from .mbcssm import GB2312SMModel
class GB2312Prober(MultiByteCharSetProber):
    """Charset prober for GB2312 (simplified Chinese) encoded text.

    Combines a coding state machine (byte-sequence legality) with a
    character distribution analyzer (frequency statistics), both consumed
    by the MultiByteCharSetProber base class.
    """
    def __init__(self):
        MultiByteCharSetProber.__init__(self)
        # State machine that validates GB2312 byte sequences.
        self._mCodingSM = CodingStateMachine(GB2312SMModel)
        # Scores how plausible the character frequency distribution is.
        self._mDistributionAnalyzer = GB2312DistributionAnalysis()
        self.reset()
    def get_charset_name(self):
        return "GB2312"
| mit |
AnishShah/tensorflow | tensorflow/contrib/opt/python/training/multitask_optimizer_wrapper_test.py | 25 | 5440 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for MultitaskOptimizerWrapper."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import six
from tensorflow.contrib.opt.python.training import multitask_optimizer_wrapper
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
from tensorflow.python.training import momentum
class MultitaskOptimizerWrapperTest(test.TestCase):
  """Tests for the multitask optimizer wrapper.

  The wrapper suppresses updates (including optimizer slot updates) for
  variables whose gradients are entirely zero, so per-task gradients do
  not disturb unrelated variables.
  """

  def testWrapper(self):
    """Momentum slots change only for variables with nonzero gradients."""
    with self.cached_session():
      var0 = variables.Variable([1.0, 2.0], dtype=dtypes.float32)
      var1 = variables.Variable([3.0, 4.0], dtype=dtypes.float32)
      grads0 = constant_op.constant([0.1, 0.1], dtype=dtypes.float32)
      grads1 = constant_op.constant([0.01, 0.01], dtype=dtypes.float32)
      grads_allzero = constant_op.constant([0.0, 0.0], dtype=dtypes.float32)
      mom_opt_impl = momentum.MomentumOptimizer(learning_rate=2.0, momentum=0.9)
      mom_opt = multitask_optimizer_wrapper.MultitaskOptimizerWrapper(
          mom_opt_impl)
      mom_update = mom_opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
      mom_update_partial = mom_opt.apply_gradients(
          zip([grads_allzero, grads1], [var0, var1]))
      mom_update_no_action = mom_opt.apply_gradients(
          zip([grads_allzero, grads_allzero], [var0, var1]))
      self.evaluate(variables.global_variables_initializer())
      # Fetch params to validate initial values
      self.assertAllClose([1.0, 2.0], self.evaluate(var0))
      self.assertAllClose([3.0, 4.0], self.evaluate(var1))
      self.assertEqual(["momentum"], mom_opt.get_slot_names())
      slot0 = mom_opt.get_slot(var0, "momentum")
      # Use assertEqual: assertEquals is a deprecated alias (removed in
      # Python 3.12) and the rest of this test already uses assertEqual.
      self.assertEqual(slot0.get_shape(), var0.get_shape())
      slot1 = mom_opt.get_slot(var1, "momentum")
      self.assertEqual(slot1.get_shape(), var1.get_shape())
      # Step 1: normal momentum update.
      self.evaluate(mom_update)
      # Check that the momentum accumulators have been updated.
      self.assertAllCloseAccordingToType(
          np.array([0.1, 0.1]), self.evaluate(slot0))
      self.assertAllCloseAccordingToType(
          np.array([0.01, 0.01]), self.evaluate(slot1))
      # Check that the parameters have been updated.
      self.assertAllCloseAccordingToType(
          np.array([1.0 - (0.1 * 2.0), 2.0 - (0.1 * 2.0)]), self.evaluate(var0))
      self.assertAllCloseAccordingToType(
          np.array([3.0 - (0.01 * 2.0), 4.0 - (0.01 * 2.0)]),
          self.evaluate(var1))
      # Step 2: momentum update that changes only slot1 but not slot0.
      self.evaluate(mom_update_partial)
      # Check that only the relevant momentum accumulator has been updated.
      self.assertAllCloseAccordingToType(
          np.array([0.1, 0.1]), self.evaluate(slot0))
      self.assertAllCloseAccordingToType(
          np.array([(0.9 * 0.01 + 0.01), (0.9 * 0.01 + 0.01)]),
          self.evaluate(slot1))
      # Step 3: momentum update that does not change anything.
      self.evaluate(mom_update_no_action)
      # Check that the momentum accumulators have *NOT* been updated.
      self.assertAllCloseAccordingToType(
          np.array([0.1, 0.1]), self.evaluate(slot0))
      self.assertAllCloseAccordingToType(
          np.array([(0.9 * 0.01 + 0.01), (0.9 * 0.01 + 0.01)]),
          self.evaluate(slot1))

  def testGradientClipping(self):
    """Global-norm clipping handles zero gradients and None entries."""
    with self.cached_session():
      var0 = variables.Variable([1.0, 2.0], dtype=dtypes.float32)
      var1 = variables.Variable([3.0, 4.0], dtype=dtypes.float32)
      var2 = variables.Variable([3.0, 4.0], dtype=dtypes.float32)
      var3 = variables.Variable([3.0, 4.0], dtype=dtypes.float32)
      grads0 = constant_op.constant([10.0, 15.0], dtype=dtypes.float32)
      grads1 = constant_op.constant([0.0, 5.0], dtype=dtypes.float32)
      grads2 = constant_op.constant([0.0, 0.0], dtype=dtypes.float32)
      grads3 = None
      varlist = [var0, var1, var2, var3]
      gradients = [grads0, grads1, grads2, grads3]
      clipped_gradvars, global_norm = (
          multitask_optimizer_wrapper.clip_gradients_by_global_norm(
              six.moves.zip(gradients, varlist), clip_norm=1.0))
      clipped_grads = list(six.moves.zip(*clipped_gradvars))[0]
      # None gradients must not contribute to the global norm.
      reference_global_norm = np.sqrt(np.sum(np.square([10.0, 15.0, 0.0, 5.0])))
      self.assertAllCloseAccordingToType(
          self.evaluate(global_norm), reference_global_norm)
      self.assertAllCloseAccordingToType(
          self.evaluate(clipped_grads[2]), np.array([0., 0.]))
      self.assertIsNone(clipped_grads[3])
if __name__ == "__main__":
  # Delegate to the TensorFlow test runner (flag parsing + test discovery).
  test.main()
| apache-2.0 |
setsulla/owanimo | script/battle_20BP.py | 1 | 1260 | import os
import sys
import time
from owanimo.app.error import ERROR as e
from owanimo.script import allegory_normal
from owanimo.util import define
from owanimo.util.log import LOG as L
class Allegory(allegory_normal.Allegory):
    """Scripted scenario: log in, run the 20BP battle, and verify the result.

    Each step reports its status via self.flush(self.step()) and the
    scenario aborts the remaining steps as soon as one check fails.
    """
    def __init__(self, runner, profile, player):
        allegory_normal.Allegory.__init__(self, runner, profile, player)
        # NOTE(review): presumably gives the runner time to settle before
        # the scenario starts -- confirm whether 2s is required.
        time.sleep(2)
    def before(self):
        # Setup hook: runs once before test().
        L.info("*** Start Allegory : %s *** " % __file__)
        self.start()
    def test(self):
        # Step 1 : Login
        result = self.check(True, self.login(), e.LOGIN)
        self.flush(self.step())
        if not result: return
        # Step 2 : Select Normal Battle
        result = self.check(True, self.battle("battle_20BP.png"), e.START_BATTLE)
        self.flush(self.step())
        if not result: return
        # Step 3 : Normal Battle
        result = self.check(True, self.battle_puyo(), e.PUYO_BATTLE)
        self.flush(self.step())
        if not result: return
        # Step 4 : Battle Result
        result = self.check(True, self.battle_result(), e.CHECK_BATTLE)
        self.flush(self.step())
        if not result: return
    def after(self):
        # Teardown hook: runs once after test().
        L.info("*** End Allegory : %s *** " % __file__)
        self.stop()
| mit |
dbbhattacharya/kitsune | vendor/packages/sqlalchemy/test/aaa_profiling/test_zoomark_orm.py | 6 | 13694 | """Benchmark for SQLAlchemy.
An adaptation of Robert Brewers' ZooMark speed tests. """
import datetime
import sys
import time
from sqlalchemy import *
from sqlalchemy.orm import *
from sqlalchemy.test import *
ITERATIONS = 1
dbapi_session = engines.ReplayableSession()
metadata = None
class ZooMarkTest(TestBase):
    """Runs the ZooMark and squawks if method counts vary from the norm.
    Each test has an associated `call_range`, the total number of
    accepted function calls made during the test. The count can vary
    between Python 2.4 and 2.5.
    Unlike a unit test, this is a ordered collection of steps. Running
    components individually will fail.
    """
    __only_on__ = 'postgresql+psycopg2'
    # NOTE(review): the trailing comma makes __skip_if__ a 1-tuple containing
    # the lambda -- presumably the framework expects a sequence of skip
    # predicates; confirm before changing.
    __skip_if__ = lambda : sys.version_info < (2, 5), # TODO: get 2.4
    # support
    def test_baseline_0_setup(self):
        # Wrap the real DBAPI connections in a recorder so the test_profile_*
        # steps below can replay the conversation without a live database.
        global metadata, session
        creator = testing.db.pool._creator
        recorder = lambda : dbapi_session.recorder(creator())
        engine = engines.testing_engine(options={'creator': recorder})
        metadata = MetaData(engine)
        session = sessionmaker()()
        engine.connect()
    def test_baseline_1_create_tables(self):
        # Defines the Zoo/Animal schema and maps two trivial classes onto it.
        zoo = Table(
            'Zoo',
            metadata,
            Column('ID', Integer, Sequence('zoo_id_seq'),
                   primary_key=True, index=True),
            Column('Name', Unicode(255)),
            Column('Founded', Date),
            Column('Opens', Time),
            Column('LastEscape', DateTime),
            Column('Admission', Float),
            )
        animal = Table(
            'Animal',
            metadata,
            Column('ID', Integer, Sequence('animal_id_seq'),
                   primary_key=True),
            Column('ZooID', Integer, ForeignKey('Zoo.ID'), index=True),
            Column('Name', Unicode(100)),
            Column('Species', Unicode(100)),
            Column('Legs', Integer, default=4),
            Column('LastEscape', DateTime),
            Column('Lifespan', Float(4)),
            Column('MotherID', Integer, ForeignKey('Animal.ID')),
            Column('PreferredFoodID', Integer),
            Column('AlternateFoodID', Integer),
            )
        metadata.create_all()
        global Zoo, Animal
        class Zoo(object):
            def __init__(self, **kwargs):
                # Accepts (and silently sets) arbitrary keyword attributes.
                for k, v in kwargs.iteritems():
                    setattr(self, k, v)
        class Animal(object):
            def __init__(self, **kwargs):
                for k, v in kwargs.iteritems():
                    setattr(self, k, v)
        mapper(Zoo, zoo)
        mapper(Animal, animal)
    def test_baseline_1a_populate(self):
        # Seeds the fixture data that the later query steps depend on.
        wap = Zoo(Name=u'Wild Animal Park', Founded=datetime.date(2000,
                  1, 1), Opens=datetime.time(8, 15, 59),
                  LastEscape=datetime.datetime( 2004, 7, 29, 5, 6, 7, ),
                  Admission=4.95)
        session.add(wap)
        sdz = Zoo(Name=u'San Diego Zoo', Founded=datetime.date(1835, 9,
                  13), Opens=datetime.time(9, 0, 0), Admission=0)
        session.add(sdz)
        bio = Zoo(Name=u'Montr\xe9al Biod\xf4me',
                  Founded=datetime.date(1992, 6, 19),
                  Opens=datetime.time(9, 0, 0), Admission=11.75)
        session.add(bio)
        seaworld = Zoo(Name=u'Sea_World', Admission=60)
        session.add(seaworld)
        # Let's add a crazy futuristic Zoo to test large date values.
        lp = Zoo(Name=u'Luna Park', Founded=datetime.date(2072, 7, 17),
                 Opens=datetime.time(0, 0, 0), Admission=134.95)
        session.add(lp)
        session.flush()
        # Animals
        leopard = Animal(Species=u'Leopard', Lifespan=73.5)
        session.add(leopard)
        leopard.ZooID = wap.ID
        leopard.LastEscape = \
            datetime.datetime(2004, 12, 21, 8, 15, 0, 999907, )
        session.add(Animal(Species=u'Lion', ZooID=wap.ID))
        session.add(Animal(Species=u'Slug', Legs=1, Lifespan=.75))
        session.add(Animal(Species=u'Tiger', ZooID=sdz.ID))
        # Override Legs.default with itself just to make sure it works.
        session.add(Animal(Species=u'Bear', Legs=4))
        session.add(Animal(Species=u'Ostrich', Legs=2, Lifespan=103.2))
        session.add(Animal(Species=u'Centipede', Legs=100))
        session.add(Animal(Species=u'Emperor Penguin', Legs=2,
                    ZooID=seaworld.ID))
        session.add(Animal(Species=u'Adelie Penguin', Legs=2,
                    ZooID=seaworld.ID))
        session.add(Animal(Species=u'Millipede', Legs=1000000,
                    ZooID=sdz.ID))
        # Add a mother and child to test relationships
        # NOTE(review): 'Nameu' looks like a typo for 'Name'; Animal.__init__
        # setattr's arbitrary kwargs, so this silently creates an unmapped
        # 'Nameu' attribute. Fixing it could alter profiled call counts, so
        # it is left as-is here -- confirm against upstream before changing.
        bai_yun = Animal(Species=u'Ape', Nameu=u'Bai Yun', Legs=2)
        session.add(bai_yun)
        session.add(Animal(Species=u'Ape', Name=u'Hua Mei', Legs=2,
                    MotherID=bai_yun.ID))
        session.flush()
        session.commit()
    def test_baseline_2_insert(self):
        for x in xrange(ITERATIONS):
            session.add(Animal(Species=u'Tick', Name=u'Tick %d' % x,
                        Legs=8))
        session.flush()
    def test_baseline_3_properties(self):
        # Simple equality-filter queries over the seeded fixtures.
        for x in xrange(ITERATIONS):
            # Zoos
            WAP = list(session.query(Zoo).filter(Zoo.Name
                       == u'Wild Animal Park'))
            SDZ = list(session.query(Zoo).filter(Zoo.Founded
                       == datetime.date(1835, 9, 13)))
            Biodome = list(session.query(Zoo).filter(Zoo.Name
                           == u'Montr\xe9al Biod\xf4me'))
            seaworld = list(session.query(Zoo).filter(Zoo.Admission
                            == float(60)))
            # Animals
            leopard = list(session.query(Animal).filter(Animal.Species
                           == u'Leopard'))
            ostrich = list(session.query(Animal).filter(Animal.Species
                           == u'Ostrich'))
            millipede = list(session.query(Animal).filter(Animal.Legs
                             == 1000000))
            ticks = list(session.query(Animal).filter(Animal.Species
                         == u'Tick'))
    def test_baseline_4_expressions(self):
        # Exercises comparison, LIKE, IN, NULL tests and SQL functions.
        for x in xrange(ITERATIONS):
            assert len(list(session.query(Zoo))) == 5
            assert len(list(session.query(Animal))) == ITERATIONS + 12
            assert len(list(session.query(Animal).filter(Animal.Legs
                       == 4))) == 4
            assert len(list(session.query(Animal).filter(Animal.Legs
                       == 2))) == 5
            assert len(list(session.query(Animal).filter(and_(Animal.Legs
                       >= 2, Animal.Legs < 20)))) == ITERATIONS + 9
            assert len(list(session.query(Animal).filter(Animal.Legs
                       > 10))) == 2
            assert len(list(session.query(Animal).filter(Animal.Lifespan
                       > 70))) == 2
            assert len(list(session.query(Animal).
                       filter(Animal.Species.like(u'L%')))) == 2
            assert len(list(session.query(Animal).
                       filter(Animal.Species.like(u'%pede')))) == 2
            assert len(list(session.query(Animal).filter(Animal.LastEscape
                       != None))) == 1
            assert len(list(session.query(Animal).filter(Animal.LastEscape
                       == None))) == ITERATIONS + 11
            # In operator (containedby)
            assert len(list(session.query(Animal).filter(
                       Animal.Species.like(u'%pede%')))) == 2
            assert len(list(session.query(Animal).
                       filter(Animal.Species.in_((u'Lion'
                       , u'Tiger', u'Bear'))))) == 3
            # Try In with cell references
            class thing(object):
                pass
            pet, pet2 = thing(), thing()
            pet.Name, pet2.Name = u'Slug', u'Ostrich'
            assert len(list(session.query(Animal).
                       filter(Animal.Species.in_((pet.Name,
                       pet2.Name))))) == 2
            # logic and other functions
            name = u'Lion'
            assert len(list(session.query(Animal).
                       filter(func.length(Animal.Species)
                       == len(name)))) == ITERATIONS + 3
            assert len(list(session.query(Animal).
                       filter(Animal.Species.like(u'%i%'
                       )))) == ITERATIONS + 7
            # Test now(), today(), year(), month(), day()
            assert len(list(session.query(Zoo).filter(and_(Zoo.Founded
                       != None, Zoo.Founded < func.now())))) == 3
            assert len(list(session.query(Animal).filter(Animal.LastEscape
                       == func.now()))) == 0
            assert len(list(session.query(Animal).filter(func.date_part('year'
                       , Animal.LastEscape) == 2004))) == 1
            assert len(list(session.query(Animal).
                       filter(func.date_part('month'
                       , Animal.LastEscape) == 12))) == 1
            assert len(list(session.query(Animal).filter(func.date_part('day'
                       , Animal.LastEscape) == 21))) == 1
    def test_baseline_5_aggregates(self):
        # Works against the core tables directly (see TODO below).
        Animal = metadata.tables['Animal']
        Zoo = metadata.tables['Zoo']
        # TODO: convert to ORM
        for x in xrange(ITERATIONS):
            # views
            view = select([Animal.c.Legs]).execute().fetchall()
            legs = [x[0] for x in view]
            legs.sort()
            expected = {
                'Leopard': 73.5,
                'Slug': .75,
                'Tiger': None,
                'Lion': None,
                'Bear': None,
                'Ostrich': 103.2,
                'Centipede': None,
                'Emperor Penguin': None,
                'Adelie Penguin': None,
                'Millipede': None,
                'Ape': None,
                'Tick': None,
                }
            for species, lifespan in select([Animal.c.Species,
                    Animal.c.Lifespan]).execute().fetchall():
                assert lifespan == expected[species]
            expected = [u'Montr\xe9al Biod\xf4me', 'Wild Animal Park']
            e = select([Zoo.c.Name], and_(Zoo.c.Founded != None,
                       Zoo.c.Founded <= func.current_timestamp(),
                       Zoo.c.Founded >= datetime.date(1990, 1, 1)))
            values = [val[0] for val in e.execute().fetchall()]
            assert set(values) == set(expected)
            # distinct
            legs = [x[0] for x in select([Animal.c.Legs],
                    distinct=True).execute().fetchall()]
            legs.sort()
    def test_baseline_6_editing(self):
        # Mutates a row, verifies, then restores it so later steps see the
        # original fixture data.
        for x in xrange(ITERATIONS):
            # Edit
            SDZ = session.query(Zoo).filter(Zoo.Name == u'San Diego Zoo'
                    ).one()
            SDZ.Name = u'The San Diego Zoo'
            SDZ.Founded = datetime.date(1900, 1, 1)
            SDZ.Opens = datetime.time(7, 30, 0)
            SDZ.Admission = 35.00
            # Test edits
            SDZ = session.query(Zoo).filter(Zoo.Name
                    == u'The San Diego Zoo').one()
            assert SDZ.Founded == datetime.date(1900, 1, 1), SDZ.Founded
            # Change it back
            SDZ.Name = u'San Diego Zoo'
            SDZ.Founded = datetime.date(1835, 9, 13)
            SDZ.Opens = datetime.time(9, 0, 0)
            SDZ.Admission = 0
            # Test re-edits
            SDZ = session.query(Zoo).filter(Zoo.Name == u'San Diego Zoo'
                    ).one()
            assert SDZ.Founded == datetime.date(1835, 9, 13), \
                SDZ.Founded
    def test_baseline_7_drop(self):
        session.rollback()
        metadata.drop_all()
    # Now, run all of these tests again with the DB-API driver factored
    # out: the ReplayableSession playback stands in for the database.
    #
    # How awkward is this in a unittest framework? Very.
    def test_profile_0(self):
        # Re-point the engine at the recorded DBAPI conversation.
        global metadata, session
        player = lambda : dbapi_session.player()
        engine = create_engine('postgresql:///', creator=player)
        metadata = MetaData(engine)
        session = sessionmaker()()
        engine.connect()
    @profiling.function_call_count(4898)
    def test_profile_1_create_tables(self):
        self.test_baseline_1_create_tables()
    @profiling.function_call_count(9225)
    def test_profile_1a_populate(self):
        self.test_baseline_1a_populate()
    @profiling.function_call_count(640)
    def test_profile_2_insert(self):
        self.test_baseline_2_insert()
    # this number...
    @profiling.function_call_count(6783, {
        '2.6': 7194,
        '2.7': 7298,
        '2.7+cextension': 7288,
        '2.6+cextension': 7184,
        })
    def test_profile_3_properties(self):
        self.test_baseline_3_properties()
    # and this number go down slightly when using the C extensions
    @profiling.function_call_count(22510, {'2.6': 24055, '2.7': 24214})
    def test_profile_4_expressions(self):
        self.test_baseline_4_expressions()
    @profiling.function_call_count(1313, {'2.6+cextension': 1236,
                                          '2.7+cextension': 1207},
                                   variance=0.1)
    def test_profile_5_aggregates(self):
        self.test_baseline_5_aggregates()
    @profiling.function_call_count(2929)
    def test_profile_6_editing(self):
        self.test_baseline_6_editing()
    def test_profile_7_drop(self):
        self.test_baseline_7_drop()
| bsd-3-clause |
iguzu/gae-django | django/contrib/auth/models.py | 6 | 14736 | import datetime
import urllib
from django.contrib import auth
from django.core.exceptions import ImproperlyConfigured
from django.db import models
from django.db.models.manager import EmptyManager
from django.contrib.contenttypes.models import ContentType
from django.utils.encoding import smart_str
from django.utils.hashcompat import md5_constructor, sha_constructor
from django.utils.translation import ugettext_lazy as _
UNUSABLE_PASSWORD = '!' # This will never be a valid hash
try:
set
except NameError:
from sets import Set as set # Python 2.3 fallback
def get_hexdigest(algorithm, salt, raw_password):
    """
    Returns a string of the hexdigest of the given plaintext password and salt
    using the given algorithm ('md5', 'sha1' or 'crypt').
    """
    # Normalize both inputs to byte strings before hashing.
    raw_password = smart_str(raw_password)
    salt = smart_str(salt)
    if algorithm == 'crypt':
        # crypt is only available on some platforms; fail loudly otherwise.
        try:
            import crypt
        except ImportError:
            raise ValueError('"crypt" password algorithm not supported in this environment')
        return crypt.crypt(raw_password, salt)
    salted = salt + raw_password
    if algorithm == 'md5':
        return md5_constructor(salted).hexdigest()
    if algorithm == 'sha1':
        return sha_constructor(salted).hexdigest()
    raise ValueError("Got unknown password algorithm type in password.")
def check_password(raw_password, enc_password):
    """
    Returns a boolean of whether the raw_password was correct. Handles
    encryption formats behind the scenes.
    """
    # Stored format is 'algorithm$salt$hash'; recompute and compare.
    algo, salt, stored_hash = enc_password.split('$')
    computed_hash = get_hexdigest(algo, salt, raw_password)
    return computed_hash == stored_hash
class SiteProfileNotAvailable(Exception):
    """Raised by User.get_profile() when AUTH_PROFILE_MODULE is unset or cannot be loaded."""
    pass
class Permission(models.Model):
    """The permissions system provides a way to assign permissions to specific users and groups of users.
    The permission system is used by the Django admin site, but may also be useful in your own code. The Django admin site uses permissions as follows:
        - The "add" permission limits the user's ability to view the "add" form and add an object.
        - The "change" permission limits a user's ability to view the change list, view the "change" form and change an object.
        - The "delete" permission limits the ability to delete an object.
    Permissions are set globally per type of object, not per specific object instance. It is possible to say "Mary may change news stories," but it's not currently possible to say "Mary may change news stories, but only the ones she created herself" or "Mary may only change news stories that have a certain status or publication date."
    Three basic permissions -- add, change and delete -- are automatically created for each Django model.
    """
    name = models.CharField(_('name'), max_length=50)
    content_type = models.ForeignKey(ContentType)
    # Machine-readable identifier, e.g. 'add_article'.
    codename = models.CharField(_('codename'), max_length=100)
    class Meta:
        verbose_name = _('permission')
        verbose_name_plural = _('permissions')
        # A codename is unique per model (content type), not globally.
        unique_together = (('content_type', 'codename'),)
        ordering = ('content_type__app_label', 'codename')
    def __unicode__(self):
        return u"%s | %s | %s" % (
            unicode(self.content_type.app_label),
            unicode(self.content_type),
            unicode(self.name))
class Group(models.Model):
    """Groups are a generic way of categorizing users to apply permissions, or some other label, to those users. A user can belong to any number of groups.
    A user in a group automatically has all the permissions granted to that group. For example, if the group Site editors has the permission can_edit_home_page, any user in that group will have that permission.
    Beyond permissions, groups are a convenient way to categorize users to apply some label, or extended functionality, to them. For example, you could create a group 'Special users', and you could write code that would do special things to those users -- such as giving them access to a members-only portion of your site, or sending them members-only e-mail messages.
    """
    name = models.CharField(_('name'), max_length=80, unique=True)
    permissions = models.ManyToManyField(Permission, verbose_name=_('permissions'), blank=True)
    class Meta:
        verbose_name = _('group')
        verbose_name_plural = _('groups')
    def __unicode__(self):
        return self.name
class UserManager(models.Manager):
    """Manager providing helpers to create regular users and superusers."""
    def create_user(self, username, email, password=None):
        "Creates and saves a User with the given username, e-mail and password."
        now = datetime.datetime.now()
        # Positional construction mirrors the User field order; the
        # 'placeholder' password is replaced by set_password()/
        # set_unusable_password() below.
        user = self.model(None, username, '', '', email.strip().lower(), 'placeholder', False, True, False, now, now)
        if password:
            user.set_password(password)
        else:
            # No password supplied: store a hash that can never validate.
            user.set_unusable_password()
        user.save()
        return user
    def create_superuser(self, username, email, password):
        u = self.create_user(username, email, password)
        u.is_staff = True
        u.is_active = True
        u.is_superuser = True
        u.save()
        return u
    def make_random_password(self, length=10, allowed_chars='abcdefghjkmnpqrstuvwxyzABCDEFGHJKLMNPQRSTUVWXYZ23456789'):
        "Generates a random password with the given length and given allowed_chars"
        # Note that default value of allowed_chars does not have "I" or letters
        # that look like it -- just to avoid confusion.
        from random import choice
        return ''.join([choice(allowed_chars) for i in range(length)])
class User(models.Model):
    """Users within the Django authentication system are represented by this model.
    Username and password are required. Other fields are optional.
    """
    username = models.CharField(_('username'), max_length=30, unique=True, help_text=_("Required. 30 characters or fewer. Alphanumeric characters only (letters, digits and underscores)."))
    first_name = models.CharField(_('first name'), max_length=30, blank=True)
    last_name = models.CharField(_('last name'), max_length=30, blank=True)
    email = models.EmailField(_('e-mail address'), blank=True)
    # Stored as 'algorithm$salt$hexdigest' (see get_hexdigest/check_password).
    password = models.CharField(_('password'), max_length=128, help_text=_("Use '[algo]$[salt]$[hexdigest]' or use the <a href=\"password/\">change password form</a>."))
    is_staff = models.BooleanField(_('staff status'), default=False, help_text=_("Designates whether the user can log into this admin site."))
    is_active = models.BooleanField(_('active'), default=True, help_text=_("Designates whether this user should be treated as active. Unselect this instead of deleting accounts."))
    is_superuser = models.BooleanField(_('superuser status'), default=False, help_text=_("Designates that this user has all permissions without explicitly assigning them."))
    last_login = models.DateTimeField(_('last login'), default=datetime.datetime.now)
    date_joined = models.DateTimeField(_('date joined'), default=datetime.datetime.now)
    groups = models.ManyToManyField(Group, verbose_name=_('groups'), blank=True,
        help_text=_("In addition to the permissions manually assigned, this user will also get all permissions granted to each group he/she is in."))
    user_permissions = models.ManyToManyField(Permission, verbose_name=_('user permissions'), blank=True)
    objects = UserManager()
    class Meta:
        verbose_name = _('user')
        verbose_name_plural = _('users')
    def __unicode__(self):
        return self.username
    def get_absolute_url(self):
        return "/users/%s/" % urllib.quote(smart_str(self.username))
    def is_anonymous(self):
        "Always returns False. This is a way of comparing User objects to anonymous users."
        return False
    def is_authenticated(self):
        """Always return True. This is a way to tell if the user has been authenticated in templates.
        """
        return True
    def get_full_name(self):
        "Returns the first_name plus the last_name, with a space in between."
        full_name = u'%s %s' % (self.first_name, self.last_name)
        return full_name.strip()
    def set_password(self, raw_password):
        # Generates a fresh random salt and stores 'sha1$salt$hash'.
        import random
        algo = 'sha1'
        salt = get_hexdigest(algo, str(random.random()), str(random.random()))[:5]
        hsh = get_hexdigest(algo, salt, raw_password)
        self.password = '%s$%s$%s' % (algo, salt, hsh)
    def check_password(self, raw_password):
        """
        Returns a boolean of whether the raw_password was correct. Handles
        encryption formats behind the scenes.
        """
        # Backwards-compatibility check. Older passwords won't include the
        # algorithm or salt.
        if '$' not in self.password:
            is_correct = (self.password == get_hexdigest('md5', '', raw_password))
            if is_correct:
                # Convert the password to the new, more secure format.
                self.set_password(raw_password)
                self.save()
            return is_correct
        return check_password(raw_password, self.password)
    def set_unusable_password(self):
        # Sets a value that will never be a valid hash
        self.password = UNUSABLE_PASSWORD
    def has_usable_password(self):
        return self.password != UNUSABLE_PASSWORD
    def get_group_permissions(self):
        """
        Returns a list of permission strings that this user has through
        his/her groups. This method queries all available auth backends.
        """
        permissions = set()
        for backend in auth.get_backends():
            if hasattr(backend, "get_group_permissions"):
                permissions.update(backend.get_group_permissions(self))
        return permissions
    def get_all_permissions(self):
        # Union of permissions reported by every configured auth backend.
        permissions = set()
        for backend in auth.get_backends():
            if hasattr(backend, "get_all_permissions"):
                permissions.update(backend.get_all_permissions(self))
        return permissions
    def has_perm(self, perm):
        """
        Returns True if the user has the specified permission. This method
        queries all available auth backends, but returns immediately if any
        backend returns True. Thus, a user who has permission from a single
        auth backend is assumed to have permission in general.
        """
        # Inactive users have no permissions.
        if not self.is_active:
            return False
        # Superusers have all permissions.
        if self.is_superuser:
            return True
        # Otherwise we need to check the backends.
        for backend in auth.get_backends():
            if hasattr(backend, "has_perm"):
                if backend.has_perm(self, perm):
                    return True
        return False
    def has_perms(self, perm_list):
        """Returns True if the user has each of the specified permissions."""
        for perm in perm_list:
            if not self.has_perm(perm):
                return False
        return True
    def has_module_perms(self, app_label):
        """
        Returns True if the user has any permissions in the given app
        label. Uses pretty much the same logic as has_perm, above.
        """
        if not self.is_active:
            return False
        if self.is_superuser:
            return True
        for backend in auth.get_backends():
            if hasattr(backend, "has_module_perms"):
                if backend.has_module_perms(self, app_label):
                    return True
        return False
    def get_and_delete_messages(self):
        # Drains the user's queued messages (each read message is deleted).
        messages = []
        for m in self.message_set.all():
            messages.append(m.message)
            m.delete()
        return messages
    def email_user(self, subject, message, from_email=None):
        "Sends an e-mail to this User."
        from django.core.mail import send_mail
        send_mail(subject, message, from_email, [self.email])
    def get_profile(self):
        """
        Returns site-specific profile for this user. Raises
        SiteProfileNotAvailable if this site does not allow profiles.
        """
        # Cached per-instance so repeated calls hit the database only once.
        if not hasattr(self, '_profile_cache'):
            from django.conf import settings
            if not getattr(settings, 'AUTH_PROFILE_MODULE', False):
                raise SiteProfileNotAvailable
            try:
                app_label, model_name = settings.AUTH_PROFILE_MODULE.split('.')
                model = models.get_model(app_label, model_name)
                self._profile_cache = model._default_manager.get(user__id__exact=self.id)
                self._profile_cache.user = self
            except (ImportError, ImproperlyConfigured):
                raise SiteProfileNotAvailable
        return self._profile_cache
class Message(models.Model):
    """
    The message system is a lightweight way to queue messages for given
    users. A message is associated with a User instance (so it is only
    applicable for registered users). There's no concept of expiration or
    timestamps. Messages are created by the Django admin after successful
    actions. For example, "The poll Foo was created successfully." is a
    message.
    """
    user = models.ForeignKey(User)
    message = models.TextField(_('message'))
    def __unicode__(self):
        return self.message
class AnonymousUser(object):
    """Stand-in for User representing a non-authenticated visitor.

    Mirrors the User API but has no identity, no permissions, and cannot
    be saved; persistence-related methods raise NotImplementedError.
    """
    id = None
    username = ''
    is_staff = False
    is_active = False
    is_superuser = False
    # Empty managers so group/permission lookups behave like empty querysets.
    _groups = EmptyManager()
    _user_permissions = EmptyManager()
    def __init__(self):
        pass
    def __unicode__(self):
        return 'AnonymousUser'
    def __str__(self):
        return unicode(self).encode('utf-8')
    def __eq__(self, other):
        # All AnonymousUser instances compare equal to each other.
        return isinstance(other, self.__class__)
    def __ne__(self, other):
        return not self.__eq__(other)
    def __hash__(self):
        return 1 # instances always return the same hash value
    def save(self):
        raise NotImplementedError
    def delete(self):
        raise NotImplementedError
    def set_password(self, raw_password):
        raise NotImplementedError
    def check_password(self, raw_password):
        raise NotImplementedError
    def _get_groups(self):
        return self._groups
    groups = property(_get_groups)
    def _get_user_permissions(self):
        return self._user_permissions
    user_permissions = property(_get_user_permissions)
    def has_perm(self, perm):
        return False
    def has_perms(self, perm_list):
        return False
    def has_module_perms(self, module):
        return False
    def get_and_delete_messages(self):
        return []
    def is_anonymous(self):
        return True
    def is_authenticated(self):
        return False
| bsd-3-clause |
nesl/mercury | Services/Elevation/ElevationService.py | 1 | 9994 |
# --- IMPORTS ---
import simplejson
import numpy as np
import os
import sys
import traceback
import time
try:
import urllib.parse as urllibParse
except ImportError:
import urllib as urllibParse
try:
import urllib.request as urllibRequest
except ImportError:
import urllib as urllibRequest
# Before using this class especially in the first time:
# - Make sure the folder hierarchy is correct, namely under folder <Data>/<EleTile>/
# we should find the folder named by resolution exponent. Or, should
# execute activate() method once.
#
# Terminology:
# +--------+
# | |
# | v v |
# | |
# | v v |
# | |
# +--------+ = tile, v in the tile = vertex
#
# (edge length is equal to specified tile resolution)
# The correctionness of this file has been examined in the following ways and
# thus the output of the query() should be correct:
# - compared the created tiles with previous downloaded tiles (with previous
# program) and the differences of all the vertice are within 1e-9
# - have been tested in testSunsetInterpolationES.py. inside this test script,
# it both query from ElevationGridQuerier and this file and plot on the
# matlab and has similar elevation trend (the trajectory includes 2 hills)
# - also be called from ElevationGridRequester and successfully output the
# result (but didn't check the result correctness)
# - also tested all the possible resolutions
# --- CONSTANTS ---
EARTH_RAD_KM = 6371  # mean Earth radius in kilometres
#MAX_REQUEST_POINTS = 600000
REQUEST_BLOCKSIZE = 75    # points sent per elevation API request
REQUEST_MAXATTEMPTS = 20  # retries before giving up on one request block
# Supported resolution exponents; per ElevationRequester.__init__,
# 3 -> 1e-2 degree tiles, 4 -> 1e-3, 5 -> 1e-4.
RESOLUTION_MIN = 3
RESOLUTION_MAX = 5
# --- HTTP API URLs ---
ELEVATION_BASE_URL = 'https://maps.googleapis.com/maps/api/elevation/json'
CHART_BASE_URL = 'http://chart.apis.google.com/chart'
class ElevationRequester:
    """Query terrain elevations from the Google Elevation API with an
    on-disk tile cache.

    The world is divided into square tiles of `tileResolution` degrees,
    each sampled on a `numVertexPerEdge` x `numVertexPerEdge` grid and
    stored as one file under <Data>/<EleTile>/<resolution>/. Arbitrary
    points are answered by bilinear interpolation between the four grid
    vertices surrounding them.

    Fixed vs. the original: `activate()` was missing its `self` parameter
    (it reads self.tileRootFolderPath, so every call raised) and created
    folders for exponents 2..4 instead of the supported 3..5; tile files
    are now opened via `with` so handles are not leaked.
    """

    def __init__(self, resolution=4):
        """resolution: tile-size exponent 3-5 (3 -> 1e-2 deg, 4 -> 1e-3,
        5 -> 1e-4); raises ValueError otherwise."""
        # Check resolution sanity
        if isinstance(resolution, int) == False or resolution < RESOLUTION_MIN or resolution > RESOLUTION_MAX:
            raise ValueError('expected variable type of resolution to be int with range 3-5')
        # Root folder for tile storage
        self.tileRootFolderPath = '../../Data/EleTile/'
        # tile meta information
        self.tileResolution = {3: 1e-2, 4: 1e-3, 5: 1e-4}[resolution]
        self.numVertexPerEdge = {3: 10, 4: 20, 5: 10}[resolution] + 1  # including end points
        self.numVertexInTile = self.numVertexPerEdge ** 2
        self.vertexInterval = self.tileResolution / (self.numVertexPerEdge - 1)
        # cache subpath for this resolution
        self.tileSetPath = self.tileRootFolderPath + str(resolution) + '/'
        # by default, we'll be verbose
        self.verbose = True
        # in-memory cache of parsed tile grids, keyed by tile file name
        self.tiles = {}

    def query(self, latLngSeries):
        """Return interpolated elevations for a (lat, lng) tuple or a list
        of them, downloading and persisting any missing tiles first."""
        # also support single point query (force input to be list)
        if isinstance(latLngSeries, list) == False:
            latLngSeries = [latLngSeries]

        # index corresponding to latLngSeries, store info related to tiles
        metaSeries = []
        # What tiles should be queried from Google service
        tilesToRequest = []

        # retrieve appropriate tile data
        for latLng in latLngSeries:
            tileinfo = self._getTileInfo(latLng)
            metaSeries += [tileinfo]
            # if we don't have this tile downloaded, we need to ask Google
            # for it (tileinfo[0] is tile file name)
            if os.path.isfile(self.tileSetPath + tileinfo[0]) == False and tileinfo[0] not in tilesToRequest:
                tilesToRequest += [tileinfo[0]]

        # -- REQUEST ALL TILES --
        blockPoints = []
        eleReturn = []
        errorFlag = False
        try:
            for tile in tilesToRequest:
                # gather this tile's full vertex grid, flushing a request
                # whenever REQUEST_BLOCKSIZE points have accumulated
                latLng = tuple(map(float, tile[:-6].split('_')))  # lat/lng from tile file name
                for i in range(self.numVertexPerEdge):
                    for j in range(self.numVertexPerEdge):
                        blockPoints += [(latLng[0] + self.vertexInterval * i, latLng[1] + self.vertexInterval * j)]
                        # if we've gathered enough points, request this block
                        if len(blockPoints) == REQUEST_BLOCKSIZE:
                            if self.verbose:
                                print('query %d-%d of %d' % (len(eleReturn), len(eleReturn) + len(blockPoints), len(tilesToRequest) * self.numVertexInTile))
                            eleReturn += self._requestElevationBlock(blockPoints)
                            blockPoints = []
            # get the left over (underfull) block after all tiles have been run through
            if len(blockPoints) > 0:
                if self.verbose:
                    print('query %d-%d of %d' % (len(eleReturn), len(eleReturn) + len(blockPoints), len(tilesToRequest) * self.numVertexInTile))
                eleReturn += self._requestElevationBlock(blockPoints)
        except:
            # remember that we got the exception. don't raise right now
            # since we still need to save the tiles that did come back
            exc_type, exc_value, exc_traceback = sys.exc_info()
            errorFlag = True
            traceback.print_exception(exc_type, exc_value, exc_traceback)

        # store successfully requested tiles into files
        # NOTE(review): the "+ 1" lets a tile that came back one vertex
        # short count as complete -- looks like an off-by-one; confirm
        # against the download batching before changing.
        for i in range((len(eleReturn) + 1) // self.numVertexInTile):  # number of complete tiles downloaded
            with open(self.tileSetPath + tilesToRequest[i], 'w') as f:
                for j in range(self.numVertexPerEdge):
                    s = i * self.numVertexInTile + j * self.numVertexPerEdge  # start index
                    line = ",".join(list(map(str, eleReturn[s:(s + self.numVertexPerEdge)]))) + '\n'
                    f.write(line)

        # raise the exception if the whole download process is incomplete
        if errorFlag:
            traceback.print_exception(exc_type, exc_value, exc_traceback)
            raise EnvironmentError('got exception from requestElevationBlock(), query aborted (and saved)')

        # answer every query point by bilinear interpolation on its tile
        ret = []  # final result to return
        for i in range(len(latLngSeries)):
            meta = metaSeries[i]
            fn = meta[0]  # tile file name
            if fn not in self.tiles:
                # lazily load and cache the tile grid
                with open(self.tileSetPath + fn) as f:
                    content = [list(map(float, x.strip().split(','))) for x in f.readlines()[:self.numVertexPerEdge]]
                self.tiles[fn] = content
            dlati, dlngi, latfrac, lngfrac = meta[1], meta[2], meta[3], meta[4]  # vertex index and fractions
            ret += [self._bilinearInterpolation(latfrac, lngfrac,
                                                self.tiles[fn][dlati][dlngi],
                                                self.tiles[fn][dlati][dlngi + 1],
                                                self.tiles[fn][dlati + 1][dlngi],
                                                self.tiles[fn][dlati + 1][dlngi + 1])]
        return ret

    def setVerbose(self, flag):
        """Enable/disable progress printing during downloads."""
        self.verbose = flag

    def currentCacheSize(self):
        """Number of tiles currently held in the in-memory cache."""
        return len(self.tiles)

    def activate(self):
        """Create the per-resolution tile folders; run once before first use."""
        # Original iterated range(RESOLUTION_MIN - 1, RESOLUTION_MAX),
        # i.e. folders 2..4, so resolution 5 was never created.
        for resolutionID in range(RESOLUTION_MIN, RESOLUTION_MAX + 1):
            dirName = self.tileRootFolderPath + str(resolutionID)
            if not os.path.exists(dirName):
                os.makedirs(dirName)

    def _requestElevationBlock(self, block_pts):
        """Fetch elevations for up to REQUEST_BLOCKSIZE (lat, lng) points,
        retrying partial/failed responses up to REQUEST_MAXATTEMPTS times."""
        # make sure the request is short enough
        if len(block_pts) > REQUEST_BLOCKSIZE:
            raise ValueError('requested block is too large')
        # convert positions to the API's "lat,lng|lat,lng|..." form
        pts_str = ''
        for p in block_pts:
            pts_str += '%.6f,%.6f' % (p[0], p[1])
            pts_str += "|"
        # remove final "|"
        pts_str = pts_str[0:-1]
        # request elevations
        elvtn_args = {
            'locations': pts_str,
        }
        requestAttempt = 0
        goodResponse = False
        while requestAttempt < REQUEST_MAXATTEMPTS and not goodResponse:
            requestAttempt += 1
            time.sleep(0.1)  # small delay between attempts
            url = ELEVATION_BASE_URL + '?' + urllibParse.urlencode(elvtn_args)
            response = simplejson.load(urllibRequest.urlopen(url))
            # parse elevations
            elevations = []
            for resultset in response['results']:
                elevations.append(resultset['elevation'])
            # only accept a response covering every requested point
            if len(elevations) == len(block_pts):
                goodResponse = True
                return elevations
        raise EnvironmentError('No response from google after %d attempts' % REQUEST_MAXATTEMPTS)

    def _getTileInfo(self, latLng):
        """Locate *latLng* within its tile.

        Returns (filename,
                 index of the grid line immediately south of the point,
                 index of the grid line immediately west of the point,
                 fraction of <point to first vertex to the south> / <vertex interval>,
                 fraction of <point to first vertex to the west> / <vertex interval>)
        """
        # example, with tileResolution=1e-3 and vertexInterval=1e-4:
        #   lat/lng = -118.325479 = -118.326 + 0.0001 * 5 + 0.000021
        #           = tileLng + deltaTileLng
        #           = "       + vertexLng + deltaVertexLng
        deltaTileLat = latLng[0] % self.tileResolution
        deltaTileLng = latLng[1] % self.tileResolution
        tileLat = latLng[0] - deltaTileLat
        tileLng = latLng[1] - deltaTileLng
        vertexLatInd = int(deltaTileLat // self.vertexInterval)
        vertexLngInd = int(deltaTileLng // self.vertexInterval)
        deltaVertexLat = deltaTileLat % self.vertexInterval
        deltaVertexLng = deltaTileLng % self.vertexInterval
        fracDeltaVertexLat = deltaVertexLat / self.vertexInterval
        fracDeltaVertexLng = deltaVertexLng / self.vertexInterval
        return ("%.6lf_%.6lf.etile" % (tileLat, tileLng),
                vertexLatInd, vertexLngInd, fracDeltaVertexLat, fracDeltaVertexLng)

    def _bilinearInterpolation(self, latf, lngf, e1a, e1b, e2a, e2b):
        """Bilinearly interpolate within one grid cell.

            <e2a> ------ <e2b>
              |            |
              |      *     |   * at (latf, lngf), both in [0, 1]
              |            |
            <e1a> ------ <e1b>
        """
        e1c = self._interpolation(e1a, e1b, lngf)  # e1a -- e1c ------ e1b
        e2c = self._interpolation(e2a, e2b, lngf)
        return self._interpolation(e1c, e2c, latf)

    def _interpolation(self, va, vb, f):
        """Linear interpolation: f=0 -> va, f=1 -> vb."""
        return va + (vb - va) * f
if __name__ == '__main__':
    # testing bilinear interpolation
    # (placeholder -- no self-test has been written yet)
    pass
| gpl-2.0 |
rjschwei/azure-sdk-for-python | azure-mgmt-compute/azure/mgmt/compute/models/virtual_machine_scale_set_vm.py | 1 | 5933 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from .resource import Resource
class VirtualMachineScaleSetVM(Resource):
    """Describes a virtual machine scale set virtual machine.

    Variables are only populated by the server, and will be ignored when
    sending a request.

    :ivar id: Resource Id
    :vartype id: str
    :ivar name: Resource name
    :vartype name: str
    :ivar type: Resource type
    :vartype type: str
    :param location: Resource location
    :type location: str
    :param tags: Resource tags
    :type tags: dict
    :ivar instance_id: The virtual machine instance ID.
    :vartype instance_id: str
    :ivar sku: The virtual machine SKU.
    :vartype sku: :class:`Sku <azure.mgmt.compute.models.Sku>`
    :ivar latest_model_applied: Specifies whether the latest model has been
     applied to the virtual machine.
    :vartype latest_model_applied: bool
    :ivar vm_id: Azure VM unique ID.
    :vartype vm_id: str
    :ivar instance_view: The virtual machine instance view.
    :vartype instance_view: :class:`VirtualMachineInstanceView
     <azure.mgmt.compute.models.VirtualMachineInstanceView>`
    :param hardware_profile: The hardware profile.
    :type hardware_profile: :class:`HardwareProfile
     <azure.mgmt.compute.models.HardwareProfile>`
    :param storage_profile: The storage profile.
    :type storage_profile: :class:`StorageProfile
     <azure.mgmt.compute.models.StorageProfile>`
    :param os_profile: The OS profile.
    :type os_profile: :class:`OSProfile <azure.mgmt.compute.models.OSProfile>`
    :param network_profile: The network profile.
    :type network_profile: :class:`NetworkProfile
     <azure.mgmt.compute.models.NetworkProfile>`
    :param diagnostics_profile: The diagnostics profile.
    :type diagnostics_profile: :class:`DiagnosticsProfile
     <azure.mgmt.compute.models.DiagnosticsProfile>`
    :param availability_set: The reference Id of the availability set to which
     this virtual machine belongs.
    :type availability_set: :class:`SubResource
     <azure.mgmt.compute.models.SubResource>`
    :ivar provisioning_state: The provisioning state, which only appears in
     the response.
    :vartype provisioning_state: str
    :param license_type: The license type, which is for bring your own license
     scenario.
    :type license_type: str
    :param plan: The purchase plan when deploying virtual machine from VM
     Marketplace images.
    :type plan: :class:`Plan <azure.mgmt.compute.models.Plan>`
    :ivar resources: The virtual machine child extension resources.
    :vartype resources: list of :class:`VirtualMachineExtension
     <azure.mgmt.compute.models.VirtualMachineExtension>`
    """

    # Fields marked 'readonly' are populated by the service only;
    # presumably enforced by the msrest serializer -- see base Resource.
    _validation = {
        'id': {'readonly': True},
        'name': {'readonly': True},
        'type': {'readonly': True},
        'location': {'required': True},
        'instance_id': {'readonly': True},
        'sku': {'readonly': True},
        'latest_model_applied': {'readonly': True},
        'vm_id': {'readonly': True},
        'instance_view': {'readonly': True},
        'provisioning_state': {'readonly': True},
        'resources': {'readonly': True},
    }

    # Python attribute -> wire-format key/type; dotted keys such as
    # 'properties.vmId' address values nested under the REST payload's
    # 'properties' object.
    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'type': {'key': 'type', 'type': 'str'},
        'location': {'key': 'location', 'type': 'str'},
        'tags': {'key': 'tags', 'type': '{str}'},
        'instance_id': {'key': 'instanceId', 'type': 'str'},
        'sku': {'key': 'sku', 'type': 'Sku'},
        'latest_model_applied': {'key': 'properties.latestModelApplied', 'type': 'bool'},
        'vm_id': {'key': 'properties.vmId', 'type': 'str'},
        'instance_view': {'key': 'properties.instanceView', 'type': 'VirtualMachineInstanceView'},
        'hardware_profile': {'key': 'properties.hardwareProfile', 'type': 'HardwareProfile'},
        'storage_profile': {'key': 'properties.storageProfile', 'type': 'StorageProfile'},
        'os_profile': {'key': 'properties.osProfile', 'type': 'OSProfile'},
        'network_profile': {'key': 'properties.networkProfile', 'type': 'NetworkProfile'},
        'diagnostics_profile': {'key': 'properties.diagnosticsProfile', 'type': 'DiagnosticsProfile'},
        'availability_set': {'key': 'properties.availabilitySet', 'type': 'SubResource'},
        'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
        'license_type': {'key': 'properties.licenseType', 'type': 'str'},
        'plan': {'key': 'plan', 'type': 'Plan'},
        'resources': {'key': 'resources', 'type': '[VirtualMachineExtension]'},
    }

    def __init__(self, location, tags=None, hardware_profile=None, storage_profile=None, os_profile=None, network_profile=None, diagnostics_profile=None, availability_set=None, license_type=None, plan=None):
        super(VirtualMachineScaleSetVM, self).__init__(location=location, tags=tags)
        # Server-populated fields start as None and are filled in when a
        # response is deserialized.
        self.instance_id = None
        self.sku = None
        self.latest_model_applied = None
        self.vm_id = None
        self.instance_view = None
        self.hardware_profile = hardware_profile
        self.storage_profile = storage_profile
        self.os_profile = os_profile
        self.network_profile = network_profile
        self.diagnostics_profile = diagnostics_profile
        self.availability_set = availability_set
        self.provisioning_state = None
        self.license_type = license_type
        self.plan = plan
        self.resources = None
| mit |
millanp/django-paypal | paypal/standard/ipn/views.py | 12 | 3222 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import logging
from django.http import HttpResponse, QueryDict
from django.views.decorators.http import require_POST
from django.views.decorators.csrf import csrf_exempt
from paypal.standard.ipn.forms import PayPalIPNForm
from paypal.standard.ipn.models import PayPalIPN
from paypal.standard.models import DEFAULT_ENCODING
log = logging.getLogger(__name__)
@require_POST
@csrf_exempt
def ipn(request, item_check_callable=None):
    """
    PayPal IPN endpoint (notify_url).
    Used by both PayPal Payments Pro and Payments Standard to confirm transactions.
    http://tinyurl.com/d9vu9d

    PayPal IPN Simulator:
    https://developer.paypal.com/cgi-bin/devscr?cmd=_ipn-link-session

    Always answers "OKAY" (HTTP 200); processing problems are recorded on
    the PayPalIPN object's flag field rather than returned to PayPal.
    """
    # TODO: Clean up code so that we don't need to set None here and have a lot
    # of if checks just to determine if flag is set.
    flag = None
    ipn_obj = None

    # Clean up the data as PayPal sends some weird values such as "N/A"
    # Also, need to cope with custom encoding, which is stored in the body (!).
    # Assuming the tolerant parsing of QueryDict and an ASCII-like encoding,
    # such as windows-1252, latin1 or UTF8, the following will work:
    encoding = request.POST.get('charset', None)

    encoding_missing = encoding is None
    if encoding_missing:
        encoding = DEFAULT_ENCODING

    try:
        # Re-parse the raw body with the declared charset; request.POST
        # would have been decoded with Django's default instead.
        data = QueryDict(request.body, encoding=encoding).copy()
    except LookupError:
        # The charset PayPal sent is unknown to Python's codec registry.
        data = None
        flag = "Invalid form - invalid charset"

    if data is not None:
        # Collect the model's DateTimeField names so "N/A" placeholders can
        # be dropped before form validation. get_fields is the newer Django
        # _meta API; get_fields_with_model the older one.
        if hasattr(PayPalIPN._meta, 'get_fields'):
            date_fields = [f.attname for f in PayPalIPN._meta.get_fields() if f.__class__.__name__ == 'DateTimeField']
        else:
            date_fields = [f.attname for f, m in PayPalIPN._meta.get_fields_with_model() if f.__class__.__name__ == 'DateTimeField']

        for date_field in date_fields:
            if data.get(date_field) == 'N/A':
                del data[date_field]

        form = PayPalIPNForm(data)
        if form.is_valid():
            try:
                # When commit = False, object is returned without saving to DB.
                ipn_obj = form.save(commit=False)
            except Exception as e:
                flag = "Exception while processing. (%s)" % e
        else:
            flag = "Invalid form. ({0})".format(", ".join(["{0}: {1}".format(k, ", ".join(v)) for k, v in form.errors.items()]))

    if ipn_obj is None:
        ipn_obj = PayPalIPN()

    # Set query params and sender's IP address
    ipn_obj.initialize(request)

    if flag is not None:
        # We save errors in the flag field
        ipn_obj.set_flag(flag)
    else:
        # Secrets should only be used over SSL.
        if request.is_secure() and 'secret' in request.GET:
            ipn_obj.verify_secret(form, request.GET['secret'])
        else:
            ipn_obj.verify(item_check_callable)

    ipn_obj.save()
    ipn_obj.send_signals()

    if encoding_missing:
        # Wait until we have an ID to log warning
        log.warning("No charset passed with PayPalIPN: %s. Guessing %s", ipn_obj.id, encoding)

    return HttpResponse("OKAY")
| mit |
pomegranited/edx-platform | lms/djangoapps/mobile_api/video_outlines/serializers.py | 97 | 8891 | """
Serializer for video outline
"""
from rest_framework.reverse import reverse
from xmodule.modulestore.mongo.base import BLOCK_TYPES_WITH_CHILDREN
from xmodule.modulestore.django import modulestore
from courseware.access import has_access
from courseware.courses import get_course_by_id
from courseware.model_data import FieldDataCache
from courseware.module_render import get_module_for_descriptor
from util.module_utils import get_dynamic_descriptor_children
from edxval.api import (
get_video_info_for_course_and_profiles, ValInternalError
)
class BlockOutline(object):
    """
    Serializes course videos, pulling data from VAL and the video modules.

    Iterating a BlockOutline performs a depth-first walk of the course tree
    from `start_block`, yielding one summary dict per block whose type
    appears in `block_types`.
    """
    def __init__(self, course_id, start_block, block_types, request, video_profiles):
        """Create a BlockOutline using `start_block` as a starting point."""
        self.start_block = start_block
        self.block_types = block_types
        self.course_id = course_id
        self.request = request  # needed for making full URLS
        self.local_cache = {}
        try:
            # Bulk-fetch VAL metadata for the whole course up front so the
            # per-block summary functions don't query VAL individually.
            self.local_cache['course_videos'] = get_video_info_for_course_and_profiles(
                unicode(course_id), video_profiles
            )
        except ValInternalError:  # pragma: nocover
            self.local_cache['course_videos'] = {}

    def __iter__(self):
        def parent_or_requested_block_type(usage_key):
            """
            Returns whether the usage_key's block_type is one of self.block_types or a parent type.
            """
            return (
                usage_key.block_type in self.block_types or
                usage_key.block_type in BLOCK_TYPES_WITH_CHILDREN
            )

        def create_module(descriptor):
            """
            Factory method for creating and binding a module for the given descriptor.
            """
            field_data_cache = FieldDataCache.cache_for_descriptor_descendents(
                self.course_id, self.request.user, descriptor, depth=0,
            )
            course = get_course_by_id(self.course_id)
            return get_module_for_descriptor(
                self.request.user, self.request, descriptor, field_data_cache, self.course_id, course=course
            )

        # Iterative DFS with an explicit stack; child_to_parent records the
        # traversal tree so path()/find_urls() can walk back to the root.
        with modulestore().bulk_operations(self.course_id):
            child_to_parent = {}
            stack = [self.start_block]
            while stack:
                curr_block = stack.pop()

                if curr_block.hide_from_toc:
                    # For now, if the 'hide_from_toc' setting is set on the block, do not traverse down
                    # the hierarchy. The reason being is that these blocks may not have human-readable names
                    # to display on the mobile clients.
                    # Eventually, we'll need to figure out how we want these blocks to be displayed on the
                    # mobile clients. As they are still accessible in the browser, just not navigatable
                    # from the table-of-contents.
                    continue

                if curr_block.location.block_type in self.block_types:
                    # Skip blocks the requesting user may not load.
                    if not has_access(self.request.user, 'load', curr_block, course_key=self.course_id):
                        continue

                    summary_fn = self.block_types[curr_block.category]
                    block_path = list(path(curr_block, child_to_parent, self.start_block))
                    unit_url, section_url = find_urls(self.course_id, curr_block, child_to_parent, self.request)

                    yield {
                        "path": block_path,
                        "named_path": [b["name"] for b in block_path],
                        "unit_url": unit_url,
                        "section_url": section_url,
                        "summary": summary_fn(self.course_id, curr_block, self.request, self.local_cache)
                    }

                if curr_block.has_children:
                    children = get_dynamic_descriptor_children(
                        curr_block,
                        self.request.user.id,
                        create_module,
                        usage_key_filter=parent_or_requested_block_type
                    )
                    # Push in reverse so the LIFO stack yields children in
                    # their original order.
                    for block in reversed(children):
                        stack.append(block)
                        child_to_parent[block] = curr_block
def path(block, child_to_parent, start_block):
    """Return the ancestors of *block* (root first) as summary dicts,
    excluding *start_block* itself."""
    ancestors = []
    node = block
    while node in child_to_parent:
        node = child_to_parent[node]
        if node is start_block:
            continue
        ancestors.append({
            # to be consistent with other edx-platform clients, return the
            # defaulted display name
            'name': node.display_name_with_default,
            'category': node.category,
            'id': unicode(node.location)
        })
    return reversed(ancestors)
def find_urls(course_id, block, child_to_parent, request):
    """
    Find the section and unit urls for a block.

    Returns:
        unit_url, section_url:
            unit_url (str): The url of a unit
            section_url (str): The url of a section
    """
    # Walk up to the root; block_list then reads root-first, i.e.
    # [course, chapter, section, unit, ...] depending on depth.
    block_path = []
    while block in child_to_parent:
        block = child_to_parent[block]
        block_path.append(block)

    block_list = list(reversed(block_path))
    block_count = len(block_list)

    chapter_id = block_list[1].location.block_id if block_count > 1 else None
    section = block_list[2] if block_count > 2 else None
    position = None

    if block_count > 3:
        # Courseware positions are 1-based: find the unit's index within
        # its section by matching url_name.
        position = 1
        for block in section.children:
            if block.name == block_list[3].url_name:
                break
            position += 1

    kwargs = {'course_id': unicode(course_id)}
    if chapter_id is None:
        # Not deep enough for a chapter: both URLs point at the course root.
        course_url = reverse("courseware", kwargs=kwargs, request=request)
        return course_url, course_url
    kwargs['chapter'] = chapter_id

    if section is None:
        chapter_url = reverse("courseware_chapter", kwargs=kwargs, request=request)
        return chapter_url, chapter_url
    kwargs['section'] = section.url_name
    section_url = reverse("courseware_section", kwargs=kwargs, request=request)

    if position is None:
        return section_url, section_url
    kwargs['position'] = position
    unit_url = reverse("courseware_position", kwargs=kwargs, request=request)
    return unit_url, section_url
def video_summary(video_profiles, course_id, video_descriptor, request, local_cache):
    """
    returns summary dict for the given video module

    Combines always-available descriptor fields with encoded-video data
    from the VAL cache populated by BlockOutline, falling back to the
    descriptor's own HTML5/source URLs when VAL has nothing.
    """
    always_available_data = {
        "name": video_descriptor.display_name,
        "category": video_descriptor.category,
        "id": unicode(video_descriptor.scope_ids.usage_id),
        "only_on_web": video_descriptor.only_on_web,
    }

    if video_descriptor.only_on_web:
        # Web-only videos expose no playable data to mobile clients.
        ret = {
            "video_url": None,
            "video_thumbnail_url": None,
            "duration": 0,
            "size": 0,
            "transcripts": {},
            "language": None,
        }
        ret.update(always_available_data)
        return ret

    # Get encoded videos
    video_data = local_cache['course_videos'].get(video_descriptor.edx_video_id, {})

    # Get highest priority video to populate backwards compatible field
    # (video_profiles is assumed ordered by preference -- confirm in caller).
    default_encoded_video = {}
    if video_data:
        for profile in video_profiles:
            default_encoded_video = video_data['profiles'].get(profile, {})
            if default_encoded_video:
                break

    if default_encoded_video:
        video_url = default_encoded_video['url']
    # Then fall back to VideoDescriptor fields for video URLs
    elif video_descriptor.html5_sources:
        video_url = video_descriptor.html5_sources[0]
    else:
        video_url = video_descriptor.source

    # Get duration/size, else default
    duration = video_data.get('duration', None)
    size = default_encoded_video.get('file_size', 0)

    # Transcripts: one transcript-detail URL per available language.
    transcripts_info = video_descriptor.get_transcripts_info()
    transcript_langs = video_descriptor.available_translations(transcripts_info, verify_assets=False)

    transcripts = {
        lang: reverse(
            'video-transcripts-detail',
            kwargs={
                'course_id': unicode(course_id),
                'block_id': video_descriptor.scope_ids.usage_id.block_id,
                'lang': lang
            },
            request=request,
        )
        for lang in transcript_langs
    }

    ret = {
        "video_url": video_url,
        "video_thumbnail_url": None,
        "duration": duration,
        "size": size,
        "transcripts": transcripts,
        "language": video_descriptor.get_default_transcript_language(transcripts_info),
        "encoded_videos": video_data.get('profiles')
    }
    ret.update(always_available_data)
    return ret
| agpl-3.0 |
righthandabacus/shadowsocks-2.8.2 | shadowsocks/common.py | 945 | 8921 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright 2013-2015 clowwindy
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from __future__ import absolute_import, division, print_function, \
with_statement
import socket
import struct
import logging
def compat_ord(s):
    """ord() that tolerates values that are already ints (as produced by
    indexing a bytes object under Python 3)."""
    return s if type(s) == int else _ord(s)
def compat_chr(d):
    """chr() that yields a single byte under Python 3 and a native str
    under Python 2 (where bytes is str)."""
    if bytes != str:
        return bytes([d])
    return _chr(d)
# Keep references to the real builtins, then shadow ord/chr module-wide
# with the py2/py3-tolerant versions defined above.
_ord = ord
_chr = chr
ord = compat_ord
chr = compat_chr
def to_bytes(s):
    """UTF-8 encode text strings; return any other value unchanged.

    On Python 2 (bytes is str) everything passes through untouched.
    """
    if bytes != str and type(s) == str:
        return s.encode('utf-8')
    return s
def to_str(s):
    """UTF-8 decode byte strings; return any other value unchanged.

    On Python 2 (bytes is str) everything passes through untouched.
    """
    if bytes != str and type(s) == bytes:
        return s.decode('utf-8')
    return s
def inet_ntop(family, ipstr):
    """Fallback socket.inet_ntop: packed address bytes -> text (as bytes).

    IPv4 is delegated to inet_ntoa. IPv6 is rendered manually, compressing
    the LONGEST run of zero groups to '::' (RFC 5952). The original only
    collapsed the first run (re.sub(..., count=1)), which produced invalid
    text for addresses containing several separate zero runs, e.g.
    2001:0:0:1:0:0:0:1.
    """
    if family == socket.AF_INET:
        return to_bytes(socket.inet_ntoa(ipstr))
    elif family == socket.AF_INET6:
        # Eight 16-bit groups; strip leading zeros but keep '0' so isolated
        # zero groups remain representable. ord() is the module-level compat
        # shim, so iterating bytes works on both py2 and py3.
        grps = [('%02X%02X' % (ord(i), ord(j))).lstrip('0') or '0'
                for i, j in zip(ipstr[::2], ipstr[1::2])]
        # Find the longest run of consecutive zero groups.
        best_start = best_len = 0
        run_start = run_len = 0
        for idx, grp in enumerate(grps):
            if grp == '0':
                if run_len == 0:
                    run_start = idx
                run_len += 1
                if run_len > best_len:
                    best_start, best_len = run_start, run_len
            else:
                run_len = 0
        if best_len > 1:
            # '::' stands in for two or more zero groups.
            v6addr = (':'.join(grps[:best_start]) + '::' +
                      ':'.join(grps[best_start + best_len:]))
        else:
            v6addr = ':'.join(grps)
        return to_bytes(v6addr)
def inet_pton(family, addr):
    """Fallback socket.inet_pton: textual address -> packed bytes.

    Fix vs. the original: the IPv4-embedded branch did
    `v4addr = map(...)` followed by `v4addr.insert(2, ':')`, which crashes
    on Python 3 where map() returns a lazy iterator with no insert();
    wrapping in list() restores py2 behaviour on both versions.
    """
    addr = to_str(addr)
    if family == socket.AF_INET:
        return socket.inet_aton(addr)
    elif family == socket.AF_INET6:
        if '.' in addr:  # a v4 addr embedded in v6, e.g. ::ffff:1.2.3.4
            v4addr = addr[addr.rindex(':') + 1:]
            v4addr = socket.inet_aton(v4addr)
            # list() is required: under py3 map() is lazy and has no insert()
            v4addr = list(map(lambda x: ('%02X' % ord(x)), v4addr))
            v4addr.insert(2, ':')
            newaddr = addr[:addr.rindex(':') + 1] + ''.join(v4addr)
            return inet_pton(family, newaddr)
        dbyts = [0] * 8  # 8 groups
        grps = addr.split(':')
        # Fill groups from the left until the '::' gap (an empty group),
        # then fill the remainder from the right.
        for i, v in enumerate(grps):
            if v:
                dbyts[i] = int(v, 16)
            else:
                for j, w in enumerate(grps[::-1]):
                    if w:
                        dbyts[7 - j] = int(w, 16)
                    else:
                        break
                break
        # chr() is the module-level compat shim returning one byte per call.
        return b''.join((chr(i // 256) + chr(i % 256)) for i in dbyts)
    else:
        raise RuntimeError("What family?")
def is_ip(address):
    """Return the socket family (AF_INET or AF_INET6) if *address* is a
    valid IP literal, otherwise False. Accepts str or bytes."""
    for family in (socket.AF_INET, socket.AF_INET6):
        try:
            candidate = address
            if type(candidate) != str:
                candidate = candidate.decode('utf8')
            inet_pton(family, candidate)
            return family
        except (TypeError, ValueError, OSError, IOError):
            pass
    return False
def patch_socket():
    # Install the pure-python fallbacks on platforms whose socket module
    # lacks inet_pton/inet_ntop; no-op everywhere else.
    if not hasattr(socket, 'inet_pton'):
        socket.inet_pton = inet_pton
    if not hasattr(socket, 'inet_ntop'):
        socket.inet_ntop = inet_ntop


# Apply the patch at import time so the rest of the module can call
# socket.inet_pton/inet_ntop unconditionally.
patch_socket()
# Address-type tags used in the request header (values match the SOCKS5
# ATYP field: 1 = IPv4, 3 = domain name, 4 = IPv6).
ADDRTYPE_IPV4 = 1
ADDRTYPE_IPV6 = 4
ADDRTYPE_HOST = 3
def pack_addr(address):
    # Serialise an address into the wire format: a 1-byte type tag followed
    # by packed IPv4/IPv6 bytes, or, for a hostname, a length byte plus the
    # raw name. chr() here is the module-level compat shim (returns bytes).
    address_str = to_str(address)
    for family in (socket.AF_INET, socket.AF_INET6):
        try:
            r = socket.inet_pton(family, address_str)
            if family == socket.AF_INET6:
                return b'\x04' + r
            else:
                return b'\x01' + r
        except (TypeError, ValueError, OSError, IOError):
            # Not a literal address of this family; try the next.
            pass
    # Hostname path: the length must fit in one byte.
    if len(address) > 255:
        address = address[:255]  # TODO
    return b'\x03' + chr(len(address)) + address
def parse_header(data):
    """Parse a request header of the form
    [1-byte addrtype][address][2-byte big-endian port].

    Returns (addrtype, dest_addr, dest_port, header_length), or None when
    the buffer is truncated or the address type is unknown.

    Fix vs. the original: the hostname branch only required
    len(data) >= 2 + addrlen, which does not cover the two port bytes, so a
    buffer truncated inside the port raised an uncaught struct.error instead
    of logging and returning None. The bound is now 4 + addrlen.
    """
    addrtype = ord(data[0])
    dest_addr = None
    dest_port = None
    header_length = 0
    if addrtype == ADDRTYPE_IPV4:
        # 1 type byte + 4 address bytes + 2 port bytes
        if len(data) >= 7:
            dest_addr = socket.inet_ntoa(data[1:5])
            dest_port = struct.unpack('>H', data[5:7])[0]
            header_length = 7
        else:
            logging.warn('header is too short')
    elif addrtype == ADDRTYPE_HOST:
        # 1 type byte + 1 length byte + hostname + 2 port bytes
        if len(data) > 2:
            addrlen = ord(data[1])
            if len(data) >= 4 + addrlen:
                dest_addr = data[2:2 + addrlen]
                dest_port = struct.unpack('>H', data[2 + addrlen:4 +
                                          addrlen])[0]
                header_length = 4 + addrlen
            else:
                logging.warn('header is too short')
        else:
            logging.warn('header is too short')
    elif addrtype == ADDRTYPE_IPV6:
        # 1 type byte + 16 address bytes + 2 port bytes
        if len(data) >= 19:
            dest_addr = socket.inet_ntop(socket.AF_INET6, data[1:17])
            dest_port = struct.unpack('>H', data[17:19])[0]
            header_length = 19
        else:
            logging.warn('header is too short')
    else:
        logging.warn('unsupported addrtype %d, maybe wrong password or '
                     'encryption method' % addrtype)
    if dest_addr is None:
        return None
    return addrtype, to_bytes(dest_addr), dest_port, header_length
class IPNetwork(object):
    """A set of CIDR networks supporting fast `addr in network` tests.

    Networks are stored as (prefix-value, host-bit-count) pairs so that a
    membership test is a single shift-and-compare.

    Fix vs. the original: identity comparisons against literals and small
    ints (`addr is ""`, `len(block) is 1`, `ip is not 0`,
    `addr_family is socket.AF_INET`) only worked through CPython's object
    caching and raise SyntaxWarning on Python 3.8+; they are now proper
    equality tests. Also fixed the "did't" typo in the warning message.
    """
    ADDRLENGTH = {socket.AF_INET: 32, socket.AF_INET6: 128, False: 0}

    def __init__(self, addrs):
        """addrs: a comma-separated string or an iterable of CIDR strings."""
        self._network_list_v4 = []
        self._network_list_v6 = []
        if type(addrs) == str:
            addrs = addrs.split(',')
        list(map(self.add_network, addrs))

    def add_network(self, addr):
        """Parse one CIDR string ("a.b.c.d/n" or "x::y/n") and store it."""
        if addr == "":
            return
        block = addr.split('/')
        addr_family = is_ip(block[0])
        addr_len = IPNetwork.ADDRLENGTH[addr_family]
        if addr_family == socket.AF_INET:
            ip, = struct.unpack("!I", socket.inet_aton(block[0]))
        elif addr_family == socket.AF_INET6:
            hi, lo = struct.unpack("!QQ", inet_pton(addr_family, block[0]))
            ip = (hi << 64) | lo
        else:
            raise Exception("Not a valid CIDR notation: %s" % addr)
        if len(block) == 1:
            # No explicit prefix: infer one from the trailing zero bits.
            prefix_size = 0
            while (ip & 1) == 0 and ip != 0:
                ip >>= 1
                prefix_size += 1
            logging.warn("You didn't specify CIDR routing prefix size for %s, "
                         "implicit treated as %s/%d" % (addr, addr, addr_len))
        elif block[1].isdigit() and int(block[1]) <= addr_len:
            prefix_size = addr_len - int(block[1])
            ip >>= prefix_size
        else:
            raise Exception("Not a valid CIDR notation: %s" % addr)
        if addr_family == socket.AF_INET:
            self._network_list_v4.append((ip, prefix_size))
        else:
            self._network_list_v6.append((ip, prefix_size))

    def __contains__(self, addr):
        """True if *addr* (a v4 or v6 literal) lies in any stored network."""
        addr_family = is_ip(addr)
        if addr_family == socket.AF_INET:
            ip, = struct.unpack("!I", socket.inet_aton(addr))
            return any(map(lambda n_ps: n_ps[0] == ip >> n_ps[1],
                           self._network_list_v4))
        elif addr_family == socket.AF_INET6:
            hi, lo = struct.unpack("!QQ", inet_pton(addr_family, addr))
            ip = (hi << 64) | lo
            return any(map(lambda n_ps: n_ps[0] == ip >> n_ps[1],
                           self._network_list_v6))
        else:
            return False
def test_inet_conv():
    """Round-trip a v4 and a v6 address through the pton/ntop fallbacks."""
    ipv4 = b'8.8.4.4'
    b = inet_pton(socket.AF_INET, ipv4)
    assert inet_ntop(socket.AF_INET, b) == ipv4
    ipv6 = b'2404:6800:4005:805::1011'
    b = inet_pton(socket.AF_INET6, ipv6)
    assert inet_ntop(socket.AF_INET6, b) == ipv6
def test_parse_header():
    """Check parse_header on hostname, IPv4 and IPv6 request headers."""
    assert parse_header(b'\x03\x0ewww.google.com\x00\x50') == \
        (3, b'www.google.com', 80, 18)
    assert parse_header(b'\x01\x08\x08\x08\x08\x00\x35') == \
        (1, b'8.8.8.8', 53, 7)
    assert parse_header((b'\x04$\x04h\x00@\x05\x08\x05\x00\x00\x00\x00\x00'
                         b'\x00\x10\x11\x00\x50')) == \
        (4, b'2404:6800:4005:805::1011', 80, 19)
def test_pack_header():
    """Check pack_addr for IPv4, IPv6 and hostname inputs."""
    assert pack_addr(b'8.8.8.8') == b'\x01\x08\x08\x08\x08'
    assert pack_addr(b'2404:6800:4005:805::1011') == \
        b'\x04$\x04h\x00@\x05\x08\x05\x00\x00\x00\x00\x00\x00\x10\x11'
    assert pack_addr(b'www.google.com') == b'\x03\x0ewww.google.com'
def test_ip_network():
    """Exercise IPNetwork membership for v4/v6 networks and single hosts."""
    network = IPNetwork('127.0.0.0/24,::ff:1/112,::1,192.168.1.1,192.0.2.0')
    contained = ['127.0.0.1', ':ff:ffff', '::1', '192.168.1.1', '192.0.2.1',
                 '192.0.3.1']  # 192.0.2.0 is treated as 192.0.2.0/23
    not_contained = ['127.0.1.1', '::ffff:1', '::2', '192.168.1.2',
                     'www.google.com']
    for addr in contained:
        assert addr in network
    for addr in not_contained:
        assert addr not in network
if __name__ == '__main__':
    # Run the module self-tests when executed directly.
    test_inet_conv()
    test_parse_header()
    test_pack_header()
    test_ip_network()
| apache-2.0 |
toshywoshy/ansible | lib/ansible/modules/network/fortimanager/fmgr_secprof_profile_group.py | 39 | 8605 | #!/usr/bin/python
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import absolute_import, division, print_function
__metaclass__ = type
# Module metadata consumed by Ansible's documentation and sanity tooling.
ANSIBLE_METADATA = {'status': ['preview'],
                    'supported_by': 'community',
                    'metadata_version': '1.1'}
DOCUMENTATION = '''
---
module: fmgr_secprof_profile_group
version_added: "2.8"
notes:
- Full Documentation at U(https://ftnt-ansible-docs.readthedocs.io/en/latest/).
author:
- Luke Weighall (@lweighall)
- Andrew Welsh (@Ghilli3)
- Jim Huber (@p4r4n0y1ng)
short_description: Manage security profiles within FortiManager
description:
- Manage security profile group which allows you to create a group of security profiles and apply that to a policy.
options:
adom:
description:
- The ADOM the configuration should belong to.
required: false
default: root
mode:
description:
- Sets one of three modes for managing the object.
- Allows use of soft-adds instead of overwriting existing values.
choices: ['add', 'set', 'delete', 'update']
required: false
default: add
webfilter_profile:
type: str
description:
- Name of an existing Web filter profile.
required: false
waf_profile:
type: str
description:
- Name of an existing Web application firewall profile.
required: false
voip_profile:
type: str
description:
- Name of an existing VoIP profile.
required: false
ssl_ssh_profile:
type: str
description:
- Name of an existing SSL SSH profile.
required: false
ssh_filter_profile:
type: str
description:
- Name of an existing SSH filter profile.
required: false
spamfilter_profile:
type: str
description:
- Name of an existing Spam filter profile.
required: false
profile_protocol_options:
type: str
description:
- Name of an existing Protocol options profile.
required: false
name:
type: str
description:
- Profile group name.
required: false
mms_profile:
type: str
description:
- Name of an existing MMS profile.
required: false
ips_sensor:
type: str
description:
- Name of an existing IPS sensor.
required: false
icap_profile:
type: str
description:
- Name of an existing ICAP profile.
required: false
dnsfilter_profile:
type: str
description:
- Name of an existing DNS filter profile.
required: false
dlp_sensor:
type: str
description:
- Name of an existing DLP sensor.
required: false
av_profile:
type: str
description:
- Name of an existing Antivirus profile.
required: false
application_list:
type: str
description:
- Name of an existing Application list.
required: false
'''
EXAMPLES = '''
- name: DELETE Profile
fmgr_secprof_profile_group:
name: "Ansible_TEST_Profile_Group"
mode: "delete"
- name: CREATE Profile
fmgr_secprof_profile_group:
name: "Ansible_TEST_Profile_Group"
mode: "set"
av_profile: "Ansible_AV_Profile"
profile_protocol_options: "default"
'''
RETURN = """
api_result:
description: full API response, includes status code and message
returned: always
type: str
"""
from ansible.module_utils.basic import AnsibleModule, env_fallback
from ansible.module_utils.connection import Connection
from ansible.module_utils.network.fortimanager.fortimanager import FortiManagerHandler
from ansible.module_utils.network.fortimanager.common import FMGBaseException
from ansible.module_utils.network.fortimanager.common import FMGRCommon
from ansible.module_utils.network.fortimanager.common import FMGRMethods
from ansible.module_utils.network.fortimanager.common import DEFAULT_RESULT_OBJ
from ansible.module_utils.network.fortimanager.common import FAIL_SOCKET_MSG
from ansible.module_utils.network.fortimanager.common import prepare_dict
from ansible.module_utils.network.fortimanager.common import scrub_dict
###############
# START METHODS
###############
def fmgr_firewall_profile_group_modify(fmgr, paramgram):
    """
    Add, set, update or delete a firewall profile group on the FortiManager.

    :param fmgr: The fmgr object instance from fortimanager.py
    :type fmgr: class object
    :param paramgram: The formatted dictionary of options to process
    :type paramgram: dict
    :return: The response from the FortiManager
    :rtype: dict
    """
    adom = paramgram["adom"]
    mode = paramgram["mode"]
    response = DEFAULT_RESULT_OBJ
    url = ""
    datagram = {}
    if mode in ['set', 'add', 'update']:
        # Creation/update posts the scrubbed paramgram to the collection URL.
        url = '/pm/config/adom/{adom}/obj/firewall/profile-group'.format(adom=adom)
        datagram = scrub_dict(prepare_dict(paramgram))
    elif mode == "delete":
        # Deletion addresses one named profile group with an empty body.
        url = '/pm/config/adom/{adom}/obj/firewall/profile-group/{name}'.format(adom=adom, name=paramgram["name"])
        datagram = {}
    response = fmgr.process_request(url, datagram, paramgram["mode"])
    return response
#############
# END METHODS
#############
def main():
    """Ansible entry point: parse options, connect, and apply the change."""
    # Every security-profile member option is a plain optional string whose
    # FortiManager API key is the Ansible name with underscores as dashes.
    profile_options = [
        "webfilter_profile", "waf_profile", "voip_profile", "ssl_ssh_profile",
        "ssh_filter_profile", "spamfilter_profile", "profile_protocol_options",
        "name", "mms_profile", "ips_sensor", "icap_profile",
        "dnsfilter_profile", "dlp_sensor", "av_profile", "application_list",
    ]
    argument_spec = dict(
        adom=dict(type="str", default="root"),
        mode=dict(choices=["add", "set", "delete", "update"], type="str", default="add"),
    )
    for option in profile_options:
        argument_spec[option] = dict(required=False, type="str")
    module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=False, )
    # MODULE PARAMGRAM -- translated to the dashed keys the API expects.
    paramgram = {
        "mode": module.params["mode"],
        "adom": module.params["adom"],
    }
    for option in profile_options:
        paramgram[option.replace("_", "-")] = module.params[option]
    module.paramgram = paramgram
    fmgr = None
    if module._socket_path:
        connection = Connection(module._socket_path)
        fmgr = FortiManagerHandler(connection, module)
        fmgr.tools = FMGRCommon()
    else:
        # No persistent connection socket means we cannot reach FortiManager.
        module.fail_json(**FAIL_SOCKET_MSG)
    results = DEFAULT_RESULT_OBJ
    try:
        results = fmgr_firewall_profile_group_modify(fmgr, paramgram)
        fmgr.govern_response(module=module, results=results,
                             ansible_facts=fmgr.construct_ansible_facts(results, module.params, paramgram))
    except Exception as err:
        raise FMGBaseException(err)
    return module.exit_json(**results[1])
if __name__ == "__main__":
main()
| gpl-3.0 |
ahmadio/edx-platform | common/lib/symmath/symmath/formula.py | 66 | 25851 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
Flexible python representation of a symbolic mathematical formula.
Acceptes Presentation MathML, Content MathML (and could also do OpenMath).
Provides sympy representation.
"""
#
# File: formula.py
# Date: 04-May-12 (creation)
# Author: I. Chuang <ichuang@mit.edu>
#
import os
import string # pylint: disable=deprecated-module
import re
import logging
import operator
import requests
import sympy
from sympy.printing.latex import LatexPrinter
from sympy.printing.str import StrPrinter
from sympy import latex, sympify
from sympy.physics.quantum.qubit import Qubit
from sympy.physics.quantum.state import Ket
from xml.sax.saxutils import unescape
import unicodedata
from lxml import etree
#import subprocess
from copy import deepcopy
log = logging.getLogger(__name__)
log.warning("Dark code. Needs review before enabling in prod.")
# Force UTF-8 for any subprocess/IO done while converting formulas.
os.environ['PYTHONIOENCODING'] = 'utf-8'
#-----------------------------------------------------------------------------
class dot(sympy.operations.LatticeOp): # pylint: disable=invalid-name, no-member
    """Associative/commutative "dot product" operation (used for qubit math)."""
    # LatticeOp requires a zero (absorbing) and identity element; these
    # placeholder symbols are never meant to appear in user-visible output.
    zero = sympy.Symbol('dotzero')
    identity = sympy.Symbol('dotidentity')
def _print_dot(_self, expr):
    r"""Render a ``dot(a, b)`` node as LaTeX ``{((a) \cdot (b))}``."""
    return r'{((%s) \cdot (%s))}' % (expr.args[0], expr.args[1])
# Monkey-patch so LatexPrinter knows how to print our custom `dot` op.
LatexPrinter._print_dot = _print_dot # pylint: disable=protected-access
#-----------------------------------------------------------------------------
# unit vectors (for 8.02)
def _print_hat(_self, expr):
    r"""Render a ``hat(x)`` unit-vector node as LaTeX ``\hat{x}`` (lowercased)."""
    return '\\hat{%s}' % str(expr.args[0]).lower()
# Monkey-patch both sympy printers so unit vectors print as \hat{...}.
LatexPrinter._print_hat = _print_hat # pylint: disable=protected-access
StrPrinter._print_hat = _print_hat # pylint: disable=protected-access
#-----------------------------------------------------------------------------
# helper routines
def to_latex(expr):
    """
    Convert expression to latex mathjax format.

    Returns '' for None, otherwise the sympy latex rendering wrapped in
    [mathjax]...[/mathjax] markers for the frontend.
    """
    if expr is None:
        return ''
    expr_s = latex(expr)
    expr_s = expr_s.replace(r'\XI', 'XI')  # workaround for strange greek
    # substitute back into latex form for scripts
    # literally something of the form
    # 'scriptN' becomes '\\mathcal{N}'
    # note: can't use something akin to the _print_hat method above because we sometimes get 'script(N)__B' or more complicated terms
    expr_s = re.sub(
        r'script([a-zA-Z0-9]+)',
        '\\mathcal{\\1}',
        expr_s
    )
    #return '<math>%s{}{}</math>' % (xs[1:-1])
    # sympy v6 wrapped its latex output in '$...$'; strip those if present.
    if expr_s[0] == '$':
        return '[mathjax]%s[/mathjax]<br>' % (expr_s[1:-1])  # for sympy v6
    return '[mathjax]%s[/mathjax]<br>' % (expr_s)  # for sympy v7
def my_evalf(expr, chop=False):
    """
    Enhanced sympy evalf to handle lists of expressions
    and catch eval failures without dropping out.

    Returns the input unchanged when numeric evaluation fails
    (best-effort semantics, by design).
    """
    if isinstance(expr, list):
        try:
            return [x.evalf(chop=chop) for x in expr]
        except Exception:
            # Was a bare `except:` -- that also swallowed KeyboardInterrupt
            # and SystemExit; Exception keeps best-effort without doing so.
            return expr
    try:
        return expr.evalf(chop=chop)
    except Exception:
        return expr
def my_sympify(expr, normphase=False, matrix=False, abcsym=False, do_qubit=False, symtab=None):
    """
    Version of sympify to import expression into sympy.

    normphase: if True and the result is a list, remove the overall phase.
    matrix:    if True, convert a rectangular list-of-lists to sympy.Matrix.
    abcsym:    if True, predeclare all lowercase letters as real symbols.
    do_qubit:  if True, enable qubit/Ket/dot/bit constructors.
    symtab:    optional symbol table overriding the default one below.
    """
    # make all lowercase real?
    if symtab:
        varset = symtab
    else:
        # Default symbol table: pins names that sympy would otherwise
        # interpret specially (e.g. I, N, Q) to plain symbols.
        varset = {'p': sympy.Symbol('p'),
                  'g': sympy.Symbol('g'),
                  'e': sympy.E, # for exp
                  'i': sympy.I, # lowercase i is also sqrt(-1)
                  'Q': sympy.Symbol('Q'), # otherwise it is a sympy "ask key"
                  'I': sympy.Symbol('I'), # otherwise it is sqrt(-1)
                  'N': sympy.Symbol('N'), # or it is some kind of sympy function
                  'ZZ': sympy.Symbol('ZZ'), # otherwise it is the PythonIntegerRing
                  'XI': sympy.Symbol('XI'), # otherwise it is the capital \XI
                  'hat': sympy.Function('hat'), # for unit vectors (8.02)
                  }
    if do_qubit: # turn qubit(...) into Qubit instance
        varset.update({'qubit': Qubit,
                       'Ket': Ket,
                       'dot': dot,
                       'bit': sympy.Function('bit'),
                       })
    if abcsym: # consider all lowercase letters as real symbols, in the parsing
        # NOTE: string.lowercase is Python 2 only (string.ascii_lowercase in py3).
        for letter in string.lowercase:
            if letter in varset: # exclude those already done
                continue
            varset.update({letter: sympy.Symbol(letter, real=True)})
    sexpr = sympify(expr, locals=varset)
    if normphase: # remove overall phase if sexpr is a list
        if isinstance(sexpr, list):
            if sexpr[0].is_number:
                ophase = sympy.sympify('exp(-I*arg(%s))' % sexpr[0])
                sexpr = [sympy.Mul(x, ophase) for x in sexpr]
    def to_matrix(expr):
        """
        Convert a list, or list of lists to a matrix.
        """
        # if expr is a list of lists, and is rectangular, then return Matrix(expr)
        if not isinstance(expr, list):
            return expr
        for row in expr:
            if not isinstance(row, list):
                return expr
        rdim = len(expr[0])
        for row in expr:
            if not len(row) == rdim:
                return expr
        return sympy.Matrix(expr)
    if matrix:
        sexpr = to_matrix(sexpr)
    return sexpr
#-----------------------------------------------------------------------------
# class for symbolic mathematical formulas
class formula(object):
    """
    Representation of a mathematical formula object. Accepts mathml math expression
    for constructing, and can produce sympy translation. The formula may or may not
    include an assignment (=).
    """
    def __init__(self, expr, asciimath='', options=None):
        # Surrounding whitespace in the expression is never significant.
        self.expr = expr.strip()
        self.asciimath = asciimath
        # Lazily-computed caches for the content-MathML and sympy forms.
        self.the_cmathml = None
        self.the_sympy = None
        self.options = options
    def is_presentation_mathml(self):
        """
        Check if formula is in mathml presentation format.
        """
        return '<mstyle' in self.expr
    def is_mathml(self):
        """
        Check if formula is in mathml format.
        """
        return '<math ' in self.expr
    def fix_greek_in_mathml(self, xml):
        """
        Recursively replace greek unicode letters in <mi>/<ci> elements with
        their spelled-out ascii names (e.g. "β" -> "beta").  Mutates and
        returns the passed-in xml tree.
        """
        def gettag(expr):
            return re.sub('{http://[^}]+}', '', expr.tag)
        for k in xml:
            tag = gettag(k)
            if tag == 'mi' or tag == 'ci':
                usym = unicode(k.text)
                try:
                    udata = unicodedata.name(usym)
                except Exception:
                    udata = None
                # print "usym = %s, udata=%s" % (usym,udata)
                if udata: # eg "GREEK SMALL LETTER BETA"
                    if 'GREEK' in udata:
                        usym = udata.split(' ')[-1]
                        if 'SMALL' in udata:
                            usym = usym.lower()
                        #print "greek: ",usym
                k.text = usym
            self.fix_greek_in_mathml(k)
        return xml
    def preprocess_pmathml(self, xml):
        r"""
        Pre-process presentation MathML from ASCIIMathML to make it more
        acceptable for SnuggleTeX, and also to accomodate some sympy
        conventions (eg hat(i) for \hat{i}).
        This method would be a good spot to look for an integral and convert
        it, if possible...
        """
        if isinstance(xml, (str, unicode)):
            xml = etree.fromstring(xml) # TODO: wrap in try
        xml = self.fix_greek_in_mathml(xml) # convert greek utf letters to greek spelled out in ascii
        def gettag(expr):
            return re.sub('{http://[^}]+}', '', expr.tag)
        def fix_pmathml(xml):
            """
            f and g are processed as functions by asciimathml, eg "f-2" turns
            into "<mrow><mi>f</mi><mo>-</mo></mrow><mn>2</mn>" this is
            really terrible for turning into cmathml. undo this here.
            """
            for k in xml:
                tag = gettag(k)
                if tag == 'mrow':
                    if len(k) == 2:
                        if gettag(k[0]) == 'mi' and k[0].text in ['f', 'g'] and gettag(k[1]) == 'mo':
                            idx = xml.index(k)
                            xml.insert(idx, deepcopy(k[0])) # drop the <mrow> container
                            xml.insert(idx + 1, deepcopy(k[1]))
                            xml.remove(k)
                fix_pmathml(k)
        fix_pmathml(xml)
        def fix_hat(xml):
            """
            hat i is turned into <mover><mi>i</mi><mo>^</mo></mover> ; mangle
            this into <mi>hat(f)</mi> hat i also somtimes turned into
            <mover><mrow> <mi>j</mi> </mrow><mo>^</mo></mover>
            """
            for k in xml:
                tag = gettag(k)
                if tag == 'mover':
                    if len(k) == 2:
                        if gettag(k[0]) == 'mi' and gettag(k[1]) == 'mo' and str(k[1].text) == '^':
                            newk = etree.Element('mi')
                            newk.text = 'hat(%s)' % k[0].text
                            xml.replace(k, newk)
                        if gettag(k[0]) == 'mrow' and gettag(k[0][0]) == 'mi' and gettag(k[1]) == 'mo' and str(k[1].text) == '^':
                            newk = etree.Element('mi')
                            newk.text = 'hat(%s)' % k[0][0].text
                            xml.replace(k, newk)
                fix_hat(k)
        fix_hat(xml)
        def flatten_pmathml(xml):
            """
            Give the text version of certain PMathML elements
            Sometimes MathML will be given with each letter separated (it
            doesn't know if its implicit multiplication or what). From an xml
            node, find the (text only) variable name it represents. So it takes
            <mrow>
                <mi>m</mi>
                <mi>a</mi>
                <mi>x</mi>
            </mrow>
            and returns 'max', for easier use later on.
            """
            tag = gettag(xml)
            if tag == 'mn':
                return xml.text
            elif tag == 'mi':
                return xml.text
            elif tag == 'mrow':
                return ''.join([flatten_pmathml(y) for y in xml])
            raise Exception('[flatten_pmathml] unknown tag %s' % tag)
        def fix_mathvariant(parent):
            """
            Fix certain kinds of math variants
            Literally replace <mstyle mathvariant="script"><mi>N</mi></mstyle>
            with 'scriptN'. There have been problems using script_N or script(N)
            """
            for child in parent:
                if gettag(child) == 'mstyle' and child.get('mathvariant') == 'script':
                    newchild = etree.Element('mi')
                    newchild.text = 'script%s' % flatten_pmathml(child[0])
                    parent.replace(child, newchild)
                fix_mathvariant(child)
        fix_mathvariant(xml)
        # find "tagged" superscripts
        # they have the character \u200b in the superscript
        # replace them with a__b so snuggle doesn't get confused
        def fix_superscripts(xml):
            """ Look for and replace sup elements with 'X__Y' or 'X_Y__Z'
            In the javascript, variables with '__X' in them had an invisible
            zero-width-space character (U+200B) inserted into the sup (to
            distinguish them from powers).
            E.g. normal:
            <msubsup>
                <mi>a</mi>
                <mi>b</mi>
                <mi>c</mi>
            </msubsup>
            is interpreted '(a_b)^c' (nothing done by this method), while a
            superscript whose <mrow> starts with <mo> containing U+200B is
            interpreted 'a_b__c'; similarly an <msup> whose exponent <mrow>
            starts with that marker becomes 'x__B'.
            """
            for k in xml:
                tag = gettag(k)
                # match things like the last example--
                # the second item in msub is an mrow with the first
                # character equal to \u200b
                if (
                        tag == 'msup' and
                        len(k) == 2 and gettag(k[1]) == 'mrow' and
                        gettag(k[1][0]) == 'mo' and k[1][0].text == u'\u200b' # whew
                ):
                    # replace the msup with 'X__Y'
                    k[1].remove(k[1][0])
                    newk = etree.Element('mi')
                    newk.text = '%s__%s' % (flatten_pmathml(k[0]), flatten_pmathml(k[1]))
                    xml.replace(k, newk)
                # match things like the middle example-
                # the third item in msubsup is an mrow with the first
                # character equal to \u200b
                if (
                        tag == 'msubsup' and
                        len(k) == 3 and gettag(k[2]) == 'mrow' and
                        gettag(k[2][0]) == 'mo' and k[2][0].text == u'\u200b' # whew
                ):
                    # replace the msubsup with 'X_Y__Z'
                    k[2].remove(k[2][0])
                    newk = etree.Element('mi')
                    newk.text = '%s_%s__%s' % (flatten_pmathml(k[0]), flatten_pmathml(k[1]), flatten_pmathml(k[2]))
                    xml.replace(k, newk)
                fix_superscripts(k)
        fix_superscripts(xml)
        def fix_msubsup(parent):
            """
            Snuggle returns an error when it sees an <msubsup> replace such
            elements with an <msup>, except the first element is of
            the form a_b. I.e. map a_b^c => (a_b)^c
            """
            for child in parent:
                # fix msubsup
                if gettag(child) == 'msubsup' and len(child) == 3:
                    newchild = etree.Element('msup')
                    newbase = etree.Element('mi')
                    newbase.text = '%s_%s' % (flatten_pmathml(child[0]), flatten_pmathml(child[1]))
                    newexp = child[2]
                    newchild.append(newbase)
                    newchild.append(newexp)
                    parent.replace(child, newchild)
                fix_msubsup(child)
        fix_msubsup(xml)
        self.xml = xml # pylint: disable=attribute-defined-outside-init
        return self.xml
    def get_content_mathml(self):
        """Return (and cache) the Content MathML form of this formula.

        Pre-processes the presentation MathML locally, then delegates the
        actual conversion to the remote snuggletex service.
        """
        if self.the_cmathml:
            return self.the_cmathml
        # pre-process the presentation mathml before sending it to snuggletex to convert to content mathml
        try:
            xml = self.preprocess_pmathml(self.expr)
        except Exception, err:
            log.warning('Err %s while preprocessing; expr=%s', err, self.expr)
            return "<html>Error! Cannot process pmathml</html>"
        pmathml = etree.tostring(xml, pretty_print=True)
        self.the_pmathml = pmathml # pylint: disable=attribute-defined-outside-init
        # convert to cmathml
        self.the_cmathml = self.GetContentMathML(self.asciimath, pmathml)
        return self.the_cmathml
    cmathml = property(get_content_mathml, None, None, 'content MathML representation')
    def make_sympy(self, xml=None):
        """
        Return sympy expression for the math formula.
        The math formula is converted to Content MathML then that is parsed.
        This is a recursive function, called on every CMML node. Support for
        more functions can be added by modifying opdict, abould halfway down
        """
        if self.the_sympy:
            return self.the_sympy
        if xml is None: # root
            if not self.is_mathml():
                return my_sympify(self.expr)
            if self.is_presentation_mathml():
                cmml = None
                try:
                    cmml = self.cmathml
                    xml = etree.fromstring(str(cmml))
                except Exception, err:
                    if 'conversion from Presentation MathML to Content MathML was not successful' in cmml:
                        msg = "Illegal math expression"
                    else:
                        msg = 'Err %s while converting cmathml to xml; cmml=%s' % (err, cmml)
                    raise Exception(msg)
                xml = self.fix_greek_in_mathml(xml)
                self.the_sympy = self.make_sympy(xml[0])
            else:
                xml = etree.fromstring(self.expr)
                xml = self.fix_greek_in_mathml(xml)
                self.the_sympy = self.make_sympy(xml[0])
            return self.the_sympy
        def gettag(expr):
            return re.sub('{http://[^}]+}', '', expr.tag)
        # simple math
        def op_divide(*args):
            if not len(args) == 2:
                raise Exception('divide given wrong number of arguments!')
            # print "divide: arg0=%s, arg1=%s" % (args[0],args[1])
            return sympy.Mul(args[0], sympy.Pow(args[1], -1))
        def op_plus(*args):
            return args[0] if len(args) == 1 else op_plus(*args[:-1]) + args[-1]
        def op_times(*args):
            # NOTE: relies on the py2 builtin `reduce`.
            return reduce(operator.mul, args)
        def op_minus(*args):
            if len(args) == 1:
                return -args[0]
            if not len(args) == 2:
                raise Exception('minus given wrong number of arguments!')
            #return sympy.Add(args[0],-args[1])
            return args[0] - args[1]
        # Dispatch table from CMML operator tag name to the sympy callable.
        opdict = {
            'plus': op_plus,
            'divide': operator.div, # should this be op_divide?  (operator.div is py2-only)
            'times': op_times,
            'minus': op_minus,
            'root': sympy.sqrt,
            'power': sympy.Pow,
            'sin': sympy.sin,
            'cos': sympy.cos,
            'tan': sympy.tan,
            'cot': sympy.cot,
            'sinh': sympy.sinh,
            'cosh': sympy.cosh,
            'coth': sympy.coth,
            'tanh': sympy.tanh,
            'asin': sympy.asin,
            'acos': sympy.acos,
            'atan': sympy.atan,
            'atan2': sympy.atan2,
            'acot': sympy.acot,
            'asinh': sympy.asinh,
            'acosh': sympy.acosh,
            'atanh': sympy.atanh,
            'acoth': sympy.acoth,
            'exp': sympy.exp,
            'log': sympy.log,
            'ln': sympy.ln,
        }
        # simple symbols - TODO is this code used?
        nums1dict = {
            'pi': sympy.pi,
        }
        def parsePresentationMathMLSymbol(xml):
            """
            Parse <msub>, <msup>, <mi>, and <mn>
            """
            tag = gettag(xml)
            if tag == 'mn':
                return xml.text
            elif tag == 'mi':
                return xml.text
            elif tag == 'msub':
                return '_'.join([parsePresentationMathMLSymbol(y) for y in xml])
            elif tag == 'msup':
                return '^'.join([parsePresentationMathMLSymbol(y) for y in xml])
            raise Exception('[parsePresentationMathMLSymbol] unknown tag %s' % tag)
        # parser tree for Content MathML
        tag = gettag(xml)
        # first do compound objects
        if tag == 'apply': # apply operator
            opstr = gettag(xml[0])
            if opstr in opdict:
                op = opdict[opstr] # pylint: disable=invalid-name
                args = [self.make_sympy(expr) for expr in xml[1:]]
                try:
                    res = op(*args)
                except Exception, err:
                    self.args = args # pylint: disable=attribute-defined-outside-init
                    self.op = op # pylint: disable=attribute-defined-outside-init, invalid-name
                    raise Exception('[formula] error=%s failed to apply %s to args=%s' % (err, opstr, args))
                return res
            else:
                raise Exception('[formula]: unknown operator tag %s' % (opstr))
        elif tag == 'list': # square bracket list
            if gettag(xml[0]) == 'matrix':
                return self.make_sympy(xml[0])
            else:
                return [self.make_sympy(expr) for expr in xml]
        elif tag == 'matrix':
            return sympy.Matrix([self.make_sympy(expr) for expr in xml])
        elif tag == 'vector':
            return [self.make_sympy(expr) for expr in xml]
        # atoms are below
        elif tag == 'cn': # number
            return sympy.sympify(xml.text)
            # return float(xml.text)
        elif tag == 'ci': # variable (symbol)
            if len(xml) > 0 and (gettag(xml[0]) == 'msub' or gettag(xml[0]) == 'msup'): # subscript or superscript
                usym = parsePresentationMathMLSymbol(xml[0])
                sym = sympy.Symbol(str(usym))
            else:
                usym = unicode(xml.text)
                if 'hat' in usym:
                    sym = my_sympify(usym)
                else:
                    if usym == 'i' and self.options is not None and 'imaginary' in self.options: # i = sqrt(-1)
                        sym = sympy.I
                    else:
                        sym = sympy.Symbol(str(usym))
            return sym
        else: # unknown tag
            raise Exception('[formula] unknown tag %s' % tag)
    sympy = property(make_sympy, None, None, 'sympy representation')
    def GetContentMathML(self, asciimath, mathml):
        """
        Handle requests to snuggletex API to convert the Ascii math to MathML
        """
        # url = 'http://192.168.1.2:8080/snuggletex-webapp-1.2.2/ASCIIMathMLUpConversionDemo'
        # url = 'http://127.0.0.1:8080/snuggletex-webapp-1.2.2/ASCIIMathMLUpConversionDemo'
        url = 'https://math-xserver.mitx.mit.edu/snuggletex-webapp-1.2.2/ASCIIMathMLUpConversionDemo'
        if 1:
            payload = {
                'asciiMathInput': asciimath,
                'asciiMathML': mathml,
                #'asciiMathML':unicode(mathml).encode('utf-8'),
            }
            headers = {'User-Agent': "Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US; rv:1.8.1.13) Gecko/20080311 Firefox/2.0.0.13"}
            # NOTE(security/review): verify=False disables TLS certificate
            # checking for this request -- confirm whether this is still needed.
            request = requests.post(url, data=payload, headers=headers, verify=False)
            request.encoding = 'utf-8'
            ret = request.text
            # print "encoding: ", request.encoding
        # Scrape the Content MathML section out of the returned HTML page.
        mode = 0
        cmathml = []
        for k in ret.split('\n'):
            if 'conversion to Content MathML' in k:
                mode = 1
                continue
            if mode == 1:
                if '<h3>Maxima Input Form</h3>' in k:
                    mode = 0
                    continue
                cmathml.append(k)
        cmathml = '\n'.join(cmathml[2:])
        cmathml = '<math xmlns="http://www.w3.org/1998/Math/MathML">\n' + unescape(cmathml) + '\n</math>'
        #print cmathml
        return cmathml
#-----------------------------------------------------------------------------
def test1():
    """Content MathML: 1 + 2."""
    xmlstr = """
<math xmlns="http://www.w3.org/1998/Math/MathML">
  <apply>
    <plus/>
    <cn>1</cn>
    <cn>2</cn>
  </apply>
</math>
        """
    return formula(xmlstr)
def test2():
    """Content MathML: 1 + 2*alpha (Greek alpha as unicode)."""
    xmlstr = u"""
<math xmlns="http://www.w3.org/1998/Math/MathML">
  <apply>
    <plus/>
    <cn>1</cn>
    <apply>
      <times/>
      <cn>2</cn>
      <ci>α</ci>
    </apply>
  </apply>
</math>
        """
    return formula(xmlstr)
def test3():
    """Content MathML: 1 / (2 + gamma) -- division, Greek gamma as unicode."""
    xmlstr = """
<math xmlns="http://www.w3.org/1998/Math/MathML">
  <apply>
    <divide/>
    <cn>1</cn>
    <apply>
      <plus/>
      <cn>2</cn>
      <ci>γ</ci>
    </apply>
  </apply>
</math>
        """
    return formula(xmlstr)
def test4():
    """Presentation MathML: 1 + 2/alpha, using <mfrac>."""
    xmlstr = u"""
<math xmlns="http://www.w3.org/1998/Math/MathML">
  <mstyle displaystyle="true">
    <mn>1</mn>
    <mo>+</mo>
    <mfrac>
      <mn>2</mn>
      <mi>α</mi>
    </mfrac>
  </mstyle>
</math>
        """
    return formula(xmlstr)
def test5():
    """Presentation MathML: cos(theta) * identity matrix + antidiagonal matrix."""
    xmlstr = u"""
<math xmlns="http://www.w3.org/1998/Math/MathML">
  <mstyle displaystyle="true">
    <mrow>
      <mi>cos</mi>
      <mrow>
        <mo>(</mo>
        <mi>θ</mi>
        <mo>)</mo>
      </mrow>
    </mrow>
    <mo>⋅</mo>
    <mrow>
      <mo>[</mo>
      <mtable>
        <mtr>
          <mtd>
            <mn>1</mn>
          </mtd>
          <mtd>
            <mn>0</mn>
          </mtd>
        </mtr>
        <mtr>
          <mtd>
            <mn>0</mn>
          </mtd>
          <mtd>
            <mn>1</mn>
          </mtd>
        </mtr>
      </mtable>
      <mo>]</mo>
    </mrow>
    <mo>+</mo>
    <mrow>
      <mo>[</mo>
      <mtable>
        <mtr>
          <mtd>
            <mn>0</mn>
          </mtd>
          <mtd>
            <mn>1</mn>
          </mtd>
        </mtr>
        <mtr>
          <mtd>
            <mn>1</mn>
          </mtd>
          <mtd>
            <mn>0</mn>
          </mtd>
        </mtr>
      </mtable>
      <mo>]</mo>
    </mrow>
  </mstyle>
</math>
        """
    return formula(xmlstr)
def test6():
    """Presentation MathML: 1 + i with the 'imaginary' option enabled."""
    xmlstr = u"""
<math xmlns="http://www.w3.org/1998/Math/MathML">
  <mstyle displaystyle="true">
    <mn>1</mn>
    <mo>+</mo>
    <mi>i</mi>
  </mstyle>
</math>
        """
    return formula(xmlstr, options='imaginary')
| agpl-3.0 |
KurtDeGreeff/infernal-twin | build/pip/pip/_vendor/colorama/winterm.py | 442 | 5732 | # Copyright Jonathan Hartley 2013. BSD 3-Clause license, see LICENSE file.
from . import win32
# from wincon.h
class WinColor(object):
    # Win32 console color indices (low three attribute bits), from wincon.h.
    BLACK = 0
    BLUE = 1
    GREEN = 2
    CYAN = 3
    RED = 4
    MAGENTA = 5
    YELLOW = 6
    GREY = 7
# from wincon.h
class WinStyle(object):
    # Win32 console intensity bits, from wincon.h.
    NORMAL = 0x00 # dim text, dim background
    BRIGHT = 0x08 # bright text, dim background
    BRIGHT_BACKGROUND = 0x80 # dim text, bright background
class WinTerm(object):
def __init__(self):
self._default = win32.GetConsoleScreenBufferInfo(win32.STDOUT).wAttributes
self.set_attrs(self._default)
self._default_fore = self._fore
self._default_back = self._back
self._default_style = self._style
def get_attrs(self):
return self._fore + self._back * 16 + self._style
def set_attrs(self, value):
self._fore = value & 7
self._back = (value >> 4) & 7
self._style = value & (WinStyle.BRIGHT | WinStyle.BRIGHT_BACKGROUND)
def reset_all(self, on_stderr=None):
self.set_attrs(self._default)
self.set_console(attrs=self._default)
def fore(self, fore=None, light=False, on_stderr=False):
if fore is None:
fore = self._default_fore
self._fore = fore
if light:
self._style |= WinStyle.BRIGHT
self.set_console(on_stderr=on_stderr)
def back(self, back=None, light=False, on_stderr=False):
if back is None:
back = self._default_back
self._back = back
if light:
self._style |= WinStyle.BRIGHT_BACKGROUND
self.set_console(on_stderr=on_stderr)
def style(self, style=None, on_stderr=False):
if style is None:
style = self._default_style
self._style = style
self.set_console(on_stderr=on_stderr)
def set_console(self, attrs=None, on_stderr=False):
if attrs is None:
attrs = self.get_attrs()
handle = win32.STDOUT
if on_stderr:
handle = win32.STDERR
win32.SetConsoleTextAttribute(handle, attrs)
def get_position(self, handle):
position = win32.GetConsoleScreenBufferInfo(handle).dwCursorPosition
# Because Windows coordinates are 0-based,
# and win32.SetConsoleCursorPosition expects 1-based.
position.X += 1
position.Y += 1
return position
def set_cursor_position(self, position=None, on_stderr=False):
if position is None:
#I'm not currently tracking the position, so there is no default.
#position = self.get_position()
return
handle = win32.STDOUT
if on_stderr:
handle = win32.STDERR
win32.SetConsoleCursorPosition(handle, position)
def cursor_adjust(self, x, y, on_stderr=False):
handle = win32.STDOUT
if on_stderr:
handle = win32.STDERR
position = self.get_position(handle)
adjusted_position = (position.Y + y, position.X + x)
win32.SetConsoleCursorPosition(handle, adjusted_position, adjust=False)
def erase_screen(self, mode=0, on_stderr=False):
# 0 should clear from the cursor to the end of the screen.
# 1 should clear from the cursor to the beginning of the screen.
# 2 should clear the entire screen, and move cursor to (1,1)
handle = win32.STDOUT
if on_stderr:
handle = win32.STDERR
csbi = win32.GetConsoleScreenBufferInfo(handle)
# get the number of character cells in the current buffer
cells_in_screen = csbi.dwSize.X * csbi.dwSize.Y
# get number of character cells before current cursor position
cells_before_cursor = csbi.dwSize.X * csbi.dwCursorPosition.Y + csbi.dwCursorPosition.X
if mode == 0:
from_coord = csbi.dwCursorPosition
cells_to_erase = cells_in_screen - cells_before_cursor
if mode == 1:
from_coord = win32.COORD(0, 0)
cells_to_erase = cells_before_cursor
elif mode == 2:
from_coord = win32.COORD(0, 0)
cells_to_erase = cells_in_screen
# fill the entire screen with blanks
win32.FillConsoleOutputCharacter(handle, ' ', cells_to_erase, from_coord)
# now set the buffer's attributes accordingly
win32.FillConsoleOutputAttribute(handle, self.get_attrs(), cells_to_erase, from_coord)
if mode == 2:
# put the cursor where needed
win32.SetConsoleCursorPosition(handle, (1, 1))
def erase_line(self, mode=0, on_stderr=False):
# 0 should clear from the cursor to the end of the line.
# 1 should clear from the cursor to the beginning of the line.
# 2 should clear the entire line.
handle = win32.STDOUT
if on_stderr:
handle = win32.STDERR
csbi = win32.GetConsoleScreenBufferInfo(handle)
if mode == 0:
from_coord = csbi.dwCursorPosition
cells_to_erase = csbi.dwSize.X - csbi.dwCursorPosition.X
if mode == 1:
from_coord = win32.COORD(0, csbi.dwCursorPosition.Y)
cells_to_erase = csbi.dwCursorPosition.X
elif mode == 2:
from_coord = win32.COORD(0, csbi.dwCursorPosition.Y)
cells_to_erase = csbi.dwSize.X
# fill the entire screen with blanks
win32.FillConsoleOutputCharacter(handle, ' ', cells_to_erase, from_coord)
# now set the buffer's attributes accordingly
win32.FillConsoleOutputAttribute(handle, self.get_attrs(), cells_to_erase, from_coord)
    def set_title(self, title):
        """Set the console window title via the Win32 console API."""
        win32.SetConsoleTitle(title)
| gpl-3.0 |
ppries/tensorflow | tensorflow/python/framework/op_def_library_test.py | 21 | 65553 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.python.ops.op_def_library."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from google.protobuf import text_format
from tensorflow.core.framework import op_def_pb2
from tensorflow.core.framework import tensor_shape_pb2
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import test_util
from tensorflow.python.framework.op_def_library import OpDefLibrary
from tensorflow.python.platform import googletest
def _unknown_shape(op):
  """Shape function for use with ops whose output shapes are unknown."""
  shapes = []
  for _ in op.outputs:
    shapes.append(tensor_shape.unknown_shape())
  return shapes
# NOTE(mrry): Dummy shape registrations for ops used in the tests, since they
# don't have C++ op registrations on which to attach C++ shape fns.
# The ops are listed once and registered in a loop instead of repeating the
# same `ops.RegisterShape(...)(_unknown_shape)` statement 49 times.
_OPS_WITH_UNKNOWN_SHAPE = (
    "Attr",
    "AttrBool",
    "AttrBoolList",
    "AttrDefault",
    "AttrEmptyListDefault",
    "AttrEnum",
    "AttrEnumList",
    "AttrFloat",
    "AttrListDefault",
    "AttrListMin",
    "AttrMin",
    "AttrShape",
    "AttrShapeList",
    "AttrPartialShape",
    "AttrPartialShapeList",
    "AttrTypeDefault",
    "AttrListTypeDefault",
    "Binary",
    "ComplexStruct",
    "InPolymorphicTwice",
    "MixedStruct",
    "NInPolymorphicTwice",
    "NInTwice",
    "NInTwoTypeVariables",
    "NIntsIn",
    "NIntsOut",
    "NIntsOutDefault",
    "NPolymorphicIn",
    "NPolymorphicOut",
    "NPolymorphicOutDefault",
    "NPolymorphicRestrictIn",
    "NPolymorphicRestrictOut",
    "OutT",
    "OutTypeList",
    "OutTypeListRestrict",
    "Polymorphic",
    "PolymorphicDefaultOut",
    "PolymorphicOut",
    "RefIn",
    "RefOut",
    "ReservedAttr",
    "ReservedInput",
    "Restrict",
    "Simple",
    "SimpleStruct",
    "TwoRefsIn",
    "TypeList",
    "TypeListRestrict",
    "TypeListTwice",
)
for _op_name in _OPS_WITH_UNKNOWN_SHAPE:
  ops.RegisterShape(_op_name)(_unknown_shape)
class OpDefLibraryTest(test_util.TensorFlowTestCase):
  def setUp(self):
    """Install a fresh OpDefLibrary and enter a new default Graph."""
    self._lib = OpDefLibrary()
    self._g = ops.Graph()
    # Entered here and exited in tearDown so every test gets its own graph.
    self._default_graph_controller = self._g.as_default()
    self._default_graph_controller.__enter__()
    # Register two basic ops used as building blocks by many tests below.
    self._add_op("name: 'Simple' input_arg { name: 'a' type: DT_INT32 } "
                 "output_arg { name: 'out' type: DT_FLOAT }")
    self._add_op("name: 'OutT' output_arg { name: 'a' type_attr: 'T' } "
                 "attr { name: 'T' type: 'type' }")
  def tearDown(self):
    """Exit the default-graph context entered in setUp."""
    self._default_graph_controller.__exit__(None, None, None)
def _add_op(self, ascii):
op_def = op_def_pb2.OpDef()
text_format.Merge(ascii, op_def)
self._lib.add_op(op_def)
def Tensor(self, t, name="in"):
return self._lib.apply_op("OutT", T=t, name=name)
def testNoRegisteredOpFails(self):
with self.assertRaises(RuntimeError) as cm:
self._lib.apply_op("unknown")
self.assertEqual(str(cm.exception), "Unrecognized Op name unknown")
def testAddOpValidation(self):
with self.assertRaises(TypeError) as cm:
self._add_op("name: 'MissingTypeAttr' "
"input_arg { name: 'a' type_attr: 'T' } ")
self.assertEqual(str(cm.exception),
"Inconsistent OpDef for 'MissingTypeAttr', "
"missing attr 'T'")
with self.assertRaises(TypeError) as cm:
self._add_op("name: 'BadTypeAttr' "
"output_arg { name: 'a' type_attr: 'T' } "
"attr { name: 'T' type: 'int' }")
self.assertEqual(
str(cm.exception),
"Attr 'T' of 'BadTypeAttr' used as a type_attr but has type int")
with self.assertRaises(TypeError) as cm:
self._add_op("name: 'MissingNumberAttr' "
"input_arg { name: 'a' type: DT_INT32 number_attr: 'N' } ")
self.assertEqual(str(cm.exception),
"Inconsistent OpDef for 'MissingNumberAttr', "
"missing attr 'N'")
with self.assertRaises(TypeError) as cm:
self._add_op("name: 'BadNumberAttr' "
"output_arg { name: 'a' type: DT_INT32 number_attr: 'N' } "
"attr { name: 'N' type: 'type' }")
self.assertEqual(
str(cm.exception),
"Attr 'N' of 'BadNumberAttr' used as a number_attr but has type type")
with self.assertRaises(TypeError) as cm:
self._add_op("name: 'TwoTypesA' "
"input_arg { name: 'a' type: DT_INT32 type_attr: 'T' } "
"attr { name: 'T' type: 'type' }")
self.assertEqual(str(cm.exception),
"Arg 'a' of 'TwoTypesA' must have one type field not 2")
with self.assertRaises(TypeError) as cm:
self._add_op("name: 'TwoTypesB' "
"input_arg { name: 'a' type: DT_INT32 type_list_attr: 'T' } "
"attr { name: 'T' type: 'list(type)' }")
self.assertEqual(str(cm.exception),
"Arg 'a' of 'TwoTypesB' must have one type field not 2")
with self.assertRaises(TypeError) as cm:
self._add_op("name: 'ThreeTypes' "
"input_arg { name: 'a' type: DT_INT32 type_attr: 'T' "
"type_list_attr: 'U' } "
"attr { name: 'T' type: 'type' } "
"attr { name: 'U' type: 'list(type)' }")
self.assertEqual(str(cm.exception),
"Arg 'a' of 'ThreeTypes' must have one type field not 3")
with self.assertRaises(TypeError) as cm:
self._add_op("name: 'NoTypes' output_arg { name: 'a' } ")
self.assertEqual(str(cm.exception),
"Arg 'a' of 'NoTypes' must have one type field not 0")
def testSimple(self):
out = self._lib.apply_op("Simple", a=3)
self.assertEqual(dtypes.float32, out.dtype)
self.assertProtoEquals("""
name: 'Simple' op: 'Simple' input: 'Simple/a'
""", out.op.node_def)
out = self._lib.apply_op("Simple", a=4)
self.assertProtoEquals("""
name: 'Simple_1' op: 'Simple' input: 'Simple_1/a'
""", out.op.node_def)
out = self._lib.apply_op("Simple", a=5, name="named")
self.assertProtoEquals("""
name: 'named' op: 'Simple' input: 'named/a'
""", out.op.node_def)
out = self._lib.apply_op("Simple", a=[[1, 2, 3], [4, 5, 6]], name="two_d")
self.assertProtoEquals("""
name: 'two_d' op: 'Simple' input: 'two_d/a'
""", out.op.node_def)
def testSimpleFailures(self):
with self.assertRaises(TypeError) as cm:
self._lib.apply_op("Simple", a="Bad string")
self.assertEqual(str(cm.exception),
"Expected int32 passed to parameter 'a' of op 'Simple', "
"got 'Bad string' of type 'str' instead.")
with self.assertRaises(TypeError) as cm:
self._lib.apply_op("Simple", a=self.Tensor(dtypes.string))
self.assertEqual(str(cm.exception),
"Input 'a' of 'Simple' Op has type string "
"that does not match expected type of int32.")
with self.assertRaises(TypeError) as cm:
self._lib.apply_op("Simple", a=6, extra="bogus")
self.assertEqual(str(cm.exception),
"apply_op() got unexpected keyword arguments: extra")
with self.assertRaises(TypeError) as cm:
self._lib.apply_op("Simple", a=6, extra1="bogus", extra2="also_bogus")
self.assertEqual(str(cm.exception),
"apply_op() got unexpected keyword arguments: extra1, "
"extra2")
with self.assertRaises(TypeError) as cm:
self._lib.apply_op("Simple")
self.assertEqual(str(cm.exception), "No argument for input a")
with self.assertRaises(TypeError) as cm:
self._lib.apply_op("Simple", wrong=7)
self.assertEqual(str(cm.exception), "No argument for input a")
with self.assertRaises(TypeError) as cm:
self._lib.apply_op("Simple", a={"label": 1})
self.assertEqual(str(cm.exception),
"Expected int32 passed to parameter 'a' of op 'Simple', "
"got {'label': 1} of type 'dict' instead.")
def testReservedInput(self):
self._add_op("name: 'ReservedInput' "
"input_arg { name: 'input' type: DT_INT32 } ")
op = self._lib.apply_op("ReservedInput", input_=7, name="x")
self.assertProtoEquals("""
name: 'x' op: 'ReservedInput' input: 'x/input'
""", op.node_def)
def testPolymorphic(self):
self._add_op("name: 'Polymorphic' "
"input_arg { name: 'a' type_attr: 'T' } "
"output_arg { name: 'out' type_attr: 'T' } "
"attr { name: 'T' type: 'type' }")
out = self._lib.apply_op("Polymorphic", a=7, name="p")
self.assertEqual(dtypes.int32, out.dtype)
self.assertProtoEquals("""
name: 'p' op: 'Polymorphic' input: 'p/a'
attr { key: 'T' value { type: DT_INT32 } }
""", out.op.node_def)
out = self._lib.apply_op("Polymorphic", a="s", name="q")
self.assertEqual(dtypes.string, out.dtype)
self.assertProtoEquals("""
name: 'q' op: 'Polymorphic' input: 'q/a'
attr { key: 'T' value { type: DT_STRING } }
""", out.op.node_def)
out = self._lib.apply_op("Polymorphic", a=["s", "t", "u"], name="r")
self.assertEqual(dtypes.string, out.dtype)
self.assertProtoEquals("""
name: 'r' op: 'Polymorphic' input: 'r/a'
attr { key: 'T' value { type: DT_STRING } }
""", out.op.node_def)
with self.assertRaises(TypeError) as cm:
self._lib.apply_op("Polymorphic", a="s", T=dtypes.string)
self.assertEqual(str(cm.exception),
"Should not specify value for inferred attr 'T'.")
def testPolymorphicOut(self):
self._add_op("name: 'PolymorphicOut' "
"output_arg { name: 'out' type_attr: 'T' } "
"attr { name: 'T' type: 'type' }")
out = self._lib.apply_op("PolymorphicOut", T=dtypes.int32, name="p")
self.assertEqual(dtypes.int32, out.dtype)
self.assertProtoEquals("""
name: 'p' op: 'PolymorphicOut'
attr { key: 'T' value { type: DT_INT32 } }
""", out.op.node_def)
out = self._lib.apply_op("PolymorphicOut", T=dtypes.bool, name="q")
self.assertEqual(dtypes.bool, out.dtype)
self.assertProtoEquals("""
name: 'q' op: 'PolymorphicOut'
attr { key: 'T' value { type: DT_BOOL } }
""", out.op.node_def)
with self.assertRaises(TypeError) as cm:
self._lib.apply_op("PolymorphicOut")
self.assertEqual(str(cm.exception),
"No argument for attr T")
with self.assertRaises(TypeError) as cm:
self._lib.apply_op("PolymorphicOut", T=None)
self.assertEqual(str(cm.exception),
"Expected DataType for argument 'T' not None.")
def testPolymorphicDefaultOut(self):
self._add_op("name: 'PolymorphicDefaultOut' "
"output_arg { name: 'out' type_attr: 'T' } "
"attr { name: 'T' type: 'type' "
" default_value { type: DT_STRING } }")
out = self._lib.apply_op("PolymorphicDefaultOut", T=None, name="p")
self.assertEqual(dtypes.string, out.dtype)
self.assertProtoEquals("""
name: 'p' op: 'PolymorphicDefaultOut'
attr { key: 'T' value { type: DT_STRING } }
""", out.op.node_def)
out = self._lib.apply_op("PolymorphicDefaultOut", T=dtypes.bool, name="q")
self.assertEqual(dtypes.bool, out.dtype)
self.assertProtoEquals("""
name: 'q' op: 'PolymorphicDefaultOut'
attr { key: 'T' value { type: DT_BOOL } }
""", out.op.node_def)
def testBinary(self):
self._add_op("name: 'Binary' "
"input_arg { name: 'a' type_attr: 'T' } "
"input_arg { name: 'b' type_attr: 'T' } "
"output_arg { name: 'out' type_attr: 'T' } "
"attr { name: 'T' type: 'type' }")
out = self._lib.apply_op("Binary", a=8, b=9, name="b")
self.assertEqual(dtypes.int32, out.dtype)
self.assertProtoEquals("""
name: 'b' op: 'Binary' input: 'b/a' input: 'b/b'
attr { key: 'T' value { type: DT_INT32 } }
""", out.op.node_def)
out = self._lib.apply_op("Binary", a="left", b="right", name="c")
self.assertEqual(dtypes.string, out.dtype)
self.assertProtoEquals("""
name: 'c' op: 'Binary' input: 'c/a' input: 'c/b'
attr { key: 'T' value { type: DT_STRING } }
""", out.op.node_def)
with self.assertRaises(TypeError) as cm:
self._lib.apply_op("Binary", a="left", b=12)
self.assertEqual(str(cm.exception),
"Expected string passed to parameter 'b' of op 'Binary', "
"got 12 of type 'int' instead.")
with self.assertRaises(TypeError) as cm:
self._lib.apply_op("Binary",
a=self.Tensor(dtypes.string),
b=self.Tensor(dtypes.int32))
self.assertEqual(str(cm.exception),
"Input 'b' of 'Binary' Op has type int32 "
"that does not match type string of argument 'a'.")
def testRestrict(self):
self._add_op("name: 'Restrict' "
"input_arg { name: 'a' type_attr: 'T' } "
"output_arg { name: 'out' type_attr: 'T' } "
"attr { name: 'T' type: 'type' allowed_values { list { "
" type: DT_STRING type: DT_BOOL } } }")
out = self._lib.apply_op("Restrict", a="foo", name="g")
self.assertEqual(dtypes.string, out.dtype)
self.assertProtoEquals("""
name: 'g' op: 'Restrict' input: 'g/a'
attr { key: 'T' value { type: DT_STRING } }
""", out.op.node_def)
out = self._lib.apply_op("Restrict", a=True, name="h")
self.assertEqual(dtypes.bool, out.dtype)
self.assertProtoEquals("""
name: 'h' op: 'Restrict' input: 'h/a'
attr { key: 'T' value { type: DT_BOOL } }
""", out.op.node_def)
with self.assertRaises(TypeError) as cm:
self._lib.apply_op("Restrict", a=17)
self.assertEqual(str(cm.exception),
"Value passed to parameter 'a' has DataType int32 "
"not in list of allowed values: string, bool")
def testTypeList(self):
self._add_op("name: 'TypeList' "
"input_arg { name: 'a' type_list_attr: 'T' } "
"attr { name: 'T' type: 'list(type)' }")
op = self._lib.apply_op("TypeList", a=["foo"], name="z")
self.assertProtoEquals("""
name: 'z' op: 'TypeList' input: 'z/a_0'
attr { key: 'T' value { list { type: DT_STRING } } }
""", op.node_def)
op = self._lib.apply_op("TypeList", a=[True, 12], name="y")
self.assertProtoEquals("""
name: 'y' op: 'TypeList' input: 'y/a_0' input: 'y/a_1'
attr { key: 'T' value { list { type: DT_BOOL type: DT_INT32 } } }
""", op.node_def)
op = self._lib.apply_op("TypeList", a=[], name="empty")
self.assertProtoEquals("""
name: 'empty' op: 'TypeList' attr { key: 'T' value { list { } } }
""", op.node_def)
with self.assertRaises(TypeError) as cm:
self._lib.apply_op("TypeList", a=17)
self.assertStartsWith(str(cm.exception),
"Expected list for 'a' "
"argument to 'TypeList' Op, not ")
with self.assertRaises(TypeError) as cm:
self._lib.apply_op("TypeList", a=[self.Tensor(dtypes.int32), None])
self.assertStartsWith(str(cm.exception),
"Tensors in list passed to 'a' of 'TypeList' Op "
"have types [int32, <NOT CONVERTIBLE TO TENSOR>]")
def testTypeListTwice(self):
self._add_op("name: 'TypeListTwice' "
"input_arg { name: 'a' type_list_attr: 'T' } "
"input_arg { name: 'b' type_list_attr: 'T' } "
"attr { name: 'T' type: 'list(type)' }")
op = self._lib.apply_op("TypeListTwice",
a=["foo", True],
b=["bar", False],
name="z")
self.assertProtoEquals("""
name: 'z' op: 'TypeListTwice'
input: 'z/a_0' input: 'z/a_1' input: 'z/b_0' input: 'z/b_1'
attr { key: 'T' value { list { type: DT_STRING type: DT_BOOL } } }
""", op.node_def)
op = self._lib.apply_op("TypeListTwice", a=[], b=[], name="empty")
self.assertProtoEquals("""
name: 'empty' op: 'TypeListTwice' attr { key: 'T' value { list { } } }
""", op.node_def)
with self.assertRaises(TypeError) as cm:
self._lib.apply_op("TypeListTwice", a=["foo", True], b=["bar", 6])
self.assertEqual(str(cm.exception),
"Input 'b' of 'TypeListTwice' Op has type list of "
"string, int32 that does not match type list "
"string, bool of argument 'a'.")
def testOutTypeList(self):
self._add_op("name: 'OutTypeList' "
"output_arg { name: 'out' type_list_attr: 'T' } "
"attr { name: 'T' type: 'list(type)' }")
out, = self._lib.apply_op("OutTypeList", T=[dtypes.float32], name="x")
self.assertEqual(dtypes.float32, out.dtype)
self.assertProtoEquals("""
name: 'x' op: 'OutTypeList'
attr { key: 'T' value { list { type: DT_FLOAT } } }
""", out.op.node_def)
out1, out2 = self._lib.apply_op("OutTypeList",
T=[dtypes.int32, dtypes.bool],
name="w")
self.assertEqual(dtypes.int32, out1.dtype)
self.assertEqual(dtypes.bool, out2.dtype)
self.assertProtoEquals("""
name: 'w' op: 'OutTypeList'
attr { key: 'T' value { list { type: DT_INT32 type: DT_BOOL } } }
""", out1.op.node_def)
out = self._lib.apply_op("OutTypeList", T=[], name="empty")
self.assertEqual([], out)
with self.assertRaises(TypeError) as cm:
self._lib.apply_op("OutTypeList", T=dtypes.int32)
self.assertEqual(str(cm.exception), "Expected list for attr T")
def testTypeListRestrict(self):
self._add_op("name: 'TypeListRestrict' "
"input_arg { name: 'a' type_list_attr: 'T' } "
"attr { name: 'T' type: 'list(type)' allowed_values { list { "
" type: DT_STRING type: DT_BOOL } } }")
op = self._lib.apply_op("TypeListRestrict", a=["foo", False], name="v")
self.assertProtoEquals("""
name: 'v' op: 'TypeListRestrict' input: 'v/a_0' input: 'v/a_1'
attr { key: 'T' value { list { type: DT_STRING type: DT_BOOL } } }
""", op.node_def)
with self.assertRaises(TypeError) as cm:
self._lib.apply_op("TypeListRestrict", a=[True, 12])
self.assertEqual(str(cm.exception),
"Value passed to parameter 'a' has DataType int32 "
"not in list of allowed values: string, bool")
def testOutTypeListRestrict(self):
self._add_op("name: 'OutTypeListRestrict' "
"output_arg { name: 'out' type_list_attr: 't' } "
"attr { name: 't' type: 'list(type)' allowed_values { list { "
" type: DT_STRING type: DT_BOOL } } }")
out1, out2 = self._lib.apply_op("OutTypeListRestrict",
t=[dtypes.bool, dtypes.string],
name="u")
self.assertEqual(dtypes.bool, out1.dtype)
self.assertEqual(dtypes.string, out2.dtype)
self.assertProtoEquals("""
name: 'u' op: 'OutTypeListRestrict'
attr { key: 't' value { list { type: DT_BOOL type: DT_STRING } } }
""", out1.op.node_def)
with self.assertRaises(TypeError) as cm:
self._lib.apply_op("OutTypeListRestrict", t=[dtypes.string, dtypes.int32])
self.assertEqual(str(cm.exception),
"Value passed to parameter 't' has DataType int32 "
"not in list of allowed values: string, bool")
def testAttr(self):
self._add_op("name: 'Attr' attr { name: 'a' type: 'int' }")
op = self._lib.apply_op("Attr", a=12, name="t")
self.assertProtoEquals("""
name: 't' op: 'Attr' attr { key: 'a' value { i: 12 } }
""", op.node_def)
op = self._lib.apply_op("Attr", a=tensor_shape.Dimension(13), name="u")
self.assertProtoEquals("""
name: 'u' op: 'Attr' attr { key: 'a' value { i: 13 } }
""", op.node_def)
with self.assertRaises(TypeError) as cm:
self._lib.apply_op("Attr", a="bad")
self.assertEqual(str(cm.exception),
"Expected int for argument 'a' not 'bad'.")
with self.assertRaises(TypeError) as cm:
self._lib.apply_op("Attr", a=[12])
self.assertEqual(str(cm.exception),
"Expected int for argument 'a' not [12].")
with self.assertRaises(TypeError) as cm:
self._lib.apply_op("Attr", a=None)
self.assertEqual(str(cm.exception),
"Expected int for argument 'a' not None.")
with self.assertRaises(TypeError) as cm:
self._lib.apply_op("Attr")
self.assertEqual(str(cm.exception), "No argument for attr a")
def testAttrFloat(self):
self._add_op("name: 'AttrFloat' attr { name: 'a' type: 'float' }")
op = self._lib.apply_op("AttrFloat", a=1.2, name="t")
self.assertProtoEquals("""
name: 't' op: 'AttrFloat' attr { key: 'a' value { f: 1.2 } }
""", op.node_def)
op = self._lib.apply_op("AttrFloat", a=12, name="u")
self.assertProtoEquals("""
name: 'u' op: 'AttrFloat' attr { key: 'a' value { f: 12 } }
""", op.node_def)
with self.assertRaises(TypeError) as cm:
self._lib.apply_op("AttrFloat", a="bad")
self.assertEqual(str(cm.exception),
"Expected float for argument 'a' not 'bad'.")
def testAttrBool(self):
self._add_op("name: 'AttrBool' attr { name: 'a' type: 'bool' }")
op = self._lib.apply_op("AttrBool", a=True, name="t")
self.assertProtoEquals("""
name: 't' op: 'AttrBool' attr { key: 'a' value { b: true } }
""", op.node_def)
op = self._lib.apply_op("AttrBool", a=False, name="u")
self.assertProtoEquals("""
name: 'u' op: 'AttrBool' attr { key: 'a' value { b: false } }
""", op.node_def)
with self.assertRaises(TypeError) as cm:
self._lib.apply_op("AttrBool", a=0)
self.assertEqual(str(cm.exception),
"Expected bool for argument 'a' not 0.")
with self.assertRaises(TypeError) as cm:
self._lib.apply_op("AttrBool", a=1)
self.assertEqual(str(cm.exception),
"Expected bool for argument 'a' not 1.")
with self.assertRaises(TypeError) as cm:
self._lib.apply_op("AttrBool", a=[])
self.assertEqual(str(cm.exception),
"Expected bool for argument 'a' not [].")
def testAttrBoolList(self):
self._add_op("name: 'AttrBoolList' attr { name: 'a' type: 'list(bool)' }")
op = self._lib.apply_op("AttrBoolList", a=[True, False, True], name="t")
self.assertProtoEquals("""
name: 't' op: 'AttrBoolList'
attr { key: 'a' value { list { b: true b: false b:true } } }
""", op.node_def)
op = self._lib.apply_op("AttrBoolList", a=[], name="u")
self.assertProtoEquals("""
name: 'u' op: 'AttrBoolList' attr { key: 'a' value { list { } } }
""", op.node_def)
with self.assertRaises(TypeError) as cm:
self._lib.apply_op("AttrBoolList", a=[0])
self.assertEqual(str(cm.exception),
"Expected bool for argument 'a' not 0.")
def testAttrMin(self):
self._add_op("name: 'AttrMin' attr { name: 'a' type: 'int' "
"has_minimum: true minimum: 5 }")
op = self._lib.apply_op("AttrMin", a=12, name="s")
self.assertProtoEquals("""
name: 's' op: 'AttrMin' attr { key: 'a' value { i: 12 } }
""", op.node_def)
with self.assertRaises(ValueError) as cm:
self._lib.apply_op("AttrMin", a=2)
self.assertEqual(str(cm.exception),
"Attr 'a' of 'AttrMin' Op passed 2 less than minimum 5.")
def testAttrListMin(self):
self._add_op("name: 'AttrListMin' attr { name: 'a' type: 'list(int)' "
"has_minimum: true minimum: 2 }")
op = self._lib.apply_op("AttrListMin", a=[1, 2], name="r")
self.assertProtoEquals("""
name: 'r' op: 'AttrListMin'
attr { key: 'a' value { list { i: 1 i: 2 } } }
""", op.node_def)
with self.assertRaises(ValueError) as cm:
self._lib.apply_op("AttrListMin", a=[17])
self.assertEqual(str(cm.exception),
"Attr 'a' of 'AttrListMin' Op "
"passed list of length 1 less than minimum 2.")
def testAttrEnum(self):
self._add_op("name: 'AttrEnum' "
"attr { name: 'a' type: 'string' "
" allowed_values { list { s: 'apples' s: 'oranges' } } }")
op = self._lib.apply_op("AttrEnum", a="oranges", name="e")
self.assertProtoEquals("""
name: 'e' op: 'AttrEnum' attr { key: 'a' value { s: 'oranges' } }
""", op.node_def)
with self.assertRaises(ValueError) as cm:
self._lib.apply_op("AttrEnum", a="invalid")
self.assertEqual(str(cm.exception),
'Attr \'a\' of \'AttrEnum\' Op '
'passed string \'invalid\' not in: '
'"apples", "oranges".')
def testAttrEnumList(self):
self._add_op("name: 'AttrEnumList' "
"attr { name: 'a' type: 'list(string)' "
" allowed_values { list { s: 'apples' s: 'oranges' } } }")
op = self._lib.apply_op("AttrEnumList", a=["oranges", "apples"], name="f")
self.assertProtoEquals("""
name: 'f' op: 'AttrEnumList'
attr { key: 'a' value { list { s: 'oranges' s: 'apples' } } }
""", op.node_def)
with self.assertRaises(ValueError) as cm:
self._lib.apply_op("AttrEnumList", a=["apples", "invalid", "oranges"])
self.assertEqual(str(cm.exception),
'Attr \'a\' of \'AttrEnumList\' Op '
'passed string \'invalid\' not '
'in: "apples", "oranges".')
def testAttrShape(self):
self._add_op("name: 'AttrShape' attr { name: 'a' type: 'shape' }")
op = self._lib.apply_op("AttrShape", a=[5], name="s1")
self.assertProtoEquals("""
name: 's1' op: 'AttrShape'
attr { key: 'a' value { shape { dim { size: 5 } } } }
""", op.node_def)
op = self._lib.apply_op("AttrShape", a=(4, 3, 2), name="s2")
self.assertProtoEquals("""
name: 's2' op: 'AttrShape'
attr { key: 'a' value {
shape { dim { size: 4 } dim { size: 3 } dim { size: 2 } } } }
""", op.node_def)
op = self._lib.apply_op(
"AttrShape", a=tensor_shape.TensorShape([3, 2]), name="s3")
self.assertProtoEquals("""
name: 's3' op: 'AttrShape'
attr { key: 'a' value {
shape { dim { size: 3 } dim { size: 2 } } } }
""", op.node_def)
op = self._lib.apply_op("AttrShape", a=[], name="s4")
self.assertProtoEquals("""
name: 's4' op: 'AttrShape' attr { key: 'a' value { shape { } } }
""", op.node_def)
shape = tensor_shape_pb2.TensorShapeProto()
shape.dim.add().size = 6
shape.dim.add().size = 3
op = self._lib.apply_op("AttrShape", a=shape, name="s5")
self.assertProtoEquals("""
name: 's5' op: 'AttrShape'
attr { key: 'a' value { shape { dim { size: 6 } dim { size: 3 } } } }
""", op.node_def)
# TODO(josh11b): Re-enable this test once we stop promoting scalars to shapes.
# with self.assertRaises(TypeError) as cm:
# self._lib.apply_op("AttrShape", a=5)
# self.assertEqual(str(cm.exception),
# "Don't know how to convert 5 to a TensorShapeProto for "
# "argument 'a'")
with self.assertRaises(TypeError):
self._lib.apply_op("AttrShape", a="ABC")
def testAttrShapeList(self):
self._add_op("name: 'AttrShapeList' attr { name: 'a' type: 'list(shape)' }")
op = self._lib.apply_op("AttrShapeList", a=[[3, 2], [6, 5, 4]], name="sl")
self.assertProtoEquals("""
name: 'sl' op: 'AttrShapeList'
attr { key: 'a' value { list {
shape { dim { size: 3 } dim { size: 2 } }
shape { dim { size: 6 } dim { size: 5 } dim { size: 4 } } } } }
""", op.node_def)
op = self._lib.apply_op("AttrShapeList", a=[], name="esl")
self.assertProtoEquals("""
name: 'esl' op: 'AttrShapeList' attr { key: 'a' value { list { } } }
""", op.node_def)
def testAttrPartialShape(self):
self._add_op(
"name: 'AttrPartialShape' attr { name: 'a' type: 'shape' }")
op = self._lib.apply_op("AttrPartialShape", a=[5], name="s1")
self.assertProtoEquals("""
name: 's1' op: 'AttrPartialShape'
attr { key: 'a' value { shape { dim { size: 5 } } } }
""", op.node_def)
op = self._lib.apply_op("AttrPartialShape", a=(4, None, 2), name="s2")
self.assertProtoEquals("""
name: 's2' op: 'AttrPartialShape'
attr { key: 'a' value {
shape { dim { size: 4 } dim { size: -1 } dim { size: 2 } } } }
""", op.node_def)
op = self._lib.apply_op(
"AttrPartialShape", a=tensor_shape.TensorShape([3, None]), name="s3")
self.assertProtoEquals("""
name: 's3' op: 'AttrPartialShape'
attr { key: 'a' value {
shape { dim { size: 3 } dim { size: -1 } } } }
""", op.node_def)
op = self._lib.apply_op("AttrPartialShape", a=[], name="s4")
self.assertProtoEquals("""
name: 's4' op: 'AttrPartialShape'
attr { key: 'a' value { shape { } } }
""", op.node_def)
shape = tensor_shape_pb2.TensorShapeProto()
shape.dim.add().size = -1
shape.dim.add().size = 3
op = self._lib.apply_op("AttrPartialShape", a=shape, name="s5")
self.assertProtoEquals("""
name: 's5' op: 'AttrPartialShape'
attr { key: 'a' value {
shape { dim { size: -1 } dim { size: 3 } } } }
""", op.node_def)
# TODO(ebrevdo): Re-enable once we stop promoting scalars to shapes.
# with self.assertRaises(TypeError) as cm:
# self._lib.apply_op("AttrPartialShape", a=5)
# self.assertEqual(str(cm.exception),
# "Don't know how to convert 5 to a TensorShapeProto for "
# "argument 'a'")
with self.assertRaises(TypeError):
self._lib.apply_op("AttrPartialShape", a="ABC")
def testAttrPartialShapeList(self):
self._add_op("""
name: 'AttrPartialShapeList'
attr { name: 'a' type: 'list(shape)' }
""")
op = self._lib.apply_op(
"AttrPartialShapeList", a=[[3, 2], [6, None, 4]], name="sl")
self.assertProtoEquals("""
name: 'sl' op: 'AttrPartialShapeList'
attr { key: 'a' value { list {
shape { dim { size: 3 } dim { size: 2 } }
shape { dim { size: 6 } dim { size: -1 } dim { size: 4 } } } } }
""", op.node_def)
op = self._lib.apply_op("AttrPartialShapeList", a=[], name="esl")
self.assertProtoEquals("""
name: 'esl' op: 'AttrPartialShapeList' attr {
key: 'a' value { list { } } }
""", op.node_def)
def testAttrDefault(self):
self._add_op("name: 'AttrDefault' "
"attr { name: 'a' type: 'string' "
" default_value { s: 'banana' } }")
op = self._lib.apply_op("AttrDefault", a=None, name="d")
self.assertProtoEquals("""
name: 'd' op: 'AttrDefault' attr { key: 'a' value { s: 'banana' } }
""", op.node_def)
op = self._lib.apply_op("AttrDefault", a="kiwi", name="c")
self.assertProtoEquals("""
name: 'c' op: 'AttrDefault' attr { key: 'a' value { s: 'kiwi' } }
""", op.node_def)
def testAttrListDefault(self):
self._add_op("name: 'AttrListDefault' "
"attr { name: 'a' type: 'list(int)' "
" default_value { list { i: 5 i: 15 } } }")
op = self._lib.apply_op("AttrListDefault", a=None, name="b")
self.assertProtoEquals("""
name: 'b' op: 'AttrListDefault'
attr { key: 'a' value { list { i: 5 i: 15 } } }
""", op.node_def)
op = self._lib.apply_op("AttrListDefault", a=[3], name="a")
self.assertProtoEquals("""
name: 'a' op: 'AttrListDefault'
attr { key: 'a' value { list { i: 3 } } }
""", op.node_def)
op = self._lib.apply_op("AttrListDefault", a=[], name="empty")
self.assertProtoEquals("""
name: 'empty' op: 'AttrListDefault'
attr { key: 'a' value { list { } } }
""", op.node_def)
def testAttrEmptyListDefault(self):
self._add_op("name: 'AttrEmptyListDefault' "
"attr { name: 'a' type: 'list(float)' "
" default_value { list { } } }")
op = self._lib.apply_op("AttrEmptyListDefault", a=None, name="b")
self.assertProtoEquals("""
name: 'b' op: 'AttrEmptyListDefault'
attr { key: 'a' value { list { } } }
""", op.node_def)
op = self._lib.apply_op("AttrEmptyListDefault", a=[3], name="a")
self.assertProtoEquals("""
name: 'a' op: 'AttrEmptyListDefault'
attr { key: 'a' value { list { f: 3 } } }
""", op.node_def)
op = self._lib.apply_op("AttrEmptyListDefault", a=[], name="empty")
self.assertProtoEquals("""
name: 'empty' op: 'AttrEmptyListDefault'
attr { key: 'a' value { list { } } }
""", op.node_def)
def testReservedAttr(self):
self._add_op("name: 'ReservedAttr' "
"attr { name: 'range' type: 'int' } ")
op = self._lib.apply_op("ReservedAttr", range_=7, name="x")
self.assertProtoEquals("""
name: 'x' op: 'ReservedAttr' attr { key: 'range' value { i: 7 } }
""", op.node_def)
def testDefaultAttrType(self):
self._add_op("name: 'AttrTypeDefault' "
"input_arg { name: 'a' type_attr: 'T' } "
"attr { name: 'T' type: 'type' "
" default_value { type: DT_INT32 } }")
# Give an input whose type has no obvious output type.
op = self._lib.apply_op("AttrTypeDefault", a=[], name="n")
self.assertProtoEquals("""
name: 'n' op: 'AttrTypeDefault' input: 'n/a'
attr { key: 'T' value { type: DT_INT32 } }
""", op.node_def)
# Give an input whose type can be inferred as different
# than the default.
op = self._lib.apply_op("AttrTypeDefault", a=[1.0], name="f")
self.assertProtoEquals("""
name: 'f' op: 'AttrTypeDefault' input: 'f/a'
attr { key: 'T' value { type: DT_FLOAT } }
""", op.node_def)
def testDefaultListAttrType(self):
self._add_op("name: 'AttrListTypeDefault' "
"input_arg { name: 'a' type_attr: 'T' number_attr: 'N' } "
"input_arg { name: 'b' type_attr: 'T' number_attr: 'N' } "
"attr { name: 'T' type: 'type' "
" default_value { type: DT_INT32 } }"
"attr { name: 'N' type: 'int' }")
# Give an input whose type can be inferred as different
# than the default.
op = self._lib.apply_op("AttrListTypeDefault", a=[1.0], b=[2.0], name="n")
self.assertProtoEquals("""
name: 'n' op: 'AttrListTypeDefault' input: 'n/a_0' input: 'n/b_0'
attr { key: 'T' value { type: DT_FLOAT } }
attr { key: 'N' value { i: 1 } }
""", op.node_def)
def testNIntsIn(self):
  # N same-typed (int32) inputs: N is inferred from the list length and must
  # respect the declared minimum of 2.
  self._add_op("name: 'NIntsIn' "
               "input_arg { name: 'a' type: DT_INT32 number_attr: 'N' } "
               "attr { name: 'N' type: 'int' has_minimum: true minimum: 2 }")
  op = self._lib.apply_op("NIntsIn", a=[1, 2], name="n")
  self.assertProtoEquals("""
      name: 'n' op: 'NIntsIn' input: 'n/a_0' input: 'n/a_1'
      attr { key: 'N' value { i: 2 } }
      """, op.node_def)
  op = self._lib.apply_op("NIntsIn", a=[5, 4, 3, 2, 1], name="o")
  self.assertProtoEquals("""
      name: 'o' op: 'NIntsIn'
      input: 'o/a_0' input: 'o/a_1' input: 'o/a_2' input: 'o/a_3' input: 'o/a_4'
      attr { key: 'N' value { i: 5 } }
      """, op.node_def)
  # Wrong element type (python values and Tensors).
  with self.assertRaises(TypeError) as cm:
    self._lib.apply_op("NIntsIn", a=["foo", "bar"])
  self.assertEqual(str(cm.exception),
                   "Tensors in list passed to 'a' of 'NIntsIn' Op have types "
                   "[string, string] that do not match expected type int32.")
  with self.assertRaises(TypeError) as cm:
    self._lib.apply_op("NIntsIn",
                       a=[self.Tensor(dtypes.string),
                          self.Tensor(dtypes.string)])
  self.assertEqual(str(cm.exception),
                   "Tensors in list passed to 'a' of 'NIntsIn' Op have "
                   "types [string, string] that do not match expected type "
                   "int32.")
  # List shorter than the declared minimum.
  with self.assertRaises(ValueError) as cm:
    self._lib.apply_op("NIntsIn", a=[99])
  self.assertEqual(str(cm.exception),
                   "List argument 'a' to 'NIntsIn' Op "
                   "with length 1 shorter than "
                   "minimum length 2.")
  # Mixed element types.
  with self.assertRaises(TypeError) as cm:
    self._lib.apply_op("NIntsIn", a=[38, "bar"])
  self.assertEqual(str(cm.exception),
                   "Tensors in list passed to 'a' of 'NIntsIn' Op have types "
                   "[int32, string] that do not match expected type int32.")
  with self.assertRaises(TypeError) as cm:
    self._lib.apply_op("NIntsIn",
                       a=[self.Tensor(dtypes.int32),
                          self.Tensor(dtypes.string)])
  self.assertEqual(str(cm.exception),
                   "Tensors in list passed to 'a' of 'NIntsIn' Op "
                   "have types [int32, string] that do not match expected "
                   "type int32.")
  # A scalar where a list is required.
  with self.assertRaises(TypeError) as cm:
    self._lib.apply_op("NIntsIn", a=17)
  self.assertStartsWith(str(cm.exception),
                        "Expected list for 'a' argument "
                        "to 'NIntsIn' Op, not ")
def testNPolymorphicIn(self):
  # N inputs of a single *inferred* type T: all list elements must agree,
  # and N (list length) must meet the minimum of 2.
  self._add_op("name: 'NPolymorphicIn' "
               "input_arg { name: 'a' type_attr: 'T' number_attr: 'N' } "
               "attr { name: 'T' type: 'type' } "
               "attr { name: 'N' type: 'int' has_minimum: true minimum: 2 }")
  op = self._lib.apply_op("NPolymorphicIn", a=[1, 2], name="n")
  self.assertProtoEquals("""
      name: 'n' op: 'NPolymorphicIn' input: 'n/a_0' input: 'n/a_1'
      attr { key: 'T' value { type: DT_INT32 } }
      attr { key: 'N' value { i: 2 } }
      """, op.node_def)
  op = self._lib.apply_op("NPolymorphicIn", a=[5, 4, 3, 2, 1], name="o")
  self.assertProtoEquals("""
      name: 'o' op: 'NPolymorphicIn'
      input: 'o/a_0' input: 'o/a_1' input: 'o/a_2' input: 'o/a_3' input: 'o/a_4'
      attr { key: 'T' value { type: DT_INT32 } }
      attr { key: 'N' value { i: 5 } }
      """, op.node_def)
  op = self._lib.apply_op("NPolymorphicIn", a=["foo", "bar"], name="p")
  self.assertProtoEquals("""
      name: 'p' op: 'NPolymorphicIn' input: 'p/a_0' input: 'p/a_1'
      attr { key: 'T' value { type: DT_STRING } }
      attr { key: 'N' value { i: 2 } }
      """, op.node_def)
  # A Tensor in the list pins T; the plain python value is converted to it.
  op = self._lib.apply_op("NPolymorphicIn",
                          a=[1, self.Tensor(dtypes.float32, name="x")],
                          name="q")
  self.assertProtoEquals("""
      name: 'q' op: 'NPolymorphicIn' input: 'q/a_0' input: 'x'
      attr { key: 'T' value { type: DT_FLOAT } }
      attr { key: 'N' value { i: 2 } }
      """, op.node_def)
  # A ref dtype in the list is compatible with its base dtype.
  op = self._lib.apply_op("NPolymorphicIn",
                          a=[self.Tensor(dtypes.float32, name="y"),
                             self.Tensor(dtypes.float32_ref, name="z")],
                          name="r")
  self.assertProtoEquals("""
      name: 'r' op: 'NPolymorphicIn' input: 'y' input: 'z'
      attr { key: 'T' value { type: DT_FLOAT } }
      attr { key: 'N' value { i: 2 } }
      """, op.node_def)
  # Error cases: too short, mismatched element types, unconvertible
  # elements, and a non-list argument.
  with self.assertRaises(ValueError) as cm:
    self._lib.apply_op("NPolymorphicIn", a=[99])
  self.assertEqual(str(cm.exception),
                   "List argument 'a' to 'NPolymorphicIn' Op with length 1 "
                   "shorter than minimum length 2.")
  with self.assertRaises(TypeError) as cm:
    self._lib.apply_op("NPolymorphicIn", a=[38, "bar"])
  self.assertEqual(str(cm.exception),
                   "Tensors in list passed to 'a' of 'NPolymorphicIn' Op "
                   "have types [int32, string] that don't all match.")
  with self.assertRaises(TypeError) as cm:
    self._lib.apply_op("NPolymorphicIn", a=[38, self.Tensor(dtypes.string)])
  self.assertEqual(str(cm.exception),
                   "Tensors in list passed to 'a' of 'NPolymorphicIn' Op "
                   "have types [int32, string] that don't all match.")
  with self.assertRaises(TypeError) as cm:
    self._lib.apply_op("NPolymorphicIn", a=[38, None])
  self.assertEqual(str(cm.exception),
                   "Tensors in list passed to 'a' of 'NPolymorphicIn' Op "
                   "have types [int32, <NOT CONVERTIBLE TO TENSOR>] that "
                   "don't all match.")
  with self.assertRaises(TypeError) as cm:
    self._lib.apply_op("NPolymorphicIn",
                       a=["abcd", self.Tensor(dtypes.int32)])
  self.assertEqual(str(cm.exception),
                   "Tensors in list passed to 'a' of 'NPolymorphicIn' Op "
                   "have types [string, int32] that don't all match.")
  with self.assertRaises(TypeError) as cm:
    self._lib.apply_op("NPolymorphicIn", a=17)
  self.assertStartsWith(str(cm.exception),
                        "Expected list for 'a' argument "
                        "to 'NPolymorphicIn' Op, not ")
def testNPolymorphicRestrictIn(self):
  # Like testNPolymorphicIn, but T is restricted to {string, bool}.
  self._add_op("name: 'NPolymorphicRestrictIn' "
               "input_arg { name: 'a' type_attr: 'T' number_attr: 'N' } "
               "attr { name: 'T' type: 'type' allowed_values { "
               " list { type: DT_STRING type: DT_BOOL } } } "
               "attr { name: 'N' type: 'int' has_minimum: true minimum: 2 }")
  op = self._lib.apply_op("NPolymorphicRestrictIn", a=["foo", "bar"],
                          name="p")
  self.assertProtoEquals("""
      name: 'p' op: 'NPolymorphicRestrictIn' input: 'p/a_0' input: 'p/a_1'
      attr { key: 'T' value { type: DT_STRING } }
      attr { key: 'N' value { i: 2 } }
      """, op.node_def)
  op = self._lib.apply_op("NPolymorphicRestrictIn",
                          a=[False, True, False],
                          name="b")
  self.assertProtoEquals("""
      name: 'b' op: 'NPolymorphicRestrictIn'
      input: 'b/a_0' input: 'b/a_1' input: 'b/a_2'
      attr { key: 'T' value { type: DT_BOOL } }
      attr { key: 'N' value { i: 3 } }
      """, op.node_def)
  # int32 is not in the allowed set.
  with self.assertRaises(TypeError) as cm:
    self._lib.apply_op("NPolymorphicRestrictIn", a=[1, 2])
  self.assertEqual(str(cm.exception),
                   "Value passed to parameter 'a' has DataType int32 not in "
                   "list of allowed values: string, bool")
def testNInTwice(self):
  # Two list inputs sharing the same length attr 'N': lengths must match.
  self._add_op("name: 'NInTwice' "
               "input_arg { name: 'a' type: DT_INT32 number_attr: 'N' } "
               "input_arg { name: 'b' type: DT_STRING number_attr: 'N' } "
               "attr { name: 'N' type: 'int' has_minimum: true minimum: 0 }")
  op = self._lib.apply_op("NInTwice", a=[1, 2], b=["one", "two"], name="n")
  self.assertProtoEquals("""
      name: 'n' op: 'NInTwice'
      input: 'n/a_0' input: 'n/a_1' input: 'n/b_0' input: 'n/b_1'
      attr { key: 'N' value { i: 2 } }
      """, op.node_def)
  # Minimum is 0, so empty lists are allowed.
  op = self._lib.apply_op("NInTwice", a=[], b=[], name="o")
  self.assertProtoEquals("""
      name: 'o' op: 'NInTwice' attr { key: 'N' value { i: 0 } }
      """, op.node_def)
  with self.assertRaises(ValueError) as cm:
    self._lib.apply_op("NInTwice", a=[1, 2, 3], b=["too short"])
  self.assertEqual(str(cm.exception),
                   "List argument 'b' to 'NInTwice' Op "
                   "with length 1 must match "
                   "length 3 of argument 'a'.")
def testNInPolymorphicTwice(self):
  # Two list inputs sharing both length attr 'N' and type attr 'T':
  # lengths and element types must agree across the two lists.
  self._add_op("name: 'NInPolymorphicTwice' "
               "input_arg { name: 'a' type_attr: 'T' number_attr: 'N' } "
               "input_arg { name: 'b' type_attr: 'T' number_attr: 'N' } "
               "attr { name: 'T' type: 'type' } "
               "attr { name: 'N' type: 'int' has_minimum: true minimum: 0 }")
  op = self._lib.apply_op("NInPolymorphicTwice", a=[1, 2], b=[3, 4], name="n")
  self.assertProtoEquals("""
      name: 'n' op: 'NInPolymorphicTwice'
      input: 'n/a_0' input: 'n/a_1' input: 'n/b_0' input: 'n/b_1'
      attr { key: 'T' value { type: DT_INT32 } }
      attr { key: 'N' value { i: 2 } }
      """, op.node_def)
  with self.assertRaises(ValueError) as cm:
    self._lib.apply_op("NInPolymorphicTwice", a=[1, 2, 3], b=[5])
  self.assertEqual(str(cm.exception),
                   "List argument 'b' to 'NInPolymorphicTwice' Op "
                   "with length 1 "
                   "must match length 3 of argument 'a'.")
  with self.assertRaises(TypeError) as cm:
    self._lib.apply_op("NInPolymorphicTwice", a=[1, 2], b=["one", "two"])
  self.assertEqual(str(cm.exception),
                   "Tensors in list passed to 'b' of 'NInPolymorphicTwice' "
                   "Op have types [string, string] that do not match type "
                   "int32 inferred from earlier arguments.")
  with self.assertRaises(TypeError) as cm:
    self._lib.apply_op("NInPolymorphicTwice",
                       a=[self.Tensor(dtypes.int32)],
                       b=[self.Tensor(dtypes.string)])
  self.assertEqual(str(cm.exception),
                   "Tensors in list passed to 'b' of "
                   "'NInPolymorphicTwice' Op have types [string] that do not "
                   "match type int32 inferred from earlier arguments.")
def testNInTwoTypeVariables(self):
  # Two list inputs sharing length 'N' but with independent type attrs
  # 'S' and 'T': lengths must match, element types may differ.
  self._add_op("name: 'NInTwoTypeVariables' "
               "input_arg { name: 'a' type_attr: 'S' number_attr: 'N' } "
               "input_arg { name: 'b' type_attr: 'T' number_attr: 'N' } "
               "attr { name: 'S' type: 'type' } "
               "attr { name: 'T' type: 'type' } "
               "attr { name: 'N' type: 'int' has_minimum: true minimum: 0 }")
  op = self._lib.apply_op("NInTwoTypeVariables",
                          a=[1, 2],
                          b=[True, False],
                          name="n")
  self.assertProtoEquals("""
      name: 'n' op: 'NInTwoTypeVariables'
      input: 'n/a_0' input: 'n/a_1' input: 'n/b_0' input: 'n/b_1'
      attr { key: 'S' value { type: DT_INT32 } }
      attr { key: 'T' value { type: DT_BOOL } }
      attr { key: 'N' value { i: 2 } }
      """, op.node_def)
  op = self._lib.apply_op("NInTwoTypeVariables", a=[1, 2], b=[3, 4], name="o")
  self.assertProtoEquals("""
      name: 'o' op: 'NInTwoTypeVariables'
      input: 'o/a_0' input: 'o/a_1' input: 'o/b_0' input: 'o/b_1'
      attr { key: 'S' value { type: DT_INT32 } }
      attr { key: 'T' value { type: DT_INT32 } }
      attr { key: 'N' value { i: 2 } }
      """, op.node_def)
  op = self._lib.apply_op("NInTwoTypeVariables",
                          a=[self.Tensor(dtypes.int32, name="q")],
                          b=[self.Tensor(dtypes.string, name="r")],
                          name="p")
  self.assertProtoEquals("""
      name: 'p' op: 'NInTwoTypeVariables' input: 'q' input: 'r'
      attr { key: 'S' value { type: DT_INT32 } }
      attr { key: 'T' value { type: DT_STRING } }
      attr { key: 'N' value { i: 1 } }
      """, op.node_def)
  with self.assertRaises(ValueError) as cm:
    self._lib.apply_op("NInTwoTypeVariables", a=[1, 2, 3], b=["5"])
  self.assertEqual(str(cm.exception),
                   "List argument 'b' to 'NInTwoTypeVariables' Op "
                   "with length 1 "
                   "must match length 3 of argument 'a'.")
def testInPolymorphicTwice(self):
  # Two list inputs sharing type attr 'T' but with independent lengths
  # 'N' and 'M'. T must be inferable from the *first* non-empty list.
  self._add_op("name: 'InPolymorphicTwice' "
               "input_arg { name: 'a' type_attr: 'T' number_attr: 'N' } "
               "input_arg { name: 'b' type_attr: 'T' number_attr: 'M' } "
               "attr { name: 'T' type: 'type' } "
               "attr { name: 'N' type: 'int' has_minimum: true minimum: 0 } "
               "attr { name: 'M' type: 'int' has_minimum: true minimum: 0 } ")
  op = self._lib.apply_op("InPolymorphicTwice", a=[8], b=[3, 4, 5], name="n")
  self.assertProtoEquals("""
      name: 'n' op: 'InPolymorphicTwice'
      input: 'n/a_0' input: 'n/b_0' input: 'n/b_1' input: 'n/b_2'
      attr { key: 'T' value { type: DT_INT32 } }
      attr { key: 'N' value { i: 1 } }
      attr { key: 'M' value { i: 3 } }
      """, op.node_def)
  op = self._lib.apply_op("InPolymorphicTwice", a=[8], b=[], name="o")
  self.assertProtoEquals("""
      name: 'o' op: 'InPolymorphicTwice' input: 'o/a_0'
      attr { key: 'T' value { type: DT_INT32 } }
      attr { key: 'N' value { i: 1 } }
      attr { key: 'M' value { i: 0 } }
      """, op.node_def)
  # An empty first list leaves T undetermined.
  with self.assertRaises(TypeError) as cm:
    self._lib.apply_op("InPolymorphicTwice", a=[], b=[3, 4, 5])
  self.assertEqual(str(cm.exception),
                   "Don't know how to infer type variable from empty input "
                   "list passed to input 'a' of 'InPolymorphicTwice' Op.")
  with self.assertRaises(TypeError) as cm:
    self._lib.apply_op("InPolymorphicTwice", a=[1, 2], b=["one", "two"])
  self.assertEqual(str(cm.exception),
                   "Tensors in list passed to 'b' of 'InPolymorphicTwice' Op "
                   "have types [string, string] that do not match type int32 "
                   "inferred from earlier arguments.")
  with self.assertRaises(TypeError) as cm:
    self._lib.apply_op("InPolymorphicTwice",
                       a=[self.Tensor(dtypes.int32)],
                       b=[self.Tensor(dtypes.string)])
  self.assertEqual(str(cm.exception),
                   "Tensors in list passed to 'b' of 'InPolymorphicTwice' "
                   "Op have types [string] that do not match type int32 "
                   "inferred from earlier arguments.")
def testNIntsOut(self):
  # N int32 outputs: N is an explicit attr, must be an int >= minimum 2.
  self._add_op("name: 'NIntsOut' "
               "output_arg { name: 'a' type: DT_INT32 number_attr: 'N' } "
               "attr { name: 'N' type: 'int' has_minimum: true minimum: 2 }")
  out1, out2 = self._lib.apply_op("NIntsOut", N=2, name="n")
  self.assertEqual(dtypes.int32, out1.dtype)
  self.assertEqual(dtypes.int32, out2.dtype)
  self.assertProtoEquals("""
      name: 'n' op: 'NIntsOut' attr { key: 'N' value { i: 2 } }
      """, out1.op.node_def)
  out1, out2, out3, out4, out5 = self._lib.apply_op(
      "NIntsOut", N=5, name="o")
  self.assertEqual(dtypes.int32, out1.dtype)
  self.assertEqual(dtypes.int32, out2.dtype)
  self.assertEqual(dtypes.int32, out3.dtype)
  self.assertEqual(dtypes.int32, out4.dtype)
  self.assertEqual(dtypes.int32, out5.dtype)
  self.assertProtoEquals("""
      name: 'o' op: 'NIntsOut' attr { key: 'N' value { i: 5 } }
      """, out5.op.node_def)
  # N below the minimum, and N of the wrong python type.
  with self.assertRaises(ValueError) as cm:
    self._lib.apply_op("NIntsOut", N=1)
  self.assertEqual(str(cm.exception),
                   "Attr 'N' of 'NIntsOut' Op passed 1 less than minimum 2.")
  with self.assertRaises(TypeError) as cm:
    self._lib.apply_op("NIntsOut", N=[3])
  self.assertEqual(str(cm.exception),
                   "Expected int for argument 'N' not [3].")
def testNIntsOutDefault(self):
  # N int32 outputs where N defaults to 3 when passed as None.
  self._add_op("name: 'NIntsOutDefault' "
               "output_arg { name: 'a' type: DT_INT32 number_attr: 'N' } "
               "attr { name: 'N' type: 'int' has_minimum: true minimum: 2"
               " default_value { i:3 } }")
  out1, out2, out3 = self._lib.apply_op(
      "NIntsOutDefault", N=None, name="z")
  self.assertEqual(dtypes.int32, out1.dtype)
  self.assertEqual(dtypes.int32, out2.dtype)
  self.assertEqual(dtypes.int32, out3.dtype)
  self.assertProtoEquals("""
      name: 'z' op: 'NIntsOutDefault' attr { key: 'N' value { i: 3 } }
      """, out1.op.node_def)
  out1, out2 = self._lib.apply_op("NIntsOutDefault", N=2, name="y")
  self.assertEqual(dtypes.int32, out1.dtype)
  self.assertEqual(dtypes.int32, out2.dtype)
  self.assertProtoEquals("""
      name: 'y' op: 'NIntsOutDefault' attr { key: 'N' value { i: 2 } }
      """, out2.op.node_def)
def testNPolymorphicOut(self):
  # N outputs of caller-chosen type T; both N and T are explicit attrs.
  self._add_op("name: 'NPolymorphicOut' "
               "output_arg { name: 'a' type_attr: 'T' number_attr: 'N' } "
               "attr { name: 'T' type: 'type' } "
               "attr { name: 'N' type: 'int' has_minimum: true minimum: 2 }")
  out1, out2 = self._lib.apply_op("NPolymorphicOut",
                                  N=2,
                                  T=dtypes.int32,
                                  name="n")
  self.assertEqual(dtypes.int32, out1.dtype)
  self.assertEqual(dtypes.int32, out2.dtype)
  self.assertProtoEquals("""
      name: 'n' op: 'NPolymorphicOut'
      attr { key: 'T' value { type: DT_INT32 } }
      attr { key: 'N' value { i: 2 } }
      """, out1.op.node_def)
  out1, out2, out3 = self._lib.apply_op(
      "NPolymorphicOut", T=dtypes.string, N=3, name="o")
  self.assertEqual(dtypes.string, out1.dtype)
  self.assertEqual(dtypes.string, out2.dtype)
  self.assertEqual(dtypes.string, out3.dtype)
  self.assertProtoEquals("""
      name: 'o' op: 'NPolymorphicOut'
      attr { key: 'T' value { type: DT_STRING } }
      attr { key: 'N' value { i: 3 } }
      """, out3.op.node_def)
  # N below minimum, and T of the wrong python type.
  with self.assertRaises(ValueError) as cm:
    self._lib.apply_op("NPolymorphicOut", N=1, T=dtypes.string)
  self.assertEqual(str(cm.exception),
                   "Attr 'N' of 'NPolymorphicOut' Op "
                   "passed 1 less than minimum 2.")
  with self.assertRaises(TypeError) as cm:
    self._lib.apply_op("NPolymorphicOut", N=3, T=[dtypes.string])
  self.assertEqual(
      str(cm.exception),
      "Expected DataType for argument 'T' not [tf.string].")
def testNPolymorphicOutDefault(self):
  # N outputs of type T where both attrs have defaults (T=bool, N=2);
  # each combination of passing None vs. an explicit value is exercised.
  self._add_op("name: 'NPolymorphicOutDefault' "
               "output_arg { name: 'a' type_attr: 'T' number_attr: 'N' } "
               "attr { name: 'T' type: 'type'"
               " default_value { type: DT_BOOL } } "
               "attr { name: 'N' type: 'int' has_minimum: true minimum: 2 "
               " default_value { i: 2 } }")
  out1, out2 = self._lib.apply_op(
      "NPolymorphicOutDefault", N=None, T=None, name="r")
  self.assertEqual(dtypes.bool, out1.dtype)
  self.assertEqual(dtypes.bool, out2.dtype)
  self.assertProtoEquals("""
      name: 'r' op: 'NPolymorphicOutDefault'
      attr { key: 'T' value { type: DT_BOOL } }
      attr { key: 'N' value { i: 2 } }
      """, out1.op.node_def)
  out1, out2, out3 = self._lib.apply_op(
      "NPolymorphicOutDefault", N=3, T=None, name="s")
  self.assertEqual(dtypes.bool, out1.dtype)
  self.assertEqual(dtypes.bool, out2.dtype)
  self.assertEqual(dtypes.bool, out3.dtype)
  self.assertProtoEquals("""
      name: 's' op: 'NPolymorphicOutDefault'
      attr { key: 'T' value { type: DT_BOOL } }
      attr { key: 'N' value { i: 3 } }
      """, out1.op.node_def)
  out1, out2 = self._lib.apply_op(
      "NPolymorphicOutDefault", N=None, T=dtypes.int32, name="t")
  self.assertEqual(dtypes.int32, out1.dtype)
  self.assertEqual(dtypes.int32, out2.dtype)
  self.assertProtoEquals("""
      name: 't' op: 'NPolymorphicOutDefault'
      attr { key: 'T' value { type: DT_INT32 } }
      attr { key: 'N' value { i: 2 } }
      """, out1.op.node_def)
  out1, out2, out3 = self._lib.apply_op(
      "NPolymorphicOutDefault", N=3, T=dtypes.int32, name="u")
  self.assertEqual(dtypes.int32, out1.dtype)
  self.assertEqual(dtypes.int32, out2.dtype)
  self.assertEqual(dtypes.int32, out3.dtype)
  self.assertProtoEquals("""
      name: 'u' op: 'NPolymorphicOutDefault'
      attr { key: 'T' value { type: DT_INT32 } }
      attr { key: 'N' value { i: 3 } }
      """, out1.op.node_def)
def testNPolymorphicRestrictOut(self):
  # N outputs of type T restricted to {string, bool}.
  self._add_op("name: 'NPolymorphicRestrictOut' "
               "output_arg { name: 'a' type_attr: 'T' number_attr: 'N' } "
               "attr { name: 'T' type: 'type' allowed_values { "
               " list { type: DT_STRING type: DT_BOOL } } } "
               "attr { name: 'N' type: 'int' has_minimum: true minimum: 2 }")
  out1, out2, out3 = self._lib.apply_op(
      "NPolymorphicRestrictOut", N=3, T=dtypes.bool, name="u")
  self.assertEqual(dtypes.bool, out1.dtype)
  self.assertEqual(dtypes.bool, out2.dtype)
  self.assertEqual(dtypes.bool, out3.dtype)
  self.assertProtoEquals("""
      name: 'u' op: 'NPolymorphicRestrictOut'
      attr { key: 'T' value { type: DT_BOOL } }
      attr { key: 'N' value { i: 3 } }
      """, out1.op.node_def)
  # int32 is not in the allowed set.
  with self.assertRaises(TypeError) as cm:
    self._lib.apply_op("NPolymorphicRestrictOut", N=2, T=dtypes.int32)
  self.assertEqual(str(cm.exception),
                   "Value passed to parameter 'T' has DataType int32 "
                   "not in list of allowed values: string, bool")
def testRef(self):
  # Ref-typed inputs/outputs: ref outputs get a *_ref dtype, ref inputs
  # add colocation ("_class") constraints, a ref can feed a non-ref input,
  # but a non-ref value cannot feed a ref input.
  self._add_op("name: 'RefIn' "
               "input_arg { name: 'a' type_attr: 'T' is_ref: true } "
               "attr { name: 'T' type: 'type' } ")
  self._add_op("name: 'TwoRefsIn' "
               "input_arg { name: 'a' type_attr: 'T' is_ref: true } "
               "input_arg { name: 'b' type_attr: 'T' is_ref: true } "
               "attr { name: 'T' type: 'type' } ")
  self._add_op("name: 'RefOut' "
               "output_arg { name: 'a' type_attr: 'T' is_ref: true } "
               "attr { name: 'T' type: 'type' } ")
  out = self._lib.apply_op("RefOut", T=dtypes.bool, name="o")
  self.assertEqual(dtypes.bool_ref, out.dtype)
  self.assertProtoEquals("""
      name: 'o' op: 'RefOut'
      attr { key: 'T' value { type: DT_BOOL } }
      """, out.op.node_def)
  op = self._lib.apply_op("RefIn", a=out, name="i")
  self.assertProtoEquals("""
      name: 'i' op: 'RefIn' input: 'o'
      attr { key: 'T' value { type: DT_BOOL } }
      attr { key: "_class" value { list { s: "loc:@o" } } }
      """, op.node_def)
  # Can pass ref to non-ref input.
  out = self._lib.apply_op("RefOut", T=dtypes.int32, name="r")
  out = self._lib.apply_op("Simple", a=out, name="s")
  self.assertProtoEquals("""
      name: 's' op: 'Simple' input: 'r'
      """, out.op.node_def)
  # Can't pass non-ref to ref input.
  with self.assertRaises(TypeError) as cm:
    self._lib.apply_op("RefIn", a=2)
  self.assertEqual(str(cm.exception),
                   "Input 'a' of 'RefIn' Op requires l-value input")
  input_a = self._lib.apply_op("RefOut", T=dtypes.int32, name="t")
  input_b = self._lib.apply_op("RefOut", T=dtypes.int32, name="u")
  op = self._lib.apply_op("TwoRefsIn", a=input_a, b=input_b, name="v")
  # NOTE(mrry): The order of colocation constraints is an implementation
  # detail.
  self.assertProtoEquals("""
      name: 'v' op: 'TwoRefsIn' input: 't' input: 'u'
      attr { key: 'T' value { type: DT_INT32 } }
      attr { key: "_class" value { list { s: "loc:@t" s: "loc:@u" } } }
      """, op.node_def)
def testSpecifyDevice(self):
  # A device scope active during apply_op must be stamped on every node
  # it creates, including implicitly created constant-conversion nodes.
  with self._g.device("/job:ADevice"):
    self._lib.apply_op("Simple", a=3)
  # We look at the whole graph here to make sure the Const op is also given
  # the specified device.
  graph_def = self._g.as_graph_def()
  self.assertEqual(len(graph_def.node), 2)
  for node in graph_def.node:
    self.assertDeviceEqual(node.device, "/job:ADevice")
def testStructuredOutputSingleList(self):
  # An op whose sole output is a variable-length list returns a python
  # list of that length (including length 0).
  self._add_op("name: 'SimpleStruct' "
               "output_arg { name: 'a' type: DT_INT32 number_attr: 'n_a' } "
               "attr { name: 'n_a' type: 'int' }")
  for n_a in [0, 1, 3]:
    a = self._lib.apply_op("SimpleStruct", n_a=n_a)
    self.assertTrue(isinstance(a, list))
    self.assertEqual(n_a, len(a))
def testStructuredOutputListAndSingle(self):
  # Mixed outputs: a list-valued output comes back as a python list,
  # a single-valued output as a plain Tensor.
  self._add_op("name: 'MixedStruct' "
               "output_arg { name: 'a' type: DT_INT32 number_attr: 'n_a' } "
               "output_arg { name: 'b' type: DT_FLOAT } "
               "attr { name: 'n_a' type: 'int' }")
  for n_a in [0, 1, 3]:
    a, b = self._lib.apply_op("MixedStruct", n_a=n_a)
    self.assertTrue(isinstance(a, list))
    self.assertEqual(n_a, len(a))
    self.assertTrue(all(x.dtype == dtypes.int32 for x in a))
    self.assertTrue(isinstance(b, ops.Tensor))
    self.assertEqual(dtypes.float32, b.dtype)
def testStructuredOutputMultipleLists(self):
  # Several list-valued outputs: two sized by int attrs (n_a, n_b) and one
  # by a list(type) attr (t_c). Lengths and dtypes must match the attrs
  # for every combination.
  self._add_op("name: 'ComplexStruct' "
               "output_arg { name: 'a' type: DT_INT32 number_attr: 'n_a' } "
               "output_arg { name: 'b' type: DT_INT64 number_attr: 'n_b' } "
               "output_arg { name: 'c' type_list_attr: 't_c' } "
               "attr { name: 'n_a' type: 'int' } "
               "attr { name: 'n_b' type: 'int' } "
               "attr { name: 't_c' type: 'list(type)' }")
  for n_a in [0, 1, 3]:
    for n_b in [0, 1, 3]:
      for t_c in [[],
                  [dtypes.int32],
                  [dtypes.int32, dtypes.float32]]:
        a, b, c = self._lib.apply_op("ComplexStruct",
                                     n_a=n_a,
                                     n_b=n_b,
                                     t_c=t_c)
        self.assertEqual(n_a, len(a))
        self.assertTrue(all(x.dtype == dtypes.int32 for x in a))
        self.assertEqual(n_b, len(b))
        self.assertTrue(all(x.dtype == dtypes.int64 for x in b))
        self.assertEqual(t_c, [x.dtype for x in c])
class OpDefLibraryGraphTest(test_util.TensorFlowTestCase):
  """Tests how OpDefLibrary.apply_op interacts with Graph scoping."""

  def setUp(self):
    # Fresh library and a private graph; two simple ops for the tests.
    self._lib = OpDefLibrary()
    self._g = ops.Graph()
    self._add_op("name: 'Simple' input_arg { name: 'a' type: DT_INT32 } "
                 "output_arg { name: 'out' type: DT_FLOAT }")
    self._add_op("name: 'Binary' "
                 "input_arg { name: 'a' type_attr: 'T' } "
                 "input_arg { name: 'b' type_attr: 'T' } "
                 "output_arg { name: 'out' type_attr: 'T' } "
                 "attr { name: 'T' type: 'type' }")

  def _add_op(self, ascii):
    # Parse a text-format OpDef and register it with the library.
    op_def = op_def_pb2.OpDef()
    text_format.Merge(ascii, op_def)
    self._lib.add_op(op_def)

  def testNoGraph(self):
    # Without an explicit graph scope, the op lands in the default graph.
    out = self._lib.apply_op("Simple", a=3)
    self.assertEqual(out.graph, ops.get_default_graph())

  def testDefaultGraph(self):
    # Inside `as_default()`, the op lands in that graph.
    with self._g.as_default():
      out = self._lib.apply_op("Simple", a=3)
      self.assertEqual(out.graph, self._g)

  def testDifferentGraphFails(self):
    # Mixing tensors from two different graphs must be rejected.
    with self._g.as_default():
      a = self._lib.apply_op("Simple", a=3)
    other_g = ops.Graph()
    with other_g.as_default():
      b = self._lib.apply_op("Simple", a=4)
    with self.assertRaises(ValueError) as cm:
      self._lib.apply_op("Binary", a=a, b=b)
    self.assertTrue("must be from the same graph" in str(cm.exception))
# Run the tests when executed as a script.
if __name__ == "__main__":
  googletest.main()
| apache-2.0 |
frankyrumple/ope | laptop_credential/gluon/packages/dal/pydal/adapters/informix.py | 11 | 5178 | # -*- coding: utf-8 -*-
import datetime
import re
from .._globals import IDENTITY
from .base import BaseAdapter
class InformixAdapter(BaseAdapter):
    """pyDAL adapter for IBM Informix via the `informixdb` driver."""

    drivers = ('informixdb',)

    # Mapping from pyDAL field types to Informix column definitions.
    # NOTE(review): the text/json/blob/list types use 'BLOB SUB_TYPE n',
    # which looks like Firebird syntax -- confirm these are valid Informix
    # column types (Informix normally uses TEXT/BYTE).
    types = {
        'boolean': 'CHAR(1)',
        'string': 'VARCHAR(%(length)s)',
        'text': 'BLOB SUB_TYPE 1',
        'json': 'BLOB SUB_TYPE 1',
        'password': 'VARCHAR(%(length)s)',
        'blob': 'BLOB SUB_TYPE 0',
        'upload': 'VARCHAR(%(length)s)',
        'integer': 'INTEGER',
        'bigint': 'BIGINT',
        'float': 'FLOAT',
        'double': 'DOUBLE PRECISION',
        'decimal': 'NUMERIC(%(precision)s,%(scale)s)',
        'date': 'DATE',
        'time': 'CHAR(8)',
        'datetime': 'DATETIME',
        'id': 'SERIAL',
        'reference': 'INTEGER REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
        'list:integer': 'BLOB SUB_TYPE 1',
        'list:string': 'BLOB SUB_TYPE 1',
        'list:reference': 'BLOB SUB_TYPE 1',
        'big-id': 'BIGSERIAL',
        'big-reference': 'BIGINT REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
        'reference FK': 'REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s CONSTRAINT FK_%(table_name)s_%(field_name)s',
        'reference TFK': 'FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_table)s (%(foreign_key)s) ON DELETE %(on_delete_action)s CONSTRAINT TFK_%(table_name)s_%(field_name)s',
    }

    def RANDOM(self):
        # SQL expression used e.g. for random row ordering.
        return 'Random()'

    def NOT_NULL(self,default,field_type):
        # Informix wants the DEFAULT clause before NOT NULL.
        return 'DEFAULT %s NOT NULL' % self.represent(default,field_type)

    def select_limitby(self, sql_s, sql_f, sql_t, sql_w, sql_o, limitby):
        # Translate limitby=(min, max) into Informix SKIP/FIRST, gated on
        # the server version reported by the driver.
        if limitby:
            (lmin, lmax) = limitby
            fetch_amt = lmax - lmin
            dbms_version = int(self.connection.dbms_version.split('.')[0])
            if lmin and (dbms_version >= 10):
                # Requires Informix 10.0+
                sql_s += ' SKIP %d' % (lmin, )
            if fetch_amt and (dbms_version >= 9):
                # Requires Informix 9.0+
                sql_s += ' FIRST %d' % (fetch_amt, )
        return 'SELECT %s %s FROM %s%s%s;' % (sql_s, sql_f, sql_t, sql_w, sql_o)

    def represent_exceptions(self, obj, fieldtype):
        # Special-case date/datetime literals as to_date(...) expressions;
        # returning None lets the caller fall back to the default encoding.
        if fieldtype == 'date':
            if isinstance(obj, (datetime.date, datetime.datetime)):
                obj = obj.isoformat()[:10]
            else:
                obj = str(obj)
            return "to_date('%s','%%Y-%%m-%%d')" % obj
        elif fieldtype == 'datetime':
            if isinstance(obj, datetime.datetime):
                obj = obj.isoformat()[:19].replace('T',' ')
            elif isinstance(obj, datetime.date):
                obj = obj.isoformat()[:10]+' 00:00:00'
            else:
                obj = str(obj)
            return "to_date('%s','%%Y-%%m-%%d %%H:%%M:%%S')" % obj
        return None

    # Parses URIs of the form user:password@host[:port]/dbname.
    REGEX_URI = re.compile('^(?P<user>[^:@]+)(\:(?P<password>[^@]*))?@(?P<host>\[[^/]+\]|[^\:/]+)(\:(?P<port>[0-9]+))?/(?P<db>.+)$')

    def __init__(self,db,uri,pool_size=0,folder=None,db_codec ='UTF-8',
                 credential_decoder=IDENTITY, driver_args={},
                 adapter_args={}, do_connect=True, after_connection=None):
        # Parse the connection URI, build a connector closure, and connect.
        self.db = db
        self.dbengine = "informix"
        self.uri = uri
        if do_connect: self.find_driver(adapter_args,uri)
        self.pool_size = pool_size
        self.folder = folder
        self.db_codec = db_codec
        self._after_connection = after_connection
        self.find_or_make_work_folder()
        self.test_query = 'SELECT COUNT(*) FROM systables;'
        ruri = uri.split('://',1)[1]
        m = self.REGEX_URI.match(ruri)
        if not m:
            raise SyntaxError(
                "Invalid URI string in DAL: %s" % self.uri)
        user = credential_decoder(m.group('user'))
        if not user:
            raise SyntaxError('User required')
        password = credential_decoder(m.group('password'))
        if not password:
            password = ''
        host = m.group('host')
        if not host:
            raise SyntaxError('Host name required')
        db = m.group('db')
        if not db:
            raise SyntaxError('Database name required')
        user = credential_decoder(user)
        password = credential_decoder(password)
        # informixdb connects with a 'db@host' DSN plus user/password kwargs.
        dsn = '%s@%s' % (db,host)
        driver_args.update(user=user,password=password)
        def connector(dsn=dsn,driver_args=driver_args):
            return self.driver.connect(dsn,**driver_args)
        self.connector = connector
        if do_connect: self.reconnect()

    def execute(self,command):
        # Informix rejects a trailing semicolon; strip it before executing.
        if command[-1:]==';':
            command = command[:-1]
        return self.log_execute(command)

    def lastrowid(self,table):
        # sqlerrd[1] holds the SERIAL value generated by the last insert.
        return self.cursor.sqlerrd[1]
class InformixSEAdapter(InformixAdapter):
    """Adapter for Informix Standard Edition (work in progress).

    SE lacks SKIP/FIRST, so LIMIT/OFFSET is emulated client-side:
    the full SELECT is issued and `rowslice` trims the fetched rows.
    """

    def select_limitby(self, sql_s, sql_f, sql_t, sql_w, sql_o, limitby):
        # limitby is deliberately ignored here; slicing happens in rowslice.
        return 'SELECT %s %s FROM %s%s%s;' % \
            (sql_s, sql_f, sql_t, sql_w, sql_o)

    def rowslice(self, rows, minimum=0, maximum=None):
        # Trim the row set to the [minimum, maximum) window.
        return rows[minimum:] if maximum is None else rows[minimum:maximum]
| mit |
dimdung/boto | tests/compat.py | 115 | 1560 | # Copyright (c) 2014 Amazon.com, Inc. or its affiliates. All Rights Reserved
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
# Use unittest2 for older versions of Python
try:
    import unittest2 as unittest
except ImportError:
    import unittest
# Use third-party ordereddict for older versions of Python
try:
    from collections import OrderedDict
except ImportError:
    from ordereddict import OrderedDict
# Use standard unittest.mock if possible. (mock doesn't support Python 3.4)
try:
    from unittest import mock
except ImportError:
    import mock
| mit |
gptech/ansible | lib/ansible/module_utils/cloud.py | 119 | 3974 | #
# (c) 2016 Allen Sanabria, <asanabria@linuxdynasty.org>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
"""
This module adds shared support for generic cloud modules
In order to use this module, include it as part of a custom
module as shown below.
from ansible.module_utils.cloud import *
The 'cloud' module provides the following common classes:
* CloudRetry
- The base class to be used by other cloud providers, in order to
provide a backoff/retry decorator based on status codes.
- Example using the AWSRetry class which inherits from CloudRetry.
@AWSRetry.retry(tries=20, delay=2, backoff=2)
get_ec2_security_group_ids_from_names()
"""
from functools import wraps
import syslog
import time
from ansible.module_utils.pycompat24 import get_exception
class CloudRetry(object):
    """ CloudRetry can be used by any cloud provider, in order to implement a
    backoff algorithm/retry effect based on Status Code from Exceptions.

    Subclasses set ``base_class`` to the provider's exception type and
    implement the two static hooks below so that ``backoff`` can decide
    which errors are worth retrying.
    """
    # This is the base class of the exception.
    # AWS Example botocore.exceptions.ClientError
    base_class = None

    @staticmethod
    def status_code_from_exception(error):
        """ Return the status code from the exception object
        Args:
            error (object): The exception itself.
        """
        pass

    @staticmethod
    def found(response_code):
        """ Return True if the Response Code to retry on was found.
        Args:
            response_code (str): This is the Response Code that is being matched against.
        """
        pass

    @classmethod
    def backoff(cls, tries=10, delay=3, backoff=1.1):
        """ Retry calling the Cloud decorated function using an exponential backoff.
        Kwargs:
            tries (int): Number of times to try (not retry) before giving up
                default=10
            delay (int): Initial delay between retries in seconds
                default=3
            backoff (float): Multiplier applied to the delay after each retried
                attempt (e.g. a value of 2 doubles the delay each retry)
                default=1.1
        """
        def deco(f):
            @wraps(f)
            def retry_func(*args, **kwargs):
                max_tries, max_delay = tries, delay
                # Loop for all but the last attempt; the final attempt falls
                # through to the unguarded call below so its error propagates.
                while max_tries > 1:
                    try:
                        return f(*args, **kwargs)
                    except Exception:
                        # get_exception() keeps this compatible with very old
                        # Pythons that lack the "except ... as e" syntax.
                        e = get_exception()
                        if isinstance(e, cls.base_class):
                            response_code = cls.status_code_from_exception(e)
                            if cls.found(response_code):
                                msg = "{0}: Retrying in {1} seconds...".format(str(e), max_delay)
                                syslog.syslog(syslog.LOG_INFO, msg)
                                time.sleep(max_delay)
                                max_tries -= 1
                                max_delay *= backoff
                            else:
                                # Retryable exception type but not a retryable
                                # status code: re-raise for the caller.
                                raise e
                        else:
                            # Not the provider's exception type: re-raise.
                            raise e
                # Last attempt: any exception propagates unmodified.
                return f(*args, **kwargs)
            return retry_func  # true decorator
        return deco
| gpl-3.0 |
topix-hackademy/contact-tools | contacts/urls.py | 1 | 1408 | from django.conf.urls import url
from rest_framework.urlpatterns import format_suffix_patterns
from . import views_company, view_contact, view_role, view_company_type
# URL routing table for the contacts app; the handlers live in the view
# modules imported above (one module per resource type).
urlpatterns = [
    url(r'^company-type/$', view_company_type.all_company_types),
    url(r'^company-type/(?P<id>[0-9]+)/$', view_company_type.single_company_type),
    url(r'^role/$', view_role.all_roles),
    url(r'^role/(?P<id>[0-9]+)/$', view_role.single_role),
    url(r'^contact/$', view_contact.all_contacts),
    url(r'^contact/(?P<id>[0-9]+)/$', view_contact.single_contact),
    url(r'^contact-by-csid/(?P<csid>[0-9]+)/$', view_contact.get_contact_by_csid),
    url(r'^company/$', views_company.all_companies),
    url(r'^company/(?P<id>[0-9]+)/$', views_company.single_company),
    # Lookup routes keyed by alternate identifiers (email, username, code, csid).
    url(r'^contact-by-email/(?P<email>[\w.%+-]+@[A-Za-z0-9.-]+\.[A-Za-z]{2,4})/$', view_contact.get_contact_by_email),
    url(r'^contact-by-username/(?P<username>[\w\s]+)/$', view_contact.get_contact_by_username),
    url(r'^company-by-code/(?P<code>\w{0,50})/$', views_company.get_company_by_code),
    url(r'^company-by-csid/(?P<csid>[0-9]+)/$', views_company.get_company_by_csid),
    url(r'^company-freesearch/(?P<searchstring>[\w\s-]+)/$', views_company.get_company_freesearch),
    url(r'^relation/$', view_contact.all_relations),
    url(r'^$', views_company.index, name='index'),
]

# Let clients append an explicit format suffix (e.g. ".json") to any route.
urlpatterns = format_suffix_patterns(urlpatterns)
| mit |
nansencenter/DAPPER | dapper/mods/Lorenz63/sakov2012.py | 1 | 1726 | """Reproduce results from Table 1 `bib.sakov2012iterative`."""
import numpy as np
import dapper.mods as modelling
from dapper.mods.Lorenz63 import LPs, Tplot, dstep_dx, step, x0
# Time sequencing: step dt=0.01, observations every 25 steps (dkObs),
# 1000 observation epochs.
t = modelling.Chronology(0.01, dkObs=25, KObs=1000, Tplot=Tplot, BurnIn=4*Tplot)

Nx = len(x0)  # state dimension, taken from the model's reference point x0

# Dynamical model: Lorenz-63 step function plus its tangent linear model.
Dyn = {
    'M': Nx,
    'model': step,
    'linear': dstep_dx,
    'noise': 0,
}

# Initial condition: Gaussian centred on x0 with (co)variance 2.
X0 = modelling.GaussRV(C=2, mu=x0)

jj = np.arange(Nx)  # obs_inds: observe every state component
Obs = modelling.partial_Id_Obs(Nx, jj)
Obs['noise'] = 2  # modelling.GaussRV(C=CovMat(2*eye(Nx)))

HMM = modelling.HiddenMarkovModel(Dyn, Obs, t, X0)

HMM.liveplotters = LPs(jj)
####################
# Suggested tuning
####################
# from dapper.mods.Lorenz63.sakov2012 import HMM # rmse.a:
# xps += Climatology() # 7.6
# xps += OptInterp() # 1.25
# xps += Var3D(xB=0.1) # 1.04
# xps += ExtKF(infl=180) # 0.92
# xps += EnKF('Sqrt', N=3 , infl=1.30) # 0.80
# xps += EnKF('Sqrt', N=10, infl=1.02,rot=True) # 0.60
# xps += EnKF('PertObs',N=10, infl=1.04) # 0.65
# xps += EnKF('PertObs',N=100, infl=1.01) # 0.56
# xps += EnKF_N( N=3) # 0.60
# xps += EnKF_N( N=10, rot=True) # 0.54
# xps += iEnKS('Sqrt', N=10, infl=1.02,rot=True) # 0.31
# xps += PartFilt( N=100 ,reg=2.4,NER=0.3) # 0.38
# xps += PartFilt( N=800 ,reg=0.9,NER=0.2) # 0.28
# xps += PartFilt( N=4000,reg=0.7,NER=0.05) # 0.27
# xps += PFxN(xN=1000, N=30 ,Qs=2 ,NER=0.2) # 0.56
| mit |
OpenNFT/OpenNFT | opennft/config.py | 1 | 3927 | # -*- coding: utf-8 -*-
"""
__________________________________________________________________________
Copyright (C) 2016-2021 OpenNFT.org
"""
import os
import pyqtgraph as pg
LOG_LEVEL = 'DEBUG'

# Filesystem layout, anchored at the installed package directory.
ROOT_PATH = os.path.abspath(os.path.dirname(__file__))
UI_PATH = os.path.join(ROOT_PATH, 'ui')
PLUGIN_PATH = os.path.join(ROOT_PATH, 'plugins')
OpenNFT_ICON = os.path.join(ROOT_PATH, 'ui', 'images', 'appicon.png')
MATLAB_FUNCTIONS_PATH = os.path.join(ROOT_PATH, 'matlab')

# Matlab sessions: shared-engine names and startup flags for each helper.
MAIN_MATLAB_NAME = 'MATLAB_NFB_MAIN'
PTB_MATLAB_NAME = 'MATLAB_NFB_PTB'
SPM_MATLAB_NAME = 'MATLAB_NFB_SPM'
MODEL_HELPER_MATLAB_NAME = 'MATLAB_NFB_MODEL_HELPER'
MAIN_MATLAB_STARTUP_OPTIONS = '-nodesktop'
PTB_MATLAB_STARTUP_OPTIONS = '-nodesktop'
SPM_MATLAB_STARTUP_OPTIONS = '-nodesktop'
MODEL_HELPER_MATLAB_STARTUP_OPTIONS = '-nodesktop'
MATLAB_NAME_SUFFIX = ''

# MRI scan file extensions
DICOM_FILES_EXTENSION = '.dcm'
IMAPH_FILES_EXTENSION = '.img'
# MRI triggering is required
USE_MRPULSE = False
# Time between two iterations
MAIN_LOOP_CALL_PERIOD = 30  # ms
# Fast offline loop for debugging
USE_FAST_OFFLINE_LOOP = True
# currently used only for DCM feedback
USE_MATLAB_MODEL_HELPER = False
# use PTB helper and include PTB option in parameters
USE_PTB_HELPER = True
# use only when FFileDialog.by crashes when opening the dialog windows
DONOT_USE_QFILE_NATIVE_DIALOG = False
# the length of the TimeVector
# TIMEVECTOR_LENGTH = 8
# plotting initialization
PLOT_GRID_ALPHA = 0.7
ROI_PLOT_WIDTH = 2.0
MUSTER_Y_LIMITS = (-32767, 32768)
# transparency of design template overlay
MUSTER_PLOT_ALPHA = 50
MAX_ROI_NAME_LENGTH = 6
# Per-ROI pen colors for the time-series plots (one entry per ROI, RGBA).
ROI_PLOT_COLORS = [
    pg.mkColor(0, 0, 255, 255),
    pg.mkColor(0, 255, 255, 255),
    pg.mkColor(0, 255, 0, 255),
    pg.mkColor(255, 0, 255, 255),
    pg.mkColor(255, 0, 0, 255),
    pg.mkColor(255, 255, 0, 255),
    pg.mkColor(140, 200, 240, 255),
    pg.mkColor(208, 208, 147, 255),
    pg.mkColor(147, 0, 0, 255),
    pg.mkColor(0, 0, 0, 255)
]

# Outline colors for the design-template ("muster") overlay, RGBA tuples.
MUSTER_PEN_COLORS = [
    (73, 137, 255, 255),
    (255, 103, 86, 255),
    (22, 255, 104, 255),
    (200, 200, 100, 255),
    (125, 125, 125, 255),
    (200, 100, 200, 255),
    (100, 200, 200, 255),
    (255, 22, 104, 255),
    (250, 104, 22, 255),
    (245, 245, 245, 255)
]

# Fill colors for the same overlay; alpha comes from MUSTER_PLOT_ALPHA above.
MUSTER_BRUSH_COLORS = [
    (124, 196, 255, MUSTER_PLOT_ALPHA),
    (255, 156, 117, MUSTER_PLOT_ALPHA),
    (127, 255, 157, MUSTER_PLOT_ALPHA),
    (200, 200, 100, MUSTER_PLOT_ALPHA),
    (125, 125, 125, MUSTER_PLOT_ALPHA),
    (200, 100, 200, MUSTER_PLOT_ALPHA),
    (100, 200, 200, MUSTER_PLOT_ALPHA),
    (255, 22, 104, MUSTER_PLOT_ALPHA),
    (250, 104, 22, MUSTER_PLOT_ALPHA),
    (245, 245, 245, MUSTER_PLOT_ALPHA)
]

# Motion correction plot colors
MC_PLOT_COLORS = [
    (255, 123, 0),  # translations - x, y, z
    (255, 56, 109),
    (127, 0, 255),
    (0, 46, 255),  # rotations - alpha, betta, gamma
    (0, 147, 54),
    (145, 130, 43),
]
# Projection views reuse the ROI palette.
PROJ_ROI_COLORS = ROI_PLOT_COLORS

# debugging use only
USE_SLEEP_IN_STOP = False
HIDE_TEST_BTN = True

# rtQA may cause linear performance loss on the big data
# due to saving process of iGLM quality parameters
USE_RTQA = True
USE_IGLM = True
USE_ROI = True
FIRST_SNR_VOLUME = 2

# zero padding settings
zeroPaddingFlag = True
nrZeroPadVol = 3

# FD defaults
DEFAULT_FD_RADIUS = 50  # radius multiplying angular displacement in FD computation
DEFAULT_FD_THRESHOLDS = [0.1, 0.2, 0.5]  # FD thresholds to display by default

# plot display defaults
PLOT_BACKGROUND_COLOR = (255, 255, 255)
PLOT_PEN_COLORS = [
    # colors used to plot motion correction metrics
    pg.mkPen(pg.mkColor(0, 46, 255), width=1.2),
    pg.mkPen(pg.mkColor(255, 123, 0), width=1.2),
    pg.mkPen(pg.mkColor(255, 56, 109), width=1.2),
    pg.mkPen(pg.mkColor(127, 0, 255), width=1.2),
    pg.mkPen(pg.mkColor(0, 147, 54), width=1.2),
    pg.mkPen(pg.mkColor(145, 130, 43), width=1.2),
    pg.mkPen(pg.mkColor(0, 0, 0), width=1.2)
]
| gpl-3.0 |
buddyli/private2w | controller/type_oper.py | 2 | 1592 | #!/usr/bin/env python
#-*- encoding:utf-8 -*-
from bottle import route, mako_template as template, redirect, request, response, get, post
from bottle import static_file, view #为了不经过controller直接返回诸如html,css等静态文件引入
from model.documents import *
from setting import *
DATE_FORMAT = '%Y-%m-%d %H:%M:%S' # 入库格式化时间
@get('/to_add_type')
def to_add_type():
    """Render the 'add type' form page."""
    return template('views/system/type/add', site_opt = site_opt)
@post('/add_type', method = 'POST')
def add_item():
    """Create a new Type from the posted form and redirect to the list page."""
    # request.params picks up parameters sent via either GET or POST
    name = request.params.get('name')
    typeObj = Type(name = unicode(name, 'utf8'), addTime = datetime.now())
    typeObj.save()
    return redirect('/list_type')
@route('/list_type')
def list_item():
    """List Type documents, paged via optional 'start'/'size' query params."""
    offset = int(request.params.get('start') or '0')
    limit = int(request.params.get('size') or '1000')
    page = Type.objects[offset:(offset + limit)]
    return template('views/system/type/list',
                    data={'types': page},
                    site_opt=site_opt)
@route('/del_type')
def del_item():
    """Delete the Type whose id is given in the query string, then re-list."""
    type_id = request.params.get('id')
    Type.objects(id=type_id).delete()
    redirect('/list_type')
@route('/modify_type', method='POST')
def modify_item():
    """Rename an existing Type and return to the list page."""
    type_id = request.params.get('id')
    new_name = unicode(request.params.get('name'), 'utf8')
    Type.objects(id=type_id).update(set__name=new_name)
    redirect('/list_type')
@route('/to_modify_type')
def to_modify_item():
    """Render the edit form pre-filled with the selected Type."""
    type_id = request.params.get('id')
    selected = Type.objects(id=type_id)[0]
    return template('views/system/type/edit',
                    data={'item': selected},
                    site_opt=site_opt)
jeffalexandro/mx-app | node_modules/npm/node_modules/node-gyp/gyp/pylib/gyp/xml_fix.py | 2767 | 2174 | # Copyright (c) 2011 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Applies a fix to CR LF TAB handling in xml.dom.
Fixes this: http://code.google.com/p/chromium/issues/detail?id=76293
Working around this: http://bugs.python.org/issue5752
TODO(bradnelson): Consider dropping this when we drop XP support.
"""
import xml.dom.minidom
def _Replacement_write_data(writer, data, is_attrib=False):
    """Writes datachars to writer, XML-escaping them.

    Replacement for xml.dom.minidom._write_data.  Escapes the standard XML
    special characters, and — when writing an attribute value — also encodes
    CR/LF/TAB as numeric character references so they survive a round trip
    (see http://bugs.python.org/issue5752).

    Args:
        writer: file-like object with a write() method.
        data: text to escape and write.
        is_attrib: True when data is an attribute value.
    """
    # NOTE: the entity strings below restore the upstream gyp behavior; the
    # checked-in copy had been mangled (entities decoded back to raw chars).
    data = data.replace("&", "&amp;").replace("<", "&lt;")
    data = data.replace("\"", "&quot;").replace(">", "&gt;")
    if is_attrib:
        data = data.replace(
            "\r", "&#xD;").replace(
            "\n", "&#xA;").replace(
            "\t", "&#x9;")
    writer.write(data)
def _Replacement_writexml(self, writer, indent="", addindent="", newl=""):
    """Replacement for xml.dom.minidom.Element.writexml.

    Identical to the stock implementation except attribute values go through
    _Replacement_write_data, so CR/LF/TAB in attributes are preserved.

    Args:
        writer: file-like object the XML is written to.
        indent: current indentation.
        addindent: indentation to add to higher levels.
        newl: newline string.
    """
    writer.write(indent + "<" + self.tagName)
    attrs = self._get_attributes()
    # sorted() instead of keys()/list.sort(): behaves identically on
    # Python 2 and also works on Python 3, where keys() is a view
    # that has no sort() method.
    for a_name in sorted(attrs.keys()):
        writer.write(" %s=\"" % a_name)
        _Replacement_write_data(writer, attrs[a_name].value, is_attrib=True)
        writer.write("\"")
    if self.childNodes:
        writer.write(">%s" % newl)
        for node in self.childNodes:
            node.writexml(writer, indent + addindent, addindent, newl)
        writer.write("%s</%s>%s" % (indent, self.tagName, newl))
    else:
        writer.write("/>%s" % newl)
class XmlFix(object):
    """Object to manage temporary patching of xml.dom.minidom.

    Constructing an instance installs the _Replacement_* hooks; Cleanup()
    (or garbage collection via __del__) restores the originals.
    """

    def __init__(self):
        # Preserve current xml.dom.minidom functions.
        self.write_data = xml.dom.minidom._write_data
        self.writexml = xml.dom.minidom.Element.writexml
        # Inject replacement versions of a function and a method.
        xml.dom.minidom._write_data = _Replacement_write_data
        xml.dom.minidom.Element.writexml = _Replacement_writexml

    def Cleanup(self):
        # Restore the saved originals.  write_data doubles as the "still
        # patched" flag, so a second Cleanup() (or __del__ after an explicit
        # Cleanup) is a no-op.
        if self.write_data:
            xml.dom.minidom._write_data = self.write_data
            xml.dom.minidom.Element.writexml = self.writexml
            self.write_data = None

    def __del__(self):
        self.Cleanup()
| apache-2.0 |
marcli/sos | sos/plugins/kernelrt.py | 14 | 1587 | # -*- python -*-
# -*- coding: utf-8 -*-
#
# Copyright 2012 Red Hat Inc.
# Guy Streeter <streeter@redhat.com>
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; version 2.
#
# This application is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
from sos.plugins import Plugin, RedHatPlugin
class KernelRT(Plugin, RedHatPlugin):
    '''Realtime kernel variant
    '''

    plugin_name = 'kernelrt'
    profiles = ('system', 'hardware', 'kernel', 'mrg')

    # /sys/kernel/realtime exists only when the realtime kernel is booted;
    # the plugin is therefore only triggered on such systems.
    files = ('/sys/kernel/realtime',)

    def setup(self):
        """Collect realtime scheduling configuration and clocksource state."""
        clocksource = '/sys/devices/system/clocksource/clocksource0/'
        copy_specs = [
            '/etc/rtgroups',
            '/proc/sys/kernel/sched_rt_period_us',
            '/proc/sys/kernel/sched_rt_runtime_us',
            '/sys/kernel/realtime',
        ]
        copy_specs.extend(
            clocksource + leaf
            for leaf in ('available_clocksource', 'current_clocksource'))
        self.add_copy_spec(copy_specs)
        # note: rhbz#1059685 'tuna - NameError: global name 'cgroups' is not
        # defined' — this command throws an exception on versions prior to
        # 0.10.4-5.
        self.add_cmd_output('tuna -CP')
# vim: set et ts=4 sw=4 :
| gpl-2.0 |
wido/cloudstack | test/integration/component/test_snapshots.py | 7 | 46019 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
""" P1 tests for Snapshots
"""
# Import Local Modules
from nose.plugins.attrib import attr
from marvin.cloudstackTestCase import cloudstackTestCase
from marvin.lib.base import (Snapshot,
Template,
VirtualMachine,
Account,
ServiceOffering,
DiskOffering,
Volume)
from marvin.lib.common import (get_domain,
get_zone,
get_template,
list_events,
list_volumes,
list_snapshots,
list_templates,
list_virtual_machines,
)
from marvin.lib.utils import (cleanup_resources,
format_volume_to_ext3,
random_gen,
is_snapshot_on_nfs,
get_hypervisor_type)
from marvin.cloudstackAPI import detachVolume
import time
class Services:
    """Test Snapshots Services

    Static configuration consumed by the snapshot tests: account and
    offering definitions, VM deployment parameters, snapshot policies
    and the filesystem paths used when verifying data written to disk.
    """

    def __init__(self):
        self.services = {
            "account": {
                "email": "test@test.com",
                "firstname": "Test",
                "lastname": "User",
                "username": "test",
                # Random characters are appended for unique
                # username
                "password": "password",
            },
            "service_offering": {
                "name": "Tiny Instance",
                "displaytext": "Tiny Instance",
                "cpunumber": 1,
                "cpuspeed": 200,  # in MHz
                "memory": 256,  # In MBs
            },
            "disk_offering": {
                "displaytext": "Small Disk",
                "name": "Small Disk",
                "disksize": 1
            },
            # VM deployed with an extra data disk attached.
            "server_with_disk":
            {
                "displayname": "Test VM -With Disk",
                "username": "root",
                "password": "password",
                "ssh_port": 22,
                "hypervisor": 'XenServer',
                "privateport": 22,
                "publicport": 22,
                "protocol": 'TCP',
            },
            # VM deployed with the root disk only.
            "server_without_disk":
            {
                "displayname": "Test VM-No Disk",
                "username": "root",
                "password": "password",
                "ssh_port": 22,
                "hypervisor": 'XenServer',
                "privateport": 22,
                # For NAT rule creation
                "publicport": 22,
                "protocol": 'TCP',
            },
            "server": {
                "displayname": "TestVM",
                "username": "root",
                "password": "password",
                "ssh_port": 22,
                "hypervisor": 'XenServer',
                "privateport": 22,
                "publicport": 22,
                "protocol": 'TCP',
            },
            "recurring_snapshot": {
                "intervaltype": 'HOURLY',
                # Frequency of snapshots
                "maxsnaps": 1,  # Should be min 2
                "schedule": 1,
                "timezone": 'US/Arizona',
                # Timezone Formats -
                # http://cloud.mindtouch.us/CloudStack_Documentation/Developer's_Guide%3A_CloudStack
            },
            "templates": {
                "displaytext": 'Template',
                "name": 'Template',
                "ostype": "CentOS 5.3 (64-bit)",
                "templatefilter": 'self',
            },
            "volume": {
                "diskname": "APP Data Volume",
                "size": 1,  # in GBs
                # Per-hypervisor device node names for root and data disks,
                # used by the tests when mounting/formatting inside the guest.
                "xenserver": {"rootdiskdevice": "/dev/xvda",
                              "datadiskdevice_1": '/dev/xvdb',
                              "datadiskdevice_2": '/dev/xvdc',  # Data Disk
                              },
                "kvm": {"rootdiskdevice": "/dev/vda",
                        "datadiskdevice_1": "/dev/vdb",
                        "datadiskdevice_2": "/dev/vdc"
                        },
                "vmware": {"rootdiskdevice": "/dev/hda",
                           "datadiskdevice_1": "/dev/hdb",
                           "datadiskdevice_2": "/dev/hdc"
                           }
            },
            # Guest filesystem locations used to write/verify random data.
            "paths": {
                "mount_dir": "/mnt/tmp",
                "sub_dir": "test",
                "sub_lvl_dir1": "test1",
                "sub_lvl_dir2": "test2",
                "random_data": "random.data",
            },
            "ostype": "CentOS 5.3 (64-bit)",
            # Cent OS 5.3 (64 bit)
            "sleep": 60,
            "timeout": 10,
        }
class TestSnapshots(cloudstackTestCase):
@classmethod
def setUpClass(cls):
cls.testClient = super(TestSnapshots, cls).getClsTestClient()
cls.api_client = cls.testClient.getApiClient()
cls.services = Services().services
# Get Zone, Domain and templates
cls.domain = get_domain(cls.api_client)
cls.zone = get_zone(cls.api_client, cls.testClient.getZoneForTests())
cls.services['mode'] = cls.zone.networktype
cls._cleanup = []
cls.unsupportedHypervisor = False
cls.hypervisor = str(get_hypervisor_type(cls.api_client)).lower()
if cls.hypervisor.lower() in ['hyperv', 'lxc']:
cls.unsupportedHypervisor = True
return
cls.disk_offering = DiskOffering.create(
cls.api_client,
cls.services["disk_offering"]
)
cls.template = get_template(
cls.api_client,
cls.zone.id,
cls.services["ostype"]
)
cls.services["domainid"] = cls.domain.id
cls.services["volume"]["zoneid"] = cls.services[
"server_with_disk"]["zoneid"] = cls.zone.id
cls.services["server_with_disk"]["diskoffering"] = cls.disk_offering.id
cls.services["server_without_disk"]["zoneid"] = cls.zone.id
cls.services["templates"]["ostypeid"] = cls.template.ostypeid
cls.services["zoneid"] = cls.zone.id
cls.services["diskoffering"] = cls.disk_offering.id
cls.service_offering = ServiceOffering.create(
cls.api_client,
cls.services["service_offering"]
)
cls._cleanup = [
cls.service_offering,
cls.disk_offering
]
return
@classmethod
def tearDownClass(cls):
try:
# Cleanup resources used
cleanup_resources(cls.api_client, cls._cleanup)
except Exception as e:
raise Exception("Warning: Exception during cleanup : %s" % e)
return
def setUp(self):
self.apiclient = self.testClient.getApiClient()
self.dbclient = self.testClient.getDbConnection()
self.cleanup = []
if self.unsupportedHypervisor:
self.skipTest("Skipping test because unsupported hypervisor: %s" %
self.hypervisor)
# Create VMs, NAT Rules etc
self.account = Account.create(
self.apiclient,
self.services["account"],
domainid=self.domain.id
)
self.cleanup.append(self.account)
self.virtual_machine = self.virtual_machine_with_disk = \
VirtualMachine.create(
self.api_client,
self.services["server_with_disk"],
templateid=self.template.id,
accountid=self.account.name,
domainid=self.account.domainid,
serviceofferingid=self.service_offering.id,
mode=self.services["mode"]
)
return
def tearDown(self):
try:
# Clean up, terminate the created instance, volumes and snapshots
cleanup_resources(self.apiclient, self.cleanup)
except Exception as e:
raise Exception("Warning: Exception during cleanup : %s" % e)
return
@attr(speed="slow")
@attr(tags=["advanced", "advancedns", "smoke"], required_hardware="true")
def test_02_snapshot_data_disk(self):
"""Test Snapshot Data Disk
"""
if self.hypervisor.lower() in ['hyperv']:
self.skipTest("Snapshots feature is not supported on Hyper-V")
volume = list_volumes(
self.apiclient,
virtualmachineid=self.virtual_machine_with_disk.id,
type='DATADISK',
listall=True
)
self.assertEqual(
isinstance(volume, list),
True,
"Check list response returns a valid list"
)
self.debug("Creating a Snapshot from data volume: %s" % volume[0].id)
snapshot = Snapshot.create(
self.apiclient,
volume[0].id,
account=self.account.name,
domainid=self.account.domainid
)
snapshots = list_snapshots(
self.apiclient,
id=snapshot.id
)
self.assertEqual(
isinstance(snapshots, list),
True,
"Check list response returns a valid list"
)
self.assertNotEqual(
snapshots,
None,
"Check if result exists in list item call"
)
self.assertEqual(
snapshots[0].id,
snapshot.id,
"Check resource id in list resources call"
)
self.assertTrue(
is_snapshot_on_nfs(
self.apiclient,
self.dbclient,
self.config,
self.zone.id,
snapshot.id))
return
@attr(speed="slow")
@attr(
tags=[
"advanced",
"advancedns",
"basic",
"sg"],
required_hardware="true")
def test_01_volume_from_snapshot(self):
"""Test Creating snapshot from volume having spaces in name(KVM)
"""
# Validate the following
# 1. Create a virtual machine and data volume
# 2. Attach data volume to VM
# 3. Login to machine; create temp/test directories on data volume
# and write some random data
# 4. Snapshot the Volume
# 5. Create another Volume from snapshot
# 6. Mount/Attach volume to another virtual machine
# 7. Compare data, data should match
if self.hypervisor.lower() in ['hyperv']:
self.skipTest("Snapshots feature is not supported on Hyper-V")
random_data_0 = random_gen(size=100)
random_data_1 = random_gen(size=100)
self.debug("random_data_0 : %s" % random_data_0)
self.debug("random_data_1: %s" % random_data_1)
try:
ssh_client = self.virtual_machine.get_ssh_client()
except Exception as e:
self.fail("SSH failed for VM: %s" %
self.virtual_machine.ipaddress)
volume = Volume.create(
self.apiclient,
self.services["volume"],
zoneid=self.zone.id,
account=self.account.name,
domainid=self.account.domainid,
diskofferingid=self.disk_offering.id
)
self.debug("Created volume with ID: %s" % volume.id)
self.virtual_machine.attach_volume(
self.apiclient,
volume
)
self.debug("Attach volume: %s to VM: %s" %
(volume.id, self.virtual_machine.id))
self.debug("Formatting volume: %s to ext3" % volume.id)
# Format partition using ext3
# Note that this is the second data disk partition of virtual machine
# as it was already containing data disk before attaching the new
# volume, Hence datadiskdevice_2
format_volume_to_ext3(
ssh_client,
self.services["volume"][self.hypervisor]["datadiskdevice_2"]
)
cmds = [
"fdisk -l",
"mkdir -p %s" %
self.services["paths"]["mount_dir"],
"mount -t ext3 %s1 %s" %
(self.services["volume"][
self.hypervisor]["datadiskdevice_2"],
self.services["paths"]["mount_dir"]),
"mkdir -p %s/%s/{%s,%s} " %
(self.services["paths"]["mount_dir"],
self.services["paths"]["sub_dir"],
self.services["paths"]["sub_lvl_dir1"],
self.services["paths"]["sub_lvl_dir2"]),
"echo %s > %s/%s/%s/%s" %
(random_data_0,
self.services["paths"]["mount_dir"],
self.services["paths"]["sub_dir"],
self.services["paths"]["sub_lvl_dir1"],
self.services["paths"]["random_data"]),
"echo %s > %s/%s/%s/%s" %
(random_data_1,
self.services["paths"]["mount_dir"],
self.services["paths"]["sub_dir"],
self.services["paths"]["sub_lvl_dir2"],
self.services["paths"]["random_data"]),
"cat %s/%s/%s/%s" %
(self.services["paths"]["mount_dir"],
self.services["paths"]["sub_dir"],
self.services["paths"]["sub_lvl_dir1"],
self.services["paths"]["random_data"])]
for c in cmds:
self.debug("Command: %s" % c)
result = ssh_client.execute(c)
self.debug(result)
# Unmount the Sec Storage
cmds = [
"umount %s" % (self.services["paths"]["mount_dir"]),
]
for c in cmds:
self.debug("Command: %s" % c)
ssh_client.execute(c)
list_volume_response = Volume.list(
self.apiclient,
virtualmachineid=self.virtual_machine.id,
type='DATADISK',
id=volume.id
)
self.assertEqual(
isinstance(list_volume_response, list),
True,
"Check list volume response for valid data"
)
volume_response = list_volume_response[0]
# Create snapshot from attached volume
snapshot = Snapshot.create(
self.apiclient,
volume_response.id,
account=self.account.name,
domainid=self.account.domainid
)
self.debug("Created snapshot: %s" % snapshot.id)
# Create volume from snapshot
volume_from_snapshot = Volume.create_from_snapshot(
self.apiclient,
snapshot.id,
self.services["volume"],
account=self.account.name,
domainid=self.account.domainid
)
# Detach the volume from virtual machine
self.virtual_machine.detach_volume(
self.apiclient,
volume
)
self.debug("Detached volume: %s from VM: %s" %
(volume.id, self.virtual_machine.id))
self.debug("Created Volume: %s from Snapshot: %s" % (
volume_from_snapshot.id,
snapshot.id))
volumes = Volume.list(
self.apiclient,
id=volume_from_snapshot.id
)
self.assertEqual(
isinstance(volumes, list),
True,
"Check list response returns a valid list"
)
self.assertNotEqual(
len(volumes),
None,
"Check Volume list Length"
)
self.assertEqual(
volumes[0].id,
volume_from_snapshot.id,
"Check Volume in the List Volumes"
)
# Attaching volume to new VM
new_virtual_machine = VirtualMachine.create(
self.apiclient,
self.services["server_without_disk"],
templateid=self.template.id,
accountid=self.account.name,
domainid=self.account.domainid,
serviceofferingid=self.service_offering.id,
mode=self.services["mode"]
)
self.debug("Deployed new VM for account: %s" % self.account.name)
# self.cleanup.append(new_virtual_machine)
self.debug("Attaching volume: %s to VM: %s" % (
volume_from_snapshot.id,
new_virtual_machine.id
))
new_virtual_machine.attach_volume(
self.apiclient,
volume_from_snapshot
)
# Rebooting is required so that newly attached disks are detected
self.debug("Rebooting : %s" % new_virtual_machine.id)
new_virtual_machine.reboot(self.apiclient)
try:
# Login to VM to verify test directories and files
ssh = new_virtual_machine.get_ssh_client()
# Mount datadiskdevice_1 because this is the first data disk of the
# new virtual machine
cmds = [
"fdisk -l",
"mkdir -p %s" %
self.services["paths"]["mount_dir"],
"mount -t ext3 %s1 %s" %
(self.services["volume"][
self.hypervisor]["datadiskdevice_1"],
self.services["paths"]["mount_dir"]),
]
for c in cmds:
self.debug("Command: %s" % c)
result = ssh.execute(c)
self.debug(result)
returned_data_0 = ssh.execute(
"cat %s/%s/%s/%s" % (
self.services["paths"]["mount_dir"],
self.services["paths"]["sub_dir"],
self.services["paths"]["sub_lvl_dir1"],
self.services["paths"]["random_data"]
))
returned_data_1 = ssh.execute(
"cat %s/%s/%s/%s" % (
self.services["paths"]["mount_dir"],
self.services["paths"]["sub_dir"],
self.services["paths"]["sub_lvl_dir2"],
self.services["paths"]["random_data"]
))
except Exception as e:
self.fail("SSH access failed for VM: %s, Exception: %s" %
(new_virtual_machine.ipaddress, e))
self.debug("returned_data_0: %s" % returned_data_0[0])
self.debug("returned_data_1: %s" % returned_data_1[0])
# Verify returned data
self.assertEqual(
random_data_0,
returned_data_0[0],
"Verify newly attached volume contents with existing one"
)
self.assertEqual(
random_data_1,
returned_data_1[0],
"Verify newly attached volume contents with existing one"
)
# Unmount the Sec Storage
cmds = [
"umount %s" % (self.services["paths"]["mount_dir"]),
]
for c in cmds:
self.debug("Command: %s" % c)
ssh_client.execute(c)
return
@attr(speed="slow")
@attr(tags=["advanced", "advancedns", "smoke"], required_hardware="true")
def test_04_delete_snapshot(self):
"""Test Delete Snapshot
"""
# 1. Snapshot the Volume
# 2. Delete the snapshot
# 3. Verify snapshot is removed by calling List Snapshots API
# 4. Verify snapshot was removed from image store
if self.hypervisor.lower() in ['hyperv']:
self.skipTest("Snapshots feature is not supported on Hyper-V")
self.debug("Creating volume under account: %s" % self.account.name)
volume = Volume.create(
self.apiclient,
self.services["volume"],
zoneid=self.zone.id,
account=self.account.name,
domainid=self.account.domainid,
diskofferingid=self.disk_offering.id
)
self.debug("Created volume: %s" % volume.id)
self.debug("Attaching volume to vm: %s" % self.virtual_machine.id)
self.virtual_machine.attach_volume(
self.apiclient,
volume
)
self.debug("Volume attached to vm")
volumes = list_volumes(
self.apiclient,
virtualmachineid=self.virtual_machine.id,
type='DATADISK',
id=volume.id
)
self.assertEqual(
isinstance(volumes, list),
True,
"Check list response returns a valid list"
)
snapshot = Snapshot.create(
self.apiclient,
volumes[0].id,
account=self.account.name,
domainid=self.account.domainid
)
snapshot.delete(self.apiclient)
snapshots = list_snapshots(
self.apiclient,
id=snapshot.id
)
self.assertEqual(
snapshots,
None,
"Check if result exists in list item call"
)
self.assertFalse(
is_snapshot_on_nfs(
self.apiclient,
self.dbclient,
self.config,
self.zone.id,
snapshot.id))
return
@attr(speed="slow")
@attr(
tags=[
"advanced",
"advancedns",
"basic",
"sg"],
required_hardware="true")
def test_03_snapshot_detachedDisk(self):
"""Test snapshot from detached disk
"""
# Validate the following
# 1. login in VM and write some data on data disk(use fdisk to
# partition datadisk,fdisk, and make filesystem using
# mkfs.ext3)
# 2. Detach the data disk and write some data on data disk
# 3. perform the snapshot on the detached volume
# 4. listvolumes with VM id shouldn't show the detached volume
# 5. listSnapshots should list the snapshot that was created
# 6. verify backup_snap_id was non null in the `snapshots` table
if self.hypervisor.lower() in ['hyperv']:
self.skipTest("Snapshots feature is not supported on Hyper-V")
volumes = list_volumes(
self.apiclient,
virtualmachineid=self.virtual_machine.id,
type='DATADISK',
listall=True
)
self.assertEqual(
isinstance(volumes, list),
True,
"Check list response returns a valid list"
)
volume = volumes[0]
random_data_0 = random_gen(size=100)
random_data_1 = random_gen(size=100)
try:
ssh_client = self.virtual_machine.get_ssh_client()
# Format partition using ext3
format_volume_to_ext3(
ssh_client,
self.services["volume"][self.hypervisor]["datadiskdevice_1"]
)
cmds = [
"mkdir -p %s" %
self.services["paths"]["mount_dir"],
"mount %s1 %s" %
(self.services["volume"][
self.hypervisor]["datadiskdevice_1"],
self.services["paths"]["mount_dir"]),
"pushd %s" %
self.services["paths"]["mount_dir"],
"mkdir -p %s/{%s,%s} " %
(self.services["paths"]["sub_dir"],
self.services["paths"]["sub_lvl_dir1"],
self.services["paths"]["sub_lvl_dir2"]),
"echo %s > %s/%s/%s" %
(random_data_0,
self.services["paths"]["sub_dir"],
self.services["paths"]["sub_lvl_dir1"],
self.services["paths"]["random_data"]),
"echo %s > %s/%s/%s" %
(random_data_1,
self.services["paths"]["sub_dir"],
self.services["paths"]["sub_lvl_dir2"],
self.services["paths"]["random_data"]),
"sync",
"umount %s" %
(self.services["paths"]["mount_dir"]),
]
for c in cmds:
self.debug(ssh_client.execute(c))
# detach volume from VM
cmd = detachVolume.detachVolumeCmd()
cmd.id = volume.id
self.apiclient.detachVolume(cmd)
# Create snapshot from detached volume
snapshot = Snapshot.create(self.apiclient, volume.id)
volumes = list_volumes(
self.apiclient,
virtualmachineid=self.virtual_machine.id,
type='DATADISK',
listall=True
)
self.assertEqual(
volumes,
None,
"Check Volume is detached"
)
# Verify the snapshot was created or not
snapshots = list_snapshots(
self.apiclient,
id=snapshot.id
)
self.assertNotEqual(
snapshots,
None,
"Check if result exists in list snapshots call"
)
self.assertEqual(
snapshots[0].id,
snapshot.id,
"Check snapshot id in list resources call"
)
except Exception as e:
self.fail("SSH failed for VM with IP: %s - %s" %
(self.virtual_machine.ssh_ip, e))
qresultset = self.dbclient.execute(
"select id from snapshots where uuid = '%s';"
% snapshot.id
)
self.assertNotEqual(
len(qresultset),
0,
"Check DB Query result set"
)
qresult = qresultset[0]
self.assertNotEqual(
str(qresult[0]),
'NULL',
"Check if backup_snap_id is not null"
)
return
@attr(speed="slow")
@attr(
tags=[
"advanced",
"advancedns",
"smoke",
"xen"],
required_hardware="true")
def test_07_template_from_snapshot(self):
"""Create Template from snapshot
"""
# 1. Login to machine; create temp/test directories on data volume
# 2. Snapshot the Volume
# 3. Create Template from snapshot
# 4. Deploy Virtual machine using this template
# 5. Login to newly created virtual machine
# 6. Compare data in the root disk with the one that was written on the
# volume, it should match
if self.hypervisor.lower() in ['hyperv']:
self.skipTest("Snapshots feature is not supported on Hyper-V")
userapiclient = self.testClient.getUserApiClient(
UserName=self.account.name,
DomainName=self.account.domain)
random_data_0 = random_gen(size=100)
random_data_1 = random_gen(size=100)
try:
# Login to virtual machine
ssh_client = self.virtual_machine.get_ssh_client()
cmds = [
"mkdir -p %s" % self.services["paths"]["mount_dir"],
"mount %s1 %s" % (
self.services["volume"][self.hypervisor]["rootdiskdevice"],
self.services["paths"]["mount_dir"]
),
"mkdir -p %s/%s/{%s,%s} " % (
self.services["paths"]["mount_dir"],
self.services["paths"]["sub_dir"],
self.services["paths"]["sub_lvl_dir1"],
self.services["paths"]["sub_lvl_dir2"]
),
"echo %s > %s/%s/%s/%s" % (
random_data_0,
self.services["paths"]["mount_dir"],
self.services["paths"]["sub_dir"],
self.services["paths"]["sub_lvl_dir1"],
self.services["paths"]["random_data"]
),
"echo %s > %s/%s/%s/%s" % (
random_data_1,
self.services["paths"]["mount_dir"],
self.services["paths"]["sub_dir"],
self.services["paths"]["sub_lvl_dir2"],
self.services["paths"]["random_data"]
),
"sync",
]
for c in cmds:
self.debug(c)
result = ssh_client.execute(c)
self.debug(result)
except Exception as e:
self.fail("SSH failed for VM with IP address: %s" %
self.virtual_machine.ipaddress)
# Unmount the Volume
cmds = [
"umount %s" % (self.services["paths"]["mount_dir"]),
]
for c in cmds:
self.debug(c)
ssh_client.execute(c)
volumes = list_volumes(
userapiclient,
virtualmachineid=self.virtual_machine.id,
type='ROOT',
listall=True
)
self.assertEqual(
isinstance(volumes, list),
True,
"Check list response returns a valid list"
)
volume = volumes[0]
# Create a snapshot of volume
snapshot = Snapshot.create(
userapiclient,
volume.id,
account=self.account.name,
domainid=self.account.domainid
)
self.debug("Snapshot created from volume ID: %s" % volume.id)
# Generate template from the snapshot
template = Template.create_from_snapshot(
userapiclient,
snapshot,
self.services["templates"]
)
self.cleanup.append(template)
self.debug("Template created from snapshot ID: %s" % snapshot.id)
# Verify created template
templates = list_templates(
userapiclient,
templatefilter=self.services["templates"]["templatefilter"],
id=template.id
)
self.assertNotEqual(
templates,
None,
"Check if result exists in list item call"
)
self.assertEqual(
templates[0].id,
template.id,
"Check new template id in list resources call"
)
self.debug("Deploying new VM from template: %s" % template.id)
# Deploy new virtual machine using template
new_virtual_machine = VirtualMachine.create(
userapiclient,
self.services["server_without_disk"],
templateid=template.id,
accountid=self.account.name,
domainid=self.account.domainid,
serviceofferingid=self.service_offering.id,
mode=self.services["mode"]
)
try:
# Login to VM & mount directory
ssh = new_virtual_machine.get_ssh_client()
cmds = [
"mkdir -p %s" % self.services["paths"]["mount_dir"],
"mount %s1 %s" % (
self.services["volume"][self.hypervisor]["rootdiskdevice"],
self.services["paths"]["mount_dir"]
)
]
for c in cmds:
ssh.execute(c)
returned_data_0 = ssh.execute("cat %s/%s/%s/%s" % (
self.services["paths"]["mount_dir"],
self.services["paths"]["sub_dir"],
self.services["paths"]["sub_lvl_dir1"],
self.services["paths"]["random_data"]
))
self.debug(returned_data_0)
returned_data_1 = ssh.execute("cat %s/%s/%s/%s" % (
self.services["paths"]["mount_dir"],
self.services["paths"]["sub_dir"],
self.services["paths"]["sub_lvl_dir2"],
self.services["paths"]["random_data"]
))
self.debug(returned_data_1)
except Exception as e:
self.fail("SSH failed for VM with IP address: %s" %
new_virtual_machine.ipaddress)
# Verify returned data
self.assertEqual(
random_data_0,
returned_data_0[0],
"Verify newly attached volume contents with existing one"
)
self.assertEqual(
random_data_1,
returned_data_1[0],
"Verify newly attached volume contents with existing one"
)
# Unmount the volume
cmds = [
"umount %s" % (self.services["paths"]["mount_dir"]),
]
try:
for c in cmds:
self.debug(c)
ssh_client.execute(c)
except Exception as e:
self.fail("SSH failed for VM with IP address: %s, Exception: %s" %
(new_virtual_machine.ipaddress, e))
return
class TestCreateVMSnapshotTemplate(cloudstackTestCase):
    """Verifies that a template can be created from a ROOT-volume snapshot
    and that a new VM can then be deployed from that template."""
    @classmethod
    def setUpClass(cls):
        """One-time setup: resolve zone/domain/template and create the
        account and service offering shared by all tests in this class."""
        cls.testClient = super(
            TestCreateVMSnapshotTemplate,
            cls).getClsTestClient()
        cls.api_client = cls.testClient.getApiClient()
        cls._cleanup = []
        cls.services = Services().services
        # Get Zone, Domain and templates
        cls.domain = get_domain(cls.api_client)
        cls.zone = get_zone(cls.api_client, cls.testClient.getZoneForTests())
        cls.services['mode'] = cls.zone.networktype
        cls.unsupportedHypervisor = False
        cls.hypervisor = get_hypervisor_type(cls.api_client)
        if cls.hypervisor.lower() in ['hyperv', 'lxc']:
            # Snapshots are unsupported on these hypervisors; setUp() will
            # skip every test in this class when this flag is set.
            cls.unsupportedHypervisor = True
            return
        cls.template = get_template(
            cls.api_client,
            cls.zone.id,
            cls.services["ostype"]
        )
        cls.services["domainid"] = cls.domain.id
        cls.services["server"]["zoneid"] = cls.zone.id
        # Create VMs, NAT Rules etc
        cls.account = Account.create(
            cls.api_client,
            cls.services["account"],
            domainid=cls.domain.id
        )
        cls.service_offering = ServiceOffering.create(
            cls.api_client,
            cls.services["service_offering"]
        )
        # Deleting the account also removes resources created under it.
        cls._cleanup = [
            cls.service_offering,
            cls.account,
        ]
        return
    @classmethod
    def tearDownClass(cls):
        """Remove the class-level service offering and account."""
        try:
            # Cleanup resources used
            cleanup_resources(cls.api_client, cls._cleanup)
        except Exception as e:
            raise Exception("Warning: Exception during cleanup : %s" % e)
        return
    def setUp(self):
        """Per-test setup; skips the test on unsupported hypervisors."""
        self.apiclient = self.testClient.getApiClient()
        self.dbclient = self.testClient.getDbConnection()
        self.cleanup = []
        if self.unsupportedHypervisor:
            self.skipTest("snapshots are not supported on %s" % self.hypervisor.lower())
        return
    def tearDown(self):
        """Delete the per-test resources (snapshot, template, VM)."""
        try:
            # Clean up, terminate the created instance, volumes and snapshots
            cleanup_resources(self.apiclient, self.cleanup)
        except Exception as e:
            raise Exception("Warning: Exception during cleanup : %s" % e)
        return
    @attr(speed="slow")
    @attr(tags=["advanced", "advancedns"], required_hardware="true")
    def test_01_createVM_snapshotTemplate(self):
        """Test create VM, Snapshot and Template
        """
        # Validate the following
        # 1. Deploy VM using default template, small service offering
        # and small data disk offering.
        # 2. Perform snapshot on the root disk of this VM.
        # 3. Create a template from snapshot.
        # 4. Create a instance from above created template.
        # 5. listSnapshots should list the snapshot that was created.
        # 6. verify that secondary storage NFS share contains the reqd
        # volume under /secondary/snapshots/$accountid/
        # $volumeid/$snapshot_uuid
        # 7. verify backup_snap_id was non null in the `snapshots` table
        # 8. listTemplates() should return the newly created Template,
        # and check for template state as READY"
        # 9. listVirtualMachines() command should return the deployed VM.
        # State of this VM should be Running.
        # Create Virtual Machine
        if self.hypervisor.lower() in ['hyperv']:
            self.skipTest("Snapshots feature is not supported on Hyper-V")
        # Run the whole scenario as the (non-admin) account user.
        userapiclient = self.testClient.getUserApiClient(
            UserName=self.account.name,
            DomainName=self.account.domain)
        self.virtual_machine = VirtualMachine.create(
            userapiclient,
            self.services["server"],
            templateid=self.template.id,
            accountid=self.account.name,
            domainid=self.account.domainid,
            serviceofferingid=self.service_offering.id
        )
        self.debug("Created VM with ID: %s" % self.virtual_machine.id)
        # Get the Root disk of VM
        volumes = list_volumes(
            userapiclient,
            virtualmachineid=self.virtual_machine.id,
            type='ROOT',
            listall=True
        )
        volume = volumes[0]
        # Create a snapshot from the ROOTDISK
        snapshot = Snapshot.create(userapiclient, volume.id)
        self.debug("Snapshot created: ID - %s" % snapshot.id)
        self.cleanup.append(snapshot)
        snapshots = list_snapshots(
            userapiclient,
            id=snapshot.id
        )
        self.assertEqual(
            isinstance(snapshots, list),
            True,
            "Check list response returns a valid list"
        )
        self.assertNotEqual(
            snapshots,
            None,
            "Check if result exists in list snapshots call"
        )
        self.assertEqual(
            snapshots[0].id,
            snapshot.id,
            "Check snapshot id in list resources call"
        )
        # Log the query a maintainer can run to inspect the DB row.
        self.debug(
            "select backup_snap_id, account_id, volume_id from snapshots where uuid = '%s';" %
            snapshot.id)
        snapshot_uuid = snapshot.id
        # Generate template from the snapshot
        template = Template.create_from_snapshot(
            userapiclient,
            snapshot,
            self.services["templates"]
        )
        self.debug("Created template from snapshot: %s" % template.id)
        self.cleanup.append(template)
        templates = list_templates(
            userapiclient,
            templatefilter=self.services["templates"]["templatefilter"],
            id=template.id
        )
        self.assertNotEqual(
            templates,
            None,
            "Check if result exists in list item call"
        )
        self.assertEqual(
            templates[0].isready,
            True,
            "Check new template state in list templates call"
        )
        # Deploy new virtual machine using template
        new_virtual_machine = VirtualMachine.create(
            userapiclient,
            self.services["server"],
            templateid=template.id,
            accountid=self.account.name,
            domainid=self.account.domainid,
            serviceofferingid=self.service_offering.id
        )
        self.debug("Created VM with ID: %s from template: %s" % (
            new_virtual_machine.id,
            template.id
        ))
        self.cleanup.append(new_virtual_machine)
        # Newly deployed VM should be 'Running'
        virtual_machines = list_virtual_machines(
            userapiclient,
            id=new_virtual_machine.id,
            account=self.account.name,
            domainid=self.account.domainid
        )
        self.assertEqual(
            isinstance(virtual_machines, list),
            True,
            "Check list response returns a valid list"
        )
        self.assertNotEqual(
            len(virtual_machines),
            0,
            "Check list virtual machines response"
        )
        for virtual_machine in virtual_machines:
            self.assertEqual(
                virtual_machine.state,
                'Running',
                "Check list VM response for Running state"
            )
        # Finally confirm the snapshot file exists on secondary storage.
        self.assertTrue(
            is_snapshot_on_nfs(
                self.apiclient,
                self.dbclient,
                self.config,
                self.zone.id,
                snapshot_uuid))
        return
class TestSnapshotEvents(cloudstackTestCase):
    """Verifies that snapshot creation/deletion emits the expected
    events in listEvents()."""
    @classmethod
    def setUpClass(cls):
        """One-time setup: create the account, service offering and the
        virtual machine whose ROOT disk is snapshotted by the test."""
        cls.testClient = super(TestSnapshotEvents, cls).getClsTestClient()
        cls.api_client = cls.testClient.getApiClient()
        cls.services = Services().services
        cls._cleanup = []
        # Get Zone, Domain and templates
        cls.domain = get_domain(cls.api_client)
        cls.zone = get_zone(cls.api_client, cls.testClient.getZoneForTests())
        cls.services['mode'] = cls.zone.networktype
        cls.unsupportedHypervisor = False
        cls.hypervisor = get_hypervisor_type(cls.api_client)
        if cls.hypervisor.lower() in ['hyperv', 'lxc']:
            # Snapshots are unsupported on these hypervisors; setUp() will
            # skip every test in this class when this flag is set.
            cls.unsupportedHypervisor = True
            return
        template = get_template(
            cls.api_client,
            cls.zone.id,
            cls.services["ostype"]
        )
        cls.services["server"]["zoneid"] = cls.zone.id
        cls.services["template"] = template.id
        # Create VMs, NAT Rules etc
        cls.account = Account.create(
            cls.api_client,
            cls.services["account"],
            domainid=cls.domain.id
        )
        cls.services["account"] = cls.account.name
        cls.service_offering = ServiceOffering.create(
            cls.api_client,
            cls.services["service_offering"]
        )
        cls.virtual_machine = VirtualMachine.create(
            cls.api_client,
            cls.services["server"],
            templateid=template.id,
            accountid=cls.account.name,
            domainid=cls.account.domainid,
            serviceofferingid=cls.service_offering.id
        )
        # Deleting the account also removes the VM created under it.
        cls._cleanup = [
            cls.service_offering,
            cls.account,
        ]
        return
    @classmethod
    def tearDownClass(cls):
        """Remove the class-level service offering and account."""
        try:
            # Cleanup resources used
            cleanup_resources(cls.api_client, cls._cleanup)
        except Exception as e:
            raise Exception("Warning: Exception during cleanup : %s" % e)
        return
    def setUp(self):
        """Per-test setup; skips the test on unsupported hypervisors."""
        self.apiclient = self.testClient.getApiClient()
        self.dbclient = self.testClient.getDbConnection()
        self.cleanup = []
        if self.unsupportedHypervisor:
            self.skipTest("snapshots are not supported on %s" % self.hypervisor)
        return
    def tearDown(self):
        """Delete the per-test resources."""
        try:
            # Clean up, terminate the created instance, volumes and snapshots
            cleanup_resources(self.apiclient, self.cleanup)
        except Exception as e:
            raise Exception("Warning: Exception during cleanup : %s" % e)
        return
    @attr(speed="slow")
    @attr(tags=["advanced", "advancedns"], required_hardware="false")
    def test_05_snapshot_events(self):
        """Test snapshot events
        """
        # Validate the following
        # 1. Perform snapshot on the root disk of this VM and
        # check the events/alerts.
        # 2. delete the snapshots and check the events/alerts
        # 3. listEvents() shows created/deleted snapshot events
        if self.hypervisor.lower() in ['hyperv']:
            self.skipTest("Snapshots feature is not supported on Hyper-V")
        # Get the Root disk of VM
        volumes = list_volumes(
            self.apiclient,
            virtualmachineid=self.virtual_machine.id,
            type='ROOT',
            listall=True
        )
        self.assertEqual(
            isinstance(volumes, list),
            True,
            "Check list response returns a valid list"
        )
        volume = volumes[0]
        # Create a snapshot from the ROOTDISK
        snapshot = Snapshot.create(self.apiclient, volume.id)
        self.debug("Snapshot created with ID: %s" % snapshot.id)
        snapshots = list_snapshots(
            self.apiclient,
            id=snapshot.id
        )
        self.assertEqual(
            isinstance(snapshots, list),
            True,
            "Check list response returns a valid list"
        )
        self.assertNotEqual(
            snapshots,
            None,
            "Check if result exists in list snapshots call"
        )
        self.assertEqual(
            snapshots[0].id,
            snapshot.id,
            "Check snapshot id in list resources call"
        )
        snapshot.delete(self.apiclient)
        # Sleep to ensure that snapshot is deleted properly
        time.sleep(self.services["sleep"])
        events = list_events(
            self.apiclient,
            account=self.account.name,
            domainid=self.account.domainid,
            type='SNAPSHOT.DELETE'
        )
        self.assertEqual(
            isinstance(events, list),
            True,
            "Check list response returns a valid list"
        )
        self.assertNotEqual(
            events,
            None,
            "Check if event exists in list events call"
        )
        # The delete event may still be 'Scheduled' if processing lags.
        self.assertIn(
            events[0].state,
            ['Completed', 'Scheduled'],
            "Check events state in list events call"
        )
        return
| apache-2.0 |
jlegendary/servo | tests/wpt/css-tests/tools/pywebsocket/src/test/test_mock.py | 496 | 5168 | #!/usr/bin/env python
#
# Copyright 2011, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Tests for mock module."""
import Queue
import threading
import unittest
import set_sys_path # Update sys.path to locate mod_pywebsocket module.
from test import mock
class MockConnTest(unittest.TestCase):
    """Unit tests for the mock.MockConn class."""
    def setUp(self):
        self._conn = mock.MockConn('ABC\r\nDEFG\r\n\r\nHIJK')
    def test_readline(self):
        # readline() yields one CRLF-terminated line at a time, then ''.
        for expected in ['ABC\r\n', 'DEFG\r\n', '\r\n', 'HIJK', '']:
            self.assertEqual(expected, self._conn.readline())
    def test_read(self):
        # read(n) returns at most n bytes, and '' once exhausted.
        for size, expected in [(6, 'ABC\r\nD'),
                               (9, 'EFG\r\n\r\nHI'),
                               (10, 'JK'),
                               (10, '')]:
            self.assertEqual(expected, self._conn.read(size))
    def test_read_and_readline(self):
        # read() and readline() advance the same stream position.
        self.assertEqual('ABC\r\nD', self._conn.read(6))
        self.assertEqual('EFG\r\n', self._conn.readline())
        self.assertEqual('\r\nHIJK', self._conn.read(9))
        self.assertEqual('', self._conn.readline())
    def test_write(self):
        # written_data() accumulates everything passed to write().
        for chunk in ['Hello\r\n', 'World\r\n']:
            self._conn.write(chunk)
        self.assertEqual('Hello\r\nWorld\r\n', self._conn.written_data())
class MockBlockingConnTest(unittest.TestCase):
    """Unit tests for the mock.MockBlockingConn class."""
    def test_read(self):
        """Data fed in via put_bytes must become readable from the conn,
        even by a reader that is already blocked on readline()."""
        class _LineReader(threading.Thread):
            """Daemon thread that repeatedly calls readline on the given
            conn and pushes each line onto the given sink queue."""
            def __init__(self, conn, sink):
                threading.Thread.__init__(self)
                self._queue = sink
                self._conn = conn
                self.setDaemon(True)
                self.start()
            def run(self):
                while True:
                    self._queue.put(self._conn.readline())
        conn = mock.MockBlockingConn()
        received = Queue.Queue()
        _LineReader(conn, received)
        # Nothing has been fed yet, so the reader must still be blocked.
        self.failUnless(received.empty())
        conn.put_bytes('Foo bar\r\n')
        self.assertEqual('Foo bar\r\n', received.get())
class MockTableTest(unittest.TestCase):
    """Unit tests for the mock.MockTable class."""
    def _check_lookup(self, table):
        # MockTable lookups are case-insensitive for get() and [].
        self.assertEqual('Value', table.get('KEY'))
        self.assertEqual('Value', table['key'])
    def test_create_from_dict(self):
        self._check_lookup(mock.MockTable({'Key': 'Value'}))
    def test_create_from_list(self):
        self._check_lookup(mock.MockTable([('Key', 'Value')]))
    def test_create_from_tuple(self):
        self._check_lookup(mock.MockTable((('Key', 'Value'),)))
    def test_set_and_get(self):
        table = mock.MockTable()
        # Missing keys yield None rather than raising.
        self.assertEqual(None, table.get('Key'))
        table['Key'] = 'Value'
        for key in ('Key', 'key', 'KEY'):
            self.assertEqual('Value', table.get(key))
        for key in ('Key', 'key', 'KEY'):
            self.assertEqual('Value', table[key])
# Allow running this test module directly from the command line.
if __name__ == '__main__':
    unittest.main()
# vi:sts=4 sw=4 et
| mpl-2.0 |
ovaistariq/xb-mgr | lib/buffered_email_handler.py | 2 | 2221 | # (c) 2012, Ovais Tariq <ovaistariq@gmail.com>
#
# This file is part of Xtrabackup Backup Manager
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import logging
import logging.handlers
from config_helper import Config_helper
from email_helper import Email_helper
class Buffered_email_handler(logging.handlers.MemoryHandler):
    """Logging handler that buffers records in memory and emails them as
    a single message when flushed.

    A flush is triggered when a record at ERROR level or above is logged,
    or when the buffer reaches BUFFER_CAPACITY records. The email subject
    reports success or failure for the host depending on whether any
    ERROR-or-worse record was seen.
    """
    # Maximum number of buffered records before a flush is forced.
    BUFFER_CAPACITY = 16*1024
    def __init__(self, host):
        """Initialize the handler for the given backup host.

        Recipient addresses are read from the host's configuration as a
        comma-separated list.
        """
        logging.handlers.MemoryHandler.__init__(self, target=None,
                                                flushLevel=logging.ERROR,
                                                capacity=Buffered_email_handler.BUFFER_CAPACITY)
        self._emailer = Email_helper()
        self._host = host
        config_helper = Config_helper(host=host)
        to_emails = config_helper.get_error_email_recipient()
        self._to_email_list = to_emails.split(',')
        self._error_logged = False
    def flush(self):
        """Email all buffered records as one message and clear the buffer."""
        if not self._error_logged:
            subject = "BACKUP run on %s successfully completed" % self._host
        else:
            subject = "ERROR: BACKUP on %s failed" % self._host
        if len(self.buffer) > 0:
            try:
                # Join once instead of quadratic string concatenation.
                msg = "".join(self.format(record) + "\r\n"
                              for record in self.buffer)
                self._emailer.send_email(subject=subject, msg=msg,
                                         to_email_list=self._to_email_list)
            except Exception:
                # Was a bare "except:": let SystemExit/KeyboardInterrupt
                # propagate while still reporting send failures.
                self.handleError(None)
        self.buffer = []
    def shouldFlush(self, record):
        """Return True when the buffer should be flushed.

        Side effect: latches _error_logged once any ERROR-or-worse record
        is seen, which selects the failure subject used by flush().
        """
        if record.levelno >= logging.ERROR:
            self._error_logged = True
        return (len(self.buffer) >= self.capacity or
                record.levelno >= self.flushLevel)
| gpl-3.0 |
streamlink/streamlink | tests/plugins/test_artetv.py | 3 | 1508 | from streamlink.plugins.artetv import ArteTV
from tests.plugins import PluginCanHandleUrl
class TestPluginCanHandleUrlArteTV(PluginCanHandleUrl):
    """URL-matching tests for the ArteTV plugin; the PluginCanHandleUrl
    base class checks every listed URL against the plugin's matchers."""
    __plugin__ = ArteTV
    # URLs the plugin must accept.
    should_match = [
        # new url
        "http://www.arte.tv/fr/direct/",
        "http://www.arte.tv/de/live/",
        "http://www.arte.tv/de/videos/074633-001-A/gesprach-mit-raoul-peck",
        "http://www.arte.tv/en/videos/071437-010-A/sunday-soldiers",
        "http://www.arte.tv/fr/videos/074633-001-A/entretien-avec-raoul-peck",
        "http://www.arte.tv/pl/videos/069873-000-A/supermama-i-businesswoman",
        # old url - some of them get redirected and some are 404
        "http://www.arte.tv/guide/fr/direct",
        "http://www.arte.tv/guide/de/live",
        "http://www.arte.tv/guide/fr/024031-000-A/le-testament-du-docteur-mabuse",
        "http://www.arte.tv/guide/de/024031-000-A/das-testament-des-dr-mabuse",
        "http://www.arte.tv/guide/en/072544-002-A/christmas-carols-from-cork",
        "http://www.arte.tv/guide/es/068380-000-A/una-noche-en-florencia",
        "http://www.arte.tv/guide/pl/068916-006-A/belle-and-sebastian-route-du-rock",
    ]
    # URLs the plugin must reject.
    should_not_match = [
        # shouldn't match
        "http://www.arte.tv/guide/fr/plus7/",
        "http://www.arte.tv/guide/de/plus7/",
        # shouldn't match - playlists without video ids in url
        "http://www.arte.tv/en/videos/RC-014457/the-power-of-forests/",
        "http://www.arte.tv/en/videos/RC-013118/street-art/",
    ]
| bsd-2-clause |
reinaH/osf.io | website/addons/base/exceptions.py | 23 | 1683 | """
Custom exceptions for add-ons.
"""
class AddonError(Exception):
    """Base class for all add-on related errors."""
    pass
class HookError(AddonError):
    """Add-on hook error."""
    pass
class AddonEnrichmentError(AddonError):
    """Base class for errors raised while enriching an add-on file.

    Subclasses override `renderable_error` (and optionally the `can_*`
    capability flags) to control the message and file actions shown to
    the user; `as_html` combines both into the final markup.
    """
    @property
    def can_delete(self):
        # Subclasses may override to re-enable the delete button.
        return False
    @property
    def can_download(self):
        # Subclasses may override to re-enable the download button.
        return False
    @property
    def renderable_error(self):
        '''A hook to be implemented by subclasses returning
        a html error to be displayed to the user
        Later concatenated with additional style tags
        '''
        # Fixed: the mailto anchor was previously left unclosed,
        # producing invalid HTML.
        return '''
        <div class="alert alert-info" role="alert">
        This file is currently unable to be rendered. <br>
        If this should not have occurred and the issue persists,
        please report it to <a href="mailto:support@osf.io">support@osf.io</a>
        </div>
        '''
    def as_html(self):
        # TODO Refactor me to be all in the front end
        # 2/10/14 ping @chrisseto when refactoring
        additional = ''
        if not self.can_download:
            additional += "<style>.file-download{display: none;}</style>"
        if not self.can_delete:
            additional += "<style>.file-delete{display: none;}</style>"
        return self.renderable_error + additional
class FileDeletedError(AddonEnrichmentError):
    """Enrichment error shown when the requested file has been deleted."""
    @property
    def renderable_error(self):
        """HTML fragment informing the user the file was deleted."""
        return '''
        <div class="alert alert-info" role="alert">
        This file has been deleted.
        </div>
        '''
class FileDoesntExistError(AddonEnrichmentError):
    """Enrichment error shown when the requested file does not exist."""
    @property
    def renderable_error(self):
        """HTML fragment informing the user the file does not exist."""
        return '''
        <div class="alert alert-info" role="alert">
        This file does not exist.
        </div>
        '''
| apache-2.0 |
infobloxopen/infoblox-netmri | infoblox_netmri/api/broker/v3_0_0/device_service_flow_broker.py | 14 | 58685 | from ..broker import Broker
class DeviceServiceFlowBroker(Broker):
controller = "device_service_flows"
def show(self, **kwargs):
"""Shows the details for the specified device service flow.
**Inputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` True
| ``default:`` None
:param DeviceServiceFlowID: The internal NetMRI identifier for this flow description.
:type DeviceServiceFlowID: Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param methods: A list of device service flow methods. The listed methods will be called on each device service flow returned and included in the output. Available methods are: device_service, data_source, device.
:type methods: Array of String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param include: A list of associated object types to include in the output. The listed associations will be returned as outputs named according to the association name (see outputs below). Available includes are: device_service, data_source, device.
:type include: Array of String
**Outputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:return device_service_flow: The device service flow identified by the specified DeviceServiceFlowID.
:rtype device_service_flow: DeviceServiceFlow
"""
return self.api_request(self._get_method_fullname("show"), kwargs)
def index(self, **kwargs):
"""Lists the available device service flows. Any of the inputs listed may be be used to narrow the list; other inputs will be ignored. Of the various ways to query lists, using this method is most efficient.
**Inputs**
| ``api version min:`` 2.6
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param DeviceID: The internal NetMRI identifier for the device to which belongs this flow.
:type DeviceID: Array of Integer
| ``api version min:`` 2.6
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param DeviceServiceFlowID: The internal NetMRI identifier for this flow description.
:type DeviceServiceFlowID: Array of Integer
| ``api version min:`` 2.6
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param DeviceServiceID: The internal NetMRI identifier for the service to which belongs this flow.
:type DeviceServiceID: Array of Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param DeviceGroupID: The internal NetMRI identifier of the device groups to which to limit the results.
:type DeviceGroupID: Array of Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param timestamp: The data returned will represent the device service flows as of this date and time. If omitted, the result will indicate the most recently collected data.
:type timestamp: DateTime
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param methods: A list of device service flow methods. The listed methods will be called on each device service flow returned and included in the output. Available methods are: device_service, data_source, device.
:type methods: Array of String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param include: A list of associated object types to include in the output. The listed associations will be returned as outputs named according to the association name (see outputs below). Available includes are: device_service, data_source, device.
:type include: Array of String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` 0
:param start: The record number to return in the selected page of data. It will always appear, although it may not be the first record. See the :limit for more information.
:type start: Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` 1000
:param limit: The size of the page of data, that is, the maximum number of records returned. The limit size will be used to break the data up into pages and the first page with the start record will be returned. So if you have 100 records and use a :limit of 10 and a :start of 10, you will get records 10-19. The maximum limit is 10000.
:type limit: Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` DeviceServiceFlowID
:param sort: The data field(s) to use for sorting the output. Default is DeviceServiceFlowID. Valid values are DeviceServiceFlowID, DeviceServiceID, DeviceID, DataSourceID, SvfFirstSeenTime, SvfStartTime, SvfEndTime, SvfTimestamp, SvfChangedCols, SvfProtocolNum, SvfProtocolName, SvfSrcDisplayText, SvfSrcPortMin, SvfSrcPortMax, SvfDestDisplayText, SvfDestPortMin, SvfDestPortMax.
:type sort: Array of String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` asc
:param dir: The direction(s) in which to sort the data. Default is 'asc'. Valid values are 'asc' and 'desc'.
:type dir: Array of String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param select: The list of attributes to return for each DeviceServiceFlow. Valid values are DeviceServiceFlowID, DeviceServiceID, DeviceID, DataSourceID, SvfFirstSeenTime, SvfStartTime, SvfEndTime, SvfTimestamp, SvfChangedCols, SvfProtocolNum, SvfProtocolName, SvfSrcDisplayText, SvfSrcPortMin, SvfSrcPortMax, SvfDestDisplayText, SvfDestPortMin, SvfDestPortMax. If empty or omitted, all attributes will be returned.
:type select: Array
| ``api version min:`` 2.8
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param goto_field: The field name for NIOS GOTO that is used for locating a row position of records.
:type goto_field: String
| ``api version min:`` 2.8
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param goto_value: The value of goto_field for NIOS GOTO that is used for locating a row position of records.
:type goto_value: String
**Outputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:return device_service_flows: An array of the DeviceServiceFlow objects that match the specified input criteria.
:rtype device_service_flows: Array of DeviceServiceFlow
"""
return self.api_list_request(self._get_method_fullname("index"), kwargs)
def search(self, **kwargs):
"""Lists the available device service flows matching the input criteria. This method provides a more flexible search interface than the index method, but searching using this method is more demanding on the system and will not perform to the same level as the index method. The input fields listed below will be used as in the index method, to filter the result, along with the optional query string and XML filter described below.
**Inputs**
| ``api version min:`` 2.6
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param DataSourceID: The internal NetMRI identifier for the collector NetMRI that collected this data record.
:type DataSourceID: Array of Integer
| ``api version min:`` 2.6
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param DeviceID: The internal NetMRI identifier for the device to which belongs this flow.
:type DeviceID: Array of Integer
| ``api version min:`` 2.6
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param DeviceServiceFlowID: The internal NetMRI identifier for this flow description.
:type DeviceServiceFlowID: Array of Integer
| ``api version min:`` 2.6
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param DeviceServiceID: The internal NetMRI identifier for the service to which belongs this flow.
:type DeviceServiceID: Array of Integer
| ``api version min:`` 2.6
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param SvfChangedCols: The fields that changed between this revision of the record and the previous revision.
:type SvfChangedCols: Array of String
| ``api version min:`` 2.6
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param SvfDestDisplayText: The text that was defined in the configuration for the destination port part of this flow.
:type SvfDestDisplayText: Array of String
| ``api version min:`` 2.6
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param SvfDestPortMax: The numeric value for the destination port range max value. -1 if no meaning.
:type SvfDestPortMax: Array of Integer
| ``api version min:`` 2.6
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param SvfDestPortMin: The numeric value for the destination port range min value. -1 if no meaning.
:type SvfDestPortMin: Array of Integer
| ``api version min:`` 2.6
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param SvfEndTime: The ending effective time of this record, or empty if still in effect.
:type SvfEndTime: Array of DateTime
| ``api version min:`` 2.6
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param SvfFirstSeenTime: The timestamp of when NetMRI saw for the first time this flow.
:type SvfFirstSeenTime: Array of DateTime
| ``api version min:`` 2.6
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param SvfProtocolName: The protocol name.
:type SvfProtocolName: Array of String
| ``api version min:`` 2.6
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param SvfProtocolNum: The protocol number of the flow. A value between 0 to 255, 0 is for generic ip protocol.
:type SvfProtocolNum: Array of Integer
| ``api version min:`` 2.6
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param SvfSrcDisplayText: The text that was defined in the configuration for the source port part of this flow.
:type SvfSrcDisplayText: Array of String
| ``api version min:`` 2.6
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param SvfSrcPortMax: The numeric value for the source port range max value. -1 if no meaning.
:type SvfSrcPortMax: Array of Integer
| ``api version min:`` 2.6
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param SvfSrcPortMin: The numeric value for the source port range min value. -1 if no meaning.
:type SvfSrcPortMin: Array of Integer
| ``api version min:`` 2.6
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param SvfStartTime: The starting effective time of this record.
:type SvfStartTime: Array of DateTime
| ``api version min:`` 2.6
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param SvfTimestamp: The date and time this record was collected or calculated.
:type SvfTimestamp: Array of DateTime
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param DeviceGroupID: The internal NetMRI identifier of the device groups to which to limit the results.
:type DeviceGroupID: Array of Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param timestamp: The data returned will represent the device service flows as of this date and time. If omitted, the result will indicate the most recently collected data.
:type timestamp: DateTime
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param methods: A list of device service flow methods. The listed methods will be called on each device service flow returned and included in the output. Available methods are: device_service, data_source, device.
:type methods: Array of String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param include: A list of associated object types to include in the output. The listed associations will be returned as outputs named according to the association name (see outputs below). Available includes are: device_service, data_source, device.
:type include: Array of String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` 0
:param start: The record number to return in the selected page of data. It will always appear, although it may not be the first record. See the :limit for more information.
:type start: Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` 1000
:param limit: The size of the page of data, that is, the maximum number of records returned. The limit size will be used to break the data up into pages and the first page with the start record will be returned. So if you have 100 records and use a :limit of 10 and a :start of 10, you will get records 10-19. The maximum limit is 10000.
:type limit: Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` DeviceServiceFlowID
:param sort: The data field(s) to use for sorting the output. Default is DeviceServiceFlowID. Valid values are DeviceServiceFlowID, DeviceServiceID, DeviceID, DataSourceID, SvfFirstSeenTime, SvfStartTime, SvfEndTime, SvfTimestamp, SvfChangedCols, SvfProtocolNum, SvfProtocolName, SvfSrcDisplayText, SvfSrcPortMin, SvfSrcPortMax, SvfDestDisplayText, SvfDestPortMin, SvfDestPortMax.
:type sort: Array of String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` asc
:param dir: The direction(s) in which to sort the data. Default is 'asc'. Valid values are 'asc' and 'desc'.
:type dir: Array of String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param select: The list of attributes to return for each DeviceServiceFlow. Valid values are DeviceServiceFlowID, DeviceServiceID, DeviceID, DataSourceID, SvfFirstSeenTime, SvfStartTime, SvfEndTime, SvfTimestamp, SvfChangedCols, SvfProtocolNum, SvfProtocolName, SvfSrcDisplayText, SvfSrcPortMin, SvfSrcPortMax, SvfDestDisplayText, SvfDestPortMin, SvfDestPortMax. If empty or omitted, all attributes will be returned.
:type select: Array
| ``api version min:`` 2.8
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param goto_field: The field name for NIOS GOTO that is used for locating a row position of records.
:type goto_field: String
| ``api version min:`` 2.8
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param goto_value: The value of goto_field for NIOS GOTO that is used for locating a row position of records.
:type goto_value: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param query: This value will be matched against device service flows, looking to see if one or more of the listed attributes contain the passed value. You may also surround the value with '/' and '/' to perform a regular expression search rather than a containment operation. Any record that matches will be returned. The attributes searched are: DataSourceID, DeviceID, DeviceServiceFlowID, DeviceServiceID, SvfChangedCols, SvfDestDisplayText, SvfDestPortMax, SvfDestPortMin, SvfEndTime, SvfFirstSeenTime, SvfProtocolName, SvfProtocolNum, SvfSrcDisplayText, SvfSrcPortMax, SvfSrcPortMin, SvfStartTime, SvfTimestamp.
:type query: String
| ``api version min:`` 2.3
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param xml_filter: A SetFilter XML structure to further refine the search. The SetFilter will be applied AFTER any search query or field values, but before any limit options. The limit and pagination will be enforced after the filter. Keep in mind that this kind of filter may be costly and inefficient if not associated with database-level filtering.
:type xml_filter: String
**Outputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:return device_service_flows: An array of the DeviceServiceFlow objects that match the specified input criteria.
:rtype device_service_flows: Array of DeviceServiceFlow
"""
return self.api_list_request(self._get_method_fullname("search"), kwargs)
def find(self, **kwargs):
"""Lists the available device service flows matching the input specification. This provides the most flexible search specification of all the query mechanisms, enabling searching using comparison operations other than equality. However, it is more complex to use and will not perform as efficiently as the index or search methods. In the input descriptions below, 'field names' refers to the following fields: DataSourceID, DeviceID, DeviceServiceFlowID, DeviceServiceID, SvfChangedCols, SvfDestDisplayText, SvfDestPortMax, SvfDestPortMin, SvfEndTime, SvfFirstSeenTime, SvfProtocolName, SvfProtocolNum, SvfSrcDisplayText, SvfSrcPortMax, SvfSrcPortMin, SvfStartTime, SvfTimestamp.
**Inputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_DataSourceID: The operator to apply to the field DataSourceID. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. DataSourceID: The internal NetMRI identifier for the collector NetMRI that collected this data record. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_DataSourceID: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_DataSourceID: If op_DataSourceID is specified, the field named in this input will be compared to the value in DataSourceID using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_DataSourceID must be specified if op_DataSourceID is specified.
:type val_f_DataSourceID: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_DataSourceID: If op_DataSourceID is specified, this value will be compared to the value in DataSourceID using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_DataSourceID must be specified if op_DataSourceID is specified.
:type val_c_DataSourceID: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_DeviceID: The operator to apply to the field DeviceID. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. DeviceID: The internal NetMRI identifier for the device to which belongs this flow. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_DeviceID: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_DeviceID: If op_DeviceID is specified, the field named in this input will be compared to the value in DeviceID using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_DeviceID must be specified if op_DeviceID is specified.
:type val_f_DeviceID: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_DeviceID: If op_DeviceID is specified, this value will be compared to the value in DeviceID using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_DeviceID must be specified if op_DeviceID is specified.
:type val_c_DeviceID: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_DeviceServiceFlowID: The operator to apply to the field DeviceServiceFlowID. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. DeviceServiceFlowID: The internal NetMRI identifier for this flow description. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_DeviceServiceFlowID: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_DeviceServiceFlowID: If op_DeviceServiceFlowID is specified, the field named in this input will be compared to the value in DeviceServiceFlowID using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_DeviceServiceFlowID must be specified if op_DeviceServiceFlowID is specified.
:type val_f_DeviceServiceFlowID: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_DeviceServiceFlowID: If op_DeviceServiceFlowID is specified, this value will be compared to the value in DeviceServiceFlowID using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_DeviceServiceFlowID must be specified if op_DeviceServiceFlowID is specified.
:type val_c_DeviceServiceFlowID: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_DeviceServiceID: The operator to apply to the field DeviceServiceID. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. DeviceServiceID: The internal NetMRI identifier for the service to which belongs this flow. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_DeviceServiceID: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_DeviceServiceID: If op_DeviceServiceID is specified, the field named in this input will be compared to the value in DeviceServiceID using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_DeviceServiceID must be specified if op_DeviceServiceID is specified.
:type val_f_DeviceServiceID: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_DeviceServiceID: If op_DeviceServiceID is specified, this value will be compared to the value in DeviceServiceID using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_DeviceServiceID must be specified if op_DeviceServiceID is specified.
:type val_c_DeviceServiceID: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_SvfChangedCols: The operator to apply to the field SvfChangedCols. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. SvfChangedCols: The fields that changed between this revision of the record and the previous revision. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_SvfChangedCols: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_SvfChangedCols: If op_SvfChangedCols is specified, the field named in this input will be compared to the value in SvfChangedCols using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_SvfChangedCols must be specified if op_SvfChangedCols is specified.
:type val_f_SvfChangedCols: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_SvfChangedCols: If op_SvfChangedCols is specified, this value will be compared to the value in SvfChangedCols using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_SvfChangedCols must be specified if op_SvfChangedCols is specified.
:type val_c_SvfChangedCols: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_SvfDestDisplayText: The operator to apply to the field SvfDestDisplayText. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. SvfDestDisplayText: The text that was defined in the configuration for the destination port part of this flow. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_SvfDestDisplayText: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_SvfDestDisplayText: If op_SvfDestDisplayText is specified, the field named in this input will be compared to the value in SvfDestDisplayText using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_SvfDestDisplayText must be specified if op_SvfDestDisplayText is specified.
:type val_f_SvfDestDisplayText: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_SvfDestDisplayText: If op_SvfDestDisplayText is specified, this value will be compared to the value in SvfDestDisplayText using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_SvfDestDisplayText must be specified if op_SvfDestDisplayText is specified.
:type val_c_SvfDestDisplayText: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_SvfDestPortMax: The operator to apply to the field SvfDestPortMax. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. SvfDestPortMax: The numeric value for the destination port range max value. -1 if no meaning. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_SvfDestPortMax: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_SvfDestPortMax: If op_SvfDestPortMax is specified, the field named in this input will be compared to the value in SvfDestPortMax using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_SvfDestPortMax must be specified if op_SvfDestPortMax is specified.
:type val_f_SvfDestPortMax: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_SvfDestPortMax: If op_SvfDestPortMax is specified, this value will be compared to the value in SvfDestPortMax using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_SvfDestPortMax must be specified if op_SvfDestPortMax is specified.
:type val_c_SvfDestPortMax: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_SvfDestPortMin: The operator to apply to the field SvfDestPortMin. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. SvfDestPortMin: The numeric value for the destination port range min value. -1 if no meaning. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_SvfDestPortMin: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_SvfDestPortMin: If op_SvfDestPortMin is specified, the field named in this input will be compared to the value in SvfDestPortMin using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_SvfDestPortMin must be specified if op_SvfDestPortMin is specified.
:type val_f_SvfDestPortMin: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_SvfDestPortMin: If op_SvfDestPortMin is specified, this value will be compared to the value in SvfDestPortMin using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_SvfDestPortMin must be specified if op_SvfDestPortMin is specified.
:type val_c_SvfDestPortMin: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_SvfEndTime: The operator to apply to the field SvfEndTime. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. SvfEndTime: The ending effective time of this record, or empty if still in effect. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_SvfEndTime: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_SvfEndTime: If op_SvfEndTime is specified, the field named in this input will be compared to the value in SvfEndTime using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_SvfEndTime must be specified if op_SvfEndTime is specified.
:type val_f_SvfEndTime: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_SvfEndTime: If op_SvfEndTime is specified, this value will be compared to the value in SvfEndTime using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_SvfEndTime must be specified if op_SvfEndTime is specified.
:type val_c_SvfEndTime: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_SvfFirstSeenTime: The operator to apply to the field SvfFirstSeenTime. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. SvfFirstSeenTime: The timestamp of when NetMRI first saw this flow. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_SvfFirstSeenTime: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_SvfFirstSeenTime: If op_SvfFirstSeenTime is specified, the field named in this input will be compared to the value in SvfFirstSeenTime using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_SvfFirstSeenTime must be specified if op_SvfFirstSeenTime is specified.
:type val_f_SvfFirstSeenTime: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_SvfFirstSeenTime: If op_SvfFirstSeenTime is specified, this value will be compared to the value in SvfFirstSeenTime using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_SvfFirstSeenTime must be specified if op_SvfFirstSeenTime is specified.
:type val_c_SvfFirstSeenTime: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_SvfProtocolName: The operator to apply to the field SvfProtocolName. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. SvfProtocolName: The protocol name. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_SvfProtocolName: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_SvfProtocolName: If op_SvfProtocolName is specified, the field named in this input will be compared to the value in SvfProtocolName using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_SvfProtocolName must be specified if op_SvfProtocolName is specified.
:type val_f_SvfProtocolName: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_SvfProtocolName: If op_SvfProtocolName is specified, this value will be compared to the value in SvfProtocolName using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_SvfProtocolName must be specified if op_SvfProtocolName is specified.
:type val_c_SvfProtocolName: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_SvfProtocolNum: The operator to apply to the field SvfProtocolNum. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. SvfProtocolNum: The protocol number of the flow. A value between 0 and 255; 0 is for the generic IP protocol. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_SvfProtocolNum: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_SvfProtocolNum: If op_SvfProtocolNum is specified, the field named in this input will be compared to the value in SvfProtocolNum using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_SvfProtocolNum must be specified if op_SvfProtocolNum is specified.
:type val_f_SvfProtocolNum: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_SvfProtocolNum: If op_SvfProtocolNum is specified, this value will be compared to the value in SvfProtocolNum using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_SvfProtocolNum must be specified if op_SvfProtocolNum is specified.
:type val_c_SvfProtocolNum: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_SvfSrcDisplayText: The operator to apply to the field SvfSrcDisplayText. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. SvfSrcDisplayText: The text that was defined in the configuration for the source port part of this flow. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_SvfSrcDisplayText: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_SvfSrcDisplayText: If op_SvfSrcDisplayText is specified, the field named in this input will be compared to the value in SvfSrcDisplayText using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_SvfSrcDisplayText must be specified if op_SvfSrcDisplayText is specified.
:type val_f_SvfSrcDisplayText: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_SvfSrcDisplayText: If op_SvfSrcDisplayText is specified, this value will be compared to the value in SvfSrcDisplayText using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_SvfSrcDisplayText must be specified if op_SvfSrcDisplayText is specified.
:type val_c_SvfSrcDisplayText: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_SvfSrcPortMax: The operator to apply to the field SvfSrcPortMax. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. SvfSrcPortMax: The numeric value for the source port range max value. -1 if no meaning. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_SvfSrcPortMax: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_SvfSrcPortMax: If op_SvfSrcPortMax is specified, the field named in this input will be compared to the value in SvfSrcPortMax using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_SvfSrcPortMax must be specified if op_SvfSrcPortMax is specified.
:type val_f_SvfSrcPortMax: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_SvfSrcPortMax: If op_SvfSrcPortMax is specified, this value will be compared to the value in SvfSrcPortMax using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_SvfSrcPortMax must be specified if op_SvfSrcPortMax is specified.
:type val_c_SvfSrcPortMax: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_SvfSrcPortMin: The operator to apply to the field SvfSrcPortMin. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. SvfSrcPortMin: The numeric value for the source port range min value. -1 if no meaning. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_SvfSrcPortMin: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_SvfSrcPortMin: If op_SvfSrcPortMin is specified, the field named in this input will be compared to the value in SvfSrcPortMin using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_SvfSrcPortMin must be specified if op_SvfSrcPortMin is specified.
:type val_f_SvfSrcPortMin: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_SvfSrcPortMin: If op_SvfSrcPortMin is specified, this value will be compared to the value in SvfSrcPortMin using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_SvfSrcPortMin must be specified if op_SvfSrcPortMin is specified.
:type val_c_SvfSrcPortMin: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_SvfStartTime: The operator to apply to the field SvfStartTime. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. SvfStartTime: The starting effective time of this record. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_SvfStartTime: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_SvfStartTime: If op_SvfStartTime is specified, the field named in this input will be compared to the value in SvfStartTime using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_SvfStartTime must be specified if op_SvfStartTime is specified.
:type val_f_SvfStartTime: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_SvfStartTime: If op_SvfStartTime is specified, this value will be compared to the value in SvfStartTime using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_SvfStartTime must be specified if op_SvfStartTime is specified.
:type val_c_SvfStartTime: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_SvfTimestamp: The operator to apply to the field SvfTimestamp. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. SvfTimestamp: The date and time this record was collected or calculated. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_SvfTimestamp: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_SvfTimestamp: If op_SvfTimestamp is specified, the field named in this input will be compared to the value in SvfTimestamp using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_SvfTimestamp must be specified if op_SvfTimestamp is specified.
:type val_f_SvfTimestamp: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_SvfTimestamp: If op_SvfTimestamp is specified, this value will be compared to the value in SvfTimestamp using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_SvfTimestamp must be specified if op_SvfTimestamp is specified.
:type val_c_SvfTimestamp: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param DeviceGroupID: The internal NetMRI identifier of the device groups to which to limit the results.
:type DeviceGroupID: Array of Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param timestamp: The data returned will represent the device service flows as of this date and time. If omitted, the result will indicate the most recently collected data.
:type timestamp: DateTime
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param methods: A list of device service flow methods. The listed methods will be called on each device service flow returned and included in the output. Available methods are: device_service, data_source, device.
:type methods: Array of String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param include: A list of associated object types to include in the output. The listed associations will be returned as outputs named according to the association name (see outputs below). Available includes are: device_service, data_source, device.
:type include: Array of String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` 0
:param start: The record number to return in the selected page of data. It will always appear, although it may not be the first record. See the :limit for more information.
:type start: Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` 1000
:param limit: The size of the page of data, that is, the maximum number of records returned. The limit size will be used to break the data up into pages and the first page with the start record will be returned. So if you have 100 records and use a :limit of 10 and a :start of 10, you will get records 10-19. The maximum limit is 10000.
:type limit: Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` DeviceServiceFlowID
:param sort: The data field(s) to use for sorting the output. Default is DeviceServiceFlowID. Valid values are DeviceServiceFlowID, DeviceServiceID, DeviceID, DataSourceID, SvfFirstSeenTime, SvfStartTime, SvfEndTime, SvfTimestamp, SvfChangedCols, SvfProtocolNum, SvfProtocolName, SvfSrcDisplayText, SvfSrcPortMin, SvfSrcPortMax, SvfDestDisplayText, SvfDestPortMin, SvfDestPortMax.
:type sort: Array of String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` asc
:param dir: The direction(s) in which to sort the data. Default is 'asc'. Valid values are 'asc' and 'desc'.
:type dir: Array of String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param select: The list of attributes to return for each DeviceServiceFlow. Valid values are DeviceServiceFlowID, DeviceServiceID, DeviceID, DataSourceID, SvfFirstSeenTime, SvfStartTime, SvfEndTime, SvfTimestamp, SvfChangedCols, SvfProtocolNum, SvfProtocolName, SvfSrcDisplayText, SvfSrcPortMin, SvfSrcPortMax, SvfDestDisplayText, SvfDestPortMin, SvfDestPortMax. If empty or omitted, all attributes will be returned.
:type select: Array
| ``api version min:`` 2.8
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param goto_field: The field name for NIOS GOTO that is used for locating a row position of records.
:type goto_field: String
| ``api version min:`` 2.8
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param goto_value: The value of goto_field for NIOS GOTO that is used for locating a row position of records.
:type goto_value: String
| ``api version min:`` 2.3
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param xml_filter: A SetFilter XML structure to further refine the search. The SetFilter will be applied AFTER any search query or field values, but before any limit options. The limit and pagination will be enforced after the filter. Remind that this kind of filter may be costly and inefficient if not associated with a database filtering.
:type xml_filter: String
**Outputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:return device_service_flows: An array of the DeviceServiceFlow objects that match the specified input criteria.
:rtype device_service_flows: Array of DeviceServiceFlow
"""
return self.api_list_request(self._get_method_fullname("find"), kwargs)
def device_service(self, **kwargs):
"""The service object to which this flow belongs.
**Inputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` True
| ``default:`` None
:param DeviceServiceFlowID: The internal NetMRI identifier for this flow description.
:type DeviceServiceFlowID: Integer
**Outputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:return : The service object to which this flow belongs.
:rtype : DeviceService
"""
return self.api_request(self._get_method_fullname("device_service"), kwargs)
def data_source(self, **kwargs):
"""The collector NetMRI that collected this data record.
**Inputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` True
| ``default:`` None
:param DeviceServiceFlowID: The internal NetMRI identifier for this flow description.
:type DeviceServiceFlowID: Integer
**Outputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:return : The collector NetMRI that collected this data record.
:rtype : DataSource
"""
return self.api_request(self._get_method_fullname("data_source"), kwargs)
def device(self, **kwargs):
"""The device from which this data was collected.
**Inputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` True
| ``default:`` None
:param DeviceServiceFlowID: The internal NetMRI identifier for this flow description.
:type DeviceServiceFlowID: Integer
**Outputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:return : The device from which this data was collected.
:rtype : Device
"""
return self.api_request(self._get_method_fullname("device"), kwargs)
| apache-2.0 |
yugangw-msft/azure-cli | src/azure-cli/azure/cli/command_modules/iot/tests/latest/recording_processors.py | 5 | 2082 | # --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
from azure_devtools.scenario_tests import RecordingProcessor
from azure_devtools.scenario_tests.utilities import is_text_payload
# Placeholder written into recordings in place of real primary/secondary keys.
MOCK_KEY = 'mock_key'
class KeyReplacer(RecordingProcessor):
    """Scrub IoT Hub shared-access keys from recorded HTTP traffic.

    Rewrites the values of ``primaryKey``/``secondaryKey`` JSON fields to
    ``MOCK_KEY`` so real credentials never land in checked-in recordings.
    """

    def process_request(self, request):
        """Mask key material in the outgoing request body (str or bytes)."""
        if is_text_payload(request) and isinstance(request.body, bytes):
            request.body = self._replace_byte_keys(request.body)
        elif is_text_payload(request) and isinstance(request.body, str):
            request.body = self._replace_string_keys(request.body)
        return request

    def process_response(self, response):
        """Mask key material in the recorded response body."""
        if is_text_payload(response) and response['body']['string']:
            response['body']['string'] = self._replace_string_keys(response['body']['string'])
        return response

    # pylint: disable=no-self-use
    def _replace_string_keys(self, val):
        """Mask primary/secondary key values in a text payload."""
        import re
        # BUGFIX: the previous per-field `if 'primaryKey' in val` guard was
        # case-sensitive while the regex uses IGNORECASE, so e.g.
        # "PrimaryKey" was never masked.  re.sub on a non-matching payload
        # is already a cheap no-op, so the guard is simply dropped.
        for field in ('primaryKey', 'secondaryKey'):
            val = re.sub(r'"%s":( ?)"([^"]+)"' % field,
                         '"%s":"%s"' % (field, MOCK_KEY),
                         val, flags=re.IGNORECASE)
        return val

    # pylint: disable=no-self-use
    def _replace_byte_keys(self, val):
        """Mask primary/secondary key values in a bytes payload."""
        import re
        for field in (b'primaryKey', b'secondaryKey'):
            val = re.sub(b'"%s":( ?)"([^"]+)"' % field,
                         b'"%s":"%s"' % (field, MOCK_KEY.encode()),
                         val, flags=re.IGNORECASE)
        return val
| mit |
Simran-B/arangodb | 3rdParty/V8-4.3.61/third_party/python_26/Lib/test/test_urlparse.py | 55 | 16540 | #! /usr/bin/env python
from test import test_support
import unittest
import urlparse
# Base URLs used by the RFC-specific urljoin() tests below.
RFC1808_BASE = "http://a/b/c/d;p?q#f"
RFC2396_BASE = "http://a/b/c/d;p?q"
RFC3986_BASE = "http://a/b/c/d;p?q"

# A list of test cases. Each test case is a two-tuple that contains
# a query string and the list of (name, value) pairs expected from
# parse_qsl() when keep_blank_values=True.
parse_qsl_test_cases = [
    ("", []),
    ("&", []),
    ("&&", []),
    ("=", [('', '')]),
    ("=a", [('', 'a')]),
    ("a", [('a', '')]),
    ("a=", [('a', '')]),
    ("a=", [('a', '')]),
    ("&a=b", [('a', 'b')]),
    ("a=a+b&b=b+c", [('a', 'a b'), ('b', 'b c')]),
    ("a=1&a=2", [('a', '1'), ('a', '2')]),
]
class UrlParseTestCase(unittest.TestCase):
    """Tests for urlparse.urlparse/urlsplit/urljoin/urldefrag (Python 2)."""

    def checkRoundtrips(self, url, parsed, split):
        """Assert `url` parses to `parsed`/`split` and survives unparse round-trips."""
        result = urlparse.urlparse(url)
        self.assertEqual(result, parsed)
        t = (result.scheme, result.netloc, result.path,
             result.params, result.query, result.fragment)
        self.assertEqual(t, parsed)
        # put it back together and it should be the same
        result2 = urlparse.urlunparse(result)
        self.assertEqual(result2, url)
        self.assertEqual(result2, result.geturl())
        # the result of geturl() is a fixpoint; we can always parse it
        # again to get the same result:
        result3 = urlparse.urlparse(result.geturl())
        self.assertEqual(result3.geturl(), result.geturl())
        self.assertEqual(result3, result)
        self.assertEqual(result3.scheme, result.scheme)
        self.assertEqual(result3.netloc, result.netloc)
        self.assertEqual(result3.path, result.path)
        self.assertEqual(result3.params, result.params)
        self.assertEqual(result3.query, result.query)
        self.assertEqual(result3.fragment, result.fragment)
        self.assertEqual(result3.username, result.username)
        self.assertEqual(result3.password, result.password)
        self.assertEqual(result3.hostname, result.hostname)
        self.assertEqual(result3.port, result.port)
        # check the roundtrip using urlsplit() as well
        result = urlparse.urlsplit(url)
        self.assertEqual(result, split)
        t = (result.scheme, result.netloc, result.path,
             result.query, result.fragment)
        self.assertEqual(t, split)
        result2 = urlparse.urlunsplit(result)
        self.assertEqual(result2, url)
        self.assertEqual(result2, result.geturl())
        # check the fixpoint property of re-parsing the result of geturl()
        result3 = urlparse.urlsplit(result.geturl())
        self.assertEqual(result3.geturl(), result.geturl())
        self.assertEqual(result3, result)
        self.assertEqual(result3.scheme, result.scheme)
        self.assertEqual(result3.netloc, result.netloc)
        self.assertEqual(result3.path, result.path)
        self.assertEqual(result3.query, result.query)
        self.assertEqual(result3.fragment, result.fragment)
        self.assertEqual(result3.username, result.username)
        self.assertEqual(result3.password, result.password)
        self.assertEqual(result3.hostname, result.hostname)
        self.assertEqual(result3.port, result.port)

    def test_qsl(self):
        """parse_qsl() must match the table-driven expectations above."""
        for orig, expect in parse_qsl_test_cases:
            result = urlparse.parse_qsl(orig, keep_blank_values=True)
            self.assertEqual(result, expect, "Error parsing %s" % repr(orig))

    def test_roundtrips(self):
        """Round-trip a selection of non-http scheme URLs."""
        testcases = [
            ('file:///tmp/junk.txt',
             ('file', '', '/tmp/junk.txt', '', '', ''),
             ('file', '', '/tmp/junk.txt', '', '')),
            ('imap://mail.python.org/mbox1',
             ('imap', 'mail.python.org', '/mbox1', '', '', ''),
             ('imap', 'mail.python.org', '/mbox1', '', '')),
            ('mms://wms.sys.hinet.net/cts/Drama/09006251100.asf',
             ('mms', 'wms.sys.hinet.net', '/cts/Drama/09006251100.asf',
              '', '', ''),
             ('mms', 'wms.sys.hinet.net', '/cts/Drama/09006251100.asf',
              '', '')),
            ('svn+ssh://svn.zope.org/repos/main/ZConfig/trunk/',
             ('svn+ssh', 'svn.zope.org', '/repos/main/ZConfig/trunk/',
              '', '', ''),
             ('svn+ssh', 'svn.zope.org', '/repos/main/ZConfig/trunk/',
              '', ''))
            ]
        for url, parsed, split in testcases:
            self.checkRoundtrips(url, parsed, split)

    def test_http_roundtrips(self):
        # urlparse.urlsplit treats 'http:' as an optimized special case,
        # so we test both 'http:' and 'https:' in all the following.
        # Three cheers for white box knowledge!
        testcases = [
            ('://www.python.org',
             ('www.python.org', '', '', '', ''),
             ('www.python.org', '', '', '')),
            ('://www.python.org#abc',
             ('www.python.org', '', '', '', 'abc'),
             ('www.python.org', '', '', 'abc')),
            ('://www.python.org?q=abc',
             ('www.python.org', '', '', 'q=abc', ''),
             ('www.python.org', '', 'q=abc', '')),
            ('://www.python.org/#abc',
             ('www.python.org', '/', '', '', 'abc'),
             ('www.python.org', '/', '', 'abc')),
            ('://a/b/c/d;p?q#f',
             ('a', '/b/c/d', 'p', 'q', 'f'),
             ('a', '/b/c/d;p', 'q', 'f')),
            ]
        for scheme in ('http', 'https'):
            for url, parsed, split in testcases:
                url = scheme + url
                parsed = (scheme,) + parsed
                split = (scheme,) + split
                self.checkRoundtrips(url, parsed, split)

    def checkJoin(self, base, relurl, expected):
        """Assert urljoin(base, relurl) == expected, with a helpful message."""
        self.assertEqual(urlparse.urljoin(base, relurl), expected,
                         (base, relurl, expected))

    def test_unparse_parse(self):
        """unsplit(split(u)) and unparse(parse(u)) must be identities."""
        for u in ['Python', './Python']:
            self.assertEqual(urlparse.urlunsplit(urlparse.urlsplit(u)), u)
            self.assertEqual(urlparse.urlunparse(urlparse.urlparse(u)), u)

    def test_RFC1808(self):
        # "normal" cases from RFC 1808:
        self.checkJoin(RFC1808_BASE, 'g:h', 'g:h')
        self.checkJoin(RFC1808_BASE, 'g', 'http://a/b/c/g')
        self.checkJoin(RFC1808_BASE, './g', 'http://a/b/c/g')
        self.checkJoin(RFC1808_BASE, 'g/', 'http://a/b/c/g/')
        self.checkJoin(RFC1808_BASE, '/g', 'http://a/g')
        self.checkJoin(RFC1808_BASE, '//g', 'http://g')
        self.checkJoin(RFC1808_BASE, 'g?y', 'http://a/b/c/g?y')
        self.checkJoin(RFC1808_BASE, 'g?y/./x', 'http://a/b/c/g?y/./x')
        self.checkJoin(RFC1808_BASE, '#s', 'http://a/b/c/d;p?q#s')
        self.checkJoin(RFC1808_BASE, 'g#s', 'http://a/b/c/g#s')
        self.checkJoin(RFC1808_BASE, 'g#s/./x', 'http://a/b/c/g#s/./x')
        self.checkJoin(RFC1808_BASE, 'g?y#s', 'http://a/b/c/g?y#s')
        self.checkJoin(RFC1808_BASE, 'g;x', 'http://a/b/c/g;x')
        self.checkJoin(RFC1808_BASE, 'g;x?y#s', 'http://a/b/c/g;x?y#s')
        self.checkJoin(RFC1808_BASE, '.', 'http://a/b/c/')
        self.checkJoin(RFC1808_BASE, './', 'http://a/b/c/')
        self.checkJoin(RFC1808_BASE, '..', 'http://a/b/')
        self.checkJoin(RFC1808_BASE, '../', 'http://a/b/')
        self.checkJoin(RFC1808_BASE, '../g', 'http://a/b/g')
        self.checkJoin(RFC1808_BASE, '../..', 'http://a/')
        self.checkJoin(RFC1808_BASE, '../../', 'http://a/')
        self.checkJoin(RFC1808_BASE, '../../g', 'http://a/g')
        # "abnormal" cases from RFC 1808:
        self.checkJoin(RFC1808_BASE, '', 'http://a/b/c/d;p?q#f')
        self.checkJoin(RFC1808_BASE, '../../../g', 'http://a/../g')
        self.checkJoin(RFC1808_BASE, '../../../../g', 'http://a/../../g')
        self.checkJoin(RFC1808_BASE, '/./g', 'http://a/./g')
        self.checkJoin(RFC1808_BASE, '/../g', 'http://a/../g')
        self.checkJoin(RFC1808_BASE, 'g.', 'http://a/b/c/g.')
        self.checkJoin(RFC1808_BASE, '.g', 'http://a/b/c/.g')
        self.checkJoin(RFC1808_BASE, 'g..', 'http://a/b/c/g..')
        self.checkJoin(RFC1808_BASE, '..g', 'http://a/b/c/..g')
        self.checkJoin(RFC1808_BASE, './../g', 'http://a/b/g')
        self.checkJoin(RFC1808_BASE, './g/.', 'http://a/b/c/g/')
        self.checkJoin(RFC1808_BASE, 'g/./h', 'http://a/b/c/g/h')
        self.checkJoin(RFC1808_BASE, 'g/../h', 'http://a/b/c/h')
        # RFC 1808 and RFC 1630 disagree on these (according to RFC 1808),
        # so we'll not actually run these tests (which expect 1808 behavior).
        #self.checkJoin(RFC1808_BASE, 'http:g', 'http:g')
        #self.checkJoin(RFC1808_BASE, 'http:', 'http:')

    def test_RFC2396(self):
        # cases from RFC 2396
        self.checkJoin(RFC2396_BASE, 'g:h', 'g:h')
        self.checkJoin(RFC2396_BASE, 'g', 'http://a/b/c/g')
        self.checkJoin(RFC2396_BASE, './g', 'http://a/b/c/g')
        self.checkJoin(RFC2396_BASE, 'g/', 'http://a/b/c/g/')
        self.checkJoin(RFC2396_BASE, '/g', 'http://a/g')
        self.checkJoin(RFC2396_BASE, '//g', 'http://g')
        self.checkJoin(RFC2396_BASE, 'g?y', 'http://a/b/c/g?y')
        self.checkJoin(RFC2396_BASE, '#s', 'http://a/b/c/d;p?q#s')
        self.checkJoin(RFC2396_BASE, 'g#s', 'http://a/b/c/g#s')
        self.checkJoin(RFC2396_BASE, 'g?y#s', 'http://a/b/c/g?y#s')
        self.checkJoin(RFC2396_BASE, 'g;x', 'http://a/b/c/g;x')
        self.checkJoin(RFC2396_BASE, 'g;x?y#s', 'http://a/b/c/g;x?y#s')
        self.checkJoin(RFC2396_BASE, '.', 'http://a/b/c/')
        self.checkJoin(RFC2396_BASE, './', 'http://a/b/c/')
        self.checkJoin(RFC2396_BASE, '..', 'http://a/b/')
        self.checkJoin(RFC2396_BASE, '../', 'http://a/b/')
        self.checkJoin(RFC2396_BASE, '../g', 'http://a/b/g')
        self.checkJoin(RFC2396_BASE, '../..', 'http://a/')
        self.checkJoin(RFC2396_BASE, '../../', 'http://a/')
        self.checkJoin(RFC2396_BASE, '../../g', 'http://a/g')
        self.checkJoin(RFC2396_BASE, '', RFC2396_BASE)
        self.checkJoin(RFC2396_BASE, '../../../g', 'http://a/../g')
        self.checkJoin(RFC2396_BASE, '../../../../g', 'http://a/../../g')
        self.checkJoin(RFC2396_BASE, '/./g', 'http://a/./g')
        self.checkJoin(RFC2396_BASE, '/../g', 'http://a/../g')
        self.checkJoin(RFC2396_BASE, 'g.', 'http://a/b/c/g.')
        self.checkJoin(RFC2396_BASE, '.g', 'http://a/b/c/.g')
        self.checkJoin(RFC2396_BASE, 'g..', 'http://a/b/c/g..')
        self.checkJoin(RFC2396_BASE, '..g', 'http://a/b/c/..g')
        self.checkJoin(RFC2396_BASE, './../g', 'http://a/b/g')
        self.checkJoin(RFC2396_BASE, './g/.', 'http://a/b/c/g/')
        self.checkJoin(RFC2396_BASE, 'g/./h', 'http://a/b/c/g/h')
        self.checkJoin(RFC2396_BASE, 'g/../h', 'http://a/b/c/h')
        self.checkJoin(RFC2396_BASE, 'g;x=1/./y', 'http://a/b/c/g;x=1/y')
        self.checkJoin(RFC2396_BASE, 'g;x=1/../y', 'http://a/b/c/y')
        self.checkJoin(RFC2396_BASE, 'g?y/./x', 'http://a/b/c/g?y/./x')
        self.checkJoin(RFC2396_BASE, 'g?y/../x', 'http://a/b/c/g?y/../x')
        self.checkJoin(RFC2396_BASE, 'g#s/./x', 'http://a/b/c/g#s/./x')
        self.checkJoin(RFC2396_BASE, 'g#s/../x', 'http://a/b/c/g#s/../x')
        #The following scenarios have been updated in RFC3986
        #self.checkJoin(RFC2396_BASE, '?y', 'http://a/b/c/?y')
        #self.checkJoin(RFC2396_BASE, ';x', 'http://a/b/c/;x')

    def test_RFC3986(self):
        # cases where RFC 3986 changed the expected join result
        self.checkJoin(RFC3986_BASE, '?y','http://a/b/c/d;p?y')
        self.checkJoin(RFC2396_BASE, ';x', 'http://a/b/c/;x')

    def test_urldefrag(self):
        """urldefrag() must split the fragment off and leave the rest intact."""
        for url, defrag, frag in [
            ('http://python.org#frag', 'http://python.org', 'frag'),
            ('http://python.org', 'http://python.org', ''),
            ('http://python.org/#frag', 'http://python.org/', 'frag'),
            ('http://python.org/', 'http://python.org/', ''),
            ('http://python.org/?q#frag', 'http://python.org/?q', 'frag'),
            ('http://python.org/?q', 'http://python.org/?q', ''),
            ('http://python.org/p#frag', 'http://python.org/p', 'frag'),
            ('http://python.org/p?q', 'http://python.org/p?q', ''),
            (RFC1808_BASE, 'http://a/b/c/d;p?q', 'f'),
            (RFC2396_BASE, 'http://a/b/c/d;p?q', ''),
            ]:
            self.assertEqual(urlparse.urldefrag(url), (defrag, frag))

    def test_urlsplit_attributes(self):
        """SplitResult attributes, including username/password/hostname/port."""
        url = "HTTP://WWW.PYTHON.ORG/doc/#frag"
        p = urlparse.urlsplit(url)
        self.assertEqual(p.scheme, "http")
        self.assertEqual(p.netloc, "WWW.PYTHON.ORG")
        self.assertEqual(p.path, "/doc/")
        self.assertEqual(p.query, "")
        self.assertEqual(p.fragment, "frag")
        self.assertEqual(p.username, None)
        self.assertEqual(p.password, None)
        self.assertEqual(p.hostname, "www.python.org")
        self.assertEqual(p.port, None)
        # geturl() won't return exactly the original URL in this case
        # since the scheme is always case-normalized
        #self.assertEqual(p.geturl(), url)
        url = "http://User:Pass@www.python.org:080/doc/?query=yes#frag"
        p = urlparse.urlsplit(url)
        self.assertEqual(p.scheme, "http")
        self.assertEqual(p.netloc, "User:Pass@www.python.org:080")
        self.assertEqual(p.path, "/doc/")
        self.assertEqual(p.query, "query=yes")
        self.assertEqual(p.fragment, "frag")
        self.assertEqual(p.username, "User")
        self.assertEqual(p.password, "Pass")
        self.assertEqual(p.hostname, "www.python.org")
        self.assertEqual(p.port, 80)
        self.assertEqual(p.geturl(), url)
        # Addressing issue1698, which suggests Username can contain
        # "@" characters. Though not RFC compliant, many ftp sites allow
        # and request email addresses as usernames.
        url = "http://User@example.com:Pass@www.python.org:080/doc/?query=yes#frag"
        p = urlparse.urlsplit(url)
        self.assertEqual(p.scheme, "http")
        self.assertEqual(p.netloc, "User@example.com:Pass@www.python.org:080")
        self.assertEqual(p.path, "/doc/")
        self.assertEqual(p.query, "query=yes")
        self.assertEqual(p.fragment, "frag")
        self.assertEqual(p.username, "User@example.com")
        self.assertEqual(p.password, "Pass")
        self.assertEqual(p.hostname, "www.python.org")
        self.assertEqual(p.port, 80)
        self.assertEqual(p.geturl(), url)

    def test_attributes_bad_port(self):
        """Check handling of non-integer ports."""
        p = urlparse.urlsplit("http://www.example.net:foo")
        self.assertEqual(p.netloc, "www.example.net:foo")
        self.assertRaises(ValueError, lambda: p.port)
        p = urlparse.urlparse("http://www.example.net:foo")
        self.assertEqual(p.netloc, "www.example.net:foo")
        self.assertRaises(ValueError, lambda: p.port)

    def test_attributes_without_netloc(self):
        # This example is straight from RFC 3261. It looks like it
        # should allow the username, hostname, and port to be filled
        # in, but doesn't. Since it's a URI and doesn't use the
        # scheme://netloc syntax, the netloc and related attributes
        # should be left empty.
        uri = "sip:alice@atlanta.com;maddr=239.255.255.1;ttl=15"
        p = urlparse.urlsplit(uri)
        self.assertEqual(p.netloc, "")
        self.assertEqual(p.username, None)
        self.assertEqual(p.password, None)
        self.assertEqual(p.hostname, None)
        self.assertEqual(p.port, None)
        self.assertEqual(p.geturl(), uri)
        p = urlparse.urlparse(uri)
        self.assertEqual(p.netloc, "")
        self.assertEqual(p.username, None)
        self.assertEqual(p.password, None)
        self.assertEqual(p.hostname, None)
        self.assertEqual(p.port, None)
        self.assertEqual(p.geturl(), uri)

    def test_caching(self):
        # Test case for bug #1313119: the parse-result cache must not
        # leak a unicode result into a later str parse (or vice versa).
        uri = "http://example.com/doc/"
        unicode_uri = unicode(uri)
        urlparse.urlparse(unicode_uri)
        p = urlparse.urlparse(uri)
        self.assertEqual(type(p.scheme), type(uri))
        self.assertEqual(type(p.hostname), type(uri))
        self.assertEqual(type(p.path), type(uri))

    def test_noslash(self):
        # Issue 1637: http://foo.com?query is legal
        self.assertEqual(urlparse.urlparse("http://example.com?blahblah=/foo"),
                         ('http', 'example.com', '', '', 'blahblah=/foo', ''))
def test_main():
    """Entry point used by regrtest to run this module's test suite."""
    test_support.run_unittest(UrlParseTestCase)

if __name__ == "__main__":
    test_main()
| apache-2.0 |
soylentdeen/Graffity | src/Vibrations/VibrationExplorer.py | 1 | 5531 | import sys
sys.path.append('../')
import numpy
import Graffity
import CIAO_DatabaseTools
import astropy.time as aptime
from matplotlib import pyplot
import colorsys
def getFreqs():
    """Prompt the user for a comma separated list of frequencies.

    Re-prompts until every comma separated token parses as a float.

    :return: list of floats entered by the user
    """
    while True:
        enteredText = raw_input("Enter a comma separated list of frequencies: ")
        try:
            # One bad token rejects the whole line and we prompt again.
            return [float(val.strip()) for val in enteredText.split(',')]
        except ValueError:
            # Previously a bare `except:`, which also swallowed
            # SystemExit/KeyboardInterrupt; only parse errors should retry.
            pass
def getModes():
    """Ask which modes to investigate; loop until the answer is AVC or ALL."""
    while True:
        answer = raw_input("Which modes to investigate? AVC or ALL? : ")
        if answer in ('AVC', 'ALL'):
            return answer
def getDataLoggers(DB, GravityVals, startTime, ax=None):
    """Interactively pick a GRAVITY record and load the matching CIAO loggers.

    :param DB: CIAO database object providing .query() (presumably
        CIAO_DatabaseTools.CIAO_Database -- TODO confirm)
    :param GravityVals: array of GRAVITY DB rows; column -2 holds an MJD
        timestamp and column -1 a file path (as used below)
    :param startTime: start-time string passed through to DB.query()
    :param ax: unused here; accepted for call-site compatibility
    :return: (dict of Graffity.DataLogger keyed by UT 1-4, vibration peaks dict)
    """
    # Sort records chronologically by the MJD column and show a 1-based menu.
    order = numpy.argsort(GravityVals[:,-2])
    GravityVals = GravityVals[order]
    i = 1
    for record in GravityVals:
        print("%03d | %s" % (i,aptime.Time(float(record[-2]), format='mjd').iso))
        i += 1
    index = int(raw_input("Enter desired index :")) - 1
    # Load the chosen GRAVITY file and find its OPD vibration peaks.
    FTData = Graffity.GRAVITY_Data(GravityVals[index][-1])
    FTData.DualSciP2VM.computeOPDPeriodograms()
    VibrationPeaks = FTData.DualSciP2VM.findVibrationPeaks()
    # NOTE(review): computeACQCAMStrehl() is called twice in the original --
    # possibly a deliberate warm-up/refinement, possibly a typo; confirm.
    FTData.computeACQCAMStrehl()
    FTData.computeACQCAMStrehl()
    #freqs = getFreqs()
    #Modes = getModes()
    CIAOVals = DB.query(keywords=['ALT', 'AZ', 'STREHL'], timeOfDay='NIGHT', startTime=startTime)
    DataLoggers = {}
    for UT in [1, 2, 3, 4]:
        # Pick the CIAO record closest in time to the chosen GRAVITY record
        # (column -4 is presumably the CIAO timestamp -- TODO confirm).
        closest = numpy.argsort(numpy.abs(CIAOVals[UT][:,-4]
            - float(GravityVals[index,-2])))[0]
        DataLoggers[UT] = Graffity.DataLogger(directory=CIAOVals[UT][closest,-3])
        DataLoggers[UT].loadData()
        DataLoggers[UT].computeStrehl()
        # Measure vibrations at the frequencies seen on baselines with this UT.
        freqs = extractBCIFreqs(VibrationPeaks, UT)
        DataLoggers[UT].measureVibs(frequencies=freqs, modes='AVC')
    return DataLoggers, VibrationPeaks
def extractBCIFreqs(VibrationPeaks, UT):
    """Collect the peak frequencies of every baseline that involves telescope UT.

    :param VibrationPeaks: dict keyed by baseline index, each value holding a
        'freqs' sequence of peak frequencies
    :param UT: telescope number (1-4)
    :return: numpy array of the gathered frequencies
    """
    # Baseline index -> pair of telescopes forming that baseline.
    baselines = {0: [4, 3], 1: [4, 2], 2: [4, 1], 3: [3, 2], 4: [3, 1], 5: [2, 1]}
    collected = []
    for index, pair in baselines.items():
        if UT in pair:
            collected.extend(VibrationPeaks[index]['freqs'])
    return numpy.array(collected)
# --- Script body: build a 2x2 grid of axes, one per CIAO unit (UT1-4). ---
fig = pyplot.figure(0, figsize=(8.0, 10.0), frameon=False)
fig.clear()
ax1 = fig.add_axes([0.1, 0.2, 0.4, 0.3])
ax2 = fig.add_axes([0.1, 0.5, 0.4, 0.4], sharex=ax1)
ax3 = fig.add_axes([0.5, 0.2, 0.4, 0.3], sharex=ax1)
ax3.yaxis.tick_right()
ax4 = fig.add_axes([0.5, 0.5, 0.4, 0.4], sharex=ax1)
ax4.yaxis.tick_right()
# Query GRAVITY and CIAO databases for night-time records after startTime.
GDB = CIAO_DatabaseTools.GRAVITY_Database()
CDB = CIAO_DatabaseTools.CIAO_Database()
startTime = '2017-08-10 00:00:00'
GravityVals = GDB.query(keywords = [], timeOfDay='NIGHT', startTime=startTime)
#ax1.set_xscale('log')
#ax1.set_yscale('log')
CIAO, Vibrations = getDataLoggers(CDB, GravityVals, startTime, ax=ax1)
# Random HSV colors (generated but superseded by the fixed palette below).
hsv = [(numpy.random.uniform(low=0.0, high=1),
        numpy.random.uniform(low=0.2, high=1),
        numpy.random.uniform(low=0.9, high=1)) for i in
       range(99)]
colors = []
for h in hsv:
    colors.append(colorsys.hsv_to_rgb(h[0], h[1], h[2]))
handles = numpy.array([])
labels = numpy.array([])
# Baseline index -> telescope pair; fixed per-baseline plot colors.
baselines = {0:[4,3], 1:[4, 2], 2:[4, 1], 3:[3, 2], 4:[3, 1], 5:[2, 1]}
colors = {0:'y', 1:'g', 2:'r', 3:'c', 4:'m', 5:'k'}
# For each UT, scatter CIAO commanded vibration power against the BCI
# (baseline) vibration power at the same frequencies.
for CIAO_ID, ax in zip([1, 2, 3, 4], [ax1, ax2, ax3, ax4]):
    DL = CIAO[CIAO_ID]
    for mode in DL.vibPower.keys():
        # Collect the baselines that include this UT.
        BCIVibs = {}
        for bl in baselines.keys():
            if CIAO_ID in baselines[bl]:
                label = "UT%dUT%d" % (baselines[bl][0], baselines[bl][1])
                BCIVibs[label] = {'index':bl, 'power':[]}
        f = []
        p = []
        # Keep only positive-power peaks; match each against baseline peaks.
        for peak in DL.vibPower[mode]['CommPower'].iteritems():
            if peak[1] > 0:
                f.append(peak[0])
                p.append(numpy.log10(peak[1]))
                for label in BCIVibs.keys():
                    if not( f[-1] in Vibrations[BCIVibs[label]['index']]['freqs']):
                        BCIVibs[label]['power'].append(0.0)
                    else:
                        for i, freq in enumerate(Vibrations[BCIVibs[label]['index']]['freqs']):
                            if freq == f[-1]:
                                BCIVibs[label]['power'].append(Vibrations[BCIVibs[label]['index']]['power'][i])
        #ax.plot(DL.ZPowerFrequencies, numpy.log10(DL.ZPowerCommands[mode,:]), color =
        #        colors[mode])
        f = numpy.array(f)
        p = numpy.array(p)
        ax.scatter(numpy.log10(f), p, color='b')
        for bl in BCIVibs.keys():
            BCIVibs[bl]['power'] = numpy.array(BCIVibs[bl]['power'])
            nonzero = BCIVibs[bl]['power'] > 0.0
            ax.scatter(numpy.log10(f[nonzero]), numpy.log10(BCIVibs[bl]['power'][nonzero]),
                       label=bl, color = colors[BCIVibs[bl]['index']])
        #ax.scatter(numpy.array(f), numpy.array(p), color=colors[mode],
        #           label='Mode %d' % mode)
    # Accumulate legend entries; duplicates are removed below via numpy.unique.
    h, l = ax.get_legend_handles_labels()
    handles=numpy.append(handles, numpy.array(h))
    labels =numpy.append(labels, numpy.array(l))
#ax1.set_ybound(0, 20)
#ax2.set_ybound(0, 20)
#ax3.set_ybound(0, 20)
#ax4.set_ybound(0, 20)
#ax1.set_xbound(0, 160)
#ax2.set_xbound(0, 160)
#ax3.set_xbound(0, 160)
#ax4.set_xbound(0, 160)
#ax2.xaxis.set_ticklabels([])
#ax4.xaxis.set_ticklabels([])
junk, indices = numpy.unique(labels, return_index=True)
fig.legend(handles[indices], labels[indices], ncol=4, loc=3, scatterpoints=1)
fig.show()
#"""
| mit |
fdvarela/odoo8 | addons/account_anglo_saxon/__openerp__.py | 67 | 2462 | ##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
# Odoo/OpenERP module manifest: metadata, dependencies, data and test files
# for the Anglo-Saxon accounting addon.
{
    'name': 'Anglo-Saxon Accounting',
    'version': '1.2',
    'author': 'OpenERP SA, Veritos',
    'website': 'http://openerp.com - http://veritos.nl',
    'description': """
This module supports the Anglo-Saxon accounting methodology by changing the accounting logic with stock transactions.
=====================================================================================================================
The difference between the Anglo-Saxon accounting countries and the Rhine
(or also called Continental accounting) countries is the moment of taking
the Cost of Goods Sold versus Cost of Sales. Anglo-Saxons accounting does
take the cost when sales invoice is created, Continental accounting will
take the cost at the moment the goods are shipped.
This module will add this functionality by using a interim account, to
store the value of shipped goods and will contra book this interim
account when the invoice is created to transfer this amount to the
debtor or creditor account. Secondly, price differences between actual
purchase price and fixed product standard price are booked on a separate
account.""",
    'images': ['images/account_anglo_saxon.jpeg'],
    'depends': ['product', 'purchase'],
    'category': 'Accounting & Finance',
    'demo': [],
    'data': ['product_view.xml'],
    'test': ['test/anglo_saxon.yml', 'test/anglo_saxon_avg_fifo.yml'],
    'auto_install': False,
    'installable': True,
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
tornadozou/tensorflow | tensorflow/python/training/session_manager_test.py | 41 | 31753 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for SessionManager."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from tensorflow.python.client import session as session_lib
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import errors_impl
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import gfile
from tensorflow.python.platform import test
from tensorflow.python.training import saver as saver_lib
from tensorflow.python.training import server_lib
from tensorflow.python.training import session_manager
class SessionManagerTest(test.TestCase):
def testPrepareSessionSucceeds(self):
with ops.Graph().as_default():
v = variables.Variable([1.0, 2.0, 3.0], name="v")
sm = session_manager.SessionManager(
ready_op=variables.report_uninitialized_variables())
sess = sm.prepare_session(
"", init_op=variables.global_variables_initializer())
self.assertAllClose([1.0, 2.0, 3.0], sess.run(v))
def testPrepareSessionSucceedsWithInitFeedDict(self):
with ops.Graph().as_default():
p = array_ops.placeholder(dtypes.float32, shape=(3,))
v = variables.Variable(p, name="v")
sm = session_manager.SessionManager(
ready_op=variables.report_uninitialized_variables())
sess = sm.prepare_session(
"",
init_op=variables.global_variables_initializer(),
init_feed_dict={p: [1.0, 2.0, 3.0]})
self.assertAllClose([1.0, 2.0, 3.0], sess.run(v))
def testPrepareSessionSucceedsWithInitFn(self):
with ops.Graph().as_default():
v = variables.Variable([125], name="v")
sm = session_manager.SessionManager(
ready_op=variables.report_uninitialized_variables())
sess = sm.prepare_session(
"", init_fn=lambda sess: sess.run(v.initializer))
self.assertAllClose([125], sess.run(v))
def testPrepareSessionFails(self):
checkpoint_dir = os.path.join(self.get_temp_dir(), "prepare_session")
checkpoint_dir2 = os.path.join(self.get_temp_dir(), "prepare_session2")
try:
gfile.DeleteRecursively(checkpoint_dir)
gfile.DeleteRecursively(checkpoint_dir2)
except errors.OpError:
pass # Ignore
gfile.MakeDirs(checkpoint_dir)
with ops.Graph().as_default():
v = variables.Variable([1.0, 2.0, 3.0], name="v")
sm = session_manager.SessionManager(
ready_op=variables.report_uninitialized_variables())
saver = saver_lib.Saver({"v": v})
sess = sm.prepare_session(
"",
init_op=variables.global_variables_initializer(),
saver=saver,
checkpoint_dir=checkpoint_dir)
self.assertAllClose([1.0, 2.0, 3.0], sess.run(v))
checkpoint_filename = os.path.join(checkpoint_dir,
"prepare_session_checkpoint")
saver.save(sess, checkpoint_filename)
# Create a new Graph and SessionManager and recover.
with ops.Graph().as_default():
# Renames the checkpoint directory.
os.rename(checkpoint_dir, checkpoint_dir2)
gfile.MakeDirs(checkpoint_dir)
v = variables.Variable([6.0, 7.0, 8.0], name="v")
with self.test_session():
self.assertEqual(False, variables.is_variable_initialized(v).eval())
session_manager.SessionManager(
ready_op=variables.report_uninitialized_variables())
saver = saver_lib.Saver({"v": v})
# This should fail as there's no checkpoint within 2 seconds.
with self.assertRaisesRegexp(
RuntimeError, "no init_op or init_fn or local_init_op was given"):
sess = sm.prepare_session(
"",
init_op=None,
saver=saver,
checkpoint_dir=checkpoint_dir,
wait_for_checkpoint=True,
max_wait_secs=2)
# Rename the checkpoint directory back.
gfile.DeleteRecursively(checkpoint_dir)
os.rename(checkpoint_dir2, checkpoint_dir)
# This should succeed as there's checkpoint.
sess = sm.prepare_session(
"",
init_op=None,
saver=saver,
checkpoint_dir=checkpoint_dir,
wait_for_checkpoint=True,
max_wait_secs=2)
self.assertEqual(
True,
variables.is_variable_initialized(
sess.graph.get_tensor_by_name("v:0")).eval(session=sess))
def _test_recovered_variable(self,
checkpoint_dir=None,
checkpoint_filename_with_path=None):
# Create a new Graph and SessionManager and recover from a checkpoint.
with ops.Graph().as_default():
v = variables.Variable(2, name="v")
with session_lib.Session():
self.assertEqual(False, variables.is_variable_initialized(v).eval())
sm2 = session_manager.SessionManager(
ready_op=variables.report_uninitialized_variables())
saver = saver_lib.Saver({"v": v})
sess, initialized = sm2.recover_session(
"",
saver=saver,
checkpoint_dir=checkpoint_dir,
checkpoint_filename_with_path=checkpoint_filename_with_path)
self.assertTrue(initialized)
self.assertEqual(
True,
variables.is_variable_initialized(
sess.graph.get_tensor_by_name("v:0")).eval(session=sess))
self.assertEquals(1, sess.run(v))
def testRecoverSession(self):
# Create a checkpoint.
checkpoint_dir = os.path.join(self.get_temp_dir(), "recover_session")
try:
gfile.DeleteRecursively(checkpoint_dir)
except errors.OpError:
pass # Ignore
gfile.MakeDirs(checkpoint_dir)
with ops.Graph().as_default():
v = variables.Variable(1, name="v")
sm = session_manager.SessionManager(
ready_op=variables.report_uninitialized_variables())
saver = saver_lib.Saver({"v": v})
sess, initialized = sm.recover_session(
"", saver=saver, checkpoint_dir=checkpoint_dir)
self.assertFalse(initialized)
sess.run(v.initializer)
self.assertEquals(1, sess.run(v))
saver.save(sess,
os.path.join(checkpoint_dir, "recover_session_checkpoint"))
self._test_recovered_variable(checkpoint_dir=checkpoint_dir)
self._test_recovered_variable(
checkpoint_filename_with_path=saver_lib.latest_checkpoint(
checkpoint_dir))
# Cannot set both checkpoint_dir and checkpoint_filename_with_path.
with self.assertRaises(ValueError):
self._test_recovered_variable(
checkpoint_dir=checkpoint_dir,
checkpoint_filename_with_path=saver_lib.latest_checkpoint(
checkpoint_dir))
def testWaitForSessionReturnsNoneAfterTimeout(self):
with ops.Graph().as_default():
variables.Variable(1, name="v")
sm = session_manager.SessionManager(
ready_op=variables.report_uninitialized_variables(),
recovery_wait_secs=1)
# Set max_wait_secs to allow us to try a few times.
with self.assertRaises(errors.DeadlineExceededError):
sm.wait_for_session(master="", max_wait_secs=3)
def testInitWithNoneLocalInitOpError(self):
# Creating a SessionManager with a None local_init_op but
# non-None ready_for_local_init_op raises ValueError
with self.assertRaisesRegexp(ValueError,
"If you pass a ready_for_local_init_op "
"you must also pass a local_init_op "):
session_manager.SessionManager(
ready_for_local_init_op=variables.report_uninitialized_variables(
variables.global_variables()),
local_init_op=None)
def testRecoverSessionWithReadyForLocalInitOp(self):
# Create a checkpoint.
checkpoint_dir = os.path.join(self.get_temp_dir(),
"recover_session_ready_for_local_init")
try:
gfile.DeleteRecursively(checkpoint_dir)
except errors.OpError:
pass # Ignore
gfile.MakeDirs(checkpoint_dir)
with ops.Graph().as_default():
v = variables.Variable(1, name="v")
sm = session_manager.SessionManager(
ready_op=variables.report_uninitialized_variables())
saver = saver_lib.Saver({"v": v})
sess, initialized = sm.recover_session(
"", saver=saver, checkpoint_dir=checkpoint_dir)
self.assertFalse(initialized)
sess.run(v.initializer)
self.assertEquals(1, sess.run(v))
saver.save(sess,
os.path.join(checkpoint_dir, "recover_session_checkpoint"))
# Create a new Graph and SessionManager and recover.
with ops.Graph().as_default():
v = variables.Variable(2, name="v")
w = variables.Variable(
v,
trainable=False,
collections=[ops.GraphKeys.LOCAL_VARIABLES],
name="w")
with self.test_session():
self.assertEqual(False, variables.is_variable_initialized(v).eval())
self.assertEqual(False, variables.is_variable_initialized(w).eval())
sm2 = session_manager.SessionManager(
ready_op=variables.report_uninitialized_variables(),
ready_for_local_init_op=variables.report_uninitialized_variables(
variables.global_variables()),
local_init_op=w.initializer)
saver = saver_lib.Saver({"v": v})
sess, initialized = sm2.recover_session(
"", saver=saver, checkpoint_dir=checkpoint_dir)
self.assertTrue(initialized)
self.assertEqual(
True,
variables.is_variable_initialized(
sess.graph.get_tensor_by_name("v:0")).eval(session=sess))
self.assertEqual(
True,
variables.is_variable_initialized(
sess.graph.get_tensor_by_name("w:0")).eval(session=sess))
self.assertEquals(1, sess.run(v))
self.assertEquals(1, sess.run(w))
def testRecoverSessionWithReadyForLocalInitOpFailsToReadyLocal(self):
# We use ready_for_local_init_op=tf.report_uninitialized_variables(),
# which causes recover_session to not run local_init_op, and to return
# initialized=False
# Create a checkpoint.
checkpoint_dir = os.path.join(
self.get_temp_dir(),
"recover_session_ready_for_local_init_fails_to_ready_local")
try:
gfile.DeleteRecursively(checkpoint_dir)
except errors.OpError:
pass # Ignore
gfile.MakeDirs(checkpoint_dir)
with ops.Graph().as_default():
v = variables.Variable(1, name="v")
sm = session_manager.SessionManager(
ready_op=variables.report_uninitialized_variables())
saver = saver_lib.Saver({"v": v})
sess, initialized = sm.recover_session(
"", saver=saver, checkpoint_dir=checkpoint_dir)
self.assertFalse(initialized)
sess.run(v.initializer)
self.assertEquals(1, sess.run(v))
saver.save(sess,
os.path.join(checkpoint_dir, "recover_session_checkpoint"))
# Create a new Graph and SessionManager and recover.
with ops.Graph().as_default():
v = variables.Variable(2, name="v")
w = variables.Variable(
v,
trainable=False,
collections=[ops.GraphKeys.LOCAL_VARIABLES],
name="w")
with self.test_session():
self.assertEqual(False, variables.is_variable_initialized(v).eval())
self.assertEqual(False, variables.is_variable_initialized(w).eval())
sm2 = session_manager.SessionManager(
ready_op=variables.report_uninitialized_variables(),
ready_for_local_init_op=variables.report_uninitialized_variables(),
local_init_op=w.initializer)
saver = saver_lib.Saver({"v": v})
sess, initialized = sm2.recover_session(
"", saver=saver, checkpoint_dir=checkpoint_dir)
self.assertFalse(initialized)
self.assertEqual(
True,
variables.is_variable_initialized(
sess.graph.get_tensor_by_name("v:0")).eval(session=sess))
self.assertEqual(
False,
variables.is_variable_initialized(
sess.graph.get_tensor_by_name("w:0")).eval(session=sess))
self.assertEquals(1, sess.run(v))
def testRecoverSessionNoChkptStillRunsLocalInitOp(self):
# This test checks for backwards compatibility.
# In particular, we continue to ensure that recover_session will execute
# local_init_op exactly once, regardless of whether the session was
# successfully recovered.
with ops.Graph().as_default():
w = variables.Variable(
1,
trainable=False,
collections=[ops.GraphKeys.LOCAL_VARIABLES],
name="w")
with self.test_session():
self.assertEqual(False, variables.is_variable_initialized(w).eval())
sm2 = session_manager.SessionManager(
ready_op=variables.report_uninitialized_variables(),
ready_for_local_init_op=None,
local_init_op=w.initializer)
# Try to recover session from None
sess, initialized = sm2.recover_session(
"", saver=None, checkpoint_dir=None)
# Succeeds because recover_session still run local_init_op
self.assertFalse(initialized)
self.assertEqual(
True,
variables.is_variable_initialized(
sess.graph.get_tensor_by_name("w:0")).eval(session=sess))
self.assertEquals(1, sess.run(w))
def testRecoverSessionFailsStillRunsLocalInitOp(self):
# Create a checkpoint.
checkpoint_dir = os.path.join(
self.get_temp_dir(),
"recover_session_ready_for_local_init_fails_stil_run")
try:
gfile.DeleteRecursively(checkpoint_dir)
except errors.OpError:
pass # Ignore
gfile.MakeDirs(checkpoint_dir)
# Create a new Graph and SessionManager and recover.
with ops.Graph().as_default():
v = variables.Variable(2, name="v")
w = variables.Variable(
1,
trainable=False,
collections=[ops.GraphKeys.LOCAL_VARIABLES],
name="w")
with self.test_session():
self.assertEqual(False, variables.is_variable_initialized(v).eval())
self.assertEqual(False, variables.is_variable_initialized(w).eval())
sm2 = session_manager.SessionManager(
ready_op=variables.report_uninitialized_variables(),
ready_for_local_init_op=None,
local_init_op=w.initializer)
saver = saver_lib.Saver({"v": v})
sess, initialized = sm2.recover_session(
"",
saver=saver,
checkpoint_dir=checkpoint_dir,
wait_for_checkpoint=False)
self.assertFalse(initialized)
self.assertEqual(
False,
variables.is_variable_initialized(
sess.graph.get_tensor_by_name("v:0")).eval(session=sess))
self.assertEqual(
True,
variables.is_variable_initialized(
sess.graph.get_tensor_by_name("w:0")).eval(session=sess))
self.assertEquals(1, sess.run(w))
def testWaitForSessionLocalInit(self):
server = server_lib.Server.create_local_server()
with ops.Graph().as_default() as graph:
v = variables.Variable(1, name="v")
w = variables.Variable(
v,
trainable=False,
collections=[ops.GraphKeys.LOCAL_VARIABLES],
name="w")
sm = session_manager.SessionManager(
graph=graph,
ready_op=variables.report_uninitialized_variables(),
ready_for_local_init_op=variables.report_uninitialized_variables(
variables.global_variables()),
local_init_op=w.initializer)
# Initialize v but not w
s = session_lib.Session(server.target, graph=graph)
s.run(v.initializer)
sess = sm.wait_for_session(server.target, max_wait_secs=3)
self.assertEqual(
True,
variables.is_variable_initialized(
sess.graph.get_tensor_by_name("v:0")).eval(session=sess))
self.assertEqual(
True,
variables.is_variable_initialized(
sess.graph.get_tensor_by_name("w:0")).eval(session=sess))
self.assertEquals(1, sess.run(v))
self.assertEquals(1, sess.run(w))
def testWaitForSessionWithReadyForLocalInitOpFailsToReadyLocal(self):
with ops.Graph().as_default() as graph:
v = variables.Variable(1, name="v")
w = variables.Variable(
v,
trainable=False,
collections=[ops.GraphKeys.LOCAL_VARIABLES],
name="w")
sm = session_manager.SessionManager(
graph=graph,
ready_op=variables.report_uninitialized_variables(),
ready_for_local_init_op=variables.report_uninitialized_variables(),
local_init_op=w.initializer)
with self.assertRaises(errors_impl.DeadlineExceededError):
# Time-out because w fails to be initialized,
# because of overly restrictive ready_for_local_init_op
sm.wait_for_session("", max_wait_secs=3)
def testWaitForSessionInsufficientReadyForLocalInitCheck(self):
with ops.Graph().as_default() as graph:
v = variables.Variable(1, name="v")
w = variables.Variable(
v,
trainable=False,
collections=[ops.GraphKeys.LOCAL_VARIABLES],
name="w")
sm = session_manager.SessionManager(
graph=graph,
ready_op=variables.report_uninitialized_variables(),
ready_for_local_init_op=None,
local_init_op=w.initializer)
with self.assertRaisesRegexp(errors_impl.DeadlineExceededError,
"Session was not ready after waiting.*"):
sm.wait_for_session("", max_wait_secs=3)
def testPrepareSessionWithReadyForLocalInitOp(self):
with ops.Graph().as_default():
v = variables.Variable(1, name="v")
w = variables.Variable(
v,
trainable=False,
collections=[ops.GraphKeys.LOCAL_VARIABLES],
name="w")
x = variables.Variable(
3 * v,
trainable=False,
collections=[ops.GraphKeys.LOCAL_VARIABLES],
name="x")
with self.test_session():
self.assertEqual(False, variables.is_variable_initialized(v).eval())
self.assertEqual(False, variables.is_variable_initialized(w).eval())
self.assertEqual(False, variables.is_variable_initialized(x).eval())
sm2 = session_manager.SessionManager(
ready_op=variables.report_uninitialized_variables(),
ready_for_local_init_op=variables.report_uninitialized_variables(
variables.global_variables()),
local_init_op=[w.initializer, x.initializer])
sess = sm2.prepare_session("", init_op=v.initializer)
self.assertEqual(
True,
variables.is_variable_initialized(
sess.graph.get_tensor_by_name("v:0")).eval(session=sess))
self.assertEqual(
True,
variables.is_variable_initialized(
sess.graph.get_tensor_by_name("w:0")).eval(session=sess))
self.assertEqual(
True,
variables.is_variable_initialized(
sess.graph.get_tensor_by_name("x:0")).eval(session=sess))
self.assertEquals(1, sess.run(v))
self.assertEquals(1, sess.run(w))
self.assertEquals(3, sess.run(x))
def testPrepareSessionWithPartialInitOp(self):
with ops.Graph().as_default():
v = variables.Variable(1, name="v")
w = variables.Variable(
v,
trainable=False,
collections=[ops.GraphKeys.LOCAL_VARIABLES],
name="w")
x = variables.Variable(
3 * v,
trainable=False,
collections=[ops.GraphKeys.LOCAL_VARIABLES],
name="x")
v_res = variables.Variable(1, name="v_res")
w_res = variables.Variable(
v_res,
trainable=False,
collections=[ops.GraphKeys.LOCAL_VARIABLES],
name="w_res")
x_res = variables.Variable(
3 * v_res,
trainable=False,
collections=[ops.GraphKeys.LOCAL_VARIABLES],
name="x_res")
with self.test_session():
self.assertEqual(False, variables.is_variable_initialized(v).eval())
self.assertEqual(False, variables.is_variable_initialized(w).eval())
self.assertEqual(False, variables.is_variable_initialized(x).eval())
self.assertEqual(False, variables.is_variable_initialized(v_res).eval())
self.assertEqual(False, variables.is_variable_initialized(w_res).eval())
self.assertEqual(False, variables.is_variable_initialized(x_res).eval())
sm2 = session_manager.SessionManager(local_init_op=[
w.initializer, x.initializer, w_res.initializer, x_res.initializer
])
sess = sm2.prepare_session("", init_op=None)
self.assertEqual(
False,
variables.is_variable_initialized(
sess.graph.get_tensor_by_name("v:0")).eval(session=sess))
self.assertEqual(
True,
variables.is_variable_initialized(
sess.graph.get_tensor_by_name("w:0")).eval(session=sess))
self.assertEqual(
True,
variables.is_variable_initialized(
sess.graph.get_tensor_by_name("x:0")).eval(session=sess))
self.assertEquals(1, sess.run(w))
self.assertEquals(3, sess.run(x))
self.assertEqual(
False,
variables.is_variable_initialized(
sess.graph.get_tensor_by_name("v_res:0")).eval(session=sess))
self.assertEqual(
True,
variables.is_variable_initialized(
sess.graph.get_tensor_by_name("w_res:0")).eval(session=sess))
self.assertEqual(
True,
variables.is_variable_initialized(
sess.graph.get_tensor_by_name("x_res:0")).eval(session=sess))
self.assertEquals(1, sess.run(w_res))
self.assertEquals(3, sess.run(x_res))
def testPrepareSessionDidNotInitLocalVariable(self):
with ops.Graph().as_default():
v = variables.Variable(1, name="v")
w = variables.Variable(
v,
trainable=False,
collections=[ops.GraphKeys.LOCAL_VARIABLES],
name="w")
with self.test_session():
self.assertEqual(False, variables.is_variable_initialized(v).eval())
self.assertEqual(False, variables.is_variable_initialized(w).eval())
sm2 = session_manager.SessionManager(
ready_op=variables.report_uninitialized_variables())
with self.assertRaisesRegexp(
RuntimeError, "Init operations did not make model ready.*"):
sm2.prepare_session("", init_op=v.initializer)
def testPrepareSessionDidNotInitLocalVariableList(self):
with ops.Graph().as_default():
v = variables.Variable(1, name="v")
w = variables.Variable(
v,
trainable=False,
collections=[ops.GraphKeys.LOCAL_VARIABLES],
name="w")
with self.test_session():
self.assertEqual(False, variables.is_variable_initialized(v).eval())
self.assertEqual(False, variables.is_variable_initialized(w).eval())
sm2 = session_manager.SessionManager(
ready_op=variables.report_uninitialized_variables())
with self.assertRaisesRegexp(RuntimeError,
"Init operations did not make model ready"):
sm2.prepare_session("", init_op=[v.initializer])
def testPrepareSessionWithReadyNotReadyForLocal(self):
with ops.Graph().as_default():
v = variables.Variable(1, name="v")
w = variables.Variable(
v,
trainable=False,
collections=[ops.GraphKeys.LOCAL_VARIABLES],
name="w")
with self.test_session():
self.assertEqual(False, variables.is_variable_initialized(v).eval())
self.assertEqual(False, variables.is_variable_initialized(w).eval())
sm2 = session_manager.SessionManager(
ready_op=variables.report_uninitialized_variables(),
ready_for_local_init_op=variables.report_uninitialized_variables(
variables.global_variables()),
local_init_op=w.initializer)
with self.assertRaisesRegexp(
RuntimeError,
"Init operations did not make model ready for local_init"):
sm2.prepare_session("", init_op=None)
def testPrepareSessionWithInsufficientReadyForLocalInitCheck(self):
with ops.Graph().as_default():
v = variables.Variable(1, name="v")
w = variables.Variable(
v,
trainable=False,
collections=[ops.GraphKeys.LOCAL_VARIABLES],
name="w")
with self.test_session():
self.assertEqual(False, variables.is_variable_initialized(v).eval())
self.assertEqual(False, variables.is_variable_initialized(w).eval())
sm2 = session_manager.SessionManager(
ready_op=variables.report_uninitialized_variables(),
ready_for_local_init_op=None,
local_init_op=w.initializer)
with self.assertRaisesRegexp(RuntimeError,
"Init operations did not make model ready.*"):
sm2.prepare_session("", init_op=None)
class ObsoleteSessionManagerTest(test.TestCase):
def testPrepareSessionSucceeds(self):
with ops.Graph().as_default():
v = variables.Variable([1.0, 2.0, 3.0], name="v")
sm = session_manager.SessionManager(
ready_op=variables.assert_variables_initialized())
sess = sm.prepare_session(
"", init_op=variables.global_variables_initializer())
self.assertAllClose([1.0, 2.0, 3.0], sess.run(v))
def testPrepareSessionSucceedsWithInitFeedDict(self):
with ops.Graph().as_default():
p = array_ops.placeholder(dtypes.float32, shape=(3,))
v = variables.Variable(p, name="v")
sm = session_manager.SessionManager(
ready_op=variables.assert_variables_initialized())
sess = sm.prepare_session(
"",
init_op=variables.global_variables_initializer(),
init_feed_dict={p: [1.0, 2.0, 3.0]})
self.assertAllClose([1.0, 2.0, 3.0], sess.run(v))
def testPrepareSessionSucceedsWithInitFn(self):
with ops.Graph().as_default():
v = variables.Variable([125], name="v")
sm = session_manager.SessionManager(
ready_op=variables.assert_variables_initialized())
sess = sm.prepare_session(
"", init_fn=lambda sess: sess.run(v.initializer))
self.assertAllClose([125], sess.run(v))
def testPrepareSessionFails(self):
checkpoint_dir = os.path.join(self.get_temp_dir(), "prepare_session")
checkpoint_dir2 = os.path.join(self.get_temp_dir(), "prepare_session2")
try:
gfile.DeleteRecursively(checkpoint_dir)
gfile.DeleteRecursively(checkpoint_dir2)
except errors.OpError:
pass # Ignore
gfile.MakeDirs(checkpoint_dir)
with ops.Graph().as_default():
v = variables.Variable([1.0, 2.0, 3.0], name="v")
sm = session_manager.SessionManager(
ready_op=variables.assert_variables_initialized())
saver = saver_lib.Saver({"v": v})
sess = sm.prepare_session(
"",
init_op=variables.global_variables_initializer(),
saver=saver,
checkpoint_dir=checkpoint_dir)
self.assertAllClose([1.0, 2.0, 3.0], sess.run(v))
checkpoint_filename = os.path.join(checkpoint_dir,
"prepare_session_checkpoint")
saver.save(sess, checkpoint_filename)
# Create a new Graph and SessionManager and recover.
with ops.Graph().as_default():
# Renames the checkpoint directory.
os.rename(checkpoint_dir, checkpoint_dir2)
gfile.MakeDirs(checkpoint_dir)
v = variables.Variable([6.0, 7.0, 8.0], name="v")
with self.test_session():
self.assertEqual(False, variables.is_variable_initialized(v).eval())
session_manager.SessionManager(
ready_op=variables.assert_variables_initialized())
saver = saver_lib.Saver({"v": v})
# This should fail as there's no checkpoint within 2 seconds.
with self.assertRaisesRegexp(
RuntimeError, "no init_op or init_fn or local_init_op was given"):
sess = sm.prepare_session(
"",
init_op=None,
saver=saver,
checkpoint_dir=checkpoint_dir,
wait_for_checkpoint=True,
max_wait_secs=2)
# Rename the checkpoint directory back.
gfile.DeleteRecursively(checkpoint_dir)
os.rename(checkpoint_dir2, checkpoint_dir)
# This should succeed as there's checkpoint.
sess = sm.prepare_session(
"",
init_op=None,
saver=saver,
checkpoint_dir=checkpoint_dir,
wait_for_checkpoint=True,
max_wait_secs=2)
self.assertEqual(
True,
variables.is_variable_initialized(
sess.graph.get_tensor_by_name("v:0")).eval(session=sess))
def testRecoverSession(self):
# Create a checkpoint.
checkpoint_dir = os.path.join(self.get_temp_dir(), "recover_session")
try:
gfile.DeleteRecursively(checkpoint_dir)
except errors.OpError:
pass # Ignore
gfile.MakeDirs(checkpoint_dir)
with ops.Graph().as_default():
v = variables.Variable(1, name="v")
sm = session_manager.SessionManager(
ready_op=variables.assert_variables_initialized())
saver = saver_lib.Saver({"v": v})
sess, initialized = sm.recover_session(
"", saver=saver, checkpoint_dir=checkpoint_dir)
self.assertFalse(initialized)
sess.run(v.initializer)
self.assertEquals(1, sess.run(v))
saver.save(sess,
os.path.join(checkpoint_dir, "recover_session_checkpoint"))
# Create a new Graph and SessionManager and recover.
with ops.Graph().as_default():
v = variables.Variable(2, name="v")
with self.test_session():
self.assertEqual(False, variables.is_variable_initialized(v).eval())
sm2 = session_manager.SessionManager(
ready_op=variables.assert_variables_initialized())
saver = saver_lib.Saver({"v": v})
sess, initialized = sm2.recover_session(
"", saver=saver, checkpoint_dir=checkpoint_dir)
self.assertTrue(initialized)
self.assertEqual(
True,
variables.is_variable_initialized(
sess.graph.get_tensor_by_name("v:0")).eval(session=sess))
self.assertEquals(1, sess.run(v))
def testWaitForSessionReturnsNoneAfterTimeout(self):
with ops.Graph().as_default():
variables.Variable(1, name="v")
sm = session_manager.SessionManager(
ready_op=variables.assert_variables_initialized(),
recovery_wait_secs=1)
# Set max_wait_secs to allow us to try a few times.
with self.assertRaises(errors.DeadlineExceededError):
sm.wait_for_session(master="", max_wait_secs=3)
if __name__ == "__main__":
test.main()
| apache-2.0 |
popazerty/enigma2-4.3 | lib/python/Screens/LanguageSelection.py | 6 | 5658 | from Screens.Screen import Screen
from Screens.MessageBox import MessageBox
from Components.ActionMap import ActionMap
from Components.Language import language
from Components.config import config
from Components.Sources.List import List
from Components.Label import Label
from Components.Sources.StaticText import StaticText
from Components.Pixmap import Pixmap
from Components.Language_cache import LANG_TEXT
from enigma import eTimer
from Screens.Rc import Rc
from Tools.Directories import resolveFilename, SCOPE_ACTIVE_SKIN
from Tools.LoadPixmap import LoadPixmap
import gettext
inWizzard = False
def LanguageEntryComponent(file, name, index):
png = LoadPixmap(resolveFilename(SCOPE_ACTIVE_SKIN, "countries/" + index + ".png"))
if png is None:
png = LoadPixmap(resolveFilename(SCOPE_ACTIVE_SKIN, "countries/" + file + ".png"))
if png is None:
png = LoadPixmap(resolveFilename(SCOPE_ACTIVE_SKIN, "countries/missing.png"))
res = (index, name, png)
return res
def _cached(x):
return LANG_TEXT.get(config.osd.language.value, {}).get(x, "")
class LanguageSelection(Screen):
def __init__(self, session):
Screen.__init__(self, session)
language.InitLang()
self.oldActiveLanguage = language.getActiveLanguage()
self.catalog = language.getActiveCatalog()
self.list = []
# self["flag"] = Pixmap()
self["summarylangname"] = StaticText()
self["languages"] = List(self.list)
self["languages"].onSelectionChanged.append(self.changed)
self.updateList()
self.onLayoutFinish.append(self.selectActiveLanguage)
self["key_red"] = Label(_("Cancel"))
self["key_green"] = Label(_("Save"))
self["key_yellow"] = Label(_("Update Cache"))
self["key_blue"] = Label(_("Delete Language"))
self["actions"] = ActionMap(["SetupActions", "ColorActions"],
{
"ok": self.save,
"cancel": self.cancel,
"red": self.cancel,
"green": self.save,
"yellow": self.updateCache,
"blue": self.delLang,
}, -1)
def updateCache(self):
print"updateCache"
self["languages"].setList([('update cache','Updating cache, please wait...',None)])
self.updateTimer = eTimer()
self.updateTimer.callback.append(self.startupdateCache)
self.updateTimer.start(100)
def startupdateCache(self):
self.updateTimer.stop()
language.updateLanguageCache()
self["languages"].setList(self.list)
self.selectActiveLanguage()
def selectActiveLanguage(self):
activeLanguage = language.getActiveLanguage()
pos = 0
for x in self.list:
if x[0] == activeLanguage:
self["languages"].index = pos
break
pos += 1
def save(self):
self.run()
global inWizzard
if inWizzard:
inWizzard = False
self.session.openWithCallback(self.deletelanguagesCB, MessageBox, _("Do you want to delete all other languages?"), default = False)
else:
self.close(self.oldActiveLanguage != config.osd.language.value)
def deletelanguagesCB(self, anwser):
if anwser:
language.delLanguage()
self.close()
def cancel(self):
language.activateLanguage(self.oldActiveLanguage)
config.osd.language.setValue(self.oldActiveLanguage)
config.osd.language.save()
self.close()
def delLang(self):
curlang = config.osd.language.value
lang = curlang
languageList = language.getLanguageListSelection()
for t in languageList:
if curlang == t[0]:
lang = t[1]
break
self.session.openWithCallback(self.delLangCB, MessageBox, _("Do you want to delete all other languages?") + _(" Except %s") %(lang), default = False)
def delLangCB(self, anwser):
if anwser:
language.delLanguage()
language.activateLanguage(self.oldActiveLanguage)
self.updateList()
self.selectActiveLanguage()
def run(self, justlocal = False):
print "updating language..."
lang = self["languages"].getCurrent()[0]
if lang == 'update cache':
self.setTitle("Updating cache")
self["summarylangname"].setText("Updating cache")
return
if lang != config.osd.language.value:
config.osd.language.setValue(lang)
config.osd.language.save()
self.setTitle(_cached("T2"))
self["summarylangname"].setText(_cached("T2"))
self["key_red"].setText(_cached("T3"))
self["key_green"].setText(_cached("T4"))
# index = self["languages"].getCurrent()[2]
# print 'INDEX:',index
# self["flag"].instance.setPixmap(self["languages"].getCurrent()[2])
if justlocal:
return
language.activateLanguage(lang)
config.misc.languageselected.value = 0
config.misc.languageselected.save()
print "ok"
def updateList(self):
languageList = language.getLanguageList()
if not languageList: # no language available => display only english
list = [ LanguageEntryComponent("en", "English (UK)", "en_GB") ]
else:
list = [ LanguageEntryComponent(file = x[1][2].lower(), name = x[1][0], index = x[0]) for x in languageList]
self.list = list
self["languages"].list = list
def changed(self):
self.run(justlocal = True)
class LanguageWizard(LanguageSelection, Rc):
def __init__(self, session):
LanguageSelection.__init__(self, session)
Rc.__init__(self)
global inWizzard
inWizzard = True
self.onLayoutFinish.append(self.selectKeys)
self["wizard"] = Pixmap()
self["summarytext"] = StaticText()
self["text"] = Label()
self.setText()
def selectKeys(self):
self.clearSelectedKeys()
self.selectKey("UP")
self.selectKey("DOWN")
def changed(self):
self.run(justlocal = True)
self.setText()
def setText(self):
self["text"].setText(_cached("T1"))
self["summarytext"].setText(_cached("T1"))
def createSummary(self):
return LanguageWizardSummary
class LanguageWizardSummary(Screen):
def __init__(self, session, parent):
Screen.__init__(self, session, parent)
| gpl-2.0 |
opentracing/opentracing-python | opentracing/mocktracer/propagator.py | 3 | 1271 | # Copyright (c) The OpenTracing Authors.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
from __future__ import absolute_import
class Propagator(object):
def inject(self, span_context, carrier):
pass
def extract(self, carrier):
pass
| apache-2.0 |
rajul/tvb-framework | tvb/interfaces/__init__.py | 2 | 1546 | # -*- coding: utf-8 -*-
#
#
# TheVirtualBrain-Framework Package. This package holds all Data Management, and
# Web-UI helpful to run brain-simulations. To use it, you also need do download
# TheVirtualBrain-Scientific Package (for simulators). See content of the
# documentation-folder for more details. See also http://www.thevirtualbrain.org
#
# (c) 2012-2013, Baycrest Centre for Geriatric Care ("Baycrest")
#
# This program is free software; you can redistribute it and/or modify it under
# the terms of the GNU General Public License version 2 as published by the Free
# Software Foundation. This program is distributed in the hope that it will be
# useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
# License for more details. You should have received a copy of the GNU General
# Public License along with this program; if not, you can download it here
# http://www.gnu.org/licenses/old-licenses/gpl-2.0
#
#
# CITATION:
# When using The Virtual Brain for scientific publications, please cite it as follows:
#
# Paula Sanz Leon, Stuart A. Knock, M. Marmaduke Woodman, Lia Domide,
# Jochen Mersmann, Anthony R. McIntosh, Viktor Jirsa (2013)
# The Virtual Brain: a simulator of primate brain network dynamics.
# Frontiers in Neuroinformatics (7:10. doi: 10.3389/fninf.2013.00010)
#
#
"""
This package contains a web client interface for TVB.
"""
from pkgutil import extend_path
__path__ = extend_path(__path__, __name__) | gpl-2.0 |
rbreitenmoser/snapcraft | snapcraft/plugins/catkin.py | 1 | 19520 | # -*- Mode:Python; indent-tabs-mode:nil; tab-width:4 -*-
#
# Copyright © 2015 Canonical Ltd
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""The catkin plugin is useful for building ROS parts.
This plugin uses the common plugin keywords as well as those for "sources".
For more information check the 'plugins' topic for the former and the
'sources' topic for the latter.
Additionally, this plugin uses the following plugin-specific keywords:
- catkin-packages:
(list of strings)
List of catkin packages to build.
- source-space:
(string)
The source space containing Catkin packages. By default this is 'src'.
"""
import os
import tempfile
import logging
import shutil
import re
import subprocess
import snapcraft
from snapcraft import (
common,
repo,
)
logger = logging.getLogger(__name__)
class CatkinPlugin(snapcraft.BasePlugin):
    """Snapcraft plugin that builds ROS Catkin packages into a snap.

    Lifecycle: pull() fetches sources and the system dependencies declared in
    each package's package.xml (via rosdep); build() prepares the sources,
    runs catkin_make_isolated, and cleans up the resulting install tree.
    """

    # Extra apt sources used when fetching ROS packages and their
    # dependencies. ${prefix}/${suffix}/${security} are substituted by
    # snapcraft's repo machinery.
    _PLUGIN_STAGE_SOURCES = '''
deb http://packages.ros.org/ros/ubuntu/ trusty main
deb http://${prefix}.ubuntu.com/${suffix}/ trusty main universe
deb http://${prefix}.ubuntu.com/${suffix}/ trusty-updates main universe
deb http://${prefix}.ubuntu.com/${suffix}/ trusty-security main universe
deb http://${security}.ubuntu.com/${suffix} trusty-security main universe
'''

    @classmethod
    def schema(cls):
        """Extend the base schema with rosdistro, catkin-packages and
        source-space keys; catkin-packages becomes mandatory."""
        schema = super().schema()

        schema['properties']['rosdistro'] = {
            'type': 'string',
            'default': 'indigo'
        }

        schema['properties']['catkin-packages'] = {
            'type': 'array',
            'minitems': 1,
            'uniqueItems': True,
            'items': {
                'type': 'string'
            },
            'default': [],
        }

        schema['properties']['source-space'] = {
            'type': 'string',
            'default': 'src',
        }

        schema['required'].append('catkin-packages')

        return schema

    def __init__(self, name, options):
        """Validate the workspace layout and record the packages to build.

        Raises RuntimeError if source-space points at the workspace root.
        """
        super().__init__(name, options)

        # Get a unique set of packages
        self.catkin_packages = set(options.catkin_packages)

        # The path created via the `source` key (or a combination of `source`
        # and `source-subdir` keys) needs to point to a valid Catkin workspace
        # containing another subdirectory called the "source space." By
        # default, this is a directory named "src," but it can be remapped via
        # the `source-space` key. It's important that the source space is not
        # the root of the Catkin workspace, since Catkin won't work that way
        # and it'll create a circular link that causes rosdep to hang.
        if self.options.source_subdir:
            self._ros_package_path = os.path.join(self.sourcedir,
                                                  self.options.source_subdir,
                                                  self.options.source_space)
        else:
            self._ros_package_path = os.path.join(self.sourcedir,
                                                  self.options.source_space)

        if os.path.abspath(self.sourcedir) == os.path.abspath(
                self._ros_package_path):
            raise RuntimeError(
                'source-space cannot be the root of the Catkin workspace')

    def env(self, root):
        """Runtime environment for ROS binaries and services."""
        return [
            # The ROS packaging system tools (e.g. rospkg, etc.) don't go
            # into the ROS install path (/opt/ros/$distro), so we need the
            # PYTHONPATH to include the dist-packages in /usr/lib as well.
            'PYTHONPATH={0}'.format(os.path.join(root, 'usr', 'lib',
                                    self.python_version, 'dist-packages')),

            # This environment variable tells ROS nodes where to find ROS
            # master. It does not affect ROS master, however-- this is just the
            # default URI.
            'ROS_MASTER_URI=http://localhost:11311',

            # Various ROS tools (e.g. rospack, roscore) keep a cache or a log,
            # and use $ROS_HOME to determine where to put them.
            'ROS_HOME=$SNAP_USER_DATA/ros',

            # This environment variable points to where the setup.sh and
            # _setup_util.py files are located. This is required at both build-
            # and run-time.
            '_CATKIN_SETUP_DIR={}'.format(os.path.join(
                root, 'opt', 'ros', self.options.rosdistro)),

            # FIXME: Nasty hack to source ROS's setup.sh (since each of these
            # lines is prepended with "export"). There's got to be a better way
            # to do this.
            'echo FOO=BAR\nif `test -e {0}` ; then\n. {0} ;\nfi\n'.format(
                os.path.join(
                    root, 'opt', 'ros', self.options.rosdistro, 'setup.sh'))
        ]

    def pull(self):
        """Copy source into build directory and fetch dependencies.

        Catkin packages can specify their system dependencies in their
        package.xml. In order to support that, the Catkin packages are
        interrogated for their dependencies here. Since `stage-packages` are
        already installed by the time this function is run, the dependencies
        from the package.xml are pulled down explicitly.
        """
        super().pull()

        # Make sure the package path exists before continuing
        if not os.path.exists(self._ros_package_path):
            raise FileNotFoundError(
                'Unable to find package path: "{}"'.format(
                    self._ros_package_path))

        # Parse the Catkin packages to pull out their system dependencies
        # NOTE(review): `PLUGIN_STAGE_SOURCES` (no leading underscore) is
        # expected to be exposed by snapcraft.BasePlugin, derived from the
        # `_PLUGIN_STAGE_SOURCES` defined above — confirm against BasePlugin.
        system_dependencies = _find_system_dependencies(
            self.catkin_packages, self.options.rosdistro,
            self._ros_package_path, os.path.join(self.partdir, 'rosdep'),
            self.PLUGIN_STAGE_SOURCES)

        # Pull down and install any system dependencies that were discovered
        if system_dependencies:
            ubuntudir = os.path.join(self.partdir, 'ubuntu')
            os.makedirs(ubuntudir, exist_ok=True)

            logger.info('Preparing to fetch package dependencies...')
            ubuntu = repo.Ubuntu(ubuntudir, sources=self.PLUGIN_STAGE_SOURCES)

            logger.info('Fetching package dependencies...')
            ubuntu.get(system_dependencies)

            logger.info('Installing package dependencies...')
            ubuntu.unpack(self.installdir)

    @property
    def python_version(self):
        # e.g. "python2.7" — queried from the build environment.
        return self.run_output(['pyversions', '-i'])

    @property
    def gcc_version(self):
        # e.g. "4.8" — used to locate the matching C++ headers below.
        return self.run_output(['gcc', '-dumpversion'])

    @property
    def rosdir(self):
        # Destination install space: <installdir>/opt/ros/<rosdistro>.
        return os.path.join(self.installdir, 'opt', 'ros',
                            self.options.rosdistro)

    def _run_in_bash(self, commandlist, cwd=None):
        """Run *commandlist* through a temporary bash script (set -ex)."""
        with tempfile.NamedTemporaryFile(mode='w') as f:
            f.write('set -ex\n')
            f.write('exec {}\n'.format(' '.join(commandlist)))
            f.flush()

            self.run(['/bin/bash', f.name], cwd=cwd)

    def build(self):
        """Build Catkin packages.

        This function runs some pre-build steps to prepare the sources for
        building in the Snapcraft environment, builds the packages via
        catkin_make_isolated, and finally runs some post-build clean steps
        to prepare the newly-minted install to be packaged as a .snap.
        """
        super().build()

        logger.info('Preparing to build Catkin packages...')
        self._prepare_build()

        logger.info('Building Catkin packages...')
        self._build_catkin_packages()

        logger.info('Cleaning up newly installed Catkin packages...')
        self._finish_build()

    def _prepare_build(self):
        """Rewrite absolute paths in staged *Config.cmake files so
        find_package() resolves inside the part's install directory."""
        # Each Catkin package distributes .cmake files so they can be found via
        # find_package(). However, the Ubuntu packages pulled down as
        # dependencies contain .cmake files pointing to system paths (e.g.
        # /usr/lib, /usr/include, etc.). They need to be rewritten to point to
        # the install directory.
        def rewrite_paths(match):
            paths = match.group(1).strip().split(';')
            for i, path in enumerate(paths):
                # Rewrite this path if it's an absolute path and not already
                # within the install directory.
                if (os.path.isabs(path) and
                        not path.startswith(self.installdir)):
                    paths[i] = self.installdir + path

            return '"' + ';'.join(paths) + '"'

        # Looking for any path-like string
        common.replace_in_file(self.rosdir, re.compile(r'.*Config.cmake$'),
                               re.compile(r'"(.*?/.*?)"'),
                               rewrite_paths)

    def _finish_build(self):
        """Make the freshly installed tree relocatable inside the snap."""
        # Fix all shebangs to use the in-snap python.
        common.replace_in_file(self.rosdir, re.compile(r''),
                               re.compile(r'#!.*python'),
                               r'#!/usr/bin/env python')

        # Replace the CMAKE_PREFIX_PATH in _setup_util.sh
        setup_util_file = os.path.join(self.rosdir, '_setup_util.py')
        if os.path.isfile(setup_util_file):
            with open(setup_util_file, 'r+') as f:
                pattern = re.compile(r"CMAKE_PREFIX_PATH = '{}.*".format(
                    self.rosdir))
                replaced = pattern.sub('CMAKE_PREFIX_PATH = []', f.read())
                f.seek(0)
                f.truncate()
                f.write(replaced)

        # Also replace the python usage in 10.ros.sh to use the in-snap python.
        ros10_file = os.path.join(self.rosdir,
                                  'etc/catkin/profile.d/10.ros.sh')
        if os.path.isfile(ros10_file):
            with open(ros10_file, 'r+') as f:
                pattern = re.compile(r'/usr/bin/python')
                replaced = pattern.sub(r'python', f.read())
                f.seek(0)
                f.truncate()
                f.write(replaced)

    def _build_catkin_packages(self):
        """Assemble and run the catkin_make_isolated command line."""
        # Nothing to do if no packages were specified
        if not self.catkin_packages:
            return

        catkincmd = ['catkin_make_isolated']

        # Install the package
        catkincmd.append('--install')

        # Specify the packages to be built
        catkincmd.append('--pkg')
        catkincmd.extend(self.catkin_packages)

        # Don't clutter the real ROS workspace-- use the Snapcraft build
        # directory
        catkincmd.extend(['--directory', self.builddir])

        # Account for a non-default source space by always specifying it
        catkincmd.extend(['--source-space', os.path.join(
            self.builddir, self.options.source_space)])

        # Specify that the package should be installed along with the rest of
        # the ROS distro.
        catkincmd.extend(['--install-space', self.rosdir])

        # All the arguments that follow are meant for CMake
        catkincmd.append('--cmake-args')

        # Make sure we're using the compilers included in this .snap
        catkincmd.extend([
            '-DCMAKE_C_FLAGS="$CFLAGS"',
            '-DCMAKE_CXX_FLAGS="$CPPFLAGS -I{} -I{}"'.format(
                os.path.join(self.installdir, 'usr', 'include', 'c++',
                             self.gcc_version),
                os.path.join(self.installdir, 'usr', 'include',
                             common.get_arch_triplet(), 'c++',
                             self.gcc_version)),
            '-DCMAKE_LD_FLAGS="$LDFLAGS"',
            '-DCMAKE_C_COMPILER={}'.format(
                os.path.join(self.installdir, 'usr', 'bin', 'gcc')),
            '-DCMAKE_CXX_COMPILER={}'.format(
                os.path.join(self.installdir, 'usr', 'bin', 'g++'))
        ])

        # This command must run in bash due to a bug in Catkin that causes it
        # to explode if there are spaces in the cmake args (which there are).
        # This has been fixed in Catkin Tools... perhaps we should be using
        # that instead.
        self._run_in_bash(catkincmd)
def _find_system_dependencies(catkin_packages, ros_distro, ros_package_path,
                              rosdep_path, ubuntu_sources):
    """Find system dependencies for a given set of Catkin packages."""
    rosdep = _Rosdep(ros_distro, ros_package_path, rosdep_path, ubuntu_sources)
    rosdep.setup()

    logger.info('Determining system dependencies for Catkin packages...')

    # Maps rosdep key -> resolved Ubuntu package name.
    resolved = {}
    for package in catkin_packages:
        # Query rosdep for the list of dependencies for this package
        for dependency in rosdep.get_dependencies(package):
            # Skip dependencies satisfied locally by a package we build,
            # and ones we already resolved into a system dependency.
            if dependency in catkin_packages:
                continue
            if dependency in resolved:
                continue

            # In this situation, the package depends on something that we
            # weren't instructed to build. It's probably a system dependency,
            # but the developer could have also forgotten to tell us to build
            # it.
            system_dependency = rosdep.resolve_dependency(dependency)

            if not system_dependency:
                raise RuntimeError(
                    'Package "{}" isn\'t a valid system dependency. '
                    'Did you forget to add it to catkin-packages? If '
                    'not, add the Ubuntu package containing it to '
                    'stage-packages until you can get it into the '
                    'rosdep database.'.format(dependency))

            resolved[dependency] = system_dependency

            # TODO: Not sure why this isn't pulled in by roscpp. Can it
            # be compiled by clang, etc.? If so, perhaps this should be
            # left up to the developer.
            if dependency == 'roscpp':
                resolved['g++'] = 'g++'

    # Finally, return a list of all system dependencies
    return list(resolved.values())
class _Rosdep:
    """Self-contained rosdep installation used to query package dependencies.

    rosdep is unpacked into a private directory (not the host system and not
    the snap) and driven via environment variables so nothing on the host is
    touched.
    """

    def __init__(self, ros_distro, ros_package_path, rosdep_path,
                 ubuntu_sources):
        self._ros_distro = ros_distro
        self._ros_package_path = ros_package_path
        self._ubuntu_sources = ubuntu_sources
        self._rosdep_path = rosdep_path
        # Layout under rosdep_path: install/ (unpacked deb), sources.list.d/
        # (rosdep sources), cache/ (rosdep's $ROS_HOME cache).
        self._rosdep_install_path = os.path.join(self._rosdep_path, 'install')
        self._rosdep_sources_path = os.path.join(self._rosdep_path,
                                                 'sources.list.d')
        self._rosdep_cache_path = os.path.join(self._rosdep_path, 'cache')

    def setup(self):
        """Fetch/unpack rosdep privately and initialize+update its database.

        Raises RuntimeError if `rosdep init` or `rosdep update` fails.
        """
        # Make sure we can run multiple times without error, while leaving the
        # capability to re-initialize, by making sure we clear the sources.
        if os.path.exists(self._rosdep_sources_path):
            shutil.rmtree(self._rosdep_sources_path)

        os.makedirs(self._rosdep_sources_path)
        os.makedirs(self._rosdep_install_path, exist_ok=True)
        os.makedirs(self._rosdep_cache_path, exist_ok=True)

        # rosdep isn't necessarily a dependency of the project, and we don't
        # want to bloat the .snap more than necessary. So we'll unpack it
        # somewhere else, and use it from there.
        logger.info('Preparing to fetch rosdep...')
        ubuntu = repo.Ubuntu(self._rosdep_path, sources=self._ubuntu_sources)

        logger.info('Fetching rosdep...')
        ubuntu.get(['python-rosdep'])

        logger.info('Installing rosdep...')
        ubuntu.unpack(self._rosdep_install_path)

        logger.info('Initializing rosdep database...')
        try:
            self._run(['init'])
        except subprocess.CalledProcessError as e:
            output = e.output.decode('utf8').strip()
            raise RuntimeError(
                'Error initializing rosdep database:\n{}'.format(output))

        logger.info('Updating rosdep database...')
        try:
            self._run(['update'])
        except subprocess.CalledProcessError as e:
            output = e.output.decode('utf8').strip()
            raise RuntimeError(
                'Error updating rosdep database:\n{}'.format(output))

    def get_dependencies(self, package_name):
        """Return the list of rosdep keys *package_name* depends upon.

        Raises FileNotFoundError if the Catkin package is unknown to rosdep.
        """
        try:
            output = self._run(['keys', package_name]).strip()
            if output:
                return output.split('\n')
            else:
                # Package has no dependencies at all.
                return []
        except subprocess.CalledProcessError:
            raise FileNotFoundError(
                'Unable to find Catkin package "{}"'.format(package_name))

    def resolve_dependency(self, dependency_name):
        """Resolve a rosdep key into an Ubuntu package name, or None."""
        try:
            # rosdep needs three pieces of information here:
            #
            # 1) The dependency we're trying to lookup.
            # 2) The rosdistro being used.
            # 3) The version of Ubuntu being used. We're currently using only
            #    the Trusty ROS sources, so we're telling rosdep to resolve
            #    dependencies using Trusty (even if we're running on something
            #    else).
            output = self._run(['resolve', dependency_name, '--rosdistro',
                                self._ros_distro, '--os', 'ubuntu:trusty'])
        except subprocess.CalledProcessError:
            # Not resolvable -- caller decides whether that's fatal.
            return None

        # `rosdep resolve` returns output like:
        #    #apt
        #    ros-indigo-package
        #
        # We're obviously only interested in the second line.
        resolved = output.split('\n')
        if len(resolved) < 2:
            raise RuntimeError(
                'Unexpected rosdep resolve output:\n{}'.format(output))

        return resolved[1]

    def _run(self, arguments):
        """Invoke our private rosdep with a sandboxed environment; return
        its stripped stdout. Raises CalledProcessError on failure."""
        env = os.environ.copy()

        # We want to make sure we use our own rosdep (which is python)
        env['PATH'] = os.path.join(self._rosdep_install_path, 'usr', 'bin')
        env['PYTHONPATH'] = os.path.join(self._rosdep_install_path, 'usr',
                                         'lib', 'python2.7', 'dist-packages')

        # By default, rosdep uses /etc/ros/rosdep to hold its sources list. We
        # don't want that here since we don't want to touch the host machine
        # (not to mention it would require sudo), so we can redirect it via
        # this environment variable
        env['ROSDEP_SOURCE_PATH'] = self._rosdep_sources_path

        # By default, rosdep saves its cache in $HOME/.ros, which we shouldn't
        # access here, so we'll redirect it with this environment variable.
        env['ROS_HOME'] = self._rosdep_cache_path

        # This environment variable tells rosdep which directory to recursively
        # search for packages.
        env['ROS_PACKAGE_PATH'] = self._ros_package_path

        return subprocess.check_output(['rosdep'] + arguments,
                                       env=env).decode('utf8').strip()
| gpl-3.0 |
kgblll/libresoft-gymkhana | social/layers/InternalLayer.py | 2 | 7302 | #!/usr/bin/env python
# Copyright (C) 2009-2010 GSyC/LibreSoft, Universidad Rey Juan Carlos
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see http://www.gnu.org/licenses/.
#
# Author : Jose Gato Luis <jgato@libresoft.es>
#
from django.contrib.gis.geos import Point
from django.contrib.contenttypes.models import ContentType
from django.contrib.gis.measure import D
from GIC.Channels.GenericChannel import *
from social.core.config import ALLOWED_SEARCH
from social.core.utils import get_person
from social.core.api_user import get_data as user_get_data
from social.core.api_layer import get_data as layer_get_data
import string
class InternalLayer (GenericChannel):
MANDATORY_FIELDS = ["latitude", "longitude", "radius", "category"]
CATEGORIES = [{"id" : "0", "name" : "all", "desc" : "All supported models in LibreGeoSocial"},
{"id" : "1", "name" : "photo", "desc" : "LibreGeoSocial GeoPhotoS"},
{"id" : "2", "name" : "note", "desc" : "LibreGeoSocial GeoNotes"},
{"id" : "3", "name" : "sound", "desc": "LibreGeoSocial GeoSounds"},
{"id" : "4", "name" : "video", "desc": "LibreGeoSocial GeoVideos"}]
def __init__ (self, layer_id):
self.options = {}
self.layer_id = layer_id
def get_info(self):
layer_data = layer_get_data(self.layer_id)
if layer_data == None:
"""
Maybe a virtual layer
"""
virtual_layer = user_get_data(self.layer_id)
if virtual_layer != None:
return "Virtual Layer: no description"
else:
return "No data"
else:
return layer_data["description"]
def get_categories(self):
return self.CATEGORIES
pass
def set_mandatory_fields(self, dict):
for field in self.MANDATORY_FIELDS:
if not field in dict:
return (False, field)
else:
self.options[field] = dict[field]
return (True, "")
def set_options(self, options):
success, result = self.set_mandatory_fields(options)
if not success:
return False, "\"%s\" parameter is mandatory " % (result)
self.options["user"] = options["user"]
return True, ""
def _category_model(self):
selected_category = self.options["category"]
result = []
for category in self.CATEGORIES:
if selected_category == category["id"]:
if category["id"] == "0":
result = ALLOWED_SEARCH
else:
result = [category["name"]]
break;
return result
def _serialize_results(self, results):
"""
After a searh we get an dictionary with arrays of models
we will serialize it in a simple array of nodes
"""
node_list = []
for model in results:
for node in results[model]:
node_list.append(node)
return node_list
def _do_search (self, type, fields, terms, layer_id, exact=False):
"""
Finds the objects in type whose fields include terms
@param type: the model type
@param fields: the fields that will be looked
@param terms: the search terms
@param layer_id: id of the layer to make the search
@param exact: terms must be exact
"""
from social.core.models import LayerNodes, Person
from django.contrib.gis.db.models.query import GeoQuerySet
try:
"""
First of all we need to know if layer_id it is a real layer
or virtual (user) layer
"""
virtual_layer = False
user_layer = user_get_data(layer_id)
if user_layer != None:
virtual_layer = True
result = type.objects.none()
terms = string.split(terms)
for f in fields:
if virtual_layer:
try:
#print "virtual layer - ", type
r = type.objects.filter(uploader = layer_id)
except: #some types are not allowed in virtual layers, for example persons and return no results
return result
else:
nodes_layer = LayerNodes.objects.get_nodes_for_layer(layer_id)
r = type.objects.filter(id__in = nodes_layer)
#if isinstance(node, Person):
# print "blblbl"
for term in terms:
if exact:
r = r.__and__(type.objects.filter(**{"%s" % f: term}))
else:
r = r.__and__(type.objects.filter(**{"%s__icontains" % f: term}))
result = result.__or__(r)
return result
except Exception, err:
print err
return None
def _do_multi_search (self, types_fields, terms, layer_id, viewer=None, exact=False):
"""
This receives an array of dictionaries with types and terms
"""
results = {}
for tf in types_fields:
if viewer:
try:
model_search = tf["type"].objects.allowed(viewer.id).__and__(self._do_search(tf["type"], tf["fields"], terms, layer_id, exact))
except:
return False
else:
model_search = self._do_search(tf["type"], tf["fields"], terms, exact)
results[tf["type"]._meta.verbose_name] = model_search
return results
def search(self, user, longitude, latitude, models, terms, layer_id, dist=0):
"""
Uses the search application and returns the results
@param user: The user that makes the request, important for privacy
@param longitude: longitude point to search around
@param latitude: latitude point to search around
@param models_fields: A list with the models
@param terms: the search terms
@param layer_id: id of the layer to make the search
@param dist: the maximum distance from (longitude,latitude) point, if 0
all matching nodes will be returned and longitude and latitude are ignored.
"""
try:
v = get_person(user)
except:
return []
m_f=[]
for model in models:
if model in ALLOWED_SEARCH:
try:
model_type = ContentType.objects.get(name=model, app_label="core").model_class()
m_f += [{"type": model_type, "fields": model_type.get_search_fields}]
except Exception, err:
print err
pass
results= self._do_multi_search(m_f, terms, layer_id, v )
if results == False:
return []
#Now filter by distance if requested
if float(dist) > 0:
point = Point(float(longitude), float(latitude), srid=4326)
for model in models:
from django.db.models.query import EmptyQuerySet
if not isinstance(results[model], EmptyQuerySet):
results[model]=results[model].distance(point).order_by("distance"
).filter(position__distance_lte=(point, D(km=float(dist))))
else:
results[model] = []
"""
Serialize models to an unidimensional array, in order to extract the limits
and allow to create slides of information
"""
node_list = self._serialize_results(results)
return node_list
def process(self):
models = self._category_model()
if len(models) > 0:
results = self.search(self.options["user"], self.options["longitude"], self.options["latitude"],
models, self.search_pattern, self.layer_id, self.options["radius"])
return True, results
else:
return False, "Category not supported"
| gpl-2.0 |
BayanGroup/sentry | tests/sentry/api/endpoints/test_team_details.py | 1 | 3125 | from __future__ import absolute_import
from django.core.urlresolvers import reverse
from mock import patch
from sentry.models import (
OrganizationMemberType, Team, TeamStatus
)
from sentry.testutils import APITestCase
class TeamDetailsTest(APITestCase):
    """GET team-details returns the team's serialized data."""

    def test_simple(self):
        team = self.team  # force creation
        self.login_as(user=self.user)

        path_kwargs = {
            'organization_slug': team.organization.slug,
            'team_slug': team.slug,
        }
        resp = self.client.get(
            reverse('sentry-api-0-team-details', kwargs=path_kwargs))

        assert resp.status_code == 200
        assert resp.data['id'] == str(team.id)
class TeamUpdateTest(APITestCase):
    """PUT team-details updates the team's name and slug."""

    def test_simple(self):
        team = self.team  # force creation
        self.login_as(user=self.user)

        path_kwargs = {
            'organization_slug': team.organization.slug,
            'team_slug': team.slug,
        }
        payload = {
            'name': 'hello world',
            'slug': 'foobar',
        }
        resp = self.client.put(
            reverse('sentry-api-0-team-details', kwargs=path_kwargs),
            data=payload)
        assert resp.status_code == 200, resp.content

        # Re-fetch from the database and confirm the changes persisted.
        updated = Team.objects.get(id=team.id)
        assert updated.name == 'hello world'
        assert updated.slug == 'foobar'
class TeamDeleteTest(APITestCase):
    """DELETE team-details: admins may schedule deletion, members may not."""

    @patch('sentry.api.endpoints.team_details.delete_team')
    def test_as_admin(self, delete_team):
        """An org admin's DELETE marks the team pending-deletion and queues
        the async delete task (delete_team is mocked, so nothing is removed)."""
        org = self.create_organization()
        team = self.create_team(organization=org)
        project = self.create_project(team=team)  # NOQA
        user = self.create_user(email='foo@example.com', is_superuser=False)

        org.member_set.create(
            user=user,
            has_global_access=True,
            type=OrganizationMemberType.ADMIN,
        )

        self.login_as(user)

        url = reverse('sentry-api-0-team-details', kwargs={
            'organization_slug': team.organization.slug,
            'team_slug': team.slug,
        })

        # SENTRY_PROJECT=0 so the internal project doesn't block deletion.
        with self.settings(SENTRY_PROJECT=0):
            response = self.client.delete(url)

        team = Team.objects.get(id=team.id)

        # 204: accepted; actual removal is deferred to the async task.
        assert response.status_code == 204, response.data
        assert team.status == TeamStatus.PENDING_DELETION

        delete_team.delay.assert_called_once_with(
            object_id=team.id,
            countdown=3600,
        )

    def test_as_member(self):
        """A plain member's DELETE is rejected with 403."""
        org = self.create_organization(owner=self.user)
        team = self.create_team(organization=org)
        project = self.create_project(team=team)  # NOQA

        user = self.create_user(email='foo@example.com', is_superuser=False)

        team.organization.member_set.create_or_update(
            organization=org,
            user=user,
            values={
                'type': OrganizationMemberType.MEMBER,
            }
        )

        self.login_as(user=user)

        url = reverse('sentry-api-0-team-details', kwargs={
            'organization_slug': team.organization.slug,
            'team_slug': team.slug,
        })
        response = self.client.delete(url)
        assert response.status_code == 403
| bsd-3-clause |
nwchandler/ansible | lib/ansible/modules/storage/zfs/zpool_facts.py | 69 | 6653 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2016, Adam Števko <adam.stevko@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: zpool_facts
short_description: Gather facts about ZFS pools.
description:
- Gather facts from ZFS pool properties.
version_added: "2.3"
author: Adam Števko (@xen0l)
options:
name:
description:
- ZFS pool name.
aliases: [ "pool", "zpool" ]
required: false
parsable:
description:
- Specifies if property values should be displayed in machine
friendly format.
type: bool
default: False
required: false
properties:
description:
- Specifies which dataset properties should be queried in comma-separated format.
For more information about dataset properties, check zpool(1M) man page.
aliases: [ "props" ]
default: all
required: false
'''
EXAMPLES = '''
# Gather facts about ZFS pool rpool
zpool_facts: pool=rpool
# Gather space usage about all imported ZFS pools
zpool_facts: properties='free,size'
debug: msg='ZFS pool {{ item.name }} has {{ item.free }} free space out of {{ item.size }}.'
with_items: '{{ ansible_zfs_pools }}'
'''
RETURN = '''
name:
description: ZFS pool name
returned: always
type: string
sample: rpool
parsable:
description: if parsable output should be provided in machine friendly format.
returned: if 'parsable' is set to True
type: boolean
sample: True
zfs_pools:
description: ZFS pool facts
returned: always
type: string
sample:
{
"allocated": "3.46G",
"altroot": "-",
"autoexpand": "off",
"autoreplace": "off",
"bootfs": "rpool/ROOT/openindiana",
"cachefile": "-",
"capacity": "6%",
"comment": "-",
"dedupditto": "0",
"dedupratio": "1.00x",
"delegation": "on",
"expandsize": "-",
"failmode": "wait",
"feature@async_destroy": "enabled",
"feature@bookmarks": "enabled",
"feature@edonr": "enabled",
"feature@embedded_data": "active",
"feature@empty_bpobj": "active",
"feature@enabled_txg": "active",
"feature@extensible_dataset": "enabled",
"feature@filesystem_limits": "enabled",
"feature@hole_birth": "active",
"feature@large_blocks": "enabled",
"feature@lz4_compress": "active",
"feature@multi_vdev_crash_dump": "enabled",
"feature@sha512": "enabled",
"feature@skein": "enabled",
"feature@spacemap_histogram": "active",
"fragmentation": "3%",
"free": "46.3G",
"freeing": "0",
"guid": "15729052870819522408",
"health": "ONLINE",
"leaked": "0",
"listsnapshots": "off",
"name": "rpool",
"readonly": "off",
"size": "49.8G",
"version": "-"
}
'''
import os
from collections import defaultdict
from ansible.module_utils.six import iteritems
from ansible.module_utils.basic import AnsibleModule
class ZPoolFacts(object):
    """Queries the `zpool` utility and turns pool properties into Ansible facts."""

    def __init__(self, module):
        """Capture the AnsibleModule handle and the user-supplied parameters."""
        self.module = module

        self.name = module.params['name']
        self.parsable = module.params['parsable']
        self.properties = module.params['properties']

        # Maps pool name -> {property: value}; filled by get_facts().
        self._pools = defaultdict(dict)
        self.facts = []

    def pool_exists(self):
        """Return True if the pool named ``self.name`` is currently imported."""
        cmd = [self.module.get_bin_path('zpool')]

        cmd.append('list')
        cmd.append(self.name)

        (rc, out, err) = self.module.run_command(cmd)

        # `zpool list <name>` exits non-zero when the pool is unknown.
        return rc == 0

    def get_facts(self):
        """Run `zpool get` and return {'ansible_zfs_pools': [...]}.

        Calls module.fail_json (which exits) if the command fails.
        """
        cmd = [self.module.get_bin_path('zpool')]

        cmd.append('get')
        cmd.append('-H')  # no header row, tab-separated: machine readable
        if self.parsable:
            cmd.append('-p')  # exact values instead of human-friendly units
        cmd.append('-o')
        cmd.append('name,property,value')
        cmd.append(self.properties)
        if self.name:
            cmd.append(self.name)

        (rc, out, err) = self.module.run_command(cmd)

        if rc == 0:
            for line in out.splitlines():
                # Each row is "<pool>\t<property>\t<value>".
                # (renamed from `property` to avoid shadowing the builtin)
                pool, prop, value = line.split('\t')

                self._pools[pool].update({prop: value})

            for k, v in iteritems(self._pools):
                v.update({'name': k})
                self.facts.append(v)

            return {'ansible_zfs_pools': self.facts}
        else:
            self.module.fail_json(msg='Error while trying to get facts about ZFS pool: %s' % self.name,
                                  stderr=err,
                                  rc=rc)
def main():
    """Ansible module entry point: gather ZFS pool facts and exit."""
    module = AnsibleModule(
        argument_spec=dict(
            name=dict(required=False, aliases=['pool', 'zpool'], type='str'),
            parsable=dict(required=False, default=False, type='bool'),
            properties=dict(required=False, default='all', type='str'),
        ),
        supports_check_mode=True
    )

    zpool_facts = ZPoolFacts(module)

    # Fact gathering never changes system state.
    result = {}
    result['changed'] = False
    result['name'] = zpool_facts.name

    if zpool_facts.parsable:
        result['parsable'] = zpool_facts.parsable

    if zpool_facts.name is not None:
        # A specific pool was requested: fail clearly if it does not exist.
        if zpool_facts.pool_exists():
            result['ansible_facts'] = zpool_facts.get_facts()
        else:
            module.fail_json(msg='ZFS pool %s does not exist!' % zpool_facts.name)
    else:
        # No pool named: report facts for every imported pool.
        result['ansible_facts'] = zpool_facts.get_facts()

    module.exit_json(**result)


if __name__ == '__main__':
    main()
| gpl-3.0 |
ntt-sic/heat | heat/tests/test_clients.py | 1 | 2473 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from heat.engine import clients
from heat.tests.common import HeatTestCase
from heatclient import client as heatclient
class ClientsTest(HeatTestCase):
    """Unit tests for heat.engine.clients.Clients."""

    def test_clients_chosen_at_module_initilization(self):
        # Client attributes (e.g. `nova`) are bound per-instance, not on the
        # class itself.
        self.assertFalse(hasattr(clients.Clients, 'nova'))
        self.assertTrue(hasattr(clients.Clients('fakecontext'), 'nova'))

    def test_clients_get_heat_url(self):
        """_get_heat_url substitutes the tenant id into the configured URL
        template, and passes through an already-substituted URL unchanged."""
        con = mock.Mock()
        con.tenant_id = "b363706f891f48019483f8bd6503c54b"
        obj = clients.Clients(con)
        obj._get_client_option = mock.Mock()

        # No URL configured -> no heat URL.
        obj._get_client_option.return_value = None
        self.assertEqual(None, obj._get_heat_url())

        # Template URL -> tenant id substituted.
        heat_url = "http://0.0.0.0:8004/v1/%(tenant_id)s"
        obj._get_client_option.return_value = heat_url
        tenant_id = "b363706f891f48019483f8bd6503c54b"
        result = heat_url % {"tenant_id": tenant_id}
        self.assertEqual(result, obj._get_heat_url())

        # Already-substituted URL -> returned as-is.
        obj._get_client_option.return_value = result
        self.assertEqual(result, obj._get_heat_url())

    @mock.patch.object(heatclient, 'Client')
    def test_clients_heat(self, mock_call):
        """heat() prefers the keystone catalog URL, falling back to the
        configured URL when one is set."""
        con = mock.Mock()
        con.auth_url = "http://auth.example.com:5000/v2.0"
        con.tenant_id = "b363706f891f48019483f8bd6503c54b"
        con.auth_token = "3bcc3d3a03f44e3d8377f9247b0ad155"
        obj = clients.Clients(con)
        obj._get_heat_url = mock.Mock(name="_get_heat_url")
        obj._get_heat_url.return_value = None
        obj.url_for = mock.Mock(name="url_for")
        obj.url_for.return_value = "url_from_keystone"

        # No configured URL: the keystone catalog URL is used.
        obj.heat()
        self.assertEqual('url_from_keystone', mock_call.call_args[0][1])

        # Configured URL wins; reset the cached client to force re-creation.
        obj._get_heat_url.return_value = "url_from_config"
        obj._heat = None
        obj.heat()
        self.assertEqual('url_from_config', mock_call.call_args[0][1])
| apache-2.0 |
irfancharania/plugin.video.paktvforum | resources/lib/sites/desironak.py | 1 | 4266 | from resources.lib.abc_base import BaseForum
from BeautifulSoup import BeautifulSoup
import resources.lib.util as util
import HTMLParser
import resources.lib.structure as s
import resources.lib.hosts as hosts
class DesiRonakApi(BaseForum):
    """Scraper definition for the Desi Ronak forum (a vBulletin site).

    Declares the site's channel/category structure and implements the
    "frames" (today's dramas / talk shows) browsing of the portal pages.
    """

    short_name = 'desironak'
    long_name = 'Desi Ronak Forum'
    local_thumb = 'thumb_desironak.png'

    base_url = 'http://www.desironak.com/forums/'
    # Extracts the numeric sub-forum/thread id from a vBulletin URL.
    sub_id_regex = '\?(\d+)\-'
    section_url_template = 'forumdisplay.php?'
    thread_url_template = 'showthread.php?'
    mobile_style = '&styleid=24'

    ###############################################
    category_drama = s.Category('Browse Pakistani Dramas', [
        s.Channel('30', 'Geo', 'geo.png'),
        s.Channel('29', 'Ary Digital', 'ary.png'),
        s.Channel('31', 'Hum TV', 'hum.png'),
        s.Channel('460', 'PTV Home', 'ptv.png'),
        s.Channel('1182', 'Urdu 1', 'urdu1.png'),
        s.Channel('1328', 'Geo Kahani', 'geoKahani.png'),
        s.Channel('277', 'A Plus', 'aplus.png'),
        s.Channel('578', 'TV One', 'tv1.png'),
        s.Channel('779', 'Express Entertainment',
                  'expressEntertainment.png'),
        s.Channel('229', 'ARY Musik', 'aryMusik.png'),
        s.Channel('563', 'ATV', 'atv.png'),
        s.Channel('246', 'Aag TV', 'aag.png'),
    ])

    category_morning = s.Category('Browse Morning/Cooking Shows', [
        s.Channel('454', 'Morning Shows', 'morning.png'),
        s.Channel('33', 'Cooking Shows', 'cooking.png'),
    ])

    category_telefilms = s.Category(
        'Browse Stage Dramas/Telefilms/Special Events', [
            s.Channel('235', 'Family Stage Dramas'),
            s.Channel('62', 'Telefilms'),
            s.Channel('88', 'Events'),
        ])

    category_news = s.Category('Browse Current Affairs Talk Shows', [
        s.Channel('355', 'Geo News', 'geoNews.png'),
        s.Channel('400', 'Express News', 'expressNews.png'),
        s.Channel('250', 'Dunya News', 'dunya.png'),
        s.Channel('394', 'AAJ News', 'aaj.png'),
        s.Channel('424', 'Dawn News', 'dawn.png'),
        s.Channel('389', 'Ary News', 'aryNews.png'),
        s.Channel('1005', 'One News', 'newsone.jpg'),
        s.Channel('405', 'Samaa News', 'samaa.png'),
    ])

    categories = {
        'drama': category_drama,
        'morning': category_morning,
        'news': category_news,
        'telefilms': category_telefilms,
    }

    ###############################################
    # Portal "frames": CMPS pages listing today's episodes; moduleid is the
    # HTML id of the container div that holds the episode links.
    frames = [
        {'label': 'Today\'s Dramas',
         'url': 'http://www.desironak.com/forums/cmps_index.php?pageid=dramas',
         'moduleid': 'module17',
         'containstype': s.ThreadType().Episode},
        {'label': 'Today\'s Talk Shows',
         'url':
         'http://www.desironak.com/forums/cmps_index.php?pageid=talkshows',
         'moduleid': 'module16',
         'containstype': s.ThreadType().Episode},
    ]

    ###############################################
    # Maps the site's redirect script names to (host handler, query key).
    match_string = {
        'youtube.php': (hosts.youtube, 'id='),
        'dailymotion.php': (hosts.dailymotion, 'id='),
        'tnpk.php': (hosts.tunepk, 'url='),
        'ushare.php': (hosts.youtube, 'id='),
        'dshare.php': (hosts.dailymotion, 'id='),
        'tnpkshare.php': (hosts.tunepk, 'url='),
    }

    ###############################################
    def get_frame_menu(self):
        """Return the list of portal frames available for browsing."""
        return self.frames

    def browse_frame(self, frameid, url):
        """Fetch a portal page and return its episode links.

        Returns a (items, containstype) tuple where items is a list of
        {'label', 'url', 'pk'} dicts sorted by label.
        """
        data = util.get_remote_data(url)
        soup = BeautifulSoup(data, convertEntities=BeautifulSoup.HTML_ENTITIES)

        frameid = int(frameid)
        moduleid = self.frames[frameid]['moduleid']
        containstype = self.frames[frameid]['containstype']

        items = []
        linklist = soup.find('div', id=moduleid).findAll('a')

        # Create the parser once instead of once per link (it is stateless
        # for unescape() and was previously re-instantiated in the loop).
        parser = HTMLParser.HTMLParser()
        for l in linklist:
            tagline = parser.unescape(l.text)
            link = l['href']
            tid = self.get_sub_id(link)
            if tid:
                # Normalize relative thread links to absolute thread URLs.
                link = self.base_url + self.thread_url_template + tid

            items.append({
                'label': tagline,
                'url': link,
                'pk': tid
            })

        sorted_items = sorted(items, key=lambda item: item['label'])
        return sorted_items, containstype
| mit |
uni2u/neutron | neutron/plugins/embrane/common/constants.py | 12 | 2733 | # Copyright 2013 Embrane, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from heleosapi import exceptions as h_exc
from neutron.plugins.common import constants
# Router specific constants
UTIF_LIMIT = 7  # presumably the max user-traffic interfaces per router DVA -- confirm
QUEUE_TIMEOUT = 300  # presumably seconds to wait on the backend operation queue -- confirm
class Status:
    """Backend DVA lifecycle states expressed as Neutron resource statuses."""
    # Transient
    CREATING = constants.PENDING_CREATE
    UPDATING = constants.PENDING_UPDATE
    DELETING = constants.PENDING_DELETE
    # Final
    ACTIVE = constants.ACTIVE
    ERROR = constants.ERROR
    READY = constants.INACTIVE
    DELETED = "DELETED" # not visible
class Events:
    """Names of the operations dispatched to the Embrane backend."""
    CREATE_ROUTER = "create_router"
    UPDATE_ROUTER = "update_router"
    DELETE_ROUTER = "delete_router"
    GROW_ROUTER_IF = "grow_router_if"
    SHRINK_ROUTER_IF = "shrink_router_if"
    SET_NAT_RULE = "set_nat_rule"
    RESET_NAT_RULE = "reset_nat_rule"
# User-facing error message templates. `_` is the gettext translation
# function -- presumably installed as a builtin at startup, since there is
# no local import of it visible here -- confirm.
_DVA_PENDING_ERROR_MSG = _("Dva is pending for the following reason: %s")
_DVA_NOT_FOUNT_ERROR_MSG = _("Dva can't be found to execute the operation, "
                             "probably was cancelled through the heleos UI")
_DVA_BROKEN_ERROR_MSG = _("Dva seems to be broken for reason %s")
_DVA_BROKEN_INTERFACE_ERROR_MSG = _("Dva interface seems to be broken "
                                    "for reason %s")
_DVA_CREATION_FAILED_ERROR_MSG = _("Dva creation failed reason %s")
_DVA_CREATION_PENDING_ERROR_MSG = _("Dva creation is in pending state "
                                    "for reason %s")
_CFG_FAILED_ERROR_MSG = _("Dva configuration failed for reason %s")
_DVA_DEL_FAILED_ERROR_MSG = _("Failed to delete the backend "
                              "router for reason %s. Please remove "
                              "it manually through the heleos UI")

# Maps each heleosapi exception type to the message template used when
# reporting the failure to the user.
error_map = {h_exc.PendingDva: _DVA_PENDING_ERROR_MSG,
             h_exc.DvaNotFound: _DVA_NOT_FOUNT_ERROR_MSG,
             h_exc.BrokenDva: _DVA_BROKEN_ERROR_MSG,
             h_exc.BrokenInterface: _DVA_BROKEN_INTERFACE_ERROR_MSG,
             h_exc.DvaCreationFailed: _DVA_CREATION_FAILED_ERROR_MSG,
             h_exc.DvaCreationPending: _DVA_CREATION_PENDING_ERROR_MSG,
             h_exc.ConfigurationFailed: _CFG_FAILED_ERROR_MSG,
             h_exc.DvaDeleteFailed: _DVA_DEL_FAILED_ERROR_MSG}
liosha2007/temporary-groupdocs-python-sdk | examples/api-samples/inc_samples/sample11.py | 1 | 5359 | ### This sample will show how programmatically create and post an annotation into document. How to delete the annotation
# Import of classes from libraries
from pyramid.renderers import render_to_response
from groupdocs.ApiClient import ApiClient
from groupdocs.AntApi import AntApi
from groupdocs.GroupDocsRequestSigner import GroupDocsRequestSigner
# Checking value on null
def IsNotNull(value):
    """Return True when *value* is neither None nor empty."""
    if value is None:
        return False
    return len(value) > 0
# Set variables and get POST data
def sample11(request):
clientId = request.POST.get('client_id')
privateKey = request.POST.get('private_key')
fileId = request.POST.get('fileId')
annotationType = request.POST.get('annotation_type')
# Checking required parameters
if IsNotNull(clientId) == False or IsNotNull(privateKey) == False or IsNotNull(fileId) == False or IsNotNull(annotationType) == False:
return render_to_response('__main__:templates/sample11.pt',
{ 'error' : 'You do not enter all parameters' })
### Create Signer, ApiClient and Annotation Api objects
# Create signer object
signer = GroupDocsRequestSigner(privateKey)
# Create apiClient object
apiClient = ApiClient(signer)
# Create Annotation object
ant = AntApi(apiClient)
# Delete annotation if Delete Button clicked
if request.POST.get('delete_annotation') == "1":
try:
ant.DeleteAnnotation(clientId, request.POST.get('annotationId'))
except Exception, e:
return render_to_response('__main__:templates/sample11.pt',
{ 'error' : str(e) })
# Required parameters
allParams = ['box_x', 'box_y', 'text']
# Added required parameters depends on annotation type ['text' or 'area']
if annotationType == "text":
allParams = allParams + ['box_width', 'box_height', 'annotationPosition_x', 'annotationPosition_y', 'range_position', 'range_length']
elif annotationType == "area":
allParams = allParams + ['box_width', 'box_height']
# Checking required parameters
for param in allParams:
needParam = request.POST.get(param)
if IsNotNull(needParam) == False:
return render_to_response('__main__:templates/sample11.pt',
{ 'error' : 'You do not enter all parameters' })
types = {'text' : "0", "area" : "1", "point" : "2"}
# construct requestBody
requestBody = {
"type": types[request.POST.get('annotation_type')],
"replies": [ { "text": request.POST.get('text') } ],
}
# construct requestBody depends on annotation type
# text annotation
if annotationType == "text":
requestBody = dict(requestBody.items() + {
"box": {
"x" : request.POST.get('box_x'),
"y" : request.POST.get('box_y'),
"width" : request.POST.get('box_width'),
"height" : request.POST.get('box_height')
},
"textRange":{
"position" : request.POST.get('range_position'),
"length" : request.POST.get('range_length')
},
"annotationPosition": {
"x" : request.POST.get('annotationPosition_x'),
"y" : request.POST.get('annotationPosition_y')
},
}.items())
# area annotation
elif annotationType == "area":
requestBody = dict(requestBody.items() + {
"box": {
"x" : request.POST.get('box_x'),
"y" : request.POST.get('box_y'),
"width" : request.POST.get('box_width'),
"height" : request.POST.get('box_height')
},
"annotationPosition": {
"x" : "0",
"y" : "0"
},
}.items())
# point annotation
elif annotationType == "point":
requestBody = dict(requestBody.items() + {
"box": {
"x" : request.POST.get('box_x'),
"y" : request.POST.get('box_y'),
"width" : "0",
"height" : "0"
},
"annotationPosition": {
"x" : "0",
"y" : "0"
},
}.items())
try:
# Make a request to Annotation API using clientId, fileId and requestBody
response = ant.CreateAnnotation(clientId, fileId, requestBody)
if response.status == "Ok":
if response.result:
iframe = '<iframe src="https://apps.groupdocs.com//document-annotation2/embed/' + response.result.documentGuid + '" frameborder="0" width="720" height="600"></iframe>'
except Exception, e:
return render_to_response('__main__:templates/sample11.pt',
{ 'error' : str(e) })
# If request was successfull - set variables for template
return render_to_response('__main__:templates/sample11.pt',
{ 'userId' : clientId,
'privateKey' : privateKey,
'fileId' : fileId,
'annotationType' : annotationType,
'annotationText' : request.POST.get('text'),
'annotationId' : response.result.annotationGuid,
'iframe' : iframe,
'status' : response.status
},
request=request) | apache-2.0 |
newrocknj/horizon | openstack_dashboard/dashboards/project/data_processing/jobs/workflows/create.py | 13 | 6682 | # Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import logging
from django.utils.translation import ugettext_lazy as _
from horizon import exceptions
from horizon import forms
from horizon.forms import fields
from horizon import workflows
from openstack_dashboard.dashboards.project.data_processing \
.utils import helpers
from openstack_dashboard.api import sahara as saharaclient
LOG = logging.getLogger(__name__)
JOB_BINARY_CREATE_URL = ("horizon:project:data_processing.job_binaries"
":create-job-binary")
class AdditionalLibsAction(workflows.Action):
    """Workflow action collecting the supporting library binaries."""

    lib_binaries = forms.DynamicChoiceField(
        label=_("Choose libraries"),
        required=False,
        add_item_link=JOB_BINARY_CREATE_URL)

    # JSON-encoded list of chosen library ids, maintained by the
    # client-side library template and read back in ConfigureLibs.
    lib_ids = forms.CharField(
        required=False,
        widget=forms.HiddenInput())

    def populate_lib_binaries_choices(self, request, context):
        """Build the library dropdown from all existing job binaries."""
        job_binaries = saharaclient.job_binary_list(request)

        choices = [(job_binary.id, job_binary.name)
                   for job_binary in job_binaries]
        # Empty sentinel so "nothing selected" is a valid choice.
        choices.insert(0, ('', _("-- not selected --")))

        return choices

    class Meta(object):
        name = _("Libs")
        help_text_template = (
            "project/data_processing.jobs/_create_job_libs_help.html")
class GeneralConfigAction(workflows.Action):
    """Workflow action gathering the general job template settings."""

    job_name = forms.CharField(label=_("Name"))

    # The switchable/switched widget attrs drive the client-side show/hide
    # of the main-binary field depending on the selected job type.
    job_type = forms.ChoiceField(label=_("Job Type"),
                                 widget=forms.Select(attrs={
                                     'class': 'switchable',
                                     'data-slug': 'jobtype'
                                 }))

    main_binary = forms.DynamicChoiceField(
        label=_("Choose a main binary"),
        required=False,
        help_text=_("Choose the binary which "
                    "should be used in this Job."),
        add_item_link=JOB_BINARY_CREATE_URL,
        widget=fields.DynamicSelectWidget(
            attrs={
                'class': 'switched',
                'data-switch-on': 'jobtype',
                'data-jobtype-pig': _("Choose a main binary"),
                'data-jobtype-hive': _("Choose a main binary"),
                'data-jobtype-spark': _("Choose a main binary"),
                'data-jobtype-mapreduce.streaming': _("Choose a main binary")
            }))

    job_description = forms.CharField(label=_("Description"),
                                      required=False,
                                      widget=forms.Textarea(attrs={'rows': 4}))

    def __init__(self, request, context, *args, **kwargs):
        super(GeneralConfigAction,
              self).__init__(request, context, *args, **kwargs)

        # Pre-select the job type when arriving from the guided wizard.
        if request.REQUEST.get("guide_job_type"):
            self.fields["job_type"].initial = (
                request.REQUEST.get("guide_job_type").lower())

    def populate_job_type_choices(self, request, context):
        """Offer only the job types known to the display-name map."""
        choices = []
        choices_list = saharaclient.job_types_list(request)

        for choice in choices_list:
            job_type = choice.name.lower()

            if job_type in helpers.JOB_TYPE_MAP:
                choices.append((job_type, helpers.JOB_TYPE_MAP[job_type][0]))

        return choices

    def populate_main_binary_choices(self, request, context):
        """Build the main-binary dropdown from all existing job binaries."""
        job_binaries = saharaclient.job_binary_list(request)

        choices = [(job_binary.id, job_binary.name)
                   for job_binary in job_binaries]
        choices.insert(0, ('', _("-- not selected --")))

        return choices

    def clean(self):
        """Discard the main binary for job types that do not take one."""
        cleaned_data = super(workflows.Action, self).clean()
        job_type = cleaned_data.get("job_type", "")

        if job_type in ["Java", "MapReduce"]:
            cleaned_data['main_binary'] = None

        return cleaned_data

    class Meta(object):
        name = _("Create Job Template")
        help_text_template = (
            "project/data_processing.jobs/_create_job_help.html")
class GeneralConfig(workflows.Step):
    """Workflow step exposing the general job template settings."""

    action_class = GeneralConfigAction
    contributes = ("job_name", "job_type", "job_description", "main_binary")

    def contribute(self, data, context):
        # Copy every submitted value into the workflow context, translating
        # the job type through the display-name map on the way.
        for key, value in data.items():
            context[key] = (helpers.JOB_TYPE_MAP[value][1]
                            if key == "job_type" else value)
        return context
class ConfigureLibs(workflows.Step):
    """Workflow step exposing the chosen supporting libraries."""

    action_class = AdditionalLibsAction
    template_name = "project/data_processing.jobs/library_template.html"

    def contribute(self, data, context):
        # The selected library ids arrive JSON-encoded; expose each one
        # under an indexed "lib_<n>" key for the workflow handler.
        chosen_libs = json.loads(data.get("lib_ids", '[]'))
        for index, lib_id in enumerate(chosen_libs):
            context["lib_%d" % index] = lib_id
        return context
class CreateJob(workflows.Workflow):
    """Wizard workflow that creates a Sahara job template."""

    slug = "create_job"
    name = _("Create Job Template")
    finalize_button_name = _("Create")
    success_message = _("Job created")
    failure_message = _("Could not create job template")
    success_url = "horizon:project:data_processing.jobs:index"
    default_steps = (GeneralConfig, ConfigureLibs)

    def handle(self, request, context):
        """Create the job template; return True on success, False on error."""
        main_locations = []
        lib_locations = []

        # Collect the indexed "lib_<n>" entries contributed by ConfigureLibs.
        for k in context.keys():
            if k.startswith('lib_'):
                lib_locations.append(context.get(k))

        if context.get("main_binary", None):
            main_locations.append(context["main_binary"])

        try:
            job = saharaclient.job_create(
                request,
                context["job_name"],
                context["job_type"],
                main_locations,
                lib_locations,
                context["job_description"])

            # When launched from the guided wizard, remember the new job in
            # the session and redirect back into the guide.
            hlps = helpers.Helpers(request)
            if hlps.is_from_guide():
                request.session["guide_job_id"] = job.id
                request.session["guide_job_type"] = context["job_type"]
                request.session["guide_job_name"] = context["job_name"]
                self.success_url = (
                    "horizon:project:data_processing.wizard:jobex_guide")
            return True
        except Exception:
            # Let horizon surface the error to the user instead of crashing.
            exceptions.handle(request)
            return False
underyx/ansible | v2/ansible/module_utils/rax.py | 32 | 10155 | # This code is part of Ansible, but is an independent component.
# This particular file snippet, and this file snippet only, is BSD licensed.
# Modules you write using this snippet, which is embedded dynamically by
# Ansible still belong to the author of the module, and may assign their own
# license to the complete work.
#
# Copyright (c), Michael DeHaan <michael.dehaan@gmail.com>, 2012-2013
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
from uuid import UUID
# Server build states that will not change further (polling can stop).
FINAL_STATUSES = ('ACTIVE', 'ERROR')
# Possible cloud block storage volume states.
VOLUME_STATUS = ('available', 'attaching', 'creating', 'deleting', 'in-use',
                 'error', 'error_deleting')

# Valid cloud load balancer algorithms and protocols.
CLB_ALGORITHMS = ['RANDOM', 'LEAST_CONNECTIONS', 'ROUND_ROBIN',
                  'WEIGHTED_LEAST_CONNECTIONS', 'WEIGHTED_ROUND_ROBIN']
CLB_PROTOCOLS = ['DNS_TCP', 'DNS_UDP', 'FTP', 'HTTP', 'HTTPS', 'IMAPS',
                 'IMAPv4', 'LDAP', 'LDAPS', 'MYSQL', 'POP3', 'POP3S', 'SMTP',
                 'TCP', 'TCP_CLIENT_FIRST', 'UDP', 'UDP_STREAM', 'SFTP']

# Types treated as plain data when serializing pyrax objects to dicts.
NON_CALLABLES = (basestring, bool, dict, int, list, type(None))
# Well-known Rackspace network ids for PublicNet and ServiceNet.
PUBLIC_NET_ID = "00000000-0000-0000-0000-000000000000"
SERVICE_NET_ID = "11111111-1111-1111-1111-111111111111"
def rax_slugify(value):
    """Prepend a key with rax_ and normalize the key name."""
    # Replace anything except word chars and '-' with '_', lowercase, and
    # drop leading underscores before adding the prefix.
    normalized = re.sub('[^\w-]', '_', value).lower().lstrip('_')
    return 'rax_' + normalized
def rax_clb_node_to_dict(obj):
    """Convert a CLB Node object to a dict, including its id and weight."""
    if not obj:
        return {}
    # to_dict() omits id and weight, so merge them in explicitly.
    node = obj.to_dict()
    node.update(id=obj.id, weight=obj.weight)
    return node
def rax_to_dict(obj, obj_type='standard'):
    """Generic function to convert a pyrax object to a dict.

    obj_type values:
        standard -- plain attribute copy
        clb      -- load balancer; its 'nodes' list is converted via
                    rax_clb_node_to_dict
        server   -- keys are slugified (rax_*) and a few well-known
                    attributes are mirrored under their original names
    """
    instance = {}
    for key in dir(obj):
        value = getattr(obj, key)
        if obj_type == 'clb' and key == 'nodes':
            instance[key] = []
            for node in value:
                instance[key].append(rax_clb_node_to_dict(node))
        elif (isinstance(value, list) and len(value) > 0 and
                not isinstance(value[0], NON_CALLABLES)):
            # List of nested pyrax objects: convert each one recursively.
            instance[key] = []
            for item in value:
                instance[key].append(rax_to_dict(item))
        elif (isinstance(value, NON_CALLABLES) and not key.startswith('_')):
            # Plain data attribute (methods and private names are skipped).
            if obj_type == 'server':
                key = rax_slugify(key)
            instance[key] = value

    if obj_type == 'server':
        # Mirror a few common attributes under their unslugified names so
        # callers can use e.g. instance['id'] as well as instance['rax_id'].
        for attr in ['id', 'accessIPv4', 'name', 'status']:
            instance[attr] = instance.get(rax_slugify(attr))

    return instance
def rax_find_image(module, rax_module, image):
    """Resolve *image* (UUID, human id, or name) to an image id.

    Fails the Ansible module when no match is found.
    """
    cs = rax_module.cloudservers
    try:
        # If it parses as a UUID, treat it as an image id directly.
        UUID(image)
    except ValueError:
        # Not a UUID: try the human id first, then fall back to the name.
        try:
            image = cs.images.find(human_id=image)
        except(cs.exceptions.NotFound,
               cs.exceptions.NoUniqueMatch):
            try:
                image = cs.images.find(name=image)
            except (cs.exceptions.NotFound,
                    cs.exceptions.NoUniqueMatch):
                module.fail_json(msg='No matching image found (%s)' %
                                     image)

    return rax_module.utils.get_id(image)
def rax_find_volume(module, rax_module, name):
    """Resolve *name* (UUID or volume name) to a block storage volume.

    Returns None when a name lookup finds nothing; fails the module on
    any other lookup error.
    """
    cbs = rax_module.cloud_blockstorage
    try:
        # If it parses as a UUID, fetch the volume by id.
        UUID(name)
        volume = cbs.get(name)
    except ValueError:
        # Not a UUID: look the volume up by name instead.
        try:
            volume = cbs.find(name=name)
        except rax_module.exc.NotFound:
            volume = None
        except Exception, e:
            module.fail_json(msg='%s' % e)
    return volume
def rax_find_network(module, rax_module, network):
    """Resolve *network* (UUID, 'public', 'private', or label) to the
    server-network dicts used when building a server.

    Fails the Ansible module when a label matches nothing or is ambiguous.
    """
    cnw = rax_module.cloud_networks
    try:
        # If it parses as a UUID, use it as a network id directly.
        UUID(network)
    except ValueError:
        # Special-cased names map to the well-known Rackspace networks.
        if network.lower() == 'public':
            return cnw.get_server_networks(PUBLIC_NET_ID)
        elif network.lower() == 'private':
            return cnw.get_server_networks(SERVICE_NET_ID)
        else:
            try:
                network_obj = cnw.find_network_by_label(network)
            except (rax_module.exceptions.NetworkNotFound,
                    rax_module.exceptions.NetworkLabelNotUnique):
                module.fail_json(msg='No matching network found (%s)' %
                                     network)
            else:
                return cnw.get_server_networks(network_obj)
    else:
        return cnw.get_server_networks(network)
def rax_find_server(module, rax_module, server):
    """Resolve *server* (UUID or exact name) to a cloud server object.

    Fails the Ansible module when a name matches zero or several servers.
    """
    cs = rax_module.cloudservers
    try:
        # If it parses as a UUID, fetch the server by id.
        UUID(server)
        server = cs.servers.get(server)
    except ValueError:
        # Anchored regex so only exact name matches are returned.
        servers = cs.servers.list(search_opts=dict(name='^%s$' % server))
        if not servers:
            module.fail_json(msg='No Server was matched by name, '
                                 'try using the Server ID instead')
        if len(servers) > 1:
            module.fail_json(msg='Multiple servers matched by name, '
                                 'try using the Server ID instead')

        # We made it this far, grab the first and hopefully only server
        # in the list
        server = servers[0]
    return server
def rax_find_loadbalancer(module, rax_module, loadbalancer):
    """Resolve *loadbalancer* (id or exact name) to a load balancer object.

    Tries an id lookup first and falls back to matching by name across all
    load balancers; fails the Ansible module when a name matches zero or
    several of them.
    """
    clb = rax_module.cloud_loadbalancers
    try:
        found = clb.get(loadbalancer)
    except Exception:
        # Was a bare "except:"; narrowed so SystemExit/KeyboardInterrupt
        # are no longer swallowed. Any lookup failure means "not an id",
        # so fall back to matching by name.
        found = []
        for lb in clb.list():
            if loadbalancer == lb.name:
                found.append(lb)

        if not found:
            module.fail_json(msg='No loadbalancer was matched')

        if len(found) > 1:
            module.fail_json(msg='Multiple loadbalancers matched')

        # We made it this far, grab the first and hopefully only item
        # in the list
        found = found[0]

    return found
def rax_argument_spec():
    """Return the Ansible argument spec shared by all Rackspace modules."""
    spec = {
        'api_key': dict(type='str', aliases=['password'], no_log=True),
        'auth_endpoint': dict(type='str'),
        'credentials': dict(type='str', aliases=['creds_file']),
        'env': dict(type='str'),
        'identity_type': dict(type='str', default='rackspace'),
        'region': dict(type='str'),
        'tenant_id': dict(type='str'),
        'tenant_name': dict(type='str'),
        'username': dict(type='str'),
        'verify_ssl': dict(choices=BOOLEANS, type='bool'),
    }
    return spec
def rax_required_together():
    """Return the parameter groups that must be supplied together."""
    pairs = [['api_key', 'username']]
    return pairs
def setup_rax_module(module, rax_module, region_required=True):
    """Configure and authenticate the pyrax module from Ansible params.

    Settings are taken from the module parameters first, with environment
    variables and the pyrax keyring/credentials file as fallbacks. Fails
    the Ansible module on any authentication or configuration error and
    returns the configured pyrax module on success.
    """
    rax_module.USER_AGENT = 'ansible/%s %s' % (ANSIBLE_VERSION,
                                               rax_module.USER_AGENT)

    api_key = module.params.get('api_key')
    auth_endpoint = module.params.get('auth_endpoint')
    credentials = module.params.get('credentials')
    env = module.params.get('env')
    identity_type = module.params.get('identity_type')
    region = module.params.get('region')
    tenant_id = module.params.get('tenant_id')
    tenant_name = module.params.get('tenant_name')
    username = module.params.get('username')
    verify_ssl = module.params.get('verify_ssl')

    # Apply any explicitly provided pyrax settings.
    if env is not None:
        rax_module.set_environment(env)

    rax_module.set_setting('identity_type', identity_type)

    if verify_ssl is not None:
        rax_module.set_setting('verify_ssl', verify_ssl)
    if auth_endpoint is not None:
        rax_module.set_setting('auth_endpoint', auth_endpoint)
    if tenant_id is not None:
        rax_module.set_setting('tenant_id', tenant_id)
    if tenant_name is not None:
        rax_module.set_setting('tenant_name', tenant_name)

    # Fall back to environment variables and pyrax settings for anything
    # not supplied as a module parameter.
    try:
        username = username or os.environ.get('RAX_USERNAME')
        if not username:
            username = rax_module.get_setting('keyring_username')
            if username:
                # Sentinel value telling the auth step to use the keyring.
                api_key = 'USE_KEYRING'
        if not api_key:
            api_key = os.environ.get('RAX_API_KEY')
        credentials = (credentials or os.environ.get('RAX_CREDENTIALS') or
                       os.environ.get('RAX_CREDS_FILE'))
        region = (region or os.environ.get('RAX_REGION') or
                  rax_module.get_setting('region'))
    except KeyError, e:
        module.fail_json(msg='Unable to load %s' % e.message)

    # Authenticate: username/api_key (or keyring) first, then a
    # credentials file; anything else is an error.
    try:
        if api_key and username:
            if api_key == 'USE_KEYRING':
                rax_module.keyring_auth(username, region=region)
            else:
                rax_module.set_credentials(username, api_key=api_key,
                                           region=region)
        elif credentials:
            credentials = os.path.expanduser(credentials)
            rax_module.set_credential_file(credentials, region=region)
        else:
            raise Exception('No credentials supplied!')
    except Exception, e:
        module.fail_json(msg='%s' % e.message)

    if region_required and region not in rax_module.regions:
        module.fail_json(msg='%s is not a valid region, must be one of: %s' %
                             (region, ','.join(rax_module.regions)))

    return rax_module
| gpl-3.0 |
Krozark/Kraggne | Kraggne/forms.py | 1 | 4491 | from django import forms
from django.forms import ValidationError
from django.core.validators import URLValidator
from django.template.defaultfilters import slugify
from django.utils.translation import ugettext_lazy as _
import re
from Kraggne.utils import MakePattern, clean_url
from Kraggne.models import MenuItem, FormBlock
class MenuItemForm(forms.ModelForm):
    """ModelForm for MenuItem that derives the item's final URL.

    clean_view() validates the 'view' field and stores the resolved URL on
    self.url as a side effect; save() copies it onto the model instance.
    """

    class Meta:
        model = MenuItem

    def clean_view(self):
        """Validate the view reference and compute self.url.

        Accepts a fully-qualified URL, a reversible URL name, or -- when a
        CMS page is attached -- derives the URL from the parent item and
        slug. Returns the value to store in the 'view' field ('' when the
        URL is derived from the slug).
        """
        link = self.cleaned_data['view'] or ''
        # It could be a fully-qualified URL -- try that first b/c reverse()
        # chokes on "http://"
        if any([link.startswith(s) for s in ('http://', 'https://')]):
            URLValidator()(link)
            self.url = link
            return self.cleaned_data['view']
        auto = self.cleaned_data['cms_page']
        if not auto:
            if link:
                # clean_url resolves the name and returns (view, url).
                link,self.url = clean_url(link,include=True,detail=True)
                return link
            raise ValidationError(_('Please supply a valid URL or URL name.'))
        else: #auto
            if link:
                link,self.url = clean_url(link,include=True,detail=True,lis=True,existe=False)
                return link
            #try:
            #    re.compile(link)
            #    self.url = link
            #    return link
            #except:
            #    raise forms.ValidationError(_("%s is not a valide Regex." % link))
            #elif link[0] != "/":
            #    self.url = "/"+link
            #else:
            #    self.url = link
            #return link
            # No explicit view: build the URL from the parent's URL (with
            # any fragment stripped) plus this item's slug.
            parent = self.cleaned_data['parent']
            if parent:
                p_url = parent.url
                if '#' in p_url:
                    p_url = p_url[:p_url.find('#')]
                if p_url == "/":
                    self.url = "/"+self.cleaned_data['slug']
                elif p_url[-1] != "/":
                    self.url = p_url +"/"+self.cleaned_data['slug']
                else:
                    self.url = p_url +self.cleaned_data['slug']
            else:
                self.url = "/"+self.cleaned_data['slug']
            return ''

    def clean(self):
        super(MenuItemForm, self).clean()
        #if 'is_visible' in self.cleaned_data and \
        #   self.cleaned_data['is_visible'] and \
        #   'view' in self.cleaned_data and \
        #   self.cleaned_data['view'].startswith('^'):
        #    raise forms.ValidationError(_('Menu items with regular expression URLs must be disabled.'))
        return self.cleaned_data

    def save(self, commit=True):
        """Persist the item, copying the URL computed in clean_view().

        Items whose view contains characters that cannot appear in a plain
        path are force-hidden.
        """
        item = super(MenuItemForm, self).save(commit=False)
        item.view = self.cleaned_data['view']
        item.url = self.url
        if item.view:
            if not any([item.view.startswith(s) for s in ('http://', 'https://', 'include(','detail(','list(')]):
                if re.search('[^\d/\w\-:_#? ]',item.view):
                    item.is_visible = False
        ##try to register the new url
        #if hasattr(Kraggne_urls,'urlpatterns'):
        #    urls = getattr(Kraggne_urls,'urlpatterns')
        #    urls += MakePattern(item)
        if commit:
            item.save()
        return item
class FormBlockForm(forms.ModelForm):
    """ModelForm for FormBlock validating the target view and form class."""

    class Meta:
        model = FormBlock

    def clean_view(self):
        """Normalize the view reference and store the resolved URL on
        self.url (with a trailing slash) for save() to pick up."""
        view = self.cleaned_data['view']
        self.url = view
        if view:
            view,self.url = clean_url(view)
            if view and view[-1] != "/":
                self.url+="/"
        return view

    def clean_form(self):
        """Validate that 'form' names an importable form class.

        Accepts a dotted path ("app.module.FormClass") or a bare module
        name, and checks the resolved object looks like a form (exposes
        ``is_valid``).
        """
        form = self.cleaned_data['form']
        try:
            point = form.rfind('.')
            if point != -1:
                app = form[:point]
                klass = form[point+1:]
                f= __import__(app,globals(),locals(),[klass,])
                f=getattr(f,klass)
            else:
                f=__import__(form)
            try:
                f.is_valid
            except AttributeError:
                raise forms.ValidationError(_("%s is not a form" % form))
        except forms.ValidationError:
            # Fix: the outer handler was a bare "except:" which also caught
            # the "is not a form" error above and replaced it with the less
            # precise "could not be found" message. Re-raise it instead.
            raise
        except Exception:
            # Was a bare "except:"; narrowed so SystemExit/KeyboardInterrupt
            # pass through. Covers ImportError, AttributeError, etc.
            raise forms.ValidationError(_("%s could not be found" % form))
        return form

    def save_m2m(self):
        # No many-to-many fields to save; overridden because save() below
        # commits immediately.
        pass

    def save(self,commit=True):
        """Persist the block, copying the fields resolved during clean()."""
        form = super(FormBlockForm,self).save()
        form.view = self.cleaned_data['view']
        form.form = self.cleaned_data['form']
        form.url = self.url
        if commit:
            form.save(commit=True)
        return form
| bsd-2-clause |
D4wN/brickv | src/brickv/plugin_system/plugins/red/program_page_files.py | 1 | 6157 | # -*- coding: utf-8 -*-
"""
RED Plugin
Copyright (C) 2014-2015 Matthias Bolte <matthias@tinkerforge.com>
program_page_files.py: Program Wizard Files Page
This program is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License
as published by the Free Software Foundation; either version 2
of the License, or (at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
General Public License for more details.
You should have received a copy of the GNU General Public
License along with this program; if not, write to the
Free Software Foundation, Inc., 59 Temple Place - Suite 330,
Boston, MA 02111-1307, USA.
"""
from PyQt4.QtCore import Qt, QDir
from PyQt4.QtGui import QIcon, QListWidgetItem, QApplication
from brickv.plugin_system.plugins.red.program_page import ProgramPage
from brickv.plugin_system.plugins.red.program_utils import *
from brickv.plugin_system.plugins.red.ui_program_page_files import Ui_ProgramPageFiles
from brickv.utils import get_main_window, get_home_path, get_open_file_names, \
get_existing_directory
from brickv.load_pixmap import load_pixmap
import os
import posixpath
import sys
class ProgramPageFiles(ProgramPage, Ui_ProgramPageFiles):
    """Wizard page for selecting the files/directories a program uploads."""

    def __init__(self, title_prefix='', last_directory=None):
        ProgramPage.__init__(self)

        self.setupUi(self)

        self.edit_mode = False
        self.folder_icon = QIcon(load_pixmap('folder-icon.png'))
        self.file_icon = QIcon(load_pixmap('file-icon.png'))

        # Remember where the last file dialog was opened, defaulting to home.
        if last_directory != None:
            self.last_directory = last_directory
        else:
            self.last_directory = get_home_path()

        self.setTitle(title_prefix + 'Files')

        self.list_files.itemSelectionChanged.connect(self.update_ui_state)
        self.button_add_files.clicked.connect(self.show_add_files_dialog)
        self.button_add_directory.clicked.connect(self.show_add_directory_dialog)
        self.button_remove_selected_files.clicked.connect(self.remove_selected_files)

    # overrides QWizardPage.initializePage
    def initializePage(self):
        """Reset the file list each time the page is (re)entered."""
        self.set_formatted_sub_title(u'Specify the files to be uploaded for the {language} program [{name}].')

        self.list_files.clear()

        self.update_ui_state()

        # if a program exists then this page is used in an edit wizard
        if self.wizard().program != None:
            self.edit_mode = True

    # overrides QWizardPage.isComplete
    def isComplete(self):
        # In edit mode at least one file must be selected to proceed.
        if self.edit_mode:
            return self.list_files.count() > 0 and ProgramPage.isComplete(self)

        return ProgramPage.isComplete(self)

    # overrides ProgramPage.update_ui_state
    def update_ui_state(self):
        self.button_remove_selected_files.setEnabled(len(self.list_files.selectedItems()) > 0)

    def show_add_files_dialog(self):
        """Let the user pick individual files and add them to the list."""
        filenames = get_open_file_names(get_main_window(), 'Add Files', self.last_directory)

        if len(filenames) > 0:
            self.last_directory = os.path.split(filenames[0])[0]

        for filename in filenames:
            # Skip files that are already in the list.
            if len(self.list_files.findItems(filename, Qt.MatchFixedString)) > 0:
                continue

            uploads = [Upload(filename, os.path.split(filename)[1])]

            item = QListWidgetItem(filename)
            item.setData(Qt.UserRole, uploads)
            item.setData(Qt.DecorationRole, self.file_icon)
            self.list_files.addItem(item)

        self.completeChanged.emit()

    def show_add_directory_dialog(self):
        """Let the user pick a directory; add its whole tree as uploads."""
        directory = get_existing_directory(get_main_window(), 'Add Directory', self.last_directory)

        if len(directory) == 0:
            return

        self.last_directory = directory

        # Skip directories that are already in the list (shown as "dir/*").
        if len(self.list_files.findItems(os.path.join(directory, '*'), Qt.MatchFixedString)) > 0:
            return

        uploads = []

        # Walking a big tree can take a while; show a cancellable busy dialog.
        progress = ExpandingProgressDialog(self)
        progress.set_progress_text_visible(False)
        progress.setWindowTitle('New Program')
        progress.setLabelText(u"Collecting content of {0}".format(directory))
        progress.setModal(True)
        progress.setRange(0, 0)
        progress.show()

        for root, directories, files in os.walk(directory):
            if progress.wasCanceled():
                break

            for filename in files:
                source = os.path.join(root, filename)
                # Upload targets are always relative POSIX-style paths.
                target = QDir.fromNativeSeparators(os.path.relpath(source, directory))
                uploads.append(Upload(source, target))

                # ensure that the UI stays responsive
                QApplication.processEvents()

                if progress.wasCanceled():
                    break

        if progress.wasCanceled():
            return

        progress.cancel()

        # FIXME: maybe add a warning if the directory contains very many files or large amounts of data

        item = QListWidgetItem(os.path.join(directory, '*'))
        item.setData(Qt.UserRole, uploads)
        item.setData(Qt.DecorationRole, self.folder_icon)
        self.list_files.addItem(item)

        self.completeChanged.emit()

    def remove_selected_files(self):
        for item in self.list_files.selectedItems():
            self.list_files.takeItem(self.list_files.row(item))

        self.completeChanged.emit()

    def get_items(self):
        """Return the display strings of all list entries."""
        items = []

        for row in range(self.list_files.count()):
            items.append(self.list_files.item(row).text())

        return items

    def get_directories(self):
        """Return the sorted set of target directories implied by uploads."""
        directories = set()

        for upload in self.get_uploads():
            directory = os.path.split(upload.target)[0]

            if len(directory) > 0:
                directories.add(directory)

        return sorted(list(directories))

    def get_uploads(self):
        """Return all Upload objects collected across the list entries."""
        uploads = []

        for row in range(self.list_files.count()):
            uploads += self.list_files.item(row).data(Qt.UserRole)

        return uploads
| gpl-2.0 |
googleapis/python-aiplatform | scripts/readme-gen/readme_gen.py | 122 | 1722 | #!/usr/bin/env python
# Copyright 2016 Google Inc
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Generates READMEs using configuration defined in yaml."""
import argparse
import io
import os
import subprocess
import sys

import jinja2
import yaml
# Shared Jinja2 environment; templates are loaded from the 'templates'
# directory that sits next to this script.
jinja_env = jinja2.Environment(
    trim_blocks=True,
    loader=jinja2.FileSystemLoader(
        os.path.abspath(os.path.join(os.path.dirname(__file__), 'templates'))))

# Top-level README template rendered by main().
README_TMPL = jinja_env.get_template('README.tmpl.rst')
def get_help(file):
    """Return the ``--help`` output of the given Python script.

    Runs the script with the same interpreter as this program
    (``sys.executable``) rather than a bare ``python``, which may not
    exist on PATH on python3-only systems.
    """
    return subprocess.check_output([sys.executable, file, '--help']).decode()
def main():
    """Render a README from a yaml config file.

    Reads the yaml config given as the ``source`` positional argument and
    writes the rendered README next to it (name taken from
    ``--destination``, default ``README.rst``).
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('source')
    parser.add_argument('--destination', default='README.rst')

    args = parser.parse_args()

    source = os.path.abspath(args.source)
    root = os.path.dirname(source)
    destination = os.path.join(root, args.destination)

    jinja_env.globals['get_help'] = get_help

    with io.open(source, 'r') as f:
        # safe_load avoids constructing arbitrary Python objects from the
        # config file (yaml.load without an explicit Loader is unsafe and
        # deprecated).
        config = yaml.safe_load(f)

    # This allows get_help to execute in the right directory.
    os.chdir(root)

    output = README_TMPL.render(config)

    with io.open(destination, 'w') as f:
        f.write(output)


if __name__ == '__main__':
    main()
| apache-2.0 |
OsirisSPS/osiris-sps | client/share/plugins/AF9A4C281070FDB0F34CF417CDB168AB38C8A388/lib/lib2to3/pgen2/grammar.py | 119 | 5373 | # Copyright 2004-2005 Elemental Security, Inc. All Rights Reserved.
# Licensed to PSF under a Contributor Agreement.
"""This module defines the data structures used to represent a grammar.
These are a bit arcane because they are derived from the data
structures used by Python's 'pgen' parser generator.
There's also a table here mapping operators to their names in the
token module; the Python tokenize module reports all operators as the
fallback token code OP, but the parser needs the actual token code.
"""
# Python imports
import pickle
# Local imports
from . import token, tokenize
class Grammar(object):
    """Pgen parsing tables conversion class.

    Once initialized, this class supplies the grammar tables for the
    parsing engine implemented by parse.py.  The parsing engine
    accesses the instance variables directly.  The class here does not
    provide initialization of the tables; several subclasses exist to
    do this (see the conv and pgen modules).

    The load() method reads the tables from a pickle file, which is
    much faster than the other ways offered by subclasses.  The pickle
    file is written by calling dump() (after loading the grammar
    tables using a subclass).  The report() method prints a readable
    representation of the tables to stdout, for debugging.

    The instance variables are as follows:

    symbol2number -- a dict mapping symbol names to numbers.  Symbol
                     numbers are always 256 or higher, to distinguish
                     them from token numbers, which are between 0 and
                     255 (inclusive).

    number2symbol -- a dict mapping numbers to symbol names;
                     these two are each other's inverse.

    states        -- a list of DFAs, where each DFA is a list of
                     states, each state is a list of arcs, and each
                     arc is a (i, j) pair where i is a label and j is
                     a state number.  The DFA number is the index into
                     this list.  (This name is slightly confusing.)
                     Final states are represented by a special arc of
                     the form (0, j) where j is its own state number.

    dfas          -- a dict mapping symbol numbers to (DFA, first)
                     pairs, where DFA is an item from the states list
                     above, and first is a set of tokens that can
                     begin this grammar rule (represented by a dict
                     whose values are always 1).

    labels        -- a list of (x, y) pairs where x is either a token
                     number or a symbol number, and y is either None
                     or a string; the strings are keywords.  The label
                     number is the index in this list; label numbers
                     are used to mark state transitions (arcs) in the
                     DFAs.

    start         -- the number of the grammar's start symbol.

    keywords      -- a dict mapping keyword strings to arc labels.

    tokens        -- a dict mapping token numbers to arc labels.
    """

    def __init__(self):
        self.symbol2number = {}
        self.number2symbol = {}
        self.states = []
        self.dfas = {}
        # Label 0 is reserved for the special EMPTY label.
        self.labels = [(0, "EMPTY")]
        self.keywords = {}
        self.tokens = {}
        self.symbol2label = {}
        # 256 is the lowest possible symbol number (see symbol2number above).
        self.start = 256

    def dump(self, filename):
        """Dump the grammar tables to a pickle file."""
        f = open(filename, "wb")
        # Protocol 2 keeps the pickle compact and loadable by Python 2.
        pickle.dump(self.__dict__, f, 2)
        f.close()

    def load(self, filename):
        """Load the grammar tables from a pickle file.

        NOTE(review): pickle.load executes arbitrary code from the file;
        only load trusted, self-generated grammar pickles.
        """
        f = open(filename, "rb")
        d = pickle.load(f)
        f.close()
        self.__dict__.update(d)

    def copy(self):
        """
        Copy the grammar.
        """
        # Dicts and lists are copied one level deep; the contained
        # states/arcs themselves are shared with the original.
        new = self.__class__()
        for dict_attr in ("symbol2number", "number2symbol", "dfas", "keywords",
                          "tokens", "symbol2label"):
            setattr(new, dict_attr, getattr(self, dict_attr).copy())
        new.labels = self.labels[:]
        new.states = self.states[:]
        new.start = self.start
        return new

    def report(self):
        """Dump the grammar tables to standard output, for debugging."""
        # NOTE: Python 2 print statements -- this module targets Python 2.
        from pprint import pprint
        print "s2n"
        pprint(self.symbol2number)
        print "n2s"
        pprint(self.number2symbol)
        print "states"
        pprint(self.states)
        print "dfas"
        pprint(self.dfas)
        print "labels"
        pprint(self.labels)
        print "start", self.start
# Map from operator to number (since tokenize doesn't do this)
# Each non-blank line of opmap_raw is "<operator> <token name>".
opmap_raw = """
( LPAR
) RPAR
[ LSQB
] RSQB
: COLON
, COMMA
; SEMI
+ PLUS
- MINUS
* STAR
/ SLASH
| VBAR
& AMPER
< LESS
> GREATER
= EQUAL
. DOT
% PERCENT
` BACKQUOTE
{ LBRACE
} RBRACE
@ AT
== EQEQUAL
!= NOTEQUAL
<> NOTEQUAL
<= LESSEQUAL
>= GREATEREQUAL
~ TILDE
^ CIRCUMFLEX
<< LEFTSHIFT
>> RIGHTSHIFT
** DOUBLESTAR
+= PLUSEQUAL
-= MINEQUAL
*= STAREQUAL
/= SLASHEQUAL
%= PERCENTEQUAL
&= AMPEREQUAL
|= VBAREQUAL
^= CIRCUMFLEXEQUAL
<<= LEFTSHIFTEQUAL
>>= RIGHTSHIFTEQUAL
**= DOUBLESTAREQUAL
// DOUBLESLASH
//= DOUBLESLASHEQUAL
-> RARROW
"""

# Build the operator-string -> token-number map by resolving each token
# name against the token module.
opmap = {}
for line in opmap_raw.splitlines():
    if line:
        op, name = line.split()
        opmap[op] = getattr(token, name)
| gpl-3.0 |
chenjun0210/tensorflow | tensorflow/python/kernel_tests/stack_ops_test.py | 64 | 5580 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.ops.stack_ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import gen_data_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import test
class StackOpTest(test.TestCase):
    # Tests for the low-level stack ops (gen_data_flow_ops._stack,
    # _stack_push, _stack_pop, _stack_close).  Each scenario is run both
    # with and without GPU placement.

    def _testStackPushPop(self, use_gpu):
        # Push one tensor, then pop it back and check the value round-trips.
        with self.test_session(use_gpu=use_gpu):
            h = gen_data_flow_ops._stack(dtypes.float32, stack_name="foo")
            c = gen_data_flow_ops._stack_push(h, [[4.0, 5.0]])
            with ops.control_dependencies([c]):
                c1 = gen_data_flow_ops._stack_pop(h, dtypes.float32)
            self.assertAllClose([[4.0, 5.0]], c1.eval())

    def testStackPushPop(self):
        self._testStackPushPop(use_gpu=False)
        self._testStackPushPop(use_gpu=True)

    def _testStackPushPopSwap(self, use_gpu):
        # Same round-trip with swap_memory=True (push may offload to host).
        with self.test_session(use_gpu=use_gpu):
            a = np.arange(2000)
            x = constant_op.constant(a, dtype=dtypes.float32)
            h = gen_data_flow_ops._stack(dtypes.float32, stack_name="foo")
            c = gen_data_flow_ops._stack_push(h, x, swap_memory=True)
            with ops.control_dependencies([c]):
                c1 = gen_data_flow_ops._stack_pop(h, dtypes.float32)
            self.assertAllClose(a, c1.eval())

    def testStackPushPopSwap(self):
        self._testStackPushPopSwap(use_gpu=False)
        self._testStackPushPopSwap(use_gpu=True)

    def _testStackWhileSwap(self, use_gpu):
        # Push 10 ones-vectors inside one while loop, then pop and sum them
        # inside a second while loop; the result must be 10 * ones.
        with self.test_session(use_gpu=use_gpu):
            n = constant_op.constant(0)
            h = gen_data_flow_ops._stack(dtypes.float32, stack_name="foo")

            def c(x):
                return math_ops.less(x, 10)

            def b(x):
                with ops.control_dependencies([x]):
                    a = constant_op.constant(np.ones(2000), dtype=dtypes.float32)
                    v = gen_data_flow_ops._stack_push(h, a, swap_memory=True)
                with ops.control_dependencies([v]):
                    return math_ops.add(x, 1)

            r = control_flow_ops.while_loop(c, b, [n])

            v = constant_op.constant(np.zeros(2000), dtype=dtypes.float32)

            def c1(x, y):
                return math_ops.greater(x, 0)

            def b1(x, y):
                nx = math_ops.subtract(x, 1)
                ny = y + gen_data_flow_ops._stack_pop(h, dtypes.float32)
                return [nx, ny]

            rx, ry = control_flow_ops.while_loop(
                c1, b1, [r, v], [r.get_shape(), tensor_shape.unknown_shape()])
            self.assertAllClose(np.ones(2000) * 10.0, ry.eval())

    def testStackWhileSwap(self):
        self._testStackWhileSwap(use_gpu=False)
        self._testStackWhileSwap(use_gpu=True)

    def _testMultiStack(self, use_gpu):
        # Two stacks with distinct names must not interfere.
        with self.test_session(use_gpu=use_gpu):
            h1 = gen_data_flow_ops._stack(dtypes.float32, stack_name="foo")
            c1 = gen_data_flow_ops._stack_push(h1, 4.0)
            with ops.control_dependencies([c1]):
                c1 = gen_data_flow_ops._stack_pop(h1, dtypes.float32)
            h2 = gen_data_flow_ops._stack(dtypes.float32, stack_name="bar")
            c2 = gen_data_flow_ops._stack_push(h2, 5.0)
            with ops.control_dependencies([c2]):
                c2 = gen_data_flow_ops._stack_pop(h2, dtypes.float32)
            r = c1 + c2
            self.assertAllClose(9.0, r.eval())

    def testMultiStack(self):
        self._testMultiStack(use_gpu=False)
        self._testMultiStack(use_gpu=True)

    def _testSameNameStacks(self, use_gpu):
        # Two stacks that share a name must still get distinct handles.
        with self.test_session(use_gpu=use_gpu):
            h1 = gen_data_flow_ops._stack(dtypes.float32, stack_name="foo")
            c1 = gen_data_flow_ops._stack_push(h1, 4.0)
            h2 = gen_data_flow_ops._stack(dtypes.float32, stack_name="foo")
            c2 = gen_data_flow_ops._stack_push(h2, 5.0)
            r = c1 + c2
            self.assertNotEqual(h1.eval()[1], h2.eval()[1])

    def testSameNameStacks(self):
        self._testSameNameStacks(use_gpu=False)
        self._testSameNameStacks(use_gpu=True)

    def _testCloseStack(self, use_gpu):
        # Closing an empty stack must succeed.
        with self.test_session(use_gpu=use_gpu) as sess:
            h = gen_data_flow_ops._stack(dtypes.float32, stack_name="foo")
            c1 = gen_data_flow_ops._stack_close(h)
            sess.run(c1)

    def testCloseStack(self):
        self._testCloseStack(use_gpu=False)
        self._testCloseStack(use_gpu=True)

    def _testPushCloseStack(self, use_gpu):
        # Closing a non-empty stack must also succeed.
        with self.test_session(use_gpu=use_gpu) as sess:
            h = gen_data_flow_ops._stack(dtypes.float32, stack_name="foo")
            c = gen_data_flow_ops._stack_push(h, [[4.0, 5.0]])
            with ops.control_dependencies([c]):
                c1 = gen_data_flow_ops._stack_close(h)
            sess.run(c1)

    def testPushCloseStack(self):
        self._testPushCloseStack(use_gpu=False)
        self._testPushCloseStack(use_gpu=True)


if __name__ == "__main__":
    test.main()
| apache-2.0 |
ltilve/ChromiumGStreamerBackend | build/android/pylib/base/base_test_result_unittest.py | 134 | 2817 | # Copyright (c) 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Unittests for TestRunResults."""
import unittest
from pylib.base.base_test_result import BaseTestResult
from pylib.base.base_test_result import TestRunResults
from pylib.base.base_test_result import ResultType
class TestTestRunResults(unittest.TestCase):
    # Unit tests for TestRunResults aggregation, de-duplication and the
    # various formatted output forms.

    def setUp(self):
        # Results of every type, plus a duplicate pass (other_p1 has the
        # same name as p1) to exercise de-duplication.
        self.p1 = BaseTestResult('p1', ResultType.PASS, log='pass1')
        other_p1 = BaseTestResult('p1', ResultType.PASS)
        self.p2 = BaseTestResult('p2', ResultType.PASS)
        self.f1 = BaseTestResult('f1', ResultType.FAIL, log='failure1')
        self.c1 = BaseTestResult('c1', ResultType.CRASH, log='crash1')
        self.u1 = BaseTestResult('u1', ResultType.UNKNOWN)
        self.tr = TestRunResults()
        self.tr.AddResult(self.p1)
        self.tr.AddResult(other_p1)
        self.tr.AddResult(self.p2)
        self.tr.AddResults(set([self.f1, self.c1, self.u1]))

    def testGetAll(self):
        # An empty symmetric_difference means the sets are equal.
        self.assertFalse(
            self.tr.GetAll().symmetric_difference(
                [self.p1, self.p2, self.f1, self.c1, self.u1]))

    def testGetPass(self):
        self.assertFalse(self.tr.GetPass().symmetric_difference(
            [self.p1, self.p2]))

    def testGetNotPass(self):
        self.assertFalse(self.tr.GetNotPass().symmetric_difference(
            [self.f1, self.c1, self.u1]))

    def testGetAddTestRunResults(self):
        # Merging another run must union the results (p1 deduplicated).
        tr2 = TestRunResults()
        other_p1 = BaseTestResult('p1', ResultType.PASS)
        f2 = BaseTestResult('f2', ResultType.FAIL)
        tr2.AddResult(other_p1)
        tr2.AddResult(f2)
        tr2.AddTestRunResults(self.tr)
        self.assertFalse(
            tr2.GetAll().symmetric_difference(
                [self.p1, self.p2, self.f1, self.c1, self.u1, f2]))

    def testGetLogs(self):
        log_print = ('[FAIL] f1:\n'
                     'failure1\n'
                     '[CRASH] c1:\n'
                     'crash1')
        self.assertEqual(self.tr.GetLogs(), log_print)

    def testGetShortForm(self):
        # NOTE(review): the exact column padding must match what
        # TestRunResults.GetShortForm() emits -- verify spacing against
        # pylib.base.base_test_result.
        short_print = ('ALL: 5 PASS: 2 FAIL: 1 '
                       'CRASH: 1 TIMEOUT: 0 UNKNOWN: 1 ')
        self.assertEqual(self.tr.GetShortForm(), short_print)

    def testGetGtestForm(self):
        # NOTE(review): the padding inside the brackets must match
        # GetGtestForm() -- verify spacing against pylib.
        gtest_print = ('[==========] 5 tests ran.\n'
                       '[ PASSED ] 2 tests.\n'
                       '[ FAILED ] 3 tests, listed below:\n'
                       '[ FAILED ] f1\n'
                       '[ FAILED ] c1 (CRASHED)\n'
                       '[ FAILED ] u1 (UNKNOWN)\n'
                       '\n'
                       '3 FAILED TESTS')
        self.assertEqual(gtest_print, self.tr.GetGtestForm())

    def testRunPassed(self):
        # A run with any non-pass result did not pass; an empty run did.
        self.assertFalse(self.tr.DidRunPass())
        tr2 = TestRunResults()
        self.assertTrue(tr2.DidRunPass())


if __name__ == '__main__':
    unittest.main()
| bsd-3-clause |
dsajkl/123 | common/test/acceptance/tests/studio/test_studio_acid_xblock.py | 25 | 6923 | """
Acceptance tests for Studio related to the acid xblock.
"""
from unittest import skip
from bok_choy.web_app_test import WebAppTest
from ...pages.studio.auto_auth import AutoAuthPage
from ...pages.studio.overview import CourseOutlinePage
from ...pages.xblock.acid import AcidView
from ...fixtures.course import CourseFixture, XBlockFixtureDesc
class XBlockAcidBase(WebAppTest):
    """
    Base class for tests that verify that XBlock integration is working correctly
    """
    # Not collected directly; concrete subclasses set __test__ = True and
    # provide setup_fixtures().
    __test__ = False

    def setUp(self):
        """
        Create a unique identifier for the course used in this test.
        """
        # Ensure that the superclass sets up
        super(XBlockAcidBase, self).setUp()

        # Define a unique course identifier
        self.course_info = {
            'org': 'test_org',
            'number': 'course_' + self.unique_id[:5],
            'run': 'test_' + self.unique_id,
            'display_name': 'Test Course ' + self.unique_id
        }

        self.outline = CourseOutlinePage(
            self.browser,
            self.course_info['org'],
            self.course_info['number'],
            self.course_info['run']
        )

        self.course_id = '{org}.{number}.{run}'.format(**self.course_info)

        # Subclasses install the course fixture and set self.user here.
        self.setup_fixtures()

        self.auth_page = AutoAuthPage(
            self.browser,
            staff=False,
            username=self.user.get('username'),
            email=self.user.get('email'),
            password=self.user.get('password')
        )
        self.auth_page.visit()

    def validate_acid_block_preview(self, acid_block):
        """
        Validate the Acid Block's preview
        """
        self.assertTrue(acid_block.init_fn_passed)
        self.assertTrue(acid_block.resource_url_passed)
        self.assertTrue(acid_block.scope_passed('user_state'))
        self.assertTrue(acid_block.scope_passed('user_state_summary'))
        self.assertTrue(acid_block.scope_passed('preferences'))
        self.assertTrue(acid_block.scope_passed('user_info'))

    def test_acid_block_preview(self):
        """
        Verify that all expected acid block tests pass in studio preview
        """
        self.outline.visit()
        subsection = self.outline.section('Test Section').subsection('Test Subsection')
        unit = subsection.toggle_expand().unit('Test Unit').go_to()

        acid_block = AcidView(self.browser, unit.xblocks[0].preview_selector)
        self.validate_acid_block_preview(acid_block)

    def test_acid_block_editor(self):
        """
        Verify that all expected acid block tests pass in studio editor
        """
        self.outline.visit()
        subsection = self.outline.section('Test Section').subsection('Test Subsection')
        unit = subsection.toggle_expand().unit('Test Unit').go_to()

        acid_block = AcidView(self.browser, unit.xblocks[0].edit().editor_selector)
        self.assertTrue(acid_block.init_fn_passed)
        self.assertTrue(acid_block.resource_url_passed)
class XBlockAcidNoChildTest(XBlockAcidBase):
    """
    Tests of an AcidBlock with no children
    """
    __test__ = True

    def setup_fixtures(self):
        # Install a course containing a single leaf acid block.
        course_fix = CourseFixture(
            self.course_info['org'],
            self.course_info['number'],
            self.course_info['run'],
            self.course_info['display_name']
        )

        course_fix.add_children(
            XBlockFixtureDesc('chapter', 'Test Section').add_children(
                XBlockFixtureDesc('sequential', 'Test Subsection').add_children(
                    XBlockFixtureDesc('vertical', 'Test Unit').add_children(
                        XBlockFixtureDesc('acid', 'Acid Block')
                    )
                )
            )
        ).install()

        self.user = course_fix.user
class XBlockAcidParentBase(XBlockAcidBase):
    """
    Base class for tests that verify that parent XBlock integration is working correctly
    """
    __test__ = False

    def validate_acid_block_preview(self, acid_block):
        # In addition to the base checks, the parent block's child tests
        # must also pass.
        super(XBlockAcidParentBase, self).validate_acid_block_preview(acid_block)
        self.assertTrue(acid_block.child_tests_passed)

    def test_acid_block_preview(self):
        """
        Verify that all expected acid block tests pass in studio preview
        """
        self.outline.visit()
        subsection = self.outline.section('Test Section').subsection('Test Subsection')
        unit = subsection.toggle_expand().unit('Test Unit').go_to()
        # Parent blocks are validated inside their container page.
        container = unit.xblocks[0].go_to_container()

        acid_block = AcidView(self.browser, container.xblocks[0].preview_selector)
        self.validate_acid_block_preview(acid_block)
class XBlockAcidEmptyParentTest(XBlockAcidParentBase):
    """
    Tests of an AcidBlock with children
    """
    __test__ = True

    def setup_fixtures(self):
        # Install a course whose acid parent block has NO children (note
        # the deliberately empty add_children() call).
        course_fix = CourseFixture(
            self.course_info['org'],
            self.course_info['number'],
            self.course_info['run'],
            self.course_info['display_name']
        )

        course_fix.add_children(
            XBlockFixtureDesc('chapter', 'Test Section').add_children(
                XBlockFixtureDesc('sequential', 'Test Subsection').add_children(
                    XBlockFixtureDesc('vertical', 'Test Unit').add_children(
                        XBlockFixtureDesc('acid_parent', 'Acid Parent Block').add_children(
                        )
                    )
                )
            )
        ).install()

        self.user = course_fix.user
class XBlockAcidChildTest(XBlockAcidParentBase):
    """
    Tests of an AcidBlock with children.

    The ``test_acid_block_preview`` and ``test_acid_block_editor`` test
    methods are inherited from the parent classes; the previous overrides
    here only delegated to super() and have been removed as redundant.
    """
    __test__ = True

    def setup_fixtures(self):
        # Install a course whose acid parent block has two acid children
        # and one html child.
        course_fix = CourseFixture(
            self.course_info['org'],
            self.course_info['number'],
            self.course_info['run'],
            self.course_info['display_name']
        )

        course_fix.add_children(
            XBlockFixtureDesc('chapter', 'Test Section').add_children(
                XBlockFixtureDesc('sequential', 'Test Subsection').add_children(
                    XBlockFixtureDesc('vertical', 'Test Unit').add_children(
                        XBlockFixtureDesc('acid_parent', 'Acid Parent Block').add_children(
                            XBlockFixtureDesc('acid', 'First Acid Child', metadata={'name': 'first'}),
                            XBlockFixtureDesc('acid', 'Second Acid Child', metadata={'name': 'second'}),
                            XBlockFixtureDesc('html', 'Html Child', data="<html>Contents</html>"),
                        )
                    )
                )
            )
        ).install()

        self.user = course_fix.user
| agpl-3.0 |
evidation-health/bokeh | examples/plotting/file/tap.py | 43 | 1258 | from __future__ import division
import itertools
import numpy as np
from six.moves import zip
from bokeh.plotting import ColumnDataSource, figure, show, output_file
from bokeh.models import TapTool
xx, yy = np.meshgrid(range(0,101,4), range(0,101,4))
x = xx.flatten()
y = yy.flatten()
N = len(x)
inds = [str(i) for i in np.arange(N)]
radii = np.random.random(size=N)*0.4 + 1.7
colors = [
"#%02x%02x%02x" % (r, g, 150) for r, g in zip(np.floor(50+2*x), np.floor(30+2*y))
]
foo = list(itertools.permutations("abcdef"))[:N]
bar = np.random.normal(size=N)
source = ColumnDataSource(
data=dict(
x=x,
y=y,
radius=radii,
colors=colors,
foo=foo,
bar=bar,
)
)
output_file("tap.html")
TOOLS="crosshair,pan,wheel_zoom,box_zoom,reset,tap,previewsave"
p = figure(title="Tappy Scatter", tools=TOOLS)
p.circle(x, y, radius=radii, source=source,
fill_color=colors, fill_alpha=0.6,
line_color=None, name="mystuff")
p.text(x, y, text=inds, alpha=0.5, text_font_size="5pt",
text_baseline="middle", text_align="center")
# in the broswer console, you will see messages when circles are clicked
tool = p.select(dict(type=TapTool))[0]
tool.names.append("mystuff")
show(p) # open a browser
| bsd-3-clause |
agx/git-buildpackage | gbp/rpm/policy.py | 1 | 8077 | # vim: set fileencoding=utf-8 :
#
# (C) 2012 Intel Corporation <markus.lehtonen@linux.intel.com>
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, please see
# <http://www.gnu.org/licenses/>
"""Default packaging policy for RPM"""
import re
from gbp.pkg import PkgPolicy, Archive
from gbp.scripts.common.pq import parse_gbp_commands
class RpmPkgPolicy(PkgPolicy):
    """Packaging policy for RPM"""

    # Special rpmlib python module for GBP (only)
    python_rpmlib_module_name = "rpm"

    alnum = 'a-zA-Z0-9'
    # Valid characters for RPM pkg name
    name_whitelist_chars = r'._+%{}\-'
    # Valid characters for RPM pkg version
    version_whitelist_chars = r'._+%{}~'

    # Regexp for checking the validity of package name
    packagename_re = re.compile("^[%s][%s%s]+$" %
                                (alnum, alnum, name_whitelist_chars))
    packagename_msg = ("Package names must be at least two characters long, "
                       "start with an alphanumeric and can only contain "
                       "alphanumerics or characters in %s" %
                       list(name_whitelist_chars))

    # Regexp for checking the validity of package (upstream) version
    upstreamversion_re = re.compile("^[0-9][%s%s]*$" %
                                    (alnum, version_whitelist_chars))
    upstreamversion_msg = ("Upstream version numbers must start with a digit "
                           "and can only containg alphanumerics or characters "
                           "in %s" % list(version_whitelist_chars))

    @classmethod
    def is_valid_orig_archive(cls, filename):
        """
        Is this a valid orig source archive

        @param filename: upstream source archive filename
        @type filename: C{str}
        @return: true if valid upstream source archive filename
        @rtype: C{bool}

        >>> RpmPkgPolicy.is_valid_orig_archive("foo/bar_baz.tar.gz")
        True
        >>> RpmPkgPolicy.is_valid_orig_archive("foo.bar.tar")
        True
        >>> RpmPkgPolicy.is_valid_orig_archive("foo.bar")
        False
        >>> RpmPkgPolicy.is_valid_orig_archive("foo.gz")
        False
        """
        # Archive.parse_filename returns (base, archive_format, compression);
        # any recognized archive format qualifies.
        _base, arch_fmt, _compression = Archive.parse_filename(filename)
        if arch_fmt:
            return True
        return False

    class Changelog(object):
        """Container for changelog related policy settings"""

        # Regexps for splitting/parsing the changelog section (of
        # Tizen / Fedora style changelogs)
        section_match_re = r'^\*'
        section_split_re = r'^\*\s*(?P<ch_header>\S.*?)$\n(?P<ch_body>.*)'
        header_split_re = r'(?P<ch_time>\S.*\s[0-9]{4})\s+(?P<ch_name>\S.*$)'
        header_name_split_re = r'(?P<name>[^<]*)\s+<(?P<email>[^>]+)>((\s*-)?\s+(?P<revision>\S+))?$'
        body_name_re = r'\[(?P<name>.*)\]'

        # Changelog header format (when writing out changelog)
        header_format = "* %(time)s %(name)s <%(email)s> %(revision)s"
        header_time_format = "%a %b %d %Y"
        header_rev_format = "%(version)s"

    class ChangelogEntryFormatter(object):
        """Helper class for generating changelog entries from git commits"""

        # Maximum length for a changelog entry line
        max_entry_line_length = 76
        # Bug tracking system related meta tags recognized from git commit msg
        bts_meta_tags = ("Close", "Closes", "Fixes", "Fix")
        # Regexp for matching bug tracking system ids (e.g. "bgo#123")
        bug_id_re = r'[A-Za-z0-9#_\-]+'

        @classmethod
        def _parse_bts_tags(cls, lines, meta_tags):
            """
            Parse and filter out bug tracking system related meta tags from
            commit message.

            @param lines: commit message
            @type lines: C{list} of C{str}
            @param meta_tags: meta tags to look for
            @type meta_tags: C{tuple} of C{str}
            @return: bts-ids per meta tag and the non-matched lines
            @rtype: (C{dict}, C{list} of C{str})
            """
            tags = {}
            other_lines = []
            # Case-insensitive "<tag>: <ids>" matcher built from meta_tags.
            bts_re = re.compile(r'^(?P<tag>%s):\s*(?P<ids>.*)' %
                                ('|'.join(meta_tags)), re.I)
            bug_id_re = re.compile(cls.bug_id_re)
            for line in lines:
                match = bts_re.match(line)
                if match:
                    tag = match.group('tag')
                    ids_str = match.group('ids')
                    bug_ids = [bug_id.strip() for bug_id in
                               bug_id_re.findall(ids_str)]
                    # Accumulate ids per tag; the tag line itself is dropped
                    # from the message body.
                    if tag in tags:
                        tags[tag] += bug_ids
                    else:
                        tags[tag] = bug_ids
                else:
                    other_lines.append(line)
            return (tags, other_lines)

        @classmethod
        def _extra_filter(cls, lines, ignore_re):
            """
            Filter out specific lines from the commit message.

            @param lines: commit message
            @type lines: C{list} of C{str}
            @param ignore_re: regexp for matching ignored lines
            @type ignore_re: C{str}
            @return: filtered commit message
            @rtype: C{list} of C{str}
            """
            if ignore_re:
                match = re.compile(ignore_re)
                return [line for line in lines if not match.match(line)]
            else:
                return lines

        @classmethod
        def compose(cls, commit_info, **kwargs):
            """
            Generate a changelog entry from a git commit.

            @param commit_info: info about the commit
            @type commit_info: C{commit_info} object from
                L{gbp.git.repository.GitRepository.get_commit_info()}.
            @param kwargs: additional arguments to the compose() method,
                currently we recognize 'full', 'id_len' and 'ignore_re'
            @type kwargs: C{dict}
            @return: formatted changelog entry, or None if the commit is
                marked to be ignored
            @rtype: C{list} of C{str}
            """
            # Parse and filter out gbp command meta-tags
            cmds, body = parse_gbp_commands(commit_info, 'gbp-rpm-ch',
                                            ('ignore', 'short', 'full'), ())
            body = body.splitlines()
            if 'ignore' in cmds:
                return None

            # Parse and filter out bts-related meta-tags
            bts_tags, body = cls._parse_bts_tags(body, cls.bts_meta_tags)

            # Additional filtering
            body = cls._extra_filter(body, kwargs['ignore_re'])

            # Generate changelog entry
            subject = commit_info['subject']
            commitid = commit_info['id']
            # Optionally prefix the entry with an abbreviated commit id.
            if kwargs['id_len']:
                text = ["- [%s] %s" % (commitid[0:kwargs['id_len']], subject)]
            else:
                text = ["- %s" % subject]

            # Add all non-filtered-out lines from commit message, unless 'short'
            if (kwargs['full'] or 'full' in cmds) and 'short' not in cmds:
                # Add all non-blank body lines.
                text.extend([" " + line for line in body if line.strip()])

            # Add bts tags and ids in the end, wrapping to a new line when
            # appending would exceed the maximum entry line length.
            for tag, ids in bts_tags.items():
                bts_msg = " (%s: %s)" % (tag, ', '.join(ids))
                if len(text[-1]) + len(bts_msg) >= cls.max_entry_line_length:
                    text.append(" ")
                text[-1] += bts_msg

            return text
| gpl-2.0 |
gkonstantyno/construct | tests/test_lib.py | 1 | 1840 | import unittest
from construct.lib.binary import int_to_bin, bin_to_int, swap_bytes, encode_bin, decode_bin
from construct.lib.expr import Path
class TestBinary(unittest.TestCase):
    # Tests for construct.lib.binary bit/byte helpers.

    def test_int_to_bin(self):
        # Output is one byte (\x00 or \x01) per bit, MSB first, padded or
        # truncated to the requested width.
        self.assertEqual(int_to_bin(19, 5), b"\x01\x00\x00\x01\x01")
        self.assertEqual(int_to_bin(19, 8), b'\x00\x00\x00\x01\x00\x00\x01\x01')
        self.assertEqual(int_to_bin(19, 3), b'\x00\x01\x01')

    def test_int_to_bin_signed(self):
        # Negative values are encoded in two's complement of the given width.
        self.assertEqual(int_to_bin(-13, 5), b"\x01\x00\x00\x01\x01")
        self.assertEqual(int_to_bin(-13, 8), b"\x01\x01\x01\x01\x00\x00\x01\x01")

    def test_bin_to_int(self):
        # Both \x00/\x01 bit-bytes and ASCII '0'/'1' digits are accepted.
        self.assertEqual(bin_to_int(b"\x01\x00\x00\x01\x01"), 19)
        self.assertEqual(bin_to_int(b"10011"), 19)

    def test_bin_to_int_signed(self):
        # signed=True interprets the leading bit as a sign bit.
        self.assertEqual(bin_to_int(b"\x01\x00\x00\x01\x01", True), -13)
        self.assertEqual(bin_to_int(b"10011", True), -13)

    def test_swap_bytes(self):
        # Reverses the order of fixed-size groups, not individual bytes.
        self.assertEqual(swap_bytes(b"aaaabbbbcccc", 4), b"ccccbbbbaaaa")
        self.assertEqual(swap_bytes(b"abcdefgh", 2), b"ghefcdab")
        self.assertEqual(swap_bytes(b"00011011", 2), b"11100100")

    def test_encode_bin(self):
        # Expands each input byte into 8 bit-bytes, MSB first.
        self.assertEqual(encode_bin(b"ab"),
            b"\x00\x01\x01\x00\x00\x00\x00\x01\x00\x01\x01\x00\x00\x00\x01\x00")

    def test_decode_bin(self):
        # Inverse of encode_bin.
        self.assertEqual(decode_bin(
            b"\x00\x01\x01\x00\x00\x00\x00\x01\x00\x01\x01\x00\x00\x00\x01\x00"),
            b"ab")

    def test_decode_bin_length(self):
        # Input length must be a multiple of 8 bit-bytes.
        self.assertRaises(ValueError, decode_bin, b"\x00")
class TestExpr(unittest.TestCase):
    # Tests for construct.lib.expr lazy expression building via Path.

    def test(self):
        # Path builds a deferred expression; comparing against its string
        # form and then evaluating it on a context dict.
        # NOTE(review): the expression uses `<< 2` but the expected repr
        # says `>> 2` -- verify against construct's Path operator mapping.
        path = Path("path")
        x = ~((path.foo * 2 + 3 << 2) % 11)
        self.assertEqual(x, 'not ((((this.foo * 2) + 3) >> 2) % 11)')
        self.assertFalse(x(dict(foo=7)))
| mit |
bootchk/pensool | source/gui/manager/drop.py | 1 | 3475 | '''
Coordinates drag and drop, which crosses controls.
!!! Note that all event coords are in the device coord system.
TODO not a class, just a singleton module
'''
'''
Copyright 2010, 2011 Lloyd Konneker
This file is part of Pensool.
Pensool is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
'''
import base.vector as vector
from decorators import *
class DropManager(object):
    '''
    Manages drag and drop within the app.
    (Doesn't manage drag and drop between apps.)
    Similar to Carbon Drop Manager.

    Source: the control where drag started
    Target: the control where drag ended (dropped)
    Controlee: the drawable being dragged.

    Enforces: only one drag and drop operation at a time.
    For now, each object must implement drop() method.
    TODO query each object for acceptance of drop.
    '''

    def __init__(self):
        # All drag state lives here, so a finished or cancelled drag can be
        # reset wholesale by re-calling __init__() (see end() and cancel()).
        self.start_point = None     # device coords where the drag began
        self.current_point = None   # device coords of latest motion event
        self.source = None          # which morph drag started from
        self.source_control = None  # which control on the source
        self.draggee = None         # which morph is being dragged
        # source not necessarily equal draggee, but often does.
        # For example, when dragging out a new morph from source morph

    @dump_event
    def begin(self, event, controlee, control):
        '''
        Enter dragging state.
        Remember event and source controlee and control.
        !!! Event already in user coords.
        '''
        assert(controlee is not None)
        assert(self.source is None)  # Not begin before
        self.start_point = vector.Vector(event.x, event.y)
        self.current_point = self.start_point.copy()
        self.source = controlee
        self.draggee = controlee  # Defaults to same as source
        self.source_control = control

    #@dump_event
    def continued(self, event, target):
        '''
        Some control received mouse motion while is_drag.
        Tell source (EG to ghost its action.)
        '''
        self.source_control.continue_drag(event,
            self._get_offset(event),
            self._get_increment(event))
        # update AFTER computing the increment, which is relative to the
        # previous event's position
        self.current_point = vector.Vector(event.x, event.y)

    @dump_event
    def end(self, target, event):
        '''
        On mouse button release after motion (that's the definition of drag.)
        Tell the target object that source object dropped on it.

        Raises RuntimeError if no drag is in progress.
        '''
        if self.source is None:
            raise RuntimeError("Drag end without source")
        '''
        Tell the target:
          what was dropped (source)
          where (event)
          why (source_control)
          how far (offset)
        '''
        target.drop(self.source, event, self._get_offset(event), self.source_control)
        self.__init__()

    @dump_event
    def cancel(self):
        '''
        Cancel drag.
        ??? When would this happen
        '''
        # FIX: reset ALL drag state, exactly as end() does.  Clearing only
        # self.source left start_point, current_point, source_control and
        # draggee stale, so get_draggee() after a cancel still returned the
        # abandoned morph (and the stale references kept objects alive).
        self.__init__()

    def is_drag(self):
        # True while a drag operation is in progress.
        return self.source is not None

    def set_draggee(self, draggee):
        self.draggee = draggee

    def get_draggee(self):
        return(self.draggee)

    def _get_offset(self, event):
        # Calculate offset drag end to drag begin
        offset = vector.Vector(event.x, event.y)
        offset -= self.start_point
        return offset

    def _get_increment(self, event):
        # Calculate incremental offset previous event to this event
        offset = vector.Vector(event.x, event.y)
        offset -= self.current_point
        return offset
# Singleton
# Module-level shared instance: all controls coordinate their drag and drop
# through this one manager.
dropmgr = DropManager()
| gpl-3.0 |
jeremiahyan/odoo | odoo/addons/base/models/res_partner.py | 1 | 50395 | # -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
import base64
import collections
import datetime
import hashlib
import pytz
import threading
import re
import requests
from lxml import etree
from random import randint
from werkzeug import urls
from odoo import api, fields, models, tools, SUPERUSER_ID, _, Command
from odoo.osv.expression import get_unaccent_wrapper
from odoo.exceptions import RedirectWarning, UserError, ValidationError
# Global variables used for the warning fields declared on the res.partner
# in the following modules : sale, purchase, account, stock
WARNING_MESSAGE = [
('no-message','No Message'),
('warning','Warning'),
('block','Blocking Message')
]
WARNING_HELP = 'Selecting the "Warning" option will notify user with the message, Selecting "Blocking Message" will throw an exception with the message and block the flow. The Message has to be written in the next field.'
ADDRESS_FIELDS = ('street', 'street2', 'zip', 'city', 'state_id', 'country_id')
@api.model
def _lang_get(self):
    # Selection provider for the partner ``lang`` field: installed languages.
    return self.env['res.lang'].get_installed()


# put POSIX 'Etc/*' entries at the end to avoid confusing users - see bug 1086728
_tzs = [(tz, tz) for tz in sorted(pytz.all_timezones, key=lambda tz: tz if not tz.startswith('Etc/') else '_')]
def _tz_get(self):
    # Selection provider for the partner ``tz`` field (list built once at import).
    return _tzs
class FormatAddressMixin(models.AbstractModel):
    _name = "format.address.mixin"
    _description = 'Address Format'

    def _fields_view_get_address(self, arch):
        """Localize the address block of a form view.

        Replaces every ``div.o_address_format`` node in ``arch`` with the
        country-specific address view configured on the current company's
        country, and returns the (possibly rewritten) arch string.
        """
        # consider the country of the user, not the country of the partner we want to display
        address_view_id = self.env.company.country_id.address_view_id.sudo()
        if address_view_id and not self._context.get('no_address_format') and (not address_view_id.model or address_view_id.model == self._name):
            #render the partner address accordingly to address_view_id
            # no_address_format avoids recursing when fetching the sub-view
            doc = etree.fromstring(arch)
            for address_node in doc.xpath("//div[hasclass('o_address_format')]"):
                Partner = self.env['res.partner'].with_context(no_address_format=True)
                sub_view = Partner.fields_view_get(
                    view_id=address_view_id.id, view_type='form', toolbar=False, submenu=False)
                sub_view_node = etree.fromstring(sub_view['arch'])
                #if the model is different than res.partner, there are chances that the view won't work
                #(e.g fields not present on the model). In that case we just return arch
                if self._name != 'res.partner':
                    try:
                        self.env['ir.ui.view'].postprocess_and_fields(sub_view_node, model=self._name)
                    except ValueError:
                        return arch
                address_node.getparent().replace(address_node, sub_view_node)
            arch = etree.tostring(doc, encoding='unicode')
        return arch
class PartnerCategory(models.Model):
    # Hierarchical, many2many tags attached to partners.
    _description = 'Partner Tags'
    _name = 'res.partner.category'
    _order = 'name'
    _parent_store = True  # maintains parent_path for fast child_of searches

    def _get_default_color(self):
        # random kanban/tag color index
        return randint(1, 11)

    name = fields.Char(string='Tag Name', required=True, translate=True)
    color = fields.Integer(string='Color Index', default=_get_default_color)
    parent_id = fields.Many2one('res.partner.category', string='Parent Category', index=True, ondelete='cascade')
    child_ids = fields.One2many('res.partner.category', 'parent_id', string='Child Tags')
    active = fields.Boolean(default=True, help="The active field allows you to hide the category without removing it.")
    parent_path = fields.Char(index=True)
    partner_ids = fields.Many2many('res.partner', column1='category_id', column2='partner_id', string='Partners')

    @api.constrains('parent_id')
    def _check_parent_id(self):
        # a tag cannot be its own (indirect) parent
        if not self._check_recursion():
            raise ValidationError(_('You can not create recursive tags.'))

    def name_get(self):
        """ Return the categories' display name, including their direct
        parent by default.

        If ``context['partner_category_display']`` is ``'short'``, the short
        version of the category name (without the direct parent) is used.
        The default is the long version.
        """
        if self._context.get('partner_category_display') == 'short':
            return super(PartnerCategory, self).name_get()

        res = []
        for category in self:
            # walk up the hierarchy to render "grandparent / parent / child"
            names = []
            current = category
            while current:
                names.append(current.name)
                current = current.parent_id
            res.append((category.id, ' / '.join(reversed(names))))
        return res

    @api.model
    def _name_search(self, name, args=None, operator='ilike', limit=100, name_get_uid=None):
        args = args or []
        if name:
            # Be sure name_search is symetric to name_get
            name = name.split(' / ')[-1]
            args = [('name', operator, name)] + args
        return self._search(args, limit=limit, access_rights_uid=name_get_uid)
class PartnerTitle(models.Model):
    # Honorifics attached to partners, e.g. "Doctor" with abbreviation "Dr."
    _name = 'res.partner.title'
    _order = 'name'
    _description = 'Partner Title'

    name = fields.Char(string='Title', required=True, translate=True)
    shortcut = fields.Char(string='Abbreviation', translate=True)
class Partner(models.Model):
_description = 'Contact'
_inherit = ['format.address.mixin', 'avatar.mixin']
_name = "res.partner"
_order = "display_name"
    def _default_category(self):
        # Default tags can be injected through the context (e.g. a filtered
        # kanban view passes ``category_id``).
        return self.env['res.partner.category'].browse(self._context.get('category_id'))

    @api.model
    def default_get(self, default_fields):
        """Add the company of the parent as default if we are creating a child partner.
        Also take the parent lang by default if any, otherwise, fallback to default DB lang."""
        values = super().default_get(default_fields)
        parent = self.env["res.partner"]
        if 'parent_id' in default_fields and values.get('parent_id'):
            parent = self.browse(values.get('parent_id'))
            values['company_id'] = parent.company_id.id
        if 'lang' in default_fields:
            # empty ``parent`` recordset yields a falsy lang, falling through
            # to the environment's language
            values['lang'] = values.get('lang') or parent.lang or self.env.lang
        return values
name = fields.Char(index=True)
display_name = fields.Char(compute='_compute_display_name', recursive=True, store=True, index=True)
date = fields.Date(index=True)
title = fields.Many2one('res.partner.title')
parent_id = fields.Many2one('res.partner', string='Related Company', index=True)
parent_name = fields.Char(related='parent_id.name', readonly=True, string='Parent name')
child_ids = fields.One2many('res.partner', 'parent_id', string='Contact', domain=[('active', '=', True)]) # force "active_test" domain to bypass _search() override
ref = fields.Char(string='Reference', index=True)
lang = fields.Selection(_lang_get, string='Language',
help="All the emails and documents sent to this contact will be translated in this language.")
active_lang_count = fields.Integer(compute='_compute_active_lang_count')
tz = fields.Selection(_tz_get, string='Timezone', default=lambda self: self._context.get('tz'),
help="When printing documents and exporting/importing data, time values are computed according to this timezone.\n"
"If the timezone is not set, UTC (Coordinated Universal Time) is used.\n"
"Anywhere else, time values are computed according to the time offset of your web client.")
tz_offset = fields.Char(compute='_compute_tz_offset', string='Timezone offset', invisible=True)
user_id = fields.Many2one('res.users', string='Salesperson',
help='The internal user in charge of this contact.')
vat = fields.Char(string='Tax ID', index=True, help="The Tax Identification Number. Complete it if the contact is subjected to government taxes. Used in some legal statements.")
same_vat_partner_id = fields.Many2one('res.partner', string='Partner with same Tax ID', compute='_compute_same_vat_partner_id', store=False)
bank_ids = fields.One2many('res.partner.bank', 'partner_id', string='Banks')
website = fields.Char('Website Link')
comment = fields.Html(string='Notes')
category_id = fields.Many2many('res.partner.category', column1='partner_id',
column2='category_id', string='Tags', default=_default_category)
credit_limit = fields.Float(string='Credit Limit')
active = fields.Boolean(default=True)
employee = fields.Boolean(help="Check this box if this contact is an Employee.")
function = fields.Char(string='Job Position')
type = fields.Selection(
[('contact', 'Contact'),
('invoice', 'Invoice Address'),
('delivery', 'Delivery Address'),
('other', 'Other Address'),
("private", "Private Address"),
], string='Address Type',
default='contact',
help="Invoice & Delivery addresses are used in sales orders. Private addresses are only visible by authorized users.")
# address fields
street = fields.Char()
street2 = fields.Char()
zip = fields.Char(change_default=True)
city = fields.Char()
state_id = fields.Many2one("res.country.state", string='State', ondelete='restrict', domain="[('country_id', '=?', country_id)]")
country_id = fields.Many2one('res.country', string='Country', ondelete='restrict')
country_code = fields.Char(related='country_id.code', string="Country Code")
partner_latitude = fields.Float(string='Geo Latitude', digits=(10, 7))
partner_longitude = fields.Float(string='Geo Longitude', digits=(10, 7))
email = fields.Char()
email_formatted = fields.Char(
'Formatted Email', compute='_compute_email_formatted',
help='Format email address "Name <email@domain>"')
phone = fields.Char()
mobile = fields.Char()
is_company = fields.Boolean(string='Is a Company', default=False,
help="Check if the contact is a company, otherwise it is a person")
industry_id = fields.Many2one('res.partner.industry', 'Industry')
# company_type is only an interface field, do not use it in business logic
company_type = fields.Selection(string='Company Type',
selection=[('person', 'Individual'), ('company', 'Company')],
compute='_compute_company_type', inverse='_write_company_type')
company_id = fields.Many2one('res.company', 'Company', index=True)
color = fields.Integer(string='Color Index', default=0)
user_ids = fields.One2many('res.users', 'partner_id', string='Users', auto_join=True)
partner_share = fields.Boolean(
'Share Partner', compute='_compute_partner_share', store=True,
help="Either customer (not a user), either shared user. Indicated the current partner is a customer without "
"access or with a limited access created for sharing data.")
contact_address = fields.Char(compute='_compute_contact_address', string='Complete Address')
# technical field used for managing commercial fields
commercial_partner_id = fields.Many2one('res.partner', string='Commercial Entity',
compute='_compute_commercial_partner', recursive=True,
store=True, index=True)
commercial_company_name = fields.Char('Company Name Entity', compute='_compute_commercial_company_name',
store=True)
company_name = fields.Char('Company Name')
barcode = fields.Char(help="Use a barcode to identify this contact.", copy=False, company_dependent=True)
# hack to allow using plain browse record in qweb views, and used in ir.qweb.field.contact
self = fields.Many2one(comodel_name=_name, compute='_compute_get_ids')
_sql_constraints = [
('check_name', "CHECK( (type='contact' AND name IS NOT NULL) or (type!='contact') )", 'Contacts require a name'),
]
    @api.depends('name', 'user_ids.share', 'image_1920', 'is_company')
    def _compute_avatar_1920(self):
        # extra depends (vs the mixin) so avatars refresh when the name,
        # linked users' share flag or the company flag changes
        super()._compute_avatar_1920()

    @api.depends('name', 'user_ids.share', 'image_1024', 'is_company')
    def _compute_avatar_1024(self):
        super()._compute_avatar_1024()

    @api.depends('name', 'user_ids.share', 'image_512', 'is_company')
    def _compute_avatar_512(self):
        super()._compute_avatar_512()

    @api.depends('name', 'user_ids.share', 'image_256', 'is_company')
    def _compute_avatar_256(self):
        super()._compute_avatar_256()

    @api.depends('name', 'user_ids.share', 'image_128', 'is_company')
    def _compute_avatar_128(self):
        super()._compute_avatar_128()

    def _compute_avatar(self, avatar_field, image_field):
        # generated (mixin) avatars only for partners linked to at least one
        # internal (non-share) user; everyone else gets their uploaded image
        # or a static placeholder
        partners_with_internal_user = self.filtered(lambda partner: partner.user_ids - partner.user_ids.filtered('share'))
        super(Partner, partners_with_internal_user)._compute_avatar(avatar_field, image_field)
        for partner in self - partners_with_internal_user:
            partner[avatar_field] = partner[image_field] or partner._avatar_get_placeholder()

    def _avatar_get_placeholder(self):
        # grey silhouette for individuals, building icon for companies
        path = "base/static/img/avatar_grey.png"
        if self.is_company:
            path = "base/static/img/company_image.png"
        return base64.b64encode(tools.file_open(path, 'rb').read())
    @api.depends('is_company', 'name', 'parent_id.display_name', 'type', 'company_name')
    def _compute_display_name(self):
        # neutralize the name_get rendering flags so the stored display_name
        # is always the plain variant
        diff = dict(show_address=None, show_address_only=None, show_email=None, html_format=None, show_vat=None)
        names = dict(self.with_context(**diff).name_get())
        for partner in self:
            partner.display_name = names.get(partner.id)

    @api.depends('lang')
    def _compute_active_lang_count(self):
        # same count for all records; used by views to show/hide the lang widget
        lang_count = len(self.env['res.lang'].get_installed())
        for partner in self:
            partner.active_lang_count = lang_count

    @api.depends('tz')
    def _compute_tz_offset(self):
        # UTC offset string such as '+0200'; falls back to GMT when tz unset
        for partner in self:
            partner.tz_offset = datetime.datetime.now(pytz.timezone(partner.tz or 'GMT')).strftime('%z')
@api.depends('user_ids.share', 'user_ids.active')
def _compute_partner_share(self):
super_partner = self.env['res.users'].browse(SUPERUSER_ID).partner_id
if super_partner in self:
super_partner.partner_share = False
for partner in self - super_partner:
partner.partner_share = not partner.user_ids or not any(not user.share for user in partner.user_ids)
    @api.depends('vat', 'company_id')
    def _compute_same_vat_partner_id(self):
        """Detect another partner sharing this VAT, to warn about duplicates."""
        for partner in self:
            # use _origin to deal with onchange()
            partner_id = partner._origin.id
            #active_test = False because if a partner has been deactivated you still want to raise the error,
            #so that you can reactivate it instead of creating a new one, which would loose its history.
            Partner = self.with_context(active_test=False).sudo()
            domain = [
                ('vat', '=', partner.vat),
                ('company_id', 'in', [False, partner.company_id.id]),
            ]
            if partner_id:
                # exclude the record itself and its own descendants
                domain += [('id', '!=', partner_id), '!', ('id', 'child_of', partner_id)]
            partner.same_vat_partner_id = bool(partner.vat) and not partner.parent_id and Partner.search(domain, limit=1)

    @api.depends(lambda self: self._display_address_depends())
    def _compute_contact_address(self):
        # full formatted postal address, per the country's address format
        for partner in self:
            partner.contact_address = partner._display_address()

    def _compute_get_ids(self):
        # the 'self' field lets qweb templates reference the record itself
        for partner in self:
            partner.self = partner.id
@api.depends('is_company', 'parent_id.commercial_partner_id')
def _compute_commercial_partner(self):
for partner in self:
if partner.is_company or not partner.parent_id:
partner.commercial_partner_id = partner
else:
partner.commercial_partner_id = partner.parent_id.commercial_partner_id
@api.depends('company_name', 'parent_id.is_company', 'commercial_partner_id.name')
def _compute_commercial_company_name(self):
for partner in self:
p = partner.commercial_partner_id
partner.commercial_company_name = p.is_company and p.name or partner.company_name
    @api.model
    def _fields_view_get(self, view_id=None, view_type='form', toolbar=False, submenu=False):
        # force the simplified form when creating from an email widget
        if (not view_id) and (view_type == 'form') and self._context.get('force_email'):
            view_id = self.env.ref('base.view_partner_simple_form').id
        res = super(Partner, self)._fields_view_get(view_id=view_id, view_type=view_type, toolbar=toolbar, submenu=submenu)
        if view_type == 'form':
            # localize the address layout (see FormatAddressMixin)
            res['arch'] = self._fields_view_get_address(res['arch'])
        return res

    @api.constrains('parent_id')
    def _check_parent_id(self):
        # a partner cannot be its own (indirect) parent
        if not self._check_recursion():
            raise ValidationError(_('You cannot create recursive Partner hierarchies.'))
def copy(self, default=None):
self.ensure_one()
chosen_name = default.get('name') if default else ''
new_name = chosen_name or _('%s (copy)', self.name)
default = dict(default or {}, name=new_name)
return super(Partner, self).copy(default)
    @api.onchange('parent_id')
    def onchange_parent_id(self):
        # return values in result, as this method is used by _fields_sync()
        if not self.parent_id:
            return
        result = {}
        partner = self._origin
        if partner.parent_id and partner.parent_id != self.parent_id:
            # warn: re-parenting an existing contact is usually a mistake
            result['warning'] = {
                'title': _('Warning'),
                'message': _('Changing the company of a contact should only be done if it '
                             'was never correctly set. If an existing contact starts working for a new '
                             'company then a new contact should be created under that new '
                             'company. You can use the "Discard" button to abandon this change.')}
        if partner.type == 'contact' or self.type == 'contact':
            # for contacts: copy the parent address, if set (aka, at least one
            # value is set in the address: otherwise, keep the one from the
            # contact)
            address_fields = self._address_fields()
            if any(self.parent_id[key] for key in address_fields):
                def convert(value):
                    # m2o values must be sent as ids in onchange results
                    return value.id if isinstance(value, models.BaseModel) else value
                result['value'] = {key: convert(self.parent_id[key]) for key in address_fields}
        return result
    @api.onchange('country_id')
    def _onchange_country_id(self):
        # clear the state when it does not belong to the new country
        if self.country_id and self.country_id != self.state_id.country_id:
            self.state_id = False

    @api.onchange('state_id')
    def _onchange_state(self):
        # picking a state implies its country
        if self.state_id.country_id:
            self.country_id = self.state_id.country_id

    @api.onchange('email')
    def onchange_email(self):
        # optionally fetch an avatar from Gravatar (opt-in via context key)
        if not self.image_1920 and self._context.get('gravatar_image') and self.email:
            self.image_1920 = self._get_gravatar_image(self.email)

    @api.onchange('parent_id', 'company_id')
    def _onchange_company_id(self):
        # a child contact always lives in its parent's company
        if self.parent_id:
            self.company_id = self.parent_id.company_id.id
    @api.depends('name', 'email')
    def _compute_email_formatted(self):
        # RFC 5322-style '"Name" <email>' via formataddr (handles quoting).
        # NOTE(review): a partner without a name is rendered with the literal
        # name "False" -- looks deliberate here, but confirm before relying
        # on it downstream.
        for partner in self:
            if partner.email:
                partner.email_formatted = tools.formataddr((partner.name or u"False", partner.email or u"False"))
            else:
                partner.email_formatted = ''

    @api.depends('is_company')
    def _compute_company_type(self):
        # company_type is a UI-only selection mirroring the boolean is_company
        for partner in self:
            partner.company_type = 'company' if partner.is_company else 'person'

    def _write_company_type(self):
        # inverse of _compute_company_type
        for partner in self:
            partner.is_company = partner.company_type == 'company'

    @api.onchange('company_type')
    def onchange_company_type(self):
        # keep the boolean in sync while editing in the form
        self.is_company = (self.company_type == 'company')
@api.constrains('barcode')
def _check_barcode_unicity(self):
if self.env['res.partner'].search_count([('barcode', '=', self.barcode)]) > 1:
raise ValidationError('An other user already has this barcode')
    def _update_fields_values(self, fields):
        """ Returns dict of write() values for synchronizing ``fields`` """
        values = {}
        for fname in fields:
            field = self._fields[fname]
            if field.type == 'many2one':
                # write() expects the id, not the record
                values[fname] = self[fname].id
            elif field.type == 'one2many':
                # an o2m value would be shared between parent and children
                raise AssertionError(_('One2Many fields cannot be synchronized as part of `commercial_fields` or `address fields`'))
            elif field.type == 'many2many':
                values[fname] = [Command.set(self[fname].ids)]
            else:
                values[fname] = self[fname]
        return values
    @api.model
    def _address_fields(self):
        """Returns the list of address fields that are synced from the parent."""
        return list(ADDRESS_FIELDS)

    @api.model
    def _formatting_address_fields(self):
        """Returns the list of address fields usable to format addresses."""
        return self._address_fields()

    def update_address(self, vals):
        # Write only the address-related keys of ``vals``; no-op (returns
        # None) when none are present.
        addr_vals = {key: vals[key] for key in self._address_fields() if key in vals}
        if addr_vals:
            return super(Partner, self).write(addr_vals)

    @api.model
    def _commercial_fields(self):
        """ Returns the list of fields that are managed by the commercial entity
        to which a partner belongs. These fields are meant to be hidden on
        partners that aren't `commercial entities` themselves, and will be
        delegated to the parent `commercial entity`. The list is meant to be
        extended by inheriting classes. """
        return ['vat', 'credit_limit']
    def _commercial_sync_from_company(self):
        """ Handle sync of commercial fields when a new parent commercial entity is set,
        as if they were related fields """
        commercial_partner = self.commercial_partner_id
        if commercial_partner != self:
            sync_vals = commercial_partner._update_fields_values(self._commercial_fields())
            self.write(sync_vals)

    def _commercial_sync_to_children(self):
        """ Handle sync of commercial fields to descendants """
        commercial_partner = self.commercial_partner_id
        sync_vals = commercial_partner._update_fields_values(self._commercial_fields())
        sync_children = self.child_ids.filtered(lambda c: not c.is_company)
        # depth-first: push down to grandchildren before writing this level
        for child in sync_children:
            child._commercial_sync_to_children()
        res = sync_children.write(sync_vals)
        sync_children._compute_commercial_partner()
        return res
    def _fields_sync(self, values):
        """ Sync commercial fields and address fields from company and to children after create/update,
        just as if those were all modeled as fields.related to the parent """
        # 1. From UPSTREAM: sync from parent
        if values.get('parent_id') or values.get('type') == 'contact':
            # 1a. Commercial fields: sync if parent changed
            if values.get('parent_id'):
                self._commercial_sync_from_company()
            # 1b. Address fields: sync if parent or use_parent changed *and* both are now set
            if self.parent_id and self.type == 'contact':
                onchange_vals = self.onchange_parent_id().get('value', {})
                self.update_address(onchange_vals)

        # 2. To DOWNSTREAM: sync children
        self._children_sync(values)

    def _children_sync(self, values):
        # Propagate commercial and address fields to child contacts.
        if not self.child_ids:
            return
        # 2a. Commercial Fields: sync if commercial entity
        if self.commercial_partner_id == self:
            commercial_fields = self._commercial_fields()
            if any(field in values for field in commercial_fields):
                self._commercial_sync_to_children()
        # also resync children still pointing at a stale commercial entity
        for child in self.child_ids.filtered(lambda c: not c.is_company):
            if child.commercial_partner_id != self.commercial_partner_id:
                self._commercial_sync_to_children()
                break
        # 2b. Address fields: sync if address changed
        address_fields = self._address_fields()
        if any(field in values for field in address_fields):
            contacts = self.child_ids.filtered(lambda c: c.type == 'contact')
            contacts.update_address(values)
    def _handle_first_contact_creation(self):
        """ On creation of first contact for a company (or root) that has no address, assume contact address
        was meant to be company address """
        parent = self.parent_id
        address_fields = self._address_fields()
        # only when self is the parent's sole child, self has an address and
        # the parent has none
        if (parent.is_company or not parent.parent_id) and len(parent.child_ids) == 1 and \
            any(self[f] for f in address_fields) and not any(parent[f] for f in address_fields):
            addr_vals = self._update_fields_values(address_fields)
            parent.update_address(addr_vals)

    def _clean_website(self, website):
        # Normalize a user-typed website into an absolute http URL.
        url = urls.url_parse(website)
        if not url.scheme:
            if not url.netloc:
                # "example.com/page" parses as path-only; move it to netloc
                url = url.replace(netloc=url.path, path='')
            website = url.replace(scheme='http').to_url()
        return website
    def write(self, vals):
        """Override of write() enforcing several invariants.

        - Archiving (``active=False``) is refused while an active user is
          still linked to the partner.
        - ``company_id`` must stay compatible with the companies of related
          users; the value is propagated to children.
        - ``is_company`` may be written as SUPERUSER for partner managers to
          dodge access-rights issues.
        - After the write, _fields_sync() propagates commercial and address
          fields up/down the hierarchy.
        """
        if vals.get('active') is False:
            # DLE: It should not be necessary to modify this to make work the ORM. The problem was just the recompute
            # of partner.user_ids when you create a new user for this partner, see test test_70_archive_internal_partners
            # You modified it in a previous commit, see original commit of this:
            # https://github.com/odoo/odoo/commit/9d7226371730e73c296bcc68eb1f856f82b0b4ed
            #
            # RCO: when creating a user for partner, the user is automatically added in partner.user_ids.
            # This is wrong if the user is not active, as partner.user_ids only returns active users.
            # Hence this temporary hack until the ORM updates inverse fields correctly.
            self.invalidate_cache(['user_ids'], self._ids)
            users = self.env['res.users'].sudo().search([('partner_id', 'in', self.ids)])
            if users:
                if self.env['res.users'].sudo(False).check_access_rights('write', raise_exception=False):
                    error_msg = _('You cannot archive contacts linked to an active user.\n'
                                  'You first need to archive their associated user.\n\n'
                                  'Linked active users : %(names)s', names=", ".join([u.display_name for u in users]))
                    action_error = users._action_show()
                    raise RedirectWarning(error_msg, action_error, _('Go to users'))
                else:
                    raise ValidationError(_('You cannot archive contacts linked to an active user.\n'
                                            'Ask an administrator to archive their associated user first.\n\n'
                                            'Linked active users :\n%(names)s', names=", ".join([u.display_name for u in users])))
        # res.partner must only allow to set the company_id of a partner if it
        # is the same as the company of all users that inherit from this partner
        # (this is to allow the code from res_users to write to the partner!) or
        # if setting the company_id to False (this is compatible with any user
        # company)
        if vals.get('website'):
            vals['website'] = self._clean_website(vals['website'])
        if vals.get('parent_id'):
            # the free-text company name is superseded by the real parent
            vals['company_name'] = False
        if 'company_id' in vals:
            company_id = vals['company_id']
            for partner in self:
                if company_id and partner.user_ids:
                    company = self.env['res.company'].browse(company_id)
                    companies = set(user.company_id for user in partner.user_ids)
                    if len(companies) > 1 or company not in companies:
                        raise UserError(
                            ("The selected company is not compatible with the companies of the related user(s)"))
                if partner.child_ids:
                    partner.child_ids.write({'company_id': company_id})
        result = True
        # To write in SUPERUSER on field is_company and avoid access rights problems.
        if 'is_company' in vals and self.user_has_groups('base.group_partner_manager') and not self.env.su:
            result = super(Partner, self.sudo()).write({'is_company': vals.get('is_company')})
            del vals['is_company']
        result = result and super(Partner, self).write(vals)
        for partner in self:
            # writing on another internal user's partner requires write
            # access on users
            if any(u.has_group('base.group_user') for u in partner.user_ids if u != self.env.user):
                self.env['res.users'].check_access_rights('write')
            partner._fields_sync(vals)
        return result
    @api.model_create_multi
    def create(self, vals_list):
        """Batch create; normalizes websites, clears the free-text company
        name for children, then runs parent/child field synchronization."""
        if self.env.context.get('import_file'):
            self._check_import_consistency(vals_list)
        for vals in vals_list:
            if vals.get('website'):
                vals['website'] = self._clean_website(vals['website'])
            if vals.get('parent_id'):
                vals['company_name'] = False
        partners = super(Partner, self).create(vals_list)

        if self.env.context.get('_partners_skip_fields_sync'):
            # the batch-import path does its own grouped sync,
            # see _load_records_create
            return partners

        for partner, vals in zip(partners, vals_list):
            partner._fields_sync(vals)
            # Lang: propagate from parent if no value was given
            partner._handle_first_contact_creation()
        return partners
    @api.ondelete(at_uninstall=False)
    def _unlink_except_user(self):
        """Forbid deleting partners still linked to users; suggest archiving."""
        users = self.env['res.users'].sudo().search([('partner_id', 'in', self.ids)])
        if not users:
            return  # no linked user, operation is allowed
        if self.env['res.users'].sudo(False).check_access_rights('write', raise_exception=False):
            # user can fix it themselves: redirect to the user list
            error_msg = _('You cannot delete contacts linked to an active user.\n'
                          'You should rather archive them after archiving their associated user.\n\n'
                          'Linked active users : %(names)s', names=", ".join([u.display_name for u in users]))
            action_error = users._action_show()
            raise RedirectWarning(error_msg, action_error, _('Go to users'))
        else:
            raise ValidationError(_('You cannot delete contacts linked to an active user.\n'
                                    'Ask an administrator to archive their associated user first.\n\n'
                                    'Linked active users :\n%(names)s', names=", ".join([u.display_name for u in users])))
    def _load_records_create(self, vals_list):
        """Batch-import variant of create(): group the parent-to-child field
        sync so one write() serves all siblings instead of one per record."""
        partners = super(Partner, self.with_context(_partners_skip_fields_sync=True))._load_records_create(vals_list)

        # batch up first part of _fields_sync
        # group partners by commercial_partner_id (if not self) and parent_id (if type == contact)
        groups = collections.defaultdict(list)
        for partner, vals in zip(partners, vals_list):
            cp_id = None
            if vals.get('parent_id') and partner.commercial_partner_id != partner:
                cp_id = partner.commercial_partner_id.id

            add_id = None
            if partner.parent_id and partner.type == 'contact':
                add_id = partner.parent_id.id

            groups[(cp_id, add_id)].append(partner.id)

        for (cp_id, add_id), children in groups.items():
            # values from parents (commercial, regular) written to their common children
            to_write = {}
            # commercial fields from commercial partner
            if cp_id:
                to_write = self.browse(cp_id)._update_fields_values(self._commercial_fields())
            # address fields from parent
            if add_id:
                parent = self.browse(add_id)
                for f in self._address_fields():
                    v = parent[f]
                    if v:
                        # m2o values are written as ids
                        to_write[f] = v.id if isinstance(v, models.BaseModel) else v
            if to_write:
                self.browse(children).write(to_write)

        # do the second half of _fields_sync the "normal" way
        for partner, vals in zip(partners, vals_list):
            partner._children_sync(vals)
            partner._handle_first_contact_creation()
        return partners
    def create_company(self):
        """Create a parent company from the free-text ``company_name`` and
        re-parent self and its children under it.  Returns True."""
        self.ensure_one()
        if self.company_name:
            # Create parent company
            values = dict(name=self.company_name, is_company=True, vat=self.vat)
            # the new company inherits this contact's address
            values.update(self._update_fields_values(self._address_fields()))
            new_company = self.create(values)
            # Set new company as my parent
            self.write({
                'parent_id': new_company.id,
                'child_ids': [Command.update(partner_id, dict(parent_id=new_company.id)) for partner_id in self.child_ids.ids]
            })
        return True

    def open_commercial_entity(self):
        """ Utility method used to add an "Open Company" button in partner views """
        self.ensure_one()
        return {'type': 'ir.actions.act_window',
                'res_model': 'res.partner',
                'view_mode': 'form',
                'res_id': self.commercial_partner_id.id,
                'target': 'current',
                'flags': {'form': {'action_buttons': True}}}

    def open_parent(self):
        """ Utility method used to add an "Open Parent" button in partner views """
        self.ensure_one()
        address_form_id = self.env.ref('base.view_partner_address_form').id
        return {'type': 'ir.actions.act_window',
                'res_model': 'res.partner',
                'view_mode': 'form',
                'views': [(address_form_id, 'form')],
                'res_id': self.parent_id.id,
                'target': 'new',
                'flags': {'form': {'action_buttons': True}}}
    def _get_contact_name(self, partner, name):
        # "<company name>, <contact name>" -- sudo() because the parent may
        # not be readable by the current user
        return "%s, %s" % (partner.commercial_company_name or partner.sudo().parent_id.name, name)

    def _get_name(self):
        """ Utility method to allow name_get to be overrided without re-browse the partner """
        partner = self
        name = partner.name or ''

        if partner.company_name or partner.parent_id:
            if not name and partner.type in ['invoice', 'delivery', 'other']:
                # unnamed address: show the address-type label instead
                name = dict(self.fields_get(['type'])['type']['selection'])[partner.type]
            if not partner.is_company:
                name = self._get_contact_name(partner, name)

        # context flags tune the rendering (address, db id, email, html, vat)
        if self._context.get('show_address_only'):
            name = partner._display_address(without_company=True)
        if self._context.get('show_address'):
            name = name + "\n" + partner._display_address(without_company=True)
        # collapse blank lines left by empty address parts
        name = name.replace('\n\n', '\n')
        name = name.replace('\n\n', '\n')
        if self._context.get('partner_show_db_id'):
            name = "%s (%s)" % (name, partner.id)
        if self._context.get('address_inline'):
            name = name.replace('\n', ', ')
        if self._context.get('show_email') and partner.email:
            name = "%s <%s>" % (name, partner.email)
        if self._context.get('html_format'):
            name = name.replace('\n', '<br/>')
        if self._context.get('show_vat') and partner.vat:
            name = "%s ‒ %s" % (name, partner.vat)

        return name
def name_get(self):
res = []
for partner in self:
name = partner._get_name()
res.append((partner.id, name))
return res
def _parse_partner_name(self, text):
""" Parse partner name (given by text) in order to find a name and an
email. Supported syntax:
* Raoul <raoul@grosbedon.fr>
* "Raoul le Grand" <raoul@grosbedon.fr>
* Raoul raoul@grosbedon.fr (strange fault tolerant support from df40926d2a57c101a3e2d221ecfd08fbb4fea30e)
Otherwise: default, everything is set as the name. Starting from 13.3
returned email will be normalized to have a coherent encoding.
"""
name, email = '', ''
split_results = tools.email_split_tuples(text)
if split_results:
name, email = split_results[0]
if email and not name:
fallback_emails = tools.email_split(text.replace(' ', ','))
if fallback_emails:
email = fallback_emails[0]
name = text[:text.index(email)].replace('"', '').replace('<', '').strip()
if email:
email = tools.email_normalize(email)
else:
name, email = text, ''
return name, email
@api.model
def name_create(self, name):
""" Override of orm's name_create method for partners. The purpose is
to handle some basic formats to create partners using the
name_create.
If only an email address is received and that the regex cannot find
a name, the name will have the email value.
If 'force_email' key in context: must find the email address. """
default_type = self._context.get('default_type')
if default_type and default_type not in self._fields['type'].get_values(self.env):
context = dict(self._context)
context.pop('default_type')
self = self.with_context(context)
name, email = self._parse_partner_name(name)
if self._context.get('force_email') and not email:
raise UserError(_("Couldn't create contact without email address!"))
create_values = {self._rec_name: name or email}
if email: # keep default_email in context
create_values['email'] = email
partner = self.create(create_values)
return partner.name_get()[0]
@api.model
def _search(self, args, offset=0, limit=None, order=None, count=False, access_rights_uid=None):
""" Override search() to always show inactive children when searching via ``child_of`` operator. The ORM will
always call search() with a simple domain of the form [('parent_id', 'in', [ids])]. """
# a special ``domain`` is set on the ``child_ids`` o2m to bypass this logic, as it uses similar domain expressions
if len(args) == 1 and len(args[0]) == 3 and args[0][:2] == ('parent_id','in') \
and args[0][2] != [False]:
self = self.with_context(active_test=False)
return super(Partner, self)._search(args, offset=offset, limit=limit, order=order,
count=count, access_rights_uid=access_rights_uid)
def _get_name_search_order_by_fields(self):
return ''
    @api.model
    def _name_search(self, name, args=None, operator='ilike', limit=100, name_get_uid=None):
        """SQL-based name search matching email, display_name, ref and vat.

        Falls back to the standard ORM ``_name_search`` when the operator is
        not one of (=, ilike, =ilike, like, =like) or when there is neither a
        search term nor the ``res_partner_search_mode`` ranking context key.
        Returns a list of matching partner ids.
        """
        self = self.with_user(name_get_uid or self.env.uid)
        # as the implementation is in SQL, we force the recompute of fields if necessary
        self.recompute(['display_name'])
        self.flush()
        if args is None:
            args = []
        # Ranking mode: order by match quality even without a search term.
        order_by_rank = self.env.context.get('res_partner_search_mode')
        if (name or order_by_rank) and operator in ('=', 'ilike', '=ilike', 'like', '=like'):
            self.check_access_rights('read')
            # Build the WHERE clause from the caller's domain + record rules.
            where_query = self._where_calc(args)
            self._apply_ir_rules(where_query, 'read')
            from_clause, where_clause, where_clause_params = where_query.get_sql()
            from_str = from_clause if from_clause else 'res_partner'
            where_str = where_clause and (" WHERE %s AND " % where_clause) or ' WHERE '
            # search on the name of the contacts and of its company
            search_name = name
            if operator in ('ilike', 'like'):
                # Substring match: wrap the term in SQL wildcards.
                search_name = '%%%s%%' % name
            if operator in ('=ilike', '=like'):
                # '=ilike'/'=like' mean exact-pattern match: strip the '='.
                operator = operator[1:]
            unaccent = get_unaccent_wrapper(self.env.cr)
            fields = self._get_name_search_order_by_fields()
            query = """SELECT res_partner.id
                         FROM {from_str}
                      {where} ({email} {operator} {percent}
                       OR {display_name} {operator} {percent}
                       OR {reference} {operator} {percent}
                       OR {vat} {operator} {percent})
                       -- don't panic, trust postgres bitmap
                     ORDER BY {fields} {display_name} {operator} {percent} desc,
                              {display_name}
                    """.format(from_str=from_str,
                               fields=fields,
                               where=where_str,
                               operator=operator,
                               email=unaccent('res_partner.email'),
                               display_name=unaccent('res_partner.display_name'),
                               reference=unaccent('res_partner.ref'),
                               percent=unaccent('%s'),
                               vat=unaccent('res_partner.vat'),)
            # Parameter order must mirror the %s placeholders above.
            where_clause_params += [search_name]*3 # for email / display_name, reference
            where_clause_params += [re.sub(r'[^a-zA-Z0-9\-\.]+', '', search_name) or None] # vat: keep only alphanumerics, '-' and '.'
            where_clause_params += [search_name] # for order by
            if limit:
                query += ' limit %s'
                where_clause_params.append(limit)
            self.env.cr.execute(query, where_clause_params)
            return [row[0] for row in self.env.cr.fetchall()]
        return super(Partner, self)._name_search(name, args, operator=operator, limit=limit, name_get_uid=name_get_uid)
@api.model
@api.returns('self', lambda value: value.id)
def find_or_create(self, email, assert_valid_email=False):
""" Find a partner with the given ``email`` or use :py:method:`~.name_create`
to create a new one.
:param str email: email-like string, which should contain at least one email,
e.g. ``"Raoul Grosbedon <r.g@grosbedon.fr>"``
:param boolean assert_valid_email: raise if no valid email is found
:return: newly created record
"""
if not email:
raise ValueError(_('An email is required for find_or_create to work'))
parsed_name, parsed_email = self._parse_partner_name(email)
if not parsed_email and assert_valid_email:
raise ValueError(_('A valid email is required for find_or_create to work properly.'))
partners = self.search([('email', '=ilike', parsed_email)], limit=1)
if partners:
return partners
create_values = {self._rec_name: parsed_name or parsed_email}
if parsed_email: # keep default_email in context
create_values['email'] = parsed_email
return self.create(create_values)
def _get_gravatar_image(self, email):
email_hash = hashlib.md5(email.lower().encode('utf-8')).hexdigest()
url = "https://www.gravatar.com/avatar/" + email_hash
try:
res = requests.get(url, params={'d': '404', 's': '128'}, timeout=5)
if res.status_code != requests.codes.ok:
return False
except requests.exceptions.ConnectionError as e:
return False
except requests.exceptions.Timeout as e:
return False
return base64.b64encode(res.content)
def _email_send(self, email_from, subject, body, on_error=None):
for partner in self.filtered('email'):
tools.email_send(email_from, [partner.email], subject, body, on_error)
return True
    def address_get(self, adr_pref=None):
        """ Find contacts/addresses of the right type(s) by doing a depth-first-search
        through descendants within company boundaries (stop at entities flagged ``is_company``)
        then continuing the search at the ancestors that are within the same company boundaries.
        Defaults to partners of type ``'default'`` when the exact type is not found, or to the
        provided partner itself if no type ``'default'`` is found either.

        :param adr_pref: iterable of address types to look for; 'contact' is
            always added to the set
        :return: dict mapping each requested type to a partner id
        """
        adr_pref = set(adr_pref or [])
        if 'contact' not in adr_pref:
            adr_pref.add('contact')
        result = {}
        # Records already inspected, to avoid re-visiting shared descendants
        # (and looping) when climbing to ancestors.
        visited = set()
        for partner in self:
            current_partner = partner
            while current_partner:
                to_scan = [current_partner]
                # Scan descendants, DFS
                while to_scan:
                    record = to_scan.pop(0)
                    visited.add(record)
                    # First match wins per type; never overwrite a found one.
                    if record.type in adr_pref and not result.get(record.type):
                        result[record.type] = record.id
                    # Early exit once every requested type is resolved.
                    if len(result) == len(adr_pref):
                        return result
                    # Prepending children keeps the traversal depth-first;
                    # company-flagged children are a boundary and not entered.
                    to_scan = [c for c in record.child_ids
                               if c not in visited
                               if not c.is_company] + to_scan
                # Continue scanning at ancestor if current_partner is not a commercial entity
                if current_partner.is_company or not current_partner.parent_id:
                    break
                current_partner = current_partner.parent_id
        # default to type 'contact' or the partner itself
        # NOTE(review): ``self.id`` assumes at most one record in ``self`` on
        # this fallback path — confirm multi-record callers always match above.
        default = result.get('contact', self.id or False)
        for adr_type in adr_pref:
            result[adr_type] = result.get(adr_type) or default
        return result
@api.model
def view_header_get(self, view_id, view_type):
if self.env.context.get('category_id'):
return _(
'Partners: %(category)s',
category=self.env['res.partner.category'].browse(self.env.context['category_id']).name,
)
return super().view_header_get(view_id, view_type)
@api.model
@api.returns('self')
def main_partner(self):
''' Return the main partner '''
return self.env.ref('base.main_partner')
    @api.model
    def _get_default_address_format(self):
        # Fallback address layout used when the partner's country does not
        # define its own address_format.
        return "%(street)s\n%(street2)s\n%(city)s %(state_code)s %(zip)s\n%(country_name)s"
@api.model
def _get_address_format(self):
return self.country_id.address_format or self._get_default_address_format()
def _display_address(self, without_company=False):
'''
The purpose of this function is to build and return an address formatted accordingly to the
standards of the country where it belongs.
:param address: browse record of the res.partner to format
:returns: the address formatted in a display that fit its country habits (or the default ones
if not country is specified)
:rtype: string
'''
# get the information that will be injected into the display format
# get the address format
address_format = self._get_address_format()
args = {
'state_code': self.state_id.code or '',
'state_name': self.state_id.name or '',
'country_code': self.country_id.code or '',
'country_name': self._get_country_name(),
'company_name': self.commercial_company_name or '',
}
for field in self._formatting_address_fields():
args[field] = getattr(self, field) or ''
if without_company:
args['company_name'] = ''
elif self.commercial_company_name:
address_format = '%(company_name)s\n' + address_format
return address_format % args
def _display_address_depends(self):
# field dependencies of method _display_address()
return self._formatting_address_fields() + [
'country_id.address_format', 'country_id.code', 'country_id.name',
'company_name', 'state_id.code', 'state_id.name',
]
@api.model
def get_import_templates(self):
return [{
'label': _('Import Template for Customers'),
'template': '/base/static/xls/res_partner.xls'
}]
@api.model
def _check_import_consistency(self, vals_list):
"""
The values created by an import are generated by a name search, field by field.
As a result there is no check that the field values are consistent with each others.
We check that if the state is given a value, it does belong to the given country, or we remove it.
"""
States = self.env['res.country.state']
states_ids = {vals['state_id'] for vals in vals_list if vals.get('state_id')}
state_to_country = States.search([('id', 'in', list(states_ids))]).read(['country_id'])
for vals in vals_list:
if vals.get('state_id'):
country_id = next(c['country_id'][0] for c in state_to_country if c['id'] == vals.get('state_id'))
state = States.browse(vals['state_id'])
if state.country_id.id != country_id:
state_domain = [('code', '=', state.code),
('country_id', '=', country_id)]
state = States.search(state_domain, limit=1)
vals['state_id'] = state.id # replace state or remove it if not found
def _get_country_name(self):
return self.country_id.name or ''
class ResPartnerIndustry(models.Model):
    # Simple tag-like model describing a partner's line of business.
    _description = 'Industry'
    _name = "res.partner.industry"
    _order = "name"
    name = fields.Char('Name', translate=True)  # short label shown in lists
    full_name = fields.Char('Full Name', translate=True)  # expanded/official wording
    active = fields.Boolean('Active', default=True)  # allows archiving instead of deletion
| gpl-3.0 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.