| repo_name | path | copies | size | content | license |
|---|---|---|---|---|---|
cokelaer/bioconda-recipes | recipes/biopet-sampleconfig/biopet-sampleconfig.py | 53 | 3375 | #!/usr/bin/env python
#
# Wrapper script for starting the biopet-sampleconfig JAR package
#
# This script is written for use with the Conda package manager and is copied
# from the peptide-shaker wrapper. Only the parameters are changed.
# (https://github.com/bioconda/bioconda-recipes/blob/master/recipes/peptide-shaker/peptide-shaker.py)
#
# This file was automatically generated by the sbt-bioconda plugin.
import os
import subprocess
import sys
import shutil
from os import access
from os import getenv
from os import X_OK
jar_file = 'SampleConfig-assembly-0.3.jar'
default_jvm_mem_opts = []
# !!! End of parameter section. No user-serviceable code below this line !!!
def real_dirname(path):
"""Return the symlink-resolved, canonicalized directory-portion of path."""
return os.path.dirname(os.path.realpath(path))
def java_executable():
"""Return the executable name of the Java interpreter."""
java_home = getenv('JAVA_HOME')
java_bin = os.path.join('bin', 'java')
if java_home and access(os.path.join(java_home, java_bin), X_OK):
return os.path.join(java_home, java_bin)
else:
return 'java'
def jvm_opts(argv):
"""Construct list of Java arguments based on our argument list.
The argument list passed in argv must not include the script name.
The return value is a 4-tuple of the form:
(memory_options, prop_options, passthrough_options, exec_dir)
where the first three are lists of strings and exec_dir is a path or None.
"""
mem_opts = []
prop_opts = []
pass_args = []
exec_dir = None
for arg in argv:
if arg.startswith('-D'):
prop_opts.append(arg)
elif arg.startswith('-XX'):
prop_opts.append(arg)
elif arg.startswith('-Xm'):
mem_opts.append(arg)
elif arg.startswith('--exec_dir='):
exec_dir = arg.split('=', 1)[1].strip('"').strip("'")
if not os.path.exists(exec_dir):
shutil.copytree(real_dirname(sys.argv[0]), exec_dir, symlinks=False, ignore=None)
else:
pass_args.append(arg)
# In the original shell script the test coded below read:
# if [ "$jvm_mem_opts" == "" ] && [ -z ${_JAVA_OPTIONS+x} ]
# To reproduce the behaviour of the above shell code fragment
# it is important to explicitly check for equality with None
# in the second condition, so a null envar value counts as True!
if mem_opts == [] and getenv('_JAVA_OPTIONS') is None:
mem_opts = default_jvm_mem_opts
return (mem_opts, prop_opts, pass_args, exec_dir)
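# Illustrative sketch of jvm_opts (hypothetical argv, not part of the
# original wrapper): a call such as
#     jvm_opts(['-Xmx2g', '-Dsnappy.disable=true', '--version'])
# returns
#     (['-Xmx2g'], ['-Dsnappy.disable=true'], ['--version'], None)
# -Xm* flags become memory options, -D*/-XX* flags become property
# options, everything else passes through, and exec_dir stays None
# unless --exec_dir= was supplied.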
def main():
"""
PeptideShaker updates files relative to the path of the jar file.
In a multiuser setting, the option --exec_dir="exec_dir"
can be used as the location for the peptide-shaker distribution.
If the exec_dir does not exist,
we copy the jar file, lib, and resources to the exec_dir directory.
"""
java = java_executable()
(mem_opts, prop_opts, pass_args, exec_dir) = jvm_opts(sys.argv[1:])
jar_dir = exec_dir if exec_dir else real_dirname(sys.argv[0])
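# If the first passthrough argument looks like a fully qualified Java
# main class (an 'eu.*' package name, a convention inherited from the
# peptide-shaker wrapper this script was copied from), run it on the
# classpath; otherwise launch the jar's default main class with -jar.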
if pass_args != [] and pass_args[0].startswith('eu'):
jar_arg = '-cp'
else:
jar_arg = '-jar'
jar_path = os.path.join(jar_dir, jar_file)
java_args = [java] + mem_opts + prop_opts + [jar_arg] + [jar_path] + pass_args
sys.exit(subprocess.call(java_args))
if __name__ == '__main__':
main()
| mit |
ravibhure/ansible | lib/ansible/modules/network/cloudengine/ce_command.py | 11 | 7815 | #!/usr/bin/python
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = """
---
module: ce_command
version_added: "2.3"
author: "JackyGao2016 (@CloudEngine-Ansible)"
short_description: Run arbitrary command on HUAWEI CloudEngine devices.
description:
- Sends an arbitrary command to a HUAWEI CloudEngine node and returns
the results read from the device. The ce_command module includes an
argument that will cause the module to wait for a specific condition
before returning or timing out if the condition is not met.
options:
commands:
description:
- The commands to send to the remote HUAWEI CloudEngine device
over the configured provider. The resulting output from the
command is returned. If the I(wait_for) argument is provided,
the module does not return until the condition is satisfied
or the number of I(retries) has been exceeded.
required: true
wait_for:
description:
- Specifies what to evaluate from the output of the command
and what conditionals to apply. This argument will cause
the task to wait for a particular conditional to be true
before moving forward. If the conditional is not true
by the configured retries, the task fails. See examples.
required: false
default: null
match:
description:
- The I(match) argument is used in conjunction with the
I(wait_for) argument to specify the match policy. Valid
values are C(all) or C(any). If the value is set to C(all)
then all conditionals in the I(wait_for) must be satisfied. If
the value is set to C(any) then only one of the values must be
satisfied.
required: false
default: all
retries:
description:
- Specifies the number of retries a command should be tried
before it is considered failed. The command is run on the
target device every retry and evaluated against the I(wait_for)
conditionals.
required: false
default: 10
interval:
description:
- Configures the interval in seconds to wait between retries
of the command. If the command does not pass the specified
conditional, the interval indicates how long to wait before
trying the command again.
required: false
default: 1
"""
EXAMPLES = """
# Note: examples below use the following provider dict to handle
# transport and authentication to the node.
- name: CloudEngine command test
hosts: cloudengine
connection: local
gather_facts: no
vars:
cli:
host: "{{ inventory_hostname }}"
port: "{{ ansible_ssh_port }}"
username: "{{ username }}"
password: "{{ password }}"
transport: cli
tasks:
- name: "Run display version on remote devices"
ce_command:
commands: display version
provider: "{{ cli }}"
- name: "Run display version and check to see if output contains HUAWEI"
ce_command:
commands: display version
wait_for: result[0] contains HUAWEI
provider: "{{ cli }}"
- name: "Run multiple commands on remote nodes"
ce_command:
commands:
- display version
- display device
provider: "{{ cli }}"
- name: "Run multiple commands and evaluate the output"
ce_command:
commands:
- display version
- display device
wait_for:
- result[0] contains HUAWEI
- result[1] contains Device
provider: "{{ cli }}"
"""
RETURN = """
stdout:
description: the set of responses from the commands
returned: always
type: list
sample: ['...', '...']
stdout_lines:
description: The value of stdout split into a list
returned: always
type: list
sample: [['...', '...'], ['...'], ['...']]
failed_conditions:
description: the conditionals that failed
returned: failed
type: list
sample: ['...', '...']
"""
import time
import traceback
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.network.cloudengine.ce import ce_argument_spec, check_args
from ansible.module_utils.network.cloudengine.ce import run_commands
from ansible.module_utils.network.common.parsing import Conditional
from ansible.module_utils.network.common.utils import ComplexList
from ansible.module_utils.six import string_types
from ansible.module_utils._text import to_native
def to_lines(stdout):
lines = list()
for item in stdout:
if isinstance(item, string_types):
item = str(item).split('\n')
lines.append(item)
return lines
def parse_commands(module, warnings):
transform = ComplexList(dict(
command=dict(key=True),
output=dict(),
prompt=dict(),
response=dict()
), module)
commands = transform(module.params['commands'])
for item in commands:
if module.check_mode and not item['command'].startswith('dis'):
warnings.append(
'Only display commands are supported when using check_mode, not '
'executing %s' % item['command']
)
return commands
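# Illustrative note on the transform above (shape assumed from the
# ComplexList spec; not output captured from a device): a bare string
# entry such as 'display version' comes back from parse_commands as
#     {'command': 'display version', 'output': None,
#      'prompt': None, 'response': None}
# so to_cli() and the check_mode guard can rely on a uniform dict shape.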
def to_cli(obj):
cmd = obj['command']
return cmd
def main():
"""entry point for module execution
"""
argument_spec = dict(
# { command: <str>, output: <str>, prompt: <str>, response: <str> }
commands=dict(type='list', required=True),
wait_for=dict(type='list'),
match=dict(default='all', choices=['any', 'all']),
retries=dict(default=10, type='int'),
interval=dict(default=1, type='int')
)
argument_spec.update(ce_argument_spec)
module = AnsibleModule(argument_spec=argument_spec,
supports_check_mode=True)
result = {'changed': False}
warnings = list()
check_args(module, warnings)
commands = parse_commands(module, warnings)
result['warnings'] = warnings
wait_for = module.params['wait_for'] or list()
try:
conditionals = [Conditional(c) for c in wait_for]
except AttributeError as exc:
module.fail_json(msg=to_native(exc), exception=traceback.format_exc())
retries = module.params['retries']
interval = module.params['interval']
match = module.params['match']
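# Poll the device: on each pass run the commands, drop every conditional
# that is now satisfied (or all of them at once when match == 'any'),
# and stop as soon as none remain or the retry budget is exhausted.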
while retries > 0:
responses = run_commands(module, commands)
for item in list(conditionals):
if item(responses):
if match == 'any':
conditionals = list()
break
conditionals.remove(item)
if not conditionals:
break
time.sleep(interval)
retries -= 1
if conditionals:
failed_conditions = [item.raw for item in conditionals]
msg = 'One or more conditional statements have not been satisfied'
module.fail_json(msg=msg, failed_conditions=failed_conditions)
result.update({
'stdout': responses,
'stdout_lines': to_lines(responses)
})
module.exit_json(**result)
if __name__ == '__main__':
main()
| gpl-3.0 |
RuiNascimento/krepo | script.areswizard/requests/packages/chardet/gb2312freq.py | 3132 | 36011 | ######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Communicator client code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
# GB2312 most frequently used character table
#
# Char to FreqOrder table, from hz6763
# 512 --> 0.79 -- 0.79
# 1024 --> 0.92 -- 0.13
# 2048 --> 0.98 -- 0.06
# 6768 --> 1.00 -- 0.02
#
# Ideal Distribution Ratio = 0.79135/(1-0.79135) = 3.79
# Random Distribution Ratio = 512 / (3755 - 512) = 0.157
#
# Typical Distribution Ratio is about 25% of the Ideal one, still much higher than RDR
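# Worked check of the arithmetic above (illustrative only):
#   ideal  = 0.79135 / (1 - 0.79135) = 3.7926...  (quoted as 3.79)
#   random = 512 / (3755 - 512) = 512 / 3243 = 0.1579...  (quoted as 0.157)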
GB2312_TYPICAL_DISTRIBUTION_RATIO = 0.9
GB2312_TABLE_SIZE = 3760
GB2312CharToFreqOrder = (
1671, 749,1443,2364,3924,3807,2330,3921,1704,3463,2691,1511,1515, 572,3191,2205,
2361, 224,2558, 479,1711, 963,3162, 440,4060,1905,2966,2947,3580,2647,3961,3842,
2204, 869,4207, 970,2678,5626,2944,2956,1479,4048, 514,3595, 588,1346,2820,3409,
249,4088,1746,1873,2047,1774, 581,1813, 358,1174,3590,1014,1561,4844,2245, 670,
1636,3112, 889,1286, 953, 556,2327,3060,1290,3141, 613, 185,3477,1367, 850,3820,
1715,2428,2642,2303,2732,3041,2562,2648,3566,3946,1349, 388,3098,2091,1360,3585,
152,1687,1539, 738,1559, 59,1232,2925,2267,1388,1249,1741,1679,2960, 151,1566,
1125,1352,4271, 924,4296, 385,3166,4459, 310,1245,2850, 70,3285,2729,3534,3575,
2398,3298,3466,1960,2265, 217,3647, 864,1909,2084,4401,2773,1010,3269,5152, 853,
3051,3121,1244,4251,1895, 364,1499,1540,2313,1180,3655,2268, 562, 715,2417,3061,
544, 336,3768,2380,1752,4075, 950, 280,2425,4382, 183,2759,3272, 333,4297,2155,
1688,2356,1444,1039,4540, 736,1177,3349,2443,2368,2144,2225, 565, 196,1482,3406,
927,1335,4147, 692, 878,1311,1653,3911,3622,1378,4200,1840,2969,3149,2126,1816,
2534,1546,2393,2760, 737,2494, 13, 447, 245,2747, 38,2765,2129,2589,1079, 606,
360, 471,3755,2890, 404, 848, 699,1785,1236, 370,2221,1023,3746,2074,2026,2023,
2388,1581,2119, 812,1141,3091,2536,1519, 804,2053, 406,1596,1090, 784, 548,4414,
1806,2264,2936,1100, 343,4114,5096, 622,3358, 743,3668,1510,1626,5020,3567,2513,
3195,4115,5627,2489,2991, 24,2065,2697,1087,2719, 48,1634, 315, 68, 985,2052,
198,2239,1347,1107,1439, 597,2366,2172, 871,3307, 919,2487,2790,1867, 236,2570,
1413,3794, 906,3365,3381,1701,1982,1818,1524,2924,1205, 616,2586,2072,2004, 575,
253,3099, 32,1365,1182, 197,1714,2454,1201, 554,3388,3224,2748, 756,2587, 250,
2567,1507,1517,3529,1922,2761,2337,3416,1961,1677,2452,2238,3153, 615, 911,1506,
1474,2495,1265,1906,2749,3756,3280,2161, 898,2714,1759,3450,2243,2444, 563, 26,
3286,2266,3769,3344,2707,3677, 611,1402, 531,1028,2871,4548,1375, 261,2948, 835,
1190,4134, 353, 840,2684,1900,3082,1435,2109,1207,1674, 329,1872,2781,4055,2686,
2104, 608,3318,2423,2957,2768,1108,3739,3512,3271,3985,2203,1771,3520,1418,2054,
1681,1153, 225,1627,2929, 162,2050,2511,3687,1954, 124,1859,2431,1684,3032,2894,
585,4805,3969,2869,2704,2088,2032,2095,3656,2635,4362,2209, 256, 518,2042,2105,
3777,3657, 643,2298,1148,1779, 190, 989,3544, 414, 11,2135,2063,2979,1471, 403,
3678, 126, 770,1563, 671,2499,3216,2877, 600,1179, 307,2805,4937,1268,1297,2694,
252,4032,1448,1494,1331,1394, 127,2256, 222,1647,1035,1481,3056,1915,1048, 873,
3651, 210, 33,1608,2516, 200,1520, 415, 102, 0,3389,1287, 817, 91,3299,2940,
836,1814, 549,2197,1396,1669,2987,3582,2297,2848,4528,1070, 687, 20,1819, 121,
1552,1364,1461,1968,2617,3540,2824,2083, 177, 948,4938,2291, 110,4549,2066, 648,
3359,1755,2110,2114,4642,4845,1693,3937,3308,1257,1869,2123, 208,1804,3159,2992,
2531,2549,3361,2418,1350,2347,2800,2568,1291,2036,2680, 72, 842,1990, 212,1233,
1154,1586, 75,2027,3410,4900,1823,1337,2710,2676, 728,2810,1522,3026,4995, 157,
755,1050,4022, 710, 785,1936,2194,2085,1406,2777,2400, 150,1250,4049,1206, 807,
1910, 534, 529,3309,1721,1660, 274, 39,2827, 661,2670,1578, 925,3248,3815,1094,
4278,4901,4252, 41,1150,3747,2572,2227,4501,3658,4902,3813,3357,3617,2884,2258,
887, 538,4187,3199,1294,2439,3042,2329,2343,2497,1255, 107, 543,1527, 521,3478,
3568, 194,5062, 15, 961,3870,1241,1192,2664, 66,5215,3260,2111,1295,1127,2152,
3805,4135, 901,1164,1976, 398,1278, 530,1460, 748, 904,1054,1966,1426, 53,2909,
509, 523,2279,1534, 536,1019, 239,1685, 460,2353, 673,1065,2401,3600,4298,2272,
1272,2363, 284,1753,3679,4064,1695, 81, 815,2677,2757,2731,1386, 859, 500,4221,
2190,2566, 757,1006,2519,2068,1166,1455, 337,2654,3203,1863,1682,1914,3025,1252,
1409,1366, 847, 714,2834,2038,3209, 964,2970,1901, 885,2553,1078,1756,3049, 301,
1572,3326, 688,2130,1996,2429,1805,1648,2930,3421,2750,3652,3088, 262,1158,1254,
389,1641,1812, 526,1719, 923,2073,1073,1902, 468, 489,4625,1140, 857,2375,3070,
3319,2863, 380, 116,1328,2693,1161,2244, 273,1212,1884,2769,3011,1775,1142, 461,
3066,1200,2147,2212, 790, 702,2695,4222,1601,1058, 434,2338,5153,3640, 67,2360,
4099,2502, 618,3472,1329, 416,1132, 830,2782,1807,2653,3211,3510,1662, 192,2124,
296,3979,1739,1611,3684, 23, 118, 324, 446,1239,1225, 293,2520,3814,3795,2535,
3116, 17,1074, 467,2692,2201, 387,2922, 45,1326,3055,1645,3659,2817, 958, 243,
1903,2320,1339,2825,1784,3289, 356, 576, 865,2315,2381,3377,3916,1088,3122,1713,
1655, 935, 628,4689,1034,1327, 441, 800, 720, 894,1979,2183,1528,5289,2702,1071,
4046,3572,2399,1571,3281, 79, 761,1103, 327, 134, 758,1899,1371,1615, 879, 442,
215,2605,2579, 173,2048,2485,1057,2975,3317,1097,2253,3801,4263,1403,1650,2946,
814,4968,3487,1548,2644,1567,1285, 2, 295,2636, 97, 946,3576, 832, 141,4257,
3273, 760,3821,3521,3156,2607, 949,1024,1733,1516,1803,1920,2125,2283,2665,3180,
1501,2064,3560,2171,1592, 803,3518,1416, 732,3897,4258,1363,1362,2458, 119,1427,
602,1525,2608,1605,1639,3175, 694,3064, 10, 465, 76,2000,4846,4208, 444,3781,
1619,3353,2206,1273,3796, 740,2483, 320,1723,2377,3660,2619,1359,1137,1762,1724,
2345,2842,1850,1862, 912, 821,1866, 612,2625,1735,2573,3369,1093, 844, 89, 937,
930,1424,3564,2413,2972,1004,3046,3019,2011, 711,3171,1452,4178, 428, 801,1943,
432, 445,2811, 206,4136,1472, 730, 349, 73, 397,2802,2547, 998,1637,1167, 789,
396,3217, 154,1218, 716,1120,1780,2819,4826,1931,3334,3762,2139,1215,2627, 552,
3664,3628,3232,1405,2383,3111,1356,2652,3577,3320,3101,1703, 640,1045,1370,1246,
4996, 371,1575,2436,1621,2210, 984,4033,1734,2638, 16,4529, 663,2755,3255,1451,
3917,2257,1253,1955,2234,1263,2951, 214,1229, 617, 485, 359,1831,1969, 473,2310,
750,2058, 165, 80,2864,2419, 361,4344,2416,2479,1134, 796,3726,1266,2943, 860,
2715, 938, 390,2734,1313,1384, 248, 202, 877,1064,2854, 522,3907, 279,1602, 297,
2357, 395,3740, 137,2075, 944,4089,2584,1267,3802, 62,1533,2285, 178, 176, 780,
2440, 201,3707, 590, 478,1560,4354,2117,1075, 30, 74,4643,4004,1635,1441,2745,
776,2596, 238,1077,1692,1912,2844, 605, 499,1742,3947, 241,3053, 980,1749, 936,
2640,4511,2582, 515,1543,2162,5322,2892,2993, 890,2148,1924, 665,1827,3581,1032,
968,3163, 339,1044,1896, 270, 583,1791,1720,4367,1194,3488,3669, 43,2523,1657,
163,2167, 290,1209,1622,3378, 550, 634,2508,2510, 695,2634,2384,2512,1476,1414,
220,1469,2341,2138,2852,3183,2900,4939,2865,3502,1211,3680, 854,3227,1299,2976,
3172, 186,2998,1459, 443,1067,3251,1495, 321,1932,3054, 909, 753,1410,1828, 436,
2441,1119,1587,3164,2186,1258, 227, 231,1425,1890,3200,3942, 247, 959, 725,5254,
2741, 577,2158,2079, 929, 120, 174, 838,2813, 591,1115, 417,2024, 40,3240,1536,
1037, 291,4151,2354, 632,1298,2406,2500,3535,1825,1846,3451, 205,1171, 345,4238,
18,1163, 811, 685,2208,1217, 425,1312,1508,1175,4308,2552,1033, 587,1381,3059,
2984,3482, 340,1316,4023,3972, 792,3176, 519, 777,4690, 918, 933,4130,2981,3741,
90,3360,2911,2200,5184,4550, 609,3079,2030, 272,3379,2736, 363,3881,1130,1447,
286, 779, 357,1169,3350,3137,1630,1220,2687,2391, 747,1277,3688,2618,2682,2601,
1156,3196,5290,4034,3102,1689,3596,3128, 874, 219,2783, 798, 508,1843,2461, 269,
1658,1776,1392,1913,2983,3287,2866,2159,2372, 829,4076, 46,4253,2873,1889,1894,
915,1834,1631,2181,2318, 298, 664,2818,3555,2735, 954,3228,3117, 527,3511,2173,
681,2712,3033,2247,2346,3467,1652, 155,2164,3382, 113,1994, 450, 899, 494, 994,
1237,2958,1875,2336,1926,3727, 545,1577,1550, 633,3473, 204,1305,3072,2410,1956,
2471, 707,2134, 841,2195,2196,2663,3843,1026,4940, 990,3252,4997, 368,1092, 437,
3212,3258,1933,1829, 675,2977,2893, 412, 943,3723,4644,3294,3283,2230,2373,5154,
2389,2241,2661,2323,1404,2524, 593, 787, 677,3008,1275,2059, 438,2709,2609,2240,
2269,2246,1446, 36,1568,1373,3892,1574,2301,1456,3962, 693,2276,5216,2035,1143,
2720,1919,1797,1811,2763,4137,2597,1830,1699,1488,1198,2090, 424,1694, 312,3634,
3390,4179,3335,2252,1214, 561,1059,3243,2295,2561, 975,5155,2321,2751,3772, 472,
1537,3282,3398,1047,2077,2348,2878,1323,3340,3076, 690,2906, 51, 369, 170,3541,
1060,2187,2688,3670,2541,1083,1683, 928,3918, 459, 109,4427, 599,3744,4286, 143,
2101,2730,2490, 82,1588,3036,2121, 281,1860, 477,4035,1238,2812,3020,2716,3312,
1530,2188,2055,1317, 843, 636,1808,1173,3495, 649, 181,1002, 147,3641,1159,2414,
3750,2289,2795, 813,3123,2610,1136,4368, 5,3391,4541,2174, 420, 429,1728, 754,
1228,2115,2219, 347,2223,2733, 735,1518,3003,2355,3134,1764,3948,3329,1888,2424,
1001,1234,1972,3321,3363,1672,1021,1450,1584, 226, 765, 655,2526,3404,3244,2302,
3665, 731, 594,2184, 319,1576, 621, 658,2656,4299,2099,3864,1279,2071,2598,2739,
795,3086,3699,3908,1707,2352,2402,1382,3136,2475,1465,4847,3496,3865,1085,3004,
2591,1084, 213,2287,1963,3565,2250, 822, 793,4574,3187,1772,1789,3050, 595,1484,
1959,2770,1080,2650, 456, 422,2996, 940,3322,4328,4345,3092,2742, 965,2784, 739,
4124, 952,1358,2498,2949,2565, 332,2698,2378, 660,2260,2473,4194,3856,2919, 535,
1260,2651,1208,1428,1300,1949,1303,2942, 433,2455,2450,1251,1946, 614,1269, 641,
1306,1810,2737,3078,2912, 564,2365,1419,1415,1497,4460,2367,2185,1379,3005,1307,
3218,2175,1897,3063, 682,1157,4040,4005,1712,1160,1941,1399, 394, 402,2952,1573,
1151,2986,2404, 862, 299,2033,1489,3006, 346, 171,2886,3401,1726,2932, 168,2533,
47,2507,1030,3735,1145,3370,1395,1318,1579,3609,4560,2857,4116,1457,2529,1965,
504,1036,2690,2988,2405, 745,5871, 849,2397,2056,3081, 863,2359,3857,2096, 99,
1397,1769,2300,4428,1643,3455,1978,1757,3718,1440, 35,4879,3742,1296,4228,2280,
160,5063,1599,2013, 166, 520,3479,1646,3345,3012, 490,1937,1545,1264,2182,2505,
1096,1188,1369,1436,2421,1667,2792,2460,1270,2122, 727,3167,2143, 806,1706,1012,
1800,3037, 960,2218,1882, 805, 139,2456,1139,1521, 851,1052,3093,3089, 342,2039,
744,5097,1468,1502,1585,2087, 223, 939, 326,2140,2577, 892,2481,1623,4077, 982,
3708, 135,2131, 87,2503,3114,2326,1106, 876,1616, 547,2997,2831,2093,3441,4530,
4314, 9,3256,4229,4148, 659,1462,1986,1710,2046,2913,2231,4090,4880,5255,3392,
3274,1368,3689,4645,1477, 705,3384,3635,1068,1529,2941,1458,3782,1509, 100,1656,
2548, 718,2339, 408,1590,2780,3548,1838,4117,3719,1345,3530, 717,3442,2778,3220,
2898,1892,4590,3614,3371,2043,1998,1224,3483, 891, 635, 584,2559,3355, 733,1766,
1729,1172,3789,1891,2307, 781,2982,2271,1957,1580,5773,2633,2005,4195,3097,1535,
3213,1189,1934,5693,3262, 586,3118,1324,1598, 517,1564,2217,1868,1893,4445,3728,
2703,3139,1526,1787,1992,3882,2875,1549,1199,1056,2224,1904,2711,5098,4287, 338,
1993,3129,3489,2689,1809,2815,1997, 957,1855,3898,2550,3275,3057,1105,1319, 627,
1505,1911,1883,3526, 698,3629,3456,1833,1431, 746, 77,1261,2017,2296,1977,1885,
125,1334,1600, 525,1798,1109,2222,1470,1945, 559,2236,1186,3443,2476,1929,1411,
2411,3135,1777,3372,2621,1841,1613,3229, 668,1430,1839,2643,2916, 195,1989,2671,
2358,1387, 629,3205,2293,5256,4439, 123,1310, 888,1879,4300,3021,3605,1003,1162,
3192,2910,2010, 140,2395,2859, 55,1082,2012,2901, 662, 419,2081,1438, 680,2774,
4654,3912,1620,1731,1625,5035,4065,2328, 512,1344, 802,5443,2163,2311,2537, 524,
3399, 98,1155,2103,1918,2606,3925,2816,1393,2465,1504,3773,2177,3963,1478,4346,
180,1113,4655,3461,2028,1698, 833,2696,1235,1322,1594,4408,3623,3013,3225,2040,
3022, 541,2881, 607,3632,2029,1665,1219, 639,1385,1686,1099,2803,3231,1938,3188,
2858, 427, 676,2772,1168,2025, 454,3253,2486,3556, 230,1950, 580, 791,1991,1280,
1086,1974,2034, 630, 257,3338,2788,4903,1017, 86,4790, 966,2789,1995,1696,1131,
259,3095,4188,1308, 179,1463,5257, 289,4107,1248, 42,3413,1725,2288, 896,1947,
774,4474,4254, 604,3430,4264, 392,2514,2588, 452, 237,1408,3018, 988,4531,1970,
3034,3310, 540,2370,1562,1288,2990, 502,4765,1147, 4,1853,2708, 207, 294,2814,
4078,2902,2509, 684, 34,3105,3532,2551, 644, 709,2801,2344, 573,1727,3573,3557,
2021,1081,3100,4315,2100,3681, 199,2263,1837,2385, 146,3484,1195,2776,3949, 997,
1939,3973,1008,1091,1202,1962,1847,1149,4209,5444,1076, 493, 117,5400,2521, 972,
1490,2934,1796,4542,2374,1512,2933,2657, 413,2888,1135,2762,2314,2156,1355,2369,
766,2007,2527,2170,3124,2491,2593,2632,4757,2437, 234,3125,3591,1898,1750,1376,
1942,3468,3138, 570,2127,2145,3276,4131, 962, 132,1445,4196, 19, 941,3624,3480,
3366,1973,1374,4461,3431,2629, 283,2415,2275, 808,2887,3620,2112,2563,1353,3610,
955,1089,3103,1053, 96, 88,4097, 823,3808,1583, 399, 292,4091,3313, 421,1128,
642,4006, 903,2539,1877,2082, 596, 29,4066,1790, 722,2157, 130, 995,1569, 769,
1485, 464, 513,2213, 288,1923,1101,2453,4316, 133, 486,2445, 50, 625, 487,2207,
57, 423, 481,2962, 159,3729,1558, 491, 303, 482, 501, 240,2837, 112,3648,2392,
1783, 362, 8,3433,3422, 610,2793,3277,1390,1284,1654, 21,3823, 734, 367, 623,
193, 287, 374,1009,1483, 816, 476, 313,2255,2340,1262,2150,2899,1146,2581, 782,
2116,1659,2018,1880, 255,3586,3314,1110,2867,2137,2564, 986,2767,5185,2006, 650,
158, 926, 762, 881,3157,2717,2362,3587, 306,3690,3245,1542,3077,2427,1691,2478,
2118,2985,3490,2438, 539,2305, 983, 129,1754, 355,4201,2386, 827,2923, 104,1773,
2838,2771, 411,2905,3919, 376, 767, 122,1114, 828,2422,1817,3506, 266,3460,1007,
1609,4998, 945,2612,4429,2274, 726,1247,1964,2914,2199,2070,4002,4108, 657,3323,
1422, 579, 455,2764,4737,1222,2895,1670, 824,1223,1487,2525, 558, 861,3080, 598,
2659,2515,1967, 752,2583,2376,2214,4180, 977, 704,2464,4999,2622,4109,1210,2961,
819,1541, 142,2284, 44, 418, 457,1126,3730,4347,4626,1644,1876,3671,1864, 302,
1063,5694, 624, 723,1984,3745,1314,1676,2488,1610,1449,3558,3569,2166,2098, 409,
1011,2325,3704,2306, 818,1732,1383,1824,1844,3757, 999,2705,3497,1216,1423,2683,
2426,2954,2501,2726,2229,1475,2554,5064,1971,1794,1666,2014,1343, 783, 724, 191,
2434,1354,2220,5065,1763,2752,2472,4152, 131, 175,2885,3434, 92,1466,4920,2616,
3871,3872,3866, 128,1551,1632, 669,1854,3682,4691,4125,1230, 188,2973,3290,1302,
1213, 560,3266, 917, 763,3909,3249,1760, 868,1958, 764,1782,2097, 145,2277,3774,
4462, 64,1491,3062, 971,2132,3606,2442, 221,1226,1617, 218, 323,1185,3207,3147,
571, 619,1473,1005,1744,2281, 449,1887,2396,3685, 275, 375,3816,1743,3844,3731,
845,1983,2350,4210,1377, 773, 967,3499,3052,3743,2725,4007,1697,1022,3943,1464,
3264,2855,2722,1952,1029,2839,2467, 84,4383,2215, 820,1391,2015,2448,3672, 377,
1948,2168, 797,2545,3536,2578,2645, 94,2874,1678, 405,1259,3071, 771, 546,1315,
470,1243,3083, 895,2468, 981, 969,2037, 846,4181, 653,1276,2928, 14,2594, 557,
3007,2474, 156, 902,1338,1740,2574, 537,2518, 973,2282,2216,2433,1928, 138,2903,
1293,2631,1612, 646,3457, 839,2935, 111, 496,2191,2847, 589,3186, 149,3994,2060,
4031,2641,4067,3145,1870, 37,3597,2136,1025,2051,3009,3383,3549,1121,1016,3261,
1301, 251,2446,2599,2153, 872,3246, 637, 334,3705, 831, 884, 921,3065,3140,4092,
2198,1944, 246,2964, 108,2045,1152,1921,2308,1031, 203,3173,4170,1907,3890, 810,
1401,2003,1690, 506, 647,1242,2828,1761,1649,3208,2249,1589,3709,2931,5156,1708,
498, 666,2613, 834,3817,1231, 184,2851,1124, 883,3197,2261,3710,1765,1553,2658,
1178,2639,2351, 93,1193, 942,2538,2141,4402, 235,1821, 870,1591,2192,1709,1871,
3341,1618,4126,2595,2334, 603, 651, 69, 701, 268,2662,3411,2555,1380,1606, 503,
448, 254,2371,2646, 574,1187,2309,1770, 322,2235,1292,1801, 305, 566,1133, 229,
2067,2057, 706, 167, 483,2002,2672,3295,1820,3561,3067, 316, 378,2746,3452,1112,
136,1981, 507,1651,2917,1117, 285,4591, 182,2580,3522,1304, 335,3303,1835,2504,
1795,1792,2248, 674,1018,2106,2449,1857,2292,2845, 976,3047,1781,2600,2727,1389,
1281, 52,3152, 153, 265,3950, 672,3485,3951,4463, 430,1183, 365, 278,2169, 27,
1407,1336,2304, 209,1340,1730,2202,1852,2403,2883, 979,1737,1062, 631,2829,2542,
3876,2592, 825,2086,2226,3048,3625, 352,1417,3724, 542, 991, 431,1351,3938,1861,
2294, 826,1361,2927,3142,3503,1738, 463,2462,2723, 582,1916,1595,2808, 400,3845,
3891,2868,3621,2254, 58,2492,1123, 910,2160,2614,1372,1603,1196,1072,3385,1700,
3267,1980, 696, 480,2430, 920, 799,1570,2920,1951,2041,4047,2540,1321,4223,2469,
3562,2228,1271,2602, 401,2833,3351,2575,5157, 907,2312,1256, 410, 263,3507,1582,
996, 678,1849,2316,1480, 908,3545,2237, 703,2322, 667,1826,2849,1531,2604,2999,
2407,3146,2151,2630,1786,3711, 469,3542, 497,3899,2409, 858, 837,4446,3393,1274,
786, 620,1845,2001,3311, 484, 308,3367,1204,1815,3691,2332,1532,2557,1842,2020,
2724,1927,2333,4440, 567, 22,1673,2728,4475,1987,1858,1144,1597, 101,1832,3601,
12, 974,3783,4391, 951,1412, 1,3720, 453,4608,4041, 528,1041,1027,3230,2628,
1129, 875,1051,3291,1203,2262,1069,2860,2799,2149,2615,3278, 144,1758,3040, 31,
475,1680, 366,2685,3184, 311,1642,4008,2466,5036,1593,1493,2809, 216,1420,1668,
233, 304,2128,3284, 232,1429,1768,1040,2008,3407,2740,2967,2543, 242,2133, 778,
1565,2022,2620, 505,2189,2756,1098,2273, 372,1614, 708, 553,2846,2094,2278, 169,
3626,2835,4161, 228,2674,3165, 809,1454,1309, 466,1705,1095, 900,3423, 880,2667,
3751,5258,2317,3109,2571,4317,2766,1503,1342, 866,4447,1118, 63,2076, 314,1881,
1348,1061, 172, 978,3515,1747, 532, 511,3970, 6, 601, 905,2699,3300,1751, 276,
1467,3725,2668, 65,4239,2544,2779,2556,1604, 578,2451,1802, 992,2331,2624,1320,
3446, 713,1513,1013, 103,2786,2447,1661, 886,1702, 916, 654,3574,2031,1556, 751,
2178,2821,2179,1498,1538,2176, 271, 914,2251,2080,1325, 638,1953,2937,3877,2432,
2754, 95,3265,1716, 260,1227,4083, 775, 106,1357,3254, 426,1607, 555,2480, 772,
1985, 244,2546, 474, 495,1046,2611,1851,2061, 71,2089,1675,2590, 742,3758,2843,
3222,1433, 267,2180,2576,2826,2233,2092,3913,2435, 956,1745,3075, 856,2113,1116,
451, 3,1988,2896,1398, 993,2463,1878,2049,1341,2718,2721,2870,2108, 712,2904,
4363,2753,2324, 277,2872,2349,2649, 384, 987, 435, 691,3000, 922, 164,3939, 652,
1500,1184,4153,2482,3373,2165,4848,2335,3775,3508,3154,2806,2830,1554,2102,1664,
2530,1434,2408, 893,1547,2623,3447,2832,2242,2532,3169,2856,3223,2078, 49,3770,
3469, 462, 318, 656,2259,3250,3069, 679,1629,2758, 344,1138,1104,3120,1836,1283,
3115,2154,1437,4448, 934, 759,1999, 794,2862,1038, 533,2560,1722,2342, 855,2626,
1197,1663,4476,3127, 85,4240,2528, 25,1111,1181,3673, 407,3470,4561,2679,2713,
768,1925,2841,3986,1544,1165, 932, 373,1240,2146,1930,2673, 721,4766, 354,4333,
391,2963, 187, 61,3364,1442,1102, 330,1940,1767, 341,3809,4118, 393,2496,2062,
2211, 105, 331, 300, 439, 913,1332, 626, 379,3304,1557, 328, 689,3952, 309,1555,
931, 317,2517,3027, 325, 569, 686,2107,3084, 60,1042,1333,2794, 264,3177,4014,
1628, 258,3712, 7,4464,1176,1043,1778, 683, 114,1975, 78,1492, 383,1886, 510,
386, 645,5291,2891,2069,3305,4138,3867,2939,2603,2493,1935,1066,1848,3588,1015,
1282,1289,4609, 697,1453,3044,2666,3611,1856,2412, 54, 719,1330, 568,3778,2459,
1748, 788, 492, 551,1191,1000, 488,3394,3763, 282,1799, 348,2016,1523,3155,2390,
1049, 382,2019,1788,1170, 729,2968,3523, 897,3926,2785,2938,3292, 350,2319,3238,
1718,1717,2655,3453,3143,4465, 161,2889,2980,2009,1421, 56,1908,1640,2387,2232,
1917,1874,2477,4921, 148, 83,3438, 592,4245,2882,1822,1055, 741, 115,1496,1624,
381,1638,4592,1020, 516,3214, 458, 947,4575,1432, 211,1514,2926,1865,2142, 189,
852,1221,1400,1486, 882,2299,4036, 351, 28,1122, 700,6479,6480,6481,6482,6483, # last 512
# Everything below is of no interest for detection purposes
5508,6484,3900,3414,3974,4441,4024,3537,4037,5628,5099,3633,6485,3148,6486,3636,
5509,3257,5510,5973,5445,5872,4941,4403,3174,4627,5873,6276,2286,4230,5446,5874,
5122,6102,6103,4162,5447,5123,5323,4849,6277,3980,3851,5066,4246,5774,5067,6278,
3001,2807,5695,3346,5775,5974,5158,5448,6487,5975,5976,5776,3598,6279,5696,4806,
4211,4154,6280,6488,6489,6490,6281,4212,5037,3374,4171,6491,4562,4807,4722,4827,
5977,6104,4532,4079,5159,5324,5160,4404,3858,5359,5875,3975,4288,4610,3486,4512,
5325,3893,5360,6282,6283,5560,2522,4231,5978,5186,5449,2569,3878,6284,5401,3578,
4415,6285,4656,5124,5979,2506,4247,4449,3219,3417,4334,4969,4329,6492,4576,4828,
4172,4416,4829,5402,6286,3927,3852,5361,4369,4830,4477,4867,5876,4173,6493,6105,
4657,6287,6106,5877,5450,6494,4155,4868,5451,3700,5629,4384,6288,6289,5878,3189,
4881,6107,6290,6495,4513,6496,4692,4515,4723,5100,3356,6497,6291,3810,4080,5561,
3570,4430,5980,6498,4355,5697,6499,4724,6108,6109,3764,4050,5038,5879,4093,3226,
6292,5068,5217,4693,3342,5630,3504,4831,4377,4466,4309,5698,4431,5777,6293,5778,
4272,3706,6110,5326,3752,4676,5327,4273,5403,4767,5631,6500,5699,5880,3475,5039,
6294,5562,5125,4348,4301,4482,4068,5126,4593,5700,3380,3462,5981,5563,3824,5404,
4970,5511,3825,4738,6295,6501,5452,4516,6111,5881,5564,6502,6296,5982,6503,4213,
4163,3454,6504,6112,4009,4450,6113,4658,6297,6114,3035,6505,6115,3995,4904,4739,
4563,4942,4110,5040,3661,3928,5362,3674,6506,5292,3612,4791,5565,4149,5983,5328,
5259,5021,4725,4577,4564,4517,4364,6298,5405,4578,5260,4594,4156,4157,5453,3592,
3491,6507,5127,5512,4709,4922,5984,5701,4726,4289,6508,4015,6116,5128,4628,3424,
4241,5779,6299,4905,6509,6510,5454,5702,5780,6300,4365,4923,3971,6511,5161,3270,
3158,5985,4100, 867,5129,5703,6117,5363,3695,3301,5513,4467,6118,6512,5455,4232,
4242,4629,6513,3959,4478,6514,5514,5329,5986,4850,5162,5566,3846,4694,6119,5456,
4869,5781,3779,6301,5704,5987,5515,4710,6302,5882,6120,4392,5364,5705,6515,6121,
6516,6517,3736,5988,5457,5989,4695,2457,5883,4551,5782,6303,6304,6305,5130,4971,
6122,5163,6123,4870,3263,5365,3150,4871,6518,6306,5783,5069,5706,3513,3498,4409,
5330,5632,5366,5458,5459,3991,5990,4502,3324,5991,5784,3696,4518,5633,4119,6519,
4630,5634,4417,5707,4832,5992,3418,6124,5993,5567,4768,5218,6520,4595,3458,5367,
6125,5635,6126,4202,6521,4740,4924,6307,3981,4069,4385,6308,3883,2675,4051,3834,
4302,4483,5568,5994,4972,4101,5368,6309,5164,5884,3922,6127,6522,6523,5261,5460,
5187,4164,5219,3538,5516,4111,3524,5995,6310,6311,5369,3181,3386,2484,5188,3464,
5569,3627,5708,6524,5406,5165,4677,4492,6312,4872,4851,5885,4468,5996,6313,5709,
5710,6128,2470,5886,6314,5293,4882,5785,3325,5461,5101,6129,5711,5786,6525,4906,
6526,6527,4418,5887,5712,4808,2907,3701,5713,5888,6528,3765,5636,5331,6529,6530,
3593,5889,3637,4943,3692,5714,5787,4925,6315,6130,5462,4405,6131,6132,6316,5262,
6531,6532,5715,3859,5716,5070,4696,5102,3929,5788,3987,4792,5997,6533,6534,3920,
4809,5000,5998,6535,2974,5370,6317,5189,5263,5717,3826,6536,3953,5001,4883,3190,
5463,5890,4973,5999,4741,6133,6134,3607,5570,6000,4711,3362,3630,4552,5041,6318,
6001,2950,2953,5637,4646,5371,4944,6002,2044,4120,3429,6319,6537,5103,4833,6538,
6539,4884,4647,3884,6003,6004,4758,3835,5220,5789,4565,5407,6540,6135,5294,4697,
4852,6320,6321,3206,4907,6541,6322,4945,6542,6136,6543,6323,6005,4631,3519,6544,
5891,6545,5464,3784,5221,6546,5571,4659,6547,6324,6137,5190,6548,3853,6549,4016,
4834,3954,6138,5332,3827,4017,3210,3546,4469,5408,5718,3505,4648,5790,5131,5638,
5791,5465,4727,4318,6325,6326,5792,4553,4010,4698,3439,4974,3638,4335,3085,6006,
5104,5042,5166,5892,5572,6327,4356,4519,5222,5573,5333,5793,5043,6550,5639,5071,
4503,6328,6139,6551,6140,3914,3901,5372,6007,5640,4728,4793,3976,3836,4885,6552,
4127,6553,4451,4102,5002,6554,3686,5105,6555,5191,5072,5295,4611,5794,5296,6556,
5893,5264,5894,4975,5466,5265,4699,4976,4370,4056,3492,5044,4886,6557,5795,4432,
4769,4357,5467,3940,4660,4290,6141,4484,4770,4661,3992,6329,4025,4662,5022,4632,
4835,4070,5297,4663,4596,5574,5132,5409,5895,6142,4504,5192,4664,5796,5896,3885,
5575,5797,5023,4810,5798,3732,5223,4712,5298,4084,5334,5468,6143,4052,4053,4336,
4977,4794,6558,5335,4908,5576,5224,4233,5024,4128,5469,5225,4873,6008,5045,4729,
4742,4633,3675,4597,6559,5897,5133,5577,5003,5641,5719,6330,6560,3017,2382,3854,
4406,4811,6331,4393,3964,4946,6561,2420,3722,6562,4926,4378,3247,1736,4442,6332,
5134,6333,5226,3996,2918,5470,4319,4003,4598,4743,4744,4485,3785,3902,5167,5004,
5373,4394,5898,6144,4874,1793,3997,6334,4085,4214,5106,5642,4909,5799,6009,4419,
4189,3330,5899,4165,4420,5299,5720,5227,3347,6145,4081,6335,2876,3930,6146,3293,
3786,3910,3998,5900,5300,5578,2840,6563,5901,5579,6147,3531,5374,6564,6565,5580,
4759,5375,6566,6148,3559,5643,6336,6010,5517,6337,6338,5721,5902,3873,6011,6339,
6567,5518,3868,3649,5722,6568,4771,4947,6569,6149,4812,6570,2853,5471,6340,6341,
5644,4795,6342,6012,5723,6343,5724,6013,4349,6344,3160,6150,5193,4599,4514,4493,
5168,4320,6345,4927,3666,4745,5169,5903,5005,4928,6346,5725,6014,4730,4203,5046,
4948,3395,5170,6015,4150,6016,5726,5519,6347,5047,3550,6151,6348,4197,4310,5904,
6571,5581,2965,6152,4978,3960,4291,5135,6572,5301,5727,4129,4026,5905,4853,5728,
5472,6153,6349,4533,2700,4505,5336,4678,3583,5073,2994,4486,3043,4554,5520,6350,
6017,5800,4487,6351,3931,4103,5376,6352,4011,4321,4311,4190,5136,6018,3988,3233,
4350,5906,5645,4198,6573,5107,3432,4191,3435,5582,6574,4139,5410,6353,5411,3944,
5583,5074,3198,6575,6354,4358,6576,5302,4600,5584,5194,5412,6577,6578,5585,5413,
5303,4248,5414,3879,4433,6579,4479,5025,4854,5415,6355,4760,4772,3683,2978,4700,
3797,4452,3965,3932,3721,4910,5801,6580,5195,3551,5907,3221,3471,3029,6019,3999,
5908,5909,5266,5267,3444,3023,3828,3170,4796,5646,4979,4259,6356,5647,5337,3694,
6357,5648,5338,4520,4322,5802,3031,3759,4071,6020,5586,4836,4386,5048,6581,3571,
4679,4174,4949,6154,4813,3787,3402,3822,3958,3215,3552,5268,4387,3933,4950,4359,
6021,5910,5075,3579,6358,4234,4566,5521,6359,3613,5049,6022,5911,3375,3702,3178,
4911,5339,4521,6582,6583,4395,3087,3811,5377,6023,6360,6155,4027,5171,5649,4421,
4249,2804,6584,2270,6585,4000,4235,3045,6156,5137,5729,4140,4312,3886,6361,4330,
6157,4215,6158,3500,3676,4929,4331,3713,4930,5912,4265,3776,3368,5587,4470,4855,
3038,4980,3631,6159,6160,4132,4680,6161,6362,3923,4379,5588,4255,6586,4121,6587,
6363,4649,6364,3288,4773,4774,6162,6024,6365,3543,6588,4274,3107,3737,5050,5803,
4797,4522,5589,5051,5730,3714,4887,5378,4001,4523,6163,5026,5522,4701,4175,2791,
3760,6589,5473,4224,4133,3847,4814,4815,4775,3259,5416,6590,2738,6164,6025,5304,
3733,5076,5650,4816,5590,6591,6165,6592,3934,5269,6593,3396,5340,6594,5804,3445,
3602,4042,4488,5731,5732,3525,5591,4601,5196,6166,6026,5172,3642,4612,3202,4506,
4798,6366,3818,5108,4303,5138,5139,4776,3332,4304,2915,3415,4434,5077,5109,4856,
2879,5305,4817,6595,5913,3104,3144,3903,4634,5341,3133,5110,5651,5805,6167,4057,
5592,2945,4371,5593,6596,3474,4182,6367,6597,6168,4507,4279,6598,2822,6599,4777,
4713,5594,3829,6169,3887,5417,6170,3653,5474,6368,4216,2971,5228,3790,4579,6369,
5733,6600,6601,4951,4746,4555,6602,5418,5475,6027,3400,4665,5806,6171,4799,6028,
5052,6172,3343,4800,4747,5006,6370,4556,4217,5476,4396,5229,5379,5477,3839,5914,
5652,5807,4714,3068,4635,5808,6173,5342,4192,5078,5419,5523,5734,6174,4557,6175,
4602,6371,6176,6603,5809,6372,5735,4260,3869,5111,5230,6029,5112,6177,3126,4681,
5524,5915,2706,3563,4748,3130,6178,4018,5525,6604,6605,5478,4012,4837,6606,4534,
4193,5810,4857,3615,5479,6030,4082,3697,3539,4086,5270,3662,4508,4931,5916,4912,
5811,5027,3888,6607,4397,3527,3302,3798,2775,2921,2637,3966,4122,4388,4028,4054,
1633,4858,5079,3024,5007,3982,3412,5736,6608,3426,3236,5595,3030,6179,3427,3336,
3279,3110,6373,3874,3039,5080,5917,5140,4489,3119,6374,5812,3405,4494,6031,4666,
4141,6180,4166,6032,5813,4981,6609,5081,4422,4982,4112,3915,5653,3296,3983,6375,
4266,4410,5654,6610,6181,3436,5082,6611,5380,6033,3819,5596,4535,5231,5306,5113,
6612,4952,5918,4275,3113,6613,6376,6182,6183,5814,3073,4731,4838,5008,3831,6614,
4888,3090,3848,4280,5526,5232,3014,5655,5009,5737,5420,5527,6615,5815,5343,5173,
5381,4818,6616,3151,4953,6617,5738,2796,3204,4360,2989,4281,5739,5174,5421,5197,
3132,5141,3849,5142,5528,5083,3799,3904,4839,5480,2880,4495,3448,6377,6184,5271,
5919,3771,3193,6034,6035,5920,5010,6036,5597,6037,6378,6038,3106,5422,6618,5423,
5424,4142,6619,4889,5084,4890,4313,5740,6620,3437,5175,5307,5816,4199,5198,5529,
5817,5199,5656,4913,5028,5344,3850,6185,2955,5272,5011,5818,4567,4580,5029,5921,
3616,5233,6621,6622,6186,4176,6039,6379,6380,3352,5200,5273,2908,5598,5234,3837,
5308,6623,6624,5819,4496,4323,5309,5201,6625,6626,4983,3194,3838,4167,5530,5922,
5274,6381,6382,3860,3861,5599,3333,4292,4509,6383,3553,5481,5820,5531,4778,6187,
3955,3956,4324,4389,4218,3945,4325,3397,2681,5923,4779,5085,4019,5482,4891,5382,
5383,6040,4682,3425,5275,4094,6627,5310,3015,5483,5657,4398,5924,3168,4819,6628,
5925,6629,5532,4932,4613,6041,6630,4636,6384,4780,4204,5658,4423,5821,3989,4683,
5822,6385,4954,6631,5345,6188,5425,5012,5384,3894,6386,4490,4104,6632,5741,5053,
6633,5823,5926,5659,5660,5927,6634,5235,5742,5824,4840,4933,4820,6387,4859,5928,
4955,6388,4143,3584,5825,5346,5013,6635,5661,6389,5014,5484,5743,4337,5176,5662,
6390,2836,6391,3268,6392,6636,6042,5236,6637,4158,6638,5744,5663,4471,5347,3663,
4123,5143,4293,3895,6639,6640,5311,5929,5826,3800,6189,6393,6190,5664,5348,3554,
3594,4749,4603,6641,5385,4801,6043,5827,4183,6642,5312,5426,4761,6394,5665,6191,
4715,2669,6643,6644,5533,3185,5427,5086,5930,5931,5386,6192,6044,6645,4781,4013,
5745,4282,4435,5534,4390,4267,6045,5746,4984,6046,2743,6193,3501,4087,5485,5932,
5428,4184,4095,5747,4061,5054,3058,3862,5933,5600,6646,5144,3618,6395,3131,5055,
5313,6396,4650,4956,3855,6194,3896,5202,4985,4029,4225,6195,6647,5828,5486,5829,
3589,3002,6648,6397,4782,5276,6649,6196,6650,4105,3803,4043,5237,5830,6398,4096,
3643,6399,3528,6651,4453,3315,4637,6652,3984,6197,5535,3182,3339,6653,3096,2660,
6400,6654,3449,5934,4250,4236,6047,6401,5831,6655,5487,3753,4062,5832,6198,6199,
6656,3766,6657,3403,4667,6048,6658,4338,2897,5833,3880,2797,3780,4326,6659,5748,
5015,6660,5387,4351,5601,4411,6661,3654,4424,5935,4339,4072,5277,4568,5536,6402,
6662,5238,6663,5349,5203,6200,5204,6201,5145,4536,5016,5056,4762,5834,4399,4957,
6202,6403,5666,5749,6664,4340,6665,5936,5177,5667,6666,6667,3459,4668,6404,6668,
6669,4543,6203,6670,4276,6405,4480,5537,6671,4614,5205,5668,6672,3348,2193,4763,
6406,6204,5937,5602,4177,5669,3419,6673,4020,6205,4443,4569,5388,3715,3639,6407,
6049,4058,6206,6674,5938,4544,6050,4185,4294,4841,4651,4615,5488,6207,6408,6051,
5178,3241,3509,5835,6208,4958,5836,4341,5489,5278,6209,2823,5538,5350,5206,5429,
6675,4638,4875,4073,3516,4684,4914,4860,5939,5603,5389,6052,5057,3237,5490,3791,
6676,6409,6677,4821,4915,4106,5351,5058,4243,5539,4244,5604,4842,4916,5239,3028,
3716,5837,5114,5605,5390,5940,5430,6210,4332,6678,5540,4732,3667,3840,6053,4305,
3408,5670,5541,6410,2744,5240,5750,6679,3234,5606,6680,5607,5671,3608,4283,4159,
4400,5352,4783,6681,6411,6682,4491,4802,6211,6412,5941,6413,6414,5542,5751,6683,
4669,3734,5942,6684,6415,5943,5059,3328,4670,4144,4268,6685,6686,6687,6688,4372,
3603,6689,5944,5491,4373,3440,6416,5543,4784,4822,5608,3792,4616,5838,5672,3514,
5391,6417,4892,6690,4639,6691,6054,5673,5839,6055,6692,6056,5392,6212,4038,5544,
5674,4497,6057,6693,5840,4284,5675,4021,4545,5609,6418,4454,6419,6213,4113,4472,
5314,3738,5087,5279,4074,5610,4959,4063,3179,4750,6058,6420,6214,3476,4498,4716,
5431,4960,4685,6215,5241,6694,6421,6216,6695,5841,5945,6422,3748,5946,5179,3905,
5752,5545,5947,4374,6217,4455,6423,4412,6218,4803,5353,6696,3832,5280,6219,4327,
4702,6220,6221,6059,4652,5432,6424,3749,4751,6425,5753,4986,5393,4917,5948,5030,
5754,4861,4733,6426,4703,6697,6222,4671,5949,4546,4961,5180,6223,5031,3316,5281,
6698,4862,4295,4934,5207,3644,6427,5842,5950,6428,6429,4570,5843,5282,6430,6224,
5088,3239,6060,6699,5844,5755,6061,6431,2701,5546,6432,5115,5676,4039,3993,3327,
4752,4425,5315,6433,3941,6434,5677,4617,4604,3074,4581,6225,5433,6435,6226,6062,
4823,5756,5116,6227,3717,5678,4717,5845,6436,5679,5846,6063,5847,6064,3977,3354,
6437,3863,5117,6228,5547,5394,4499,4524,6229,4605,6230,4306,4500,6700,5951,6065,
3693,5952,5089,4366,4918,6701,6231,5548,6232,6702,6438,4704,5434,6703,6704,5953,
4168,6705,5680,3420,6706,5242,4407,6066,3812,5757,5090,5954,4672,4525,3481,5681,
4618,5395,5354,5316,5955,6439,4962,6707,4526,6440,3465,4673,6067,6441,5682,6708,
5435,5492,5758,5683,4619,4571,4674,4804,4893,4686,5493,4753,6233,6068,4269,6442,
6234,5032,4705,5146,5243,5208,5848,6235,6443,4963,5033,4640,4226,6236,5849,3387,
6444,6445,4436,4437,5850,4843,5494,4785,4894,6709,4361,6710,5091,5956,3331,6237,
4987,5549,6069,6711,4342,3517,4473,5317,6070,6712,6071,4706,6446,5017,5355,6713,
6714,4988,5436,6447,4734,5759,6715,4735,4547,4456,4754,6448,5851,6449,6450,3547,
5852,5318,6451,6452,5092,4205,6716,6238,4620,4219,5611,6239,6072,4481,5760,5957,
5958,4059,6240,6453,4227,4537,6241,5761,4030,4186,5244,5209,3761,4457,4876,3337,
5495,5181,6242,5959,5319,5612,5684,5853,3493,5854,6073,4169,5613,5147,4895,6074,
5210,6717,5182,6718,3830,6243,2798,3841,6075,6244,5855,5614,3604,4606,5496,5685,
5118,5356,6719,6454,5960,5357,5961,6720,4145,3935,4621,5119,5962,4261,6721,6455,
4786,5963,4375,4582,6245,6246,6247,6076,5437,4877,5856,3376,4380,6248,4160,6722,
5148,6456,5211,6457,6723,4718,6458,6724,6249,5358,4044,3297,6459,6250,5857,5615,
5497,5245,6460,5498,6725,6251,6252,5550,3793,5499,2959,5396,6461,6462,4572,5093,
5500,5964,3806,4146,6463,4426,5762,5858,6077,6253,4755,3967,4220,5965,6254,4989,
5501,6464,4352,6726,6078,4764,2290,5246,3906,5438,5283,3767,4964,2861,5763,5094,
6255,6256,4622,5616,5859,5860,4707,6727,4285,4708,4824,5617,6257,5551,4787,5212,
4965,4935,4687,6465,6728,6466,5686,6079,3494,4413,2995,5247,5966,5618,6729,5967,
5764,5765,5687,5502,6730,6731,6080,5397,6467,4990,6258,6732,4538,5060,5619,6733,
4719,5688,5439,5018,5149,5284,5503,6734,6081,4607,6259,5120,3645,5861,4583,6260,
4584,4675,5620,4098,5440,6261,4863,2379,3306,4585,5552,5689,4586,5285,6735,4864,
6736,5286,6082,6737,4623,3010,4788,4381,4558,5621,4587,4896,3698,3161,5248,4353,
4045,6262,3754,5183,4588,6738,6263,6739,6740,5622,3936,6741,6468,6742,6264,5095,
6469,4991,5968,6743,4992,6744,6083,4897,6745,4256,5766,4307,3108,3968,4444,5287,
3889,4343,6084,4510,6085,4559,6086,4898,5969,6746,5623,5061,4919,5249,5250,5504,
5441,6265,5320,4878,3242,5862,5251,3428,6087,6747,4237,5624,5442,6266,5553,4539,
6748,2585,3533,5398,4262,6088,5150,4736,4438,6089,6267,5505,4966,6749,6268,6750,
6269,5288,5554,3650,6090,6091,4624,6092,5690,6751,5863,4270,5691,4277,5555,5864,
6752,5692,4720,4865,6470,5151,4688,4825,6753,3094,6754,6471,3235,4653,6755,5213,
5399,6756,3201,4589,5865,4967,6472,5866,6473,5019,3016,6757,5321,4756,3957,4573,
6093,4993,5767,4721,6474,6758,5625,6759,4458,6475,6270,6760,5556,4994,5214,5252,
6271,3875,5768,6094,5034,5506,4376,5769,6761,2120,6476,5253,5770,6762,5771,5970,
3990,5971,5557,5558,5772,6477,6095,2787,4641,5972,5121,6096,6097,6272,6763,3703,
5867,5507,6273,4206,6274,4789,6098,6764,3619,3646,3833,3804,2394,3788,4936,3978,
4866,4899,6099,6100,5559,6478,6765,3599,5868,6101,5869,5870,6275,6766,4527,6767)
# flake8: noqa
| gpl-2.0 |
Bysmyyr/chromium-crosswalk | tools/json_schema_compiler/features_h_generator.py | 94 | 2686 | # Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import os.path
from code import Code
import cpp_util
class HGenerator(object):
def Generate(self, features, source_file, namespace):
return _Generator(features, source_file, namespace).Generate()
class _Generator(object):
"""A .cc generator for features.
"""
def __init__(self, features, source_file, namespace):
self._feature_defs = features
self._source_file = source_file
self._source_file_filename, _ = os.path.splitext(source_file)
self._class_name = cpp_util.ClassName(self._source_file_filename)
self._namespace = namespace
def Generate(self):
"""Generates a Code object for features.
"""
c = Code()
(c.Append(cpp_util.CHROMIUM_LICENSE)
.Append()
.Append(cpp_util.GENERATED_FEATURE_MESSAGE % self._source_file)
.Append()
)
# Hack: for the purpose of gyp the header file will always be the source
# file with its file extension replaced by '.h'. Assume so.
output_file = os.path.splitext(self._namespace.source_file)[0] + '.h'
ifndef_name = cpp_util.GenerateIfndefName(output_file)
(c.Append('#ifndef %s' % ifndef_name)
.Append('#define %s' % ifndef_name)
.Append()
)
(c.Append('#include <map>')
.Append('#include <string>')
.Append()
.Concat(cpp_util.OpenNamespace(self._namespace))
.Append()
)
(c.Append('class %s {' % self._class_name)
.Append(' public:')
.Sblock()
.Concat(self._GeneratePublicBody())
.Eblock()
.Append(' private:')
.Sblock()
.Concat(self._GeneratePrivateBody())
.Eblock('};')
.Append()
.Cblock(cpp_util.CloseNamespace(self._namespace))
)
(c.Append('#endif // %s' % ifndef_name)
.Append()
)
return c
def _GeneratePublicBody(self):
c = Code()
(c.Append('%s();' % self._class_name)
.Append()
.Append('enum ID {')
.Concat(self._GenerateEnumConstants())
.Eblock('};')
.Append()
.Append('const char* ToString(ID id) const;')
.Append('ID FromString(const std::string& id) const;')
.Append()
)
return c
def _GeneratePrivateBody(self):
return Code().Append('std::map<std::string, '
'%s::ID> features_;' % self._class_name)
def _GenerateEnumConstants(self):
c = Code()
(c.Sblock()
.Append('kUnknown,')
)
for feature in self._feature_defs:
c.Append('%s,' % cpp_util.ConstantName(feature.name))
c.Append('kEnumBoundary')
return c
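# Illustrative sketch of the emitted header (hypothetical feature name,
# not a file from the Chromium tree): for a source file my_features.json
# defining a feature 'someFeature', Generate() produces roughly:
#
#   #ifndef <GENERATED_IFNDEF_NAME>
#   #define <GENERATED_IFNDEF_NAME>
#
#   #include <map>
#   #include <string>
#
#   class MyFeatures {
#    public:
#     MyFeatures();
#     enum ID {
#       kUnknown,
#       kSomeFeature,
#       kEnumBoundary
#     };
#     const char* ToString(ID id) const;
#     ID FromString(const std::string& id) const;
#    private:
#     std::map<std::string, MyFeatures::ID> features_;
#   };
#
#   #endif  // <GENERATED_IFNDEF_NAME>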
| bsd-3-clause |
gnowxilef/plexpy | lib/cherrypy/_cprequest.py | 58 | 37180 |
import os
import sys
import time
import warnings
import cherrypy
from cherrypy._cpcompat import basestring, copykeys, ntob, unicodestr
from cherrypy._cpcompat import SimpleCookie, CookieError, py3k
from cherrypy import _cpreqbody, _cpconfig
from cherrypy._cperror import format_exc, bare_error
from cherrypy.lib import httputil, file_generator
class Hook(object):
"""A callback and its metadata: failsafe, priority, and kwargs."""
callback = None
"""
The bare callable that this Hook object is wrapping, which will
be called when the Hook is called."""
failsafe = False
"""
If True, the callback is guaranteed to run even if other callbacks
from the same call point raise exceptions."""
priority = 50
"""
Defines the order of execution for a list of Hooks. Priority numbers
should be limited to the closed interval [0, 100], but values outside
this range are acceptable, as are fractional values."""
kwargs = {}
"""
A set of keyword arguments that will be passed to the
callable on each call."""
def __init__(self, callback, failsafe=None, priority=None, **kwargs):
self.callback = callback
if failsafe is None:
failsafe = getattr(callback, "failsafe", False)
self.failsafe = failsafe
if priority is None:
priority = getattr(callback, "priority", 50)
self.priority = priority
self.kwargs = kwargs
def __lt__(self, other):
# Python 3
return self.priority < other.priority
def __cmp__(self, other):
# Python 2
return cmp(self.priority, other.priority)
def __call__(self):
"""Run self.callback(**self.kwargs)."""
return self.callback(**self.kwargs)
def __repr__(self):
cls = self.__class__
return ("%s.%s(callback=%r, failsafe=%r, priority=%r, %s)"
% (cls.__module__, cls.__name__, self.callback,
self.failsafe, self.priority,
", ".join(['%s=%r' % (k, v)
for k, v in self.kwargs.items()])))
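# Illustrative ordering note (hypothetical priorities): three Hooks
# attached at the same call point with priorities 10, 50 and 80 run
# lowest-first once HookMap.run() sorts them; __lt__ (Python 3) and
# __cmp__ (Python 2) both compare on .priority.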
class HookMap(dict):
"""A map of call points to lists of callbacks (Hook objects)."""
def __new__(cls, points=None):
d = dict.__new__(cls)
for p in points or []:
d[p] = []
return d
def __init__(self, *a, **kw):
pass
def attach(self, point, callback, failsafe=None, priority=None, **kwargs):
"""Append a new Hook made from the supplied arguments."""
self[point].append(Hook(callback, failsafe, priority, **kwargs))
def run(self, point):
"""Execute all registered Hooks (callbacks) for the given point."""
exc = None
hooks = self[point]
hooks.sort()
for hook in hooks:
# Some hooks are guaranteed to run even if others at
# the same hookpoint fail. We will still log the failure,
# but proceed on to the next hook. The only way
# to stop all processing from one of these hooks is
# to raise SystemExit and stop the whole server.
if exc is None or hook.failsafe:
try:
hook()
except (KeyboardInterrupt, SystemExit):
raise
except (cherrypy.HTTPError, cherrypy.HTTPRedirect,
cherrypy.InternalRedirect):
exc = sys.exc_info()[1]
except:
exc = sys.exc_info()[1]
cherrypy.log(traceback=True, severity=40)
if exc:
raise exc
def __copy__(self):
newmap = self.__class__()
# We can't just use 'update' because we want copies of the
# mutable values (each is a list) as well.
for k, v in self.items():
newmap[k] = v[:]
return newmap
copy = __copy__
def __repr__(self):
cls = self.__class__
return "%s.%s(points=%r)" % (
cls.__module__,
cls.__name__,
copykeys(self)
)
# Config namespace handlers
def hooks_namespace(k, v):
"""Attach bare hooks declared in config."""
# Use split again to allow multiple hooks for a single
# hookpoint per path (e.g. "hooks.before_handler.1").
# Little-known fact you only get from reading source ;)
hookpoint = k.split(".", 1)[0]
if isinstance(v, basestring):
v = cherrypy.lib.attributes(v)
if not isinstance(v, Hook):
v = Hook(v)
cherrypy.serving.request.hooks[hookpoint].append(v)
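# Illustrative config usage (hypothetical callables), matching the
# split() note above; both keys attach hooks to the same call point:
#     'hooks.before_handler': set_user
#     'hooks.before_handler.1': check_quota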
def request_namespace(k, v):
"""Attach request attributes declared in config."""
# Provides config entries to set request.body attrs (like
# attempt_charsets).
if k[:5] == 'body.':
setattr(cherrypy.serving.request.body, k[5:], v)
else:
setattr(cherrypy.serving.request, k, v)
def response_namespace(k, v):
"""Attach response attributes declared in config."""
# Provides config entries to set default response headers
# http://cherrypy.org/ticket/889
if k[:8] == 'headers.':
cherrypy.serving.response.headers[k.split('.', 1)[1]] = v
else:
setattr(cherrypy.serving.response, k, v)
def error_page_namespace(k, v):
"""Attach error pages declared in config."""
if k != 'default':
k = int(k)
cherrypy.serving.request.error_page[k] = v
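# Illustrative config usage (hypothetical values): a key of
#     'error_page.404': 'errors/404.html'
# maps HTTP 404 to that page, while 'error_page.default' catches any
# status without its own entry (non-'default' keys are coerced to int).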
hookpoints = ['on_start_resource', 'before_request_body',
'before_handler', 'before_finalize',
'on_end_resource', 'on_end_request',
'before_error_response', 'after_error_response']
class Request(object):
"""An HTTP request.
This object represents the metadata of an HTTP request message;
that is, it contains attributes which describe the environment
in which the request URL, headers, and body were sent (if you
want tools to interpret the headers and body, those are elsewhere,
mostly in Tools). This 'metadata' consists of socket data,
transport characteristics, and the Request-Line. This object
also contains data regarding the configuration in effect for
the given URL, and the execution plan for generating a response.
"""
prev = None
"""
The previous Request object (if any). This should be None
unless we are processing an InternalRedirect."""
# Conversation/connection attributes
local = httputil.Host("127.0.0.1", 80)
"An httputil.Host(ip, port, hostname) object for the server socket."
remote = httputil.Host("127.0.0.1", 1111)
"An httputil.Host(ip, port, hostname) object for the client socket."
scheme = "http"
"""
The protocol used between client and server. In most cases,
this will be either 'http' or 'https'."""
server_protocol = "HTTP/1.1"
"""
The HTTP version for which the HTTP server is at least
conditionally compliant."""
base = ""
"""The (scheme://host) portion of the requested URL.
In some cases (e.g. when proxying via mod_rewrite), this may contain
path segments which cherrypy.url uses when constructing URLs, but
which otherwise are ignored by CherryPy. Regardless, this value
MUST NOT end in a slash."""
# Request-Line attributes
request_line = ""
"""
The complete Request-Line received from the client. This is a
single string consisting of the request method, URI, and protocol
version (joined by spaces). Any final CRLF is removed."""
method = "GET"
"""
Indicates the HTTP method to be performed on the resource identified
by the Request-URI. Common methods include GET, HEAD, POST, PUT, and
DELETE. CherryPy allows any extension method; however, various HTTP
servers and gateways may restrict the set of allowable methods.
CherryPy applications SHOULD restrict the set (on a per-URI basis)."""
query_string = ""
"""
The query component of the Request-URI, a string of information to be
interpreted by the resource. The query portion of a URI follows the
path component, and is separated by a '?'. For example, the URI
'http://www.cherrypy.org/wiki?a=3&b=4' has the query component,
'a=3&b=4'."""
query_string_encoding = 'utf8'
"""
The encoding expected for query string arguments (after % HEX HEX decoding).
If a query string is provided that cannot be decoded with this encoding,
404 is raised (since technically it's a different URI). If you want
arbitrary encodings to not error, set this to 'Latin-1'; you can then
encode back to bytes and re-decode to whatever encoding you like later.
"""
protocol = (1, 1)
"""The HTTP protocol version corresponding to the set
of features which should be allowed in the response. If BOTH
the client's request message AND the server's level of HTTP
compliance is HTTP/1.1, this attribute will be the tuple (1, 1).
If either is 1.0, this attribute will be the tuple (1, 0).
Lower HTTP protocol versions are not explicitly supported."""
params = {}
"""
A dict which combines query string (GET) and request entity (POST)
variables. This is populated in two stages: GET params are added
before the 'on_start_resource' hook, and POST params are added
between the 'before_request_body' and 'before_handler' hooks."""
# Message attributes
header_list = []
"""
A list of the HTTP request headers as (name, value) tuples.
In general, you should use request.headers (a dict) instead."""
headers = httputil.HeaderMap()
"""
A dict-like object containing the request headers. Keys are header
names (in Title-Case format); however, you may get and set them in
a case-insensitive manner. That is, headers['Content-Type'] and
headers['content-type'] refer to the same value. Values are header
values (decoded according to :rfc:`2047` if necessary). See also:
httputil.HeaderMap, httputil.HeaderElement."""
cookie = SimpleCookie()
"""See help(Cookie)."""
rfile = None
"""
If the request included an entity (body), it will be available
as a stream in this attribute. However, the rfile will normally
be read for you between the 'before_request_body' hook and the
'before_handler' hook, and the resulting string is placed into
either request.params or the request.body attribute.
You may disable the automatic consumption of the rfile by setting
request.process_request_body to False, either in config for the desired
path, or in an 'on_start_resource' or 'before_request_body' hook.
WARNING: In almost every case, you should not attempt to read from the
rfile stream after CherryPy's automatic mechanism has read it. If you
turn off the automatic parsing of rfile, you should read exactly the
number of bytes specified in request.headers['Content-Length'].
Ignoring either of these warnings may result in a hung request thread
or in corruption of the next (pipelined) request.
"""
process_request_body = True
"""
If True, the rfile (if any) is automatically read and parsed,
and the result placed into request.params or request.body."""
methods_with_bodies = ("POST", "PUT")
"""
A sequence of HTTP methods for which CherryPy will automatically
attempt to read a body from the rfile. If you are going to change
this property, modify it on the configuration (recommended)
or on the "hook point" `on_start_resource`.
"""
body = None
"""
If the request Content-Type is 'application/x-www-form-urlencoded'
or multipart, this will be None. Otherwise, this will be an instance
of :class:`RequestBody<cherrypy._cpreqbody.RequestBody>` (which you
can .read()); this value is set between the 'before_request_body' and
'before_handler' hooks (assuming that process_request_body is True)."""
# Dispatch attributes
dispatch = cherrypy.dispatch.Dispatcher()
"""
The object which looks up the 'page handler' callable and collects
config for the current request based on the path_info, other
request attributes, and the application architecture. The core
calls the dispatcher as early as possible, passing it a 'path_info'
argument.
The default dispatcher discovers the page handler by matching path_info
to a hierarchical arrangement of objects, starting at request.app.root.
See help(cherrypy.dispatch) for more information."""
script_name = ""
"""
The 'mount point' of the application which is handling this request.
This attribute MUST NOT end in a slash. If the script_name refers to
the root of the URI, it MUST be an empty string (not "/").
"""
path_info = "/"
"""
The 'relative path' portion of the Request-URI. This is relative
to the script_name ('mount point') of the application which is
handling this request."""
login = None
"""
When authentication is used during the request processing this is
set to 'False' if it failed and to the 'username' value if it succeeded.
The default 'None' implies that no authentication happened."""
# Note that cherrypy.url uses "if request.app:" to determine whether
# the call is during a real HTTP request or not. So leave this None.
app = None
"""The cherrypy.Application object which is handling this request."""
handler = None
"""
The function, method, or other callable which CherryPy will call to
produce the response. The discovery of the handler and the arguments
it will receive are determined by the request.dispatch object.
By default, the handler is discovered by walking a tree of objects
starting at request.app.root, and is then passed all HTTP params
(from the query string and POST body) as keyword arguments."""
toolmaps = {}
"""
A nested dict of all Toolboxes and Tools in effect for this request,
of the form: {Toolbox.namespace: {Tool.name: config dict}}."""
config = None
"""
A flat dict of all configuration entries which apply to the
current request. These entries are collected from global config,
application config (based on request.path_info), and from handler
config (exactly how is governed by the request.dispatch object in
effect for this request; by default, handler config can be attached
anywhere in the tree between request.app.root and the final handler,
and inherits downward)."""
is_index = None
"""
This will be True if the current request is mapped to an 'index'
resource handler (also, a 'default' handler if path_info ends with
a slash). The value may be used to automatically redirect the
user-agent to a 'more canonical' URL which either adds or removes
the trailing slash. See cherrypy.tools.trailing_slash."""
hooks = HookMap(hookpoints)
"""
A HookMap (dict-like object) of the form: {hookpoint: [hook, ...]}.
Each key is a str naming the hook point, and each value is a list
of hooks which will be called at that hook point during this request.
The list of hooks is generally populated as early as possible (mostly
from Tools specified in config), but may be extended at any time.
See also: _cprequest.Hook, _cprequest.HookMap, and cherrypy.tools."""
error_response = cherrypy.HTTPError(500).set_response
"""
The no-arg callable which will handle unexpected, untrapped errors
during request processing. This is not used for expected exceptions
(like NotFound, HTTPError, or HTTPRedirect) which are raised in
response to expected conditions (those should be customized either
via request.error_page or by overriding HTTPError.set_response).
By default, error_response uses HTTPError(500) to return a generic
error response to the user-agent."""
error_page = {}
"""
A dict of {error code: response filename or callable} pairs.
The error code must be an int representing a given HTTP error code,
or the string 'default', which will be used if no matching entry
is found for a given numeric code.
If a filename is provided, the file should contain a Python string-
formatting template, and can expect by default to receive format
values with the mapping keys %(status)s, %(message)s, %(traceback)s,
and %(version)s. The set of format mappings can be extended by
overriding HTTPError.set_response.
If a callable is provided, it will be called by default with keyword
arguments 'status', 'message', 'traceback', and 'version', as for a
string-formatting template. The callable must return a string or
iterable of strings which will be set to response.body. It may also
override headers or perform any other processing.
If no entry is given for an error code, and no 'default' entry exists,
a default template will be used.
"""
show_tracebacks = True
"""
If True, unexpected errors encountered during request processing will
include a traceback in the response body."""
show_mismatched_params = True
"""
If True, mismatched parameters encountered during PageHandler invocation
processing will be included in the response body."""
throws = (KeyboardInterrupt, SystemExit, cherrypy.InternalRedirect)
"""The sequence of exceptions which Request.run does not trap."""
throw_errors = False
"""
If True, Request.run will not trap any errors (except HTTPRedirect and
HTTPError, which are more properly called 'exceptions', not errors)."""
closed = False
"""True once the close method has been called, False otherwise."""
stage = None
"""
A string containing the stage reached in the request-handling process.
This is useful when debugging a live server with hung requests."""
namespaces = _cpconfig.NamespaceSet(
**{"hooks": hooks_namespace,
"request": request_namespace,
"response": response_namespace,
"error_page": error_page_namespace,
"tools": cherrypy.tools,
})
def __init__(self, local_host, remote_host, scheme="http",
server_protocol="HTTP/1.1"):
"""Populate a new Request object.
local_host should be an httputil.Host object with the server info.
remote_host should be an httputil.Host object with the client info.
scheme should be a string, either "http" or "https".
"""
self.local = local_host
self.remote = remote_host
self.scheme = scheme
self.server_protocol = server_protocol
self.closed = False
# Put a *copy* of the class error_page into self.
self.error_page = self.error_page.copy()
# Put a *copy* of the class namespaces into self.
self.namespaces = self.namespaces.copy()
self.stage = None
def close(self):
"""Run cleanup code. (Core)"""
if not self.closed:
self.closed = True
self.stage = 'on_end_request'
self.hooks.run('on_end_request')
self.stage = 'close'
def run(self, method, path, query_string, req_protocol, headers, rfile):
r"""Process the Request. (Core)
method, path, query_string, and req_protocol should be pulled directly
from the Request-Line (e.g. "GET /path?key=val HTTP/1.0").
path
This should be %XX-unquoted, but query_string should not be.
When using Python 2, they both MUST be byte strings,
not unicode strings.
When using Python 3, they both MUST be unicode strings,
not byte strings, and preferably not bytes \x00-\xFF
disguised as unicode.
headers
A list of (name, value) tuples.
rfile
A file-like object containing the HTTP request entity.
When run() is done, the returned object should have 3 attributes:
* status, e.g. "200 OK"
* header_list, a list of (name, value) tuples
* body, an iterable yielding strings
Consumer code (HTTP servers) should then access these response
attributes to build the outbound stream.
"""
response = cherrypy.serving.response
self.stage = 'run'
try:
self.error_response = cherrypy.HTTPError(500).set_response
self.method = method
path = path or "/"
self.query_string = query_string or ''
self.params = {}
# Compare request and server HTTP protocol versions, in case our
# server does not support the requested protocol. Limit our output
# to min(req, server). We want the following output:
# request server actual written supported response
# protocol protocol response protocol feature set
# a 1.0 1.0 1.0 1.0
# b 1.0 1.1 1.1 1.0
# c 1.1 1.0 1.0 1.0
# d 1.1 1.1 1.1 1.1
# Notice that, in (b), the response will be "HTTP/1.1" even though
# the client only understands 1.0. RFC 2616 10.5.6 says we should
# only return 505 if the _major_ version is different.
rp = int(req_protocol[5]), int(req_protocol[7])
sp = int(self.server_protocol[5]), int(self.server_protocol[7])
self.protocol = min(rp, sp)
response.headers.protocol = self.protocol
# Rebuild first line of the request (e.g. "GET /path HTTP/1.0").
url = path
if query_string:
url += '?' + query_string
self.request_line = '%s %s %s' % (method, url, req_protocol)
self.header_list = list(headers)
self.headers = httputil.HeaderMap()
self.rfile = rfile
self.body = None
self.cookie = SimpleCookie()
self.handler = None
# path_info should be the path from the
# app root (script_name) to the handler.
self.script_name = self.app.script_name
self.path_info = pi = path[len(self.script_name):]
self.stage = 'respond'
self.respond(pi)
except self.throws:
raise
except:
if self.throw_errors:
raise
else:
# Failure in setup, error handler or finalize. Bypass them.
# Can't use handle_error because we may not have hooks yet.
cherrypy.log(traceback=True, severity=40)
if self.show_tracebacks:
body = format_exc()
else:
body = ""
r = bare_error(body)
response.output_status, response.header_list, response.body = r
if self.method == "HEAD":
# HEAD requests MUST NOT return a message-body in the response.
response.body = []
try:
cherrypy.log.access()
except:
cherrypy.log.error(traceback=True)
if response.timed_out:
raise cherrypy.TimeoutError()
return response
# Uncomment for stage debugging
# stage = property(lambda self: self._stage, lambda self, v: print(v))
def respond(self, path_info):
"""Generate a response for the resource at self.path_info. (Core)"""
response = cherrypy.serving.response
try:
try:
try:
if self.app is None:
raise cherrypy.NotFound()
# Get the 'Host' header, so we can HTTPRedirect properly.
self.stage = 'process_headers'
self.process_headers()
# Make a copy of the class hooks
self.hooks = self.__class__.hooks.copy()
self.toolmaps = {}
self.stage = 'get_resource'
self.get_resource(path_info)
self.body = _cpreqbody.RequestBody(
self.rfile, self.headers, request_params=self.params)
self.namespaces(self.config)
self.stage = 'on_start_resource'
self.hooks.run('on_start_resource')
# Parse the querystring
self.stage = 'process_query_string'
self.process_query_string()
# Process the body
if self.process_request_body:
if self.method not in self.methods_with_bodies:
self.process_request_body = False
self.stage = 'before_request_body'
self.hooks.run('before_request_body')
if self.process_request_body:
self.body.process()
# Run the handler
self.stage = 'before_handler'
self.hooks.run('before_handler')
if self.handler:
self.stage = 'handler'
response.body = self.handler()
# Finalize
self.stage = 'before_finalize'
self.hooks.run('before_finalize')
response.finalize()
except (cherrypy.HTTPRedirect, cherrypy.HTTPError):
inst = sys.exc_info()[1]
inst.set_response()
self.stage = 'before_finalize (HTTPError)'
self.hooks.run('before_finalize')
response.finalize()
finally:
self.stage = 'on_end_resource'
self.hooks.run('on_end_resource')
except self.throws:
raise
except:
if self.throw_errors:
raise
self.handle_error()
def process_query_string(self):
"""Parse the query string into Python structures. (Core)"""
try:
p = httputil.parse_query_string(
self.query_string, encoding=self.query_string_encoding)
except UnicodeDecodeError:
raise cherrypy.HTTPError(
404, "The given query string could not be processed. Query "
"strings for this resource must be encoded with %r." %
self.query_string_encoding)
# Python 2 only: keyword arguments must be byte strings (type 'str').
if not py3k:
for key, value in p.items():
if isinstance(key, unicode):
del p[key]
p[key.encode(self.query_string_encoding)] = value
self.params.update(p)
def process_headers(self):
"""Parse HTTP header data into Python structures. (Core)"""
# Process the headers into self.headers
headers = self.headers
for name, value in self.header_list:
# Call title() now (and use dict.__method__(headers))
# so title doesn't have to be called twice.
name = name.title()
value = value.strip()
# Warning: if there is more than one header entry for cookies
# (AFAIK, only Konqueror does that), only the last one will
# remain in headers (but they will be correctly stored in
# request.cookie).
if "=?" in value:
dict.__setitem__(headers, name, httputil.decode_TEXT(value))
else:
dict.__setitem__(headers, name, value)
# Handle cookies differently because on Konqueror, multiple
# cookies come on different lines with the same key
if name == 'Cookie':
try:
self.cookie.load(value)
except CookieError:
msg = "Illegal cookie name %s" % value.split('=')[0]
raise cherrypy.HTTPError(400, msg)
if not dict.__contains__(headers, 'Host'):
# All Internet-based HTTP/1.1 servers MUST respond with a 400
# (Bad Request) status code to any HTTP/1.1 request message
# which lacks a Host header field.
if self.protocol >= (1, 1):
msg = "HTTP/1.1 requires a 'Host' request header."
raise cherrypy.HTTPError(400, msg)
host = dict.get(headers, 'Host')
if not host:
host = self.local.name or self.local.ip
self.base = "%s://%s" % (self.scheme, host)
def get_resource(self, path):
"""Call a dispatcher (which sets self.handler and .config). (Core)"""
# First, see if there is a custom dispatch at this URI. Custom
# dispatchers can only be specified in app.config, not in _cp_config
# (since custom dispatchers may not even have an app.root).
dispatch = self.app.find_config(
path, "request.dispatch", self.dispatch)
# dispatch() should set self.handler and self.config
dispatch(path)
def handle_error(self):
"""Handle the last unanticipated exception. (Core)"""
try:
self.hooks.run("before_error_response")
if self.error_response:
self.error_response()
self.hooks.run("after_error_response")
cherrypy.serving.response.finalize()
except cherrypy.HTTPRedirect:
inst = sys.exc_info()[1]
inst.set_response()
cherrypy.serving.response.finalize()
# ------------------------- Properties ------------------------- #
def _get_body_params(self):
warnings.warn(
"body_params is deprecated in CherryPy 3.2, will be removed in "
"CherryPy 3.3.",
DeprecationWarning
)
return self.body.params
body_params = property(_get_body_params,
doc="""
If the request Content-Type is 'application/x-www-form-urlencoded' or
multipart, this will be a dict of the params pulled from the entity
body; that is, it will be the portion of request.params that come
from the message body (sometimes called "POST params", although they
can be sent with various HTTP method verbs). This value is set between
the 'before_request_body' and 'before_handler' hooks (assuming that
process_request_body is True).
Deprecated in 3.2, will be removed for 3.3 in favor of
:attr:`request.body.params<cherrypy._cprequest.RequestBody.params>`.""")
class ResponseBody(object):
"""The body of the HTTP response (the response entity)."""
if py3k:
unicode_err = ("Page handlers MUST return bytes. Use tools.encode "
"if you wish to return unicode.")
def __get__(self, obj, objclass=None):
if obj is None:
# When calling on the class instead of an instance...
return self
else:
return obj._body
def __set__(self, obj, value):
# Convert the given value to an iterable object.
if py3k and isinstance(value, str):
raise ValueError(self.unicode_err)
if isinstance(value, basestring):
# strings get wrapped in a list because iterating over a single
# item list is much faster than iterating over every character
# in a long string.
if value:
value = [value]
else:
# [''] doesn't evaluate to False, so replace it with [].
value = []
elif py3k and isinstance(value, list):
# every item in a list must be bytes...
for i, item in enumerate(value):
if isinstance(item, str):
raise ValueError(self.unicode_err)
        # Don't use isinstance against io.IOBase here; checking against
        # that ABC takes 1000 times as long as, say, isinstance(value, str).
elif hasattr(value, 'read'):
value = file_generator(value)
elif value is None:
value = []
obj._body = value
class Response(object):
"""An HTTP Response, including status, headers, and body."""
status = ""
"""The HTTP Status-Code and Reason-Phrase."""
header_list = []
"""
A list of the HTTP response headers as (name, value) tuples.
In general, you should use response.headers (a dict) instead. This
attribute is generated from response.headers and is not valid until
after the finalize phase."""
headers = httputil.HeaderMap()
"""
A dict-like object containing the response headers. Keys are header
names (in Title-Case format); however, you may get and set them in
a case-insensitive manner. That is, headers['Content-Type'] and
headers['content-type'] refer to the same value. Values are header
values (decoded according to :rfc:`2047` if necessary).
.. seealso:: classes :class:`HeaderMap`, :class:`HeaderElement`
"""
cookie = SimpleCookie()
"""See help(Cookie)."""
body = ResponseBody()
"""The body (entity) of the HTTP response."""
time = None
"""The value of time.time() when created. Use in HTTP dates."""
timeout = 300
"""Seconds after which the response will be aborted."""
timed_out = False
"""
Flag to indicate the response should be aborted, because it has
exceeded its timeout."""
stream = False
"""If False, buffer the response body."""
def __init__(self):
self.status = None
self.header_list = None
self._body = []
self.time = time.time()
self.headers = httputil.HeaderMap()
# Since we know all our keys are titled strings, we can
# bypass HeaderMap.update and get a big speed boost.
dict.update(self.headers, {
"Content-Type": 'text/html',
"Server": "CherryPy/" + cherrypy.__version__,
"Date": httputil.HTTPDate(self.time),
})
self.cookie = SimpleCookie()
def collapse_body(self):
"""Collapse self.body to a single string; replace it and return it."""
if isinstance(self.body, basestring):
return self.body
newbody = []
for chunk in self.body:
if py3k and not isinstance(chunk, bytes):
raise TypeError("Chunk %s is not of type 'bytes'." %
repr(chunk))
newbody.append(chunk)
newbody = ntob('').join(newbody)
self.body = newbody
return newbody
def finalize(self):
"""Transform headers (and cookies) into self.header_list. (Core)"""
try:
code, reason, _ = httputil.valid_status(self.status)
except ValueError:
raise cherrypy.HTTPError(500, sys.exc_info()[1].args[0])
headers = self.headers
self.status = "%s %s" % (code, reason)
self.output_status = ntob(str(code), 'ascii') + \
ntob(" ") + headers.encode(reason)
if self.stream:
# The upshot: wsgiserver will chunk the response if
# you pop Content-Length (or set it explicitly to None).
# Note that lib.static sets C-L to the file's st_size.
if dict.get(headers, 'Content-Length') is None:
dict.pop(headers, 'Content-Length', None)
elif code < 200 or code in (204, 205, 304):
# "All 1xx (informational), 204 (no content),
# and 304 (not modified) responses MUST NOT
# include a message-body."
dict.pop(headers, 'Content-Length', None)
self.body = ntob("")
else:
# Responses which are not streamed should have a Content-Length,
# but allow user code to set Content-Length if desired.
if dict.get(headers, 'Content-Length') is None:
content = self.collapse_body()
dict.__setitem__(headers, 'Content-Length', len(content))
# Transform our header dict into a list of tuples.
self.header_list = h = headers.output()
cookie = self.cookie.output()
if cookie:
for line in cookie.split("\n"):
if line.endswith("\r"):
# Python 2.4 emits cookies joined by LF but 2.5+ by CRLF.
line = line[:-1]
name, value = line.split(": ", 1)
if isinstance(name, unicodestr):
name = name.encode("ISO-8859-1")
if isinstance(value, unicodestr):
value = headers.encode(value)
h.append((name, value))
def check_timeout(self):
"""If now > self.time + self.timeout, set self.timed_out.
This purposefully sets a flag, rather than raising an error,
so that a monitor thread can interrupt the Response thread.
"""
if time.time() > self.time + self.timeout:
self.timed_out = True
| gpl-3.0 |
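As an illustrative, hedged sketch (not part of the file above): the Response contract documented there — set status and body, then finalize() and collapse_body() — can be exercised standalone, assuming only that cherrypy and its _cprequest module are importable.
import cherrypy
from cherrypy import _cprequest
# Sketch only: exercise the finalize/collapse_body contract described above.
resp = _cprequest.Response()
resp.status = '200 OK'
resp.body = [b'hello, ', b'world']   # the ResponseBody descriptor wraps this
resp.finalize()                      # sets output_status and header_list
print(resp.output_status)            # e.g. b'200 OK'
print(resp.collapse_body())          # b'hello, world'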
gmission/gmission | hkust-gmission/gmission/models/crowdsourcing.py | 1 | 2810 | __author__ = 'chenzhao'
from base import *
# type = text / image / selection
class HIT(db.Model, BasicModelMixin):
__tablename__ = 'hit'
id = db.Column(db.Integer, primary_key=True)
type = db.Column(db.String(20))
title = db.Column(db.String(500))
description = db.Column(db.TEXT)
attachment_id = db.Column(db.Integer, db.ForeignKey('attachment.id'))
attachment = db.relationship('Attachment', foreign_keys=attachment_id)
campaign_id = db.Column(db.Integer, db.ForeignKey('campaign.id'))
campaign = db.relationship('Campaign', lazy='select')
credit = db.Column(db.Integer, default=10)
status = db.Column(db.String(20), default='open') # or closed
required_answer_count = db.Column(db.Integer, default=3)
min_selection_count = db.Column(db.Integer, default=1)
max_selection_count = db.Column(db.Integer, default=1)
begin_time = db.Column(db.DateTime, default=datetime.datetime.now)
end_time = db.Column(db.DateTime, default=lambda: datetime.datetime.now() + datetime.timedelta(days=1))
created_on = db.Column(db.DateTime, default=datetime.datetime.now)
location_id = db.Column(db.Integer, db.ForeignKey('location.id'), nullable=True)
location = db.relationship('Location', foreign_keys=location_id)
requester = db.relationship('User')
requester_id = db.Column(db.Integer, db.ForeignKey('user.id'))
selections = db.relationship('Selection', lazy='select')
answers = db.relationship('Answer', lazy='select')
def __unicode__(self):
        return '<%s,%s>' % (repr(self.id), self.title)
class Answer(db.Model, BasicModelMixin):
id = db.Column(db.Integer, primary_key=True)
hit = db.relationship('HIT', lazy='select')
hit_id = db.Column(db.Integer, db.ForeignKey('hit.id'))
brief = db.Column(db.String(100))
attachment_id = db.Column(db.Integer, db.ForeignKey('attachment.id'))
attachment = db.relationship('Attachment', lazy='immediate', foreign_keys=attachment_id)
type = db.Column(db.String(20))
accepted = db.Column(db.Boolean, default=False)
created_on = db.Column(db.DateTime, default=datetime.datetime.now)
location = db.relationship('Location', lazy='select')
location_id = db.Column(db.Integer, db.ForeignKey('location.id'))
worker = db.relationship('User', lazy='select')
worker_id = db.Column(db.Integer, db.ForeignKey('user.id'))
def __unicode__(self):
        return '<%d,%s,%s>' % (self.id, self.hit_id, self.brief)
class Selection(db.Model, BasicModelMixin):
id = db.Column(db.Integer, primary_key=True)
hit = db.relationship('HIT', lazy='select')
hit_id = db.Column(db.Integer, db.ForeignKey('hit.id'))
brief = db.Column(db.String(100))
created_on = db.Column(db.DateTime, default=datetime.datetime.now)
| mit |
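A hedged usage sketch for the models above, assuming the Flask-SQLAlchemy session wiring that the project's base module is implied to provide; the function name is hypothetical.
# Sketch only: list open HITs with the highest credit first.
def open_hits_by_credit(limit=20):
    return (HIT.query
            .filter(HIT.status == 'open')
            .order_by(HIT.credit.desc())
            .limit(limit)
            .all())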
ibab/tensorflow | tensorflow/models/image/cifar10/cifar10.py | 9 | 13565 | # Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Builds the CIFAR-10 network.
Summary of available functions:
# Compute input images and labels for training. If you would like to run
# evaluations, use inputs() instead.
inputs, labels = distorted_inputs()
# Compute inference on the model inputs to make a prediction.
predictions = inference(inputs)
# Compute the total loss of the prediction with respect to the labels.
loss = loss(predictions, labels)
# Create a graph to run one step of training with respect to the loss.
train_op = train(loss, global_step)
"""
# pylint: disable=missing-docstring
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import gzip
import os
import re
import sys
import tarfile
from six.moves import urllib
import tensorflow as tf
from tensorflow.models.image.cifar10 import cifar10_input
FLAGS = tf.app.flags.FLAGS
# Basic model parameters.
tf.app.flags.DEFINE_integer('batch_size', 128,
"""Number of images to process in a batch.""")
tf.app.flags.DEFINE_string('data_dir', '/tmp/cifar10_data',
"""Path to the CIFAR-10 data directory.""")
# Global constants describing the CIFAR-10 data set.
IMAGE_SIZE = cifar10_input.IMAGE_SIZE
NUM_CLASSES = cifar10_input.NUM_CLASSES
NUM_EXAMPLES_PER_EPOCH_FOR_TRAIN = cifar10_input.NUM_EXAMPLES_PER_EPOCH_FOR_TRAIN
NUM_EXAMPLES_PER_EPOCH_FOR_EVAL = cifar10_input.NUM_EXAMPLES_PER_EPOCH_FOR_EVAL
# Constants describing the training process.
MOVING_AVERAGE_DECAY = 0.9999 # The decay to use for the moving average.
NUM_EPOCHS_PER_DECAY = 350.0 # Epochs after which learning rate decays.
LEARNING_RATE_DECAY_FACTOR = 0.1 # Learning rate decay factor.
INITIAL_LEARNING_RATE = 0.1 # Initial learning rate.
# If a model is trained with multiple GPUs, prefix all Op names with tower_name
# to differentiate the operations. Note that this prefix is removed from the
# names of the summaries when visualizing a model.
TOWER_NAME = 'tower'
DATA_URL = 'http://www.cs.toronto.edu/~kriz/cifar-10-binary.tar.gz'
def _activation_summary(x):
"""Helper to create summaries for activations.
Creates a summary that provides a histogram of activations.
  Creates a summary that measures the sparsity of activations.
Args:
x: Tensor
Returns:
nothing
"""
# Remove 'tower_[0-9]/' from the name in case this is a multi-GPU training
# session. This helps the clarity of presentation on tensorboard.
tensor_name = re.sub('%s_[0-9]*/' % TOWER_NAME, '', x.op.name)
tf.histogram_summary(tensor_name + '/activations', x)
tf.scalar_summary(tensor_name + '/sparsity', tf.nn.zero_fraction(x))
def _variable_on_cpu(name, shape, initializer):
"""Helper to create a Variable stored on CPU memory.
Args:
name: name of the variable
shape: list of ints
initializer: initializer for Variable
Returns:
Variable Tensor
"""
with tf.device('/cpu:0'):
var = tf.get_variable(name, shape, initializer=initializer)
return var
def _variable_with_weight_decay(name, shape, stddev, wd):
"""Helper to create an initialized Variable with weight decay.
Note that the Variable is initialized with a truncated normal distribution.
A weight decay is added only if one is specified.
Args:
name: name of the variable
shape: list of ints
stddev: standard deviation of a truncated Gaussian
wd: add L2Loss weight decay multiplied by this float. If None, weight
decay is not added for this Variable.
Returns:
Variable Tensor
"""
var = _variable_on_cpu(name, shape,
tf.truncated_normal_initializer(stddev=stddev))
if wd is not None:
weight_decay = tf.mul(tf.nn.l2_loss(var), wd, name='weight_loss')
tf.add_to_collection('losses', weight_decay)
return var
def distorted_inputs():
"""Construct distorted input for CIFAR training using the Reader ops.
Returns:
images: Images. 4D tensor of [batch_size, IMAGE_SIZE, IMAGE_SIZE, 3] size.
labels: Labels. 1D tensor of [batch_size] size.
Raises:
ValueError: If no data_dir
"""
if not FLAGS.data_dir:
raise ValueError('Please supply a data_dir')
data_dir = os.path.join(FLAGS.data_dir, 'cifar-10-batches-bin')
return cifar10_input.distorted_inputs(data_dir=data_dir,
batch_size=FLAGS.batch_size)
def inputs(eval_data):
"""Construct input for CIFAR evaluation using the Reader ops.
Args:
eval_data: bool, indicating if one should use the train or eval data set.
Returns:
images: Images. 4D tensor of [batch_size, IMAGE_SIZE, IMAGE_SIZE, 3] size.
labels: Labels. 1D tensor of [batch_size] size.
Raises:
ValueError: If no data_dir
"""
if not FLAGS.data_dir:
raise ValueError('Please supply a data_dir')
data_dir = os.path.join(FLAGS.data_dir, 'cifar-10-batches-bin')
return cifar10_input.inputs(eval_data=eval_data, data_dir=data_dir,
batch_size=FLAGS.batch_size)
def inference(images):
"""Build the CIFAR-10 model.
Args:
images: Images returned from distorted_inputs() or inputs().
Returns:
Logits.
"""
# We instantiate all variables using tf.get_variable() instead of
# tf.Variable() in order to share variables across multiple GPU training runs.
# If we only ran this model on a single GPU, we could simplify this function
# by replacing all instances of tf.get_variable() with tf.Variable().
#
# conv1
with tf.variable_scope('conv1') as scope:
kernel = _variable_with_weight_decay('weights', shape=[5, 5, 3, 64],
stddev=1e-4, wd=0.0)
conv = tf.nn.conv2d(images, kernel, [1, 1, 1, 1], padding='SAME')
biases = _variable_on_cpu('biases', [64], tf.constant_initializer(0.0))
bias = tf.nn.bias_add(conv, biases)
conv1 = tf.nn.relu(bias, name=scope.name)
_activation_summary(conv1)
# pool1
pool1 = tf.nn.max_pool(conv1, ksize=[1, 3, 3, 1], strides=[1, 2, 2, 1],
padding='SAME', name='pool1')
# norm1
norm1 = tf.nn.lrn(pool1, 4, bias=1.0, alpha=0.001 / 9.0, beta=0.75,
name='norm1')
# conv2
with tf.variable_scope('conv2') as scope:
kernel = _variable_with_weight_decay('weights', shape=[5, 5, 64, 64],
stddev=1e-4, wd=0.0)
conv = tf.nn.conv2d(norm1, kernel, [1, 1, 1, 1], padding='SAME')
biases = _variable_on_cpu('biases', [64], tf.constant_initializer(0.1))
bias = tf.nn.bias_add(conv, biases)
conv2 = tf.nn.relu(bias, name=scope.name)
_activation_summary(conv2)
# norm2
norm2 = tf.nn.lrn(conv2, 4, bias=1.0, alpha=0.001 / 9.0, beta=0.75,
name='norm2')
# pool2
pool2 = tf.nn.max_pool(norm2, ksize=[1, 3, 3, 1],
strides=[1, 2, 2, 1], padding='SAME', name='pool2')
# local3
with tf.variable_scope('local3') as scope:
# Move everything into depth so we can perform a single matrix multiply.
reshape = tf.reshape(pool2, [FLAGS.batch_size, -1])
dim = reshape.get_shape()[1].value
weights = _variable_with_weight_decay('weights', shape=[dim, 384],
stddev=0.04, wd=0.004)
biases = _variable_on_cpu('biases', [384], tf.constant_initializer(0.1))
local3 = tf.nn.relu(tf.matmul(reshape, weights) + biases, name=scope.name)
_activation_summary(local3)
# local4
with tf.variable_scope('local4') as scope:
weights = _variable_with_weight_decay('weights', shape=[384, 192],
stddev=0.04, wd=0.004)
biases = _variable_on_cpu('biases', [192], tf.constant_initializer(0.1))
local4 = tf.nn.relu(tf.matmul(local3, weights) + biases, name=scope.name)
_activation_summary(local4)
# softmax, i.e. softmax(WX + b)
with tf.variable_scope('softmax_linear') as scope:
weights = _variable_with_weight_decay('weights', [192, NUM_CLASSES],
stddev=1/192.0, wd=0.0)
biases = _variable_on_cpu('biases', [NUM_CLASSES],
tf.constant_initializer(0.0))
softmax_linear = tf.add(tf.matmul(local4, weights), biases, name=scope.name)
_activation_summary(softmax_linear)
return softmax_linear
def loss(logits, labels):
"""Add L2Loss to all the trainable variables.
Add summary for "Loss" and "Loss/avg".
Args:
logits: Logits from inference().
labels: Labels from distorted_inputs or inputs(). 1-D tensor
of shape [batch_size]
Returns:
Loss tensor of type float.
"""
# Calculate the average cross entropy loss across the batch.
labels = tf.cast(labels, tf.int64)
cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(
logits, labels, name='cross_entropy_per_example')
cross_entropy_mean = tf.reduce_mean(cross_entropy, name='cross_entropy')
tf.add_to_collection('losses', cross_entropy_mean)
# The total loss is defined as the cross entropy loss plus all of the weight
# decay terms (L2 loss).
return tf.add_n(tf.get_collection('losses'), name='total_loss')
def _add_loss_summaries(total_loss):
"""Add summaries for losses in CIFAR-10 model.
Generates moving average for all losses and associated summaries for
visualizing the performance of the network.
Args:
total_loss: Total loss from loss().
Returns:
loss_averages_op: op for generating moving averages of losses.
"""
# Compute the moving average of all individual losses and the total loss.
loss_averages = tf.train.ExponentialMovingAverage(0.9, name='avg')
losses = tf.get_collection('losses')
loss_averages_op = loss_averages.apply(losses + [total_loss])
# Attach a scalar summary to all individual losses and the total loss; do the
# same for the averaged version of the losses.
for l in losses + [total_loss]:
# Name each loss as '(raw)' and name the moving average version of the loss
# as the original loss name.
tf.scalar_summary(l.op.name +' (raw)', l)
tf.scalar_summary(l.op.name, loss_averages.average(l))
return loss_averages_op
def train(total_loss, global_step):
"""Train CIFAR-10 model.
Create an optimizer and apply to all trainable variables. Add moving
average for all trainable variables.
Args:
total_loss: Total loss from loss().
global_step: Integer Variable counting the number of training steps
processed.
Returns:
train_op: op for training.
"""
# Variables that affect learning rate.
num_batches_per_epoch = NUM_EXAMPLES_PER_EPOCH_FOR_TRAIN / FLAGS.batch_size
decay_steps = int(num_batches_per_epoch * NUM_EPOCHS_PER_DECAY)
# Decay the learning rate exponentially based on the number of steps.
lr = tf.train.exponential_decay(INITIAL_LEARNING_RATE,
global_step,
decay_steps,
LEARNING_RATE_DECAY_FACTOR,
staircase=True)
tf.scalar_summary('learning_rate', lr)
# Generate moving averages of all losses and associated summaries.
loss_averages_op = _add_loss_summaries(total_loss)
# Compute gradients.
with tf.control_dependencies([loss_averages_op]):
opt = tf.train.GradientDescentOptimizer(lr)
grads = opt.compute_gradients(total_loss)
# Apply gradients.
apply_gradient_op = opt.apply_gradients(grads, global_step=global_step)
# Add histograms for trainable variables.
for var in tf.trainable_variables():
tf.histogram_summary(var.op.name, var)
# Add histograms for gradients.
for grad, var in grads:
if grad is not None:
tf.histogram_summary(var.op.name + '/gradients', grad)
# Track the moving averages of all trainable variables.
variable_averages = tf.train.ExponentialMovingAverage(
MOVING_AVERAGE_DECAY, global_step)
variables_averages_op = variable_averages.apply(tf.trainable_variables())
with tf.control_dependencies([apply_gradient_op, variables_averages_op]):
train_op = tf.no_op(name='train')
return train_op
def maybe_download_and_extract():
"""Download and extract the tarball from Alex's website."""
dest_directory = FLAGS.data_dir
if not os.path.exists(dest_directory):
os.makedirs(dest_directory)
filename = DATA_URL.split('/')[-1]
filepath = os.path.join(dest_directory, filename)
if not os.path.exists(filepath):
def _progress(count, block_size, total_size):
sys.stdout.write('\r>> Downloading %s %.1f%%' % (filename,
float(count * block_size) / float(total_size) * 100.0))
sys.stdout.flush()
filepath, _ = urllib.request.urlretrieve(DATA_URL, filepath, _progress)
print()
statinfo = os.stat(filepath)
print('Successfully downloaded', filename, statinfo.st_size, 'bytes.')
tarfile.open(filepath, 'r:gz').extractall(dest_directory)
| apache-2.0 |
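The module docstring above lists distorted_inputs(), inference(), loss(), and train(); a condensed, hedged sketch of wiring them into a training loop, using the same TensorFlow 0.x API the file is written against (the function name and step counts are invented).
import tensorflow as tf
from tensorflow.models.image.cifar10 import cifar10
# Sketch only: minimal training loop over the functions defined above.
def train_briefly(steps=100):
    cifar10.maybe_download_and_extract()
    with tf.Graph().as_default():
        global_step = tf.Variable(0, trainable=False)
        images, labels = cifar10.distorted_inputs()
        logits = cifar10.inference(images)
        total_loss = cifar10.loss(logits, labels)
        train_op = cifar10.train(total_loss, global_step)
        with tf.Session() as sess:
            sess.run(tf.initialize_all_variables())
            tf.train.start_queue_runners(sess=sess)  # feed the input queues
            for step in range(steps):
                _, loss_value = sess.run([train_op, total_loss])
                if step % 10 == 0:
                    print('step %d, loss = %.3f' % (step, loss_value))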
Beeblio/django | django/db/models/aggregates.py | 6 | 2701 | """
Classes to represent the definitions of aggregate functions.
"""
from django.db.models.constants import LOOKUP_SEP
__all__ = [
'Aggregate', 'Avg', 'Count', 'Max', 'Min', 'StdDev', 'Sum', 'Variance',
]
def refs_aggregate(lookup_parts, aggregates):
"""
    A little helper method to check whether the lookup_parts contain references
    to the given aggregates set. Because the LOOKUP_SEP is contained in the
    default annotation names, we must check each prefix of the lookup_parts
    for a match.
"""
for i in range(len(lookup_parts) + 1):
if LOOKUP_SEP.join(lookup_parts[0:i]) in aggregates:
return True
return False
class Aggregate(object):
"""
Default Aggregate definition.
"""
def __init__(self, lookup, **extra):
"""Instantiate a new aggregate.
* lookup is the field on which the aggregate operates.
* extra is a dictionary of additional data to provide for the
aggregate definition
Also utilizes the class variables:
* name, the identifier for this aggregate function.
"""
self.lookup = lookup
self.extra = extra
def _default_alias(self):
return '%s__%s' % (self.lookup, self.name.lower())
default_alias = property(_default_alias)
def add_to_query(self, query, alias, col, source, is_summary):
"""Add the aggregate to the nominated query.
This method is used to convert the generic Aggregate definition into a
backend-specific definition.
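        * alias is the name under which the aggregate will be registered
          on the query.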
* query is the backend-specific query instance to which the aggregate
is to be added.
* col is a column reference describing the subject field
of the aggregate. It can be an alias, or a tuple describing
a table and column name.
* source is the underlying field or aggregate definition for
the column reference. If the aggregate is not an ordinal or
computed type, this reference is used to determine the coerced
output type of the aggregate.
* is_summary is a boolean that is set True if the aggregate is a
summary value rather than an annotation.
"""
klass = getattr(query.aggregates_module, self.name)
aggregate = klass(col, source=source, is_summary=is_summary, **self.extra)
query.aggregates[alias] = aggregate
class Avg(Aggregate):
name = 'Avg'
class Count(Aggregate):
name = 'Count'
class Max(Aggregate):
name = 'Max'
class Min(Aggregate):
name = 'Min'
class StdDev(Aggregate):
name = 'StdDev'
class Sum(Aggregate):
name = 'Sum'
class Variance(Aggregate):
name = 'Variance'
| bsd-3-clause |
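These classes are the ORM-facing half of aggregation; a hedged sketch of typical usage inside a configured Django project (Book and Author are hypothetical models). The result keys come from the default_alias property defined above.
from django.db.models import Avg, Count
# Sketch only: aggregate over a whole table, then annotate per row.
stats = Book.objects.aggregate(Avg('price'), Count('id'))
# -> {'price__avg': ..., 'id__count': ...}
authors = Author.objects.annotate(num_books=Count('book'))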
GDGLima/contentbox | third_party/django/contrib/gis/db/backends/base.py | 112 | 11144 | """
Base/mixin classes for the spatial backend database operations and the
`SpatialRefSys` model for the backend.
"""
import re
from django.contrib.gis import gdal
from django.utils import six
from django.utils.encoding import python_2_unicode_compatible
class BaseSpatialOperations(object):
"""
    This class holds the base `BaseSpatialOperations` object, which is
    subclassed by each spatial database backend with the features
    it has.
"""
distance_functions = {}
geometry_functions = {}
geometry_operators = {}
geography_operators = {}
geography_functions = {}
gis_terms = set()
truncate_params = {}
# Quick booleans for the type of this spatial backend, and
# an attribute for the spatial database version tuple (if applicable)
postgis = False
spatialite = False
mysql = False
oracle = False
spatial_version = None
# How the geometry column should be selected.
select = None
# Does the spatial database have a geometry or geography type?
geography = False
geometry = False
area = False
centroid = False
difference = False
distance = False
distance_sphere = False
distance_spheroid = False
envelope = False
force_rhr = False
mem_size = False
bounding_circle = False
num_geom = False
num_points = False
perimeter = False
perimeter3d = False
point_on_surface = False
polygonize = False
reverse = False
scale = False
snap_to_grid = False
sym_difference = False
transform = False
translate = False
union = False
# Aggregates
collect = False
extent = False
extent3d = False
make_line = False
unionagg = False
# Serialization
geohash = False
geojson = False
gml = False
kml = False
svg = False
# Constructors
from_text = False
from_wkb = False
# Default conversion functions for aggregates; will be overridden if implemented
# for the spatial backend.
def convert_extent(self, box):
raise NotImplementedError('Aggregate extent not implemented for this spatial backend.')
def convert_extent3d(self, box):
raise NotImplementedError('Aggregate 3D extent not implemented for this spatial backend.')
def convert_geom(self, geom_val, geom_field):
raise NotImplementedError('Aggregate method not implemented for this spatial backend.')
# For quoting column values, rather than columns.
def geo_quote_name(self, name):
return "'%s'" % name
# GeometryField operations
def geo_db_type(self, f):
"""
Returns the database column type for the geometry field on
the spatial backend.
"""
raise NotImplementedError
def get_distance(self, f, value, lookup_type):
"""
Returns the distance parameters for the given geometry field,
lookup value, and lookup type.
"""
raise NotImplementedError('Distance operations not available on this spatial backend.')
def get_geom_placeholder(self, f, value):
"""
Returns the placeholder for the given geometry field with the given
value. Depending on the spatial backend, the placeholder may contain a
stored procedure call to the transformation function of the spatial
backend.
"""
raise NotImplementedError
def get_expression_column(self, evaluator):
"""
Helper method to return the quoted column string from the evaluator
for its expression.
"""
for expr, col_tup in evaluator.cols:
if expr is evaluator.expression:
return '%s.%s' % tuple(map(self.quote_name, col_tup))
raise Exception("Could not find the column for the expression.")
# Spatial SQL Construction
def spatial_aggregate_sql(self, agg):
raise NotImplementedError('Aggregate support not implemented for this spatial backend.')
def spatial_lookup_sql(self, lvalue, lookup_type, value, field):
raise NotImplementedError
# Routines for getting the OGC-compliant models.
def geometry_columns(self):
raise NotImplementedError
def spatial_ref_sys(self):
raise NotImplementedError
@python_2_unicode_compatible
class SpatialRefSysMixin(object):
"""
The SpatialRefSysMixin is a class used by the database-dependent
    SpatialRefSys objects to reduce redundant code.
"""
# For pulling out the spheroid from the spatial reference string. This
# regular expression is used only if the user does not have GDAL installed.
# TODO: Flattening not used in all ellipsoids, could also be a minor axis,
# or 'b' parameter.
spheroid_regex = re.compile(r'.+SPHEROID\[\"(?P<name>.+)\",(?P<major>\d+(\.\d+)?),(?P<flattening>\d{3}\.\d+),')
# For pulling out the units on platforms w/o GDAL installed.
# TODO: Figure out how to pull out angular units of projected coordinate system and
# fix for LOCAL_CS types. GDAL should be highly recommended for performing
# distance queries.
units_regex = re.compile(r'.+UNIT ?\["(?P<unit_name>[\w \'\(\)]+)", ?(?P<unit>[\d\.]+)(,AUTHORITY\["(?P<unit_auth_name>[\w \'\(\)]+)","(?P<unit_auth_val>\d+)"\])?\]([\w ]+)?(,AUTHORITY\["(?P<auth_name>[\w \'\(\)]+)","(?P<auth_val>\d+)"\])?\]$')
@property
def srs(self):
"""
Returns a GDAL SpatialReference object, if GDAL is installed.
"""
if gdal.HAS_GDAL:
# TODO: Is caching really necessary here? Is complexity worth it?
if hasattr(self, '_srs'):
# Returning a clone of the cached SpatialReference object.
return self._srs.clone()
else:
# Attempting to cache a SpatialReference object.
# Trying to get from WKT first.
                try:
                    self._srs = gdal.SpatialReference(self.wkt)
                    return self.srs
                except Exception as e:
                    msg = e
                try:
                    self._srs = gdal.SpatialReference(self.proj4text)
                    return self.srs
                except Exception as e:
                    msg = e
                raise Exception('Could not get OSR SpatialReference from WKT: %s\nError:\n%s' % (self.wkt, msg))
else:
raise Exception('GDAL is not installed.')
@property
def ellipsoid(self):
"""
Returns a tuple of the ellipsoid parameters:
(semimajor axis, semiminor axis, and inverse flattening).
"""
if gdal.HAS_GDAL:
return self.srs.ellipsoid
else:
m = self.spheroid_regex.match(self.wkt)
            if m:
                return (float(m.group('major')), float(m.group('flattening')))
            return None
@property
def name(self):
"Returns the projection name."
return self.srs.name
@property
def spheroid(self):
"Returns the spheroid name for this spatial reference."
return self.srs['spheroid']
@property
def datum(self):
"Returns the datum for this spatial reference."
return self.srs['datum']
@property
def projected(self):
"Is this Spatial Reference projected?"
if gdal.HAS_GDAL:
return self.srs.projected
else:
return self.wkt.startswith('PROJCS')
@property
def local(self):
"Is this Spatial Reference local?"
if gdal.HAS_GDAL:
return self.srs.local
else:
return self.wkt.startswith('LOCAL_CS')
@property
def geographic(self):
"Is this Spatial Reference geographic?"
if gdal.HAS_GDAL:
return self.srs.geographic
else:
return self.wkt.startswith('GEOGCS')
@property
def linear_name(self):
"Returns the linear units name."
if gdal.HAS_GDAL:
return self.srs.linear_name
elif self.geographic:
return None
else:
m = self.units_regex.match(self.wkt)
return m.group('unit_name')
@property
def linear_units(self):
"Returns the linear units."
if gdal.HAS_GDAL:
return self.srs.linear_units
elif self.geographic:
return None
else:
m = self.units_regex.match(self.wkt)
return m.group('unit')
@property
def angular_name(self):
"Returns the name of the angular units."
if gdal.HAS_GDAL:
return self.srs.angular_name
elif self.projected:
return None
else:
m = self.units_regex.match(self.wkt)
return m.group('unit_name')
@property
def angular_units(self):
"Returns the angular units."
if gdal.HAS_GDAL:
return self.srs.angular_units
elif self.projected:
return None
else:
m = self.units_regex.match(self.wkt)
return m.group('unit')
@property
def units(self):
"Returns a tuple of the units and the name."
if self.projected or self.local:
return (self.linear_units, self.linear_name)
elif self.geographic:
return (self.angular_units, self.angular_name)
else:
return (None, None)
@classmethod
def get_units(cls, wkt):
"""
Class method used by GeometryField on initialization to
        retrieve the units from the given WKT, without having to use
any of the database fields.
"""
if gdal.HAS_GDAL:
return gdal.SpatialReference(wkt).units
else:
m = cls.units_regex.match(wkt)
return m.group('unit'), m.group('unit_name')
@classmethod
def get_spheroid(cls, wkt, string=True):
"""
Class method used by GeometryField on initialization to
retrieve the `SPHEROID[..]` parameters from the given WKT.
"""
if gdal.HAS_GDAL:
srs = gdal.SpatialReference(wkt)
sphere_params = srs.ellipsoid
sphere_name = srs['spheroid']
else:
m = cls.spheroid_regex.match(wkt)
if m:
sphere_params = (float(m.group('major')), float(m.group('flattening')))
sphere_name = m.group('name')
else:
return None
if not string:
return sphere_name, sphere_params
else:
# `string` parameter used to place in format acceptable by PostGIS
if len(sphere_params) == 3:
radius, flattening = sphere_params[0], sphere_params[2]
else:
radius, flattening = sphere_params
return 'SPHEROID["%s",%s,%s]' % (sphere_name, radius, flattening)
def __str__(self):
"""
Returns the string representation. If GDAL is installed,
it will be 'pretty' OGC WKT.
"""
try:
return six.text_type(self.srs)
except:
return six.text_type(self.wkt)
| apache-2.0 |
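The get_units and get_spheroid classmethods above work on raw WKT without a database row; a hedged sketch using the standard WGS 84 definition (with GDAL installed this path is robust; the regex fallback is more brittle, and the exact output format may vary).
from django.contrib.gis.db.backends.base import SpatialRefSysMixin
# Sketch only: pull units and spheroid parameters out of WKT.
wgs84_wkt = (
    'GEOGCS["WGS 84",DATUM["WGS_1984",'
    'SPHEROID["WGS 84",6378137,298.257223563,AUTHORITY["EPSG","7030"]],'
    'AUTHORITY["EPSG","6326"]],PRIMEM["Greenwich",0],'
    'UNIT["degree",0.01745329251994328,AUTHORITY["EPSG","9122"]],'
    'AUTHORITY["EPSG","4326"]]'
)
unit, unit_name = SpatialRefSysMixin.get_units(wgs84_wkt)   # (1.0, 'degree')-ish
spheroid = SpatialRefSysMixin.get_spheroid(wgs84_wkt)
# spheroid -> 'SPHEROID["WGS 84",6378137.0,298.257223563]' (format may vary)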
FiloSottile/youtube-dl | youtube_dl/extractor/facebook.py | 7 | 6654 | from __future__ import unicode_literals
import json
import re
import socket
from .common import InfoExtractor
from ..compat import (
compat_http_client,
compat_str,
compat_urllib_error,
compat_urllib_parse,
compat_urllib_request,
)
from ..utils import (
ExtractorError,
int_or_none,
limit_length,
urlencode_postdata,
)
class FacebookIE(InfoExtractor):
_VALID_URL = r'''(?x)
https?://(?:\w+\.)?facebook\.com/
(?:[^#]*?\#!/)?
(?:video/video\.php|photo\.php|video\.php|video/embed)\?(?:.*?)
(?:v|video_id)=(?P<id>[0-9]+)
(?:.*)'''
_LOGIN_URL = 'https://www.facebook.com/login.php?next=http%3A%2F%2Ffacebook.com%2Fhome.php&login_attempt=1'
_CHECKPOINT_URL = 'https://www.facebook.com/checkpoint/?next=http%3A%2F%2Ffacebook.com%2Fhome.php&_fb_noscript=1'
_NETRC_MACHINE = 'facebook'
IE_NAME = 'facebook'
_TESTS = [{
'url': 'https://www.facebook.com/video.php?v=637842556329505&fref=nf',
'md5': '6a40d33c0eccbb1af76cf0485a052659',
'info_dict': {
'id': '637842556329505',
'ext': 'mp4',
'title': 're:Did you know Kei Nishikori is the first Asian man to ever reach a Grand Slam',
}
}, {
'note': 'Video without discernible title',
'url': 'https://www.facebook.com/video.php?v=274175099429670',
'info_dict': {
'id': '274175099429670',
'ext': 'mp4',
'title': 'Facebook video #274175099429670',
}
}, {
'url': 'https://www.facebook.com/video.php?v=10204634152394104',
'only_matching': True,
}]
def _login(self):
(useremail, password) = self._get_login_info()
if useremail is None:
return
login_page_req = compat_urllib_request.Request(self._LOGIN_URL)
login_page_req.add_header('Cookie', 'locale=en_US')
login_page = self._download_webpage(login_page_req, None,
note='Downloading login page',
errnote='Unable to download login page')
lsd = self._search_regex(
r'<input type="hidden" name="lsd" value="([^"]*)"',
login_page, 'lsd')
lgnrnd = self._search_regex(r'name="lgnrnd" value="([^"]*?)"', login_page, 'lgnrnd')
login_form = {
'email': useremail,
'pass': password,
'lsd': lsd,
'lgnrnd': lgnrnd,
'next': 'http://facebook.com/home.php',
'default_persistent': '0',
'legacy_return': '1',
'timezone': '-60',
'trynum': '1',
}
request = compat_urllib_request.Request(self._LOGIN_URL, urlencode_postdata(login_form))
request.add_header('Content-Type', 'application/x-www-form-urlencoded')
try:
login_results = self._download_webpage(request, None,
note='Logging in', errnote='unable to fetch login page')
if re.search(r'<form(.*)name="login"(.*)</form>', login_results) is not None:
                self._downloader.report_warning('unable to log in: bad username/password, or exceeded login rate limit (~3/min). Check credentials or wait.')
return
check_form = {
'fb_dtsg': self._search_regex(r'name="fb_dtsg" value="(.+?)"', login_results, 'fb_dtsg'),
'h': self._search_regex(
r'name="h"\s+(?:\w+="[^"]+"\s+)*?value="([^"]+)"', login_results, 'h'),
'name_action_selected': 'dont_save',
}
check_req = compat_urllib_request.Request(self._CHECKPOINT_URL, urlencode_postdata(check_form))
check_req.add_header('Content-Type', 'application/x-www-form-urlencoded')
check_response = self._download_webpage(check_req, None,
note='Confirming login')
if re.search(r'id="checkpointSubmitButton"', check_response) is not None:
                self._downloader.report_warning('Unable to confirm login; you have to log in from your browser and authorize the login.')
except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
self._downloader.report_warning('unable to log in: %s' % compat_str(err))
return
def _real_initialize(self):
self._login()
def _real_extract(self, url):
video_id = self._match_id(url)
url = 'https://www.facebook.com/video/video.php?v=%s' % video_id
webpage = self._download_webpage(url, video_id)
BEFORE = '{swf.addParam(param[0], param[1]);});\n'
AFTER = '.forEach(function(variable) {swf.addVariable(variable[0], variable[1]);});'
m = re.search(re.escape(BEFORE) + '(.*?)' + re.escape(AFTER), webpage)
if not m:
m_msg = re.search(r'class="[^"]*uiInterstitialContent[^"]*"><div>(.*?)</div>', webpage)
if m_msg is not None:
raise ExtractorError(
'The video is not available, Facebook said: "%s"' % m_msg.group(1),
expected=True)
else:
raise ExtractorError('Cannot parse data')
data = dict(json.loads(m.group(1)))
params_raw = compat_urllib_parse.unquote(data['params'])
params = json.loads(params_raw)
video_data = params['video_data'][0]
formats = []
for quality in ['sd', 'hd']:
src = video_data.get('%s_src' % quality)
if src is not None:
formats.append({
'format_id': quality,
'url': src,
})
if not formats:
raise ExtractorError('Cannot find video formats')
video_title = self._html_search_regex(
r'<h2 class="uiHeaderTitle">([^<]*)</h2>', webpage, 'title',
fatal=False)
if not video_title:
video_title = self._html_search_regex(
r'(?s)<span class="fbPhotosPhotoCaption".*?id="fbPhotoPageCaption"><span class="hasCaption">(.*?)</span>',
webpage, 'alternative title', default=None)
video_title = limit_length(video_title, 80)
if not video_title:
video_title = 'Facebook video #%s' % video_id
return {
'id': video_id,
'title': video_title,
'formats': formats,
'duration': int_or_none(video_data.get('video_duration')),
'thumbnail': video_data.get('thumbnail_src'),
}
| unlicense |
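Everything in the extractor above keys off _VALID_URL; a hedged sketch of how the id capture behaves, reusing one of the test-fixture URLs already listed in the file.
import re
from youtube_dl.extractor.facebook import FacebookIE
# Sketch only: demonstrates the _VALID_URL id capture used by _real_extract.
url = 'https://www.facebook.com/video.php?v=637842556329505&fref=nf'
match = re.match(FacebookIE._VALID_URL, url)
print(match.group('id'))  # -> 637842556329505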
emad8878/finalproject | read_data.py | 1 | 1788 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Student selection process"""
from student_data import *
import pprint
def passed_exam(student_list):
    """Filter and select students for the medical program.
    Args:
        student_list (list): (name, MCAT score) tuples for all applicants.
    Return:
        Returns the list of students whose MCAT score is above 125.
    """
    selected_list = []
    for student in student_list:
        if student[1] > 125:
            selected_list.append(student)
    return selected_list
def final_list(student_list):
    """Select the students who passed both the exam and the interview.
    Args:
        student_list (list): (name, MCAT score) tuples for all applicants.
    Return:
        Returns the interviewed students sorted by MCAT score, e.g.:
        [('Tomas', 126),
         ('Amy', 130),
         ('Dave', 130),
         ('Justin', 130),
         ('Joseph', 134),
         ('James', 140),
         ('Sumon', 160),
         ('Smith', 165)]
    """
    final_selections = passed_exam(student_list)
    final_round = []
    for item in final_selections:
        # raw_input keeps the script on Python 2, as in the original.
        passed = raw_input('passed the interview ' + str(item[0]) + ' with MCAT '
                           + str(item[1]) + ' (enter n for no, y for yes): ')
        final_round.append(passed)
    selected = []
    for score in range(0, len(final_round)):
        if final_round[score] == 'y':
            selected.append(final_selections[score])
    selected.sort(key=lambda tup: tup[1])
    print('Students sorted in increasing order of their MCAT score: ')
    pprint.pprint(selected)
    return selected
if __name__ == '__main__':
    import doctest
    doctest.testmod()
| mpl-2.0 |
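The script above star-imports student_data, which is not shown; a hypothetical stand-in (tuples taken from the example output in final_list's docstring, plus one invented low scorer) that makes the script runnable.
# Sketch only: a hypothetical student_data.py satisfying the import above.
student_list = [
    ('Tomas', 126), ('Amy', 130), ('Dave', 130), ('Justin', 130),
    ('Joseph', 134), ('James', 140), ('Sumon', 160), ('Smith', 165),
    ('Lena', 110),  # invented entry; filtered out by passed_exam
]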
agaffney/ansible | test/lib/ansible_test/_internal/cloud/acme.py | 2 | 6056 | """ACME plugin for integration tests."""
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
import time
from . import (
CloudProvider,
CloudEnvironment,
CloudEnvironmentConfig,
)
from ..util import (
find_executable,
display,
ApplicationError,
SubprocessError,
)
from ..http import (
HttpClient,
)
from ..docker_util import (
docker_run,
docker_rm,
docker_inspect,
docker_pull,
get_docker_container_id,
get_docker_hostname,
get_docker_container_ip,
get_docker_preferred_network_name,
is_docker_user_defined_network,
)
class ACMEProvider(CloudProvider):
"""ACME plugin. Sets up cloud resources for tests."""
DOCKER_SIMULATOR_NAME = 'acme-simulator'
def __init__(self, args):
"""
:type args: TestConfig
"""
super(ACMEProvider, self).__init__(args)
# The simulator must be pinned to a specific version to guarantee CI passes with the version used.
if os.environ.get('ANSIBLE_ACME_CONTAINER'):
self.image = os.environ.get('ANSIBLE_ACME_CONTAINER')
else:
self.image = 'quay.io/ansible/acme-test-container:2.0.0'
self.container_name = ''
def _wait_for_service(self, protocol, acme_host, port, local_part, name):
"""Wait for an endpoint to accept connections."""
if self.args.explain:
return
client = HttpClient(self.args, always=True, insecure=True)
endpoint = '%s://%s:%d/%s' % (protocol, acme_host, port, local_part)
for dummy in range(1, 30):
display.info('Waiting for %s: %s' % (name, endpoint), verbosity=1)
try:
client.get(endpoint)
return
except SubprocessError:
pass
time.sleep(1)
raise ApplicationError('Timeout waiting for %s.' % name)
def filter(self, targets, exclude):
"""Filter out the cloud tests when the necessary config and resources are not available.
:type targets: tuple[TestTarget]
:type exclude: list[str]
"""
docker = find_executable('docker', required=False)
if docker:
return
skip = 'cloud/%s/' % self.platform
skipped = [target.name for target in targets if skip in target.aliases]
if skipped:
exclude.append(skip)
display.warning('Excluding tests marked "%s" which require the "docker" command: %s'
% (skip.rstrip('/'), ', '.join(skipped)))
def setup(self):
"""Setup the cloud resource before delegation and register a cleanup callback."""
super(ACMEProvider, self).setup()
if self._use_static_config():
self._setup_static()
else:
self._setup_dynamic()
def get_docker_run_options(self):
"""Get any additional options needed when delegating tests to a docker container.
:rtype: list[str]
"""
network = get_docker_preferred_network_name(self.args)
if self.managed and not is_docker_user_defined_network(network):
return ['--link', self.DOCKER_SIMULATOR_NAME]
return []
def cleanup(self):
"""Clean up the cloud resource and any temporary configuration files after tests complete."""
if self.container_name:
docker_rm(self.args, self.container_name)
super(ACMEProvider, self).cleanup()
def _setup_dynamic(self):
"""Create a ACME test container using docker."""
container_id = get_docker_container_id()
self.container_name = self.DOCKER_SIMULATOR_NAME
results = docker_inspect(self.args, self.container_name)
if results and not results[0].get('State', {}).get('Running'):
docker_rm(self.args, self.container_name)
results = []
if results:
display.info('Using the existing ACME docker test container.', verbosity=1)
else:
display.info('Starting a new ACME docker test container.', verbosity=1)
if not self.args.docker and not container_id:
# publish the simulator ports when not running inside docker
publish_ports = [
'-p', '5000:5000', # control port for flask app in container
'-p', '14000:14000', # Pebble ACME CA
]
else:
publish_ports = []
if not os.environ.get('ANSIBLE_ACME_CONTAINER'):
docker_pull(self.args, self.image)
docker_run(
self.args,
self.image,
['-d', '--name', self.container_name] + publish_ports,
)
if self.args.docker:
acme_host = self.DOCKER_SIMULATOR_NAME
acme_host_ip = self._get_simulator_address()
elif container_id:
acme_host = self._get_simulator_address()
acme_host_ip = acme_host
display.info('Found ACME test container address: %s' % acme_host, verbosity=1)
else:
acme_host = get_docker_hostname()
acme_host_ip = acme_host
self._set_cloud_config('acme_host', acme_host)
self._wait_for_service('http', acme_host_ip, 5000, '', 'ACME controller')
self._wait_for_service('https', acme_host_ip, 14000, 'dir', 'ACME CA endpoint')
def _get_simulator_address(self):
return get_docker_container_ip(self.args, self.container_name)
def _setup_static(self):
raise NotImplementedError()
class ACMEEnvironment(CloudEnvironment):
"""ACME environment plugin. Updates integration test environment after delegation."""
def get_environment_config(self):
"""
:rtype: CloudEnvironmentConfig
"""
ansible_vars = dict(
acme_host=self._get_cloud_config('acme_host'),
)
return CloudEnvironmentConfig(
ansible_vars=ansible_vars,
)
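# Hedged usage sketch (args is an ansible-test TestConfig; the flow mirrors
# the classes above, and the container name comes from the code itself):
#
#   provider = ACMEProvider(args)
#   provider.setup()        # starts or reuses the acme-simulator container
#   # ... integration targets run, then ...
#   provider.cleanup()      # removes any container this provider started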
| gpl-3.0 |
angelapper/odoo | addons/hr_payroll/report/report_contribution_register.py | 47 | 2406 | #-*- coding:utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
import time
from datetime import datetime
from dateutil import relativedelta
from openerp.osv import osv
from openerp.report import report_sxw
class contribution_register_report(report_sxw.rml_parse):
def __init__(self, cr, uid, name, context):
super(contribution_register_report, self).__init__(cr, uid, name, context)
self.localcontext.update({
'get_payslip_lines': self._get_payslip_lines,
'sum_total': self.sum_total,
})
def set_context(self, objects, data, ids, report_type=None):
self.date_from = data['form'].get('date_from', time.strftime('%Y-%m-%d'))
self.date_to = data['form'].get('date_to', str(datetime.now() + relativedelta.relativedelta(months=+1, day=1, days=-1))[:10])
return super(contribution_register_report, self).set_context(objects, data, ids, report_type=report_type)
def sum_total(self):
return self.regi_total
def _get_payslip_lines(self, obj):
payslip_line = self.pool.get('hr.payslip.line')
payslip_lines = []
res = []
self.regi_total = 0.0
self.cr.execute("SELECT pl.id from hr_payslip_line as pl "\
"LEFT JOIN hr_payslip AS hp on (pl.slip_id = hp.id) "\
"WHERE (hp.date_from >= %s) AND (hp.date_to <= %s) "\
"AND pl.register_id = %s "\
"AND hp.state = 'done' "\
"ORDER BY pl.slip_id, pl.sequence",
(self.date_from, self.date_to, obj.id))
payslip_lines = [x[0] for x in self.cr.fetchall()]
for line in payslip_line.browse(self.cr, self.uid, payslip_lines):
res.append({
'payslip_name': line.slip_id.name,
'name': line.name,
'code': line.code,
'quantity': line.quantity,
'amount': line.amount,
'total': line.total,
})
self.regi_total += line.total
return res
class wrapped_report_contribution_register(osv.AbstractModel):
_name = 'report.hr_payroll.report_contributionregister'
_inherit = 'report.abstract_report'
_template = 'hr_payroll.report_contributionregister'
_wrapped_report_class = contribution_register_report
| agpl-3.0 |
Russell-IO/ansible | lib/ansible/modules/packaging/os/swupd.py | 37 | 8830 | #!/usr/bin/python
# (c) 2017, Alberto Murillo <alberto.murillo.silva@intel.com>
#
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: swupd
short_description: Manages updates and bundles in ClearLinux systems.
description:
- Manages updates and bundles with the swupd bundle manager, which is used by the
Clear Linux Project for Intel Architecture.
version_added: "2.3"
author: Alberto Murillo (@albertomurillo)
options:
contenturl:
description:
- URL pointing to the contents of available bundles.
If not specified, the contents are retrieved from clearlinux.org.
format:
description:
- The format suffix for version file downloads. For example [1,2,3,staging,etc].
If not specified, the default format is used.
manifest:
description:
      - The manifest contains information about the bundles at a certain version of the OS.
Specify a Manifest version to verify against that version or leave unspecified to
verify against the current version.
aliases: [release, version]
name:
description:
      - Name of the bundle to install or remove.
aliases: [bundle]
state:
description:
      - Indicates the desired bundle state. C(present) ensures the bundle
        is installed while C(absent) ensures the bundle is not installed.
default: present
choices: [present, absent]
update:
description:
- Updates the OS to the latest version.
url:
description:
- Overrides both I(contenturl) and I(versionurl).
verify:
description:
- Verify content for OS version.
versionurl:
description:
- URL for version string download.
'''
EXAMPLES = '''
- name: Update the OS to the latest version
swupd:
update: yes
- name: Installs the "foo" bundle
swupd:
name: foo
state: present
- name: Removes the "foo" bundle
swupd:
name: foo
state: absent
- name: Check integrity of filesystem
swupd:
verify: yes
- name: Downgrade OS to release 12920
swupd:
verify: yes
manifest: 12920
'''
RETURN = '''
stdout:
description: stdout of swupd
returned: always
type: string
stderr:
description: stderr of swupd
returned: always
type: string
'''
import os
from ansible.module_utils.basic import AnsibleModule
class Swupd(object):
FILES_NOT_MATCH = "files did not match"
FILES_REPLACED = "missing files were replaced"
FILES_FIXED = "files were fixed"
FILES_DELETED = "files were deleted"
def __init__(self, module):
# Fail if swupd is not found
self.module = module
self.swupd_cmd = module.get_bin_path("swupd", False)
if not self.swupd_cmd:
module.fail_json(msg="Could not find swupd.")
# Initialize parameters
for key in module.params.keys():
setattr(self, key, module.params[key])
# Initialize return values
self.changed = False
self.failed = False
self.msg = None
self.rc = None
self.stderr = ""
self.stdout = ""
def _run_cmd(self, cmd):
self.rc, self.stdout, self.stderr = self.module.run_command(cmd, check_rc=False)
def _get_cmd(self, command):
cmd = "%s %s" % (self.swupd_cmd, command)
if self.format:
cmd += " --format=%s" % self.format
if self.manifest:
cmd += " --manifest=%s" % self.manifest
if self.url:
cmd += " --url=%s" % self.url
else:
if self.contenturl and command != "check-update":
cmd += " --contenturl=%s" % self.contenturl
if self.versionurl:
cmd += " --versionurl=%s" % self.versionurl
return cmd
def _is_bundle_installed(self, bundle):
try:
os.stat("/usr/share/clear/bundles/%s" % bundle)
except OSError:
return False
return True
def _needs_update(self):
cmd = self._get_cmd("check-update")
self._run_cmd(cmd)
if self.rc == 0:
return True
if self.rc == 1:
return False
self.failed = True
self.msg = "Failed to check for updates"
def _needs_verify(self):
cmd = self._get_cmd("verify")
self._run_cmd(cmd)
if self.rc != 0:
self.failed = True
self.msg = "Failed to check for filesystem inconsistencies."
if self.FILES_NOT_MATCH in self.stdout:
return True
return False
def install_bundle(self, bundle):
"""Installs a bundle with `swupd bundle-add bundle`"""
if self.module.check_mode:
self.module.exit_json(changed=not self._is_bundle_installed(bundle))
if self._is_bundle_installed(bundle):
self.msg = "Bundle %s is already installed" % bundle
return
cmd = self._get_cmd("bundle-add %s" % bundle)
self._run_cmd(cmd)
if self.rc == 0:
self.changed = True
self.msg = "Bundle %s installed" % bundle
return
if self.rc == 18:
self.msg = "Bundle name %s is invalid" % bundle
return
self.failed = True
self.msg = "Failed to install bundle %s" % bundle
def remove_bundle(self, bundle):
"""Removes a bundle with `swupd bundle-remove bundle`"""
if self.module.check_mode:
self.module.exit_json(changed=self._is_bundle_installed(bundle))
if not self._is_bundle_installed(bundle):
self.msg = "Bundle %s not installed"
return
cmd = self._get_cmd("bundle-remove %s" % bundle)
self._run_cmd(cmd)
if self.rc == 0:
self.changed = True
self.msg = "Bundle %s removed" % bundle
return
self.failed = True
self.msg = "Failed to remove bundle %s" % bundle
def update_os(self):
"""Updates the os with `swupd update`"""
if self.module.check_mode:
self.module.exit_json(changed=self._needs_update())
if not self._needs_update():
self.msg = "There are no updates available"
return
cmd = self._get_cmd("update")
self._run_cmd(cmd)
if self.rc == 0:
self.changed = True
self.msg = "Update successful"
return
self.failed = True
self.msg = "Failed to check for updates"
def verify_os(self):
"""Verifies filesystem against specified or current version"""
if self.module.check_mode:
self.module.exit_json(changed=self._needs_verify())
if not self._needs_verify():
self.msg = "No files where changed"
return
cmd = self._get_cmd("verify --fix")
self._run_cmd(cmd)
if self.rc == 0 and (self.FILES_REPLACED in self.stdout or self.FILES_FIXED in self.stdout or self.FILES_DELETED in self.stdout):
self.changed = True
self.msg = "Fix successful"
return
self.failed = True
self.msg = "Failed to verify the OS"
def main():
"""The main function."""
module = AnsibleModule(
argument_spec=dict(
contenturl=dict(type="str"),
format=dict(type="str"),
manifest=dict(aliases=["release", "version"], type="int"),
name=dict(aliases=["bundle"], type="str"),
state=dict(default="present", choices=["present", "absent"], type="str"),
update=dict(default=False, type="bool"),
url=dict(type="str"),
verify=dict(default=False, type="bool"),
versionurl=dict(type="str"),
),
required_one_of=[["name", "update", "verify"]],
mutually_exclusive=[["name", "update", "verify"]],
supports_check_mode=True
)
swupd = Swupd(module)
name = module.params["name"]
state = module.params["state"]
update = module.params["update"]
verify = module.params["verify"]
if update:
swupd.update_os()
elif verify:
swupd.verify_os()
elif state == "present":
swupd.install_bundle(name)
elif state == "absent":
swupd.remove_bundle(name)
else:
swupd.failed = True
if swupd.failed:
module.fail_json(msg=swupd.msg, stdout=swupd.stdout, stderr=swupd.stderr)
else:
module.exit_json(changed=swupd.changed, msg=swupd.msg, stdout=swupd.stdout, stderr=swupd.stderr)
if __name__ == '__main__':
main()
| gpl-3.0 |
alfayez/gnuradio | gr-digital/examples/ofdm/ofdm_mod_demod_test.py | 27 | 38561 | #!/usr/bin/env python
#
# Copyright 2005,2006,2008 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
from gnuradio import gr, ofdm_packet_utils
import gnuradio.gr.gr_threading as _threading
from gnuradio import eng_notation
from gnuradio.eng_option import eng_option
from optparse import OptionParser
import random, time, struct, sys, math, os
class my_top_block(gr.top_block):
def __init__(self, callback, options):
gr.top_block.__init__(self)
# hard-coded known symbol
ks1 = known_symbols_4512_1[0:options.occupied_tones]
ks2 = known_symbols_4512_2[0:options.occupied_tones]
self._rcvd_pktq = gr.msg_queue()
# accepts messages from the outside world
self.ofdm_mapper = gr.ofdm_bpsk_mapper(4, options.occupied_tones, options.fft_length, ks1, ks2)
self.ofdm_corr = gr.ofdm_correlator(options.occupied_tones, options.fft_length, 0, ks1, ks2)
self.ofdm_framer = gr.ofdm_frame_sink(self._rcvd_pktq, options.occupied_tones)
if 0: # set to 1 to put the correlator in the path to take over the signalling
self.connect((self.ofdm_mapper, 0), (self.ofdm_corr, 0))
self.connect((self.ofdm_corr, 0), (self.ofdm_framer, 0))
self.connect((self.ofdm_corr, 1), (self.ofdm_framer, 1))
self.connect((self.ofdm_mapper,0), gr.file_sink(gr.sizeof_gr_complex*options.fft_length, "ofdm_mapper.dat"))
self.connect((self.ofdm_corr,0), gr.file_sink(gr.sizeof_gr_complex*options.occupied_tones, "ofdm_corr.dat"))
self.connect((self.ofdm_corr,1), gr.file_sink(gr.sizeof_char, "ofdm_sig.dat"))
else:
self.connect((self.ofdm_mapper, 0), (self.ofdm_framer, 0))
self.connect((self.ofdm_mapper, 1), (self.ofdm_framer, 1))
self.connect((self.ofdm_mapper,0), gr.file_sink(gr.sizeof_gr_complex*options.fft_length, "ofdm_mapper.dat"))
self.connect((self.ofdm_mapper,1), gr.file_sink(gr.sizeof_char, "ofdm_sig.dat"))
self._watcher = _queue_watcher_thread(self._rcvd_pktq, callback)
def send_pkt(self, payload='', eof=False):
if eof:
msg = gr.message(1)
else:
pkt = ofdm_packet_utils.make_packet(payload, 1, 1, False, whiten=False)
msg = gr.message_from_string(pkt)
self.ofdm_mapper.msgq().insert_tail(msg)
class _queue_watcher_thread(_threading.Thread):
def __init__(self, rcvd_pktq, callback):
_threading.Thread.__init__(self)
self.setDaemon(1)
self.rcvd_pktq = rcvd_pktq
self.callback = callback
self.keep_running = True
self.start()
def run(self):
while self.keep_running:
msg = self.rcvd_pktq.delete_head()
ok, payload = ofdm_packet_utils.unmake_packet(msg.to_string(), whiten=False)
if self.callback:
self.callback(ok, payload)
# /////////////////////////////////////////////////////////////////////////////
# main
# /////////////////////////////////////////////////////////////////////////////
def main():
''' Use this program to tie the OFDM modulators straight into the frame sink, with or without
the correlator in between. This is for testing of the modulators and demodulators only without
receiver and sync functionality.'''
global n_rcvd, n_right
n_rcvd = 0
n_right = 0
def send_pkt(payload='', eof=False):
        return tb.send_pkt(payload, eof)
def rx_callback(ok, payload):
global n_rcvd, n_right
n_rcvd += 1
(pktno,) = struct.unpack('!H', payload[0:2])
if ok:
n_right += 1
print "ok: %r \t pktno: %d \t n_rcvd: %d \t n_right: %d" % (ok, pktno, n_rcvd, n_right)
printlst = list()
for x in payload[2:]:
t = hex(ord(x)).replace('0x', '')
if(len(t) == 1):
t = '0' + t
printlst.append(t)
printable = ''.join(printlst)
print printable
print "\n"
parser = OptionParser(option_class=eng_option, conflict_handler="resolve")
expert = parser.add_option_group("Expert")
parser.add_option("-s", "--size", type="eng_float", default=1450,
help="set packet size [default=%default]")
parser.add_option("-M", "--megabytes", type="eng_float", default=1.0,
help="set megabytes to transmit [default=%default]")
expert.add_option("", "--fft-length", type="intx", default=512,
help="set the number of FFT bins [default=%default]")
expert.add_option("", "--occupied-tones", type="intx", default=200,
help="set the number of occupied FFT bins [default=%default]")
expert.add_option("", "--cp-length", type="intx", default=128,
help="set the number of bits in the cyclic prefix [default=%default]")
expert.add_option("", "--fft-length", type="intx", default=512,
help="set the number of FFT bins [default=%default]")
expert.add_option("", "--occupied-tones", type="intx", default=200,
help="set the number of occupied FFT bins [default=%default]")
expert.add_option("", "--cp-length", type="intx", default=128,
help="set the number of bits in the cyclic prefix [default=%default]")
(options, args) = parser.parse_args ()
# build the graph
tb = my_top_block(rx_callback, options)
tb.start() # start flow graph
# generate and send packets
nbytes = int(1e6 * options.megabytes)
n = 0
pktno = 0
pkt_size = int(options.size)
while n < nbytes:
#r = ''.join([chr(random.randint(0,255)) for i in range(pkt_size-2)])
#pkt_contents = struct.pack('!H', pktno) + r
pkt_contents = struct.pack('!H', pktno) + (pkt_size - 2) * chr(pktno & 0xff)
send_pkt(pkt_contents)
n += pkt_size
pktno += 1
send_pkt(eof=True)
tb.wait() # wait for it to finish
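# Hedged, runnable sketch of the payload framing used in main() above
# (Python 2, matching this file): the packet number rides in the first
# two bytes as a big-endian unsigned short.
def _demo_payload(pktno=7, pkt_size=16):
    payload = struct.pack('!H', pktno) + (pkt_size - 2) * chr(pktno & 0xff)
    (unpacked,) = struct.unpack('!H', payload[0:2])
    assert unpacked == pktno
    return payload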
known_symbols_4512_1 = [-1, 1, 1, 1, -1, -1, 1, -1, 1, -1, 1, 1, 1, 1, 1, -1, -1, 1, 1, -1, -1, 1, 1, -1, 1, -1, 1, -1, 1, 1, -1, -1, 1, 1, 1, 1, -1, 1, 1, -1, -1, 1, 1, -1, -1, 1, -1, 1, -1, -1, 1, -1, 1, 1, 1, 1, 1, 1, 1, -1, -1, -1, 1, -1, 1, 1, -1, 1, 1, 1, 1, 1, -1, -1, -1, 1, 1, -1, -1, 1, -1, -1, 1, 1, -1, 1, -1, -1, -1, -1, 1, 1, -1, 1, 1, 1, 1, 1, -1, 1, -1, -1, -1, 1, 1, -1, 1, -1, 1, 1, -1, 1, -1, 1, 1, -1, 1, 1, 1, -1, -1, -1, 1, -1, -1, -1, 1, 1, 1, -1, -1, -1, -1, 1, -1, 1, 1, -1, 1, -1, 1, -1, -1, -1, -1, -1, -1, -1, 1, 1, -1, 1, -1, -1, -1, 1, -1, 1, -1, 1, -1, 1, -1, -1, -1, -1, 1, -1, -1, -1, 1, -1, -1, -1, -1, -1, -1, -1, 1, 1, -1, -1, 1, 1, -1, 1, -1, -1, -1, 1, 1, 1, -1, 1, -1, 1, -1, 1, -1, 1, 1, -1, -1, 1, 1, 1, -1, -1, 1, 1, -1, 1, -1, -1, 1, 1, -1, -1, 1, -1, -1, -1, 1, -1, 1, -1, 1, 1, -1, 1, 1, 1, 1, -1, 1, 1, -1, 1, -1, 1, 1, -1, -1, 1, -1, -1, 1, -1, 1, -1, 1, -1, 1, 1, -1, -1, 1, 1, 1, -1, 1, -1, 1, 1, 1, 1, 1, 1, -1, 1, -1, 1, -1, -1, -1, 1, -1, 1, -1, 1, 1, -1, 1, -1, 1, -1, -1, -1, 1, -1, 1, -1, 1, -1, -1, -1, -1, 1, -1, -1, -1, 1, 1, 1, -1, 1, 1, 1, -1, -1, 1, 1, -1, -1, -1, 1, -1, -1, -1, -1, -1, -1, 1, -1, 1, -1, 1, -1, 1, -1, -1, -1, 1, -1, -1, 1, 1, -1, 1, 1, 1, 1, 1, 1, -1, 1, -1, -1, 1, -1, -1, 1, 1, 1, -1, 1, 1, 1, 1, 1, -1, 1, -1, -1, -1, -1, -1, -1, 1, 1, 1, 1, 1, -1, 1, 1, 1, 1, -1, -1, -1, 1, -1, -1, 1, 1, -1, -1, 1, -1, -1, -1, 1, -1, 1, -1, -1, 1, 1, -1, 1, 1, 1, -1, 1, 1, 1, -1, -1, -1, -1, -1, -1, -1, 1, 1, -1, 1, 1, -1, 1, 1, -1, 1, -1, 1, -1, -1, -1, -1, -1, 1, -1, 1, 1, 1, 1, -1, 1, -1, 1, -1, -1, 1, 1, -1, 1, -1, 1, 1, 1, 1, 1, 1, -1, 1, 1, -1, 1, 1, 1, 1, 1, 1, -1, 1, 1, 1, -1, -1, 1, -1, -1, 1, 1, 1, 1, 1, -1, -1, -1, 1, -1, -1, -1, 1, 1, 1, 1, -1, 1, 1, 1, 1, 1, 1, 1, -1, 1, -1, 1, -1, 1, -1, 1, -1, 1, 1, -1, 1, 1, 1, 1, 1, 1, -1, -1, 1, 1, 1, -1, -1, 1, -1, -1, -1, 1, -1, -1, 1, 1, -1, -1, -1, -1, 1, 1, 1, -1, -1, 1, 1, -1, -1, 1, 1, 1, 1, 1, -1, 1, -1, -1, 1, 1, 1, 1, -1, -1, -1, -1, -1, 1, -1, 1, -1, -1, -1, -1, 1, 1, 1, -1, -1, 1, 1, 1, -1, -1, 1, -1, -1, 1, -1, 1, -1, 1, -1, 1, -1, 1, -1, -1, -1, -1, 1, -1, 1, -1, -1, -1, -1, -1, -1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, -1, 1, -1, 1, 1, -1, 1, -1, -1, -1, 1, -1, 1, -1, 1, -1, -1, -1, -1, 1, -1, 1, -1, 1, -1, 1, -1, -1, -1, 1, 1, 1, 1, 1, 1, 1, -1, -1, -1, -1, -1, 1, -1, -1, -1, 1, -1, -1, -1, -1, -1, -1, -1, 1, -1, 1, 1, -1, -1, -1, -1, 1, -1, -1, 1, -1, -1, 1, -1, -1, -1, 1, 1, 1, 1, 1, -1, -1, 1, -1, -1, -1, -1, -1, -1, -1, -1, 1, -1, -1, -1, 1, -1, 1, 1, -1, -1, 1, -1, -1, -1, -1, 1, 1, -1, -1, 1, -1, -1, -1, -1, -1, -1, -1, 1, -1, -1, 1, 1, -1, 1, -1, -1, -1, -1, -1, 1, -1, -1, -1, 1, 1, -1, 1, 1, -1, 1, -1, 1, -1, -1, -1, -1, 1, 1, -1, 1, 1, -1, 1, -1, -1, -1, -1, -1, -1, -1, 1, 1, 1, 1, 1, -1, -1, -1, -1, 1, 1, 1, 1, -1, -1, -1, 1, -1, 1, -1, 1, 1, 1, 1, -1, -1, 1, 1, -1, 1, -1, 1, -1, 1, -1, -1, 1, 1, 1, 1, 1, 1, 1, 1, -1, 1, -1, -1, 1, -1, -1, 1, -1, 1, 1, 1, 1, 1, 1, 1, -1, 1, -1, 1, 1, -1, 1, -1, 1, 1, 1, 1, 1, -1, -1, -1, -1, 1, -1, -1, 1, -1, 1, -1, 1, -1, -1, -1, -1, -1, 1, 1, 1, -1, 1, 1, -1, 1, 1, 1, -1, -1, 1, -1, -1, -1, -1, 1, -1, 1, 1, -1, -1, -1, -1, 1, -1, -1, 1, -1, 1, -1, -1, -1, 1, -1, -1, -1, 1, 1, -1, -1, 1, -1, -1, -1, -1, -1, 1, 1, -1, 1, -1, -1, 1, 1, -1, -1, -1, 1, 1, -1, -1, -1, 1, 1, -1, 1, -1, -1, 1, 1, 1, 1, 1, -1, 1, -1, 1, 1, -1, -1, 1, -1, 1, -1, 1, 1, -1, 1, -1, -1, 1, -1, 1, -1, -1, -1, -1, -1, 1, -1, -1, 1, 1, 1, 1, 1, -1, -1, 1, 1, 1, -1, 1, -1, 1, -1, 1, 1, 1, -1, 1, 1, -1, 1, 1, -1, 1, 1, 1, 1, -1, 1, 1, -1, -1, -1, 1, 1, -1, 1, 1, -1, 1, 
-1, -1, 1, 1, -1, 1, 1, 1, -1, -1, 1, -1, 1, -1, 1, 1, -1, 1, -1, 1, -1, 1, -1, 1, 1, 1, -1, -1, -1, 1, 1, 1, -1, 1, -1, 1, -1, 1, 1, 1, 1, 1, 1, 1, -1, 1, -1, 1, -1, -1, -1, 1, -1, 1, 1, 1, -1, 1, -1, 1, -1, -1, 1, -1, 1, 1, 1, 1, 1, -1, 1, 1, -1, -1, -1, 1, -1, 1, 1, -1, 1, -1, 1, 1, 1, 1, -1, -1, -1, 1, 1, -1, -1, -1, -1, -1, -1, -1, 1, -1, -1, -1, 1, 1, 1, 1, 1, -1, -1, 1, -1, 1, -1, 1, 1, -1, 1, 1, -1, 1, -1, -1, 1, 1, -1, -1, -1, 1, -1, 1, -1, -1, 1, 1, 1, 1, 1, -1, 1, -1, 1, -1, 1, 1, -1, -1, 1, 1, 1, 1, 1, 1, -1, 1, -1, -1, -1, -1, -1, -1, 1, -1, 1, -1, 1, -1, -1, -1, -1, -1, -1, -1, 1, 1, 1, 1, 1, -1, 1, -1, 1, 1, 1, 1, 1, 1, -1, -1, 1, 1, 1, 1, -1, -1, 1, -1, -1, 1, -1, -1, -1, 1, -1, 1, -1, -1, 1, 1, 1, 1, -1, 1, -1, 1, -1, 1, -1, 1, -1, -1, -1, -1, 1, -1, -1, -1, -1, 1, -1, 1, 1, -1, -1, -1, 1, -1, -1, -1, 1, 1, 1, 1, 1, 1, 1, 1, -1, -1, -1, 1, 1, 1, -1, 1, -1, 1, -1, -1, -1, 1, -1, 1, -1, -1, 1, -1, -1, -1, -1, 1, 1, 1, -1, -1, 1, 1, -1, -1, 1, -1, -1, -1, -1, -1, -1, -1, -1, 1, -1, 1, -1, 1, -1, -1, -1, 1, 1, 1, 1, 1, 1, -1, 1, -1, -1, 1, 1, 1, 1, -1, 1, 1, -1, 1, 1, -1, -1, -1, 1, 1, -1, -1, -1, 1, -1, 1, 1, 1, 1, -1, -1, 1, 1, 1, -1, 1, 1, -1, 1, -1, 1, 1, -1, -1, 1, 1, 1, 1, -1, -1, 1, -1, -1, 1, -1, -1, 1, -1, 1, -1, -1, 1, 1, 1, -1, -1, 1, 1, -1, -1, -1, -1, 1, -1, -1, -1, 1, -1, -1, -1, -1, 1, 1, -1, -1, 1, -1, -1, -1, -1, -1, 1, -1, -1, 1, 1, -1, 1, 1, 1, -1, -1, -1, 1, -1, -1, 1, -1, 1, 1, -1, -1, -1, 1, -1, -1, 1, -1, 1, 1, -1, 1, -1, 1, 1, 1, 1, 1, -1, -1, -1, -1, 1, 1, -1, -1, 1, 1, -1, 1, 1, -1, -1, -1, 1, -1, -1, -1, 1, -1, -1, -1, -1, -1, 1, 1, -1, 1, -1, -1, 1, 1, -1, 1, -1, 1, 1, 1, -1, 1, -1, 1, -1, -1, -1, 1, 1, 1, 1, 1, -1, 1, -1, -1, -1, 1, 1, -1, -1, -1, 1, -1, -1, -1, -1, -1, 1, -1, 1, 1, -1, -1, 1, 1, 1, 1, 1, 1, 1, -1, -1, -1, -1, -1, -1, 1, 1, -1, -1, -1, 1, -1, -1, -1, -1, -1, -1, -1, 1, 1, 1, 1, -1, 1, 1, 1, -1, 1, 1, -1, -1, -1, 1, 1, -1, -1, -1, -1, -1, -1, 1, -1, -1, 1, 1, -1, -1, 1, -1, -1, 1, 1, 1, -1, -1, 1, -1, -1, 1, 1, 1, -1, -1, 1, 1, -1, 1, -1, 1, -1, -1, 1, -1, -1, 1, -1, -1, 1, 1, 1, -1, -1, 1, 1, 1, 1, 1, 1, -1, -1, 1, -1, 1, -1, 1, 1, -1, -1, 1, -1, -1, -1, 1, -1, 1, 1, 1, 1, -1, -1, -1, 1, -1, 1, -1, 1, 1, -1, 1, -1, 1, -1, 1, 1, 1, -1, -1, -1, -1, 1, -1, 1, -1, 1, -1, -1, -1, -1, -1, -1, 1, -1, 1, 1, 1, 1, -1, 1, 1, 1, 1, 1, -1, 1, -1, -1, -1, 1, -1, -1, -1, 1, 1, -1, -1, -1, 1, -1, 1, 1, 1, 1, 1, 1, 1, -1, -1, 1, -1, 1, 1, 1, -1, -1, 1, -1, 1, -1, -1, 1, 1, 1, -1, -1, 1, -1, 1, 1, 1, 1, 1, 1, 1, 1, -1, -1, 1, 1, -1, 1, 1, 1, -1, -1, 1, 1, 1, -1, 1, -1, 1, 1, 1, -1, -1, -1, 1, -1, 1, -1, 1, 1, -1, -1, -1, -1, -1, -1, 1, 1, -1, -1, 1, 1, 1, 1, 1, 1, -1, -1, -1, -1, -1, -1, -1, -1, 1, -1, -1, 1, -1, -1, 1, 1, 1, -1, -1, -1, -1, -1, 1, 1, 1, 1, -1, 1, 1, -1, -1, -1, -1, -1, -1, -1, -1, 1, -1, 1, 1, -1, -1, -1, 1, -1, -1, 1, 1, 1, 1, -1, -1, -1, 1, 1, -1, -1, 1, 1, 1, 1, 1, 1, -1, -1, 1, 1, 1, -1, 1, 1, 1, 1, -1, -1, 1, -1, -1, -1, 1, 1, 1, 1, -1, -1, -1, -1, 1, -1, -1, 1, 1, 1, 1, 1, 1, -1, 1, -1, 1, 1, 1, -1, 1, 1, -1, -1, 1, -1, 1, -1, 1, -1, -1, 1, 1, -1, -1, -1, 1, -1, -1, 1, 1, 1, 1, 1, 1, 1, 1, -1, 1, -1, 1, -1, -1, 1, 1, 1, -1, 1, 1, -1, -1, -1, -1, -1, 1, -1, 1, 1, -1, -1, 1, 1, 1, -1, 1, -1, -1, -1, 1, 1, -1, 1, 1, 1, -1, -1, 1, -1, 1, 1, 1, -1, 1, 1, 1, 1, -1, 1, -1, -1, 1, -1, -1, -1, -1, 1, 1, -1, -1, 1, 1, -1, -1, 1, -1, 1, 1, -1, -1, 1, 1, -1, 1, 1, -1, -1, -1, -1, 1, 1, -1, -1, -1, -1, -1, -1, 1, -1, 1, -1, 1, 1, 1, -1, -1, -1, -1, 1, -1, -1, -1, 1, -1, 1, -1, -1, -1, -1, -1, -1, -1, 1, -1, 1, -1, -1, -1, 1, -1, -1, -1, 
1, -1, 1, 1, -1, 1, 1, -1, 1, -1, 1, 1, -1, 1, -1, 1, 1, -1, 1, 1, -1, 1, 1, 1, 1, -1, 1, -1, 1, -1, 1, 1, -1, -1, 1, -1, 1, 1, -1, 1, -1, -1, -1, -1, -1, -1, 1, 1, 1, -1, 1, -1, 1, 1, -1, 1, 1, 1, 1, -1, 1, -1, 1, -1, -1, -1, -1, 1, 1, -1, -1, -1, 1, -1, 1, -1, -1, 1, -1, 1, -1, -1, -1, 1, 1, 1, -1, 1, 1, -1, -1, 1, -1, -1, 1, -1, 1, 1, -1, -1, -1, -1, -1, -1, 1, 1, -1, 1, -1, -1, -1, -1, -1, 1, -1, 1, 1, 1, 1, 1, 1, -1, 1, -1, 1, -1, -1, -1, 1, -1, 1, 1, -1, -1, -1, -1, 1, -1, -1, -1, 1, 1, 1, 1, -1, -1, 1, -1, 1, 1, -1, -1, 1, -1, -1, 1, -1, 1, -1, -1, 1, -1, -1, -1, 1, 1, 1, -1, 1, 1, -1, -1, -1, 1, -1, 1, -1, 1, -1, 1, -1, -1, 1, 1, -1, -1, -1, -1, 1, -1, 1, 1, 1, 1, -1, 1, 1, 1, -1, -1, -1, 1, 1, -1, -1, 1, 1, -1, 1, 1, 1, 1, 1, -1, 1, -1, 1, 1, 1, -1, -1, -1, 1, -1, 1, 1, 1, 1, -1, 1, 1, 1, -1, 1, -1, 1, -1, -1, -1, 1, 1, -1, 1, 1, 1, 1, -1, -1, -1, -1, -1, 1, -1, 1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 1, 1, -1, 1, -1, 1, 1, -1, -1, -1, 1, 1, 1, 1, -1, 1, -1, -1, 1, -1, 1, -1, -1, -1, 1, 1, -1, -1, 1, -1, 1, -1, 1, 1, 1, -1, -1, 1, 1, -1, 1, 1, -1, 1, -1, -1, -1, -1, -1, 1, -1, 1, 1, 1, -1, 1, -1, 1, -1, -1, -1, 1, 1, 1, -1, 1, 1, 1, 1, -1, -1, 1, -1, 1, 1, 1, 1, 1, 1, 1, -1, 1, -1, 1, 1, 1, 1, -1, 1, 1, -1, 1, 1, -1, 1, 1, 1, -1, 1, 1, 1, -1, 1, -1, -1, -1, -1, -1, 1, -1, -1, -1, 1, 1, -1, 1, 1, -1, 1, -1, 1, 1, 1, 1, 1, -1, 1, -1, -1, 1, 1, -1, -1, -1, -1, -1, -1, 1, 1, -1, 1, -1, -1, 1, 1, -1, -1, -1, 1, 1, 1, 1, -1, -1, -1, -1, 1, -1, 1, 1, -1, -1, -1, -1, -1, 1, -1, 1, -1, 1, -1, 1, 1, 1, -1, 1, -1, 1, 1, -1, 1, -1, -1, 1, -1, -1, -1, -1, 1, 1, 1, -1, -1, 1, 1, 1, 1, -1, -1, -1, 1, 1, -1, -1, 1, -1, -1, -1, -1, -1, -1, 1, 1, -1, -1, 1, 1, 1, -1, -1, -1, 1, 1, 1, 1, 1, -1, -1, -1, 1, 1, -1, 1, -1, 1, 1, 1, -1, 1, -1, 1, -1, -1, 1, -1, 1, 1, 1, -1, 1, -1, 1, -1, 1, 1, 1, -1, -1, 1, 1, 1, 1, -1, 1, -1, 1, -1, -1, -1, 1, 1, 1, -1, -1, 1, -1, 1, -1, -1, -1, 1, -1, 1, 1, 1, -1, -1, 1, -1, 1, -1, 1, 1, -1, 1, 1, 1, -1, 1, 1, 1, -1, 1, -1, -1, -1, -1, -1, -1, 1, 1, 1, 1, 1, -1, -1, -1, 1, -1, -1, -1, -1, -1, 1, -1, -1, 1, -1, 1, -1, 1, 1, 1, 1, 1, -1, 1, 1, 1, -1, -1, 1, 1, -1, 1, 1, -1, 1, -1, -1, 1, -1, -1, -1, 1, 1, 1, -1, 1, 1, 1, 1, 1, -1, -1, -1, -1, -1, 1, -1, 1, -1, 1, 1, 1, -1, 1, 1, -1, 1, 1, 1, -1, -1, -1, 1, -1, -1, -1, 1, 1, 1, -1, 1, -1, -1, 1, -1, -1, 1, 1, 1, 1, -1, 1, -1, 1, -1, -1, 1, 1, 1, 1, 1, 1, -1, 1, 1, -1, 1, 1, -1, 1, -1, -1, -1, 1, -1, 1, -1, -1, 1, -1, 1, 1, -1, -1, -1, 1, -1, -1, -1, 1, -1, -1, 1, 1, 1, 1, -1, 1, -1, -1, -1, 1, -1, 1, -1, 1, -1, 1, 1, 1, -1, -1, 1, -1, -1, 1, 1, -1, -1, 1, -1, 1, -1, 1, -1, -1, -1, 1, 1, -1, 1, 1, 1, 1, -1, 1, 1, 1, -1, -1, -1, 1, 1, -1, 1, 1, -1, 1, -1, 1, 1, -1, -1, -1, 1, -1, 1, 1, 1, -1, -1, -1, -1, -1, -1, 1, 1, -1, -1, -1, -1, 1, 1, 1, -1, -1, -1, 1, -1, 1, -1, 1, 1, -1, 1, 1, -1, -1, 1, -1, -1, -1, 1, 1, -1, -1, -1, 1, 1, -1, -1, -1, -1, 1, 1, -1, -1, 1, -1, 1, -1, -1, 1, 1, 1, 1, 1, 1, -1, -1, 1, -1, 1, -1, -1, -1, -1, 1, -1, 1, 1, 1, 1, -1, 1, -1, 1, 1, 1, -1, -1, 1, 1, 1, 1, 1, -1, 1, 1, -1, -1, 1, 1, 1, 1, 1, 1, 1, -1, -1, 1, -1, -1, 1, -1, -1, 1, -1, -1, -1, -1, 1, 1, -1, -1, 1, -1, 1, 1, -1, -1, 1, -1, -1, -1, 1, 1, -1, 1, 1, -1, 1, 1, -1, 1, 1, 1, -1, -1, -1, 1, 1, -1, 1, -1, 1, 1, 1, -1, 1, 1, 1, 1, 1, -1, 1, -1, 1, -1, 1, -1, -1, -1, 1, -1, 1, -1, -1, 1, 1, 1, 1, -1, 1, -1, -1, -1, 1, -1, 1, 1, -1, -1, -1, 1, -1, 1, -1, -1, 1, -1, -1, -1, 1, -1, -1, -1, 1, -1, -1, 1, 1, 1, -1, -1, 1, -1, 1, -1, 1, -1, -1, -1, -1, 1, 1, -1, 1, 1, 1, -1, -1, -1, -1, -1, 1, -1, 1, 1, -1, -1, 1, 1, -1, 1, 1, -1, -1, -1, 1, -1, 
-1, 1, -1, 1, -1, 1, -1, -1, -1, -1, 1, -1, 1, 1, 1, 1, -1, 1, -1, 1, 1, -1, 1, 1, 1, -1, -1, 1, -1, -1, -1, 1, -1, -1, -1, 1, -1, -1, -1, 1, 1, 1, 1, -1, -1, 1, 1, 1, -1, 1, 1, -1, -1, 1, -1, 1, -1, -1, -1, -1, 1, -1, 1, -1, 1, 1, -1, 1, -1, -1, 1, -1, 1, -1, 1, -1, 1, 1, 1, -1, 1, 1, -1, 1, -1, -1, 1, -1, 1, -1, 1, -1, 1, 1, -1, 1, 1, -1, -1, 1, 1, 1, -1, -1, -1, 1, 1, -1, -1, -1, 1, 1, 1, -1, 1, -1, 1, 1, 1, -1, 1, 1, -1, -1, 1, -1, 1, -1, 1, 1, 1, -1, 1, -1, -1, 1, 1, 1, 1, 1, -1, 1, -1, -1, -1, -1, -1, 1, -1, 1, -1, -1, -1, -1, -1, 1, -1, -1, -1, 1, 1, -1, -1, 1, 1, -1, -1, -1, 1, -1, -1, 1, 1, 1, 1, -1, -1, 1, -1, -1, 1, -1, -1, -1, 1, 1, -1, -1, 1, 1, -1, 1, 1, 1, 1, -1, -1, 1, 1, 1, -1, 1, 1, -1, 1, 1, -1, -1, 1, 1, -1, 1, 1, -1, 1, -1, -1, -1, 1, -1, 1, 1, -1, -1, 1, -1, -1, -1, -1, 1, 1, 1, -1, 1, 1, -1, -1, -1, 1, -1, -1, 1, -1, 1, -1, 1, 1, -1, -1, -1, 1, -1, 1, -1, -1, -1, 1, 1, -1, -1, 1, 1, -1, -1, 1, 1, 1, 1, 1, -1, 1, -1, -1, -1, 1, 1, 1, -1, -1, -1, 1, 1, 1, -1, -1, 1, 1, 1, -1, 1, -1, -1, -1, -1, -1, 1, 1, 1, -1, -1, -1, 1, 1, 1, -1, 1, -1, 1, -1, -1, -1, -1, 1, -1, -1, -1, 1, 1, 1, -1, 1, 1, -1, -1, 1, -1, 1, -1, -1, 1, 1, -1, -1, 1, -1, 1, -1, 1, -1, -1, 1, -1, -1, 1, -1, 1, 1, 1, -1, 1, -1, -1, -1, -1, 1, -1, 1, -1, -1, -1, -1, 1, 1, 1, -1, -1, -1, -1, -1, 1, -1, -1, 1, -1, 1, 1, 1, 1, -1, -1, 1, 1, -1, 1, 1, 1, -1, 1, -1, -1, -1, -1, 1, -1, 1, 1, -1, -1, -1, -1, -1, -1, -1, 1, 1, 1, -1, 1, -1, 1, 1, -1, 1, -1, -1, -1, -1, -1, -1, 1, -1, 1, 1, -1, 1, -1, 1, 1, 1, -1, -1, -1, 1, 1, -1, 1, 1, -1, 1, 1, -1, 1, 1, -1, 1, -1, 1, -1, -1, 1, -1, 1, -1, -1, -1, 1, 1, -1, 1, 1, -1, -1, -1, 1, 1, -1, -1, 1, -1, 1, -1, -1, 1, 1, 1, 1, -1, 1, -1, -1, 1, -1, 1, -1, 1, 1, 1, 1, -1, 1, -1, -1, -1, 1, -1, 1, 1, -1, 1, 1, -1, -1, -1, 1, -1, 1, -1, 1, 1, 1, -1, -1, -1, -1, 1, 1, -1, 1, 1, 1, 1, -1, -1, -1, -1, 1, 1, 1, 1, 1, -1, 1, 1, -1, 1, -1, -1, 1, 1, 1, -1, 1, 1, 1, 1, 1, 1, 1, -1, -1, 1, 1, -1, 1, 1, -1, -1, 1, 1, -1, -1, -1, -1, -1, -1, 1, 1, -1, -1, 1, -1, 1, 1, 1, 1, -1, 1, -1, 1, 1, 1, 1, -1, -1, -1, -1, -1, 1, 1, -1, 1, 1, 1, -1, -1, 1, 1, -1, -1, 1, -1, -1, -1, -1, -1, -1, -1, -1, 1, -1, 1, 1, 1, 1, -1, -1, 1, 1, -1, 1, 1, -1, -1, 1, 1, 1, -1, 1, -1, -1, 1, -1, -1, 1, 1, 1, -1, -1, -1, -1, -1, -1, 1, -1, -1, -1, -1, 1, -1, -1, 1, 1, -1, 1, 1, 1, -1, -1, -1, 1, 1, 1, 1, -1, -1, 1, 1, -1, -1, -1, 1, -1, -1, -1, 1, 1, -1, -1, -1, 1, -1, 1, -1, -1, 1, -1, 1, 1, -1, 1, -1, 1, 1, 1, -1, 1, -1, -1, 1, 1, -1, 1, 1, -1, 1, -1, 1, -1, -1, -1, 1, 1, -1, -1, 1, -1, -1, -1, -1, 1, -1, -1, 1, -1, 1, 1, 1, -1, -1, -1, -1, 1, -1, 1, 1, -1, -1, -1, -1, 1, 1, -1, 1, 1, -1, -1, 1, 1, 1, -1, -1, -1, -1, -1, 1, -1, 1, -1, 1, 1, -1, -1, -1, 1, 1, 1, -1, 1, 1, 1, -1, 1, -1, 1, -1, -1, 1, 1, -1, -1, -1, 1, 1, 1, 1, -1, -1, -1, -1, 1, -1, -1, 1, 1, -1, -1, 1, 1, 1, 1, 1, 1, -1, 1, -1, 1, -1, -1, 1, 1, -1, -1, 1, -1, 1, -1, -1, 1, 1, 1, 1, 1, -1, -1, 1, 1, -1, -1, -1, -1, -1, -1, 1, 1, -1, -1, -1, -1, -1, -1, 1, -1, -1, 1, 1, -1, 1, -1, 1, 1, 1, -1, 1, -1, 1, -1, 1, -1, 1, -1, -1, 1, -1, 1, -1, 1, 1, 1, -1, 1, 1, 1, -1, -1, -1, -1, 1, 1, 1, -1, 1, 1, 1, 1, 1, -1, 1, 1, 1, 1, 1, -1, 1, 1, -1, 1, -1, 1, 1, -1, -1, 1, -1, 1, -1, -1, 1, 1, 1, 1, 1, 1, 1, -1, -1, -1, 1, 1, 1, -1, -1, -1, 1, 1, -1, 1, -1, -1, 1, 1, -1, -1, 1, 1, -1, -1, -1, 1, 1, -1, 1, 1, -1, -1, -1, 1, 1, 1, 1, 1, -1, -1, 1, -1, -1, 1, -1, 1, 1, 1, -1, 1, 1, 1, 1, -1, -1, 1, 1, -1, -1, -1, -1, 1, 1, 1, -1, -1, 1, -1, 1, -1, -1, -1, 1, -1, 1, 1, 1, -1, -1, -1, -1, 1, -1, -1, 1, 1, 1, -1, 1, -1, 1, 1, 1, -1, 1, 1, -1, 1, -1, 1, 1, -1, 1, 
1, -1, 1, 1, 1, -1, 1, 1, 1, -1, -1, -1, 1, -1, 1, 1, 1, 1, 1, -1, 1, 1, 1, -1, -1, 1, 1, 1, 1, 1, -1, -1, 1, 1, -1, -1, -1, -1, 1, -1, 1, -1, -1, -1, -1, -1, -1, 1, -1, -1, 1, -1, 1, -1, 1, 1, -1, -1, 1, 1, -1, -1, -1, -1, 1, 1, 1, -1, 1, 1, -1, 1, 1, -1, -1, -1, 1, 1, -1, 1, 1, 1, 1, -1, 1, -1, 1, -1, -1, -1, -1, 1, 1, 1, 1, -1, -1, 1, -1, 1, 1, -1, 1, 1, 1, 1, -1, -1, 1, -1, -1, 1, -1, -1, 1, -1, 1, 1, -1, -1, 1, 1, -1, 1, -1, 1, 1, 1, 1, -1, -1, -1, 1, -1, -1, -1, -1, -1, 1, -1, -1, -1, -1, 1, -1, -1, 1, 1, -1, -1, 1, 1, -1, -1, -1, -1, -1, -1, 1, 1, -1, -1, 1, 1, 1, 1, -1, -1, -1, -1, -1, 1, -1, 1, -1, -1, 1, -1, 1, 1, 1, 1, 1, -1, 1, 1, -1, -1, -1, -1, 1, 1, 1, -1, -1, 1, 1, -1, 1, 1, 1, -1, -1, -1, 1, -1, -1, 1, -1, -1, 1, 1, -1, -1, -1, -1, 1, 1, 1, -1, -1, 1, 1, -1, 1, -1, 1, -1, 1, -1, -1, 1, 1, 1, -1, 1, 1, 1, 1, -1, 1, 1, 1, 1, 1, -1, 1, 1, -1, 1, -1, -1, 1, -1, -1, 1, 1, 1, -1, -1, -1, 1, 1, -1, -1, -1, -1, -1, -1, -1, 1, 1, -1, -1, -1, 1, -1, 1, -1, -1, 1, 1, -1, -1, 1, 1, -1, 1, -1, -1, -1, 1, 1, 1, -1, 1, 1, -1, -1, -1, -1, 1, -1, -1, -1, 1, -1, -1, -1, 1, 1, -1, 1, 1, 1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 1, 1, 1, 1, 1, 1, -1, -1, -1, 1, -1, 1, 1, 1, -1, -1, 1, 1, 1, 1, -1, -1, 1, 1, 1, -1, 1, 1, -1, 1, 1, 1, 1, -1, -1, -1, -1, 1, -1, 1, -1, -1, 1, -1, 1, -1, 1, 1, 1, -1, -1, -1, -1, 1, 1, 1, -1, -1, -1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, -1, 1, 1, 1, 1, -1, -1, 1, 1, 1, 1, 1, -1, -1, 1, 1, 1, -1, -1, -1, -1, 1, 1, -1, 1, 1, -1, 1, 1, -1, -1, 1, -1, 1, 1, -1, -1, 1, -1, 1, 1, -1, 1, 1, 1, 1, 1, 1, -1, 1, -1, -1, -1, 1, -1, 1, 1, -1, -1, 1, 1, 1, 1, -1, -1, 1]
known_symbols_4512_2 = [1, 1, 1, -1, -1, -1, 1, -1, -1, -1, -1, -1, 1, -1, 1, -1, -1, 1, 1, 1, 1, -1, 1, -1, -1, 1, -1, -1, 1, -1, -1, 1, 1, -1, -1, 1, 1, 1, 1, 1, -1, 1, 1, 1, -1, 1, -1, -1, -1, -1, 1, 1, -1, -1, 1, -1, 1, 1, 1, 1, 1, 1, 1, -1, -1, 1, 1, 1, -1, 1, -1, -1, 1, 1, -1, -1, -1, -1, 1, 1, -1, -1, 1, -1, 1, 1, -1, 1, 1, -1, -1, 1, 1, -1, 1, 1, -1, 1, 1, 1, -1, -1, 1, 1, 1, -1, -1, 1, 1, 1, 1, 1, 1, 1, -1, 1, -1, -1, -1, 1, -1, -1, -1, -1, -1, 1, -1, 1, 1, 1, -1, 1, -1, 1, -1, -1, 1, 1, 1, -1, 1, 1, 1, -1, 1, 1, 1, 1, -1, 1, 1, -1, -1, 1, 1, -1, 1, 1, -1, -1, 1, 1, -1, -1, 1, 1, -1, 1, 1, 1, 1, 1, 1, -1, 1, -1, -1, -1, -1, 1, -1, -1, 1, -1, -1, 1, -1, -1, 1, 1, 1, -1, 1, -1, 1, 1, 1, -1, 1, -1, 1, -1, -1, 1, 1, 1, -1, 1, -1, -1, -1, 1, 1, 1, -1, 1, -1, 1, 1, -1, -1, 1, 1, -1, 1, 1, -1, 1, -1, -1, -1, -1, -1, 1, -1, -1, -1, 1, 1, -1, 1, -1, -1, -1, -1, 1, 1, -1, -1, -1, 1, 1, -1, 1, 1, -1, -1, 1, -1, 1, 1, -1, -1, 1, 1, -1, -1, -1, -1, 1, 1, 1, -1, -1, -1, 1, -1, -1, -1, 1, 1, 1, 1, -1, -1, 1, -1, -1, 1, 1, -1, 1, -1, -1, -1, 1, -1, 1, 1, -1, 1, -1, -1, 1, -1, -1, 1, -1, 1, 1, -1, -1, -1, -1, -1, 1, 1, -1, -1, -1, -1, 1, 1, -1, 1, 1, -1, -1, -1, 1, -1, -1, 1, 1, 1, 1, 1, 1, -1, -1, 1, -1, 1, -1, 1, -1, -1, -1, 1, 1, -1, -1, -1, -1, 1, -1, 1, -1, 1, -1, 1, -1, 1, -1, 1, 1, -1, -1, -1, 1, 1, 1, -1, 1, -1, -1, 1, 1, 1, 1, -1, -1, 1, -1, 1, -1, 1, 1, 1, 1, -1, -1, 1, -1, 1, -1, -1, 1, 1, -1, 1, 1, -1, -1, -1, -1, 1, -1, 1, -1, -1, -1, 1, 1, -1, -1, -1, 1, 1, -1, 1, -1, -1, -1, 1, 1, -1, 1, 1, -1, -1, -1, 1, 1, 1, -1, 1, 1, -1, -1, -1, 1, -1, -1, -1, -1, 1, 1, 1, -1, 1, 1, 1, -1, 1, -1, -1, 1, 1, 1, -1, 1, 1, 1, -1, -1, 1, 1, -1, -1, -1, -1, -1, 1, 1, 1, 1, 1, -1, -1, 1, -1, -1, 1, -1, 1, -1, -1, 1, 1, 1, 1, 1, 1, -1, -1, 1, 1, 1, 1, -1, -1, -1, -1, 1, 1, 1, -1, 1, -1, 1, 1, -1, -1, 1, -1, -1, 1, 1, 1, -1, -1, 1, 1, 1, 1, 1, -1, 1, -1, -1, 1, -1, -1, -1, 1, 1, 1, 1, -1, 1, 1, -1, 1, 1, -1, 1, -1, 1, 1, 1, 1, -1, 1, -1, -1, 1, 1, 1, -1, 1, -1, 1, -1, -1, 1, -1, -1, 1, 1, -1, -1, -1, 1, 1, -1, 1, 1, 1, 1, -1, -1, -1, 1, 1, 1, 1, 1, 1, -1, 1, -1, -1, -1, 1, -1, -1, 1, 1, -1, 1, 1, -1, 1, -1, 1, 1, -1, 1, -1, -1, -1, -1, -1, -1, 1, 1, -1, -1, 1, -1, 1, -1, -1, -1, -1, 1, -1, -1, -1, 1, 1, -1, -1, -1, -1, -1, 1, 1, -1, -1, -1, 1, 1, -1, -1, -1, 1, 1, -1, 1, -1, 1, 1, -1, -1, 1, -1, -1, -1, -1, 1, 1, -1, 1, 1, -1, -1, -1, -1, 1, -1, -1, 1, 1, 1, 1, 1, 1, 1, -1, 1, 1, -1, 1, 1, -1, 1, -1, -1, -1, -1, 1, -1, 1, 1, 1, 1, 1, 1, -1, 1, -1, -1, 1, 1, 1, 1, -1, -1, 1, -1, -1, 1, 1, -1, -1, -1, 1, 1, -1, -1, -1, 1, -1, 1, -1, 1, -1, 1, -1, -1, -1, 1, 1, -1, 1, 1, -1, 1, -1, 1, -1, -1, 1, 1, 1, 1, 1, -1, -1, 1, 1, 1, -1, -1, -1, 1, 1, 1, 1, -1, -1, 1, -1, 1, -1, 1, 1, 1, 1, -1, -1, -1, -1, -1, 1, 1, -1, -1, 1, -1, -1, -1, 1, -1, 1, -1, -1, 1, -1, 1, 1, 1, -1, 1, -1, -1, -1, 1, 1, -1, 1, 1, -1, -1, -1, -1, 1, -1, 1, -1, 1, 1, 1, -1, 1, 1, 1, -1, 1, 1, 1, 1, -1, 1, -1, 1, 1, -1, 1, -1, -1, 1, -1, -1, 1, -1, -1, -1, 1, 1, -1, -1, -1, -1, 1, -1, 1, 1, 1, 1, -1, -1, 1, 1, -1, 1, 1, 1, -1, -1, -1, 1, -1, -1, -1, 1, -1, 1, 1, -1, 1, 1, 1, 1, 1, -1, 1, 1, 1, 1, 1, 1, -1, -1, 1, 1, 1, -1, -1, 1, -1, 1, -1, 1, -1, 1, -1, 1, 1, -1, -1, -1, 1, -1, 1, 1, -1, 1, -1, 1, 1, -1, -1, 1, 1, 1, 1, -1, 1, 1, 1, -1, 1, -1, -1, 1, -1, 1, 1, 1, -1, -1, 1, -1, -1, -1, 1, 1, 1, 1, -1, 1, -1, 1, 1, -1, -1, -1, -1, -1, -1, 1, -1, -1, 1, 1, 1, -1, -1, 1, -1, -1, 1, -1, -1, -1, -1, 1, 1, 1, -1, -1, -1, -1, 1, 1, -1, -1, 1, -1, -1, -1, 1, -1, -1, -1, -1, -1, 1, -1, 1, -1, 1, -1, -1, -1, 1, 1, -1, 1, -1, 1, -1, -1, 1, -1, -1, 1, 1, 
-1, -1, 1, 1, 1, 1, -1, -1, -1, -1, 1, -1, 1, 1, 1, 1, 1, -1, -1, -1, -1, 1, -1, 1, 1, -1, 1, 1, -1, -1, -1, 1, -1, 1, -1, -1, -1, 1, 1, -1, 1, 1, -1, 1, 1, 1, 1, 1, 1, 1, 1, 1, -1, 1, -1, -1, 1, -1, 1, 1, -1, -1, 1, 1, -1, 1, 1, 1, 1, 1, -1, 1, -1, 1, -1, 1, 1, -1, -1, 1, 1, -1, -1, -1, 1, -1, 1, 1, -1, 1, -1, -1, 1, 1, 1, -1, 1, 1, -1, -1, 1, -1, 1, -1, -1, -1, -1, -1, 1, -1, 1, 1, -1, 1, -1, -1, -1, -1, 1, -1, -1, 1, 1, -1, -1, 1, -1, 1, -1, -1, -1, -1, 1, 1, 1, -1, 1, 1, -1, -1, -1, -1, 1, 1, -1, 1, -1, -1, -1, -1, 1, 1, -1, -1, 1, -1, -1, -1, 1, -1, 1, 1, 1, 1, 1, 1, -1, 1, -1, 1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 1, 1, -1, -1, 1, -1, 1, 1, 1, -1, 1, -1, 1, -1, -1, 1, 1, -1, -1, -1, -1, 1, -1, -1, -1, 1, 1, -1, -1, 1, 1, -1, -1, -1, -1, 1, -1, 1, 1, -1, 1, -1, -1, 1, 1, 1, -1, 1, -1, -1, -1, -1, 1, 1, -1, 1, -1, -1, 1, -1, 1, 1, -1, 1, -1, -1, -1, -1, 1, 1, -1, 1, 1, 1, -1, -1, 1, 1, -1, -1, -1, 1, 1, -1, -1, -1, -1, -1, 1, 1, -1, 1, -1, 1, -1, -1, -1, 1, -1, 1, -1, 1, 1, -1, 1, 1, -1, 1, 1, -1, -1, -1, 1, 1, 1, 1, 1, -1, -1, -1, 1, -1, 1, 1, 1, 1, 1, 1, 1, -1, 1, 1, -1, -1, 1, 1, 1, -1, -1, 1, -1, 1, -1, -1, -1, -1, 1, 1, 1, -1, 1, 1, -1, 1, -1, -1, -1, 1, 1, 1, 1, 1, -1, 1, 1, 1, -1, 1, 1, 1, -1, 1, 1, -1, 1, 1, -1, -1, -1, -1, 1, 1, -1, -1, 1, -1, 1, -1, -1, 1, -1, 1, 1, -1, -1, 1, -1, 1, 1, 1, 1, -1, 1, -1, 1, -1, -1, 1, -1, 1, 1, 1, -1, 1, 1, 1, 1, 1, -1, -1, -1, 1, 1, -1, 1, 1, -1, 1, -1, 1, -1, -1, -1, -1, -1, -1, 1, 1, 1, 1, -1, 1, -1, 1, 1, 1, -1, -1, -1, 1, 1, -1, -1, 1, -1, 1, -1, -1, 1, -1, 1, -1, -1, 1, 1, 1, -1, -1, 1, 1, -1, 1, 1, 1, -1, 1, -1, -1, -1, 1, -1, 1, 1, -1, 1, 1, 1, -1, 1, 1, 1, 1, 1, -1, -1, -1, -1, 1, -1, -1, -1, -1, -1, -1, -1, -1, 1, 1, -1, 1, 1, -1, -1, -1, 1, 1, -1, 1, -1, 1, 1, 1, -1, 1, -1, -1, -1, 1, -1, 1, 1, -1, -1, 1, 1, -1, 1, 1, 1, -1, -1, -1, 1, 1, 1, 1, -1, 1, -1, 1, 1, -1, 1, 1, 1, -1, -1, 1, 1, -1, -1, -1, 1, 1, -1, 1, 1, -1, -1, -1, 1, -1, 1, -1, 1, 1, -1, 1, 1, -1, -1, 1, -1, -1, -1, 1, -1, -1, -1, 1, -1, 1, 1, -1, -1, 1, 1, -1, -1, -1, 1, 1, 1, 1, 1, -1, -1, -1, 1, 1, -1, 1, 1, 1, 1, -1, 1, 1, -1, 1, -1, 1, 1, 1, -1, 1, 1, 1, 1, 1, -1, 1, 1, -1, 1, 1, -1, 1, -1, 1, 1, -1, -1, 1, 1, 1, -1, 1, 1, -1, 1, 1, 1, -1, 1, -1, 1, -1, -1, -1, -1, 1, 1, -1, 1, 1, 1, -1, 1, -1, -1, -1, 1, 1, -1, -1, 1, 1, 1, -1, -1, -1, 1, 1, -1, 1, 1, -1, 1, 1, 1, -1, -1, -1, 1, -1, 1, 1, -1, -1, -1, 1, 1, 1, 1, -1, -1, 1, 1, -1, -1, -1, 1, -1, 1, -1, 1, -1, -1, -1, 1, -1, 1, -1, 1, 1, -1, 1, -1, -1, 1, 1, 1, -1, 1, 1, 1, 1, 1, -1, -1, 1, 1, 1, -1, -1, 1, -1, 1, 1, 1, -1, 1, 1, 1, 1, 1, -1, 1, -1, -1, -1, 1, 1, -1, -1, -1, -1, -1, 1, -1, 1, -1, 1, -1, 1, -1, -1, 1, -1, -1, 1, -1, 1, 1, 1, 1, 1, 1, -1, 1, 1, -1, 1, 1, 1, 1, 1, 1, -1, 1, 1, -1, 1, 1, 1, 1, -1, -1, -1, -1, -1, 1, 1, -1, 1, -1, 1, 1, -1, 1, -1, 1, 1, -1, 1, 1, 1, -1, -1, -1, -1, -1, -1, 1, 1, -1, -1, -1, -1, -1, -1, -1, -1, 1, -1, -1, -1, -1, 1, 1, -1, 1, -1, -1, 1, 1, -1, 1, -1, 1, 1, -1, 1, -1, -1, -1, 1, 1, 1, 1, -1, -1, 1, 1, 1, 1, -1, -1, -1, 1, -1, -1, -1, 1, 1, -1, -1, -1, -1, -1, 1, 1, -1, -1, 1, 1, -1, 1, 1, 1, -1, 1, -1, -1, -1, 1, 1, -1, -1, 1, -1, -1, -1, -1, 1, -1, 1, -1, -1, 1, -1, -1, -1, 1, 1, -1, -1, 1, 1, 1, -1, -1, 1, 1, 1, 1, 1, -1, -1, -1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, -1, 1, -1, 1, -1, -1, 1, -1, -1, 1, -1, -1, -1, -1, -1, -1, 1, -1, 1, -1, 1, -1, 1, 1, 1, -1, -1, 1, 1, -1, -1, 1, 1, -1, 1, 1, -1, -1, 1, -1, 1, -1, -1, 1, 1, -1, 1, 1, 1, 1, -1, 1, -1, 1, 1, 1, 1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 1, 1, 1, -1, -1, 1, 1, -1, -1, 1, 1, 1, -1, 1, -1, -1, -1, 1, -1, -1, 1, 
1, -1, -1, -1, 1, -1, -1, 1, -1, 1, 1, 1, -1, -1, -1, 1, 1, 1, -1, -1, 1, 1, -1, 1, -1, 1, -1, 1, -1, 1, 1, -1, 1, -1, -1, 1, 1, 1, 1, -1, 1, -1, 1, 1, -1, -1, 1, -1, -1, 1, 1, -1, -1, 1, -1, 1, 1, -1, 1, 1, -1, 1, -1, -1, 1, 1, -1, 1, 1, 1, -1, -1, -1, 1, -1, -1, -1, 1, -1, -1, -1, 1, 1, -1, 1, 1, -1, -1, -1, 1, 1, 1, -1, 1, 1, 1, -1, 1, 1, -1, -1, -1, 1, 1, 1, 1, -1, 1, 1, 1, -1, -1, 1, -1, 1, 1, -1, 1, 1, 1, -1, 1, 1, -1, 1, -1, 1, 1, -1, 1, -1, 1, 1, -1, -1, 1, -1, -1, -1, -1, 1, 1, -1, -1, 1, -1, -1, 1, 1, -1, -1, -1, 1, 1, 1, -1, -1, 1, -1, 1, 1, 1, 1, -1, 1, 1, 1, 1, -1, 1, -1, 1, 1, -1, -1, 1, 1, 1, -1, -1, -1, -1, 1, 1, -1, -1, 1, 1, 1, -1, 1, 1, 1, 1, -1, 1, -1, 1, 1, 1, 1, 1, -1, -1, -1, -1, 1, 1, -1, -1, 1, 1, 1, 1, 1, -1, 1, 1, -1, 1, 1, 1, -1, 1, -1, 1, -1, -1, -1, -1, -1, 1, -1, -1, -1, 1, -1, 1, -1, 1, 1, -1, 1, -1, -1, 1, 1, -1, 1, 1, 1, 1, 1, -1, 1, -1, -1, 1, 1, -1, -1, -1, 1, -1, 1, -1, 1, 1, 1, -1, -1, 1, -1, -1, -1, -1, -1, -1, 1, 1, -1, 1, 1, -1, -1, 1, -1, -1, 1, -1, -1, 1, 1, -1, 1, -1, -1, 1, -1, -1, -1, -1, 1, 1, -1, 1, -1, -1, 1, -1, 1, 1, 1, -1, 1, 1, 1, 1, -1, 1, -1, 1, 1, 1, -1, -1, 1, 1, -1, -1, 1, -1, -1, -1, 1, -1, -1, 1, 1, 1, 1, 1, -1, 1, 1, -1, 1, -1, -1, -1, -1, 1, -1, 1, -1, -1, 1, -1, 1, 1, -1, -1, 1, 1, -1, -1, -1, -1, 1, -1, 1, 1, 1, -1, 1, 1, 1, -1, -1, -1, 1, 1, -1, -1, -1, 1, -1, 1, 1, -1, -1, 1, -1, 1, 1, -1, 1, -1, 1, 1, 1, -1, 1, 1, 1, 1, 1, -1, 1, -1, -1, -1, -1, 1, 1, 1, -1, 1, -1, -1, 1, -1, 1, -1, 1, 1, -1, 1, 1, 1, 1, -1, -1, -1, -1, 1, 1, 1, 1, 1, 1, 1, -1, -1, 1, -1, -1, -1, 1, 1, 1, -1, -1, 1, 1, -1, 1, 1, 1, 1, 1, -1, 1, -1, 1, 1, -1, -1, 1, -1, -1, 1, -1, -1, -1, 1, -1, 1, -1, -1, -1, 1, -1, -1, -1, 1, 1, 1, 1, 1, -1, -1, -1, 1, 1, 1, 1, 1, 1, -1, 1, 1, -1, 1, 1, -1, -1, 1, 1, 1, 1, 1, -1, -1, 1, 1, 1, -1, -1, -1, -1, 1, 1, 1, 1, -1, 1, -1, 1, 1, -1, -1, -1, -1, 1, 1, 1, -1, -1, 1, 1, -1, -1, 1, -1, -1, -1, 1, 1, -1, 1, 1, -1, -1, -1, -1, 1, -1, -1, 1, -1, -1, -1, 1, 1, -1, -1, -1, -1, 1, 1, -1, 1, 1, 1, 1, 1, -1, -1, 1, 1, -1, -1, 1, 1, 1, -1, -1, 1, 1, 1, -1, 1, -1, -1, 1, 1, 1, -1, 1, -1, -1, -1, 1, -1, 1, -1, -1, 1, -1, -1, -1, -1, 1, -1, 1, -1, 1, -1, 1, -1, 1, 1, -1, 1, -1, 1, 1, 1, -1, 1, -1, -1, 1, -1, -1, -1, -1, 1, 1, 1, 1, -1, -1, 1, 1, 1, 1, -1, -1, -1, -1, 1, -1, 1, 1, 1, -1, 1, -1, -1, 1, 1, 1, 1, -1, -1, 1, 1, 1, -1, 1, 1, -1, 1, 1, -1, 1, 1, -1, 1, 1, -1, 1, 1, -1, 1, 1, -1, 1, -1, -1, -1, 1, 1, 1, 1, 1, 1, 1, -1, -1, 1, 1, 1, -1, -1, -1, 1, -1, 1, 1, -1, 1, -1, 1, 1, -1, 1, -1, -1, 1, 1, 1, 1, 1, -1, -1, -1, -1, 1, 1, 1, 1, -1, -1, -1, 1, 1, 1, 1, -1, -1, -1, -1, -1, 1, 1, 1, 1, -1, 1, -1, 1, -1, -1, -1, 1, 1, -1, -1, -1, -1, -1, 1, -1, 1, 1, 1, 1, -1, 1, 1, 1, -1, 1, -1, -1, -1, 1, -1, -1, -1, 1, -1, 1, -1, 1, 1, -1, -1, -1, 1, -1, -1, 1, 1, 1, -1, -1, 1, 1, -1, 1, 1, 1, -1, -1, 1, -1, -1, 1, 1, 1, -1, -1, 1, -1, -1, 1, 1, -1, -1, -1, -1, -1, 1, -1, -1, 1, -1, 1, 1, -1, 1, -1, -1, -1, 1, 1, 1, 1, 1, -1, -1, 1, -1, 1, 1, 1, -1, 1, -1, -1, 1, 1, 1, -1, 1, 1, 1, -1, -1, 1, 1, 1, 1, -1, -1, 1, -1, 1, 1, -1, -1, -1, 1, -1, -1, 1, 1, 1, -1, 1, 1, 1, 1, 1, 1, -1, -1, 1, 1, -1, -1, -1, -1, 1, 1, 1, 1, 1, -1, 1, 1, -1, 1, -1, -1, 1, 1, 1, -1, 1, 1, -1, -1, 1, 1, 1, -1, -1, 1, -1, -1, 1, -1, 1, -1, -1, 1, 1, 1, -1, -1, -1, 1, 1, 1, -1, 1, 1, -1, -1, 1, 1, 1, 1, -1, -1, -1, 1, -1, -1, 1, -1, 1, -1, 1, -1, -1, -1, -1, 1, 1, -1, 1, 1, -1, -1, 1, -1, 1, -1, -1, -1, -1, 1, 1, -1, 1, -1, -1, 1, -1, -1, 1, 1, -1, -1, 1, 1, 1, -1, -1, -1, -1, 1, -1, -1, 1, -1, 1, -1, -1, -1, -1, 1, 1, -1, 1, 1, 1, 1, 1, 1, -1, 1, 1, -1, 1, -1, -1, 
-1, -1, -1, -1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, -1, 1, -1, 1, -1, -1, 1, 1, -1, -1, -1, 1, -1, -1, -1, 1, 1, 1, -1, 1, -1, 1, 1, -1, 1, -1, 1, 1, 1, -1, 1, -1, -1, -1, -1, -1, -1, -1, 1, -1, -1, 1, -1, 1, 1, -1, 1, 1, -1, 1, -1, 1, 1, 1, -1, 1, -1, 1, 1, 1, -1, -1, 1, 1, -1, 1, 1, -1, -1, 1, -1, -1, 1, 1, 1, -1, 1, -1, -1, 1, 1, -1, -1, -1, -1, 1, -1, 1, -1, -1, 1, -1, 1, 1, -1, 1, -1, 1, 1, 1, -1, -1, 1, -1, -1, -1, 1, -1, -1, -1, 1, 1, 1, 1, 1, 1, -1, -1, -1, 1, 1, -1, 1, -1, -1, -1, 1, 1, 1, 1, 1, 1, -1, -1, 1, -1, 1, 1, -1, 1, 1, -1, -1, 1, 1, -1, 1, -1, -1, -1, -1, -1, -1, -1, 1, -1, 1, -1, -1, 1, -1, -1, -1, 1, -1, -1, 1, -1, -1, 1, 1, -1, 1, 1, -1, -1, 1, -1, -1, -1, -1, 1, -1, -1, -1, 1, -1, 1, -1, -1, 1, 1, -1, -1, 1, 1, -1, 1, 1, -1, 1, -1, 1, -1, -1, 1, -1, -1, 1, 1, -1, -1, -1, 1, 1, -1, -1, -1, 1, -1, 1, 1, -1, 1, -1, 1, 1, 1, 1, -1, -1, -1, 1, 1, -1, 1, -1, -1, 1, -1, 1, -1, -1, 1, 1, -1, 1, -1, -1, 1, 1, 1, -1, 1, -1, -1, -1, -1, -1, 1, -1, 1, -1, 1, -1, -1, 1, -1, 1, 1, -1, -1, -1, -1, -1, 1, 1, -1, 1, 1, -1, 1, 1, 1, 1, -1, -1, -1, 1, 1, 1, 1, 1, 1, -1, 1, 1, 1, 1, -1, -1, -1, -1, -1, -1, -1, 1, -1, 1, -1, -1, 1, -1, -1, 1, -1, 1, 1, -1, 1, 1, 1, 1, 1, -1, 1, -1, -1, 1, 1, -1, -1, -1, 1, 1, 1, 1, -1, -1, 1, -1, 1, 1, -1, 1, -1, 1, 1, -1, 1, 1, -1, -1, -1, 1, -1, 1, 1, -1, 1, -1, 1, -1, -1, 1, -1, -1, 1, -1, 1, -1, 1, -1, -1, 1, 1, 1, -1, -1, 1, 1, 1, 1, 1, -1, -1, -1, 1, 1, 1, -1, -1, -1, 1, 1, 1, 1, -1, 1, 1, 1, -1, 1, -1, -1, -1, -1, -1, 1, -1, -1, -1, -1, -1, -1, -1, 1, -1, -1, -1, 1, -1, 1, -1, 1, 1, 1, 1, 1, -1, 1, -1, 1, 1, 1, 1, 1, 1, 1, 1, -1, 1, -1, -1, 1, -1, -1, -1, 1, -1, -1, 1, -1, 1, 1, -1, 1, -1, 1, -1, 1, 1, 1, 1, -1, 1, -1, -1, 1, 1, 1, -1, 1, 1, 1, -1, 1, 1, 1, 1, -1, 1, -1, 1, 1, 1, -1, 1, -1, 1, 1, -1, 1, -1, 1, -1, 1, 1, -1, -1, 1, -1, -1, 1, 1, -1, -1, 1, 1, -1, 1, -1, -1, -1, 1, -1, 1, 1, -1, 1, 1, -1, 1, -1, 1, 1, -1, -1, 1, -1, -1, -1, 1, -1, -1, -1, 1, 1, 1, -1, -1, 1, -1, 1, 1, 1, 1, 1, 1, 1, 1, 1, -1, -1, 1, -1, 1, -1, 1, 1, 1, -1, 1, -1, -1, -1, 1, -1, -1, -1, 1, -1, 1, 1, 1, 1, 1, 1, 1, 1, -1, -1, 1, -1, -1, 1, 1, -1, 1, -1, -1, 1, -1, -1, -1, 1, -1, -1, 1, 1, -1, -1, -1, 1, -1, -1, 1, -1, 1, 1, 1, 1, 1, -1, 1, -1, -1, 1, 1, 1, -1, -1, -1, 1, 1, 1, 1, -1, -1, 1, -1, 1, 1, -1, -1, -1, -1, -1, 1, 1, 1, 1, 1, -1, 1, -1, -1, -1, -1, 1, -1, 1, -1, -1, -1, -1, 1, -1, 1, 1, -1, 1, 1, -1, 1, 1, 1, -1, 1, -1, -1, 1, 1, -1, 1, -1, -1, -1, -1, 1, -1, -1, 1, -1, 1, -1, -1, -1, 1, -1, -1, 1, -1, 1, 1, -1, -1, -1, -1, 1, 1, -1, 1, 1, 1, 1, 1, 1, 1, 1, -1, 1, 1, 1, 1, -1, 1, -1, 1, 1, -1, 1, -1, 1, 1, -1, 1, 1, -1, 1, -1, -1, 1, -1, -1, -1, -1, -1, -1, 1, 1, 1, -1, -1, 1, 1, 1, -1, -1, 1, -1, 1, 1, 1, 1, 1, -1, 1, 1, -1, -1, -1, -1, 1, 1, 1, -1, 1, 1, -1, -1, -1, 1, -1, 1, 1, 1, -1, 1, 1, -1, 1, 1, -1, 1, 1, 1, -1, -1, 1, 1, -1, 1, -1, -1, -1, 1, 1, 1, -1, -1, 1, 1, -1, 1, 1, 1, 1, -1, -1, -1, -1, 1, -1, 1, 1, -1, -1, -1, 1, 1, -1, -1, 1, 1, -1, 1, -1, -1, 1, -1, -1, 1, 1, -1, -1, 1, 1, -1, -1, 1, -1, 1, -1, 1, 1, 1, 1, -1, -1, -1, -1, -1, -1, -1, -1, 1, 1, 1, 1, -1, -1, -1, -1, 1, -1, 1, 1, 1, -1, 1, -1, -1, -1, 1, -1, -1, -1, -1, 1, -1, 1, 1, -1, 1, 1, -1, 1, 1, -1, -1, -1, -1, 1, 1, -1, 1, 1, -1, 1, 1, -1, 1, -1, 1, 1, -1, 1, -1, 1, -1, -1, 1, -1, 1, 1, -1, -1, 1, -1, -1, -1, -1, -1, 1, -1, 1, -1, 1, -1, -1, 1, 1, 1, -1, -1, 1, -1, 1, -1, -1, -1, 1, 1, -1, -1, 1, 1, 1, 1, -1, -1, -1, -1, -1, 1, -1, 1, -1, -1, 1, -1, -1, -1, -1, -1, 1, -1, -1, -1, 1, -1, -1, -1, 1, -1, 1, -1, 1, 1, -1, -1, 1, -1, 1, 1, 1, 1, -1, -1, -1, 1, 1, 1, -1, 1, -1, -1, 1, -1, 
1, -1, -1, -1, -1, -1, -1, -1, -1, 1, -1, 1, -1, -1, -1, -1, -1, -1, 1, -1, 1, -1, 1, 1, 1, -1, -1, 1, -1, -1, 1, 1, -1, -1, 1, 1, -1, -1, -1, 1, 1, 1, -1, 1, 1, 1, 1, 1, 1, -1, 1, 1, -1, 1, -1, 1, -1, 1, -1, -1, 1, -1, -1, 1, -1, -1, 1, 1, 1, -1, -1, 1, -1, -1, -1, -1, -1, 1, 1, 1, 1, -1, -1, -1, 1, -1, -1, 1, 1, 1, 1, -1, 1, 1, 1, 1, -1, 1, 1, -1, -1, -1, 1, -1, 1, -1, -1, 1, -1, -1, 1, 1, -1, 1, -1, 1, -1, 1, 1, -1, -1, -1, -1, -1, -1, -1, 1, -1, -1, 1, -1, 1, -1, 1, 1, 1, 1, -1, -1, -1, 1, 1, -1, -1, 1, 1, 1, 1, 1, 1, -1, -1, 1, -1, 1, 1, 1, 1, 1, 1, -1, -1, 1, -1, -1, 1, 1, -1, 1, 1, 1, 1, 1, -1, 1, 1, 1, 1, -1, 1, -1, -1, 1, 1, -1, -1, -1, -1, -1, -1, 1, -1, 1, -1, 1, 1, 1, -1, -1, 1, -1, 1, 1, 1, 1, 1, 1, 1, -1, 1, 1, 1, 1, 1, -1, -1, 1, -1, 1, 1, -1, 1, -1, 1, 1, -1, 1, 1, -1, -1, 1, -1, -1, -1, -1, -1, 1, -1, 1, -1, 1, 1, -1, 1, -1, 1, -1, -1, -1, 1, -1, 1, -1, -1, -1, -1, 1, -1, 1, 1, -1, 1, -1, -1, 1, -1, -1, -1, 1, 1, -1, -1, -1, -1, 1, -1, -1, 1, 1, 1, 1, -1, -1, -1, 1, -1, -1, -1, -1, 1, 1, 1, 1, 1, -1, -1, 1, 1, -1, -1, 1, 1, -1, 1, 1, -1, -1, -1, 1, -1, -1, -1, -1, -1, -1, 1, -1, -1, 1, 1, -1, -1, -1, -1, -1, 1, 1, -1, 1, 1, 1, 1, -1, 1, -1, -1, -1, 1, 1, -1, 1, -1, 1, 1, -1, -1, 1, -1, 1, 1, -1, 1, 1, 1, 1, 1, 1, -1, -1, -1, 1, -1, -1, 1, 1, -1, -1, -1, -1, -1, -1, -1, 1, -1, -1, 1, -1, 1, -1, 1, -1, 1, 1, 1, 1, -1, -1, -1, 1, 1, -1, 1, -1, -1, -1, -1, -1, 1, 1, 1, 1, 1, -1, -1, 1, -1, 1, -1, -1, -1, -1, -1, 1, 1, -1, -1, 1, 1, -1, -1, 1, -1, 1, 1, 1, 1, -1, -1, 1, 1, -1, -1, -1, 1, -1, -1, -1, 1, 1, 1, 1, -1, -1]
if __name__ == '__main__':
try:
main()
except KeyboardInterrupt:
pass
| gpl-3.0 |
prtkm/jasp | jasp/jasp_vib.py | 3 | 9632 | '''module for vibrational calculations in jasp'''
from jasp import *
import numpy as np
from ase.data import atomic_masses
from ase.io import read
def get_vibrational_modes(self,
mode=None,
massweighted=False,
show=False,
npoints=30,
amplitude=0.5):
'''Read the OUTCAR and get the eigenvectors. Return value depends
on the arguments.
mode= None returns all modes
mode= 2 returns mode 2
mode=[1,2] returns modes 1 and 2
massweighted = True returns sqrt(mass) weighted
eigenvectors. E.g. M * evectors * M
show=True makes a trajectory that can be visualized
npoints = number of points in the trajectory
amplitude = magnitude of the vibrations
some special cases to handle:
ibrion=5 + selective dynamics
may lead to unexpected number of modes
if nwrite=3, there will be a sqrt(mass) weighted vectors
and two sets of vectors.
I am not sure if these eigenvectors are mass-weighted. And I am
not sure if the order of the eigenvectors in OUTCAR is the same as
the atoms.
Note: it seems like it might be much easier to get this out of
vasprun.xml
'''
atoms = self.get_atoms()
if hasattr(atoms, 'constraints') and self.int_params['ibrion'] == 5:
# count how many modes to get.
NMODES = 0
f = open('OUTCAR')
for line in f:
if ('f' in line and 'THz' in line and 'cm-1' in line):
NMODES += 1
f.close()
else:
NMODES = 3*len(atoms)
frequencies, eigenvectors = [], []
'''
now we find where the data starts. I think the unweighted vectors
always come first. if nwrite=3, then there are sqrt(mass) weighted
vectors that follow this section
'''
f = open('OUTCAR', 'r')
while True:
line = f.readline()
if line.startswith(' Eigenvectors and eigenvalues'
' of the dynamical matrix'):
break
f.readline() # skip ------
f.readline() # skip two blank lines
f.readline()
for i in range(NMODES):
freqline = f.readline()
fields = freqline.split()
if 'f/i=' in freqline: # imaginary frequency
frequencies.append(complex(float(fields[-2]) * 0.001, 0j))
else:
frequencies.append(float(fields[-2]) * 0.001)
# X Y Z dx dy dz
f.readline()
thismode = []
for i in range(len(atoms)):
line = f.readline().strip()
X, Y, Z, dx, dy, dz = [float(x) for x in line.split()]
thismode.append(np.array([dx, dy, dz]))
f.readline() # blank line
thismode = np.array(thismode)
# now we need to resort the vectors in this mode so they match
# the atoms order
thismode = thismode[self.resort]
if massweighted:
# construct M
numbers = [a.get('number') for a in atoms]
M = []
for i in range(len(atoms)):
for j in range(3):
an = numbers[i]
M.append(1./np.sqrt(atomic_masses[an]))
M = np.array(M)
M = np.diag(M) # diagonal array
thismode = np.dot(M, thismode.flat)
thismode = thismode.reshape((len(atoms), 3))
# renormalize the mode
mag = np.linalg.norm(thismode)
thismode /= mag
eigenvectors.append(thismode)
f.close()
eigenvectors = np.array(eigenvectors)
if mode is None:
retval = (frequencies, eigenvectors)
else:
retval = (frequencies[mode], eigenvectors[mode])
if show:
from ase.visualize import view
if mode is None:
mode = [0]
elif not isinstance(mode, list):
mode = [mode] # make a list for next code
# symmetric path from -1 to 1 to -1
X = np.append(np.linspace(0, 1, npoints/3),
np.linspace(1, -1, npoints/3))
X = np.append(X,
np.linspace(-1, 0, npoints/3))
X *= amplitude
for m in mode:
traj = []
for i, x in enumerate(X):
a = atoms.copy()
a.positions += x*eigenvectors[m]
traj += [a]
view(traj)
return retval
Vasp.get_vibrational_modes = get_vibrational_modes
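# Hedged, runnable sketch of the sqrt(mass) weighting applied above; the
# two-atom displacement pattern and the masses are arbitrary illustrations:
def _demo_mass_weight():
    mode = np.array([[1.0, 0.0, 0.0], [0.0, 1.0, 0.0]])
    masses = np.array([1.008, 15.999])  # e.g. H and O
    # one 1/sqrt(m) entry per Cartesian component of each atom
    M = np.diag(np.repeat(1.0 / np.sqrt(masses), 3))
    weighted = np.dot(M, mode.flat).reshape(mode.shape)
    return weighted / np.linalg.norm(weighted)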
def get_vibrational_frequencies(self):
'''Returns an array of frequencies in wavenumbers.
You should have run the calculation already. This function does not
run a calculation.
'''
atoms = self.get_atoms()
N = len(atoms)
frequencies = []
f = open('OUTCAR', 'r')
while True:
line = f.readline()
if line.startswith(' Eigenvectors and eigenvalues'
' of the dynamical matrix'):
break
f.readline() # skip ------
f.readline() # skip two blank lines
f.readline()
for i in range(3*N):
# the next line contains the frequencies
line = f.readline()
fields = line.split()
if 'f/i=' in line: # imaginary frequency
# frequency in wave-numbers
frequencies.append(complex(float(fields[6]), 0j))
else:
frequencies.append(float(fields[7]))
# now skip 1 one line, a line for each atom, and a blank line
for j in range(1 + N + 1):
f.readline() # skip the next few lines
f.close()
return frequencies
Vasp.get_vibrational_frequencies = get_vibrational_frequencies
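# Hedged usage sketch (the calculation directory is illustrative; the
# OUTCAR there must come from a finished vibrational run):
#
#   with jasp('molecules/h2o-vib') as calc:
#       freqs = calc.get_vibrational_frequencies()  # wavenumbers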
def get_infrared_intensities(self):
'''Calculate infrared intensities of vibrational modes.
Returns an array of normalized intensities for each vibrational
mode. You should have run the vibrational calculation already. This
function does not run it for you.
python translation of # A utility for calculating the vibrational
intensities from VASP output (OUTCAR) # (C) David Karhanek,
2011-03-25, ICIQ Tarragona, Spain (www.iciq.es)
http://homepage.univie.ac.at/david.karhanek/downloads.html#Entry02
'''
atoms = read('POSCAR', format='vasp')
NIONS = len(atoms)
BORN_NROWS = NIONS*4 + 1
    with open('OUTCAR', 'r') as f:
        alltext = f.read()
        f.seek(0)
        alllines = f.readlines()
if 'BORN' not in alltext:
raise Exception('Born effective charges missing. '
'Did you use IBRION=7 or 8?')
if 'Eigenvectors after division by SQRT(mass)' not in alltext:
raise Exception('You must rerun with NWRITE=3 to get '
'sqrt(mass) weighted eigenvectors')
# get the Born charges
for i, line in enumerate(alllines):
if 'BORN EFFECTIVE CHARGES' in line:
break
BORN_MATRICES = []
i += 2 # skip a line
for j in range(NIONS):
BM = []
i += 1 # skips the ion count line
for k in range(3):
line = alllines[i]
fields = line.split()
BM.append([float(x) for x in fields[1:4]])
i += 1 # advance a line
BORN_MATRICES.append(BM)
BORN_MATRICES = np.array(BORN_MATRICES)
# Get the eigenvectors and eigenvalues. maybe I can replace this
# code with my other code. for now I just reproduce the count
# number of vibs. this gets the number from outcar. it seems like
# it should be known in advance unless constraints make it hard to
# tell.
# the next code in the shell script just copies code to eigenvectors.txt
for i, line in enumerate(alllines):
if 'Eigenvectors after division by SQRT(mass)' in line:
break
EIG_NVIBS = 0
for line in alllines[i:]:
if ('f' in line
and 'THz' in line
and 'cm-1' in line):
EIG_NVIBS += 1
EIG_NIONS = BORN_NROWS
# I guess this counts blank rows and non-data rows
EIG_NROWS = (EIG_NIONS + 3)*EIG_NVIBS + 3
# i is where the data starts
i += 6
EIGENVALUES = []
EIGENVECTORS = []
for j in range(EIG_NVIBS):
mode = []
EIGENVALUES.append(alllines[i]) # frequencies are here
i += 1 # skip the frequency line
i += 1 # skip the xyz line
        for k in range(NIONS):  # one displacement row per atom
fields = [float(x) for x in alllines[i].split()]
mode.append(fields[3:])
i += 1
EIGENVECTORS.append(mode)
i += 1 # skip blank line
EIGENVECTORS = np.array(EIGENVECTORS)
# now we are ready to compute intensities. see
# http://othes.univie.ac.at/10117/1/2010-05-05_0547640.pdf, page
# 21.
'''
I(\omega) = \sum_{\alpha=1}^3 | \sum_{l=1}^M \sum_{\beta=1}^3 Z_{\alpha\beta}(l)e_{\beta}(l)|^2
omega is the vibrational mode
alpha, beta are the cartesian polarizations
l is the atom number
e_beta is the eigenvector of the mode
'''
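    # A vectorized equivalent of the loops below (sketch; assumes
    # BORN_MATRICES has shape (NIONS, 3, 3) and EIGENVECTORS has shape
    # (n_modes, NIONS, 3)):
    #   intensities = (np.einsum('lab,mlb->ma',
    #                            BORN_MATRICES, EIGENVECTORS) ** 2).sum(axis=1)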
intensities = []
for mode in range(len(EIGENVECTORS)):
S = 0 # This is the triple sum
for alpha in [0, 1, 2]:
s = 0
            for l in range(NIONS):  # this is the atom number
for beta in [0, 1, 2]:
e = EIGENVECTORS[mode][l]
Zab = BORN_MATRICES[l][alpha][beta]
s += Zab * e[beta]
S += s**2
intensities.append(S)
intensities = np.array(intensities)/max(intensities)
return intensities
Vasp.get_infrared_intensities = get_infrared_intensities
| gpl-2.0 |
mancoast/CPythonPyc_test | cpython/211_test_ucn.py | 15 | 3237 | """ Test script for the Unicode implementation.
Written by Bill Tutt.
Modified for Python 2.0 by Fredrik Lundh (fredrik@pythonware.com)
(c) Copyright CNRI, All Rights Reserved. NO WARRANTY.
"""#"
from test_support import verify, verbose
print 'Testing General Unicode Character Name, and case insensitivity...',
# General and case insensitivity test:
try:
# put all \N escapes inside exec'd raw strings, to make sure this
# script runs even if the compiler chokes on \N escapes
exec r"""
s = u"\N{LATIN CAPITAL LETTER T}" \
u"\N{LATIN SMALL LETTER H}" \
u"\N{LATIN SMALL LETTER E}" \
u"\N{SPACE}" \
u"\N{LATIN SMALL LETTER R}" \
u"\N{LATIN CAPITAL LETTER E}" \
u"\N{LATIN SMALL LETTER D}" \
u"\N{SPACE}" \
u"\N{LATIN SMALL LETTER f}" \
u"\N{LATIN CAPITAL LeTtEr o}" \
u"\N{LATIN SMaLl LETTER x}" \
u"\N{SPACE}" \
u"\N{LATIN SMALL LETTER A}" \
u"\N{LATIN SMALL LETTER T}" \
u"\N{LATIN SMALL LETTER E}" \
u"\N{SPACE}" \
u"\N{LATIN SMALL LETTER T}" \
u"\N{LATIN SMALL LETTER H}" \
u"\N{LATIN SMALL LETTER E}" \
u"\N{SpAcE}" \
u"\N{LATIN SMALL LETTER S}" \
u"\N{LATIN SMALL LETTER H}" \
u"\N{LATIN SMALL LETTER E}" \
u"\N{LATIN SMALL LETTER E}" \
u"\N{LATIN SMALL LETTER P}" \
u"\N{FULL STOP}"
verify(s == u"The rEd fOx ate the sheep.", s)
"""
except UnicodeError, v:
print v
print "done."
import unicodedata
print "Testing name to code mapping....",
for char in "SPAM":
name = "LATIN SMALL LETTER %s" % char
code = unicodedata.lookup(name)
verify(unicodedata.name(code) == name)
print "done."
print "Testing code to name mapping for all characters....",
count = 0
for code in range(65536):
try:
char = unichr(code)
name = unicodedata.name(char)
verify(unicodedata.lookup(name) == char)
count += 1
except (KeyError, ValueError):
pass
print "done."
print "Found", count, "characters in the unicode name database"
# misc. symbol testing
print "Testing misc. symbols for unicode character name expansion....",
exec r"""
verify(u"\N{PILCROW SIGN}" == u"\u00b6")
verify(u"\N{REPLACEMENT CHARACTER}" == u"\uFFFD")
verify(u"\N{HALFWIDTH KATAKANA SEMI-VOICED SOUND MARK}" == u"\uFF9F")
verify(u"\N{FULLWIDTH LATIN SMALL LETTER A}" == u"\uFF41")
"""
print "done."
# strict error testing:
print "Testing unicode character name expansion strict error handling....",
try:
unicode("\N{blah}", 'unicode-escape', 'strict')
except UnicodeError:
pass
else:
raise AssertionError, "failed to raise an exception when given a bogus character name"
try:
unicode("\N{" + "x" * 100000 + "}", 'unicode-escape', 'strict')
except UnicodeError:
pass
else:
raise AssertionError, "failed to raise an exception when given a very " \
"long bogus character name"
try:
unicode("\N{SPACE", 'unicode-escape', 'strict')
except UnicodeError:
pass
else:
raise AssertionError, "failed to raise an exception for a missing closing brace."
try:
unicode("\NSPACE", 'unicode-escape', 'strict')
except UnicodeError:
pass
else:
raise AssertionError, "failed to raise an exception for a missing opening brace."
print "done."
| gpl-3.0 |
damiansoriano/odoo | addons/account/wizard/account_report_partner_balance.py | 364 | 2199 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields, osv
class account_partner_balance(osv.osv_memory):
"""
This wizard will provide the partner balance report by periods, between any two dates.
"""
_inherit = 'account.common.partner.report'
_name = 'account.partner.balance'
_description = 'Print Account Partner Balance'
_columns = {
        'display_partner': fields.selection([('non-zero_balance', 'With balance not equal to 0'), ('all', 'All Partners')],
                                            'Display Partners'),
'journal_ids': fields.many2many('account.journal', 'account_partner_balance_journal_rel', 'account_id', 'journal_id', 'Journals', required=True),
}
_defaults = {
'display_partner': 'non-zero_balance',
}
def _print_report(self, cr, uid, ids, data, context=None):
if context is None:
context = {}
data = self.pre_print_report(cr, uid, ids, data, context=context)
data['form'].update(self.read(cr, uid, ids, ['display_partner'])[0])
return self.pool['report'].get_action(cr, uid, [], 'account.report_partnerbalance', data=data, context=context)
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
waelrash1/or-tools | examples/python/jobshop_ft06_distance.py | 32 | 3927 | # Copyright 2010-2014 Google
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This model implements a simple jobshop named ft06.
A jobshop is a standard scheduling problem in which you must sequence a
series of tasks on a set of machines. Each job contains one task per
machine. The order of execution and the length of each task on each
machine depend on the job.
The objective is to minimize the maximum completion time of all
jobs. This is called the makespan.
"""
from google.apputils import app
import gflags
from ortools.constraint_solver import pywrapcp
FLAGS = gflags.FLAGS
class Dist:
def __init__(self):
pass
def distance(self, x, y):
return abs(x - y)
def main(unused_argv):
# Creates the solver.
solver = pywrapcp.Solver('jobshop ft06')
machines_count = 6
jobs_count = 6
all_machines = range(0, machines_count)
all_jobs = range(0, jobs_count)
durations = [[1, 3, 6, 7, 3, 6],
[8, 5, 10, 10, 10, 4],
[5, 4, 8, 9, 1, 7],
[5, 5, 5, 3, 8, 9],
[9, 3, 5, 4, 3, 1],
[3, 3, 9, 10, 4, 1]]
machines = [[2, 0, 1, 3, 5, 4],
[1, 2, 4, 5, 0, 3],
[2, 3, 5, 0, 1, 4],
[1, 0, 2, 3, 4, 5],
[2, 1, 4, 5, 0, 3],
[1, 3, 5, 0, 4, 2]]
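  # Data layout: durations[j][t] is the processing time of job j's t-th
  # task, and machines[j][t] is the machine that task runs on; e.g. job 0
  # begins with a 1-unit task on machine 2.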
# Computes horizon dynamically.
horizon = sum([sum(durations[i]) for i in all_jobs])
# Creates jobs.
all_tasks = {}
for i in all_jobs:
for j in all_machines:
all_tasks[(i, j)] = solver.FixedDurationIntervalVar(0,
horizon,
durations[i][j],
False,
'Job_%i_%i' % (i, j))
  # Creates sequence variables and adds disjunctive constraints.
all_sequences = {}
all_transitions = []
for i in all_machines:
machines_jobs = []
for j in all_jobs:
for k in all_machines:
if machines[j][k] == i:
machines_jobs.append(all_tasks[(j, k)])
disj = solver.DisjunctiveConstraint(machines_jobs, 'machine %i' % i)
distance_obj = Dist()
distance_callback = distance_obj.distance
# Store all instances of the distance callbacks to have the same
# life cycle as the solver.
all_transitions.append(distance_callback)
disj.SetTransitionTime(distance_callback)
all_sequences[i] = disj.SequenceVar()
solver.Add(disj)
# Makespan objective.
obj_var = solver.Max([all_tasks[(i, machines_count - 1)].EndExpr()
for i in all_jobs])
objective = solver.Minimize(obj_var, 1)
# Precedences inside a job.
for i in all_jobs:
for j in range(0, machines_count - 1):
solver.Add(all_tasks[(i, j + 1)].StartsAfterEnd(all_tasks[(i, j)]))
# Creates search phases.
vars_phase = solver.Phase([obj_var],
solver.CHOOSE_FIRST_UNBOUND,
solver.ASSIGN_MIN_VALUE)
sequence_phase = solver.Phase([all_sequences[i] for i in all_machines],
solver.SEQUENCE_DEFAULT)
main_phase = solver.Compose([sequence_phase, vars_phase])
# Creates the search log.
search_log = solver.SearchLog(100, obj_var)
# Solves the problem.
solver.Solve(main_phase, [search_log, objective])
if __name__ == '__main__':
app.run()
| apache-2.0 |
oliverlee/sympy | sympy/core/tests/test_trace.py | 99 | 2825 | from sympy import symbols, Matrix, Tuple
from sympy.core.trace import Tr
from sympy.utilities.pytest import raises
def test_trace_new():
a, b, c, d, Y = symbols('a b c d Y')
A, B, C, D = symbols('A B C D', commutative=False)
assert Tr(a + b) == a + b
assert Tr(A + B) == Tr(A) + Tr(B)
#check trace args not implicitly permuted
assert Tr(C*D*A*B).args[0].args == (C, D, A, B)
# check for mul and adds
assert Tr((a*b) + ( c*d)) == (a*b) + (c*d)
# Tr(scalar*A) = scalar*Tr(A)
assert Tr(a*A) == a*Tr(A)
assert Tr(a*A*B*b) == a*b*Tr(A*B)
# since A is symbol and not commutative
assert isinstance(Tr(A), Tr)
#POW
assert Tr(pow(a, b)) == a**b
assert isinstance(Tr(pow(A, a)), Tr)
#Matrix
M = Matrix([[1, 1], [2, 2]])
assert Tr(M) == 3
##test indices in different forms
#no index
t = Tr(A)
assert t.args[1] == Tuple()
#single index
t = Tr(A, 0)
assert t.args[1] == Tuple(0)
#index in a list
t = Tr(A, [0])
assert t.args[1] == Tuple(0)
t = Tr(A, [0, 1, 2])
assert t.args[1] == Tuple(0, 1, 2)
    #index in parentheses; note (0) is just an int, while (1, 2) below is a tuple
t = Tr(A, (0))
assert t.args[1] == Tuple(0)
t = Tr(A, (1, 2))
assert t.args[1] == Tuple(1, 2)
#trace indices test
t = Tr((A + B), [2])
assert t.args[0].args[1] == Tuple(2) and t.args[1].args[1] == Tuple(2)
t = Tr(a*A, [2, 3])
assert t.args[1].args[1] == Tuple(2, 3)
#class with trace method defined
#to simulate numpy objects
class Foo:
def trace(self):
return 1
assert Tr(Foo()) == 1
#argument test
# check for value error, when either/both arguments are not provided
raises(ValueError, lambda: Tr())
raises(ValueError, lambda: Tr(A, 1, 2))
def test_trace_doit():
a, b, c, d = symbols('a b c d')
A, B, C, D = symbols('A B C D', commutative=False)
#TODO: needed while testing reduced density operations, etc.
def test_permute():
A, B, C, D, E, F, G = symbols('A B C D E F G', commutative=False)
t = Tr(A*B*C*D*E*F*G)
assert t.permute(0).args[0].args == (A, B, C, D, E, F, G)
assert t.permute(2).args[0].args == (F, G, A, B, C, D, E)
assert t.permute(4).args[0].args == (D, E, F, G, A, B, C)
assert t.permute(6).args[0].args == (B, C, D, E, F, G, A)
assert t.permute(8).args[0].args == t.permute(1).args[0].args
assert t.permute(-1).args[0].args == (B, C, D, E, F, G, A)
assert t.permute(-3).args[0].args == (D, E, F, G, A, B, C)
assert t.permute(-5).args[0].args == (F, G, A, B, C, D, E)
assert t.permute(-8).args[0].args == t.permute(-1).args[0].args
t = Tr((A + B)*(B*B)*C*D)
assert t.permute(2).args[0].args == (C, D, (A + B), (B**2))
t1 = Tr(A*B)
t2 = t1.permute(1)
assert id(t1) != id(t2) and t1 == t2
| bsd-3-clause |
kalxas/QGIS | python/core/contextmanagers.py | 45 | 2189 | # -*- coding: utf-8 -*-
"""
***************************************************************************
contextmanagers.py
---------------------
Date : May 2014
Copyright : (C) 2014 by Nathan Woodrow
Email : woodrow dot nathan at gmail dot com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Nathan Woodrow'
__date__ = 'May 2014'
__copyright__ = '(C) 2014, Nathan Woodrow'
import sys
from contextlib import contextmanager
from qgis.core import QgsApplication
@contextmanager
def qgisapp(args=None, guienabled=True, configpath=None, sysexit=True):
"""
Create a new QGIS Qt application.
You should use this before creating any Qt widgets or QGIS objects for
your custom QGIS based application.
usage:
from qgis.core.contextmanagers import qgisapp
def main(app):
# Run your main code block
with qgisapp(sys.argv) as app:
main(app)
args - args passed to the underlying QApplication.
guienabled - True by default will create a QApplication with a GUI. Pass
False if you wish to create no GUI based app, e.g a server app.
configpath - Custom config path QGIS will use to load settings.
sysexit - Call sys.exit on app exit. True by default.
"""
if not args:
args = []
app = QgsApplication(args, guienabled, configpath)
QgsApplication.initQgis()
yield app
if guienabled:
exitcode = app.exec_()
else:
exitcode = 0
QgsApplication.exitQgis()
if sysexit:
sys.exit(exitcode)
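# Headless usage sketch (hypothetical): create the application without a
# GUI and keep the interpreter alive afterwards:
#   with qgisapp(guienabled=False, sysexit=False) as app:
#       pass  # do headless processing here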
| gpl-2.0 |
GauravSahu/odoo | addons/base_import/__openerp__.py | 317 | 1227 | {
'name': 'Base import',
'description': """
New extensible file import for OpenERP
======================================
Re-implement openerp's file import system:
* Server side, the previous system forces most of the logic into the
client which duplicates the effort (between clients), makes the
import system much harder to use without a client (direct RPC or
other forms of automation) and makes knowledge about the
import/export system much harder to gather as it is spread over
3+ different projects.
* In a more extensible manner, so users and partners can build their
own front-end to import from other file formats (e.g. OpenDocument
files) which may be simpler to handle in their work flow or from
their data production sources.
* In a module, so that administrators and users of OpenERP who do not
need or want an online import can avoid it being available to users.
""",
'category': 'Uncategorized',
'website': 'https://www.odoo.com',
'author': 'OpenERP SA',
'depends': ['web'],
'installable': True,
'auto_install': True,
'data': [
'security/ir.model.access.csv',
'views/base_import.xml',
],
'qweb': ['static/src/xml/import.xml'],
}
| agpl-3.0 |
ishank08/scikit-learn | examples/linear_model/plot_sgd_weighted_samples.py | 344 | 1458 | """
=====================
SGD: Weighted samples
=====================
Plot decision function of a weighted dataset, where the size of points
is proportional to its weight.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import linear_model
# we create 20 points
np.random.seed(0)
X = np.r_[np.random.randn(10, 2) + [1, 1], np.random.randn(10, 2)]
y = [1] * 10 + [-1] * 10
sample_weight = 100 * np.abs(np.random.randn(20))
# and assign a bigger weight to the first 10 samples
sample_weight[:10] *= 10
# plot the weighted data points
xx, yy = np.meshgrid(np.linspace(-4, 5, 500), np.linspace(-4, 5, 500))
plt.figure()
plt.scatter(X[:, 0], X[:, 1], c=y, s=sample_weight, alpha=0.9,
cmap=plt.cm.bone)
## fit the unweighted model
clf = linear_model.SGDClassifier(alpha=0.01, n_iter=100)
clf.fit(X, y)
Z = clf.decision_function(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
no_weights = plt.contour(xx, yy, Z, levels=[0], linestyles=['solid'])
## fit the weighted model
clf = linear_model.SGDClassifier(alpha=0.01, n_iter=100)
clf.fit(X, y, sample_weight=sample_weight)
Z = clf.decision_function(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
samples_weights = plt.contour(xx, yy, Z, levels=[0], linestyles=['dashed'])
plt.legend([no_weights.collections[0], samples_weights.collections[0]],
["no weights", "with weights"], loc="lower left")
plt.xticks(())
plt.yticks(())
plt.show()
| bsd-3-clause |
rastrexando-eu/rastrexando-eu | core/migrations/0034_auto_20170424_2055.py | 1 | 1367 | # -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2017-04-24 18:55
from __future__ import unicode_literals
import django.contrib.postgres.fields.jsonb
from django.db import migrations, models
import django.utils.timezone
import model_utils.fields
class Migration(migrations.Migration):
dependencies = [
('core', '0033_auto_20170423_2022'),
]
operations = [
migrations.CreateModel(
name='LeagueClasification',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created', model_utils.fields.AutoCreatedField(default=django.utils.timezone.now, editable=False, verbose_name='created')),
('modified', model_utils.fields.AutoLastModifiedField(default=django.utils.timezone.now, editable=False, verbose_name='modified')),
('year', models.PositiveIntegerField(verbose_name='Ano')),
('clasification', django.contrib.postgres.fields.jsonb.JSONField()),
],
options={
'abstract': False,
},
),
migrations.AddField(
model_name='teammatch',
name='points',
field=models.PositiveIntegerField(default=0, verbose_name='Puntos'),
preserve_default=False,
),
]
| gpl-3.0 |
cyberark-bizdev/ansible | lib/ansible/modules/database/misc/redis.py | 35 | 9782 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: redis
short_description: Various redis commands, slave and flush
description:
- Unified utility to interact with redis instances.
version_added: "1.3"
options:
command:
description:
- The selected redis command
- C(config) (new in 1.6), ensures a configuration setting on an instance.
- C(flush) flushes all the instance or a specified db.
- C(slave) sets a redis instance in slave or master mode.
required: true
choices: [ config, flush, slave ]
login_password:
description:
- The password used to authenticate with (usually not used)
login_host:
description:
- The host running the database
default: localhost
login_port:
description:
- The port to connect to
default: 6379
master_host:
description:
- The host of the master instance [slave command]
default: null
master_port:
description:
- The port of the master instance [slave command]
slave_mode:
description:
      - The mode of the redis instance [slave command]
default: slave
choices: [ master, slave ]
db:
description:
- The database to flush (used in db mode) [flush command]
flush_mode:
description:
- Type of flush (all the dbs in a redis instance or a specific one)
[flush command]
default: all
choices: [ all, db ]
name:
description:
- A redis config key.
version_added: 1.6
value:
description:
- A redis config value.
version_added: 1.6
notes:
- Requires the redis-py Python package on the remote host. You can
install it with pip (pip install redis) or with a package manager.
https://github.com/andymccurdy/redis-py
- If the redis master instance we are making slave of is password protected
this needs to be in the redis.conf in the masterauth variable
requirements: [ redis ]
author: "Xabier Larrakoetxea (@slok)"
'''
EXAMPLES = '''
- name: Set local redis instance to be slave of melee.island on port 6377
redis:
command: slave
master_host: melee.island
master_port: 6377
- name: Deactivate slave mode
redis:
command: slave
slave_mode: master
- name: Flush all the redis db
redis:
command: flush
flush_mode: all
- name: Flush only one db in a redis instance
redis:
command: flush
db: 1
flush_mode: db
- name: Configure local redis to have 10000 max clients
redis:
command: config
name: maxclients
value: 10000
- name: Configure local redis to have lua time limit of 100 ms
redis:
command: config
name: lua-time-limit
value: 100
'''
import traceback
try:
import redis
except ImportError:
redis_found = False
else:
redis_found = True
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils._text import to_native
# Redis module specific support methods.
def set_slave_mode(client, master_host, master_port):
try:
return client.slaveof(master_host, master_port)
except Exception:
return False
def set_master_mode(client):
try:
return client.slaveof()
except Exception:
return False
def flush(client, db=None):
try:
if not isinstance(db, int):
return client.flushall()
else:
# The passed client has been connected to the database already
return client.flushdb()
except Exception:
return False
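# Note: the helpers above return False on any exception instead of raising,
# so main() below can branch on the boolean result and report failures via
# module.fail_json() rather than crashing with a traceback.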
# Module execution.
def main():
module = AnsibleModule(
argument_spec=dict(
command=dict(type='str', choices=['config', 'flush', 'slave']),
login_password=dict(type='str', no_log=True),
login_host=dict(type='str', default='localhost'),
login_port=dict(type='int', default=6379),
master_host=dict(type='str'),
master_port=dict(type='int'),
slave_mode=dict(type='str', default='slave', choices=['master', 'slave']),
db=dict(type='int'),
flush_mode=dict(type='str', default='all', choices=['all', 'db']),
name=dict(type='str'),
value=dict(type='str')
),
supports_check_mode=True,
)
if not redis_found:
module.fail_json(msg="python redis module is required")
login_password = module.params['login_password']
login_host = module.params['login_host']
login_port = module.params['login_port']
command = module.params['command']
# Slave Command section -----------
if command == "slave":
master_host = module.params['master_host']
master_port = module.params['master_port']
mode = module.params['slave_mode']
# Check if we have all the data
if mode == "slave": # Only need data if we want to be slave
if not master_host:
module.fail_json(msg='In slave mode master host must be provided')
if not master_port:
module.fail_json(msg='In slave mode master port must be provided')
# Connect and check
r = redis.StrictRedis(host=login_host, port=login_port, password=login_password)
try:
r.ping()
except Exception as e:
module.fail_json(msg="unable to connect to database: %s" % to_native(e), exception=traceback.format_exc())
# Check if we are already in the mode that we want
info = r.info()
if mode == "master" and info["role"] == "master":
module.exit_json(changed=False, mode=mode)
elif mode == "slave" and info["role"] == "slave" and info["master_host"] == master_host and info["master_port"] == master_port:
status = dict(
status=mode,
master_host=master_host,
master_port=master_port,
)
module.exit_json(changed=False, mode=status)
else:
# Do the stuff
# (Check Check_mode before commands so the commands aren't evaluated
# if not necessary)
if mode == "slave":
if module.check_mode or\
set_slave_mode(r, master_host, master_port):
info = r.info()
status = {
'status': mode,
'master_host': master_host,
'master_port': master_port,
}
module.exit_json(changed=True, mode=status)
else:
module.fail_json(msg='Unable to set slave mode')
else:
if module.check_mode or set_master_mode(r):
module.exit_json(changed=True, mode=mode)
else:
module.fail_json(msg='Unable to set master mode')
# flush Command section -----------
elif command == "flush":
db = module.params['db']
mode = module.params['flush_mode']
# Check if we have all the data
if mode == "db":
if db is None:
module.fail_json(msg="In db mode the db number must be provided")
# Connect and check
r = redis.StrictRedis(host=login_host, port=login_port, password=login_password, db=db)
try:
r.ping()
except Exception as e:
module.fail_json(msg="unable to connect to database: %s" % to_native(e), exception=traceback.format_exc())
# Do the stuff
# (Check Check_mode before commands so the commands aren't evaluated
# if not necessary)
if mode == "all":
if module.check_mode or flush(r):
module.exit_json(changed=True, flushed=True)
else: # Flush never fails :)
module.fail_json(msg="Unable to flush all databases")
else:
if module.check_mode or flush(r, db):
module.exit_json(changed=True, flushed=True, db=db)
else: # Flush never fails :)
module.fail_json(msg="Unable to flush '%d' database" % db)
elif command == 'config':
name = module.params['name']
value = module.params['value']
r = redis.StrictRedis(host=login_host, port=login_port, password=login_password)
try:
r.ping()
except Exception as e:
module.fail_json(msg="unable to connect to database: %s" % to_native(e), exception=traceback.format_exc())
try:
old_value = r.config_get(name)[name]
except Exception as e:
module.fail_json(msg="unable to read config: %s" % to_native(e), exception=traceback.format_exc())
changed = old_value != value
if module.check_mode or not changed:
module.exit_json(changed=changed, name=name, value=value)
else:
try:
r.config_set(name, value)
except Exception as e:
module.fail_json(msg="unable to write config: %s" % to_native(e), exception=traceback.format_exc())
module.exit_json(changed=changed, name=name, value=value)
else:
module.fail_json(msg='A valid command must be provided')
if __name__ == '__main__':
main()
| gpl-3.0 |
Starbow/StarbowWebSite | starbowmodweb/ladder/views.py | 1 | 5026 | from django.shortcuts import render
from django import db
from django.http import HttpResponse
from django.contrib.auth.decorators import login_required
from starbowmodweb.ladder.forms import CrashReportForm, CrashReport
from starbowmodweb.ladder.helpers import get_matchhistory
from starbowmodweb.ladder.models import Client, REGION_LOOKUP
from starbowmodweb import utils
from datetime import datetime, timedelta
import json
def show_player(request, client_id):
client_id = int(client_id)
matches = get_matchhistory(client_id)
try:
client = Client.objects.select_related().get(pk=client_id)
return render(request, 'ladder/player.html', dict(client=client, matches=matches))
except Client.DoesNotExist:
return render(request, 'ladder/player_not_found.html', dict(client_id=client_id))
@login_required
def crash_report(request):
if request.method == 'POST':
report = CrashReport(user=request.user)
form = CrashReportForm(request.POST, request.FILES, instance=report)
if form.is_valid():
form.save()
return render(request, 'ladder/crash_report_success.html', dict(report=report))
else:
form = CrashReportForm()
return render(request, 'ladder/crash_report_submit.html', dict(form=form))
class LeaderboardDatatable(utils.DatatableQuery):
COLUMN_LOOKUP = dict(
username='clients.username',
rank='rank',
division='division',
clientid="clients.id as clientid",
ladder_points='stats.ladder_points',
ladder_wins='(stats.ladder_wins-stats.ladder_walkovers) as ladder_wins',
ladder_losses='(stats.ladder_losses-stats.ladder_forefeits) as ladder_losses',
ladder_forfeits='stats.ladder_forefeits as ladder_forfeits',
ladder_walkovers='stats.ladder_walkovers',
)
def tables(self, params):
if 'region' in self.args:
params.append(int(self.args['region']))
return """(SELECT (@rank:=@rank+1) as rank, tmp.* FROM (
SELECT divisions.name as division, client_region_stats.*
FROM client_region_stats, divisions
WHERE region = %s
AND division_id = divisions.id
AND placement_matches_remaining = 0
ORDER BY divisions.ladder_group DESC, ladder_points DESC) as tmp
) as stats, clients"""
else:
return """(SELECT (@rank:=@rank+1) as rank, tmp.* FROM (
SELECT divisions.name as division, clients.id as client_id, clients.*
FROM clients, divisions
WHERE division_id = divisions.id
AND placement_matches_remaining = 0
ORDER BY divisions.ladder_group DESC, ladder_points DESC) as tmp
) as stats, clients"""
def where(self, params):
return "stats.client_id = clients.id"
def execute(self, cursor):
cursor.execute("SET @rank:=0")
return utils.DatatableQuery.execute(self, cursor)
def datatable_leaderboard(request):
cursor = db.connection.cursor()
data = LeaderboardDatatable(request.GET).execute(cursor)
return HttpResponse(json.dumps(data), mimetype='application/json')
def show_global(request):
    start_date = datetime.utcnow() - timedelta(days=7)
cursor = db.connection.cursor()
global_stats_query = """
select sum(race='zerg')/count(*) as zerg,
sum(race='protoss')/count(*) as protoss,
sum(race='terran')/count(*) as terran,
count(distinct matchid) as matches,
count(distinct clientid) as players
from match_result_players, match_results
where match_results.id=matchid
AND FROM_UNIXTIME(datetime) > %s
"""
cursor.execute(global_stats_query, [start_date])
global_stats = utils.dictfetchall(cursor)[0]
return render(request, 'ladder/global.html', dict(global_stats=global_stats))
def show_region(request, region):
region_id = REGION_LOOKUP[region.upper()]
    start_date = datetime.utcnow() - timedelta(days=7)
cursor = db.connection.cursor()
region_stats_query = """
select sum(race='zerg')/count(*) as zerg,
sum(race='protoss')/count(*) as protoss,
sum(race='terran')/count(*) as terran,
count(distinct matchid) as matches,
count(distinct clientid) as players
from match_result_players, match_results
where match_results.id=matchid
AND region=%s
AND FROM_UNIXTIME(datetime) > %s
"""
cursor.execute(region_stats_query, [region_id, start_date])
region_stats = utils.dictfetchall(cursor)[0]
return render(request, 'ladder/region.html', dict(region_str=region.upper(), region=REGION_LOOKUP[region.upper()], region_stats=region_stats))
| mit |
burzillibus/RobHome | venv/lib/python2.7/site-packages/pip/_vendor/distlib/manifest.py | 335 | 14810 | # -*- coding: utf-8 -*-
#
# Copyright (C) 2012-2013 Python Software Foundation.
# See LICENSE.txt and CONTRIBUTORS.txt.
#
"""
Class representing the list of files in a distribution.
Equivalent to distutils.filelist, but fixes some problems.
"""
import fnmatch
import logging
import os
import re
import sys
from . import DistlibException
from .compat import fsdecode
from .util import convert_path
__all__ = ['Manifest']
logger = logging.getLogger(__name__)
# a \ followed by word characters + EOL (the pattern uses \w rather than \s)
_COLLAPSE_PATTERN = re.compile('\\\w*\n', re.M)
_COMMENTED_LINE = re.compile('#.*?(?=\n)|\n(?=$)', re.M | re.S)
#
# Due to the different results returned by fnmatch.translate, we need
# to do slightly different processing for Python 2.7 and 3.2 ... this needed
# to be brought in for Python 3.6 onwards.
#
_PYTHON_VERSION = sys.version_info[:2]
class Manifest(object):
"""A list of files built by on exploring the filesystem and filtered by
applying various patterns to what we find there.
"""
def __init__(self, base=None):
"""
Initialise an instance.
:param base: The base directory to explore under.
"""
self.base = os.path.abspath(os.path.normpath(base or os.getcwd()))
self.prefix = self.base + os.sep
self.allfiles = None
self.files = set()
#
# Public API
#
def findall(self):
"""Find all files under the base and set ``allfiles`` to the absolute
pathnames of files found.
"""
from stat import S_ISREG, S_ISDIR, S_ISLNK
self.allfiles = allfiles = []
root = self.base
stack = [root]
pop = stack.pop
push = stack.append
while stack:
root = pop()
names = os.listdir(root)
for name in names:
fullname = os.path.join(root, name)
# Avoid excess stat calls -- just one will do, thank you!
stat = os.stat(fullname)
mode = stat.st_mode
if S_ISREG(mode):
allfiles.append(fsdecode(fullname))
elif S_ISDIR(mode) and not S_ISLNK(mode):
push(fullname)
def add(self, item):
"""
Add a file to the manifest.
:param item: The pathname to add. This can be relative to the base.
"""
if not item.startswith(self.prefix):
item = os.path.join(self.base, item)
self.files.add(os.path.normpath(item))
def add_many(self, items):
"""
Add a list of files to the manifest.
:param items: The pathnames to add. These can be relative to the base.
"""
for item in items:
self.add(item)
def sorted(self, wantdirs=False):
"""
Return sorted files in directory order
"""
def add_dir(dirs, d):
dirs.add(d)
logger.debug('add_dir added %s', d)
if d != self.base:
parent, _ = os.path.split(d)
assert parent not in ('', '/')
add_dir(dirs, parent)
result = set(self.files) # make a copy!
if wantdirs:
dirs = set()
for f in result:
add_dir(dirs, os.path.dirname(f))
result |= dirs
return [os.path.join(*path_tuple) for path_tuple in
sorted(os.path.split(path) for path in result)]
def clear(self):
"""Clear all collected files."""
self.files = set()
self.allfiles = []
def process_directive(self, directive):
"""
Process a directive which either adds some files from ``allfiles`` to
``files``, or removes some files from ``files``.
:param directive: The directive to process. This should be in a format
compatible with distutils ``MANIFEST.in`` files:
http://docs.python.org/distutils/sourcedist.html#commands
"""
# Parse the line: split it up, make sure the right number of words
# is there, and return the relevant words. 'action' is always
# defined: it's the first word of the line. Which of the other
# three are defined depends on the action; it'll be either
# patterns, (dir and patterns), or (dirpattern).
action, patterns, thedir, dirpattern = self._parse_directive(directive)
# OK, now we know that the action is valid and we have the
# right number of words on the line for that action -- so we
# can proceed with minimal error-checking.
if action == 'include':
for pattern in patterns:
if not self._include_pattern(pattern, anchor=True):
logger.warning('no files found matching %r', pattern)
elif action == 'exclude':
for pattern in patterns:
found = self._exclude_pattern(pattern, anchor=True)
#if not found:
# logger.warning('no previously-included files '
# 'found matching %r', pattern)
elif action == 'global-include':
for pattern in patterns:
if not self._include_pattern(pattern, anchor=False):
logger.warning('no files found matching %r '
'anywhere in distribution', pattern)
elif action == 'global-exclude':
for pattern in patterns:
found = self._exclude_pattern(pattern, anchor=False)
#if not found:
# logger.warning('no previously-included files '
# 'matching %r found anywhere in '
# 'distribution', pattern)
elif action == 'recursive-include':
for pattern in patterns:
if not self._include_pattern(pattern, prefix=thedir):
logger.warning('no files found matching %r '
'under directory %r', pattern, thedir)
elif action == 'recursive-exclude':
for pattern in patterns:
found = self._exclude_pattern(pattern, prefix=thedir)
#if not found:
# logger.warning('no previously-included files '
# 'matching %r found under directory %r',
# pattern, thedir)
elif action == 'graft':
if not self._include_pattern(None, prefix=dirpattern):
logger.warning('no directories found matching %r',
dirpattern)
elif action == 'prune':
if not self._exclude_pattern(None, prefix=dirpattern):
logger.warning('no previously-included directories found '
'matching %r', dirpattern)
else: # pragma: no cover
# This should never happen, as it should be caught in
# _parse_template_line
raise DistlibException(
'invalid action %r' % action)
#
# Private API
#
def _parse_directive(self, directive):
"""
Validate a directive.
:param directive: The directive to validate.
:return: A tuple of action, patterns, thedir, dir_patterns
"""
words = directive.split()
if len(words) == 1 and words[0] not in ('include', 'exclude',
'global-include',
'global-exclude',
'recursive-include',
'recursive-exclude',
'graft', 'prune'):
# no action given, let's use the default 'include'
words.insert(0, 'include')
action = words[0]
patterns = thedir = dir_pattern = None
if action in ('include', 'exclude',
'global-include', 'global-exclude'):
if len(words) < 2:
raise DistlibException(
'%r expects <pattern1> <pattern2> ...' % action)
patterns = [convert_path(word) for word in words[1:]]
elif action in ('recursive-include', 'recursive-exclude'):
if len(words) < 3:
raise DistlibException(
'%r expects <dir> <pattern1> <pattern2> ...' % action)
thedir = convert_path(words[1])
patterns = [convert_path(word) for word in words[2:]]
elif action in ('graft', 'prune'):
if len(words) != 2:
raise DistlibException(
'%r expects a single <dir_pattern>' % action)
dir_pattern = convert_path(words[1])
else:
raise DistlibException('unknown action %r' % action)
return action, patterns, thedir, dir_pattern
def _include_pattern(self, pattern, anchor=True, prefix=None,
is_regex=False):
"""Select strings (presumably filenames) from 'self.files' that
match 'pattern', a Unix-style wildcard (glob) pattern.
Patterns are not quite the same as implemented by the 'fnmatch'
module: '*' and '?' match non-special characters, where "special"
is platform-dependent: slash on Unix; colon, slash, and backslash on
DOS/Windows; and colon on Mac OS.
If 'anchor' is true (the default), then the pattern match is more
stringent: "*.py" will match "foo.py" but not "foo/bar.py". If
'anchor' is false, both of these will match.
If 'prefix' is supplied, then only filenames starting with 'prefix'
(itself a pattern) and ending with 'pattern', with anything in between
them, will match. 'anchor' is ignored in this case.
If 'is_regex' is true, 'anchor' and 'prefix' are ignored, and
'pattern' is assumed to be either a string containing a regex or a
regex object -- no translation is done, the regex is just compiled
and used as-is.
Selected strings will be added to self.files.
Return True if files are found.
"""
# XXX docstring lying about what the special chars are?
found = False
pattern_re = self._translate_pattern(pattern, anchor, prefix, is_regex)
# delayed loading of allfiles list
if self.allfiles is None:
self.findall()
for name in self.allfiles:
if pattern_re.search(name):
self.files.add(name)
found = True
return found
def _exclude_pattern(self, pattern, anchor=True, prefix=None,
is_regex=False):
"""Remove strings (presumably filenames) from 'files' that match
'pattern'.
Other parameters are the same as for 'include_pattern()', above.
The list 'self.files' is modified in place. Return True if files are
found.
This API is public to allow e.g. exclusion of SCM subdirs, e.g. when
packaging source distributions
"""
found = False
pattern_re = self._translate_pattern(pattern, anchor, prefix, is_regex)
for f in list(self.files):
if pattern_re.search(f):
self.files.remove(f)
found = True
return found
def _translate_pattern(self, pattern, anchor=True, prefix=None,
is_regex=False):
"""Translate a shell-like wildcard pattern to a compiled regular
expression.
Return the compiled regex. If 'is_regex' true,
then 'pattern' is directly compiled to a regex (if it's a string)
or just returned as-is (assumes it's a regex object).
"""
if is_regex:
if isinstance(pattern, str):
return re.compile(pattern)
else:
return pattern
if _PYTHON_VERSION > (3, 2):
# ditch start and end characters
start, _, end = self._glob_to_re('_').partition('_')
if pattern:
pattern_re = self._glob_to_re(pattern)
if _PYTHON_VERSION > (3, 2):
assert pattern_re.startswith(start) and pattern_re.endswith(end)
else:
pattern_re = ''
base = re.escape(os.path.join(self.base, ''))
if prefix is not None:
# ditch end of pattern character
if _PYTHON_VERSION <= (3, 2):
empty_pattern = self._glob_to_re('')
prefix_re = self._glob_to_re(prefix)[:-len(empty_pattern)]
else:
prefix_re = self._glob_to_re(prefix)
assert prefix_re.startswith(start) and prefix_re.endswith(end)
prefix_re = prefix_re[len(start): len(prefix_re) - len(end)]
sep = os.sep
if os.sep == '\\':
sep = r'\\'
if _PYTHON_VERSION <= (3, 2):
pattern_re = '^' + base + sep.join((prefix_re,
'.*' + pattern_re))
else:
pattern_re = pattern_re[len(start): len(pattern_re) - len(end)]
pattern_re = r'%s%s%s%s.*%s%s' % (start, base, prefix_re, sep,
pattern_re, end)
else: # no prefix -- respect anchor flag
if anchor:
if _PYTHON_VERSION <= (3, 2):
pattern_re = '^' + base + pattern_re
else:
pattern_re = r'%s%s%s' % (start, base, pattern_re[len(start):])
return re.compile(pattern_re)
def _glob_to_re(self, pattern):
"""Translate a shell-like glob pattern to a regular expression.
Return a string containing the regex. Differs from
'fnmatch.translate()' in that '*' does not match "special characters"
(which are platform-specific).
"""
pattern_re = fnmatch.translate(pattern)
# '?' and '*' in the glob pattern become '.' and '.*' in the RE, which
# IMHO is wrong -- '?' and '*' aren't supposed to match slash in Unix,
# and by extension they shouldn't match such "special characters" under
# any OS. So change all non-escaped dots in the RE to match any
# character except the special characters (currently: just os.sep).
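        # For example, _glob_to_re('*.py') produces a pattern in which the
        # '*' wildcard expands to match any characters except os.sep; the
        # anchoring that makes 'foo.py' match but 'foo/bar.py' not is added
        # later by _translate_pattern (sketch; the exact regex text depends
        # on this Python version's fnmatch.translate).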
sep = os.sep
if os.sep == '\\':
# we're using a regex to manipulate a regex, so we need
# to escape the backslash twice
sep = r'\\\\'
escaped = r'\1[^%s]' % sep
pattern_re = re.sub(r'((?<!\\)(\\\\)*)\.', escaped, pattern_re)
return pattern_re
| mit |
raajitr/django_hangman | env/lib/python2.7/site-packages/django/test/html.py | 59 | 8064 | """
Comparing two html documents.
"""
from __future__ import unicode_literals
import re
from django.utils import six
from django.utils.encoding import force_text, python_2_unicode_compatible
from django.utils.html_parser import HTMLParseError, HTMLParser
WHITESPACE = re.compile(r'\s+')
def normalize_whitespace(string):
return WHITESPACE.sub(' ', string)
@python_2_unicode_compatible
class Element(object):
def __init__(self, name, attributes):
self.name = name
self.attributes = sorted(attributes)
self.children = []
def append(self, element):
if isinstance(element, six.string_types):
element = force_text(element)
element = normalize_whitespace(element)
if self.children:
if isinstance(self.children[-1], six.string_types):
self.children[-1] += element
self.children[-1] = normalize_whitespace(self.children[-1])
return
elif self.children:
            # remove the last child if it is only whitespace
# this can result in incorrect dom representations since
# whitespace between inline tags like <span> is significant
if isinstance(self.children[-1], six.string_types):
if self.children[-1].isspace():
self.children.pop()
if element:
self.children.append(element)
def finalize(self):
def rstrip_last_element(children):
if children:
if isinstance(children[-1], six.string_types):
children[-1] = children[-1].rstrip()
if not children[-1]:
children.pop()
children = rstrip_last_element(children)
return children
rstrip_last_element(self.children)
for i, child in enumerate(self.children):
if isinstance(child, six.string_types):
self.children[i] = child.strip()
elif hasattr(child, 'finalize'):
child.finalize()
def __eq__(self, element):
if not hasattr(element, 'name'):
return False
if hasattr(element, 'name') and self.name != element.name:
return False
if len(self.attributes) != len(element.attributes):
return False
if self.attributes != element.attributes:
# attributes without a value is same as attribute with value that
# equals the attributes name:
# <input checked> == <input checked="checked">
for i in range(len(self.attributes)):
attr, value = self.attributes[i]
other_attr, other_value = element.attributes[i]
if value is None:
value = attr
if other_value is None:
other_value = other_attr
if attr != other_attr or value != other_value:
return False
if self.children != element.children:
return False
return True
def __hash__(self):
return hash((self.name,) + tuple(a for a in self.attributes))
def __ne__(self, element):
return not self.__eq__(element)
def _count(self, element, count=True):
if not isinstance(element, six.string_types):
if self == element:
return 1
if isinstance(element, RootElement):
if self.children == element.children:
return 1
i = 0
for child in self.children:
# child is text content and element is also text content, then
# make a simple "text" in "text"
if isinstance(child, six.string_types):
if isinstance(element, six.string_types):
if count:
i += child.count(element)
elif element in child:
return 1
else:
i += child._count(element, count=count)
if not count and i:
return i
return i
def __contains__(self, element):
return self._count(element, count=False) > 0
def count(self, element):
return self._count(element, count=True)
def __getitem__(self, key):
return self.children[key]
def __str__(self):
output = '<%s' % self.name
for key, value in self.attributes:
if value:
output += ' %s="%s"' % (key, value)
else:
output += ' %s' % key
if self.children:
output += '>\n'
output += ''.join(six.text_type(c) for c in self.children)
output += '\n</%s>' % self.name
else:
output += ' />'
return output
def __repr__(self):
return six.text_type(self)
@python_2_unicode_compatible
class RootElement(Element):
def __init__(self):
super(RootElement, self).__init__(None, ())
def __str__(self):
return ''.join(six.text_type(c) for c in self.children)
class Parser(HTMLParser):
SELF_CLOSING_TAGS = (
'br', 'hr', 'input', 'img', 'meta', 'spacer', 'link', 'frame', 'base',
'col',
)
def __init__(self):
HTMLParser.__init__(self)
self.root = RootElement()
self.open_tags = []
self.element_positions = {}
def error(self, msg):
raise HTMLParseError(msg, self.getpos())
def format_position(self, position=None, element=None):
if not position and element:
position = self.element_positions[element]
if position is None:
position = self.getpos()
if hasattr(position, 'lineno'):
position = position.lineno, position.offset
return 'Line %d, Column %d' % position
@property
def current(self):
if self.open_tags:
return self.open_tags[-1]
else:
return self.root
def handle_startendtag(self, tag, attrs):
self.handle_starttag(tag, attrs)
if tag not in self.SELF_CLOSING_TAGS:
self.handle_endtag(tag)
def handle_starttag(self, tag, attrs):
# Special case handling of 'class' attribute, so that comparisons of DOM
# instances are not sensitive to ordering of classes.
attrs = [
(name, " ".join(sorted(value.split(" "))))
if name == "class"
else (name, value)
for name, value in attrs
]
element = Element(tag, attrs)
self.current.append(element)
if tag not in self.SELF_CLOSING_TAGS:
self.open_tags.append(element)
self.element_positions[element] = self.getpos()
def handle_endtag(self, tag):
if not self.open_tags:
self.error("Unexpected end tag `%s` (%s)" % (
tag, self.format_position()))
element = self.open_tags.pop()
while element.name != tag:
if not self.open_tags:
self.error("Unexpected end tag `%s` (%s)" % (
tag, self.format_position()))
element = self.open_tags.pop()
def handle_data(self, data):
self.current.append(data)
def handle_charref(self, name):
self.current.append('&%s;' % name)
def handle_entityref(self, name):
self.current.append('&%s;' % name)
def parse_html(html):
"""
Takes a string that contains *valid* HTML and turns it into a Python object
structure that can be easily compared against other HTML on semantic
    equivalence. Syntactical differences, like which quotation marks are
    used around attribute values, will be ignored.
"""
parser = Parser()
parser.feed(html)
parser.close()
document = parser.root
document.finalize()
# Removing ROOT element if it's not necessary
if len(document.children) == 1:
if not isinstance(document.children[0], six.string_types):
document = document.children[0]
return document
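# Usage sketch (hypothetical):
#   dom1 = parse_html('<p class="a b">text</p>')
#   dom2 = parse_html("<p class='b a'>text</p>")
#   assert dom1 == dom2  # attribute quoting and class ordering are ignored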
| mit |
rhyolight/nupic.son | app/soc/mapreduce/gci_insert_dummy_data.py | 1 | 1438 | # Copyright 2013 the Melange authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Mapreduce to insert dummy data for GCI student data for safe-harboring."""
from google.appengine.ext import blobstore
from google.appengine.ext import db
from mapreduce import context
from mapreduce import operation
from soc.modules.gci.logic import profile as profile_logic
def process(student_info):
ctx = context.get()
params = ctx.mapreduce_spec.mapper.params
program_key_str = params['program_key']
program_key = db.Key.from_path('GCIProgram', program_key_str)
# We can skip the student info entity not belonging to the given program.
if student_info.program.key() != program_key:
return
entities, blobs = profile_logic.insertDummyData(student_info)
blobstore.delete(filter(bool, blobs))
for entity in entities:
yield operation.db.Put(entity)
yield operation.counters.Increment("profile dummy data inserted")
| apache-2.0 |
yiliaofan/faker | faker/providers/person/lt_LT/__init__.py | 19 | 3334 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from .. import Provider as PersonProvider
class Provider(PersonProvider):
formats = (
'{{first_name}} {{last_name}}',
'{{first_name}} {{last_name}}',
'{{last_name}}, {{first_name}}'
)
first_names = (
'Tomas', 'Lukas', 'Mantas', 'Deividas', 'Arnas', 'Artūras',
'Karolis', 'Dovydas', 'Dominykas', 'Darius', 'Edvinas', 'Jonas',
'Martynas', 'Kajus', 'Donatas', 'Andrius', 'Matas', 'Rokas',
'Augustas', 'Danielius', 'Mindaugas', 'Paulius', 'Marius',
'Armandas', 'Edgaras', 'Jokūbas', 'Nedas', 'Tadas', 'Nerijus',
'Simonas', 'Vytautas', 'Artūras', 'Robertas', 'Eimantas', 'Arijus',
'Nojus', 'Egidijus', 'Aurimas', 'Emilis', 'Laurynas', 'Edvardas',
'Joris', 'Pijus', 'Erikas', 'Domas', 'Vilius', 'Evaldas', 'Justinas',
'Aleksandras', 'Kristupas', 'Gabrielius', 'Benas', 'Gytis', 'Arminas',
'Vakris', 'Tautvydas', 'Domantas', 'Justas', 'Markas', 'Antanas',
'Arūnas', 'Ernestas', 'Aronas', 'Vaidas', 'Ąžuolas', 'Titas', 'Giedrius',
'Ignas', 'Povilas', 'Saulius', 'Julius', 'Arvydas', 'Kęstutis', 'Rytis',
'Aistis', 'Gediminas', 'Algirdas', 'Naglis', 'Irmantas', 'Rolandas',
'Aivaras', 'Simas', 'Faustas', 'Ramūnas', 'Šarūnas', 'Gustas', 'Tajus',
'Dainius', 'Arnoldas', 'Linas', 'Rojus', 'Adomas', 'Žygimantas',
'Ričardas', 'Orestas', 'Kipras', 'Juozas', 'Audrius', 'Romualdas',
'Petras', 'Eleonora', 'Raminta', 'Dovilė', 'Sandra', 'Dominyka', 'Ana',
'Erika', 'Kristina', 'Gintarė', 'Rūta', 'Edita', 'Karina', 'Živilė',
'Jolanta', 'Radvilė', 'Ramunė', 'Svetlana', 'Ugnė', 'Eglė', 'Viktorija',
'Justina', 'Brigita', 'Rasa', 'Marija', 'Giedrė', 'Iveta', 'Sonata',
'Vitalija', 'Adrija', 'Goda', 'Paulina', 'Kornelija', 'Liepa', 'Vakarė',
'Milda', 'Meda', 'Vaida', 'Izabelė', 'Jovita', 'Irma', 'Žemyna', 'Leila',
'Rimantė', 'Mantė', 'Rytė', 'Perla', 'Greta', 'Monika', 'Ieva', 'Indrė',
'Ema', 'Aurelija', 'Smiltė', 'Ingrida', 'Simona', 'Amelija', 'Sigita',
'Olivija', 'Laurita', 'Jorūnė', 'Leticija', 'Vigilija', 'Medėja', 'Laura',
'Agnė', 'Evelina', 'Kotryna', 'Lėja', 'Aušra', 'Neringa', 'Gerda',
'Jurgita', 'Rusnė', 'Aušrinė', 'Rita', 'Elena', 'Ineta', 'Ligita',
'Vasarė', 'Vėjūnė', 'Ignė', 'Gytė', 'Ariana', 'Arielė', 'Vytė', 'Eidvilė',
'Karolina', 'Miglė', 'Viltė', 'Jolanta', 'Enrika', 'Aurėja', 'Vanesa',
'Darija', 'Reda', 'Milana', 'Rugilė', 'Diana'
)
last_names = (
'Kazlauskas', 'Jankauskas', 'Petrauskas', 'Pocius', 'Stankevičius',
        'Vasiliauskas', 'Žukauskas', 'Butkus', 'Paulauskas', 'Urbonas',
        'Kavaliauskas', 'Sakalauskas', 'Žukauskas', 'Akelis', 'Ambrasas',
'Kairys', 'Kalvaitis', 'Kalvelis', 'Kalvėnas', 'Kaupas', 'Kiška',
'Gagys', 'Gailius', 'Gailys', 'Gaižauskas', 'Gaičiūnas', 'Galdikas',
'Gintalas', 'Ginzburgas', 'Grinius', 'Gronskis', 'Nagys', 'Naujokas',
'Narušis', 'Nausėda', 'Poška', 'Povilonis'
)
| mit |
madhurauti/Map-Polygon | modules/tests/staff/create_staff_job_role.py | 2 | 2075 | """ Sahana Eden Automated Test - HRM001 Create Job Role
@copyright: 2011-2012 (c) Sahana Software Foundation
@license: MIT
Permission is hereby granted, free of charge, to any person
obtaining a copy of this software and associated documentation
files (the "Software"), to deal in the Software without
restriction, including without limitation the rights to use,
copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following
conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.
"""
from tests.web2unittest import SeleniumUnitTest
class CreateStaffJobRole(SeleniumUnitTest):
def test_hrm001_create_staff_job_role(self):
"""
@case: HRM001
@description: Create a Job Role
@TestDoc: https://docs.google.com/spreadsheet/ccc?key=0AmB3hMcgB-3idG1XNGhhRG9QWF81dUlKLXpJaFlCMFE
@Test Wiki: http://eden.sahanafoundation.org/wiki/DeveloperGuidelines/Testing
"""
print "\n"
self.login(account="admin", nexturl="hrm/job_role/create")
self.create("hrm_job_role",
[( "name",
"Programme Coordinator"
),
( "comments",
"Comment/Description of the role job goes here."),
]
)
| mit |
jeffrey4l/nova | nova/openstack/common/report/views/text/threading.py | 39 | 2473 | # Copyright 2013 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Provides thread and stack-trace views
This module provides a collection of views for
visualizing threads, green threads, and stack traces
in human-readable form.
"""
from nova.openstack.common.report.views import jinja_view as jv
class StackTraceView(jv.JinjaView):
"""A Stack Trace View
This view displays stack trace models defined by
:class:`openstack.common.report.models.threading.StackTraceModel`
"""
VIEW_TEXT = (
"{% if root_exception is not none %}"
"Exception: {{ root_exception }}\n"
"------------------------------------\n"
"\n"
"{% endif %}"
"{% for line in lines %}\n"
"{{ line.filename }}:{{ line.line }} in {{ line.name }}\n"
" {% if line.code is not none %}"
"`{{ line.code }}`"
"{% else %}"
"(source not found)"
"{% endif %}\n"
"{% else %}\n"
"No Traceback!\n"
"{% endfor %}"
)
class GreenThreadView(object):
"""A Green Thread View
This view displays a green thread provided by the data
model :class:`openstack.common.report.models.threading.GreenThreadModel`
"""
FORMAT_STR = "------{thread_str: ^60}------" + "\n" + "{stack_trace}"
def __call__(self, model):
return self.FORMAT_STR.format(
thread_str=" Green Thread ",
stack_trace=model.stack_trace
)
class ThreadView(object):
"""A Thread Collection View
This view displays a python thread provided by the data
model :class:`openstack.common.report.models.threading.ThreadModel` # noqa
"""
FORMAT_STR = "------{thread_str: ^60}------" + "\n" + "{stack_trace}"
def __call__(self, model):
return self.FORMAT_STR.format(
thread_str=" Thread #{0} ".format(model.thread_id),
stack_trace=model.stack_trace
)
| apache-2.0 |
aerophile/django | tests/migrations/test_writer.py | 10 | 19538 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
import datetime
import math
import os
import re
import tokenize
import unittest
import custom_migration_operations.more_operations
import custom_migration_operations.operations
from django.conf import settings
from django.core.validators import EmailValidator, RegexValidator
from django.db import migrations, models
from django.db.migrations.writer import (
MigrationWriter, OperationWriter, SettingsReference,
)
from django.test import SimpleTestCase, TestCase, ignore_warnings
from django.utils import datetime_safe, six
from django.utils._os import upath
from django.utils.deconstruct import deconstructible
from django.utils.timezone import FixedOffset, get_default_timezone, utc
from django.utils.translation import ugettext_lazy as _
from .models import FoodManager, FoodQuerySet
class TestModel1(object):
def upload_to(self):
return "somewhere dynamic"
thing = models.FileField(upload_to=upload_to)
class OperationWriterTests(SimpleTestCase):
def test_empty_signature(self):
operation = custom_migration_operations.operations.TestOperation()
writer = OperationWriter(operation)
writer.indentation = 0
buff, imports = writer.serialize()
self.assertEqual(imports, {'import custom_migration_operations.operations'})
self.assertEqual(
buff,
'custom_migration_operations.operations.TestOperation(\n'
'),'
)
def test_args_signature(self):
operation = custom_migration_operations.operations.ArgsOperation(1, 2)
writer = OperationWriter(operation)
writer.indentation = 0
buff, imports = writer.serialize()
self.assertEqual(imports, {'import custom_migration_operations.operations'})
self.assertEqual(
buff,
'custom_migration_operations.operations.ArgsOperation(\n'
' arg1=1,\n'
' arg2=2,\n'
'),'
)
def test_kwargs_signature(self):
operation = custom_migration_operations.operations.KwargsOperation(kwarg1=1)
writer = OperationWriter(operation)
writer.indentation = 0
buff, imports = writer.serialize()
self.assertEqual(imports, {'import custom_migration_operations.operations'})
self.assertEqual(
buff,
'custom_migration_operations.operations.KwargsOperation(\n'
' kwarg1=1,\n'
'),'
)
def test_args_kwargs_signature(self):
operation = custom_migration_operations.operations.ArgsKwargsOperation(1, 2, kwarg2=4)
writer = OperationWriter(operation)
writer.indentation = 0
buff, imports = writer.serialize()
self.assertEqual(imports, {'import custom_migration_operations.operations'})
self.assertEqual(
buff,
'custom_migration_operations.operations.ArgsKwargsOperation(\n'
' arg1=1,\n'
' arg2=2,\n'
' kwarg2=4,\n'
'),'
)
def test_expand_args_signature(self):
operation = custom_migration_operations.operations.ExpandArgsOperation([1, 2])
writer = OperationWriter(operation)
writer.indentation = 0
buff, imports = writer.serialize()
self.assertEqual(imports, {'import custom_migration_operations.operations'})
self.assertEqual(
buff,
'custom_migration_operations.operations.ExpandArgsOperation(\n'
' arg=[\n'
' 1,\n'
' 2,\n'
' ],\n'
'),'
)
class WriterTests(TestCase):
"""
Tests the migration writer (makes migration files from Migration instances)
"""
def safe_exec(self, string, value=None):
l = {}
try:
exec(string, globals(), l)
except Exception as e:
if value:
self.fail("Could not exec %r (from value %r): %s" % (string.strip(), value, e))
else:
self.fail("Could not exec %r: %s" % (string.strip(), e))
return l
def serialize_round_trip(self, value):
string, imports = MigrationWriter.serialize(value)
return self.safe_exec("%s\ntest_value_result = %s" % ("\n".join(imports), string), value)['test_value_result']
def assertSerializedEqual(self, value):
self.assertEqual(self.serialize_round_trip(value), value)
def assertSerializedResultEqual(self, value, target):
self.assertEqual(MigrationWriter.serialize(value), target)
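    # Round-trip sketch (not part of the original tests): MigrationWriter
    # serializes a value to (source, imports); exec'ing the imports plus the
    # source should rebuild an equal value. For example, datetime.date(2014,
    # 3, 31) serializes to ("datetime.date(2014, 3, 31)", {'import datetime'}).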
def assertSerializedFieldEqual(self, value):
new_value = self.serialize_round_trip(value)
self.assertEqual(value.__class__, new_value.__class__)
self.assertEqual(value.max_length, new_value.max_length)
self.assertEqual(value.null, new_value.null)
self.assertEqual(value.unique, new_value.unique)
def test_serialize_numbers(self):
self.assertSerializedEqual(1)
self.assertSerializedEqual(1.2)
self.assertTrue(math.isinf(self.serialize_round_trip(float("inf"))))
self.assertTrue(math.isinf(self.serialize_round_trip(float("-inf"))))
self.assertTrue(math.isnan(self.serialize_round_trip(float("nan"))))
def test_serialize_constants(self):
self.assertSerializedEqual(None)
self.assertSerializedEqual(True)
self.assertSerializedEqual(False)
def test_serialize_strings(self):
self.assertSerializedEqual(b"foobar")
string, imports = MigrationWriter.serialize(b"foobar")
self.assertEqual(string, "b'foobar'")
self.assertSerializedEqual("föobár")
string, imports = MigrationWriter.serialize("foobar")
self.assertEqual(string, "'foobar'")
def test_serialize_collections(self):
self.assertSerializedEqual({1: 2})
self.assertSerializedEqual(["a", 2, True, None])
self.assertSerializedEqual({2, 3, "eighty"})
self.assertSerializedEqual({"lalalala": ["yeah", "no", "maybe"]})
self.assertSerializedEqual(_('Hello'))
def test_serialize_builtin_types(self):
self.assertSerializedEqual([list, tuple, dict, set, frozenset])
self.assertSerializedResultEqual(
[list, tuple, dict, set, frozenset],
("[list, tuple, dict, set, frozenset]", set())
)
def test_serialize_functions(self):
with six.assertRaisesRegex(self, ValueError, 'Cannot serialize function: lambda'):
self.assertSerializedEqual(lambda x: 42)
self.assertSerializedEqual(models.SET_NULL)
string, imports = MigrationWriter.serialize(models.SET(42))
self.assertEqual(string, 'models.SET(42)')
self.serialize_round_trip(models.SET(42))
def test_serialize_datetime(self):
self.assertSerializedEqual(datetime.datetime.utcnow())
self.assertSerializedEqual(datetime.datetime.utcnow)
self.assertSerializedEqual(datetime.datetime.today())
self.assertSerializedEqual(datetime.datetime.today)
self.assertSerializedEqual(datetime.date.today())
self.assertSerializedEqual(datetime.date.today)
self.assertSerializedEqual(datetime.datetime.now().time())
self.assertSerializedEqual(datetime.datetime(2014, 1, 1, 1, 1, tzinfo=get_default_timezone()))
self.assertSerializedEqual(datetime.datetime(2013, 12, 31, 22, 1, tzinfo=FixedOffset(180)))
self.assertSerializedResultEqual(
datetime.datetime(2014, 1, 1, 1, 1),
("datetime.datetime(2014, 1, 1, 1, 1)", {'import datetime'})
)
self.assertSerializedResultEqual(
datetime.datetime(2012, 1, 1, 1, 1, tzinfo=utc),
(
"datetime.datetime(2012, 1, 1, 1, 1, tzinfo=utc)",
{'import datetime', 'from django.utils.timezone import utc'},
)
)
def test_serialize_datetime_safe(self):
self.assertSerializedResultEqual(
datetime_safe.date(2014, 3, 31),
("datetime.date(2014, 3, 31)", {'import datetime'})
)
self.assertSerializedResultEqual(
datetime_safe.time(10, 25),
("datetime.time(10, 25)", {'import datetime'})
)
self.assertSerializedResultEqual(
datetime_safe.datetime(2014, 3, 31, 16, 4, 31),
("datetime.datetime(2014, 3, 31, 16, 4, 31)", {'import datetime'})
)
def test_serialize_fields(self):
self.assertSerializedFieldEqual(models.CharField(max_length=255))
self.assertSerializedResultEqual(
models.CharField(max_length=255),
("models.CharField(max_length=255)", {"from django.db import models"})
)
self.assertSerializedFieldEqual(models.TextField(null=True, blank=True))
self.assertSerializedResultEqual(
models.TextField(null=True, blank=True),
("models.TextField(blank=True, null=True)", {'from django.db import models'})
)
def test_serialize_settings(self):
self.assertSerializedEqual(SettingsReference(settings.AUTH_USER_MODEL, "AUTH_USER_MODEL"))
self.assertSerializedResultEqual(
SettingsReference("someapp.model", "AUTH_USER_MODEL"),
("settings.AUTH_USER_MODEL", {"from django.conf import settings"})
)
self.assertSerializedResultEqual(
((x, x * x) for x in range(3)),
("((0, 0), (1, 1), (2, 4))", set())
)
def test_serialize_compiled_regex(self):
"""
Make sure compiled regex can be serialized.
"""
regex = re.compile(r'^\w+$', re.U)
self.assertSerializedEqual(regex)
def test_serialize_class_based_validators(self):
"""
Ticket #22943: Test serialization of class-based validators, including
compiled regexes.
"""
validator = RegexValidator(message="hello")
string = MigrationWriter.serialize(validator)[0]
self.assertEqual(string, "django.core.validators.RegexValidator(message='hello')")
self.serialize_round_trip(validator)
# Test with a compiled regex.
validator = RegexValidator(regex=re.compile(r'^\w+$', re.U))
string = MigrationWriter.serialize(validator)[0]
self.assertEqual(string, "django.core.validators.RegexValidator(regex=re.compile('^\\\\w+$', 32))")
self.serialize_round_trip(validator)
# Test a string regex with flag
validator = RegexValidator(r'^[0-9]+$', flags=re.U)
string = MigrationWriter.serialize(validator)[0]
self.assertEqual(string, "django.core.validators.RegexValidator('^[0-9]+$', flags=32)")
self.serialize_round_trip(validator)
# Test message and code
validator = RegexValidator('^[-a-zA-Z0-9_]+$', 'Invalid', 'invalid')
string = MigrationWriter.serialize(validator)[0]
self.assertEqual(string, "django.core.validators.RegexValidator('^[-a-zA-Z0-9_]+$', 'Invalid', 'invalid')")
self.serialize_round_trip(validator)
# Test with a subclass.
validator = EmailValidator(message="hello")
string = MigrationWriter.serialize(validator)[0]
self.assertEqual(string, "django.core.validators.EmailValidator(message='hello')")
self.serialize_round_trip(validator)
validator = deconstructible(path="migrations.test_writer.EmailValidator")(EmailValidator)(message="hello")
string = MigrationWriter.serialize(validator)[0]
self.assertEqual(string, "migrations.test_writer.EmailValidator(message='hello')")
validator = deconstructible(path="custom.EmailValidator")(EmailValidator)(message="hello")
with six.assertRaisesRegex(self, ImportError, "No module named '?custom'?"):
MigrationWriter.serialize(validator)
validator = deconstructible(path="django.core.validators.EmailValidator2")(EmailValidator)(message="hello")
with self.assertRaisesMessage(ValueError, "Could not find object EmailValidator2 in django.core.validators."):
MigrationWriter.serialize(validator)
def test_serialize_empty_nonempty_tuple(self):
"""
Ticket #22679: makemigrations generates invalid code for (an empty
tuple) default_permissions = ()
"""
empty_tuple = ()
one_item_tuple = ('a',)
many_items_tuple = ('a', 'b', 'c')
self.assertSerializedEqual(empty_tuple)
self.assertSerializedEqual(one_item_tuple)
self.assertSerializedEqual(many_items_tuple)
@unittest.skipUnless(six.PY2, "Only applies on Python 2")
def test_serialize_direct_function_reference(self):
"""
Ticket #22436: You cannot use a function straight from its body
(e.g. define the method and use it in the same body)
"""
with self.assertRaises(ValueError):
self.serialize_round_trip(TestModel1.thing)
def test_serialize_local_function_reference(self):
"""
        Neither py2 nor py3 can serialize a reference in a local scope.
"""
class TestModel2(object):
def upload_to(self):
return "somewhere dynamic"
thing = models.FileField(upload_to=upload_to)
with self.assertRaises(ValueError):
self.serialize_round_trip(TestModel2.thing)
def test_serialize_local_function_reference_message(self):
"""
        Make sure the user sees which module/function is at issue
"""
class TestModel2(object):
def upload_to(self):
return "somewhere dynamic"
thing = models.FileField(upload_to=upload_to)
with six.assertRaisesRegex(self, ValueError,
'^Could not find function upload_to in migrations.test_writer'):
self.serialize_round_trip(TestModel2.thing)
def test_serialize_managers(self):
self.assertSerializedEqual(models.Manager())
self.assertSerializedResultEqual(
FoodQuerySet.as_manager(),
('migrations.models.FoodQuerySet.as_manager()', {'import migrations.models'})
)
self.assertSerializedEqual(FoodManager('a', 'b'))
self.assertSerializedEqual(FoodManager('x', 'y', c=3, d=4))
def test_simple_migration(self):
"""
Tests serializing a simple migration.
"""
fields = {
'charfield': models.DateTimeField(default=datetime.datetime.utcnow),
'datetimefield': models.DateTimeField(default=datetime.datetime.utcnow),
}
options = {
'verbose_name': 'My model',
'verbose_name_plural': 'My models',
}
migration = type(str("Migration"), (migrations.Migration,), {
"operations": [
migrations.CreateModel("MyModel", tuple(fields.items()), options, (models.Model,)),
migrations.CreateModel("MyModel2", tuple(fields.items()), bases=(models.Model,)),
migrations.CreateModel(name="MyModel3", fields=tuple(fields.items()), options=options, bases=(models.Model,)),
migrations.DeleteModel("MyModel"),
migrations.AddField("OtherModel", "datetimefield", fields["datetimefield"]),
],
"dependencies": [("testapp", "some_other_one")],
})
writer = MigrationWriter(migration)
output = writer.as_string()
# It should NOT be unicode.
self.assertIsInstance(output, six.binary_type, "Migration as_string returned unicode")
# We don't test the output formatting - that's too fragile.
# Just make sure it runs for now, and that things look alright.
result = self.safe_exec(output)
self.assertIn("Migration", result)
        # In order to preserve compatibility with Python 3.2, a unicode
        # literal prefix shouldn't be added to strings.
tokens = tokenize.generate_tokens(six.StringIO(str(output)).readline)
for token_type, token_source, (srow, scol), __, line in tokens:
if token_type == tokenize.STRING:
self.assertFalse(
token_source.startswith('u'),
"Unicode literal prefix found at %d:%d: %r" % (
srow, scol, line.strip()
)
)
# Silence warning on Python 2: Not importing directory
# 'tests/migrations/migrations_test_apps/without_init_file/migrations':
# missing __init__.py
@ignore_warnings(category=ImportWarning)
def test_migration_path(self):
test_apps = [
'migrations.migrations_test_apps.normal',
'migrations.migrations_test_apps.with_package_model',
'migrations.migrations_test_apps.without_init_file',
]
base_dir = os.path.dirname(os.path.dirname(upath(__file__)))
for app in test_apps:
with self.modify_settings(INSTALLED_APPS={'append': app}):
migration = migrations.Migration('0001_initial', app.split('.')[-1])
expected_path = os.path.join(base_dir, *(app.split('.') + ['migrations', '0001_initial.py']))
writer = MigrationWriter(migration)
self.assertEqual(writer.path, expected_path)
def test_custom_operation(self):
migration = type(str("Migration"), (migrations.Migration,), {
"operations": [
custom_migration_operations.operations.TestOperation(),
custom_migration_operations.operations.CreateModel(),
migrations.CreateModel("MyModel", (), {}, (models.Model,)),
custom_migration_operations.more_operations.TestOperation()
],
"dependencies": []
})
writer = MigrationWriter(migration)
output = writer.as_string()
result = self.safe_exec(output)
self.assertIn("custom_migration_operations", result)
self.assertNotEqual(
result['custom_migration_operations'].operations.TestOperation,
result['custom_migration_operations'].more_operations.TestOperation
)
def test_sorted_imports(self):
"""
#24155 - Tests ordering of imports.
"""
migration = type(str("Migration"), (migrations.Migration,), {
"operations": [
migrations.AddField("mymodel", "myfield", models.DateTimeField(
default=datetime.datetime(2012, 1, 1, 1, 1, tzinfo=utc),
)),
]
})
writer = MigrationWriter(migration)
output = writer.as_string().decode('utf-8')
self.assertIn(
"import datetime\n"
"from django.db import migrations, models\n"
"from django.utils.timezone import utc\n",
output
)
def test_deconstruct_class_arguments(self):
# Yes, it doesn't make sense to use a class as a default for a
# CharField. It does make sense for custom fields though, for example
# an enumfield that takes the enum class as an argument.
class DeconstructableInstances(object):
def deconstruct(self):
return ('DeconstructableInstances', [], {})
string = MigrationWriter.serialize(models.CharField(default=DeconstructableInstances))[0]
self.assertEqual(string, "models.CharField(default=migrations.test_writer.DeconstructableInstances)")
| bsd-3-clause |
marcelocure/django | tests/postgres_tests/fields.py | 302 | 1087 | """
Indirection layer for PostgreSQL-specific fields, so the tests don't fail when
run with a backend other than PostgreSQL.
"""
from django.db import models
try:
from django.contrib.postgres.fields import (
ArrayField, BigIntegerRangeField, DateRangeField, DateTimeRangeField,
FloatRangeField, HStoreField, IntegerRangeField, JSONField,
)
except ImportError:
class DummyArrayField(models.Field):
def __init__(self, base_field, size=None, **kwargs):
super(DummyArrayField, self).__init__(**kwargs)
def deconstruct(self):
name, path, args, kwargs = super(DummyArrayField, self).deconstruct()
kwargs.update({
'base_field': '',
'size': 1,
})
return name, path, args, kwargs
ArrayField = DummyArrayField
BigIntegerRangeField = models.Field
DateRangeField = models.Field
DateTimeRangeField = models.Field
FloatRangeField = models.Field
HStoreField = models.Field
IntegerRangeField = models.Field
JSONField = models.Field
| bsd-3-clause |
xuyuhan/depot_tools | third_party/retry_decorator/decorators.py | 55 | 1565 | import time
from functools import wraps
def retry(ExceptionToCheck, tries=4, delay=3, backoff=2, logger=None):
"""Retry calling the decorated function using an exponential backoff.
http://www.saltycrane.com/blog/2009/11/trying-out-retry-decorator-python/
original from: http://wiki.python.org/moin/PythonDecoratorLibrary#Retry
:param ExceptionToCheck: the exception to check. may be a tuple of
exceptions to check
:type ExceptionToCheck: Exception or tuple
:param tries: number of times to try (not retry) before giving up
:type tries: int
:param delay: initial delay between retries in seconds
:type delay: int
:param backoff: backoff multiplier e.g. value of 2 will double the delay
each retry
:type backoff: int
:param logger: logger to use. If None, print
:type logger: logging.Logger instance
"""
def deco_retry(f):
@wraps(f)
def f_retry(*args, **kwargs):
mtries, mdelay = tries, delay
while mtries > 1:
try:
return f(*args, **kwargs)
except ExceptionToCheck, e:
msg = "%s, Retrying in %d seconds..." % (str(e), mdelay)
if logger:
logger.warning(msg)
else:
print msg
time.sleep(mdelay)
mtries -= 1
mdelay *= backoff
return f(*args, **kwargs)
return f_retry # true decorator
return deco_retry
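# Illustrative usage (a minimal sketch, not part of the original module).
if __name__ == '__main__':
    state = {'calls': 0}
    @retry(ValueError, tries=3, delay=0, backoff=2)
    def flaky():
        # Fails twice, then succeeds on the third and final try.
        state['calls'] += 1
        if state['calls'] < 3:
            raise ValueError('not ready yet')
        return 'ok'
    print flaky()  # two "Retrying in 0 seconds..." messages, then 'ok'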
| bsd-3-clause |
ncrocfer/radric | radric/generators.py | 1 | 9868 | # -*- coding: utf-8 -*-
import re
import os
import yaml
import logging
from slugify import slugify
from jinja2 import Environment, FileSystemLoader
from docutils.core import publish_parts
try:
from urllib.parse import urljoin
except ImportError:
from urlparse import urljoin
from radric import rstdirective
from radric.post import Post
from radric.page import Page
from radric.exceptions import InvalidContentSyntax, FormatException
logger = logging.getLogger()
class BaseGenerator(object):
def __init__(self, settings, context):
self.settings = settings
self.context = context
theme_path = os.path.join(
self.settings['SOURCE_PATH'],
'themes',
self.settings['THEME']
)
self.env = Environment(loader=FileSystemLoader(theme_path))
def extract_meta(self, content):
r = re.compile('---(.*?)---(.*)', re.DOTALL)
src = r.search(content)
if not src:
raise InvalidContentSyntax()
        # safe_load avoids executing arbitrary YAML tags; front matter only
        # needs plain mappings. An empty block yields None, hence the "or {}".
        metas = yaml.safe_load(src.group(1)) or {}
return metas, src.group(2)
def generate_html(self, file):
name, ext = os.path.splitext(file.path)
if 'rst' in ext:
parts = publish_parts(file.plaintext, writer_name='html',
                settings_overrides={'initial_header_level': 2})
return parts['html_body']
else:
raise FormatException()
def generate_file(self, writer, template, context, object=None, path=None):
template = self.env.get_template(template)
content = template.render(**context).encode('utf-8')
# If we pass an object (post, category, tag...), we dynamically create
# the path, otherwise we use the `path` parameter
if object:
try:
folders = object.folders
except AttributeError:
folders = object['folders']
endpoint = writer.create_folders(folders)
else:
endpoint = path
writer.write(endpoint, content)
class PostsGenerator(BaseGenerator):
def __init__(self, *args, **kwargs):
self.posts = list()
self.drafts = list()
self.categories = dict()
self.tags = dict()
self.authors = dict()
super(PostsGenerator, self).__init__(*args, **kwargs)
def process(self, reader):
files = reader.get_files()
for post_path in files['posts']:
with open(post_path, "r") as f:
try:
metas, plaintext = self.extract_meta(f.read())
except InvalidContentSyntax:
logger.error(
"Bad syntax for {} file (skip it)".format(post_path)
)
continue
p = Post(
path=post_path,
metas=metas,
plaintext=plaintext,
settings=self.settings
)
if 'draft' in metas and metas['draft']:
self.drafts.append(p)
continue
# Categories
if 'categories' in metas:
for cat in metas['categories']:
if cat not in self.categories:
url = self.settings['CATEGORY_URL'].replace(
'{category}',
slugify(cat)
)
self.categories[cat] = dict()
self.categories[cat]['name'] = cat
self.categories[cat]['folders'] = url
self.categories[cat]['url'] = urljoin(
self.settings['SITE_URL'],
self.categories[cat]['folders']
)
self.categories[cat]['posts'] = list()
# Append the category to this post
p.categories.append(self.categories[cat])
# Append this post in the categories list
self.categories[cat]['posts'].append(p)
# Tags
if 'tags' in metas:
for tag in metas['tags']:
if tag not in self.tags:
url = self.settings['TAG_URL'].replace(
'{tag}',
slugify(tag)
)
self.tags[tag] = dict()
self.tags[tag]['name'] = tag
self.tags[tag]['folders'] = url
self.tags[tag]['url'] = urljoin(
self.settings['SITE_URL'],
self.tags[tag]['folders']
)
self.tags[tag]['posts'] = list()
# Append the tag to this post
p.tags.append(self.tags[tag])
# Append this post in the tags list
self.tags[tag]['posts'].append(p)
# Authors
if 'author' in metas:
if metas['author'] in self.settings['AUTHORS']:
author = metas['author']
if author not in self.authors:
url = self.settings['AUTHOR_URL'].replace(
'{author}',
author
)
# Here, id(self.authors[author]) == id(p.author)
self.authors[author] = self.settings['AUTHORS'] \
.get(metas['author'])
self.authors[author]['slug'] = author
self.authors[author]['folders'] = url
self.authors[author]['url'] = urljoin(
self.settings['SITE_URL'],
self.authors[author]['folders']
)
self.authors[author]['posts'] = list()
self.authors[author]['posts'].append(p)
# Append the post to the posts list
self.posts.append(p)
self.context['categories'] = self.categories = self.categories.values()
self.context['tags'] = self.tags = self.tags.values()
self.context['authors'] = self.authors = self.authors.values()
self.context['posts'] = self.posts
return self
def generate(self, writer, context):
for post in self.posts:
self.generate_post(writer, context, post)
for category in self.categories:
self.generate_category(writer, context, category)
for tag in self.tags:
self.generate_tag(writer, context, tag)
for author in self.authors:
self.generate_author(writer, context, author)
self.generate_index(writer, context)
def generate_category(self, writer, context, category):
context['category'] = category
self.generate_file(writer, self.settings['CATEGORY_FILE'],
context, category)
def generate_tag(self, writer, context, tag):
context['tag'] = tag
self.generate_file(writer, self.settings['TAG_FILE'],
context, tag)
def generate_author(self, writer, context, author):
context['author'] = author
self.generate_file(writer, self.settings['AUTHOR_FILE'],
context, author)
def generate_post(self, writer, context, post):
html = self.generate_html(post)
post.content = html
context['post'] = post
self.generate_file(writer, self.settings['POST_FILE'],
context, post)
def generate_index(self, writer, context):
index_path = os.path.join(
self.settings['SOURCE_PATH'],
self.settings['PUBLIC_FOLDER'],
'index.html'
)
self.generate_file(writer, self.settings['INDEX_FILE'],
context, path=index_path)
class PagesGenerator(BaseGenerator):
def __init__(self, *args, **kwargs):
self.pages = list()
self.drafts = list()
super(PagesGenerator, self).__init__(*args, **kwargs)
def process(self, reader):
files = reader.get_files()
for page_path in files['pages']:
with open(page_path, "r") as f:
try:
metas, plaintext = self.extract_meta(f.read())
except InvalidContentSyntax:
logger.error(
"Bad syntax for {} file (skip it)".format(page_path)
)
continue
page = Page(
path=page_path,
metas=metas,
plaintext=plaintext,
settings=self.settings
)
if 'draft' in metas and metas['draft']:
self.drafts.append(page)
continue
self.pages.append(page)
self.context['pages'] = self.pages
return self
def generate(self, writer, context):
for page in self.pages:
self.generate_page(writer, context, page)
def generate_page(self, writer, context, page):
html = self.generate_html(page)
page.content = html
context['page'] = page
self.generate_file(writer, self.settings['PAGE_FILE'],
context, page)
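# Illustrative usage (a minimal sketch, not part of the original module).
# ``extract_meta`` expects YAML front matter delimited by "---" markers;
# this exercises the same regex + YAML logic on a hypothetical post.
if __name__ == '__main__':
    sample = "---\ntitle: Hello\ntags: [a, b]\n---\nBody text."
    match = re.search('---(.*?)---(.*)', sample, re.DOTALL)
    print(yaml.safe_load(match.group(1)))  # {'title': 'Hello', 'tags': ['a', 'b']}
    print(match.group(2))  # '\nBody text.'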
| mit |
marcass/tank_lora | python/archive/queue_listen.py | 1 | 4136 | from multiprocessing import Queue as Q
from multiprocessing import Process as P
import time
import sys
import numpy as np
import paho.mqtt.publish as publish
import serial
import smtplib
import requests
import matplotlib.pyplot as plt
import datetime
import sqlite3
import creds
import tanks
s_port = '/dev/LORA'
#initialise global port
port = None
#thingspeak
water_APIKey = creds.water_APIKey #channel api key
batt_APIKey = creds.batt_APIKey
thingURL = "https://api.thingspeak.com/update"
#mqtt
broker = creds.mosq_auth['broker']
auth = creds.mosq_auth
#db
def setup_db():
# Create table
conn, c = tanks.get_db()
c.execute('''CREATE TABLE IF NOT EXISTS measurements
(timestamp TIMESTAMP, tank_id INTEGER, water_volume REAL, voltage REAL)''')
conn.commit() # Save (commit) the changes
def add_measurement(tank_id,water_volume,voltage):
# Insert a row of data
conn, c = tanks.get_db()
c.execute("INSERT INTO measurements VALUES (?,?,?,?)", (datetime.datetime.utcnow(),tank_id,water_volume,voltage) )
conn.commit() # Save (commit) the changes
def readlineCR(port):
rv = ''
while True:
ch = port.read()
rv += ch
        if ch == '\n':  # or ch=='':
            if 'PY' in rv: #arduino formats message as PY;<nodeID>;<waterlevel>;<batteryvoltage>;\r\n
                print rv
                rec_split = rv.split(';') #make a list like ['PY', nodeID, distance, battery, ...]
                print rec_split
                q.put(rec_split[1:4]) #put data in the queue for processing at its own rate
rv = ''
#format mqtt message
def pub_msg():
while True:
while (q.empty() == False):
data = q.get()
in_node = data[0]
if tanks.tanks_by_nodeID.has_key(in_node):
tank = tanks.tanks_by_nodeID[in_node]
else:
break
print data
#check to see if it's a relay (and insert null water value if it is)
dist = data[1]
batt = data[2]
try:
dist = int(dist)
#check to see if in acceptable value range
if (dist < tank.invalid_min) or (dist > tank.max_payload):
vol = None
else:
vol = tank.volume(dist)
except:
vol = None
try:
batt = float(batt)
if batt > 5.5:
batt = None
except:
batt = None
#add to db
add_measurement(in_node,vol,batt)
#publish to thingspeak
#r = requests.post(thingURL, data = {'api_key':water_APIKey, 'field' +tank.nodeID: vol})
            publish.single(tank.waterTop, vol, auth=auth, hostname=broker, retain=True)
print('Published ' +str(vol) +' for nodeID ' + str(tank.nodeID) + ' to ' +tank.waterTop)
#publish to thingspeak
#time.sleep(15)
#r = requests.post(thingURL, data = {'api_key':batt_APIKey, 'field' +tank.nodeID: batt})
            publish.single(tank.batTop, batt, auth=auth, hostname=broker, retain=True)
print('Published ' +str(batt) +' for nodeID ' + str(tank.nodeID) + ' to ' +tank.batTop)
#time.sleep(15)
#Serial port opening function
count = 0
def port_check(in_port):
    global port, count  # 'count' must be global so the reset below takes effect
try:
port = serial.Serial(in_port, baudrate=9600, timeout=3.0)
print s_port+' found'
count = 0
return port
except:
port = None
return port
#handle exceptions for absent port (and keep retrying for a while)
while (port_check(s_port) is None) and (count < 100):
count = count + 1
print s_port+' not found '+str(count)+' times'
time.sleep(10)
if count == 100:
print 'Exited because serial port not found'
sys.exit()
#instatiate queue
q = Q()
#setup database
setup_db()
fetch_process = P(target=readlineCR, args=(port,))
broadcast_process = P(target=pub_msg, args=())
broadcast_process.start()
fetch_process.start()
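# Illustrative packet flow (a sketch, not part of the original script): a LoRa
# line arrives as "PY;<nodeID>;<distance>;<batteryvoltage>;\r\n", so e.g.
# "PY;2;850;3.7;\r\n".split(';')[1:4] -> ['2', '850', '3.7'], which pub_msg()
# validates, stores via add_measurement() and publishes over MQTT.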
| gpl-3.0 |
uclouvain/osis | education_group/views/publication_contact/common.py | 1 | 3775 | #
# OSIS stands for Open Student Information System. It's an application
# designed to manage the core business of higher education institutions,
# such as universities, faculties, institutes and professional schools.
# The core business involves the administration of students, teachers,
# courses, programs and so on.
#
# Copyright (C) 2015-2021 Université catholique de Louvain (http://www.uclouvain.be)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# A copy of this license - GNU General Public License - is available
# at the root of the source code of this program. If not,
# see http://www.gnu.org/licenses/.
#
##############################################################################
from django.contrib.messages.views import SuccessMessageMixin
from django.shortcuts import get_object_or_404
from django.utils.functional import cached_property
from base.business.education_groups.publication_contact import can_postpone_publication_contact
from base.models.education_group_publication_contact import EducationGroupPublicationContact
from base.models.education_group_year import EducationGroupYear
from base.models.enums.education_group_categories import Categories
from base.views.mixins import AjaxTemplateMixin
from education_group.forms.publication_contact import EducationGroupPublicationContactForm
from osis_role.contrib.views import PermissionRequiredMixin
class CommonEducationGroupPublicationContactView(PermissionRequiredMixin, AjaxTemplateMixin, SuccessMessageMixin):
model = EducationGroupPublicationContact
context_object_name = "publication_contact"
form_class = EducationGroupPublicationContactForm
template_name = "education_group_app/publication_contact/edit_inner.html"
force_reload = True
raise_exception = True
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
context["can_postpone"] = can_postpone_publication_contact(self.education_group_year)
return context
def to_postpone(self) -> bool:
return self.request.POST.get("to_postpone")
@cached_property
def person(self):
return self.request.user.person
@cached_property
def education_group_year(self):
return get_object_or_404(
EducationGroupYear.objects.all().select_related('education_group_type'),
partial_acronym=self.kwargs['code'],
academic_year__year=self.kwargs['year']
)
def get_success_url(self):
return ""
def get_permission_object(self):
return self.education_group_year
def get_permission_required(self):
if self.education_group_year.is_common:
return ('base.change_commonpedagogyinformation',)
elif self.education_group_year.education_group_type.category == Categories.TRAINING.name:
return ('base.change_training_pedagogyinformation',)
elif self.education_group_year.education_group_type.category == Categories.MINI_TRAINING.name:
return ('base.change_minitraining_pedagogyinformation',)
elif self.education_group_year.education_group_type.category == Categories.GROUP.name:
return ('base.change_group_pedagogyinformation',)
raise Exception("Unknown education group type")
| agpl-3.0 |
mete0r/testfixture | mete0r_testfixture/tests/fixture_bar.py | 1 | 1096 | # -*- coding: utf-8 -*-
#
# mete0r_testfixture: a testfixture helper
# Copyright (C) 2015-2017 mete0r <mete0r@sarangbang.or.kr>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import absolute_import
from __future__ import print_function
from __future__ import unicode_literals
from mete0r_testfixture.testfixture import testfixture
@testfixture('Bar')
def bar(testfixtures):
return {
'Bar': testfixtures.get('Foo', 'foo'),
}
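# Illustrative resolution (a sketch, not part of the original module): with a
# companion fixture registered under ('Foo', 'foo'), requesting 'Bar' yields
# {'Bar': <the 'foo' instance of the 'Foo' fixture>}.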
| agpl-3.0 |
tung7970/mbed-os-1 | tools/test/config_test/test08/test_data.py | 38 | 2118 | # This build on top of test7 by adding some configuration values in targets
# and overriding them in the application
expected_results = {
"base": {
"desc": "override values based on labels with libs and target params (no labels)",
"app.app1": "v_app1",
"app.app2": "v_app2",
"lib1.p1": "v_p1_lib1_app",
"lib1.p2": "v_p2_lib1",
"lib1.p3": "v_p3_lib1_app",
"lib2.p1": "v_p1_lib2_app",
"lib2.p2": "v_p2_lib2",
"target.par1": "v_par1_target_app",
"target.par2": "v_par2_base",
"target.par3": "v_par3_base"
},
"b1": {
"desc": "override values based on labels with libs and target params (first label)",
"app.app1": "v_app1[b1_label]",
"app.app2": "v_app2",
"lib1.p1": "v_p1_lib1_app",
"lib1.p2": "v_p2_lib1",
"lib1.p3": "v_p3_lib1_app",
"lib2.p1": "v_p1_lib2_app",
"lib2.p2": "v_p2_lib2[b1_label]",
"target.par1": "v_par1_target_app",
"target.par2": "v_par2_base",
"target.par3": "v_par3_base"
},
"b2": {
"desc": "override values based on labels with libs and target params (second label)",
"app.app1": "v_app1",
"app.app2": "v_app2[b2_label]",
"lib1.p1": "v_p1_lib1_app",
"lib1.p2": "v_p2_lib1[b2_label]",
"lib1.p3": "v_p3_lib1_app",
"lib2.p1": "v_p1_lib2_app",
"lib2.p2": "v_p2_lib2[b2_label]",
"target.par1": "v_par1_target_app",
"target.par2": "v_par2_b2",
"target.par3": "v_par3_base"
},
"both": {
"desc": "override values based on labels with libs and target params (both labels)",
"app.app1": "v_app1[b1_label]",
"app.app2": "v_app2[b2_label]",
"lib1.p1": "v_p1_lib1_app",
"lib1.p2": "v_p2_lib1[b2_label]",
"lib1.p3": "v_p3_lib1_app",
"lib2.p1": "v_p1_lib2_app",
"lib2.p2": "v_p2_lib2[b2_label]",
"target.par1": "v_par1_target_app",
"target.par2": "v_par2_b2",
"target.par3": "v_par3_both",
"target.par4": "v_par4_both"
}
}
| apache-2.0 |
watchcat/cbu-rotterdam | lib/web/wsgiserver/__init__.py | 4 | 84518 | """A high-speed, production ready, thread pooled, generic HTTP server.
Simplest example on how to use this module directly
(without using CherryPy's application machinery)::
from cherrypy import wsgiserver
def my_crazy_app(environ, start_response):
status = '200 OK'
response_headers = [('Content-type','text/plain')]
start_response(status, response_headers)
return ['Hello world!']
server = wsgiserver.CherryPyWSGIServer(
('0.0.0.0', 8070), my_crazy_app,
server_name='www.cherrypy.example')
server.start()
The CherryPy WSGI server can serve as many WSGI applications
as you want in one instance by using a WSGIPathInfoDispatcher::
d = WSGIPathInfoDispatcher({'/': my_crazy_app, '/blog': my_blog_app})
server = wsgiserver.CherryPyWSGIServer(('0.0.0.0', 80), d)
Want SSL support? Just set server.ssl_adapter to an SSLAdapter instance.
This won't call the CherryPy engine (application side) at all, only the
HTTP server, which is independent from the rest of CherryPy. Don't
let the name "CherryPyWSGIServer" throw you; the name merely reflects
its origin, not its coupling.
For those of you wanting to understand internals of this module, here's the
basic call flow. The server's listening thread runs a very tight loop,
sticking incoming connections onto a Queue::
server = CherryPyWSGIServer(...)
server.start()
while True:
tick()
# This blocks until a request comes in:
child = socket.accept()
conn = HTTPConnection(child, ...)
server.requests.put(conn)
Worker threads are kept in a pool and poll the Queue, popping off and then
handling each connection in turn. Each connection can consist of an arbitrary
number of requests and their responses, so we run a nested loop::
while True:
conn = server.requests.get()
conn.communicate()
-> while True:
req = HTTPRequest(...)
req.parse_request()
-> # Read the Request-Line, e.g. "GET /page HTTP/1.1"
req.rfile.readline()
read_headers(req.rfile, req.inheaders)
req.respond()
-> response = app(...)
try:
for chunk in response:
if chunk:
req.write(chunk)
finally:
if hasattr(response, "close"):
response.close()
if req.close_connection:
return
"""
CRLF = '\r\n'
import os
import Queue
import re
quoted_slash = re.compile("(?i)%2F")
import rfc822
import socket
import sys
if 'win' in sys.platform and not hasattr(socket, 'IPPROTO_IPV6'):
socket.IPPROTO_IPV6 = 41
try:
import cStringIO as StringIO
except ImportError:
import StringIO
DEFAULT_BUFFER_SIZE = -1
_fileobject_uses_str_type = isinstance(socket._fileobject(None)._rbuf, basestring)
import threading
import time
import traceback
def format_exc(limit=None):
"""Like print_exc() but return a string. Backport for Python 2.3."""
try:
etype, value, tb = sys.exc_info()
return ''.join(traceback.format_exception(etype, value, tb, limit))
finally:
etype = value = tb = None
from urllib import unquote
from urlparse import urlparse
import warnings
import errno
def plat_specific_errors(*errnames):
"""Return error numbers for all errors in errnames on this platform.
The 'errno' module contains different global constants depending on
the specific platform (OS). This function will return the list of
numeric values for a given list of potential names.
"""
errno_names = dir(errno)
nums = [getattr(errno, k) for k in errnames if k in errno_names]
# de-dupe the list
return dict.fromkeys(nums).keys()
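# For example, plat_specific_errors("EPIPE", "WSAEBADF") typically yields
# just [errno.EPIPE] on POSIX platforms, where the WSA* names don't exist
# (a sketch; the numeric values vary by platform).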
socket_error_eintr = plat_specific_errors("EINTR", "WSAEINTR")
socket_errors_to_ignore = plat_specific_errors(
"EPIPE",
"EBADF", "WSAEBADF",
"ENOTSOCK", "WSAENOTSOCK",
"ETIMEDOUT", "WSAETIMEDOUT",
"ECONNREFUSED", "WSAECONNREFUSED",
"ECONNRESET", "WSAECONNRESET",
"ECONNABORTED", "WSAECONNABORTED",
"ENETRESET", "WSAENETRESET",
"EHOSTDOWN", "EHOSTUNREACH",
)
socket_errors_to_ignore.append("timed out")
socket_errors_to_ignore.append("The read operation timed out")
socket_errors_nonblocking = plat_specific_errors(
'EAGAIN', 'EWOULDBLOCK', 'WSAEWOULDBLOCK')
comma_separated_headers = ['Accept', 'Accept-Charset', 'Accept-Encoding',
'Accept-Language', 'Accept-Ranges', 'Allow', 'Cache-Control',
'Connection', 'Content-Encoding', 'Content-Language', 'Expect',
'If-Match', 'If-None-Match', 'Pragma', 'Proxy-Authenticate', 'TE',
'Trailer', 'Transfer-Encoding', 'Upgrade', 'Vary', 'Via', 'Warning',
'WWW-Authenticate']
import logging
if not hasattr(logging, 'statistics'): logging.statistics = {}
def read_headers(rfile, hdict=None):
"""Read headers from the given stream into the given header dict.
If hdict is None, a new header dict is created. Returns the populated
header dict.
Headers which are repeated are folded together using a comma if their
specification so dictates.
This function raises ValueError when the read bytes violate the HTTP spec.
You should probably return "400 Bad Request" if this happens.
"""
if hdict is None:
hdict = {}
while True:
line = rfile.readline()
if not line:
# No more data--illegal end of headers
raise ValueError("Illegal end of headers.")
if line == CRLF:
# Normal end of headers
break
if not line.endswith(CRLF):
raise ValueError("HTTP requires CRLF terminators")
if line[0] in ' \t':
# It's a continuation line.
v = line.strip()
else:
try:
k, v = line.split(":", 1)
except ValueError:
raise ValueError("Illegal header line.")
# TODO: what about TE and WWW-Authenticate?
k = k.strip().title()
v = v.strip()
hname = k
if k in comma_separated_headers:
existing = hdict.get(hname)
if existing:
v = ", ".join((existing, v))
hdict[hname] = v
return hdict
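# Illustrative folding (a sketch, not part of the original module): given
#   rfile = StringIO.StringIO("Accept: text/html\r\nAccept: text/plain\r\n\r\n")
# read_headers(rfile) returns {'Accept': 'text/html, text/plain'}.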
class MaxSizeExceeded(Exception):
pass
class SizeCheckWrapper(object):
"""Wraps a file-like object, raising MaxSizeExceeded if too large."""
def __init__(self, rfile, maxlen):
self.rfile = rfile
self.maxlen = maxlen
self.bytes_read = 0
def _check_length(self):
if self.maxlen and self.bytes_read > self.maxlen:
raise MaxSizeExceeded()
def read(self, size=None):
data = self.rfile.read(size)
self.bytes_read += len(data)
self._check_length()
return data
def readline(self, size=None):
if size is not None:
data = self.rfile.readline(size)
self.bytes_read += len(data)
self._check_length()
return data
# User didn't specify a size ...
# We read the line in chunks to make sure it's not a 100MB line !
res = []
while True:
data = self.rfile.readline(256)
self.bytes_read += len(data)
self._check_length()
res.append(data)
# See http://www.cherrypy.org/ticket/421
if len(data) < 256 or data[-1:] == "\n":
return ''.join(res)
def readlines(self, sizehint=0):
# Shamelessly stolen from StringIO
total = 0
lines = []
line = self.readline()
while line:
lines.append(line)
total += len(line)
if 0 < sizehint <= total:
break
line = self.readline()
return lines
def close(self):
self.rfile.close()
def __iter__(self):
return self
def next(self):
data = self.rfile.next()
self.bytes_read += len(data)
self._check_length()
return data
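# Illustrative usage (a sketch, not part of the original module): parse_request
# wraps the connection's rfile as SizeCheckWrapper(rfile, max_request_header_size);
# once more than maxlen bytes have been read, MaxSizeExceeded propagates and is
# turned into a "413"/"414" response.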
class KnownLengthRFile(object):
"""Wraps a file-like object, returning an empty string when exhausted."""
def __init__(self, rfile, content_length):
self.rfile = rfile
self.remaining = content_length
def read(self, size=None):
if self.remaining == 0:
return ''
if size is None:
size = self.remaining
else:
size = min(size, self.remaining)
data = self.rfile.read(size)
self.remaining -= len(data)
return data
def readline(self, size=None):
if self.remaining == 0:
return ''
if size is None:
size = self.remaining
else:
size = min(size, self.remaining)
data = self.rfile.readline(size)
self.remaining -= len(data)
return data
def readlines(self, sizehint=0):
# Shamelessly stolen from StringIO
total = 0
lines = []
line = self.readline(sizehint)
while line:
lines.append(line)
total += len(line)
if 0 < sizehint <= total:
break
line = self.readline(sizehint)
return lines
def close(self):
self.rfile.close()
def __iter__(self):
return self
    def next(self):
        # Python 2 iterator protocol (the rest of this module targets py2,
        # so __next__ would never be called during iteration).
        data = self.rfile.next()
        self.remaining -= len(data)
        return data
class ChunkedRFile(object):
"""Wraps a file-like object, returning an empty string when exhausted.
This class is intended to provide a conforming wsgi.input value for
request entities that have been encoded with the 'chunked' transfer
encoding.
"""
def __init__(self, rfile, maxlen, bufsize=8192):
self.rfile = rfile
self.maxlen = maxlen
self.bytes_read = 0
self.buffer = ''
self.bufsize = bufsize
self.closed = False
def _fetch(self):
if self.closed:
return
line = self.rfile.readline()
self.bytes_read += len(line)
if self.maxlen and self.bytes_read > self.maxlen:
raise MaxSizeExceeded("Request Entity Too Large", self.maxlen)
line = line.strip().split(";", 1)
try:
chunk_size = line.pop(0)
chunk_size = int(chunk_size, 16)
except ValueError:
raise ValueError("Bad chunked transfer size: " + repr(chunk_size))
if chunk_size <= 0:
self.closed = True
return
## if line: chunk_extension = line[0]
if self.maxlen and self.bytes_read + chunk_size > self.maxlen:
raise IOError("Request Entity Too Large")
chunk = self.rfile.read(chunk_size)
self.bytes_read += len(chunk)
self.buffer += chunk
crlf = self.rfile.read(2)
if crlf != CRLF:
raise ValueError(
"Bad chunked transfer coding (expected '\\r\\n', "
"got " + repr(crlf) + ")")
def read(self, size=None):
data = ''
while True:
if size and len(data) >= size:
return data
if not self.buffer:
self._fetch()
if not self.buffer:
# EOF
return data
if size:
remaining = size - len(data)
data += self.buffer[:remaining]
self.buffer = self.buffer[remaining:]
else:
data += self.buffer
    def readline(self, size=None):
        data = ''
        while True:
            if size and len(data) >= size:
                return data
            if not self.buffer:
                self._fetch()
                if not self.buffer:
                    # EOF
                    return data
            newline_pos = self.buffer.find('\n')
            if newline_pos == -1:
                # No newline in the buffer yet: consume it and keep fetching.
                if size:
                    remaining = size - len(data)
                    data += self.buffer[:remaining]
                    self.buffer = self.buffer[remaining:]
                else:
                    data += self.buffer
                    self.buffer = ''
            else:
                # Consume through the newline (as file.readline does) and stop.
                end = newline_pos + 1
                if size:
                    end = min(size - len(data), end)
                data += self.buffer[:end]
                self.buffer = self.buffer[end:]
                return data
def readlines(self, sizehint=0):
# Shamelessly stolen from StringIO
total = 0
lines = []
line = self.readline(sizehint)
while line:
lines.append(line)
total += len(line)
if 0 < sizehint <= total:
break
line = self.readline(sizehint)
return lines
def read_trailer_lines(self):
if not self.closed:
raise ValueError(
"Cannot read trailers until the request body has been read.")
while True:
line = self.rfile.readline()
if not line:
# No more data--illegal end of headers
raise ValueError("Illegal end of headers.")
self.bytes_read += len(line)
if self.maxlen and self.bytes_read > self.maxlen:
raise IOError("Request Entity Too Large")
if line == CRLF:
# Normal end of headers
break
if not line.endswith(CRLF):
raise ValueError("HTTP requires CRLF terminators")
yield line
def close(self):
self.rfile.close()
    def __iter__(self):
        # Yield decoded lines until the terminating zero-size chunk is seen.
        line = self.readline()
        while line:
            yield line
            line = self.readline()
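# Illustrative decoding (a sketch, not part of the original module): given
#   raw = StringIO.StringIO("4\r\nWiki\r\n5\r\npedia\r\n0\r\n\r\n")
# ChunkedRFile(raw, maxlen=0).read() returns 'Wikipedia'.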
class HTTPRequest(object):
"""An HTTP Request (and response).
A single HTTP connection may consist of multiple request/response pairs.
"""
server = None
"""The HTTPServer object which is receiving this request."""
conn = None
"""The HTTPConnection object on which this request connected."""
inheaders = {}
"""A dict of request headers."""
outheaders = []
"""A list of header tuples to write in the response."""
ready = False
"""When True, the request has been parsed and is ready to begin generating
the response. When False, signals the calling Connection that the response
should not be generated and the connection should close."""
close_connection = False
"""Signals the calling Connection that the request should close. This does
not imply an error! The client and/or server may each request that the
connection be closed."""
chunked_write = False
"""If True, output will be encoded with the "chunked" transfer-coding.
This value is set automatically inside send_headers."""
def __init__(self, server, conn):
self.server= server
self.conn = conn
self.ready = False
self.started_request = False
self.scheme = "http"
if self.server.ssl_adapter is not None:
self.scheme = "https"
# Use the lowest-common protocol in case read_request_line errors.
self.response_protocol = 'HTTP/1.0'
self.inheaders = {}
self.status = ""
self.outheaders = []
self.sent_headers = False
self.close_connection = self.__class__.close_connection
self.chunked_read = False
self.chunked_write = self.__class__.chunked_write
def parse_request(self):
"""Parse the next HTTP request start-line and message-headers."""
self.rfile = SizeCheckWrapper(self.conn.rfile,
self.server.max_request_header_size)
try:
self.read_request_line()
except MaxSizeExceeded:
self.simple_response("414 Request-URI Too Long",
"The Request-URI sent with the request exceeds the maximum "
"allowed bytes.")
return
try:
success = self.read_request_headers()
except MaxSizeExceeded:
self.simple_response("413 Request Entity Too Large",
"The headers sent with the request exceed the maximum "
"allowed bytes.")
return
else:
if not success:
return
self.ready = True
def read_request_line(self):
# HTTP/1.1 connections are persistent by default. If a client
# requests a page, then idles (leaves the connection open),
# then rfile.readline() will raise socket.error("timed out").
# Note that it does this based on the value given to settimeout(),
# and doesn't need the client to request or acknowledge the close
# (although your TCP stack might suffer for it: cf Apache's history
# with FIN_WAIT_2).
request_line = self.rfile.readline()
# Set started_request to True so communicate() knows to send 408
# from here on out.
self.started_request = True
if not request_line:
# Force self.ready = False so the connection will close.
self.ready = False
return
if request_line == CRLF:
# RFC 2616 sec 4.1: "...if the server is reading the protocol
# stream at the beginning of a message and receives a CRLF
# first, it should ignore the CRLF."
# But only ignore one leading line! else we enable a DoS.
request_line = self.rfile.readline()
if not request_line:
self.ready = False
return
if not request_line.endswith(CRLF):
self.simple_response("400 Bad Request", "HTTP requires CRLF terminators")
return
try:
method, uri, req_protocol = request_line.strip().split(" ", 2)
rp = int(req_protocol[5]), int(req_protocol[7])
except (ValueError, IndexError):
self.simple_response("400 Bad Request", "Malformed Request-Line")
return
self.uri = uri
self.method = method
# uri may be an abs_path (including "http://host.domain.tld");
scheme, authority, path = self.parse_request_uri(uri)
if '#' in path:
self.simple_response("400 Bad Request",
"Illegal #fragment in Request-URI.")
return
if scheme:
self.scheme = scheme
qs = ''
if '?' in path:
path, qs = path.split('?', 1)
# Unquote the path+params (e.g. "/this%20path" -> "/this path").
# http://www.w3.org/Protocols/rfc2616/rfc2616-sec5.html#sec5.1.2
#
# But note that "...a URI must be separated into its components
# before the escaped characters within those components can be
# safely decoded." http://www.ietf.org/rfc/rfc2396.txt, sec 2.4.2
# Therefore, "/this%2Fpath" becomes "/this%2Fpath", not "/this/path".
try:
atoms = [unquote(x) for x in quoted_slash.split(path)]
except ValueError, ex:
self.simple_response("400 Bad Request", ex.args[0])
return
path = "%2F".join(atoms)
self.path = path
# Note that, like wsgiref and most other HTTP servers,
# we "% HEX HEX"-unquote the path but not the query string.
self.qs = qs
# Compare request and server HTTP protocol versions, in case our
# server does not support the requested protocol. Limit our output
# to min(req, server). We want the following output:
# request server actual written supported response
# protocol protocol response protocol feature set
# a 1.0 1.0 1.0 1.0
# b 1.0 1.1 1.1 1.0
# c 1.1 1.0 1.0 1.0
# d 1.1 1.1 1.1 1.1
# Notice that, in (b), the response will be "HTTP/1.1" even though
# the client only understands 1.0. RFC 2616 10.5.6 says we should
# only return 505 if the _major_ version is different.
sp = int(self.server.protocol[5]), int(self.server.protocol[7])
if sp[0] != rp[0]:
self.simple_response("505 HTTP Version Not Supported")
return
self.request_protocol = req_protocol
self.response_protocol = "HTTP/%s.%s" % min(rp, sp)
def read_request_headers(self):
"""Read self.rfile into self.inheaders. Return success."""
# then all the http headers
try:
read_headers(self.rfile, self.inheaders)
except ValueError, ex:
self.simple_response("400 Bad Request", ex.args[0])
return False
mrbs = self.server.max_request_body_size
if mrbs and int(self.inheaders.get("Content-Length", 0)) > mrbs:
self.simple_response("413 Request Entity Too Large",
"The entity sent with the request exceeds the maximum "
"allowed bytes.")
return False
# Persistent connection support
if self.response_protocol == "HTTP/1.1":
# Both server and client are HTTP/1.1
if self.inheaders.get("Connection", "") == "close":
self.close_connection = True
else:
# Either the server or client (or both) are HTTP/1.0
if self.inheaders.get("Connection", "") != "Keep-Alive":
self.close_connection = True
# Transfer-Encoding support
te = None
if self.response_protocol == "HTTP/1.1":
te = self.inheaders.get("Transfer-Encoding")
if te:
te = [x.strip().lower() for x in te.split(",") if x.strip()]
self.chunked_read = False
if te:
for enc in te:
if enc == "chunked":
self.chunked_read = True
else:
# Note that, even if we see "chunked", we must reject
# if there is an extension we don't recognize.
self.simple_response("501 Unimplemented")
self.close_connection = True
return False
# From PEP 333:
# "Servers and gateways that implement HTTP 1.1 must provide
# transparent support for HTTP 1.1's "expect/continue" mechanism.
# This may be done in any of several ways:
# 1. Respond to requests containing an Expect: 100-continue request
# with an immediate "100 Continue" response, and proceed normally.
# 2. Proceed with the request normally, but provide the application
# with a wsgi.input stream that will send the "100 Continue"
# response if/when the application first attempts to read from
# the input stream. The read request must then remain blocked
# until the client responds.
# 3. Wait until the client decides that the server does not support
# expect/continue, and sends the request body on its own.
# (This is suboptimal, and is not recommended.)
#
# We used to do 3, but are now doing 1. Maybe we'll do 2 someday,
# but it seems like it would be a big slowdown for such a rare case.
if self.inheaders.get("Expect", "") == "100-continue":
# Don't use simple_response here, because it emits headers
# we don't want. See http://www.cherrypy.org/ticket/951
msg = self.server.protocol + " 100 Continue\r\n\r\n"
try:
self.conn.wfile.sendall(msg)
except socket.error, x:
if x.args[0] not in socket_errors_to_ignore:
raise
return True
def parse_request_uri(self, uri):
"""Parse a Request-URI into (scheme, authority, path).
Note that Request-URI's must be one of::
Request-URI = "*" | absoluteURI | abs_path | authority
Therefore, a Request-URI which starts with a double forward-slash
cannot be a "net_path"::
net_path = "//" authority [ abs_path ]
Instead, it must be interpreted as an "abs_path" with an empty first
path segment::
abs_path = "/" path_segments
path_segments = segment *( "/" segment )
segment = *pchar *( ";" param )
param = *pchar
"""
if uri == "*":
return None, None, uri
i = uri.find('://')
if i > 0 and '?' not in uri[:i]:
# An absoluteURI.
# If there's a scheme (and it must be http or https), then:
# http_URL = "http:" "//" host [ ":" port ] [ abs_path [ "?" query ]]
scheme, remainder = uri[:i].lower(), uri[i + 3:]
authority, path = remainder.split("/", 1)
return scheme, authority, path
if uri.startswith('/'):
# An abs_path.
return None, None, uri
else:
# An authority.
return None, uri, None
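    # Illustrative results (a sketch, not part of the original module):
    #   parse_request_uri("http://example.com/a/b?x=1") -> ('http', 'example.com', 'a/b?x=1')
    #   parse_request_uri("/a/b")           -> (None, None, '/a/b')
    #   parse_request_uri("example.com:80") -> (None, 'example.com:80', None)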
def respond(self):
"""Call the gateway and write its iterable output."""
mrbs = self.server.max_request_body_size
if self.chunked_read:
self.rfile = ChunkedRFile(self.conn.rfile, mrbs)
else:
cl = int(self.inheaders.get("Content-Length", 0))
if mrbs and mrbs < cl:
if not self.sent_headers:
self.simple_response("413 Request Entity Too Large",
"The entity sent with the request exceeds the maximum "
"allowed bytes.")
return
self.rfile = KnownLengthRFile(self.conn.rfile, cl)
self.server.gateway(self).respond()
if (self.ready and not self.sent_headers):
self.sent_headers = True
self.send_headers()
if self.chunked_write:
self.conn.wfile.sendall("0\r\n\r\n")
def simple_response(self, status, msg=""):
"""Write a simple response back to the client."""
status = str(status)
buf = [self.server.protocol + " " +
status + CRLF,
"Content-Length: %s\r\n" % len(msg),
"Content-Type: text/plain\r\n"]
if status[:3] in ("413", "414"):
# Request Entity Too Large / Request-URI Too Long
self.close_connection = True
if self.response_protocol == 'HTTP/1.1':
# This will not be true for 414, since read_request_line
# usually raises 414 before reading the whole line, and we
# therefore cannot know the proper response_protocol.
buf.append("Connection: close\r\n")
else:
# HTTP/1.0 had no 413/414 status nor Connection header.
# Emit 400 instead and trust the message body is enough.
status = "400 Bad Request"
buf.append(CRLF)
if msg:
if isinstance(msg, unicode):
msg = msg.encode("ISO-8859-1")
buf.append(msg)
try:
self.conn.wfile.sendall("".join(buf))
except socket.error, x:
if x.args[0] not in socket_errors_to_ignore:
raise
def write(self, chunk):
"""Write unbuffered data to the client."""
if self.chunked_write and chunk:
buf = [hex(len(chunk))[2:], CRLF, chunk, CRLF]
self.conn.wfile.sendall("".join(buf))
else:
self.conn.wfile.sendall(chunk)
def send_headers(self):
"""Assert, process, and send the HTTP response message-headers.
You must set self.status, and self.outheaders before calling this.
"""
hkeys = [key.lower() for key, value in self.outheaders]
status = int(self.status[:3])
if status == 413:
# Request Entity Too Large. Close conn to avoid garbage.
self.close_connection = True
elif "content-length" not in hkeys:
# "All 1xx (informational), 204 (no content),
# and 304 (not modified) responses MUST NOT
# include a message-body." So no point chunking.
if status < 200 or status in (204, 205, 304):
pass
else:
if (self.response_protocol == 'HTTP/1.1'
and self.method != 'HEAD'):
# Use the chunked transfer-coding
self.chunked_write = True
self.outheaders.append(("Transfer-Encoding", "chunked"))
else:
# Closing the conn is the only way to determine len.
self.close_connection = True
if "connection" not in hkeys:
if self.response_protocol == 'HTTP/1.1':
# Both server and client are HTTP/1.1 or better
if self.close_connection:
self.outheaders.append(("Connection", "close"))
else:
# Server and/or client are HTTP/1.0
if not self.close_connection:
self.outheaders.append(("Connection", "Keep-Alive"))
if (not self.close_connection) and (not self.chunked_read):
# Read any remaining request body data on the socket.
# "If an origin server receives a request that does not include an
# Expect request-header field with the "100-continue" expectation,
# the request includes a request body, and the server responds
# with a final status code before reading the entire request body
# from the transport connection, then the server SHOULD NOT close
# the transport connection until it has read the entire request,
# or until the client closes the connection. Otherwise, the client
# might not reliably receive the response message. However, this
# requirement is not be construed as preventing a server from
# defending itself against denial-of-service attacks, or from
# badly broken client implementations."
remaining = getattr(self.rfile, 'remaining', 0)
if remaining > 0:
self.rfile.read(remaining)
if "date" not in hkeys:
self.outheaders.append(("Date", rfc822.formatdate()))
if "server" not in hkeys:
self.outheaders.append(("Server", self.server.server_name))
buf = [self.server.protocol + " " + self.status + CRLF]
for k, v in self.outheaders:
buf.append(k + ": " + v + CRLF)
buf.append(CRLF)
self.conn.wfile.sendall("".join(buf))
class NoSSLError(Exception):
"""Exception raised when a client speaks HTTP to an HTTPS socket."""
pass
class FatalSSLAlert(Exception):
"""Exception raised when the SSL implementation signals a fatal alert."""
pass
class CP_fileobject(socket._fileobject):
"""Faux file object attached to a socket object."""
def __init__(self, *args, **kwargs):
self.bytes_read = 0
self.bytes_written = 0
socket._fileobject.__init__(self, *args, **kwargs)
def sendall(self, data):
"""Sendall for non-blocking sockets."""
while data:
try:
bytes_sent = self.send(data)
data = data[bytes_sent:]
except socket.error, e:
if e.args[0] not in socket_errors_nonblocking:
raise
def send(self, data):
bytes_sent = self._sock.send(data)
self.bytes_written += bytes_sent
return bytes_sent
def flush(self):
if self._wbuf:
buffer = "".join(self._wbuf)
self._wbuf = []
self.sendall(buffer)
def recv(self, size):
while True:
try:
data = self._sock.recv(size)
self.bytes_read += len(data)
return data
except socket.error, e:
if (e.args[0] not in socket_errors_nonblocking
and e.args[0] not in socket_error_eintr):
raise
if not _fileobject_uses_str_type:
def read(self, size=-1):
# Use max, disallow tiny reads in a loop as they are very inefficient.
# We never leave read() with any leftover data from a new recv() call
# in our internal buffer.
rbufsize = max(self._rbufsize, self.default_bufsize)
# Our use of StringIO rather than lists of string objects returned by
# recv() minimizes memory usage and fragmentation that occurs when
# rbufsize is large compared to the typical return value of recv().
buf = self._rbuf
buf.seek(0, 2) # seek end
if size < 0:
# Read until EOF
self._rbuf = StringIO.StringIO() # reset _rbuf. we consume it via buf.
while True:
data = self.recv(rbufsize)
if not data:
break
buf.write(data)
return buf.getvalue()
else:
# Read until size bytes or EOF seen, whichever comes first
buf_len = buf.tell()
if buf_len >= size:
# Already have size bytes in our buffer? Extract and return.
buf.seek(0)
rv = buf.read(size)
self._rbuf = StringIO.StringIO()
self._rbuf.write(buf.read())
return rv
self._rbuf = StringIO.StringIO() # reset _rbuf. we consume it via buf.
while True:
left = size - buf_len
# recv() will malloc the amount of memory given as its
# parameter even though it often returns much less data
# than that. The returned data string is short lived
# as we copy it into a StringIO and free it. This avoids
# fragmentation issues on many platforms.
data = self.recv(left)
if not data:
break
n = len(data)
if n == size and not buf_len:
# Shortcut. Avoid buffer data copies when:
# - We have no data in our buffer.
# AND
# - Our call to recv returned exactly the
# number of bytes we were asked to read.
return data
if n == left:
buf.write(data)
del data # explicit free
break
assert n <= left, "recv(%d) returned %d bytes" % (left, n)
buf.write(data)
buf_len += n
del data # explicit free
#assert buf_len == buf.tell()
return buf.getvalue()
def readline(self, size=-1):
buf = self._rbuf
buf.seek(0, 2) # seek end
if buf.tell() > 0:
# check if we already have it in our buffer
buf.seek(0)
bline = buf.readline(size)
if bline.endswith('\n') or len(bline) == size:
self._rbuf = StringIO.StringIO()
self._rbuf.write(buf.read())
return bline
del bline
if size < 0:
# Read until \n or EOF, whichever comes first
if self._rbufsize <= 1:
# Speed up unbuffered case
buf.seek(0)
buffers = [buf.read()]
self._rbuf = StringIO.StringIO() # reset _rbuf. we consume it via buf.
data = None
recv = self.recv
while data != "\n":
data = recv(1)
if not data:
break
buffers.append(data)
return "".join(buffers)
buf.seek(0, 2) # seek end
self._rbuf = StringIO.StringIO() # reset _rbuf. we consume it via buf.
while True:
data = self.recv(self._rbufsize)
if not data:
break
nl = data.find('\n')
if nl >= 0:
nl += 1
buf.write(data[:nl])
self._rbuf.write(data[nl:])
del data
break
buf.write(data)
return buf.getvalue()
else:
# Read until size bytes or \n or EOF seen, whichever comes first
buf.seek(0, 2) # seek end
buf_len = buf.tell()
if buf_len >= size:
buf.seek(0)
rv = buf.read(size)
self._rbuf = StringIO.StringIO()
self._rbuf.write(buf.read())
return rv
self._rbuf = StringIO.StringIO() # reset _rbuf. we consume it via buf.
while True:
data = self.recv(self._rbufsize)
if not data:
break
left = size - buf_len
# did we just receive a newline?
nl = data.find('\n', 0, left)
if nl >= 0:
nl += 1
# save the excess data to _rbuf
self._rbuf.write(data[nl:])
if buf_len:
buf.write(data[:nl])
break
else:
# Shortcut. Avoid data copy through buf when returning
# a substring of our first recv().
return data[:nl]
n = len(data)
if n == size and not buf_len:
# Shortcut. Avoid data copy through buf when
# returning exactly all of our first recv().
return data
if n >= left:
buf.write(data[:left])
self._rbuf.write(data[left:])
break
buf.write(data)
buf_len += n
#assert buf_len == buf.tell()
return buf.getvalue()
else:
def read(self, size=-1):
if size < 0:
# Read until EOF
buffers = [self._rbuf]
self._rbuf = ""
if self._rbufsize <= 1:
recv_size = self.default_bufsize
else:
recv_size = self._rbufsize
while True:
data = self.recv(recv_size)
if not data:
break
buffers.append(data)
return "".join(buffers)
else:
# Read until size bytes or EOF seen, whichever comes first
data = self._rbuf
buf_len = len(data)
if buf_len >= size:
self._rbuf = data[size:]
return data[:size]
buffers = []
if data:
buffers.append(data)
self._rbuf = ""
while True:
left = size - buf_len
recv_size = max(self._rbufsize, left)
data = self.recv(recv_size)
if not data:
break
buffers.append(data)
n = len(data)
if n >= left:
self._rbuf = data[left:]
buffers[-1] = data[:left]
break
buf_len += n
return "".join(buffers)
def readline(self, size=-1):
data = self._rbuf
if size < 0:
# Read until \n or EOF, whichever comes first
if self._rbufsize <= 1:
# Speed up unbuffered case
assert data == ""
buffers = []
while data != "\n":
data = self.recv(1)
if not data:
break
buffers.append(data)
return "".join(buffers)
nl = data.find('\n')
if nl >= 0:
nl += 1
self._rbuf = data[nl:]
return data[:nl]
buffers = []
if data:
buffers.append(data)
self._rbuf = ""
while True:
data = self.recv(self._rbufsize)
if not data:
break
buffers.append(data)
nl = data.find('\n')
if nl >= 0:
nl += 1
self._rbuf = data[nl:]
buffers[-1] = data[:nl]
break
return "".join(buffers)
else:
# Read until size bytes or \n or EOF seen, whichever comes first
nl = data.find('\n', 0, size)
if nl >= 0:
nl += 1
self._rbuf = data[nl:]
return data[:nl]
buf_len = len(data)
if buf_len >= size:
self._rbuf = data[size:]
return data[:size]
buffers = []
if data:
buffers.append(data)
self._rbuf = ""
while True:
data = self.recv(self._rbufsize)
if not data:
break
buffers.append(data)
left = size - buf_len
nl = data.find('\n', 0, left)
if nl >= 0:
nl += 1
self._rbuf = data[nl:]
buffers[-1] = data[:nl]
break
n = len(data)
if n >= left:
self._rbuf = data[left:]
buffers[-1] = data[:left]
break
buf_len += n
return "".join(buffers)
class HTTPConnection(object):
"""An HTTP connection (active socket).
server: the Server object which received this connection.
socket: the raw socket object (usually TCP) for this connection.
makefile: a fileobject class for reading from the socket.
"""
remote_addr = None
remote_port = None
ssl_env = None
rbufsize = DEFAULT_BUFFER_SIZE
wbufsize = DEFAULT_BUFFER_SIZE
RequestHandlerClass = HTTPRequest
def __init__(self, server, sock, makefile=CP_fileobject):
self.server = server
self.socket = sock
self.rfile = makefile(sock, "rb", self.rbufsize)
self.wfile = makefile(sock, "wb", self.wbufsize)
self.requests_seen = 0
def communicate(self):
"""Read each request and respond appropriately."""
request_seen = False
try:
while True:
# (re)set req to None so that if something goes wrong in
# the RequestHandlerClass constructor, the error doesn't
# get written to the previous request.
req = None
req = self.RequestHandlerClass(self.server, self)
# This order of operations should guarantee correct pipelining.
req.parse_request()
if self.server.stats['Enabled']:
self.requests_seen += 1
if not req.ready:
# Something went wrong in the parsing (and the server has
# probably already made a simple_response). Return and
# let the conn close.
return
request_seen = True
req.respond()
if req.close_connection:
return
except socket.error, e:
errnum = e.args[0]
# sadly SSL sockets return a different (longer) time out string
if errnum == 'timed out' or errnum == 'The read operation timed out':
# Don't error if we're between requests; only error
# if 1) no request has been started at all, or 2) we're
# in the middle of a request.
# See http://www.cherrypy.org/ticket/853
if (not request_seen) or (req and req.started_request):
# Don't bother writing the 408 if the response
# has already started being written.
if req and not req.sent_headers:
try:
req.simple_response("408 Request Timeout")
except FatalSSLAlert:
# Close the connection.
return
elif errnum not in socket_errors_to_ignore:
if req and not req.sent_headers:
try:
req.simple_response("500 Internal Server Error",
format_exc())
except FatalSSLAlert:
# Close the connection.
return
return
except (KeyboardInterrupt, SystemExit):
raise
except FatalSSLAlert:
# Close the connection.
return
except NoSSLError:
if req and not req.sent_headers:
# Unwrap our wfile
self.wfile = CP_fileobject(self.socket._sock, "wb", self.wbufsize)
req.simple_response("400 Bad Request",
"The client sent a plain HTTP request, but "
"this server only speaks HTTPS on this port.")
self.linger = True
except Exception:
if req and not req.sent_headers:
try:
req.simple_response("500 Internal Server Error", format_exc())
except FatalSSLAlert:
# Close the connection.
return
linger = False
def close(self):
"""Close the socket underlying this connection."""
self.rfile.close()
if not self.linger:
# Python's socket module does NOT call close on the kernel socket
# when you call socket.close(). We do so manually here because we
# want this server to send a FIN TCP segment immediately. Note this
# must be called *before* calling socket.close(), because the latter
# drops its reference to the kernel socket.
if hasattr(self.socket, '_sock'):
self.socket._sock.close()
self.socket.close()
else:
# On the other hand, sometimes we want to hang around for a bit
# to make sure the client has a chance to read our entire
# response. Skipping the close() calls here delays the FIN
# packet until the socket object is garbage-collected later.
# Someday, perhaps, we'll do the full lingering_close that
# Apache does, but not today.
pass
_SHUTDOWNREQUEST = None
class WorkerThread(threading.Thread):
"""Thread which continuously polls a Queue for Connection objects.
Due to the timing issues of polling a Queue, a WorkerThread does not
check its own 'ready' flag after it has started. To stop the thread,
it is necessary to stick a _SHUTDOWNREQUEST object onto the Queue
(one for each running WorkerThread).
"""
conn = None
"""The current connection pulled off the Queue, or None."""
server = None
"""The HTTP Server which spawned this thread, and which owns the
Queue and is placing active connections into it."""
ready = False
"""A simple flag for the calling server to know when this thread
has begun polling the Queue."""
def __init__(self, server):
self.ready = False
self.server = server
self.requests_seen = 0
self.bytes_read = 0
self.bytes_written = 0
self.start_time = None
self.work_time = 0
self.stats = {
'Requests': lambda s: self.requests_seen + ((self.start_time is None) and 0 or self.conn.requests_seen),
'Bytes Read': lambda s: self.bytes_read + ((self.start_time is None) and 0 or self.conn.rfile.bytes_read),
'Bytes Written': lambda s: self.bytes_written + ((self.start_time is None) and 0 or self.conn.wfile.bytes_written),
'Work Time': lambda s: self.work_time + ((self.start_time is None) and 0 or time.time() - self.start_time),
'Read Throughput': lambda s: s['Bytes Read'](s) / (s['Work Time'](s) or 1e-6),
'Write Throughput': lambda s: s['Bytes Written'](s) / (s['Work Time'](s) or 1e-6),
}
threading.Thread.__init__(self)
def run(self):
self.server.stats['Worker Threads'][self.getName()] = self.stats
try:
self.ready = True
while True:
conn = self.server.requests.get()
if conn is _SHUTDOWNREQUEST:
return
self.conn = conn
if self.server.stats['Enabled']:
self.start_time = time.time()
try:
conn.communicate()
finally:
conn.close()
if self.server.stats['Enabled']:
self.requests_seen += self.conn.requests_seen
self.bytes_read += self.conn.rfile.bytes_read
self.bytes_written += self.conn.wfile.bytes_written
self.work_time += time.time() - self.start_time
self.start_time = None
self.conn = None
except (KeyboardInterrupt, SystemExit), exc:
self.server.interrupt = exc
class ThreadPool(object):
"""A Request Queue for the CherryPyWSGIServer which pools threads.
ThreadPool objects must provide min, get(), put(obj), start()
and stop(timeout) attributes.
"""
def __init__(self, server, min=10, max=-1):
self.server = server
self.min = min
self.max = max
self._threads = []
self._queue = Queue.Queue()
self.get = self._queue.get
def start(self):
"""Start the pool of threads."""
for i in range(self.min):
self._threads.append(WorkerThread(self.server))
for worker in self._threads:
worker.setName("CP Server " + worker.getName())
worker.start()
for worker in self._threads:
while not worker.ready:
time.sleep(.1)
def _get_idle(self):
"""Number of worker threads which are idle. Read-only."""
return len([t for t in self._threads if t.conn is None])
idle = property(_get_idle, doc=_get_idle.__doc__)
def put(self, obj):
self._queue.put(obj)
if obj is _SHUTDOWNREQUEST:
return
def grow(self, amount):
"""Spawn new worker threads (not above self.max)."""
for i in range(amount):
if self.max > 0 and len(self._threads) >= self.max:
break
worker = WorkerThread(self.server)
worker.setName("CP Server " + worker.getName())
self._threads.append(worker)
worker.start()
def shrink(self, amount):
"""Kill off worker threads (not below self.min)."""
# Grow/shrink the pool if necessary.
# Remove any dead threads from our list
for t in self._threads:
if not t.isAlive():
self._threads.remove(t)
amount -= 1
if amount > 0:
for i in range(min(amount, len(self._threads) - self.min)):
# Put one shutdown request on the queue for each worker we
# want to retire (never dropping below self.min). Once a
# worker pulls a _SHUTDOWNREQUEST off the queue, its run()
# loop returns and that thread terminates.
self._queue.put(_SHUTDOWNREQUEST)
def stop(self, timeout=5):
# Must shut down threads here so the code that calls
# this method can know when all threads are stopped.
for worker in self._threads:
self._queue.put(_SHUTDOWNREQUEST)
# Don't join currentThread (when stop is called inside a request).
current = threading.currentThread()
if timeout and timeout >= 0:
endtime = time.time() + timeout
while self._threads:
worker = self._threads.pop()
if worker is not current and worker.isAlive():
try:
if timeout is None or timeout < 0:
worker.join()
else:
remaining_time = endtime - time.time()
if remaining_time > 0:
worker.join(remaining_time)
if worker.isAlive():
# We exhausted the timeout.
# Forcibly shut down the socket.
c = worker.conn
if c and not c.rfile.closed:
try:
c.socket.shutdown(socket.SHUT_RD)
except TypeError:
# pyOpenSSL sockets don't take an arg
c.socket.shutdown()
worker.join()
except (AssertionError,
# Ignore repeated Ctrl-C.
# See http://www.cherrypy.org/ticket/691.
KeyboardInterrupt), exc1:
pass
def _get_qsize(self):
return self._queue.qsize()
qsize = property(_get_qsize)
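# The pool is normally driven by HTTPServer itself; standalone use would look
# roughly like this (hypothetical sketch):
#   pool = ThreadPool(server, min=5, max=20)
#   pool.start()            # spawns and waits for 5 ready workers
#   pool.put(conn)          # a WorkerThread picks the connection up
#   pool.stop(timeout=5)    # queues one _SHUTDOWNREQUEST per worker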
try:
import fcntl
except ImportError:
try:
from ctypes import windll, WinError
except ImportError:
def prevent_socket_inheritance(sock):
"""Dummy function, since neither fcntl nor ctypes are available."""
pass
else:
def prevent_socket_inheritance(sock):
"""Mark the given socket fd as non-inheritable (Windows)."""
if not windll.kernel32.SetHandleInformation(sock.fileno(), 1, 0):
raise WinError()
else:
def prevent_socket_inheritance(sock):
"""Mark the given socket fd as non-inheritable (POSIX)."""
fd = sock.fileno()
old_flags = fcntl.fcntl(fd, fcntl.F_GETFD)
fcntl.fcntl(fd, fcntl.F_SETFD, old_flags | fcntl.FD_CLOEXEC)
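# Quick POSIX sanity check (illustrative, not part of the module):
#   s = socket.socket()
#   prevent_socket_inheritance(s)
#   assert fcntl.fcntl(s.fileno(), fcntl.F_GETFD) & fcntl.FD_CLOEXEC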
class SSLAdapter(object):
"""Base class for SSL driver library adapters.
Required methods:
* ``wrap(sock) -> (wrapped socket, ssl environ dict)``
* ``makefile(sock, mode='r', bufsize=DEFAULT_BUFFER_SIZE) -> socket file object``
"""
def __init__(self, certificate, private_key, certificate_chain=None):
self.certificate = certificate
self.private_key = private_key
self.certificate_chain = certificate_chain
def wrap(self, sock):
raise NotImplementedError
def makefile(self, sock, mode='r', bufsize=DEFAULT_BUFFER_SIZE):
raise NotImplementedError
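# A concrete adapter fills in both hooks; minimal sketch (`my_tls` is a
# hypothetical driver library, not a real dependency):
#   class MySSLAdapter(SSLAdapter):
#       def wrap(self, sock):
#           # return the wrapped socket plus an SSL environ dict
#           return my_tls.wrap_server(sock, self.certificate,
#                                     self.private_key), {}
#       def makefile(self, sock, mode='r', bufsize=DEFAULT_BUFFER_SIZE):
#           return CP_fileobject(sock, mode, bufsize)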
class HTTPServer(object):
"""An HTTP server."""
_bind_addr = "127.0.0.1"
_interrupt = None
gateway = None
"""A Gateway instance."""
minthreads = None
"""The minimum number of worker threads to create (default 10)."""
maxthreads = None
"""The maximum number of worker threads to create (default -1 = no limit)."""
server_name = None
"""The name of the server; defaults to socket.gethostname()."""
protocol = "HTTP/1.1"
"""The version string to write in the Status-Line of all HTTP responses.
For example, "HTTP/1.1" is the default. This also limits the supported
features used in the response."""
request_queue_size = 5
"""The 'backlog' arg to socket.listen(); max queued connections (default 5)."""
shutdown_timeout = 5
"""The total time, in seconds, to wait for worker threads to cleanly exit."""
timeout = 10
"""The timeout in seconds for accepted connections (default 10)."""
version = "CherryPy/3.2.0"
"""A version string for the HTTPServer."""
software = None
"""The value to set for the SERVER_SOFTWARE entry in the WSGI environ.
If None, this defaults to ``'%s Server' % self.version``."""
ready = False
"""An internal flag which marks whether the socket is accepting connections."""
max_request_header_size = 0
"""The maximum size, in bytes, for request headers, or 0 for no limit."""
max_request_body_size = 0
"""The maximum size, in bytes, for request bodies, or 0 for no limit."""
nodelay = True
"""If True (the default since 3.1), sets the TCP_NODELAY socket option."""
ConnectionClass = HTTPConnection
"""The class to use for handling HTTP connections."""
ssl_adapter = None
"""An instance of SSLAdapter (or a subclass).
You must have the corresponding SSL driver library installed."""
def __init__(self, bind_addr, gateway, minthreads=10, maxthreads=-1,
server_name=None):
self.bind_addr = bind_addr
self.gateway = gateway
self.requests = ThreadPool(self, min=minthreads or 1, max=maxthreads)
if not server_name:
server_name = socket.gethostname()
self.server_name = server_name
self.clear_stats()
def clear_stats(self):
self._start_time = None
self._run_time = 0
self.stats = {
'Enabled': False,
'Bind Address': lambda s: repr(self.bind_addr),
'Run time': lambda s: (not s['Enabled']) and 0 or self.runtime(),
'Accepts': 0,
'Accepts/sec': lambda s: s['Accepts'] / self.runtime(),
'Queue': lambda s: getattr(self.requests, "qsize", None),
'Threads': lambda s: len(getattr(self.requests, "_threads", [])),
'Threads Idle': lambda s: getattr(self.requests, "idle", None),
'Socket Errors': 0,
'Requests': lambda s: (not s['Enabled']) and 0 or sum([w['Requests'](w) for w
in s['Worker Threads'].values()], 0),
'Bytes Read': lambda s: (not s['Enabled']) and 0 or sum([w['Bytes Read'](w) for w
in s['Worker Threads'].values()], 0),
'Bytes Written': lambda s: (not s['Enabled']) and 0 or sum([w['Bytes Written'](w) for w
in s['Worker Threads'].values()], 0),
'Work Time': lambda s: (not s['Enabled']) and 0 or sum([w['Work Time'](w) for w
in s['Worker Threads'].values()], 0),
'Read Throughput': lambda s: (not s['Enabled']) and 0 or sum(
[w['Bytes Read'](w) / (w['Work Time'](w) or 1e-6)
for w in s['Worker Threads'].values()], 0),
'Write Throughput': lambda s: (not s['Enabled']) and 0 or sum(
[w['Bytes Written'](w) / (w['Work Time'](w) or 1e-6)
for w in s['Worker Threads'].values()], 0),
'Worker Threads': {},
}
logging.statistics["CherryPy HTTPServer %d" % id(self)] = self.stats
def runtime(self):
if self._start_time is None:
return self._run_time
else:
return self._run_time + (time.time() - self._start_time)
def __str__(self):
return "%s.%s(%r)" % (self.__module__, self.__class__.__name__,
self.bind_addr)
def _get_bind_addr(self):
return self._bind_addr
def _set_bind_addr(self, value):
if isinstance(value, tuple) and value[0] in ('', None):
# Despite the socket module docs, using '' does not
# allow AI_PASSIVE to work. Passing None instead
# returns '0.0.0.0' like we want. In other words:
# host AI_PASSIVE result
# '' Y 192.168.x.y
# '' N 192.168.x.y
# None Y 0.0.0.0
# None N 127.0.0.1
# But since you can get the same effect with an explicit
# '0.0.0.0', we deny both the empty string and None as values.
raise ValueError("Host values of '' or None are not allowed. "
"Use '0.0.0.0' (IPv4) or '::' (IPv6) instead "
"to listen on all active interfaces.")
self._bind_addr = value
bind_addr = property(_get_bind_addr, _set_bind_addr,
doc="""The interface on which to listen for connections.
For TCP sockets, a (host, port) tuple. Host values may be any IPv4
or IPv6 address, or any valid hostname. The string 'localhost' is a
synonym for '127.0.0.1' (or '::1', if your hosts file prefers IPv6).
The string '0.0.0.0' is a special IPv4 entry meaning "any active
interface" (INADDR_ANY), and '::' is the similar IN6ADDR_ANY for
IPv6. The empty string or None are not allowed.
For UNIX sockets, supply the filename as a string.""")
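# e.g. (illustrative):
#   server.bind_addr = ('0.0.0.0', 8080)     # all active IPv4 interfaces
#   server.bind_addr = '/tmp/myapp.sock'     # AF_UNIX socket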
def start(self):
"""Run the server forever."""
# We don't have to trap KeyboardInterrupt or SystemExit here,
# because cherrypy.server already does so, calling self.stop() for us.
# If you're using this server with another framework, you should
# trap those exceptions in whatever code block calls start().
self._interrupt = None
if self.software is None:
self.software = "%s Server" % self.version
# SSL backward compatibility
if (self.ssl_adapter is None and
getattr(self, 'ssl_certificate', None) and
getattr(self, 'ssl_private_key', None)):
warnings.warn(
"SSL attributes are deprecated in CherryPy 3.2, and will "
"be removed in CherryPy 3.3. Use an ssl_adapter attribute "
"instead.",
DeprecationWarning
)
try:
from cherrypy.wsgiserver.ssl_pyopenssl import pyOpenSSLAdapter
except ImportError:
pass
else:
self.ssl_adapter = pyOpenSSLAdapter(
self.ssl_certificate, self.ssl_private_key,
getattr(self, 'ssl_certificate_chain', None))
# Select the appropriate socket
if isinstance(self.bind_addr, basestring):
# AF_UNIX socket
# So we can reuse the socket...
try: os.unlink(self.bind_addr)
except OSError: pass
# So everyone can access the socket...
try: os.chmod(self.bind_addr, 0777)
except OSError: pass
info = [(socket.AF_UNIX, socket.SOCK_STREAM, 0, "", self.bind_addr)]
else:
# AF_INET or AF_INET6 socket
# Get the correct address family for our host (allows IPv6 addresses)
host, port = self.bind_addr
try:
info = socket.getaddrinfo(host, port, socket.AF_UNSPEC,
socket.SOCK_STREAM, 0, socket.AI_PASSIVE)
except socket.gaierror:
if ':' in self.bind_addr[0]:
info = [(socket.AF_INET6, socket.SOCK_STREAM,
0, "", self.bind_addr + (0, 0))]
else:
info = [(socket.AF_INET, socket.SOCK_STREAM,
0, "", self.bind_addr)]
self.socket = None
msg = "No socket could be created"
for res in info:
af, socktype, proto, canonname, sa = res
try:
self.bind(af, socktype, proto)
except socket.error:
if self.socket:
self.socket.close()
self.socket = None
continue
break
if not self.socket:
raise socket.error(msg)
# Timeout so KeyboardInterrupt can be caught on Win32
self.socket.settimeout(1)
self.socket.listen(self.request_queue_size)
# Create worker threads
self.requests.start()
self.ready = True
self._start_time = time.time()
while self.ready:
self.tick()
if self.interrupt:
while self.interrupt is True:
# Wait for self.stop() to complete. See _set_interrupt.
time.sleep(0.1)
if self.interrupt:
raise self.interrupt
def bind(self, family, type, proto=0):
"""Create (or recreate) the actual socket object."""
self.socket = socket.socket(family, type, proto)
prevent_socket_inheritance(self.socket)
self.socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
if self.nodelay and not isinstance(self.bind_addr, str):
self.socket.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
if self.ssl_adapter is not None:
self.socket = self.ssl_adapter.bind(self.socket)
# If listening on the IPV6 any address ('::' = IN6ADDR_ANY),
# activate dual-stack. See http://www.cherrypy.org/ticket/871.
if (hasattr(socket, 'AF_INET6') and family == socket.AF_INET6
and self.bind_addr[0] in ('::', '::0', '::0.0.0.0')):
try:
self.socket.setsockopt(socket.IPPROTO_IPV6, socket.IPV6_V6ONLY, 0)
except (AttributeError, socket.error):
# Apparently, the socket option is not available in
# this machine's TCP stack
pass
self.socket.bind(self.bind_addr)
def tick(self):
"""Accept a new connection and put it on the Queue."""
try:
s, addr = self.socket.accept()
if self.stats['Enabled']:
self.stats['Accepts'] += 1
if not self.ready:
return
prevent_socket_inheritance(s)
if hasattr(s, 'settimeout'):
s.settimeout(self.timeout)
makefile = CP_fileobject
ssl_env = {}
# if ssl cert and key are set, we try to be a secure HTTP server
if self.ssl_adapter is not None:
try:
s, ssl_env = self.ssl_adapter.wrap(s)
except NoSSLError:
msg = ("The client sent a plain HTTP request, but "
"this server only speaks HTTPS on this port.")
buf = ["%s 400 Bad Request\r\n" % self.protocol,
"Content-Length: %s\r\n" % len(msg),
"Content-Type: text/plain\r\n\r\n",
msg]
wfile = CP_fileobject(s, "wb", DEFAULT_BUFFER_SIZE)
try:
wfile.sendall("".join(buf))
except socket.error, x:
if x.args[0] not in socket_errors_to_ignore:
raise
return
if not s:
return
makefile = self.ssl_adapter.makefile
# Re-apply our timeout since we may have a new socket object
if hasattr(s, 'settimeout'):
s.settimeout(self.timeout)
conn = self.ConnectionClass(self, s, makefile)
if not isinstance(self.bind_addr, basestring):
# optional values
# Until we do DNS lookups, omit REMOTE_HOST
if addr is None: # sometimes this can happen
# figure out if AF_INET or AF_INET6.
if len(s.getsockname()) == 2:
# AF_INET
addr = ('0.0.0.0', 0)
else:
# AF_INET6
addr = ('::', 0)
conn.remote_addr = addr[0]
conn.remote_port = addr[1]
conn.ssl_env = ssl_env
self.requests.put(conn)
except socket.timeout:
# The only reason for the timeout in start() is so we can
# notice keyboard interrupts on Win32, which don't interrupt
# accept() by default
return
except socket.error, x:
if self.stats['Enabled']:
self.stats['Socket Errors'] += 1
if x.args[0] in socket_error_eintr:
# I *think* this is right. EINTR should occur when a signal
# is received during the accept() call; all docs say retry
# the call, and I *think* I'm reading it right that Python
# will then go ahead and poll for and handle the signal
# elsewhere. See http://www.cherrypy.org/ticket/707.
return
if x.args[0] in socket_errors_nonblocking:
# Just try again. See http://www.cherrypy.org/ticket/479.
return
if x.args[0] in socket_errors_to_ignore:
# Our socket was closed.
# See http://www.cherrypy.org/ticket/686.
return
raise
def _get_interrupt(self):
return self._interrupt
def _set_interrupt(self, interrupt):
self._interrupt = True
self.stop()
self._interrupt = interrupt
interrupt = property(_get_interrupt, _set_interrupt,
doc="Set this to an Exception instance to "
"interrupt the server.")
def stop(self):
"""Gracefully shutdown a server that is serving forever."""
self.ready = False
if self._start_time is not None:
self._run_time += (time.time() - self._start_time)
self._start_time = None
sock = getattr(self, "socket", None)
if sock:
if not isinstance(self.bind_addr, basestring):
# Touch our own socket to make accept() return immediately.
try:
host, port = sock.getsockname()[:2]
except socket.error, x:
if x.args[0] not in socket_errors_to_ignore:
# Changed to use error code and not message
# See http://www.cherrypy.org/ticket/860.
raise
else:
# Note that we're explicitly NOT using AI_PASSIVE,
# here, because we want an actual IP to touch.
# localhost won't work if we've bound to a public IP,
# but it will if we bound to '0.0.0.0' (INADDR_ANY).
for res in socket.getaddrinfo(host, port, socket.AF_UNSPEC,
socket.SOCK_STREAM):
af, socktype, proto, canonname, sa = res
s = None
try:
s = socket.socket(af, socktype, proto)
# See http://groups.google.com/group/cherrypy-users/
# browse_frm/thread/bbfe5eb39c904fe0
s.settimeout(1.0)
s.connect((host, port))
s.close()
except socket.error:
if s:
s.close()
if hasattr(sock, "close"):
sock.close()
self.socket = None
self.requests.stop(self.shutdown_timeout)
class Gateway(object):
def __init__(self, req):
self.req = req
def respond(self):
raise NotImplementedError
# These may either be wsgiserver.SSLAdapter subclasses or the string names
# of such classes (in which case they will be lazily loaded).
ssl_adapters = {
'builtin': 'cherrypy.wsgiserver.ssl_builtin.BuiltinSSLAdapter',
'pyopenssl': 'cherrypy.wsgiserver.ssl_pyopenssl.pyOpenSSLAdapter',
}
def get_ssl_adapter_class(name='pyopenssl'):
adapter = ssl_adapters[name.lower()]
if isinstance(adapter, basestring):
last_dot = adapter.rfind(".")
attr_name = adapter[last_dot + 1:]
mod_path = adapter[:last_dot]
try:
mod = sys.modules[mod_path]
if mod is None:
raise KeyError()
except KeyError:
# The last [''] is important.
mod = __import__(mod_path, globals(), locals(), [''])
# Let an AttributeError propagate outward.
try:
adapter = getattr(mod, attr_name)
except AttributeError:
raise AttributeError("'%s' object has no attribute '%s'"
% (mod_path, attr_name))
return adapter
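# e.g. (illustrative paths):
#   cls = get_ssl_adapter_class('builtin')
#   server.ssl_adapter = cls('cert.pem', 'privkey.pem')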
# -------------------------------- WSGI Stuff -------------------------------- #
class CherryPyWSGIServer(HTTPServer):
wsgi_version = (1, 0)
def __init__(self, bind_addr, wsgi_app, numthreads=10, server_name=None,
max=-1, request_queue_size=5, timeout=10, shutdown_timeout=5):
self.requests = ThreadPool(self, min=numthreads or 1, max=max)
self.wsgi_app = wsgi_app
self.gateway = wsgi_gateways[self.wsgi_version]
self.bind_addr = bind_addr
if not server_name:
server_name = socket.gethostname()
self.server_name = server_name
self.request_queue_size = request_queue_size
self.timeout = timeout
self.shutdown_timeout = shutdown_timeout
self.clear_stats()
def _get_numthreads(self):
return self.requests.min
def _set_numthreads(self, value):
self.requests.min = value
numthreads = property(_get_numthreads, _set_numthreads)
class WSGIGateway(Gateway):
def __init__(self, req):
self.req = req
self.started_response = False
self.env = self.get_environ()
self.remaining_bytes_out = None
def get_environ(self):
"""Return a new environ dict targeting the given wsgi.version"""
raise NotImplementedError
def respond(self):
response = self.req.server.wsgi_app(self.env, self.start_response)
try:
for chunk in response:
# "The start_response callable must not actually transmit
# the response headers. Instead, it must store them for the
# server or gateway to transmit only after the first
# iteration of the application return value that yields
# a NON-EMPTY string, or upon the application's first
# invocation of the write() callable." (PEP 333)
if chunk:
if isinstance(chunk, unicode):
chunk = chunk.encode('ISO-8859-1')
self.write(chunk)
finally:
if hasattr(response, "close"):
response.close()
def start_response(self, status, headers, exc_info = None):
"""WSGI callable to begin the HTTP response."""
# "The application may call start_response more than once,
# if and only if the exc_info argument is provided."
if self.started_response and not exc_info:
raise AssertionError("WSGI start_response called a second "
"time with no exc_info.")
self.started_response = True
# "if exc_info is provided, and the HTTP headers have already been
# sent, start_response must raise an error, and should raise the
# exc_info tuple."
if self.req.sent_headers:
try:
raise exc_info[0], exc_info[1], exc_info[2]
finally:
exc_info = None
self.req.status = status
for k, v in headers:
if not isinstance(k, str):
raise TypeError("WSGI response header key %r is not a byte string." % k)
if not isinstance(v, str):
raise TypeError("WSGI response header value %r is not a byte string." % v)
if k.lower() == 'content-length':
self.remaining_bytes_out = int(v)
self.req.outheaders.extend(headers)
return self.write
def write(self, chunk):
"""WSGI callable to write unbuffered data to the client.
This method is also used internally by start_response (to write
data from the iterable returned by the WSGI application).
"""
if not self.started_response:
raise AssertionError("WSGI write called before start_response.")
chunklen = len(chunk)
rbo = self.remaining_bytes_out
if rbo is not None and chunklen > rbo:
if not self.req.sent_headers:
# Whew. We can send a 500 to the client.
self.req.simple_response("500 Internal Server Error",
"The requested resource returned more bytes than the "
"declared Content-Length.")
else:
# Dang. We have probably already sent data. Truncate the chunk
# to fit (so the client doesn't hang) and raise an error later.
chunk = chunk[:rbo]
if not self.req.sent_headers:
self.req.sent_headers = True
self.req.send_headers()
self.req.write(chunk)
if rbo is not None:
rbo -= chunklen
if rbo < 0:
raise ValueError(
"Response body exceeds the declared Content-Length.")
class WSGIGateway_10(WSGIGateway):
def get_environ(self):
"""Return a new environ dict targeting the given wsgi.version"""
req = self.req
env = {
# set a non-standard environ entry so the WSGI app can know what
# the *real* server protocol is (and what features to support).
# See http://www.faqs.org/rfcs/rfc2145.html.
'ACTUAL_SERVER_PROTOCOL': req.server.protocol,
'PATH_INFO': req.path,
'QUERY_STRING': req.qs,
'REMOTE_ADDR': req.conn.remote_addr or '',
'REMOTE_PORT': str(req.conn.remote_port or ''),
'REQUEST_METHOD': req.method,
'REQUEST_URI': req.uri,
'SCRIPT_NAME': '',
'SERVER_NAME': req.server.server_name,
# Bah. "SERVER_PROTOCOL" is actually the REQUEST protocol.
'SERVER_PROTOCOL': req.request_protocol,
'SERVER_SOFTWARE': req.server.software,
'wsgi.errors': sys.stderr,
'wsgi.input': req.rfile,
'wsgi.multiprocess': False,
'wsgi.multithread': True,
'wsgi.run_once': False,
'wsgi.url_scheme': req.scheme,
'wsgi.version': (1, 0),
}
if isinstance(req.server.bind_addr, basestring):
# AF_UNIX. This isn't really allowed by WSGI, which doesn't
# address unix domain sockets. But it's better than nothing.
env["SERVER_PORT"] = ""
else:
env["SERVER_PORT"] = str(req.server.bind_addr[1])
# Request headers
for k, v in req.inheaders.iteritems():
env["HTTP_" + k.upper().replace("-", "_")] = v
# CONTENT_TYPE/CONTENT_LENGTH
ct = env.pop("HTTP_CONTENT_TYPE", None)
if ct is not None:
env["CONTENT_TYPE"] = ct
cl = env.pop("HTTP_CONTENT_LENGTH", None)
if cl is not None:
env["CONTENT_LENGTH"] = cl
if req.conn.ssl_env:
env.update(req.conn.ssl_env)
return env
class WSGIGateway_u0(WSGIGateway_10):
def get_environ(self):
"""Return a new environ dict targeting the given wsgi.version"""
req = self.req
env_10 = WSGIGateway_10.get_environ(self)
env = dict([(k.decode('ISO-8859-1'), v) for k, v in env_10.iteritems()])
env[u'wsgi.version'] = ('u', 0)
# Request-URI
env.setdefault(u'wsgi.url_encoding', u'utf-8')
try:
for key in [u"PATH_INFO", u"SCRIPT_NAME", u"QUERY_STRING"]:
env[key] = env_10[str(key)].decode(env[u'wsgi.url_encoding'])
except UnicodeDecodeError:
# Fall back to latin 1 so apps can transcode if needed.
env[u'wsgi.url_encoding'] = u'ISO-8859-1'
for key in [u"PATH_INFO", u"SCRIPT_NAME", u"QUERY_STRING"]:
env[key] = env_10[str(key)].decode(env[u'wsgi.url_encoding'])
for k, v in sorted(env.items()):
if isinstance(v, str) and k not in ('REQUEST_URI', 'wsgi.input'):
env[k] = v.decode('ISO-8859-1')
return env
wsgi_gateways = {
(1, 0): WSGIGateway_10,
('u', 0): WSGIGateway_u0,
}
class WSGIPathInfoDispatcher(object):
"""A WSGI dispatcher for dispatch based on the PATH_INFO.
apps: a dict or list of (path_prefix, app) pairs.
"""
def __init__(self, apps):
try:
apps = apps.items()
except AttributeError:
pass
# Sort the apps by len(path), descending
apps.sort(cmp=lambda x,y: cmp(len(x[0]), len(y[0])))
apps.reverse()
# The path_prefix strings must start, but not end, with a slash.
# Use "" instead of "/".
self.apps = [(p.rstrip("/"), a) for p, a in apps]
def __call__(self, environ, start_response):
path = environ["PATH_INFO"] or "/"
for p, app in self.apps:
# The apps list should be sorted by length, descending.
if path.startswith(p + "/") or path == p:
environ = environ.copy()
environ["SCRIPT_NAME"] = environ["SCRIPT_NAME"] + p
environ["PATH_INFO"] = path[len(p):]
return app(environ, start_response)
start_response('404 Not Found', [('Content-Type', 'text/plain'),
('Content-Length', '0')])
return ['']
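# End-to-end sketch (`hello_app` is hypothetical): dispatch WSGI apps by path
# prefix and serve them:
#   def hello_app(environ, start_response):
#       start_response('200 OK', [('Content-Type', 'text/plain')])
#       return ['Hello']
#   dispatcher = WSGIPathInfoDispatcher([('/hello', hello_app)])
#   server = CherryPyWSGIServer(('127.0.0.1', 8080), dispatcher)
#   try:
#       server.start()
#   except KeyboardInterrupt:
#       server.stop()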
| agpl-3.0 |
jelly/calibre | src/calibre/ebooks/djvu/djvu.py | 2 | 4668 | #! /usr/bin/env python2
# coding: utf-8
from __future__ import (unicode_literals, division, absolute_import,
print_function)
__license__ = 'GPL v3'
__copyright__ = '2011, Anthon van der Neut <A.van.der.Neut@ruamel.eu>'
# this code is based on:
# Lizardtech DjVu Reference
# DjVu v3
# November 2005
import sys
import struct
from calibre.ebooks.djvu.djvubzzdec import BZZDecoder
from calibre.constants import plugins
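# A DjVu document is an IFF-style container: each chunk starts with a 4-byte
# tag followed by a 4-byte big-endian length, and chunks are padded to 2-byte
# boundaries. Illustrative layout of a file parsed below:
#   AT&T FORM <len> DJVU ... TXTz <len> <BZZ-compressed text> ...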
class DjvuChunk(object):
def __init__(self, buf, start, end, align=True, bigendian=True,
inclheader=False, verbose=0):
self.speedup, err = plugins['bzzdec']
if self.speedup is None:
raise RuntimeError('Failed to load bzzdec plugin: %s' % err)
self.subtype = None
self._subchunks = []
self.buf = buf
pos = start + 4
self.type = buf[start:pos]
self.align = align # whether to align to word (2-byte) boundaries
self.headersize = 0 if inclheader else 8
if bigendian:
self.strflag = b'>'
else:
self.strflag = b'<'
oldpos, pos = pos, pos+4
self.size = struct.unpack(self.strflag+b'L', buf[oldpos:pos])[0]
self.dataend = pos + self.size - (8 if inclheader else 0)
if self.type == b'FORM':
oldpos, pos = pos, pos+4
# print oldpos, pos
self.subtype = buf[oldpos:pos]
# self.headersize += 4
self.datastart = pos
if verbose > 0:
print ('found', self.type, self.subtype, pos, self.size)
if self.type in b'FORM'.split():
if verbose > 0:
print ('processing sub-chunks %d %d (%x)' % (pos, self.dataend,
self.dataend))
numchunks = 0
while pos < self.dataend:
x = DjvuChunk(buf, pos, start+self.size, verbose=verbose)
numchunks += 1
self._subchunks.append(x)
newpos = pos + x.size + x.headersize + (1 if (x.size % 2) else 0)
if verbose > 0:
print ('newpos %d %d (%x, %x) %d' % (newpos, self.dataend,
newpos, self.dataend, x.headersize))
pos = newpos
if verbose > 0:
print (' end of chunk %d (%x)' % (pos, pos))
def dump(self, verbose=0, indent=1, out=None, txtout=None, maxlevel=100):
if out:
out.write(b' ' * indent)
out.write(b'%s%s [%d]\n' % (self.type,
b':' + self.subtype if self.subtype else b'', self.size))
if txtout and self.type == b'TXTz':
if True:
# Use the C BZZ decode implementation
txtout.write(self.speedup.decompress(self.buf[self.datastart:self.dataend]))
else:
inbuf = bytearray(self.buf[self.datastart: self.dataend])
outbuf = bytearray()
decoder = BZZDecoder(inbuf, outbuf)
while True:
xxres = decoder.convert(1024 * 1024)
if not xxres:
break
res = bytes(outbuf)
if not res.strip(b'\0'):
raise ValueError('TXTz block is completely null')
l = 0
for x in res[:3]:
l <<= 8
l += ord(x)
if verbose > 0 and out:
print (l, file=out)
txtout.write(res[3:3+l])
txtout.write(b'\037')
if txtout and self.type == b'TXTa':
res = self.buf[self.datastart: self.dataend]
l = 0
for x in res[:3]:
l <<= 8
l += ord(x)
if verbose > 0 and out:
print (l, file=out)
txtout.write(res[3:3+l])
txtout.write(b'\037')
if indent >= maxlevel:
return
for schunk in self._subchunks:
schunk.dump(verbose=verbose, indent=indent+1, out=out, txtout=txtout)
class DJVUFile(object):
def __init__(self, instream, verbose=0):
self.instream = instream
buf = self.instream.read(4)
assert(buf == b'AT&T')
buf = self.instream.read()
self.dc = DjvuChunk(buf, 0, len(buf), verbose=verbose)
def get_text(self, outfile=None):
self.dc.dump(txtout=outfile)
def dump(self, outfile=None, maxlevel=0):
self.dc.dump(out=outfile, maxlevel=maxlevel)
def main():
f = DJVUFile(open(sys.argv[-1], 'rb'))
print (f.get_text(sys.stdout))
if __name__ == '__main__':
main()
| gpl-3.0 |
t794104/ansible | lib/ansible/playbook/taggable.py | 96 | 3170 | # (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from ansible.errors import AnsibleError
from ansible.module_utils.six import string_types
from ansible.playbook.attribute import FieldAttribute
from ansible.template import Templar
class Taggable:
untagged = frozenset(['untagged'])
_tags = FieldAttribute(isa='list', default=list, listof=(string_types, int), extend=True)
def _load_tags(self, attr, ds):
if isinstance(ds, list):
return ds
elif isinstance(ds, string_types):
value = ds.split(',')
if isinstance(value, list):
return [x.strip() for x in value]
else:
return [ds]
else:
raise AnsibleError('tags must be specified as a list', obj=ds)
def evaluate_tags(self, only_tags, skip_tags, all_vars):
''' this checks if the current item should be executed depending on tag options '''
if self.tags:
templar = Templar(loader=self._loader, variables=all_vars)
tags = templar.template(self.tags)
_temp_tags = set()
for tag in tags:
if isinstance(tag, list):
_temp_tags.update(tag)
else:
_temp_tags.add(tag)
tags = _temp_tags
self.tags = list(tags)
else:
# this makes isdisjoint work for untagged
tags = self.untagged
should_run = True # default, tasks to run
if only_tags:
if 'always' in tags:
should_run = True
elif ('all' in only_tags and 'never' not in tags):
should_run = True
elif not tags.isdisjoint(only_tags):
should_run = True
elif 'tagged' in only_tags and tags != self.untagged and 'never' not in tags:
should_run = True
else:
should_run = False
if should_run and skip_tags:
# Check for tags that we need to skip
if 'all' in skip_tags:
if 'always' not in tags or 'always' in skip_tags:
should_run = False
elif not tags.isdisjoint(skip_tags):
should_run = False
elif 'tagged' in skip_tags and tags != self.untagged:
should_run = False
return should_run
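# Example semantics (illustrative): a task tagged ['deploy'] runs under
# `--tags deploy` and is skipped under `--skip-tags deploy`; a task tagged
# 'always' runs unless explicitly skipped; a task with no tags matches the
# special 'untagged'/'tagged' selectors handled above.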
| gpl-3.0 |
hubert667/AIR | build/celery/build/lib.linux-i686-2.7/celery/bin/worker.py | 2 | 8853 | # -*- coding: utf-8 -*-
"""
The :program:`celery worker` command (previously known as ``celeryd``)
.. program:: celery worker
.. seealso::
See :ref:`preload-options`.
.. cmdoption:: -c, --concurrency
Number of child processes processing the queue. The default
is the number of CPUs available on your system.
.. cmdoption:: -P, --pool
Pool implementation:
prefork (default), eventlet, gevent, solo or threads.
.. cmdoption:: -f, --logfile
Path to log file. If no logfile is specified, `stderr` is used.
.. cmdoption:: -l, --loglevel
Logging level, choose between `DEBUG`, `INFO`, `WARNING`,
`ERROR`, `CRITICAL`, or `FATAL`.
.. cmdoption:: -n, --hostname
Set custom hostname, e.g. 'w1.%h'. Expands: %h (hostname),
%n (name) and %d (domain).
.. cmdoption:: -B, --beat
Also run the `celery beat` periodic task scheduler. Please note that
there must only be one instance of this service.
.. cmdoption:: -Q, --queues
List of queues to enable for this worker, separated by comma.
By default all configured queues are enabled.
Example: `-Q video,image`
.. cmdoption:: -I, --include
Comma separated list of additional modules to import.
Example: -I foo.tasks,bar.tasks
.. cmdoption:: -s, --schedule
Path to the schedule database if running with the `-B` option.
Defaults to `celerybeat-schedule`. The extension ".db" may be
appended to the filename.
.. cmdoption:: -O
Apply optimization profile. Supported: default, fair
.. cmdoption:: --scheduler
Scheduler class to use. Default is celery.beat.PersistentScheduler
.. cmdoption:: -S, --statedb
Path to the state database. The extension '.db' may
be appended to the filename. Default: {default}
.. cmdoption:: -E, --events
Send events that can be captured by monitors like :program:`celery events`,
`celerymon`, and others.
.. cmdoption:: --without-gossip
Do not subscribe to other workers events.
.. cmdoption:: --without-mingle
Do not synchronize with other workers at startup.
.. cmdoption:: --without-heartbeat
Do not send event heartbeats.
.. cmdoption:: --purge
Purges all waiting tasks before the daemon is started.
**WARNING**: This is unrecoverable, and the tasks will be
deleted from the messaging server.
.. cmdoption:: --time-limit
Enables a hard time limit (in seconds int/float) for tasks.
.. cmdoption:: --soft-time-limit
Enables a soft time limit (in seconds int/float) for tasks.
.. cmdoption:: --maxtasksperchild
Maximum number of tasks a pool worker can execute before it's
terminated and replaced by a new worker.
.. cmdoption:: --pidfile
Optional file used to store the workers pid.
The worker will not start if this file already exists
and the pid is still alive.
.. cmdoption:: --autoscale
Enable autoscaling by providing
max_concurrency, min_concurrency. Example::
--autoscale=10,3
(always keep 3 processes, but grow to 10 if necessary)
.. cmdoption:: --autoreload
Enable autoreloading.
.. cmdoption:: --no-execv
Don't do execv after multiprocessing child fork.
"""
from __future__ import absolute_import, unicode_literals
import sys
from celery import concurrency
from celery.bin.base import Command, Option, daemon_options
from celery.bin.celeryd_detach import detached_celeryd
from celery.five import string_t
from celery.platforms import maybe_drop_privileges
from celery.utils import default_nodename
from celery.utils.log import LOG_LEVELS, mlevel
__all__ = ['worker', 'main']
__MODULE_DOC__ = __doc__
class worker(Command):
"""Start worker instance.
Examples::
celery worker --app=proj -l info
celery worker -A proj -l info -Q hipri,lopri
celery worker -A proj --concurrency=4
celery worker -A proj --concurrency=1000 -P eventlet
celery worker --autoscale=10,0
"""
doc = __MODULE_DOC__ # parse help from this too
namespace = 'celeryd'
enable_config_from_cmdline = True
supports_args = False
def run_from_argv(self, prog_name, argv=None, command=None):
command = sys.argv[0] if command is None else command
argv = sys.argv[1:] if argv is None else argv
# parse options before detaching so errors can be handled.
options, args = self.prepare_args(
*self.parse_options(prog_name, argv, command))
self.maybe_detach([command] + sys.argv[1:])
return self(*args, **options)
def maybe_detach(self, argv, dopts=['-D', '--detach']):
if any(arg in argv for arg in dopts):
argv = [v for v in argv if v not in dopts]
# will never return
detached_celeryd(self.app).execute_from_commandline(argv)
raise SystemExit(0)
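# e.g. `celery worker -A proj --detach --pidfile=/var/run/w1.pid` re-execs
# through detached_celeryd above and never returns to run().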
def run(self, hostname=None, pool_cls=None, app=None, uid=None, gid=None,
loglevel=None, logfile=None, pidfile=None, state_db=None,
**kwargs):
maybe_drop_privileges(uid=uid, gid=gid)
# Pools like eventlet/gevent needs to patch libs as early
# as possible.
pool_cls = (concurrency.get_implementation(pool_cls) or
self.app.conf.CELERYD_POOL)
if self.app.IS_WINDOWS and kwargs.get('beat'):
self.die('-B option does not work on Windows. '
'Please run celery beat as a separate service.')
hostname = self.simple_format(default_nodename(hostname))
if loglevel:
try:
loglevel = mlevel(loglevel)
except KeyError: # pragma: no cover
self.die('Unknown level {0!r}. Please use one of {1}.'.format(
loglevel, '|'.join(
l for l in LOG_LEVELS if isinstance(l, string_t))))
return self.app.Worker(
hostname=hostname, pool_cls=pool_cls, loglevel=loglevel,
logfile=self.node_format(logfile, hostname),
pidfile=self.node_format(pidfile, hostname),
state_db=self.node_format(state_db, hostname), **kwargs
).start()
def with_pool_option(self, argv):
# this command support custom pools
# that may have to be loaded as early as possible.
return (['-P'], ['--pool'])
def get_options(self):
conf = self.app.conf
return (
Option('-c', '--concurrency',
default=conf.CELERYD_CONCURRENCY, type='int'),
Option('-P', '--pool', default=conf.CELERYD_POOL, dest='pool_cls'),
Option('--purge', '--discard', default=False, action='store_true'),
Option('-l', '--loglevel', default=conf.CELERYD_LOG_LEVEL),
Option('-n', '--hostname'),
Option('-B', '--beat', action='store_true'),
Option('-s', '--schedule', dest='schedule_filename',
default=conf.CELERYBEAT_SCHEDULE_FILENAME),
Option('--scheduler', dest='scheduler_cls'),
Option('-S', '--statedb',
default=conf.CELERYD_STATE_DB, dest='state_db'),
Option('-E', '--events', default=conf.CELERY_SEND_EVENTS,
action='store_true', dest='send_events'),
Option('--time-limit', type='float', dest='task_time_limit',
default=conf.CELERYD_TASK_TIME_LIMIT),
Option('--soft-time-limit', dest='task_soft_time_limit',
default=conf.CELERYD_TASK_SOFT_TIME_LIMIT, type='float'),
Option('--maxtasksperchild', dest='max_tasks_per_child',
default=conf.CELERYD_MAX_TASKS_PER_CHILD, type='int'),
Option('--queues', '-Q', default=[]),
Option('--exclude-queues', '-X', default=[]),
Option('--include', '-I', default=[]),
Option('--autoscale'),
Option('--autoreload', action='store_true'),
Option('--no-execv', action='store_true', default=False),
Option('--without-gossip', action='store_true', default=False),
Option('--without-mingle', action='store_true', default=False),
Option('--without-heartbeat', action='store_true', default=False),
Option('-O', dest='optimization'),
Option('-D', '--detach', action='store_true'),
) + daemon_options() + tuple(self.app.user_options['worker'])
def main(app=None):
# Fix for setuptools generated scripts, so that it will
# work with multiprocessing fork emulation.
# (see multiprocessing.forking.get_preparation_data())
if __name__ != '__main__': # pragma: no cover
sys.modules['__main__'] = sys.modules[__name__]
from billiard import freeze_support
freeze_support()
worker(app=app).execute_from_commandline()
if __name__ == '__main__': # pragma: no cover
main()
| gpl-3.0 |
Shrhawk/edx-platform | common/lib/xmodule/xmodule/tests/xml/factories.py | 77 | 4963 | """
Factories for generating edXML for testing XModule import
"""
import inspect
from fs.memoryfs import MemoryFS
from factory import Factory, lazy_attribute, post_generation, Sequence
from lxml import etree
from xmodule.modulestore.inheritance import InheritanceMixin
from xmodule.x_module import XModuleMixin
from xmodule.modulestore import only_xmodules
class XmlImportData(object):
"""
Class to capture all of the data needed to actually run an XML import,
so that the Factories have something to generate
"""
def __init__(self, xml_node, xml=None, course_id=None,
default_class=None, policy=None,
filesystem=None, parent=None,
xblock_mixins=(), xblock_select=None):
self._xml_node = xml_node
self._xml_string = xml
self.course_id = course_id
self.default_class = default_class
self.filesystem = filesystem
self.xblock_mixins = xblock_mixins
self.xblock_select = xblock_select
self.parent = parent
if policy is None:
self.policy = {}
else:
self.policy = policy
@property
def xml_string(self):
"""Return the stringified version of the generated xml"""
if self._xml_string is not None:
return self._xml_string
return etree.tostring(self._xml_node)
def __repr__(self):
return u"XmlImportData{!r}".format((
self._xml_node, self._xml_string, self.course_id,
self.default_class, self.policy,
self.filesystem, self.parent, self.xblock_mixins,
self.xblock_select,
))
# Extract all argument names used to construct XmlImportData objects,
# so that the factory doesn't treat them as XML attributes
XML_IMPORT_ARGS = inspect.getargspec(XmlImportData.__init__).args
class XmlImportFactory(Factory):
"""
Factory for generating XmlImportData's, which can hold all the data needed
to run an XModule XML import
"""
FACTORY_FOR = XmlImportData
filesystem = MemoryFS()
xblock_mixins = (InheritanceMixin, XModuleMixin)
xblock_select = only_xmodules
url_name = Sequence(str)
attribs = {}
policy = {}
inline_xml = True
tag = 'unknown'
course_id = 'edX/xml_test_course/101'
@classmethod
def _adjust_kwargs(cls, **kwargs):
"""
Adjust the kwargs to be passed to the generated class.
Any kwargs that match :fun:`XmlImportData.__init__` will be passed
through. Any other unknown `kwargs` will be treated as XML attributes
:param tag: xml tag for the generated :class:`Element` node
:param text: (Optional) specifies the text of the generated :class:`Element`.
:param policy: (Optional) specifies data for the policy json file for this node
:type policy: dict
:param attribs: (Optional) specify attributes for the XML node
:type attribs: dict
"""
tag = kwargs.pop('tag', 'unknown')
kwargs['policy'] = {'{tag}/{url_name}'.format(tag=tag, url_name=kwargs['url_name']): kwargs['policy']}
kwargs['xml_node'].text = kwargs.pop('text', None)
kwargs['xml_node'].attrib.update(kwargs.pop('attribs', {}))
# Make sure that the xml_module doesn't try and open a file to find the contents
# of this node.
inline_xml = kwargs.pop('inline_xml')
if inline_xml:
kwargs['xml_node'].set('not_a_pointer', 'true')
for key in kwargs.keys():
if key not in XML_IMPORT_ARGS:
kwargs['xml_node'].set(key, kwargs.pop(key))
if not inline_xml:
kwargs['xml_node'].write(
kwargs['filesystem'].open(
'{}/{}.xml'.format(kwargs['tag'], kwargs['url_name'])
),
encoding='utf-8'
)
return kwargs
@lazy_attribute
def xml_node(self):
"""An :class:`xml.etree.Element`"""
return etree.Element(self.tag)
@post_generation
def parent(self, _create, extracted, **_):
"""Hook to merge this xml into a parent xml node"""
if extracted is None:
return
extracted._xml_node.append(self._xml_node) # pylint: disable=no-member, protected-access
extracted.policy.update(self.policy)
class CourseFactory(XmlImportFactory):
"""Factory for <course> nodes"""
tag = 'course'
name = '101'
static_asset_path = 'xml_test_course'
class SequenceFactory(XmlImportFactory):
"""Factory for <sequential> nodes"""
tag = 'sequential'
class VerticalFactory(XmlImportFactory):
"""Factory for <vertical> nodes"""
tag = 'vertical'
class ProblemFactory(XmlImportFactory):
"""Factory for <problem> nodes"""
tag = 'problem'
text = '<h1>Empty Problem!</h1>'
class HtmlFactory(XmlImportFactory):
"""Factory for <html> nodes"""
tag = 'html'
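# Illustrative use of these factories (names hypothetical): build a nested
# course structure ready for XML import:
#   course = CourseFactory()
#   seq = SequenceFactory(parent=course)
#   ProblemFactory(parent=seq)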
| agpl-3.0 |
rhurkes/chasegame | venv/lib/python2.7/site-packages/setuptools/tests/test_dist_info.py | 452 | 2615 | """Test .dist-info style distributions.
"""
import os
import shutil
import tempfile
import unittest
import textwrap
try:
import ast
except ImportError:
pass
import pkg_resources
from setuptools.tests.py26compat import skipIf
def DALS(s):
"dedent and left-strip"
return textwrap.dedent(s).lstrip()
class TestDistInfo(unittest.TestCase):
def test_distinfo(self):
dists = {}
for d in pkg_resources.find_distributions(self.tmpdir):
dists[d.project_name] = d
assert len(dists) == 2, dists
unversioned = dists['UnversionedDistribution']
versioned = dists['VersionedDistribution']
assert versioned.version == '2.718' # from filename
assert unversioned.version == '0.3' # from METADATA
@skipIf('ast' not in globals(),
"ast is used to test conditional dependencies (Python >= 2.6)")
def test_conditional_dependencies(self):
requires = [pkg_resources.Requirement.parse('splort==4'),
pkg_resources.Requirement.parse('quux>=1.1')]
for d in pkg_resources.find_distributions(self.tmpdir):
self.assertEqual(d.requires(), requires[:1])
self.assertEqual(d.requires(extras=('baz',)), requires)
self.assertEqual(d.extras, ['baz'])
def setUp(self):
self.tmpdir = tempfile.mkdtemp()
versioned = os.path.join(self.tmpdir,
'VersionedDistribution-2.718.dist-info')
os.mkdir(versioned)
metadata_file = open(os.path.join(versioned, 'METADATA'), 'w+')
try:
metadata_file.write(DALS(
"""
Metadata-Version: 1.2
Name: VersionedDistribution
Requires-Dist: splort (4)
Provides-Extra: baz
Requires-Dist: quux (>=1.1); extra == 'baz'
"""))
finally:
metadata_file.close()
unversioned = os.path.join(self.tmpdir,
'UnversionedDistribution.dist-info')
os.mkdir(unversioned)
metadata_file = open(os.path.join(unversioned, 'METADATA'), 'w+')
try:
metadata_file.write(DALS(
"""
Metadata-Version: 1.2
Name: UnversionedDistribution
Version: 0.3
Requires-Dist: splort (==4)
Provides-Extra: baz
Requires-Dist: quux (>=1.1); extra == 'baz'
"""))
finally:
metadata_file.close()
def tearDown(self):
shutil.rmtree(self.tmpdir)
| mit |
redengineer/airflow | airflow/executors/__init__.py | 12 | 1209 | import logging
from airflow.configuration import conf
from airflow.executors.base_executor import BaseExecutor
from airflow.executors.local_executor import LocalExecutor
from airflow.executors.celery_executor import CeleryExecutor
from airflow.executors.sequential_executor import SequentialExecutor
# TODO Fix this emergency fix
try:
from airflow.contrib.executors.mesos_executor import MesosExecutor
except:
pass
from airflow.utils import AirflowException
_EXECUTOR = conf.get('core', 'EXECUTOR')
if _EXECUTOR == 'LocalExecutor':
DEFAULT_EXECUTOR = LocalExecutor()
elif _EXECUTOR == 'CeleryExecutor':
DEFAULT_EXECUTOR = CeleryExecutor()
elif _EXECUTOR == 'SequentialExecutor':
DEFAULT_EXECUTOR = SequentialExecutor()
elif _EXECUTOR == 'MesosExecutor':
DEFAULT_EXECUTOR = MesosExecutor()
else:
# Loading plugins
from airflow.plugins_manager import executors as _executors
for _executor in _executors:
globals()[_executor.__name__] = _executor
if _EXECUTOR in globals():
DEFAULT_EXECUTOR = globals()[_EXECUTOR]()
else:
raise AirflowException("Executor {0} not supported.".format(_EXECUTOR))
logging.info("Using executor " + _EXECUTOR)
| apache-2.0 |
amylittleyang/OtraCAD | cadnano25/cadnano/tests/xmlrunner.py | 2 | 12512 | """
XML Test Runner for PyUnit
"""
# Written by Sebastian Rittau <srittau@jroger.in-berlin.de> and placed in
# the Public Domain. With contributions by Paolo Borelli and others.
from __future__ import with_statement
__version__ = "0.1"
import os.path
import re
import sys
import time
import traceback
import unittest
from xml.sax.saxutils import escape
try:
from StringIO import StringIO
except ImportError:
from io import StringIO
class _TestInfo(object):
"""Information about a particular test. Used by _XMLTestResult."""
def __init__(self, test, time):
(self._class, self._method) = test.id().rsplit(".", 1)
self._time = time
self._error = None
self._failure = None
@staticmethod
def create_success(test, time):
"""Create a _TestInfo instance for a successful test."""
return _TestInfo(test, time)
@staticmethod
def create_failure(test, time, failure):
"""Create a _TestInfo instance for a failed test."""
info = _TestInfo(test, time)
info._failure = failure
return info
@staticmethod
def create_error(test, time, error):
"""Create a _TestInfo instance for an erroneous test."""
info = _TestInfo(test, time)
info._error = error
return info
def print_report(self, stream):
"""Print information about this test case in XML format to the
supplied stream.
"""
stream.write(' <testcase classname="%(class)s" name="%(method)s" time="%(time).4f">' % \
{
"class": self._class,
"method": self._method,
"time": self._time,
})
if self._failure is not None:
self._print_error(stream, 'failure', self._failure)
if self._error is not None:
self._print_error(stream, 'error', self._error)
stream.write('</testcase>\n')
def _print_error(self, stream, tagname, error):
"""Print information from a failure or error to the supplied stream."""
text = escape(str(error[1]))
stream.write('\n')
stream.write(' <%s type="%s">%s\n' \
% (tagname, _clsname(error[0]), text))
tb_stream = StringIO()
traceback.print_tb(error[2], None, tb_stream)
stream.write(escape(tb_stream.getvalue()))
stream.write(' </%s>\n' % tagname)
stream.write(' ')
def _clsname(cls):
return cls.__module__ + "." + cls.__name__
class _XMLTestResult(unittest.TestResult):
"""A test result class that stores result as XML.
Used by XMLTestRunner.
"""
def __init__(self, classname):
unittest.TestResult.__init__(self)
self._test_name = classname
self._start_time = None
self._tests = []
self._error = None
self._failure = None
def startTest(self, test):
unittest.TestResult.startTest(self, test)
self._error = None
self._failure = None
self._start_time = time.time()
def stopTest(self, test):
time_taken = time.time() - self._start_time
unittest.TestResult.stopTest(self, test)
if self._error:
info = _TestInfo.create_error(test, time_taken, self._error)
elif self._failure:
info = _TestInfo.create_failure(test, time_taken, self._failure)
else:
info = _TestInfo.create_success(test, time_taken)
self._tests.append(info)
def addError(self, test, err):
unittest.TestResult.addError(self, test, err)
self._error = err
def addFailure(self, test, err):
unittest.TestResult.addFailure(self, test, err)
self._failure = err
def print_report(self, stream, time_taken, out, err):
"""Prints the XML report to the supplied stream.
The time the tests took to perform as well as the captured standard
output and standard error streams must be passed in.
"""
stream.write('<testsuite errors="%(e)d" failures="%(f)d" ' % \
{"e": len(self.errors), "f": len(self.failures)})
stream.write('name="%(n)s" tests="%(t)d" time="%(time).3f">\n' % \
{"n": self._test_name, "t": self.testsRun, "time": time_taken})
for info in self._tests:
info.print_report(stream)
stream.write(' <system-out><![CDATA[%s]]></system-out>\n' % out)
stream.write(' <system-err><![CDATA[%s]]></system-err>\n' % err)
stream.write('</testsuite>\n')
class XMLTestRunner(object):
"""A test runner that stores results in XML format compatible with JUnit.
XMLTestRunner(stream=None) -> XML test runner
The XML file is written to the supplied stream. If stream is None, the
results are stored in a file called TEST-<module>.<class>.xml in the
current working directory (if not overridden with the path property),
where <module> and <class> are the module and class name of the test class.
"""
def __init__(self, stream=None):
self._stream = stream
self._path = "."
def run(self, test):
"""Run the given test case or test suite."""
class_ = test.__class__
classname = class_.__module__ + "." + class_.__name__
        if self._stream is None:
filename = "TEST-%s.xml" % classname
stream = file(os.path.join(self._path, filename), "w")
else:
stream = self._stream
stream.write('<?xml version="1.0" encoding="utf-8"?>\n')
result = _XMLTestResult(classname)
start_time = time.time()
with _fake_std_streams():
test(result)
try:
out_s = sys.stdout.getvalue()
except AttributeError:
out_s = ""
try:
err_s = sys.stderr.getvalue()
except AttributeError:
err_s = ""
time_taken = time.time() - start_time
result.print_report(stream, time_taken, out_s, err_s)
if self._stream is None:
stream.close()
return result
def _set_path(self, path):
self._path = path
path = property(lambda self: self._path, _set_path, None,
"""The path where the XML files are stored.
This property is ignored when the XML file is written to a file
stream.""")
class _fake_std_streams(object):
def __enter__(self):
self._orig_stdout = sys.stdout
self._orig_stderr = sys.stderr
sys.stdout = StringIO()
sys.stderr = StringIO()
def __exit__(self, exc_type, exc_val, exc_tb):
sys.stdout = self._orig_stdout
sys.stderr = self._orig_stderr
class XMLTestRunnerTest(unittest.TestCase):
def setUp(self):
self._stream = StringIO()
def _try_test_run(self, test_class, expected):
"""Run the test suite against the supplied test class and compare the
XML result against the expected XML string. Fail if the expected
string doesn't match the actual string. All time attributes in the
expected string should have the value "0.000". All error and failure
messages are reduced to "Foobar".
"""
runner = XMLTestRunner(self._stream)
runner.run(unittest.makeSuite(test_class))
got = self._stream.getvalue()
# Replace all time="X.YYY" attributes by time="0.000" to enable a
# simple string comparison.
got = re.sub(r'time="\d+\.\d+"', 'time="0.000"', got)
# Likewise, replace all failure and error messages by a simple "Foobar"
# string.
got = re.sub(r'(?s)<failure (.*?)>.*?</failure>', r'<failure \1>Foobar</failure>', got)
got = re.sub(r'(?s)<error (.*?)>.*?</error>', r'<error \1>Foobar</error>', got)
# And finally Python 3 compatibility.
got = got.replace('type="builtins.', 'type="exceptions.')
self.assertEqual(expected, got)
def test_no_tests(self):
"""
Regression test: Check whether a test run without any tests
matches a previous run.
"""
class TestTest(unittest.TestCase):
pass
self._try_test_run(TestTest, """<testsuite errors="0" failures="0" name="unittest.TestSuite" tests="0" time="0.000">
<system-out><![CDATA[]]></system-out>
<system-err><![CDATA[]]></system-err>
</testsuite>
""")
def test_success(self):
"""
Regression test: Check whether a test run with a successful test
matches a previous run.
"""
class TestTest(unittest.TestCase):
def test_foo(self):
pass
self._try_test_run(TestTest, """<testsuite errors="0" failures="0" name="unittest.TestSuite" tests="1" time="0.000">
<testcase classname="__main__.TestTest" name="test_foo" time="0.000"></testcase>
<system-out><![CDATA[]]></system-out>
<system-err><![CDATA[]]></system-err>
</testsuite>
""")
def test_failure(self):
"""
Regression test: Check whether a test run with a failing test
matches a previous run.
"""
class TestTest(unittest.TestCase):
def test_foo(self):
self.assert_(False)
self._try_test_run(TestTest, """<testsuite errors="0" failures="1" name="unittest.TestSuite" tests="1" time="0.000">
<testcase classname="__main__.TestTest" name="test_foo" time="0.000">
<failure type="exceptions.AssertionError">Foobar</failure>
</testcase>
<system-out><![CDATA[]]></system-out>
<system-err><![CDATA[]]></system-err>
</testsuite>
""")
def test_error(self):
"""
Regression test: Check whether a test run with a erroneous test
matches a previous run.
"""
class TestTest(unittest.TestCase):
def test_foo(self):
raise IndexError()
self._try_test_run(TestTest, """<testsuite errors="1" failures="0" name="unittest.TestSuite" tests="1" time="0.000">
<testcase classname="__main__.TestTest" name="test_foo" time="0.000">
<error type="exceptions.IndexError">Foobar</error>
</testcase>
<system-out><![CDATA[]]></system-out>
<system-err><![CDATA[]]></system-err>
</testsuite>
""")
def test_stdout_capture(self):
"""
Regression test: Check whether a test run with output to stdout
matches a previous run.
"""
class TestTest(unittest.TestCase):
def test_foo(self):
sys.stdout.write("Test\n")
self._try_test_run(TestTest, """<testsuite errors="0" failures="0" name="unittest.TestSuite" tests="1" time="0.000">
<testcase classname="__main__.TestTest" name="test_foo" time="0.000"></testcase>
<system-out><![CDATA[Test
]]></system-out>
<system-err><![CDATA[]]></system-err>
</testsuite>
""")
def test_stderr_capture(self):
"""
Regression test: Check whether a test run with output to stderr
matches a previous run.
"""
class TestTest(unittest.TestCase):
def test_foo(self):
sys.stderr.write("Test\n")
self._try_test_run(TestTest, """<testsuite errors="0" failures="0" name="unittest.TestSuite" tests="1" time="0.000">
<testcase classname="__main__.TestTest" name="test_foo" time="0.000"></testcase>
<system-out><![CDATA[]]></system-out>
<system-err><![CDATA[Test
]]></system-err>
</testsuite>
""")
class NullStream(object):
"""A file-like object that discards everything written to it."""
def write(self, buffer):
pass
def test_unittests_changing_stdout(self):
"""
Check whether the XMLTestRunner recovers gracefully from unit tests
that change stdout, but don't change it back properly.
"""
class TestTest(unittest.TestCase):
def test_foo(self):
sys.stdout = XMLTestRunnerTest.NullStream()
runner = XMLTestRunner(self._stream)
runner.run(unittest.makeSuite(TestTest))
def test_unittests_changing_stderr(self):
"""
Check whether the XMLTestRunner recovers gracefully from unit tests
that change stderr, but don't change it back properly.
"""
class TestTest(unittest.TestCase):
def test_foo(self):
sys.stderr = XMLTestRunnerTest.NullStream()
runner = XMLTestRunner(self._stream)
runner.run(unittest.makeSuite(TestTest))
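# Editor's note: illustrative usage sketch. It runs a trivial test case and
# captures the JUnit-style XML report in a StringIO buffer instead of a file.
def _example_xmlrunner_usage():
    class _Trivial(unittest.TestCase):
        def test_ok(self):
            pass
    buf = StringIO()
    XMLTestRunner(buf).run(unittest.makeSuite(_Trivial))
    return buf.getvalue()  # the XML report as a string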
if __name__ == "__main__":
unittest.main()
| mit |
ijks/textinator | tests/test_functions.py | 1 | 1611 | import pytest
from textinator import build_lines, value_to_char, calculate_size
@pytest.fixture(scope='module')
def image():
from PIL import Image
im = Image.open('tests/images/doge.jpg')
return im.convert('RGB')
@pytest.fixture(scope='module')
def expected_out():
return open('tests/images/doge.txt')
def test_build_lines(image, expected_out):
result_out = build_lines(image, '01', False, False)
for expected_line, result_line in zip(expected_out, result_out):
result_line += '\n'
# click adds newlines later
assert len(expected_line) == len(result_line)
assert expected_line == result_line
def test_value_to_char():
assert value_to_char((50, 200, 150), 'abcdef', colour=True) \
== '\x1b[38;5;79md\x1b[0m'
assert value_to_char((50, 200, 150), 'abcdef', colour=True, background=True) \
== '\x1b[48;5;79md\x1b[0m'
assert value_to_char([100]*3, 'abcdefghijk') == 'e'
assert value_to_char([192]*3, 'abcdefghijk') == 'i'
with pytest.raises(TypeError):
value_to_char(200, 'abcdef')
value_to_char((2, 4, 5, 6), 'abcdef')
def test_calculate_size():
    # Width known, height unknown
assert calculate_size((1920, 1080), (20, None)) == (20, 11)
assert calculate_size((500, 1240), (200, None)) == (200, 496)
# Height known, width unknown
assert calculate_size((1024, 768), (None, 413)) == (550, 413)
assert calculate_size((10, 670), (None, 800)) == (11, 800)
# Width and height known
assert calculate_size((500, 600), (42, 612)) == (42, 612)
| mit |
bratsche/Neutron-Drive | neutron-drive/django/contrib/localflavor/ro/ro_counties.py | 428 | 1231 | # -*- coding: utf-8 -*-
"""
A list of Romanian counties as `choices` in a formfield.
This exists as a standalone file so that it's only imported into memory when
explicitly needed.
"""
COUNTIES_CHOICES = (
('AB', u'Alba'),
('AR', u'Arad'),
('AG', u'Argeş'),
('BC', u'Bacău'),
('BH', u'Bihor'),
('BN', u'Bistriţa-Năsăud'),
('BT', u'Botoşani'),
('BV', u'Braşov'),
('BR', u'Brăila'),
('B', u'Bucureşti'),
('BZ', u'Buzău'),
('CS', u'Caraş-Severin'),
('CL', u'Călăraşi'),
('CJ', u'Cluj'),
('CT', u'Constanţa'),
('CV', u'Covasna'),
('DB', u'Dâmboviţa'),
('DJ', u'Dolj'),
('GL', u'Galaţi'),
('GR', u'Giurgiu'),
('GJ', u'Gorj'),
('HR', u'Harghita'),
('HD', u'Hunedoara'),
('IL', u'Ialomiţa'),
('IS', u'Iaşi'),
('IF', u'Ilfov'),
('MM', u'Maramureş'),
('MH', u'Mehedinţi'),
('MS', u'Mureş'),
('NT', u'Neamţ'),
('OT', u'Olt'),
('PH', u'Prahova'),
('SM', u'Satu Mare'),
('SJ', u'Sălaj'),
('SB', u'Sibiu'),
('SV', u'Suceava'),
('TR', u'Teleorman'),
('TM', u'Timiş'),
('TL', u'Tulcea'),
('VS', u'Vaslui'),
('VL', u'Vâlcea'),
('VN', u'Vrancea'),
)
| bsd-3-clause |
guludo/ardupilot-1 | Tools/LogAnalyzer/tests/TestVibration.py | 261 | 3069 | from LogAnalyzer import Test,TestResult
import DataflashLog
import numpy
class TestVibration(Test):
'''test for accelerometer vibration (accX/accY/accZ) within recommendations'''
def __init__(self):
Test.__init__(self)
self.name = "Vibration"
def run(self, logdata, verbose):
self.result = TestResult()
if logdata.vehicleType != "ArduCopter":
self.result.status = TestResult.StatusType.NA
return
# constants
gravity = -9.81
aimRangeWarnXY = 1.5
aimRangeFailXY = 3.0
aimRangeWarnZ = 2.0 # gravity +/- aim range
aimRangeFailZ = 5.0 # gravity +/- aim range
if not "IMU" in logdata.channels:
self.result.status = TestResult.StatusType.UNKNOWN
self.result.statusMessage = "No IMU log data"
return
# find some stable LOITER data to analyze, at least 10 seconds
chunks = DataflashLog.DataflashLogHelper.findLoiterChunks(logdata, minLengthSeconds=10, noRCInputs=True)
if not chunks:
self.result.status = TestResult.StatusType.UNKNOWN
self.result.statusMessage = "No stable LOITER log data found"
return
# for now we'll just use the first (largest) chunk of LOITER data
# TODO: ignore the first couple of secs to avoid bad data during transition - or can we check more analytically that we're stable?
# TODO: accumulate all LOITER chunks over min size, or just use the largest one?
startLine = chunks[0][0]
endLine = chunks[0][1]
#print "TestVibration using LOITER chunk from lines %s to %s" % (`startLine`, `endLine`)
        def getStdDevIMU(logdata, channelName, startLine, endLine):
            loiterData = logdata.channels["IMU"][channelName].getSegment(startLine, endLine)
numpyData = numpy.array(loiterData.dictData.values())
return numpy.std(numpyData)
# use 2x standard deviations as the metric, so if 95% of samples lie within the aim range we're good
stdDevX = abs(2 * getStdDevIMU(logdata,"AccX",startLine,endLine))
stdDevY = abs(2 * getStdDevIMU(logdata,"AccY",startLine,endLine))
stdDevZ = abs(2 * getStdDevIMU(logdata,"AccZ",startLine,endLine))
if (stdDevX > aimRangeFailXY) or (stdDevY > aimRangeFailXY) or (stdDevZ > aimRangeFailZ):
self.result.status = TestResult.StatusType.FAIL
self.result.statusMessage = "Vibration too high (X:%.2fg, Y:%.2fg, Z:%.2fg)" % (stdDevX,stdDevY,stdDevZ)
elif (stdDevX > aimRangeWarnXY) or (stdDevY > aimRangeWarnXY) or (stdDevZ > aimRangeWarnZ):
self.result.status = TestResult.StatusType.WARN
self.result.statusMessage = "Vibration slightly high (X:%.2fg, Y:%.2fg, Z:%.2fg)" % (stdDevX,stdDevY,stdDevZ)
else:
self.result.status = TestResult.StatusType.GOOD
self.result.statusMessage = "Good vibration values (X:%.2fg, Y:%.2fg, Z:%.2fg)" % (stdDevX,stdDevY,stdDevZ)
| gpl-3.0 |
hazmatzo/oppia | extensions/gadgets/ScoreBar/ScoreBar.py | 16 | 2130 | # coding: utf-8
#
# Copyright 2014 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__author__ = 'Michael Anuzis'
from extensions.gadgets import base
class ScoreBar(base.BaseGadget):
"""Gadget for providing a ScoreBar."""
name = 'ScoreBar'
description = 'A visual score bar that can represent progress or success.'
_dependency_ids = []
_customization_arg_specs = [
{
'name': 'title',
'description': 'Optional title for the score bar (e.g. "Score")',
'schema': {
'type': 'unicode',
},
'default_value': 'Score'
}, {
'name': 'maxValue',
'description': 'Maximum value (bar fills as a % of this value)',
'schema': {
'type': 'int',
},
'default_value': 100
}, {
# TODO(anuzis): Validate that the paramName corresponds to a valid
# parameter name in the exploration.
'name': 'paramName',
'description': 'The parameter name this score bar follows.',
'schema': {
'type': 'unicode',
},
'default_value': ''
}
]
# Constants for height and width.
# Static placeholders for now.
_HEIGHT = 100
_WIDTH = 250
def get_width(self, customization_args):
"""Returns integer representing width in pixels."""
return self._WIDTH
def get_height(self, customization_args):
"""Returns integer representing height in pixels."""
return self._HEIGHT
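# Editor's note: illustrative helper (not part of the gadget API) showing how
# default values can be collected from _customization_arg_specs above:
def _example_customization_defaults(specs):
    return dict((spec['name'], spec['default_value']) for spec in specs)
# _example_customization_defaults(ScoreBar._customization_arg_specs)
# -> {'title': 'Score', 'maxValue': 100, 'paramName': ''}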
| apache-2.0 |
mikewiebe-ansible/ansible | test/units/modules/network/check_point/test_cp_mgmt_service_tcp_facts.py | 19 | 2854 | # Ansible module to manage CheckPoint Firewall (c) 2019
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import absolute_import, division, print_function
__metaclass__ = type
import pytest
from units.modules.utils import set_module_args, exit_json, fail_json, AnsibleExitJson
from ansible.module_utils import basic
from ansible.modules.network.check_point import cp_mgmt_service_tcp_facts
OBJECT = {
"from": 1,
"to": 1,
"total": 6,
"objects": [
"53de74b7-8f19-4cbe-99fc-a81ef0759bad"
]
}
SHOW_PLURAL_PAYLOAD = {
'limit': 1,
'details_level': 'uid'
}
SHOW_SINGLE_PAYLOAD = {
'name': 'object_which_is_not_exist'
}
api_call_object = 'service-tcp'
api_call_object_plural_version = 'services-tcp'
failure_msg = '''{u'message': u'Requested object [object_which_is_not_exist] not found', u'code': u'generic_err_object_not_found'}'''
class TestCheckpointServiceTcpFacts(object):
module = cp_mgmt_service_tcp_facts
@pytest.fixture(autouse=True)
def module_mock(self, mocker):
return mocker.patch.multiple(basic.AnsibleModule, exit_json=exit_json, fail_json=fail_json)
@pytest.fixture
def connection_mock(self, mocker):
connection_class_mock = mocker.patch('ansible.module_utils.network.checkpoint.checkpoint.Connection')
return connection_class_mock.return_value
def test_show_single_object_which_is_not_exist(self, mocker, connection_mock):
connection_mock.send_request.return_value = (404, failure_msg)
try:
result = self._run_module(SHOW_SINGLE_PAYLOAD)
except Exception as e:
result = e.args[0]
assert result['failed']
assert 'Checkpoint device returned error 404 with message ' + failure_msg == result['msg']
def test_show_few_objects(self, mocker, connection_mock):
connection_mock.send_request.return_value = (200, OBJECT)
result = self._run_module(SHOW_PLURAL_PAYLOAD)
assert not result['changed']
assert OBJECT == result['ansible_facts'][api_call_object_plural_version]
def _run_module(self, module_args):
set_module_args(module_args)
with pytest.raises(AnsibleExitJson) as ex:
self.module.main()
return ex.value.args[0]
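# Editor's note: illustrative, self-contained sketch of the exit-capture
# pattern _run_module relies on: exit_json normally terminates the module, so
# the test harness patches it to raise a sentinel carrying the result dict.
def _example_exit_capture():
    class _ExitJson(Exception):
        pass
    def fake_exit_json(**result):
        raise _ExitJson(result)
    try:
        fake_exit_json(changed=False, msg='done')
    except _ExitJson as ex:
        return ex.args[0]  # {'changed': False, 'msg': 'done'}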
| gpl-3.0 |
cesarmarinhorj/phantomjs | src/qt/qtwebkit/Tools/QueueStatusServer/model/workitems_unittest.py | 125 | 2015 | # Copyright (C) 2010 Google, Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Research in Motion Ltd. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import unittest
from model.workitems import WorkItems
class WorkItemsTest(unittest.TestCase):
def test_display_position_for_attachment(self):
items = WorkItems()
items.item_ids = [0, 1, 2]
self.assertEqual(items.display_position_for_attachment(0), 1)
self.assertEqual(items.display_position_for_attachment(1), 2)
self.assertEqual(items.display_position_for_attachment(3), None)
if __name__ == '__main__':
unittest.main()
| bsd-3-clause |
hyperized/ansible | lib/ansible/modules/cloud/vmware/vmware_dvs_portgroup_find.py | 23 | 6212 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {
'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'
}
DOCUMENTATION = r'''
---
module: vmware_dvs_portgroup_find
short_description: Find portgroup(s) in a VMware environment
description:
- Find portgroup(s) based on different criteria such as distributed vSwitch, VLAN id or a string in the name.
version_added: 2.9
author:
- David Martinez (@dx0xm)
notes:
- Tested on vSphere 6.5
requirements:
- python >= 2.7
- PyVmomi
options:
dvswitch:
description:
- Name of a distributed vSwitch to look for.
type: str
vlanid:
description:
- VLAN id can be any number between 1 and 4094.
    - This search criterion also looks into VLAN ranges to find possible matches.
required: false
type: int
name:
description:
    - String to check inside the name of the portgroup.
    - Basic containment check using the Python C(in) operator.
type: str
show_uplink:
description:
- Show or hide uplink portgroups.
- Only relevant when C(vlanid) is supplied.
type: bool
default: False
extends_documentation_fragment: vmware.documentation
'''
EXAMPLES = r'''
- name: Get all portgroups in dvswitch vDS
vmware_dvs_portgroup_find:
hostname: "{{ vcenter_hostname }}"
username: "{{ vcenter_username }}"
password: "{{ vcenter_password }}"
dvswitch: 'vDS'
validate_certs: no
delegate_to: localhost
- name: Confirm if vlan 15 is present
vmware_dvs_portgroup_find:
hostname: "{{ vcenter_hostname }}"
username: "{{ vcenter_username }}"
password: "{{ vcenter_password }}"
vlanid: '15'
validate_certs: no
delegate_to: localhost
'''
RETURN = r'''
dvs_portgroups:
description: basic details of portgroups found
returned: on success
type: list
sample: [
{
"dvswitch": "vDS",
"name": "N-51",
"pvlan": true,
"trunk": true,
"vlan_id": "0"
}
]
'''
try:
from pyVmomi import vim
except ImportError as e:
pass
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.vmware import vmware_argument_spec, PyVmomi, find_dvs_by_name
class DVSPortgroupFindManager(PyVmomi):
def __init__(self, module):
super(DVSPortgroupFindManager, self).__init__(module)
self.dvs_name = self.params['dvswitch']
self.vlan = self.params['vlanid']
self.cmp_vlans = True if self.vlan else False
self.pgs = self.find_portgroups_by_name(self.content, self.module.params['name'])
if self.dvs_name:
self.pgs = self.find_portgroups_by_dvs(self.pgs, self.dvs_name)
def find_portgroups_by_name(self, content, name=None):
vimtype = [vim.dvs.DistributedVirtualPortgroup]
container = content.viewManager.CreateContainerView(content.rootFolder, vimtype, True)
if not name:
obj = container.view
else:
obj = []
for c in container.view:
if name in c.name:
obj.append(c)
return obj
def find_portgroups_by_dvs(self, pgl, dvs):
obj = []
for c in pgl:
if dvs in c.config.distributedVirtualSwitch.name:
obj.append(c)
return obj
def vlan_match(self, pgup, userup, vlanlst):
res = False
if pgup and userup:
return True
for ln in vlanlst:
if '-' in ln:
arr = ln.split('-')
                if int(arr[0]) <= self.vlan <= int(arr[1]):
res = True
elif ln == str(self.vlan):
res = True
return res
def get_dvs_portgroup(self):
pgroups = self.pgs
pglist = []
for pg in pgroups:
trunk = False
pvlan = False
vlanInfo = pg.config.defaultPortConfig.vlan
cl1 = vim.dvs.VmwareDistributedVirtualSwitch.TrunkVlanSpec
cl2 = vim.dvs.VmwareDistributedVirtualSwitch.PvlanSpec
vlan_id_list = []
if isinstance(vlanInfo, cl1):
trunk = True
for item in vlanInfo.vlanId:
if item.start == item.end:
vlan_id_list.append(str(item.start))
else:
vlan_id_list.append(str(item.start) + '-' + str(item.end))
elif isinstance(vlanInfo, cl2):
pvlan = True
vlan_id_list.append(str(vlanInfo.pvlanId))
else:
vlan_id_list.append(str(vlanInfo.vlanId))
if self.cmp_vlans:
if self.vlan_match(pg.config.uplink, self.module.params['show_uplink'], vlan_id_list):
pglist.append(dict(
name=pg.name,
trunk=trunk,
pvlan=pvlan,
vlan_id=','.join(vlan_id_list),
dvswitch=pg.config.distributedVirtualSwitch.name))
else:
pglist.append(dict(
name=pg.name,
trunk=trunk,
pvlan=pvlan,
vlan_id=','.join(vlan_id_list),
dvswitch=pg.config.distributedVirtualSwitch.name))
return pglist
def main():
argument_spec = vmware_argument_spec()
argument_spec.update(
dvswitch=dict(type='str', required=False),
vlanid=dict(type='int', required=False),
name=dict(type='str', required=False),
show_uplink=dict(type='bool', default=False),
)
module = AnsibleModule(
argument_spec=argument_spec,
supports_check_mode=True,
required_if=[
['show_uplink', 'True', 'vlanid']
]
)
dvs_pg_mgr = DVSPortgroupFindManager(module)
module.exit_json(changed=False,
dvs_portgroups=dvs_pg_mgr.get_dvs_portgroup())
if __name__ == "__main__":
main()
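# Editor's note: illustrative, standalone check of the inclusive numeric range
# matching performed by vlan_match() above; entries mirror the "start-end"
# strings built in get_dvs_portgroup().
def _example_vlan_in_ranges(vlan, ranges):
    for entry in ranges:
        if '-' in entry:
            low, high = entry.split('-')
            if int(low) <= vlan <= int(high):
                return True
        elif entry == str(vlan):
            return True
    return False
assert _example_vlan_in_ranges(15, ['1-10', '12-20'])
assert not _example_vlan_in_ranges(11, ['1-10', '12-20'])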
| gpl-3.0 |
ksmaheshkumar/FunkLoad | src/funkload/BenchRunner.py | 3 | 35815 | #!/usr/bin/python
# (C) Copyright 2005-2010 Nuxeo SAS <http://nuxeo.com>
# Author: bdelbosc@nuxeo.com
# Contributors: Tom Lazar
# Goutham Bhat
# Andrew McFague
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 as published
# by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA
# 02111-1307, USA.
#
"""FunkLoad Bench runner.
$Id: BenchRunner.py 24746 2005-08-31 09:59:27Z bdelbosc $
"""
import os
import platform
import sys
import threading
import time
import traceback
import unittest
from datetime import datetime
from optparse import OptionParser, TitledHelpFormatter
from socket import error as SocketError
from thread import error as ThreadError
from xmlrpclib import ServerProxy, Fault
import signal
from FunkLoadTestCase import FunkLoadTestCase
from FunkLoadHTTPServer import FunkLoadHTTPServer
from utils import mmn_encode, set_recording_flag, recording, thread_sleep, \
trace, red_str, green_str, get_version
try:
from funkload.rtfeedback import (FeedbackSender, DEFAULT_ENDPOINT,
DEFAULT_PUBSUB)
LIVE_FEEDBACK = True
except ImportError:
LIVE_FEEDBACK = False
DEFAULT_PUBSUB = DEFAULT_ENDPOINT = None
USAGE = """%prog [options] file class.method
%prog launches a FunkLoad unit test as a load test.
A FunkLoad unittest uses a configuration file named [class].conf. This
configuration may be overridden by the command line options.
See http://funkload.nuxeo.org/ for more information.
Examples
========
%prog myFile.py MyTestCase.testSomething
%prog my_module MyTestCase.testSomething
Bench MyTestCase.testSomething using MyTestCase.conf.
%prog -u http://localhost:8080 -c 10:20 -D 30 myFile.py \\
MyTestCase.testSomething
Bench MyTestCase.testSomething on localhost:8080
with 2 cycles of 10 and 20 users for a duration of 30s.
%prog -h
More options.
Alternative Usage:
%prog discover [options]
Discover test modules in the current directory and
bench all of them.
"""
try:
import psyco
psyco.full()
except ImportError:
pass
# ------------------------------------------------------------
# utils
#
g_failures = 0 # result of the bench
g_errors = 0 # result of the bench
g_success = 0
def add_cycle_result(status):
"""Count number of result."""
# XXX use a thread.lock, but we don't mind if it is not accurate
# as the report use the xml log
global g_success, g_failures, g_errors
if status == 'success':
g_success += 1
elif status == 'error':
g_errors += 1
else:
g_failures += 1
return g_success, g_errors, g_failures
def get_cycle_results():
"""Return counters."""
global g_success, g_failures, g_errors
return g_success, g_failures, g_errors
def get_status(success, failures, errors, color=False):
"""Return a status and an exit code."""
if errors:
status = 'ERROR'
if color:
status = red_str(status)
code = -1
elif failures:
status = 'FAILURE'
if color:
status = red_str(status)
code = 1
else:
status = 'SUCCESSFUL'
if color:
status = green_str(status)
code = 0
return status, code
def reset_cycle_results():
"""Clear the previous results."""
global g_success, g_failures, g_errors
g_success = g_failures = g_errors = 0
def load_module(test_module):
module = __import__(test_module)
parts = test_module.split('.')[1:]
while parts:
part = parts.pop()
module = getattr(module, part)
return module
def load_unittest(test_module, test_class, test_name, options):
"""instantiate a unittest."""
module = load_module(test_module)
klass = getattr(module, test_class)
return klass(test_name, options)
class ThreadSignaller:
"""
A simple class to signal whether a thread should continue running or stop.
"""
def __init__(self):
self.keep_running = True
def running(self):
return self.keep_running
def set_running(self, val):
self.keep_running = val
class ThreadData:
"""Container for thread related data."""
def __init__(self, thread, thread_id, thread_signaller):
self.thread = thread
self.thread_id = thread_id
self.thread_signaller = thread_signaller
# ------------------------------------------------------------
# Classes
#
class LoopTestRunner(threading.Thread):
"""Run a unit test in loop."""
def __init__(self, test_module, test_class, test_name, options,
cycle, cvus, thread_id, thread_signaller, sleep_time,
debug=False, feedback=None):
meta_method_name = mmn_encode(test_name, cycle, cvus, thread_id)
threading.Thread.__init__(self, target=self.run, name=meta_method_name,
args=())
self.test = load_unittest(test_module, test_class, meta_method_name,
options)
if sys.platform.lower().startswith('win'):
self.color = False
else:
self.color = not options.no_color
self.sleep_time = sleep_time
self.debug = debug
self.thread_signaller = thread_signaller
        # this makes threads end if the main thread stops with a KeyboardInterrupt
self.setDaemon(1)
self.feedback = feedback
def run(self):
"""Run a test in loop."""
while (self.thread_signaller.running()):
test_result = unittest.TestResult()
self.test.clearContext()
self.test(test_result)
feedback = {}
if test_result.wasSuccessful():
if recording():
feedback['count'] = add_cycle_result('success')
if self.color:
trace(green_str('.'))
else:
trace('.')
feedback['result'] = 'success'
else:
if len(test_result.errors):
if recording():
feedback['count'] = add_cycle_result('error')
if self.color:
trace(red_str('E'))
else:
trace('E')
feedback['result'] = 'error'
else:
if recording():
feedback['count'] = add_cycle_result('failure')
if self.color:
trace(red_str('F'))
else:
trace('F')
feedback['result'] = 'failure'
if self.debug:
feedback['errors'] = test_result.errors
feedback['failures'] = test_result.failures
for (test, error) in test_result.errors:
trace("ERROR %s: %s" % (str(test), str(error)))
for (test, error) in test_result.failures:
trace("FAILURE %s: %s" % (str(test), str(error)))
if self.feedback is not None:
self.feedback.test_done(feedback)
thread_sleep(self.sleep_time)
class BenchRunner:
"""Run a unit test in bench mode."""
def __init__(self, module_name, class_name, method_name, options):
self.module_name = module_name
self.class_name = class_name
self.method_name = method_name
self.options = options
self.color = not options.no_color
# create a unittest to get the configuration file
test = load_unittest(self.module_name, class_name,
mmn_encode(method_name, 0, 0, 0), options)
self.config_path = test._config_path
self.result_path = test.result_path
self.class_title = test.conf_get('main', 'title')
self.class_description = test.conf_get('main', 'description')
self.test_id = self.method_name
self.test_description = test.conf_get(self.method_name, 'description',
'No test description')
self.test_url = test.conf_get('main', 'url')
self.cycles = map(int, test.conf_getList('bench', 'cycles'))
self.duration = test.conf_getInt('bench', 'duration')
self.startup_delay = test.conf_getFloat('bench', 'startup_delay')
self.cycle_time = test.conf_getFloat('bench', 'cycle_time')
self.sleep_time = test.conf_getFloat('bench', 'sleep_time')
self.sleep_time_min = test.conf_getFloat('bench', 'sleep_time_min')
self.sleep_time_max = test.conf_getFloat('bench', 'sleep_time_max')
self.threads = [] # Contains list of ThreadData objects
self.last_thread_id = -1
self.thread_creation_lock = threading.Lock()
# setup monitoring
monitor_hosts = [] # list of (host, port, descr)
if not options.is_distributed:
hosts = test.conf_get('monitor', 'hosts', '', quiet=True).split()
for host in hosts:
name = host
host = test.conf_get(host,'host',host.strip())
monitor_hosts.append((name, host, test.conf_getInt(name, 'port'),
test.conf_get(name, 'description', '')))
self.monitor_hosts = monitor_hosts
# keep the test to use the result logger for monitoring
# and call setUp/tearDown Cycle
self.test = test
# set up the feedback sender
if LIVE_FEEDBACK and options.is_distributed and options.feedback:
trace("* Creating Feedback sender")
self.feedback = FeedbackSender(endpoint=options.feedback_endpoint or
DEFAULT_ENDPOINT)
else:
self.feedback = None
def run(self):
"""Run all the cycles.
return 0 on success, 1 if there were some failures and -1 on errors."""
trace(str(self))
trace("Benching\n")
trace("========\n\n")
cycle = total_success = total_failures = total_errors = 0
self.logr_open()
trace("* setUpBench hook: ...")
self.test.setUpBench()
trace(' done.\n')
self.getMonitorsConfig()
trace('\n')
for cvus in self.cycles:
t_start = time.time()
reset_cycle_results()
text = "Cycle #%i with %s virtual users\n" % (cycle, cvus)
trace(text)
trace('-' * (len(text) - 1) + "\n\n")
monitor_key = '%s:%s:%s' % (self.method_name, cycle, cvus)
trace("* setUpCycle hook: ...")
self.test.setUpCycle()
trace(' done.\n')
self.startMonitors(monitor_key)
self.startThreads(cycle, cvus)
self.logging(cycle, cvus)
#self.dumpThreads()
self.stopThreads()
self.stopMonitors(monitor_key)
cycle += 1
trace("* tearDownCycle hook: ...")
self.test.tearDownCycle()
trace(' done.\n')
t_stop = time.time()
trace("* End of cycle, %.2fs elapsed.\n" % (t_stop - t_start))
success, failures, errors = get_cycle_results()
status, code = get_status(success, failures, errors, self.color)
trace("* Cycle result: **%s**, "
"%i success, %i failure, %i errors.\n\n" % (
status, success, failures, errors))
total_success += success
total_failures += failures
total_errors += errors
trace("* tearDownBench hook: ...")
self.test.tearDownBench()
trace(' done.\n\n')
self.logr_close()
# display bench result
trace("Result\n")
trace("======\n\n")
trace("* Success: %s\n" % total_success)
trace("* Failures: %s\n" % total_failures)
trace("* Errors: %s\n\n" % total_errors)
status, code = get_status(total_success, total_failures, total_errors)
trace("Bench status: **%s**\n" % status)
return code
def createThreadId(self):
self.last_thread_id += 1
return self.last_thread_id
def startThreads(self, cycle, number_of_threads):
"""Starts threads."""
self.thread_creation_lock.acquire()
try:
trace("* Current time: %s\n" % datetime.now().isoformat())
trace("* Starting threads: ")
set_recording_flag(False)
threads = self.createThreads(cycle, number_of_threads)
self.threads.extend(threads)
finally:
set_recording_flag(True)
self.thread_creation_lock.release()
def addThreads(self, number_of_threads):
"""Adds new threads to existing list. Used to dynamically add new
threads during a debug bench run."""
self.thread_creation_lock.acquire()
try:
trace("Adding new threads: ")
set_recording_flag(False)
# In debug bench, 'cycle' value is irrelevant.
threads = self.createThreads(0, number_of_threads)
self.threads.extend(threads)
finally:
set_recording_flag(True)
self.thread_creation_lock.release()
def createThreads(self, cycle, number_of_threads):
"""Creates number_of_threads threads and returns as a list.
NOTE: This method is not thread safe. Thread safety must be
handled by the caller."""
threads = []
i = 0
for i in range(number_of_threads):
thread_id = self.createThreadId()
thread_signaller = ThreadSignaller()
thread = LoopTestRunner(self.module_name, self.class_name,
self.method_name, self.options,
cycle, number_of_threads,
thread_id, thread_signaller,
self.sleep_time,
feedback=self.feedback)
trace(".")
try:
thread.start()
except ThreadError:
trace("\nERROR: Can not create more than %i threads, try a "
"smaller stack size using: 'ulimit -s 2048' "
"for example\n" % (i + 1))
raise
thread_data = ThreadData(thread, thread_id, thread_signaller)
threads.append(thread_data)
thread_sleep(self.startup_delay)
trace(' done.\n')
return threads
def logging(self, cycle, cvus):
"""Log activity during duration."""
duration = self.duration
end_time = time.time() + duration
mid_time = time.time() + duration / 2
trace("* Logging for %ds (until %s): " % (
duration, datetime.fromtimestamp(end_time).isoformat()))
set_recording_flag(True)
while time.time() < mid_time:
time.sleep(1)
self.test.midCycle(cycle, cvus)
while time.time() < end_time:
# wait
time.sleep(1)
set_recording_flag(False)
trace(" done.\n")
def stopThreads(self):
"""Stops all running threads."""
self.thread_creation_lock.acquire()
try:
trace("* Waiting end of threads: ")
self.deleteThreads(len(self.threads))
self.threads = []
trace(" done.\n")
trace("* Waiting cycle sleeptime %ds: ..." % self.cycle_time)
time.sleep(self.cycle_time)
trace(" done.\n")
self.last_thread_id = -1
finally:
self.thread_creation_lock.release()
def removeThreads(self, number_of_threads):
"""Removes threads. Used to dynamically remove threads during a
debug bench run."""
self.thread_creation_lock.acquire()
try:
trace('* Removing threads: ')
self.deleteThreads(number_of_threads)
trace(' done.\n')
finally:
self.thread_creation_lock.release()
def deleteThreads(self, number_of_threads):
"""Stops given number of threads and deletes from thread list.
NOTE: This method is not thread safe. Thread safety must be
handled by the caller."""
removed_threads = []
if number_of_threads > len(self.threads):
number_of_threads = len(self.threads)
for i in range(number_of_threads):
thread_data = self.threads.pop()
thread_data.thread_signaller.set_running(False)
removed_threads.append(thread_data)
for thread_data in removed_threads:
thread_data.thread.join()
del thread_data
trace('.')
def getNumberOfThreads(self):
return len(self.threads)
def dumpThreads(self):
"""Display all different traceback of Threads for debugging.
Require threadframe module."""
import threadframe
stacks = {}
frames = threadframe.dict()
for thread_id, frame in frames.iteritems():
stack = ''.join(traceback.format_stack(frame))
stacks[stack] = stacks.setdefault(stack, []) + [thread_id]
def sort_stack(x, y):
"""sort stack by number of thread."""
return cmp(len(x[1]), len(y[1]))
stacks = stacks.items()
stacks.sort(sort_stack)
for stack, thread_ids in stacks:
trace('=' * 72 + '\n')
trace('%i threads : %s\n' % (len(thread_ids), str(thread_ids)))
trace('-' * 72 + '\n')
trace(stack + '\n')
def getMonitorsConfig(self):
""" Get monitors configuration from hosts """
if not self.monitor_hosts:
return
monitor_hosts = []
for (name, host, port, desc) in self.monitor_hosts:
trace("* Getting monitoring config from %s: ..." % name)
server = ServerProxy("http://%s:%s" % (host, port))
try:
config = server.getMonitorsConfig()
data = []
for key in config.keys():
xml = '<monitorconfig host="%s" key="%s" value="%s" />' % (
name, key, config[key])
data.append(xml)
self.logr("\n".join(data))
except Fault:
trace(' not supported.\n')
monitor_hosts.append((name, host, port, desc))
except SocketError:
trace(' failed, server is down.\n')
else:
trace(' done.\n')
monitor_hosts.append((name, host, port, desc))
self.monitor_hosts = monitor_hosts
def startMonitors(self, monitor_key):
"""Start monitoring on hosts list."""
if not self.monitor_hosts:
return
monitor_hosts = []
for (name, host, port, desc) in self.monitor_hosts:
trace("* Start monitoring %s: ..." % name)
server = ServerProxy("http://%s:%s" % (host, port))
try:
server.startRecord(monitor_key)
except SocketError:
trace(' failed, server is down.\n')
else:
trace(' done.\n')
monitor_hosts.append((name, host, port, desc))
self.monitor_hosts = monitor_hosts
def stopMonitors(self, monitor_key):
"""Stop monitoring and save xml result."""
if not self.monitor_hosts:
return
for (name, host, port, desc) in self.monitor_hosts:
trace('* Stop monitoring %s: ' % name)
server = ServerProxy("http://%s:%s" % (host, port))
try:
server.stopRecord(monitor_key)
xml = server.getXmlResult(monitor_key)
except SocketError:
trace(' failed, server is down.\n')
else:
trace(' done.\n')
self.logr(xml)
def logr(self, message):
"""Log to the test result file."""
self.test._logr(message, force=True)
def logr_open(self):
"""Start logging tag."""
config = {'id': self.test_id,
'description': self.test_description,
'class_title': self.class_title,
'class_description': self.class_description,
'module': self.module_name,
'class': self.class_name,
'method': self.method_name,
'cycles': self.cycles,
'duration': self.duration,
'sleep_time': self.sleep_time,
'startup_delay': self.startup_delay,
'sleep_time_min': self.sleep_time_min,
'sleep_time_max': self.sleep_time_max,
'cycle_time': self.cycle_time,
'configuration_file': self.config_path,
'server_url': self.test_url,
'log_xml': self.result_path,
'node': platform.node(),
'python_version': platform.python_version()}
if self.options.label:
config['label'] = self.options.label
for (name, host, port, desc) in self.monitor_hosts:
config[name] = desc
self.test._open_result_log(**config)
def logr_close(self):
"""Stop logging tag."""
self.test._close_result_log()
self.test.logger_result.handlers = []
def __repr__(self):
"""Display bench information."""
text = []
text.append('=' * 72)
text.append('Benching %s.%s' % (self.class_name,
self.method_name))
text.append('=' * 72)
text.append(self.test_description)
text.append('-' * 72 + '\n')
text.append("Configuration")
text.append("=============\n")
text.append("* Current time: %s" % datetime.now().isoformat())
text.append("* Configuration file: %s" % self.config_path)
text.append("* Log xml: %s" % self.result_path)
text.append("* Server: %s" % self.test_url)
text.append("* Cycles: %s" % self.cycles)
text.append("* Cycle duration: %ss" % self.duration)
text.append("* Sleeptime between request: from %ss to %ss" % (
self.sleep_time_min, self.sleep_time_max))
text.append("* Sleeptime between test case: %ss" % self.sleep_time)
text.append("* Startup delay between thread: %ss\n\n" %
self.startup_delay)
return '\n'.join(text)
class BenchLoader(unittest.TestLoader):
suiteClass = list
def loadTestsFromTestCase(self, testCaseClass):
if not issubclass(testCaseClass, FunkLoadTestCase):
trace(red_str("Skipping "+ testCaseClass))
return []
testCaseNames = self.getTestCaseNames(testCaseClass)
if not testCaseNames and hasattr(testCaseClass, 'runTest'):
testCaseNames = ['runTest']
return [dict(module_name = testCaseClass.__module__,
class_name = testCaseClass.__name__,
method_name = x)
for x in testCaseNames]
def discover(sys_args):
parser = get_shared_OptionParser()
options, args = parser.parse_args(sys_args)
options.label = None
loader = BenchLoader()
suite = loader.discover('.')
def flatten_test_suite(suite):
if type(suite) != BenchLoader.suiteClass:
# Wasn't a TestSuite - must have been a Test
return [suite]
flat = []
for x in suite:
flat += flatten_test_suite(x)
return flat
flattened = flatten_test_suite(suite)
retval = 0
for test in flattened:
module_name = test['module_name']
class_name = test['class_name']
method_name = test['method_name']
if options.distribute:
dist_args = sys_args[:]
dist_args.append(module_name)
dist_args.append('%s.%s' % (class_name, method_name))
ret = run_distributed(options, module_name, class_name,
method_name, dist_args)
else:
ret = run_local(options, module_name, class_name, method_name)
# Handle failures
if ret != 0:
retval = ret
if options.failfast:
break
return retval
_manager = None
def shutdown(*args):
trace('Aborting run...')
if _manager is not None:
_manager.abort()
trace('Aborted')
sys.exit(0)
def get_runner_class(class_path):
try:
module_path, class_name = class_path.rsplit('.', 1)
except ValueError:
raise Exception('Invalid class path {0}'.format(class_path))
_module = __import__(module_path, globals(), locals(), class_name, -1)
return getattr(_module, class_name)
def parse_sys_args(sys_args):
parser = get_shared_OptionParser()
parser.add_option("", "--config",
type="string",
dest="config",
metavar='CONFIG',
help="Path to alternative config file")
parser.add_option("-l", "--label",
type="string",
help="Add a label to this bench run for easier "
"identification (it will be appended to the "
"directory name for reports generated from it).")
options, args = parser.parse_args(sys_args)
if len(args) != 2:
parser.error("incorrect number of arguments")
if not args[1].count('.'):
parser.error("invalid argument; should be [class].[method]")
if options.as_fast_as_possible:
options.bench_sleep_time_min = '0'
options.bench_sleep_time_max = '0'
options.bench_sleep_time = '0'
if os.path.exists(args[0]):
# We were passed a file for the first argument
module_name = os.path.basename(os.path.splitext(args[0])[0])
else:
# We were passed a module name
module_name = args[0]
return options, args, module_name
def get_shared_OptionParser():
'''Make an OptionParser that can be used in both normal mode and in
discover mode.
'''
parser = OptionParser(USAGE, formatter=TitledHelpFormatter(),
version="FunkLoad %s" % get_version())
parser.add_option("-r", "--runner-class",
type="string",
dest="bench_runner_class",
default="funkload.BenchRunner.BenchRunner",
help="Python dotted import path to BenchRunner class to use.")
parser.add_option("", "--no-color",
action="store_true",
help="Monochrome output.")
parser.add_option("", "--accept-invalid-links",
action="store_true",
help="Do not fail if css/image links are not reachable.")
parser.add_option("", "--simple-fetch",
action="store_true",
dest="bench_simple_fetch",
help="Don't load additional links like css or images "
"when fetching an html page.")
parser.add_option("--enable-debug-server",
action="store_true",
dest="debugserver",
help="Instantiates a debug HTTP server which exposes an "
"interface using which parameters can be modified "
"at run-time. Currently supported parameters: "
"/cvu?inc=<integer> to increase the number of "
"CVUs, /cvu?dec=<integer> to decrease the number "
"of CVUs, /getcvu returns number of CVUs ")
parser.add_option("--debug-server-port",
type="string",
dest="debugport",
help="Port at which debug server should run during the "
"test")
parser.add_option("--distribute",
action="store_true",
dest="distribute",
help="Distributes the CVUs over a group of worker "
"machines that are defined in the workers section")
parser.add_option("--distribute-workers",
type="string",
dest="workerlist",
help="This parameter will override the list of "
"workers defined in the config file. expected "
"notation is uname@host,uname:pwd@host or just "
"host...")
parser.add_option("--distribute-python",
type="string",
dest="python_bin",
help="When running in distributed mode, this Python "
"binary will be used across all hosts.")
parser.add_option("--is-distributed",
action="store_true",
dest="is_distributed",
help="This parameter is for internal use only. It "
"signals to a worker node that it is in "
"distributed mode and shouldn't perform certain "
"actions.")
parser.add_option("--distributed-packages",
type="string",
dest="distributed_packages",
help="Additional packages to be passed to easy_install "
"on remote machines when being run in distributed "
"mode.")
parser.add_option("--distributed-log-path",
type="string",
dest="distributed_log_path",
help="Path where all the logs will be stored when "
"running a distributed test")
parser.add_option("--distributed-key-filename",
type="string",
dest="distributed_key_filename",
help=("Path of the SSH key to use when running a "
"distributed test"))
parser.add_option("--feedback-endpoint",
type="string",
dest="feedback_endpoint",
help=("ZMQ push/pull socket used between the master and "
"the node to send feedback."))
parser.add_option("--feedback-pubsub-endpoint",
type="string",
dest="feedback_pubsub_endpoint",
help="ZMQ pub/sub socket use to publish feedback.")
parser.add_option("--feedback",
action="store_true",
dest="feedback",
help="Activates the realtime feedback")
parser.add_option("--failfast",
action="store_true",
dest="failfast",
help="Stop on first fail or error. (For discover mode)")
parser.add_option("-u", "--url",
type="string",
dest="main_url",
help="Base URL to bench.")
parser.add_option("-c", "--cycles",
type="string",
dest="bench_cycles",
help="Cycles to bench, colon-separated list of "
"virtual concurrent users. To run a bench with 3 "
"cycles of 5, 10 and 20 users, use: -c 5:10:20")
parser.add_option("-D", "--duration",
type="string",
dest="bench_duration",
help="Duration of a cycle in seconds.")
parser.add_option("-m", "--sleep-time-min",
type="string",
dest="bench_sleep_time_min",
help="Minimum sleep time between requests.")
parser.add_option("-M", "--sleep-time-max",
type="string",
dest="bench_sleep_time_max",
help="Maximum sleep time between requests.")
parser.add_option("-t", "--test-sleep-time",
type="string",
dest="bench_sleep_time",
help="Sleep time between tests.")
parser.add_option("-s", "--startup-delay",
type="string",
dest="bench_startup_delay",
help="Startup delay between thread.")
parser.add_option("-f", "--as-fast-as-possible",
action="store_true",
help="Remove sleep times between requests and between "
"tests, shortcut for -m0 -M0 -t0")
return parser
def run_distributed(options, module_name, class_name, method_name, sys_args):
ret = None
from funkload.Distributed import DistributionMgr
global _manager
try:
distmgr = DistributionMgr(
module_name, class_name, method_name, options, sys_args)
_manager = distmgr
except UserWarning, error:
trace(red_str("Distribution failed with:%s \n" % (error)))
return 1
try:
try:
distmgr.prepare_workers(allow_errors=True)
ret = distmgr.run()
distmgr.final_collect()
except KeyboardInterrupt:
trace("* ^C received *")
finally:
# in any case we want to stop the workers at the end
distmgr.abort()
_manager = None
return ret
def run_local(options, module_name, class_name, method_name):
ret = None
RunnerClass = get_runner_class(options.bench_runner_class)
bench = RunnerClass(module_name, class_name, method_name, options)
# Start a HTTP server optionally
if options.debugserver:
http_server_thread = FunkLoadHTTPServer(bench, options.debugport)
http_server_thread.start()
try:
ret = bench.run()
except KeyboardInterrupt:
trace("* ^C received *")
return ret
def main(sys_args=sys.argv[1:]):
"""Default main."""
# enable loading of modules in the current path
cur_path = os.path.abspath(os.path.curdir)
sys.path.insert(0, cur_path)
# registering signals
if not sys.platform.lower().startswith('win'):
signal.signal(signal.SIGTERM, shutdown)
signal.signal(signal.SIGINT, shutdown)
signal.signal(signal.SIGQUIT, shutdown)
# special case: 'discover' argument
if sys_args and sys_args[0].lower() == 'discover':
return discover(sys_args)
options, args, module_name = parse_sys_args(sys_args)
klass, method = args[1].split('.')
if options.distribute:
return run_distributed(options, module_name, klass, method, sys_args)
else:
return run_local(options, module_name, klass, method)
if __name__ == '__main__':
ret = main()
sys.exit(ret)
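# Editor's note: illustrative sketch of how a colon-separated -c value maps
# onto the integer cycle list consumed by BenchRunner (the actual parsing
# happens in the FunkLoad configuration layer).
def _example_parse_cycles(option_value):
    return [int(part) for part in option_value.split(':')]
assert _example_parse_cycles('5:10:20') == [5, 10, 20]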
| gpl-2.0 |
medallia/aurora | src/main/python/apache/aurora/client/api/scheduler_client.py | 5 | 13651 | #
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import functools
import threading
import time
import traceback
import requests
from pystachio import Default, Integer, String
from thrift.protocol import TBinaryProtocol
from thrift.transport import TTransport
from twitter.common import log
from twitter.common.concurrent import Timeout, deadline
from twitter.common.quantity import Amount, Time
from twitter.common.zookeeper.kazoo_client import TwitterKazooClient
from twitter.common.zookeeper.serverset import ServerSet
from apache.aurora.common.auth.auth_module_manager import get_auth_handler
from apache.aurora.common.cluster import Cluster
from apache.aurora.common.transport import TRequestsTransport
from gen.apache.aurora.api import AuroraAdmin
from gen.apache.aurora.api.constants import BYPASS_LEADER_REDIRECT_HEADER_NAME
from gen.apache.aurora.api.ttypes import ResponseCode
try:
from urlparse import urljoin
except ImportError:
from urllib.parse import urljoin
class SchedulerClientTrait(Cluster.Trait):
zk = String # noqa
zk_port = Default(Integer, 2181) # noqa
scheduler_zk_path = String # noqa
scheduler_uri = String # noqa
proxy_url = String # noqa
auth_mechanism = Default(String, 'UNAUTHENTICATED') # noqa
def _bypass_leader_redirect_session_factory(should_bypass=False):
session = requests.session()
if should_bypass:
session.headers[BYPASS_LEADER_REDIRECT_HEADER_NAME] = 'true'
return session
class SchedulerClient(object):
THRIFT_RETRIES = 5
RETRY_TIMEOUT = Amount(1, Time.SECONDS)
class Error(Exception): pass
class CouldNotConnect(Error): pass
# TODO(wickman) Refactor per MESOS-3005 into two separate classes with separate traits:
# ZookeeperClientTrait
# DirectClientTrait
@classmethod
def get(cls, cluster, auth_factory=get_auth_handler, **kwargs):
if not isinstance(cluster, Cluster):
raise TypeError('"cluster" must be an instance of Cluster, got %s' % type(cluster))
cluster = cluster.with_trait(SchedulerClientTrait)
auth_handler = auth_factory(cluster.auth_mechanism)
if cluster.zk:
return ZookeeperSchedulerClient(cluster, port=cluster.zk_port, auth=auth_handler, **kwargs)
elif cluster.scheduler_uri:
return DirectSchedulerClient(cluster.scheduler_uri, auth=auth_handler, **kwargs)
else:
raise ValueError('"cluster" does not specify zk or scheduler_uri')
def __init__(self, auth, user_agent, verbose=False, bypass_leader_redirect=False):
self._client = None
self._auth_handler = auth
self._user_agent = user_agent
self._verbose = verbose
self._bypass_leader_redirect = bypass_leader_redirect
def get_thrift_client(self):
if self._client is None:
self._client = self._connect()
return self._client
def get_failed_auth_message(self):
return self._auth_handler.failed_auth_message
# per-class implementation -- mostly meant to set up a valid host/port
# pair and then delegate the opening to SchedulerClient._connect_scheduler
def _connect(self):
return None
def _connect_scheduler(self, uri, clock=time):
transport = TRequestsTransport(
uri,
auth=self._auth_handler.auth(),
user_agent=self._user_agent,
session_factory=functools.partial(
_bypass_leader_redirect_session_factory,
should_bypass=self._bypass_leader_redirect))
protocol = TBinaryProtocol.TBinaryProtocolAccelerated(transport)
schedulerClient = AuroraAdmin.Client(protocol)
for _ in range(self.THRIFT_RETRIES):
try:
transport.open()
return schedulerClient
except TTransport.TTransportException:
clock.sleep(self.RETRY_TIMEOUT.as_(Time.SECONDS))
continue
except Exception as e:
# Monkey-patched proxies, like socks, can generate a proxy error here.
# without adding a dependency, we can't catch those in a more specific way.
raise self.CouldNotConnect('Connection to scheduler failed: %s' % e)
raise self.CouldNotConnect('Could not connect to %s' % uri)
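# Illustrative sketch (not part of the original module): SchedulerClient.get
# picks the concrete subclass from the cluster definition. Assuming a
# hypothetical cluster entry, construction could look roughly like:
#
#     cluster = Cluster(name='example', zk='zk1.example.com',
#                       scheduler_zk_path='/aurora/scheduler')
#     client = SchedulerClient.get(cluster, user_agent='example-agent/1.0')
#     thrift_client = client.get_thrift_client()
#
# With `zk` set this yields a ZookeeperSchedulerClient (defined below); a
# cluster that only defines `scheduler_uri` yields a DirectSchedulerClient.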
class ZookeeperSchedulerClient(SchedulerClient):
SERVERSET_TIMEOUT = Amount(10, Time.SECONDS)
@classmethod
def get_scheduler_serverset(cls, cluster, port=2181, verbose=False, **kw):
if cluster.zk is None:
raise ValueError('Cluster has no associated zookeeper ensemble!')
if cluster.scheduler_zk_path is None:
raise ValueError('Cluster has no defined scheduler path, must specify scheduler_zk_path '
'in your cluster config!')
hosts = [h + ':{p}' for h in cluster.zk.split(',')]
zk = TwitterKazooClient.make(str(','.join(hosts).format(p=port)), verbose=verbose)
return zk, ServerSet(zk, cluster.scheduler_zk_path, **kw)
def __init__(self, cluster, port=2181, verbose=False, _deadline=deadline, **kwargs):
SchedulerClient.__init__(self, verbose=verbose, **kwargs)
self._cluster = cluster
self._zkport = port
self._endpoint = None
self._uri = None
self._deadline = _deadline
def _resolve(self):
"""Resolve the uri associated with this scheduler from zookeeper."""
joined = threading.Event()
def on_join(elements):
joined.set()
zk, serverset = self.get_scheduler_serverset(self._cluster, verbose=self._verbose,
port=self._zkport, on_join=on_join)
joined.wait(timeout=self.SERVERSET_TIMEOUT.as_(Time.SECONDS))
try:
# Need to perform this operation in a separate thread, because kazoo will wait for the
# result of this serverset evaluation indefinitely, which will prevent people killing
# the client with keyboard interrupts.
serverset_endpoints = self._deadline(lambda: list(serverset),
timeout=self.SERVERSET_TIMEOUT.as_(Time.SECONDS), daemon=True, propagate=True)
except Timeout:
raise self.CouldNotConnect("Failed to connect to Zookeeper within %d seconds." %
self.SERVERSET_TIMEOUT.as_(Time.SECONDS))
if len(serverset_endpoints) == 0:
raise self.CouldNotConnect('No schedulers detected in %s!' % self._cluster.name)
instance = serverset_endpoints[0]
if 'https' in instance.additional_endpoints:
endpoint = instance.additional_endpoints['https']
self._uri = 'https://%s:%s' % (endpoint.host, endpoint.port)
elif 'http' in instance.additional_endpoints:
endpoint = instance.additional_endpoints['http']
self._uri = 'http://%s:%s' % (endpoint.host, endpoint.port)
zk.stop()
def _connect(self):
if self._uri is None:
self._resolve()
if self._uri is not None:
return self._connect_scheduler(urljoin(self._uri, 'api'))
@property
def url(self):
proxy_url = self._cluster.proxy_url
if proxy_url:
return proxy_url
return self.raw_url
@property
def raw_url(self):
if self._uri is None:
self._resolve()
if self._uri:
return self._uri
class DirectSchedulerClient(SchedulerClient):
def __init__(self, uri, verbose=True, **kwargs):
SchedulerClient.__init__(self, verbose=verbose, **kwargs)
self._uri = uri
def _connect(self):
return self._connect_scheduler(urljoin(self._uri, 'api'))
@property
def url(self):
return self._uri
@property
def raw_url(self):
return self._uri
class SchedulerProxy(object):
"""
This class is responsible for creating a reliable thrift client to the
twitter scheduler. Basically all the dirty work needed by the
AuroraClientAPI.
"""
CONNECT_MAXIMUM_WAIT = Amount(1, Time.MINUTES)
RPC_RETRY_INTERVAL = Amount(5, Time.SECONDS)
RPC_MAXIMUM_WAIT = Amount(10, Time.MINUTES)
class Error(Exception): pass
class TimeoutError(Error): pass
class TransientError(Error): pass
class AuthError(Error): pass
class APIVersionError(Error): pass
class ThriftInternalError(Error): pass
class NotRetriableError(Error): pass
def __init__(self, cluster, verbose=False, **kwargs):
self.cluster = cluster
# TODO(Sathya): Make this a part of cluster trait when authentication is pushed to the transport
# layer.
self._client = self._scheduler_client = None
self.verbose = verbose
self._lock = threading.RLock()
self._terminating = threading.Event()
self._kwargs = kwargs
def with_scheduler(method):
"""Decorator magic to make sure a connection is made to the scheduler"""
def _wrapper(self, *args, **kwargs):
if not self._client:
self._construct_scheduler()
return method(self, *args, **kwargs)
return _wrapper
def invalidate(self):
self._client = self._scheduler_client = None
def terminate(self):
"""Requests immediate termination of any retry attempts and invalidates client."""
self._terminating.set()
self.invalidate()
@with_scheduler
def client(self):
return self._client
@with_scheduler
def scheduler_client(self):
return self._scheduler_client
def _construct_scheduler(self):
"""
Populates:
self._scheduler_client
self._client
"""
self._scheduler_client = SchedulerClient.get(self.cluster, verbose=self.verbose, **self._kwargs)
assert self._scheduler_client, "Could not find scheduler (cluster = %s)" % self.cluster.name
start = time.time()
while (time.time() - start) < self.CONNECT_MAXIMUM_WAIT.as_(Time.SECONDS):
try:
# this can wind up generating any kind of error, because it turns into
# a call to a dynamically set authentication module.
self._client = self._scheduler_client.get_thrift_client()
break
except SchedulerClient.CouldNotConnect as e:
log.warning('Could not connect to scheduler: %s' % e)
except Exception as e:
# turn any auth module exception into an auth error.
log.debug('Warning: got an unknown exception during authentication:')
log.debug(traceback.format_exc())
raise self.AuthError('Error connecting to scheduler: %s' % e)
if not self._client:
raise self.TimeoutError('Timed out trying to connect to scheduler at %s' % self.cluster.name)
def __getattr__(self, method_name):
# If the method does not exist, getattr will return AttributeError for us.
method = getattr(AuroraAdmin.Client, method_name)
if not callable(method):
return method
@functools.wraps(method)
def method_wrapper(*args, **kwargs):
retry = kwargs.get('retry', False)
with self._lock:
start = time.time()
while not self._terminating.is_set() and (
time.time() - start) < self.RPC_MAXIMUM_WAIT.as_(Time.SECONDS):
try:
method = getattr(self.client(), method_name)
if not callable(method):
return method
resp = method(*args)
if resp is not None and resp.responseCode == ResponseCode.ERROR_TRANSIENT:
raise self.TransientError(", ".join(
[m.message for m in resp.details] if resp.details else []))
return resp
except TRequestsTransport.AuthError as e:
log.error(self.scheduler_client().get_failed_auth_message())
raise self.AuthError(e)
except TTransport.TTransportException as e:
# Client does not know if the request has been received and processed by
# the scheduler, therefore the call is retried if it is idempotent.
if not self._terminating.is_set():
if retry:
log.warning('Transport error communicating with scheduler: %s, retrying...' % e)
self.invalidate()
self._terminating.wait(self.RPC_RETRY_INTERVAL.as_(Time.SECONDS))
else:
raise self.NotRetriableError('Transport error communicating with scheduler during '
'non-idempotent operation: %s, not retrying' % e)
except (self.TimeoutError, self.TransientError) as e:
# If it is TimeoutError then the connection with scheduler could not
# be established, therefore the call did not go through.
# If it is TransientError then the scheduler could not process the call
# because its storage is not in READY state.
# In both cases, the call can be safely retried.
if not self._terminating.is_set():
log.warning('Connection error with scheduler: %s, reconnecting...' % e)
self.invalidate()
self._terminating.wait(self.RPC_RETRY_INTERVAL.as_(Time.SECONDS))
except Exception as e:
# Take any error that occurs during the RPC call, and transform it
# into something clients can handle.
if not self._terminating.is_set():
raise self.ThriftInternalError("Error during thrift call %s to %s: %s" %
(method_name, self.cluster.name, e))
if not self._terminating.is_set():
raise self.TimeoutError('Timed out attempting to issue %s to %s' % (
method_name, self.cluster.name))
return method_wrapper
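# Illustrative sketch (not part of the original module): SchedulerProxy
# forwards unknown attribute lookups to the generated AuroraAdmin.Client and
# wraps every RPC in the retry loop above. A hypothetical caller:
#
#     proxy = SchedulerProxy(cluster, user_agent='example-agent/1.0')
#     resp = proxy.getJobs('some-role')              # no transport-error retry
#     resp = proxy.getJobs('some-role', retry=True)  # opt in to retries
#
# Passing retry=True marks the call as safe to repeat, so transport errors
# re-invoke it instead of raising NotRetriableError.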
| apache-2.0 |
hifly/OpenUpgrade | addons/l10n_fr/wizard/fr_report_compute_resultant.py | 374 | 2312 | # -*- coding: utf-8 -*-
##############################################################################
#
# Copyright (c) 2008 JAILLET Simon - CrysaLEAD - www.crysalead.fr
#
# WARNING: This program as such is intended to be used by professional
# programmers who take the whole responsibility of assessing all potential
# consequences resulting from its eventual inadequacies and bugs
# End users who are looking for a ready-to-use solution with commercial
# guarantees and support are strongly advised to contract a Free Software
# Service Company
#
# This program is Free Software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
#
##############################################################################
from openerp.osv import fields, osv
class account_cdr_report(osv.osv_memory):
_name = 'account.cdr.report'
_description = 'Account CDR Report'
def _get_defaults(self, cr, uid, context=None):
fiscalyear_id = self.pool.get('account.fiscalyear').find(cr, uid)
return fiscalyear_id
_columns = {
'fiscalyear_id': fields.many2one('account.fiscalyear', 'Fiscal Year', required=True),
}
_defaults = {
'fiscalyear_id': _get_defaults
}
def print_cdr_report(self, cr, uid, ids, context=None):
active_ids = context.get('active_ids', [])
data = {}
data['form'] = {}
data['ids'] = active_ids
data['form']['fiscalyear_id'] = self.browse(cr, uid, ids)[0].fiscalyear_id.id
return self.pool['report'].get_action(
cr, uid, ids, 'l10n_fr.report_l10nfrresultat', data=data, context=context
)
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
thesuperzapper/tensorflow | tensorflow/contrib/distributions/python/kernel_tests/uniform_test.py | 35 | 8966 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for Uniform distribution."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from scipy import stats
from tensorflow.contrib.distributions.python.ops import uniform as uniform_lib
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import errors_impl
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import test
class UniformTest(test.TestCase):
def testUniformRange(self):
with self.test_session():
a = 3.0
b = 10.0
uniform = uniform_lib.Uniform(low=a, high=b)
self.assertAllClose(a, uniform.low.eval())
self.assertAllClose(b, uniform.high.eval())
self.assertAllClose(b - a, uniform.range().eval())
def testUniformPDF(self):
with self.test_session():
a = constant_op.constant([-3.0] * 5 + [15.0])
b = constant_op.constant([11.0] * 5 + [20.0])
uniform = uniform_lib.Uniform(low=a, high=b)
a_v = -3.0
b_v = 11.0
x = np.array([-10.5, 4.0, 0.0, 10.99, 11.3, 17.0], dtype=np.float32)
def _expected_pdf():
pdf = np.zeros_like(x) + 1.0 / (b_v - a_v)
pdf[x > b_v] = 0.0
pdf[x < a_v] = 0.0
pdf[5] = 1.0 / (20.0 - 15.0)
return pdf
expected_pdf = _expected_pdf()
pdf = uniform.prob(x)
self.assertAllClose(expected_pdf, pdf.eval())
log_pdf = uniform.log_prob(x)
self.assertAllClose(np.log(expected_pdf), log_pdf.eval())
def testUniformShape(self):
with self.test_session():
a = constant_op.constant([-3.0] * 5)
b = constant_op.constant(11.0)
uniform = uniform_lib.Uniform(low=a, high=b)
self.assertEqual(uniform.batch_shape_tensor().eval(), (5,))
self.assertEqual(uniform.batch_shape, tensor_shape.TensorShape([5]))
self.assertAllEqual(uniform.event_shape_tensor().eval(), [])
self.assertEqual(uniform.event_shape, tensor_shape.TensorShape([]))
def testUniformPDFWithScalarEndpoint(self):
with self.test_session():
a = constant_op.constant([0.0, 5.0])
b = constant_op.constant(10.0)
uniform = uniform_lib.Uniform(low=a, high=b)
x = np.array([0.0, 8.0], dtype=np.float32)
expected_pdf = np.array([1.0 / (10.0 - 0.0), 1.0 / (10.0 - 5.0)])
pdf = uniform.prob(x)
self.assertAllClose(expected_pdf, pdf.eval())
def testUniformCDF(self):
with self.test_session():
batch_size = 6
a = constant_op.constant([1.0] * batch_size)
b = constant_op.constant([11.0] * batch_size)
a_v = 1.0
b_v = 11.0
x = np.array([-2.5, 2.5, 4.0, 0.0, 10.99, 12.0], dtype=np.float32)
uniform = uniform_lib.Uniform(low=a, high=b)
def _expected_cdf():
cdf = (x - a_v) / (b_v - a_v)
cdf[x >= b_v] = 1
cdf[x < a_v] = 0
return cdf
cdf = uniform.cdf(x)
self.assertAllClose(_expected_cdf(), cdf.eval())
log_cdf = uniform.log_cdf(x)
self.assertAllClose(np.log(_expected_cdf()), log_cdf.eval())
def testUniformEntropy(self):
with self.test_session():
a_v = np.array([1.0, 1.0, 1.0])
b_v = np.array([[1.5, 2.0, 3.0]])
uniform = uniform_lib.Uniform(low=a_v, high=b_v)
expected_entropy = np.log(b_v - a_v)
self.assertAllClose(expected_entropy, uniform.entropy().eval())
def testUniformAssertMaxGtMin(self):
with self.test_session():
a_v = np.array([1.0, 1.0, 1.0], dtype=np.float32)
b_v = np.array([1.0, 2.0, 3.0], dtype=np.float32)
uniform = uniform_lib.Uniform(low=a_v, high=b_v, validate_args=True)
with self.assertRaisesWithPredicateMatch(errors_impl.InvalidArgumentError,
"x < y"):
uniform.low.eval()
def testUniformSample(self):
with self.test_session():
a = constant_op.constant([3.0, 4.0])
b = constant_op.constant(13.0)
a1_v = 3.0
a2_v = 4.0
b_v = 13.0
n = constant_op.constant(100000)
uniform = uniform_lib.Uniform(low=a, high=b)
samples = uniform.sample(n, seed=137)
sample_values = samples.eval()
self.assertEqual(sample_values.shape, (100000, 2))
self.assertAllClose(
sample_values[::, 0].mean(), (b_v + a1_v) / 2, atol=1e-2)
self.assertAllClose(
sample_values[::, 1].mean(), (b_v + a2_v) / 2, atol=1e-2)
self.assertFalse(
np.any(sample_values[::, 0] < a1_v) or np.any(sample_values >= b_v))
self.assertFalse(
np.any(sample_values[::, 1] < a2_v) or np.any(sample_values >= b_v))
def _testUniformSampleMultiDimensional(self):
# DISABLED: Please enable this test once b/issues/30149644 is resolved.
with self.test_session():
batch_size = 2
a_v = [3.0, 22.0]
b_v = [13.0, 35.0]
a = constant_op.constant([a_v] * batch_size)
b = constant_op.constant([b_v] * batch_size)
uniform = uniform_lib.Uniform(low=a, high=b)
n_v = 100000
n = constant_op.constant(n_v)
samples = uniform.sample(n)
self.assertEqual(samples.get_shape(), (n_v, batch_size, 2))
sample_values = samples.eval()
self.assertFalse(
np.any(sample_values[:, 0, 0] < a_v[0]) or
np.any(sample_values[:, 0, 0] >= b_v[0]))
self.assertFalse(
np.any(sample_values[:, 0, 1] < a_v[1]) or
np.any(sample_values[:, 0, 1] >= b_v[1]))
self.assertAllClose(
sample_values[:, 0, 0].mean(), (a_v[0] + b_v[0]) / 2, atol=1e-2)
self.assertAllClose(
sample_values[:, 0, 1].mean(), (a_v[1] + b_v[1]) / 2, atol=1e-2)
def testUniformMean(self):
with self.test_session():
a = 10.0
b = 100.0
uniform = uniform_lib.Uniform(low=a, high=b)
s_uniform = stats.uniform(loc=a, scale=b - a)
self.assertAllClose(uniform.mean().eval(), s_uniform.mean())
def testUniformVariance(self):
with self.test_session():
a = 10.0
b = 100.0
uniform = uniform_lib.Uniform(low=a, high=b)
s_uniform = stats.uniform(loc=a, scale=b - a)
self.assertAllClose(uniform.variance().eval(), s_uniform.var())
def testUniformStd(self):
with self.test_session():
a = 10.0
b = 100.0
uniform = uniform_lib.Uniform(low=a, high=b)
s_uniform = stats.uniform(loc=a, scale=b - a)
self.assertAllClose(uniform.stddev().eval(), s_uniform.std())
def testUniformNans(self):
with self.test_session():
a = 10.0
b = [11.0, 100.0]
uniform = uniform_lib.Uniform(low=a, high=b)
no_nans = constant_op.constant(1.0)
nans = constant_op.constant(0.0) / constant_op.constant(0.0)
self.assertTrue(math_ops.is_nan(nans).eval())
with_nans = array_ops.stack([no_nans, nans])
pdf = uniform.prob(with_nans)
is_nan = math_ops.is_nan(pdf).eval()
self.assertFalse(is_nan[0])
self.assertTrue(is_nan[1])
def testUniformSamplePdf(self):
with self.test_session():
a = 10.0
b = [11.0, 100.0]
uniform = uniform_lib.Uniform(a, b)
self.assertTrue(
math_ops.reduce_all(uniform.prob(uniform.sample(10)) > 0).eval())
def testUniformBroadcasting(self):
with self.test_session():
a = 10.0
b = [11.0, 20.0]
uniform = uniform_lib.Uniform(a, b)
pdf = uniform.prob([[10.5, 11.5], [9.0, 19.0], [10.5, 21.0]])
expected_pdf = np.array([[1.0, 0.1], [0.0, 0.1], [1.0, 0.0]])
self.assertAllClose(expected_pdf, pdf.eval())
def testUniformSampleWithShape(self):
with self.test_session():
a = 10.0
b = [11.0, 20.0]
uniform = uniform_lib.Uniform(a, b)
pdf = uniform.prob(uniform.sample((2, 3)))
# pylint: disable=bad-continuation
expected_pdf = [
[[1.0, 0.1], [1.0, 0.1], [1.0, 0.1]],
[[1.0, 0.1], [1.0, 0.1], [1.0, 0.1]],
]
# pylint: enable=bad-continuation
self.assertAllClose(expected_pdf, pdf.eval())
pdf = uniform.prob(uniform.sample())
expected_pdf = [1.0, 0.1]
self.assertAllClose(expected_pdf, pdf.eval())
if __name__ == "__main__":
test.main()
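# Illustrative note (not part of the original module): invoking this file
# directly (python uniform_test.py) runs every UniformTest case through
# test.main(). The assertions above double as usage examples, e.g. sampling
# (shapes and ranges taken from the tests themselves):
#
#     dist = uniform_lib.Uniform(low=3.0, high=10.0)
#     samples = dist.sample(1000, seed=0)   # shape (1000,), values in [3, 10)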
| apache-2.0 |
noelbk/neutron-juniper | neutron/db/portsecurity_db.py | 9 | 7480 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Copyright 2013 Nicira Networks, Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# @author: Aaron Rosen, Nicira, Inc
import sqlalchemy as sa
from sqlalchemy import orm
from sqlalchemy.orm import exc
from neutron.api.v2 import attributes as attrs
from neutron.db import db_base_plugin_v2
from neutron.db import model_base
from neutron.db import models_v2
from neutron.extensions import portsecurity as psec
from neutron.openstack.common import log as logging
LOG = logging.getLogger(__name__)
class PortSecurityBinding(model_base.BASEV2):
port_id = sa.Column(sa.String(36),
sa.ForeignKey('ports.id', ondelete="CASCADE"),
primary_key=True)
port_security_enabled = sa.Column(sa.Boolean(), nullable=False)
    # Add a relationship to the Port model in order to be able to
# instruct SQLAlchemy to eagerly load port security binding
port = orm.relationship(
models_v2.Port,
backref=orm.backref("port_security", uselist=False,
cascade='delete', lazy='joined'))
class NetworkSecurityBinding(model_base.BASEV2):
network_id = sa.Column(sa.String(36),
sa.ForeignKey('networks.id', ondelete="CASCADE"),
primary_key=True)
port_security_enabled = sa.Column(sa.Boolean(), nullable=False)
    # Add a relationship to the Network model in order to be able to instruct
# SQLAlchemy to eagerly load default port security setting for ports
# on this network
network = orm.relationship(
models_v2.Network,
backref=orm.backref("port_security", uselist=False,
cascade='delete', lazy='joined'))
class PortSecurityDbMixin(object):
"""Mixin class to add port security."""
def _process_network_port_security_create(
self, context, network_req, network_res):
with context.session.begin(subtransactions=True):
db = NetworkSecurityBinding(
network_id=network_res['id'],
port_security_enabled=network_req[psec.PORTSECURITY])
context.session.add(db)
network_res[psec.PORTSECURITY] = network_req[psec.PORTSECURITY]
return self._make_network_port_security_dict(db)
def _process_port_port_security_create(
self, context, port_req, port_res):
with context.session.begin(subtransactions=True):
db = PortSecurityBinding(
port_id=port_res['id'],
port_security_enabled=port_req[psec.PORTSECURITY])
context.session.add(db)
port_res[psec.PORTSECURITY] = port_req[psec.PORTSECURITY]
return self._make_port_security_dict(db)
def _extend_port_security_dict(self, response_data, db_data):
if ('port-security' in
getattr(self, 'supported_extension_aliases', [])):
psec_value = db_data['port_security'][psec.PORTSECURITY]
response_data[psec.PORTSECURITY] = psec_value
def _get_network_security_binding(self, context, network_id):
try:
query = self._model_query(context, NetworkSecurityBinding)
binding = query.filter(
NetworkSecurityBinding.network_id == network_id).one()
except exc.NoResultFound:
raise psec.PortSecurityBindingNotFound()
return binding[psec.PORTSECURITY]
def _get_port_security_binding(self, context, port_id):
try:
query = self._model_query(context, PortSecurityBinding)
binding = query.filter(
PortSecurityBinding.port_id == port_id).one()
except exc.NoResultFound:
raise psec.PortSecurityBindingNotFound()
return binding[psec.PORTSECURITY]
def _process_port_port_security_update(
self, context, port_req, port_res):
if psec.PORTSECURITY in port_req:
port_security_enabled = port_req[psec.PORTSECURITY]
else:
return
try:
query = self._model_query(context, PortSecurityBinding)
port_id = port_res['id']
binding = query.filter(
PortSecurityBinding.port_id == port_id).one()
binding.port_security_enabled = port_security_enabled
port_res[psec.PORTSECURITY] = port_security_enabled
except exc.NoResultFound:
raise psec.PortSecurityBindingNotFound()
def _process_network_port_security_update(
self, context, network_req, network_res):
if psec.PORTSECURITY in network_req:
port_security_enabled = network_req[psec.PORTSECURITY]
else:
return
try:
query = self._model_query(context, NetworkSecurityBinding)
network_id = network_res['id']
binding = query.filter(
NetworkSecurityBinding.network_id == network_id).one()
binding.port_security_enabled = port_security_enabled
network_res[psec.PORTSECURITY] = port_security_enabled
except exc.NoResultFound:
raise psec.PortSecurityBindingNotFound()
def _make_network_port_security_dict(self, port_security, fields=None):
res = {'network_id': port_security['network_id'],
psec.PORTSECURITY: port_security[psec.PORTSECURITY]}
return self._fields(res, fields)
def _determine_port_security_and_has_ip(self, context, port):
"""Returns a tuple of booleans (port_security_enabled, has_ip).
        Port_security is the value associated with the port if one is present,
        otherwise the value associated with the network is returned. has_ip
        indicates whether the port is associated with an IP.
"""
has_ip = self._ip_on_port(port)
# we don't apply security groups for dhcp, router
if (port.get('device_owner') and
port['device_owner'].startswith('network:')):
return (False, has_ip)
if (psec.PORTSECURITY in port and
isinstance(port[psec.PORTSECURITY], bool)):
port_security_enabled = port[psec.PORTSECURITY]
else:
port_security_enabled = self._get_network_security_binding(
context, port['network_id'])
return (port_security_enabled, has_ip)
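    # Illustrative sketch (not part of the original module): a plugin mixing
    # this class in would typically consult the tuple above while creating a
    # port; the helper name below is hypothetical, only the tuple shape is
    # taken from this file.
    #
    #     port_security, has_ip = self._determine_port_security_and_has_ip(
    #         context, port['port'])
    #     if port_security and has_ip:
    #         self._process_port_security_group(context, port)  # hypothetical
    #
    # Network-owned ports (device_owner starting with 'network:') always yield
    # (False, has_ip), so DHCP and router ports skip port security handling.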
def _make_port_security_dict(self, port, fields=None):
res = {'port_id': port['port_id'],
psec.PORTSECURITY: port[psec.PORTSECURITY]}
return self._fields(res, fields)
def _ip_on_port(self, port):
return bool(port.get('fixed_ips'))
# Register dict extend functions for ports and networks
db_base_plugin_v2.NeutronDbPluginV2.register_dict_extend_funcs(
attrs.NETWORKS, ['_extend_port_security_dict'])
db_base_plugin_v2.NeutronDbPluginV2.register_dict_extend_funcs(
attrs.PORTS, ['_extend_port_security_dict'])
| apache-2.0 |
spezi77/android_kernel_google_mako | tools/perf/scripts/python/sctop.py | 11180 | 1924 | # system call top
# (c) 2010, Tom Zanussi <tzanussi@gmail.com>
# Licensed under the terms of the GNU GPL License version 2
#
# Periodically displays system-wide system call totals, broken down by
# syscall. If a [comm] arg is specified, only syscalls called by
# [comm] are displayed. If an [interval] arg is specified, the display
# will be refreshed every [interval] seconds. The default interval is
# 3 seconds.
import os, sys, thread, time
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from perf_trace_context import *
from Core import *
from Util import *
usage = "perf script -s sctop.py [comm] [interval]\n";
for_comm = None
default_interval = 3
interval = default_interval
if len(sys.argv) > 3:
sys.exit(usage)
if len(sys.argv) > 2:
for_comm = sys.argv[1]
interval = int(sys.argv[2])
elif len(sys.argv) > 1:
try:
interval = int(sys.argv[1])
except ValueError:
for_comm = sys.argv[1]
interval = default_interval
syscalls = autodict()
def trace_begin():
thread.start_new_thread(print_syscall_totals, (interval,))
pass
def raw_syscalls__sys_enter(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
id, args):
if for_comm is not None:
if common_comm != for_comm:
return
try:
syscalls[id] += 1
except TypeError:
syscalls[id] = 1
def print_syscall_totals(interval):
while 1:
clear_term()
if for_comm is not None:
print "\nsyscall events for %s:\n\n" % (for_comm),
else:
print "\nsyscall events:\n\n",
print "%-40s %10s\n" % ("event", "count"),
print "%-40s %10s\n" % ("----------------------------------------", \
"----------"),
for id, val in sorted(syscalls.iteritems(), key = lambda(k, v): (v, k), \
reverse = True):
try:
print "%-40s %10d\n" % (syscall_name(id), val),
except TypeError:
pass
syscalls.clear()
time.sleep(interval)
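# Illustrative note (not part of the original script): the usage string above
# is the whole interface. A hypothetical session could record the syscall
# tracepoint and then replay it through this script:
#
#     perf record -e raw_syscalls:sys_enter -a sleep 10
#     perf script -s sctop.py firefox 5
#
# which refreshes a syscall-count table for 'firefox' every 5 seconds.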
| gpl-2.0 |
okolisny/integration_tests | cfme/tests/containers/test_relationships.py | 1 | 4852 | from random import shuffle
import pytest
from cfme.utils import testgen
from cfme.utils.version import current_version
from cfme.web_ui import summary_title
from cfme.containers.pod import Pod
from cfme.containers.provider import ContainersProvider,\
ContainersTestItem
from cfme.containers.service import Service
from cfme.containers.node import Node
from cfme.containers.replicator import Replicator
from cfme.containers.image import Image
from cfme.containers.project import Project
from cfme.containers.template import Template
from cfme.containers.container import Container
from cfme.containers.image_registry import ImageRegistry
from cfme.containers.volume import Volume
pytestmark = [
pytest.mark.usefixtures('setup_provider'),
pytest.mark.tier(1)]
pytest_generate_tests = testgen.generate([ContainersProvider], scope='function')
# The polarion markers below are used to mark the test item
# with polarion test case ID.
# TODO: future enhancement - https://github.com/pytest-dev/pytest/pull/1921
TEST_ITEMS = [
pytest.mark.polarion('CMP-9851')(ContainersTestItem(ContainersProvider, 'CMP-9851')),
pytest.mark.polarion('CMP-9947')(ContainersTestItem(Container, 'CMP-9947')),
pytest.mark.polarion('CMP-9929')(ContainersTestItem(Pod, 'CMP-9929')),
pytest.mark.polarion('CMP-10564')(ContainersTestItem(Service, 'CMP-10564')),
pytest.mark.polarion('CMP-9962')(ContainersTestItem(Node, 'CMP-9962')),
pytest.mark.polarion('CMP-10565')(ContainersTestItem(Replicator, 'CMP-10565')),
pytest.mark.polarion('CMP-9980')(ContainersTestItem(Image, 'CMP-9980')),
pytest.mark.polarion('CMP-9994')(ContainersTestItem(ImageRegistry, 'CMP-9994')),
pytest.mark.polarion('CMP-9868')(ContainersTestItem(Project, 'CMP-9868')),
pytest.mark.polarion('CMP-10319')(ContainersTestItem(Template, 'CMP-10319')),
pytest.mark.polarion('CMP-10410')(ContainersTestItem(Volume, 'CMP-10410'))
]
def check_relationships(instance):
"""Check the relationships linking & data integrity"""
instance.summary.reload() # Because sometimes:
# AttributeError: 'Summary' object has no attribute 'relationships'
sum_values = instance.summary.relationships.items().values()
shuffle(sum_values)
for attr in sum_values:
if attr.clickable:
break
else:
return # No clickable object but we still want to pass
link_value = attr.value
attr.click()
if type(link_value) is int:
from cfme.web_ui import paginator
rec_total = int(paginator.rec_total())
if rec_total != link_value:
            raise Exception('Difference between the value ({}) in the '
                            'relationships table of {} and the number of '
                            'records ({}) in the target page'
                            .format(link_value, instance.name, rec_total))
else:
assert '(Summary)' in summary_title()
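# Illustrative note (not part of the original module): for a numeric
# relationship cell the helper above cross-checks the paginator, e.g. a Pod
# whose "Containers" cell reads 3 must land on a list page whose record total
# is also 3; a non-numeric cell only needs to lead to a "(Summary)" page.
#
#     check_relationships(pod)   # raises if 3 != paginator.rec_total()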
@pytest.mark.parametrize('test_item', TEST_ITEMS,
ids=[ti.args[1].pretty_id() for ti in TEST_ITEMS])
def test_relationships_tables(provider, test_item):
"""This test verifies the integrity of the Relationships table.
clicking on each field in the Relationships table takes the user
to either Summary page where we verify that the field that appears
in the Relationships table also appears in the Properties table,
or to the page where the number of rows is equal to the number
that is displayed in the Relationships table.
"""
if current_version() < "5.7" and test_item.obj == Template:
        pytest.skip('Templates do not exist in CFME versions earlier than 5.7, skipping...')
if test_item.obj is ContainersProvider:
instance = provider
else:
rand_instances = test_item.obj.get_random_instances(provider, count=1)
if not rand_instances:
pytest.skip('Could not find instance of {} to test relationships.'
.format(test_item.obj.__class__.__name__))
instance = rand_instances.pop()
check_relationships(instance)
@pytest.mark.polarion('CMP-9934')
def test_container_status_relationships_data_integrity(provider, appliance, soft_assert):
""" This test verifies that the sum of running, waiting and terminated containers
in the status summary table
is the same number that appears in the Relationships table containers field
"""
for obj in Pod.get_random_instances(provider, count=3, appliance=appliance):
all_containers = obj.summary.relationships.containers.value
running = obj.summary.container_statuses_summary.running.value
waiting = obj.summary.container_statuses_summary.waiting.value
terminated = obj.summary.container_statuses_summary.terminated.value
soft_assert(
all_containers == sum([running, waiting, terminated])
)
| gpl-2.0 |
srikantbmandal/ansible | lib/ansible/modules/network/dellos6/dellos6_command.py | 46 | 7467 | #!/usr/bin/python
#
# (c) 2015 Peter Sprygada, <psprygada@ansible.com>
#
# Copyright (c) 2016 Dell Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = """
---
module: dellos6_command
version_added: "2.2"
author: "Abirami N (@abirami-n)"
short_description: Run commands on remote devices running Dell OS6
description:
- Sends arbitrary commands to a Dell OS6 node and returns the results
read from the device. This module includes an
argument that will cause the module to wait for a specific condition
before returning or timing out if the condition is not met.
- This module does not support running commands in configuration mode.
Please use M(dellos6_config) to configure Dell OS6 devices.
extends_documentation_fragment: dellos6
options:
commands:
description:
- List of commands to send to the remote dellos6 device over the
configured provider. The resulting output from the command
is returned. If the I(wait_for) argument is provided, the
module is not returned until the condition is satisfied or
        the number of I(retries) has expired.
required: true
wait_for:
description:
- List of conditions to evaluate against the output of the
command. The task will wait for each condition to be true
before moving forward. If the conditional is not true
within the configured number of I(retries), the task fails.
See examples.
required: false
default: null
retries:
description:
      - Specifies the number of times a command should be tried
        before it is considered failed. The command is run on the
target device every retry and evaluated against the
I(wait_for) conditions.
required: false
default: 10
interval:
description:
- Configures the interval in seconds to wait between retries
of the command. If the command does not pass the specified
conditions, the interval indicates how long to wait before
trying the command again.
required: false
default: 1
"""
EXAMPLES = """
# Note: examples below use the following provider dict to handle
# transport and authentication to the node.
vars:
cli:
host: "{{ inventory_hostname }}"
username: admin
password: admin
transport: cli
tasks:
- name: run show version on remote devices
dellos6_command:
commands: show version
provider: "{{ cli }}"
- name: run show version and check to see if output contains Dell
dellos6_command:
commands: show version
wait_for: result[0] contains Dell
provider: "{{ cli }}"
- name: run multiple commands on remote nodes
dellos6_command:
commands:
- show version
- show interfaces
provider: "{{ cli }}"
- name: run multiple commands and evaluate the output
dellos6_command:
commands:
- show version
- show interfaces
wait_for:
- result[0] contains Dell
- result[1] contains Access
provider: "{{ cli }}"
"""
RETURN = """
stdout:
description: The set of responses from the commands
returned: always apart from low level errors (such as action plugin)
type: list
sample: ['...', '...']
stdout_lines:
description: The value of stdout split into a list
returned: always apart from low level errors (such as action plugin)
type: list
sample: [['...', '...'], ['...'], ['...']]
failed_conditions:
description: The list of conditionals that have failed
returned: failed
type: list
sample: ['...', '...']
warnings:
description: The list of warnings (if any) generated by module based on arguments
returned: always
type: list
sample: ['...', '...']
"""
import time
from ansible.module_utils.dellos6 import run_commands
from ansible.module_utils.dellos6 import dellos6_argument_spec, check_args
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.network_common import ComplexList
from ansible.module_utils.netcli import Conditional
def to_lines(stdout):
for item in stdout:
if isinstance(item, basestring):
item = str(item).split('\n')
yield item
def parse_commands(module, warnings):
command = ComplexList(dict(
command=dict(key=True),
prompt=dict(),
answer=dict()
), module)
commands = command(module.params['commands'])
for index, item in enumerate(commands):
if module.check_mode and not item['command'].startswith('show'):
warnings.append(
'only show commands are supported when using check mode, not '
'executing `%s`' % item['command']
)
elif item['command'].startswith('conf'):
module.fail_json(
msg='dellos6_command does not support running config mode '
'commands. Please use dellos6_config instead'
)
return commands
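# Illustrative sketch (not part of the original module): ComplexList lets each
# entry of `commands` be either a bare string or a dict, so both of these
# (hypothetical) playbook fragments normalize to the same internal shape:
#
#     commands:
#       - show version
#
#     commands:
#       - command: 'copy running-config startup-config'
#         prompt: 'confirm'
#         answer: 'y'
#
# after which run_commands() can uniformly read item['command'],
# item['prompt'] and item['answer'] for every entry.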
def main():
"""main entry point for module execution
"""
argument_spec = dict(
# { command: <str>, prompt: <str>, response: <str> }
commands=dict(type='list', required=True),
wait_for=dict(type='list', aliases=['waitfor']),
match=dict(default='all', choices=['all', 'any']),
retries=dict(default=10, type='int'),
interval=dict(default=1, type='int')
)
argument_spec.update(dellos6_argument_spec)
module = AnsibleModule(argument_spec=argument_spec,
supports_check_mode=True)
result = {'changed': False}
warnings = list()
check_args(module, warnings)
commands = parse_commands(module, warnings)
result['warnings'] = warnings
wait_for = module.params['wait_for'] or list()
conditionals = [Conditional(c) for c in wait_for]
retries = module.params['retries']
interval = module.params['interval']
match = module.params['match']
while retries > 0:
responses = run_commands(module, commands)
for item in list(conditionals):
if item(responses):
if match == 'any':
conditionals = list()
break
conditionals.remove(item)
if not conditionals:
break
time.sleep(interval)
retries -= 1
if conditionals:
failed_conditions = [item.raw for item in conditionals]
        msg = 'One or more conditional statements have not been satisfied'
module.fail_json(msg=msg, failed_conditions=failed_conditions)
result = {
'changed': False,
'stdout': responses,
'stdout_lines': list(to_lines(responses))
}
module.exit_json(**result)
if __name__ == '__main__':
main()
| gpl-3.0 |
alex-quiterio/pychess | lib/pychess/Database/profilehooks.py | 21 | 24872 | """
Profiling hooks
This module contains a couple of decorators (`profile` and `coverage`) that
can be used to wrap functions and/or methods to produce profiles and line
coverage reports. There's a third convenient decorator (`timecall`) that
measures the duration of function execution without the extra profiling
overhead.
Usage example (Python 2.4 or newer)::
from profilehooks import profile, coverage
@profile # or @coverage
def fn(n):
if n < 2: return 1
else: return n * fn(n-1)
print(fn(42))
Usage example (Python 2.3 or older)::
from profilehooks import profile, coverage
def fn(n):
if n < 2: return 1
else: return n * fn(n-1)
# Now wrap that function in a decorator
fn = profile(fn) # or coverage(fn)
print fn(42)
Reports for all thusly decorated functions will be printed to sys.stdout
on program termination. You can alternatively request for immediate
reports for each call by passing immediate=True to the profile decorator.
There's also a @timecall decorator for printing the time to sys.stderr
every time a function is called, when you just want to get a rough measure
instead of a detailed (but costly) profile.
Caveats
A thread on python-dev convinced me that hotshot produces bogus numbers.
See http://mail.python.org/pipermail/python-dev/2005-November/058264.html
I don't know what will happen if a decorated function will try to call
another decorated function. All decorators probably need to explicitly
support nested profiling (currently TraceFuncCoverage is the only one
that supports this, while HotShotFuncProfile has support for recursive
functions.)
Profiling with hotshot creates temporary files (*.prof for profiling,
*.cprof for coverage) in the current directory. These files are not
cleaned up. Exception: when you specify a filename to the profile
decorator (to store the pstats.Stats object for later inspection),
the temporary file will be the filename you specified with '.raw'
appended at the end.
Coverage analysis with hotshot seems to miss some executions resulting
    in lower line counts and some lines erroneously marked as never
executed. For this reason coverage analysis now uses trace.py which is
slower, but more accurate.
Copyright (c) 2004--2012 Marius Gedminas <marius@pov.lt>
Copyright (c) 2007 Hanno Schlichting
Copyright (c) 2008 Florian Schulze
Released under the MIT licence since December 2006:
Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files (the "Software"),
to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.
(Previously it was distributed under the GNU General Public Licence.)
"""
from __future__ import print_function
__author__ = "Marius Gedminas (marius@gedmin.as)"
__copyright__ = "Copyright 2004-2012 Marius Gedminas"
__license__ = "MIT"
__version__ = "1.6"
__date__ = "2012-05-05"
import atexit
import inspect
import sys
import re
# For profiling
from profile import Profile
import pstats
# For hotshot profiling (inaccurate!)
try:
import hotshot
import hotshot.stats
except ImportError:
hotshot = None
# For trace.py coverage
import trace
# For hotshot coverage (inaccurate!; uses undocumented APIs; might break)
if hotshot is not None:
import _hotshot
import hotshot.log
# For cProfile profiling (best)
try:
import cProfile
except ImportError:
cProfile = None
# For timecall
import time
# registry of available profilers
AVAILABLE_PROFILERS = {}
def profile(fn=None, skip=0, filename=None, immediate=False, dirs=False,
sort=None, entries=40,
profiler=('cProfile', 'profile', 'hotshot')):
"""Mark `fn` for profiling.
If `skip` is > 0, first `skip` calls to `fn` will not be profiled.
If `immediate` is False, profiling results will be printed to
sys.stdout on program termination. Otherwise results will be printed
after each call.
If `dirs` is False only the name of the file will be printed.
Otherwise the full path is used.
`sort` can be a list of sort keys (defaulting to ['cumulative',
'time', 'calls']). The following ones are recognized::
'calls' -- call count
'cumulative' -- cumulative time
'file' -- file name
'line' -- line number
'module' -- file name
'name' -- function name
'nfl' -- name/file/line
'pcalls' -- call count
'stdname' -- standard name
'time' -- internal time
`entries` limits the output to the first N entries.
`profiler` can be used to select the preferred profiler, or specify a
    sequence of them, in order of preference. The default is ('cProfile',
    'profile', 'hotshot').
If `filename` is specified, the profile stats will be stored in the
    named file. You can load them with pstats.Stats(filename).
Usage::
def fn(...):
...
fn = profile(fn, skip=1)
If you are using Python 2.4, you should be able to use the decorator
syntax::
@profile(skip=3)
def fn(...):
...
or just ::
@profile
def fn(...):
...
"""
if fn is None: # @profile() syntax -- we are a decorator maker
def decorator(fn):
return profile(fn, skip=skip, filename=filename,
immediate=immediate, dirs=dirs,
sort=sort, entries=entries,
profiler=profiler)
return decorator
# @profile syntax -- we are a decorator.
if isinstance(profiler, str):
profiler = [profiler]
for p in profiler:
if p in AVAILABLE_PROFILERS:
profiler_class = AVAILABLE_PROFILERS[p]
break
else:
raise ValueError('only these profilers are available: %s'
% ', '.join(AVAILABLE_PROFILERS))
fp = profiler_class(fn, skip=skip, filename=filename,
immediate=immediate, dirs=dirs,
sort=sort, entries=entries)
# fp = HotShotFuncProfile(fn, skip=skip, filename=filename, ...)
# or HotShotFuncProfile
# We cannot return fp or fp.__call__ directly as that would break method
# definitions, instead we need to return a plain function.
def new_fn(*args, **kw):
return fp(*args, **kw)
new_fn.__doc__ = fn.__doc__
new_fn.__name__ = fn.__name__
new_fn.__dict__ = fn.__dict__
new_fn.__module__ = fn.__module__
return new_fn
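# Illustrative sketch (not part of the original module): when a filename is
# given, the saved stats can be inspected after the program exits, as the
# docstring above notes. Assuming a profiled function was declared as:
#
#     @profile(filename='fn.stats', immediate=False)
#     def fn(n):
#         return sum(range(n))
#
# a later session could load and rank the results with the standard library:
#
#     import pstats
#     pstats.Stats('fn.stats').sort_stats('cumulative').print_stats(10)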
def coverage(fn):
"""Mark `fn` for line coverage analysis.
Results will be printed to sys.stdout on program termination.
Usage::
def fn(...):
...
fn = coverage(fn)
If you are using Python 2.4, you should be able to use the decorator
syntax::
@coverage
def fn(...):
...
"""
fp = TraceFuncCoverage(fn) # or HotShotFuncCoverage
# We cannot return fp or fp.__call__ directly as that would break method
# definitions, instead we need to return a plain function.
def new_fn(*args, **kw):
return fp(*args, **kw)
new_fn.__doc__ = fn.__doc__
new_fn.__name__ = fn.__name__
new_fn.__dict__ = fn.__dict__
new_fn.__module__ = fn.__module__
return new_fn
def coverage_with_hotshot(fn):
"""Mark `fn` for line coverage analysis.
Uses the 'hotshot' module for fast coverage analysis.
BUG: Produces inaccurate results.
See the docstring of `coverage` for usage examples.
"""
fp = HotShotFuncCoverage(fn)
# We cannot return fp or fp.__call__ directly as that would break method
# definitions, instead we need to return a plain function.
def new_fn(*args, **kw):
return fp(*args, **kw)
new_fn.__doc__ = fn.__doc__
new_fn.__name__ = fn.__name__
new_fn.__dict__ = fn.__dict__
new_fn.__module__ = fn.__module__
return new_fn
class FuncProfile(object):
"""Profiler for a function (uses profile)."""
# This flag is shared between all instances
in_profiler = False
Profile = Profile
def __init__(self, fn, skip=0, filename=None, immediate=False, dirs=False,
sort=None, entries=40):
"""Creates a profiler for a function.
Every profiler has its own log file (the name of which is derived
from the function name).
FuncProfile registers an atexit handler that prints profiling
information to sys.stderr when the program terminates.
"""
self.fn = fn
self.skip = skip
self.filename = filename
self.immediate = immediate
self.dirs = dirs
self.sort = sort or ('cumulative', 'time', 'calls')
if isinstance(self.sort, str):
self.sort = (self.sort, )
self.entries = entries
self.reset_stats()
atexit.register(self.atexit)
def __call__(self, *args, **kw):
"""Profile a singe call to the function."""
self.ncalls += 1
if self.skip > 0:
self.skip -= 1
self.skipped += 1
return self.fn(*args, **kw)
if FuncProfile.in_profiler:
# handle recursive calls
return self.fn(*args, **kw)
# You cannot reuse the same profiler for many calls and accumulate
# stats that way. :-/
profiler = self.Profile()
try:
FuncProfile.in_profiler = True
return profiler.runcall(self.fn, *args, **kw)
finally:
FuncProfile.in_profiler = False
self.stats.add(profiler)
if self.immediate:
self.print_stats()
self.reset_stats()
def print_stats(self):
"""Print profile information to sys.stdout."""
funcname = self.fn.__name__
filename = self.fn.__code__.co_filename
lineno = self.fn.__code__.co_firstlineno
print("")
print("*** PROFILER RESULTS ***")
print(("%s (%s:%s)" % (funcname, filename, lineno)))
if self.skipped:
skipped = "(%d calls not profiled)" % self.skipped
else:
skipped = ""
print(("function called %d times%s" % (self.ncalls, skipped)))
print("")
stats = self.stats
if self.filename:
stats.dump_stats(self.filename)
if not self.dirs:
stats.strip_dirs()
stats.sort_stats(*self.sort)
stats.print_stats(self.entries)
def reset_stats(self):
"""Reset accumulated profiler statistics."""
# Note: not using self.Profile, since pstats.Stats() fails then
self.stats = pstats.Stats(Profile())
self.ncalls = 0
self.skipped = 0
def atexit(self):
"""Stop profiling and print profile information to sys.stdout.
This function is registered as an atexit hook.
"""
if not self.immediate:
self.print_stats()
AVAILABLE_PROFILERS['profile'] = FuncProfile
if cProfile is not None:
class CProfileFuncProfile(FuncProfile):
"""Profiler for a function (uses cProfile)."""
Profile = cProfile.Profile
AVAILABLE_PROFILERS['cProfile'] = CProfileFuncProfile
if hotshot is not None:
class HotShotFuncProfile(object):
"""Profiler for a function (uses hotshot)."""
# This flag is shared between all instances
in_profiler = False
def __init__(self, fn, skip=0, filename=None):
"""Creates a profiler for a function.
Every profiler has its own log file (the name of which is derived
from the function name).
HotShotFuncProfile registers an atexit handler that prints
profiling information to sys.stderr when the program terminates.
The log file is not removed and remains there to clutter the
current working directory.
"""
self.fn = fn
self.filename = filename
if self.filename:
self.logfilename = filename + ".raw"
else:
self.logfilename = fn.__name__ + ".prof"
self.profiler = hotshot.Profile(self.logfilename)
self.ncalls = 0
self.skip = skip
self.skipped = 0
atexit.register(self.atexit)
def __call__(self, *args, **kw):
"""Profile a singe call to the function."""
self.ncalls += 1
if self.skip > 0:
self.skip -= 1
self.skipped += 1
return self.fn(*args, **kw)
if HotShotFuncProfile.in_profiler:
# handle recursive calls
return self.fn(*args, **kw)
try:
HotShotFuncProfile.in_profiler = True
return self.profiler.runcall(self.fn, *args, **kw)
finally:
HotShotFuncProfile.in_profiler = False
def atexit(self):
"""Stop profiling and print profile information to sys.stderr.
This function is registered as an atexit hook.
"""
self.profiler.close()
funcname = self.fn.__name__
filename = self.fn.__code__.co_filename
lineno = self.fn.__code__.co_firstlineno
print("")
print("*** PROFILER RESULTS ***")
print(("%s (%s:%s)" % (funcname, filename, lineno)))
if self.skipped:
skipped = "(%d calls not profiled)" % self.skipped
else:
skipped = ""
print(("function called %d times%s" % (self.ncalls, skipped)))
print("")
stats = hotshot.stats.load(self.logfilename)
# hotshot.stats.load takes ages, and the .prof file eats megabytes, but
# a saved stats object is small and fast
if self.filename:
stats.dump_stats(self.filename)
# it is best to save before strip_dirs
stats.strip_dirs()
stats.sort_stats('cumulative', 'time', 'calls')
stats.print_stats(40)
AVAILABLE_PROFILERS['hotshot'] = HotShotFuncProfile
class HotShotFuncCoverage:
"""Coverage analysis for a function (uses _hotshot).
HotShot coverage is reportedly faster than trace.py, but it appears to
have problems with exceptions; also line counts in coverage reports
are generally lower from line counts produced by TraceFuncCoverage.
Is this my bug, or is it a problem with _hotshot?
"""
def __init__(self, fn):
"""Creates a profiler for a function.
Every profiler has its own log file (the name of which is derived
from the function name).
HotShotFuncCoverage registers an atexit handler that prints
profiling information to sys.stderr when the program terminates.
The log file is not removed and remains there to clutter the
current working directory.
"""
self.fn = fn
self.logfilename = fn.__name__ + ".cprof"
self.profiler = _hotshot.coverage(self.logfilename)
self.ncalls = 0
atexit.register(self.atexit)
def __call__(self, *args, **kw):
"""Profile a singe call to the function."""
self.ncalls += 1
return self.profiler.runcall(self.fn, args, kw)
def atexit(self):
"""Stop profiling and print profile information to sys.stderr.
This function is registered as an atexit hook.
"""
self.profiler.close()
funcname = self.fn.__name__
filename = self.fn.__code__.co_filename
lineno = self.fn.__code__.co_firstlineno
print("")
print("*** COVERAGE RESULTS ***")
print(("%s (%s:%s)" % (funcname, filename, lineno)))
print(("function called %d times" % self.ncalls))
print("")
fs = FuncSource(self.fn)
reader = hotshot.log.LogReader(self.logfilename)
for what, (filename, lineno, funcname), tdelta in reader:
if filename != fs.filename:
continue
if what == hotshot.log.LINE:
fs.mark(lineno)
if what == hotshot.log.ENTER:
# hotshot gives us the line number of the function definition
# and never gives us a LINE event for the first statement in
# a function, so if we didn't perform this mapping, the first
# statement would be marked as never executed
if lineno == fs.firstlineno:
lineno = fs.firstcodelineno
fs.mark(lineno)
reader.close()
print(fs)
class TraceFuncCoverage:
"""Coverage analysis for a function (uses trace module).
HotShot coverage analysis is reportedly faster, but it appears to have
problems with exceptions.
"""
# Shared between all instances so that nested calls work
tracer = trace.Trace(count=True, trace=False,
ignoredirs=[sys.prefix, sys.exec_prefix])
# This flag is also shared between all instances
tracing = False
def __init__(self, fn):
"""Creates a profiler for a function.
Every profiler has its own log file (the name of which is derived
from the function name).
TraceFuncCoverage registers an atexit handler that prints
profiling information to sys.stderr when the program terminates.
The log file is not removed and remains there to clutter the
current working directory.
"""
self.fn = fn
self.logfilename = fn.__name__ + ".cprof"
self.ncalls = 0
atexit.register(self.atexit)
def __call__(self, *args, **kw):
"""Profile a singe call to the function."""
self.ncalls += 1
if TraceFuncCoverage.tracing:
return self.fn(*args, **kw)
try:
TraceFuncCoverage.tracing = True
return self.tracer.runfunc(self.fn, *args, **kw)
finally:
TraceFuncCoverage.tracing = False
def atexit(self):
"""Stop profiling and print profile information to sys.stderr.
This function is registered as an atexit hook.
"""
funcname = self.fn.__name__
filename = self.fn.__code__.co_filename
lineno = self.fn.__code__.co_firstlineno
print("")
print("*** COVERAGE RESULTS ***")
print(("%s (%s:%s)" % (funcname, filename, lineno)))
print(("function called %d times" % self.ncalls))
print("")
fs = FuncSource(self.fn)
for (filename, lineno), count in self.tracer.counts.items():
if filename != fs.filename:
continue
fs.mark(lineno, count)
print(fs)
never_executed = fs.count_never_executed()
if never_executed:
print(("%d lines were not executed." % never_executed))
class FuncSource:
"""Source code annotator for a function."""
blank_rx = re.compile(r"^\s*finally:\s*(#.*)?$")
def __init__(self, fn):
self.fn = fn
self.filename = inspect.getsourcefile(fn)
self.source, self.firstlineno = inspect.getsourcelines(fn)
self.sourcelines = {}
self.firstcodelineno = self.firstlineno
self.find_source_lines()
def find_source_lines(self):
"""Mark all executable source lines in fn as executed 0 times."""
strs = trace.find_strings(self.filename)
lines = trace.find_lines_from_code(self.fn.__code__, strs)
self.firstcodelineno = sys.maxsize
for lineno in lines:
self.firstcodelineno = min(self.firstcodelineno, lineno)
self.sourcelines.setdefault(lineno, 0)
if self.firstcodelineno == sys.maxsize:
self.firstcodelineno = self.firstlineno
def mark(self, lineno, count=1):
"""Mark a given source line as executed count times.
Multiple calls to mark for the same lineno add up.
"""
self.sourcelines[lineno] = self.sourcelines.get(lineno, 0) + count
def count_never_executed(self):
"""Count statements that were never executed."""
lineno = self.firstlineno
counter = 0
for line in self.source:
if self.sourcelines.get(lineno) == 0:
if not self.blank_rx.match(line):
counter += 1
lineno += 1
return counter
def __str__(self):
"""Return annotated source code for the function."""
lines = []
lineno = self.firstlineno
for line in self.source:
counter = self.sourcelines.get(lineno)
if counter is None:
prefix = ' ' * 7
elif counter == 0:
if self.blank_rx.match(line):
prefix = ' ' * 7
else:
prefix = '>' * 6 + ' '
else:
prefix = '%5d: ' % counter
lines.append(prefix + line)
lineno += 1
return ''.join(lines)
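# Output sketch (illustrative): once lines have been marked, str(FuncSource(fn))
# yields annotated source, roughly:
#
#         5: def fn(x):
#         5:     if x > 0:
#     >>>>>>         return -x
#         5:     return x
#
# where counts prefix executed lines and '>>>>>>' flags lines never executed.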
def timecall(fn=None, immediate=True, timer=time.time):
"""Wrap `fn` and print its execution time.
Example::
@timecall
def somefunc(x, y):
time.sleep(x * y)
somefunc(2, 3)
will print the time taken by somefunc on every call. If you want just
a summary at program termination, use
@timecall(immediate=False)
You can also choose a timing method other than the default ``time.time()``,
e.g.:
@timecall(timer=time.clock)
"""
if fn is None: # @timecall() syntax -- we are a decorator maker
def decorator(fn):
return timecall(fn, immediate=immediate, timer=timer)
return decorator
# @timecall syntax -- we are a decorator.
fp = FuncTimer(fn, immediate=immediate, timer=timer)
# We cannot return fp or fp.__call__ directly as that would break method
# definitions, instead we need to return a plain function.
def new_fn(*args, **kw):
return fp(*args, **kw)
new_fn.__doc__ = fn.__doc__
new_fn.__name__ = fn.__name__
new_fn.__dict__ = fn.__dict__
new_fn.__module__ = fn.__module__
return new_fn
class FuncTimer(object):
def __init__(self, fn, immediate, timer):
self.fn = fn
self.ncalls = 0
self.totaltime = 0
self.immediate = immediate
self.timer = timer
if not immediate:
atexit.register(self.atexit)
def __call__(self, *args, **kw):
"""Profile a singe call to the function."""
fn = self.fn
timer = self.timer
self.ncalls += 1
try:
start = timer()
return fn(*args, **kw)
finally:
duration = timer() - start
self.totaltime += duration
if self.immediate:
funcname = fn.__name__
filename = fn.__code__.co_filename
lineno = fn.__code__.co_firstlineno
sys.stderr.write("\n %s (%s:%s):\n %.3f seconds\n\n" % (
funcname, filename, lineno, duration))
sys.stderr.flush()
def atexit(self):
if not self.ncalls:
return
funcname = self.fn.__name__
filename = self.fn.__code__.co_filename
lineno = self.fn.__code__.co_firstlineno
print(("\n %s (%s:%s):\n"
" %d calls, %.3f seconds (%.3f seconds per call)\n" % (
funcname, filename, lineno, self.ncalls,
self.totaltime, self.totaltime / self.ncalls)))
| gpl-3.0 |
Clyde-fare/scikit-learn | sklearn/cluster/spectral.py | 233 | 18153 | # -*- coding: utf-8 -*-
"""Algorithms for spectral clustering"""
# Author: Gael Varoquaux gael.varoquaux@normalesup.org
# Brian Cheung
# Wei LI <kuantkid@gmail.com>
# License: BSD 3 clause
import warnings
import numpy as np
from ..base import BaseEstimator, ClusterMixin
from ..utils import check_random_state, as_float_array
from ..utils.validation import check_array
from ..utils.extmath import norm
from ..metrics.pairwise import pairwise_kernels
from ..neighbors import kneighbors_graph
from ..manifold import spectral_embedding
from .k_means_ import k_means
def discretize(vectors, copy=True, max_svd_restarts=30, n_iter_max=20,
random_state=None):
"""Search for a partition matrix (clustering) which is closest to the
eigenvector embedding.
Parameters
----------
vectors : array-like, shape: (n_samples, n_clusters)
The embedding space of the samples.
copy : boolean, optional, default: True
Whether to copy vectors, or perform in-place normalization.
max_svd_restarts : int, optional, default: 30
Maximum number of attempts to restart SVD if convergence fails
    n_iter_max : int, optional, default: 20
Maximum number of iterations to attempt in rotation and partition
matrix search if machine precision convergence is not reached
random_state: int seed, RandomState instance, or None (default)
        A pseudo random number generator used for the initialization
        of the rotation matrix.
Returns
-------
labels : array of integers, shape: n_samples
The labels of the clusters.
References
----------
- Multiclass spectral clustering, 2003
Stella X. Yu, Jianbo Shi
http://www1.icsi.berkeley.edu/~stellayu/publication/doc/2003kwayICCV.pdf
Notes
-----
The eigenvector embedding is used to iteratively search for the
closest discrete partition. First, the eigenvector embedding is
normalized to the space of partition matrices. An optimal discrete
partition matrix closest to this normalized embedding multiplied by
an initial rotation is calculated. Fixing this discrete partition
matrix, an optimal rotation matrix is calculated. These two
calculations are performed until convergence. The discrete partition
matrix is returned as the clustering solution. Used in spectral
clustering, this method tends to be faster and more robust to random
initialization than k-means.
"""
from scipy.sparse import csc_matrix
from scipy.linalg import LinAlgError
random_state = check_random_state(random_state)
vectors = as_float_array(vectors, copy=copy)
eps = np.finfo(float).eps
n_samples, n_components = vectors.shape
# Normalize the eigenvectors to an equal length of a vector of ones.
# Reorient the eigenvectors to point in the negative direction with respect
# to the first element. This may have to do with constraining the
# eigenvectors to lie in a specific quadrant to make the discretization
# search easier.
norm_ones = np.sqrt(n_samples)
for i in range(vectors.shape[1]):
vectors[:, i] = (vectors[:, i] / norm(vectors[:, i])) \
* norm_ones
if vectors[0, i] != 0:
vectors[:, i] = -1 * vectors[:, i] * np.sign(vectors[0, i])
# Normalize the rows of the eigenvectors. Samples should lie on the unit
# hypersphere centered at the origin. This transforms the samples in the
# embedding space to the space of partition matrices.
vectors = vectors / np.sqrt((vectors ** 2).sum(axis=1))[:, np.newaxis]
svd_restarts = 0
has_converged = False
# If there is an exception we try to randomize and rerun SVD again
# do this max_svd_restarts times.
while (svd_restarts < max_svd_restarts) and not has_converged:
# Initialize first column of rotation matrix with a row of the
# eigenvectors
rotation = np.zeros((n_components, n_components))
rotation[:, 0] = vectors[random_state.randint(n_samples), :].T
# To initialize the rest of the rotation matrix, find the rows
# of the eigenvectors that are as orthogonal to each other as
# possible
c = np.zeros(n_samples)
for j in range(1, n_components):
# Accumulate c to ensure row is as orthogonal as possible to
# previous picks as well as current one
c += np.abs(np.dot(vectors, rotation[:, j - 1]))
rotation[:, j] = vectors[c.argmin(), :].T
last_objective_value = 0.0
n_iter = 0
while not has_converged:
n_iter += 1
t_discrete = np.dot(vectors, rotation)
labels = t_discrete.argmax(axis=1)
vectors_discrete = csc_matrix(
(np.ones(len(labels)), (np.arange(0, n_samples), labels)),
shape=(n_samples, n_components))
t_svd = vectors_discrete.T * vectors
            try:
                U, S, Vh = np.linalg.svd(t_svd)
            except LinAlgError:
                svd_restarts += 1
                print("SVD did not converge, randomizing and trying again")
                break
ncut_value = 2.0 * (n_samples - S.sum())
if ((abs(ncut_value - last_objective_value) < eps) or
(n_iter > n_iter_max)):
has_converged = True
else:
# otherwise calculate rotation and continue
last_objective_value = ncut_value
rotation = np.dot(Vh.T, U.T)
if not has_converged:
raise LinAlgError('SVD did not converge')
return labels
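# Usage sketch (illustrative, not part of the original module): `discretize`
# expects a spectral embedding such as the one computed inside
# `spectral_clustering` below, e.g.
#
#     maps = spectral_embedding(affinity, n_components=3, drop_first=False)
#     labels = discretize(maps, random_state=0)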
def spectral_clustering(affinity, n_clusters=8, n_components=None,
eigen_solver=None, random_state=None, n_init=10,
eigen_tol=0.0, assign_labels='kmeans'):
"""Apply clustering to a projection to the normalized laplacian.
In practice Spectral Clustering is very useful when the structure of
the individual clusters is highly non-convex or more generally when
a measure of the center and spread of the cluster is not a suitable
description of the complete cluster. For instance when clusters are
nested circles on the 2D plan.
If affinity is the adjacency matrix of a graph, this method can be
used to find normalized graph cuts.
Read more in the :ref:`User Guide <spectral_clustering>`.
Parameters
-----------
affinity : array-like or sparse matrix, shape: (n_samples, n_samples)
The affinity matrix describing the relationship of the samples to
embed. **Must be symmetric**.
Possible examples:
- adjacency matrix of a graph,
- heat kernel of the pairwise distance matrix of the samples,
- symmetric k-nearest neighbours connectivity matrix of the samples.
n_clusters : integer, optional
Number of clusters to extract.
n_components : integer, optional, default is n_clusters
Number of eigen vectors to use for the spectral embedding
eigen_solver : {None, 'arpack', 'lobpcg', or 'amg'}
The eigenvalue decomposition strategy to use. AMG requires pyamg
to be installed. It can be faster on very large, sparse problems,
but may also lead to instabilities
random_state : int seed, RandomState instance, or None (default)
A pseudo random number generator used for the initialization
of the lobpcg eigen vectors decomposition when eigen_solver == 'amg'
and by the K-Means initialization.
n_init : int, optional, default: 10
Number of time the k-means algorithm will be run with different
centroid seeds. The final results will be the best output of
n_init consecutive runs in terms of inertia.
eigen_tol : float, optional, default: 0.0
Stopping criterion for eigendecomposition of the Laplacian matrix
when using arpack eigen_solver.
assign_labels : {'kmeans', 'discretize'}, default: 'kmeans'
The strategy to use to assign labels in the embedding
space. There are two ways to assign labels after the laplacian
embedding. k-means can be applied and is a popular choice. But it can
also be sensitive to initialization. Discretization is another
approach which is less sensitive to random initialization. See
the 'Multiclass spectral clustering' paper referenced below for
more details on the discretization approach.
Returns
-------
labels : array of integers, shape: n_samples
The labels of the clusters.
References
----------
- Normalized cuts and image segmentation, 2000
Jianbo Shi, Jitendra Malik
http://citeseer.ist.psu.edu/viewdoc/summary?doi=10.1.1.160.2324
- A Tutorial on Spectral Clustering, 2007
Ulrike von Luxburg
http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.165.9323
- Multiclass spectral clustering, 2003
Stella X. Yu, Jianbo Shi
http://www1.icsi.berkeley.edu/~stellayu/publication/doc/2003kwayICCV.pdf
Notes
------
    The graph should contain only one connected component; otherwise
    the results make little sense.
This algorithm solves the normalized cut for k=2: it is a
normalized spectral clustering.
"""
if assign_labels not in ('kmeans', 'discretize'):
raise ValueError("The 'assign_labels' parameter should be "
"'kmeans' or 'discretize', but '%s' was given"
% assign_labels)
random_state = check_random_state(random_state)
n_components = n_clusters if n_components is None else n_components
maps = spectral_embedding(affinity, n_components=n_components,
eigen_solver=eigen_solver,
random_state=random_state,
eigen_tol=eigen_tol, drop_first=False)
if assign_labels == 'kmeans':
_, labels, _ = k_means(maps, n_clusters, random_state=random_state,
n_init=n_init)
else:
labels = discretize(maps, random_state=random_state)
return labels
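# Usage sketch (illustrative): clustering two nested circles from a dense RBF
# affinity built with standard scikit-learn helpers.
#
#     from sklearn.datasets import make_circles
#     from sklearn.metrics.pairwise import rbf_kernel
#     X, _ = make_circles(n_samples=200, factor=0.5, noise=0.05)
#     labels = spectral_clustering(rbf_kernel(X, gamma=10.), n_clusters=2,
#                                  random_state=0)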
class SpectralClustering(BaseEstimator, ClusterMixin):
"""Apply clustering to a projection to the normalized laplacian.
In practice Spectral Clustering is very useful when the structure of
the individual clusters is highly non-convex or more generally when
a measure of the center and spread of the cluster is not a suitable
description of the complete cluster. For instance when clusters are
nested circles on the 2D plan.
If affinity is the adjacency matrix of a graph, this method can be
used to find normalized graph cuts.
    When calling ``fit``, an affinity matrix is constructed using either
    a kernel function such as the Gaussian (aka RBF) kernel of the euclidean
    distance ``d(X, X)``::
np.exp(-gamma * d(X,X) ** 2)
or a k-nearest neighbors connectivity matrix.
Alternatively, using ``precomputed``, a user-provided affinity
matrix can be used.
Read more in the :ref:`User Guide <spectral_clustering>`.
Parameters
-----------
n_clusters : integer, optional
The dimension of the projection subspace.
affinity : string, array-like or callable, default 'rbf'
If a string, this may be one of 'nearest_neighbors', 'precomputed',
'rbf' or one of the kernels supported by
`sklearn.metrics.pairwise_kernels`.
Only kernels that produce similarity scores (non-negative values that
increase with similarity) should be used. This property is not checked
by the clustering algorithm.
gamma : float
Scaling factor of RBF, polynomial, exponential chi^2 and
sigmoid affinity kernel. Ignored for
``affinity='nearest_neighbors'``.
degree : float, default=3
Degree of the polynomial kernel. Ignored by other kernels.
coef0 : float, default=1
Zero coefficient for polynomial and sigmoid kernels.
Ignored by other kernels.
n_neighbors : integer
Number of neighbors to use when constructing the affinity matrix using
the nearest neighbors method. Ignored for ``affinity='rbf'``.
eigen_solver : {None, 'arpack', 'lobpcg', or 'amg'}
The eigenvalue decomposition strategy to use. AMG requires pyamg
to be installed. It can be faster on very large, sparse problems,
but may also lead to instabilities
random_state : int seed, RandomState instance, or None (default)
A pseudo random number generator used for the initialization
of the lobpcg eigen vectors decomposition when eigen_solver == 'amg'
and by the K-Means initialization.
n_init : int, optional, default: 10
Number of time the k-means algorithm will be run with different
centroid seeds. The final results will be the best output of
n_init consecutive runs in terms of inertia.
eigen_tol : float, optional, default: 0.0
Stopping criterion for eigendecomposition of the Laplacian matrix
when using arpack eigen_solver.
assign_labels : {'kmeans', 'discretize'}, default: 'kmeans'
The strategy to use to assign labels in the embedding
space. There are two ways to assign labels after the laplacian
embedding. k-means can be applied and is a popular choice. But it can
also be sensitive to initialization. Discretization is another approach
which is less sensitive to random initialization.
kernel_params : dictionary of string to any, optional
Parameters (keyword arguments) and values for kernel passed as
callable object. Ignored by other kernels.
Attributes
----------
affinity_matrix_ : array-like, shape (n_samples, n_samples)
        Affinity matrix used for clustering. Available only after calling
        ``fit``.
labels_ :
Labels of each point
Notes
-----
If you have an affinity matrix, such as a distance matrix,
    for which 0 means identical elements, and high values mean
    very dissimilar elements, it can be transformed into a
similarity matrix that is well suited for the algorithm by
applying the Gaussian (RBF, heat) kernel::
np.exp(- X ** 2 / (2. * delta ** 2))
Another alternative is to take a symmetric version of the k
nearest neighbors connectivity matrix of the points.
If the pyamg package is installed, it is used: this greatly
speeds up computation.
References
----------
- Normalized cuts and image segmentation, 2000
Jianbo Shi, Jitendra Malik
http://citeseer.ist.psu.edu/viewdoc/summary?doi=10.1.1.160.2324
- A Tutorial on Spectral Clustering, 2007
Ulrike von Luxburg
http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.165.9323
- Multiclass spectral clustering, 2003
Stella X. Yu, Jianbo Shi
http://www1.icsi.berkeley.edu/~stellayu/publication/doc/2003kwayICCV.pdf
"""
def __init__(self, n_clusters=8, eigen_solver=None, random_state=None,
n_init=10, gamma=1., affinity='rbf', n_neighbors=10,
eigen_tol=0.0, assign_labels='kmeans', degree=3, coef0=1,
kernel_params=None):
self.n_clusters = n_clusters
self.eigen_solver = eigen_solver
self.random_state = random_state
self.n_init = n_init
self.gamma = gamma
self.affinity = affinity
self.n_neighbors = n_neighbors
self.eigen_tol = eigen_tol
self.assign_labels = assign_labels
self.degree = degree
self.coef0 = coef0
self.kernel_params = kernel_params
def fit(self, X, y=None):
"""Creates an affinity matrix for X using the selected affinity,
then applies spectral clustering to this affinity matrix.
Parameters
----------
X : array-like or sparse matrix, shape (n_samples, n_features)
OR, if affinity==`precomputed`, a precomputed affinity
matrix of shape (n_samples, n_samples)
"""
X = check_array(X, accept_sparse=['csr', 'csc', 'coo'],
dtype=np.float64)
if X.shape[0] == X.shape[1] and self.affinity != "precomputed":
warnings.warn("The spectral clustering API has changed. ``fit``"
"now constructs an affinity matrix from data. To use"
" a custom affinity matrix, "
"set ``affinity=precomputed``.")
if self.affinity == 'nearest_neighbors':
connectivity = kneighbors_graph(X, n_neighbors=self.n_neighbors, include_self=True)
self.affinity_matrix_ = 0.5 * (connectivity + connectivity.T)
elif self.affinity == 'precomputed':
self.affinity_matrix_ = X
else:
params = self.kernel_params
if params is None:
params = {}
if not callable(self.affinity):
params['gamma'] = self.gamma
params['degree'] = self.degree
params['coef0'] = self.coef0
self.affinity_matrix_ = pairwise_kernels(X, metric=self.affinity,
filter_params=True,
**params)
random_state = check_random_state(self.random_state)
self.labels_ = spectral_clustering(self.affinity_matrix_,
n_clusters=self.n_clusters,
eigen_solver=self.eigen_solver,
random_state=random_state,
n_init=self.n_init,
eigen_tol=self.eigen_tol,
assign_labels=self.assign_labels)
return self
@property
def _pairwise(self):
return self.affinity == "precomputed"
| bsd-3-clause |
samthor/intellij-community | python/lib/Lib/encodings/utf_8_sig.py | 181 | 3585 | """ Python 'utf-8-sig' Codec
This works like UTF-8 with the following changes:
* On encoding/writing a UTF-8 encoded BOM will be prepended/written as the
first three bytes.
* On decoding/reading if the first three bytes are a UTF-8 encoded BOM, these
bytes will be skipped.
"""
import codecs
### Codec APIs
def encode(input, errors='strict'):
return (codecs.BOM_UTF8 + codecs.utf_8_encode(input, errors)[0], len(input))
def decode(input, errors='strict'):
prefix = 0
if input[:3] == codecs.BOM_UTF8:
input = input[3:]
prefix = 3
(output, consumed) = codecs.utf_8_decode(input, errors, True)
return (output, consumed+prefix)
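# Round-trip sketch (illustrative; assumes the Python 2 semantics used in this
# module):
#
#     >>> encode(u"abc")[0].startswith(codecs.BOM_UTF8)
#     True
#     >>> decode(codecs.BOM_UTF8 + "abc")
#     (u'abc', 6)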
class IncrementalEncoder(codecs.IncrementalEncoder):
def __init__(self, errors='strict'):
codecs.IncrementalEncoder.__init__(self, errors)
self.first = True
def encode(self, input, final=False):
if self.first:
self.first = False
return codecs.BOM_UTF8 + codecs.utf_8_encode(input, self.errors)[0]
else:
return codecs.utf_8_encode(input, self.errors)[0]
def reset(self):
codecs.IncrementalEncoder.reset(self)
self.first = True
class IncrementalDecoder(codecs.BufferedIncrementalDecoder):
def __init__(self, errors='strict'):
codecs.BufferedIncrementalDecoder.__init__(self, errors)
self.first = True
def _buffer_decode(self, input, errors, final):
if self.first:
if len(input) < 3:
if codecs.BOM_UTF8.startswith(input):
# not enough data to decide if this really is a BOM
# => try again on the next call
return (u"", 0)
else:
self.first = None
else:
self.first = None
if input[:3] == codecs.BOM_UTF8:
(output, consumed) = codecs.utf_8_decode(input[3:], errors, final)
return (output, consumed+3)
return codecs.utf_8_decode(input, errors, final)
def reset(self):
codecs.BufferedIncrementalDecoder.reset(self)
self.first = True
class StreamWriter(codecs.StreamWriter):
def reset(self):
codecs.StreamWriter.reset(self)
try:
del self.encode
except AttributeError:
pass
def encode(self, input, errors='strict'):
self.encode = codecs.utf_8_encode
return encode(input, errors)
class StreamReader(codecs.StreamReader):
def reset(self):
codecs.StreamReader.reset(self)
try:
del self.decode
except AttributeError:
pass
def decode(self, input, errors='strict'):
if len(input) < 3:
if codecs.BOM_UTF8.startswith(input):
# not enough data to decide if this is a BOM
# => try again on the next call
return (u"", 0)
elif input[:3] == codecs.BOM_UTF8:
self.decode = codecs.utf_8_decode
(output, consumed) = codecs.utf_8_decode(input[3:],errors)
return (output, consumed+3)
# (else) no BOM present
self.decode = codecs.utf_8_decode
return codecs.utf_8_decode(input, errors)
### encodings module API
def getregentry():
return codecs.CodecInfo(
name='utf-8-sig',
encode=encode,
decode=decode,
incrementalencoder=IncrementalEncoder,
incrementaldecoder=IncrementalDecoder,
streamreader=StreamReader,
streamwriter=StreamWriter,
)
| apache-2.0 |
adaur/SickRage | lib/github/CommitComment.py | 72 | 6837 | # -*- coding: utf-8 -*-
# ########################## Copyrights and license ############################
# #
# Copyright 2012 Vincent Jacques <vincent@vincent-jacques.net> #
# Copyright 2012 Zearin <zearin@gonk.net> #
# Copyright 2013 AKFish <akfish@gmail.com> #
# Copyright 2013 Vincent Jacques <vincent@vincent-jacques.net> #
# #
# This file is part of PyGithub. http://jacquev6.github.com/PyGithub/ #
# #
# PyGithub is free software: you can redistribute it and/or modify it under #
# the terms of the GNU Lesser General Public License as published by the Free #
# Software Foundation, either version 3 of the License, or (at your option) #
# any later version. #
# #
# PyGithub is distributed in the hope that it will be useful, but WITHOUT ANY #
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS #
# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more #
# details. #
# #
# You should have received a copy of the GNU Lesser General Public License #
# along with PyGithub. If not, see <http://www.gnu.org/licenses/>. #
# #
# ##############################################################################
import github.GithubObject
import github.NamedUser
class CommitComment(github.GithubObject.CompletableGithubObject):
"""
This class represents CommitComments as returned for example by http://developer.github.com/v3/todo
"""
@property
def body(self):
"""
:type: string
"""
self._completeIfNotSet(self._body)
return self._body.value
@property
def commit_id(self):
"""
:type: string
"""
self._completeIfNotSet(self._commit_id)
return self._commit_id.value
@property
def created_at(self):
"""
:type: datetime.datetime
"""
self._completeIfNotSet(self._created_at)
return self._created_at.value
@property
def html_url(self):
"""
:type: string
"""
self._completeIfNotSet(self._html_url)
return self._html_url.value
@property
def id(self):
"""
:type: integer
"""
self._completeIfNotSet(self._id)
return self._id.value
@property
def line(self):
"""
:type: integer
"""
self._completeIfNotSet(self._line)
return self._line.value
@property
def path(self):
"""
:type: string
"""
self._completeIfNotSet(self._path)
return self._path.value
@property
def position(self):
"""
:type: integer
"""
self._completeIfNotSet(self._position)
return self._position.value
@property
def updated_at(self):
"""
:type: datetime.datetime
"""
self._completeIfNotSet(self._updated_at)
return self._updated_at.value
@property
def url(self):
"""
:type: string
"""
self._completeIfNotSet(self._url)
return self._url.value
@property
def user(self):
"""
:type: :class:`github.NamedUser.NamedUser`
"""
self._completeIfNotSet(self._user)
return self._user.value
def delete(self):
"""
:calls: `DELETE /repos/:owner/:repo/comments/:id <http://developer.github.com/v3/repos/comments>`_
:rtype: None
"""
headers, data = self._requester.requestJsonAndCheck(
"DELETE",
self.url
)
def edit(self, body):
"""
:calls: `PATCH /repos/:owner/:repo/comments/:id <http://developer.github.com/v3/repos/comments>`_
:param body: string
:rtype: None
"""
assert isinstance(body, (str, unicode)), body
post_parameters = {
"body": body,
}
headers, data = self._requester.requestJsonAndCheck(
"PATCH",
self.url,
input=post_parameters
)
self._useAttributes(data)
def _initAttributes(self):
self._body = github.GithubObject.NotSet
self._commit_id = github.GithubObject.NotSet
self._created_at = github.GithubObject.NotSet
self._html_url = github.GithubObject.NotSet
self._id = github.GithubObject.NotSet
self._line = github.GithubObject.NotSet
self._path = github.GithubObject.NotSet
self._position = github.GithubObject.NotSet
self._updated_at = github.GithubObject.NotSet
self._url = github.GithubObject.NotSet
self._user = github.GithubObject.NotSet
def _useAttributes(self, attributes):
if "body" in attributes: # pragma no branch
self._body = self._makeStringAttribute(attributes["body"])
if "commit_id" in attributes: # pragma no branch
self._commit_id = self._makeStringAttribute(attributes["commit_id"])
if "created_at" in attributes: # pragma no branch
self._created_at = self._makeDatetimeAttribute(attributes["created_at"])
if "html_url" in attributes: # pragma no branch
self._html_url = self._makeStringAttribute(attributes["html_url"])
if "id" in attributes: # pragma no branch
self._id = self._makeIntAttribute(attributes["id"])
if "line" in attributes: # pragma no branch
self._line = self._makeIntAttribute(attributes["line"])
if "path" in attributes: # pragma no branch
self._path = self._makeStringAttribute(attributes["path"])
if "position" in attributes: # pragma no branch
self._position = self._makeIntAttribute(attributes["position"])
if "updated_at" in attributes: # pragma no branch
self._updated_at = self._makeDatetimeAttribute(attributes["updated_at"])
if "url" in attributes: # pragma no branch
self._url = self._makeStringAttribute(attributes["url"])
if "user" in attributes: # pragma no branch
self._user = self._makeClassAttribute(github.NamedUser.NamedUser, attributes["user"])
| gpl-3.0 |
ionelmc/pytest | _pytest/nose.py | 228 | 2527 | """ run test suites written for nose. """
import sys
import py
import pytest
from _pytest import unittest
def get_skip_exceptions():
skip_classes = set()
for module_name in ('unittest', 'unittest2', 'nose'):
mod = sys.modules.get(module_name)
if hasattr(mod, 'SkipTest'):
skip_classes.add(mod.SkipTest)
return tuple(skip_classes)
def pytest_runtest_makereport(item, call):
if call.excinfo and call.excinfo.errisinstance(get_skip_exceptions()):
# let's substitute the excinfo with a pytest.skip one
call2 = call.__class__(lambda:
pytest.skip(str(call.excinfo.value)), call.when)
call.excinfo = call2.excinfo
@pytest.hookimpl(trylast=True)
def pytest_runtest_setup(item):
if is_potential_nosetest(item):
if isinstance(item.parent, pytest.Generator):
gen = item.parent
if not hasattr(gen, '_nosegensetup'):
call_optional(gen.obj, 'setup')
if isinstance(gen.parent, pytest.Instance):
call_optional(gen.parent.obj, 'setup')
gen._nosegensetup = True
if not call_optional(item.obj, 'setup'):
# call module level setup if there is no object level one
call_optional(item.parent.obj, 'setup')
#XXX this implies we only call teardown when setup worked
item.session._setupstate.addfinalizer((lambda: teardown_nose(item)), item)
def teardown_nose(item):
if is_potential_nosetest(item):
if not call_optional(item.obj, 'teardown'):
call_optional(item.parent.obj, 'teardown')
#if hasattr(item.parent, '_nosegensetup'):
# #call_optional(item._nosegensetup, 'teardown')
# del item.parent._nosegensetup
def pytest_make_collect_report(collector):
if isinstance(collector, pytest.Generator):
call_optional(collector.obj, 'setup')
def is_potential_nosetest(item):
# extra check needed since we do not do nose style setup/teardown
# on direct unittest style classes
return isinstance(item, pytest.Function) and \
not isinstance(item, unittest.TestCaseFunction)
def call_optional(obj, name):
method = getattr(obj, name, None)
isfixture = hasattr(method, "_pytestfixturefunction")
if method is not None and not isfixture and py.builtin.callable(method):
# If there's any problems allow the exception to raise rather than
# silently ignoring them
method()
return True
| mit |
thinkAmi-sandbox/Django_generic_view_sample | Django_generic_view_sample/settings.py | 1 | 3309 | """
Django settings for Django_generic_view_sample project.
Generated by 'django-admin startproject' using Django 1.9.
For more information on this file, see
https://docs.djangoproject.com/en/1.9/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.9/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.9/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '=es9x)-vyx5p9^v@#6@$(_!p4gmrkx#a4%o6a9zap7n-x&eiqm'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'apps.goal',
'apps.create_view',
]
MIDDLEWARE_CLASSES = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'Django_generic_view_sample.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [os.path.join(BASE_DIR, 'templates')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'Django_generic_view_sample.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.9/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/1.9/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.9/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.9/howto/static-files/
STATIC_URL = '/static/'
| unlicense |
tpounds/ansible-modules-core | cloud/openstack/os_subnets_facts.py | 25 | 4664 | #!/usr/bin/python
# Copyright (c) 2015 Hewlett-Packard Development Company, L.P.
#
# This module is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This software is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this software. If not, see <http://www.gnu.org/licenses/>.
try:
import shade
HAS_SHADE = True
except ImportError:
HAS_SHADE = False
DOCUMENTATION = '''
---
module: os_subnets_facts
short_description: Retrieve facts about one or more OpenStack subnets.
version_added: "2.0"
author: "Davide Agnello (@dagnello)"
description:
- Retrieve facts about one or more subnets from OpenStack.
requirements:
- "python >= 2.6"
- "shade"
options:
subnet:
description:
- Name or ID of the subnet
required: false
filters:
description:
- A dictionary of meta data to use for further filtering. Elements of
this dictionary may be additional dictionaries.
required: false
extends_documentation_fragment: openstack
'''
EXAMPLES = '''
# Gather facts about previously created subnets
- os_subnets_facts:
auth:
auth_url: https://your_api_url.com:9000/v2.0
username: user
password: password
project_name: someproject
- debug: var=openstack_subnets
# Gather facts about a previously created subnet by name
- os_subnets_facts:
auth:
auth_url: https://your_api_url.com:9000/v2.0
username: user
password: password
project_name: someproject
name: subnet1
- debug: var=openstack_subnets
# Gather facts about a previously created subnet with filter
# (note: name and filters parameters are not mutually exclusive)
- os_subnets_facts:
auth:
auth_url: https://your_api_url.com:9000/v2.0
username: user
password: password
project_name: someproject
filters:
tenant_id: 55e2ce24b2a245b09f181bf025724cbe
- debug: var=openstack_subnets
'''
RETURN = '''
openstack_subnets:
description: has all the openstack facts about the subnets
returned: always, but can be null
type: complex
contains:
id:
description: Unique UUID.
returned: success
type: string
name:
description: Name given to the subnet.
returned: success
type: string
network_id:
description: Network ID this subnet belongs in.
returned: success
type: string
cidr:
description: Subnet's CIDR.
returned: success
type: string
gateway_ip:
description: Subnet's gateway ip.
returned: success
type: string
enable_dhcp:
description: DHCP enable flag for this subnet.
returned: success
type: bool
ip_version:
description: IP version for this subnet.
returned: success
type: int
tenant_id:
description: Tenant id associated with this subnet.
returned: success
type: string
dns_nameservers:
description: DNS name servers for this subnet.
returned: success
type: list of strings
allocation_pools:
description: Allocation pools associated with this subnet.
returned: success
type: list of dicts
'''
def main():
argument_spec = openstack_full_argument_spec(
name=dict(required=False, default=None),
filters=dict(required=False, default=None)
)
module = AnsibleModule(argument_spec)
if not HAS_SHADE:
module.fail_json(msg='shade is required for this module')
try:
cloud = shade.openstack_cloud(**module.params)
subnets = cloud.search_subnets(module.params['name'],
module.params['filters'])
module.exit_json(changed=False, ansible_facts=dict(
openstack_subnets=subnets))
except shade.OpenStackCloudException as e:
module.fail_json(msg=e.message)
# this is magic, see lib/ansible/module_common.py
from ansible.module_utils.basic import *
from ansible.module_utils.openstack import *
if __name__ == '__main__':
main()
| gpl-3.0 |
Cinntax/home-assistant | homeassistant/components/mysensors/gateway.py | 3 | 8194 | """Handle MySensors gateways."""
import asyncio
from collections import defaultdict
import logging
import socket
import sys
import async_timeout
import voluptuous as vol
from homeassistant.const import CONF_OPTIMISTIC, EVENT_HOMEASSISTANT_STOP
from homeassistant.core import callback
import homeassistant.helpers.config_validation as cv
from homeassistant.setup import async_setup_component
from .const import (
CONF_BAUD_RATE,
CONF_DEVICE,
CONF_GATEWAYS,
CONF_NODES,
CONF_PERSISTENCE,
CONF_PERSISTENCE_FILE,
CONF_RETAIN,
CONF_TCP_PORT,
CONF_TOPIC_IN_PREFIX,
CONF_TOPIC_OUT_PREFIX,
CONF_VERSION,
DOMAIN,
MYSENSORS_GATEWAY_READY,
MYSENSORS_GATEWAYS,
)
from .handler import HANDLERS
from .helpers import discover_mysensors_platform, validate_child, validate_node
_LOGGER = logging.getLogger(__name__)
GATEWAY_READY_TIMEOUT = 15.0
MQTT_COMPONENT = "mqtt"
def is_serial_port(value):
"""Validate that value is a windows serial port or a unix device."""
if sys.platform.startswith("win"):
ports = ("COM{}".format(idx + 1) for idx in range(256))
if value in ports:
return value
raise vol.Invalid(f"{value} is not a serial port")
return cv.isdevice(value)
def is_socket_address(value):
"""Validate that value is a valid address."""
try:
socket.getaddrinfo(value, None)
return value
except OSError:
raise vol.Invalid("Device is not a valid domain name or ip address")
def get_mysensors_gateway(hass, gateway_id):
"""Return MySensors gateway."""
if MYSENSORS_GATEWAYS not in hass.data:
hass.data[MYSENSORS_GATEWAYS] = {}
gateways = hass.data.get(MYSENSORS_GATEWAYS)
return gateways.get(gateway_id)
async def setup_gateways(hass, config):
"""Set up all gateways."""
conf = config[DOMAIN]
gateways = {}
for index, gateway_conf in enumerate(conf[CONF_GATEWAYS]):
persistence_file = gateway_conf.get(
CONF_PERSISTENCE_FILE,
hass.config.path("mysensors{}.pickle".format(index + 1)),
)
ready_gateway = await _get_gateway(hass, config, gateway_conf, persistence_file)
if ready_gateway is not None:
gateways[id(ready_gateway)] = ready_gateway
return gateways
async def _get_gateway(hass, config, gateway_conf, persistence_file):
"""Return gateway after setup of the gateway."""
from mysensors import mysensors
conf = config[DOMAIN]
persistence = conf[CONF_PERSISTENCE]
version = conf[CONF_VERSION]
device = gateway_conf[CONF_DEVICE]
baud_rate = gateway_conf[CONF_BAUD_RATE]
tcp_port = gateway_conf[CONF_TCP_PORT]
in_prefix = gateway_conf.get(CONF_TOPIC_IN_PREFIX, "")
out_prefix = gateway_conf.get(CONF_TOPIC_OUT_PREFIX, "")
if device == MQTT_COMPONENT:
if not await async_setup_component(hass, MQTT_COMPONENT, config):
return None
mqtt = hass.components.mqtt
retain = conf[CONF_RETAIN]
def pub_callback(topic, payload, qos, retain):
"""Call MQTT publish function."""
mqtt.async_publish(topic, payload, qos, retain)
def sub_callback(topic, sub_cb, qos):
"""Call MQTT subscribe function."""
@callback
def internal_callback(msg):
"""Call callback."""
sub_cb(msg.topic, msg.payload, msg.qos)
hass.async_create_task(mqtt.async_subscribe(topic, internal_callback, qos))
gateway = mysensors.AsyncMQTTGateway(
pub_callback,
sub_callback,
in_prefix=in_prefix,
out_prefix=out_prefix,
retain=retain,
loop=hass.loop,
event_callback=None,
persistence=persistence,
persistence_file=persistence_file,
protocol_version=version,
)
else:
try:
await hass.async_add_job(is_serial_port, device)
gateway = mysensors.AsyncSerialGateway(
device,
baud=baud_rate,
loop=hass.loop,
event_callback=None,
persistence=persistence,
persistence_file=persistence_file,
protocol_version=version,
)
except vol.Invalid:
try:
await hass.async_add_job(is_socket_address, device)
# valid ip address
gateway = mysensors.AsyncTCPGateway(
device,
port=tcp_port,
loop=hass.loop,
event_callback=None,
persistence=persistence,
persistence_file=persistence_file,
protocol_version=version,
)
except vol.Invalid:
# invalid ip address
return None
gateway.metric = hass.config.units.is_metric
gateway.optimistic = conf[CONF_OPTIMISTIC]
gateway.device = device
gateway.event_callback = _gw_callback_factory(hass, config)
gateway.nodes_config = gateway_conf[CONF_NODES]
if persistence:
await gateway.start_persistence()
return gateway
async def finish_setup(hass, hass_config, gateways):
"""Load any persistent devices and platforms and start gateway."""
discover_tasks = []
start_tasks = []
for gateway in gateways.values():
discover_tasks.append(_discover_persistent_devices(hass, hass_config, gateway))
start_tasks.append(_gw_start(hass, gateway))
if discover_tasks:
# Make sure all devices and platforms are loaded before gateway start.
await asyncio.wait(discover_tasks)
if start_tasks:
await asyncio.wait(start_tasks)
async def _discover_persistent_devices(hass, hass_config, gateway):
"""Discover platforms for devices loaded via persistence file."""
tasks = []
new_devices = defaultdict(list)
for node_id in gateway.sensors:
if not validate_node(gateway, node_id):
continue
node = gateway.sensors[node_id]
for child in node.children.values():
validated = validate_child(gateway, node_id, child)
for platform, dev_ids in validated.items():
new_devices[platform].extend(dev_ids)
for platform, dev_ids in new_devices.items():
tasks.append(discover_mysensors_platform(hass, hass_config, platform, dev_ids))
if tasks:
await asyncio.wait(tasks)
async def _gw_start(hass, gateway):
"""Start the gateway."""
# Don't use hass.async_create_task to avoid holding up setup indefinitely.
connect_task = hass.loop.create_task(gateway.start())
@callback
def gw_stop(event):
"""Trigger to stop the gateway."""
hass.async_create_task(gateway.stop())
if not connect_task.done():
connect_task.cancel()
hass.bus.async_listen_once(EVENT_HOMEASSISTANT_STOP, gw_stop)
if gateway.device == "mqtt":
        # Gateways connected via MQTT don't send a gateway ready message.
return
gateway_ready = asyncio.Future()
gateway_ready_key = MYSENSORS_GATEWAY_READY.format(id(gateway))
hass.data[gateway_ready_key] = gateway_ready
try:
with async_timeout.timeout(GATEWAY_READY_TIMEOUT):
await gateway_ready
except asyncio.TimeoutError:
_LOGGER.warning(
"Gateway %s not ready after %s secs so continuing with setup",
gateway.device,
GATEWAY_READY_TIMEOUT,
)
finally:
hass.data.pop(gateway_ready_key, None)
def _gw_callback_factory(hass, hass_config):
"""Return a new callback for the gateway."""
@callback
def mysensors_callback(msg):
"""Handle messages from a MySensors gateway."""
_LOGGER.debug("Node update: node %s child %s", msg.node_id, msg.child_id)
msg_type = msg.gateway.const.MessageType(msg.type)
msg_handler = HANDLERS.get(msg_type.name)
if msg_handler is None:
return
hass.async_create_task(msg_handler(hass, hass_config, msg))
return mysensors_callback
| apache-2.0 |
t3dev/odoo | odoo/osv/orm.py | 1 | 6513 | import json
import warnings
from lxml import etree
from ..exceptions import except_orm
from ..models import (
MetaModel,
BaseModel,
Model, TransientModel, AbstractModel,
MAGIC_COLUMNS,
LOG_ACCESS_COLUMNS,
)
from odoo.tools.safe_eval import safe_eval
# extra definitions for backward compatibility
browse_record_list = BaseModel
class BRM(type):
def __instancecheck__(self, inst):
warnings.warn(DeprecationWarning(
"browse_record is a deprecated concept and should not be used "
"anymore, you can replace `isinstance(o, browse_record)` by "
"`isinstance(o, BaseModel)`"
))
return isinstance(inst, BaseModel) and len(inst) <= 1
browse_record = BRM('browse_record', (object,), {})
class NBM(type):
def __instancecheck__(self, inst):
warnings.warn(DeprecationWarning(
"browse_record is a deprecated concept and should not be used "
"anymore, you can replace `isinstance(o, browse_null)` by "
"`isinstance(o, BaseModel) and not o`"
))
return isinstance(inst, BaseModel) and not inst
browse_null = NBM('browse_null', (object,), {})
def transfer_field_to_modifiers(field, modifiers):
default_values = {}
state_exceptions = {}
for attr in ('invisible', 'readonly', 'required'):
state_exceptions[attr] = []
default_values[attr] = bool(field.get(attr))
for state, modifs in field.get("states",{}).items():
for modif in modifs:
if default_values[modif[0]] != modif[1]:
state_exceptions[modif[0]].append(state)
for attr, default_value in default_values.items():
if state_exceptions[attr]:
modifiers[attr] = [("state", "not in" if default_value else "in", state_exceptions[attr])]
else:
modifiers[attr] = default_value
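# Illustrative call (a sketch): given a fields_get() descriptor with a state
# exception,
#
#     m = {}
#     transfer_field_to_modifiers(
#         {'readonly': True, 'states': {'draft': [('readonly', False)]}}, m)
#     # m['readonly'] is now [('state', 'not in', ['draft'])]; attributes
#     # without state exceptions keep their plain boolean defaults.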
# Don't deal with groups, it is done by check_group().
# Need the context to evaluate the invisible attribute on tree views.
# For non-tree views, the context shouldn't be given.
def transfer_node_to_modifiers(node, modifiers, context=None, in_tree_view=False):
if node.get('attrs'):
modifiers.update(safe_eval(node.get('attrs')))
if node.get('states'):
if 'invisible' in modifiers and isinstance(modifiers['invisible'], list):
# TODO combine with AND or OR, use implicit AND for now.
modifiers['invisible'].append(('state', 'not in', node.get('states').split(',')))
else:
modifiers['invisible'] = [('state', 'not in', node.get('states').split(','))]
for a in ('invisible', 'readonly', 'required'):
if node.get(a):
v = bool(safe_eval(node.get(a), {'context': context or {}}))
if in_tree_view and a == 'invisible':
# Invisible in a tree view has a specific meaning, make it a
# new key in the modifiers attribute.
modifiers['column_invisible'] = v
elif v or (a not in modifiers or not isinstance(modifiers[a], list)):
# Don't set the attribute to False if a dynamic value was
# provided (i.e. a domain from attrs or states).
modifiers[a] = v
def simplify_modifiers(modifiers):
for a in ('invisible', 'readonly', 'required'):
if a in modifiers and not modifiers[a]:
del modifiers[a]
def transfer_modifiers_to_node(modifiers, node):
if modifiers:
simplify_modifiers(modifiers)
node.set('modifiers', json.dumps(modifiers))
def setup_modifiers(node, field=None, context=None, in_tree_view=False):
""" Processes node attributes and field descriptors to generate
the ``modifiers`` node attribute and set it on the provided node.
Alters its first argument in-place.
:param node: ``field`` node from an OpenERP view
:type node: lxml.etree._Element
:param dict field: field descriptor corresponding to the provided node
:param dict context: execution context used to evaluate node attributes
:param bool in_tree_view: triggers the ``column_invisible`` code
path (separate from ``invisible``): in
tree view there are two levels of
invisibility, cell content (a column is
present but the cell itself is not
displayed) with ``invisible`` and column
invisibility (the whole column is
hidden) with ``column_invisible``.
:returns: nothing
"""
modifiers = {}
if field is not None:
transfer_field_to_modifiers(field, modifiers)
transfer_node_to_modifiers(
node, modifiers, context=context, in_tree_view=in_tree_view)
transfer_modifiers_to_node(modifiers, node)
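# Illustrative call (a sketch): given an lxml <field> node and its
# fields_get() descriptor,
#
#     node = etree.fromstring('<field name="state" invisible="1"/>')
#     setup_modifiers(node, field={'required': True})
#     # node.get('modifiers') -> roughly '{"invisible": true, "required": true}'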
def test_modifiers(what, expected):
modifiers = {}
if isinstance(what, str):
node = etree.fromstring(what)
transfer_node_to_modifiers(node, modifiers)
simplify_modifiers(modifiers)
assert modifiers == expected, "%s != %s" % (modifiers, expected)
elif isinstance(what, dict):
transfer_field_to_modifiers(what, modifiers)
simplify_modifiers(modifiers)
assert modifiers == expected, "%s != %s" % (modifiers, expected)
# To use this test:
# import odoo
# odoo.osv.orm.modifiers_tests()
def modifiers_tests():
test_modifiers('<field name="a"/>', {})
test_modifiers('<field name="a" invisible="1"/>', {"invisible": True})
test_modifiers('<field name="a" readonly="1"/>', {"readonly": True})
test_modifiers('<field name="a" required="1"/>', {"required": True})
test_modifiers('<field name="a" invisible="0"/>', {})
test_modifiers('<field name="a" readonly="0"/>', {})
test_modifiers('<field name="a" required="0"/>', {})
test_modifiers('<field name="a" invisible="1" required="1"/>', {"invisible": True, "required": True}) # TODO order is not guaranteed
test_modifiers('<field name="a" invisible="1" required="0"/>', {"invisible": True})
test_modifiers('<field name="a" invisible="0" required="1"/>', {"required": True})
test_modifiers("""<field name="a" attrs="{'invisible': [['b', '=', 'c']]}"/>""", {"invisible": [["b", "=", "c"]]})
# The dictionary is supposed to be the result of fields_get().
test_modifiers({}, {})
test_modifiers({"invisible": True}, {"invisible": True})
test_modifiers({"invisible": False}, {})
| gpl-3.0 |
ff94315/hiwifi-openwrt-HC5661-HC5761 | staging_dir/host/lib64/scons-2.1.0/SCons/Tool/dmd.py | 20 | 9512 | """SCons.Tool.dmd
Tool-specific initialization for the Digital Mars D compiler.
(http://digitalmars.com/d)
Coded by Andy Friesen (andy@ikagames.com)
15 November 2003
Amended by Russel Winder (russel@russel.org.uk)
2010-02-07
There are a number of problems with this script at this point in time.
The one that irritates me the most is the Windows linker setup. The D
linker doesn't have a way to add lib paths on the commandline, as far
as I can see. You have to specify paths relative to the SConscript or
use absolute paths. To hack around it, add '#/blah'. This will link
blah.lib from the directory where SConstruct resides.
Compiler variables:
DC - The name of the D compiler to use. Defaults to dmd or gdmd,
whichever is found.
DPATH - List of paths to search for import modules.
DVERSIONS - List of version tags to enable when compiling.
DDEBUG - List of debug tags to enable when compiling.
Linker related variables:
LIBS - List of library files to link in.
DLINK - Name of the linker to use. Defaults to dmd or gdmd.
DLINKFLAGS - List of linker flags.
Lib tool variables:
DLIB - Name of the lib tool to use. Defaults to lib.
DLIBFLAGS - List of flags to pass to the lib tool.
LIBS - Same as for the linker. (libraries to pull into the .lib)
"""
#
# Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "src/engine/SCons/Tool/dmd.py 5357 2011/09/09 21:31:03 bdeegan"
import os
import SCons.Action
import SCons.Builder
import SCons.Defaults
import SCons.Scanner.D
import SCons.Tool
# Adapted from c++.py
def isD(source):
if not source:
return 0
for s in source:
if s.sources:
ext = os.path.splitext(str(s.sources[0]))[1]
if ext == '.d':
return 1
return 0
smart_link = {}
smart_lib = {}
def generate(env):
global smart_link
global smart_lib
static_obj, shared_obj = SCons.Tool.createObjBuilders(env)
DAction = SCons.Action.Action('$DCOM', '$DCOMSTR')
static_obj.add_action('.d', DAction)
shared_obj.add_action('.d', DAction)
static_obj.add_emitter('.d', SCons.Defaults.StaticObjectEmitter)
shared_obj.add_emitter('.d', SCons.Defaults.SharedObjectEmitter)
dc = env.Detect(['dmd', 'gdmd'])
env['DC'] = dc
env['DCOM'] = '$DC $_DINCFLAGS $_DVERFLAGS $_DDEBUGFLAGS $_DFLAGS -c -of$TARGET $SOURCES'
env['_DINCFLAGS'] = '$( ${_concat(DINCPREFIX, DPATH, DINCSUFFIX, __env__, RDirs, TARGET, SOURCE)} $)'
env['_DVERFLAGS'] = '$( ${_concat(DVERPREFIX, DVERSIONS, DVERSUFFIX, __env__)} $)'
env['_DDEBUGFLAGS'] = '$( ${_concat(DDEBUGPREFIX, DDEBUG, DDEBUGSUFFIX, __env__)} $)'
env['_DFLAGS'] = '$( ${_concat(DFLAGPREFIX, DFLAGS, DFLAGSUFFIX, __env__)} $)'
env['DPATH'] = ['#/']
env['DFLAGS'] = []
env['DVERSIONS'] = []
env['DDEBUG'] = []
if dc:
# Add the path to the standard library.
# This is merely for the convenience of the dependency scanner.
dmd_path = env.WhereIs(dc)
if dmd_path:
x = dmd_path.rindex(dc)
phobosDir = dmd_path[:x] + '/../src/phobos'
if os.path.isdir(phobosDir):
env.Append(DPATH = [phobosDir])
env['DINCPREFIX'] = '-I'
env['DINCSUFFIX'] = ''
env['DVERPREFIX'] = '-version='
env['DVERSUFFIX'] = ''
env['DDEBUGPREFIX'] = '-debug='
env['DDEBUGSUFFIX'] = ''
env['DFLAGPREFIX'] = '-'
env['DFLAGSUFFIX'] = ''
env['DFILESUFFIX'] = '.d'
# Need to use the Digital Mars linker/lib on windows.
# *nix can just use GNU link.
if env['PLATFORM'] == 'win32':
env['DLINK'] = '$DC'
env['DLINKCOM'] = '$DLINK -of$TARGET $SOURCES $DFLAGS $DLINKFLAGS $_DLINKLIBFLAGS'
env['DLIB'] = 'lib'
env['DLIBCOM'] = '$DLIB $_DLIBFLAGS -c $TARGET $SOURCES $_DLINKLIBFLAGS'
env['_DLINKLIBFLAGS'] = '$( ${_concat(DLIBLINKPREFIX, LIBS, DLIBLINKSUFFIX, __env__, RDirs, TARGET, SOURCE)} $)'
env['_DLIBFLAGS'] = '$( ${_concat(DLIBFLAGPREFIX, DLIBFLAGS, DLIBFLAGSUFFIX, __env__)} $)'
env['DLINKFLAGS'] = []
env['DLIBLINKPREFIX'] = ''
env['DLIBLINKSUFFIX'] = '.lib'
env['DLIBFLAGPREFIX'] = '-'
env['DLIBFLAGSUFFIX'] = ''
env['DLINKFLAGPREFIX'] = '-'
env['DLINKFLAGSUFFIX'] = ''
SCons.Tool.createStaticLibBuilder(env)
# Basically, we hijack the link and ar builders with our own.
    # These builders check for the presence of D source, and swap out
# the system's defaults for the Digital Mars tools. If there's no D
# source, then we silently return the previous settings.
linkcom = env.get('LINKCOM')
try:
env['SMART_LINKCOM'] = smart_link[linkcom]
except KeyError:
def _smartLink(source, target, env, for_signature,
defaultLinker=linkcom):
if isD(source):
# XXX I'm not sure how to add a $DLINKCOMSTR variable
# so that it works with this _smartLink() logic,
# and I don't have a D compiler/linker to try it out,
# so we'll leave it alone for now.
return '$DLINKCOM'
else:
return defaultLinker
env['SMART_LINKCOM'] = smart_link[linkcom] = _smartLink
arcom = env.get('ARCOM')
try:
env['SMART_ARCOM'] = smart_lib[arcom]
except KeyError:
def _smartLib(source, target, env, for_signature,
defaultLib=arcom):
if isD(source):
# XXX I'm not sure how to add a $DLIBCOMSTR variable
# so that it works with this _smartLib() logic, and
# I don't have a D compiler/archiver to try it out,
# so we'll leave it alone for now.
return '$DLIBCOM'
else:
return defaultLib
env['SMART_ARCOM'] = smart_lib[arcom] = _smartLib
# It is worth noting that the final space in these strings is
# absolutely pivotal. SCons sees these as actions and not generators
# if it is not there. (very bad)
env['ARCOM'] = '$SMART_ARCOM '
env['LINKCOM'] = '$SMART_LINKCOM '
else: # assuming linux
linkcom = env.get('LINKCOM')
try:
env['SMART_LINKCOM'] = smart_link[linkcom]
except KeyError:
def _smartLink(source, target, env, for_signature,
defaultLinker=linkcom, dc=dc):
if isD(source):
try:
libs = env['LIBS']
except KeyError:
libs = []
if dc == 'dmd':
# TODO: This assumes that the dmd executable is in the
# bin directory and that the libraries are in a peer
                        # directory lib. This is true of the Digital Mars
# distribution but . . .
import glob
                        dHome = env.WhereIs(dc).replace('/dmd', '/..')
if glob.glob(dHome + '/lib/*phobos2*'):
if 'phobos2' not in libs:
env.Append(LIBPATH = [dHome + '/lib'])
env.Append(LIBS = ['phobos2'])
# TODO: Find out when there will be a
# 64-bit version of D.
env.Append(LINKFLAGS = ['-m32'])
else:
if 'phobos' not in libs:
env.Append(LIBS = ['phobos'])
                elif dc == 'gdmd':
env.Append(LIBS = ['gphobos'])
if 'pthread' not in libs:
env.Append(LIBS = ['pthread'])
if 'm' not in libs:
env.Append(LIBS = ['m'])
return defaultLinker
env['SMART_LINKCOM'] = smart_link[linkcom] = _smartLink
env['LINKCOM'] = '$SMART_LINKCOM '
def exists(env):
return env.Detect(['dmd', 'gdmd'])
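# SConstruct usage sketch (illustrative, not part of the original tool):
#
#     env = Environment(tools=['dmd', 'link'])
#     env.Program('hello', ['hello.d'], DVERSIONS=['Demo'])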
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
| gpl-2.0 |
Benjamin-Dobell/three.js | utils/converters/obj/convert_obj_three_for_python3.py | 68 | 48066 | """Convert Wavefront OBJ / MTL files into Three.js (JSON model version, to be used with ascii / binary loader)
-------------------------
How to use this converter
-------------------------
python convert_obj_three.py -i infile.obj -o outfile.js [-m "morphfiles*.obj"] [-c "morphcolors*.obj"] [-a center|centerxz|top|bottom|none] [-s smooth|flat] [-t ascii|binary] [-d invert|normal] [-b] [-e]
Notes:
- flags
-i infile.obj input OBJ file
-o outfile.js output JS file
-m "morphfiles*.obj" morph OBJ files (can use wildcards, enclosed in quotes multiple patterns separate by space)
-c "morphcolors*.obj" morph colors OBJ files (can use wildcards, enclosed in quotes multiple patterns separate by space)
-a center|centerxz|top|bottom|none model alignment
-s smooth|flat smooth = export vertex normals, flat = no normals (face normals computed in loader)
-t ascii|binary export ascii or binary format (ascii has more features, binary just supports vertices, faces, normals, uvs and materials)
-b bake material colors into face colors
-x 10.0 scale and truncate
-f 2 morph frame sampling step
- by default:
use smooth shading (if there were vertex normals in the original model)
will be in ASCII format
no face colors baking
no scale and truncate
morph frame step = 1 (all files will be processed)
- binary conversion will create two files:
outfile.js (materials)
outfile.bin (binary buffers)
--------------------------------------------------
How to use generated JS file in your HTML document
--------------------------------------------------
<script type="text/javascript" src="Three.js"></script>
...
<script type="text/javascript">
...
// load ascii model
var jsonLoader = new THREE.JSONLoader();
jsonLoader.load( "Model_ascii.js", createScene );
// load binary model
var binLoader = new THREE.BinaryLoader();
binLoader.load( "Model_bin.js", createScene );
function createScene( geometry, materials ) {
var mesh = new THREE.Mesh( geometry, new THREE.MultiMaterial( materials ) );
}
...
</script>
-------------------------------------
Parsers based on formats descriptions
-------------------------------------
http://en.wikipedia.org/wiki/Obj
http://en.wikipedia.org/wiki/Material_Template_Library
-------------------
Current limitations
-------------------
- for the moment, only diffuse color and texture are used
(will need to extend shaders / renderers / materials in Three)
- texture coordinates can be wrong in canvas renderer
(there is crude normalization, but it doesn't
work for all cases)
- smoothing can be turned on/off only for the whole mesh
----------------------------------------------
How to get proper OBJ + MTL files with Blender
----------------------------------------------
0. Remove default cube (press DEL and ENTER)
1. Import / create model
2. Select all meshes (Select -> Select All by Type -> Mesh)
3. Export to OBJ (File -> Export -> Wavefront .obj)
- enable following options in exporter
Material Groups
Rotate X90
Apply Modifiers
High Quality Normals
Copy Images
Selection Only
Objects as OBJ Objects
UVs
Normals
Materials
- select empty folder
- give your exported file name with "obj" extension
- click on "Export OBJ" button
4. Your model now consists of all the files in this folder (OBJ, MTL, a number of images)
- this converter assumes all files stay in the same folder,
(OBJ / MTL files use relative paths)
- for WebGL, textures must be power of 2 sized
------
Author
------
AlteredQualia http://alteredqualia.com
"""
import fileinput
import operator
import random
import os.path
import getopt
import sys
import struct
import math
import glob
# #####################################################
# Configuration
# #####################################################
ALIGN = "none" # center centerxz bottom top none
SHADING = "smooth" # smooth flat
TYPE = "ascii" # ascii binary
TRUNCATE = False
SCALE = 1.0
FRAMESTEP = 1
BAKE_COLORS = False
# default colors for debugging (each material gets one distinct color):
# white, red, green, blue, yellow, cyan, magenta
COLORS = [0xeeeeee, 0xee0000, 0x00ee00, 0x0000ee, 0xeeee00, 0x00eeee, 0xee00ee]
# #####################################################
# Templates
# #####################################################
TEMPLATE_FILE_ASCII = u"""\
{
"metadata" :
{
"formatVersion" : 3.1,
"sourceFile" : "%(fname)s",
"generatedBy" : "OBJConverter",
"vertices" : %(nvertex)d,
"faces" : %(nface)d,
"normals" : %(nnormal)d,
"colors" : %(ncolor)d,
"uvs" : %(nuv)d,
"materials" : %(nmaterial)d
},
"scale" : %(scale)f,
"materials": [%(materials)s],
"vertices": [%(vertices)s],
"morphTargets": [%(morphTargets)s],
"morphColors": [%(morphColors)s],
"normals": [%(normals)s],
"colors": [%(colors)s],
"uvs": [[%(uvs)s]],
"faces": [%(faces)s]
}
"""
TEMPLATE_FILE_BIN = u"""\
{
"metadata" :
{
"formatVersion" : 3.1,
"sourceFile" : "%(fname)s",
"generatedBy" : "OBJConverter",
"vertices" : %(nvertex)d,
"faces" : %(nface)d,
"normals" : %(nnormal)d,
"uvs" : %(nuv)d,
"materials" : %(nmaterial)d
},
"materials": [%(materials)s],
"buffers": "%(buffers)s"
}
"""
TEMPLATE_VERTEX = "%f,%f,%f"
TEMPLATE_VERTEX_TRUNCATE = "%d,%d,%d"
TEMPLATE_N = "%.5g,%.5g,%.5g"
TEMPLATE_UV = "%.5g,%.5g"
TEMPLATE_COLOR = "%.3g,%.3g,%.3g"
TEMPLATE_COLOR_DEC = "%d"
TEMPLATE_MORPH_VERTICES = '\t{ "name": "%s", "vertices": [%s] }'
TEMPLATE_MORPH_COLORS = '\t{ "name": "%s", "colors": [%s] }'
# #####################################################
# Utils
# #####################################################
def file_exists(filename):
"""Return true if file exists and is accessible for reading.
Should be safer than just testing for existence due to links and
permissions magic on Unix filesystems.
@rtype: boolean
"""
try:
f = open(filename, 'r')
f.close()
return True
except IOError:
return False
def get_name(fname):
"""Create model name based of filename ("path/fname.js" -> "fname").
"""
return os.path.splitext(os.path.basename(fname))[0]
def bbox(vertices):
"""Compute bounding box of vertex array.
"""
if len(vertices)>0:
minx = maxx = vertices[0][0]
miny = maxy = vertices[0][1]
minz = maxz = vertices[0][2]
for v in vertices[1:]:
if v[0]<minx:
minx = v[0]
elif v[0]>maxx:
maxx = v[0]
if v[1]<miny:
miny = v[1]
elif v[1]>maxy:
maxy = v[1]
if v[2]<minz:
minz = v[2]
elif v[2]>maxz:
maxz = v[2]
return { 'x':[minx,maxx], 'y':[miny,maxy], 'z':[minz,maxz] }
else:
return { 'x':[0,0], 'y':[0,0], 'z':[0,0] }
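# A quick check of bbox (hypothetical input):
#   bbox([[0,1,2], [3,-1,5]]) -> {'x': [0, 3], 'y': [-1, 1], 'z': [2, 5]}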
def translate(vertices, t):
"""Translate array of vertices by vector t.
"""
for i in range(len(vertices)):
vertices[i][0] += t[0]
vertices[i][1] += t[1]
vertices[i][2] += t[2]
def center(vertices):
"""Center model (middle of bounding box).
"""
bb = bbox(vertices)
cx = bb['x'][0] + (bb['x'][1] - bb['x'][0])/2.0
cy = bb['y'][0] + (bb['y'][1] - bb['y'][0])/2.0
cz = bb['z'][0] + (bb['z'][1] - bb['z'][0])/2.0
translate(vertices, [-cx,-cy,-cz])
def top(vertices):
"""Align top of the model with the floor (Y-axis) and center it around X and Z.
"""
bb = bbox(vertices)
cx = bb['x'][0] + (bb['x'][1] - bb['x'][0])/2.0
cy = bb['y'][1]
cz = bb['z'][0] + (bb['z'][1] - bb['z'][0])/2.0
translate(vertices, [-cx,-cy,-cz])
def bottom(vertices):
"""Align bottom of the model with the floor (Y-axis) and center it around X and Z.
"""
bb = bbox(vertices)
cx = bb['x'][0] + (bb['x'][1] - bb['x'][0])/2.0
cy = bb['y'][0]
cz = bb['z'][0] + (bb['z'][1] - bb['z'][0])/2.0
translate(vertices, [-cx,-cy,-cz])
def centerxz(vertices):
"""Center model around X and Z.
"""
bb = bbox(vertices)
cx = bb['x'][0] + (bb['x'][1] - bb['x'][0])/2.0
cy = 0
cz = bb['z'][0] + (bb['z'][1] - bb['z'][0])/2.0
translate(vertices, [-cx,-cy,-cz])
def normalize(v):
"""Normalize 3d vector"""
l = math.sqrt(v[0]*v[0] + v[1]*v[1] + v[2]*v[2])
if l:
v[0] /= l
v[1] /= l
v[2] /= l
def veckey3(v):
return round(v[0], 6), round(v[1], 6), round(v[2], 6)
# #####################################################
# MTL parser
# #####################################################
def texture_relative_path(fullpath):
texture_file = os.path.basename(fullpath.replace("\\", "/"))
return texture_file
def parse_mtl(fname):
"""Parse MTL file.
"""
materials = {}
previous_line = ""
for line in fileinput.input(fname):
line = previous_line + line
if line[-2:-1] == '\\':
previous_line = line[:-2]
continue
previous_line = ""
# Only split once initially for single-parameter tags that might have additional spaces in
# their values (i.e. "newmtl Material with spaces").
chunks = line.split(None, 1)
if len(chunks) > 0:
if len(chunks) > 1:
chunks[1] = chunks[1].strip()
# Material start
# newmtl identifier
if chunks[0] == "newmtl":
if len(chunks) > 1:
identifier = chunks[1]
else:
identifier = ""
if not identifier in materials:
materials[identifier] = {}
# Diffuse texture
# map_Kd texture_diffuse.jpg
if chunks[0] == "map_Kd" and len(chunks) == 2:
materials[identifier]["mapDiffuse"] = texture_relative_path(chunks[1])
# Specular texture
# map_Ks texture_specular.jpg
if chunks[0] == "map_Ks" and len(chunks) == 2:
materials[identifier]["mapSpecular"] = texture_relative_path(chunks[1])
# Alpha texture
# map_d texture_alpha.png
if chunks[0] == "map_d" and len(chunks) == 2:
materials[identifier]["transparent"] = True
materials[identifier]["mapAlpha"] = texture_relative_path(chunks[1])
# Bump texture
# map_bump texture_bump.jpg or bump texture_bump.jpg
if (chunks[0] == "map_bump" or chunks[0] == "bump") and len(chunks) == 2:
materials[identifier]["mapBump"] = texture_relative_path(chunks[1])
# Split the remaining parameters.
if len(chunks) > 1:
chunks = [chunks[0]] + chunks[1].split()
# Diffuse color
# Kd 1.000 1.000 1.000
if chunks[0] == "Kd" and len(chunks) == 4:
materials[identifier]["colorDiffuse"] = [float(chunks[1]), float(chunks[2]), float(chunks[3])]
# Specular color
# Ks 1.000 1.000 1.000
if chunks[0] == "Ks" and len(chunks) == 4:
materials[identifier]["colorSpecular"] = [float(chunks[1]), float(chunks[2]), float(chunks[3])]
# Specular coefficient
# Ns 154.000
if chunks[0] == "Ns" and len(chunks) == 2:
materials[identifier]["specularCoef"] = float(chunks[1])
# Dissolves
# d 0.9
if chunks[0] == "d" and len(chunks) == 2:
materials[identifier]["opacity"] = float(chunks[1])
if materials[identifier]["opacity"] < 1.0:
materials[identifier]["transparent"] = True
# Transparency
# Tr 0.1
if chunks[0] == "Tr" and len(chunks) == 2:
materials[identifier]["opacity"] = 1.0 - float(chunks[1])
if materials[identifier]["opacity"] < 1.0:
materials[identifier]["transparent"] = True
# Optical density
# Ni 1.0
if chunks[0] == "Ni" and len(chunks) == 2:
materials[identifier]["opticalDensity"] = float(chunks[1])
# Illumination
# illum 2
#
# 0. Color on and Ambient off
# 1. Color on and Ambient on
# 2. Highlight on
# 3. Reflection on and Ray trace on
# 4. Transparency: Glass on, Reflection: Ray trace on
# 5. Reflection: Fresnel on and Ray trace on
# 6. Transparency: Refraction on, Reflection: Fresnel off and Ray trace on
# 7. Transparency: Refraction on, Reflection: Fresnel on and Ray trace on
# 8. Reflection on and Ray trace off
# 9. Transparency: Glass on, Reflection: Ray trace off
# 10. Casts shadows onto invisible surfaces
if chunks[0] == "illum" and len(chunks) == 2:
materials[identifier]["illumination"] = int(chunks[1])
return materials
# #####################################################
# OBJ parser
# #####################################################
def parse_vertex(text):
"""Parse text chunk specifying single vertex.
Possible formats:
vertex index
vertex index / texture index
vertex index / texture index / normal index
vertex index / / normal index
"""
v = 0
t = 0
n = 0
chunks = text.split("/")
v = int(chunks[0])
if len(chunks) > 1:
if chunks[1]:
t = int(chunks[1])
if len(chunks) > 2:
if chunks[2]:
n = int(chunks[2])
return { 'v':v, 't':t, 'n':n }
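# Sanity checks for parse_vertex (hypothetical inputs; indices are 1-based
# as in the OBJ format, 0 means "not given"):
#   parse_vertex("3")     -> {'v': 3, 't': 0, 'n': 0}
#   parse_vertex("3/7")   -> {'v': 3, 't': 7, 'n': 0}
#   parse_vertex("3/7/2") -> {'v': 3, 't': 7, 'n': 2}
#   parse_vertex("3//2")  -> {'v': 3, 't': 0, 'n': 2}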
def parse_obj(fname):
"""Parse OBJ file.
"""
vertices = []
normals = []
uvs = []
faces = []
materials = {}
material = ""
mcounter = 0
mcurrent = 0
mtllib = ""
# current face state
group = 0
object = 0
smooth = 0
previous_line = ""
for line in fileinput.input(fname):
line = previous_line + line
if line[-2:-1] == '\\':
previous_line = line[:-2]
continue
previous_line = ""
# Only split once initially for single-parameter tags that might have additional spaces in
# their values (i.e. "usemtl Material with spaces").
chunks = line.split(None, 1)
if len(chunks) > 0:
if len(chunks) > 1:
chunks[1] = chunks[1].strip()
# Group
if chunks[0] == "g" and len(chunks) == 2:
group = chunks[1]
# Object
if chunks[0] == "o" and len(chunks) == 2:
object = chunks[1]
# Materials definition
if chunks[0] == "mtllib" and len(chunks) == 2:
mtllib = chunks[1]
# Material
if chunks[0] == "usemtl":
if len(chunks) > 1:
material = chunks[1]
else:
material = ""
if not material in materials:
mcurrent = mcounter
materials[material] = mcounter
mcounter += 1
else:
mcurrent = materials[material]
# Split the remaining parameters.
if len(chunks) > 1:
chunks = [chunks[0]] + chunks[1].split()
# Vertices as (x,y,z) coordinates
# v 0.123 0.234 0.345
if chunks[0] == "v" and len(chunks) == 4:
x = float(chunks[1])
y = float(chunks[2])
z = float(chunks[3])
vertices.append([x,y,z])
# Normals in (x,y,z) form; normals might not be unit
# vn 0.707 0.000 0.707
if chunks[0] == "vn" and len(chunks) == 4:
x = float(chunks[1])
y = float(chunks[2])
z = float(chunks[3])
normals.append([x,y,z])
# Texture coordinates in (u,v[,w]) coordinates, w is optional
# vt 0.500 -1.352 [0.234]
if chunks[0] == "vt" and len(chunks) >= 3:
u = float(chunks[1])
v = float(chunks[2])
w = 0
if len(chunks)>3:
w = float(chunks[3])
uvs.append([u,v,w])
# Face
if chunks[0] == "f" and len(chunks) >= 4:
vertex_index = []
uv_index = []
normal_index = []
# Precompute vert / normal / uv lists
# for negative index lookup
vertlen = len(vertices) + 1
normlen = len(normals) + 1
uvlen = len(uvs) + 1
for v in chunks[1:]:
vertex = parse_vertex(v)
if vertex['v']:
if vertex['v'] < 0:
vertex['v'] += vertlen
vertex_index.append(vertex['v'])
if vertex['t']:
if vertex['t'] < 0:
vertex['t'] += uvlen
uv_index.append(vertex['t'])
if vertex['n']:
if vertex['n'] < 0:
vertex['n'] += normlen
normal_index.append(vertex['n'])
faces.append({
'vertex':vertex_index,
'uv':uv_index,
'normal':normal_index,
'material':mcurrent,
'group':group,
'object':object,
'smooth':smooth,
})
# Smooth shading
if chunks[0] == "s" and len(chunks) == 2:
smooth = chunks[1]
return faces, vertices, uvs, normals, materials, mtllib
# #####################################################
# Generator - faces
# #####################################################
def setBit(value, position, on):
if on:
mask = 1 << position
return (value | mask)
else:
mask = ~(1 << position)
return (value & mask)
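# setBit example: setBit(0b0000, 1, True) -> 0b0010 == 2, and
# setBit(0b0010, 1, False) clears the same bit again -> 0.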
def generate_face(f, fc):
isTriangle = ( len(f['vertex']) == 3 )
if isTriangle:
nVertices = 3
else:
nVertices = 4
hasMaterial = True # for the moment OBJs without materials get default material
hasFaceUvs = False # not supported in OBJ
hasFaceVertexUvs = ( len(f['uv']) >= nVertices )
hasFaceNormals = False # don't export any face normals (as they are computed in engine)
hasFaceVertexNormals = ( len(f["normal"]) >= nVertices and SHADING == "smooth" )
hasFaceColors = BAKE_COLORS
hasFaceVertexColors = False # not supported in OBJ
faceType = 0
faceType = setBit(faceType, 0, not isTriangle)
faceType = setBit(faceType, 1, hasMaterial)
faceType = setBit(faceType, 2, hasFaceUvs)
faceType = setBit(faceType, 3, hasFaceVertexUvs)
faceType = setBit(faceType, 4, hasFaceNormals)
faceType = setBit(faceType, 5, hasFaceVertexNormals)
faceType = setBit(faceType, 6, hasFaceColors)
faceType = setBit(faceType, 7, hasFaceVertexColors)
faceData = []
# order is important, must match order in JSONLoader
# face type
# vertex indices
# material index
# face uvs index
# face vertex uvs indices
# face normal index
# face vertex normals indices
# face color index
# face vertex colors indices
faceData.append(faceType)
    # must clamp in case of polygons bigger than quads
for i in range(nVertices):
index = f['vertex'][i] - 1
faceData.append(index)
faceData.append( f['material'] )
if hasFaceVertexUvs:
for i in range(nVertices):
index = f['uv'][i] - 1
faceData.append(index)
if hasFaceVertexNormals:
for i in range(nVertices):
index = f['normal'][i] - 1
faceData.append(index)
if hasFaceColors:
index = fc['material']
faceData.append(index)
return ",".join( map(str, faceData) )
# #####################################################
# Generator - chunks
# #####################################################
def hexcolor(c):
return ( int(c[0] * 255) << 16 ) + ( int(c[1] * 255) << 8 ) + int(c[2] * 255)
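# e.g. hexcolor([1.0, 0.5, 0.0]) -> 0xff7f00 == 16744192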
def generate_vertex(v, option_vertices_truncate, scale):
if not option_vertices_truncate:
return TEMPLATE_VERTEX % (v[0], v[1], v[2])
else:
return TEMPLATE_VERTEX_TRUNCATE % (scale * v[0], scale * v[1], scale * v[2])
def generate_normal(n):
return TEMPLATE_N % (n[0], n[1], n[2])
def generate_uv(uv):
return TEMPLATE_UV % (uv[0], uv[1])
def generate_color_rgb(c):
return TEMPLATE_COLOR % (c[0], c[1], c[2])
def generate_color_decimal(c):
return TEMPLATE_COLOR_DEC % hexcolor(c)
# #####################################################
# Morphs
# #####################################################
def generate_morph_vertex(name, vertices):
vertex_string = ",".join(generate_vertex(v, TRUNCATE, SCALE) for v in vertices)
return TEMPLATE_MORPH_VERTICES % (name, vertex_string)
def generate_morph_color(name, colors):
color_string = ",".join(generate_color_rgb(c) for c in colors)
return TEMPLATE_MORPH_COLORS % (name, color_string)
def extract_material_colors(materials, mtlfilename, basename):
"""Extract diffuse colors from MTL materials
"""
if not materials:
materials = { 'default': 0 }
mtl = create_materials(materials, mtlfilename, basename)
mtlColorArraySrt = []
for m in mtl:
if m in materials:
index = materials[m]
color = mtl[m].get("colorDiffuse", [1,0,0])
mtlColorArraySrt.append([index, color])
mtlColorArraySrt.sort()
mtlColorArray = [x[1] for x in mtlColorArraySrt]
return mtlColorArray
def extract_face_colors(faces, material_colors):
"""Extract colors from materials and assign them to faces
"""
faceColors = []
for face in faces:
material_index = face['material']
faceColors.append(material_colors[material_index])
return faceColors
def generate_morph_targets(morphfiles, n_vertices, infile):
skipOriginalMorph = False
norminfile = os.path.normpath(infile)
morphVertexData = []
for mfilepattern in morphfiles.split():
matches = glob.glob(mfilepattern)
matches.sort()
indices = range(0, len(matches), FRAMESTEP)
for i in indices:
path = matches[i]
normpath = os.path.normpath(path)
if normpath != norminfile or not skipOriginalMorph:
name = os.path.basename(normpath)
morphFaces, morphVertices, morphUvs, morphNormals, morphMaterials, morphMtllib = parse_obj(normpath)
n_morph_vertices = len(morphVertices)
if n_vertices != n_morph_vertices:
print("WARNING: skipping morph [%s] with different number of vertices [%d] than the original model [%d]" % (name, n_morph_vertices, n_vertices))
else:
if ALIGN == "center":
center(morphVertices)
elif ALIGN == "centerxz":
centerxz(morphVertices)
elif ALIGN == "bottom":
bottom(morphVertices)
elif ALIGN == "top":
top(morphVertices)
morphVertexData.append((get_name(name), morphVertices))
print("adding [%s] with %d vertices" % (name, n_morph_vertices))
morphTargets = ""
if len(morphVertexData):
morphTargets = "\n%s\n\t" % ",\n".join(generate_morph_vertex(name, vertices) for name, vertices in morphVertexData)
return morphTargets
def generate_morph_colors(colorfiles, n_vertices, n_faces):
morphColorData = []
colorFaces = []
materialColors = []
for mfilepattern in colorfiles.split():
matches = glob.glob(mfilepattern)
matches.sort()
for path in matches:
normpath = os.path.normpath(path)
name = os.path.basename(normpath)
morphFaces, morphVertices, morphUvs, morphNormals, morphMaterials, morphMtllib = parse_obj(normpath)
n_morph_vertices = len(morphVertices)
n_morph_faces = len(morphFaces)
if n_vertices != n_morph_vertices:
print("WARNING: skipping morph color map [%s] with different number of vertices [%d] than the original model [%d]" % (name, n_morph_vertices, n_vertices))
elif n_faces != n_morph_faces:
print("WARNING: skipping morph color map [%s] with different number of faces [%d] than the original model [%d]" % (name, n_morph_faces, n_faces))
else:
morphMaterialColors = extract_material_colors(morphMaterials, morphMtllib, normpath)
morphFaceColors = extract_face_colors(morphFaces, morphMaterialColors)
morphColorData.append((get_name(name), morphFaceColors))
# take first color map for baking into face colors
if len(colorFaces) == 0:
colorFaces = morphFaces
materialColors = morphMaterialColors
print("adding [%s] with %d face colors" % (name, len(morphFaceColors)))
morphColors = ""
if len(morphColorData):
morphColors = "\n%s\n\t" % ",\n".join(generate_morph_color(name, colors) for name, colors in morphColorData)
return morphColors, colorFaces, materialColors
# #####################################################
# Materials
# #####################################################
def generate_color(i):
"""Generate hex color corresponding to integer.
Colors should have well defined ordering.
First N colors are hardcoded, then colors are random
(must seed random number generator with deterministic value
before getting colors).
"""
if i < len(COLORS):
#return "0x%06x" % COLORS[i]
return COLORS[i]
else:
#return "0x%06x" % int(0xffffff * random.random())
return int(0xffffff * random.random())
def value2string(v):
if type(v)==str and v[0:2] != "0x":
return '"%s"' % v
elif type(v) == bool:
return str(v).lower()
return str(v)
def generate_materials(mtl, materials):
"""Generate JS array of materials objects
JS material objects are basically prettified one-to-one
mappings of MTL properties in JSON format.
"""
mtl_array = []
for m in mtl:
if m in materials:
index = materials[m]
# add debug information
# materials should be sorted according to how
# they appeared in OBJ file (for the first time)
# this index is identifier used in face definitions
mtl[m]['DbgName'] = m
mtl[m]['DbgIndex'] = index
mtl[m]['DbgColor'] = generate_color(index)
if BAKE_COLORS:
mtl[m]['vertexColors'] = "face"
mtl_raw = ",\n".join(['\t"%s" : %s' % (n, value2string(v)) for n,v in sorted(mtl[m].items())])
mtl_string = "\t{\n%s\n\t}" % mtl_raw
mtl_array.append([index, mtl_string])
return ",\n\n".join([m for i,m in sorted(mtl_array)])
def generate_mtl(materials):
"""Generate dummy materials (if there is no MTL file).
"""
mtl = {}
for m in materials:
index = materials[m]
mtl[m] = {
'DbgName': m,
'DbgIndex': index,
'DbgColor': generate_color(index)
}
return mtl
def generate_materials_string(materials, mtlfilename, basename):
"""Generate final materials string.
"""
if not materials:
materials = { 'default': 0 }
mtl = create_materials(materials, mtlfilename, basename)
return generate_materials(mtl, materials)
def create_materials(materials, mtlfilename, basename):
"""Parse MTL file and create mapping between its materials and OBJ materials.
Eventual edge cases are handled here (missing materials, missing MTL file).
"""
random.seed(42) # to get well defined color order for debug colors
# default materials with debug colors for when
# there is no specified MTL / MTL loading failed,
# or if there were no materials / null materials
mtl = generate_mtl(materials)
if mtlfilename:
# create full pathname for MTL (included from OBJ)
path = os.path.dirname(basename)
fname = os.path.join(path, mtlfilename)
if file_exists(fname):
# override default materials with real ones from MTL
# (where they exist, otherwise keep defaults)
mtl.update(parse_mtl(fname))
else:
print("Couldn't find [%s]" % fname)
return mtl
# #####################################################
# Faces
# #####################################################
def is_triangle_flat(f):
return len(f['vertex'])==3 and not (f["normal"] and SHADING == "smooth") and not f['uv']
def is_triangle_flat_uv(f):
return len(f['vertex'])==3 and not (f["normal"] and SHADING == "smooth") and len(f['uv'])==3
def is_triangle_smooth(f):
return len(f['vertex'])==3 and f["normal"] and SHADING == "smooth" and not f['uv']
def is_triangle_smooth_uv(f):
return len(f['vertex'])==3 and f["normal"] and SHADING == "smooth" and len(f['uv'])==3
def is_quad_flat(f):
return len(f['vertex'])==4 and not (f["normal"] and SHADING == "smooth") and not f['uv']
def is_quad_flat_uv(f):
return len(f['vertex'])==4 and not (f["normal"] and SHADING == "smooth") and len(f['uv'])==4
def is_quad_smooth(f):
return len(f['vertex'])==4 and f["normal"] and SHADING == "smooth" and not f['uv']
def is_quad_smooth_uv(f):
return len(f['vertex'])==4 and f["normal"] and SHADING == "smooth" and len(f['uv'])==4
def sort_faces(faces):
data = {
'triangles_flat': [],
'triangles_flat_uv': [],
'triangles_smooth': [],
'triangles_smooth_uv': [],
'quads_flat': [],
'quads_flat_uv': [],
'quads_smooth': [],
'quads_smooth_uv': []
}
for f in faces:
if is_triangle_flat(f):
data['triangles_flat'].append(f)
elif is_triangle_flat_uv(f):
data['triangles_flat_uv'].append(f)
elif is_triangle_smooth(f):
data['triangles_smooth'].append(f)
elif is_triangle_smooth_uv(f):
data['triangles_smooth_uv'].append(f)
elif is_quad_flat(f):
data['quads_flat'].append(f)
elif is_quad_flat_uv(f):
data['quads_flat_uv'].append(f)
elif is_quad_smooth(f):
data['quads_smooth'].append(f)
elif is_quad_smooth_uv(f):
data['quads_smooth_uv'].append(f)
return data
# #####################################################
# API - ASCII converter
# #####################################################
def convert_ascii(infile, morphfiles, colorfiles, outfile):
"""Convert infile.obj to outfile.js
Here is where everything happens. If you need to automate conversions,
just import this file as Python module and call this method.
"""
if not file_exists(infile):
print("Couldn't find [%s]" % infile)
return
# parse OBJ / MTL files
faces, vertices, uvs, normals, materials, mtllib = parse_obj(infile)
n_vertices = len(vertices)
n_faces = len(faces)
# align model
if ALIGN == "center":
center(vertices)
elif ALIGN == "centerxz":
centerxz(vertices)
elif ALIGN == "bottom":
bottom(vertices)
elif ALIGN == "top":
top(vertices)
# generate normals string
nnormal = 0
normals_string = ""
if SHADING == "smooth":
normals_string = ",".join(generate_normal(n) for n in normals)
nnormal = len(normals)
# extract morph vertices
morphTargets = generate_morph_targets(morphfiles, n_vertices, infile)
# extract morph colors
morphColors, colorFaces, materialColors = generate_morph_colors(colorfiles, n_vertices, n_faces)
# generate colors string
ncolor = 0
colors_string = ""
if len(colorFaces) < len(faces):
colorFaces = faces
materialColors = extract_material_colors(materials, mtllib, infile)
if BAKE_COLORS:
colors_string = ",".join(generate_color_decimal(c) for c in materialColors)
ncolor = len(materialColors)
# generate ascii model string
text = TEMPLATE_FILE_ASCII % {
"name" : get_name(outfile),
"fname" : os.path.basename(infile),
"nvertex" : len(vertices),
"nface" : len(faces),
"nuv" : len(uvs),
"nnormal" : nnormal,
"ncolor" : ncolor,
"nmaterial" : len(materials),
"materials" : generate_materials_string(materials, mtllib, infile),
"normals" : normals_string,
"colors" : colors_string,
"uvs" : ",".join(generate_uv(uv) for uv in uvs),
"vertices" : ",".join(generate_vertex(v, TRUNCATE, SCALE) for v in vertices),
"morphTargets" : morphTargets,
"morphColors" : morphColors,
"faces" : ",".join(generate_face(f, fc) for f, fc in zip(faces, colorFaces)),
"scale" : SCALE
}
out = open(outfile, "w")
out.write(text)
out.close()
print("%d vertices, %d faces, %d materials" % (len(vertices), len(faces), len(materials)))
# #############################################################################
# API - Binary converter
# #############################################################################
def dump_materials_to_buffer(faces, buffer):
for f in faces:
data = struct.pack('<H',
f['material'])
buffer.append(data)
def dump_vertices3_to_buffer(faces, buffer):
for f in faces:
vi = f['vertex']
data = struct.pack('<III',
vi[0]-1, vi[1]-1, vi[2]-1)
buffer.append(data)
def dump_vertices4_to_buffer(faces, buffer):
for f in faces:
vi = f['vertex']
data = struct.pack('<IIII',
vi[0]-1, vi[1]-1, vi[2]-1, vi[3]-1)
buffer.append(data)
def dump_normals3_to_buffer(faces, buffer):
for f in faces:
ni = f['normal']
data = struct.pack('<III',
ni[0]-1, ni[1]-1, ni[2]-1)
buffer.append(data)
def dump_normals4_to_buffer(faces, buffer):
for f in faces:
ni = f['normal']
data = struct.pack('<IIII',
ni[0]-1, ni[1]-1, ni[2]-1, ni[3]-1)
buffer.append(data)
def dump_uvs3_to_buffer(faces, buffer):
for f in faces:
ui = f['uv']
data = struct.pack('<III',
ui[0]-1, ui[1]-1, ui[2]-1)
buffer.append(data)
def dump_uvs4_to_buffer(faces, buffer):
for f in faces:
ui = f['uv']
data = struct.pack('<IIII',
ui[0]-1, ui[1]-1, ui[2]-1, ui[3]-1)
buffer.append(data)
def add_padding(buffer, n):
if n % 4:
for i in range(4 - n % 4):
data = struct.pack('<B', 0)
buffer.append(data)
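# add_padding example: after n == 6 one-byte values, 4 - (6 % 4) == 2 zero
# bytes are appended so the next section starts on a 4-byte boundary.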
def convert_binary(infile, outfile):
"""Convert infile.obj to outfile.js + outfile.bin
"""
if not file_exists(infile):
print("Couldn't find [%s]" % infile)
return
binfile = get_name(outfile) + ".bin"
faces, vertices, uvs, normals, materials, mtllib = parse_obj(infile)
if ALIGN == "center":
center(vertices)
elif ALIGN == "centerxz":
centerxz(vertices)
elif ALIGN == "bottom":
bottom(vertices)
elif ALIGN == "top":
top(vertices)
sfaces = sort_faces(faces)
if SHADING == "smooth":
nnormals = len(normals)
else:
nnormals = 0
# ###################
# generate JS file
# ###################
text = TEMPLATE_FILE_BIN % {
"name" : get_name(outfile),
"materials" : generate_materials_string(materials, mtllib, infile),
"buffers" : binfile,
"fname" : os.path.basename(infile),
"nvertex" : len(vertices),
"nface" : len(faces),
"nmaterial" : len(materials),
"nnormal" : nnormals,
"nuv" : len(uvs)
}
out = open(outfile, "w")
out.write(text)
out.close()
# ###################
# generate BIN file
# ###################
buffer = []
# header
# ------
header_bytes = struct.calcsize('<12s')
header_bytes += struct.calcsize('<BBBBBBBB')
header_bytes += struct.calcsize('<IIIIIIIIIII')
# signature
signature = struct.pack(b'<12s', b'Three.js 003')
# metadata (all data is little-endian)
vertex_coordinate_bytes = 4
normal_coordinate_bytes = 1
uv_coordinate_bytes = 4
vertex_index_bytes = 4
normal_index_bytes = 4
uv_index_bytes = 4
material_index_bytes = 2
# header_bytes unsigned char 1
# vertex_coordinate_bytes unsigned char 1
# normal_coordinate_bytes unsigned char 1
# uv_coordinate_bytes unsigned char 1
# vertex_index_bytes unsigned char 1
# normal_index_bytes unsigned char 1
# uv_index_bytes unsigned char 1
# material_index_bytes unsigned char 1
bdata = struct.pack('<BBBBBBBB', header_bytes,
vertex_coordinate_bytes,
normal_coordinate_bytes,
uv_coordinate_bytes,
vertex_index_bytes,
normal_index_bytes,
uv_index_bytes,
material_index_bytes)
ntri_flat = len(sfaces['triangles_flat'])
ntri_smooth = len(sfaces['triangles_smooth'])
ntri_flat_uv = len(sfaces['triangles_flat_uv'])
ntri_smooth_uv = len(sfaces['triangles_smooth_uv'])
nquad_flat = len(sfaces['quads_flat'])
nquad_smooth = len(sfaces['quads_smooth'])
nquad_flat_uv = len(sfaces['quads_flat_uv'])
nquad_smooth_uv = len(sfaces['quads_smooth_uv'])
# nvertices unsigned int 4
# nnormals unsigned int 4
# nuvs unsigned int 4
# ntri_flat unsigned int 4
# ntri_smooth unsigned int 4
# ntri_flat_uv unsigned int 4
# ntri_smooth_uv unsigned int 4
# nquad_flat unsigned int 4
# nquad_smooth unsigned int 4
# nquad_flat_uv unsigned int 4
# nquad_smooth_uv unsigned int 4
ndata = struct.pack('<IIIIIIIIIII', len(vertices),
nnormals,
len(uvs),
ntri_flat,
ntri_smooth,
ntri_flat_uv,
ntri_smooth_uv,
nquad_flat,
nquad_smooth,
nquad_flat_uv,
nquad_smooth_uv)
buffer.append(signature)
buffer.append(bdata)
buffer.append(ndata)
# 1. vertices
# ------------
# x float 4
# y float 4
# z float 4
for v in vertices:
data = struct.pack('<fff', v[0], v[1], v[2])
buffer.append(data)
# 2. normals
# ---------------
# x signed char 1
# y signed char 1
# z signed char 1
if SHADING == "smooth":
for n in normals:
normalize(n)
data = struct.pack('<bbb', math.floor(n[0]*127+0.5),
math.floor(n[1]*127+0.5),
math.floor(n[2]*127+0.5))
buffer.append(data)
add_padding(buffer, nnormals * 3)
# 3. uvs
# -----------
# u float 4
# v float 4
for uv in uvs:
data = struct.pack('<ff', uv[0], uv[1])
buffer.append(data)
# padding
#data = struct.pack('<BB', 0, 0)
#buffer.append(data)
# 4. flat triangles (vertices + materials)
# ------------------
# a unsigned int 4
# b unsigned int 4
# c unsigned int 4
# ------------------
# m unsigned short 2
dump_vertices3_to_buffer(sfaces['triangles_flat'], buffer)
dump_materials_to_buffer(sfaces['triangles_flat'], buffer)
add_padding(buffer, ntri_flat * 2)
# 5. smooth triangles (vertices + materials + normals)
# -------------------
# a unsigned int 4
# b unsigned int 4
# c unsigned int 4
# -------------------
# na unsigned int 4
# nb unsigned int 4
# nc unsigned int 4
# -------------------
# m unsigned short 2
dump_vertices3_to_buffer(sfaces['triangles_smooth'], buffer)
dump_normals3_to_buffer(sfaces['triangles_smooth'], buffer)
dump_materials_to_buffer(sfaces['triangles_smooth'], buffer)
add_padding(buffer, ntri_smooth * 2)
# 6. flat triangles uv (vertices + materials + uvs)
# --------------------
# a unsigned int 4
# b unsigned int 4
# c unsigned int 4
# --------------------
# ua unsigned int 4
# ub unsigned int 4
# uc unsigned int 4
# --------------------
# m unsigned short 2
dump_vertices3_to_buffer(sfaces['triangles_flat_uv'], buffer)
dump_uvs3_to_buffer(sfaces['triangles_flat_uv'], buffer)
dump_materials_to_buffer(sfaces['triangles_flat_uv'], buffer)
add_padding(buffer, ntri_flat_uv * 2)
# 7. smooth triangles uv (vertices + materials + normals + uvs)
# ----------------------
# a unsigned int 4
# b unsigned int 4
# c unsigned int 4
# --------------------
# na unsigned int 4
# nb unsigned int 4
# nc unsigned int 4
# --------------------
# ua unsigned int 4
# ub unsigned int 4
# uc unsigned int 4
# --------------------
# m unsigned short 2
dump_vertices3_to_buffer(sfaces['triangles_smooth_uv'], buffer)
dump_normals3_to_buffer(sfaces['triangles_smooth_uv'], buffer)
dump_uvs3_to_buffer(sfaces['triangles_smooth_uv'], buffer)
dump_materials_to_buffer(sfaces['triangles_smooth_uv'], buffer)
add_padding(buffer, ntri_smooth_uv * 2)
# 8. flat quads (vertices + materials)
# ------------------
# a unsigned int 4
# b unsigned int 4
# c unsigned int 4
# d unsigned int 4
# --------------------
# m unsigned short 2
dump_vertices4_to_buffer(sfaces['quads_flat'], buffer)
dump_materials_to_buffer(sfaces['quads_flat'], buffer)
add_padding(buffer, nquad_flat * 2)
# 9. smooth quads (vertices + materials + normals)
# -------------------
# a unsigned int 4
# b unsigned int 4
# c unsigned int 4
# d unsigned int 4
# --------------------
# na unsigned int 4
# nb unsigned int 4
# nc unsigned int 4
# nd unsigned int 4
# --------------------
# m unsigned short 2
dump_vertices4_to_buffer(sfaces['quads_smooth'], buffer)
dump_normals4_to_buffer(sfaces['quads_smooth'], buffer)
dump_materials_to_buffer(sfaces['quads_smooth'], buffer)
add_padding(buffer, nquad_smooth * 2)
# 10. flat quads uv (vertices + materials + uvs)
# ------------------
# a unsigned int 4
# b unsigned int 4
# c unsigned int 4
# d unsigned int 4
# --------------------
# ua unsigned int 4
# ub unsigned int 4
# uc unsigned int 4
# ud unsigned int 4
# --------------------
# m unsigned short 2
dump_vertices4_to_buffer(sfaces['quads_flat_uv'], buffer)
dump_uvs4_to_buffer(sfaces['quads_flat_uv'], buffer)
dump_materials_to_buffer(sfaces['quads_flat_uv'], buffer)
add_padding(buffer, nquad_flat_uv * 2)
# 11. smooth quads uv
# -------------------
# a unsigned int 4
# b unsigned int 4
# c unsigned int 4
# d unsigned int 4
# --------------------
# na unsigned int 4
# nb unsigned int 4
# nc unsigned int 4
# nd unsigned int 4
# --------------------
# ua unsigned int 4
# ub unsigned int 4
# uc unsigned int 4
# ud unsigned int 4
# --------------------
# m unsigned short 2
dump_vertices4_to_buffer(sfaces['quads_smooth_uv'], buffer)
dump_normals4_to_buffer(sfaces['quads_smooth_uv'], buffer)
dump_uvs4_to_buffer(sfaces['quads_smooth_uv'], buffer)
dump_materials_to_buffer(sfaces['quads_smooth_uv'], buffer)
add_padding(buffer, nquad_smooth_uv * 2)
path = os.path.dirname(outfile)
fname = os.path.join(path, binfile)
out = open(fname, "wb")
out.write(b"".join(buffer))
out.close()
# #############################################################################
# Helpers
# #############################################################################
def usage():
print("Usage: %s -i filename.obj -o filename.js [-m morphfiles*.obj] [-c morphcolors*.obj] [-a center|top|bottom] [-s flat|smooth] [-t binary|ascii] [-d invert|normal]" % os.path.basename(sys.argv[0]))
# #####################################################
# Main
# #####################################################
if __name__ == "__main__":
# get parameters from the command line
try:
opts, args = getopt.getopt(sys.argv[1:], "hbi:m:c:b:o:a:s:t:d:x:f:", ["help", "bakecolors", "input=", "morphs=", "colors=", "output=", "align=", "shading=", "type=", "dissolve=", "truncatescale=", "framestep="])
except getopt.GetoptError:
usage()
sys.exit(2)
infile = outfile = ""
morphfiles = ""
colorfiles = ""
for o, a in opts:
if o in ("-h", "--help"):
usage()
sys.exit()
elif o in ("-i", "--input"):
infile = a
elif o in ("-m", "--morphs"):
morphfiles = a
elif o in ("-c", "--colors"):
colorfiles = a
elif o in ("-o", "--output"):
outfile = a
elif o in ("-a", "--align"):
if a in ("top", "bottom", "center", "centerxz", "none"):
ALIGN = a
elif o in ("-s", "--shading"):
if a in ("flat", "smooth"):
SHADING = a
elif o in ("-t", "--type"):
if a in ("binary", "ascii"):
TYPE = a
elif o in ("-b", "--bakecolors"):
BAKE_COLORS = True
elif o in ("-x", "--truncatescale"):
TRUNCATE = True
SCALE = float(a)
elif o in ("-f", "--framestep"):
FRAMESTEP = int(a)
if infile == "" or outfile == "":
usage()
sys.exit(2)
print("Converting [%s] into [%s] ..." % (infile, outfile))
if morphfiles:
print("Morphs [%s]" % morphfiles)
if colorfiles:
print("Colors [%s]" % colorfiles)
if TYPE == "ascii":
convert_ascii(infile, morphfiles, colorfiles, outfile)
elif TYPE == "binary":
convert_binary(infile, outfile)
| mit |
tuxfux-hlp-notes/python-batches | archieves/batch-56/modules/sheets/lib/python2.7/site-packages/pip/_vendor/requests/compat.py | 571 | 2556 | # -*- coding: utf-8 -*-
"""
pythoncompat
"""
from .packages import chardet
import sys
# -------
# Pythons
# -------
# Syntax sugar.
_ver = sys.version_info
#: Python 2.x?
is_py2 = (_ver[0] == 2)
#: Python 3.x?
is_py3 = (_ver[0] == 3)
#: Python 3.0.x
is_py30 = (is_py3 and _ver[1] == 0)
#: Python 3.1.x
is_py31 = (is_py3 and _ver[1] == 1)
#: Python 3.2.x
is_py32 = (is_py3 and _ver[1] == 2)
#: Python 3.3.x
is_py33 = (is_py3 and _ver[1] == 3)
#: Python 3.4.x
is_py34 = (is_py3 and _ver[1] == 4)
#: Python 2.7.x
is_py27 = (is_py2 and _ver[1] == 7)
#: Python 2.6.x
is_py26 = (is_py2 and _ver[1] == 6)
#: Python 2.5.x
is_py25 = (is_py2 and _ver[1] == 5)
#: Python 2.4.x
is_py24 = (is_py2 and _ver[1] == 4) # I'm assuming this is not by choice.
# ---------
# Platforms
# ---------
# Syntax sugar.
_ver = sys.version.lower()
is_pypy = ('pypy' in _ver)
is_jython = ('jython' in _ver)
is_ironpython = ('iron' in _ver)
# Assume CPython, if nothing else.
is_cpython = not any((is_pypy, is_jython, is_ironpython))
# Windows-based system.
is_windows = 'win32' in str(sys.platform).lower()
# Standard Linux 2+ system.
is_linux = ('linux' in str(sys.platform).lower())
is_osx = ('darwin' in str(sys.platform).lower())
is_hpux = ('hpux' in str(sys.platform).lower()) # Complete guess.
is_solaris = ('sunos' in str(sys.platform).lower())  # sys.platform is 'sunos5' on Solaris.
try:
import simplejson as json
except ImportError:
import json
# ---------
# Specifics
# ---------
if is_py2:
from urllib import quote, unquote, quote_plus, unquote_plus, urlencode, getproxies, proxy_bypass
from urlparse import urlparse, urlunparse, urljoin, urlsplit, urldefrag
from urllib2 import parse_http_list
import cookielib
from Cookie import Morsel
from StringIO import StringIO
from .packages.urllib3.packages.ordered_dict import OrderedDict
from httplib import IncompleteRead
builtin_str = str
bytes = str
str = unicode
basestring = basestring
numeric_types = (int, long, float)
elif is_py3:
from urllib.parse import urlparse, urlunparse, urljoin, urlsplit, urlencode, quote, unquote, quote_plus, unquote_plus, urldefrag
from urllib.request import parse_http_list, getproxies, proxy_bypass
from http import cookiejar as cookielib
from http.cookies import Morsel
from io import StringIO
from collections import OrderedDict
from http.client import IncompleteRead
builtin_str = str
str = str
bytes = bytes
basestring = (str, bytes)
numeric_types = (int, float)
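# Downstream modules can then do e.g. `from .compat import str, bytes, urlparse`
# and pick up the right names on either interpreter line.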
| gpl-3.0 |
tashigaofei/BlogSpider | scrapy/utils/console.py | 10 | 1260 |
def start_python_console(namespace=None, noipython=False, banner=''):
"""Start Python console binded to the given namespace. If IPython is
available, an IPython console will be started instead, unless `noipython`
is True. Also, tab completion will be used on Unix systems.
"""
if namespace is None:
namespace = {}
try:
try: # use IPython if available
if noipython:
raise ImportError()
try:
from IPython.frontend.terminal.embed import InteractiveShellEmbed
sh = InteractiveShellEmbed(banner1=banner)
except ImportError:
from IPython.Shell import IPShellEmbed
sh = IPShellEmbed(banner=banner)
sh(global_ns={}, local_ns=namespace)
except ImportError:
import code
try: # readline module is only available on unix systems
import readline
except ImportError:
pass
else:
import rlcompleter
readline.parse_and_bind("tab:complete")
code.interact(banner=banner, local=namespace)
except SystemExit: # raised when using exit() in python code.interact
pass
| mit |
iut-ibk/DynaMind-UrbanSim | 3rdparty/opus/src/opus_core/indicator_framework/image_types/dataset_table.py | 2 | 9135 | # Opus/UrbanSim urban simulation software.
# Copyright (C) 2010-2011 University of California, Berkeley, 2005-2009 University of Washington
# See opus_core/LICENSE
from numpy import array, logical_and, logical_not, concatenate, newaxis, rank
from opus_core.indicator_framework.core.abstract_indicator import AbstractIndicator
from opus_core.variables.variable_name import VariableName
from opus_core.storage_factory import StorageFactory
from opus_core.database_management.configurations.database_configuration import DatabaseConfiguration
class DatasetTable(AbstractIndicator):
def __init__(self, source_data, dataset_name, attributes,
name, years = None, operation = None,
exclude_condition = None, output_type = 'tab',
storage_location = None):
if output_type == 'sql' and not isinstance(storage_location, DatabaseConfiguration):
raise "If DatasetTable output_type is 'sql', a Database object must be passed as storage_location."
elif output_type in ['dbf', 'csv', 'tab', 'esri'] and \
storage_location is not None and \
not isinstance(storage_location,str):
raise "If DatasetTable output_type is %s, storage_location must be a path to the output directory"%output_type
elif output_type not in ['dbf', 'csv', 'tab', 'sql', 'esri']:
raise "DatasetTable output_type needs to be either dbf, csv, tab, or sql"
self.output_type = output_type
self.exclude_condition = exclude_condition
self.name = name
AbstractIndicator.__init__(self, source_data, dataset_name,
attributes, years, operation, name,
storage_location=storage_location,
can_write_to_db = True)
self.output_type = output_type
kwargs = {}
if self.output_type == 'sql':
kwargs['protocol'] = storage_location.protocol
kwargs['username'] = storage_location.user_name
kwargs['password'] = storage_location.password
kwargs['hostname'] = storage_location.host_name
kwargs['database_name'] = storage_location.database_name
elif self.output_type == 'esri':
kwargs['storage_location'] = storage_location
else:
kwargs['storage_location'] = self.get_storage_location()
self.store = StorageFactory().get_storage(
type = '%s_storage'%(self.output_type),
**kwargs
)
def is_single_year_indicator_image_type(self):
return True
def get_file_extension(self):
return self.output_type
def get_visualization_shorthand(self):
return 'dataset_table'
def get_additional_metadata(self):
return [('output_type',self.output_type),
('exclude_condition',self.exclude_condition)]
def _create_indicator(self, year):
'''Creates a table with a column for each attribute specified in the arguments
The id attributes are also included as columns. The outputted file
contains data for only one year and one dataset.
'''
dataset = self._get_dataset(year)
id_attributes = dataset.get_id_attribute()
id_cols = dataset.get_id_name()
id_columns = [i for i in range(len(id_cols))]
col_titles = id_cols + [VariableName(attribute_name).get_alias()
for attribute_name in self.attributes]
cols = self._get_indicator(year)
if id_attributes.size == 1 and rank(cols) == 1:
cols = concatenate((id_attributes, cols))[:, newaxis]
else:
cols = concatenate((id_attributes[newaxis,:], cols))
if self.exclude_condition is not None:
exclude_mask = self._get_indicator(year,
attributes = [self.exclude_condition],
wrap = False)
cols = self._conditionally_eliminate_rows(
cols,
id_columns,
exclude_mask)
attribute_vals = {}
for i in range(len(col_titles)):
attribute_vals[col_titles[i]] = cols[i]
kwargs = {}
if self.output_type in ['csv','tab']:
kwargs['fixed_column_order'] = col_titles
kwargs['append_type_info'] = False
table_name = self.get_file_name(year = year,
suppress_extension_addition=True)
self.store.write_table(table_name = table_name,
table_data = attribute_vals,
**kwargs)
return self.get_file_path(year = year)
def _conditionally_eliminate_rows(self, data, id_columns, exclude_mask):
'''eliminates all the rows where all the data values match the exclude_condition
data -- an array of arrays
id_columns -- the columns which should be ignored when deciding to eliminate a row
exclude_mask -- the mask to be applied to the cols
'''
mask = logical_not(exclude_mask)
new_data = []
for col in data:
new_col = col[mask]
new_data.append(new_col)
return new_data
import os
from opus_core.tests import opus_unittest
from opus_core.indicator_framework.test_classes.abstract_indicator_test import AbstractIndicatorTest
from numpy import ma
class Tests(AbstractIndicatorTest):
def test_create_indicator(self):
indicator_path = os.path.join(self.temp_cache_path, 'indicators')
self.assert_(not os.path.exists(indicator_path))
table = DatasetTable(
source_data = self.source_data,
name = '',
dataset_name = 'test',
attributes = ['opus_core.test.attribute',
'opus_core.test.attribute2'],
output_type = 'tab'
)
table.create(False)
self.assert_(os.path.exists(indicator_path))
self.assert_(os.path.exists(os.path.join(indicator_path, 'test__dataset_table____1980.tab')))
self.assert_(os.path.exists(os.path.join(indicator_path, 'test__dataset_table____1980.meta')))
def test_conditionally_eliminate_rows_through_create_indicator(self):
indicator_path = os.path.join(self.temp_cache_path, 'indicators')
self.assert_(not os.path.exists(indicator_path))
table = DatasetTable(
source_data = self.source_data,
name = '',
dataset_name = 'test',
attributes = ['opus_core.test.attribute',
'opus_core.test.attribute2'],
output_type = 'tab',
exclude_condition = 'opus_core.test.attribute<7'
)
table.create(False)
fpath = os.path.join(indicator_path, 'test__dataset_table____1980.tab')
self.assert_(os.path.exists(indicator_path))
self.assert_(os.path.exists(fpath))
self.assert_(os.path.exists(os.path.join(indicator_path, 'test__dataset_table____1980.meta')))
expected_r1 = [3,7,70]
expected_r2 = [4,8,80]
f = open(fpath)
f.readline() #don't care about header
output_r1 = [int(c) for c in f.readline().split('\t')]
output_r2 = [int(c) for c in f.readline().split('\t')]
self.assertEqual(expected_r1,output_r1)
self.assertEqual(expected_r2,output_r2)
def test__conditionally_eliminate_rows(self):
dataset_table = DatasetTable(
source_data = self.source_data,
attributes = [],
dataset_name = 'test',
name = 'test')
data = [
array([1,1,2,2]),#id 1
array([1,2,0,0]),
array([1,2,0,0]),
array([1,2,1,2]),#id 2
array([0,2,0,4])
]
exclude_mask = array([0,0,0,0])
actual_output = dataset_table._conditionally_eliminate_rows(
data,
id_columns=[0,3],
exclude_mask = exclude_mask)
self.assert_(ma.allequal(actual_output,data))
exclude_mask = array([0,0,1,0])
actual_output = dataset_table._conditionally_eliminate_rows(
data,
id_columns=[0,3],
exclude_mask = exclude_mask)
desired_output = [
array([1,1,2]),
array([1,2,0]),
array([1,2,0]),
array([1,2,2]),
array([0,2,4])
]
self.assertEqual(len(actual_output), len(desired_output))
for col in range(len(actual_output)):
self.assert_(ma.allclose(actual_output[col], desired_output[col]))
if __name__ == '__main__':
opus_unittest.main()
| gpl-2.0 |
piantado/kelpy | examples/demo-tobii/demo_tobii_choice.py | 1 | 9949 | # -*- coding: utf-8 -*-
"""
This example builds on demo3b, which is a 2-AFC task. However, instead of clicking on the 'correct' image, the program recognizes a person's selection based on which image was looked at longer during the trial.
For the correct image, an 'affirmative' sound plays.
For the other image, an 'error' sound plays.
There is a set trial time; if no image is looked at for longer than the set looking threshold and the looking proportions do not differ by more than the set difference threshold, an error sound will also play.
The trial number, images, and their respective looking proportions over the duration of the trial are printed to the console.
Like demo3b, this takes a csv file of image pairs instead of having the images hard-coded into the program.
Rather than CommandableImageSprites, TobiiSprites are created which also require a TobiiController.
"""
import os, sys
import pygame
from random import randint, choice, sample, shuffle
from time import time
import csv
from kelpy.CommandableImageSprite import *
from kelpy.Miscellaneous import *
from kelpy.DisplayQueue import *
from kelpy.OrderedUpdates import *
from kelpy.EventHandler import *
from kelpy.tobii.TobiiController import *
from kelpy.tobii.TobiiSimController import *
from kelpy.tobii.TobiiSprite import *
from kelpy.tobii.TobiiWatcher import *
########################################
##The following are constants that are used to set up the size and arrangement of images later.
##
IMAGE_SCALE = 0.25
HOFFSET = 300
VOFFSET = 100
##############################################
## Set up pygame
## this is the initialization line, to set up a screen that we will display things on.
# the tobii eyetracker requires the screen to be fullsized for accurate gaze points
screen, spot = initialize_kelpy( dimensions = (1024, 768) )
##############################################
## This line fetches the size of the screen (the one we just created) and assigns
## the values to some constants that we can refer to later.
WINDOW_WIDTH, WINDOW_HEIGHT = screen.get_size()
## Those positions are then stored in arrays so they may be shuffled (randomized).
#check the spot values to figure out where they actually are...
onscreen_positions = [spot.b1, spot.b4]
offscreen_positions = [spot.west, spot.east]
##############################################
## We create a string to hold the filepath to the sound that will play when the 'correct' option is clicked.
sound_yup_path = kstimulus('sounds/Affirmative.wav')
sound_nope_path = kstimulus('sounds/Error3.wav')
###############################################
## Almost finished! We finally set up some arrays to hold the filepaths to the images we will be using.
## NOTE: In this version of the demo we intake a csv file that holds predetermined groupings of stimuli.
## These groups are presented randomly
car_images = [] ## we create some blank lists
animal_images = [] ## another blank list
## Check demo3b for more details on how this csv section works!
with open('stimuli_pairings.csv', 'rb') as f:
reader = csv.reader(f, delimiter=",")
for row in reader:
car_images.append(kstimulus(row[0]))
animal_images.append(kstimulus(row[1]))
##############################################
## setup and activate tobii
# this creates a TobiiController that calls the actual Tobii SDK code
tobii_controller = TobiiController(screen)
#tobii_controller = TobiiSimController(screen)
# this searches for the tobii eyetracker that is connected.
# It times out based on the given amount of seconds (the default is 1,000 seconds) and exits this program
tobii_controller.wait_for_find_eyetracker(3)
#set the name of the data file that will output all of the Tobii data
tobii_controller.set_data_file('testdata.tsv')
#activate the first tobii eyetracker that was found
tobii_controller.activate(tobii_controller.eyetrackers.keys()[0])
##############################################
#This is the function we will eventually use to run the trial.
## It requires two arrays of image filepaths be passed to it.
def present_trial(car_paths, animal_paths):
"""
    This is our main method that we use to run this trial. It accepts two string arrays as its parameters.
car_paths
animal_paths
    One image is randomly picked from each of these arrays to serve as the stimuli for the trial.
This function makes use of some kelpy classes to display the images and then handle user interaction.
"""
## First we set up the elements of the trial.
## The images we will be using are set up like so...
    ## We pick a random index into each list and assign them to the variables pick1 and pick2.
pick1 = randint(0, len(car_paths)-1)
pick2 = randint(0, len(animal_paths)-1)
    ## Later this function will create two TobiiSprite objects with those two images.
## We start by first making an empty array to hold the objects.
img = [None] * 2
## We shuffle out offscreen values and later assign one of them to each image.
## This will make them appear to come from random directions when they come on screen.
shuffle(offscreen_positions)
## We then create our TobiiSprite objects.
    ## These are initialized the same way as CommandableImageSprites, but must also take the TobiiController as an argument and accept an optional "is_following" argument (defaults to False). In this example, since we don't want the images to move with eye gaze, we leave is_following at its default value.
img[0] = TobiiSprite( screen, offscreen_positions[0], car_paths[pick1], tobii_controller, scale=IMAGE_SCALE)
img[1] = TobiiSprite( screen, offscreen_positions[1], animal_paths[pick2], tobii_controller, scale=IMAGE_SCALE)
## We then designate which image is going to be the correct one, and store a reference to that object.
## in this case the image assigned to img[1] is always drawn from our pool of animal images.
correct = img[1]
    ## This line sets up the display queue (from the kelpy class DisplayQueue). Think of this as our list of things to happen.
Q = DisplayQueue()
#### These next lines are a script of what is to happen in the experiment.
## We move the two objects in from their start positions offscreen.
## They are moved to a shuffled position from the onscreen_positions array.
## Note that we first shuffle the array to randomize the positions.
shuffle(onscreen_positions)
Q.append(obj=img[0], action='move', pos= onscreen_positions[0], duration=0.5)
Q.append(obj=img[1], action='move', pos= onscreen_positions[1], duration=0.5)
# We store the order that we will draw and update things in this variable 'dos'
dos = OrderedUpdates(*img)
## And we take a note of the time that the trial starts with this line.
## Calling the time() method from the python time library.
start_time = time()
#####################################################
##These next few lines are used to print all of our info in a nice and orderly fashion.
##the python str method rsplit is used to seperate the filename from the rest of the filepath.
## we simply match everything 1 backslash from the end, it returns both items in a list.
## we want the last item, so we ask for item 1 from the list. It's repeated for the car and animal images.
car_used = filename(car_images[pick1])
animal_used = filename(animal_images[pick2])
###############
## Then we determine whether the onscreen position for the car was left or right. We deduce that the animal was the opposite.
## These variables are used later to print the on screen position.
if onscreen_positions[0] is spot.west:
car_position = 'LEFT'
animal_position = 'RIGHT'
else:
car_position = 'RIGHT'
animal_position = 'LEFT'
#start tracking
tobii_controller.start_tracking()
## The standard event loop in kelpy -- this loops infinitely to process interactions
## and throws events depending on what the user does
trial_time = 5.0
for event in kelpy_standard_event_loop(screen, Q, dos, throw_null_events=True):
if (time() - start_time > trial_time):
print "trial end"
break
#this is set specifically for the tobii controller; otherwise the program hangs
#since the text file is not closed
if event.type == QUIT:
tobii_controller.close_data_file()
tobii_controller.destroy()
tobii_controller.stop_tracking()
chosen = None
    max_value = 0.0
proportions = looking_proportions(dos, trial_time)
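    ## proportions maps each sprite to the fraction of trial_time it was looked
    ## at, e.g. (hypothetical) {img[0]: 0.31, img[1]: 0.52}; the values need not
    ## sum to 1.0 since gaze can also fall on neither image.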
print proportions
for item, value in proportions.iteritems():
if value > max_value:
chosen = item
max_value = value
if chosen is not None:
print "chosen: ", filename(chosen.image_path), proportions[chosen]
else:
print "nothing chosen"
if chosen is correct:
## Print whether the correct item was clicked, which car was used, it's position, which animal was used and it's position, and how long the trial took in seconds.
play_sound(sound_yup_path, wait=True, volume=5.0)
return True, car_used, car_position, animal_used, animal_position, trial_time
else:
## otherwise print the fail sound and print false.
play_sound(sound_nope_path, wait=True, volume=5.0)
return False, car_used, car_position, animal_used, animal_position, trial_time
##################################
## (end of present trial function)
# Then finally we use a for loop and the present_trial function to present a number of blocks.
for block in range(10):
## we print the block number along with the trial results.
print block, present_trial(car_images, animal_images)
#make sure the TobiiController has closed the data file and removed itself
tobii_controller.close_data_file()
tobii_controller.destroy()
##run()
| gpl-3.0 |
theheros/kbengine | kbe/src/lib/python/Lib/sched.py | 47 | 5541 | """A generally useful event scheduler class.
Each instance of this class manages its own queue.
No multi-threading is implied; you are supposed to hack that
yourself, or use a single instance per application.
Each instance is parametrized with two functions, one that is
supposed to return the current time, one that is supposed to
implement a delay. You can implement real-time scheduling by
substituting time and sleep from built-in module time, or you can
implement simulated time by writing your own functions. This can
also be used to integrate scheduling with STDWIN events; the delay
function is allowed to modify the queue. Time can be expressed as
integers or floating point numbers, as long as it is consistent.
Events are specified by tuples (time, priority, action, argument).
As in UNIX, lower priority numbers mean higher priority; in this
way the queue can be maintained as a priority queue. Execution of the
event means calling the action function, passing it the argument
sequence in "argument" (remember that in Python, multiple function
arguments are packed in a sequence).
The action function may be an instance method so it
has another way to reference private data (besides global variables).
"""
# XXX The timefunc and delayfunc should have been defined as methods
# XXX so you can define new kinds of schedulers using subclassing
# XXX instead of having to define a module or class just to hold
# XXX the global state of your particular time and delay functions.
import heapq
from collections import namedtuple
__all__ = ["scheduler"]
class Event(namedtuple('Event', 'time, priority, action, argument')):
def __eq__(s, o): return (s.time, s.priority) == (o.time, o.priority)
def __ne__(s, o): return (s.time, s.priority) != (o.time, o.priority)
def __lt__(s, o): return (s.time, s.priority) < (o.time, o.priority)
def __le__(s, o): return (s.time, s.priority) <= (o.time, o.priority)
def __gt__(s, o): return (s.time, s.priority) > (o.time, o.priority)
def __ge__(s, o): return (s.time, s.priority) >= (o.time, o.priority)
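# Editor's note (illustration, not in the original source): ordering ignores
# action and argument, so e.g. Event(5, 1, a, ()) < Event(5, 2, b, ()) for any
# a and b; this (time, priority) ordering is exactly what the heap in
# scheduler below needs.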
class scheduler:
def __init__(self, timefunc, delayfunc):
"""Initialize a new instance, passing the time and delay
functions"""
self._queue = []
self.timefunc = timefunc
self.delayfunc = delayfunc
def enterabs(self, time, priority, action, argument):
"""Enter a new event in the queue at an absolute time.
Returns an ID for the event which can be used to remove it,
if necessary.
"""
event = Event(time, priority, action, argument)
heapq.heappush(self._queue, event)
return event # The ID
def enter(self, delay, priority, action, argument):
"""A variant that specifies the time as a relative time.
This is actually the more commonly used interface.
"""
time = self.timefunc() + delay
return self.enterabs(time, priority, action, argument)
def cancel(self, event):
"""Remove an event from the queue.
This must be presented the ID as returned by enter().
If the event is not in the queue, this raises ValueError.
"""
self._queue.remove(event)
heapq.heapify(self._queue)
def empty(self):
"""Check whether the queue is empty."""
return not self._queue
def run(self):
"""Execute events until the queue is empty.
When there is a positive delay until the first event, the
delay function is called and the event is left in the queue;
otherwise, the event is removed from the queue and executed
(its action function is called, passing it the argument). If
the delay function returns prematurely, it is simply
restarted.
It is legal for both the delay function and the action
function to modify the queue or to raise an exception;
exceptions are not caught but the scheduler's state remains
well-defined so run() may be called again.
A questionable hack is added to allow other threads to run:
just after an event is executed, a delay of 0 is executed, to
avoid monopolizing the CPU when other threads are also
runnable.
"""
# localize variable access to minimize overhead
# and to improve thread safety
q = self._queue
delayfunc = self.delayfunc
timefunc = self.timefunc
pop = heapq.heappop
while q:
time, priority, action, argument = checked_event = q[0]
now = timefunc()
if now < time:
delayfunc(time - now)
else:
event = pop(q)
# Verify that the event was not removed or altered
# by another thread after we last looked at q[0].
if event is checked_event:
action(*argument)
delayfunc(0) # Let other threads run
else:
heapq.heappush(q, event)
@property
def queue(self):
"""An ordered list of upcoming events.
Events are named tuples with fields for:
time, priority, action, arguments
"""
# Use heapq to sort the queue rather than using 'sorted(self._queue)'.
# With heapq, two events scheduled at the same time will show in
# the actual order they would be retrieved.
events = self._queue[:]
return map(heapq.heappop, [events]*len(events))
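if __name__ == '__main__':
    # Editor's demo (not part of the original module): a real-time scheduler
    # built from the time module, as the module docstring suggests.
    import time
    s = scheduler(time.time, time.sleep)
    s.enter(0.1, 1, print, ('fired 0.1s after scheduling',))       # relative
    s.enterabs(time.time() + 0.2, 1, print, ('fired at an absolute time',))
    s.run()  # blocks until the queue is empty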
| lgpl-3.0 |
willingc/zulip | zerver/lib/avatar.py | 124 | 1765 | from __future__ import absolute_import
from django.conf import settings
import hashlib
from zerver.lib.utils import make_safe_digest
def gravatar_hash(email):
"""Compute the Gravatar hash for an email address."""
# Non-ASCII characters aren't permitted by the currently active e-mail
# RFCs. However, the IETF has published https://tools.ietf.org/html/rfc4952,
# outlining internationalization of email addresses. Regardless, if we
# typo an address or someone manages to give us a non-ASCII address, let's
# not error out on it.
return make_safe_digest(email.lower(), hashlib.md5)
def user_avatar_hash(email):
# Salting the user_key may be overkill, but it prevents us from
# basically mimicking Gravatar's hashing scheme, which could lead
# to some abuse scenarios like folks using us as a free Gravatar
# replacement.
user_key = email.lower() + settings.AVATAR_SALT
return make_safe_digest(user_key, hashlib.sha1)
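# Editor's sketch (illustrative, not in the original source) of how the two
# hashes differ for the same address, both going through make_safe_digest:
#   gravatar_hash('Foo@Example.com')    # digest of 'foo@example.com' (md5)
#   user_avatar_hash('Foo@Example.com') # digest of 'foo@example.com' + AVATAR_SALT (sha1)
# The salt is what keeps our upload keys from doubling as valid Gravatar
# hashes for the same address.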
def avatar_url(user_profile):
return get_avatar_url(
user_profile.avatar_source,
user_profile.email
)
def get_avatar_url(avatar_source, email):
if avatar_source == 'U':
hash_key = user_avatar_hash(email)
if settings.LOCAL_UPLOADS_DIR is not None:
# ?x=x allows templates to append additional parameters with &s
return "/user_avatars/%s.png?x=x" % (hash_key)
else:
bucket = settings.S3_AVATAR_BUCKET
return "https://%s.s3.amazonaws.com/%s?x=x" % (bucket, hash_key)
elif settings.ENABLE_GRAVATAR:
hash_key = gravatar_hash(email)
return "https://secure.gravatar.com/avatar/%s?d=identicon" % (hash_key,)
else:
return settings.DEFAULT_AVATAR_URI+'?x=x'
| apache-2.0 |
grumpyjames/buck | third-party/py/unittest2/unittest2/test/test_setups.py | 111 | 16845 | import sys
from cStringIO import StringIO
import unittest2
from unittest2.test.support import resultFactory
class TestSetups(unittest2.TestCase):
def getRunner(self):
return unittest2.TextTestRunner(resultclass=resultFactory,
stream=StringIO())
def runTests(self, *cases):
suite = unittest2.TestSuite()
for case in cases:
tests = unittest2.defaultTestLoader.loadTestsFromTestCase(case)
suite.addTests(tests)
runner = self.getRunner()
# creating a nested suite exposes some potential bugs
realSuite = unittest2.TestSuite()
realSuite.addTest(suite)
# adding empty suites to the end exposes potential bugs
suite.addTest(unittest2.TestSuite())
realSuite.addTest(unittest2.TestSuite())
return runner.run(realSuite)
def test_setup_class(self):
class Test(unittest2.TestCase):
setUpCalled = 0
@classmethod
def setUpClass(cls):
Test.setUpCalled += 1
unittest2.TestCase.setUpClass()
def test_one(self):
pass
def test_two(self):
pass
result = self.runTests(Test)
self.assertEqual(Test.setUpCalled, 1)
self.assertEqual(result.testsRun, 2)
self.assertEqual(len(result.errors), 0)
def test_teardown_class(self):
class Test(unittest2.TestCase):
tearDownCalled = 0
@classmethod
def tearDownClass(cls):
Test.tearDownCalled += 1
unittest2.TestCase.tearDownClass()
def test_one(self):
pass
def test_two(self):
pass
result = self.runTests(Test)
self.assertEqual(Test.tearDownCalled, 1)
self.assertEqual(result.testsRun, 2)
self.assertEqual(len(result.errors), 0)
def test_teardown_class_two_classes(self):
class Test(unittest2.TestCase):
tearDownCalled = 0
@classmethod
def tearDownClass(cls):
Test.tearDownCalled += 1
unittest2.TestCase.tearDownClass()
def test_one(self):
pass
def test_two(self):
pass
class Test2(unittest2.TestCase):
tearDownCalled = 0
@classmethod
def tearDownClass(cls):
Test2.tearDownCalled += 1
unittest2.TestCase.tearDownClass()
def test_one(self):
pass
def test_two(self):
pass
result = self.runTests(Test, Test2)
self.assertEqual(Test.tearDownCalled, 1)
self.assertEqual(Test2.tearDownCalled, 1)
self.assertEqual(result.testsRun, 4)
self.assertEqual(len(result.errors), 0)
def test_error_in_setupclass(self):
class BrokenTest(unittest2.TestCase):
@classmethod
def setUpClass(cls):
raise TypeError('foo')
def test_one(self):
pass
def test_two(self):
pass
result = self.runTests(BrokenTest)
self.assertEqual(result.testsRun, 0)
self.assertEqual(len(result.errors), 1)
error, _ = result.errors[0]
self.assertEqual(str(error),
'setUpClass (%s.BrokenTest)' % __name__)
def test_error_in_teardown_class(self):
class Test(unittest2.TestCase):
tornDown = 0
@classmethod
def tearDownClass(cls):
Test.tornDown += 1
raise TypeError('foo')
def test_one(self):
pass
def test_two(self):
pass
class Test2(unittest2.TestCase):
tornDown = 0
@classmethod
def tearDownClass(cls):
Test2.tornDown += 1
raise TypeError('foo')
def test_one(self):
pass
def test_two(self):
pass
result = self.runTests(Test, Test2)
self.assertEqual(result.testsRun, 4)
self.assertEqual(len(result.errors), 2)
self.assertEqual(Test.tornDown, 1)
self.assertEqual(Test2.tornDown, 1)
error, _ = result.errors[0]
self.assertEqual(str(error),
'tearDownClass (%s.Test)' % __name__)
def test_class_not_torndown_when_setup_fails(self):
class Test(unittest2.TestCase):
tornDown = False
@classmethod
def setUpClass(cls):
raise TypeError
@classmethod
def tearDownClass(cls):
Test.tornDown = True
raise TypeError('foo')
def test_one(self):
pass
self.runTests(Test)
self.assertFalse(Test.tornDown)
def test_class_not_setup_or_torndown_when_skipped(self):
class Test(unittest2.TestCase):
classSetUp = False
tornDown = False
@classmethod
def setUpClass(cls):
Test.classSetUp = True
@classmethod
def tearDownClass(cls):
Test.tornDown = True
def test_one(self):
pass
Test = unittest2.skip("hop")(Test)
self.runTests(Test)
self.assertFalse(Test.classSetUp)
self.assertFalse(Test.tornDown)
def test_setup_teardown_order_with_pathological_suite(self):
results = []
class Module1(object):
@staticmethod
def setUpModule():
results.append('Module1.setUpModule')
@staticmethod
def tearDownModule():
results.append('Module1.tearDownModule')
class Module2(object):
@staticmethod
def setUpModule():
results.append('Module2.setUpModule')
@staticmethod
def tearDownModule():
results.append('Module2.tearDownModule')
class Test1(unittest2.TestCase):
@classmethod
def setUpClass(cls):
results.append('setup 1')
@classmethod
def tearDownClass(cls):
results.append('teardown 1')
def testOne(self):
results.append('Test1.testOne')
def testTwo(self):
results.append('Test1.testTwo')
class Test2(unittest2.TestCase):
@classmethod
def setUpClass(cls):
results.append('setup 2')
@classmethod
def tearDownClass(cls):
results.append('teardown 2')
def testOne(self):
results.append('Test2.testOne')
def testTwo(self):
results.append('Test2.testTwo')
class Test3(unittest2.TestCase):
@classmethod
def setUpClass(cls):
results.append('setup 3')
@classmethod
def tearDownClass(cls):
results.append('teardown 3')
def testOne(self):
results.append('Test3.testOne')
def testTwo(self):
results.append('Test3.testTwo')
Test1.__module__ = Test2.__module__ = 'Module'
Test3.__module__ = 'Module2'
sys.modules['Module'] = Module1
sys.modules['Module2'] = Module2
first = unittest2.TestSuite((Test1('testOne'),))
second = unittest2.TestSuite((Test1('testTwo'),))
third = unittest2.TestSuite((Test2('testOne'),))
fourth = unittest2.TestSuite((Test2('testTwo'),))
fifth = unittest2.TestSuite((Test3('testOne'),))
sixth = unittest2.TestSuite((Test3('testTwo'),))
suite = unittest2.TestSuite((first, second, third, fourth, fifth, sixth))
runner = self.getRunner()
result = runner.run(suite)
self.assertEqual(result.testsRun, 6)
self.assertEqual(len(result.errors), 0)
self.assertEqual(results,
['Module1.setUpModule', 'setup 1',
'Test1.testOne', 'Test1.testTwo', 'teardown 1',
'setup 2', 'Test2.testOne', 'Test2.testTwo',
'teardown 2', 'Module1.tearDownModule',
'Module2.setUpModule', 'setup 3',
'Test3.testOne', 'Test3.testTwo',
'teardown 3', 'Module2.tearDownModule'])
def test_setup_module(self):
class Module(object):
moduleSetup = 0
@staticmethod
def setUpModule():
Module.moduleSetup += 1
class Test(unittest2.TestCase):
def test_one(self):
pass
def test_two(self):
pass
Test.__module__ = 'Module'
sys.modules['Module'] = Module
result = self.runTests(Test)
self.assertEqual(Module.moduleSetup, 1)
self.assertEqual(result.testsRun, 2)
self.assertEqual(len(result.errors), 0)
def test_error_in_setup_module(self):
class Module(object):
moduleSetup = 0
moduleTornDown = 0
@staticmethod
def setUpModule():
Module.moduleSetup += 1
raise TypeError('foo')
@staticmethod
def tearDownModule():
Module.moduleTornDown += 1
class Test(unittest2.TestCase):
classSetUp = False
classTornDown = False
@classmethod
def setUpClass(cls):
Test.classSetUp = True
@classmethod
def tearDownClass(cls):
Test.classTornDown = True
def test_one(self):
pass
def test_two(self):
pass
class Test2(unittest2.TestCase):
def test_one(self):
pass
def test_two(self):
pass
Test.__module__ = 'Module'
Test2.__module__ = 'Module'
sys.modules['Module'] = Module
result = self.runTests(Test, Test2)
self.assertEqual(Module.moduleSetup, 1)
self.assertEqual(Module.moduleTornDown, 0)
self.assertEqual(result.testsRun, 0)
self.assertFalse(Test.classSetUp)
self.assertFalse(Test.classTornDown)
self.assertEqual(len(result.errors), 1)
error, _ = result.errors[0]
self.assertEqual(str(error), 'setUpModule (Module)')
def test_testcase_with_missing_module(self):
class Test(unittest2.TestCase):
def test_one(self):
pass
def test_two(self):
pass
Test.__module__ = 'Module'
sys.modules.pop('Module', None)
result = self.runTests(Test)
self.assertEqual(result.testsRun, 2)
def test_teardown_module(self):
class Module(object):
moduleTornDown = 0
@staticmethod
def tearDownModule():
Module.moduleTornDown += 1
class Test(unittest2.TestCase):
def test_one(self):
pass
def test_two(self):
pass
Test.__module__ = 'Module'
sys.modules['Module'] = Module
result = self.runTests(Test)
self.assertEqual(Module.moduleTornDown, 1)
self.assertEqual(result.testsRun, 2)
self.assertEqual(len(result.errors), 0)
def test_error_in_teardown_module(self):
class Module(object):
moduleTornDown = 0
@staticmethod
def tearDownModule():
Module.moduleTornDown += 1
raise TypeError('foo')
class Test(unittest2.TestCase):
classSetUp = False
classTornDown = False
@classmethod
def setUpClass(cls):
Test.classSetUp = True
@classmethod
def tearDownClass(cls):
Test.classTornDown = True
def test_one(self):
pass
def test_two(self):
pass
class Test2(unittest2.TestCase):
def test_one(self):
pass
def test_two(self):
pass
Test.__module__ = 'Module'
Test2.__module__ = 'Module'
sys.modules['Module'] = Module
result = self.runTests(Test, Test2)
self.assertEqual(Module.moduleTornDown, 1)
self.assertEqual(result.testsRun, 4)
self.assertTrue(Test.classSetUp)
self.assertTrue(Test.classTornDown)
self.assertEqual(len(result.errors), 1)
error, _ = result.errors[0]
self.assertEqual(str(error), 'tearDownModule (Module)')
def test_skiptest_in_setupclass(self):
class Test(unittest2.TestCase):
@classmethod
def setUpClass(cls):
raise unittest2.SkipTest('foo')
def test_one(self):
pass
def test_two(self):
pass
result = self.runTests(Test)
self.assertEqual(result.testsRun, 0)
self.assertEqual(len(result.errors), 0)
self.assertEqual(len(result.skipped), 1)
skipped = result.skipped[0][0]
self.assertEqual(str(skipped), 'setUpClass (%s.Test)' % __name__)
def test_skiptest_in_setupmodule(self):
class Test(unittest2.TestCase):
def test_one(self):
pass
def test_two(self):
pass
class Module(object):
@staticmethod
def setUpModule():
raise unittest2.SkipTest('foo')
Test.__module__ = 'Module'
sys.modules['Module'] = Module
result = self.runTests(Test)
self.assertEqual(result.testsRun, 0)
self.assertEqual(len(result.errors), 0)
self.assertEqual(len(result.skipped), 1)
skipped = result.skipped[0][0]
self.assertEqual(str(skipped), 'setUpModule (Module)')
def test_suite_debug_executes_setups_and_teardowns(self):
ordering = []
class Module(object):
@staticmethod
def setUpModule():
ordering.append('setUpModule')
@staticmethod
def tearDownModule():
ordering.append('tearDownModule')
class Test(unittest2.TestCase):
@classmethod
def setUpClass(cls):
ordering.append('setUpClass')
@classmethod
def tearDownClass(cls):
ordering.append('tearDownClass')
def test_something(self):
ordering.append('test_something')
Test.__module__ = 'Module'
sys.modules['Module'] = Module
suite = unittest2.defaultTestLoader.loadTestsFromTestCase(Test)
suite.debug()
expectedOrder = ['setUpModule', 'setUpClass', 'test_something', 'tearDownClass', 'tearDownModule']
self.assertEqual(ordering, expectedOrder)
def test_suite_debug_propagates_exceptions(self):
class Module(object):
@staticmethod
def setUpModule():
if phase == 0:
raise Exception('setUpModule')
@staticmethod
def tearDownModule():
if phase == 1:
raise Exception('tearDownModule')
class Test(unittest2.TestCase):
@classmethod
def setUpClass(cls):
if phase == 2:
raise Exception('setUpClass')
@classmethod
def tearDownClass(cls):
if phase == 3:
raise Exception('tearDownClass')
def test_something(self):
if phase == 4:
raise Exception('test_something')
Test.__module__ = 'Module'
sys.modules['Module'] = Module
_suite = unittest2.defaultTestLoader.loadTestsFromTestCase(Test)
suite = unittest2.TestSuite()
# nesting a suite again exposes a bug in the initial implementation
suite.addTest(_suite)
messages = ('setUpModule', 'tearDownModule', 'setUpClass', 'tearDownClass', 'test_something')
for phase, msg in enumerate(messages):
self.assertRaisesRegexp(Exception, msg, suite.debug)
| apache-2.0 |
CowboyKenbot/VT | node_modules/npm-shrinkwrap/node_modules/npm/node_modules/node-gyp/gyp/pylib/gyp/generator/msvs.py | 372 | 124844 | # Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import collections
import copy
import ntpath
import os
import posixpath
import re
import subprocess
import sys
import gyp.common
import gyp.easy_xml as easy_xml
import gyp.MSVSNew as MSVSNew
import gyp.MSVSProject as MSVSProject
import gyp.MSVSSettings as MSVSSettings
import gyp.MSVSToolFile as MSVSToolFile
import gyp.MSVSUserFile as MSVSUserFile
import gyp.MSVSUtil as MSVSUtil
import gyp.MSVSVersion as MSVSVersion
from gyp.common import GypError
# TODO: Remove once bots are on 2.7, http://crbug.com/241769
def _import_OrderedDict():
import collections
try:
return collections.OrderedDict
except AttributeError:
import gyp.ordered_dict
return gyp.ordered_dict.OrderedDict
OrderedDict = _import_OrderedDict()
# Regular expression for validating Visual Studio GUIDs. If the GUID
# contains lowercase hex letters, MSVS will be fine. However,
# IncrediBuild BuildConsole will parse the solution file, but then
# silently skip building the target causing hard to track down errors.
# Note that this only happens with the BuildConsole, and does not occur
# if IncrediBuild is executed from inside Visual Studio. This regex
# validates that the string looks like a GUID with all uppercase hex
# letters.
VALID_MSVS_GUID_CHARS = re.compile('^[A-F0-9\-]+$')
generator_default_variables = {
'EXECUTABLE_PREFIX': '',
'EXECUTABLE_SUFFIX': '.exe',
'STATIC_LIB_PREFIX': '',
'SHARED_LIB_PREFIX': '',
'STATIC_LIB_SUFFIX': '.lib',
'SHARED_LIB_SUFFIX': '.dll',
'INTERMEDIATE_DIR': '$(IntDir)',
'SHARED_INTERMEDIATE_DIR': '$(OutDir)obj/global_intermediate',
'OS': 'win',
'PRODUCT_DIR': '$(OutDir)',
'LIB_DIR': '$(OutDir)lib',
'RULE_INPUT_ROOT': '$(InputName)',
'RULE_INPUT_DIRNAME': '$(InputDir)',
'RULE_INPUT_EXT': '$(InputExt)',
'RULE_INPUT_NAME': '$(InputFileName)',
'RULE_INPUT_PATH': '$(InputPath)',
'CONFIGURATION_NAME': '$(ConfigurationName)',
}
# The msvs specific sections that hold paths
generator_additional_path_sections = [
'msvs_cygwin_dirs',
'msvs_props',
]
generator_additional_non_configuration_keys = [
'msvs_cygwin_dirs',
'msvs_cygwin_shell',
'msvs_large_pdb',
'msvs_shard',
'msvs_external_builder',
'msvs_external_builder_out_dir',
'msvs_external_builder_build_cmd',
'msvs_external_builder_clean_cmd',
]
# List of precompiled header related keys.
precomp_keys = [
'msvs_precompiled_header',
'msvs_precompiled_source',
]
cached_username = None
cached_domain = None
# Based on http://code.activestate.com/recipes/576694/.
class OrderedSet(collections.MutableSet):
def __init__(self, iterable=None):
self.end = end = []
end += [None, end, end] # sentinel node for doubly linked list
self.map = {} # key --> [key, prev, next]
if iterable is not None:
self |= iterable
def __len__(self):
return len(self.map)
def discard(self, key):
if key in self.map:
key, prev, next = self.map.pop(key)
prev[2] = next
next[1] = prev
def __contains__(self, key):
return key in self.map
def add(self, key):
if key not in self.map:
end = self.end
curr = end[1]
curr[2] = end[1] = self.map[key] = [key, curr, end]
def update(self, iterable):
for i in iterable:
if i not in self:
self.add(i)
def __iter__(self):
end = self.end
curr = end[2]
while curr is not end:
yield curr[0]
curr = curr[2]
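# Editor's illustration (not in the original source): OrderedSet deduplicates
# while preserving first-insertion order, which keeps generated project files
# stable across runs:
#   >>> list(OrderedSet(['b', 'a', 'b', 'c']))
#   ['b', 'a', 'c']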
# TODO(gspencer): Switch the os.environ calls to be
# win32api.GetDomainName() and win32api.GetUserName() once the
# python version in depot_tools has been updated to work on Vista
# 64-bit.
def _GetDomainAndUserName():
if sys.platform not in ('win32', 'cygwin'):
return ('DOMAIN', 'USERNAME')
global cached_username
global cached_domain
if not cached_domain or not cached_username:
domain = os.environ.get('USERDOMAIN')
username = os.environ.get('USERNAME')
if not domain or not username:
call = subprocess.Popen(['net', 'config', 'Workstation'],
stdout=subprocess.PIPE)
config = call.communicate()[0]
username_re = re.compile('^User name\s+(\S+)', re.MULTILINE)
username_match = username_re.search(config)
if username_match:
username = username_match.group(1)
domain_re = re.compile('^Logon domain\s+(\S+)', re.MULTILINE)
domain_match = domain_re.search(config)
if domain_match:
domain = domain_match.group(1)
cached_domain = domain
cached_username = username
return (cached_domain, cached_username)
fixpath_prefix = None
def _NormalizedSource(source):
"""Normalize the path.
But not if that gets rid of a variable, as this may expand to something
larger than one directory.
Arguments:
source: The path to be normalized.
Returns:
The normalized path.
"""
normalized = os.path.normpath(source)
if source.count('$') == normalized.count('$'):
source = normalized
return source
def _FixPath(path):
"""Convert paths to a form that will make sense in a vcproj file.
Arguments:
path: The path to convert, may contain / etc.
Returns:
The path with all slashes made into backslashes.
"""
if fixpath_prefix and path and not os.path.isabs(path) and not path[0] == '$':
path = os.path.join(fixpath_prefix, path)
path = path.replace('/', '\\')
path = _NormalizedSource(path)
if path and path[-1] == '\\':
path = path[:-1]
return path
def _FixPaths(paths):
"""Fix each of the paths of the list."""
return [_FixPath(i) for i in paths]
def _ConvertSourcesToFilterHierarchy(sources, prefix=None, excluded=None,
list_excluded=True, msvs_version=None):
"""Converts a list split source file paths into a vcproj folder hierarchy.
Arguments:
sources: A list of source file paths split.
prefix: A list of source file path layers meant to apply to each of sources.
excluded: A set of excluded files.
msvs_version: A MSVSVersion object.
Returns:
A hierarchy of filenames and MSVSProject.Filter objects that matches the
layout of the source tree.
For example:
_ConvertSourcesToFilterHierarchy([['a', 'bob1.c'], ['b', 'bob2.c']],
prefix=['joe'])
-->
[MSVSProject.Filter('a', contents=['joe\\a\\bob1.c']),
MSVSProject.Filter('b', contents=['joe\\b\\bob2.c'])]
"""
if not prefix: prefix = []
result = []
excluded_result = []
folders = OrderedDict()
# Gather files into the final result, excluded, or folders.
for s in sources:
if len(s) == 1:
filename = _NormalizedSource('\\'.join(prefix + s))
if filename in excluded:
excluded_result.append(filename)
else:
result.append(filename)
elif msvs_version and not msvs_version.UsesVcxproj():
# For MSVS 2008 and earlier, we need to process all files before walking
# the sub folders.
if not folders.get(s[0]):
folders[s[0]] = []
folders[s[0]].append(s[1:])
else:
contents = _ConvertSourcesToFilterHierarchy([s[1:]], prefix + [s[0]],
excluded=excluded,
list_excluded=list_excluded,
msvs_version=msvs_version)
contents = MSVSProject.Filter(s[0], contents=contents)
result.append(contents)
# Add a folder for excluded files.
if excluded_result and list_excluded:
excluded_folder = MSVSProject.Filter('_excluded_files',
contents=excluded_result)
result.append(excluded_folder)
if msvs_version and msvs_version.UsesVcxproj():
return result
# Populate all the folders.
for f in folders:
contents = _ConvertSourcesToFilterHierarchy(folders[f], prefix=prefix + [f],
excluded=excluded,
list_excluded=list_excluded)
contents = MSVSProject.Filter(f, contents=contents)
result.append(contents)
return result
def _ToolAppend(tools, tool_name, setting, value, only_if_unset=False):
if not value: return
_ToolSetOrAppend(tools, tool_name, setting, value, only_if_unset)
def _ToolSetOrAppend(tools, tool_name, setting, value, only_if_unset=False):
# TODO(bradnelson): ugly hack, fix this more generally!!!
if 'Directories' in setting or 'Dependencies' in setting:
if type(value) == str:
value = value.replace('/', '\\')
else:
value = [i.replace('/', '\\') for i in value]
if not tools.get(tool_name):
tools[tool_name] = dict()
tool = tools[tool_name]
if tool.get(setting):
if only_if_unset: return
if type(tool[setting]) == list and type(value) == list:
tool[setting] += value
else:
raise TypeError(
'Appending "%s" to a non-list setting "%s" for tool "%s" is '
'not allowed, previous value: %s' % (
value, setting, tool_name, str(tool[setting])))
else:
tool[setting] = value
def _ConfigPlatform(config_data):
return config_data.get('msvs_configuration_platform', 'Win32')
def _ConfigBaseName(config_name, platform_name):
if config_name.endswith('_' + platform_name):
return config_name[0:-len(platform_name) - 1]
else:
return config_name
def _ConfigFullName(config_name, config_data):
platform_name = _ConfigPlatform(config_data)
return '%s|%s' % (_ConfigBaseName(config_name, platform_name), platform_name)
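# Editor's illustration (hypothetical config): a configuration named
# 'Debug_x64' whose data sets msvs_configuration_platform to 'x64' maps to the
# MSVS name 'Debug|x64'; a plain 'Debug' with the default platform maps to
# 'Debug|Win32'.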
def _BuildCommandLineForRuleRaw(spec, cmd, cygwin_shell, has_input_path,
quote_cmd, do_setup_env):
if [x for x in cmd if '$(InputDir)' in x]:
input_dir_preamble = (
'set INPUTDIR=$(InputDir)\n'
'set INPUTDIR=%INPUTDIR:$(ProjectDir)=%\n'
'set INPUTDIR=%INPUTDIR:~0,-1%\n'
)
else:
input_dir_preamble = ''
if cygwin_shell:
# Find path to cygwin.
cygwin_dir = _FixPath(spec.get('msvs_cygwin_dirs', ['.'])[0])
# Prepare command.
direct_cmd = cmd
direct_cmd = [i.replace('$(IntDir)',
'`cygpath -m "${INTDIR}"`') for i in direct_cmd]
direct_cmd = [i.replace('$(OutDir)',
'`cygpath -m "${OUTDIR}"`') for i in direct_cmd]
direct_cmd = [i.replace('$(InputDir)',
'`cygpath -m "${INPUTDIR}"`') for i in direct_cmd]
if has_input_path:
direct_cmd = [i.replace('$(InputPath)',
'`cygpath -m "${INPUTPATH}"`')
for i in direct_cmd]
direct_cmd = ['\\"%s\\"' % i.replace('"', '\\\\\\"') for i in direct_cmd]
# direct_cmd = gyp.common.EncodePOSIXShellList(direct_cmd)
direct_cmd = ' '.join(direct_cmd)
# TODO(quote): regularize quoting path names throughout the module
cmd = ''
if do_setup_env:
cmd += 'call "$(ProjectDir)%(cygwin_dir)s\\setup_env.bat" && '
cmd += 'set CYGWIN=nontsec&& '
if direct_cmd.find('NUMBER_OF_PROCESSORS') >= 0:
cmd += 'set /a NUMBER_OF_PROCESSORS_PLUS_1=%%NUMBER_OF_PROCESSORS%%+1&& '
if direct_cmd.find('INTDIR') >= 0:
cmd += 'set INTDIR=$(IntDir)&& '
if direct_cmd.find('OUTDIR') >= 0:
cmd += 'set OUTDIR=$(OutDir)&& '
if has_input_path and direct_cmd.find('INPUTPATH') >= 0:
cmd += 'set INPUTPATH=$(InputPath) && '
cmd += 'bash -c "%(cmd)s"'
cmd = cmd % {'cygwin_dir': cygwin_dir,
'cmd': direct_cmd}
return input_dir_preamble + cmd
else:
# Convert cat --> type to mimic unix.
if cmd[0] == 'cat':
command = ['type']
else:
command = [cmd[0].replace('/', '\\')]
# Add call before command to ensure that commands can be tied together one
# after the other without aborting in Incredibuild, since IB makes a bat
# file out of the raw command string, and some commands (like python) are
# actually batch files themselves.
command.insert(0, 'call')
# Fix the paths
# TODO(quote): This is a really ugly heuristic, and will miss path fixing
# for arguments like "--arg=path" or "/opt:path".
# If the argument starts with a slash or dash, it's probably a command line
# switch
arguments = [i if (i[:1] in "/-") else _FixPath(i) for i in cmd[1:]]
arguments = [i.replace('$(InputDir)', '%INPUTDIR%') for i in arguments]
arguments = [MSVSSettings.FixVCMacroSlashes(i) for i in arguments]
if quote_cmd:
# Support a mode for using cmd directly.
# Convert any paths to native form (first element is used directly).
# TODO(quote): regularize quoting path names throughout the module
arguments = ['"%s"' % i for i in arguments]
# Collapse into a single command.
return input_dir_preamble + ' '.join(command + arguments)
def _BuildCommandLineForRule(spec, rule, has_input_path, do_setup_env):
# Currently this weird argument munging is used to duplicate the way a
# python script would need to be run as part of the chrome tree.
# Eventually we should add some sort of rule_default option to set this
# per project. For now the behavior chrome needs is the default.
mcs = rule.get('msvs_cygwin_shell')
if mcs is None:
mcs = int(spec.get('msvs_cygwin_shell', 1))
elif isinstance(mcs, str):
mcs = int(mcs)
quote_cmd = int(rule.get('msvs_quote_cmd', 1))
return _BuildCommandLineForRuleRaw(spec, rule['action'], mcs, has_input_path,
quote_cmd, do_setup_env=do_setup_env)
def _AddActionStep(actions_dict, inputs, outputs, description, command):
"""Merge action into an existing list of actions.
Care must be taken so that actions which have overlapping inputs either don't
get assigned to the same input, or get collapsed into one.
Arguments:
actions_dict: dictionary keyed on input name, which maps to a list of
dicts describing the actions attached to that input file.
inputs: list of inputs
outputs: list of outputs
description: description of the action
command: command line to execute
"""
# Require there to be at least one input (call sites will ensure this).
assert inputs
action = {
'inputs': inputs,
'outputs': outputs,
'description': description,
'command': command,
}
# Pick where to stick this action.
# While less than optimal in terms of build time, attach them to the first
# input for now.
chosen_input = inputs[0]
# Add it there.
if chosen_input not in actions_dict:
actions_dict[chosen_input] = []
actions_dict[chosen_input].append(action)
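# Editor's illustration (hypothetical files): two actions that both list
# 'a.grd' first are appended to actions_dict['a.grd']; later,
# _AddAccumulatedActionsToMSVS merges their inputs, outputs and commands into
# a single custom build step attached to that one file.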
def _AddCustomBuildToolForMSVS(p, spec, primary_input,
inputs, outputs, description, cmd):
"""Add a custom build tool to execute something.
Arguments:
p: the target project
spec: the target project dict
primary_input: input file to attach the build tool to
inputs: list of inputs
outputs: list of outputs
description: description of the action
cmd: command line to execute
"""
inputs = _FixPaths(inputs)
outputs = _FixPaths(outputs)
tool = MSVSProject.Tool(
'VCCustomBuildTool',
{'Description': description,
'AdditionalDependencies': ';'.join(inputs),
'Outputs': ';'.join(outputs),
'CommandLine': cmd,
})
# Add to the properties of primary input for each config.
for config_name, c_data in spec['configurations'].iteritems():
p.AddFileConfig(_FixPath(primary_input),
_ConfigFullName(config_name, c_data), tools=[tool])
def _AddAccumulatedActionsToMSVS(p, spec, actions_dict):
"""Add actions accumulated into an actions_dict, merging as needed.
Arguments:
p: the target project
spec: the target project dict
actions_dict: dictionary keyed on input name, which maps to a list of
dicts describing the actions attached to that input file.
"""
for primary_input in actions_dict:
inputs = OrderedSet()
outputs = OrderedSet()
descriptions = []
commands = []
for action in actions_dict[primary_input]:
inputs.update(OrderedSet(action['inputs']))
outputs.update(OrderedSet(action['outputs']))
descriptions.append(action['description'])
commands.append(action['command'])
# Add the custom build step for one input file.
description = ', and also '.join(descriptions)
command = '\r\n'.join(commands)
_AddCustomBuildToolForMSVS(p, spec,
primary_input=primary_input,
inputs=inputs,
outputs=outputs,
description=description,
cmd=command)
def _RuleExpandPath(path, input_file):
"""Given the input file to which a rule applied, string substitute a path.
Arguments:
path: a path to string expand
input_file: the file to which the rule applied.
Returns:
The string substituted path.
"""
path = path.replace('$(InputName)',
os.path.splitext(os.path.split(input_file)[1])[0])
path = path.replace('$(InputDir)', os.path.dirname(input_file))
path = path.replace('$(InputExt)',
os.path.splitext(os.path.split(input_file)[1])[1])
path = path.replace('$(InputFileName)', os.path.split(input_file)[1])
path = path.replace('$(InputPath)', input_file)
return path
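# Editor's illustration (hypothetical paths):
#   >>> _RuleExpandPath('$(InputName)_h.h', 'src/foo.idl')
#   'foo_h.h'
#   >>> _RuleExpandPath('$(InputDir)/gen', 'src/foo.idl')
#   'src/gen'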
def _FindRuleTriggerFiles(rule, sources):
"""Find the list of files which a particular rule applies to.
Arguments:
rule: the rule in question
sources: the set of all known source files for this project
Returns:
The list of sources that trigger a particular rule.
"""
return rule.get('rule_sources', [])
def _RuleInputsAndOutputs(rule, trigger_file):
"""Find the inputs and outputs generated by a rule.
Arguments:
rule: the rule in question.
trigger_file: the main trigger for this rule.
Returns:
The pair of (inputs, outputs) involved in this rule.
"""
raw_inputs = _FixPaths(rule.get('inputs', []))
raw_outputs = _FixPaths(rule.get('outputs', []))
inputs = OrderedSet()
outputs = OrderedSet()
inputs.add(trigger_file)
for i in raw_inputs:
inputs.add(_RuleExpandPath(i, trigger_file))
for o in raw_outputs:
outputs.add(_RuleExpandPath(o, trigger_file))
return (inputs, outputs)
def _GenerateNativeRulesForMSVS(p, rules, output_dir, spec, options):
"""Generate a native rules file.
Arguments:
p: the target project
rules: the set of rules to include
output_dir: the directory in which the project/gyp resides
spec: the project dict
options: global generator options
"""
rules_filename = '%s%s.rules' % (spec['target_name'],
options.suffix)
rules_file = MSVSToolFile.Writer(os.path.join(output_dir, rules_filename),
spec['target_name'])
# Add each rule.
for r in rules:
rule_name = r['rule_name']
rule_ext = r['extension']
inputs = _FixPaths(r.get('inputs', []))
outputs = _FixPaths(r.get('outputs', []))
# Skip a rule with no action and no inputs.
if 'action' not in r and not r.get('rule_sources', []):
continue
cmd = _BuildCommandLineForRule(spec, r, has_input_path=True,
do_setup_env=True)
rules_file.AddCustomBuildRule(name=rule_name,
description=r.get('message', rule_name),
extensions=[rule_ext],
additional_dependencies=inputs,
outputs=outputs,
cmd=cmd)
# Write out rules file.
rules_file.WriteIfChanged()
# Add rules file to project.
p.AddToolFile(rules_filename)
def _Cygwinify(path):
path = path.replace('$(OutDir)', '$(OutDirCygwin)')
path = path.replace('$(IntDir)', '$(IntDirCygwin)')
return path
def _GenerateExternalRules(rules, output_dir, spec,
sources, options, actions_to_add):
"""Generate an external makefile to do a set of rules.
Arguments:
rules: the list of rules to include
output_dir: path containing project and gyp files
spec: project specification data
sources: set of sources known
options: global generator options
actions_to_add: The list of actions we will add to.
"""
filename = '%s_rules%s.mk' % (spec['target_name'], options.suffix)
mk_file = gyp.common.WriteOnDiff(os.path.join(output_dir, filename))
# Find cygwin style versions of some paths.
mk_file.write('OutDirCygwin:=$(shell cygpath -u "$(OutDir)")\n')
mk_file.write('IntDirCygwin:=$(shell cygpath -u "$(IntDir)")\n')
# Gather stuff needed to emit all: target.
all_inputs = OrderedSet()
all_outputs = OrderedSet()
all_output_dirs = OrderedSet()
first_outputs = []
for rule in rules:
trigger_files = _FindRuleTriggerFiles(rule, sources)
for tf in trigger_files:
inputs, outputs = _RuleInputsAndOutputs(rule, tf)
all_inputs.update(OrderedSet(inputs))
all_outputs.update(OrderedSet(outputs))
# Only use one target from each rule as the dependency for
# 'all' so we don't try to build each rule multiple times.
first_outputs.append(list(outputs)[0])
# Get the unique output directories for this rule.
output_dirs = [os.path.split(i)[0] for i in outputs]
for od in output_dirs:
all_output_dirs.add(od)
first_outputs_cyg = [_Cygwinify(i) for i in first_outputs]
# Write out all: target, including mkdir for each output directory.
mk_file.write('all: %s\n' % ' '.join(first_outputs_cyg))
for od in all_output_dirs:
if od:
mk_file.write('\tmkdir -p `cygpath -u "%s"`\n' % od)
mk_file.write('\n')
# Define how each output is generated.
for rule in rules:
trigger_files = _FindRuleTriggerFiles(rule, sources)
for tf in trigger_files:
# Get all the inputs and outputs for this rule for this trigger file.
inputs, outputs = _RuleInputsAndOutputs(rule, tf)
inputs = [_Cygwinify(i) for i in inputs]
outputs = [_Cygwinify(i) for i in outputs]
# Prepare the command line for this rule.
cmd = [_RuleExpandPath(c, tf) for c in rule['action']]
cmd = ['"%s"' % i for i in cmd]
cmd = ' '.join(cmd)
# Add it to the makefile.
mk_file.write('%s: %s\n' % (' '.join(outputs), ' '.join(inputs)))
mk_file.write('\t%s\n\n' % cmd)
# Close up the file.
mk_file.close()
# Add makefile to list of sources.
sources.add(filename)
# Add a build action to call makefile.
cmd = ['make',
'OutDir=$(OutDir)',
'IntDir=$(IntDir)',
'-j', '${NUMBER_OF_PROCESSORS_PLUS_1}',
'-f', filename]
cmd = _BuildCommandLineForRuleRaw(spec, cmd, True, False, True, True)
# Insert makefile as 0'th input, so it gets the action attached there,
# as this is easier to understand from within the IDE.
all_inputs = list(all_inputs)
all_inputs.insert(0, filename)
_AddActionStep(actions_to_add,
inputs=_FixPaths(all_inputs),
outputs=_FixPaths(all_outputs),
description='Running external rules for %s' %
spec['target_name'],
command=cmd)
def _EscapeEnvironmentVariableExpansion(s):
"""Escapes % characters.
Escapes any % characters so that Windows-style environment variable
expansions will leave them alone.
See http://connect.microsoft.com/VisualStudio/feedback/details/106127/cl-d-name-text-containing-percentage-characters-doesnt-compile
to understand why we have to do this.
Args:
s: The string to be escaped.
Returns:
The escaped string.
"""
s = s.replace('%', '%%')
return s
quote_replacer_regex = re.compile(r'(\\*)"')
def _EscapeCommandLineArgumentForMSVS(s):
"""Escapes a Windows command-line argument.
So that the Win32 CommandLineToArgv function will turn the escaped result back
into the original string.
See http://msdn.microsoft.com/en-us/library/17w5ykft.aspx
("Parsing C++ Command-Line Arguments") to understand why we have to do
this.
Args:
s: the string to be escaped.
Returns:
the escaped string.
"""
def _Replace(match):
# For a literal quote, CommandLineToArgv requires an odd number of
# backslashes preceding it, and it produces half as many literal backslashes
# (rounded down). So we need to produce 2n+1 backslashes.
return 2 * match.group(1) + '\\"'
# Escape all quotes so that they are interpreted literally.
s = quote_replacer_regex.sub(_Replace, s)
# Now add unescaped quotes so that any whitespace is interpreted literally.
s = '"' + s + '"'
return s
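# Editor's worked example (hypothetical argument): an embedded quote gets an
# odd number of preceding backslashes and the whole argument is then wrapped
# in quotes:
#   >>> _EscapeCommandLineArgumentForMSVS('a"b')
#   '"a\\"b"'
# i.e. the six characters "a\"b" on the actual command line.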
delimiters_replacer_regex = re.compile(r'(\\*)([,;]+)')
def _EscapeVCProjCommandLineArgListItem(s):
"""Escapes command line arguments for MSVS.
The VCProj format stores string lists in a single string using commas and
semi-colons as separators, which must be quoted if they are to be
interpreted literally. However, command-line arguments may already have
quotes, and the VCProj parser is ignorant of the backslash escaping
convention used by CommandLineToArgv, so the command-line quotes and the
VCProj quotes may not be the same quotes. So to store a general
command-line argument in a VCProj list, we need to parse the existing
quoting according to VCProj's convention and quote any delimiters that are
not already quoted by that convention. The quotes that we add will also be
seen by CommandLineToArgv, so if backslashes precede them then we also have
to escape those backslashes according to the CommandLineToArgv
convention.
Args:
s: the string to be escaped.
Returns:
the escaped string.
"""
def _Replace(match):
# For a non-literal quote, CommandLineToArgv requires an even number of
# backslashes preceding it, and it produces half as many literal
# backslashes. So we need to produce 2n backslashes.
return 2 * match.group(1) + '"' + match.group(2) + '"'
segments = s.split('"')
# The unquoted segments are at the even-numbered indices.
for i in range(0, len(segments), 2):
segments[i] = delimiters_replacer_regex.sub(_Replace, segments[i])
# Concatenate back into a single string
s = '"'.join(segments)
if len(segments) % 2 == 0:
# String ends while still quoted according to VCProj's convention. This
# means the delimiter and the next list item that follow this one in the
# .vcproj file will be misinterpreted as part of this item. There is nothing
# we can do about this. Adding an extra quote would correct the problem in
# the VCProj but cause the same problem on the final command-line. Moving
# the item to the end of the list does works, but that's only possible if
# there's only one such item. Let's just warn the user.
print >> sys.stderr, ('Warning: MSVS may misinterpret the odd number of ' +
'quotes in ' + s)
return s
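# Editor's illustration (hypothetical list item): an unquoted delimiter gets
# VCProj-style quotes added around it:
#   >>> _EscapeVCProjCommandLineArgListItem('a;b')
#   'a";"b'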
def _EscapeCppDefineForMSVS(s):
"""Escapes a CPP define so that it will reach the compiler unaltered."""
s = _EscapeEnvironmentVariableExpansion(s)
s = _EscapeCommandLineArgumentForMSVS(s)
s = _EscapeVCProjCommandLineArgListItem(s)
# cl.exe replaces literal # characters with = in preprocessor definitions for
# some reason. Octal-encode to work around that.
s = s.replace('#', '\\%03o' % ord('#'))
return s
quote_replacer_regex2 = re.compile(r'(\\+)"')
def _EscapeCommandLineArgumentForMSBuild(s):
"""Escapes a Windows command-line argument for use by MSBuild."""
def _Replace(match):
return (len(match.group(1)) / 2 * 4) * '\\' + '\\"'
# Escape all quotes so that they are interpreted literally.
s = quote_replacer_regex2.sub(_Replace, s)
return s
def _EscapeMSBuildSpecialCharacters(s):
escape_dictionary = {
'%': '%25',
'$': '%24',
'@': '%40',
"'": '%27',
';': '%3B',
'?': '%3F',
'*': '%2A'
}
result = ''.join([escape_dictionary.get(c, c) for c in s])
return result
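# Editor's illustration:
#   >>> _EscapeMSBuildSpecialCharacters('50% of $(X)')
#   '50%25 of %24(X)'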
def _EscapeCppDefineForMSBuild(s):
"""Escapes a CPP define so that it will reach the compiler unaltered."""
s = _EscapeEnvironmentVariableExpansion(s)
s = _EscapeCommandLineArgumentForMSBuild(s)
s = _EscapeMSBuildSpecialCharacters(s)
# cl.exe replaces literal # characters with = in preprocessor definitions for
# some reason. Octal-encode to work around that.
s = s.replace('#', '\\%03o' % ord('#'))
return s
def _GenerateRulesForMSVS(p, output_dir, options, spec,
sources, excluded_sources,
actions_to_add):
"""Generate all the rules for a particular project.
Arguments:
p: the project
output_dir: directory to emit rules to
options: global options passed to the generator
spec: the specification for this project
sources: the set of all known source files in this project
excluded_sources: the set of sources excluded from normal processing
actions_to_add: deferred list of actions to add in
"""
rules = spec.get('rules', [])
rules_native = [r for r in rules if not int(r.get('msvs_external_rule', 0))]
rules_external = [r for r in rules if int(r.get('msvs_external_rule', 0))]
# Handle rules that use a native rules file.
if rules_native:
_GenerateNativeRulesForMSVS(p, rules_native, output_dir, spec, options)
# Handle external rules (non-native rules).
if rules_external:
_GenerateExternalRules(rules_external, output_dir, spec,
sources, options, actions_to_add)
_AdjustSourcesForRules(spec, rules, sources, excluded_sources)
def _AdjustSourcesForRules(spec, rules, sources, excluded_sources):
# Add outputs generated by each rule (if applicable).
for rule in rules:
# Only add outputs when the rule processes its outputs as sources.
if int(rule.get('process_outputs_as_sources', False)):
# Add in the outputs from this rule.
trigger_files = _FindRuleTriggerFiles(rule, sources)
for trigger_file in trigger_files:
inputs, outputs = _RuleInputsAndOutputs(rule, trigger_file)
inputs = OrderedSet(_FixPaths(inputs))
outputs = OrderedSet(_FixPaths(outputs))
inputs.remove(_FixPath(trigger_file))
sources.update(inputs)
if not spec.get('msvs_external_builder'):
excluded_sources.update(inputs)
sources.update(outputs)
def _FilterActionsFromExcluded(excluded_sources, actions_to_add):
"""Take inputs with actions attached out of the list of exclusions.
Arguments:
excluded_sources: list of source files not to be built.
actions_to_add: dict of actions keyed on source file they're attached to.
Returns:
excluded_sources with files that have actions attached removed.
"""
must_keep = OrderedSet(_FixPaths(actions_to_add.keys()))
return [s for s in excluded_sources if s not in must_keep]
def _GetDefaultConfiguration(spec):
return spec['configurations'][spec['default_configuration']]
def _GetGuidOfProject(proj_path, spec):
"""Get the guid for the project.
Arguments:
proj_path: Path of the vcproj or vcxproj file to generate.
spec: The target dictionary containing the properties of the target.
Returns:
the guid.
Raises:
ValueError: if the specified GUID is invalid.
"""
# Pluck out the default configuration.
default_config = _GetDefaultConfiguration(spec)
# Decide the guid of the project.
guid = default_config.get('msvs_guid')
if guid:
if VALID_MSVS_GUID_CHARS.match(guid) is None:
raise ValueError('Invalid MSVS guid: "%s". Must match regex: "%s".' %
(guid, VALID_MSVS_GUID_CHARS.pattern))
guid = '{%s}' % guid
guid = guid or MSVSNew.MakeGuid(proj_path)
return guid
def _GetMsbuildToolsetOfProject(proj_path, spec, version):
"""Get the platform toolset for the project.
Arguments:
proj_path: Path of the vcproj or vcxproj file to generate.
spec: The target dictionary containing the properties of the target.
version: The MSVSVersion object.
Returns:
the platform toolset string or None.
"""
# Pluck out the default configuration.
default_config = _GetDefaultConfiguration(spec)
toolset = default_config.get('msbuild_toolset')
if not toolset and version.DefaultToolset():
toolset = version.DefaultToolset()
return toolset
def _GenerateProject(project, options, version, generator_flags):
"""Generates a vcproj file.
Arguments:
project: the MSVSProject object.
options: global generator options.
version: the MSVSVersion object.
generator_flags: dict of generator-specific flags.
Returns:
A list of source files that cannot be found on disk.
"""
default_config = _GetDefaultConfiguration(project.spec)
# Skip emitting anything if told to with msvs_existing_vcproj option.
if default_config.get('msvs_existing_vcproj'):
return []
if version.UsesVcxproj():
return _GenerateMSBuildProject(project, options, version, generator_flags)
else:
return _GenerateMSVSProject(project, options, version, generator_flags)
def _GenerateMSVSProject(project, options, version, generator_flags):
"""Generates a .vcproj file. It may create .rules and .user files too.
Arguments:
project: The project object we will generate the file for.
options: Global options passed to the generator.
version: The VisualStudioVersion object.
generator_flags: dict of generator-specific flags.
"""
spec = project.spec
gyp.common.EnsureDirExists(project.path)
platforms = _GetUniquePlatforms(spec)
p = MSVSProject.Writer(project.path, version, spec['target_name'],
project.guid, platforms)
# Get directory project file is in.
project_dir = os.path.split(project.path)[0]
gyp_path = _NormalizedSource(project.build_file)
relative_path_of_gyp_file = gyp.common.RelativePath(gyp_path, project_dir)
config_type = _GetMSVSConfigurationType(spec, project.build_file)
for config_name, config in spec['configurations'].iteritems():
_AddConfigurationToMSVSProject(p, spec, config_type, config_name, config)
# Prepare list of sources and excluded sources.
gyp_file = os.path.split(project.build_file)[1]
sources, excluded_sources = _PrepareListOfSources(spec, generator_flags,
gyp_file)
# Add rules.
actions_to_add = {}
_GenerateRulesForMSVS(p, project_dir, options, spec,
sources, excluded_sources,
actions_to_add)
list_excluded = generator_flags.get('msvs_list_excluded_files', True)
sources, excluded_sources, excluded_idl = (
_AdjustSourcesAndConvertToFilterHierarchy(spec, options, project_dir,
sources, excluded_sources,
list_excluded, version))
# Add in files.
missing_sources = _VerifySourcesExist(sources, project_dir)
p.AddFiles(sources)
_AddToolFilesToMSVS(p, spec)
_HandlePreCompiledHeaders(p, sources, spec)
_AddActions(actions_to_add, spec, relative_path_of_gyp_file)
_AddCopies(actions_to_add, spec)
_WriteMSVSUserFile(project.path, version, spec)
# NOTE: this stanza must appear after all actions have been decided.
# Don't excluded sources with actions attached, or they won't run.
excluded_sources = _FilterActionsFromExcluded(
excluded_sources, actions_to_add)
_ExcludeFilesFromBeingBuilt(p, spec, excluded_sources, excluded_idl,
list_excluded)
_AddAccumulatedActionsToMSVS(p, spec, actions_to_add)
# Write it out.
p.WriteIfChanged()
return missing_sources
def _GetUniquePlatforms(spec):
"""Returns the list of unique platforms for this spec, e.g ['win32', ...].
Arguments:
spec: The target dictionary containing the properties of the target.
Returns:
The list of unique platform names for this spec.
"""
# Gather list of unique platforms.
platforms = OrderedSet()
for configuration in spec['configurations']:
platforms.add(_ConfigPlatform(spec['configurations'][configuration]))
platforms = list(platforms)
return platforms
def _CreateMSVSUserFile(proj_path, version, spec):
"""Generates a .user file for the user running this Gyp program.
Arguments:
proj_path: The path of the project file being created. The .user file
shares the same path (with an appropriate suffix).
version: The VisualStudioVersion object.
spec: The target dictionary containing the properties of the target.
Returns:
The MSVSUserFile object created.
"""
(domain, username) = _GetDomainAndUserName()
vcuser_filename = '.'.join([proj_path, domain, username, 'user'])
user_file = MSVSUserFile.Writer(vcuser_filename, version,
spec['target_name'])
return user_file
def _GetMSVSConfigurationType(spec, build_file):
"""Returns the configuration type for this project.
It's a number defined by Microsoft. May raise an exception.
Args:
spec: The target dictionary containing the properties of the target.
build_file: The path of the gyp file.
Returns:
An integer, the configuration type.
"""
try:
config_type = {
'executable': '1', # .exe
'shared_library': '2', # .dll
'loadable_module': '2', # .dll
'static_library': '4', # .lib
'none': '10', # Utility type
}[spec['type']]
except KeyError:
if spec.get('type'):
raise GypError('Target type %s is not a valid target type for '
'target %s in %s.' %
(spec['type'], spec['target_name'], build_file))
else:
raise GypError('Missing type field for target %s in %s.' %
(spec['target_name'], build_file))
return config_type
def _AddConfigurationToMSVSProject(p, spec, config_type, config_name, config):
"""Adds a configuration to the MSVS project.
Many settings in a vcproj file are specific to a configuration. This
function generates the main part of the vcproj file that is configuration specific.
Arguments:
p: The target project being generated.
spec: The target dictionary containing the properties of the target.
config_type: The configuration type, a number as defined by Microsoft.
config_name: The name of the configuration.
config: The dictionary that defines the special processing to be done
for this configuration.
"""
# Get the information for this configuration
include_dirs, resource_include_dirs = _GetIncludeDirs(config)
libraries = _GetLibraries(spec)
library_dirs = _GetLibraryDirs(config)
out_file, vc_tool, _ = _GetOutputFilePathAndTool(spec, msbuild=False)
defines = _GetDefines(config)
defines = [_EscapeCppDefineForMSVS(d) for d in defines]
disabled_warnings = _GetDisabledWarnings(config)
prebuild = config.get('msvs_prebuild')
postbuild = config.get('msvs_postbuild')
def_file = _GetModuleDefinition(spec)
precompiled_header = config.get('msvs_precompiled_header')
# Prepare the list of tools as a dictionary.
tools = dict()
# Add in user specified msvs_settings.
msvs_settings = config.get('msvs_settings', {})
MSVSSettings.ValidateMSVSSettings(msvs_settings)
# Prevent default library inheritance from the environment.
_ToolAppend(tools, 'VCLinkerTool', 'AdditionalDependencies', ['$(NOINHERIT)'])
for tool in msvs_settings:
settings = config['msvs_settings'][tool]
for setting in settings:
_ToolAppend(tools, tool, setting, settings[setting])
# Add the information to the appropriate tool
_ToolAppend(tools, 'VCCLCompilerTool',
'AdditionalIncludeDirectories', include_dirs)
_ToolAppend(tools, 'VCResourceCompilerTool',
'AdditionalIncludeDirectories', resource_include_dirs)
# Add in libraries.
_ToolAppend(tools, 'VCLinkerTool', 'AdditionalDependencies', libraries)
_ToolAppend(tools, 'VCLinkerTool', 'AdditionalLibraryDirectories',
library_dirs)
if out_file:
_ToolAppend(tools, vc_tool, 'OutputFile', out_file, only_if_unset=True)
# Add defines.
_ToolAppend(tools, 'VCCLCompilerTool', 'PreprocessorDefinitions', defines)
_ToolAppend(tools, 'VCResourceCompilerTool', 'PreprocessorDefinitions',
defines)
# Change program database directory to prevent collisions.
_ToolAppend(tools, 'VCCLCompilerTool', 'ProgramDataBaseFileName',
'$(IntDir)$(ProjectName)\\vc80.pdb', only_if_unset=True)
# Add disabled warnings.
_ToolAppend(tools, 'VCCLCompilerTool',
'DisableSpecificWarnings', disabled_warnings)
# Add Pre-build.
_ToolAppend(tools, 'VCPreBuildEventTool', 'CommandLine', prebuild)
# Add Post-build.
_ToolAppend(tools, 'VCPostBuildEventTool', 'CommandLine', postbuild)
# Turn on precompiled headers if appropriate.
if precompiled_header:
precompiled_header = os.path.split(precompiled_header)[1]
_ToolAppend(tools, 'VCCLCompilerTool', 'UsePrecompiledHeader', '2')
_ToolAppend(tools, 'VCCLCompilerTool',
'PrecompiledHeaderThrough', precompiled_header)
_ToolAppend(tools, 'VCCLCompilerTool',
'ForcedIncludeFiles', precompiled_header)
# Loadable modules don't generate import libraries;
# tell dependent projects to not expect one.
if spec['type'] == 'loadable_module':
_ToolAppend(tools, 'VCLinkerTool', 'IgnoreImportLibrary', 'true')
# Set the module definition file if any.
if def_file:
_ToolAppend(tools, 'VCLinkerTool', 'ModuleDefinitionFile', def_file)
_AddConfigurationToMSVS(p, spec, tools, config, config_type, config_name)
def _GetIncludeDirs(config):
"""Returns the list of directories to be used for #include directives.
Arguments:
config: The dictionary that defines the special processing to be done
for this configuration.
Returns:
The list of directory paths.
"""
# TODO(bradnelson): include_dirs should really be flexible enough not to
# require this sort of thing.
include_dirs = (
config.get('include_dirs', []) +
config.get('msvs_system_include_dirs', []))
resource_include_dirs = config.get('resource_include_dirs', include_dirs)
include_dirs = _FixPaths(include_dirs)
resource_include_dirs = _FixPaths(resource_include_dirs)
return include_dirs, resource_include_dirs
def _GetLibraryDirs(config):
"""Returns the list of directories to be used for library search paths.
Arguments:
config: The dictionary that defines the special processing to be done
for this configuration.
Returns:
The list of directory paths.
"""
library_dirs = config.get('library_dirs', [])
library_dirs = _FixPaths(library_dirs)
return library_dirs
def _GetLibraries(spec):
"""Returns the list of libraries for this configuration.
Arguments:
spec: The target dictionary containing the properties of the target.
Returns:
The list of directory paths.
"""
libraries = spec.get('libraries', [])
# Strip out -l, as it is not used on windows (but is needed so we can pass
# in libraries that are assumed to be in the default library path).
  # Also remove duplicate entries, keeping only the last occurrence of each
  # library, while preserving order.
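  # Example (illustrative): ['a.lib', '-lb', 'a.lib'] yields
  # ['b.lib', 'a.lib']; the earlier duplicate of a.lib is dropped.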
found = OrderedSet()
unique_libraries_list = []
for entry in reversed(libraries):
    library = re.sub(r'^-l', '', entry)
if not os.path.splitext(library)[1]:
library += '.lib'
if library not in found:
found.add(library)
unique_libraries_list.append(library)
unique_libraries_list.reverse()
return unique_libraries_list
def _GetOutputFilePathAndTool(spec, msbuild):
"""Returns the path and tool to use for this target.
Figures out the path of the file this spec will create and the name of
the VC tool that will create it.
  Arguments:
    spec: The target dictionary containing the properties of the target.
    msbuild: True if generating for MSBuild (rather than the older MSVS
        project format).
Returns:
A triple of (file path, name of the vc tool, name of the msbuild tool)
"""
# Select a name for the output file.
out_file = ''
vc_tool = ''
msbuild_tool = ''
output_file_map = {
'executable': ('VCLinkerTool', 'Link', '$(OutDir)', '.exe'),
'shared_library': ('VCLinkerTool', 'Link', '$(OutDir)', '.dll'),
'loadable_module': ('VCLinkerTool', 'Link', '$(OutDir)', '.dll'),
'static_library': ('VCLibrarianTool', 'Lib', '$(OutDir)lib\\', '.lib'),
}
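  # Example (illustrative): an 'executable' target ends up with
  # vc_tool='VCLinkerTool' and an out_file like '$(OutDir)\\foo.exe'.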
output_file_props = output_file_map.get(spec['type'])
if output_file_props and int(spec.get('msvs_auto_output_file', 1)):
vc_tool, msbuild_tool, out_dir, suffix = output_file_props
if spec.get('standalone_static_library', 0):
out_dir = '$(OutDir)'
out_dir = spec.get('product_dir', out_dir)
product_extension = spec.get('product_extension')
if product_extension:
suffix = '.' + product_extension
elif msbuild:
suffix = '$(TargetExt)'
prefix = spec.get('product_prefix', '')
product_name = spec.get('product_name', '$(ProjectName)')
out_file = ntpath.join(out_dir, prefix + product_name + suffix)
return out_file, vc_tool, msbuild_tool
def _GetOutputTargetExt(spec):
"""Returns the extension for this target, including the dot
If product_extension is specified, set target_extension to this to avoid
MSB8012, returns None otherwise. Ignores any target_extension settings in
the input files.
Arguments:
spec: The target dictionary containing the properties of the target.
Returns:
A string with the extension, or None
"""
target_extension = spec.get('product_extension')
if target_extension:
return '.' + target_extension
return None
def _GetDefines(config):
"""Returns the list of preprocessor definitions for this configuation.
Arguments:
config: The dictionary that defines the special processing to be done
for this configuration.
Returns:
The list of preprocessor definitions.
"""
defines = []
for d in config.get('defines', []):
if type(d) == list:
fd = '='.join([str(dpart) for dpart in d])
else:
fd = str(d)
defines.append(fd)
return defines
def _GetDisabledWarnings(config):
return [str(i) for i in config.get('msvs_disabled_warnings', [])]
def _GetModuleDefinition(spec):
def_file = ''
if spec['type'] in ['shared_library', 'loadable_module', 'executable']:
def_files = [s for s in spec.get('sources', []) if s.endswith('.def')]
if len(def_files) == 1:
def_file = _FixPath(def_files[0])
elif def_files:
raise ValueError(
'Multiple module definition files in one target, target %s lists '
'multiple .def files: %s' % (
spec['target_name'], ' '.join(def_files)))
return def_file
def _ConvertToolsToExpectedForm(tools):
"""Convert tools to a form expected by Visual Studio.
Arguments:
tools: A dictionary of settings; the tool name is the key.
Returns:
A list of Tool objects.
"""
tool_list = []
for tool, settings in tools.iteritems():
# Collapse settings with lists.
settings_fixed = {}
for setting, value in settings.iteritems():
if type(value) == list:
if ((tool == 'VCLinkerTool' and
setting == 'AdditionalDependencies') or
setting == 'AdditionalOptions'):
settings_fixed[setting] = ' '.join(value)
else:
settings_fixed[setting] = ';'.join(value)
else:
settings_fixed[setting] = value
# Add in this tool.
tool_list.append(MSVSProject.Tool(tool, settings_fixed))
return tool_list
def _AddConfigurationToMSVS(p, spec, tools, config, config_type, config_name):
"""Add to the project file the configuration specified by config.
Arguments:
p: The target project being generated.
spec: the target project dict.
tools: A dictionary of settings; the tool name is the key.
config: The dictionary that defines the special processing to be done
for this configuration.
config_type: The configuration type, a number as defined by Microsoft.
config_name: The name of the configuration.
"""
attributes = _GetMSVSAttributes(spec, config, config_type)
# Add in this configuration.
tool_list = _ConvertToolsToExpectedForm(tools)
p.AddConfig(_ConfigFullName(config_name, config),
attrs=attributes, tools=tool_list)
def _GetMSVSAttributes(spec, config, config_type):
# Prepare configuration attributes.
prepared_attrs = {}
source_attrs = config.get('msvs_configuration_attributes', {})
for a in source_attrs:
prepared_attrs[a] = source_attrs[a]
# Add props files.
vsprops_dirs = config.get('msvs_props', [])
vsprops_dirs = _FixPaths(vsprops_dirs)
if vsprops_dirs:
prepared_attrs['InheritedPropertySheets'] = ';'.join(vsprops_dirs)
# Set configuration type.
prepared_attrs['ConfigurationType'] = config_type
output_dir = prepared_attrs.get('OutputDirectory',
'$(SolutionDir)$(ConfigurationName)')
prepared_attrs['OutputDirectory'] = _FixPath(output_dir) + '\\'
if 'IntermediateDirectory' not in prepared_attrs:
intermediate = '$(ConfigurationName)\\obj\\$(ProjectName)'
prepared_attrs['IntermediateDirectory'] = _FixPath(intermediate) + '\\'
else:
intermediate = _FixPath(prepared_attrs['IntermediateDirectory']) + '\\'
intermediate = MSVSSettings.FixVCMacroSlashes(intermediate)
prepared_attrs['IntermediateDirectory'] = intermediate
return prepared_attrs
def _AddNormalizedSources(sources_set, sources_array):
sources_set.update(_NormalizedSource(s) for s in sources_array)
def _PrepareListOfSources(spec, generator_flags, gyp_file):
"""Prepare list of sources and excluded sources.
Besides the sources specified directly in the spec, adds the gyp file so
that a change to it will cause a re-compile. Also adds appropriate sources
  for actions and copies. Assumes a later stage will un-exclude files which
have custom build steps attached.
Arguments:
    spec: The target dictionary containing the properties of the target.
    generator_flags: Dictionary of generator-specific flags.
    gyp_file: The name of the gyp file.
Returns:
A pair of (list of sources, list of excluded sources).
The sources will be relative to the gyp file.
"""
sources = OrderedSet()
_AddNormalizedSources(sources, spec.get('sources', []))
excluded_sources = OrderedSet()
# Add in the gyp file.
if not generator_flags.get('standalone'):
sources.add(gyp_file)
# Add in 'action' inputs and outputs.
for a in spec.get('actions', []):
inputs = a['inputs']
inputs = [_NormalizedSource(i) for i in inputs]
# Add all inputs to sources and excluded sources.
inputs = OrderedSet(inputs)
sources.update(inputs)
if not spec.get('msvs_external_builder'):
excluded_sources.update(inputs)
if int(a.get('process_outputs_as_sources', False)):
_AddNormalizedSources(sources, a.get('outputs', []))
# Add in 'copies' inputs and outputs.
for cpy in spec.get('copies', []):
_AddNormalizedSources(sources, cpy.get('files', []))
return (sources, excluded_sources)
def _AdjustSourcesAndConvertToFilterHierarchy(
spec, options, gyp_dir, sources, excluded_sources, list_excluded, version):
"""Adjusts the list of sources and excluded sources.
Also converts the sets to lists.
Arguments:
    spec: The target dictionary containing the properties of the target.
    options: Global generator options.
    gyp_dir: The path of the directory containing the gyp file.
    sources: A set of sources to be included for this project.
    excluded_sources: A set of sources to be excluded for this project.
    list_excluded: Whether excluded files should be listed in the project.
    version: A MSVSVersion object.
Returns:
A trio of (list of sources, list of excluded sources,
path of excluded IDL file)
"""
# Exclude excluded sources coming into the generator.
excluded_sources.update(OrderedSet(spec.get('sources_excluded', [])))
# Add excluded sources into sources for good measure.
sources.update(excluded_sources)
# Convert to proper windows form.
# NOTE: sources goes from being a set to a list here.
# NOTE: excluded_sources goes from being a set to a list here.
sources = _FixPaths(sources)
# Convert to proper windows form.
excluded_sources = _FixPaths(excluded_sources)
excluded_idl = _IdlFilesHandledNonNatively(spec, sources)
precompiled_related = _GetPrecompileRelatedFiles(spec)
# Find the excluded ones, minus the precompiled header related ones.
fully_excluded = [i for i in excluded_sources if i not in precompiled_related]
# Convert to folders and the right slashes.
sources = [i.split('\\') for i in sources]
sources = _ConvertSourcesToFilterHierarchy(sources, excluded=fully_excluded,
list_excluded=list_excluded,
msvs_version=version)
# Prune filters with a single child to flatten ugly directory structures
# such as ../../src/modules/module1 etc.
while len(sources) == 1 and isinstance(sources[0], MSVSProject.Filter):
sources = sources[0].contents
return sources, excluded_sources, excluded_idl
def _IdlFilesHandledNonNatively(spec, sources):
# If any non-native rules use 'idl' as an extension exclude idl files.
# Gather a list here to use later.
using_idl = False
for rule in spec.get('rules', []):
if rule['extension'] == 'idl' and int(rule.get('msvs_external_rule', 0)):
using_idl = True
break
if using_idl:
excluded_idl = [i for i in sources if i.endswith('.idl')]
else:
excluded_idl = []
return excluded_idl
def _GetPrecompileRelatedFiles(spec):
# Gather a list of precompiled header related sources.
precompiled_related = []
for _, config in spec['configurations'].iteritems():
for k in precomp_keys:
f = config.get(k)
if f:
precompiled_related.append(_FixPath(f))
return precompiled_related
def _ExcludeFilesFromBeingBuilt(p, spec, excluded_sources, excluded_idl,
list_excluded):
exclusions = _GetExcludedFilesFromBuild(spec, excluded_sources, excluded_idl)
for file_name, excluded_configs in exclusions.iteritems():
if (not list_excluded and
len(excluded_configs) == len(spec['configurations'])):
# If we're not listing excluded files, then they won't appear in the
# project, so don't try to configure them to be excluded.
pass
else:
for config_name, config in excluded_configs:
p.AddFileConfig(file_name, _ConfigFullName(config_name, config),
{'ExcludedFromBuild': 'true'})
def _GetExcludedFilesFromBuild(spec, excluded_sources, excluded_idl):
exclusions = {}
# Exclude excluded sources from being built.
for f in excluded_sources:
excluded_configs = []
for config_name, config in spec['configurations'].iteritems():
precomped = [_FixPath(config.get(i, '')) for i in precomp_keys]
# Don't do this for ones that are precompiled header related.
if f not in precomped:
excluded_configs.append((config_name, config))
exclusions[f] = excluded_configs
# If any non-native rules use 'idl' as an extension exclude idl files.
# Exclude them now.
for f in excluded_idl:
excluded_configs = []
for config_name, config in spec['configurations'].iteritems():
excluded_configs.append((config_name, config))
exclusions[f] = excluded_configs
return exclusions
def _AddToolFilesToMSVS(p, spec):
# Add in tool files (rules).
tool_files = OrderedSet()
for _, config in spec['configurations'].iteritems():
for f in config.get('msvs_tool_files', []):
tool_files.add(f)
for f in tool_files:
p.AddToolFile(f)
def _HandlePreCompiledHeaders(p, sources, spec):
# Pre-compiled header source stubs need a different compiler flag
# (generate precompiled header) and any source file not of the same
# kind (i.e. C vs. C++) as the precompiled header source stub needs
# to have use of precompiled headers disabled.
extensions_excluded_from_precompile = []
for config_name, config in spec['configurations'].iteritems():
source = config.get('msvs_precompiled_source')
if source:
source = _FixPath(source)
# UsePrecompiledHeader=1 for if using precompiled headers.
tool = MSVSProject.Tool('VCCLCompilerTool',
{'UsePrecompiledHeader': '1'})
p.AddFileConfig(source, _ConfigFullName(config_name, config),
{}, tools=[tool])
basename, extension = os.path.splitext(source)
if extension == '.c':
extensions_excluded_from_precompile = ['.cc', '.cpp', '.cxx']
else:
extensions_excluded_from_precompile = ['.c']
def DisableForSourceTree(source_tree):
for source in source_tree:
if isinstance(source, MSVSProject.Filter):
DisableForSourceTree(source.contents)
else:
basename, extension = os.path.splitext(source)
if extension in extensions_excluded_from_precompile:
for config_name, config in spec['configurations'].iteritems():
tool = MSVSProject.Tool('VCCLCompilerTool',
{'UsePrecompiledHeader': '0',
'ForcedIncludeFiles': '$(NOINHERIT)'})
p.AddFileConfig(_FixPath(source),
_ConfigFullName(config_name, config),
{}, tools=[tool])
# Do nothing if there was no precompiled source.
if extensions_excluded_from_precompile:
DisableForSourceTree(sources)
def _AddActions(actions_to_add, spec, relative_path_of_gyp_file):
# Add actions.
actions = spec.get('actions', [])
# Don't setup_env every time. When all the actions are run together in one
# batch file in VS, the PATH will grow too long.
# Membership in this set means that the cygwin environment has been set up,
# and does not need to be set up again.
have_setup_env = set()
for a in actions:
# Attach actions to the gyp file if nothing else is there.
inputs = a.get('inputs') or [relative_path_of_gyp_file]
attached_to = inputs[0]
need_setup_env = attached_to not in have_setup_env
cmd = _BuildCommandLineForRule(spec, a, has_input_path=False,
do_setup_env=need_setup_env)
have_setup_env.add(attached_to)
# Add the action.
_AddActionStep(actions_to_add,
inputs=inputs,
outputs=a.get('outputs', []),
description=a.get('message', a['action_name']),
command=cmd)
def _WriteMSVSUserFile(project_path, version, spec):
# Add run_as and test targets.
if 'run_as' in spec:
run_as = spec['run_as']
action = run_as.get('action', [])
environment = run_as.get('environment', [])
working_directory = run_as.get('working_directory', '.')
elif int(spec.get('test', 0)):
action = ['$(TargetPath)', '--gtest_print_time']
environment = []
working_directory = '.'
else:
return # Nothing to add
# Write out the user file.
user_file = _CreateMSVSUserFile(project_path, version, spec)
for config_name, c_data in spec['configurations'].iteritems():
user_file.AddDebugSettings(_ConfigFullName(config_name, c_data),
action, environment, working_directory)
user_file.WriteIfChanged()
def _AddCopies(actions_to_add, spec):
copies = _GetCopies(spec)
for inputs, outputs, cmd, description in copies:
_AddActionStep(actions_to_add, inputs=inputs, outputs=outputs,
description=description, command=cmd)
def _GetCopies(spec):
copies = []
# Add copies.
for cpy in spec.get('copies', []):
for src in cpy.get('files', []):
dst = os.path.join(cpy['destination'], os.path.basename(src))
# _AddCustomBuildToolForMSVS() will call _FixPath() on the inputs and
# outputs, so do the same for our generated command line.
if src.endswith('/'):
src_bare = src[:-1]
base_dir = posixpath.split(src_bare)[0]
outer_dir = posixpath.split(src_bare)[1]
cmd = 'cd "%s" && xcopy /e /f /y "%s" "%s\\%s\\"' % (
_FixPath(base_dir), outer_dir, _FixPath(dst), outer_dir)
copies.append(([src], ['dummy_copies', dst], cmd,
'Copying %s to %s' % (src, dst)))
else:
cmd = 'mkdir "%s" 2>nul & set ERRORLEVEL=0 & copy /Y "%s" "%s"' % (
_FixPath(cpy['destination']), _FixPath(src), _FixPath(dst))
copies.append(([src], [dst], cmd, 'Copying %s to %s' % (src, dst)))
return copies
def _GetPathDict(root, path):
# |path| will eventually be empty (in the recursive calls) if it was initially
# relative; otherwise it will eventually end up as '\', 'D:\', etc.
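  # Example (illustrative): for path 'a/b' this returns root['a']['b'],
  # creating the intermediate dicts as needed.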
if not path or path.endswith(os.sep):
return root
parent, folder = os.path.split(path)
parent_dict = _GetPathDict(root, parent)
if folder not in parent_dict:
parent_dict[folder] = dict()
return parent_dict[folder]
def _DictsToFolders(base_path, bucket, flat):
# Convert to folders recursively.
children = []
for folder, contents in bucket.iteritems():
if type(contents) == dict:
folder_children = _DictsToFolders(os.path.join(base_path, folder),
contents, flat)
if flat:
children += folder_children
else:
folder_children = MSVSNew.MSVSFolder(os.path.join(base_path, folder),
name='(' + folder + ')',
entries=folder_children)
children.append(folder_children)
else:
children.append(contents)
return children
def _CollapseSingles(parent, node):
  # Recursively explore the tree of dicts looking for projects which are
  # the sole item in a folder which has the same name as the project. Bring
  # such projects up one level.
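  # Example (illustrative): a subtree {'base': {'base.vcproj': project}}
  # collapses to {'base': project}.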
if (type(node) == dict and
len(node) == 1 and
node.keys()[0] == parent + '.vcproj'):
return node[node.keys()[0]]
if type(node) != dict:
return node
for child in node:
node[child] = _CollapseSingles(child, node[child])
return node
def _GatherSolutionFolders(sln_projects, project_objects, flat):
root = {}
# Convert into a tree of dicts on path.
for p in sln_projects:
gyp_file, target = gyp.common.ParseQualifiedTarget(p)[0:2]
gyp_dir = os.path.dirname(gyp_file)
path_dict = _GetPathDict(root, gyp_dir)
path_dict[target + '.vcproj'] = project_objects[p]
# Walk down from the top until we hit a folder that has more than one entry.
# In practice, this strips the top-level "src/" dir from the hierarchy in
# the solution.
while len(root) == 1 and type(root[root.keys()[0]]) == dict:
root = root[root.keys()[0]]
# Collapse singles.
root = _CollapseSingles('', root)
# Merge buckets until everything is a root entry.
return _DictsToFolders('', root, flat)
def _GetPathOfProject(qualified_target, spec, options, msvs_version):
default_config = _GetDefaultConfiguration(spec)
proj_filename = default_config.get('msvs_existing_vcproj')
if not proj_filename:
proj_filename = (spec['target_name'] + options.suffix +
msvs_version.ProjectExtension())
build_file = gyp.common.BuildFile(qualified_target)
proj_path = os.path.join(os.path.dirname(build_file), proj_filename)
fix_prefix = None
if options.generator_output:
project_dir_path = os.path.dirname(os.path.abspath(proj_path))
proj_path = os.path.join(options.generator_output, proj_path)
fix_prefix = gyp.common.RelativePath(project_dir_path,
os.path.dirname(proj_path))
return proj_path, fix_prefix
def _GetPlatformOverridesOfProject(spec):
# Prepare a dict indicating which project configurations are used for which
# solution configurations for this target.
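  # Example (illustrative): with msvs_target_platform 'x64', the solution
  # configuration 'Debug|Win32' is mapped to the project configuration
  # 'Debug|x64'.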
config_platform_overrides = {}
for config_name, c in spec['configurations'].iteritems():
config_fullname = _ConfigFullName(config_name, c)
platform = c.get('msvs_target_platform', _ConfigPlatform(c))
fixed_config_fullname = '%s|%s' % (
_ConfigBaseName(config_name, _ConfigPlatform(c)), platform)
config_platform_overrides[config_fullname] = fixed_config_fullname
return config_platform_overrides
def _CreateProjectObjects(target_list, target_dicts, options, msvs_version):
"""Create a MSVSProject object for the targets found in target list.
Arguments:
target_list: the list of targets to generate project objects for.
target_dicts: the dictionary of specifications.
options: global generator options.
msvs_version: the MSVSVersion object.
Returns:
A set of created projects, keyed by target.
"""
global fixpath_prefix
# Generate each project.
projects = {}
for qualified_target in target_list:
spec = target_dicts[qualified_target]
if spec['toolset'] != 'target':
raise GypError(
'Multiple toolsets not supported in msvs build (target %s)' %
qualified_target)
proj_path, fixpath_prefix = _GetPathOfProject(qualified_target, spec,
options, msvs_version)
guid = _GetGuidOfProject(proj_path, spec)
overrides = _GetPlatformOverridesOfProject(spec)
build_file = gyp.common.BuildFile(qualified_target)
# Create object for this project.
obj = MSVSNew.MSVSProject(
proj_path,
name=spec['target_name'],
guid=guid,
spec=spec,
build_file=build_file,
config_platform_overrides=overrides,
fixpath_prefix=fixpath_prefix)
# Set project toolset if any (MS build only)
if msvs_version.UsesVcxproj():
obj.set_msbuild_toolset(
_GetMsbuildToolsetOfProject(proj_path, spec, msvs_version))
projects[qualified_target] = obj
# Set all the dependencies, but not if we are using an external builder like
# ninja
for project in projects.values():
if not project.spec.get('msvs_external_builder'):
deps = project.spec.get('dependencies', [])
deps = [projects[d] for d in deps]
project.set_dependencies(deps)
return projects
def _InitNinjaFlavor(options, target_list, target_dicts):
"""Initialize targets for the ninja flavor.
This sets up the necessary variables in the targets to generate msvs projects
that use ninja as an external builder. The variables in the spec are only set
if they have not been set. This allows individual specs to override the
default values initialized here.
Arguments:
options: Options provided to the generator.
target_list: List of target pairs: 'base/base.gyp:base'.
target_dicts: Dict of target properties keyed on target pair.
"""
for qualified_target in target_list:
spec = target_dicts[qualified_target]
if spec.get('msvs_external_builder'):
# The spec explicitly defined an external builder, so don't change it.
continue
path_to_ninja = spec.get('msvs_path_to_ninja', 'ninja.exe')
spec['msvs_external_builder'] = 'ninja'
if not spec.get('msvs_external_builder_out_dir'):
spec['msvs_external_builder_out_dir'] = \
options.depth + '/out/$(Configuration)'
if not spec.get('msvs_external_builder_build_cmd'):
spec['msvs_external_builder_build_cmd'] = [
path_to_ninja,
'-C',
'$(OutDir)',
'$(ProjectName)',
]
if not spec.get('msvs_external_builder_clean_cmd'):
spec['msvs_external_builder_clean_cmd'] = [
path_to_ninja,
'-C',
'$(OutDir)',
'-t',
'clean',
'$(ProjectName)',
]
def CalculateVariables(default_variables, params):
"""Generated variables that require params to be known."""
generator_flags = params.get('generator_flags', {})
# Select project file format version (if unset, default to auto detecting).
msvs_version = MSVSVersion.SelectVisualStudioVersion(
generator_flags.get('msvs_version', 'auto'))
# Stash msvs_version for later (so we don't have to probe the system twice).
params['msvs_version'] = msvs_version
# Set a variable so conditions can be based on msvs_version.
default_variables['MSVS_VERSION'] = msvs_version.ShortName()
  # To determine processor word size on Windows, in addition to checking
  # PROCESSOR_ARCHITECTURE (which reflects the word size of the current
  # process), it is also necessary to check PROCESSOR_ARCHITEW6432 (which
  # contains the actual word size of the system when running through WOW64).
if (os.environ.get('PROCESSOR_ARCHITECTURE', '').find('64') >= 0 or
os.environ.get('PROCESSOR_ARCHITEW6432', '').find('64') >= 0):
default_variables['MSVS_OS_BITS'] = 64
else:
default_variables['MSVS_OS_BITS'] = 32
if gyp.common.GetFlavor(params) == 'ninja':
default_variables['SHARED_INTERMEDIATE_DIR'] = '$(OutDir)gen'
def PerformBuild(data, configurations, params):
options = params['options']
msvs_version = params['msvs_version']
devenv = os.path.join(msvs_version.path, 'Common7', 'IDE', 'devenv.com')
for build_file, build_file_dict in data.iteritems():
(build_file_root, build_file_ext) = os.path.splitext(build_file)
if build_file_ext != '.gyp':
continue
sln_path = build_file_root + options.suffix + '.sln'
if options.generator_output:
sln_path = os.path.join(options.generator_output, sln_path)
for config in configurations:
arguments = [devenv, sln_path, '/Build', config]
print 'Building [%s]: %s' % (config, arguments)
      subprocess.check_call(arguments)
def GenerateOutput(target_list, target_dicts, data, params):
"""Generate .sln and .vcproj files.
This is the entry point for this generator.
Arguments:
target_list: List of target pairs: 'base/base.gyp:base'.
target_dicts: Dict of target properties keyed on target pair.
    data: Dictionary containing per .gyp data.
    params: Dictionary of generator parameters.
  """
global fixpath_prefix
options = params['options']
  # Get the project file format version back out of where we stashed it in
  # CalculateVariables.
msvs_version = params['msvs_version']
generator_flags = params.get('generator_flags', {})
# Optionally shard targets marked with 'msvs_shard': SHARD_COUNT.
(target_list, target_dicts) = MSVSUtil.ShardTargets(target_list, target_dicts)
# Optionally use the large PDB workaround for targets marked with
# 'msvs_large_pdb': 1.
(target_list, target_dicts) = MSVSUtil.InsertLargePdbShims(
target_list, target_dicts, generator_default_variables)
# Optionally configure each spec to use ninja as the external builder.
if params.get('flavor') == 'ninja':
_InitNinjaFlavor(options, target_list, target_dicts)
# Prepare the set of configurations.
configs = set()
for qualified_target in target_list:
spec = target_dicts[qualified_target]
for config_name, config in spec['configurations'].iteritems():
configs.add(_ConfigFullName(config_name, config))
configs = list(configs)
# Figure out all the projects that will be generated and their guids
project_objects = _CreateProjectObjects(target_list, target_dicts, options,
msvs_version)
# Generate each project.
missing_sources = []
for project in project_objects.values():
fixpath_prefix = project.fixpath_prefix
missing_sources.extend(_GenerateProject(project, options, msvs_version,
generator_flags))
fixpath_prefix = None
for build_file in data:
# Validate build_file extension
if not build_file.endswith('.gyp'):
continue
sln_path = os.path.splitext(build_file)[0] + options.suffix + '.sln'
if options.generator_output:
sln_path = os.path.join(options.generator_output, sln_path)
# Get projects in the solution, and their dependents.
sln_projects = gyp.common.BuildFileTargets(target_list, build_file)
sln_projects += gyp.common.DeepDependencyTargets(target_dicts, sln_projects)
# Create folder hierarchy.
root_entries = _GatherSolutionFolders(
sln_projects, project_objects, flat=msvs_version.FlatSolution())
# Create solution.
sln = MSVSNew.MSVSSolution(sln_path,
entries=root_entries,
variants=configs,
websiteProperties=False,
version=msvs_version)
sln.Write()
if missing_sources:
error_message = "Missing input files:\n" + \
'\n'.join(set(missing_sources))
if generator_flags.get('msvs_error_on_missing_sources', False):
raise GypError(error_message)
else:
print >> sys.stdout, "Warning: " + error_message
def _GenerateMSBuildFiltersFile(filters_path, source_files,
extension_to_rule_name):
"""Generate the filters file.
This file is used by Visual Studio to organize the presentation of source
files into folders.
Arguments:
filters_path: The path of the file to be created.
source_files: The hierarchical structure of all the sources.
extension_to_rule_name: A dictionary mapping file extensions to rules.
"""
filter_group = []
source_group = []
_AppendFiltersForMSBuild('', source_files, extension_to_rule_name,
filter_group, source_group)
if filter_group:
content = ['Project',
{'ToolsVersion': '4.0',
'xmlns': 'http://schemas.microsoft.com/developer/msbuild/2003'
},
['ItemGroup'] + filter_group,
['ItemGroup'] + source_group
]
easy_xml.WriteXmlIfChanged(content, filters_path, pretty=True, win32=True)
elif os.path.exists(filters_path):
# We don't need this filter anymore. Delete the old filter file.
os.unlink(filters_path)
def _AppendFiltersForMSBuild(parent_filter_name, sources,
extension_to_rule_name,
filter_group, source_group):
"""Creates the list of filters and sources to be added in the filter file.
Args:
parent_filter_name: The name of the filter under which the sources are
found.
sources: The hierarchy of filters and sources to process.
extension_to_rule_name: A dictionary mapping file extensions to rules.
filter_group: The list to which filter entries will be appended.
    source_group: The list to which source entries will be appended.
"""
for source in sources:
if isinstance(source, MSVSProject.Filter):
# We have a sub-filter. Create the name of that sub-filter.
if not parent_filter_name:
filter_name = source.name
else:
filter_name = '%s\\%s' % (parent_filter_name, source.name)
# Add the filter to the group.
filter_group.append(
['Filter', {'Include': filter_name},
['UniqueIdentifier', MSVSNew.MakeGuid(source.name)]])
# Recurse and add its dependents.
_AppendFiltersForMSBuild(filter_name, source.contents,
extension_to_rule_name,
filter_group, source_group)
else:
# It's a source. Create a source entry.
_, element = _MapFileToMsBuildSourceType(source, extension_to_rule_name)
source_entry = [element, {'Include': source}]
# Specify the filter it is part of, if any.
if parent_filter_name:
source_entry.append(['Filter', parent_filter_name])
source_group.append(source_entry)
def _MapFileToMsBuildSourceType(source, extension_to_rule_name):
"""Returns the group and element type of the source file.
Arguments:
source: The source file name.
extension_to_rule_name: A dictionary mapping file extensions to rules.
Returns:
A pair of (group this file should be part of, the label of element)
"""
_, ext = os.path.splitext(source)
if ext in extension_to_rule_name:
group = 'rule'
element = extension_to_rule_name[ext]
elif ext in ['.cc', '.cpp', '.c', '.cxx']:
group = 'compile'
element = 'ClCompile'
elif ext in ['.h', '.hxx']:
group = 'include'
element = 'ClInclude'
elif ext == '.rc':
group = 'resource'
element = 'ResourceCompile'
elif ext == '.idl':
group = 'midl'
element = 'Midl'
else:
group = 'none'
element = 'None'
return (group, element)
def _GenerateRulesForMSBuild(output_dir, options, spec,
sources, excluded_sources,
props_files_of_rules, targets_files_of_rules,
actions_to_add, extension_to_rule_name):
# MSBuild rules are implemented using three files: an XML file, a .targets
# file and a .props file.
# See http://blogs.msdn.com/b/vcblog/archive/2010/04/21/quick-help-on-vs2010-custom-build-rule.aspx
# for more details.
rules = spec.get('rules', [])
rules_native = [r for r in rules if not int(r.get('msvs_external_rule', 0))]
rules_external = [r for r in rules if int(r.get('msvs_external_rule', 0))]
msbuild_rules = []
for rule in rules_native:
# Skip a rule with no action and no inputs.
if 'action' not in rule and not rule.get('rule_sources', []):
continue
msbuild_rule = MSBuildRule(rule, spec)
msbuild_rules.append(msbuild_rule)
extension_to_rule_name[msbuild_rule.extension] = msbuild_rule.rule_name
if msbuild_rules:
base = spec['target_name'] + options.suffix
props_name = base + '.props'
targets_name = base + '.targets'
xml_name = base + '.xml'
props_files_of_rules.add(props_name)
targets_files_of_rules.add(targets_name)
props_path = os.path.join(output_dir, props_name)
targets_path = os.path.join(output_dir, targets_name)
xml_path = os.path.join(output_dir, xml_name)
_GenerateMSBuildRulePropsFile(props_path, msbuild_rules)
_GenerateMSBuildRuleTargetsFile(targets_path, msbuild_rules)
_GenerateMSBuildRuleXmlFile(xml_path, msbuild_rules)
if rules_external:
_GenerateExternalRules(rules_external, output_dir, spec,
sources, options, actions_to_add)
_AdjustSourcesForRules(spec, rules, sources, excluded_sources)
class MSBuildRule(object):
"""Used to store information used to generate an MSBuild rule.
Attributes:
rule_name: The rule name, sanitized to use in XML.
target_name: The name of the target.
after_targets: The name of the AfterTargets element.
before_targets: The name of the BeforeTargets element.
depends_on: The name of the DependsOn element.
compute_output: The name of the ComputeOutput element.
dirs_to_make: The name of the DirsToMake element.
inputs: The name of the _inputs element.
tlog: The name of the _tlog element.
extension: The extension this rule applies to.
description: The message displayed when this rule is invoked.
additional_dependencies: A string listing additional dependencies.
outputs: The outputs of this rule.
command: The command used to run the rule.
"""
def __init__(self, rule, spec):
self.display_name = rule['rule_name']
    # Ensure the rule name contains only word characters (letters, digits
    # and underscores) so it is safe to use in XML element names.
self.rule_name = re.sub(r'\W', '_', self.display_name)
# Create the various element names, following the example set by the
# Visual Studio 2008 to 2010 conversion. I don't know if VS2010
# is sensitive to the exact names.
self.target_name = '_' + self.rule_name
self.after_targets = self.rule_name + 'AfterTargets'
self.before_targets = self.rule_name + 'BeforeTargets'
self.depends_on = self.rule_name + 'DependsOn'
self.compute_output = 'Compute%sOutput' % self.rule_name
self.dirs_to_make = self.rule_name + 'DirsToMake'
self.inputs = self.rule_name + '_inputs'
self.tlog = self.rule_name + '_tlog'
self.extension = rule['extension']
if not self.extension.startswith('.'):
self.extension = '.' + self.extension
self.description = MSVSSettings.ConvertVCMacrosToMSBuild(
rule.get('message', self.rule_name))
old_additional_dependencies = _FixPaths(rule.get('inputs', []))
self.additional_dependencies = (
';'.join([MSVSSettings.ConvertVCMacrosToMSBuild(i)
for i in old_additional_dependencies]))
old_outputs = _FixPaths(rule.get('outputs', []))
self.outputs = ';'.join([MSVSSettings.ConvertVCMacrosToMSBuild(i)
for i in old_outputs])
old_command = _BuildCommandLineForRule(spec, rule, has_input_path=True,
do_setup_env=True)
self.command = MSVSSettings.ConvertVCMacrosToMSBuild(old_command)
def _GenerateMSBuildRulePropsFile(props_path, msbuild_rules):
"""Generate the .props file."""
content = ['Project',
{'xmlns': 'http://schemas.microsoft.com/developer/msbuild/2003'}]
for rule in msbuild_rules:
content.extend([
['PropertyGroup',
{'Condition': "'$(%s)' == '' and '$(%s)' == '' and "
"'$(ConfigurationType)' != 'Makefile'" % (rule.before_targets,
rule.after_targets)
},
[rule.before_targets, 'Midl'],
[rule.after_targets, 'CustomBuild'],
],
['PropertyGroup',
[rule.depends_on,
{'Condition': "'$(ConfigurationType)' != 'Makefile'"},
'_SelectedFiles;$(%s)' % rule.depends_on
],
],
['ItemDefinitionGroup',
[rule.rule_name,
['CommandLineTemplate', rule.command],
['Outputs', rule.outputs],
['ExecutionDescription', rule.description],
['AdditionalDependencies', rule.additional_dependencies],
],
]
])
easy_xml.WriteXmlIfChanged(content, props_path, pretty=True, win32=True)
def _GenerateMSBuildRuleTargetsFile(targets_path, msbuild_rules):
"""Generate the .targets file."""
content = ['Project',
{'xmlns': 'http://schemas.microsoft.com/developer/msbuild/2003'
}
]
item_group = [
'ItemGroup',
['PropertyPageSchema',
{'Include': '$(MSBuildThisFileDirectory)$(MSBuildThisFileName).xml'}
]
]
for rule in msbuild_rules:
item_group.append(
['AvailableItemName',
{'Include': rule.rule_name},
['Targets', rule.target_name],
])
content.append(item_group)
for rule in msbuild_rules:
content.append(
['UsingTask',
{'TaskName': rule.rule_name,
'TaskFactory': 'XamlTaskFactory',
'AssemblyName': 'Microsoft.Build.Tasks.v4.0'
},
['Task', '$(MSBuildThisFileDirectory)$(MSBuildThisFileName).xml'],
])
for rule in msbuild_rules:
rule_name = rule.rule_name
target_outputs = '%%(%s.Outputs)' % rule_name
target_inputs = ('%%(%s.Identity);%%(%s.AdditionalDependencies);'
'$(MSBuildProjectFile)') % (rule_name, rule_name)
rule_inputs = '%%(%s.Identity)' % rule_name
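    # Note: '%%' above is an escaped literal '%', so for a rule named
    # (illustratively) 'MyRule', rule_inputs expands to the MSBuild item
    # metadata reference '%(MyRule.Identity)'.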
extension_condition = ("'%(Extension)'=='.obj' or "
"'%(Extension)'=='.res' or "
"'%(Extension)'=='.rsc' or "
"'%(Extension)'=='.lib'")
remove_section = [
'ItemGroup',
{'Condition': "'@(SelectedFiles)' != ''"},
[rule_name,
{'Remove': '@(%s)' % rule_name,
'Condition': "'%(Identity)' != '@(SelectedFiles)'"
}
]
]
inputs_section = [
'ItemGroup',
[rule.inputs, {'Include': '%%(%s.AdditionalDependencies)' % rule_name}]
]
logging_section = [
'ItemGroup',
[rule.tlog,
{'Include': '%%(%s.Outputs)' % rule_name,
'Condition': ("'%%(%s.Outputs)' != '' and "
"'%%(%s.ExcludedFromBuild)' != 'true'" %
(rule_name, rule_name))
},
['Source', "@(%s, '|')" % rule_name],
['Inputs', "@(%s -> '%%(Fullpath)', ';')" % rule.inputs],
],
]
message_section = [
'Message',
{'Importance': 'High',
'Text': '%%(%s.ExecutionDescription)' % rule_name
}
]
write_tlog_section = [
'WriteLinesToFile',
{'Condition': "'@(%s)' != '' and '%%(%s.ExcludedFromBuild)' != "
"'true'" % (rule.tlog, rule.tlog),
'File': '$(IntDir)$(ProjectName).write.1.tlog',
'Lines': "^%%(%s.Source);@(%s->'%%(Fullpath)')" % (rule.tlog,
rule.tlog)
}
]
read_tlog_section = [
'WriteLinesToFile',
{'Condition': "'@(%s)' != '' and '%%(%s.ExcludedFromBuild)' != "
"'true'" % (rule.tlog, rule.tlog),
'File': '$(IntDir)$(ProjectName).read.1.tlog',
'Lines': "^%%(%s.Source);%%(%s.Inputs)" % (rule.tlog, rule.tlog)
}
]
command_and_input_section = [
rule_name,
{'Condition': "'@(%s)' != '' and '%%(%s.ExcludedFromBuild)' != "
"'true'" % (rule_name, rule_name),
'CommandLineTemplate': '%%(%s.CommandLineTemplate)' % rule_name,
'AdditionalOptions': '%%(%s.AdditionalOptions)' % rule_name,
'Inputs': rule_inputs
}
]
content.extend([
['Target',
{'Name': rule.target_name,
'BeforeTargets': '$(%s)' % rule.before_targets,
'AfterTargets': '$(%s)' % rule.after_targets,
'Condition': "'@(%s)' != ''" % rule_name,
'DependsOnTargets': '$(%s);%s' % (rule.depends_on,
rule.compute_output),
'Outputs': target_outputs,
'Inputs': target_inputs
},
remove_section,
inputs_section,
logging_section,
message_section,
write_tlog_section,
read_tlog_section,
command_and_input_section,
],
['PropertyGroup',
['ComputeLinkInputsTargets',
'$(ComputeLinkInputsTargets);',
'%s;' % rule.compute_output
],
['ComputeLibInputsTargets',
'$(ComputeLibInputsTargets);',
'%s;' % rule.compute_output
],
],
['Target',
{'Name': rule.compute_output,
'Condition': "'@(%s)' != ''" % rule_name
},
['ItemGroup',
[rule.dirs_to_make,
{'Condition': "'@(%s)' != '' and "
"'%%(%s.ExcludedFromBuild)' != 'true'" % (rule_name, rule_name),
'Include': '%%(%s.Outputs)' % rule_name
}
],
['Link',
{'Include': '%%(%s.Identity)' % rule.dirs_to_make,
'Condition': extension_condition
}
],
['Lib',
{'Include': '%%(%s.Identity)' % rule.dirs_to_make,
'Condition': extension_condition
}
],
['ImpLib',
{'Include': '%%(%s.Identity)' % rule.dirs_to_make,
'Condition': extension_condition
}
],
],
['MakeDir',
{'Directories': ("@(%s->'%%(RootDir)%%(Directory)')" %
rule.dirs_to_make)
}
]
],
])
easy_xml.WriteXmlIfChanged(content, targets_path, pretty=True, win32=True)
def _GenerateMSBuildRuleXmlFile(xml_path, msbuild_rules):
# Generate the .xml file
content = [
'ProjectSchemaDefinitions',
{'xmlns': ('clr-namespace:Microsoft.Build.Framework.XamlTypes;'
'assembly=Microsoft.Build.Framework'),
'xmlns:x': 'http://schemas.microsoft.com/winfx/2006/xaml',
'xmlns:sys': 'clr-namespace:System;assembly=mscorlib',
'xmlns:transformCallback':
'Microsoft.Cpp.Dev10.ConvertPropertyCallback'
}
]
for rule in msbuild_rules:
content.extend([
['Rule',
{'Name': rule.rule_name,
'PageTemplate': 'tool',
'DisplayName': rule.display_name,
'Order': '200'
},
['Rule.DataSource',
['DataSource',
{'Persistence': 'ProjectFile',
'ItemType': rule.rule_name
}
]
],
['Rule.Categories',
['Category',
{'Name': 'General'},
['Category.DisplayName',
['sys:String', 'General'],
],
],
['Category',
{'Name': 'Command Line',
'Subtype': 'CommandLine'
},
['Category.DisplayName',
['sys:String', 'Command Line'],
],
],
],
['StringListProperty',
{'Name': 'Inputs',
'Category': 'Command Line',
'IsRequired': 'true',
'Switch': ' '
},
['StringListProperty.DataSource',
['DataSource',
{'Persistence': 'ProjectFile',
'ItemType': rule.rule_name,
'SourceType': 'Item'
}
]
],
],
['StringProperty',
{'Name': 'CommandLineTemplate',
'DisplayName': 'Command Line',
'Visible': 'False',
'IncludeInCommandLine': 'False'
}
],
['DynamicEnumProperty',
{'Name': rule.before_targets,
'Category': 'General',
'EnumProvider': 'Targets',
'IncludeInCommandLine': 'False'
},
['DynamicEnumProperty.DisplayName',
['sys:String', 'Execute Before'],
],
['DynamicEnumProperty.Description',
['sys:String', 'Specifies the targets for the build customization'
' to run before.'
],
],
['DynamicEnumProperty.ProviderSettings',
['NameValuePair',
{'Name': 'Exclude',
'Value': '^%s|^Compute' % rule.before_targets
}
]
],
['DynamicEnumProperty.DataSource',
['DataSource',
{'Persistence': 'ProjectFile',
'HasConfigurationCondition': 'true'
}
]
],
],
['DynamicEnumProperty',
{'Name': rule.after_targets,
'Category': 'General',
'EnumProvider': 'Targets',
'IncludeInCommandLine': 'False'
},
['DynamicEnumProperty.DisplayName',
['sys:String', 'Execute After'],
],
['DynamicEnumProperty.Description',
['sys:String', ('Specifies the targets for the build customization'
' to run after.')
],
],
['DynamicEnumProperty.ProviderSettings',
['NameValuePair',
{'Name': 'Exclude',
'Value': '^%s|^Compute' % rule.after_targets
}
]
],
['DynamicEnumProperty.DataSource',
['DataSource',
{'Persistence': 'ProjectFile',
'ItemType': '',
'HasConfigurationCondition': 'true'
}
]
],
],
['StringListProperty',
{'Name': 'Outputs',
'DisplayName': 'Outputs',
'Visible': 'False',
'IncludeInCommandLine': 'False'
}
],
['StringProperty',
{'Name': 'ExecutionDescription',
'DisplayName': 'Execution Description',
'Visible': 'False',
'IncludeInCommandLine': 'False'
}
],
['StringListProperty',
{'Name': 'AdditionalDependencies',
'DisplayName': 'Additional Dependencies',
'IncludeInCommandLine': 'False',
'Visible': 'false'
}
],
['StringProperty',
{'Subtype': 'AdditionalOptions',
'Name': 'AdditionalOptions',
'Category': 'Command Line'
},
['StringProperty.DisplayName',
['sys:String', 'Additional Options'],
],
['StringProperty.Description',
['sys:String', 'Additional Options'],
],
],
],
['ItemType',
{'Name': rule.rule_name,
'DisplayName': rule.display_name
}
],
['FileExtension',
{'Name': '*' + rule.extension,
'ContentType': rule.rule_name
}
],
['ContentType',
{'Name': rule.rule_name,
'DisplayName': '',
'ItemType': rule.rule_name
}
]
])
easy_xml.WriteXmlIfChanged(content, xml_path, pretty=True, win32=True)
def _GetConfigurationAndPlatform(name, settings):
configuration = name.rsplit('_', 1)[0]
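  # Example (illustrative): a name like 'Debug_x64' yields 'Debug'; a name
  # with no '_' is returned unchanged.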
platform = settings.get('msvs_configuration_platform', 'Win32')
return (configuration, platform)
def _GetConfigurationCondition(name, settings):
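  # Example (illustrative): for name 'Debug' and platform 'x64' this
  # returns "'$(Configuration)|$(Platform)'=='Debug|x64'".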
return (r"'$(Configuration)|$(Platform)'=='%s|%s'" %
_GetConfigurationAndPlatform(name, settings))
def _GetMSBuildProjectConfigurations(configurations):
group = ['ItemGroup', {'Label': 'ProjectConfigurations'}]
for (name, settings) in sorted(configurations.iteritems()):
configuration, platform = _GetConfigurationAndPlatform(name, settings)
designation = '%s|%s' % (configuration, platform)
group.append(
['ProjectConfiguration', {'Include': designation},
['Configuration', configuration],
['Platform', platform]])
return [group]
def _GetMSBuildGlobalProperties(spec, guid, gyp_file_name):
namespace = os.path.splitext(gyp_file_name)[0]
return [
['PropertyGroup', {'Label': 'Globals'},
['ProjectGuid', guid],
['Keyword', 'Win32Proj'],
['RootNamespace', namespace],
]
]
def _GetMSBuildConfigurationDetails(spec, build_file):
properties = {}
for name, settings in spec['configurations'].iteritems():
msbuild_attributes = _GetMSBuildAttributes(spec, settings, build_file)
condition = _GetConfigurationCondition(name, settings)
character_set = msbuild_attributes.get('CharacterSet')
_AddConditionalProperty(properties, condition, 'ConfigurationType',
msbuild_attributes['ConfigurationType'])
if character_set:
_AddConditionalProperty(properties, condition, 'CharacterSet',
character_set)
return _GetMSBuildPropertyGroup(spec, 'Configuration', properties)
def _GetMSBuildLocalProperties(msbuild_toolset):
# Currently the only local property we support is PlatformToolset
properties = {}
if msbuild_toolset:
properties = [
['PropertyGroup', {'Label': 'Locals'},
['PlatformToolset', msbuild_toolset],
]
]
return properties
def _GetMSBuildPropertySheets(configurations):
user_props = r'$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props'
additional_props = {}
props_specified = False
for name, settings in sorted(configurations.iteritems()):
configuration = _GetConfigurationCondition(name, settings)
    if 'msbuild_props' in settings:
additional_props[configuration] = _FixPaths(settings['msbuild_props'])
props_specified = True
else:
additional_props[configuration] = ''
if not props_specified:
return [
['ImportGroup',
{'Label': 'PropertySheets'},
['Import',
{'Project': user_props,
'Condition': "exists('%s')" % user_props,
'Label': 'LocalAppDataPlatform'
}
]
]
]
else:
sheets = []
for condition, props in additional_props.iteritems():
import_group = [
'ImportGroup',
{'Label': 'PropertySheets',
'Condition': condition
},
['Import',
{'Project': user_props,
'Condition': "exists('%s')" % user_props,
'Label': 'LocalAppDataPlatform'
}
]
]
for props_file in props:
import_group.append(['Import', {'Project':props_file}])
sheets.append(import_group)
return sheets
def _ConvertMSVSBuildAttributes(spec, config, build_file):
config_type = _GetMSVSConfigurationType(spec, build_file)
msvs_attributes = _GetMSVSAttributes(spec, config, config_type)
msbuild_attributes = {}
for a in msvs_attributes:
if a in ['IntermediateDirectory', 'OutputDirectory']:
directory = MSVSSettings.ConvertVCMacrosToMSBuild(msvs_attributes[a])
if not directory.endswith('\\'):
directory += '\\'
msbuild_attributes[a] = directory
elif a == 'CharacterSet':
msbuild_attributes[a] = _ConvertMSVSCharacterSet(msvs_attributes[a])
elif a == 'ConfigurationType':
msbuild_attributes[a] = _ConvertMSVSConfigurationType(msvs_attributes[a])
else:
print 'Warning: Do not know how to convert MSVS attribute ' + a
return msbuild_attributes
def _ConvertMSVSCharacterSet(char_set):
if char_set.isdigit():
char_set = {
'0': 'MultiByte',
'1': 'Unicode',
'2': 'MultiByte',
}[char_set]
return char_set
def _ConvertMSVSConfigurationType(config_type):
if config_type.isdigit():
config_type = {
'1': 'Application',
'2': 'DynamicLibrary',
'4': 'StaticLibrary',
'10': 'Utility'
}[config_type]
return config_type
def _GetMSBuildAttributes(spec, config, build_file):
if 'msbuild_configuration_attributes' not in config:
msbuild_attributes = _ConvertMSVSBuildAttributes(spec, config, build_file)
else:
config_type = _GetMSVSConfigurationType(spec, build_file)
config_type = _ConvertMSVSConfigurationType(config_type)
msbuild_attributes = config.get('msbuild_configuration_attributes', {})
msbuild_attributes.setdefault('ConfigurationType', config_type)
output_dir = msbuild_attributes.get('OutputDirectory',
'$(SolutionDir)$(Configuration)')
msbuild_attributes['OutputDirectory'] = _FixPath(output_dir) + '\\'
if 'IntermediateDirectory' not in msbuild_attributes:
intermediate = _FixPath('$(Configuration)') + '\\'
msbuild_attributes['IntermediateDirectory'] = intermediate
if 'CharacterSet' in msbuild_attributes:
msbuild_attributes['CharacterSet'] = _ConvertMSVSCharacterSet(
msbuild_attributes['CharacterSet'])
if 'TargetName' not in msbuild_attributes:
prefix = spec.get('product_prefix', '')
product_name = spec.get('product_name', '$(ProjectName)')
target_name = prefix + product_name
msbuild_attributes['TargetName'] = target_name
if 'TargetExt' not in msbuild_attributes and 'product_extension' in spec:
ext = spec.get('product_extension')
msbuild_attributes['TargetExt'] = '.' + ext
if spec.get('msvs_external_builder'):
external_out_dir = spec.get('msvs_external_builder_out_dir', '.')
msbuild_attributes['OutputDirectory'] = _FixPath(external_out_dir) + '\\'
# Make sure that 'TargetPath' matches 'Lib.OutputFile' or 'Link.OutputFile'
# (depending on the tool used) to avoid MSB8012 warning.
msbuild_tool_map = {
'executable': 'Link',
'shared_library': 'Link',
'loadable_module': 'Link',
'static_library': 'Lib',
}
msbuild_tool = msbuild_tool_map.get(spec['type'])
if msbuild_tool:
msbuild_settings = config['finalized_msbuild_settings']
out_file = msbuild_settings[msbuild_tool].get('OutputFile')
if out_file:
msbuild_attributes['TargetPath'] = _FixPath(out_file)
target_ext = msbuild_settings[msbuild_tool].get('TargetExt')
if target_ext:
msbuild_attributes['TargetExt'] = target_ext
return msbuild_attributes
def _GetMSBuildConfigurationGlobalProperties(spec, configurations, build_file):
# TODO(jeanluc) We could optimize out the following and do it only if
# there are actions.
# TODO(jeanluc) Handle the equivalent of setting 'CYGWIN=nontsec'.
new_paths = []
cygwin_dirs = spec.get('msvs_cygwin_dirs', ['.'])[0]
if cygwin_dirs:
cyg_path = '$(MSBuildProjectDirectory)\\%s\\bin\\' % _FixPath(cygwin_dirs)
new_paths.append(cyg_path)
# TODO(jeanluc) Change the convention to have both a cygwin_dir and a
# python_dir.
python_path = cyg_path.replace('cygwin\\bin', 'python_26')
new_paths.append(python_path)
if new_paths:
new_paths = '$(ExecutablePath);' + ';'.join(new_paths)
properties = {}
for (name, configuration) in sorted(configurations.iteritems()):
condition = _GetConfigurationCondition(name, configuration)
attributes = _GetMSBuildAttributes(spec, configuration, build_file)
msbuild_settings = configuration['finalized_msbuild_settings']
_AddConditionalProperty(properties, condition, 'IntDir',
attributes['IntermediateDirectory'])
_AddConditionalProperty(properties, condition, 'OutDir',
attributes['OutputDirectory'])
_AddConditionalProperty(properties, condition, 'TargetName',
attributes['TargetName'])
if 'TargetExt' in attributes:
_AddConditionalProperty(properties, condition, 'TargetExt',
attributes['TargetExt'])
if attributes.get('TargetPath'):
_AddConditionalProperty(properties, condition, 'TargetPath',
attributes['TargetPath'])
if attributes.get('TargetExt'):
_AddConditionalProperty(properties, condition, 'TargetExt',
attributes['TargetExt'])
if new_paths:
_AddConditionalProperty(properties, condition, 'ExecutablePath',
new_paths)
tool_settings = msbuild_settings.get('', {})
for name, value in sorted(tool_settings.iteritems()):
formatted_value = _GetValueFormattedForMSBuild('', name, value)
_AddConditionalProperty(properties, condition, name, formatted_value)
return _GetMSBuildPropertyGroup(spec, None, properties)
def _AddConditionalProperty(properties, condition, name, value):
"""Adds a property / conditional value pair to a dictionary.
Arguments:
properties: The dictionary to be modified. The key is the name of the
property. The value is itself a dictionary; its key is the value and
      the value a list of conditions for which this value is true.
condition: The condition under which the named property has the value.
name: The name of the property.
value: The value of the property.
"""
if name not in properties:
properties[name] = {}
values = properties[name]
if value not in values:
values[value] = []
conditions = values[value]
conditions.append(condition)
# Regex for msvs variable references (i.e. $(FOO)).
MSVS_VARIABLE_REFERENCE = re.compile(r'\$\(([a-zA-Z_][a-zA-Z0-9_]*)\)')
def _GetMSBuildPropertyGroup(spec, label, properties):
"""Returns a PropertyGroup definition for the specified properties.
Arguments:
spec: The target project dict.
label: An optional label for the PropertyGroup.
properties: The dictionary to be converted. The key is the name of the
property. The value is itself a dictionary; its key is the value and
      the value a list of conditions for which this value is true.
"""
group = ['PropertyGroup']
if label:
group.append({'Label': label})
num_configurations = len(spec['configurations'])
def GetEdges(node):
    # Use a definition of edges such that user_of_variable -> used_variable.
# This happens to be easier in this case, since a variable's
# definition contains all variables it references in a single string.
edges = set()
for value in sorted(properties[node].keys()):
# Add to edges all $(...) references to variables.
#
      # Variable references that refer to names not in properties are
      # excluded. These can exist, for instance, to refer to built-in
      # definitions like $(SolutionDir).
#
# Self references are ignored. Self reference is used in a few places to
# append to the default value. I.e. PATH=$(PATH);other_path
edges.update(set([v for v in MSVS_VARIABLE_REFERENCE.findall(value)
if v in properties and v != node]))
return edges
properties_ordered = gyp.common.TopologicallySorted(
properties.keys(), GetEdges)
# Walk properties in the reverse of a topological sort on
# user_of_variable -> used_variable as this ensures variables are
# defined before they are used.
# NOTE: reverse(topsort(DAG)) = topsort(reverse_edges(DAG))
for name in reversed(properties_ordered):
values = properties[name]
for value, conditions in sorted(values.iteritems()):
if len(conditions) == num_configurations:
        # If the value is the same for all configurations,
        # just add one unconditional entry.
group.append([name, value])
else:
for condition in conditions:
group.append([name, {'Condition': condition}, value])
return [group]
def _GetMSBuildToolSettingsSections(spec, configurations):
groups = []
for (name, configuration) in sorted(configurations.iteritems()):
msbuild_settings = configuration['finalized_msbuild_settings']
group = ['ItemDefinitionGroup',
{'Condition': _GetConfigurationCondition(name, configuration)}
]
for tool_name, tool_settings in sorted(msbuild_settings.iteritems()):
# Skip the tool named '' which is a holder of global settings handled
# by _GetMSBuildConfigurationGlobalProperties.
if tool_name:
if tool_settings:
tool = [tool_name]
for name, value in sorted(tool_settings.iteritems()):
formatted_value = _GetValueFormattedForMSBuild(tool_name, name,
value)
tool.append([name, formatted_value])
group.append(tool)
groups.append(group)
return groups
def _FinalizeMSBuildSettings(spec, configuration):
if 'msbuild_settings' in configuration:
converted = False
msbuild_settings = configuration['msbuild_settings']
MSVSSettings.ValidateMSBuildSettings(msbuild_settings)
else:
converted = True
msvs_settings = configuration.get('msvs_settings', {})
msbuild_settings = MSVSSettings.ConvertToMSBuildSettings(msvs_settings)
include_dirs, resource_include_dirs = _GetIncludeDirs(configuration)
libraries = _GetLibraries(spec)
library_dirs = _GetLibraryDirs(configuration)
out_file, _, msbuild_tool = _GetOutputFilePathAndTool(spec, msbuild=True)
target_ext = _GetOutputTargetExt(spec)
defines = _GetDefines(configuration)
if converted:
# Visual Studio 2010 has TR1
defines = [d for d in defines if d != '_HAS_TR1=0']
# Warn of ignored settings
ignored_settings = ['msvs_prebuild', 'msvs_postbuild', 'msvs_tool_files']
for ignored_setting in ignored_settings:
value = configuration.get(ignored_setting)
if value:
print ('Warning: The automatic conversion to MSBuild does not handle '
'%s. Ignoring setting of %s' % (ignored_setting, str(value)))
defines = [_EscapeCppDefineForMSBuild(d) for d in defines]
disabled_warnings = _GetDisabledWarnings(configuration)
# TODO(jeanluc) Validate & warn that we don't translate
# prebuild = configuration.get('msvs_prebuild')
# postbuild = configuration.get('msvs_postbuild')
def_file = _GetModuleDefinition(spec)
precompiled_header = configuration.get('msvs_precompiled_header')
# Add the information to the appropriate tool
# TODO(jeanluc) We could optimize and generate these settings only if
# the corresponding files are found, e.g. don't generate ResourceCompile
# if you don't have any resources.
_ToolAppend(msbuild_settings, 'ClCompile',
'AdditionalIncludeDirectories', include_dirs)
_ToolAppend(msbuild_settings, 'ResourceCompile',
'AdditionalIncludeDirectories', resource_include_dirs)
  # Add in libraries; note that even for empty libraries we want this
  # set, to prevent inheriting default libraries from the environment.
_ToolSetOrAppend(msbuild_settings, 'Link', 'AdditionalDependencies',
libraries)
_ToolAppend(msbuild_settings, 'Link', 'AdditionalLibraryDirectories',
library_dirs)
if out_file:
_ToolAppend(msbuild_settings, msbuild_tool, 'OutputFile', out_file,
only_if_unset=True)
if target_ext:
_ToolAppend(msbuild_settings, msbuild_tool, 'TargetExt', target_ext,
only_if_unset=True)
# Add defines.
_ToolAppend(msbuild_settings, 'ClCompile',
'PreprocessorDefinitions', defines)
_ToolAppend(msbuild_settings, 'ResourceCompile',
'PreprocessorDefinitions', defines)
# Add disabled warnings.
_ToolAppend(msbuild_settings, 'ClCompile',
'DisableSpecificWarnings', disabled_warnings)
# Turn on precompiled headers if appropriate.
if precompiled_header:
precompiled_header = os.path.split(precompiled_header)[1]
_ToolAppend(msbuild_settings, 'ClCompile', 'PrecompiledHeader', 'Use')
_ToolAppend(msbuild_settings, 'ClCompile',
'PrecompiledHeaderFile', precompiled_header)
_ToolAppend(msbuild_settings, 'ClCompile',
'ForcedIncludeFiles', [precompiled_header])
# Loadable modules don't generate import libraries;
# tell dependent projects to not expect one.
if spec['type'] == 'loadable_module':
_ToolAppend(msbuild_settings, '', 'IgnoreImportLibrary', 'true')
# Set the module definition file if any.
if def_file:
_ToolAppend(msbuild_settings, 'Link', 'ModuleDefinitionFile', def_file)
configuration['finalized_msbuild_settings'] = msbuild_settings
def _GetValueFormattedForMSBuild(tool_name, name, value):
if type(value) == list:
    # For some settings, VS2010 does not automatically extend the setting
    # with the inherited '%(Name)' values, so append the reference explicitly.
# TODO(jeanluc) Is this what we want?
if name in ['AdditionalIncludeDirectories',
'AdditionalLibraryDirectories',
'AdditionalOptions',
'DelayLoadDLLs',
'DisableSpecificWarnings',
'PreprocessorDefinitions']:
value.append('%%(%s)' % name)
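    # Example (illustrative): ['FOO', 'BAR'] for PreprocessorDefinitions
    # becomes 'FOO;BAR;%(PreprocessorDefinitions)' after the join below.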
# For most tools, entries in a list should be separated with ';' but some
# settings use a space. Check for those first.
exceptions = {
'ClCompile': ['AdditionalOptions'],
'Link': ['AdditionalOptions'],
'Lib': ['AdditionalOptions']}
if tool_name in exceptions and name in exceptions[tool_name]:
char = ' '
else:
char = ';'
formatted_value = char.join(
[MSVSSettings.ConvertVCMacrosToMSBuild(i) for i in value])
else:
formatted_value = MSVSSettings.ConvertVCMacrosToMSBuild(value)
return formatted_value
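# Illustrative sketch (not part of the original module): per the rules above,
# a hypothetical extendable list setting would be rendered as
#   _GetValueFormattedForMSBuild('ClCompile', 'PreprocessorDefinitions',
#                                ['FOO', 'BAR'])
#   -> 'FOO;BAR;%(PreprocessorDefinitions)'
# while an 'AdditionalOptions' list would be joined with spaces instead.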
def _VerifySourcesExist(sources, root_dir):
"""Verifies that all source files exist on disk.
Checks that all regular source files, i.e. not created at run time,
exist on disk. Missing files cause needless recompilation but no otherwise
visible errors.
Arguments:
sources: A recursive list of Filter/file names.
root_dir: The root directory for the relative path names.
Returns:
A list of source files that cannot be found on disk.
"""
missing_sources = []
for source in sources:
if isinstance(source, MSVSProject.Filter):
missing_sources.extend(_VerifySourcesExist(source.contents, root_dir))
else:
if '$' not in source:
full_path = os.path.join(root_dir, source)
if not os.path.exists(full_path):
missing_sources.append(full_path)
return missing_sources
def _GetMSBuildSources(spec, sources, exclusions, extension_to_rule_name,
actions_spec, sources_handled_by_action, list_excluded):
groups = ['none', 'midl', 'include', 'compile', 'resource', 'rule']
grouped_sources = {}
for g in groups:
grouped_sources[g] = []
_AddSources2(spec, sources, exclusions, grouped_sources,
extension_to_rule_name, sources_handled_by_action, list_excluded)
sources = []
for g in groups:
if grouped_sources[g]:
sources.append(['ItemGroup'] + grouped_sources[g])
if actions_spec:
sources.append(['ItemGroup'] + actions_spec)
return sources
def _AddSources2(spec, sources, exclusions, grouped_sources,
extension_to_rule_name, sources_handled_by_action,
list_excluded):
extensions_excluded_from_precompile = []
for source in sources:
if isinstance(source, MSVSProject.Filter):
_AddSources2(spec, source.contents, exclusions, grouped_sources,
extension_to_rule_name, sources_handled_by_action,
list_excluded)
else:
      if source not in sources_handled_by_action:
detail = []
excluded_configurations = exclusions.get(source, [])
if len(excluded_configurations) == len(spec['configurations']):
detail.append(['ExcludedFromBuild', 'true'])
else:
for config_name, configuration in sorted(excluded_configurations):
condition = _GetConfigurationCondition(config_name, configuration)
detail.append(['ExcludedFromBuild',
{'Condition': condition},
'true'])
# Add precompile if needed
for config_name, configuration in spec['configurations'].iteritems():
precompiled_source = configuration.get('msvs_precompiled_source', '')
if precompiled_source != '':
precompiled_source = _FixPath(precompiled_source)
if not extensions_excluded_from_precompile:
# If the precompiled header is generated by a C source, we must
# not try to use it for C++ sources, and vice versa.
basename, extension = os.path.splitext(precompiled_source)
if extension == '.c':
extensions_excluded_from_precompile = ['.cc', '.cpp', '.cxx']
else:
extensions_excluded_from_precompile = ['.c']
if precompiled_source == source:
condition = _GetConfigurationCondition(config_name, configuration)
detail.append(['PrecompiledHeader',
{'Condition': condition},
'Create'
])
else:
# Turn off precompiled header usage for source files of a
# different type than the file that generated the
# precompiled header.
for extension in extensions_excluded_from_precompile:
if source.endswith(extension):
detail.append(['PrecompiledHeader', ''])
detail.append(['ForcedIncludeFiles', ''])
group, element = _MapFileToMsBuildSourceType(source,
extension_to_rule_name)
grouped_sources[group].append([element, {'Include': source}] + detail)
def _GetMSBuildProjectReferences(project):
references = []
if project.dependencies:
group = ['ItemGroup']
for dependency in project.dependencies:
guid = dependency.guid
project_dir = os.path.split(project.path)[0]
relative_path = gyp.common.RelativePath(dependency.path, project_dir)
project_ref = ['ProjectReference',
{'Include': relative_path},
['Project', guid],
['ReferenceOutputAssembly', 'false']
]
for config in dependency.spec.get('configurations', {}).itervalues():
# If it's disabled in any config, turn it off in the reference.
if config.get('msvs_2010_disable_uldi_when_referenced', 0):
project_ref.append(['UseLibraryDependencyInputs', 'false'])
break
group.append(project_ref)
references.append(group)
return references
def _GenerateMSBuildProject(project, options, version, generator_flags):
spec = project.spec
configurations = spec['configurations']
project_dir, project_file_name = os.path.split(project.path)
gyp.common.EnsureDirExists(project.path)
# Prepare list of sources and excluded sources.
gyp_path = _NormalizedSource(project.build_file)
relative_path_of_gyp_file = gyp.common.RelativePath(gyp_path, project_dir)
gyp_file = os.path.split(project.build_file)[1]
sources, excluded_sources = _PrepareListOfSources(spec, generator_flags,
gyp_file)
# Add rules.
actions_to_add = {}
props_files_of_rules = set()
targets_files_of_rules = set()
extension_to_rule_name = {}
list_excluded = generator_flags.get('msvs_list_excluded_files', True)
# Don't generate rules if we are using an external builder like ninja.
if not spec.get('msvs_external_builder'):
_GenerateRulesForMSBuild(project_dir, options, spec,
sources, excluded_sources,
props_files_of_rules, targets_files_of_rules,
actions_to_add, extension_to_rule_name)
else:
rules = spec.get('rules', [])
_AdjustSourcesForRules(spec, rules, sources, excluded_sources)
sources, excluded_sources, excluded_idl = (
_AdjustSourcesAndConvertToFilterHierarchy(spec, options,
project_dir, sources,
excluded_sources,
list_excluded, version))
# Don't add actions if we are using an external builder like ninja.
if not spec.get('msvs_external_builder'):
_AddActions(actions_to_add, spec, project.build_file)
_AddCopies(actions_to_add, spec)
# NOTE: this stanza must appear after all actions have been decided.
  # Don't exclude sources with actions attached, or they won't run.
excluded_sources = _FilterActionsFromExcluded(
excluded_sources, actions_to_add)
exclusions = _GetExcludedFilesFromBuild(spec, excluded_sources, excluded_idl)
actions_spec, sources_handled_by_action = _GenerateActionsForMSBuild(
spec, actions_to_add)
_GenerateMSBuildFiltersFile(project.path + '.filters', sources,
extension_to_rule_name)
missing_sources = _VerifySourcesExist(sources, project_dir)
for configuration in configurations.itervalues():
_FinalizeMSBuildSettings(spec, configuration)
# Add attributes to root element
import_default_section = [
['Import', {'Project': r'$(VCTargetsPath)\Microsoft.Cpp.Default.props'}]]
import_cpp_props_section = [
['Import', {'Project': r'$(VCTargetsPath)\Microsoft.Cpp.props'}]]
import_cpp_targets_section = [
['Import', {'Project': r'$(VCTargetsPath)\Microsoft.Cpp.targets'}]]
macro_section = [['PropertyGroup', {'Label': 'UserMacros'}]]
content = [
'Project',
{'xmlns': 'http://schemas.microsoft.com/developer/msbuild/2003',
'ToolsVersion': version.ProjectVersion(),
'DefaultTargets': 'Build'
}]
content += _GetMSBuildProjectConfigurations(configurations)
content += _GetMSBuildGlobalProperties(spec, project.guid, project_file_name)
content += import_default_section
content += _GetMSBuildConfigurationDetails(spec, project.build_file)
content += _GetMSBuildLocalProperties(project.msbuild_toolset)
content += import_cpp_props_section
content += _GetMSBuildExtensions(props_files_of_rules)
content += _GetMSBuildPropertySheets(configurations)
content += macro_section
content += _GetMSBuildConfigurationGlobalProperties(spec, configurations,
project.build_file)
content += _GetMSBuildToolSettingsSections(spec, configurations)
content += _GetMSBuildSources(
spec, sources, exclusions, extension_to_rule_name, actions_spec,
sources_handled_by_action, list_excluded)
content += _GetMSBuildProjectReferences(project)
content += import_cpp_targets_section
content += _GetMSBuildExtensionTargets(targets_files_of_rules)
if spec.get('msvs_external_builder'):
content += _GetMSBuildExternalBuilderTargets(spec)
# TODO(jeanluc) File a bug to get rid of runas. We had in MSVS:
# has_run_as = _WriteMSVSUserFile(project.path, version, spec)
easy_xml.WriteXmlIfChanged(content, project.path, pretty=True, win32=True)
return missing_sources
def _GetMSBuildExternalBuilderTargets(spec):
"""Return a list of MSBuild targets for external builders.
Right now, only "Build" and "Clean" targets are generated.
Arguments:
spec: The gyp target spec.
Returns:
List of MSBuild 'Target' specs.
"""
build_cmd = _BuildCommandLineForRuleRaw(
spec, spec['msvs_external_builder_build_cmd'],
False, False, False, False)
build_target = ['Target', {'Name': 'Build'}]
build_target.append(['Exec', {'Command': build_cmd}])
clean_cmd = _BuildCommandLineForRuleRaw(
spec, spec['msvs_external_builder_clean_cmd'],
False, False, False, False)
clean_target = ['Target', {'Name': 'Clean'}]
clean_target.append(['Exec', {'Command': clean_cmd}])
return [build_target, clean_target]
def _GetMSBuildExtensions(props_files_of_rules):
extensions = ['ImportGroup', {'Label': 'ExtensionSettings'}]
for props_file in props_files_of_rules:
extensions.append(['Import', {'Project': props_file}])
return [extensions]
def _GetMSBuildExtensionTargets(targets_files_of_rules):
targets_node = ['ImportGroup', {'Label': 'ExtensionTargets'}]
for targets_file in sorted(targets_files_of_rules):
targets_node.append(['Import', {'Project': targets_file}])
return [targets_node]
def _GenerateActionsForMSBuild(spec, actions_to_add):
"""Add actions accumulated into an actions_to_add, merging as needed.
Arguments:
spec: the target project dict
actions_to_add: dictionary keyed on input name, which maps to a list of
dicts describing the actions attached to that input file.
Returns:
A pair of (action specification, the sources handled by this action).
"""
sources_handled_by_action = OrderedSet()
actions_spec = []
for primary_input, actions in actions_to_add.iteritems():
inputs = OrderedSet()
outputs = OrderedSet()
descriptions = []
commands = []
for action in actions:
inputs.update(OrderedSet(action['inputs']))
outputs.update(OrderedSet(action['outputs']))
descriptions.append(action['description'])
cmd = action['command']
# For most actions, add 'call' so that actions that invoke batch files
# return and continue executing. msbuild_use_call provides a way to
# disable this but I have not seen any adverse effect from doing that
# for everything.
if action.get('msbuild_use_call', True):
cmd = 'call ' + cmd
commands.append(cmd)
# Add the custom build action for one input file.
description = ', and also '.join(descriptions)
# We can't join the commands simply with && because the command line will
# get too long. See also _AddActions: cygwin's setup_env mustn't be called
# for every invocation or the command that sets the PATH will grow too
# long.
command = (
'\r\nif %errorlevel% neq 0 exit /b %errorlevel%\r\n'.join(commands))
_AddMSBuildAction(spec,
primary_input,
inputs,
outputs,
command,
description,
sources_handled_by_action,
actions_spec)
return actions_spec, sources_handled_by_action
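# Illustrative sketch (not in the original file): with two hypothetical merged
# commands ['call gen_a.bat', 'call gen_b.bat'], the joined command becomes
#   'call gen_a.bat\r\nif %errorlevel% neq 0 exit /b %errorlevel%\r\ncall gen_b.bat'
# so the second batch file only runs if the first one succeeded.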
def _AddMSBuildAction(spec, primary_input, inputs, outputs, cmd, description,
sources_handled_by_action, actions_spec):
command = MSVSSettings.ConvertVCMacrosToMSBuild(cmd)
primary_input = _FixPath(primary_input)
inputs_array = _FixPaths(inputs)
outputs_array = _FixPaths(outputs)
additional_inputs = ';'.join([i for i in inputs_array
if i != primary_input])
outputs = ';'.join(outputs_array)
sources_handled_by_action.add(primary_input)
action_spec = ['CustomBuild', {'Include': primary_input}]
action_spec.extend(
# TODO(jeanluc) 'Document' for all or just if as_sources?
[['FileType', 'Document'],
['Command', command],
['Message', description],
['Outputs', outputs]
])
if additional_inputs:
action_spec.append(['AdditionalInputs', additional_inputs])
actions_spec.append(action_spec)
| mit |
KnightHawk3/GUTG-Vote | GUTG_Vote/utilities.py | 1 | 1451 | import json
import gspread
from oauth2client.client import SignedJwtAssertionCredentials
from GUTG_Vote.models import User, Game
json_key = json.load(open('auth.json'))
scope = ['https://spreadsheets.google.com/feeds']
credentials = SignedJwtAssertionCredentials(json_key['client_email'],
bytes(json_key['private_key'],
'UTF-8'), scope)
gc = gspread.Client(auth=credentials)
gc.login()
sheet = gc.open_by_key('1ele3BSmZMKWpunF0f3L0_Eeye7ELqtbrb7W89vdtAdk')
def sync_mongo_with_spreadsheet():
registrants = sheet.worksheet('Registrants').get_all_values()
registrants.pop(0)
User.objects.delete()
for user in registrants:
        if user[1] != '':
User(username=user[1], password=user[2]).save()
wishlist = sheet.worksheet('Wishlist').get_all_values()
wishlist.pop(0)
Game.objects.delete()
for ittr, game in enumerate(wishlist):
        if game[0] != '':
Game(title=game[0], url=game[1], votes=int(game[2]), cost=float(game[3]), game_id=int(ittr)).save()
def sync_spreadsheet_with_mongo():
wishlist = sheet.worksheet('Wishlist')
for ittr, game in enumerate(Game.objects):
wishlist.update_cell(ittr + 2, 1, str(game.title))
wishlist.update_cell(ittr + 2, 2, game.url)
wishlist.update_cell(ittr + 2, 3, game.votes)
wishlist.update_cell(ittr + 2, 4, game.cost) | mit |
tkingless/webtesting | venvs/tutorials/lib/python2.7/site-packages/pip/_vendor/cachecontrol/caches/file_cache.py | 762 | 3532 | import hashlib
import os
from pip._vendor.lockfile import LockFile
from pip._vendor.lockfile.mkdirlockfile import MkdirLockFile
from ..cache import BaseCache
from ..controller import CacheController
def _secure_open_write(filename, fmode):
# We only want to write to this file, so open it in write only mode
flags = os.O_WRONLY
    # os.O_CREAT | os.O_EXCL will fail if the file already exists, so we will
    # only open *new* files.
# We specify this because we want to ensure that the mode we pass is the
# mode of the file.
flags |= os.O_CREAT | os.O_EXCL
# Do not follow symlinks to prevent someone from making a symlink that
# we follow and insecurely open a cache file.
if hasattr(os, "O_NOFOLLOW"):
flags |= os.O_NOFOLLOW
# On Windows we'll mark this file as binary
if hasattr(os, "O_BINARY"):
flags |= os.O_BINARY
# Before we open our file, we want to delete any existing file that is
# there
try:
os.remove(filename)
except (IOError, OSError):
# The file must not exist already, so we can just skip ahead to opening
pass
# Open our file, the use of os.O_CREAT | os.O_EXCL will ensure that if a
# race condition happens between the os.remove and this line, that an
# error will be raised. Because we utilize a lockfile this should only
# happen if someone is attempting to attack us.
fd = os.open(filename, flags, fmode)
try:
return os.fdopen(fd, "wb")
except:
# An error occurred wrapping our FD in a file object
os.close(fd)
raise
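# Usage sketch (illustrative only; the path is hypothetical):
#   fh = _secure_open_write('/tmp/cache/entry', 0o600)
#   fh.write(b'cached response bytes')
#   fh.close()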
class FileCache(BaseCache):
def __init__(self, directory, forever=False, filemode=0o0600,
dirmode=0o0700, use_dir_lock=None, lock_class=None):
if use_dir_lock is not None and lock_class is not None:
raise ValueError("Cannot use use_dir_lock and lock_class together")
if use_dir_lock:
lock_class = MkdirLockFile
if lock_class is None:
lock_class = LockFile
self.directory = directory
self.forever = forever
self.filemode = filemode
self.dirmode = dirmode
self.lock_class = lock_class
@staticmethod
def encode(x):
return hashlib.sha224(x.encode()).hexdigest()
def _fn(self, name):
# NOTE: This method should not change as some may depend on it.
# See: https://github.com/ionrock/cachecontrol/issues/63
hashed = self.encode(name)
parts = list(hashed[:5]) + [hashed]
return os.path.join(self.directory, *parts)
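    # Illustrative sketch (not in the original source): if encode(name)
    # returned the hypothetical digest 'f00dbeef...', the entry would live at
    # <directory>/f/0/0/d/b/f00dbeef..., spreading keys across subdirectories
    # so no single directory grows too large.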
def get(self, key):
name = self._fn(key)
if not os.path.exists(name):
return None
with open(name, 'rb') as fh:
return fh.read()
def set(self, key, value):
name = self._fn(key)
# Make sure the directory exists
try:
os.makedirs(os.path.dirname(name), self.dirmode)
except (IOError, OSError):
pass
with self.lock_class(name) as lock:
# Write our actual file
with _secure_open_write(lock.path, self.filemode) as fh:
fh.write(value)
def delete(self, key):
name = self._fn(key)
if not self.forever:
os.remove(name)
def url_to_file_path(url, filecache):
"""Return the file cache path based on the URL.
This does not ensure the file exists!
"""
key = CacheController.cache_url(url)
return filecache._fn(key)
| mit |
dct2012/chromeos-3.14 | tools/perf/scripts/python/check-perf-trace.py | 11214 | 2503 | # perf script event handlers, generated by perf script -g python
# (c) 2010, Tom Zanussi <tzanussi@gmail.com>
# Licensed under the terms of the GNU GPL License version 2
#
# This script tests basic functionality such as flag and symbol
# strings, common_xxx() calls back into perf, begin, end, unhandled
# events, etc. Basically, if this script runs successfully and
# displays expected results, Python scripting support should be ok.
import os
import sys
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from Core import *
from perf_trace_context import *
unhandled = autodict()
def trace_begin():
print "trace_begin"
pass
def trace_end():
print_unhandled()
def irq__softirq_entry(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
vec):
print_header(event_name, common_cpu, common_secs, common_nsecs,
common_pid, common_comm)
print_uncommon(context)
print "vec=%s\n" % \
(symbol_str("irq__softirq_entry", "vec", vec)),
def kmem__kmalloc(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
call_site, ptr, bytes_req, bytes_alloc,
gfp_flags):
print_header(event_name, common_cpu, common_secs, common_nsecs,
common_pid, common_comm)
print_uncommon(context)
print "call_site=%u, ptr=%u, bytes_req=%u, " \
"bytes_alloc=%u, gfp_flags=%s\n" % \
(call_site, ptr, bytes_req, bytes_alloc,
flag_str("kmem__kmalloc", "gfp_flags", gfp_flags)),
def trace_unhandled(event_name, context, event_fields_dict):
try:
unhandled[event_name] += 1
except TypeError:
unhandled[event_name] = 1
def print_header(event_name, cpu, secs, nsecs, pid, comm):
print "%-20s %5u %05u.%09u %8u %-20s " % \
(event_name, cpu, secs, nsecs, pid, comm),
# print trace fields not included in handler args
def print_uncommon(context):
print "common_preempt_count=%d, common_flags=%s, common_lock_depth=%d, " \
% (common_pc(context), trace_flag_str(common_flags(context)), \
common_lock_depth(context))
def print_unhandled():
keys = unhandled.keys()
if not keys:
return
print "\nunhandled events:\n\n",
print "%-40s %10s\n" % ("event", "count"),
print "%-40s %10s\n" % ("----------------------------------------", \
"-----------"),
for event_name in keys:
print "%-40s %10d\n" % (event_name, unhandled[event_name])
| gpl-2.0 |
evaschalde/odoo | addons/website_forum_doc/controllers/main.py | 338 | 3425 | # -*- coding: utf-8 -*-
from openerp import http
from openerp.http import request
from openerp.addons.website.models.website import slug
class WebsiteDoc(http.Controller):
@http.route(['/forum/how-to', '/forum/how-to/<model("forum.documentation.toc"):toc>'], type='http', auth="public", website=True)
def toc(self, toc=None, **kwargs):
cr, uid, context, toc_id = request.cr, request.uid, request.context, False
if toc:
sections = toc.child_ids
forum = toc.forum_id
else:
toc_obj = request.registry['forum.documentation.toc']
obj_ids = toc_obj.search(cr, uid, [('parent_id', '=', False)], context=context)
sections = toc_obj.browse(cr, uid, obj_ids, context=context)
forum = sections and sections[0].forum_id or False
value = {
'toc': toc,
'main_object': toc or forum,
'forum': forum,
'sections': sections,
}
return request.website.render("website_forum_doc.documentation", value)
@http.route(['''/forum/how-to/<model("forum.documentation.toc"):toc>/<model("forum.post", "[('documentation_toc_id','=',toc[0])]"):post>'''], type='http', auth="public", website=True)
def post(self, toc, post, **kwargs):
        # TODO: implement a redirect instead of crashing
assert post.documentation_toc_id.id == toc.id, "Wrong post!"
value = {
'toc': toc,
'post': post,
'main_object': post,
'forum': post.forum_id
}
return request.website.render("website_forum_doc.documentation_post", value)
@http.route('/forum/<model("forum.forum"):forum>/question/<model("forum.post"):post>/promote', type='http', auth="user", website=True)
def post_toc(self, forum, post, **kwargs):
cr, uid, context, toc_id = request.cr, request.uid, request.context, False
user = request.registry['res.users'].browse(cr, uid, uid, context=context)
assert user.karma >= 200, 'You need 200 karma to promote a post to the documentation'
toc_obj = request.registry['forum.documentation.toc']
obj_ids = toc_obj.search(cr, uid, [], context=context)
tocs = toc_obj.browse(cr, uid, obj_ids, context=context)
value = {
'post': post,
'forum': post.forum_id,
'chapters': filter(lambda x: not x.child_ids, tocs)
}
return request.website.render("website_forum_doc.promote_question", value)
@http.route('/forum/<model("forum.forum"):forum>/promote_ok', type='http', auth="user", website=True)
def post_toc_ok(self, forum, post_id, toc_id, **kwargs):
cr, uid, context = request.cr, request.uid, request.context
user = request.registry['res.users'].browse(cr, uid, uid, context=context)
assert user.karma >= 200, 'Not enough karma, you need 200 to promote a documentation.'
toc_obj = request.registry['forum.documentation.toc']
stage_ids = toc_obj.search(cr, uid, [], limit=1, context=context)
post_obj = request.registry['forum.post']
post_obj.write(cr, uid, [int(post_id)], {
'documentation_toc_id': toc_id and int(toc_id) or False,
'documentation_stage_id': stage_ids and stage_ids[0] or False
}, context=context)
return request.redirect('/forum/'+str(forum.id)+'/question/'+str(post_id))
| agpl-3.0 |
rishilification/phantomjs | src/qt/qtwebkit/Tools/BuildSlaveSupport/build.webkit.org-config/wkbuild_unittest.py | 116 | 4793 | # Copyright (C) 2010 Apple Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS'' AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import unittest
import wkbuild
class ShouldBuildTest(unittest.TestCase):
_should_build_tests = [
(["ChangeLog", "Source/WebCore/ChangeLog", "Source/WebKit2/ChangeLog-2011-02-11"], []),
(["GNUmakefile.am", "Source/WebCore/GNUmakefile.am"], ["gtk"]),
(["Websites/bugs.webkit.org/foo", "Source/WebCore/bar"], ["*"]),
(["Websites/bugs.webkit.org/foo"], []),
(["Source/JavaScriptCore/JavaScriptCore.xcodeproj/foo"], ["mac-leopard", "mac-lion", "mac-mountainlion", "mac-snowleopard"]),
(["Source/JavaScriptCore/JavaScriptCore.vcproj/foo", "Source/WebKit2/win/WebKit2.vcproj", "Source/WebKit/win/WebKit.sln", "Tools/WebKitTestRunner/Configurations/WebKitTestRunnerCommon.vsprops"], ["win"]),
(["LayoutTests/platform/mac/foo", "Source/WebCore/bar"], ["*"]),
(["LayoutTests/foo"], ["*"]),
(["LayoutTests/canvas/philip/tests/size.attributes.parse.exp-expected.txt", "LayoutTests/canvas/philip/tests/size.attributes.parse.exp.html"], ["*"]),
(["LayoutTests/platform/mac-leopard/foo"], ["mac-leopard"]),
(["LayoutTests/platform/mac-lion/foo"], ["mac-leopard", "mac-lion", "mac-snowleopard", "win"]),
(["LayoutTests/platform/mac-snowleopard/foo"], ["mac-leopard", "mac-snowleopard"]),
(["LayoutTests/platform/mac-wk2/Skipped"], ["mac-lion", "mac-mountainlion", "mac-snowleopard", "win"]),
(["LayoutTests/platform/mac/foo"], ["mac-leopard", "mac-lion", "mac-mountainlion", "mac-snowleopard", "win"]),
(["LayoutTests/platform/win-xp/foo"], ["win"]),
(["LayoutTests/platform/win-wk2/foo"], ["win"]),
(["LayoutTests/platform/win/foo"], ["win"]),
(["Source/WebCore.exp.in", "Source/WebKit/mac/WebKit.exp"], ["mac-leopard", "mac-lion", "mac-mountainlion", "mac-snowleopard"]),
(["Source/WebCore/mac/foo"], ["mac-leopard", "mac-lion", "mac-mountainlion", "mac-snowleopard"]),
(["Source/WebCore/win/foo"], ["win"]),
(["Source/WebCore/platform/graphics/gpu/foo"], ["mac-leopard", "mac-lion", "mac-mountainlion", "mac-snowleopard"]),
(["Source/WebCore/platform/wx/wxcode/win/foo"], []),
(["Source/WebCore/rendering/RenderThemeMac.mm", "Source/WebCore/rendering/RenderThemeMac.h"], ["mac-leopard", "mac-lion", "mac-mountainlion", "mac-snowleopard"]),
(["Source/WebCore/rendering/RenderThemeWinCE.h"], []),
(["Tools/BuildSlaveSupport/build.webkit.org-config/public_html/LeaksViewer/LeaksViewer.js"], []),
]
def test_should_build(self):
for files, platforms in self._should_build_tests:
# FIXME: We should test more platforms here once
# wkbuild._should_file_trigger_build is implemented for them.
for platform in ["mac-leopard", "mac-lion", "mac-mountainlion", "mac-snowleopard", "win"]:
should_build = platform in platforms or "*" in platforms
self.assertEqual(wkbuild.should_build(platform, files), should_build, "%s should%s have built but did%s (files: %s)" % (platform, "" if should_build else "n't", "n't" if should_build else "", str(files)))
# FIXME: We should run this file as part of test-rm.
# Unfortunately test-rm currently requires that unittests
# be located in a directory with a valid module name.
# 'build.webkit.org-config' is not a valid module name (due to '.' and '-')
# so for now this is a stand-alone test harness.
if __name__ == '__main__':
unittest.main()
| bsd-3-clause |
jobscore/sync-engine | inbox/contacts/algorithms.py | 3 | 7124 | import datetime
from collections import defaultdict
'''
This file currently contains algorithms for the contacts/rankings endpoint
and the groups/intrinsic endpoint.
'''
# For calculating message weights
LOOKBACK_TIME = 63072000.0 # datetime.timedelta(days=2*365).total_seconds()
MIN_MESSAGE_WEIGHT = .01
# For calculate_group_scores
MIN_GROUP_SIZE = 2
MIN_MESSAGE_COUNT = 2.5 # Might want to tune this param. (1.5, 2.5?)
SELF_IDENTITY_THRESHOLD = 0.3 # Also tunable
JACCARD_THRESHOLD = .35 # probably shouldn't tune this
SOCIAL_MOLECULE_EXPANSION_LIMIT = 1000 # Don't add too many molecules!
SOCIAL_MOLECULE_LIMIT = 5000 # Give up if there are too many messages
##
# Helper functions
##
def _get_message_weight(now, message_date):
timediff = now - message_date
weight = 1 - (timediff.total_seconds() / LOOKBACK_TIME)
return max(weight, MIN_MESSAGE_WEIGHT)
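# Worked example (added for illustration): with LOOKBACK_TIME being two years
# in seconds, a message sent exactly one year ago gets
#   weight = 1 - (31536000.0 / 63072000.0) = 0.5
# and anything older than two years is clamped to MIN_MESSAGE_WEIGHT.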
def _jaccard_similarity(set1, set2):
return len(set1.intersection(set2)) / float(len(set1.union(set2)))
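# Worked example (added for illustration):
#   _jaccard_similarity({'a', 'b', 'c'}, {'b', 'c', 'd'}) == 2 / 4.0 == 0.5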
def _get_participants(msg, excluded_emails=[]):
"""Returns an alphabetically sorted list of
    email addresses that msg was sent to (including cc and bcc)
"""
participants = msg.to_addr + msg.cc_addr + msg.bcc_addr
return sorted(list(set([email.lower() for _, email in participants
if email not in excluded_emails])))
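# Illustrative example (not in the original file): a message with
#   to_addr=[('Ann', 'Ann@x.com')] and cc_addr=[('Bob', 'bob@x.com')]
# yields ['ann@x.com', 'bob@x.com'] -- lowercased, de-duplicated, sorted.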
# Not really an algorithm, but it seemed reasonable to put this here?
def is_stale(last_updated, lifespan=14):
""" last_updated is a datetime.datetime object
lifespan is measured in days
"""
if last_updated is None:
return True
expiration_date = last_updated + datetime.timedelta(days=lifespan)
return datetime.datetime.now() > expiration_date
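# Example (added for illustration): with the default 14-day lifespan, a record
# last updated 20 days ago is stale; one updated yesterday is not.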
##
# The actual algorithms for contact rankings and groupings!
##
def calculate_contact_scores(messages, time_dependent=True):
now = datetime.datetime.now()
res = defaultdict(int)
for message in messages:
if time_dependent:
weight = _get_message_weight(now, message.date)
else:
weight = 1
recipients = message.to_addr + message.cc_addr + message.bcc_addr
for (name, email) in recipients:
res[email] += weight
return res
def calculate_group_counts(messages, user_email):
"""Strips out most of the logic from calculate_group_scores
algorithm and just returns raw counts for each group.
"""
res = defaultdict(int)
for msg in messages:
participants = _get_participants(msg, [user_email])
if len(participants) >= MIN_GROUP_SIZE:
res[', '.join(participants)] += 1
return res
def calculate_group_scores(messages, user_email):
"""This is a (modified) implementation of the algorithm described
in this paper: http://mobisocial.stanford.edu/papers/iui11g.pdf
messages must have the following properties:
to_addr - [('name1', 'email1@e.com'), ... ]
cc_addr - [('name1', 'email1@e.com'), ... ]
bcc_addr - [('name1', 'email1@e.com'), ... ]
date - datetime.datetime object
"""
now = datetime.datetime.now()
message_ids_to_scores = {}
molecules_dict = defaultdict(set) # (emails, ...) -> {message ids, ...}
def get_message_list_weight(message_ids):
return sum([message_ids_to_scores[m_id] for m_id in message_ids])
# Gather initial candidate social molecules
for msg in messages:
participants = _get_participants(msg, [user_email])
if len(participants) >= MIN_GROUP_SIZE:
molecules_dict[tuple(participants)].add(msg.id)
message_ids_to_scores[msg.id] = \
_get_message_weight(now, msg.date)
if len(molecules_dict) > SOCIAL_MOLECULE_LIMIT:
return {} # Not worth the calculation
# Expand pool of social molecules by taking pairwise intersections.
# If there are already too many molecules, skip this step.
if len(molecules_dict) < SOCIAL_MOLECULE_EXPANSION_LIMIT:
_expand_molecule_pool(molecules_dict)
# Filter out infrequent molecules
molecules_list = [(set(emails), set(msgs))
for (emails, msgs) in molecules_dict.iteritems()
if get_message_list_weight(msgs) >= MIN_MESSAGE_COUNT]
# Subsets get absorbed by supersets (if minimal info lost)
molecules_list = _subsume_molecules(
molecules_list, get_message_list_weight)
molecules_list = _combine_similar_molecules(molecules_list)
# Give a score to each group.
return {', '.join(sorted(g)): get_message_list_weight(m)
for (g, m) in molecules_list}
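# Illustrative result shape (not in the original file): the returned dict maps
# a comma-joined, sorted participant list to its accumulated message weight,
# e.g. {'ann@x.com, bob@x.com': 3.2} (emails and weight are hypothetical).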
# Helper functions for calculating group scores
def _expand_molecule_pool(molecules_dict):
mditems = [(set(g), msgs) for (g, msgs) in molecules_dict.items()]
for i in xrange(len(mditems)):
g1, m1 = mditems[i]
for j in xrange(i, len(mditems)):
g2, m2 = mditems[j]
new_molecule = tuple(sorted(list(g1.intersection(g2))))
if len(new_molecule) >= MIN_GROUP_SIZE:
molecules_dict[new_molecule] = \
molecules_dict[new_molecule].union(m1).union(m2)
def _subsume_molecules(molecules_list, get_message_list_weight):
molecules_list.sort(key=lambda x: len(x[0]), reverse=True)
is_subsumed = [False] * len(molecules_list)
mol_weights = [get_message_list_weight(m) for (_, m) in molecules_list]
for i in xrange(1, len(molecules_list)):
g1, m1 = molecules_list[i] # Smaller group
m1_size = mol_weights[i]
for j in xrange(i):
if is_subsumed[j]:
continue
g2, m2 = molecules_list[j] # Bigger group
m2_size = mol_weights[j]
if g1.issubset(g2):
sharing_error = ((len(g2) - len(g1)) * (m1_size - m2_size) /
(1.0 * (len(g2) * m1_size)))
if sharing_error < SELF_IDENTITY_THRESHOLD:
is_subsumed[i] = True
break
return [ml for (ml, dead) in zip(molecules_list, is_subsumed) if not dead]
def _combine_similar_molecules(molecules_list):
"""Using a greedy approach here for speed"""
new_guys_start_idx = 0
while new_guys_start_idx < len(molecules_list):
combined = [False] * len(molecules_list)
new_guys = []
for j in xrange(new_guys_start_idx, len(molecules_list)):
for i in xrange(0, j):
if combined[i]:
continue
(g1, m1), (g2, m2) = molecules_list[i], molecules_list[j]
js = _jaccard_similarity(g1, g2)
if js > JACCARD_THRESHOLD:
new_guys.append((g1.union(g2), m1.union(m2)))
combined[i], combined[j] = True, True
break
molecules_list = [molecule for molecule, was_combined
in zip(molecules_list, combined)
if not was_combined]
new_guys_start_idx = len(molecules_list)
molecules_list.extend(new_guys)
return molecules_list
| agpl-3.0 |
HiSPARC/station-software | user/python/Lib/site-packages/pip/_vendor/chardet/mbcssm.py | 289 | 25481 | ######################## BEGIN LICENSE BLOCK ########################
# The Original Code is mozilla.org code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
from .enums import MachineState
# BIG5
BIG5_CLS = (
1,1,1,1,1,1,1,1, # 00 - 07 #allow 0x00 as legal value
1,1,1,1,1,1,0,0, # 08 - 0f
1,1,1,1,1,1,1,1, # 10 - 17
1,1,1,0,1,1,1,1, # 18 - 1f
1,1,1,1,1,1,1,1, # 20 - 27
1,1,1,1,1,1,1,1, # 28 - 2f
1,1,1,1,1,1,1,1, # 30 - 37
1,1,1,1,1,1,1,1, # 38 - 3f
2,2,2,2,2,2,2,2, # 40 - 47
2,2,2,2,2,2,2,2, # 48 - 4f
2,2,2,2,2,2,2,2, # 50 - 57
2,2,2,2,2,2,2,2, # 58 - 5f
2,2,2,2,2,2,2,2, # 60 - 67
2,2,2,2,2,2,2,2, # 68 - 6f
2,2,2,2,2,2,2,2, # 70 - 77
2,2,2,2,2,2,2,1, # 78 - 7f
4,4,4,4,4,4,4,4, # 80 - 87
4,4,4,4,4,4,4,4, # 88 - 8f
4,4,4,4,4,4,4,4, # 90 - 97
4,4,4,4,4,4,4,4, # 98 - 9f
4,3,3,3,3,3,3,3, # a0 - a7
3,3,3,3,3,3,3,3, # a8 - af
3,3,3,3,3,3,3,3, # b0 - b7
3,3,3,3,3,3,3,3, # b8 - bf
3,3,3,3,3,3,3,3, # c0 - c7
3,3,3,3,3,3,3,3, # c8 - cf
3,3,3,3,3,3,3,3, # d0 - d7
3,3,3,3,3,3,3,3, # d8 - df
3,3,3,3,3,3,3,3, # e0 - e7
3,3,3,3,3,3,3,3, # e8 - ef
3,3,3,3,3,3,3,3, # f0 - f7
3,3,3,3,3,3,3,0 # f8 - ff
)
BIG5_ST = (
MachineState.ERROR,MachineState.START,MachineState.START, 3,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,#00-07
MachineState.ERROR,MachineState.ERROR,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ERROR,#08-0f
MachineState.ERROR,MachineState.START,MachineState.START,MachineState.START,MachineState.START,MachineState.START,MachineState.START,MachineState.START#10-17
)
BIG5_CHAR_LEN_TABLE = (0, 1, 1, 2, 0)
BIG5_SM_MODEL = {'class_table': BIG5_CLS,
'class_factor': 5,
'state_table': BIG5_ST,
'char_len_table': BIG5_CHAR_LEN_TABLE,
'name': 'Big5'}
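# Illustrative sketch (not part of this module): chardet's CodingStateMachine
# consumes a model like BIG5_SM_MODEL roughly one byte at a time:
#   byte_class = model['class_table'][byte]
#   next_state = model['state_table'][curr_state * model['class_factor']
#                                     + byte_class]
# with char_len_table supplying the expected character length whenever a new
# multi-byte character starts.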
# CP949
CP949_CLS = (
1,1,1,1,1,1,1,1, 1,1,1,1,1,1,0,0, # 00 - 0f
1,1,1,1,1,1,1,1, 1,1,1,0,1,1,1,1, # 10 - 1f
1,1,1,1,1,1,1,1, 1,1,1,1,1,1,1,1, # 20 - 2f
1,1,1,1,1,1,1,1, 1,1,1,1,1,1,1,1, # 30 - 3f
1,4,4,4,4,4,4,4, 4,4,4,4,4,4,4,4, # 40 - 4f
4,4,5,5,5,5,5,5, 5,5,5,1,1,1,1,1, # 50 - 5f
1,5,5,5,5,5,5,5, 5,5,5,5,5,5,5,5, # 60 - 6f
5,5,5,5,5,5,5,5, 5,5,5,1,1,1,1,1, # 70 - 7f
0,6,6,6,6,6,6,6, 6,6,6,6,6,6,6,6, # 80 - 8f
6,6,6,6,6,6,6,6, 6,6,6,6,6,6,6,6, # 90 - 9f
6,7,7,7,7,7,7,7, 7,7,7,7,7,8,8,8, # a0 - af
7,7,7,7,7,7,7,7, 7,7,7,7,7,7,7,7, # b0 - bf
7,7,7,7,7,7,9,2, 2,3,2,2,2,2,2,2, # c0 - cf
2,2,2,2,2,2,2,2, 2,2,2,2,2,2,2,2, # d0 - df
2,2,2,2,2,2,2,2, 2,2,2,2,2,2,2,2, # e0 - ef
2,2,2,2,2,2,2,2, 2,2,2,2,2,2,2,0, # f0 - ff
)
CP949_ST = (
#cls= 0 1 2 3 4 5 6 7 8 9 # previous state =
MachineState.ERROR,MachineState.START, 3,MachineState.ERROR,MachineState.START,MachineState.START, 4, 5,MachineState.ERROR, 6, # MachineState.START
MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR, # MachineState.ERROR
MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME, # MachineState.ITS_ME
MachineState.ERROR,MachineState.ERROR,MachineState.START,MachineState.START,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.START,MachineState.START,MachineState.START, # 3
MachineState.ERROR,MachineState.ERROR,MachineState.START,MachineState.START,MachineState.START,MachineState.START,MachineState.START,MachineState.START,MachineState.START,MachineState.START, # 4
MachineState.ERROR,MachineState.START,MachineState.START,MachineState.START,MachineState.START,MachineState.START,MachineState.START,MachineState.START,MachineState.START,MachineState.START, # 5
MachineState.ERROR,MachineState.START,MachineState.START,MachineState.START,MachineState.START,MachineState.ERROR,MachineState.ERROR,MachineState.START,MachineState.START,MachineState.START, # 6
)
CP949_CHAR_LEN_TABLE = (0, 1, 2, 0, 1, 1, 2, 2, 0, 2)
CP949_SM_MODEL = {'class_table': CP949_CLS,
'class_factor': 10,
'state_table': CP949_ST,
'char_len_table': CP949_CHAR_LEN_TABLE,
'name': 'CP949'}
# EUC-JP
EUCJP_CLS = (
4,4,4,4,4,4,4,4, # 00 - 07
4,4,4,4,4,4,5,5, # 08 - 0f
4,4,4,4,4,4,4,4, # 10 - 17
4,4,4,5,4,4,4,4, # 18 - 1f
4,4,4,4,4,4,4,4, # 20 - 27
4,4,4,4,4,4,4,4, # 28 - 2f
4,4,4,4,4,4,4,4, # 30 - 37
4,4,4,4,4,4,4,4, # 38 - 3f
4,4,4,4,4,4,4,4, # 40 - 47
4,4,4,4,4,4,4,4, # 48 - 4f
4,4,4,4,4,4,4,4, # 50 - 57
4,4,4,4,4,4,4,4, # 58 - 5f
4,4,4,4,4,4,4,4, # 60 - 67
4,4,4,4,4,4,4,4, # 68 - 6f
4,4,4,4,4,4,4,4, # 70 - 77
4,4,4,4,4,4,4,4, # 78 - 7f
5,5,5,5,5,5,5,5, # 80 - 87
5,5,5,5,5,5,1,3, # 88 - 8f
5,5,5,5,5,5,5,5, # 90 - 97
5,5,5,5,5,5,5,5, # 98 - 9f
5,2,2,2,2,2,2,2, # a0 - a7
2,2,2,2,2,2,2,2, # a8 - af
2,2,2,2,2,2,2,2, # b0 - b7
2,2,2,2,2,2,2,2, # b8 - bf
2,2,2,2,2,2,2,2, # c0 - c7
2,2,2,2,2,2,2,2, # c8 - cf
2,2,2,2,2,2,2,2, # d0 - d7
2,2,2,2,2,2,2,2, # d8 - df
0,0,0,0,0,0,0,0, # e0 - e7
0,0,0,0,0,0,0,0, # e8 - ef
0,0,0,0,0,0,0,0, # f0 - f7
0,0,0,0,0,0,0,5 # f8 - ff
)
EUCJP_ST = (
3, 4, 3, 5,MachineState.START,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,#00-07
MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,#08-0f
MachineState.ITS_ME,MachineState.ITS_ME,MachineState.START,MachineState.ERROR,MachineState.START,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,#10-17
MachineState.ERROR,MachineState.ERROR,MachineState.START,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR, 3,MachineState.ERROR,#18-1f
3,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.START,MachineState.START,MachineState.START,MachineState.START#20-27
)
EUCJP_CHAR_LEN_TABLE = (2, 2, 2, 3, 1, 0)
EUCJP_SM_MODEL = {'class_table': EUCJP_CLS,
'class_factor': 6,
'state_table': EUCJP_ST,
'char_len_table': EUCJP_CHAR_LEN_TABLE,
'name': 'EUC-JP'}
# EUC-KR
EUCKR_CLS = (
1,1,1,1,1,1,1,1, # 00 - 07
1,1,1,1,1,1,0,0, # 08 - 0f
1,1,1,1,1,1,1,1, # 10 - 17
1,1,1,0,1,1,1,1, # 18 - 1f
1,1,1,1,1,1,1,1, # 20 - 27
1,1,1,1,1,1,1,1, # 28 - 2f
1,1,1,1,1,1,1,1, # 30 - 37
1,1,1,1,1,1,1,1, # 38 - 3f
1,1,1,1,1,1,1,1, # 40 - 47
1,1,1,1,1,1,1,1, # 48 - 4f
1,1,1,1,1,1,1,1, # 50 - 57
1,1,1,1,1,1,1,1, # 58 - 5f
1,1,1,1,1,1,1,1, # 60 - 67
1,1,1,1,1,1,1,1, # 68 - 6f
1,1,1,1,1,1,1,1, # 70 - 77
1,1,1,1,1,1,1,1, # 78 - 7f
0,0,0,0,0,0,0,0, # 80 - 87
0,0,0,0,0,0,0,0, # 88 - 8f
0,0,0,0,0,0,0,0, # 90 - 97
0,0,0,0,0,0,0,0, # 98 - 9f
0,2,2,2,2,2,2,2, # a0 - a7
2,2,2,2,2,3,3,3, # a8 - af
2,2,2,2,2,2,2,2, # b0 - b7
2,2,2,2,2,2,2,2, # b8 - bf
2,2,2,2,2,2,2,2, # c0 - c7
2,3,2,2,2,2,2,2, # c8 - cf
2,2,2,2,2,2,2,2, # d0 - d7
2,2,2,2,2,2,2,2, # d8 - df
2,2,2,2,2,2,2,2, # e0 - e7
2,2,2,2,2,2,2,2, # e8 - ef
2,2,2,2,2,2,2,2, # f0 - f7
2,2,2,2,2,2,2,0 # f8 - ff
)
EUCKR_ST = (
MachineState.ERROR,MachineState.START, 3,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,#00-07
MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ERROR,MachineState.ERROR,MachineState.START,MachineState.START #08-0f
)
EUCKR_CHAR_LEN_TABLE = (0, 1, 2, 0)
EUCKR_SM_MODEL = {'class_table': EUCKR_CLS,
'class_factor': 4,
'state_table': EUCKR_ST,
'char_len_table': EUCKR_CHAR_LEN_TABLE,
'name': 'EUC-KR'}
# EUC-TW
EUCTW_CLS = (
2,2,2,2,2,2,2,2, # 00 - 07
2,2,2,2,2,2,0,0, # 08 - 0f
2,2,2,2,2,2,2,2, # 10 - 17
2,2,2,0,2,2,2,2, # 18 - 1f
2,2,2,2,2,2,2,2, # 20 - 27
2,2,2,2,2,2,2,2, # 28 - 2f
2,2,2,2,2,2,2,2, # 30 - 37
2,2,2,2,2,2,2,2, # 38 - 3f
2,2,2,2,2,2,2,2, # 40 - 47
2,2,2,2,2,2,2,2, # 48 - 4f
2,2,2,2,2,2,2,2, # 50 - 57
2,2,2,2,2,2,2,2, # 58 - 5f
2,2,2,2,2,2,2,2, # 60 - 67
2,2,2,2,2,2,2,2, # 68 - 6f
2,2,2,2,2,2,2,2, # 70 - 77
2,2,2,2,2,2,2,2, # 78 - 7f
0,0,0,0,0,0,0,0, # 80 - 87
0,0,0,0,0,0,6,0, # 88 - 8f
0,0,0,0,0,0,0,0, # 90 - 97
0,0,0,0,0,0,0,0, # 98 - 9f
0,3,4,4,4,4,4,4, # a0 - a7
5,5,1,1,1,1,1,1, # a8 - af
1,1,1,1,1,1,1,1, # b0 - b7
1,1,1,1,1,1,1,1, # b8 - bf
1,1,3,1,3,3,3,3, # c0 - c7
3,3,3,3,3,3,3,3, # c8 - cf
3,3,3,3,3,3,3,3, # d0 - d7
3,3,3,3,3,3,3,3, # d8 - df
3,3,3,3,3,3,3,3, # e0 - e7
3,3,3,3,3,3,3,3, # e8 - ef
3,3,3,3,3,3,3,3, # f0 - f7
3,3,3,3,3,3,3,0 # f8 - ff
)
EUCTW_ST = (
MachineState.ERROR,MachineState.ERROR,MachineState.START, 3, 3, 3, 4,MachineState.ERROR,#00-07
MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ITS_ME,MachineState.ITS_ME,#08-0f
MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ERROR,MachineState.START,MachineState.ERROR,#10-17
MachineState.START,MachineState.START,MachineState.START,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,#18-1f
5,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.START,MachineState.ERROR,MachineState.START,MachineState.START,#20-27
MachineState.START,MachineState.ERROR,MachineState.START,MachineState.START,MachineState.START,MachineState.START,MachineState.START,MachineState.START #28-2f
)
EUCTW_CHAR_LEN_TABLE = (0, 0, 1, 2, 2, 2, 3)
EUCTW_SM_MODEL = {'class_table': EUCTW_CLS,
'class_factor': 7,
'state_table': EUCTW_ST,
'char_len_table': EUCTW_CHAR_LEN_TABLE,
'name': 'x-euc-tw'}
# GB2312
GB2312_CLS = (
1,1,1,1,1,1,1,1, # 00 - 07
1,1,1,1,1,1,0,0, # 08 - 0f
1,1,1,1,1,1,1,1, # 10 - 17
1,1,1,0,1,1,1,1, # 18 - 1f
1,1,1,1,1,1,1,1, # 20 - 27
1,1,1,1,1,1,1,1, # 28 - 2f
3,3,3,3,3,3,3,3, # 30 - 37
3,3,1,1,1,1,1,1, # 38 - 3f
2,2,2,2,2,2,2,2, # 40 - 47
2,2,2,2,2,2,2,2, # 48 - 4f
2,2,2,2,2,2,2,2, # 50 - 57
2,2,2,2,2,2,2,2, # 58 - 5f
2,2,2,2,2,2,2,2, # 60 - 67
2,2,2,2,2,2,2,2, # 68 - 6f
2,2,2,2,2,2,2,2, # 70 - 77
2,2,2,2,2,2,2,4, # 78 - 7f
5,6,6,6,6,6,6,6, # 80 - 87
6,6,6,6,6,6,6,6, # 88 - 8f
6,6,6,6,6,6,6,6, # 90 - 97
6,6,6,6,6,6,6,6, # 98 - 9f
6,6,6,6,6,6,6,6, # a0 - a7
6,6,6,6,6,6,6,6, # a8 - af
6,6,6,6,6,6,6,6, # b0 - b7
6,6,6,6,6,6,6,6, # b8 - bf
6,6,6,6,6,6,6,6, # c0 - c7
6,6,6,6,6,6,6,6, # c8 - cf
6,6,6,6,6,6,6,6, # d0 - d7
6,6,6,6,6,6,6,6, # d8 - df
6,6,6,6,6,6,6,6, # e0 - e7
6,6,6,6,6,6,6,6, # e8 - ef
6,6,6,6,6,6,6,6, # f0 - f7
6,6,6,6,6,6,6,0 # f8 - ff
)
GB2312_ST = (
MachineState.ERROR,MachineState.START,MachineState.START,MachineState.START,MachineState.START,MachineState.START, 3,MachineState.ERROR,#00-07
MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ITS_ME,MachineState.ITS_ME,#08-0f
MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ERROR,MachineState.ERROR,MachineState.START,#10-17
4,MachineState.ERROR,MachineState.START,MachineState.START,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,#18-1f
MachineState.ERROR,MachineState.ERROR, 5,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ITS_ME,MachineState.ERROR,#20-27
MachineState.ERROR,MachineState.ERROR,MachineState.START,MachineState.START,MachineState.START,MachineState.START,MachineState.START,MachineState.START #28-2f
)
# To be accurate, the length of class 6 can be either 2 or 4.
# But it is not necessary to discriminate between the two since
# it is used for frequency analysis only, and we are validating
# each code range there as well. So it is safe to set it to be
# 2 here.
GB2312_CHAR_LEN_TABLE = (0, 1, 1, 1, 1, 1, 2)
GB2312_SM_MODEL = {'class_table': GB2312_CLS,
'class_factor': 7,
'state_table': GB2312_ST,
'char_len_table': GB2312_CHAR_LEN_TABLE,
'name': 'GB2312'}
# Shift_JIS
SJIS_CLS = (
1,1,1,1,1,1,1,1, # 00 - 07
1,1,1,1,1,1,0,0, # 08 - 0f
1,1,1,1,1,1,1,1, # 10 - 17
1,1,1,0,1,1,1,1, # 18 - 1f
1,1,1,1,1,1,1,1, # 20 - 27
1,1,1,1,1,1,1,1, # 28 - 2f
1,1,1,1,1,1,1,1, # 30 - 37
1,1,1,1,1,1,1,1, # 38 - 3f
2,2,2,2,2,2,2,2, # 40 - 47
2,2,2,2,2,2,2,2, # 48 - 4f
2,2,2,2,2,2,2,2, # 50 - 57
2,2,2,2,2,2,2,2, # 58 - 5f
2,2,2,2,2,2,2,2, # 60 - 67
2,2,2,2,2,2,2,2, # 68 - 6f
2,2,2,2,2,2,2,2, # 70 - 77
2,2,2,2,2,2,2,1, # 78 - 7f
3,3,3,3,3,2,2,3, # 80 - 87
3,3,3,3,3,3,3,3, # 88 - 8f
3,3,3,3,3,3,3,3, # 90 - 97
3,3,3,3,3,3,3,3, # 98 - 9f
    #0xa0 is illegal in sjis encoding, but some pages do
    #contain such bytes. We need to be more error forgiving.
2,2,2,2,2,2,2,2, # a0 - a7
2,2,2,2,2,2,2,2, # a8 - af
2,2,2,2,2,2,2,2, # b0 - b7
2,2,2,2,2,2,2,2, # b8 - bf
2,2,2,2,2,2,2,2, # c0 - c7
2,2,2,2,2,2,2,2, # c8 - cf
2,2,2,2,2,2,2,2, # d0 - d7
2,2,2,2,2,2,2,2, # d8 - df
3,3,3,3,3,3,3,3, # e0 - e7
3,3,3,3,3,4,4,4, # e8 - ef
3,3,3,3,3,3,3,3, # f0 - f7
3,3,3,3,3,0,0,0) # f8 - ff
SJIS_ST = (
MachineState.ERROR,MachineState.START,MachineState.START, 3,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,#00-07
MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,#08-0f
MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ERROR,MachineState.ERROR,MachineState.START,MachineState.START,MachineState.START,MachineState.START #10-17
)
SJIS_CHAR_LEN_TABLE = (0, 1, 1, 2, 0, 0)
SJIS_SM_MODEL = {'class_table': SJIS_CLS,
'class_factor': 6,
'state_table': SJIS_ST,
'char_len_table': SJIS_CHAR_LEN_TABLE,
'name': 'Shift_JIS'}
# UCS2-BE
UCS2BE_CLS = (
0,0,0,0,0,0,0,0, # 00 - 07
0,0,1,0,0,2,0,0, # 08 - 0f
0,0,0,0,0,0,0,0, # 10 - 17
0,0,0,3,0,0,0,0, # 18 - 1f
0,0,0,0,0,0,0,0, # 20 - 27
0,3,3,3,3,3,0,0, # 28 - 2f
0,0,0,0,0,0,0,0, # 30 - 37
0,0,0,0,0,0,0,0, # 38 - 3f
0,0,0,0,0,0,0,0, # 40 - 47
0,0,0,0,0,0,0,0, # 48 - 4f
0,0,0,0,0,0,0,0, # 50 - 57
0,0,0,0,0,0,0,0, # 58 - 5f
0,0,0,0,0,0,0,0, # 60 - 67
0,0,0,0,0,0,0,0, # 68 - 6f
0,0,0,0,0,0,0,0, # 70 - 77
0,0,0,0,0,0,0,0, # 78 - 7f
0,0,0,0,0,0,0,0, # 80 - 87
0,0,0,0,0,0,0,0, # 88 - 8f
0,0,0,0,0,0,0,0, # 90 - 97
0,0,0,0,0,0,0,0, # 98 - 9f
0,0,0,0,0,0,0,0, # a0 - a7
0,0,0,0,0,0,0,0, # a8 - af
0,0,0,0,0,0,0,0, # b0 - b7
0,0,0,0,0,0,0,0, # b8 - bf
0,0,0,0,0,0,0,0, # c0 - c7
0,0,0,0,0,0,0,0, # c8 - cf
0,0,0,0,0,0,0,0, # d0 - d7
0,0,0,0,0,0,0,0, # d8 - df
0,0,0,0,0,0,0,0, # e0 - e7
0,0,0,0,0,0,0,0, # e8 - ef
0,0,0,0,0,0,0,0, # f0 - f7
0,0,0,0,0,0,4,5 # f8 - ff
)
UCS2BE_ST = (
5, 7, 7,MachineState.ERROR, 4, 3,MachineState.ERROR,MachineState.ERROR,#00-07
MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,#08-0f
MachineState.ITS_ME,MachineState.ITS_ME, 6, 6, 6, 6,MachineState.ERROR,MachineState.ERROR,#10-17
6, 6, 6, 6, 6,MachineState.ITS_ME, 6, 6,#18-1f
6, 6, 6, 6, 5, 7, 7,MachineState.ERROR,#20-27
5, 8, 6, 6,MachineState.ERROR, 6, 6, 6,#28-2f
6, 6, 6, 6,MachineState.ERROR,MachineState.ERROR,MachineState.START,MachineState.START #30-37
)
UCS2BE_CHAR_LEN_TABLE = (2, 2, 2, 0, 2, 2)
UCS2BE_SM_MODEL = {'class_table': UCS2BE_CLS,
'class_factor': 6,
'state_table': UCS2BE_ST,
'char_len_table': UCS2BE_CHAR_LEN_TABLE,
'name': 'UTF-16BE'}
# UCS2-LE
UCS2LE_CLS = (
0,0,0,0,0,0,0,0, # 00 - 07
0,0,1,0,0,2,0,0, # 08 - 0f
0,0,0,0,0,0,0,0, # 10 - 17
0,0,0,3,0,0,0,0, # 18 - 1f
0,0,0,0,0,0,0,0, # 20 - 27
0,3,3,3,3,3,0,0, # 28 - 2f
0,0,0,0,0,0,0,0, # 30 - 37
0,0,0,0,0,0,0,0, # 38 - 3f
0,0,0,0,0,0,0,0, # 40 - 47
0,0,0,0,0,0,0,0, # 48 - 4f
0,0,0,0,0,0,0,0, # 50 - 57
0,0,0,0,0,0,0,0, # 58 - 5f
0,0,0,0,0,0,0,0, # 60 - 67
0,0,0,0,0,0,0,0, # 68 - 6f
0,0,0,0,0,0,0,0, # 70 - 77
0,0,0,0,0,0,0,0, # 78 - 7f
0,0,0,0,0,0,0,0, # 80 - 87
0,0,0,0,0,0,0,0, # 88 - 8f
0,0,0,0,0,0,0,0, # 90 - 97
0,0,0,0,0,0,0,0, # 98 - 9f
0,0,0,0,0,0,0,0, # a0 - a7
0,0,0,0,0,0,0,0, # a8 - af
0,0,0,0,0,0,0,0, # b0 - b7
0,0,0,0,0,0,0,0, # b8 - bf
0,0,0,0,0,0,0,0, # c0 - c7
0,0,0,0,0,0,0,0, # c8 - cf
0,0,0,0,0,0,0,0, # d0 - d7
0,0,0,0,0,0,0,0, # d8 - df
0,0,0,0,0,0,0,0, # e0 - e7
0,0,0,0,0,0,0,0, # e8 - ef
0,0,0,0,0,0,0,0, # f0 - f7
0,0,0,0,0,0,4,5 # f8 - ff
)
UCS2LE_ST = (
6, 6, 7, 6, 4, 3,MachineState.ERROR,MachineState.ERROR,#00-07
MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,#08-0f
MachineState.ITS_ME,MachineState.ITS_ME, 5, 5, 5,MachineState.ERROR,MachineState.ITS_ME,MachineState.ERROR,#10-17
5, 5, 5,MachineState.ERROR, 5,MachineState.ERROR, 6, 6,#18-1f
7, 6, 8, 8, 5, 5, 5,MachineState.ERROR,#20-27
5, 5, 5,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR, 5, 5,#28-2f
5, 5, 5,MachineState.ERROR, 5,MachineState.ERROR,MachineState.START,MachineState.START #30-37
)
UCS2LE_CHAR_LEN_TABLE = (2, 2, 2, 2, 2, 2)
UCS2LE_SM_MODEL = {'class_table': UCS2LE_CLS,
'class_factor': 6,
'state_table': UCS2LE_ST,
'char_len_table': UCS2LE_CHAR_LEN_TABLE,
'name': 'UTF-16LE'}
# UTF-8
UTF8_CLS = (
1,1,1,1,1,1,1,1, # 00 - 07 #allow 0x00 as a legal value
1,1,1,1,1,1,0,0, # 08 - 0f
1,1,1,1,1,1,1,1, # 10 - 17
1,1,1,0,1,1,1,1, # 18 - 1f
1,1,1,1,1,1,1,1, # 20 - 27
1,1,1,1,1,1,1,1, # 28 - 2f
1,1,1,1,1,1,1,1, # 30 - 37
1,1,1,1,1,1,1,1, # 38 - 3f
1,1,1,1,1,1,1,1, # 40 - 47
1,1,1,1,1,1,1,1, # 48 - 4f
1,1,1,1,1,1,1,1, # 50 - 57
1,1,1,1,1,1,1,1, # 58 - 5f
1,1,1,1,1,1,1,1, # 60 - 67
1,1,1,1,1,1,1,1, # 68 - 6f
1,1,1,1,1,1,1,1, # 70 - 77
1,1,1,1,1,1,1,1, # 78 - 7f
2,2,2,2,3,3,3,3, # 80 - 87
4,4,4,4,4,4,4,4, # 88 - 8f
4,4,4,4,4,4,4,4, # 90 - 97
4,4,4,4,4,4,4,4, # 98 - 9f
5,5,5,5,5,5,5,5, # a0 - a7
5,5,5,5,5,5,5,5, # a8 - af
5,5,5,5,5,5,5,5, # b0 - b7
5,5,5,5,5,5,5,5, # b8 - bf
0,0,6,6,6,6,6,6, # c0 - c7
6,6,6,6,6,6,6,6, # c8 - cf
6,6,6,6,6,6,6,6, # d0 - d7
6,6,6,6,6,6,6,6, # d8 - df
7,8,8,8,8,8,8,8, # e0 - e7
8,8,8,8,8,9,8,8, # e8 - ef
10,11,11,11,11,11,11,11, # f0 - f7
12,13,13,13,14,15,0,0 # f8 - ff
)
UTF8_ST = (
MachineState.ERROR,MachineState.START,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR, 12, 10,#00-07
9, 11, 8, 7, 6, 5, 4, 3,#08-0f
MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,#10-17
MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,#18-1f
MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,#20-27
MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,#28-2f
MachineState.ERROR,MachineState.ERROR, 5, 5, 5, 5,MachineState.ERROR,MachineState.ERROR,#30-37
MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,#38-3f
MachineState.ERROR,MachineState.ERROR,MachineState.ERROR, 5, 5, 5,MachineState.ERROR,MachineState.ERROR,#40-47
MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,#48-4f
MachineState.ERROR,MachineState.ERROR, 7, 7, 7, 7,MachineState.ERROR,MachineState.ERROR,#50-57
MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,#58-5f
MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR, 7, 7,MachineState.ERROR,MachineState.ERROR,#60-67
MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,#68-6f
MachineState.ERROR,MachineState.ERROR, 9, 9, 9, 9,MachineState.ERROR,MachineState.ERROR,#70-77
MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,#78-7f
MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR, 9,MachineState.ERROR,MachineState.ERROR,#80-87
MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,#88-8f
MachineState.ERROR,MachineState.ERROR, 12, 12, 12, 12,MachineState.ERROR,MachineState.ERROR,#90-97
MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,#98-9f
MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR, 12,MachineState.ERROR,MachineState.ERROR,#a0-a7
MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,#a8-af
MachineState.ERROR,MachineState.ERROR, 12, 12, 12,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,#b0-b7
MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,#b8-bf
MachineState.ERROR,MachineState.ERROR,MachineState.START,MachineState.START,MachineState.START,MachineState.START,MachineState.ERROR,MachineState.ERROR,#c0-c7
MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR #c8-cf
)
UTF8_CHAR_LEN_TABLE = (0, 1, 0, 0, 0, 0, 2, 3, 3, 3, 4, 4, 5, 5, 6, 6)
UTF8_SM_MODEL = {'class_table': UTF8_CLS,
'class_factor': 16,
'state_table': UTF8_ST,
'char_len_table': UTF8_CHAR_LEN_TABLE,
'name': 'UTF-8'}
| gpl-3.0 |
hwroitzsch/DayLikeTodayClone | venv/lib/python3.5/site-packages/wheel/util.py | 219 | 4192 | """Utility functions."""
import sys
import os
import base64
import json
import hashlib
__all__ = ['urlsafe_b64encode', 'urlsafe_b64decode', 'utf8',
'to_json', 'from_json', 'matches_requirement']
def urlsafe_b64encode(data):
"""urlsafe_b64encode without padding"""
return base64.urlsafe_b64encode(data).rstrip(binary('='))
def urlsafe_b64decode(data):
"""urlsafe_b64decode without padding"""
pad = b'=' * (4 - (len(data) & 3))
return base64.urlsafe_b64decode(data + pad)
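# Worked example (added for illustration):
#   urlsafe_b64encode(b'ab') == b'YWI'   # trailing '=' padding stripped
#   urlsafe_b64decode(b'YWI') == b'ab'   # padding restored before decoding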
def to_json(o):
'''Convert given data to JSON.'''
return json.dumps(o, sort_keys=True)
def from_json(j):
'''Decode a JSON payload.'''
return json.loads(j)
def open_for_csv(name, mode):
if sys.version_info[0] < 3:
nl = {}
bin = 'b'
else:
nl = { 'newline': '' }
bin = ''
return open(name, mode + bin, **nl)
try:
unicode
def utf8(data):
'''Utf-8 encode data.'''
if isinstance(data, unicode):
return data.encode('utf-8')
return data
except NameError:
def utf8(data):
'''Utf-8 encode data.'''
if isinstance(data, str):
return data.encode('utf-8')
return data
try:
# For encoding ascii back and forth between bytestrings, as is repeatedly
# necessary in JSON-based crypto under Python 3
unicode
def native(s):
return s
def binary(s):
if isinstance(s, unicode):
return s.encode('ascii')
return s
except NameError:
def native(s):
if isinstance(s, bytes):
return s.decode('ascii')
return s
    def binary(s):
        if isinstance(s, str):
            return s.encode('ascii')
        return s
class HashingFile(object):
def __init__(self, fd, hashtype='sha256'):
self.fd = fd
self.hashtype = hashtype
self.hash = hashlib.new(hashtype)
self.length = 0
def write(self, data):
self.hash.update(data)
self.length += len(data)
self.fd.write(data)
def close(self):
self.fd.close()
def digest(self):
if self.hashtype == 'md5':
return self.hash.hexdigest()
digest = self.hash.digest()
return self.hashtype + '=' + native(urlsafe_b64encode(digest))
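# Usage sketch (illustrative; the file name is hypothetical):
#   hf = HashingFile(open('pkg.whl', 'wb'))
#   hf.write(b'wheel bytes')
#   hf.close()
#   hf.digest()  # e.g. 'sha256=<urlsafe-base64 digest>'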
if sys.platform == 'win32':
import ctypes.wintypes
# CSIDL_APPDATA for reference - not used here for compatibility with
# dirspec, which uses LOCAL_APPDATA and COMMON_APPDATA in that order
csidl = dict(CSIDL_APPDATA=26, CSIDL_LOCAL_APPDATA=28,
CSIDL_COMMON_APPDATA=35)
def get_path(name):
SHGFP_TYPE_CURRENT = 0
buf = ctypes.create_unicode_buffer(ctypes.wintypes.MAX_PATH)
ctypes.windll.shell32.SHGetFolderPathW(0, csidl[name], 0, SHGFP_TYPE_CURRENT, buf)
return buf.value
def save_config_path(*resource):
appdata = get_path("CSIDL_LOCAL_APPDATA")
path = os.path.join(appdata, *resource)
if not os.path.isdir(path):
os.makedirs(path)
return path
def load_config_paths(*resource):
ids = ["CSIDL_LOCAL_APPDATA", "CSIDL_COMMON_APPDATA"]
for id in ids:
base = get_path(id)
path = os.path.join(base, *resource)
if os.path.exists(path):
yield path
else:
def save_config_path(*resource):
import xdg.BaseDirectory
return xdg.BaseDirectory.save_config_path(*resource)
def load_config_paths(*resource):
import xdg.BaseDirectory
return xdg.BaseDirectory.load_config_paths(*resource)
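# Hedged usage note covering either branch: save_config_path('wheel') returns
# (creating if necessary) a per-user config directory, while
# list(load_config_paths('wheel')) yields the existing config directories with
# the user-specific one first.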
def matches_requirement(req, wheels):
"""List of wheels matching a requirement.
:param req: The requirement to satisfy
:param wheels: List of wheels to search.
"""
try:
from pkg_resources import Distribution, Requirement
except ImportError:
raise RuntimeError("Cannot use requirements without pkg_resources")
req = Requirement.parse(req)
selected = []
for wf in wheels:
f = wf.parsed_filename
dist = Distribution(project_name=f.group("name"), version=f.group("ver"))
if dist in req:
selected.append(wf)
return selected
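# Usage sketch under an assumption: each element of `wheels` must expose a
# `parsed_filename` regex match with "name" and "ver" groups, as the
# WheelFile objects in older wheel releases did.
#
#     from wheel.install import WheelFile   # older wheel releases only
#     wheels = [WheelFile('simple.dist-0.1-py2.py3-none-any.whl')]
#     matches_requirement('simple.dist>=0.1', wheels)   # -> [wheels[0]]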
| mit |
596acres/livinglots-nola | livinglotsnola/pathways/migrations/0001_initial.py | 1 | 12098 | # -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'Pathway'
db.create_table(u'pathways_pathway', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('name', self.gf('django.db.models.fields.CharField')(max_length=256)),
('slug', self.gf('django.db.models.fields.SlugField')(max_length=256)),
('is_active', self.gf('django.db.models.fields.BooleanField')(default=True, db_index=True)),
('author', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['auth.User'], null=True, blank=True)),
('private_owners', self.gf('django.db.models.fields.BooleanField')()),
('public_owners', self.gf('django.db.models.fields.BooleanField')()),
('language', self.gf('django.db.models.fields.CharField')(default='en', max_length=10)),
('translation_of', self.gf('django.db.models.fields.related.ForeignKey')(blank=True, related_name='translations', null=True, to=orm['pathways.Pathway'])),
))
db.send_create_signal(u'pathways', ['Pathway'])
# Adding M2M table for field specific_public_owners on 'Pathway'
m2m_table_name = db.shorten_name(u'pathways_pathway_specific_public_owners')
db.create_table(m2m_table_name, (
('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
('pathway', models.ForeignKey(orm[u'pathways.pathway'], null=False)),
('owner', models.ForeignKey(orm[u'owners.owner'], null=False))
))
db.create_unique(m2m_table_name, ['pathway_id', 'owner_id'])
# Adding model 'RichTextContent'
db.create_table(u'pathways_pathway_richtextcontent', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('text', self.gf('feincms.contrib.richtext.RichTextField')(blank=True)),
('parent', self.gf('django.db.models.fields.related.ForeignKey')(related_name='richtextcontent_set', to=orm['pathways.Pathway'])),
('region', self.gf('django.db.models.fields.CharField')(max_length=255)),
('ordering', self.gf('django.db.models.fields.IntegerField')(default=0)),
))
db.send_create_signal(u'pathways', ['RichTextContent'])
# Adding model 'MediaFileContent'
db.create_table(u'pathways_pathway_mediafilecontent', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('mediafile', self.gf('feincms.module.medialibrary.fields.MediaFileForeignKey')(related_name='+', to=orm['medialibrary.MediaFile'])),
('parent', self.gf('django.db.models.fields.related.ForeignKey')(related_name='mediafilecontent_set', to=orm['pathways.Pathway'])),
('region', self.gf('django.db.models.fields.CharField')(max_length=255)),
('ordering', self.gf('django.db.models.fields.IntegerField')(default=0)),
('type', self.gf('django.db.models.fields.CharField')(default='default', max_length=20)),
))
db.send_create_signal(u'pathways', ['MediaFileContent'])
def backwards(self, orm):
# Deleting model 'Pathway'
db.delete_table(u'pathways_pathway')
# Removing M2M table for field specific_public_owners on 'Pathway'
db.delete_table(db.shorten_name(u'pathways_pathway_specific_public_owners'))
# Deleting model 'RichTextContent'
db.delete_table(u'pathways_pathway_richtextcontent')
# Deleting model 'MediaFileContent'
db.delete_table(u'pathways_pathway_mediafilecontent')
models = {
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Group']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Permission']"}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'livinglots_owners.alias': {
'Meta': {'object_name': 'Alias'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '256'})
},
u'medialibrary.category': {
'Meta': {'ordering': "['parent__title', 'title']", 'object_name': 'Category'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'children'", 'null': 'True', 'to': u"orm['medialibrary.Category']"}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '150'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '200'})
},
u'medialibrary.mediafile': {
'Meta': {'ordering': "['-created']", 'object_name': 'MediaFile'},
'categories': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': u"orm['medialibrary.Category']", 'null': 'True', 'blank': 'True'}),
'copyright': ('django.db.models.fields.CharField', [], {'max_length': '200', 'blank': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'file': ('django.db.models.fields.files.FileField', [], {'max_length': '255'}),
'file_size': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'type': ('django.db.models.fields.CharField', [], {'max_length': '12'})
},
u'owners.owner': {
'Meta': {'object_name': 'Owner'},
'aliases': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': u"orm['livinglots_owners.Alias']", 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '256'}),
'owner_type': ('django.db.models.fields.CharField', [], {'default': "'private'", 'max_length': '20'})
},
u'pathways.mediafilecontent': {
'Meta': {'ordering': "['ordering']", 'object_name': 'MediaFileContent', 'db_table': "u'pathways_pathway_mediafilecontent'"},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'mediafile': ('feincms.module.medialibrary.fields.MediaFileForeignKey', [], {'related_name': "'+'", 'to': u"orm['medialibrary.MediaFile']"}),
'ordering': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'mediafilecontent_set'", 'to': u"orm['pathways.Pathway']"}),
'region': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'type': ('django.db.models.fields.CharField', [], {'default': "'default'", 'max_length': '20'})
},
u'pathways.pathway': {
'Meta': {'object_name': 'Pathway'},
'author': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']", 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'db_index': 'True'}),
'language': ('django.db.models.fields.CharField', [], {'default': "'en'", 'max_length': '10'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '256'}),
'private_owners': ('django.db.models.fields.BooleanField', [], {}),
'public_owners': ('django.db.models.fields.BooleanField', [], {}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '256'}),
'specific_public_owners': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': u"orm['owners.Owner']", 'null': 'True', 'blank': 'True'}),
'translation_of': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'translations'", 'null': 'True', 'to': u"orm['pathways.Pathway']"})
},
u'pathways.richtextcontent': {
'Meta': {'ordering': "['ordering']", 'object_name': 'RichTextContent', 'db_table': "u'pathways_pathway_richtextcontent'"},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'ordering': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'richtextcontent_set'", 'to': u"orm['pathways.Pathway']"}),
'region': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'text': ('feincms.contrib.richtext.RichTextField', [], {'blank': 'True'})
}
}
complete_apps = ['pathways'] | gpl-3.0 |
ujenmr/ansible | lib/ansible/modules/network/fortios/fortios_firewall_profile_protocol_options.py | 24 | 42201 | #!/usr/bin/python
from __future__ import (absolute_import, division, print_function)
# Copyright 2019 Fortinet, Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
#
# The library uses Python logging; its output can be captured if logging is
# enabled in your Ansible config.
__metaclass__ = type
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
'metadata_version': '1.1'}
DOCUMENTATION = '''
---
module: fortios_firewall_profile_protocol_options
short_description: Configure protocol options in Fortinet's FortiOS and FortiGate.
description:
- This module is able to configure a FortiGate or FortiOS by
allowing the user to configure firewall feature and profile_protocol_options category.
      Examples include all options and need to be adjusted to datasources before usage.
Tested with FOS v6.0.2
version_added: "2.8"
author:
- Miguel Angel Munoz (@mamunozgonzalez)
- Nicolas Thomas (@thomnico)
notes:
- Requires fortiosapi library developed by Fortinet
- Run as a local_action in your playbook
requirements:
- fortiosapi>=0.9.8
options:
host:
description:
- FortiOS or FortiGate ip address.
required: true
username:
description:
- FortiOS or FortiGate username.
required: true
password:
description:
- FortiOS or FortiGate password.
default: ""
vdom:
description:
- Virtual domain, among those defined previously. A vdom is a
virtual instance of the FortiGate that can be configured and
used as a different unit.
default: root
https:
description:
- Indicates if the requests towards FortiGate must use HTTPS
protocol
type: bool
default: true
firewall_profile_protocol_options:
description:
- Configure protocol options.
default: null
suboptions:
state:
description:
- Indicates whether to create or remove the object
choices:
- present
- absent
comment:
description:
- Optional comments.
dns:
description:
- Configure DNS protocol options.
suboptions:
ports:
description:
- Ports to scan for content (1 - 65535, default = 53).
status:
description:
- Enable/disable the active status of scanning for this protocol.
choices:
- enable
- disable
ftp:
description:
- Configure FTP protocol options.
suboptions:
comfort-amount:
description:
- Amount of data to send in a transmission for client comforting (1 - 10240 bytes, default = 1).
comfort-interval:
description:
- Period of time between start, or last transmission, and the next client comfort transmission of data (1 - 900 sec, default = 10).
inspect-all:
description:
- Enable/disable the inspection of all ports for the protocol.
choices:
- enable
- disable
options:
description:
- One or more options that can be applied to the session.
choices:
- clientcomfort
- oversize
- splice
- bypass-rest-command
- bypass-mode-command
oversize-limit:
description:
- Maximum in-memory file size that can be scanned (1 - 383 MB, default = 10).
ports:
description:
- Ports to scan for content (1 - 65535, default = 21).
scan-bzip2:
description:
- Enable/disable scanning of BZip2 compressed files.
choices:
- enable
- disable
status:
description:
- Enable/disable the active status of scanning for this protocol.
choices:
- enable
- disable
uncompressed-nest-limit:
description:
- Maximum nested levels of compression that can be uncompressed and scanned (2 - 100, default = 12).
uncompressed-oversize-limit:
description:
- Maximum in-memory uncompressed file size that can be scanned (0 - 383 MB, 0 = unlimited, default = 10).
http:
description:
- Configure HTTP protocol options.
suboptions:
block-page-status-code:
description:
- Code number returned for blocked HTTP pages (non-FortiGuard only) (100 - 599, default = 403).
comfort-amount:
description:
- Amount of data to send in a transmission for client comforting (1 - 10240 bytes, default = 1).
comfort-interval:
description:
- Period of time between start, or last transmission, and the next client comfort transmission of data (1 - 900 sec, default = 10).
fortinet-bar:
description:
- Enable/disable Fortinet bar on HTML content.
choices:
- enable
- disable
fortinet-bar-port:
description:
- Port for use by Fortinet Bar (1 - 65535, default = 8011).
http-policy:
description:
- Enable/disable HTTP policy check.
choices:
- disable
- enable
inspect-all:
description:
- Enable/disable the inspection of all ports for the protocol.
choices:
- enable
- disable
options:
description:
- One or more options that can be applied to the session.
choices:
- clientcomfort
- servercomfort
- oversize
- chunkedbypass
oversize-limit:
description:
- Maximum in-memory file size that can be scanned (1 - 383 MB, default = 10).
ports:
description:
- Ports to scan for content (1 - 65535, default = 80).
post-lang:
description:
- ID codes for character sets to be used to convert to UTF-8 for banned words and DLP on HTTP posts (maximum of 5 character sets).
choices:
- jisx0201
- jisx0208
- jisx0212
- gb2312
- ksc5601-ex
- euc-jp
- sjis
- iso2022-jp
- iso2022-jp-1
- iso2022-jp-2
- euc-cn
- ces-gbk
- hz
- ces-big5
- euc-kr
- iso2022-jp-3
- iso8859-1
- tis620
- cp874
- cp1252
- cp1251
range-block:
description:
- Enable/disable blocking of partial downloads.
choices:
- disable
- enable
retry-count:
description:
- Number of attempts to retry HTTP connection (0 - 100, default = 0).
scan-bzip2:
description:
- Enable/disable scanning of BZip2 compressed files.
choices:
- enable
- disable
status:
description:
- Enable/disable the active status of scanning for this protocol.
choices:
- enable
- disable
streaming-content-bypass:
description:
- Enable/disable bypassing of streaming content from buffering.
choices:
- enable
- disable
strip-x-forwarded-for:
description:
- Enable/disable stripping of HTTP X-Forwarded-For header.
choices:
- disable
- enable
switching-protocols:
description:
- Bypass from scanning, or block a connection that attempts to switch protocol.
choices:
- bypass
- block
uncompressed-nest-limit:
description:
- Maximum nested levels of compression that can be uncompressed and scanned (2 - 100, default = 12).
uncompressed-oversize-limit:
description:
- Maximum in-memory uncompressed file size that can be scanned (0 - 383 MB, 0 = unlimited, default = 10).
imap:
description:
- Configure IMAP protocol options.
suboptions:
inspect-all:
description:
- Enable/disable the inspection of all ports for the protocol.
choices:
- enable
- disable
options:
description:
- One or more options that can be applied to the session.
choices:
- fragmail
- oversize
oversize-limit:
description:
- Maximum in-memory file size that can be scanned (1 - 383 MB, default = 10).
ports:
description:
- Ports to scan for content (1 - 65535, default = 143).
scan-bzip2:
description:
- Enable/disable scanning of BZip2 compressed files.
choices:
- enable
- disable
status:
description:
- Enable/disable the active status of scanning for this protocol.
choices:
- enable
- disable
uncompressed-nest-limit:
description:
- Maximum nested levels of compression that can be uncompressed and scanned (2 - 100, default = 12).
uncompressed-oversize-limit:
description:
- Maximum in-memory uncompressed file size that can be scanned (0 - 383 MB, 0 = unlimited, default = 10).
mail-signature:
description:
- Configure Mail signature.
suboptions:
signature:
description:
- Email signature to be added to outgoing email (if the signature contains spaces, enclose with quotation marks).
status:
description:
- Enable/disable adding an email signature to SMTP email messages as they pass through the FortiGate.
choices:
- disable
- enable
mapi:
description:
- Configure MAPI protocol options.
suboptions:
options:
description:
- One or more options that can be applied to the session.
choices:
- fragmail
- oversize
oversize-limit:
description:
- Maximum in-memory file size that can be scanned (1 - 383 MB, default = 10).
ports:
description:
- Ports to scan for content (1 - 65535, default = 135).
scan-bzip2:
description:
- Enable/disable scanning of BZip2 compressed files.
choices:
- enable
- disable
status:
description:
- Enable/disable the active status of scanning for this protocol.
choices:
- enable
- disable
uncompressed-nest-limit:
description:
- Maximum nested levels of compression that can be uncompressed and scanned (2 - 100, default = 12).
uncompressed-oversize-limit:
description:
- Maximum in-memory uncompressed file size that can be scanned (0 - 383 MB, 0 = unlimited, default = 10).
name:
description:
- Name.
required: true
nntp:
description:
- Configure NNTP protocol options.
suboptions:
inspect-all:
description:
- Enable/disable the inspection of all ports for the protocol.
choices:
- enable
- disable
options:
description:
- One or more options that can be applied to the session.
choices:
- oversize
- splice
oversize-limit:
description:
- Maximum in-memory file size that can be scanned (1 - 383 MB, default = 10).
ports:
description:
- Ports to scan for content (1 - 65535, default = 119).
scan-bzip2:
description:
- Enable/disable scanning of BZip2 compressed files.
choices:
- enable
- disable
status:
description:
- Enable/disable the active status of scanning for this protocol.
choices:
- enable
- disable
uncompressed-nest-limit:
description:
- Maximum nested levels of compression that can be uncompressed and scanned (2 - 100, default = 12).
uncompressed-oversize-limit:
description:
- Maximum in-memory uncompressed file size that can be scanned (0 - 383 MB, 0 = unlimited, default = 10).
oversize-log:
description:
- Enable/disable logging for antivirus oversize file blocking.
choices:
- disable
- enable
pop3:
description:
- Configure POP3 protocol options.
suboptions:
inspect-all:
description:
- Enable/disable the inspection of all ports for the protocol.
choices:
- enable
- disable
options:
description:
- One or more options that can be applied to the session.
choices:
- fragmail
- oversize
oversize-limit:
description:
- Maximum in-memory file size that can be scanned (1 - 383 MB, default = 10).
ports:
description:
- Ports to scan for content (1 - 65535, default = 110).
scan-bzip2:
description:
- Enable/disable scanning of BZip2 compressed files.
choices:
- enable
- disable
status:
description:
- Enable/disable the active status of scanning for this protocol.
choices:
- enable
- disable
uncompressed-nest-limit:
description:
- Maximum nested levels of compression that can be uncompressed and scanned (2 - 100, default = 12).
uncompressed-oversize-limit:
description:
- Maximum in-memory uncompressed file size that can be scanned (0 - 383 MB, 0 = unlimited, default = 10).
replacemsg-group:
description:
                    - Name of the replacement message group to be used. Source system.replacemsg-group.name.
rpc-over-http:
description:
- Enable/disable inspection of RPC over HTTP.
choices:
- enable
- disable
smtp:
description:
- Configure SMTP protocol options.
suboptions:
inspect-all:
description:
- Enable/disable the inspection of all ports for the protocol.
choices:
- enable
- disable
options:
description:
- One or more options that can be applied to the session.
choices:
- fragmail
- oversize
- splice
oversize-limit:
description:
- Maximum in-memory file size that can be scanned (1 - 383 MB, default = 10).
ports:
description:
- Ports to scan for content (1 - 65535, default = 25).
scan-bzip2:
description:
- Enable/disable scanning of BZip2 compressed files.
choices:
- enable
- disable
server-busy:
description:
- Enable/disable SMTP server busy when server not available.
choices:
- enable
- disable
status:
description:
- Enable/disable the active status of scanning for this protocol.
choices:
- enable
- disable
uncompressed-nest-limit:
description:
- Maximum nested levels of compression that can be uncompressed and scanned (2 - 100, default = 12).
uncompressed-oversize-limit:
description:
- Maximum in-memory uncompressed file size that can be scanned (0 - 383 MB, 0 = unlimited, default = 10).
switching-protocols-log:
description:
- Enable/disable logging for HTTP/HTTPS switching protocols.
choices:
- disable
- enable
'''
EXAMPLES = '''
- hosts: localhost
vars:
host: "192.168.122.40"
username: "admin"
password: ""
vdom: "root"
tasks:
- name: Configure protocol options.
fortios_firewall_profile_protocol_options:
host: "{{ host }}"
username: "{{ username }}"
password: "{{ password }}"
vdom: "{{ vdom }}"
https: "False"
firewall_profile_protocol_options:
state: "present"
comment: "Optional comments."
dns:
ports: "5"
status: "enable"
ftp:
comfort-amount: "8"
comfort-interval: "9"
inspect-all: "enable"
options: "clientcomfort"
oversize-limit: "12"
ports: "13"
scan-bzip2: "enable"
status: "enable"
uncompressed-nest-limit: "16"
uncompressed-oversize-limit: "17"
http:
block-page-status-code: "19"
comfort-amount: "20"
comfort-interval: "21"
fortinet-bar: "enable"
fortinet-bar-port: "23"
http-policy: "disable"
inspect-all: "enable"
options: "clientcomfort"
oversize-limit: "27"
ports: "28"
post-lang: "jisx0201"
range-block: "disable"
retry-count: "31"
scan-bzip2: "enable"
status: "enable"
streaming-content-bypass: "enable"
strip-x-forwarded-for: "disable"
switching-protocols: "bypass"
uncompressed-nest-limit: "37"
uncompressed-oversize-limit: "38"
imap:
inspect-all: "enable"
options: "fragmail"
oversize-limit: "42"
ports: "43"
scan-bzip2: "enable"
status: "enable"
uncompressed-nest-limit: "46"
uncompressed-oversize-limit: "47"
mail-signature:
signature: "<your_own_value>"
status: "disable"
mapi:
options: "fragmail"
oversize-limit: "53"
ports: "54"
scan-bzip2: "enable"
status: "enable"
uncompressed-nest-limit: "57"
uncompressed-oversize-limit: "58"
name: "default_name_59"
nntp:
inspect-all: "enable"
options: "oversize"
oversize-limit: "63"
ports: "64"
scan-bzip2: "enable"
status: "enable"
uncompressed-nest-limit: "67"
uncompressed-oversize-limit: "68"
oversize-log: "disable"
pop3:
inspect-all: "enable"
options: "fragmail"
oversize-limit: "73"
ports: "74"
scan-bzip2: "enable"
status: "enable"
uncompressed-nest-limit: "77"
uncompressed-oversize-limit: "78"
replacemsg-group: "<your_own_value> (source system.replacemsg-group.name)"
rpc-over-http: "enable"
smtp:
inspect-all: "enable"
options: "fragmail"
oversize-limit: "84"
ports: "85"
scan-bzip2: "enable"
server-busy: "enable"
status: "enable"
uncompressed-nest-limit: "89"
uncompressed-oversize-limit: "90"
switching-protocols-log: "disable"
'''
RETURN = '''
build:
description: Build number of the fortigate image
returned: always
type: str
sample: '1547'
http_method:
description: Last method used to provision the content into FortiGate
returned: always
type: str
sample: 'PUT'
http_status:
description: Last result given by FortiGate on last operation applied
returned: always
type: str
sample: "200"
mkey:
description: Master key (id) used in the last call to FortiGate
returned: success
type: str
sample: "id"
name:
description: Name of the table used to fulfill the request
returned: always
type: str
sample: "urlfilter"
path:
description: Path of the table used to fulfill the request
returned: always
type: str
sample: "webfilter"
revision:
description: Internal revision number
returned: always
type: str
sample: "17.0.2.10658"
serial:
description: Serial number of the unit
returned: always
type: str
sample: "FGVMEVYYQT3AB5352"
status:
description: Indication of the operation's result
returned: always
type: str
sample: "success"
vdom:
description: Virtual domain used
returned: always
type: str
sample: "root"
version:
description: Version of the FortiGate
returned: always
type: str
sample: "v5.6.3"
'''
from ansible.module_utils.basic import AnsibleModule
fos = None
def login(data):
host = data['host']
username = data['username']
password = data['password']
fos.debug('on')
if 'https' in data and not data['https']:
fos.https('off')
else:
fos.https('on')
fos.login(host, username, password)
def filter_firewall_profile_protocol_options_data(json):
option_list = ['comment', 'dns', 'ftp',
'http', 'imap', 'mail-signature',
'mapi', 'name', 'nntp',
'oversize-log', 'pop3', 'replacemsg-group',
'rpc-over-http', 'smtp', 'switching-protocols-log']
dictionary = {}
for attribute in option_list:
if attribute in json and json[attribute] is not None:
dictionary[attribute] = json[attribute]
return dictionary
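# For illustration, keys outside option_list (for example the 'state' flag
# from the playbook) are dropped before the payload is sent to FortiOS:
#
#     filter_firewall_profile_protocol_options_data(
#         {'name': 'default', 'state': 'present'})
#     # -> {'name': 'default'}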
def firewall_profile_protocol_options(data, fos):
vdom = data['vdom']
firewall_profile_protocol_options_data = data['firewall_profile_protocol_options']
filtered_data = filter_firewall_profile_protocol_options_data(firewall_profile_protocol_options_data)
if firewall_profile_protocol_options_data['state'] == "present":
return fos.set('firewall',
'profile-protocol-options',
data=filtered_data,
vdom=vdom)
elif firewall_profile_protocol_options_data['state'] == "absent":
return fos.delete('firewall',
'profile-protocol-options',
mkey=filtered_data['name'],
vdom=vdom)
def fortios_firewall(data, fos):
login(data)
methodlist = ['firewall_profile_protocol_options']
for method in methodlist:
if data[method]:
resp = eval(method)(data, fos)
break
fos.logout()
return not resp['status'] == "success", resp['status'] == "success", resp
def main():
fields = {
"host": {"required": True, "type": "str"},
"username": {"required": True, "type": "str"},
"password": {"required": False, "type": "str", "no_log": True},
"vdom": {"required": False, "type": "str", "default": "root"},
"https": {"required": False, "type": "bool", "default": True},
"firewall_profile_protocol_options": {
"required": False, "type": "dict",
"options": {
"state": {"required": True, "type": "str",
"choices": ["present", "absent"]},
"comment": {"required": False, "type": "str"},
"dns": {"required": False, "type": "dict",
"options": {
"ports": {"required": False, "type": "int"},
"status": {"required": False, "type": "str",
"choices": ["enable", "disable"]}
}},
"ftp": {"required": False, "type": "dict",
"options": {
"comfort-amount": {"required": False, "type": "int"},
"comfort-interval": {"required": False, "type": "int"},
"inspect-all": {"required": False, "type": "str",
"choices": ["enable", "disable"]},
"options": {"required": False, "type": "str",
"choices": ["clientcomfort", "oversize", "splice",
"bypass-rest-command", "bypass-mode-command"]},
"oversize-limit": {"required": False, "type": "int"},
"ports": {"required": False, "type": "int"},
"scan-bzip2": {"required": False, "type": "str",
"choices": ["enable", "disable"]},
"status": {"required": False, "type": "str",
"choices": ["enable", "disable"]},
"uncompressed-nest-limit": {"required": False, "type": "int"},
"uncompressed-oversize-limit": {"required": False, "type": "int"}
}},
"http": {"required": False, "type": "dict",
"options": {
"block-page-status-code": {"required": False, "type": "int"},
"comfort-amount": {"required": False, "type": "int"},
"comfort-interval": {"required": False, "type": "int"},
"fortinet-bar": {"required": False, "type": "str",
"choices": ["enable", "disable"]},
"fortinet-bar-port": {"required": False, "type": "int"},
"http-policy": {"required": False, "type": "str",
"choices": ["disable", "enable"]},
"inspect-all": {"required": False, "type": "str",
"choices": ["enable", "disable"]},
"options": {"required": False, "type": "str",
"choices": ["clientcomfort", "servercomfort", "oversize",
"chunkedbypass"]},
"oversize-limit": {"required": False, "type": "int"},
"ports": {"required": False, "type": "int"},
"post-lang": {"required": False, "type": "str",
"choices": ["jisx0201", "jisx0208", "jisx0212",
"gb2312", "ksc5601-ex", "euc-jp",
"sjis", "iso2022-jp", "iso2022-jp-1",
"iso2022-jp-2", "euc-cn", "ces-gbk",
"hz", "ces-big5", "euc-kr",
"iso2022-jp-3", "iso8859-1", "tis620",
"cp874", "cp1252", "cp1251"]},
"range-block": {"required": False, "type": "str",
"choices": ["disable", "enable"]},
"retry-count": {"required": False, "type": "int"},
"scan-bzip2": {"required": False, "type": "str",
"choices": ["enable", "disable"]},
"status": {"required": False, "type": "str",
"choices": ["enable", "disable"]},
"streaming-content-bypass": {"required": False, "type": "str",
"choices": ["enable", "disable"]},
"strip-x-forwarded-for": {"required": False, "type": "str",
"choices": ["disable", "enable"]},
"switching-protocols": {"required": False, "type": "str",
"choices": ["bypass", "block"]},
"uncompressed-nest-limit": {"required": False, "type": "int"},
"uncompressed-oversize-limit": {"required": False, "type": "int"}
}},
"imap": {"required": False, "type": "dict",
"options": {
"inspect-all": {"required": False, "type": "str",
"choices": ["enable", "disable"]},
"options": {"required": False, "type": "str",
"choices": ["fragmail", "oversize"]},
"oversize-limit": {"required": False, "type": "int"},
"ports": {"required": False, "type": "int"},
"scan-bzip2": {"required": False, "type": "str",
"choices": ["enable", "disable"]},
"status": {"required": False, "type": "str",
"choices": ["enable", "disable"]},
"uncompressed-nest-limit": {"required": False, "type": "int"},
"uncompressed-oversize-limit": {"required": False, "type": "int"}
}},
"mail-signature": {"required": False, "type": "dict",
"options": {
"signature": {"required": False, "type": "str"},
"status": {"required": False, "type": "str",
"choices": ["disable", "enable"]}
}},
"mapi": {"required": False, "type": "dict",
"options": {
"options": {"required": False, "type": "str",
"choices": ["fragmail", "oversize"]},
"oversize-limit": {"required": False, "type": "int"},
"ports": {"required": False, "type": "int"},
"scan-bzip2": {"required": False, "type": "str",
"choices": ["enable", "disable"]},
"status": {"required": False, "type": "str",
"choices": ["enable", "disable"]},
"uncompressed-nest-limit": {"required": False, "type": "int"},
"uncompressed-oversize-limit": {"required": False, "type": "int"}
}},
"name": {"required": True, "type": "str"},
"nntp": {"required": False, "type": "dict",
"options": {
"inspect-all": {"required": False, "type": "str",
"choices": ["enable", "disable"]},
"options": {"required": False, "type": "str",
"choices": ["oversize", "splice"]},
"oversize-limit": {"required": False, "type": "int"},
"ports": {"required": False, "type": "int"},
"scan-bzip2": {"required": False, "type": "str",
"choices": ["enable", "disable"]},
"status": {"required": False, "type": "str",
"choices": ["enable", "disable"]},
"uncompressed-nest-limit": {"required": False, "type": "int"},
"uncompressed-oversize-limit": {"required": False, "type": "int"}
}},
"oversize-log": {"required": False, "type": "str",
"choices": ["disable", "enable"]},
"pop3": {"required": False, "type": "dict",
"options": {
"inspect-all": {"required": False, "type": "str",
"choices": ["enable", "disable"]},
"options": {"required": False, "type": "str",
"choices": ["fragmail", "oversize"]},
"oversize-limit": {"required": False, "type": "int"},
"ports": {"required": False, "type": "int"},
"scan-bzip2": {"required": False, "type": "str",
"choices": ["enable", "disable"]},
"status": {"required": False, "type": "str",
"choices": ["enable", "disable"]},
"uncompressed-nest-limit": {"required": False, "type": "int"},
"uncompressed-oversize-limit": {"required": False, "type": "int"}
}},
"replacemsg-group": {"required": False, "type": "str"},
"rpc-over-http": {"required": False, "type": "str",
"choices": ["enable", "disable"]},
"smtp": {"required": False, "type": "dict",
"options": {
"inspect-all": {"required": False, "type": "str",
"choices": ["enable", "disable"]},
"options": {"required": False, "type": "str",
"choices": ["fragmail", "oversize", "splice"]},
"oversize-limit": {"required": False, "type": "int"},
"ports": {"required": False, "type": "int"},
"scan-bzip2": {"required": False, "type": "str",
"choices": ["enable", "disable"]},
"server-busy": {"required": False, "type": "str",
"choices": ["enable", "disable"]},
"status": {"required": False, "type": "str",
"choices": ["enable", "disable"]},
"uncompressed-nest-limit": {"required": False, "type": "int"},
"uncompressed-oversize-limit": {"required": False, "type": "int"}
}},
"switching-protocols-log": {"required": False, "type": "str",
"choices": ["disable", "enable"]}
}
}
}
module = AnsibleModule(argument_spec=fields,
supports_check_mode=False)
try:
from fortiosapi import FortiOSAPI
except ImportError:
module.fail_json(msg="fortiosapi module is required")
global fos
fos = FortiOSAPI()
is_error, has_changed, result = fortios_firewall(module.params, fos)
if not is_error:
module.exit_json(changed=has_changed, meta=result)
else:
module.fail_json(msg="Error in repo", meta=result)
if __name__ == '__main__':
main()
| gpl-3.0 |
joopert/home-assistant | tests/components/no_ip/test_init.py | 4 | 2410 | """Test the NO-IP component."""
import asyncio
from datetime import timedelta
import pytest
from homeassistant.setup import async_setup_component
from homeassistant.components import no_ip
from homeassistant.util.dt import utcnow
from tests.common import async_fire_time_changed
DOMAIN = "test.example.com"
PASSWORD = "xyz789"
UPDATE_URL = no_ip.UPDATE_URL
USERNAME = "abc@123.com"
@pytest.fixture
def setup_no_ip(hass, aioclient_mock):
"""Fixture that sets up NO-IP."""
aioclient_mock.get(UPDATE_URL, params={"hostname": DOMAIN}, text="good 0.0.0.0")
hass.loop.run_until_complete(
async_setup_component(
hass,
no_ip.DOMAIN,
{
no_ip.DOMAIN: {
"domain": DOMAIN,
"username": USERNAME,
"password": PASSWORD,
}
},
)
)
@asyncio.coroutine
def test_setup(hass, aioclient_mock):
"""Test setup works if update passes."""
aioclient_mock.get(UPDATE_URL, params={"hostname": DOMAIN}, text="nochg 0.0.0.0")
result = yield from async_setup_component(
hass,
no_ip.DOMAIN,
{no_ip.DOMAIN: {"domain": DOMAIN, "username": USERNAME, "password": PASSWORD}},
)
assert result
assert aioclient_mock.call_count == 1
async_fire_time_changed(hass, utcnow() + timedelta(minutes=5))
yield from hass.async_block_till_done()
assert aioclient_mock.call_count == 2
@asyncio.coroutine
def test_setup_fails_if_update_fails(hass, aioclient_mock):
"""Test setup fails if first update fails."""
aioclient_mock.get(UPDATE_URL, params={"hostname": DOMAIN}, text="nohost")
result = yield from async_setup_component(
hass,
no_ip.DOMAIN,
{no_ip.DOMAIN: {"domain": DOMAIN, "username": USERNAME, "password": PASSWORD}},
)
assert not result
assert aioclient_mock.call_count == 1
@asyncio.coroutine
def test_setup_fails_if_wrong_auth(hass, aioclient_mock):
"""Test setup fails if first update fails through wrong authentication."""
aioclient_mock.get(UPDATE_URL, params={"hostname": DOMAIN}, text="badauth")
result = yield from async_setup_component(
hass,
no_ip.DOMAIN,
{no_ip.DOMAIN: {"domain": DOMAIN, "username": USERNAME, "password": PASSWORD}},
)
assert not result
assert aioclient_mock.call_count == 1
| apache-2.0 |
rcarrillocruz/ansible | lib/ansible/modules/monitoring/honeybadger_deployment.py | 36 | 4275 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright 2014 Benjamin Curtis <benjamin.curtis@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: honeybadger_deployment
author: "Benjamin Curtis (@stympy)"
version_added: "2.2"
short_description: Notify Honeybadger.io about app deployments
description:
- Notify Honeybadger.io about app deployments (see http://docs.honeybadger.io/article/188-deployment-tracking)
options:
token:
description:
- API token.
required: true
environment:
description:
- The environment name, typically 'production', 'staging', etc.
required: true
user:
description:
      - The username of the person doing the deployment.
required: false
default: None
repo:
description:
      - URL of the project repository.
required: false
default: None
revision:
description:
- A hash, number, tag, or other identifier showing what revision was deployed
required: false
default: None
url:
description:
- Optional URL to submit the notification to.
required: false
default: "https://api.honeybadger.io/v1/deploys"
validate_certs:
description:
- If C(no), SSL certificates for the target url will not be validated. This should only be used
on personally controlled sites using self-signed certificates.
required: false
default: 'yes'
choices: ['yes', 'no']
requirements: []
'''
EXAMPLES = '''
- honeybadger_deployment:
token: AAAAAA
environment: staging
user: ansible
revision: b6826b8
repo: 'git@github.com:user/repo.git'
'''
RETURN = '''# '''
# import module snippets
from ansible.module_utils.basic import *
from ansible.module_utils.pycompat24 import get_exception
from ansible.module_utils.six.moves.urllib.parse import urlencode
from ansible.module_utils.urls import *
# ===========================================
# Module execution.
#
def main():
module = AnsibleModule(
argument_spec=dict(
token=dict(required=True, no_log=True),
environment=dict(required=True),
user=dict(required=False),
repo=dict(required=False),
revision=dict(required=False),
url=dict(required=False, default='https://api.honeybadger.io/v1/deploys'),
validate_certs=dict(default='yes', type='bool'),
),
supports_check_mode=True
)
params = {}
if module.params["environment"]:
params["deploy[environment]"] = module.params["environment"]
if module.params["user"]:
params["deploy[local_username]"] = module.params["user"]
if module.params["repo"]:
params["deploy[repository]"] = module.params["repo"]
if module.params["revision"]:
params["deploy[revision]"] = module.params["revision"]
params["api_key"] = module.params["token"]
url = module.params.get('url')
# If we're in check mode, just exit pretending like we succeeded
if module.check_mode:
module.exit_json(changed=True)
try:
data = urlencode(params)
response, info = fetch_url(module, url, data=data)
except Exception:
e = get_exception()
module.fail_json(msg='Unable to notify Honeybadger: %s' % e)
else:
if info['status'] == 201:
module.exit_json(changed=True)
else:
module.fail_json(msg="HTTP result code: %d connecting to %s" % (info['status'], url))
if __name__ == '__main__':
main()
| gpl-3.0 |
HyperBaton/ansible | lib/ansible/modules/network/fortios/fortios_webfilter_urlfilter.py | 7 | 15868 | #!/usr/bin/python
from __future__ import (absolute_import, division, print_function)
# Copyright 2019 Fortinet, Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
__metaclass__ = type
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
'metadata_version': '1.1'}
DOCUMENTATION = '''
---
module: fortios_webfilter_urlfilter
short_description: Configure URL filter lists in Fortinet's FortiOS and FortiGate.
description:
- This module is able to configure a FortiGate or FortiOS (FOS) device by allowing the
user to set and modify webfilter feature and urlfilter category.
      Examples include all parameters, and values need to be adjusted to datasources before usage.
Tested with FOS v6.0.5
version_added: "2.8"
author:
- Miguel Angel Munoz (@mamunozgonzalez)
- Nicolas Thomas (@thomnico)
notes:
- Requires fortiosapi library developed by Fortinet
- Run as a local_action in your playbook
requirements:
- fortiosapi>=0.9.8
options:
host:
description:
- FortiOS or FortiGate IP address.
type: str
required: false
username:
description:
- FortiOS or FortiGate username.
type: str
required: false
password:
description:
- FortiOS or FortiGate password.
type: str
default: ""
vdom:
description:
- Virtual domain, among those defined previously. A vdom is a
virtual instance of the FortiGate that can be configured and
used as a different unit.
type: str
default: root
https:
description:
- Indicates if the requests towards FortiGate must use HTTPS protocol.
type: bool
default: true
ssl_verify:
description:
- Ensures FortiGate certificate must be verified by a proper CA.
type: bool
default: true
version_added: 2.9
state:
description:
- Indicates whether to create or remove the object.
This attribute was present already in previous version in a deeper level.
It has been moved out to this outer level.
type: str
required: false
choices:
- present
- absent
version_added: 2.9
webfilter_urlfilter:
description:
- Configure URL filter lists.
default: null
type: dict
suboptions:
state:
description:
- B(Deprecated)
- Starting with Ansible 2.9 we recommend using the top-level 'state' parameter.
- HORIZONTALLINE
- Indicates whether to create or remove the object.
type: str
required: false
choices:
- present
- absent
comment:
description:
- Optional comments.
type: str
entries:
description:
- URL filter entries.
type: list
suboptions:
action:
description:
- Action to take for URL filter matches.
type: str
choices:
- exempt
- block
- allow
- monitor
dns_address_family:
description:
- Resolve IPv4 address, IPv6 address, or both from DNS server.
type: str
choices:
- ipv4
- ipv6
- both
exempt:
description:
- If action is set to exempt, select the security profile operations that exempt URLs skip. Separate multiple options with a space.
type: str
choices:
- av
- web-content
- activex-java-cookie
- dlp
- fortiguard
- range-block
- pass
- all
id:
description:
- Id.
required: true
type: int
referrer_host:
description:
- Referrer host name.
type: str
status:
description:
- Enable/disable this URL filter.
type: str
choices:
- enable
- disable
type:
description:
- Filter type (simple, regex, or wildcard).
type: str
choices:
- simple
- regex
- wildcard
url:
description:
- URL to be filtered.
type: str
web_proxy_profile:
description:
- Web proxy profile. Source web-proxy.profile.name.
type: str
id:
description:
- ID.
required: true
type: int
ip_addr_block:
description:
- Enable/disable blocking URLs when the hostname appears as an IP address.
type: str
choices:
- enable
- disable
name:
description:
- Name of URL filter list.
type: str
one_arm_ips_urlfilter:
description:
- Enable/disable DNS resolver for one-arm IPS URL filter operation.
type: str
choices:
- enable
- disable
'''
EXAMPLES = '''
- hosts: localhost
vars:
host: "192.168.122.40"
username: "admin"
password: ""
vdom: "root"
ssl_verify: "False"
tasks:
- name: Configure URL filter lists.
fortios_webfilter_urlfilter:
host: "{{ host }}"
username: "{{ username }}"
password: "{{ password }}"
vdom: "{{ vdom }}"
https: "False"
state: "present"
webfilter_urlfilter:
comment: "Optional comments."
entries:
-
action: "exempt"
dns_address_family: "ipv4"
exempt: "av"
id: "8"
referrer_host: "myhostname"
status: "enable"
type: "simple"
url: "myurl.com"
web_proxy_profile: "<your_own_value> (source web-proxy.profile.name)"
id: "14"
ip_addr_block: "enable"
name: "default_name_16"
one_arm_ips_urlfilter: "enable"
'''
RETURN = '''
build:
description: Build number of the fortigate image
returned: always
type: str
sample: '1547'
http_method:
description: Last method used to provision the content into FortiGate
returned: always
type: str
sample: 'PUT'
http_status:
description: Last result given by FortiGate on last operation applied
returned: always
type: str
sample: "200"
mkey:
description: Master key (id) used in the last call to FortiGate
returned: success
type: str
sample: "id"
name:
description: Name of the table used to fulfill the request
returned: always
type: str
sample: "urlfilter"
path:
description: Path of the table used to fulfill the request
returned: always
type: str
sample: "webfilter"
revision:
description: Internal revision number
returned: always
type: str
sample: "17.0.2.10658"
serial:
description: Serial number of the unit
returned: always
type: str
sample: "FGVMEVYYQT3AB5352"
status:
description: Indication of the operation's result
returned: always
type: str
sample: "success"
vdom:
description: Virtual domain used
returned: always
type: str
sample: "root"
version:
description: Version of the FortiGate
returned: always
type: str
sample: "v5.6.3"
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.connection import Connection
from ansible.module_utils.network.fortios.fortios import FortiOSHandler
from ansible.module_utils.network.fortimanager.common import FAIL_SOCKET_MSG
def login(data, fos):
host = data['host']
username = data['username']
password = data['password']
ssl_verify = data['ssl_verify']
fos.debug('on')
if 'https' in data and not data['https']:
fos.https('off')
else:
fos.https('on')
fos.login(host, username, password, verify=ssl_verify)
def filter_webfilter_urlfilter_data(json):
option_list = ['comment', 'entries', 'id',
'ip_addr_block', 'name', 'one_arm_ips_urlfilter']
dictionary = {}
for attribute in option_list:
if attribute in json and json[attribute] is not None:
dictionary[attribute] = json[attribute]
return dictionary
def underscore_to_hyphen(data):
if isinstance(data, list):
for i, elem in enumerate(data):
data[i] = underscore_to_hyphen(elem)
elif isinstance(data, dict):
new_data = {}
for k, v in data.items():
new_data[k.replace('_', '-')] = underscore_to_hyphen(v)
data = new_data
return data
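# Illustration with made-up values: Ansible argument names use underscores
# while the FortiOS API expects hyphens, so keys are rewritten recursively.
#
#     underscore_to_hyphen({'ip_addr_block': 'enable',
#                           'entries': [{'dns_address_family': 'ipv4'}]})
#     # -> {'ip-addr-block': 'enable',
#     #     'entries': [{'dns-address-family': 'ipv4'}]}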
def webfilter_urlfilter(data, fos):
vdom = data['vdom']
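    # The top-level 'state' (Ansible 2.9+) takes precedence; the nested
    # webfilter_urlfilter['state'] is the deprecated pre-2.9 location
    # documented above.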
if 'state' in data and data['state']:
state = data['state']
elif 'state' in data['webfilter_urlfilter'] and data['webfilter_urlfilter']:
state = data['webfilter_urlfilter']['state']
else:
state = True
webfilter_urlfilter_data = data['webfilter_urlfilter']
filtered_data = underscore_to_hyphen(filter_webfilter_urlfilter_data(webfilter_urlfilter_data))
if state == "present":
return fos.set('webfilter',
'urlfilter',
data=filtered_data,
vdom=vdom)
elif state == "absent":
return fos.delete('webfilter',
'urlfilter',
mkey=filtered_data['id'],
vdom=vdom)
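# A DELETE that comes back as HTTP 404 is counted as success below, so that
# removing an already-absent urlfilter stays idempotent.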
def is_successful_status(status):
return status['status'] == "success" or \
status['http_method'] == "DELETE" and status['http_status'] == 404
def fortios_webfilter(data, fos):
if data['webfilter_urlfilter']:
resp = webfilter_urlfilter(data, fos)
return not is_successful_status(resp), \
resp['status'] == "success", \
resp
def main():
fields = {
"host": {"required": False, "type": "str"},
"username": {"required": False, "type": "str"},
"password": {"required": False, "type": "str", "default": "", "no_log": True},
"vdom": {"required": False, "type": "str", "default": "root"},
"https": {"required": False, "type": "bool", "default": True},
"ssl_verify": {"required": False, "type": "bool", "default": True},
"state": {"required": False, "type": "str",
"choices": ["present", "absent"]},
"webfilter_urlfilter": {
"required": False, "type": "dict", "default": None,
"options": {
"state": {"required": False, "type": "str",
"choices": ["present", "absent"]},
"comment": {"required": False, "type": "str"},
"entries": {"required": False, "type": "list",
"options": {
"action": {"required": False, "type": "str",
"choices": ["exempt", "block", "allow",
"monitor"]},
"dns_address_family": {"required": False, "type": "str",
"choices": ["ipv4", "ipv6", "both"]},
"exempt": {"required": False, "type": "str",
"choices": ["av", "web-content", "activex-java-cookie",
"dlp", "fortiguard", "range-block",
"pass", "all"]},
"id": {"required": True, "type": "int"},
"referrer_host": {"required": False, "type": "str"},
"status": {"required": False, "type": "str",
"choices": ["enable", "disable"]},
"type": {"required": False, "type": "str",
"choices": ["simple", "regex", "wildcard"]},
"url": {"required": False, "type": "str"},
"web_proxy_profile": {"required": False, "type": "str"}
}},
"id": {"required": True, "type": "int"},
"ip_addr_block": {"required": False, "type": "str",
"choices": ["enable", "disable"]},
"name": {"required": False, "type": "str"},
"one_arm_ips_urlfilter": {"required": False, "type": "str",
"choices": ["enable", "disable"]}
}
}
}
module = AnsibleModule(argument_spec=fields,
supports_check_mode=False)
# legacy_mode refers to using fortiosapi instead of HTTPAPI
legacy_mode = 'host' in module.params and module.params['host'] is not None and \
'username' in module.params and module.params['username'] is not None and \
'password' in module.params and module.params['password'] is not None
if not legacy_mode:
if module._socket_path:
connection = Connection(module._socket_path)
fos = FortiOSHandler(connection)
is_error, has_changed, result = fortios_webfilter(module.params, fos)
else:
module.fail_json(**FAIL_SOCKET_MSG)
else:
try:
from fortiosapi import FortiOSAPI
except ImportError:
module.fail_json(msg="fortiosapi module is required")
fos = FortiOSAPI()
login(module.params, fos)
is_error, has_changed, result = fortios_webfilter(module.params, fos)
fos.logout()
if not is_error:
module.exit_json(changed=has_changed, meta=result)
else:
module.fail_json(msg="Error in repo", meta=result)
if __name__ == '__main__':
main()
| gpl-3.0 |
procangroup/edx-platform | lms/djangoapps/notification_prefs/views.py | 3 | 7651 | """
Views to support notification preferences.
"""
from __future__ import division
import json
import os
from base64 import urlsafe_b64decode, urlsafe_b64encode
from hashlib import sha256
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives.ciphers import Cipher
from cryptography.hazmat.primitives.ciphers.algorithms import AES
from cryptography.hazmat.primitives.ciphers.modes import CBC
from cryptography.hazmat.primitives.padding import PKCS7
from django.conf import settings
from django.contrib.auth.models import User
from django.core.exceptions import PermissionDenied
from django.http import Http404, HttpResponse
from django.views.decorators.http import require_GET, require_POST
from six import text_type
from edxmako.shortcuts import render_to_response
from notification_prefs import NOTIFICATION_PREF_KEY
from openedx.core.djangoapps.user_api.models import UserPreference
from openedx.core.djangoapps.user_api.preferences.api import delete_user_preference
AES_BLOCK_SIZE_BYTES = int(AES.block_size / 8)
class UsernameDecryptionException(Exception):
pass
class UsernameCipher(object):
"""
A transformation of a username to/from an opaque token
The purpose of the token is to make one-click unsubscribe links that don't
require the user to log in. To prevent users from unsubscribing other users,
    we must ensure the token cannot be computed by anyone who has this
    source code but does not have the secret key. The token must also be
    embeddable in a URL.
Thus, we take the following steps to encode (and do the inverse to decode):
1. Pad the UTF-8 encoding of the username with PKCS#7 padding to match the
AES block length
2. Generate a random AES block length initialization vector
3. Use AES-256 (with a hash of settings.SECRET_KEY as the encryption key)
in CBC mode to encrypt the username
4. Prepend the IV to the encrypted value to allow for initialization of the
decryption cipher
5. base64url encode the result
"""
@staticmethod
def _get_aes_cipher(initialization_vector):
hash_ = sha256()
hash_.update(settings.SECRET_KEY)
return Cipher(AES(hash_.digest()), CBC(initialization_vector), backend=default_backend())
@staticmethod
def encrypt(username):
initialization_vector = os.urandom(AES_BLOCK_SIZE_BYTES)
aes_cipher = UsernameCipher._get_aes_cipher(initialization_vector)
encryptor = aes_cipher.encryptor()
padder = PKCS7(AES.block_size).padder()
padded = padder.update(username.encode("utf-8")) + padder.finalize()
return urlsafe_b64encode(initialization_vector + encryptor.update(padded) + encryptor.finalize())
@staticmethod
def decrypt(token):
try:
base64_decoded = urlsafe_b64decode(token)
except TypeError:
raise UsernameDecryptionException("base64url")
if len(base64_decoded) < AES_BLOCK_SIZE_BYTES:
raise UsernameDecryptionException("initialization_vector")
initialization_vector = base64_decoded[:AES_BLOCK_SIZE_BYTES]
aes_encrypted = base64_decoded[AES_BLOCK_SIZE_BYTES:]
aes_cipher = UsernameCipher._get_aes_cipher(initialization_vector)
decryptor = aes_cipher.decryptor()
unpadder = PKCS7(AES.block_size).unpadder()
try:
decrypted = decryptor.update(aes_encrypted) + decryptor.finalize()
except ValueError:
raise UsernameDecryptionException("aes")
try:
unpadded = unpadder.update(decrypted) + unpadder.finalize()
if len(unpadded) == 0:
raise UsernameDecryptionException("padding")
return unpadded
except ValueError:
            raise UsernameDecryptionException("padding")
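# Example round trip (a sketch, not part of the original module; assumes
# settings.SECRET_KEY is configured):
#   token = UsernameCipher.encrypt(u"alice")  # urlsafe-base64 string, IV prepended
#   UsernameCipher.decrypt(token)             # -> "alice"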
def enable_notifications(user):
"""
Enable notifications for a user.
Currently only used for daily forum digests.
"""
# Calling UserPreference directly because this method is called from a couple of places,
# and it is not clear that user is always the user initiating the request.
UserPreference.objects.get_or_create(
user=user,
key=NOTIFICATION_PREF_KEY,
defaults={
"value": UsernameCipher.encrypt(user.username)
}
)
@require_POST
def ajax_enable(request):
"""
A view that enables notifications for the authenticated user
This view should be invoked by an AJAX POST call. It returns status 204
(no content) or an error. If notifications were already enabled for this
user, this has no effect. Otherwise, a preference is created with the
    unsubscribe token (an encryption of the username) as the value.
"""
if not request.user.is_authenticated():
raise PermissionDenied
enable_notifications(request.user)
return HttpResponse(status=204)
@require_POST
def ajax_disable(request):
"""
A view that disables notifications for the authenticated user
This view should be invoked by an AJAX POST call. It returns status 204
(no content) or an error.
"""
if not request.user.is_authenticated():
raise PermissionDenied
delete_user_preference(request.user, NOTIFICATION_PREF_KEY)
return HttpResponse(status=204)
@require_GET
def ajax_status(request):
"""
A view that retrieves notifications status for the authenticated user.
This view should be invoked by an AJAX GET call. It returns status 200,
with a JSON-formatted payload, or an error.
"""
if not request.user.is_authenticated():
raise PermissionDenied
qs = UserPreference.objects.filter(
user=request.user,
key=NOTIFICATION_PREF_KEY
)
return HttpResponse(json.dumps({"status": len(qs)}), content_type="application/json")
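# Example response (a sketch): the JSON payload is {"status": 1} when a
# notification preference exists for the requesting user, {"status": 0}
# otherwise.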
@require_GET
def set_subscription(request, token, subscribe): # pylint: disable=unused-argument
"""
A view that disables or re-enables notifications for a user who may not be authenticated
This view is meant to be the target of an unsubscribe link. The request
must be a GET, and the `token` parameter must decrypt to a valid username.
    The `subscribe` flag controls whether the view subscribes or unsubscribes
    the user; subscribe=True is used to "undo" accidentally clicking the
    unsubscribe link.
A 405 will be returned if the request method is not GET. A 404 will be
returned if the token parameter does not decrypt to a valid username. On
success, the response will contain a page indicating success.
"""
try:
username = UsernameCipher().decrypt(token.encode())
user = User.objects.get(username=username)
except UnicodeDecodeError:
raise Http404("base64url")
except UsernameDecryptionException as exn:
raise Http404(text_type(exn))
except User.DoesNotExist:
raise Http404("username")
# Calling UserPreference directly because the fact that the user is passed in the token implies
# that it may not match request.user.
if subscribe:
UserPreference.objects.get_or_create(user=user,
key=NOTIFICATION_PREF_KEY,
defaults={
"value": UsernameCipher.encrypt(user.username)
})
return render_to_response("resubscribe.html", {'token': token})
else:
UserPreference.objects.filter(user=user, key=NOTIFICATION_PREF_KEY).delete()
return render_to_response("unsubscribe.html", {'token': token})
| agpl-3.0 |
frewsxcv/AutobahnPython | autobahn/autobahn/wamp/uri.py | 11 | 5339 | ###############################################################################
##
## Copyright (C) 2013-2014 Tavendo GmbH
##
## Licensed under the Apache License, Version 2.0 (the "License");
## you may not use this file except in compliance with the License.
## You may obtain a copy of the License at
##
## http://www.apache.org/licenses/LICENSE-2.0
##
## Unless required by applicable law or agreed to in writing, software
## distributed under the License is distributed on an "AS IS" BASIS,
## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
## See the License for the specific language governing permissions and
## limitations under the License.
##
###############################################################################
import re
import six
# TODO:
# suffix matches
# args + kwargs
# uuid converter
# multiple URI patterns per decorated object
# classes: Pattern, EndpointPattern, ..
class Pattern:
"""
A WAMP URI Pattern.
"""
URI_TARGET_ENDPOINT = 1
URI_TARGET_HANDLER = 2
URI_TARGET_EXCEPTION = 3
URI_TYPE_EXACT = 1
URI_TYPE_PREFIX = 2
URI_TYPE_WILDCARD = 3
_URI_COMPONENT = re.compile(r"^[a-z][a-z0-9_]*$")
_URI_NAMED_COMPONENT = re.compile(r"^<([a-z][a-z0-9_]*)>$")
_URI_NAMED_CONVERTED_COMPONENT = re.compile(r"^<([a-z][a-z0-9_]*):([a-z]*)>$")
def __init__(self, uri, target):
"""
Constructor for WAMP URI pattern.
:param uri: The URI or URI pattern, e.g. `"com.myapp.product.<product:int>.update"`.
:type uri: str
:param target: The target for this pattern: a procedure endpoint (a callable),
an event handler (a callable) or an exception (a class).
"""
assert(type(uri) == six.text_type)
assert(target in [Pattern.URI_TARGET_ENDPOINT,
Pattern.URI_TARGET_HANDLER,
Pattern.URI_TARGET_EXCEPTION])
components = uri.split('.')
pl = []
nc = {}
i = 0
for component in components:
match = Pattern._URI_NAMED_CONVERTED_COMPONENT.match(component)
if match:
ctype = match.groups()[1]
if ctype not in ['string', 'int', 'suffix']:
raise Exception("invalid URI")
if ctype == 'suffix' and i != len(components) - 1:
raise Exception("invalid URI")
name = match.groups()[0]
if name in nc:
raise Exception("invalid URI")
if ctype in ['string', 'suffix']:
nc[name] = str
elif ctype == 'int':
nc[name] = int
else:
# should not arrive here
raise Exception("logic error")
pl.append("(?P<{}>[a-z0-9_]+)".format(name))
continue
match = Pattern._URI_NAMED_COMPONENT.match(component)
if match:
name = match.groups()[0]
if name in nc:
raise Exception("invalid URI")
nc[name] = str
pl.append("(?P<{}>[a-z][a-z0-9_]*)".format(name))
continue
match = Pattern._URI_COMPONENT.match(component)
if match:
pl.append(component)
continue
raise Exception("invalid URI")
if nc:
# URI pattern
self._type = Pattern.URI_TYPE_WILDCARD
p = "^" + "\.".join(pl) + "$"
self._pattern = re.compile(p)
self._names = nc
else:
# exact URI
self._type = Pattern.URI_TYPE_EXACT
self._pattern = None
self._names = None
self._uri = uri
self._target = target
def uri(self):
"""
Returns the original URI (pattern) for this pattern.
:returns str -- The URI (pattern), e.g. `"com.myapp.product.<product:int>.update"`.
"""
return self._uri
def match(self, uri):
"""
Match the given (fully qualified) URI according to this pattern
and return extracted args and kwargs.
:param uri: The URI to match, e.g. `"com.myapp.product.123456.update"`.
:type uri: str
:returns tuple -- A tuple `(args, kwargs)`
"""
args = []
kwargs = {}
if self._type == Pattern.URI_TYPE_EXACT:
return args, kwargs
elif self._type == Pattern.URI_TYPE_WILDCARD:
match = self._pattern.match(uri)
if match:
for key in self._names:
val = match.group(key)
val = self._names[key](val)
kwargs[key] = val
return args, kwargs
else:
raise Exception("no match")
def is_endpoint(self):
"""
Check if this pattern is for a procedure endpoint.
:returns bool -- `True`, iff this pattern is for a procedure endpoint.
"""
return self._target == Pattern.URI_TARGET_ENDPOINT
def is_handler(self):
"""
Check if this pattern is for an event handler.
:returns bool -- `True`, iff this pattern is for an event handler.
"""
return self._target == Pattern.URI_TARGET_HANDLER
def is_exception(self):
"""
Check if this pattern is for an exception.
:returns bool -- `True`, iff this pattern is for an exception.
"""
return self._target == Pattern.URI_TARGET_EXCEPTION
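# Example usage (a sketch, not part of the original module):
#   pat = Pattern(u"com.myapp.product.<product:int>.update",
#                 Pattern.URI_TARGET_ENDPOINT)
#   args, kwargs = pat.match(u"com.myapp.product.123456.update")
#   # args == [], kwargs == {'product': 123456}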
| apache-2.0 |
ybellavance/python-for-android | python3-alpha/python3-src/Doc/includes/dbpickle.py | 100 | 2863 | # Simple example presenting how persistent ID can be used to pickle
# external objects by reference.
import pickle
import sqlite3
from collections import namedtuple
# Simple class representing a record in our database.
MemoRecord = namedtuple("MemoRecord", "key, task")
class DBPickler(pickle.Pickler):
def persistent_id(self, obj):
# Instead of pickling MemoRecord as a regular class instance, we emit a
# persistent ID.
if isinstance(obj, MemoRecord):
# Here, our persistent ID is simply a tuple, containing a tag and a
# key, which refers to a specific record in the database.
return ("MemoRecord", obj.key)
else:
# If obj does not have a persistent ID, return None. This means obj
# needs to be pickled as usual.
return None
class DBUnpickler(pickle.Unpickler):
def __init__(self, file, connection):
super().__init__(file)
self.connection = connection
def persistent_load(self, pid):
# This method is invoked whenever a persistent ID is encountered.
# Here, pid is the tuple returned by DBPickler.
cursor = self.connection.cursor()
type_tag, key_id = pid
if type_tag == "MemoRecord":
# Fetch the referenced record from the database and return it.
cursor.execute("SELECT * FROM memos WHERE key=?", (str(key_id),))
key, task = cursor.fetchone()
return MemoRecord(key, task)
else:
            # Always raise an error if you cannot return the correct object.
# Otherwise, the unpickler will think None is the object referenced
# by the persistent ID.
raise pickle.UnpicklingError("unsupported persistent object")
def main():
import io
import pprint
# Initialize and populate our database.
conn = sqlite3.connect(":memory:")
cursor = conn.cursor()
cursor.execute("CREATE TABLE memos(key INTEGER PRIMARY KEY, task TEXT)")
tasks = (
'give food to fish',
'prepare group meeting',
'fight with a zebra',
)
for task in tasks:
cursor.execute("INSERT INTO memos VALUES(NULL, ?)", (task,))
# Fetch the records to be pickled.
cursor.execute("SELECT * FROM memos")
memos = [MemoRecord(key, task) for key, task in cursor]
# Save the records using our custom DBPickler.
file = io.BytesIO()
DBPickler(file).dump(memos)
print("Pickled records:")
pprint.pprint(memos)
# Update a record, just for good measure.
cursor.execute("UPDATE memos SET task='learn italian' WHERE key=1")
# Load the records from the pickle data stream.
file.seek(0)
memos = DBUnpickler(file, conn).load()
print("Unpickled records:")
pprint.pprint(memos)
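    # Expected output (a sketch; keys come from SQLite's INTEGER PRIMARY KEY,
    # so the first inserted memo gets key=1):
    #   Pickled records:   [MemoRecord(key=1, task='give food to fish'), ...]
    #   Unpickled records: [MemoRecord(key=1, task='learn italian'), ...]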
if __name__ == '__main__':
main()
| apache-2.0 |
davrv93/creed-en-sus-profetas-backend | django_rv_apps/apps/believe_his_prophets_api/views/spirit_prophecy_chapter_view.py | 1 | 2084 | from rest_framework import serializers
from rest_framework import viewsets, status
from rest_framework.response import Response
from rest_framework.decorators import list_route
from rest_framework.exceptions import ParseError
from django_rv_apps.apps.believe_his_prophets.models.spirit_prophecy_chapter import SpiritProphecyChapter
from django_filters import rest_framework as django_filters
from rest_framework import filters
class SpiritProphecyChapterSerializer(serializers.ModelSerializer):
class Meta:
model = SpiritProphecyChapter
fields='__all__'
class SpiritProphecyChapterViewSet(viewsets.ModelViewSet):
queryset = SpiritProphecyChapter.objects.all()
serializer_class = SpiritProphecyChapterSerializer
filter_backends = (filters.SearchFilter, django_filters.DjangoFilterBackend,)
filter_fields = ('id','name','spirit_prophecy__name','chapter')
search_fields = ('spirit_prophecy__name','chapter')
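    # Example query (a sketch; the URL prefix depends on how the viewset is
    # registered with the router):
    #   GET /.../spirit-prophecy-chapters/?search=prophecy&chapter=3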
def update(self, request, pk=None):
try:
instance = self.get_object()
serializer = self.get_serializer(instance, data=request.data)
if serializer.is_valid():
serializer.save()
return Response(serializer.data)
else:
return Response(serializer.errors,
status=status.HTTP_400_BAD_REQUEST)
        except SpiritProphecyChapter.DoesNotExist:
return Response(status=status.HTTP_404_NOT_FOUND)
def perform_destroy(self, instance):
return instance.delete()
@list_route(url_path='destroy', methods=['post'])
def destroy_multiple(self, request):
results = self.get_queryset().filter(id__in=request.data['bulk_id'])
promise_list = []
for i in results:
promise = self.perform_destroy(i)
if promise is not None:
promise_list.append(str(promise))
if len(promise_list) == 0:
return Response(status=status.HTTP_204_NO_CONTENT)
else:
            raise ParseError(', '.join(list(promise_list)))
| apache-2.0 |
komsas/partner-contact | partner_helper/partner.py | 35 | 2586 | # -*- coding: utf-8 -*-
##############################################################################
#
# Author: Sébastien BEAU <sebastien.beau@akretion.com>
# Copyright 2014 Akretion
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import orm
def split_char(char, output_number, size):
words = char.split(' ')
result = []
word = words.pop(0)
for index in range(0, output_number):
result.append(word)
word = ''
while len(words) > 0:
word = words.pop(0)
if len(result[index] + ' %s' % word) > size:
break
else:
result[index] += ' %s' % word
word = ''
return result
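# Example (a sketch): split an overlong street across 3 fields of 10 chars
#   split_char('10 Downing Street London', 3, 10)
#   # -> ['10 Downing', 'Street', 'London']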
class ResPartner(orm.Model):
_inherit = "res.partner"
def _get_split_address(
self, cr, uid, partner, output_number, max_size, context=None):
""" This method allows to get a number of street fields according to
your choice. Default is 2 large fields in Odoo (128 chars).
In some countries you may use 3 or 4 shorter street fields.
example:
res = self.pool['res.partner']._get_split_address(
cr, uid, picking.partner_id, 3, 35, context=context)
address['street'], address['street2'], address['street3'] = res
"""
street = partner.street or ''
street2 = partner.street2 or ''
if len(street) <= max_size and len(street2) <= max_size:
result = ['' for i in range(0, output_number)]
result[0] = street
result[1] = street2
return result
        elif len(street) <= max_size:
return [street] + split_char(street2, output_number - 1, max_size)
else:
return split_char(
'%s %s' % (street, street2), output_number, max_size)
| agpl-3.0 |