| repo_name | path | copies | size | content | license |
|---|---|---|---|---|---|
michaelgallacher/intellij-community | python/helpers/coveragepy/coverage/data.py | 209 | 9188 | """Coverage data for Coverage."""
import os
from coverage.backward import iitems, pickle, sorted # pylint: disable=W0622
from coverage.files import PathAliases
from coverage.misc import file_be_gone
class CoverageData(object):
"""Manages collected coverage data, including file storage.
The data file format is a pickled dict, with these keys:
* collector: a string identifying the collecting software
* lines: a dict mapping filenames to sorted lists of line numbers
executed:
{ 'file1': [17,23,45], 'file2': [1,2,3], ... }
* arcs: a dict mapping filenames to sorted lists of line number pairs:
{ 'file1': [(17,23), (17,25), (25,26)], ... }
"""
def __init__(self, basename=None, collector=None, debug=None):
"""Create a CoverageData.
`basename` is the name of the file to use for storing data.
`collector` is a string describing the coverage measurement software.
`debug` is a `DebugControl` object for writing debug messages.
"""
self.collector = collector or 'unknown'
self.debug = debug
self.use_file = True
# Construct the filename that will be used for data file storage, if we
# ever do any file storage.
self.filename = basename or ".coverage"
self.filename = os.path.abspath(self.filename)
# A map from canonical Python source file name to a dictionary in
# which there's an entry for each line number that has been
# executed:
#
# {
# 'filename1.py': { 12: None, 47: None, ... },
# ...
# }
#
self.lines = {}
# A map from canonical Python source file name to a dictionary with an
# entry for each pair of line numbers forming an arc:
#
# {
# 'filename1.py': { (12,14): None, (47,48): None, ... },
# ...
# }
#
self.arcs = {}
def usefile(self, use_file=True):
"""Set whether or not to use a disk file for data."""
self.use_file = use_file
def read(self):
"""Read coverage data from the coverage data file (if it exists)."""
if self.use_file:
self.lines, self.arcs = self._read_file(self.filename)
else:
self.lines, self.arcs = {}, {}
def write(self, suffix=None):
"""Write the collected coverage data to a file.
`suffix` is a suffix to append to the base file name. This can be used
for multiple or parallel execution, so that many coverage data files
can exist simultaneously. A dot will be used to join the base name and
the suffix.
"""
if self.use_file:
filename = self.filename
if suffix:
filename += "." + suffix
self.write_file(filename)
def erase(self):
"""Erase the data, both in this object, and from its file storage."""
if self.use_file:
if self.filename:
file_be_gone(self.filename)
self.lines = {}
self.arcs = {}
def line_data(self):
"""Return the map from filenames to lists of line numbers executed."""
return dict(
[(f, sorted(lmap.keys())) for f, lmap in iitems(self.lines)]
)
def arc_data(self):
"""Return the map from filenames to lists of line number pairs."""
return dict(
[(f, sorted(amap.keys())) for f, amap in iitems(self.arcs)]
)
def write_file(self, filename):
"""Write the coverage data to `filename`."""
# Create the file data.
data = {}
data['lines'] = self.line_data()
arcs = self.arc_data()
if arcs:
data['arcs'] = arcs
if self.collector:
data['collector'] = self.collector
if self.debug and self.debug.should('dataio'):
self.debug.write("Writing data to %r" % (filename,))
# Write the pickle to the file.
fdata = open(filename, 'wb')
try:
pickle.dump(data, fdata, 2)
finally:
fdata.close()
def read_file(self, filename):
"""Read the coverage data from `filename`."""
self.lines, self.arcs = self._read_file(filename)
def raw_data(self, filename):
"""Return the raw pickled data from `filename`."""
if self.debug and self.debug.should('dataio'):
self.debug.write("Reading data from %r" % (filename,))
fdata = open(filename, 'rb')
try:
data = pickle.load(fdata)
finally:
fdata.close()
return data
def _read_file(self, filename):
"""Return the stored coverage data from the given file.
Returns two values, suitable for assigning to `self.lines` and
`self.arcs`.
"""
lines = {}
arcs = {}
try:
data = self.raw_data(filename)
if isinstance(data, dict):
# Unpack the 'lines' item.
lines = dict([
(f, dict.fromkeys(linenos, None))
for f, linenos in iitems(data.get('lines', {}))
])
# Unpack the 'arcs' item.
arcs = dict([
(f, dict.fromkeys(arcpairs, None))
for f, arcpairs in iitems(data.get('arcs', {}))
])
except Exception:
pass
return lines, arcs
def combine_parallel_data(self, aliases=None):
"""Combine a number of data files together.
Treat `self.filename` as a file prefix, and combine the data from all
of the data files starting with that prefix plus a dot.
If `aliases` is provided, it's a `PathAliases` object that is used to
re-map paths to match the local machine's.
"""
aliases = aliases or PathAliases()
data_dir, local = os.path.split(self.filename)
localdot = local + '.'
for f in os.listdir(data_dir or '.'):
if f.startswith(localdot):
full_path = os.path.join(data_dir, f)
new_lines, new_arcs = self._read_file(full_path)
for filename, file_data in iitems(new_lines):
filename = aliases.map(filename)
self.lines.setdefault(filename, {}).update(file_data)
for filename, file_data in iitems(new_arcs):
filename = aliases.map(filename)
self.arcs.setdefault(filename, {}).update(file_data)
if f != local:
os.remove(full_path)
def add_line_data(self, line_data):
"""Add executed line data.
`line_data` is { filename: { lineno: None, ... }, ...}
"""
for filename, linenos in iitems(line_data):
self.lines.setdefault(filename, {}).update(linenos)
def add_arc_data(self, arc_data):
"""Add measured arc data.
`arc_data` is { filename: { (l1,l2): None, ... }, ...}
"""
for filename, arcs in iitems(arc_data):
self.arcs.setdefault(filename, {}).update(arcs)
def touch_file(self, filename):
"""Ensure that `filename` appears in the data, empty if needed."""
self.lines.setdefault(filename, {})
def measured_files(self):
"""A list of all files that had been measured."""
return list(self.lines.keys())
def executed_lines(self, filename):
"""A map containing all the line numbers executed in `filename`.
If `filename` hasn't been collected at all (because it wasn't executed)
then return an empty map.
"""
return self.lines.get(filename) or {}
def executed_arcs(self, filename):
"""A map containing all the arcs executed in `filename`."""
return self.arcs.get(filename) or {}
def add_to_hash(self, filename, hasher):
"""Contribute `filename`'s data to the Md5Hash `hasher`."""
hasher.update(self.executed_lines(filename))
hasher.update(self.executed_arcs(filename))
def summary(self, fullpath=False):
"""Return a dict summarizing the coverage data.
Keys are based on the filenames, and values are the number of executed
lines. If `fullpath` is true, then the keys are the full pathnames of
the files, otherwise they are the basenames of the files.
"""
summ = {}
if fullpath:
filename_fn = lambda f: f
else:
filename_fn = os.path.basename
for filename, lines in iitems(self.lines):
summ[filename_fn(filename)] = len(lines)
return summ
def has_arcs(self):
"""Does this data have arcs?"""
return bool(self.arcs)
if __name__ == '__main__':
# Ad-hoc: show the raw data in a data file.
import pprint, sys
covdata = CoverageData()
if sys.argv[1:]:
fname = sys.argv[1]
else:
fname = covdata.filename
pprint.pprint(covdata.raw_data(fname))
| apache-2.0 |
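The `CoverageData` class in the row above stores measurements as a pickled dict of line and arc data. As a quick, hedged illustration (not part of the dataset row), here is a minimal sketch of driving its public API directly; the import path and the file/line numbers are assumptions made for the example:

```python
from coverage.data import CoverageData  # assumed import path for this module

# Record a few executed lines for a hypothetical file, write them to disk,
# then read the data back and summarize it.
covdata = CoverageData(basename=".coverage.example")
covdata.add_line_data({'prog.py': {1: None, 2: None, 5: None}})
covdata.write()                       # pickles {'lines': ..., 'collector': ...}

fresh = CoverageData(basename=".coverage.example")
fresh.read()
print(fresh.measured_files())         # ['prog.py']
print(fresh.summary())                # {'prog.py': 3}
```

For parallel runs, `write(suffix=...)` produces files such as `.coverage.example.<suffix>`, which `combine_parallel_data()` later merges back into a single data set.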
MoisesTedeschi/python | Scripts-Python/Modulos-Diversos/python-com-scrapy/Lib/site-packages/pip/_vendor/chardet/gb2312freq.py | 343 | 20715 | ######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Communicator client code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
# GB2312 most frequently used character table
#
# Char to FreqOrder table, from hz6763
# 512 --> 0.79 -- 0.79
# 1024 --> 0.92 -- 0.13
# 2048 --> 0.98 -- 0.06
# 6768 --> 1.00 -- 0.02
#
# Ideal Distribution Ratio = 0.79135/(1-0.79135) = 3.79
# Random Distribution Ratio = 512 / (3755 - 512) = 0.157
#
# Typical Distribution Ratio about 25% of Ideal one, still much higher than RDR
GB2312_TYPICAL_DISTRIBUTION_RATIO = 0.9
GB2312_TABLE_SIZE = 3760
GB2312_CHAR_TO_FREQ_ORDER = (
1671, 749,1443,2364,3924,3807,2330,3921,1704,3463,2691,1511,1515, 572,3191,2205,
2361, 224,2558, 479,1711, 963,3162, 440,4060,1905,2966,2947,3580,2647,3961,3842,
2204, 869,4207, 970,2678,5626,2944,2956,1479,4048, 514,3595, 588,1346,2820,3409,
249,4088,1746,1873,2047,1774, 581,1813, 358,1174,3590,1014,1561,4844,2245, 670,
1636,3112, 889,1286, 953, 556,2327,3060,1290,3141, 613, 185,3477,1367, 850,3820,
1715,2428,2642,2303,2732,3041,2562,2648,3566,3946,1349, 388,3098,2091,1360,3585,
152,1687,1539, 738,1559, 59,1232,2925,2267,1388,1249,1741,1679,2960, 151,1566,
1125,1352,4271, 924,4296, 385,3166,4459, 310,1245,2850, 70,3285,2729,3534,3575,
2398,3298,3466,1960,2265, 217,3647, 864,1909,2084,4401,2773,1010,3269,5152, 853,
3051,3121,1244,4251,1895, 364,1499,1540,2313,1180,3655,2268, 562, 715,2417,3061,
544, 336,3768,2380,1752,4075, 950, 280,2425,4382, 183,2759,3272, 333,4297,2155,
1688,2356,1444,1039,4540, 736,1177,3349,2443,2368,2144,2225, 565, 196,1482,3406,
927,1335,4147, 692, 878,1311,1653,3911,3622,1378,4200,1840,2969,3149,2126,1816,
2534,1546,2393,2760, 737,2494, 13, 447, 245,2747, 38,2765,2129,2589,1079, 606,
360, 471,3755,2890, 404, 848, 699,1785,1236, 370,2221,1023,3746,2074,2026,2023,
2388,1581,2119, 812,1141,3091,2536,1519, 804,2053, 406,1596,1090, 784, 548,4414,
1806,2264,2936,1100, 343,4114,5096, 622,3358, 743,3668,1510,1626,5020,3567,2513,
3195,4115,5627,2489,2991, 24,2065,2697,1087,2719, 48,1634, 315, 68, 985,2052,
198,2239,1347,1107,1439, 597,2366,2172, 871,3307, 919,2487,2790,1867, 236,2570,
1413,3794, 906,3365,3381,1701,1982,1818,1524,2924,1205, 616,2586,2072,2004, 575,
253,3099, 32,1365,1182, 197,1714,2454,1201, 554,3388,3224,2748, 756,2587, 250,
2567,1507,1517,3529,1922,2761,2337,3416,1961,1677,2452,2238,3153, 615, 911,1506,
1474,2495,1265,1906,2749,3756,3280,2161, 898,2714,1759,3450,2243,2444, 563, 26,
3286,2266,3769,3344,2707,3677, 611,1402, 531,1028,2871,4548,1375, 261,2948, 835,
1190,4134, 353, 840,2684,1900,3082,1435,2109,1207,1674, 329,1872,2781,4055,2686,
2104, 608,3318,2423,2957,2768,1108,3739,3512,3271,3985,2203,1771,3520,1418,2054,
1681,1153, 225,1627,2929, 162,2050,2511,3687,1954, 124,1859,2431,1684,3032,2894,
585,4805,3969,2869,2704,2088,2032,2095,3656,2635,4362,2209, 256, 518,2042,2105,
3777,3657, 643,2298,1148,1779, 190, 989,3544, 414, 11,2135,2063,2979,1471, 403,
3678, 126, 770,1563, 671,2499,3216,2877, 600,1179, 307,2805,4937,1268,1297,2694,
252,4032,1448,1494,1331,1394, 127,2256, 222,1647,1035,1481,3056,1915,1048, 873,
3651, 210, 33,1608,2516, 200,1520, 415, 102, 0,3389,1287, 817, 91,3299,2940,
836,1814, 549,2197,1396,1669,2987,3582,2297,2848,4528,1070, 687, 20,1819, 121,
1552,1364,1461,1968,2617,3540,2824,2083, 177, 948,4938,2291, 110,4549,2066, 648,
3359,1755,2110,2114,4642,4845,1693,3937,3308,1257,1869,2123, 208,1804,3159,2992,
2531,2549,3361,2418,1350,2347,2800,2568,1291,2036,2680, 72, 842,1990, 212,1233,
1154,1586, 75,2027,3410,4900,1823,1337,2710,2676, 728,2810,1522,3026,4995, 157,
755,1050,4022, 710, 785,1936,2194,2085,1406,2777,2400, 150,1250,4049,1206, 807,
1910, 534, 529,3309,1721,1660, 274, 39,2827, 661,2670,1578, 925,3248,3815,1094,
4278,4901,4252, 41,1150,3747,2572,2227,4501,3658,4902,3813,3357,3617,2884,2258,
887, 538,4187,3199,1294,2439,3042,2329,2343,2497,1255, 107, 543,1527, 521,3478,
3568, 194,5062, 15, 961,3870,1241,1192,2664, 66,5215,3260,2111,1295,1127,2152,
3805,4135, 901,1164,1976, 398,1278, 530,1460, 748, 904,1054,1966,1426, 53,2909,
509, 523,2279,1534, 536,1019, 239,1685, 460,2353, 673,1065,2401,3600,4298,2272,
1272,2363, 284,1753,3679,4064,1695, 81, 815,2677,2757,2731,1386, 859, 500,4221,
2190,2566, 757,1006,2519,2068,1166,1455, 337,2654,3203,1863,1682,1914,3025,1252,
1409,1366, 847, 714,2834,2038,3209, 964,2970,1901, 885,2553,1078,1756,3049, 301,
1572,3326, 688,2130,1996,2429,1805,1648,2930,3421,2750,3652,3088, 262,1158,1254,
389,1641,1812, 526,1719, 923,2073,1073,1902, 468, 489,4625,1140, 857,2375,3070,
3319,2863, 380, 116,1328,2693,1161,2244, 273,1212,1884,2769,3011,1775,1142, 461,
3066,1200,2147,2212, 790, 702,2695,4222,1601,1058, 434,2338,5153,3640, 67,2360,
4099,2502, 618,3472,1329, 416,1132, 830,2782,1807,2653,3211,3510,1662, 192,2124,
296,3979,1739,1611,3684, 23, 118, 324, 446,1239,1225, 293,2520,3814,3795,2535,
3116, 17,1074, 467,2692,2201, 387,2922, 45,1326,3055,1645,3659,2817, 958, 243,
1903,2320,1339,2825,1784,3289, 356, 576, 865,2315,2381,3377,3916,1088,3122,1713,
1655, 935, 628,4689,1034,1327, 441, 800, 720, 894,1979,2183,1528,5289,2702,1071,
4046,3572,2399,1571,3281, 79, 761,1103, 327, 134, 758,1899,1371,1615, 879, 442,
215,2605,2579, 173,2048,2485,1057,2975,3317,1097,2253,3801,4263,1403,1650,2946,
814,4968,3487,1548,2644,1567,1285, 2, 295,2636, 97, 946,3576, 832, 141,4257,
3273, 760,3821,3521,3156,2607, 949,1024,1733,1516,1803,1920,2125,2283,2665,3180,
1501,2064,3560,2171,1592, 803,3518,1416, 732,3897,4258,1363,1362,2458, 119,1427,
602,1525,2608,1605,1639,3175, 694,3064, 10, 465, 76,2000,4846,4208, 444,3781,
1619,3353,2206,1273,3796, 740,2483, 320,1723,2377,3660,2619,1359,1137,1762,1724,
2345,2842,1850,1862, 912, 821,1866, 612,2625,1735,2573,3369,1093, 844, 89, 937,
930,1424,3564,2413,2972,1004,3046,3019,2011, 711,3171,1452,4178, 428, 801,1943,
432, 445,2811, 206,4136,1472, 730, 349, 73, 397,2802,2547, 998,1637,1167, 789,
396,3217, 154,1218, 716,1120,1780,2819,4826,1931,3334,3762,2139,1215,2627, 552,
3664,3628,3232,1405,2383,3111,1356,2652,3577,3320,3101,1703, 640,1045,1370,1246,
4996, 371,1575,2436,1621,2210, 984,4033,1734,2638, 16,4529, 663,2755,3255,1451,
3917,2257,1253,1955,2234,1263,2951, 214,1229, 617, 485, 359,1831,1969, 473,2310,
750,2058, 165, 80,2864,2419, 361,4344,2416,2479,1134, 796,3726,1266,2943, 860,
2715, 938, 390,2734,1313,1384, 248, 202, 877,1064,2854, 522,3907, 279,1602, 297,
2357, 395,3740, 137,2075, 944,4089,2584,1267,3802, 62,1533,2285, 178, 176, 780,
2440, 201,3707, 590, 478,1560,4354,2117,1075, 30, 74,4643,4004,1635,1441,2745,
776,2596, 238,1077,1692,1912,2844, 605, 499,1742,3947, 241,3053, 980,1749, 936,
2640,4511,2582, 515,1543,2162,5322,2892,2993, 890,2148,1924, 665,1827,3581,1032,
968,3163, 339,1044,1896, 270, 583,1791,1720,4367,1194,3488,3669, 43,2523,1657,
163,2167, 290,1209,1622,3378, 550, 634,2508,2510, 695,2634,2384,2512,1476,1414,
220,1469,2341,2138,2852,3183,2900,4939,2865,3502,1211,3680, 854,3227,1299,2976,
3172, 186,2998,1459, 443,1067,3251,1495, 321,1932,3054, 909, 753,1410,1828, 436,
2441,1119,1587,3164,2186,1258, 227, 231,1425,1890,3200,3942, 247, 959, 725,5254,
2741, 577,2158,2079, 929, 120, 174, 838,2813, 591,1115, 417,2024, 40,3240,1536,
1037, 291,4151,2354, 632,1298,2406,2500,3535,1825,1846,3451, 205,1171, 345,4238,
18,1163, 811, 685,2208,1217, 425,1312,1508,1175,4308,2552,1033, 587,1381,3059,
2984,3482, 340,1316,4023,3972, 792,3176, 519, 777,4690, 918, 933,4130,2981,3741,
90,3360,2911,2200,5184,4550, 609,3079,2030, 272,3379,2736, 363,3881,1130,1447,
286, 779, 357,1169,3350,3137,1630,1220,2687,2391, 747,1277,3688,2618,2682,2601,
1156,3196,5290,4034,3102,1689,3596,3128, 874, 219,2783, 798, 508,1843,2461, 269,
1658,1776,1392,1913,2983,3287,2866,2159,2372, 829,4076, 46,4253,2873,1889,1894,
915,1834,1631,2181,2318, 298, 664,2818,3555,2735, 954,3228,3117, 527,3511,2173,
681,2712,3033,2247,2346,3467,1652, 155,2164,3382, 113,1994, 450, 899, 494, 994,
1237,2958,1875,2336,1926,3727, 545,1577,1550, 633,3473, 204,1305,3072,2410,1956,
2471, 707,2134, 841,2195,2196,2663,3843,1026,4940, 990,3252,4997, 368,1092, 437,
3212,3258,1933,1829, 675,2977,2893, 412, 943,3723,4644,3294,3283,2230,2373,5154,
2389,2241,2661,2323,1404,2524, 593, 787, 677,3008,1275,2059, 438,2709,2609,2240,
2269,2246,1446, 36,1568,1373,3892,1574,2301,1456,3962, 693,2276,5216,2035,1143,
2720,1919,1797,1811,2763,4137,2597,1830,1699,1488,1198,2090, 424,1694, 312,3634,
3390,4179,3335,2252,1214, 561,1059,3243,2295,2561, 975,5155,2321,2751,3772, 472,
1537,3282,3398,1047,2077,2348,2878,1323,3340,3076, 690,2906, 51, 369, 170,3541,
1060,2187,2688,3670,2541,1083,1683, 928,3918, 459, 109,4427, 599,3744,4286, 143,
2101,2730,2490, 82,1588,3036,2121, 281,1860, 477,4035,1238,2812,3020,2716,3312,
1530,2188,2055,1317, 843, 636,1808,1173,3495, 649, 181,1002, 147,3641,1159,2414,
3750,2289,2795, 813,3123,2610,1136,4368, 5,3391,4541,2174, 420, 429,1728, 754,
1228,2115,2219, 347,2223,2733, 735,1518,3003,2355,3134,1764,3948,3329,1888,2424,
1001,1234,1972,3321,3363,1672,1021,1450,1584, 226, 765, 655,2526,3404,3244,2302,
3665, 731, 594,2184, 319,1576, 621, 658,2656,4299,2099,3864,1279,2071,2598,2739,
795,3086,3699,3908,1707,2352,2402,1382,3136,2475,1465,4847,3496,3865,1085,3004,
2591,1084, 213,2287,1963,3565,2250, 822, 793,4574,3187,1772,1789,3050, 595,1484,
1959,2770,1080,2650, 456, 422,2996, 940,3322,4328,4345,3092,2742, 965,2784, 739,
4124, 952,1358,2498,2949,2565, 332,2698,2378, 660,2260,2473,4194,3856,2919, 535,
1260,2651,1208,1428,1300,1949,1303,2942, 433,2455,2450,1251,1946, 614,1269, 641,
1306,1810,2737,3078,2912, 564,2365,1419,1415,1497,4460,2367,2185,1379,3005,1307,
3218,2175,1897,3063, 682,1157,4040,4005,1712,1160,1941,1399, 394, 402,2952,1573,
1151,2986,2404, 862, 299,2033,1489,3006, 346, 171,2886,3401,1726,2932, 168,2533,
47,2507,1030,3735,1145,3370,1395,1318,1579,3609,4560,2857,4116,1457,2529,1965,
504,1036,2690,2988,2405, 745,5871, 849,2397,2056,3081, 863,2359,3857,2096, 99,
1397,1769,2300,4428,1643,3455,1978,1757,3718,1440, 35,4879,3742,1296,4228,2280,
160,5063,1599,2013, 166, 520,3479,1646,3345,3012, 490,1937,1545,1264,2182,2505,
1096,1188,1369,1436,2421,1667,2792,2460,1270,2122, 727,3167,2143, 806,1706,1012,
1800,3037, 960,2218,1882, 805, 139,2456,1139,1521, 851,1052,3093,3089, 342,2039,
744,5097,1468,1502,1585,2087, 223, 939, 326,2140,2577, 892,2481,1623,4077, 982,
3708, 135,2131, 87,2503,3114,2326,1106, 876,1616, 547,2997,2831,2093,3441,4530,
4314, 9,3256,4229,4148, 659,1462,1986,1710,2046,2913,2231,4090,4880,5255,3392,
3274,1368,3689,4645,1477, 705,3384,3635,1068,1529,2941,1458,3782,1509, 100,1656,
2548, 718,2339, 408,1590,2780,3548,1838,4117,3719,1345,3530, 717,3442,2778,3220,
2898,1892,4590,3614,3371,2043,1998,1224,3483, 891, 635, 584,2559,3355, 733,1766,
1729,1172,3789,1891,2307, 781,2982,2271,1957,1580,5773,2633,2005,4195,3097,1535,
3213,1189,1934,5693,3262, 586,3118,1324,1598, 517,1564,2217,1868,1893,4445,3728,
2703,3139,1526,1787,1992,3882,2875,1549,1199,1056,2224,1904,2711,5098,4287, 338,
1993,3129,3489,2689,1809,2815,1997, 957,1855,3898,2550,3275,3057,1105,1319, 627,
1505,1911,1883,3526, 698,3629,3456,1833,1431, 746, 77,1261,2017,2296,1977,1885,
125,1334,1600, 525,1798,1109,2222,1470,1945, 559,2236,1186,3443,2476,1929,1411,
2411,3135,1777,3372,2621,1841,1613,3229, 668,1430,1839,2643,2916, 195,1989,2671,
2358,1387, 629,3205,2293,5256,4439, 123,1310, 888,1879,4300,3021,3605,1003,1162,
3192,2910,2010, 140,2395,2859, 55,1082,2012,2901, 662, 419,2081,1438, 680,2774,
4654,3912,1620,1731,1625,5035,4065,2328, 512,1344, 802,5443,2163,2311,2537, 524,
3399, 98,1155,2103,1918,2606,3925,2816,1393,2465,1504,3773,2177,3963,1478,4346,
180,1113,4655,3461,2028,1698, 833,2696,1235,1322,1594,4408,3623,3013,3225,2040,
3022, 541,2881, 607,3632,2029,1665,1219, 639,1385,1686,1099,2803,3231,1938,3188,
2858, 427, 676,2772,1168,2025, 454,3253,2486,3556, 230,1950, 580, 791,1991,1280,
1086,1974,2034, 630, 257,3338,2788,4903,1017, 86,4790, 966,2789,1995,1696,1131,
259,3095,4188,1308, 179,1463,5257, 289,4107,1248, 42,3413,1725,2288, 896,1947,
774,4474,4254, 604,3430,4264, 392,2514,2588, 452, 237,1408,3018, 988,4531,1970,
3034,3310, 540,2370,1562,1288,2990, 502,4765,1147, 4,1853,2708, 207, 294,2814,
4078,2902,2509, 684, 34,3105,3532,2551, 644, 709,2801,2344, 573,1727,3573,3557,
2021,1081,3100,4315,2100,3681, 199,2263,1837,2385, 146,3484,1195,2776,3949, 997,
1939,3973,1008,1091,1202,1962,1847,1149,4209,5444,1076, 493, 117,5400,2521, 972,
1490,2934,1796,4542,2374,1512,2933,2657, 413,2888,1135,2762,2314,2156,1355,2369,
766,2007,2527,2170,3124,2491,2593,2632,4757,2437, 234,3125,3591,1898,1750,1376,
1942,3468,3138, 570,2127,2145,3276,4131, 962, 132,1445,4196, 19, 941,3624,3480,
3366,1973,1374,4461,3431,2629, 283,2415,2275, 808,2887,3620,2112,2563,1353,3610,
955,1089,3103,1053, 96, 88,4097, 823,3808,1583, 399, 292,4091,3313, 421,1128,
642,4006, 903,2539,1877,2082, 596, 29,4066,1790, 722,2157, 130, 995,1569, 769,
1485, 464, 513,2213, 288,1923,1101,2453,4316, 133, 486,2445, 50, 625, 487,2207,
57, 423, 481,2962, 159,3729,1558, 491, 303, 482, 501, 240,2837, 112,3648,2392,
1783, 362, 8,3433,3422, 610,2793,3277,1390,1284,1654, 21,3823, 734, 367, 623,
193, 287, 374,1009,1483, 816, 476, 313,2255,2340,1262,2150,2899,1146,2581, 782,
2116,1659,2018,1880, 255,3586,3314,1110,2867,2137,2564, 986,2767,5185,2006, 650,
158, 926, 762, 881,3157,2717,2362,3587, 306,3690,3245,1542,3077,2427,1691,2478,
2118,2985,3490,2438, 539,2305, 983, 129,1754, 355,4201,2386, 827,2923, 104,1773,
2838,2771, 411,2905,3919, 376, 767, 122,1114, 828,2422,1817,3506, 266,3460,1007,
1609,4998, 945,2612,4429,2274, 726,1247,1964,2914,2199,2070,4002,4108, 657,3323,
1422, 579, 455,2764,4737,1222,2895,1670, 824,1223,1487,2525, 558, 861,3080, 598,
2659,2515,1967, 752,2583,2376,2214,4180, 977, 704,2464,4999,2622,4109,1210,2961,
819,1541, 142,2284, 44, 418, 457,1126,3730,4347,4626,1644,1876,3671,1864, 302,
1063,5694, 624, 723,1984,3745,1314,1676,2488,1610,1449,3558,3569,2166,2098, 409,
1011,2325,3704,2306, 818,1732,1383,1824,1844,3757, 999,2705,3497,1216,1423,2683,
2426,2954,2501,2726,2229,1475,2554,5064,1971,1794,1666,2014,1343, 783, 724, 191,
2434,1354,2220,5065,1763,2752,2472,4152, 131, 175,2885,3434, 92,1466,4920,2616,
3871,3872,3866, 128,1551,1632, 669,1854,3682,4691,4125,1230, 188,2973,3290,1302,
1213, 560,3266, 917, 763,3909,3249,1760, 868,1958, 764,1782,2097, 145,2277,3774,
4462, 64,1491,3062, 971,2132,3606,2442, 221,1226,1617, 218, 323,1185,3207,3147,
571, 619,1473,1005,1744,2281, 449,1887,2396,3685, 275, 375,3816,1743,3844,3731,
845,1983,2350,4210,1377, 773, 967,3499,3052,3743,2725,4007,1697,1022,3943,1464,
3264,2855,2722,1952,1029,2839,2467, 84,4383,2215, 820,1391,2015,2448,3672, 377,
1948,2168, 797,2545,3536,2578,2645, 94,2874,1678, 405,1259,3071, 771, 546,1315,
470,1243,3083, 895,2468, 981, 969,2037, 846,4181, 653,1276,2928, 14,2594, 557,
3007,2474, 156, 902,1338,1740,2574, 537,2518, 973,2282,2216,2433,1928, 138,2903,
1293,2631,1612, 646,3457, 839,2935, 111, 496,2191,2847, 589,3186, 149,3994,2060,
4031,2641,4067,3145,1870, 37,3597,2136,1025,2051,3009,3383,3549,1121,1016,3261,
1301, 251,2446,2599,2153, 872,3246, 637, 334,3705, 831, 884, 921,3065,3140,4092,
2198,1944, 246,2964, 108,2045,1152,1921,2308,1031, 203,3173,4170,1907,3890, 810,
1401,2003,1690, 506, 647,1242,2828,1761,1649,3208,2249,1589,3709,2931,5156,1708,
498, 666,2613, 834,3817,1231, 184,2851,1124, 883,3197,2261,3710,1765,1553,2658,
1178,2639,2351, 93,1193, 942,2538,2141,4402, 235,1821, 870,1591,2192,1709,1871,
3341,1618,4126,2595,2334, 603, 651, 69, 701, 268,2662,3411,2555,1380,1606, 503,
448, 254,2371,2646, 574,1187,2309,1770, 322,2235,1292,1801, 305, 566,1133, 229,
2067,2057, 706, 167, 483,2002,2672,3295,1820,3561,3067, 316, 378,2746,3452,1112,
136,1981, 507,1651,2917,1117, 285,4591, 182,2580,3522,1304, 335,3303,1835,2504,
1795,1792,2248, 674,1018,2106,2449,1857,2292,2845, 976,3047,1781,2600,2727,1389,
1281, 52,3152, 153, 265,3950, 672,3485,3951,4463, 430,1183, 365, 278,2169, 27,
1407,1336,2304, 209,1340,1730,2202,1852,2403,2883, 979,1737,1062, 631,2829,2542,
3876,2592, 825,2086,2226,3048,3625, 352,1417,3724, 542, 991, 431,1351,3938,1861,
2294, 826,1361,2927,3142,3503,1738, 463,2462,2723, 582,1916,1595,2808, 400,3845,
3891,2868,3621,2254, 58,2492,1123, 910,2160,2614,1372,1603,1196,1072,3385,1700,
3267,1980, 696, 480,2430, 920, 799,1570,2920,1951,2041,4047,2540,1321,4223,2469,
3562,2228,1271,2602, 401,2833,3351,2575,5157, 907,2312,1256, 410, 263,3507,1582,
996, 678,1849,2316,1480, 908,3545,2237, 703,2322, 667,1826,2849,1531,2604,2999,
2407,3146,2151,2630,1786,3711, 469,3542, 497,3899,2409, 858, 837,4446,3393,1274,
786, 620,1845,2001,3311, 484, 308,3367,1204,1815,3691,2332,1532,2557,1842,2020,
2724,1927,2333,4440, 567, 22,1673,2728,4475,1987,1858,1144,1597, 101,1832,3601,
12, 974,3783,4391, 951,1412, 1,3720, 453,4608,4041, 528,1041,1027,3230,2628,
1129, 875,1051,3291,1203,2262,1069,2860,2799,2149,2615,3278, 144,1758,3040, 31,
475,1680, 366,2685,3184, 311,1642,4008,2466,5036,1593,1493,2809, 216,1420,1668,
233, 304,2128,3284, 232,1429,1768,1040,2008,3407,2740,2967,2543, 242,2133, 778,
1565,2022,2620, 505,2189,2756,1098,2273, 372,1614, 708, 553,2846,2094,2278, 169,
3626,2835,4161, 228,2674,3165, 809,1454,1309, 466,1705,1095, 900,3423, 880,2667,
3751,5258,2317,3109,2571,4317,2766,1503,1342, 866,4447,1118, 63,2076, 314,1881,
1348,1061, 172, 978,3515,1747, 532, 511,3970, 6, 601, 905,2699,3300,1751, 276,
1467,3725,2668, 65,4239,2544,2779,2556,1604, 578,2451,1802, 992,2331,2624,1320,
3446, 713,1513,1013, 103,2786,2447,1661, 886,1702, 916, 654,3574,2031,1556, 751,
2178,2821,2179,1498,1538,2176, 271, 914,2251,2080,1325, 638,1953,2937,3877,2432,
2754, 95,3265,1716, 260,1227,4083, 775, 106,1357,3254, 426,1607, 555,2480, 772,
1985, 244,2546, 474, 495,1046,2611,1851,2061, 71,2089,1675,2590, 742,3758,2843,
3222,1433, 267,2180,2576,2826,2233,2092,3913,2435, 956,1745,3075, 856,2113,1116,
451, 3,1988,2896,1398, 993,2463,1878,2049,1341,2718,2721,2870,2108, 712,2904,
4363,2753,2324, 277,2872,2349,2649, 384, 987, 435, 691,3000, 922, 164,3939, 652,
1500,1184,4153,2482,3373,2165,4848,2335,3775,3508,3154,2806,2830,1554,2102,1664,
2530,1434,2408, 893,1547,2623,3447,2832,2242,2532,3169,2856,3223,2078, 49,3770,
3469, 462, 318, 656,2259,3250,3069, 679,1629,2758, 344,1138,1104,3120,1836,1283,
3115,2154,1437,4448, 934, 759,1999, 794,2862,1038, 533,2560,1722,2342, 855,2626,
1197,1663,4476,3127, 85,4240,2528, 25,1111,1181,3673, 407,3470,4561,2679,2713,
768,1925,2841,3986,1544,1165, 932, 373,1240,2146,1930,2673, 721,4766, 354,4333,
391,2963, 187, 61,3364,1442,1102, 330,1940,1767, 341,3809,4118, 393,2496,2062,
2211, 105, 331, 300, 439, 913,1332, 626, 379,3304,1557, 328, 689,3952, 309,1555,
931, 317,2517,3027, 325, 569, 686,2107,3084, 60,1042,1333,2794, 264,3177,4014,
1628, 258,3712, 7,4464,1176,1043,1778, 683, 114,1975, 78,1492, 383,1886, 510,
386, 645,5291,2891,2069,3305,4138,3867,2939,2603,2493,1935,1066,1848,3588,1015,
1282,1289,4609, 697,1453,3044,2666,3611,1856,2412, 54, 719,1330, 568,3778,2459,
1748, 788, 492, 551,1191,1000, 488,3394,3763, 282,1799, 348,2016,1523,3155,2390,
1049, 382,2019,1788,1170, 729,2968,3523, 897,3926,2785,2938,3292, 350,2319,3238,
1718,1717,2655,3453,3143,4465, 161,2889,2980,2009,1421, 56,1908,1640,2387,2232,
1917,1874,2477,4921, 148, 83,3438, 592,4245,2882,1822,1055, 741, 115,1496,1624,
381,1638,4592,1020, 516,3214, 458, 947,4575,1432, 211,1514,2926,1865,2142, 189,
852,1221,1400,1486, 882,2299,4036, 351, 28,1122, 700,6479,6480,6481,6482,6483, #last 512
)
| gpl-3.0 |
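The ratio figures quoted in the comment block at the top of that table can be reproduced in a couple of lines; this is only a restatement of the numbers already given there, not code taken from chardet itself:

```python
# 79.135% of observed characters fall within the 512 most frequent ones
# (per the table's header comments), giving the ideal distribution ratio;
# the random ratio assumes 3755 frequently-used GB2312 characters.
ideal_ratio = 0.79135 / (1 - 0.79135)   # ~= 3.79
random_ratio = 512 / (3755 - 512)       # ~= 0.157, as quoted above
# GB2312_TYPICAL_DISTRIBUTION_RATIO (0.9) is roughly 25% of ideal_ratio.
```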
mozilla/make.mozilla.org | vendor-local/lib/python/amqplib/client_0_8/connection.py | 22 | 26968 | """
AMQP 0-8 Connections
"""
# Copyright (C) 2007-2008 Barry Pederson <bp@barryp.org>
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301
import logging
from abstract_channel import AbstractChannel
from channel import Channel
from exceptions import *
from method_framing import MethodReader, MethodWriter
from serialization import AMQPReader, AMQPWriter
from transport import create_transport
__all__ = [
'Connection',
]
#
# Client property info that gets sent to the server on connection startup
#
LIBRARY_PROPERTIES = {
'library': 'Python amqplib',
'library_version': '1.0.2',
}
AMQP_LOGGER = logging.getLogger('amqplib')
class Connection(AbstractChannel):
"""
The connection class provides methods for a client to establish a
network connection to a server, and for both peers to operate the
connection thereafter.
GRAMMAR:
connection = open-connection *use-connection close-connection
open-connection = C:protocol-header
S:START C:START-OK
*challenge
S:TUNE C:TUNE-OK
C:OPEN S:OPEN-OK | S:REDIRECT
challenge = S:SECURE C:SECURE-OK
use-connection = *channel
close-connection = C:CLOSE S:CLOSE-OK
/ S:CLOSE C:CLOSE-OK
"""
def __init__(self,
host='localhost',
userid='guest',
password='guest',
login_method='AMQPLAIN',
login_response=None,
virtual_host='/',
locale='en_US',
client_properties=None,
ssl=False,
insist=False,
connect_timeout=None,
**kwargs):
"""
Create a connection to the specified host, which should be
a 'host[:port]', such as 'localhost', or '1.2.3.4:5672'
(defaults to 'localhost', if a port is not specified then
5672 is used)
If login_response is not specified, one is built up for you from
userid and password if they are present.
The 'ssl' parameter may be simply True/False, or for Python >= 2.6
a dictionary of options to pass to ssl.wrap_socket() such as
requiring certain certificates.
"""
if (login_response is None) \
and (userid is not None) \
and (password is not None):
login_response = AMQPWriter()
login_response.write_table({'LOGIN': userid, 'PASSWORD': password})
login_response = login_response.getvalue()[4:] #Skip the length
#at the beginning
d = {}
d.update(LIBRARY_PROPERTIES)
if client_properties:
d.update(client_properties)
self.known_hosts = ''
while True:
self.channels = {}
# The connection object itself is treated as channel 0
super(Connection, self).__init__(self, 0)
self.transport = None
# Properties set in the Tune method
self.channel_max = 65535
self.frame_max = 131072
self.heartbeat = 0
# Properties set in the Start method
self.version_major = 0
self.version_minor = 0
self.server_properties = {}
self.mechanisms = []
self.locales = []
# Let the transport.py module setup the actual
# socket connection to the broker.
#
self.transport = create_transport(host, connect_timeout, ssl)
self.method_reader = MethodReader(self.transport)
self.method_writer = MethodWriter(self.transport, self.frame_max)
self.wait(allowed_methods=[
(10, 10), # start
])
self._x_start_ok(d, login_method, login_response, locale)
self._wait_tune_ok = True
while self._wait_tune_ok:
self.wait(allowed_methods=[
(10, 20), # secure
(10, 30), # tune
])
host = self._x_open(virtual_host, insist=insist)
if host is None:
# we weren't redirected
return
# we were redirected, close the socket, loop and try again
try:
self.close()
except Exception:
pass
def _do_close(self):
self.transport.close()
self.transport = None
temp_list = [x for x in self.channels.values() if x is not self]
for ch in temp_list:
ch._do_close()
self.connection = self.channels = None
def _get_free_channel_id(self):
for i in xrange(1, self.channel_max+1):
if i not in self.channels:
return i
raise AMQPException('No free channel ids, current=%d, channel_max=%d'
% (len(self.channels), self.channel_max))
def _wait_method(self, channel_id, allowed_methods):
"""
Wait for a method from the server destined for
a particular channel.
"""
#
# Check the channel's deferred methods
#
method_queue = self.channels[channel_id].method_queue
for queued_method in method_queue:
method_sig = queued_method[0]
if (allowed_methods is None) \
or (method_sig in allowed_methods) \
or (method_sig == (20, 40)):
method_queue.remove(queued_method)
return queued_method
#
# Nothing queued, need to wait for a method from the peer
#
while True:
channel, method_sig, args, content = \
self.method_reader.read_method()
if (channel == channel_id) \
and ((allowed_methods is None) \
or (method_sig in allowed_methods) \
or (method_sig == (20, 40))):
return method_sig, args, content
#
# Certain methods like basic_return should be dispatched
# immediately rather than being queued, even if they're not
# one of the 'allowed_methods' we're looking for.
#
if (channel != 0) and (method_sig in Channel._IMMEDIATE_METHODS):
self.channels[channel].dispatch_method(method_sig, args, content)
continue
#
# Not the channel and/or method we were looking for. Queue
# this method for later
#
self.channels[channel].method_queue.append((method_sig, args, content))
#
# If we just queued up a method for channel 0 (the Connection
# itself) it's probably a close method in reaction to some
# error, so deal with it right away.
#
if channel == 0:
self.wait()
def channel(self, channel_id=None):
"""
Fetch a Channel object identified by the numeric channel_id, or
create that object if it doesn't already exist.
"""
if channel_id in self.channels:
return self.channels[channel_id]
return Channel(self, channel_id)
#################
def close(self, reply_code=0, reply_text='', method_sig=(0, 0)):
"""
request a connection close
This method indicates that the sender wants to close the
connection. This may be due to internal conditions (e.g. a
forced shut-down) or due to an error handling a specific
method, i.e. an exception. When a close is due to an
exception, the sender provides the class and method id of the
method which caused the exception.
RULE:
After sending this method any received method except the
Close-OK method MUST be discarded.
RULE:
The peer sending this method MAY use a counter or timeout
to detect failure of the other peer to respond correctly
with the Close-OK method.
RULE:
When a server receives the Close method from a client it
MUST delete all server-side resources associated with the
client's context. A client CANNOT reconnect to a context
after sending or receiving a Close method.
PARAMETERS:
reply_code: short
The reply code. The AMQ reply codes are defined in AMQ
RFC 011.
reply_text: shortstr
The localised reply text. This text can be logged as an
aid to resolving issues.
class_id: short
failing method class
When the close is provoked by a method exception, this
is the class of the method.
method_id: short
failing method ID
When the close is provoked by a method exception, this
is the ID of the method.
"""
if self.transport is None:
# already closed
return
args = AMQPWriter()
args.write_short(reply_code)
args.write_shortstr(reply_text)
args.write_short(method_sig[0]) # class_id
args.write_short(method_sig[1]) # method_id
self._send_method((10, 60), args)
return self.wait(allowed_methods=[
(10, 61), # Connection.close_ok
])
def _close(self, args):
"""
request a connection close
This method indicates that the sender wants to close the
connection. This may be due to internal conditions (e.g. a
forced shut-down) or due to an error handling a specific
method, i.e. an exception. When a close is due to an
exception, the sender provides the class and method id of the
method which caused the exception.
RULE:
After sending this method any received method except the
Close-OK method MUST be discarded.
RULE:
The peer sending this method MAY use a counter or timeout
to detect failure of the other peer to respond correctly
with the Close-OK method.
RULE:
When a server receives the Close method from a client it
MUST delete all server-side resources associated with the
client's context. A client CANNOT reconnect to a context
after sending or receiving a Close method.
PARAMETERS:
reply_code: short
The reply code. The AMQ reply codes are defined in AMQ
RFC 011.
reply_text: shortstr
The localised reply text. This text can be logged as an
aid to resolving issues.
class_id: short
failing method class
When the close is provoked by a method exception, this
is the class of the method.
method_id: short
failing method ID
When the close is provoked by a method exception, this
is the ID of the method.
"""
reply_code = args.read_short()
reply_text = args.read_shortstr()
class_id = args.read_short()
method_id = args.read_short()
self._x_close_ok()
raise AMQPConnectionException(reply_code, reply_text, (class_id, method_id))
def _x_close_ok(self):
"""
confirm a connection close
This method confirms a Connection.Close method and tells the
recipient that it is safe to release resources for the
connection and close the socket.
RULE:
A peer that detects a socket closure without having
received a Close-Ok handshake method SHOULD log the error.
"""
self._send_method((10, 61))
self._do_close()
def _close_ok(self, args):
"""
confirm a connection close
This method confirms a Connection.Close method and tells the
recipient that it is safe to release resources for the
connection and close the socket.
RULE:
A peer that detects a socket closure without having
received a Close-Ok handshake method SHOULD log the error.
"""
self._do_close()
def _x_open(self, virtual_host, capabilities='', insist=False):
"""
open connection to virtual host
This method opens a connection to a virtual host, which is a
collection of resources, and acts to separate multiple
application domains within a server.
RULE:
The client MUST open the context before doing any work on
the connection.
PARAMETERS:
virtual_host: shortstr
virtual host name
The name of the virtual host to work with.
RULE:
If the server supports multiple virtual hosts, it
MUST enforce a full separation of exchanges,
queues, and all associated entities per virtual
host. An application, connected to a specific
virtual host, MUST NOT be able to access resources
of another virtual host.
RULE:
The server SHOULD verify that the client has
permission to access the specified virtual host.
RULE:
The server MAY configure arbitrary limits per
virtual host, such as the number of each type of
entity that may be used, per connection and/or in
total.
capabilities: shortstr
required capabilities
The client may specify a number of capability names,
delimited by spaces. The server can use this string
to decide how to process the client's connection request.
insist: boolean
insist on connecting to server
In a configuration with multiple load-sharing servers,
the server may respond to a Connection.Open method
with a Connection.Redirect. The insist option tells
the server that the client is insisting on a
connection to the specified server.
RULE:
When the client uses the insist option, the server
SHOULD accept the client connection unless it is
technically unable to do so.
"""
args = AMQPWriter()
args.write_shortstr(virtual_host)
args.write_shortstr(capabilities)
args.write_bit(insist)
self._send_method((10, 40), args)
return self.wait(allowed_methods=[
(10, 41), # Connection.open_ok
(10, 50), # Connection.redirect
])
def _open_ok(self, args):
"""
signal that the connection is ready
This method signals to the client that the connection is ready
for use.
PARAMETERS:
known_hosts: shortstr
"""
self.known_hosts = args.read_shortstr()
AMQP_LOGGER.debug('Open OK! known_hosts [%s]' % self.known_hosts)
return None
def _redirect(self, args):
"""
asks the client to use a different server
This method redirects the client to another server, based on
the requested virtual host and/or capabilities.
RULE:
When getting the Connection.Redirect method, the client
SHOULD reconnect to the host specified, and if that host
is not present, to any of the hosts specified in the
known-hosts list.
PARAMETERS:
host: shortstr
server to connect to
Specifies the server to connect to. This is an IP
address or a DNS name, optionally followed by a colon
and a port number. If no port number is specified, the
client should use the default port number for the
protocol.
known_hosts: shortstr
"""
host = args.read_shortstr()
self.known_hosts = args.read_shortstr()
AMQP_LOGGER.debug('Redirected to [%s], known_hosts [%s]' % (host, self.known_hosts))
return host
def _secure(self, args):
"""
security mechanism challenge
The SASL protocol works by exchanging challenges and responses
until both peers have received sufficient information to
authenticate each other. This method challenges the client to
provide more information.
PARAMETERS:
challenge: longstr
security challenge data
Challenge information, a block of opaque binary data
passed to the security mechanism.
"""
challenge = args.read_longstr()
def _x_secure_ok(self, response):
"""
security mechanism response
This method attempts to authenticate, passing a block of SASL
data for the security mechanism at the server side.
PARAMETERS:
response: longstr
security response data
A block of opaque data passed to the security
mechanism. The contents of this data are defined by
the SASL security mechanism.
"""
args = AMQPWriter()
args.write_longstr(response)
self._send_method((10, 21), args)
def _start(self, args):
"""
start connection negotiation
This method starts the connection negotiation process by
telling the client the protocol version that the server
proposes, along with a list of security mechanisms which the
client can use for authentication.
RULE:
If the client cannot handle the protocol version suggested
by the server it MUST close the socket connection.
RULE:
The server MUST provide a protocol version that is lower
than or equal to that requested by the client in the
protocol header. If the server cannot support the
specified protocol it MUST NOT send this method, but MUST
close the socket connection.
PARAMETERS:
version_major: octet
protocol major version
The protocol major version that the server agrees to
use, which cannot be higher than the client's major
version.
version_minor: octet
protocol minor version
The protocol minor version that the server agrees to
use, which cannot be higher than the client's minor
version.
server_properties: table
server properties
mechanisms: longstr
available security mechanisms
A list of the security mechanisms that the server
supports, delimited by spaces. Currently ASL supports
these mechanisms: PLAIN.
locales: longstr
available message locales
A list of the message locales that the server
supports, delimited by spaces. The locale defines the
language in which the server will send reply texts.
RULE:
All servers MUST support at least the en_US
locale.
"""
self.version_major = args.read_octet()
self.version_minor = args.read_octet()
self.server_properties = args.read_table()
self.mechanisms = args.read_longstr().split(' ')
self.locales = args.read_longstr().split(' ')
AMQP_LOGGER.debug('Start from server, version: %d.%d, properties: %s, mechanisms: %s, locales: %s'
% (self.version_major, self.version_minor,
str(self.server_properties), self.mechanisms, self.locales))
def _x_start_ok(self, client_properties, mechanism, response, locale):
"""
select security mechanism and locale
This method selects a SASL security mechanism. ASL uses SASL
(RFC2222) to negotiate authentication and encryption.
PARAMETERS:
client_properties: table
client properties
mechanism: shortstr
selected security mechanism
A single security mechanism selected by the client,
which must be one of those specified by the server.
RULE:
The client SHOULD authenticate using the highest-
level security profile it can handle from the list
provided by the server.
RULE:
The mechanism field MUST contain one of the
security mechanisms proposed by the server in the
Start method. If it doesn't, the server MUST close
the socket.
response: longstr
security response data
A block of opaque data passed to the security
mechanism. The contents of this data are defined by
the SASL security mechanism. For the PLAIN security
mechanism this is defined as a field table holding two
fields, LOGIN and PASSWORD.
locale: shortstr
selected message locale
A single message locale selected by the client, which
must be one of those specified by the server.
"""
args = AMQPWriter()
args.write_table(client_properties)
args.write_shortstr(mechanism)
args.write_longstr(response)
args.write_shortstr(locale)
self._send_method((10, 11), args)
def _tune(self, args):
"""
propose connection tuning parameters
This method proposes a set of connection configuration values
to the client. The client can accept and/or adjust these.
PARAMETERS:
channel_max: short
proposed maximum channels
The maximum total number of channels that the server
allows per connection. Zero means that the server does
not impose a fixed limit, but the number of allowed
channels may be limited by available server resources.
frame_max: long
proposed maximum frame size
The largest frame size that the server proposes for
the connection. The client can negotiate a lower
value. Zero means that the server does not impose any
specific limit but may reject very large frames if it
cannot allocate resources for them.
RULE:
Until the frame-max has been negotiated, both
peers MUST accept frames of up to 4096 octets
large. The minimum non-zero value for the frame-
max field is 4096.
heartbeat: short
desired heartbeat delay
The delay, in seconds, of the connection heartbeat
that the server wants. Zero means the server does not
want a heartbeat.
"""
self.channel_max = args.read_short() or self.channel_max
self.frame_max = args.read_long() or self.frame_max
self.method_writer.frame_max = self.frame_max
self.heartbeat = args.read_short()
self._x_tune_ok(self.channel_max, self.frame_max, 0)
def _x_tune_ok(self, channel_max, frame_max, heartbeat):
"""
negotiate connection tuning parameters
This method sends the client's connection tuning parameters to
the server. Certain fields are negotiated, others provide
capability information.
PARAMETERS:
channel_max: short
negotiated maximum channels
The maximum total number of channels that the client
will use per connection. May not be higher than the
value specified by the server.
RULE:
The server MAY ignore the channel-max value or MAY
use it for tuning its resource allocation.
frame_max: long
negotiated maximum frame size
The largest frame size that the client and server will
use for the connection. Zero means that the client
does not impose any specific limit but may reject very
large frames if it cannot allocate resources for them.
Note that the frame-max limit applies principally to
content frames, where large contents can be broken
into frames of arbitrary size.
RULE:
Until the frame-max has been negotiated, both
peers must accept frames of up to 4096 octets
large. The minimum non-zero value for the frame-
max field is 4096.
heartbeat: short
desired heartbeat delay
The delay, in seconds, of the connection heartbeat
that the client wants. Zero means the client does not
want a heartbeat.
"""
args = AMQPWriter()
args.write_short(channel_max)
args.write_long(frame_max)
args.write_short(heartbeat)
self._send_method((10, 31), args)
self._wait_tune_ok = False
_METHOD_MAP = {
(10, 10): _start,
(10, 20): _secure,
(10, 30): _tune,
(10, 41): _open_ok,
(10, 50): _redirect,
(10, 60): _close,
(10, 61): _close_ok,
}
_IMMEDIATE_METHODS = []
| bsd-3-clause |
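For orientation, a hedged sketch of how the `Connection` class above is typically driven from client code; the broker address and credentials are placeholders, and only methods visible in this module and its `Channel` counterpart (the constructor, `channel()`, `close()`) are used:

```python
from amqplib.client_0_8.connection import Connection

# Open a connection to a local broker (defaults written out explicitly),
# allocate a channel, then shut everything down cleanly.
conn = Connection(host='localhost:5672', userid='guest', password='guest',
                  virtual_host='/')
ch = conn.channel()   # picks a free channel id; channel 0 is the connection itself
# ... declare exchanges/queues and publish or consume via `ch` here ...
ch.close()
conn.close()          # sends Connection.Close (10, 60) and waits for Close-OK
```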
EricCline/CEM_inc | env/lib/python2.7/site-packages/IPython/qt/inprocess.py | 10 | 1313 | """ Defines an in-process KernelManager with signals and slots.
"""
# Local imports.
from IPython.kernel.inprocess import (
InProcessShellChannel, InProcessIOPubChannel, InProcessStdInChannel,
InProcessHBChannel, InProcessKernelClient, InProcessKernelManager,
)
from IPython.utils.traitlets import Type
from .kernel_mixins import (
QtShellChannelMixin, QtIOPubChannelMixin,
QtStdInChannelMixin, QtHBChannelMixin, QtKernelClientMixin,
QtKernelManagerMixin,
)
class QtInProcessShellChannel(QtShellChannelMixin, InProcessShellChannel):
pass
class QtInProcessIOPubChannel(QtIOPubChannelMixin, InProcessIOPubChannel):
pass
class QtInProcessStdInChannel(QtStdInChannelMixin, InProcessStdInChannel):
pass
class QtInProcessHBChannel(QtHBChannelMixin, InProcessHBChannel):
pass
class QtInProcessKernelClient(QtKernelClientMixin, InProcessKernelClient):
""" An in-process KernelManager with signals and slots.
"""
iopub_channel_class = Type(QtInProcessIOPubChannel)
shell_channel_class = Type(QtInProcessShellChannel)
stdin_channel_class = Type(QtInProcessStdInChannel)
hb_channel_class = Type(QtInProcessHBChannel)
class QtInProcessKernelManager(QtKernelManagerMixin, InProcessKernelManager):
client_class = __module__ + '.QtInProcessKernelClient'
| mit |
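For context, a rough sketch of how these Qt in-process classes are commonly wired together in a frontend; `start_kernel()` and `start_channels()` come from the base kernel-manager/client classes rather than this module, so treat the exact call sequence as an assumption:

```python
from IPython.qt.inprocess import QtInProcessKernelManager

km = QtInProcessKernelManager()
km.start_kernel()        # assumed: inherited from InProcessKernelManager
client = km.client()     # yields a QtInProcessKernelClient (see client_class above)
client.start_channels()  # assumed: opens the shell/iopub/stdin/hb channels
client.execute("print('hello from the in-process kernel')")
```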
pope/SublimeYetAnotherCodeSearch | tests/test_csearch.py | 1 | 2481 | import sublime
import os.path
import shutil
import textwrap
import time
import uuid
from YetAnotherCodeSearch.tests import CommandTestCase
_NEEDLE_IN_HAYSTACK = 'cc5b252b-e7fb-5145-bf8a-ed272e3aa7bf'
class CsearchCommandTest(CommandTestCase):
def setUp(self):
super(CsearchCommandTest, self).setUp()
if os.path.isfile(self.index):
return
self.window.run_command('cindex', {'index_project': True})
self._wait_for_status(self.view)
assert os.path.isfile(self.index)
def test_csearch_exists(self):
self.assertIsNotNone(shutil.which('csearch'))
def test_csearch(self):
results_view = self._search(_NEEDLE_IN_HAYSTACK)
expected = textwrap.dedent("""\
Searching for "{0}"
{1}/test_csearch.py:
12: _NEEDLE_IN_HAYSTACK = '{0}'
1 matches across 1 files
""").format(_NEEDLE_IN_HAYSTACK, self.project_path)
actual = results_view.substr(sublime.Region(0, results_view.size()))
self.assertEquals(expected, actual)
def test_csearch_no_matches(self):
query = str(uuid.uuid4())
results_view = self._search(query)
expected = textwrap.dedent("""\
Searching for "{0}"
No matches found
""").format(query, self.project_path)
actual = results_view.substr(sublime.Region(0, results_view.size()))
self.assertEquals(expected, actual)
def test_csearch_go_to_file(self):
results_view = self._search(_NEEDLE_IN_HAYSTACK)
pt = results_view.text_point(3, 10) # Line 4, 10 characters in
results_view.sel().clear()
results_view.sel().add(sublime.Region(pt))
self.window.run_command('code_search_results_go_to_file')
self.assertEquals('{0}/test_csearch.py'.format(self.project_path),
self.window.active_view().file_name())
def _wait_for_status(self, view):
max_iters = 10
while max_iters > 0 and view.get_status('YetAnotherCodeSearch') != '':
time.sleep(0.1)
max_iters -= 1
assert '' == view.get_status('YetAnotherCodeSearch')
def _search(self, query):
self.window.run_command('csearch', {'query': query})
results_view = next((view for view in self.window.views()
if view.name() == 'Code Search Results'))
self._wait_for_status(results_view)
return results_view
| mit |
dohoangkhiem/ansible | lib/ansible/plugins/lookup/url.py | 95 | 2020 | # (c) 2015, Brian Coca <bcoca@ansible.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import urllib2
from ansible.errors import AnsibleError
from ansible.plugins.lookup import LookupBase
from ansible.module_utils.urls import open_url, ConnectionError, SSLValidationError
from ansible.utils.unicode import to_unicode
class LookupModule(LookupBase):
def run(self, terms, variables=None, **kwargs):
validate_certs = kwargs.get('validate_certs', True)
ret = []
for term in terms:
self._display.vvvv("url lookup connecting to %s" % term)
try:
response = open_url(term, validate_certs=validate_certs)
except urllib2.URLError as e:
raise AnsibleError("Failed lookup url for %s : %s" % (term, str(e)))
except urllib2.HTTPError as e:
raise AnsibleError("Received HTTP error for %s : %s" % (term, str(e)))
except SSLValidationError as e:
raise AnsibleError("Error validating the server's certificate for %s: %s" % (term, str(e)))
except ConnectionError as e:
raise AnsibleError("Error connecting to %s: %s" % (term, str(e)))
for line in response.read().splitlines():
ret.append(to_unicode(line))
return ret
| gpl-3.0 |
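A rough sketch of exercising the lookup plugin's `run()` method directly; in a playbook this is normally reached as `lookup('url', 'https://example.com/hosts.txt')`. The URL is a placeholder, and constructing `LookupModule` with no loader/templar is an assumption about the `LookupBase` base class:

```python
from ansible.plugins.lookup.url import LookupModule

lookup = LookupModule()  # assumption: LookupBase tolerates no-argument construction
lines = lookup.run(["https://example.com/hosts.txt"], validate_certs=False)
for line in lines:       # one element per line of the fetched document
    print(line)
```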
awong1900/platformio | platformio/builder/scripts/frameworks/mbed.py | 1 | 7172 | # Copyright (C) Ivan Kravets <me@ikravets.com>
# See LICENSE for details.
"""
mbed
The mbed framework The mbed SDK has been designed to provide enough
hardware abstraction to be intuitive and concise, yet powerful enough to
build complex projects. It is built on the low-level ARM CMSIS APIs,
allowing you to code down to the metal if needed. In addition to RTOS,
USB and Networking libraries, a cookbook of hundreds of reusable
peripheral and module libraries have been built on top of the SDK by
the mbed Developer Community.
http://mbed.org/
"""
import re
import xml.etree.ElementTree as ElementTree
from binascii import crc32
from os import walk
from os.path import basename, isfile, join, normpath
from SCons.Script import DefaultEnvironment, Exit
env = DefaultEnvironment()
BOARD_OPTS = env.get("BOARD_OPTIONS", {}).get("build", {})
env.Replace(
PLATFORMFW_DIR=join("$PIOPACKAGES_DIR", "framework-mbed")
)
MBED_VARIANTS = {
"stm32f3discovery": "DISCO_F303VC",
"stm32f4discovery": "DISCO_F407VG",
"stm32f429discovery": "DISCO_F429ZI",
"blueboard_lpc11u24": "LPC11U24",
"dipcortexm0": "LPC11U24",
"seeeduinoArchPro": "ARCH_PRO",
"ubloxc027": "UBLOX_C027",
"lpc1114fn28": "LPC1114",
"lpc11u35": "LPC11U35_401",
"mbuino": "LPC11U24",
"nrf51_mkit": "NRF51822",
"seeedTinyBLE": "SEEED_TINY_BLE",
"redBearLab": "RBLAB_NRF51822",
"nrf51-dt": "NRF51_DK",
"redBearLabBLENano": "RBLAB_NRF51822",
"wallBotBLE": "NRF51822",
"frdm_kl25z": "KL25Z",
"frdm_kl46z": "KL46Z",
"frdm_k64f": "K64F",
"frdm_kl05z": "KL05Z",
"frdm_k20d50m": "K20D50M",
"frdm_k22f": "K22F"
}
MBED_LIBS_MAP = {
"dsp": {"ar": ["dsp", "cmsis_dsp"]},
"eth": {"ar": ["eth"], "deps": ["rtos"]},
"fat": {"ar": ["fat"]},
"rtos": {"ar": ["rtos", "rtx"]},
"usb": {"ar": ["USBDevice"]},
"usb_host": {"ar": ["USBHost"]}
}
def get_mbedlib_includes():
result = []
for lib in MBED_LIBS_MAP.keys():
includes = []
lib_dir = join(env.subst("$PLATFORMFW_DIR"), "libs", lib)
for _, _, files in walk(lib_dir):
for libfile in files:
if libfile.endswith(".h"):
includes.append(libfile)
result.append((lib, set(includes)))
return result
def get_used_mbedlibs():
re_includes = re.compile(r"^(#include\s+(?:\<|\")([^\r\n\"]+))",
re.M | re.I)
srcincs = []
for root, _, files in walk(env.get("PROJECTSRC_DIR")):
for pfile in files:
if not any([pfile.endswith(ext) for ext in (".h", ".c", ".cpp")]):
continue
with open(join(root, pfile)) as fp:
srcincs.extend([i[1] for i in re_includes.findall(fp.read())])
srcincs = set(srcincs)
result = {}
for libname, libincs in get_mbedlib_includes():
if libincs & srcincs and libname not in result:
result[libname] = MBED_LIBS_MAP[libname]
return result
def add_mbedlib(libname, libar):
if libar in env.get("LIBS"):
return
lib_dir = join(env.subst("$PLATFORMFW_DIR"), "libs", libname)
if not isfile(join(lib_dir, "TARGET_%s" % variant,
"TOOLCHAIN_GCC_ARM", "lib%s.a" % libar)):
Exit("Error: %s board doesn't support %s library!" %
(env.get("BOARD"), libname))
env.Append(
LIBPATH=[
join(env.subst("$PLATFORMFW_DIR"), "libs", libname,
"TARGET_%s" % variant, "TOOLCHAIN_GCC_ARM")
],
LIBS=[libar]
)
sysincdirs = (
"eth",
"include",
"ipv4",
"lwip-eth",
"lwip-sys"
)
for root, _, files in walk(lib_dir):
if (not any(f.endswith(".h") for f in files) and
basename(root) not in sysincdirs):
continue
var_dir = join("$BUILD_DIR", "FrameworkMbed%sInc%d" %
(libname.upper(), crc32(root)))
if var_dir in env.get("CPPPATH"):
continue
env.VariantDir(var_dir, root)
env.Append(CPPPATH=[var_dir])
def parse_eix_file(filename):
result = {}
paths = (
("CFLAGS", "./Target/Source/CC/Switch"),
("CXXFLAGS", "./Target/Source/CPPC/Switch"),
("CPPDEFINES", "./Target/Source/Symbols/Symbol"),
("FILES", "./Target/Files/File"),
("LINKFLAGS", "./Target/Source/LD/Switch"),
("OBJFILES", "./Target/Source/Addobjects/Addobject"),
("LIBPATH", "./Target/Linker/Librarypaths/Librarypath"),
("STDLIBS", "./Target/Source/Syslibs/Library"),
("LDSCRIPT_PATH", "./Target/Source/Scriptfile"),
("CPPPATH", "./Target/Compiler/Includepaths/Includepath")
)
tree = ElementTree.parse(filename)
for (key, path) in paths:
if key not in result:
result[key] = []
for node in tree.findall(path):
_nkeys = node.keys()
result[key].append(
node.get(_nkeys[0]) if len(_nkeys) == 1 else node.attrib)
return result
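# Illustrative sketch (not part of the original build script): a minimal,
# hand-made .eix fragment and what parse_eix_file() would extract from it.
# The real files shipped with framework-mbed are larger; the element layout
# below is only assumed from the XPath table above.
def _example_parse_eix():
    import tempfile
    xml = ('<Project><Target><Source>'
           '<CC><Switch name="-Os"/></CC>'
           '<Symbols><Symbol name="TARGET_K64F"/></Symbols>'
           '</Source></Target></Project>')
    with tempfile.NamedTemporaryFile(suffix=".eix", delete=False) as tmp:
        tmp.write(xml.encode("utf-8"))
    data = parse_eix_file(tmp.name)
    return data["CFLAGS"], data["CPPDEFINES"]  # -> (['-Os'], ['TARGET_K64F'])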
def get_build_flags(data):
flags = {}
cflags = set(data.get("CFLAGS", []))
cxxflags = set(data.get("CXXFLAGS", []))
cppflags = set(cflags & cxxflags)
flags['CPPFLAGS'] = list(cppflags)
flags['CXXFLAGS'] = list(cxxflags - cppflags)
flags['CFLAGS'] = list(cflags - cppflags)
return flags
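# Illustrative sketch (not part of the original build script): switches common
# to both the C and C++ lists are hoisted into CPPFLAGS, the rest stay
# language specific.  The flag values are examples only.
def _example_build_flags():
    data = {"CFLAGS": ["-Os", "-std=gnu99"], "CXXFLAGS": ["-Os", "-fno-rtti"]}
    flags = get_build_flags(data)
    # flags == {"CPPFLAGS": ["-Os"], "CFLAGS": ["-std=gnu99"],
    #           "CXXFLAGS": ["-fno-rtti"]}
    return flags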
board_type = env.subst("$BOARD")
variant = MBED_VARIANTS[
board_type] if board_type in MBED_VARIANTS else board_type.upper()
eixdata = parse_eix_file(
join(env.subst("$PLATFORMFW_DIR"), "variant", variant, "%s.eix" % variant))
build_flags = get_build_flags(eixdata)
variant_dir = join("$PLATFORMFW_DIR", "variant", variant)
env.Replace(
CPPFLAGS=build_flags.get("CPPFLAGS", []),
CFLAGS=build_flags.get("CFLAGS", []),
CXXFLAGS=build_flags.get("CXXFLAGS", []),
LINKFLAGS=eixdata.get("LINKFLAGS", []),
CPPDEFINES=[define for define in eixdata.get("CPPDEFINES", [])],
LDSCRIPT_PATH=normpath(
join(variant_dir, eixdata.get("LDSCRIPT_PATH")[0]))
)
# Hook for K64F and K22F
if board_type in ("frdm_k22f", "frdm_k64f"):
env.Append(
LINKFLAGS=["-Wl,--start-group"]
)
for lib_path in eixdata.get("CPPPATH"):
_vdir = join("$BUILD_DIR", "FrameworkMbedInc%d" % crc32(lib_path))
env.VariantDir(_vdir, join(variant_dir, lib_path))
env.Append(CPPPATH=[_vdir])
env.Append(
LIBPATH=[join(variant_dir, lib_path)
for lib_path in eixdata.get("LIBPATH", [])
if lib_path.startswith("mbed")]
)
#
# Target: Build mbed Library
#
libs = [l for l in eixdata.get("STDLIBS", []) if l not in env.get("LIBS")]
libs.extend(["mbed", "c", "gcc"])
libs.append(env.Library(
join("$BUILD_DIR", "FrameworkMbed"),
[join(variant_dir, f)
for f in eixdata.get("OBJFILES", [])]
))
env.Append(LIBS=libs)
for _libname, _libdata in get_used_mbedlibs().iteritems():
for _libar in _libdata['ar']:
add_mbedlib(_libname, _libar)
if "deps" not in _libdata:
continue
for libdep in _libdata['deps']:
for _libar in MBED_LIBS_MAP[libdep]['ar']:
add_mbedlib(libdep, _libar)
| mit |
Universal-Model-Converter/UMC3.0a | data/Python/x86/Lib/site-packages/OpenGL/GL/SUN/global_alpha.py | 4 | 1736 | '''OpenGL extension SUN.global_alpha
This module customises the behaviour of the
OpenGL.raw.GL.SUN.global_alpha to provide a more
Python-friendly API
Overview (from the spec)
Transparency is done in OpenGL using alpha blending. An alpha value
of 0.0 is used for fully transparent objects, while an alpha value
of 1.0 is used for fully opaque objects. A value of 0.25 is 75%
transparent, and so on.
OpenGL defines alpha as a component of the vertex color state.
Whenever a color is set, the alpha component is set along with the
red, green, and blue components. This means that transparency
can't be changed for primitives with per-vertex colors without
modifying the color of each vertex, replacing the old alpha
component with the new alpha component. This can be very expensive
for objects that are drawn using vertex arrays; it all but
precludes the use of display lists.
This extension defines a new global alpha attribute that can be
used to specify an alpha factor that is independent from the alpha
component of the color value. The global alpha factor is
multiplied by the fragment's alpha value after primitive
rasterization and prior to texture mapping, replacing the
fragment's alpha value. The global alpha extension is only
specified in RGBA mode and must be applied prior to any texture
mapping operation. It is enabled by a new GLOBAL_ALPHA flag.
The official definition of this extension is available here:
http://www.opengl.org/registry/specs/SUN/global_alpha.txt
'''
from OpenGL import platform, constants, constant, arrays
from OpenGL import extensions, wrapper
from OpenGL.GL import glget
import ctypes
from OpenGL.raw.GL.SUN.global_alpha import *
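# Illustrative sketch (not part of the autogenerated wrapper): typical use of
# the extension described above, assuming the running GL context advertises
# GL_SUN_global_alpha.  The constant and entry-point names are taken from the
# spec linked above and are assumptions here, not verified against this build
# of PyOpenGL.
def _example_global_alpha():
    from OpenGL.GL import glEnable
    glEnable(GL_GLOBAL_ALPHA_SUN)      # enable the global alpha factor
    glGlobalAlphaFactorfSUN(0.25)      # subsequent primitives drawn at 25% alpha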
### END AUTOGENERATED SECTION | mit |
cosmicAsymmetry/zulip | zerver/tests/test_signup.py | 1 | 51499 | # -*- coding: utf-8 -*-
from __future__ import absolute_import
import datetime
from django.conf import settings
from django.http import HttpResponse
from django.test import TestCase
from mock import patch
from zerver.lib.test_helpers import MockLDAP
from confirmation.models import Confirmation
from zilencer.models import Deployment
from zerver.forms import HomepageForm
from zerver.views import do_change_password
from zerver.views.invite import get_invitee_emails_set
from zerver.models import (
get_realm_by_string_id, get_prereg_user_by_email, get_user_profile_by_email,
PreregistrationUser, Realm, RealmAlias, Recipient,
Referral, ScheduledJob, UserProfile, UserMessage,
Stream, Subscription, ScheduledJob
)
from zerver.management.commands.deliver_email import send_email_job
from zerver.lib.actions import (
set_default_streams,
do_change_is_admin
)
from zerver.lib.initial_password import initial_password
from zerver.lib.actions import do_deactivate_realm, do_set_realm_default_language, \
add_new_user_history
from zerver.lib.digest import send_digest_email
from zerver.lib.notifications import (
enqueue_welcome_emails, one_click_unsubscribe_link, send_local_email_template_with_delay)
from zerver.lib.test_helpers import find_key_by_email, queries_captured, \
HostRequestMock
from zerver.lib.test_classes import (
ZulipTestCase,
)
from zerver.lib.test_runner import slow
from zerver.lib.session_user import get_session_dict_user
from zerver.context_processors import common_context
import re
import ujson
from six.moves import urllib
from six.moves import range
import six
from typing import Any, Text
import os
class PublicURLTest(ZulipTestCase):
"""
Account creation URLs are accessible even when not logged in. Authenticated
URLs redirect to a page.
"""
def fetch(self, method, urls, expected_status):
# type: (str, List[str], int) -> None
for url in urls:
# e.g. self.client_post(url) if method is "post"
response = getattr(self, method)(url)
self.assertEqual(response.status_code, expected_status,
msg="Expected %d, received %d for %s to %s" % (
expected_status, response.status_code, method, url))
def test_public_urls(self):
# type: () -> None
"""
Test which views are accessible when not logged in.
"""
# FIXME: We should also test the Tornado URLs -- this codepath
# can't do so because this Django test mechanism doesn't go
# through Tornado.
get_urls = {200: ["/accounts/home/", "/accounts/login/"
"/en/accounts/home/", "/ru/accounts/home/",
"/en/accounts/login/", "/ru/accounts/login/",
"/help/"],
302: ["/", "/en/", "/ru/"],
401: ["/json/streams/Denmark/members",
"/api/v1/users/me/subscriptions",
"/api/v1/messages",
"/json/messages",
"/api/v1/streams",
],
404: ["/help/nonexistent"],
}
# Add all files in 'templates/zerver/help' directory (except for 'main.html' and
# 'index.md') to `get_urls['200']` list.
for doc in os.listdir('./templates/zerver/help'):
if doc not in {'main.html', 'index.md', 'include'}:
get_urls[200].append('/help/' + os.path.splitext(doc)[0]) # Strip the extension.
post_urls = {200: ["/accounts/login/"],
302: ["/accounts/logout/"],
401: ["/json/messages",
"/json/invite_users",
"/json/settings/change",
"/json/subscriptions/exists",
"/json/subscriptions/property",
"/json/fetch_api_key",
"/json/users/me/pointer",
"/json/users/me/subscriptions",
"/api/v1/users/me/subscriptions",
],
400: ["/api/v1/external/github",
"/api/v1/fetch_api_key",
],
}
put_urls = {401: ["/json/users/me/pointer"],
}
for status_code, url_set in six.iteritems(get_urls):
self.fetch("client_get", url_set, status_code)
for status_code, url_set in six.iteritems(post_urls):
self.fetch("client_post", url_set, status_code)
for status_code, url_set in six.iteritems(put_urls):
self.fetch("client_put", url_set, status_code)
def test_get_gcid_when_not_configured(self):
# type: () -> None
with self.settings(GOOGLE_CLIENT_ID=None):
resp = self.client_get("/api/v1/fetch_google_client_id")
self.assertEqual(400, resp.status_code,
msg="Expected 400, received %d for GET /api/v1/fetch_google_client_id" % (
resp.status_code,))
data = ujson.loads(resp.content)
self.assertEqual('error', data['result'])
def test_get_gcid_when_configured(self):
# type: () -> None
with self.settings(GOOGLE_CLIENT_ID="ABCD"):
resp = self.client_get("/api/v1/fetch_google_client_id")
self.assertEqual(200, resp.status_code,
msg="Expected 200, received %d for GET /api/v1/fetch_google_client_id" % (
resp.status_code,))
data = ujson.loads(resp.content)
self.assertEqual('success', data['result'])
self.assertEqual('ABCD', data['google_client_id'])
class AddNewUserHistoryTest(ZulipTestCase):
def test_add_new_user_history_race(self):
# type: () -> None
"""Sends a message during user creation"""
# Create a user who hasn't had historical messages added
stream_dict = {
"Denmark": {"description": "A Scandinavian country", "invite_only": False},
"Verona": {"description": "A city in Italy", "invite_only": False}
} # type: Dict[Text, Dict[Text, Any]]
set_default_streams(get_realm_by_string_id("zulip"), stream_dict)
with patch("zerver.lib.actions.add_new_user_history"):
self.register("test", "test")
user_profile = get_user_profile_by_email("test@zulip.com")
subs = Subscription.objects.select_related("recipient").filter(
user_profile=user_profile, recipient__type=Recipient.STREAM)
streams = Stream.objects.filter(id__in=[sub.recipient.type_id for sub in subs])
self.send_message("hamlet@zulip.com", streams[0].name, Recipient.STREAM, "test")
add_new_user_history(user_profile, streams)
class PasswordResetTest(ZulipTestCase):
"""
Log in, reset password, log out, log in with new password.
"""
def test_password_reset(self):
# type: () -> None
email = 'hamlet@zulip.com'
old_password = initial_password(email)
self.login(email)
# test password reset template
result = self.client_get('/accounts/password/reset/')
self.assert_in_response('Reset your password.', result)
# start the password reset process by supplying an email address
result = self.client_post('/accounts/password/reset/', {'email': email})
# check the redirect link telling you to check mail for password reset link
self.assertEqual(result.status_code, 302)
self.assertTrue(result["Location"].endswith(
"/accounts/password/reset/done/"))
result = self.client_get(result["Location"])
self.assert_in_response("Check your email to finish the process.", result)
# Visit the password reset link.
password_reset_url = self.get_confirmation_url_from_outbox(email, "(\S+)")
result = self.client_get(password_reset_url)
self.assertEqual(result.status_code, 200)
# Reset your password
result = self.client_post(password_reset_url,
{'new_password1': 'new_password',
'new_password2': 'new_password'})
# password reset succeeded
self.assertEqual(result.status_code, 302)
self.assertTrue(result["Location"].endswith("/password/done/"))
# log back in with new password
self.login(email, password='new_password')
user_profile = get_user_profile_by_email('hamlet@zulip.com')
self.assertEqual(get_session_dict_user(self.client.session), user_profile.id)
# make sure old password no longer works
self.login(email, password=old_password, fails=True)
def test_redirect_endpoints(self):
# type: () -> None
'''
These tests are mostly designed to give us 100% URL coverage
in our URL coverage reports. Our mechanism for finding URL
coverage doesn't handle redirects, so we just have a few quick
tests here.
'''
result = self.client_get('/accounts/password/reset/done/')
self.assert_in_success_response(["Check your email"], result)
result = self.client_get('/accounts/password/done/')
self.assert_in_success_response(["We've reset your password!"], result)
result = self.client_get('/accounts/send_confirm/alice@example.com')
self.assert_in_success_response(["Still no email?"], result)
class LoginTest(ZulipTestCase):
"""
Logging in, registration, and logging out.
"""
def test_login(self):
# type: () -> None
self.login("hamlet@zulip.com")
user_profile = get_user_profile_by_email('hamlet@zulip.com')
self.assertEqual(get_session_dict_user(self.client.session), user_profile.id)
def test_login_bad_password(self):
# type: () -> None
self.login("hamlet@zulip.com", password="wrongpassword", fails=True)
self.assertIsNone(get_session_dict_user(self.client.session))
def test_login_nonexist_user(self):
# type: () -> None
result = self.login_with_return("xxx@zulip.com", "xxx")
self.assert_in_response("Please enter a correct email and password", result)
def test_register(self):
# type: () -> None
realm = get_realm_by_string_id("zulip")
stream_dict = {"stream_"+str(i): {"description": "stream_%s_description" % i, "invite_only": False}
for i in range(40)} # type: Dict[Text, Dict[Text, Any]]
for stream_name in stream_dict.keys():
self.make_stream(stream_name, realm=realm)
set_default_streams(realm, stream_dict)
with queries_captured() as queries:
self.register("test", "test")
# Ensure the number of queries we make is not O(streams)
self.assert_max_length(queries, 69)
user_profile = get_user_profile_by_email('test@zulip.com')
self.assertEqual(get_session_dict_user(self.client.session), user_profile.id)
self.assertFalse(user_profile.enable_stream_desktop_notifications)
def test_register_deactivated(self):
# type: () -> None
"""
If you try to register for a deactivated realm, you get a clear error
page.
"""
realm = get_realm_by_string_id("zulip")
realm.deactivated = True
realm.save(update_fields=["deactivated"])
result = self.register("test", "test")
self.assert_in_response("has been deactivated", result)
with self.assertRaises(UserProfile.DoesNotExist):
get_user_profile_by_email('test@zulip.com')
def test_login_deactivated(self):
# type: () -> None
"""
If you try to log in to a deactivated realm, you get a clear error page.
"""
realm = get_realm_by_string_id("zulip")
realm.deactivated = True
realm.save(update_fields=["deactivated"])
result = self.login_with_return("hamlet@zulip.com")
self.assert_in_response("has been deactivated", result)
def test_logout(self):
# type: () -> None
self.login("hamlet@zulip.com")
self.client_post('/accounts/logout/')
self.assertIsNone(get_session_dict_user(self.client.session))
def test_non_ascii_login(self):
# type: () -> None
"""
You can log in even if your password contain non-ASCII characters.
"""
email = "test@zulip.com"
password = u"hümbüǵ"
# Registering succeeds.
self.register("test", password)
user_profile = get_user_profile_by_email(email)
self.assertEqual(get_session_dict_user(self.client.session), user_profile.id)
self.client_post('/accounts/logout/')
self.assertIsNone(get_session_dict_user(self.client.session))
# Logging in succeeds.
self.client_post('/accounts/logout/')
self.login(email, password)
self.assertEqual(get_session_dict_user(self.client.session), user_profile.id)
class InviteUserTest(ZulipTestCase):
def invite(self, users, streams):
# type: (str, List[Text]) -> HttpResponse
"""
Invites the specified users to Zulip with the specified streams.
users should be a string containing the users to invite, comma or
newline separated.
streams should be a list of strings.
"""
return self.client_post("/json/invite_users",
{"invitee_emails": users,
"stream": streams})
def check_sent_emails(self, correct_recipients):
# type: (List[str]) -> None
from django.core.mail import outbox
self.assertEqual(len(outbox), len(correct_recipients))
email_recipients = [email.recipients()[0] for email in outbox]
self.assertEqual(sorted(email_recipients), sorted(correct_recipients))
def test_bulk_invite_users(self):
# type: () -> None
"""The bulk_invite_users code path is for the first user in a realm."""
self.login('hamlet@zulip.com')
invitees = ['alice@zulip.com', 'bob@zulip.com']
params = {
'invitee_emails': ujson.dumps(invitees)
}
result = self.client_post('/json/bulk_invite_users', params)
self.assert_json_success(result)
self.check_sent_emails(invitees)
def test_successful_invite_user(self):
# type: () -> None
"""
A call to /json/invite_users with valid parameters causes an invitation
email to be sent.
"""
self.login("hamlet@zulip.com")
invitee = "alice-test@zulip.com"
self.assert_json_success(self.invite(invitee, ["Denmark"]))
self.assertTrue(find_key_by_email(invitee))
self.check_sent_emails([invitee])
def test_successful_invite_user_with_name(self):
# type: () -> None
"""
A call to /json/invite_users with valid parameters causes an invitation
email to be sent.
"""
self.login("hamlet@zulip.com")
email = "alice-test@zulip.com"
invitee = "Alice Test <{}>".format(email)
self.assert_json_success(self.invite(invitee, ["Denmark"]))
self.assertTrue(find_key_by_email(email))
self.check_sent_emails([email])
def test_successful_invite_user_with_name_and_normal_one(self):
# type: () -> None
"""
A call to /json/invite_users with valid parameters causes an invitation
email to be sent.
"""
self.login("hamlet@zulip.com")
email = "alice-test@zulip.com"
email2 = "bob-test@zulip.com"
invitee = "Alice Test <{}>, {}".format(email, email2)
self.assert_json_success(self.invite(invitee, ["Denmark"]))
self.assertTrue(find_key_by_email(email))
self.assertTrue(find_key_by_email(email2))
self.check_sent_emails([email, email2])
def test_invite_user_signup_initial_history(self):
# type: () -> None
"""
Test that a new user invited to a stream receives some initial
history but only from public streams.
"""
self.login("hamlet@zulip.com")
user_profile = get_user_profile_by_email("hamlet@zulip.com")
private_stream_name = "Secret"
self.make_stream(private_stream_name, invite_only=True)
self.subscribe_to_stream(user_profile.email, private_stream_name)
public_msg_id = self.send_message("hamlet@zulip.com", "Denmark", Recipient.STREAM,
"Public topic", "Public message")
secret_msg_id = self.send_message("hamlet@zulip.com", private_stream_name, Recipient.STREAM,
"Secret topic", "Secret message")
invitee = "alice-test@zulip.com"
self.assert_json_success(self.invite(invitee, [private_stream_name, "Denmark"]))
self.assertTrue(find_key_by_email(invitee))
self.submit_reg_form_for_user("alice-test", "password")
invitee_profile = get_user_profile_by_email(invitee)
invitee_msg_ids = [um.message_id for um in
UserMessage.objects.filter(user_profile=invitee_profile)]
self.assertTrue(public_msg_id in invitee_msg_ids)
self.assertFalse(secret_msg_id in invitee_msg_ids)
def test_multi_user_invite(self):
# type: () -> None
"""
Invites multiple users with a variety of delimiters.
"""
self.login("hamlet@zulip.com")
# Intentionally use a weird string.
self.assert_json_success(self.invite(
"""bob-test@zulip.com, carol-test@zulip.com,
dave-test@zulip.com
earl-test@zulip.com""", ["Denmark"]))
for user in ("bob", "carol", "dave", "earl"):
self.assertTrue(find_key_by_email("%s-test@zulip.com" % (user,)))
self.check_sent_emails(["bob-test@zulip.com", "carol-test@zulip.com",
"dave-test@zulip.com", "earl-test@zulip.com"])
def test_missing_or_invalid_params(self):
# type: () -> None
"""
Tests inviting with various missing or invalid parameters.
"""
self.login("hamlet@zulip.com")
self.assert_json_error(
self.client_post("/json/invite_users", {"invitee_emails": "foo@zulip.com"}),
"You must specify at least one stream for invitees to join.")
for address in ("noatsign.com", "outsideyourdomain@example.net"):
self.assert_json_error(
self.invite(address, ["Denmark"]),
"Some emails did not validate, so we didn't send any invitations.")
self.check_sent_emails([])
def test_invalid_stream(self):
# type: () -> None
"""
Tests inviting to a non-existent stream.
"""
self.login("hamlet@zulip.com")
self.assert_json_error(self.invite("iago-test@zulip.com", ["NotARealStream"]),
"Stream does not exist: NotARealStream. No invites were sent.")
self.check_sent_emails([])
def test_invite_existing_user(self):
# type: () -> None
"""
If you invite an address already using Zulip, no invitation is sent.
"""
self.login("hamlet@zulip.com")
self.assert_json_error(
self.client_post("/json/invite_users",
{"invitee_emails": "hamlet@zulip.com",
"stream": ["Denmark"]}),
"We weren't able to invite anyone.")
self.assertRaises(PreregistrationUser.DoesNotExist,
lambda: PreregistrationUser.objects.get(
email="hamlet@zulip.com"))
self.check_sent_emails([])
def test_invite_some_existing_some_new(self):
# type: () -> None
"""
If you invite a mix of already existing and new users, invitations are
only sent to the new users.
"""
self.login("hamlet@zulip.com")
existing = ["hamlet@zulip.com", "othello@zulip.com"]
new = ["foo-test@zulip.com", "bar-test@zulip.com"]
result = self.client_post("/json/invite_users",
{"invitee_emails": "\n".join(existing + new),
"stream": ["Denmark"]})
self.assert_json_error(result,
"Some of those addresses are already using Zulip, \
so we didn't send them an invitation. We did send invitations to everyone else!")
# We only created accounts for the new users.
for email in existing:
self.assertRaises(PreregistrationUser.DoesNotExist,
lambda: PreregistrationUser.objects.get(
email=email))
for email in new:
self.assertTrue(PreregistrationUser.objects.get(email=email))
# We only sent emails to the new users.
self.check_sent_emails(new)
prereg_user = get_prereg_user_by_email('foo-test@zulip.com')
self.assertEqual(prereg_user.email, 'foo-test@zulip.com')
def test_invite_outside_domain_in_closed_realm(self):
# type: () -> None
"""
In a realm with `restricted_to_domain = True`, you can't invite people
with a different domain from that of the realm or your e-mail address.
"""
zulip_realm = get_realm_by_string_id("zulip")
zulip_realm.restricted_to_domain = True
zulip_realm.save()
self.login("hamlet@zulip.com")
external_address = "foo@example.com"
self.assert_json_error(
self.invite(external_address, ["Denmark"]),
"Some emails did not validate, so we didn't send any invitations.")
def test_invite_outside_domain_in_open_realm(self):
# type: () -> None
"""
In a realm with `restricted_to_domain = False`, you can invite people
with a different domain from that of the realm or your e-mail address.
"""
zulip_realm = get_realm_by_string_id("zulip")
zulip_realm.restricted_to_domain = False
zulip_realm.save()
self.login("hamlet@zulip.com")
external_address = "foo@example.com"
self.assert_json_success(self.invite(external_address, ["Denmark"]))
self.check_sent_emails([external_address])
def test_invite_with_non_ascii_streams(self):
# type: () -> None
"""
Inviting someone to streams with non-ASCII characters succeeds.
"""
self.login("hamlet@zulip.com")
invitee = "alice-test@zulip.com"
stream_name = u"hümbüǵ"
# Make sure we're subscribed before inviting someone.
self.subscribe_to_stream("hamlet@zulip.com", stream_name)
self.assert_json_success(self.invite(invitee, [stream_name]))
def test_refer_friend(self):
# type: () -> None
self.login("hamlet@zulip.com")
user = get_user_profile_by_email('hamlet@zulip.com')
user.invites_granted = 1
user.invites_used = 0
user.save()
invitee = "alice-test@zulip.com"
result = self.client_post('/json/refer_friend', dict(email=invitee))
self.assert_json_success(result)
# verify this works
Referral.objects.get(user_profile=user, email=invitee)
user = get_user_profile_by_email('hamlet@zulip.com')
self.assertEqual(user.invites_used, 1)
def test_invitation_reminder_email(self):
# type: () -> None
from django.core.mail import outbox
current_user_email = "hamlet@zulip.com"
self.login(current_user_email)
invitee = "alice-test@zulip.com"
self.assert_json_success(self.invite(invitee, ["Denmark"]))
self.assertTrue(find_key_by_email(invitee))
self.check_sent_emails([invitee])
data = {"email": invitee, "referrer_email": current_user_email}
invitee = get_prereg_user_by_email(data["email"])
referrer = get_user_profile_by_email(data["referrer_email"])
link = Confirmation.objects.get_link_for_object(invitee, host=referrer.realm.host)
context = common_context(referrer)
context.update({
'activate_url': link,
'referrer': referrer,
'verbose_support_offers': settings.VERBOSE_SUPPORT_OFFERS,
'support_email': settings.ZULIP_ADMINISTRATOR
})
with self.settings(EMAIL_BACKEND='django.core.mail.backends.console.EmailBackend'):
send_local_email_template_with_delay(
[{'email': data["email"], 'name': ""}],
"zerver/emails/invitation/invitation_reminder_email",
context,
datetime.timedelta(days=0),
tags=["invitation-reminders"],
sender={'email': settings.ZULIP_ADMINISTRATOR, 'name': 'Zulip'})
email_jobs_to_deliver = ScheduledJob.objects.filter(
type=ScheduledJob.EMAIL,
scheduled_timestamp__lte=datetime.datetime.utcnow())
self.assertEqual(len(email_jobs_to_deliver), 1)
email_count = len(outbox)
for job in email_jobs_to_deliver:
self.assertTrue(send_email_job(job))
self.assertEqual(len(outbox), email_count + 1)
class InviteeEmailsParserTests(TestCase):
def setUp(self):
# type: () -> None
self.email1 = "email1@zulip.com"
self.email2 = "email2@zulip.com"
self.email3 = "email3@zulip.com"
def test_if_emails_separated_by_commas_are_parsed_and_striped_correctly(self):
# type: () -> None
emails_raw = "{} ,{}, {}".format(self.email1, self.email2, self.email3)
expected_set = {self.email1, self.email2, self.email3}
self.assertEqual(get_invitee_emails_set(emails_raw), expected_set)
def test_if_emails_separated_by_newlines_are_parsed_and_striped_correctly(self):
# type: () -> None
emails_raw = "{}\n {}\n {} ".format(self.email1, self.email2, self.email3)
expected_set = {self.email1, self.email2, self.email3}
self.assertEqual(get_invitee_emails_set(emails_raw), expected_set)
def test_if_emails_from_email_client_separated_by_newlines_are_parsed_correctly(self):
# type: () -> None
emails_raw = "Email One <{}>\nEmailTwo<{}>\nEmail Three<{}>".format(self.email1, self.email2, self.email3)
expected_set = {self.email1, self.email2, self.email3}
self.assertEqual(get_invitee_emails_set(emails_raw), expected_set)
def test_if_emails_in_mixed_style_are_parsed_correctly(self):
# type: () -> None
emails_raw = "Email One <{}>,EmailTwo<{}>\n{}".format(self.email1, self.email2, self.email3)
expected_set = {self.email1, self.email2, self.email3}
self.assertEqual(get_invitee_emails_set(emails_raw), expected_set)
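# Illustrative sketch (not part of the test suite): a rough standalone
# re-implementation of the behaviour these tests pin down, i.e. accepting
# comma- or newline-separated invitees, optionally in "Name <email>" form.
# The real get_invitee_emails_set() lives in zerver.views.invite and may
# differ in detail.
def _example_parse_invitees(emails_raw):
    import re as _re
    emails = set()
    for chunk in _re.split(r"[,\n]", emails_raw):
        chunk = chunk.strip()
        if not chunk:
            continue
        match = _re.search(r"<(.+)>", chunk)
        emails.add(match.group(1).strip() if match else chunk)
    return emails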
class EmailUnsubscribeTests(ZulipTestCase):
def test_error_unsubscribe(self):
# type: () -> None
result = self.client_get('/accounts/unsubscribe/missed_messages/test123')
self.assert_in_response('Unknown email unsubscribe request', result)
def test_missedmessage_unsubscribe(self):
# type: () -> None
"""
We provide one-click unsubscribe links in missed message
e-mails that you can click even when logged out to update your
email notification settings.
"""
user_profile = get_user_profile_by_email("hamlet@zulip.com")
user_profile.enable_offline_email_notifications = True
user_profile.save()
unsubscribe_link = one_click_unsubscribe_link(user_profile,
"missed_messages")
result = self.client_get(urllib.parse.urlparse(unsubscribe_link).path)
self.assertEqual(result.status_code, 200)
# Circumvent user_profile caching.
user_profile = UserProfile.objects.get(email="hamlet@zulip.com")
self.assertFalse(user_profile.enable_offline_email_notifications)
def test_welcome_unsubscribe(self):
# type: () -> None
"""
We provide one-click unsubscribe links in welcome e-mails that you can
click even when logged out to stop receiving them.
"""
email = "hamlet@zulip.com"
user_profile = get_user_profile_by_email("hamlet@zulip.com")
# Simulate a new user signing up, which enqueues 2 welcome e-mails.
enqueue_welcome_emails(email, "King Hamlet")
self.assertEqual(2, len(ScheduledJob.objects.filter(
type=ScheduledJob.EMAIL, filter_string__iexact=email)))
# Simulate unsubscribing from the welcome e-mails.
unsubscribe_link = one_click_unsubscribe_link(user_profile, "welcome")
result = self.client_get(urllib.parse.urlparse(unsubscribe_link).path)
# The welcome email jobs are no longer scheduled.
self.assertEqual(result.status_code, 200)
self.assertEqual(0, len(ScheduledJob.objects.filter(
type=ScheduledJob.EMAIL, filter_string__iexact=email)))
def test_digest_unsubscribe(self):
# type: () -> None
"""
We provide one-click unsubscribe links in digest e-mails that you can
click even when logged out to stop receiving them.
Unsubscribing from these emails also dequeues any digest email jobs that
have been queued.
"""
email = "hamlet@zulip.com"
user_profile = get_user_profile_by_email("hamlet@zulip.com")
self.assertTrue(user_profile.enable_digest_emails)
# Enqueue a fake digest email.
send_digest_email(user_profile, "", "")
self.assertEqual(1, len(ScheduledJob.objects.filter(
type=ScheduledJob.EMAIL, filter_string__iexact=email)))
# Simulate unsubscribing from digest e-mails.
unsubscribe_link = one_click_unsubscribe_link(user_profile, "digest")
result = self.client_get(urllib.parse.urlparse(unsubscribe_link).path)
# The setting is toggled off, and scheduled jobs have been removed.
self.assertEqual(result.status_code, 200)
# Circumvent user_profile caching.
user_profile = UserProfile.objects.get(email="hamlet@zulip.com")
self.assertFalse(user_profile.enable_digest_emails)
self.assertEqual(0, len(ScheduledJob.objects.filter(
type=ScheduledJob.EMAIL, filter_string__iexact=email)))
class RealmCreationTest(ZulipTestCase):
def test_create_realm(self):
# type: () -> None
username = "user1"
password = "test"
string_id = "zuliptest"
domain = 'test.com'
email = "user1@test.com"
realm = get_realm_by_string_id('test')
# Make sure the realm does not exist
self.assertIsNone(realm)
with self.settings(OPEN_REALM_CREATION=True):
# Create new realm with the email
result = self.client_post('/create_realm/', {'email': email})
self.assertEqual(result.status_code, 302)
self.assertTrue(result["Location"].endswith(
"/accounts/send_confirm/%s" % (email,)))
result = self.client_get(result["Location"])
self.assert_in_response("Check your email so we can get started.", result)
# Visit the confirmation link.
confirmation_url = self.get_confirmation_url_from_outbox(email)
result = self.client_get(confirmation_url)
self.assertEqual(result.status_code, 200)
result = self.submit_reg_form_for_user(username, password, domain=domain,
realm_subdomain = string_id)
self.assertEqual(result.status_code, 302)
# Make sure the realm is created
realm = get_realm_by_string_id(string_id)
self.assertIsNotNone(realm)
self.assertEqual(realm.string_id, string_id)
self.assertEqual(get_user_profile_by_email(email).realm, realm)
# Check defaults
self.assertEqual(realm.org_type, Realm.COMMUNITY)
self.assertEqual(realm.restricted_to_domain, False)
self.assertEqual(realm.invite_required, True)
self.assertTrue(result["Location"].endswith("/"))
def test_create_realm_with_subdomain(self):
# type: () -> None
username = "user1"
password = "test"
string_id = "zuliptest"
domain = "test.com"
email = "user1@test.com"
realm_name = "Test"
# Make sure the realm does not exist
self.assertIsNone(get_realm_by_string_id('test'))
with self.settings(REALMS_HAVE_SUBDOMAINS=True), self.settings(OPEN_REALM_CREATION=True):
# Create new realm with the email
result = self.client_post('/create_realm/', {'email': email})
self.assertEqual(result.status_code, 302)
self.assertTrue(result["Location"].endswith(
"/accounts/send_confirm/%s" % (email,)))
result = self.client_get(result["Location"])
self.assert_in_response("Check your email so we can get started.", result)
# Visit the confirmation link.
confirmation_url = self.get_confirmation_url_from_outbox(email)
result = self.client_get(confirmation_url)
self.assertEqual(result.status_code, 200)
result = self.submit_reg_form_for_user(username, password, domain=domain,
realm_subdomain = string_id,
realm_name=realm_name,
# Pass HTTP_HOST for the target subdomain
HTTP_HOST=string_id + ".testserver")
self.assertEqual(result.status_code, 302)
# Make sure the realm is created
realm = get_realm_by_string_id(string_id)
self.assertIsNotNone(realm)
self.assertEqual(realm.string_id, string_id)
self.assertEqual(get_user_profile_by_email(email).realm, realm)
self.assertEqual(realm.name, realm_name)
self.assertEqual(realm.subdomain, string_id)
def test_mailinator_signup(self):
# type: () -> None
with self.settings(OPEN_REALM_CREATION=True):
result = self.client_post('/create_realm/', {'email': "hi@mailinator.com"})
self.assert_in_response('Please use your real email address.', result)
def test_subdomain_restrictions(self):
# type: () -> None
username = "user1"
password = "test"
domain = "test.com"
email = "user1@test.com"
realm_name = "Test"
with self.settings(REALMS_HAVE_SUBDOMAINS=False), self.settings(OPEN_REALM_CREATION=True):
result = self.client_post('/create_realm/', {'email': email})
self.client_get(result["Location"])
confirmation_url = self.get_confirmation_url_from_outbox(email)
self.client_get(confirmation_url)
errors = {'id': "at least 3 characters",
'-id': "cannot start or end with a",
'string-ID': "lowercase letters",
'string_id': "lowercase letters",
'stream': "unavailable",
'streams': "unavailable",
'about': "unavailable",
'abouts': "unavailable",
'mit': "unavailable"}
for string_id, error_msg in errors.items():
result = self.submit_reg_form_for_user(username, password, domain = domain,
realm_subdomain = string_id,
realm_name = realm_name)
self.assert_in_response(error_msg, result)
# test valid subdomain
result = self.submit_reg_form_for_user(username, password, domain = domain,
realm_subdomain = 'a-0',
realm_name = realm_name)
self.assertEqual(result.status_code, 302)
class UserSignUpTest(ZulipTestCase):
def test_user_default_language(self):
# type: () -> None
"""
Check if the default language of new user is the default language
of the realm.
"""
username = "newguy"
email = "newguy@zulip.com"
password = "newpassword"
realm = get_realm_by_string_id('zulip')
domain = realm.domain
do_set_realm_default_language(realm, "de")
result = self.client_post('/accounts/home/', {'email': email})
self.assertEqual(result.status_code, 302)
self.assertTrue(result["Location"].endswith(
"/accounts/send_confirm/%s@%s" % (username, domain)))
result = self.client_get(result["Location"])
self.assert_in_response("Check your email so we can get started.", result)
# Visit the confirmation link.
confirmation_url = self.get_confirmation_url_from_outbox(email)
result = self.client_get(confirmation_url)
self.assertEqual(result.status_code, 200)
# Pick a password and agree to the ToS.
result = self.submit_reg_form_for_user(username, password, domain)
self.assertEqual(result.status_code, 302)
user_profile = get_user_profile_by_email(email)
self.assertEqual(user_profile.default_language, realm.default_language)
from django.core.mail import outbox
outbox.pop()
def test_unique_completely_open_domain(self):
# type: () -> None
username = "user1"
password = "test"
email = "user1@acme.com"
subdomain = "zulip"
realm_name = "Zulip"
realm = get_realm_by_string_id('zulip')
realm.restricted_to_domain = False
realm.invite_required = False
realm.save()
realm = get_realm_by_string_id('mit')
do_deactivate_realm(realm)
realm.save()
result = self.client_post('/register/', {'email': email})
self.assertEqual(result.status_code, 302)
self.assertTrue(result["Location"].endswith(
"/accounts/send_confirm/%s" % (email,)))
result = self.client_get(result["Location"])
self.assert_in_response("Check your email so we can get started.", result)
# Visit the confirmation link.
from django.core.mail import outbox
for message in reversed(outbox):
if email in message.to:
confirmation_link_pattern = re.compile(settings.EXTERNAL_HOST + "(\S+)>")
confirmation_url = confirmation_link_pattern.search(
message.body).groups()[0]
break
else:
raise ValueError("Couldn't find a confirmation email.")
result = self.client_get(confirmation_url)
self.assertEqual(result.status_code, 200)
result = self.submit_reg_form_for_user(username,
password,
domain='acme.com',
realm_name=realm_name,
realm_subdomain=subdomain,
# Pass HTTP_HOST for the target subdomain
HTTP_HOST=subdomain + ".testserver")
self.assert_in_success_response(["You're almost there."], result)
def test_completely_open_domain_success(self):
# type: () -> None
username = "user1"
password = "test"
email = "user1@acme.com"
subdomain = "zulip"
realm_name = "Zulip"
realm = get_realm_by_string_id('zulip')
realm.restricted_to_domain = False
realm.invite_required = False
realm.save()
result = self.client_post('/register/zulip/', {'email': email})
self.assertEqual(result.status_code, 302)
self.assertTrue(result["Location"].endswith(
"/accounts/send_confirm/%s" % (email,)))
result = self.client_get(result["Location"])
self.assert_in_response("Check your email so we can get started.", result)
# Visit the confirmation link.
from django.core.mail import outbox
for message in reversed(outbox):
if email in message.to:
confirmation_link_pattern = re.compile(settings.EXTERNAL_HOST + "(\S+)>")
confirmation_url = confirmation_link_pattern.search(
message.body).groups()[0]
break
else:
raise ValueError("Couldn't find a confirmation email.")
result = self.client_get(confirmation_url)
self.assertEqual(result.status_code, 200)
result = self.submit_reg_form_for_user(username,
password,
domain='acme.com',
realm_name=realm_name,
realm_subdomain=subdomain,
# Pass HTTP_HOST for the target subdomain
HTTP_HOST=subdomain + ".testserver")
self.assert_in_success_response(["You're almost there."], result)
def test_failed_signup_due_to_restricted_domain(self):
# type: () -> None
realm = get_realm_by_string_id('zulip')
with self.settings(REALMS_HAVE_SUBDOMAINS = True):
request = HostRequestMock(host = realm.host)
request.session = {} # type: ignore
form = HomepageForm({'email': 'user@acme.com'}, realm=realm)
self.assertIn("trying to join, zulip, only allows users with e-mail", form.errors['email'][0])
def test_failed_signup_due_to_invite_required(self):
# type: () -> None
realm = get_realm_by_string_id('zulip')
realm.invite_required = True
realm.save()
request = HostRequestMock(host = realm.host)
request.session = {} # type: ignore
form = HomepageForm({'email': 'user@zulip.com'}, realm=realm)
self.assertIn("Please request an invite from", form.errors['email'][0])
def test_failed_signup_due_to_nonexistent_realm(self):
# type: () -> None
with self.settings(REALMS_HAVE_SUBDOMAINS = True):
request = HostRequestMock(host = 'acme.' + settings.EXTERNAL_HOST)
request.session = {} # type: ignore
form = HomepageForm({'email': 'user@acme.com'}, realm=None)
self.assertIn("organization you are trying to join does not exist", form.errors['email'][0])
def test_registration_through_ldap(self):
# type: () -> None
username = "newuser"
password = "testing"
domain = "zulip.com"
email = "newuser@zulip.com"
subdomain = "zulip"
realm_name = "Zulip"
ldap_user_attr_map = {'full_name': 'fn', 'short_name': 'sn'}
ldap_patcher = patch('django_auth_ldap.config.ldap.initialize')
mock_initialize = ldap_patcher.start()
mock_ldap = MockLDAP()
mock_initialize.return_value = mock_ldap
mock_ldap.directory = {
'uid=newuser,ou=users,dc=zulip,dc=com': {
'userPassword': 'testing',
'fn': ['New User Name']
}
}
with patch('zerver.views.get_subdomain', return_value=subdomain):
result = self.client_post('/register/', {'email': email})
self.assertEqual(result.status_code, 302)
self.assertTrue(result["Location"].endswith(
"/accounts/send_confirm/%s" % (email,)))
result = self.client_get(result["Location"])
self.assert_in_response("Check your email so we can get started.", result)
# Visit the confirmation link.
from django.core.mail import outbox
for message in reversed(outbox):
if email in message.to:
confirmation_link_pattern = re.compile(settings.EXTERNAL_HOST + "(\S+)>")
confirmation_url = confirmation_link_pattern.search(
message.body).groups()[0]
break
else:
raise ValueError("Couldn't find a confirmation email.")
with self.settings(
POPULATE_PROFILE_VIA_LDAP=True,
LDAP_APPEND_DOMAIN='zulip.com',
AUTH_LDAP_BIND_PASSWORD='',
AUTH_LDAP_USER_ATTR_MAP=ldap_user_attr_map,
AUTHENTICATION_BACKENDS=('zproject.backends.ZulipLDAPAuthBackend',),
AUTH_LDAP_USER_DN_TEMPLATE='uid=%(user)s,ou=users,dc=zulip,dc=com'):
result = self.client_get(confirmation_url)
self.assertEqual(result.status_code, 200)
result = self.submit_reg_form_for_user(username,
password,
domain=domain,
realm_name=realm_name,
realm_subdomain=subdomain,
from_confirmation='1',
# Pass HTTP_HOST for the target subdomain
HTTP_HOST=subdomain + ".testserver")
self.assert_in_success_response(["You're almost there.",
"New User Name",
"newuser@zulip.com"],
result)
# Test the TypeError exception handler
mock_ldap.directory = {
'uid=newuser,ou=users,dc=zulip,dc=com': {
'userPassword': 'testing',
'fn': None # This will raise TypeError
}
}
result = self.submit_reg_form_for_user(username,
password,
domain=domain,
realm_name=realm_name,
realm_subdomain=subdomain,
from_confirmation='1',
# Pass HTTP_HOST for the target subdomain
HTTP_HOST=subdomain + ".testserver")
self.assert_in_success_response(["You're almost there.",
"newuser@zulip.com"],
result)
mock_ldap.reset()
mock_initialize.stop()
@patch('DNS.dnslookup', return_value=[['sipbtest:*:20922:101:Fred Sipb,,,:/mit/sipbtest:/bin/athena/tcsh']])
def test_registration_of_mirror_dummy_user(self, ignored):
# type: (Any) -> None
username = "sipbtest"
password = "test"
domain = "mit.edu"
email = "sipbtest@mit.edu"
subdomain = "sipb"
realm_name = "MIT"
user_profile = get_user_profile_by_email(email)
user_profile.is_mirror_dummy = True
user_profile.is_active = False
user_profile.save()
result = self.client_post('/register/', {'email': email})
self.assertEqual(result.status_code, 302)
self.assertTrue(result["Location"].endswith(
"/accounts/send_confirm/%s" % (email,)))
result = self.client_get(result["Location"])
self.assert_in_response("Check your email so we can get started.", result)
# Visit the confirmation link.
from django.core.mail import outbox
for message in reversed(outbox):
if email in message.to:
confirmation_link_pattern = re.compile(settings.EXTERNAL_HOST + "(\S+)>")
confirmation_url = confirmation_link_pattern.search(
message.body).groups()[0]
break
else:
raise ValueError("Couldn't find a confirmation email.")
result = self.client_get(confirmation_url)
self.assertEqual(result.status_code, 200)
result = self.submit_reg_form_for_user(username,
password,
domain=domain,
realm_name=realm_name,
realm_subdomain=subdomain,
from_confirmation='1',
# Pass HTTP_HOST for the target subdomain
HTTP_HOST=subdomain + ".testserver")
self.assertEqual(result.status_code, 200)
result = self.submit_reg_form_for_user(username,
password,
domain=domain,
realm_name=realm_name,
realm_subdomain=subdomain,
# Pass HTTP_HOST for the target subdomain
HTTP_HOST=subdomain + ".testserver")
self.assertEqual(result.status_code, 302)
self.assertEqual(get_session_dict_user(self.client.session), user_profile.id)
class DeactivateUserTest(ZulipTestCase):
def test_deactivate_user(self):
# type: () -> None
email = 'hamlet@zulip.com'
self.login(email)
user = get_user_profile_by_email('hamlet@zulip.com')
self.assertTrue(user.is_active)
result = self.client_delete('/json/users/me')
self.assert_json_success(result)
user = get_user_profile_by_email('hamlet@zulip.com')
self.assertFalse(user.is_active)
self.login(email, fails=True)
def test_do_not_deactivate_final_admin(self):
# type: () -> None
email = 'iago@zulip.com'
self.login(email)
user = get_user_profile_by_email('iago@zulip.com')
self.assertTrue(user.is_active)
result = self.client_delete('/json/users/me')
self.assert_json_error(result, "Cannot deactivate the only organization administrator")
user = get_user_profile_by_email('iago@zulip.com')
self.assertTrue(user.is_active)
self.assertTrue(user.is_realm_admin)
email = 'hamlet@zulip.com'
user_2 = get_user_profile_by_email('hamlet@zulip.com')
do_change_is_admin(user_2, True)
self.assertTrue(user_2.is_realm_admin)
result = self.client_delete('/json/users/me')
self.assert_json_success(result)
do_change_is_admin(user, True)
| apache-2.0 |
rlpy/rlpy | rlpy/Representations/LocalBases.py | 1 | 7491 | """
Representations which use local bases function (e.g. kernels) distributed
in the statespace according to some scheme (e.g. grid, random, on previous
samples)
"""
from __future__ import division
from __future__ import print_function
from __future__ import absolute_import
from __future__ import unicode_literals
from builtins import super
from future import standard_library
standard_library.install_aliases()
from builtins import range
from past.utils import old_div
from .Representation import Representation
import numpy as np
from rlpy.Tools.GeneralTools import addNewElementForAllActions
import matplotlib.pyplot as plt
try:
from .kernels import batch
except ImportError:
from .slow_kernels import batch
print("C-Extensions for kernels not available, expect slow runtime")
__copyright__ = "Copyright 2013, RLPy http://acl.mit.edu/RLPy"
__credits__ = ["Alborz Geramifard", "Robert H. Klein", "Christoph Dann",
"William Dabney", "Jonathan P. How"]
__license__ = "BSD 3-Clause"
class LocalBases(Representation):
"""
abstract base class for representations that use local basis functions
"""
#: centers of bases
centers = None
#: widths of bases
widths = None
def __init__(self, domain, kernel, normalization=False, seed=1, **kwargs):
"""
:param domain: domain to learn on.
:param kernel: function handle to use for kernel function evaluations.
:param normalization: (Boolean) If true, normalize feature vector so
that sum( phi(s) ) = 1.
Associates a kernel function with each feature.
"""
self.kernel = batch[kernel.__name__]
self.normalization = normalization
self.centers = np.zeros((0, domain.statespace_limits.shape[0]))
self.widths = np.zeros((0, domain.statespace_limits.shape[0]))
super(LocalBases, self).__init__(domain, seed=seed)
def phi_nonTerminal(self, s):
v = self.kernel(s, self.centers, self.widths)
if self.normalization and not v.sum() == 0.:
# normalize such that each vector has a l1 norm of 1
v /= v.sum()
return v
def plot_2d_feature_centers(self, d1=None, d2=None):
"""
:param d1: 1 (of 2 possible) indices of dimensions to plot; ignore all
others, purely visual.
:param d2: 1 (of 2 possible) indices of dimensions to plot; ignore all
others, purely visual.
Plots the centers of all features in dimensions d1 and d2.
If no dimensions are specified, the first two continuous dimensions
are shown.
"""
if d1 is None and d2 is None:
# just take the first two dimensions
d1, d2 = self.domain.continuous_dims[:2]
plt.figure("Feature Dimensions {} and {}".format(d1, d2))
for i in range(self.centers.shape[0]):
plt.plot([self.centers[i, d1]],
[self.centers[i, d2]], "r", marker="x")
plt.draw()
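# Illustrative sketch (not part of rlpy): what phi_nonTerminal() computes for a
# Gaussian-style kernel, using plain numpy instead of the compiled batch
# kernels.  The centers, widths and exact kernel formula are made-up numbers;
# only the l1-normalization step mirrors the class above.
def _example_local_phi():
    import numpy as np
    s = np.array([0.5, 0.5])
    centers = np.array([[0.0, 0.0], [1.0, 1.0]])
    widths = np.ones_like(centers)
    v = np.exp(-((s - centers) ** 2 / widths ** 2).sum(axis=1))  # one activation per center
    if v.sum() != 0.:
        v /= v.sum()  # l1-normalized feature vector, as with normalization=True
    return v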
class NonparametricLocalBases(LocalBases):
def __init__(self, domain, kernel,
max_similarity=0.9, resolution=5, **kwargs):
"""
:param domain: domain to learn on.
:param kernel: function handle to use for kernel function evaluations.
:param max_similarity: threshold to allow feature to be added to
representation. Larger max_similarity makes it \"easier\" to add
more features by permitting larger values of phi(s) before
discarding. (An existing feature function in phi() with large value
at phi(s) implies that it is very representative of the true
function at *s*. i.e., the value of a feature in phi(s) is
inversely related to the \"similarity\" of a potential new feature.
:param resolution: to be used by the ``kernel()`` function, see parent.
Determines *width* of basis functions, eg sigma in Gaussian basis.
"""
self.max_similarity = max_similarity
self.common_width = old_div((domain.statespace_limits[:, 1]
- domain.statespace_limits[:, 0]), resolution)
self.features_num = 0
super(
NonparametricLocalBases,
self).__init__(
domain,
kernel,
**kwargs)
def pre_discover(self, s, terminal, a, sn, terminaln):
norm = self.normalization
expanded = 0
self.normalization = False
if not terminal:
phi_s = self.phi_nonTerminal(s)
if np.all(phi_s < self.max_similarity):
self._add_feature(s)
expanded += 1
if not terminaln:
phi_s = self.phi_nonTerminal(sn)
if np.all(phi_s < self.max_similarity):
self._add_feature(sn)
expanded += 1
self.normalization = norm
return expanded
def _add_feature(self, center):
self.features_num += 1
self.centers = np.vstack((self.centers, center))
self.widths = np.vstack((self.widths, self.common_width))
# TODO if normalized, use Q estimate for center to fill weight_vec
new = np.zeros((self.domain.actions_num, 1))
self.weight_vec = addNewElementForAllActions(
self.weight_vec,
self.domain.actions_num,
new)
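# Illustrative sketch (not part of rlpy): the growth rule used by
# pre_discover() above -- a new center is added at the current state only if
# no existing feature is already strongly activated there.  The numbers below
# are arbitrary.
def _example_should_add_center():
    import numpy as np
    max_similarity = 0.9
    phi_s = np.array([0.1, 0.45, 0.3])  # activations of existing features at s
    return bool(np.all(phi_s < max_similarity))  # True -> add a feature centered at s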
class RandomLocalBases(LocalBases):
def __init__(self, domain, kernel, num=100, resolution_min=5,
resolution_max=None, seed=1, **kwargs):
"""
:param domain: domain to learn on.
:param kernel: function handle to use for kernel function evaluations.
:param num: Fixed number of feature (kernel) functions to use in
EACH dimension. (for a total of features_num=numDims * num)
:param resolution_min: resolution selected uniform random, lower bound.
:param resolution_max: resolution selected uniform random, upper bound.
:param seed: the random seed to use when scattering basis functions.
Randomly scatter ``num`` feature functions throughout the domain, with
sigma / noise parameter selected uniform random between
``resolution_min`` and ``resolution_max``. NOTE these are
sensitive to the choice of coordinate (scale with coordinate units).
"""
self.features_num = num
self.dim_widths = (domain.statespace_limits[:, 1]
- domain.statespace_limits[:, 0])
self.resolution_max = resolution_max
self.resolution_min = resolution_min
super(
RandomLocalBases,
self).__init__(
domain,
kernel,
seed=seed,
**kwargs)
self.centers = np.zeros((num, len(self.dim_widths)))
self.widths = np.zeros((num, len(self.dim_widths)))
self.init_randomization()
def init_randomization(self):
for i in range(self.features_num):
for d in range(len(self.dim_widths)):
self.centers[i, d] = self.random_state.uniform(
self.domain.statespace_limits[d, 0],
self.domain.statespace_limits[d, 1])
self.widths[i, d] = self.random_state.uniform(
old_div(self.dim_widths[d], self.resolution_max),
old_div(self.dim_widths[d], self.resolution_min))
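# Illustrative sketch (not part of rlpy): init_randomization() above draws each
# center uniformly inside the state-space box and each width uniformly between
# dim_width/resolution_max and dim_width/resolution_min.  Plain numpy version
# with invented limits and resolutions (8 and 4):
def _example_scatter_bases(num=5, seed=1):
    import numpy as np
    rng = np.random.RandomState(seed)
    limits = np.array([[0.0, 1.0], [-2.0, 2.0]])  # assumed statespace_limits
    dim_widths = limits[:, 1] - limits[:, 0]
    centers = rng.uniform(limits[:, 0], limits[:, 1], size=(num, len(dim_widths)))
    widths = rng.uniform(dim_widths / 8.0, dim_widths / 4.0, size=(num, len(dim_widths)))
    return centers, widths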
| bsd-3-clause |
robbiet480/home-assistant | tests/components/speedtestdotnet/test_init.py | 6 | 2222 | """Tests for SpeedTest integration."""
import speedtest
from homeassistant import config_entries
from homeassistant.components import speedtestdotnet
from homeassistant.setup import async_setup_component
from tests.async_mock import patch
from tests.common import MockConfigEntry
async def test_setup_with_config(hass):
"""Test that we import the config and setup the integration."""
config = {
speedtestdotnet.DOMAIN: {
speedtestdotnet.CONF_SERVER_ID: "1",
speedtestdotnet.CONF_MANUAL: True,
speedtestdotnet.CONF_SCAN_INTERVAL: "00:01:00",
}
}
with patch("speedtest.Speedtest"):
assert await async_setup_component(hass, speedtestdotnet.DOMAIN, config)
async def test_successful_config_entry(hass):
"""Test that SpeedTestDotNet is configured successfully."""
entry = MockConfigEntry(domain=speedtestdotnet.DOMAIN, data={},)
entry.add_to_hass(hass)
with patch("speedtest.Speedtest"), patch(
"homeassistant.config_entries.ConfigEntries.async_forward_entry_setup",
return_value=True,
) as forward_entry_setup:
await hass.config_entries.async_setup(entry.entry_id)
assert entry.state == config_entries.ENTRY_STATE_LOADED
assert forward_entry_setup.mock_calls[0][1] == (entry, "sensor",)
async def test_setup_failed(hass):
"""Test SpeedTestDotNet failed due to an error."""
entry = MockConfigEntry(domain=speedtestdotnet.DOMAIN, data={},)
entry.add_to_hass(hass)
with patch("speedtest.Speedtest", side_effect=speedtest.ConfigRetrievalError):
await hass.config_entries.async_setup(entry.entry_id)
assert entry.state == config_entries.ENTRY_STATE_SETUP_RETRY
async def test_unload_entry(hass):
"""Test removing SpeedTestDotNet."""
entry = MockConfigEntry(domain=speedtestdotnet.DOMAIN, data={},)
entry.add_to_hass(hass)
with patch("speedtest.Speedtest"):
await hass.config_entries.async_setup(entry.entry_id)
assert await hass.config_entries.async_unload(entry.entry_id)
await hass.async_block_till_done()
assert entry.state == config_entries.ENTRY_STATE_NOT_LOADED
assert speedtestdotnet.DOMAIN not in hass.data
| apache-2.0 |
Plain-Andy-legacy/android_external_chromium_org | native_client_sdk/src/build_tools/manifest_util.py | 161 | 19959 | # Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import copy
import hashlib
import json
import string
import sys
import urllib2
MANIFEST_VERSION = 2
# Some commonly-used key names.
ARCHIVES_KEY = 'archives'
BUNDLES_KEY = 'bundles'
NAME_KEY = 'name'
REVISION_KEY = 'revision'
VERSION_KEY = 'version'
# Valid values for the archive.host_os field
HOST_OS_LITERALS = frozenset(['mac', 'win', 'linux', 'all'])
# Valid keys for various sdk objects, used for validation.
VALID_ARCHIVE_KEYS = frozenset(['host_os', 'size', 'checksum', 'url'])
# Valid values for bundle.stability field
STABILITY_LITERALS = [
'obsolete', 'post_stable', 'stable', 'beta', 'dev', 'canary']
# Valid values for bundle-recommended field.
YES_NO_LITERALS = ['yes', 'no']
VALID_BUNDLES_KEYS = frozenset([
ARCHIVES_KEY, NAME_KEY, VERSION_KEY, REVISION_KEY,
'description', 'desc_url', 'stability', 'recommended', 'repath',
'sdk_revision'
])
VALID_MANIFEST_KEYS = frozenset(['manifest_version', BUNDLES_KEY])
def GetHostOS():
'''Returns the host_os value that corresponds to the current host OS'''
return {
'linux2': 'linux',
'darwin': 'mac',
'cygwin': 'win',
'win32': 'win'
}[sys.platform]
def DictToJSON(pydict):
"""Convert a dict to a JSON-formatted string."""
pretty_string = json.dumps(pydict, sort_keys=True, indent=2)
# json.dumps sometimes returns trailing whitespace and does not put
# a newline at the end. This code fixes these problems.
pretty_lines = pretty_string.split('\n')
return '\n'.join([line.rstrip() for line in pretty_lines]) + '\n'
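# Illustrative sketch (not part of the original module): the helper above is
# json.dumps with sorted keys and a 2-space indent, plus per-line rstrip and a
# trailing newline.
def _example_dict_to_json():
    text = DictToJSON({'b': 1, 'a': [2]})
    # text == '{\n  "a": [\n    2\n  ],\n  "b": 1\n}\n'
    return text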
def DownloadAndComputeHash(from_stream, to_stream=None, progress_func=None):
'''Download the archive data from from-stream and generate sha1 and
size info.
Args:
from_stream: An input stream that supports read.
to_stream: [optional] the data is written to to_stream if it is
provided.
progress_func: [optional] A function used to report download progress. If
provided, progress_func is called with progress=0 at the
beginning of the download, periodically with progress=1
during the download, and progress=100 at the end.
Return
A tuple (sha1, size) where sha1 is a sha1-hash for the archive data and
size is the size of the archive data in bytes.'''
# Use a no-op progress function if none is specified.
def progress_no_op(progress):
pass
if not progress_func:
progress_func = progress_no_op
sha1_hash = hashlib.sha1()
size = 0
progress_func(progress=0)
while(1):
data = from_stream.read(32768)
if not data:
break
sha1_hash.update(data)
size += len(data)
if to_stream:
to_stream.write(data)
progress_func(size)
progress_func(progress=100)
return sha1_hash.hexdigest(), size
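# Illustrative sketch (not part of the original module): the helper above works
# on any stream with read(); here a BytesIO stands in for the urlopen() stream
# used elsewhere in this file.
def _example_hash_stream():
    import io
    sha1, size = DownloadAndComputeHash(io.BytesIO(b'hello'))
    # sha1 == hashlib.sha1(b'hello').hexdigest(), size == 5
    return sha1, size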
class Error(Exception):
"""Generic error/exception for manifest_util module"""
pass
class Archive(dict):
"""A placeholder for sdk archive information. We derive Archive from
dict so that it is easily serializable. """
def __init__(self, host_os_name):
""" Create a new archive for the given host-os name. """
super(Archive, self).__init__()
self['host_os'] = host_os_name
def CopyFrom(self, src):
"""Update the content of the archive by copying values from the given
dictionary.
Args:
src: The dictionary whose values must be copied to the archive."""
for key, value in src.items():
self[key] = value
def Validate(self, error_on_unknown_keys=False):
"""Validate the content of the archive object. Raise an Error if
an invalid or missing field is found.
Args:
error_on_unknown_keys: If True, raise an Error when unknown keys are
found in the archive.
"""
host_os = self.get('host_os', None)
if host_os and host_os not in HOST_OS_LITERALS:
raise Error('Invalid host-os name in archive')
# Ensure host_os has a valid string. We'll use it for pretty printing.
if not host_os:
host_os = 'all (default)'
if not self.get('url', None):
raise Error('Archive "%s" has no URL' % host_os)
if not self.get('size', None):
raise Error('Archive "%s" has no size' % host_os)
checksum = self.get('checksum', None)
if not checksum:
raise Error('Archive "%s" has no checksum' % host_os)
elif not isinstance(checksum, dict):
raise Error('Archive "%s" has a checksum, but it is not a dict' % host_os)
elif not len(checksum):
raise Error('Archive "%s" has an empty checksum dict' % host_os)
# Verify that all key names are valid.
if error_on_unknown_keys:
for key in self:
if key not in VALID_ARCHIVE_KEYS:
raise Error('Archive "%s" has invalid attribute "%s"' % (
host_os, key))
def UpdateVitals(self, revision):
"""Update the size and checksum information for this archive
based on the content currently at the URL.
This allows the template manifest to be maintained without
the need for size and checksum values to be present.
"""
template = string.Template(self['url'])
self['url'] = template.substitute({'revision': revision})
from_stream = urllib2.urlopen(self['url'])
sha1_hash, size = DownloadAndComputeHash(from_stream)
self['size'] = size
self['checksum'] = { 'sha1': sha1_hash }
def __getattr__(self, name):
"""Retrieve values from this dict using attributes.
This allows for foo.bar instead of foo['bar'].
Args:
name: the name of the key, 'bar' in the example above.
Returns:
The value associated with that key."""
if name not in self:
raise AttributeError(name)
# special case, self.checksum returns the sha1, not the checksum dict.
if name == 'checksum':
return self.GetChecksum()
return self.__getitem__(name)
def __setattr__(self, name, value):
"""Set values in this dict using attributes.
This allows for foo.bar instead of foo['bar'].
Args:
name: The name of the key, 'bar' in the example above.
value: The value to associate with that key."""
# special case, assigning self.checksum stores the value under the 'sha1'
# key of the checksum dict.
if name == 'checksum':
self.setdefault('checksum', {})['sha1'] = value
return
return self.__setitem__(name, value)
def GetChecksum(self, hash_type='sha1'):
"""Returns a given cryptographic checksum of the archive"""
return self['checksum'][hash_type]
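# Editor's note: a short sketch of the attribute-style access defined above,
# using made-up values. Assigning to .checksum stores the value under the
# 'sha1' key, and reading .checksum returns that digest via GetChecksum().
#
#   archive = Archive('linux')
#   archive.url = 'http://example.com/sdk.tar.bz2'   # same as archive['url']
#   archive.checksum = 'deadbeef'                    # stored as {'sha1': 'deadbeef'}
#   assert archive.checksum == 'deadbeef'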
class Bundle(dict):
"""A placeholder for sdk bundle information. We derive Bundle from
dict so that it is easily serializable."""
def __init__(self, obj):
""" Create a new bundle with the given bundle name."""
if isinstance(obj, str) or isinstance(obj, unicode):
dict.__init__(self, [(ARCHIVES_KEY, []), (NAME_KEY, obj)])
else:
dict.__init__(self, obj)
def MergeWithBundle(self, bundle):
"""Merge this bundle with |bundle|.
Merges dict in |bundle| with this one in such a way that keys are not
duplicated: the values of the keys in |bundle| take precedence in the
resulting dictionary.
Archives in |bundle| will be appended to archives in self.
Args:
bundle: The other bundle. Must be a dict.
"""
assert self is not bundle
for k, v in bundle.iteritems():
if k == ARCHIVES_KEY:
for archive in v:
self.setdefault(k, []).append(archive)
else:
self[k] = v
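# Editor's note: an illustrative sketch of the merge semantics above, with
# hypothetical contents. Scalar keys from |bundle| win; archives are appended
# rather than replaced.
#
#   a = Bundle('sdk')
#   b = Bundle('sdk')
#   a[REVISION_KEY] = 1
#   b[REVISION_KEY] = 2
#   b[ARCHIVES_KEY].append(Archive('linux'))
#   a.MergeWithBundle(b)
#   # Now a[REVISION_KEY] == 2 and the linux archive has been appended to
#   # a[ARCHIVES_KEY].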
def __str__(self):
return self.GetDataAsString()
def GetDataAsString(self):
"""Returns the JSON bundle object, pretty-printed"""
return DictToJSON(self)
def LoadDataFromString(self, json_string):
"""Load a JSON bundle string. Raises an exception if json_string
is not well-formed JSON.
Args:
json_string: a JSON-formatted string containing the bundle
"""
self.CopyFrom(json.loads(json_string))
def CopyFrom(self, source):
"""Update the content of the bundle by copying values from the given
dictionary.
Args:
source: The dictionary whose values must be copied to the bundle."""
for key, value in source.items():
if key == ARCHIVES_KEY:
archives = []
for a in value:
new_archive = Archive(a['host_os'])
new_archive.CopyFrom(a)
archives.append(new_archive)
self[ARCHIVES_KEY] = archives
else:
self[key] = value
def Validate(self, add_missing_info=False, error_on_unknown_keys=False):
"""Validate the content of the bundle. Raise an Error if an invalid or
missing field is found.
Args:
add_missing_info: If True, fill in missing size/checksum info for
archives by downloading them (see Archive.UpdateVitals).
error_on_unknown_keys: If True, raise an Error when unknown keys are
found in the bundle.
"""
# Check required fields.
if not self.get(NAME_KEY):
raise Error('Bundle has no name')
if self.get(REVISION_KEY) is None:
raise Error('Bundle "%s" is missing a revision number' % self[NAME_KEY])
if self.get(VERSION_KEY) is None:
raise Error('Bundle "%s" is missing a version number' % self[NAME_KEY])
if not self.get('description'):
raise Error('Bundle "%s" is missing a description' % self[NAME_KEY])
if not self.get('stability'):
raise Error('Bundle "%s" is missing stability info' % self[NAME_KEY])
if self.get('recommended') is None:
raise Error('Bundle "%s" is missing the recommended field' %
self[NAME_KEY])
# Check specific values
if self['stability'] not in STABILITY_LITERALS:
raise Error('Bundle "%s" has invalid stability field: "%s"' %
(self[NAME_KEY], self['stability']))
if self['recommended'] not in YES_NO_LITERALS:
raise Error(
'Bundle "%s" has invalid recommended field: "%s"' %
(self[NAME_KEY], self['recommended']))
# Verify that all key names are valid.
if error_on_unknown_keys:
for key in self:
if key not in VALID_BUNDLES_KEYS:
raise Error('Bundle "%s" has invalid attribute "%s"' %
(self[NAME_KEY], key))
# Validate the archives
for archive in self[ARCHIVES_KEY]:
if add_missing_info and 'size' not in archive:
archive.UpdateVitals(self[REVISION_KEY])
archive.Validate(error_on_unknown_keys)
def GetArchive(self, host_os_name):
"""Retrieve the archive for the given host os.
Args:
host_os_name: name of host os whose archive must be retrieved.
Return:
An Archive instance or None if it doesn't exist."""
for archive in self[ARCHIVES_KEY]:
if archive.host_os == host_os_name or archive.host_os == 'all':
return archive
return None
def GetHostOSArchive(self):
"""Retrieve the archive for the current host os."""
return self.GetArchive(GetHostOS())
def GetHostOSArchives(self):
"""Retrieve all archives for the current host os, or marked all.
"""
return [archive for archive in self.GetArchives()
if archive.host_os in (GetHostOS(), 'all')]
def GetArchives(self):
"""Returns all the archives in this bundle"""
return self[ARCHIVES_KEY]
def AddArchive(self, archive):
"""Add an archive to this bundle."""
self[ARCHIVES_KEY].append(archive)
def RemoveAllArchives(self):
"""Remove all archives from this Bundle."""
del self[ARCHIVES_KEY][:]
def RemoveAllArchivesForHostOS(self, host_os_name):
"""Remove all archives for the given host OS from this Bundle."""
if host_os_name == 'all':
del self[ARCHIVES_KEY][:]
else:
# Rebuild the list in place rather than deleting while iterating, which
# would skip the element that follows each removed archive.
self[ARCHIVES_KEY][:] = [archive for archive in self[ARCHIVES_KEY]
if archive.host_os != host_os_name]
def __getattr__(self, name):
"""Retrieve values from this dict using attributes.
This allows for foo.bar instead of foo['bar'].
Args:
name: the name of the key, 'bar' in the example above.
Returns:
The value associated with that key."""
if name not in self:
raise AttributeError(name)
return self.__getitem__(name)
def __setattr__(self, name, value):
"""Set values in this dict using attributes.
This allows for foo.bar instead of foo['bar'].
Args:
name: The name of the key, 'bar' in the example above.
value: The value to associate with that key."""
self.__setitem__(name, value)
def __eq__(self, bundle):
"""Test if two bundles are equal.
Normally the default comparison for two dicts is fine, but in this case we
don't care about the list order of the archives.
Args:
bundle: The other bundle to compare against.
Returns:
True if the bundles are equal."""
if not isinstance(bundle, Bundle):
return False
if len(self.keys()) != len(bundle.keys()):
return False
for key in self.keys():
if key not in bundle:
return False
# special comparison for ARCHIVES_KEY because we don't care about the list
# ordering.
if key == ARCHIVES_KEY:
if len(self[key]) != len(bundle[key]):
return False
for archive in self[key]:
if archive != bundle.GetArchive(archive.host_os):
return False
elif self[key] != bundle[key]:
return False
return True
def __ne__(self, bundle):
"""Test if two bundles are unequal.
See __eq__ for more info."""
return not self.__eq__(bundle)
class SDKManifest(object):
"""This class contains utilities for manipulation an SDK manifest string
For ease of unit-testing, this class should not contain any file I/O.
"""
def __init__(self):
"""Create a new SDKManifest object with default contents"""
self._manifest_data = {
"manifest_version": MANIFEST_VERSION,
"bundles": [],
}
def Validate(self, add_missing_info=False):
"""Validate the Manifest file and raises an exception for problems"""
# Validate the manifest top level
if self._manifest_data["manifest_version"] > MANIFEST_VERSION:
raise Error("Manifest version too high: %s" %
self._manifest_data["manifest_version"])
# Verify that all key names are valid.
for key in self._manifest_data:
if key not in VALID_MANIFEST_KEYS:
raise Error('Manifest has invalid attribute "%s"' % key)
# Validate each bundle
for bundle in self._manifest_data[BUNDLES_KEY]:
bundle.Validate(add_missing_info)
def GetBundle(self, name):
"""Get a bundle from the array of bundles.
Args:
name: the name of the bundle to return.
Return:
The first bundle with the given name, or None if it is not found."""
if BUNDLES_KEY not in self._manifest_data:
return None
bundles = [bundle for bundle in self._manifest_data[BUNDLES_KEY]
if bundle[NAME_KEY] == name]
if len(bundles) > 1:
sys.stderr.write("WARNING: More than one bundle with name"
"'%s' exists.\n" % name)
return bundles[0] if len(bundles) > 0 else None
def GetBundles(self):
"""Return all the bundles in the manifest."""
return self._manifest_data[BUNDLES_KEY]
def SetBundle(self, new_bundle):
"""Add or replace a bundle in the manifest.
Note: If a bundle in the manifest already exists with this name, it will be
overwritten with a copy of this bundle, at the same index as the original.
Args:
new_bundle: The bundle to add or to replace the existing one with.
"""
name = new_bundle[NAME_KEY]
bundles = self.GetBundles()
new_bundle_copy = copy.deepcopy(new_bundle)
for i, bundle in enumerate(bundles):
if bundle[NAME_KEY] == name:
bundles[i] = new_bundle_copy
return
# Bundle not already in list, append it.
bundles.append(new_bundle_copy)
def RemoveBundle(self, name):
"""Remove a bundle by name.
Args:
name: the name of the bundle to remove.
Return:
True if the bundle was removed, False if there is no bundle with that
name.
"""
if BUNDLES_KEY not in self._manifest_data:
return False
bundles = self._manifest_data[BUNDLES_KEY]
for i, bundle in enumerate(bundles):
if bundle[NAME_KEY] == name:
del bundles[i]
return True
return False
def BundleNeedsUpdate(self, bundle):
"""Decides if a bundle needs to be updated.
A bundle needs to be updated if it is not installed (doesn't exist in this
manifest file) or if its (version, revision) pair is newer than the one in
this file.
Args:
bundle: The Bundle to test.
Returns:
True if Bundle needs to be updated.
"""
if NAME_KEY not in bundle:
raise KeyError("Bundle must have a 'name' key.")
local_bundle = self.GetBundle(bundle[NAME_KEY])
return (local_bundle is None) or (
(local_bundle[VERSION_KEY], local_bundle[REVISION_KEY]) <
(bundle[VERSION_KEY], bundle[REVISION_KEY]))
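# Editor's note: the comparison above relies on Python's lexicographic tuple
# ordering, e.g. (25, 3) < (25, 4) < (26, 0), so a bundle is considered newer
# when its version is higher, or the versions match and its revision is
# higher. The numbers are illustrative only.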
def MergeBundle(self, bundle, allow_existing=True):
"""Merge a Bundle into this manifest.
The new bundle is added if not present, or merged into the existing bundle.
Args:
bundle: The bundle to merge.
allow_existing: If False, raise an Error if a bundle with the same name
already exists in this manifest.
"""
if NAME_KEY not in bundle:
raise KeyError("Bundle must have a 'name' key.")
local_bundle = self.GetBundle(bundle.name)
if not local_bundle:
self.SetBundle(bundle)
else:
if not allow_existing:
raise Error('cannot merge manifest bundle \'%s\', it already exists'
% bundle.name)
local_bundle.MergeWithBundle(bundle)
def MergeManifest(self, manifest):
'''Merge another manifest into this manifest, disallowing overriding.
Args:
manifest: The manifest to merge.
'''
for bundle in manifest.GetBundles():
self.MergeBundle(bundle, allow_existing=False)
def FilterBundles(self, predicate):
"""Filter the list of bundles by |predicate|.
For all bundles in this manifest, if predicate(bundle) is False, the bundle
is removed from the manifest.
Args:
predicate: a function that takes a bundle and returns True to keep
it or False to remove it.
"""
self._manifest_data[BUNDLES_KEY] = filter(predicate, self.GetBundles())
def LoadDataFromString(self, json_string, add_missing_info=False):
"""Load a JSON manifest string. Raises an exception if json_string
is not well-formed JSON.
Args:
json_string: a JSON-formatted string containing the previous manifest.
add_missing_info: If True, fill in missing size/checksum info for
archives while validating the loaded bundles."""
new_manifest = json.loads(json_string)
for key, value in new_manifest.items():
if key == BUNDLES_KEY:
# Remap each bundle in |value| to a Bundle instance
bundles = []
for b in value:
new_bundle = Bundle(b[NAME_KEY])
new_bundle.CopyFrom(b)
bundles.append(new_bundle)
self._manifest_data[key] = bundles
else:
self._manifest_data[key] = value
self.Validate(add_missing_info)
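# Editor's note: a minimal sketch of loading a manifest from a JSON string,
# with a deliberately small, hypothetical bundle. The literal keys
# 'manifest_version', 'description', 'stability' and 'recommended' are the
# same ones this module checks in Validate.
#
#   manifest = SDKManifest()
#   bundle_dict = {NAME_KEY: 'sdk_tools', VERSION_KEY: 1, REVISION_KEY: 1,
#                  'description': 'tools', 'stability': 'stable',
#                  'recommended': 'yes', ARCHIVES_KEY: []}
#   manifest.LoadDataFromString(json.dumps(
#       {'manifest_version': MANIFEST_VERSION, BUNDLES_KEY: [bundle_dict]}))
#   print manifest.GetBundle('sdk_tools').version   # -> 1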
def __str__(self):
return self.GetDataAsString()
def __eq__(self, other):
# Access to protected member _manifest_data of a client class
# pylint: disable=W0212
if (self._manifest_data['manifest_version'] !=
other._manifest_data['manifest_version']):
return False
self_bundle_names = set(b.name for b in self.GetBundles())
other_bundle_names = set(b.name for b in other.GetBundles())
if self_bundle_names != other_bundle_names:
return False
for bundle_name in self_bundle_names:
if self.GetBundle(bundle_name) != other.GetBundle(bundle_name):
return False
return True
def __ne__(self, other):
return not (self == other)
def GetDataAsString(self):
"""Returns the current JSON manifest object, pretty-printed"""
return DictToJSON(self._manifest_data)
| bsd-3-clause |
manipopopo/tensorflow | tensorflow/python/kernel_tests/random/random_grad_test.py | 11 | 9670 | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.ops.random_grad."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gradients_impl
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_grad
from tensorflow.python.ops import random_ops
from tensorflow.python.platform import test
from tensorflow.python.platform import tf_logging
class AddLeadingUnitDimensionsTest(test.TestCase):
def testBasic(self):
ret = random_grad.add_leading_unit_dimensions(array_ops.ones([3, 2, 1]), 3)
self.assertAllEqual(ret.shape, [1, 1, 1, 3, 2, 1])
def testZeroExtraDimensions(self):
ret = random_grad.add_leading_unit_dimensions(array_ops.ones([3, 2, 1]), 0)
self.assertAllEqual(ret.shape, [3, 2, 1])
def testScalarInput(self):
ret = random_grad.add_leading_unit_dimensions(1.0, 2)
self.assertAllEqual(ret.shape, [1, 1])
def testUnknownShape(self):
x = array_ops.placeholder(dtypes.float32)
num_dimensions = array_ops.placeholder(dtypes.int32)
ret = random_grad.add_leading_unit_dimensions(x, num_dimensions)
with self.test_session() as sess:
ret_val = sess.run(ret, {x: np.ones([2, 2]), num_dimensions: 2})
self.assertAllEqual(ret_val.shape, [1, 1, 2, 2])
class RandomGammaGradTest(test.TestCase):
"""Tests for derivative of a sample ~ Gamma(alpha, beta) wrt alpha and beta.
The sample is an "implicit" function of alpha, beta and the independent random
noise u. The derivatives we are looking for are
d sample(alpha, beta, u) / dalpha (and dbeta).
The derivative w.r.t. beta is computed by the standard automatic
differentiation, so we trust that it is computed correctly.
The derivative w.r.t. alpha is computed by an Eigen function, so we test it in
several ways. Unfortunately, the standard derivative checking by perturbing
the parameter is impossible here, because we cannot fix the value of u
in the random sampler. Instead, we compare the derivative for the given pair
of (sample, alpha) to the values computed in various ways, and also check
some statistical properties of the derivative.
"""
def testGradientsShape(self):
shape = [2, 3]
alpha = array_ops.ones([2, 2])
beta = array_ops.ones([1, 2])
sample = random_ops.random_gamma(shape, alpha, beta)
grads_alpha, grads_beta = gradients_impl.gradients(sample, [alpha, beta])
self.assertAllEqual(grads_alpha.shape, alpha.shape)
self.assertAllEqual(grads_beta.shape, beta.shape)
def testGradientsShapeWithOneSamplePerParameter(self):
shape = []
alpha = array_ops.ones([2, 2])
beta = array_ops.ones([1, 2])
sample = random_ops.random_gamma(shape, alpha, beta)
grads_alpha, grads_beta = gradients_impl.gradients(sample, [alpha, beta])
self.assertAllEqual(grads_alpha.shape, alpha.shape)
self.assertAllEqual(grads_beta.shape, beta.shape)
def testGradientsUnknownShape(self):
shape = array_ops.placeholder(dtypes.int32)
alpha = array_ops.placeholder(dtypes.float32)
beta = array_ops.placeholder(dtypes.float32)
sample = random_ops.random_gamma(shape, alpha, beta)
grads_alpha, grads_beta = gradients_impl.gradients(sample, [alpha, beta])
alpha_val = np.ones([1, 2])
beta_val = np.ones([2, 1])
with self.test_session() as sess:
grads_alpha_val, grads_beta_val = sess.run(
[grads_alpha, grads_beta],
{alpha: alpha_val, beta: beta_val, shape: [2, 1]})
self.assertAllEqual(grads_alpha_val.shape, alpha_val.shape)
self.assertAllEqual(grads_beta_val.shape, beta_val.shape)
def _testCompareToExplicitDerivative(self, dtype):
"""Compare to the explicit reparameterization derivative.
Verifies that the computed derivative satisfies
dsample / dalpha = d igammainv(alpha, u) / dalpha,
where u = igamma(alpha, sample).
Args:
dtype: TensorFlow dtype to perform the computations in.
"""
delta = 1e-3
np_dtype = dtype.as_numpy_dtype
try:
from scipy import misc # pylint: disable=g-import-not-at-top
from scipy import special # pylint: disable=g-import-not-at-top
alpha_val = np.logspace(-2, 3, dtype=np_dtype)
alpha = constant_op.constant(alpha_val)
sample = random_ops.random_gamma([], alpha, np_dtype(1.0), dtype=dtype)
actual = gradients_impl.gradients(sample, alpha)[0]
(sample_val, actual_val) = self.evaluate((sample, actual))
u = special.gammainc(alpha_val, sample_val)
expected_val = misc.derivative(
lambda alpha_prime: special.gammaincinv(alpha_prime, u),
alpha_val, dx=delta * alpha_val)
self.assertAllClose(actual_val, expected_val, rtol=1e-3, atol=1e-3)
except ImportError as e:
tf_logging.warn("Cannot use special functions in a test: %s" % str(e))
def testCompareToExplicitDerivativeFloat(self):
self._testCompareToExplicitDerivative(dtypes.float32)
def testCompareToExplicitDerivativeDouble(self):
self._testCompareToExplicitDerivative(dtypes.float64)
def _testCompareToImplicitDerivative(self, dtype):
"""Compare to the implicit reparameterization derivative.
Let's derive the formula we compare to.
Start from the fact that CDF maps a random variable to the Uniform
random variable:
igamma(alpha, sample) = u, where u ~ Uniform(0, 1).
Apply d / dalpha to both sides:
d igamma(alpha, sample) / dalpha
+ d igamma(alpha, sample) / dsample * dsample / dalpha = 0,
which rearranges to
dsample / dalpha = - (d igamma(alpha, sample) / dalpha)
/ (d igamma(alpha, sample) / dsample)
This is the equation (8) of https://arxiv.org/abs/1805.08498
Args:
dtype: TensorFlow dtype to perform the computations in.
"""
np_dtype = dtype.as_numpy_dtype
alpha = constant_op.constant(np.logspace(-2, 3, dtype=np_dtype))
sample = random_ops.random_gamma([], alpha, np_dtype(1.0), dtype=dtype)
actual = gradients_impl.gradients(sample, alpha)[0]
sample_sg = array_ops.stop_gradient(sample)
cdf = math_ops.igamma(alpha, sample_sg)
dcdf_dalpha, dcdf_dsample = gradients_impl.gradients(
cdf, [alpha, sample_sg])
# Numerically unstable due to division, do not try at home.
expected = -dcdf_dalpha / dcdf_dsample
(actual_val, expected_val) = self.evaluate((actual, expected))
self.assertAllClose(actual_val, expected_val, rtol=1e-3, atol=1e-3)
def testCompareToImplicitDerivativeFloat(self):
self._testCompareToImplicitDerivative(dtypes.float32)
def testCompareToImplicitDerivativeDouble(self):
self._testCompareToImplicitDerivative(dtypes.float64)
def testAverageAlphaGradient(self):
"""Statistical test for the gradient.
Using the equation (5) of https://arxiv.org/abs/1805.08498, we have
1 = d/dalpha E_{sample ~ Gamma(alpha, 1)} sample
= E_{sample ~ Gamma(alpha, 1)} dsample/dalpha.
Here we verify that the rhs is fairly close to one.
The convergence speed is not great, so we use many samples and loose bounds.
"""
num_samples = 1000
alpha = constant_op.constant([0.8, 1e1, 1e3], dtype=dtypes.float32)
sample = random_ops.random_gamma([num_samples], alpha)
# We need to average the gradients, which is equivalent to averaging the
# samples and then doing backprop.
mean_sample = math_ops.reduce_mean(sample, axis=0)
dsample_dalpha = gradients_impl.gradients(mean_sample, alpha)[0]
dsample_dalpha_val = self.evaluate(dsample_dalpha)
self.assertAllClose(dsample_dalpha_val, [1.0] * 3, atol=1e-1, rtol=1e-1)
def testQuadraticLoss(self):
"""Statistical test for the gradient.
The equation (5) of https://arxiv.org/abs/1805.08498 says
d/dalpha E_{sample ~ Gamma(alpha, 1)} f(sample)
= E_{sample ~ Gamma(alpha, 1)} df(sample)/dalpha.
Choose a quadratic loss function f(sample) = (sample - t)^2.
Then, the lhs can be computed analytically:
d/dalpha E_{sample ~ Gamma(alpha, 1)} f(sample)
= d/dalpha [ (alpha + alpha^2) - 2 * t * alpha + t^2 ]
= 1 + 2 * alpha - 2 * t.
We compare the Monte-Carlo estimate of the expectation with the
true gradient.
"""
num_samples = 1000
t = 0.3
alpha = 0.5
expected = 1 + 2 * alpha - 2 * t
alpha = constant_op.constant(alpha)
sample = random_ops.random_gamma([num_samples], alpha, 1.0)
loss = math_ops.reduce_mean(math_ops.square(sample - t))
dloss_dalpha = gradients_impl.gradients(loss, alpha)[0]
dloss_dalpha_val = self.evaluate(dloss_dalpha)
self.assertAllClose(expected, dloss_dalpha_val, atol=1e-1, rtol=1e-1)
if __name__ == "__main__":
test.main()
| apache-2.0 |
alexmojaki/odo | docs/source/conf.py | 8 | 9099 | # -*- coding: utf-8 -*-
#
# into documentation build configuration file, created by
# sphinx-quickstart on Sat Dec 6 16:03:44 2015.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.doctest', 'sphinx.ext.extlinks']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'odo'
copyright = u'2015, Continuum Analytics'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
from odo import __version__ as version
# The full version, including alpha/beta/rc tags.
release = version
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = []
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
#html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'ododoc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'odo.tex', u'odo Documentation',
u'Matthew Rocklin', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'odo', u'odo Documentation',
[u'Continuum Analytics'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'odo', u'odo Documentation',
u'Continuum Analytics', 'odo', 'Data migrations',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# -- Options for Epub output ---------------------------------------------------
# Bibliographic Dublin Core info.
epub_title = u'odo'
epub_author = u'Matthew Rocklin'
epub_publisher = u'Continuum Analytics'
epub_copyright = u'2015, Continuum Analytics'
# The language of the text. It defaults to the language option
# or en if the language is not set.
#epub_language = ''
# The scheme of the identifier. Typical schemes are ISBN or URL.
#epub_scheme = ''
# The unique identifier of the text. This can be a ISBN number
# or the project homepage.
#epub_identifier = ''
# A unique identification for the text.
#epub_uid = ''
# A tuple containing the cover image and cover page html template filenames.
#epub_cover = ()
# HTML files that should be inserted before the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#epub_pre_files = []
# HTML files that should be inserted after the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#epub_post_files = []
# A list of files that should not be packed into the epub file.
#epub_exclude_files = []
# The depth of the table of contents in toc.ncx.
#epub_tocdepth = 3
# Allow duplicate toc entries.
#epub_tocdup = True
extlinks = dict(issue=('https://github.com/blaze/odo/issues/%s', '#'))
| bsd-3-clause |
nmercier/linux-cross-gcc | win32/bin/Lib/xml/sax/__init__.py | 63 | 3689 | """Simple API for XML (SAX) implementation for Python.
This module provides an implementation of the SAX 2 interface;
information about the Java version of the interface can be found at
http://www.megginson.com/SAX/. The Python version of the interface is
documented at <...>.
This package contains the following modules:
handler -- Base classes and constants which define the SAX 2 API for
the 'client-side' of SAX for Python.
saxutils -- Implementation of the convenience classes commonly used to
work with SAX.
xmlreader -- Base classes and constants which define the SAX 2 API for
the parsers used with SAX for Python.
expatreader -- Driver that allows use of the Expat parser with SAX.
"""
from xmlreader import InputSource
from handler import ContentHandler, ErrorHandler
from _exceptions import SAXException, SAXNotRecognizedException, \
SAXParseException, SAXNotSupportedException, \
SAXReaderNotAvailable
def parse(source, handler, errorHandler=ErrorHandler()):
parser = make_parser()
parser.setContentHandler(handler)
parser.setErrorHandler(errorHandler)
parser.parse(source)
def parseString(string, handler, errorHandler=ErrorHandler()):
try:
from cStringIO import StringIO
except ImportError:
from StringIO import StringIO
if errorHandler is None:
errorHandler = ErrorHandler()
parser = make_parser()
parser.setContentHandler(handler)
parser.setErrorHandler(errorHandler)
inpsrc = InputSource()
inpsrc.setByteStream(StringIO(string))
parser.parse(inpsrc)
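# Editor's note: a minimal client-side sketch of the two convenience functions
# above; the handler class and file name are hypothetical.
#
#   from xml.sax import parse
#   from xml.sax.handler import ContentHandler
#
#   class TitleCounter(ContentHandler):
#       def __init__(self):
#           ContentHandler.__init__(self)
#           self.count = 0
#       def startElement(self, name, attrs):
#           if name == 'title':
#               self.count += 1
#
#   handler = TitleCounter()
#   parse('books.xml', handler)   # or parseString('<a><title/></a>', handler)
#   print handler.count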
# this is the parser list used by the make_parser function if no
# alternatives are given as parameters to the function
default_parser_list = ["xml.sax.expatreader"]
# tell modulefinder that importing sax potentially imports expatreader
_false = 0
if _false:
import xml.sax.expatreader
import os, sys
if "PY_SAX_PARSER" in os.environ:
default_parser_list = os.environ["PY_SAX_PARSER"].split(",")
del os
_key = "python.xml.sax.parser"
if sys.platform[:4] == "java" and sys.registry.containsKey(_key):
default_parser_list = sys.registry.getProperty(_key).split(",")
def make_parser(parser_list = []):
"""Creates and returns a SAX parser.
Creates the first parser it is able to instantiate of the ones
given in the list created by doing parser_list +
default_parser_list. The lists must contain the names of Python
modules containing both a SAX parser and a create_parser function."""
for parser_name in parser_list + default_parser_list:
try:
return _create_parser(parser_name)
except ImportError, e:
import sys
if parser_name in sys.modules:
# The parser module was found, but importing it
# failed unexpectedly, pass this exception through
raise
except SAXReaderNotAvailable:
# The parser module detected that it won't work properly,
# so try the next one
pass
raise SAXReaderNotAvailable("No parsers found", None)
# --- Internal utility methods used by make_parser
if sys.platform[:4] == "java":
def _create_parser(parser_name):
from org.python.core import imp
drv_module = imp.importName(parser_name, 0, globals())
return drv_module.create_parser()
else:
def _create_parser(parser_name):
drv_module = __import__(parser_name,{},{},['create_parser'])
return drv_module.create_parser()
del sys
| bsd-3-clause |
pipermerriam/django | django/template/defaultfilters.py | 224 | 28536 | """Default variable filters."""
from __future__ import unicode_literals
import random as random_module
import re
import warnings
from decimal import ROUND_HALF_UP, Context, Decimal, InvalidOperation
from functools import wraps
from pprint import pformat
from django.conf import settings
from django.utils import formats, six
from django.utils.dateformat import format, time_format
from django.utils.deprecation import RemovedInDjango110Warning
from django.utils.encoding import force_text, iri_to_uri
from django.utils.html import (
avoid_wrapping, conditional_escape, escape, escapejs, linebreaks,
remove_tags, strip_tags, urlize as _urlize,
)
from django.utils.http import urlquote
from django.utils.safestring import SafeData, mark_for_escaping, mark_safe
from django.utils.text import (
Truncator, normalize_newlines, phone2numeric, slugify as _slugify, wrap,
)
from django.utils.timesince import timesince, timeuntil
from django.utils.translation import ugettext, ungettext
from .base import Variable, VariableDoesNotExist
from .library import Library
register = Library()
#######################
# STRING DECORATOR #
#######################
def stringfilter(func):
"""
Decorator for filters which should only receive unicode objects. The object
passed as the first positional argument will be converted to a unicode
object.
"""
def _dec(*args, **kwargs):
if args:
args = list(args)
args[0] = force_text(args[0])
if (isinstance(args[0], SafeData) and
getattr(_dec._decorated_function, 'is_safe', False)):
return mark_safe(func(*args, **kwargs))
return func(*args, **kwargs)
# Include a reference to the real function (used to check original
# arguments by the template parser, and to bear the 'is_safe' attribute
# when multiple decorators are applied).
_dec._decorated_function = getattr(func, '_decorated_function', func)
return wraps(func)(_dec)
###################
# STRINGS #
###################
@register.filter(is_safe=True)
@stringfilter
def addslashes(value):
"""
Adds slashes before quotes. Useful for escaping strings in CSV, for
example. Less useful for escaping JavaScript; use the ``escapejs``
filter instead.
"""
return value.replace('\\', '\\\\').replace('"', '\\"').replace("'", "\\'")
@register.filter(is_safe=True)
@stringfilter
def capfirst(value):
"""Capitalizes the first character of the value."""
return value and value[0].upper() + value[1:]
@register.filter("escapejs")
@stringfilter
def escapejs_filter(value):
"""Hex encodes characters for use in JavaScript strings."""
return escapejs(value)
# Values for testing floatformat input against infinity and NaN representations,
# which differ across platforms and Python versions. Some (i.e. old Windows
# ones) are not recognized by Decimal but we want to return them unchanged vs.
# returning an empty string as we do for completely invalid input. Note these
# need to be built up from values that are not inf/nan, since inf/nan values do
# not reload properly from .pyc files on Windows prior to some level of Python 2.5
# (see Python Issue757815 and Issue1080440).
pos_inf = 1e200 * 1e200
neg_inf = -1e200 * 1e200
nan = (1e200 * 1e200) // (1e200 * 1e200)
special_floats = [str(pos_inf), str(neg_inf), str(nan)]
@register.filter(is_safe=True)
def floatformat(text, arg=-1):
"""
Displays a float to a specified number of decimal places.
If called without an argument, it displays the floating point number with
one decimal place -- but only if there's a decimal place to be displayed:
* num1 = 34.23234
* num2 = 34.00000
* num3 = 34.26000
* {{ num1|floatformat }} displays "34.2"
* {{ num2|floatformat }} displays "34"
* {{ num3|floatformat }} displays "34.3"
If arg is positive, it will always display exactly arg number of decimal
places:
* {{ num1|floatformat:3 }} displays "34.232"
* {{ num2|floatformat:3 }} displays "34.000"
* {{ num3|floatformat:3 }} displays "34.260"
If arg is negative, it will display arg number of decimal places -- but
only if there are places to be displayed:
* {{ num1|floatformat:"-3" }} displays "34.232"
* {{ num2|floatformat:"-3" }} displays "34"
* {{ num3|floatformat:"-3" }} displays "34.260"
If the input float is infinity or NaN, the (platform-dependent) string
representation of that value will be displayed.
"""
try:
input_val = force_text(text)
d = Decimal(input_val)
except UnicodeEncodeError:
return ''
except InvalidOperation:
if input_val in special_floats:
return input_val
try:
d = Decimal(force_text(float(text)))
except (ValueError, InvalidOperation, TypeError, UnicodeEncodeError):
return ''
try:
p = int(arg)
except ValueError:
return input_val
try:
m = int(d) - d
except (ValueError, OverflowError, InvalidOperation):
return input_val
if not m and p < 0:
return mark_safe(formats.number_format('%d' % (int(d)), 0))
if p == 0:
exp = Decimal(1)
else:
exp = Decimal('1.0') / (Decimal(10) ** abs(p))
try:
# Set the precision high enough to avoid an exception, see #15789.
tupl = d.as_tuple()
units = len(tupl[1]) - tupl[2]
prec = abs(p) + units + 1
# Avoid conversion to scientific notation by accessing `sign`, `digits`
# and `exponent` from `Decimal.as_tuple()` directly.
sign, digits, exponent = d.quantize(exp, ROUND_HALF_UP,
Context(prec=prec)).as_tuple()
digits = [six.text_type(digit) for digit in reversed(digits)]
while len(digits) <= abs(exponent):
digits.append('0')
digits.insert(-exponent, '.')
if sign:
digits.append('-')
number = ''.join(reversed(digits))
return mark_safe(formats.number_format(number, abs(p)))
except InvalidOperation:
return input_val
@register.filter(is_safe=True)
@stringfilter
def iriencode(value):
"""Escapes an IRI value for use in a URL."""
return force_text(iri_to_uri(value))
@register.filter(is_safe=True, needs_autoescape=True)
@stringfilter
def linenumbers(value, autoescape=True):
"""Displays text with line numbers."""
lines = value.split('\n')
# Find the maximum width of the line count, for use with zero padding
# string format command
width = six.text_type(len(six.text_type(len(lines))))
if not autoescape or isinstance(value, SafeData):
for i, line in enumerate(lines):
lines[i] = ("%0" + width + "d. %s") % (i + 1, line)
else:
for i, line in enumerate(lines):
lines[i] = ("%0" + width + "d. %s") % (i + 1, escape(line))
return mark_safe('\n'.join(lines))
@register.filter(is_safe=True)
@stringfilter
def lower(value):
"""Converts a string into all lowercase."""
return value.lower()
@register.filter(is_safe=False)
@stringfilter
def make_list(value):
"""
Returns the value turned into a list.
For an integer, it's a list of digits.
For a string, it's a list of characters.
"""
return list(value)
@register.filter(is_safe=True)
@stringfilter
def slugify(value):
"""
Converts to ASCII. Converts spaces to hyphens. Removes characters that
aren't alphanumerics, underscores, or hyphens. Converts to lowercase.
Also strips leading and trailing whitespace.
"""
return _slugify(value)
@register.filter(is_safe=True)
def stringformat(value, arg):
"""
Formats the variable according to the arg, a string formatting specifier.
This specifier uses Python string formatting syntax, with the exception that
the leading "%" is dropped.
See http://docs.python.org/lib/typesseq-strings.html for documentation
of Python string formatting
"""
try:
return ("%" + six.text_type(arg)) % value
except (ValueError, TypeError):
return ""
@register.filter(is_safe=True)
@stringfilter
def title(value):
"""Converts a string into titlecase."""
t = re.sub("([a-z])'([A-Z])", lambda m: m.group(0).lower(), value.title())
return re.sub("\d([A-Z])", lambda m: m.group(0).lower(), t)
@register.filter(is_safe=True)
@stringfilter
def truncatechars(value, arg):
"""
Truncates a string after a certain number of characters.
Argument: Number of characters to truncate after.
"""
try:
length = int(arg)
except ValueError: # Invalid literal for int().
return value # Fail silently.
return Truncator(value).chars(length)
@register.filter(is_safe=True)
@stringfilter
def truncatechars_html(value, arg):
"""
Truncates HTML after a certain number of chars.
Argument: Number of chars to truncate after.
Newlines in the HTML are preserved.
"""
try:
length = int(arg)
except ValueError: # invalid literal for int()
return value # Fail silently.
return Truncator(value).chars(length, html=True)
@register.filter(is_safe=True)
@stringfilter
def truncatewords(value, arg):
"""
Truncates a string after a certain number of words.
Argument: Number of words to truncate after.
Newlines within the string are removed.
"""
try:
length = int(arg)
except ValueError: # Invalid literal for int().
return value # Fail silently.
return Truncator(value).words(length, truncate=' ...')
@register.filter(is_safe=True)
@stringfilter
def truncatewords_html(value, arg):
"""
Truncates HTML after a certain number of words.
Argument: Number of words to truncate after.
Newlines in the HTML are preserved.
"""
try:
length = int(arg)
except ValueError: # invalid literal for int()
return value # Fail silently.
return Truncator(value).words(length, html=True, truncate=' ...')
@register.filter(is_safe=False)
@stringfilter
def upper(value):
"""Converts a string into all uppercase."""
return value.upper()
@register.filter(is_safe=False)
@stringfilter
def urlencode(value, safe=None):
"""
Escapes a value for use in a URL.
Takes an optional ``safe`` parameter used to determine the characters which
should not be escaped by Django's ``urlquote`` method. If not provided, the
default safe characters will be used (but an empty string can be provided
when *all* characters should be escaped).
"""
kwargs = {}
if safe is not None:
kwargs['safe'] = safe
return urlquote(value, **kwargs)
@register.filter(is_safe=True, needs_autoescape=True)
@stringfilter
def urlize(value, autoescape=True):
"""Converts URLs in plain text into clickable links."""
return mark_safe(_urlize(value, nofollow=True, autoescape=autoescape))
@register.filter(is_safe=True, needs_autoescape=True)
@stringfilter
def urlizetrunc(value, limit, autoescape=True):
"""
Converts URLs into clickable links, truncating URLs to the given character
limit, and adding 'rel=nofollow' attribute to discourage spamming.
Argument: Length to truncate URLs to.
"""
return mark_safe(_urlize(value, trim_url_limit=int(limit), nofollow=True,
autoescape=autoescape))
@register.filter(is_safe=False)
@stringfilter
def wordcount(value):
"""Returns the number of words."""
return len(value.split())
@register.filter(is_safe=True)
@stringfilter
def wordwrap(value, arg):
"""
Wraps words at specified line length.
Argument: number of characters to wrap the text at.
"""
return wrap(value, int(arg))
@register.filter(is_safe=True)
@stringfilter
def ljust(value, arg):
"""
Left-aligns the value in a field of a given width.
Argument: field size.
"""
return value.ljust(int(arg))
@register.filter(is_safe=True)
@stringfilter
def rjust(value, arg):
"""
Right-aligns the value in a field of a given width.
Argument: field size.
"""
return value.rjust(int(arg))
@register.filter(is_safe=True)
@stringfilter
def center(value, arg):
"""Centers the value in a field of a given width."""
return value.center(int(arg))
@register.filter
@stringfilter
def cut(value, arg):
"""
Removes all values of arg from the given string.
"""
safe = isinstance(value, SafeData)
value = value.replace(arg, '')
if safe and arg != ';':
return mark_safe(value)
return value
###################
# HTML STRINGS #
###################
@register.filter("escape", is_safe=True)
@stringfilter
def escape_filter(value):
"""
Marks the value as a string that should be auto-escaped.
"""
return mark_for_escaping(value)
@register.filter(is_safe=True)
@stringfilter
def force_escape(value):
"""
Escapes a string's HTML. This returns a new string containing the escaped
characters (as opposed to "escape", which marks the content for later
possible escaping).
"""
return escape(value)
@register.filter("linebreaks", is_safe=True, needs_autoescape=True)
@stringfilter
def linebreaks_filter(value, autoescape=True):
"""
Replaces line breaks in plain text with appropriate HTML; a single
newline becomes an HTML line break (``<br />``) and a new line
followed by a blank line becomes a paragraph break (``</p>``).
"""
autoescape = autoescape and not isinstance(value, SafeData)
return mark_safe(linebreaks(value, autoescape))
@register.filter(is_safe=True, needs_autoescape=True)
@stringfilter
def linebreaksbr(value, autoescape=True):
"""
Converts all newlines in a piece of plain text to HTML line breaks
(``<br />``).
"""
autoescape = autoescape and not isinstance(value, SafeData)
value = normalize_newlines(value)
if autoescape:
value = escape(value)
return mark_safe(value.replace('\n', '<br />'))
@register.filter(is_safe=True)
@stringfilter
def safe(value):
"""
Marks the value as a string that should not be auto-escaped.
"""
return mark_safe(value)
@register.filter(is_safe=True)
def safeseq(value):
"""
A "safe" filter for sequences. Marks each element in the sequence,
individually, as safe, after converting them to unicode. Returns a list
with the results.
"""
return [mark_safe(force_text(obj)) for obj in value]
@register.filter(is_safe=True)
@stringfilter
def removetags(value, tags):
"""Removes a space separated list of [X]HTML tags from the output."""
return remove_tags(value, tags)
@register.filter(is_safe=True)
@stringfilter
def striptags(value):
"""Strips all [X]HTML tags."""
return strip_tags(value)
###################
# LISTS #
###################
@register.filter(is_safe=False)
def dictsort(value, arg):
"""
Takes a list of dicts, returns that list sorted by the property given in
the argument.
"""
try:
return sorted(value, key=Variable(arg).resolve)
except (TypeError, VariableDoesNotExist):
return ''
@register.filter(is_safe=False)
def dictsortreversed(value, arg):
"""
Takes a list of dicts, returns that list sorted in reverse order by the
property given in the argument.
"""
try:
return sorted(value, key=Variable(arg).resolve, reverse=True)
except (TypeError, VariableDoesNotExist):
return ''
@register.filter(is_safe=False)
def first(value):
"""Returns the first item in a list."""
try:
return value[0]
except IndexError:
return ''
@register.filter(is_safe=True, needs_autoescape=True)
def join(value, arg, autoescape=True):
"""
Joins a list with a string, like Python's ``str.join(list)``.
"""
value = map(force_text, value)
if autoescape:
value = [conditional_escape(v) for v in value]
try:
data = conditional_escape(arg).join(value)
except AttributeError: # fail silently but nicely
return value
return mark_safe(data)
@register.filter(is_safe=True)
def last(value):
"Returns the last item in a list"
try:
return value[-1]
except IndexError:
return ''
@register.filter(is_safe=False)
def length(value):
"""Returns the length of the value - useful for lists."""
try:
return len(value)
except (ValueError, TypeError):
return 0
@register.filter(is_safe=False)
def length_is(value, arg):
"""Returns a boolean of whether the value's length is the argument."""
try:
return len(value) == int(arg)
except (ValueError, TypeError):
return ''
@register.filter(is_safe=True)
def random(value):
"""Returns a random item from the list."""
return random_module.choice(value)
@register.filter("slice", is_safe=True)
def slice_filter(value, arg):
"""
Returns a slice of the list.
Uses the same syntax as Python's list slicing; see
http://www.diveintopython3.net/native-datatypes.html#slicinglists
for an introduction.
"""
try:
bits = []
for x in arg.split(':'):
if len(x) == 0:
bits.append(None)
else:
bits.append(int(x))
return value[slice(*bits)]
except (ValueError, TypeError):
return value # Fail silently.
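# Editor's note: a template-level sketch of the filter above, with a
# hypothetical variable name.
#
#   {{ some_list|slice:":2" }} returns the first two items of some_list,
#   mirroring Python's some_list[:2].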
@register.filter(is_safe=True, needs_autoescape=True)
def unordered_list(value, autoescape=True):
"""
Recursively takes a self-nested list and returns an HTML unordered list --
WITHOUT opening and closing <ul> tags.
The list is assumed to be in the proper format. For example, if ``var``
contains: ``['States', ['Kansas', ['Lawrence', 'Topeka'], 'Illinois']]``,
then ``{{ var|unordered_list }}`` would return::
<li>States
<ul>
<li>Kansas
<ul>
<li>Lawrence</li>
<li>Topeka</li>
</ul>
</li>
<li>Illinois</li>
</ul>
</li>
"""
if autoescape:
escaper = conditional_escape
else:
escaper = lambda x: x
def convert_old_style_list(list_):
"""
Converts old style lists to the new easier to understand format.
The old list format looked like:
['Item 1', [['Item 1.1', []], ['Item 1.2', []]]
And it is converted to:
['Item 1', ['Item 1.1', 'Item 1.2']]
"""
if not isinstance(list_, (tuple, list)) or len(list_) != 2:
return list_, False
first_item, second_item = list_
if second_item == []:
return [first_item], True
try:
# see if second item is iterable
iter(second_item)
except TypeError:
return list_, False
old_style_list = True
new_second_item = []
for sublist in second_item:
item, old_style_list = convert_old_style_list(sublist)
if not old_style_list:
break
new_second_item.extend(item)
if old_style_list:
second_item = new_second_item
return [first_item, second_item], old_style_list
def walk_items(item_list):
item_iterator = iter(item_list)
try:
item = next(item_iterator)
while True:
try:
next_item = next(item_iterator)
except StopIteration:
yield item, None
break
if not isinstance(next_item, six.string_types):
try:
iter(next_item)
except TypeError:
pass
else:
yield item, next_item
item = next(item_iterator)
continue
yield item, None
item = next_item
except StopIteration:
pass
def list_formatter(item_list, tabs=1):
indent = '\t' * tabs
output = []
for item, children in walk_items(item_list):
sublist = ''
if children:
sublist = '\n%s<ul>\n%s\n%s</ul>\n%s' % (
indent, list_formatter(children, tabs + 1), indent, indent)
output.append('%s<li>%s%s</li>' % (
indent, escaper(force_text(item)), sublist))
return '\n'.join(output)
value, converted = convert_old_style_list(value)
if converted:
warnings.warn(
"The old style syntax in `unordered_list` is deprecated and will "
"be removed in Django 1.10. Use the the new format instead.",
RemovedInDjango110Warning)
return mark_safe(list_formatter(value))
###################
# INTEGERS #
###################
@register.filter(is_safe=False)
def add(value, arg):
"""Adds the arg to the value."""
try:
return int(value) + int(arg)
except (ValueError, TypeError):
try:
return value + arg
except Exception:
return ''
@register.filter(is_safe=False)
def get_digit(value, arg):
"""
Given a whole number, returns the requested digit of it, where 1 is the
right-most digit, 2 is the second-right-most digit, etc. Returns the
original value for invalid input (if input or argument is not an integer,
or if argument is less than 1). Otherwise, output is always an integer.
"""
try:
arg = int(arg)
value = int(value)
except ValueError:
return value # Fail silently for an invalid argument
if arg < 1:
return value
try:
return int(str(value)[-arg])
except IndexError:
return 0
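# Editor's note: a quick illustration of the digit indexing described above,
# using made-up values.
#
#   {{ 2345|get_digit:"1" }} displays "5"  (right-most digit)
#   {{ 2345|get_digit:"2" }} displays "4"
#   {{ 2345|get_digit:"5" }} displays "0"  (past the left end of the number)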
###################
# DATES #
###################
@register.filter(expects_localtime=True, is_safe=False)
def date(value, arg=None):
"""Formats a date according to the given format."""
if value in (None, ''):
return ''
if arg is None:
arg = settings.DATE_FORMAT
try:
return formats.date_format(value, arg)
except AttributeError:
try:
return format(value, arg)
except AttributeError:
return ''
@register.filter(expects_localtime=True, is_safe=False)
def time(value, arg=None):
"""Formats a time according to the given format."""
if value in (None, ''):
return ''
if arg is None:
arg = settings.TIME_FORMAT
try:
return formats.time_format(value, arg)
except AttributeError:
try:
return time_format(value, arg)
except AttributeError:
return ''
@register.filter("timesince", is_safe=False)
def timesince_filter(value, arg=None):
"""Formats a date as the time since that date (i.e. "4 days, 6 hours")."""
if not value:
return ''
try:
if arg:
return timesince(value, arg)
return timesince(value)
except (ValueError, TypeError):
return ''
@register.filter("timeuntil", is_safe=False)
def timeuntil_filter(value, arg=None):
"""Formats a date as the time until that date (i.e. "4 days, 6 hours")."""
if not value:
return ''
try:
return timeuntil(value, arg)
except (ValueError, TypeError):
return ''
###################
# LOGIC #
###################
@register.filter(is_safe=False)
def default(value, arg):
"""If value is unavailable, use given default."""
return value or arg
@register.filter(is_safe=False)
def default_if_none(value, arg):
"""If value is None, use given default."""
if value is None:
return arg
return value
@register.filter(is_safe=False)
def divisibleby(value, arg):
"""Returns True if the value is devisible by the argument."""
return int(value) % int(arg) == 0
@register.filter(is_safe=False)
def yesno(value, arg=None):
"""
Given a string mapping values for true, false and (optionally) None,
returns one of those strings according to the value:
========== ====================== ==================================
Value Argument Outputs
========== ====================== ==================================
``True`` ``"yeah,no,maybe"`` ``yeah``
``False`` ``"yeah,no,maybe"`` ``no``
``None`` ``"yeah,no,maybe"`` ``maybe``
``None`` ``"yeah,no"`` ``"no"`` (converts None to False
if no mapping for None is given).
========== ====================== ==================================
"""
if arg is None:
arg = ugettext('yes,no,maybe')
bits = arg.split(',')
if len(bits) < 2:
return value # Invalid arg.
try:
yes, no, maybe = bits
except ValueError:
# Unpack list of wrong size (no "maybe" value provided).
yes, no, maybe = bits[0], bits[1], bits[1]
if value is None:
return maybe
if value:
return yes
return no
###################
# MISC #
###################
@register.filter(is_safe=True)
def filesizeformat(bytes):
"""
Formats the value like a 'human-readable' file size (i.e. 13 KB, 4.1 MB,
102 bytes, etc).
"""
try:
bytes = float(bytes)
except (TypeError, ValueError, UnicodeDecodeError):
value = ungettext("%(size)d byte", "%(size)d bytes", 0) % {'size': 0}
return avoid_wrapping(value)
filesize_number_format = lambda value: formats.number_format(round(value, 1), 1)
KB = 1 << 10
MB = 1 << 20
GB = 1 << 30
TB = 1 << 40
PB = 1 << 50
if bytes < KB:
value = ungettext("%(size)d byte", "%(size)d bytes", bytes) % {'size': bytes}
elif bytes < MB:
value = ugettext("%s KB") % filesize_number_format(bytes / KB)
elif bytes < GB:
value = ugettext("%s MB") % filesize_number_format(bytes / MB)
elif bytes < TB:
value = ugettext("%s GB") % filesize_number_format(bytes / GB)
elif bytes < PB:
value = ugettext("%s TB") % filesize_number_format(bytes / TB)
else:
value = ugettext("%s PB") % filesize_number_format(bytes / PB)
return avoid_wrapping(value)
@register.filter(is_safe=False)
def pluralize(value, arg='s'):
"""
Returns a plural suffix if the value is not 1. By default, 's' is used as
the suffix:
* If value is 0, vote{{ value|pluralize }} displays "0 votes".
* If value is 1, vote{{ value|pluralize }} displays "1 vote".
* If value is 2, vote{{ value|pluralize }} displays "2 votes".
If an argument is provided, that string is used instead:
* If value is 0, class{{ value|pluralize:"es" }} displays "0 classes".
* If value is 1, class{{ value|pluralize:"es" }} displays "1 class".
* If value is 2, class{{ value|pluralize:"es" }} displays "2 classes".
If the provided argument contains a comma, the text before the comma is
used for the singular case and the text after the comma is used for the
plural case:
* If value is 0, cand{{ value|pluralize:"y,ies" }} displays "0 candies".
* If value is 1, cand{{ value|pluralize:"y,ies" }} displays "1 candy".
* If value is 2, cand{{ value|pluralize:"y,ies" }} displays "2 candies".
"""
if ',' not in arg:
arg = ',' + arg
bits = arg.split(',')
if len(bits) > 2:
return ''
singular_suffix, plural_suffix = bits[:2]
try:
if float(value) != 1:
return plural_suffix
except ValueError: # Invalid string that's not a number.
pass
except TypeError: # Value isn't a string or a number; maybe it's a list?
try:
if len(value) != 1:
return plural_suffix
except TypeError: # len() of unsized object.
pass
return singular_suffix
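# --- Editor's illustrative sketch (not part of the original filter module) --
# A hedged demonstration of the suffix resolution implemented above; the calls
# use only the pure-Python logic and need no template engine.
def _pluralize_examples():
    assert pluralize(1) == ''            # singular: empty suffix by default
    assert pluralize(0) == 's'           # zero is treated as plural
    assert pluralize(2, "es") == 'es'
    assert pluralize(1, "y,ies") == 'y'
    assert pluralize(3, "y,ies") == 'ies'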
@register.filter("phone2numeric", is_safe=True)
def phone2numeric_filter(value):
"""Takes a phone number and converts it in to its numerical equivalent."""
return phone2numeric(value)
@register.filter(is_safe=True)
def pprint(value):
"""A wrapper around pprint.pprint -- for debugging, really."""
try:
return pformat(value)
except Exception as e:
return "Error in formatting: %s: %s" % (e.__class__.__name__, force_text(e, errors="replace"))
| bsd-3-clause |
SoftwareKing/zstack-woodpecker | integrationtest/vm/virtualrouter/sg/test_egress.py | 4 | 5655 | '''
Test Security Group for 1 VM with egress connection control
@author: Youyk
'''
import zstackwoodpecker.test_util as test_util
import zstackwoodpecker.test_lib as test_lib
import zstackwoodpecker.test_state as test_state
import zstackwoodpecker.zstack_test.zstack_test_security_group as test_sg_header
import zstackwoodpecker.zstack_test.zstack_test_sg_vm as test_sg_vm_header
import apibinding.inventory as inventory
test_stub = test_lib.lib_get_test_stub()
Port = test_state.Port
test_obj_dict = test_state.TestStateDict()
def test():
'''
Test image requirements:
1. have nc to check the network port
2. have "nc" to open any port
3. it doesn't include a default firewall
    VR image is a good candidate to be the guest image.
'''
test_util.test_dsc("Create 2 VMs with vlan VR L3 network and using VR image.")
vm1 = test_stub.create_sg_vm()
test_obj_dict.add_vm(vm1)
vm2 = test_stub.create_sg_vm()
test_obj_dict.add_vm(vm2)
vm1.check()
vm2.check()
test_util.test_dsc("Create security groups.")
sg1 = test_stub.create_sg()
test_obj_dict.add_sg(sg1.security_group.uuid)
sg2 = test_stub.create_sg()
test_obj_dict.add_sg(sg2.security_group.uuid)
sg3 = test_stub.create_sg()
test_obj_dict.add_sg(sg3.security_group.uuid)
sg_vm = test_sg_vm_header.ZstackTestSgVm()
sg_vm.check()
vm1_ip = vm1.vm.vmNics[0].ip
l3_uuid = vm1.vm.vmNics[0].l3NetworkUuid
nic_uuid = vm2.vm.vmNics[0].uuid
vm_nics = (nic_uuid, vm2)
rule1 = test_lib.lib_gen_sg_rule(Port.rule1_ports, inventory.TCP, inventory.EGRESS, vm1_ip)
rule2 = test_lib.lib_gen_sg_rule(Port.rule2_ports, inventory.TCP, inventory.EGRESS, vm1_ip)
rule3 = test_lib.lib_gen_sg_rule(Port.rule3_ports, inventory.TCP, inventory.EGRESS, vm1_ip)
rule4 = test_lib.lib_gen_sg_rule(Port.rule4_ports, inventory.TCP, inventory.EGRESS, vm1_ip)
rule5 = test_lib.lib_gen_sg_rule(Port.rule5_ports, inventory.TCP, inventory.EGRESS, vm1_ip)
sg1.add_rule([rule1])
sg2.add_rule([rule1, rule2, rule3])
sg3.add_rule([rule3, rule4, rule5])
sg_vm.check()
sg_vm.add_stub_vm(l3_uuid, vm1)
test_util.test_dsc("Add nic to security group 1.")
test_util.test_dsc("Allowed ports: %s" % Port.get_ports(Port.rule1_ports))
sg_vm.attach(sg1, [vm_nics])
sg_vm.check()
test_util.test_dsc("Remove nic from security group 1.")
test_util.test_dsc("Allowed ports: %s" % test_stub.target_ports)
sg_vm.detach(sg1, nic_uuid)
sg_vm.check()
test_util.test_dsc("Remove rule1 from security group 1.")
sg1.delete_rule([rule1])
sg_vm.check()
test_util.test_dsc("Add rule1, rule2, rule3 to security group 1.")
test_util.test_dsc("Allowed ports: %s" % test_stub.target_ports)
sg1.add_rule([rule1, rule2, rule3])
sg_vm.check()
test_util.test_dsc("Add nic to security group 1 again.")
tmp_allowed_ports = test_stub.rule1_ports + test_stub.rule2_ports + test_stub.rule3_ports
test_util.test_dsc("Allowed ports: %s" % tmp_allowed_ports)
sg_vm.attach(sg1, [vm_nics])
sg_vm.check()
test_util.test_dsc("Remove rule2/3 from security group 1.")
test_util.test_dsc("Allowed ports: %s" % test_stub.rule1_ports)
sg1.delete_rule([rule2, rule3])
sg_vm.check()
test_util.test_dsc("Add rule2, rule3 back to security group 1.")
tmp_allowed_ports = test_stub.rule1_ports + test_stub.rule2_ports + test_stub.rule3_ports
test_util.test_dsc("Allowed ports: %s" % tmp_allowed_ports)
sg1.add_rule([rule2, rule3])
sg_vm.check()
test_util.test_dsc("Remove rule2/3 from security group 1.")
test_util.test_dsc("Allowed ports: %s" % test_stub.rule1_ports)
sg1.delete_rule([rule2, rule3])
sg_vm.check()
#add sg2
test_util.test_dsc("Add nic to security group 2.")
tmp_allowed_ports = test_stub.rule1_ports + test_stub.rule2_ports + test_stub.rule3_ports
test_util.test_dsc("Allowed ports: %s" % tmp_allowed_ports)
sg_vm.attach(sg2, [vm_nics])
sg_vm.check()
#add sg3
test_util.test_dsc("Add nic to security group 3.")
tmp_allowed_ports = test_stub.rule1_ports + test_stub.rule2_ports + test_stub.rule3_ports + test_stub.rule4_ports + test_stub.rule5_ports
test_util.test_dsc("Allowed ports (rule1+rule2+rul3+rule4+rule5): %s" % tmp_allowed_ports)
sg_vm.attach(sg3, [vm_nics])
sg_vm.check()
#detach nic from sg2
test_util.test_dsc("Remove security group 2 for nic.")
tmp_allowed_ports = test_stub.rule1_ports + test_stub.rule3_ports + test_stub.rule4_ports + test_stub.rule5_ports
test_util.test_dsc("Allowed ports (rule1+rule3+rule4+rule5): %s" % tmp_allowed_ports)
sg_vm.detach(sg2, nic_uuid)
sg_vm.check()
#delete sg3
test_util.test_dsc("Delete security group 3.")
test_util.test_dsc("Allowed ports (rule1): %s" % test_stub.rule1_ports)
sg_vm.delete_sg(sg3)
sg_vm.check()
test_obj_dict.rm_sg(sg3.security_group.uuid)
#Cleanup
sg_vm.delete_sg(sg2)
test_obj_dict.rm_sg(sg2.security_group.uuid)
sg_vm.delete_sg(sg1)
test_obj_dict.rm_sg(sg1.security_group.uuid)
sg_vm.check()
vm1.check()
vm2.check()
vm1.destroy()
vm2.destroy()
test_util.test_pass('Security Group Vlan VirtualRouter VMs Test Success')
#Will be called only if exception happens in test().
def error_cleanup():
global test_obj_dict
test_lib.lib_error_cleanup(test_obj_dict)
| apache-2.0 |
loveyoupeng/rt | modules/web/src/main/native/Tools/Scripts/webkitpy/common/thread/threadedmessagequeue.py | 134 | 2089 | # Copyright (c) 2010 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import threading
class ThreadedMessageQueue(object):
def __init__(self):
self._messages = []
self._is_running = True
self._lock = threading.Lock()
def post(self, message):
with self._lock:
self._messages.append(message)
def stop(self):
with self._lock:
self._is_running = False
def take_all(self):
with self._lock:
messages = self._messages
is_running = self._is_running
self._messages = []
return (messages, is_running)
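# --- Editor's illustrative sketch (not part of the original module) ---------
# A small, hedged usage example: a producer posts messages, then the consumer
# drains them with take_all() and observes the running flag set by stop().
def _example_usage():
    queue = ThreadedMessageQueue()
    queue.post("build finished")
    queue.post("tests passed")
    queue.stop()
    messages, is_running = queue.take_all()
    assert messages == ["build finished", "tests passed"]
    assert is_running is False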
| gpl-2.0 |
kavigupta/61a-analysis | src/analytics.py | 1 | 4920 | """
A module containing a variety of functions for analyzing the data. This is supposed to be more data
specific than statistics.
"""
import numpy as np
from tools import cached_property
def compensate_for_grader_means(evals, z_thresh=1):
"""
Compensates for grader means by subtracting each grader's average grades per problem. Eliminates
individuals for whom the graders are unusual.
"""
if not evals.evaluation_for(list(evals.emails)[0]).means_need_compensation:
return evals
problematic = set(_identify_problematic_ranges(evals, z_thresh))
filt = evals.remove(problematic)
zeroed = filt.zero_meaned()
return zeroed
class ExamPair:
"""
Structure representing a correlation between exam scores, as well as metadata on location.
"""
def __init__(self, first, second, are_time_adjacent, are_space_adjacent, are_same_room):
self.are_time_adjacent = are_time_adjacent
self.first = first
self.second = second
self.are_space_adjacent = are_space_adjacent
self.are_same_room = are_same_room
@cached_property
def correlation(self):
"""
        The correlation between the two exams' rubric items
"""
return self.first.correlation(self.second)
@cached_property
def abs_score_diff(self):
"""
The absolute difference between the exam scores
"""
return abs(self.first.score - self.second.score)
def __repr__(self):
return "ExamPair(%s, %s, %r, %r, %r)" % tuple(self)
def __hash__(self):
return hash((hash(self.first) + hash(self.second), tuple(self)[2:]))
def __eq__(self, other):
align = self.first == other.first and self.second == other.second
mis_align = self.first == other.second and self.second == other.first
if not align and not mis_align:
return False
return tuple(self)[2:] == tuple(other)[2:]
def __iter__(self):
return iter((self.first,
self.second,
self.are_time_adjacent,
self.are_space_adjacent,
self.are_same_room))
def all_pairs(graded_exam, seating_chart, time_delta, progress, require_same_room,
require_not_time_adj, adjacency_type):
"""
    Yields ExamPair objects for every pair of individuals, subject to the room and time-adjacency filters.
"""
if require_same_room:
for _, in_room in seating_chart.emails_by_room:
yield from _pairs_per_individual(graded_exam, seating_chart, time_delta, progress,
in_room, True, require_not_time_adj, adjacency_type)
else:
emails = list(graded_exam.emails)
yield from _pairs_per_individual(graded_exam, seating_chart, time_delta, progress,
emails, False, require_not_time_adj, adjacency_type)
def _pairs_per_individual(graded_exam, seating_chart, time_delta, progress, emails, known_same_room,
require_not_time_adj, adjacency_type):
p_bar = progress(len(emails))
for index_x, email_x in enumerate(emails):
p_bar.update(index_x)
if email_x not in graded_exam.emails:
continue
eval_x = graded_exam.evaluation_for(email_x)
if not known_same_room:
room_x = seating_chart.room_for(email_x)
for email_y in emails[index_x+1:]:
if email_y not in graded_exam.emails:
continue
if not known_same_room:
same_room = room_x == seating_chart.room_for(email_y)
else:
same_room = True
time_adjacent = abs(graded_exam.time_diff(email_x, email_y)) <= time_delta
if require_not_time_adj and time_adjacent:
continue
yield ExamPair(eval_x,
graded_exam.evaluation_for(email_y),
time_adjacent,
seating_chart.are_adjacent(email_x, email_y, adjacency_type),
same_room)
def _unusualness(grader, question):
"""
Get the unusualness of a grader with respect to a graded question; i.e., the average of the
z scores from the overall mean for each rubric item.
"""
overall_mean = question.mean_score
overall_std = question.std_score
by_grader = question.for_grader(grader)
return np.mean((np.abs(by_grader.mean_score - overall_mean) / overall_std).rubric_items)
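# --- Editor's illustrative sketch (not part of the original module) ---------
# A hedged, array-level restatement of the "unusualness" formula above: the
# mean absolute z-score of a grader's per-rubric-item means against the
# overall per-item means and standard deviations.  This helper is purely
# illustrative and is not used elsewhere in this module.
def _unusualness_from_arrays(grader_item_means, overall_item_means, overall_item_stds):
    grader_item_means = np.asarray(grader_item_means, dtype=float)
    overall_item_means = np.asarray(overall_item_means, dtype=float)
    overall_item_stds = np.asarray(overall_item_stds, dtype=float)
    return np.mean(np.abs(grader_item_means - overall_item_means) / overall_item_stds)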
def _identify_problematic_ranges(evals, z_thresh):
"""
    Outputs an iterable of emails for which at least one grader had an unusualness greater than the
z threshold.
"""
for _, graded_question in evals:
for grader in graded_question.graders:
if _unusualness(grader, graded_question) > z_thresh:
yield from graded_question.for_grader(grader).emails
| gpl-3.0 |
Vishakha1990/Lambdas | testing/digitalocean/test.py | 1 | 2597 | #!/usr/bin/python
import os, requests, time, json, argparse
API = "https://api.digitalocean.com/v2/droplets"
DROPLET_NAME = "ol-tester"
HEADERS = {
"Authorization": "Bearer "+os.environ['TOKEN'],
"Content-Type": "application/json"
}
def post(args):
r = requests.post(API, data=args, headers=HEADERS)
return r.json()
def get(args):
r = requests.get(API, data=args, headers=HEADERS)
return r.json()
def start():
r = requests.get("https://api.digitalocean.com/v2/account/keys", headers=HEADERS)
keys = map(lambda row: row['id'], r.json()['ssh_keys'])
args = {
"name":DROPLET_NAME,
"region":"nyc2",
"size":"512mb",
"image":"ubuntu-14-04-x64",
"ssh_keys":keys
}
r = requests.post(API, data=json.dumps(args), headers=HEADERS)
return r.json()
def kill():
args = {}
droplets = get(args)['droplets']
for d in droplets:
if d['name'] == DROPLET_NAME:
print 'Deleting %s (%d)' % (d['name'], d['id'])
print requests.delete(API+'/'+str(d['id']), headers=HEADERS)
def lookup(droplet_id):
r = requests.get(API+'/'+str(droplet_id), headers=HEADERS)
return r.json()['droplet']
def main():
parser = argparse.ArgumentParser()
parser.add_argument('--quickstart', default=False, action='store_true')
args = parser.parse_args()
global TEST_SCRIPT
if args.quickstart:
TEST_SCRIPT = "qs_test.sh"
else:
TEST_SCRIPT = "test.sh"
# cleanup just in case
kill()
# create new droplet and wait for it
droplet = start()['droplet']
print droplet
while True:
droplet = lookup(droplet['id'])
# status
s = droplet['status']
assert(s in ['active', 'new'])
# addr
ip = None
for addr in droplet["networks"]["v4"]:
if addr["type"] == "public":
ip = addr["ip_address"]
print 'STATUS: %s, IP: %s' % (str(s), str(ip))
if s == 'active' and ip != None:
break
time.sleep(3)
time.sleep(30) # give SSH some time
scp = 'scp -o "StrictHostKeyChecking no" %s root@%s:/tmp' % (TEST_SCRIPT, ip)
print 'RUN ' + scp
rv = os.system(scp)
assert(rv == 0)
cmds = 'bash /tmp/%s' % TEST_SCRIPT
ssh = 'echo "<CMDS>" | ssh -o "StrictHostKeyChecking no" root@<IP>'
ssh = ssh.replace('<CMDS>', cmds).replace('<IP>', ip)
print 'RUN ' + ssh
rv = os.system(ssh)
assert(rv == 0)
# make sure we cleanup everything!
kill()
if __name__ == '__main__':
main()
| apache-2.0 |
stverhae/incubator-airflow | airflow/operators/presto_check_operator.py | 44 | 4442 | # -*- coding: utf-8 -*-
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from airflow.hooks.presto_hook import PrestoHook
from airflow.operators.check_operator import CheckOperator, ValueCheckOperator, IntervalCheckOperator
from airflow.utils.decorators import apply_defaults
class PrestoCheckOperator(CheckOperator):
"""
Performs checks against Presto. The ``PrestoCheckOperator`` expects
a sql query that will return a single row. Each value on that
first row is evaluated using python ``bool`` casting. If any of the
values return ``False`` the check is failed and errors out.
Note that Python bool casting evals the following as ``False``:
* ``False``
* ``0``
* Empty string (``""``)
* Empty list (``[]``)
* Empty dictionary or set (``{}``)
Given a query like ``SELECT COUNT(*) FROM foo``, it will fail only if
    the count ``== 0``. You can craft a much more complex query that could,
    for instance, check that the table has the same number of rows as
    the source table upstream, or that the count of today's partition is
    greater than yesterday's partition, or that a set of metrics is less
    than 3 standard deviations from the 7 day average.
    This operator can be used as a data quality check in your pipeline.
    Depending on where you put it in your DAG, you can either halt the
    critical path, preventing dubious data from being published, or run it
    on the side and receive email alerts without stopping the progress of
    the DAG.
:param sql: the sql to be executed
:type sql: string
:param presto_conn_id: reference to the Presto database
:type presto_conn_id: string
"""
@apply_defaults
def __init__(
self, sql,
presto_conn_id='presto_default',
*args, **kwargs):
super(PrestoCheckOperator, self).__init__(sql=sql, *args, **kwargs)
self.presto_conn_id = presto_conn_id
self.sql = sql
def get_db_hook(self):
return PrestoHook(presto_conn_id=self.presto_conn_id)
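# --- Editor's illustrative sketch (not part of the original module) ---------
# A hedged example of wiring the check into a DAG; the task id and SQL are
# placeholders, and a configured "presto_default" connection is assumed.
def _example_presto_check(dag):
    return PrestoCheckOperator(
        task_id='presto_check_foo_not_empty',
        sql='SELECT COUNT(*) FROM foo',
        presto_conn_id='presto_default',
        dag=dag)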
class PrestoValueCheckOperator(ValueCheckOperator):
"""
Performs a simple value check using sql code.
:param sql: the sql to be executed
:type sql: string
:param presto_conn_id: reference to the Presto database
:type presto_conn_id: string
"""
@apply_defaults
def __init__(
self, sql, pass_value, tolerance=None,
presto_conn_id='presto_default',
*args, **kwargs):
super(PrestoValueCheckOperator, self).__init__(
sql=sql, pass_value=pass_value, tolerance=tolerance,
*args, **kwargs)
self.presto_conn_id = presto_conn_id
def get_db_hook(self):
return PrestoHook(presto_conn_id=self.presto_conn_id)
class PrestoIntervalCheckOperator(IntervalCheckOperator):
"""
Checks that the values of metrics given as SQL expressions are within
a certain tolerance of the ones from days_back before.
:param table: the table name
:type table: str
:param days_back: number of days between ds and the ds we want to check
against. Defaults to 7 days
:type days_back: int
:param metrics_threshold: a dictionary of ratios indexed by metrics
:type metrics_threshold: dict
:param presto_conn_id: reference to the Presto database
:type presto_conn_id: string
"""
@apply_defaults
def __init__(
self, table, metrics_thresholds,
date_filter_column='ds', days_back=-7,
presto_conn_id='presto_default',
*args, **kwargs):
super(PrestoIntervalCheckOperator, self).__init__(
table=table, metrics_thresholds=metrics_thresholds,
date_filter_column=date_filter_column, days_back=days_back,
*args, **kwargs)
self.presto_conn_id = presto_conn_id
def get_db_hook(self):
return PrestoHook(presto_conn_id=self.presto_conn_id)
| apache-2.0 |
javachengwc/hue | desktop/libs/liboozie/src/liboozie/credentials_tests.py | 33 | 2933 | #!/usr/bin/env python
# Licensed to Cloudera, Inc. under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. Cloudera, Inc. licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from nose.tools import assert_equal, assert_true
import beeswax.conf
from liboozie.credentials import Credentials
LOG = logging.getLogger(__name__)
class TestCredentials():
CREDENTIALS = {
"hcat": "org.apache.oozie.action.hadoop.HCatCredentials",
"hive2": "org.apache.oozie.action.hadoop.Hive2Credentials",
"hbase": "org.apache.oozie.action.hadoop.HbaseCredentials"
}
def test_parse_oozie(self):
oozie_credentialclasses = """
hbase=org.apache.oozie.action.hadoop.HbaseCredentials,
hcat=org.apache.oozie.action.hadoop.HCatCredentials,
hive2=org.apache.oozie.action.hadoop.Hive2Credentials
"""
oozie_config = {'oozie.credentials.credentialclasses': oozie_credentialclasses}
creds = Credentials()
assert_equal({
'hive2': 'org.apache.oozie.action.hadoop.Hive2Credentials',
'hbase': 'org.apache.oozie.action.hadoop.HbaseCredentials',
'hcat': 'org.apache.oozie.action.hadoop.HCatCredentials'
}, creds._parse_oozie(oozie_config)
)
def test_gen_properties(self):
creds = Credentials(credentials=TestCredentials.CREDENTIALS.copy())
hive_properties = {
'thrift_uri': 'thrift://hue-koh-chang:9999',
'kerberos_principal': 'hive',
'hive2.server.principal': 'hive',
}
finish = (
beeswax.conf.HIVE_SERVER_HOST.set_for_testing('hue-koh-chang'),
beeswax.conf.HIVE_SERVER_PORT.set_for_testing(12345),
)
try:
assert_equal({
'hcat': {
'xml_name': 'hcat',
'properties': [
('hcat.metastore.uri', 'thrift://hue-koh-chang:9999'),
('hcat.metastore.principal', 'hive')
]},
'hive2': {
'xml_name': 'hive2',
'properties': [
('hive2.jdbc.url', 'jdbc:hive2://hue-koh-chang:12345/default'),
('hive2.server.principal', 'hive')
]},
'hbase': {
'xml_name': 'hbase',
'properties': []
}
}, creds.get_properties(hive_properties))
finally:
for f in finish:
f()
| apache-2.0 |
talnoah/Lemur_kernel | tools/perf/scripts/python/Perf-Trace-Util/lib/Perf/Trace/Core.py | 11088 | 3246 | # Core.py - Python extension for perf script, core functions
#
# Copyright (C) 2010 by Tom Zanussi <tzanussi@gmail.com>
#
# This software may be distributed under the terms of the GNU General
# Public License ("GPL") version 2 as published by the Free Software
# Foundation.
from collections import defaultdict
def autodict():
return defaultdict(autodict)
flag_fields = autodict()
symbolic_fields = autodict()
def define_flag_field(event_name, field_name, delim):
flag_fields[event_name][field_name]['delim'] = delim
def define_flag_value(event_name, field_name, value, field_str):
flag_fields[event_name][field_name]['values'][value] = field_str
def define_symbolic_field(event_name, field_name):
# nothing to do, really
pass
def define_symbolic_value(event_name, field_name, value, field_str):
symbolic_fields[event_name][field_name]['values'][value] = field_str
def flag_str(event_name, field_name, value):
string = ""
if flag_fields[event_name][field_name]:
print_delim = 0
keys = flag_fields[event_name][field_name]['values'].keys()
keys.sort()
for idx in keys:
if not value and not idx:
string += flag_fields[event_name][field_name]['values'][idx]
break
if idx and (value & idx) == idx:
if print_delim and flag_fields[event_name][field_name]['delim']:
string += " " + flag_fields[event_name][field_name]['delim'] + " "
string += flag_fields[event_name][field_name]['values'][idx]
print_delim = 1
value &= ~idx
return string
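# --- Editor's illustrative sketch (not part of the original module) ---------
# A hedged example of how generated perf-script handlers use the flag helpers
# above; the event and field names here are made up for illustration.
def _example_flag_usage():
    define_flag_field("irq__softirq_entry", "flags", "|")
    define_flag_value("irq__softirq_entry", "flags", 1, "HI")
    define_flag_value("irq__softirq_entry", "flags", 2, "TIMER")
    # 3 == 1 | 2, so both names are joined with the delimiter.
    return flag_str("irq__softirq_entry", "flags", 3)   # -> "HI | TIMER"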
def symbol_str(event_name, field_name, value):
string = ""
if symbolic_fields[event_name][field_name]:
keys = symbolic_fields[event_name][field_name]['values'].keys()
keys.sort()
for idx in keys:
if not value and not idx:
string = symbolic_fields[event_name][field_name]['values'][idx]
break
if (value == idx):
string = symbolic_fields[event_name][field_name]['values'][idx]
break
return string
trace_flags = { 0x00: "NONE", \
0x01: "IRQS_OFF", \
0x02: "IRQS_NOSUPPORT", \
0x04: "NEED_RESCHED", \
0x08: "HARDIRQ", \
0x10: "SOFTIRQ" }
def trace_flag_str(value):
string = ""
print_delim = 0
keys = trace_flags.keys()
for idx in keys:
if not value and not idx:
string += "NONE"
break
if idx and (value & idx) == idx:
if print_delim:
string += " | ";
string += trace_flags[idx]
print_delim = 1
value &= ~idx
return string
def taskState(state):
states = {
0 : "R",
1 : "S",
2 : "D",
64: "DEAD"
}
if state not in states:
return "Unknown"
return states[state]
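# --- Editor's illustrative sketch (not part of the original module) ---------
# A hedged illustration of the task-state mapping above.
def _example_task_state():
    assert taskState(0) == "R"         # runnable
    assert taskState(64) == "DEAD"
    assert taskState(5) == "Unknown"   # unmapped states fall through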
class EventHeaders:
def __init__(self, common_cpu, common_secs, common_nsecs,
common_pid, common_comm):
self.cpu = common_cpu
self.secs = common_secs
self.nsecs = common_nsecs
self.pid = common_pid
self.comm = common_comm
def ts(self):
return (self.secs * (10 ** 9)) + self.nsecs
def ts_format(self):
return "%d.%d" % (self.secs, int(self.nsecs / 1000))
| gpl-2.0 |
jumpstarter-io/nova | nova/virt/baremetal/ipmi.py | 13 | 10740 | # coding=utf-8
# Copyright 2012 Hewlett-Packard Development Company, L.P.
# Copyright (c) 2012 NTT DOCOMO, INC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Baremetal IPMI power manager.
"""
import os
import stat
import tempfile
from oslo.config import cfg
from nova import exception
from nova.i18n import _
from nova.openstack.common import log as logging
from nova.openstack.common import loopingcall
from nova import paths
from nova import utils
from nova.virt.baremetal import baremetal_states
from nova.virt.baremetal import base
from nova.virt.baremetal import utils as bm_utils
opts = [
cfg.StrOpt('terminal',
default='shellinaboxd',
help='Path to baremetal terminal program'),
cfg.StrOpt('terminal_cert_dir',
help='Path to baremetal terminal SSL cert(PEM)'),
cfg.StrOpt('terminal_pid_dir',
default=paths.state_path_def('baremetal/console'),
help='Path to directory stores pidfiles of baremetal_terminal'),
cfg.IntOpt('ipmi_power_retry',
default=10,
help='Maximal number of retries for IPMI operations'),
]
baremetal_group = cfg.OptGroup(name='baremetal',
title='Baremetal Options')
CONF = cfg.CONF
CONF.register_group(baremetal_group)
CONF.register_opts(opts, baremetal_group)
LOG = logging.getLogger(__name__)
def _make_password_file(password):
fd, path = tempfile.mkstemp()
os.fchmod(fd, stat.S_IRUSR | stat.S_IWUSR)
with os.fdopen(fd, "w") as f:
# NOTE(r-mibu): Since ipmitool hangs with an empty password file,
# we have to write '\0' if password was empty.
# see https://bugs.launchpad.net/nova/+bug/1237802 for more details
f.write(password or b"\0")
return path
def _get_console_pid_path(node_id):
name = "%s.pid" % node_id
path = os.path.join(CONF.baremetal.terminal_pid_dir, name)
return path
def _get_console_pid(node_id):
pid_path = _get_console_pid_path(node_id)
if os.path.exists(pid_path):
with open(pid_path, 'r') as f:
pid_str = f.read()
try:
return int(pid_str)
except ValueError:
LOG.warn(_("pid file %s does not contain any pid"), pid_path)
return None
class IPMI(base.PowerManager):
"""IPMI Power Driver for Baremetal Nova Compute
This PowerManager class provides mechanism for controlling the power state
of physical hardware via IPMI calls. It also provides serial console access
where available.
"""
def __init__(self, node, **kwargs):
self.state = None
self.retries = None
self.node_id = node['id']
self.address = node['pm_address']
self.user = node['pm_user']
self.password = node['pm_password']
self.port = node['terminal_port']
if self.node_id is None:
raise exception.InvalidParameterValue(_("Node id not supplied "
"to IPMI"))
if self.address is None:
raise exception.InvalidParameterValue(_("Address not supplied "
"to IPMI"))
if self.user is None:
raise exception.InvalidParameterValue(_("User not supplied "
"to IPMI"))
if self.password is None:
raise exception.InvalidParameterValue(_("Password not supplied "
"to IPMI"))
def _exec_ipmitool(self, command):
args = ['ipmitool',
'-I',
'lanplus',
'-H',
self.address,
'-U',
self.user,
'-f']
pwfile = _make_password_file(self.password)
try:
args.append(pwfile)
args.extend(command.split(" "))
out, err = utils.execute(*args, attempts=3)
LOG.debug("ipmitool stdout: '%(out)s', stderr: '%(err)s'",
{'out': out, 'err': err})
return out, err
finally:
bm_utils.unlink_without_raise(pwfile)
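    # Editor's illustrative note (not part of the original driver): for
    # example, _exec_ipmitool("power status") shells out to roughly
    #   ipmitool -I lanplus -H <pm_address> -U <pm_user> -f <tmp pwfile> power status
    # and the temporary password file is removed afterwards.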
def _power_on(self):
"""Turn the power to this node ON."""
def _wait_for_power_on():
"""Called at an interval until the node's power is on."""
if self.is_power_on():
self.state = baremetal_states.ACTIVE
raise loopingcall.LoopingCallDone()
if self.retries > CONF.baremetal.ipmi_power_retry:
LOG.error(_("IPMI power on failed after %d tries") % (
CONF.baremetal.ipmi_power_retry))
self.state = baremetal_states.ERROR
raise loopingcall.LoopingCallDone()
try:
self.retries += 1
if not self.power_on_called:
self._exec_ipmitool("power on")
self.power_on_called = True
except Exception:
LOG.exception(_("IPMI power on failed"))
self.retries = 0
self.power_on_called = False
timer = loopingcall.FixedIntervalLoopingCall(_wait_for_power_on)
timer.start(interval=1.0).wait()
def _power_off(self):
"""Turn the power to this node OFF."""
def _wait_for_power_off():
"""Called at an interval until the node's power is off."""
if self.is_power_on() is False:
self.state = baremetal_states.DELETED
raise loopingcall.LoopingCallDone()
if self.retries > CONF.baremetal.ipmi_power_retry:
LOG.error(_("IPMI power off failed after %d tries") % (
CONF.baremetal.ipmi_power_retry))
self.state = baremetal_states.ERROR
raise loopingcall.LoopingCallDone()
try:
self.retries += 1
if not self.power_off_called:
self._exec_ipmitool("power off")
self.power_off_called = True
except Exception:
LOG.exception(_("IPMI power off failed"))
self.retries = 0
self.power_off_called = False
timer = loopingcall.FixedIntervalLoopingCall(_wait_for_power_off)
timer.start(interval=1.0).wait()
def _set_pxe_for_next_boot(self):
try:
self._exec_ipmitool("chassis bootdev pxe options=persistent")
except Exception:
LOG.exception(_("IPMI set next bootdev failed"))
def activate_node(self):
"""Turns the power to node ON.
Sets node next-boot to PXE and turns the power on,
waiting up to ipmi_power_retry/2 seconds for confirmation
that the power is on.
:returns: One of baremetal_states.py, representing the new state.
"""
if self.is_power_on() and self.state == baremetal_states.ACTIVE:
LOG.warning(_("Activate node called, but node %s "
"is already active") % self.address)
self._set_pxe_for_next_boot()
self._power_on()
return self.state
def reboot_node(self):
"""Cycles the power to a node.
Turns the power off, sets next-boot to PXE, and turns the power on.
Each action waits up to ipmi_power_retry/2 seconds for confirmation
that the power state has changed.
:returns: One of baremetal_states.py, representing the new state.
"""
self._power_off()
self._set_pxe_for_next_boot()
self._power_on()
return self.state
def deactivate_node(self):
"""Turns the power to node OFF.
Turns the power off, and waits up to ipmi_power_retry/2 seconds
for confirmation that the power is off.
:returns: One of baremetal_states.py, representing the new state.
"""
self._power_off()
return self.state
def is_power_on(self):
"""Check if the power is currently on.
:returns: True if on; False if off; None if unable to determine.
"""
# NOTE(deva): string matching based on
# http://ipmitool.cvs.sourceforge.net/
# viewvc/ipmitool/ipmitool/lib/ipmi_chassis.c
res = self._exec_ipmitool("power status")[0]
if res == ("Chassis Power is on\n"):
return True
elif res == ("Chassis Power is off\n"):
return False
return None
def start_console(self):
if not self.port:
return
args = []
args.append(CONF.baremetal.terminal)
if CONF.baremetal.terminal_cert_dir:
args.append("-c")
args.append(CONF.baremetal.terminal_cert_dir)
else:
args.append("-t")
args.append("-p")
args.append(str(self.port))
args.append("--background=%s" % _get_console_pid_path(self.node_id))
args.append("-s")
try:
pwfile = _make_password_file(self.password)
ipmi_args = "/:%(uid)s:%(gid)s:HOME:ipmitool -H %(address)s" \
" -I lanplus -U %(user)s -f %(pwfile)s sol activate" \
% {'uid': os.getuid(),
'gid': os.getgid(),
'address': self.address,
'user': self.user,
'pwfile': pwfile,
}
args.append(ipmi_args)
# Run shellinaboxd without pipes. Otherwise utils.execute() waits
# infinitely since shellinaboxd does not close passed fds.
x = ["'" + arg.replace("'", "'\\''") + "'" for arg in args]
x.append('</dev/null')
x.append('>/dev/null')
x.append('2>&1')
utils.execute(' '.join(x), shell=True)
finally:
bm_utils.unlink_without_raise(pwfile)
def stop_console(self):
console_pid = _get_console_pid(self.node_id)
if console_pid:
# Allow exitcode 99 (RC_UNAUTHORIZED)
utils.execute('kill', '-TERM', str(console_pid),
run_as_root=True,
check_exit_code=[0, 99])
bm_utils.unlink_without_raise(_get_console_pid_path(self.node_id))
| apache-2.0 |
valexandersaulys/airbnb_kaggle_contest | venv/lib/python3.4/site-packages/numpy/lib/function_base.py | 7 | 134697 | from __future__ import division, absolute_import, print_function
import warnings
import sys
import collections
import operator
import numpy as np
import numpy.core.numeric as _nx
from numpy.core import linspace, atleast_1d, atleast_2d
from numpy.core.numeric import (
ones, zeros, arange, concatenate, array, asarray, asanyarray, empty,
empty_like, ndarray, around, floor, ceil, take, dot, where, intp,
integer, isscalar
)
from numpy.core.umath import (
pi, multiply, add, arctan2, frompyfunc, cos, less_equal, sqrt, sin,
mod, exp, log10
)
from numpy.core.fromnumeric import (
ravel, nonzero, sort, partition, mean, any, sum
)
from numpy.core.numerictypes import typecodes, number
from numpy.lib.twodim_base import diag
from .utils import deprecate
from numpy.core.multiarray import _insert, add_docstring
from numpy.core.multiarray import digitize, bincount, interp as compiled_interp
from numpy.core.umath import _add_newdoc_ufunc as add_newdoc_ufunc
from numpy.compat import long
# Force range to be a generator, for np.delete's usage.
if sys.version_info[0] < 3:
range = xrange
__all__ = [
'select', 'piecewise', 'trim_zeros', 'copy', 'iterable', 'percentile',
'diff', 'gradient', 'angle', 'unwrap', 'sort_complex', 'disp',
'extract', 'place', 'vectorize', 'asarray_chkfinite', 'average',
'histogram', 'histogramdd', 'bincount', 'digitize', 'cov', 'corrcoef',
'msort', 'median', 'sinc', 'hamming', 'hanning', 'bartlett',
'blackman', 'kaiser', 'trapz', 'i0', 'add_newdoc', 'add_docstring',
'meshgrid', 'delete', 'insert', 'append', 'interp', 'add_newdoc_ufunc'
]
def iterable(y):
"""
Check whether or not an object can be iterated over.
Parameters
----------
y : object
Input object.
Returns
-------
b : {0, 1}
Return 1 if the object has an iterator method or is a sequence,
and 0 otherwise.
Examples
--------
>>> np.iterable([1, 2, 3])
1
>>> np.iterable(2)
0
"""
try:
iter(y)
except:
return 0
return 1
def histogram(a, bins=10, range=None, normed=False, weights=None,
density=None):
"""
Compute the histogram of a set of data.
Parameters
----------
a : array_like
Input data. The histogram is computed over the flattened array.
bins : int or sequence of scalars, optional
If `bins` is an int, it defines the number of equal-width
bins in the given range (10, by default). If `bins` is a sequence,
it defines the bin edges, including the rightmost edge, allowing
for non-uniform bin widths.
range : (float, float), optional
The lower and upper range of the bins. If not provided, range
is simply ``(a.min(), a.max())``. Values outside the range are
ignored.
normed : bool, optional
This keyword is deprecated in Numpy 1.6 due to confusing/buggy
behavior. It will be removed in Numpy 2.0. Use the density keyword
instead.
If False, the result will contain the number of samples
in each bin. If True, the result is the value of the
probability *density* function at the bin, normalized such that
the *integral* over the range is 1. Note that this latter behavior is
known to be buggy with unequal bin widths; use `density` instead.
weights : array_like, optional
An array of weights, of the same shape as `a`. Each value in `a`
only contributes its associated weight towards the bin count
(instead of 1). If `normed` is True, the weights are normalized,
so that the integral of the density over the range remains 1
density : bool, optional
If False, the result will contain the number of samples
in each bin. If True, the result is the value of the
probability *density* function at the bin, normalized such that
the *integral* over the range is 1. Note that the sum of the
histogram values will not be equal to 1 unless bins of unity
width are chosen; it is not a probability *mass* function.
Overrides the `normed` keyword if given.
Returns
-------
hist : array
The values of the histogram. See `normed` and `weights` for a
description of the possible semantics.
bin_edges : array of dtype float
Return the bin edges ``(length(hist)+1)``.
See Also
--------
histogramdd, bincount, searchsorted, digitize
Notes
-----
All but the last (righthand-most) bin is half-open. In other words, if
`bins` is::
[1, 2, 3, 4]
then the first bin is ``[1, 2)`` (including 1, but excluding 2) and the
second ``[2, 3)``. The last bin, however, is ``[3, 4]``, which *includes*
4.
Examples
--------
>>> np.histogram([1, 2, 1], bins=[0, 1, 2, 3])
(array([0, 2, 1]), array([0, 1, 2, 3]))
>>> np.histogram(np.arange(4), bins=np.arange(5), density=True)
(array([ 0.25, 0.25, 0.25, 0.25]), array([0, 1, 2, 3, 4]))
>>> np.histogram([[1, 2, 1], [1, 0, 1]], bins=[0,1,2,3])
(array([1, 4, 1]), array([0, 1, 2, 3]))
>>> a = np.arange(5)
>>> hist, bin_edges = np.histogram(a, density=True)
>>> hist
array([ 0.5, 0. , 0.5, 0. , 0. , 0.5, 0. , 0.5, 0. , 0.5])
>>> hist.sum()
2.4999999999999996
>>> np.sum(hist*np.diff(bin_edges))
1.0
"""
a = asarray(a)
if weights is not None:
weights = asarray(weights)
if np.any(weights.shape != a.shape):
raise ValueError(
'weights should have the same shape as a.')
weights = weights.ravel()
a = a.ravel()
if (range is not None):
mn, mx = range
if (mn > mx):
raise AttributeError(
'max must be larger than min in range parameter.')
# Histogram is an integer or a float array depending on the weights.
if weights is None:
ntype = np.dtype(np.intp)
else:
ntype = weights.dtype
# We set a block size, as this allows us to iterate over chunks when
# computing histograms, to minimize memory usage.
BLOCK = 65536
if not iterable(bins):
if np.isscalar(bins) and bins < 1:
raise ValueError(
'`bins` should be a positive integer.')
if range is None:
if a.size == 0:
# handle empty arrays. Can't determine range, so use 0-1.
range = (0, 1)
else:
range = (a.min(), a.max())
mn, mx = [mi + 0.0 for mi in range]
if mn == mx:
mn -= 0.5
mx += 0.5
# At this point, if the weights are not integer, floating point, or
# complex, we have to use the slow algorithm.
if weights is not None and not (np.can_cast(weights.dtype, np.double) or
np.can_cast(weights.dtype, np.complex)):
bins = linspace(mn, mx, bins + 1, endpoint=True)
if not iterable(bins):
# We now convert values of a to bin indices, under the assumption of
# equal bin widths (which is valid here).
# Initialize empty histogram
n = np.zeros(bins, ntype)
# Pre-compute histogram scaling factor
norm = bins / (mx - mn)
# We iterate over blocks here for two reasons: the first is that for
# large arrays, it is actually faster (for example for a 10^8 array it
# is 2x as fast) and it results in a memory footprint 3x lower in the
# limit of large arrays.
for i in arange(0, len(a), BLOCK):
tmp_a = a[i:i+BLOCK]
if weights is None:
tmp_w = None
else:
tmp_w = weights[i:i + BLOCK]
# Only include values in the right range
keep = (tmp_a >= mn)
keep &= (tmp_a <= mx)
if not np.logical_and.reduce(keep):
tmp_a = tmp_a[keep]
if tmp_w is not None:
tmp_w = tmp_w[keep]
tmp_a = tmp_a.astype(float)
tmp_a -= mn
tmp_a *= norm
# Compute the bin indices, and for values that lie exactly on mx we
# need to subtract one
indices = tmp_a.astype(np.intp)
indices[indices == bins] -= 1
# We now compute the histogram using bincount
if ntype.kind == 'c':
n.real += np.bincount(indices, weights=tmp_w.real, minlength=bins)
n.imag += np.bincount(indices, weights=tmp_w.imag, minlength=bins)
else:
n += np.bincount(indices, weights=tmp_w, minlength=bins).astype(ntype)
# We now compute the bin edges since these are returned
bins = linspace(mn, mx, bins + 1, endpoint=True)
else:
bins = asarray(bins)
if (np.diff(bins) < 0).any():
raise AttributeError(
'bins must increase monotonically.')
# Initialize empty histogram
n = np.zeros(bins.shape, ntype)
if weights is None:
for i in arange(0, len(a), BLOCK):
sa = sort(a[i:i+BLOCK])
n += np.r_[sa.searchsorted(bins[:-1], 'left'),
sa.searchsorted(bins[-1], 'right')]
else:
zero = array(0, dtype=ntype)
for i in arange(0, len(a), BLOCK):
tmp_a = a[i:i+BLOCK]
tmp_w = weights[i:i+BLOCK]
sorting_index = np.argsort(tmp_a)
sa = tmp_a[sorting_index]
sw = tmp_w[sorting_index]
cw = np.concatenate(([zero, ], sw.cumsum()))
bin_index = np.r_[sa.searchsorted(bins[:-1], 'left'),
sa.searchsorted(bins[-1], 'right')]
n += cw[bin_index]
n = np.diff(n)
if density is not None:
if density:
db = array(np.diff(bins), float)
return n/db/n.sum(), bins
else:
return n, bins
else:
# deprecated, buggy behavior. Remove for Numpy 2.0
if normed:
db = array(np.diff(bins), float)
return n/(n*db).sum(), bins
else:
return n, bins
def histogramdd(sample, bins=10, range=None, normed=False, weights=None):
"""
Compute the multidimensional histogram of some data.
Parameters
----------
sample : array_like
The data to be histogrammed. It must be an (N,D) array or data
that can be converted to such. The rows of the resulting array
are the coordinates of points in a D dimensional polytope.
bins : sequence or int, optional
The bin specification:
* A sequence of arrays describing the bin edges along each dimension.
* The number of bins for each dimension (nx, ny, ... =bins)
* The number of bins for all dimensions (nx=ny=...=bins).
range : sequence, optional
A sequence of lower and upper bin edges to be used if the edges are
not given explicitly in `bins`. Defaults to the minimum and maximum
values along each dimension.
normed : bool, optional
If False, returns the number of samples in each bin. If True,
returns the bin density ``bin_count / sample_count / bin_volume``.
weights : (N,) array_like, optional
An array of values `w_i` weighing each sample `(x_i, y_i, z_i, ...)`.
Weights are normalized to 1 if normed is True. If normed is False,
the values of the returned histogram are equal to the sum of the
weights belonging to the samples falling into each bin.
Returns
-------
H : ndarray
The multidimensional histogram of sample x. See normed and weights
for the different possible semantics.
edges : list
A list of D arrays describing the bin edges for each dimension.
See Also
--------
histogram: 1-D histogram
histogram2d: 2-D histogram
Examples
--------
>>> r = np.random.randn(100,3)
>>> H, edges = np.histogramdd(r, bins = (5, 8, 4))
>>> H.shape, edges[0].size, edges[1].size, edges[2].size
((5, 8, 4), 6, 9, 5)
"""
try:
# Sample is an ND-array.
N, D = sample.shape
except (AttributeError, ValueError):
# Sample is a sequence of 1D arrays.
sample = atleast_2d(sample).T
N, D = sample.shape
nbin = empty(D, int)
edges = D*[None]
dedges = D*[None]
if weights is not None:
weights = asarray(weights)
try:
M = len(bins)
if M != D:
raise AttributeError(
                'The dimension of bins must be equal to the dimension of the '
                'sample x.')
except TypeError:
# bins is an integer
bins = D*[bins]
# Select range for each dimension
# Used only if number of bins is given.
if range is None:
# Handle empty input. Range can't be determined in that case, use 0-1.
if N == 0:
smin = zeros(D)
smax = ones(D)
else:
smin = atleast_1d(array(sample.min(0), float))
smax = atleast_1d(array(sample.max(0), float))
else:
smin = zeros(D)
smax = zeros(D)
for i in arange(D):
smin[i], smax[i] = range[i]
# Make sure the bins have a finite width.
for i in arange(len(smin)):
if smin[i] == smax[i]:
smin[i] = smin[i] - .5
smax[i] = smax[i] + .5
# avoid rounding issues for comparisons when dealing with inexact types
if np.issubdtype(sample.dtype, np.inexact):
edge_dt = sample.dtype
else:
edge_dt = float
# Create edge arrays
for i in arange(D):
if isscalar(bins[i]):
if bins[i] < 1:
raise ValueError(
"Element at index %s in `bins` should be a positive "
"integer." % i)
nbin[i] = bins[i] + 2 # +2 for outlier bins
edges[i] = linspace(smin[i], smax[i], nbin[i]-1, dtype=edge_dt)
else:
edges[i] = asarray(bins[i], edge_dt)
nbin[i] = len(edges[i]) + 1 # +1 for outlier bins
dedges[i] = diff(edges[i])
if np.any(np.asarray(dedges[i]) <= 0):
raise ValueError(
"Found bin edge of size <= 0. Did you specify `bins` with"
"non-monotonic sequence?")
nbin = asarray(nbin)
# Handle empty input.
if N == 0:
return np.zeros(nbin-2), edges
# Compute the bin number each sample falls into.
Ncount = {}
for i in arange(D):
Ncount[i] = digitize(sample[:, i], edges[i])
# Using digitize, values that fall on an edge are put in the right bin.
# For the rightmost bin, we want values equal to the right edge to be
# counted in the last bin, and not as an outlier.
for i in arange(D):
# Rounding precision
mindiff = dedges[i].min()
if not np.isinf(mindiff):
decimal = int(-log10(mindiff)) + 6
# Find which points are on the rightmost edge.
not_smaller_than_edge = (sample[:, i] >= edges[i][-1])
on_edge = (around(sample[:, i], decimal) ==
around(edges[i][-1], decimal))
# Shift these points one bin to the left.
Ncount[i][where(on_edge & not_smaller_than_edge)[0]] -= 1
# Flattened histogram matrix (1D)
# Reshape is used so that overlarge arrays
# will raise an error.
hist = zeros(nbin, float).reshape(-1)
# Compute the sample indices in the flattened histogram matrix.
ni = nbin.argsort()
xy = zeros(N, int)
for i in arange(0, D-1):
xy += Ncount[ni[i]] * nbin[ni[i+1:]].prod()
xy += Ncount[ni[-1]]
# Compute the number of repetitions in xy and assign it to the
# flattened histmat.
if len(xy) == 0:
return zeros(nbin-2, int), edges
flatcount = bincount(xy, weights)
a = arange(len(flatcount))
hist[a] = flatcount
# Shape into a proper matrix
hist = hist.reshape(sort(nbin))
for i in arange(nbin.size):
j = ni.argsort()[i]
hist = hist.swapaxes(i, j)
ni[i], ni[j] = ni[j], ni[i]
# Remove outliers (indices 0 and -1 for each dimension).
core = D*[slice(1, -1)]
hist = hist[core]
# Normalize if normed is True
if normed:
s = hist.sum()
for i in arange(D):
shape = ones(D, int)
shape[i] = nbin[i] - 2
hist = hist / dedges[i].reshape(shape)
hist /= s
if (hist.shape != nbin - 2).any():
raise RuntimeError(
"Internal Shape Error")
return hist, edges
def average(a, axis=None, weights=None, returned=False):
"""
Compute the weighted average along the specified axis.
Parameters
----------
a : array_like
Array containing data to be averaged. If `a` is not an array, a
conversion is attempted.
axis : int, optional
Axis along which to average `a`. If `None`, averaging is done over
the flattened array.
weights : array_like, optional
An array of weights associated with the values in `a`. Each value in
`a` contributes to the average according to its associated weight.
The weights array can either be 1-D (in which case its length must be
the size of `a` along the given axis) or of the same shape as `a`.
If `weights=None`, then all data in `a` are assumed to have a
weight equal to one.
returned : bool, optional
Default is `False`. If `True`, the tuple (`average`, `sum_of_weights`)
is returned, otherwise only the average is returned.
If `weights=None`, `sum_of_weights` is equivalent to the number of
elements over which the average is taken.
Returns
-------
average, [sum_of_weights] : array_type or double
Return the average along the specified axis. When returned is `True`,
return a tuple with the average as the first element and the sum
of the weights as the second element. The return type is `Float`
if `a` is of integer type, otherwise it is of the same type as `a`.
`sum_of_weights` is of the same type as `average`.
Raises
------
ZeroDivisionError
When all weights along axis are zero. See `numpy.ma.average` for a
version robust to this type of error.
TypeError
When the length of 1D `weights` is not the same as the shape of `a`
along axis.
See Also
--------
mean
ma.average : average for masked arrays -- useful if your data contains
"missing" values
Examples
--------
>>> data = range(1,5)
>>> data
[1, 2, 3, 4]
>>> np.average(data)
2.5
>>> np.average(range(1,11), weights=range(10,0,-1))
4.0
>>> data = np.arange(6).reshape((3,2))
>>> data
array([[0, 1],
[2, 3],
[4, 5]])
>>> np.average(data, axis=1, weights=[1./4, 3./4])
array([ 0.75, 2.75, 4.75])
>>> np.average(data, weights=[1./4, 3./4])
Traceback (most recent call last):
...
TypeError: Axis must be specified when shapes of a and weights differ.
"""
if not isinstance(a, np.matrix):
a = np.asarray(a)
if weights is None:
avg = a.mean(axis)
scl = avg.dtype.type(a.size/avg.size)
else:
a = a + 0.0
wgt = np.asarray(weights)
# Sanity checks
if a.shape != wgt.shape:
if axis is None:
raise TypeError(
"Axis must be specified when shapes of a and weights "
"differ.")
if wgt.ndim != 1:
raise TypeError(
"1D weights expected when shapes of a and weights differ.")
if wgt.shape[0] != a.shape[axis]:
raise ValueError(
"Length of weights not compatible with specified axis.")
# setup wgt to broadcast along axis
wgt = np.array(wgt, copy=0, ndmin=a.ndim).swapaxes(-1, axis)
scl = wgt.sum(axis=axis, dtype=np.result_type(a.dtype, wgt.dtype))
if (scl == 0.0).any():
raise ZeroDivisionError(
"Weights sum to zero, can't be normalized")
avg = np.multiply(a, wgt).sum(axis)/scl
if returned:
scl = np.multiply(avg, 0) + scl
return avg, scl
else:
return avg
def asarray_chkfinite(a, dtype=None, order=None):
"""Convert the input to an array, checking for NaNs or Infs.
Parameters
----------
a : array_like
Input data, in any form that can be converted to an array. This
includes lists, lists of tuples, tuples, tuples of tuples, tuples
of lists and ndarrays. Success requires no NaNs or Infs.
dtype : data-type, optional
By default, the data-type is inferred from the input data.
order : {'C', 'F'}, optional
Whether to use row-major (C-style) or
column-major (Fortran-style) memory representation.
Defaults to 'C'.
Returns
-------
out : ndarray
Array interpretation of `a`. No copy is performed if the input
is already an ndarray. If `a` is a subclass of ndarray, a base
class ndarray is returned.
Raises
------
ValueError
Raises ValueError if `a` contains NaN (Not a Number) or Inf (Infinity).
See Also
--------
asarray : Create and array.
asanyarray : Similar function which passes through subclasses.
ascontiguousarray : Convert input to a contiguous array.
asfarray : Convert input to a floating point ndarray.
asfortranarray : Convert input to an ndarray with column-major
memory order.
fromiter : Create an array from an iterator.
fromfunction : Construct an array by executing a function on grid
positions.
Examples
--------
Convert a list into an array. If all elements are finite
``asarray_chkfinite`` is identical to ``asarray``.
>>> a = [1, 2]
>>> np.asarray_chkfinite(a, dtype=float)
array([1., 2.])
Raises ValueError if array_like contains Nans or Infs.
>>> a = [1, 2, np.inf]
>>> try:
... np.asarray_chkfinite(a)
... except ValueError:
... print 'ValueError'
...
ValueError
"""
a = asarray(a, dtype=dtype, order=order)
if a.dtype.char in typecodes['AllFloat'] and not np.isfinite(a).all():
raise ValueError(
"array must not contain infs or NaNs")
return a
def piecewise(x, condlist, funclist, *args, **kw):
"""
Evaluate a piecewise-defined function.
Given a set of conditions and corresponding functions, evaluate each
function on the input data wherever its condition is true.
Parameters
----------
x : ndarray
The input domain.
condlist : list of bool arrays
Each boolean array corresponds to a function in `funclist`. Wherever
`condlist[i]` is True, `funclist[i](x)` is used as the output value.
Each boolean array in `condlist` selects a piece of `x`,
and should therefore be of the same shape as `x`.
The length of `condlist` must correspond to that of `funclist`.
If one extra function is given, i.e. if
``len(funclist) - len(condlist) == 1``, then that extra function
is the default value, used wherever all conditions are false.
funclist : list of callables, f(x,*args,**kw), or scalars
Each function is evaluated over `x` wherever its corresponding
condition is True. It should take an array as input and give an array
or a scalar value as output. If, instead of a callable,
a scalar is provided then a constant function (``lambda x: scalar``) is
assumed.
args : tuple, optional
Any further arguments given to `piecewise` are passed to the functions
upon execution, i.e., if called ``piecewise(..., ..., 1, 'a')``, then
each function is called as ``f(x, 1, 'a')``.
kw : dict, optional
Keyword arguments used in calling `piecewise` are passed to the
functions upon execution, i.e., if called
``piecewise(..., ..., lambda=1)``, then each function is called as
``f(x, lambda=1)``.
Returns
-------
out : ndarray
The output is the same shape and type as x and is found by
calling the functions in `funclist` on the appropriate portions of `x`,
as defined by the boolean arrays in `condlist`. Portions not covered
by any condition have a default value of 0.
See Also
--------
choose, select, where
Notes
-----
This is similar to choose or select, except that functions are
evaluated on elements of `x` that satisfy the corresponding condition from
`condlist`.
The result is::
|--
|funclist[0](x[condlist[0]])
out = |funclist[1](x[condlist[1]])
|...
|funclist[n2](x[condlist[n2]])
|--
Examples
--------
Define the sigma function, which is -1 for ``x < 0`` and +1 for ``x >= 0``.
>>> x = np.linspace(-2.5, 2.5, 6)
>>> np.piecewise(x, [x < 0, x >= 0], [-1, 1])
array([-1., -1., -1., 1., 1., 1.])
Define the absolute value, which is ``-x`` for ``x <0`` and ``x`` for
``x >= 0``.
>>> np.piecewise(x, [x < 0, x >= 0], [lambda x: -x, lambda x: x])
array([ 2.5, 1.5, 0.5, 0.5, 1.5, 2.5])
"""
x = asanyarray(x)
n2 = len(funclist)
if (isscalar(condlist) or not (isinstance(condlist[0], list) or
isinstance(condlist[0], ndarray))):
condlist = [condlist]
condlist = array(condlist, dtype=bool)
n = len(condlist)
# This is a hack to work around problems with NumPy's
# handling of 0-d arrays and boolean indexing with
# numpy.bool_ scalars
zerod = False
if x.ndim == 0:
x = x[None]
zerod = True
if condlist.shape[-1] != 1:
condlist = condlist.T
if n == n2 - 1: # compute the "otherwise" condition.
totlist = np.logical_or.reduce(condlist, axis=0)
condlist = np.vstack([condlist, ~totlist])
n += 1
if (n != n2):
raise ValueError(
"function list and condition list must be the same")
y = zeros(x.shape, x.dtype)
for k in range(n):
item = funclist[k]
if not isinstance(item, collections.Callable):
y[condlist[k]] = item
else:
vals = x[condlist[k]]
if vals.size > 0:
y[condlist[k]] = item(vals, *args, **kw)
if zerod:
y = y.squeeze()
return y
def select(condlist, choicelist, default=0):
"""
Return an array drawn from elements in choicelist, depending on conditions.
Parameters
----------
condlist : list of bool ndarrays
The list of conditions which determine from which array in `choicelist`
the output elements are taken. When multiple conditions are satisfied,
the first one encountered in `condlist` is used.
choicelist : list of ndarrays
The list of arrays from which the output elements are taken. It has
to be of the same length as `condlist`.
default : scalar, optional
The element inserted in `output` when all conditions evaluate to False.
Returns
-------
output : ndarray
The output at position m is the m-th element of the array in
`choicelist` where the m-th element of the corresponding array in
`condlist` is True.
See Also
--------
where : Return elements from one of two arrays depending on condition.
take, choose, compress, diag, diagonal
Examples
--------
>>> x = np.arange(10)
>>> condlist = [x<3, x>5]
>>> choicelist = [x, x**2]
>>> np.select(condlist, choicelist)
array([ 0, 1, 2, 0, 0, 0, 36, 49, 64, 81])
"""
# Check the size of condlist and choicelist are the same, or abort.
if len(condlist) != len(choicelist):
raise ValueError(
'list of cases must be same length as list of conditions')
# Now that the dtype is known, handle the deprecated select([], []) case
if len(condlist) == 0:
# 2014-02-24, 1.9
warnings.warn("select with an empty condition list is not possible"
"and will be deprecated",
DeprecationWarning)
return np.asarray(default)[()]
choicelist = [np.asarray(choice) for choice in choicelist]
choicelist.append(np.asarray(default))
# need to get the result type before broadcasting for correct scalar
# behaviour
dtype = np.result_type(*choicelist)
# Convert conditions to arrays and broadcast conditions and choices
    # as the shape is needed for the result. Doing it separately optimizes
# for example when all choices are scalars.
condlist = np.broadcast_arrays(*condlist)
choicelist = np.broadcast_arrays(*choicelist)
# If cond array is not an ndarray in boolean format or scalar bool, abort.
deprecated_ints = False
for i in range(len(condlist)):
cond = condlist[i]
if cond.dtype.type is not np.bool_:
if np.issubdtype(cond.dtype, np.integer):
# A previous implementation accepted int ndarrays accidentally.
# Supported here deliberately, but deprecated.
condlist[i] = condlist[i].astype(bool)
deprecated_ints = True
else:
raise ValueError(
'invalid entry in choicelist: should be boolean ndarray')
if deprecated_ints:
# 2014-02-24, 1.9
msg = "select condlists containing integer ndarrays is deprecated " \
"and will be removed in the future. Use `.astype(bool)` to " \
"convert to bools."
warnings.warn(msg, DeprecationWarning)
if choicelist[0].ndim == 0:
# This may be common, so avoid the call.
result_shape = condlist[0].shape
else:
result_shape = np.broadcast_arrays(condlist[0], choicelist[0])[0].shape
result = np.full(result_shape, choicelist[-1], dtype)
# Use np.copyto to burn each choicelist array onto result, using the
# corresponding condlist as a boolean mask. This is done in reverse
# order since the first choice should take precedence.
choicelist = choicelist[-2::-1]
condlist = condlist[::-1]
for choice, cond in zip(choicelist, condlist):
np.copyto(result, choice, where=cond)
return result
def copy(a, order='K'):
"""
Return an array copy of the given object.
Parameters
----------
a : array_like
Input data.
order : {'C', 'F', 'A', 'K'}, optional
Controls the memory layout of the copy. 'C' means C-order,
'F' means F-order, 'A' means 'F' if `a` is Fortran contiguous,
'C' otherwise. 'K' means match the layout of `a` as closely
        as possible. (Note that this function and :meth:`ndarray.copy` are very
similar, but have different default values for their order=
arguments.)
Returns
-------
arr : ndarray
Array interpretation of `a`.
Notes
-----
This is equivalent to
>>> np.array(a, copy=True) #doctest: +SKIP
Examples
--------
Create an array x, with a reference y and a copy z:
>>> x = np.array([1, 2, 3])
>>> y = x
>>> z = np.copy(x)
Note that, when we modify x, y changes, but not z:
>>> x[0] = 10
>>> x[0] == y[0]
True
>>> x[0] == z[0]
False
"""
return array(a, order=order, copy=True)
# Basic operations
def gradient(f, *varargs, **kwargs):
"""
Return the gradient of an N-dimensional array.
The gradient is computed using second order accurate central differences
in the interior and either first differences or second order accurate
    one-sided (forward or backward) differences at the boundaries. The
returned gradient hence has the same shape as the input array.
Parameters
----------
f : array_like
An N-dimensional array containing samples of a scalar function.
varargs : list of scalar, optional
N scalars specifying the sample distances for each dimension,
i.e. `dx`, `dy`, `dz`, ... Default distance: 1.
edge_order : {1, 2}, optional
Gradient is calculated using N\ :sup:`th` order accurate differences
at the boundaries. Default: 1.
.. versionadded:: 1.9.1
Returns
-------
gradient : list of ndarray
Each element of `list` has the same shape as `f` giving the derivative
of `f` with respect to each dimension.
Examples
--------
>>> x = np.array([1, 2, 4, 7, 11, 16], dtype=np.float)
>>> np.gradient(x)
array([ 1. , 1.5, 2.5, 3.5, 4.5, 5. ])
>>> np.gradient(x, 2)
array([ 0.5 , 0.75, 1.25, 1.75, 2.25, 2.5 ])
For two dimensional arrays, the return will be two arrays ordered by
axis. In this example the first array stands for the gradient in
rows and the second one in columns direction:
>>> np.gradient(np.array([[1, 2, 6], [3, 4, 5]], dtype=np.float))
[array([[ 2., 2., -1.],
[ 2., 2., -1.]]), array([[ 1. , 2.5, 4. ],
[ 1. , 1. , 1. ]])]
>>> x = np.array([0, 1, 2, 3, 4])
>>> dx = np.gradient(x)
>>> y = x**2
>>> np.gradient(y, dx, edge_order=2)
array([-0., 2., 4., 6., 8.])
"""
f = np.asanyarray(f)
N = len(f.shape) # number of dimensions
n = len(varargs)
if n == 0:
dx = [1.0]*N
elif n == 1:
dx = [varargs[0]]*N
elif n == N:
dx = list(varargs)
else:
raise SyntaxError(
"invalid number of arguments")
edge_order = kwargs.pop('edge_order', 1)
if kwargs:
raise TypeError('"{}" are not valid keyword arguments.'.format(
'", "'.join(kwargs.keys())))
if edge_order > 2:
raise ValueError("'edge_order' greater than 2 not supported")
# use central differences on interior and one-sided differences on the
# endpoints. This preserves second order-accuracy over the full domain.
outvals = []
# create slice objects --- initially all are [:, :, ..., :]
slice1 = [slice(None)]*N
slice2 = [slice(None)]*N
slice3 = [slice(None)]*N
slice4 = [slice(None)]*N
otype = f.dtype.char
if otype not in ['f', 'd', 'F', 'D', 'm', 'M']:
otype = 'd'
# Difference of datetime64 elements results in timedelta64
if otype == 'M':
# Need to use the full dtype name because it contains unit information
otype = f.dtype.name.replace('datetime', 'timedelta')
elif otype == 'm':
# Needs to keep the specific units, can't be a general unit
otype = f.dtype
# Convert datetime64 data into ints. Make dummy variable `y`
# that is a view of ints if the data is datetime64, otherwise
    # just set y equal to the array `f`.
if f.dtype.char in ["M", "m"]:
y = f.view('int64')
else:
y = f
for axis in range(N):
if y.shape[axis] < 2:
raise ValueError(
"Shape of array too small to calculate a numerical gradient, "
"at least two elements are required.")
# Numerical differentiation: 1st order edges, 2nd order interior
if y.shape[axis] == 2 or edge_order == 1:
# Use first order differences for time data
out = np.empty_like(y, dtype=otype)
slice1[axis] = slice(1, -1)
slice2[axis] = slice(2, None)
slice3[axis] = slice(None, -2)
# 1D equivalent -- out[1:-1] = (y[2:] - y[:-2])/2.0
out[slice1] = (y[slice2] - y[slice3])/2.0
slice1[axis] = 0
slice2[axis] = 1
slice3[axis] = 0
# 1D equivalent -- out[0] = (y[1] - y[0])
out[slice1] = (y[slice2] - y[slice3])
slice1[axis] = -1
slice2[axis] = -1
slice3[axis] = -2
# 1D equivalent -- out[-1] = (y[-1] - y[-2])
out[slice1] = (y[slice2] - y[slice3])
        # Numerical differentiation: 2nd order edges, 2nd order interior
else:
# Use second order differences where possible
out = np.empty_like(y, dtype=otype)
slice1[axis] = slice(1, -1)
slice2[axis] = slice(2, None)
slice3[axis] = slice(None, -2)
# 1D equivalent -- out[1:-1] = (y[2:] - y[:-2])/2.0
out[slice1] = (y[slice2] - y[slice3])/2.0
slice1[axis] = 0
slice2[axis] = 0
slice3[axis] = 1
slice4[axis] = 2
# 1D equivalent -- out[0] = -(3*y[0] - 4*y[1] + y[2]) / 2.0
out[slice1] = -(3.0*y[slice2] - 4.0*y[slice3] + y[slice4])/2.0
slice1[axis] = -1
slice2[axis] = -1
slice3[axis] = -2
slice4[axis] = -3
            # 1D equivalent -- out[-1] = (3*y[-1] - 4*y[-2] + y[-3]) / 2.0
out[slice1] = (3.0*y[slice2] - 4.0*y[slice3] + y[slice4])/2.0
# divide by step size
out /= dx[axis]
outvals.append(out)
# reset the slice object in this dimension to ":"
slice1[axis] = slice(None)
slice2[axis] = slice(None)
slice3[axis] = slice(None)
slice4[axis] = slice(None)
if N == 1:
return outvals[0]
else:
return outvals
def diff(a, n=1, axis=-1):
"""
Calculate the n-th order discrete difference along given axis.
The first order difference is given by ``out[n] = a[n+1] - a[n]`` along
the given axis, higher order differences are calculated by using `diff`
recursively.
Parameters
----------
a : array_like
Input array
n : int, optional
The number of times values are differenced.
axis : int, optional
The axis along which the difference is taken, default is the last axis.
Returns
-------
diff : ndarray
The `n` order differences. The shape of the output is the same as `a`
except along `axis` where the dimension is smaller by `n`.
See Also
--------
gradient, ediff1d, cumsum
Examples
--------
>>> x = np.array([1, 2, 4, 7, 0])
>>> np.diff(x)
array([ 1, 2, 3, -7])
>>> np.diff(x, n=2)
array([ 1, 1, -10])
>>> x = np.array([[1, 3, 6, 10], [0, 5, 6, 8]])
>>> np.diff(x)
array([[2, 3, 4],
[5, 1, 2]])
>>> np.diff(x, axis=0)
array([[-1, 2, 0, -2]])
"""
if n == 0:
return a
if n < 0:
raise ValueError(
"order must be non-negative but got " + repr(n))
a = asanyarray(a)
nd = len(a.shape)
slice1 = [slice(None)]*nd
slice2 = [slice(None)]*nd
slice1[axis] = slice(1, None)
slice2[axis] = slice(None, -1)
slice1 = tuple(slice1)
slice2 = tuple(slice2)
if n > 1:
return diff(a[slice1]-a[slice2], n-1, axis=axis)
else:
return a[slice1]-a[slice2]
def interp(x, xp, fp, left=None, right=None, period=None):
"""
One-dimensional linear interpolation.
Returns the one-dimensional piecewise linear interpolant to a function
with given values at discrete data-points.
Parameters
----------
x : array_like
The x-coordinates of the interpolated values.
xp : 1-D sequence of floats
The x-coordinates of the data points, must be increasing if argument
`period` is not specified. Otherwise, `xp` is internally sorted after
normalizing the periodic boundaries with ``xp = xp % period``.
fp : 1-D sequence of floats
The y-coordinates of the data points, same length as `xp`.
left : float, optional
Value to return for `x < xp[0]`, default is `fp[0]`.
right : float, optional
Value to return for `x > xp[-1]`, default is `fp[-1]`.
period : None or float, optional
A period for the x-coordinates. This parameter allows the proper
interpolation of angular x-coordinates. Parameters `left` and `right`
are ignored if `period` is specified.
.. versionadded:: 1.10.0
Returns
-------
y : float or ndarray
The interpolated values, same shape as `x`.
Raises
------
ValueError
If `xp` and `fp` have different length
If `xp` or `fp` are not 1-D sequences
If `period == 0`
Notes
-----
Does not check that the x-coordinate sequence `xp` is increasing.
If `xp` is not increasing, the results are nonsense.
A simple check for increasing is::
np.all(np.diff(xp) > 0)
Examples
--------
>>> xp = [1, 2, 3]
>>> fp = [3, 2, 0]
>>> np.interp(2.5, xp, fp)
1.0
>>> np.interp([0, 1, 1.5, 2.72, 3.14], xp, fp)
array([ 3. , 3. , 2.5 , 0.56, 0. ])
>>> UNDEF = -99.0
>>> np.interp(3.14, xp, fp, right=UNDEF)
-99.0
Plot an interpolant to the sine function:
>>> x = np.linspace(0, 2*np.pi, 10)
>>> y = np.sin(x)
>>> xvals = np.linspace(0, 2*np.pi, 50)
>>> yinterp = np.interp(xvals, x, y)
>>> import matplotlib.pyplot as plt
>>> plt.plot(x, y, 'o')
[<matplotlib.lines.Line2D object at 0x...>]
>>> plt.plot(xvals, yinterp, '-x')
[<matplotlib.lines.Line2D object at 0x...>]
>>> plt.show()
Interpolation with periodic x-coordinates:
>>> x = [-180, -170, -185, 185, -10, -5, 0, 365]
>>> xp = [190, -190, 350, -350]
>>> fp = [5, 10, 3, 4]
>>> np.interp(x, xp, fp, period=360)
array([7.5, 5., 8.75, 6.25, 3., 3.25, 3.5, 3.75])
"""
if period is None:
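        # Scalars and 0-d arrays are wrapped in a one-element list so that
        # compiled_interp always receives a 1-D input; .item() then unwraps
        # the result back to a Python scalar.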
if isinstance(x, (float, int, number)):
return compiled_interp([x], xp, fp, left, right).item()
elif isinstance(x, np.ndarray) and x.ndim == 0:
return compiled_interp([x], xp, fp, left, right).item()
else:
return compiled_interp(x, xp, fp, left, right)
else:
if period == 0:
raise ValueError("period must be a non-zero value")
period = abs(period)
left = None
right = None
return_array = True
if isinstance(x, (float, int, number)):
return_array = False
x = [x]
x = np.asarray(x, dtype=np.float64)
xp = np.asarray(xp, dtype=np.float64)
fp = np.asarray(fp, dtype=np.float64)
if xp.ndim != 1 or fp.ndim != 1:
raise ValueError("Data points must be 1-D sequences")
if xp.shape[0] != fp.shape[0]:
raise ValueError("fp and xp are not of the same length")
# normalizing periodic boundaries
x = x % period
xp = xp % period
asort_xp = np.argsort(xp)
xp = xp[asort_xp]
fp = fp[asort_xp]
xp = np.concatenate((xp[-1:]-period, xp, xp[0:1]+period))
fp = np.concatenate((fp[-1:], fp, fp[0:1]))
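        # The sorted data is extended with one wrapped point on each side
        # (the last point shifted down by a period, the first shifted up),
        # so interpolation behaves correctly across the period boundary.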
if return_array:
return compiled_interp(x, xp, fp, left, right)
else:
return compiled_interp(x, xp, fp, left, right).item()
def angle(z, deg=0):
"""
Return the angle of the complex argument.
Parameters
----------
z : array_like
A complex number or sequence of complex numbers.
deg : bool, optional
Return angle in degrees if True, radians if False (default).
Returns
-------
angle : ndarray or scalar
The counterclockwise angle from the positive real axis on
the complex plane, with dtype as numpy.float64.
See Also
--------
arctan2
absolute
Examples
--------
>>> np.angle([1.0, 1.0j, 1+1j]) # in radians
array([ 0. , 1.57079633, 0.78539816])
>>> np.angle(1+1j, deg=True) # in degrees
45.0
"""
if deg:
fact = 180/pi
else:
fact = 1.0
z = asarray(z)
if (issubclass(z.dtype.type, _nx.complexfloating)):
zimag = z.imag
zreal = z.real
else:
zimag = 0
zreal = z
return arctan2(zimag, zreal) * fact
def unwrap(p, discont=pi, axis=-1):
"""
Unwrap by changing deltas between values to 2*pi complement.
Unwrap radian phase `p` by changing absolute jumps greater than
`discont` to their 2*pi complement along the given axis.
Parameters
----------
p : array_like
Input array.
discont : float, optional
Maximum discontinuity between values, default is ``pi``.
axis : int, optional
Axis along which unwrap will operate, default is the last axis.
Returns
-------
out : ndarray
Output array.
See Also
--------
rad2deg, deg2rad
Notes
-----
If the discontinuity in `p` is smaller than ``pi``, but larger than
`discont`, no unwrapping is done because taking the 2*pi complement
would only make the discontinuity larger.
Examples
--------
>>> phase = np.linspace(0, np.pi, num=5)
>>> phase[3:] += np.pi
>>> phase
array([ 0. , 0.78539816, 1.57079633, 5.49778714, 6.28318531])
>>> np.unwrap(phase)
array([ 0. , 0.78539816, 1.57079633, -0.78539816, 0. ])
"""
p = asarray(p)
nd = len(p.shape)
dd = diff(p, axis=axis)
slice1 = [slice(None, None)]*nd # full slices
slice1[axis] = slice(1, None)
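    # Wrap each difference into [-pi, pi); the per-step correction is then the
    # multiple-of-2*pi adjustment that reconciles the wrapped and raw
    # differences, and corrections for jumps smaller than `discont` are
    # discarded below.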
ddmod = mod(dd + pi, 2*pi) - pi
_nx.copyto(ddmod, pi, where=(ddmod == -pi) & (dd > 0))
ph_correct = ddmod - dd
_nx.copyto(ph_correct, 0, where=abs(dd) < discont)
up = array(p, copy=True, dtype='d')
up[slice1] = p[slice1] + ph_correct.cumsum(axis)
return up
def sort_complex(a):
"""
Sort a complex array using the real part first, then the imaginary part.
Parameters
----------
a : array_like
Input array
Returns
-------
out : complex ndarray
Always returns a sorted complex array.
Examples
--------
>>> np.sort_complex([5, 3, 6, 2, 1])
array([ 1.+0.j, 2.+0.j, 3.+0.j, 5.+0.j, 6.+0.j])
>>> np.sort_complex([1 + 2j, 2 - 1j, 3 - 2j, 3 - 3j, 3 + 5j])
array([ 1.+2.j, 2.-1.j, 3.-3.j, 3.-2.j, 3.+5.j])
"""
b = array(a, copy=True)
b.sort()
if not issubclass(b.dtype.type, _nx.complexfloating):
if b.dtype.char in 'bhBH':
return b.astype('F')
elif b.dtype.char == 'g':
return b.astype('G')
else:
return b.astype('D')
else:
return b
def trim_zeros(filt, trim='fb'):
"""
Trim the leading and/or trailing zeros from a 1-D array or sequence.
Parameters
----------
filt : 1-D array or sequence
Input array.
trim : str, optional
A string with 'f' representing trim from front and 'b' to trim from
back. Default is 'fb', trim zeros from both front and back of the
array.
Returns
-------
trimmed : 1-D array or sequence
The result of trimming the input. The input data type is preserved.
Examples
--------
>>> a = np.array((0, 0, 0, 1, 2, 3, 0, 2, 1, 0))
>>> np.trim_zeros(a)
array([1, 2, 3, 0, 2, 1])
>>> np.trim_zeros(a, 'b')
array([0, 0, 0, 1, 2, 3, 0, 2, 1])
The input data type is preserved, list/tuple in means list/tuple out.
>>> np.trim_zeros([0, 1, 2, 0])
[1, 2]
"""
first = 0
trim = trim.upper()
if 'F' in trim:
for i in filt:
if i != 0.:
break
else:
first = first + 1
last = len(filt)
if 'B' in trim:
for i in filt[::-1]:
if i != 0.:
break
else:
last = last - 1
return filt[first:last]
@deprecate
def unique(x):
"""
This function is deprecated. Use numpy.lib.arraysetops.unique()
instead.
"""
try:
tmp = x.flatten()
if tmp.size == 0:
return tmp
tmp.sort()
idx = concatenate(([True], tmp[1:] != tmp[:-1]))
return tmp[idx]
except AttributeError:
items = sorted(set(x))
return asarray(items)
def extract(condition, arr):
"""
Return the elements of an array that satisfy some condition.
This is equivalent to ``np.compress(ravel(condition), ravel(arr))``. If
    `condition` is boolean, ``np.extract`` is equivalent to ``arr[condition]``.
Note that `place` does the exact opposite of `extract`.
Parameters
----------
condition : array_like
An array whose nonzero or True entries indicate the elements of `arr`
to extract.
arr : array_like
Input array of the same size as `condition`.
Returns
-------
extract : ndarray
Rank 1 array of values from `arr` where `condition` is True.
See Also
--------
take, put, copyto, compress, place
Examples
--------
>>> arr = np.arange(12).reshape((3, 4))
>>> arr
array([[ 0, 1, 2, 3],
[ 4, 5, 6, 7],
[ 8, 9, 10, 11]])
>>> condition = np.mod(arr, 3)==0
>>> condition
array([[ True, False, False, True],
[False, False, True, False],
[False, True, False, False]], dtype=bool)
>>> np.extract(condition, arr)
array([0, 3, 6, 9])
If `condition` is boolean:
>>> arr[condition]
array([0, 3, 6, 9])
"""
return _nx.take(ravel(arr), nonzero(ravel(condition))[0])
def place(arr, mask, vals):
"""
Change elements of an array based on conditional and input values.
Similar to ``np.copyto(arr, vals, where=mask)``, the difference is that
`place` uses the first N elements of `vals`, where N is the number of
True values in `mask`, while `copyto` uses the elements where `mask`
is True.
Note that `extract` does the exact opposite of `place`.
Parameters
----------
arr : array_like
Array to put data into.
mask : array_like
        Boolean mask array. Must have the same size as `arr`.
vals : 1-D sequence
        Values to put into `arr`. Only the first N elements are used, where
        N is the number of True values in `mask`. If `vals` is smaller
than N it will be repeated.
See Also
--------
copyto, put, take, extract
Examples
--------
>>> arr = np.arange(6).reshape(2, 3)
>>> np.place(arr, arr>2, [44, 55])
>>> arr
array([[ 0, 1, 2],
[44, 55, 44]])
"""
return _insert(arr, mask, vals)
def disp(mesg, device=None, linefeed=True):
"""
Display a message on a device.
Parameters
----------
mesg : str
Message to display.
device : object
Device to write message. If None, defaults to ``sys.stdout`` which is
very similar to ``print``. `device` needs to have ``write()`` and
``flush()`` methods.
linefeed : bool, optional
Option whether to print a line feed or not. Defaults to True.
Raises
------
AttributeError
If `device` does not have a ``write()`` or ``flush()`` method.
Examples
--------
Besides ``sys.stdout``, a file-like object can also be used as it has
both required methods:
>>> from StringIO import StringIO
>>> buf = StringIO()
>>> np.disp('"Display" in a file', device=buf)
>>> buf.getvalue()
'"Display" in a file\\n'
"""
if device is None:
device = sys.stdout
if linefeed:
device.write('%s\n' % mesg)
else:
device.write('%s' % mesg)
device.flush()
return
class vectorize(object):
"""
vectorize(pyfunc, otypes='', doc=None, excluded=None, cache=False)
Generalized function class.
Define a vectorized function which takes a nested sequence
of objects or numpy arrays as inputs and returns a
numpy array as output. The vectorized function evaluates `pyfunc` over
successive tuples of the input arrays like the python map function,
except it uses the broadcasting rules of numpy.
The data type of the output of `vectorized` is determined by calling
the function with the first element of the input. This can be avoided
by specifying the `otypes` argument.
Parameters
----------
pyfunc : callable
A python function or method.
otypes : str or list of dtypes, optional
The output data type. It must be specified as either a string of
typecode characters or a list of data type specifiers. There should
be one data type specifier for each output.
doc : str, optional
The docstring for the function. If `None`, the docstring will be the
``pyfunc.__doc__``.
excluded : set, optional
Set of strings or integers representing the positional or keyword
arguments for which the function will not be vectorized. These will be
passed directly to `pyfunc` unmodified.
.. versionadded:: 1.7.0
cache : bool, optional
If `True`, then cache the first function call that determines the number
of outputs if `otypes` is not provided.
.. versionadded:: 1.7.0
Returns
-------
vectorized : callable
Vectorized function.
Examples
--------
>>> def myfunc(a, b):
... "Return a-b if a>b, otherwise return a+b"
... if a > b:
... return a - b
... else:
... return a + b
>>> vfunc = np.vectorize(myfunc)
>>> vfunc([1, 2, 3, 4], 2)
array([3, 4, 1, 2])
The docstring is taken from the input function to `vectorize` unless it
is specified
>>> vfunc.__doc__
'Return a-b if a>b, otherwise return a+b'
>>> vfunc = np.vectorize(myfunc, doc='Vectorized `myfunc`')
>>> vfunc.__doc__
'Vectorized `myfunc`'
The output type is determined by evaluating the first element of the input,
unless it is specified
>>> out = vfunc([1, 2, 3, 4], 2)
>>> type(out[0])
<type 'numpy.int32'>
>>> vfunc = np.vectorize(myfunc, otypes=[np.float])
>>> out = vfunc([1, 2, 3, 4], 2)
>>> type(out[0])
<type 'numpy.float64'>
The `excluded` argument can be used to prevent vectorizing over certain
arguments. This can be useful for array-like arguments of a fixed length
such as the coefficients for a polynomial as in `polyval`:
>>> def mypolyval(p, x):
... _p = list(p)
... res = _p.pop(0)
... while _p:
... res = res*x + _p.pop(0)
... return res
>>> vpolyval = np.vectorize(mypolyval, excluded=['p'])
>>> vpolyval(p=[1, 2, 3], x=[0, 1])
array([3, 6])
Positional arguments may also be excluded by specifying their position:
>>> vpolyval.excluded.add(0)
>>> vpolyval([1, 2, 3], x=[0, 1])
array([3, 6])
Notes
-----
The `vectorize` function is provided primarily for convenience, not for
performance. The implementation is essentially a for loop.
If `otypes` is not specified, then a call to the function with the
first argument will be used to determine the number of outputs. The
results of this call will be cached if `cache` is `True` to prevent
calling the function twice. However, to implement the cache, the
original function must be wrapped which will slow down subsequent
calls, so only do this if your function is expensive.
The new keyword argument interface and `excluded` argument support
further degrades performance.
"""
def __init__(self, pyfunc, otypes='', doc=None, excluded=None,
cache=False):
self.pyfunc = pyfunc
self.cache = cache
self._ufunc = None # Caching to improve default performance
if doc is None:
self.__doc__ = pyfunc.__doc__
else:
self.__doc__ = doc
if isinstance(otypes, str):
self.otypes = otypes
for char in self.otypes:
if char not in typecodes['All']:
raise ValueError(
"Invalid otype specified: %s" % (char,))
elif iterable(otypes):
self.otypes = ''.join([_nx.dtype(x).char for x in otypes])
else:
raise ValueError(
"Invalid otype specification")
# Excluded variable support
if excluded is None:
excluded = set()
self.excluded = set(excluded)
def __call__(self, *args, **kwargs):
"""
Return arrays with the results of `pyfunc` broadcast (vectorized) over
`args` and `kwargs` not in `excluded`.
"""
excluded = self.excluded
if not kwargs and not excluded:
func = self.pyfunc
vargs = args
else:
# The wrapper accepts only positional arguments: we use `names` and
# `inds` to mutate `the_args` and `kwargs` to pass to the original
# function.
nargs = len(args)
names = [_n for _n in kwargs if _n not in excluded]
inds = [_i for _i in range(nargs) if _i not in excluded]
the_args = list(args)
def func(*vargs):
for _n, _i in enumerate(inds):
the_args[_i] = vargs[_n]
kwargs.update(zip(names, vargs[len(inds):]))
return self.pyfunc(*the_args, **kwargs)
vargs = [args[_i] for _i in inds]
vargs.extend([kwargs[_n] for _n in names])
return self._vectorize_call(func=func, args=vargs)
def _get_ufunc_and_otypes(self, func, args):
"""Return (ufunc, otypes)."""
# frompyfunc will fail if args is empty
if not args:
raise ValueError('args can not be empty')
if self.otypes:
otypes = self.otypes
nout = len(otypes)
# Note logic here: We only *use* self._ufunc if func is self.pyfunc
# even though we set self._ufunc regardless.
if func is self.pyfunc and self._ufunc is not None:
ufunc = self._ufunc
else:
ufunc = self._ufunc = frompyfunc(func, len(args), nout)
else:
# Get number of outputs and output types by calling the function on
# the first entries of args. We also cache the result to prevent
# the subsequent call when the ufunc is evaluated.
# Assumes that ufunc first evaluates the 0th elements in the input
# arrays (the input values are not checked to ensure this)
inputs = [asarray(_a).flat[0] for _a in args]
outputs = func(*inputs)
# Performance note: profiling indicates that -- for simple
# functions at least -- this wrapping can almost double the
# execution time.
# Hence we make it optional.
if self.cache:
_cache = [outputs]
def _func(*vargs):
if _cache:
return _cache.pop()
else:
return func(*vargs)
else:
_func = func
if isinstance(outputs, tuple):
nout = len(outputs)
else:
nout = 1
outputs = (outputs,)
otypes = ''.join([asarray(outputs[_k]).dtype.char
for _k in range(nout)])
# Performance note: profiling indicates that creating the ufunc is
# not a significant cost compared with wrapping so it seems not
# worth trying to cache this.
ufunc = frompyfunc(_func, len(args), nout)
return ufunc, otypes
def _vectorize_call(self, func, args):
"""Vectorized call to `func` over positional `args`."""
if not args:
_res = func()
else:
ufunc, otypes = self._get_ufunc_and_otypes(func=func, args=args)
# Convert args to object arrays first
inputs = [array(_a, copy=False, subok=True, dtype=object)
for _a in args]
outputs = ufunc(*inputs)
if ufunc.nout == 1:
_res = array(outputs,
copy=False, subok=True, dtype=otypes[0])
else:
_res = tuple([array(_x, copy=False, subok=True, dtype=_t)
for _x, _t in zip(outputs, otypes)])
return _res
def cov(m, y=None, rowvar=1, bias=0, ddof=None, fweights=None, aweights=None):
"""
Estimate a covariance matrix, given data and weights.
Covariance indicates the level to which two variables vary together.
If we examine N-dimensional samples, :math:`X = [x_1, x_2, ... x_N]^T`,
then the covariance matrix element :math:`C_{ij}` is the covariance of
:math:`x_i` and :math:`x_j`. The element :math:`C_{ii}` is the variance
of :math:`x_i`.
See the notes for an outline of the algorithm.
Parameters
----------
m : array_like
A 1-D or 2-D array containing multiple variables and observations.
Each row of `m` represents a variable, and each column a single
observation of all those variables. Also see `rowvar` below.
y : array_like, optional
An additional set of variables and observations. `y` has the same form
as that of `m`.
rowvar : int, optional
If `rowvar` is non-zero (default), then each row represents a
variable, with observations in the columns. Otherwise, the relationship
is transposed: each column represents a variable, while the rows
contain observations.
bias : int, optional
Default normalization is by ``(N - 1)``, where ``N`` corresponds to the
number of observations given (unbiased estimate). If `bias` is 1, then
normalization is by ``N``. These values can be overridden by using the
keyword ``ddof`` in numpy versions >= 1.5.
ddof : int, optional
If not ``None`` the default value implied by `bias` is overridden.
Note that ``ddof=1`` will return the unbiased estimate, even if both
`fweights` and `aweights` are specified, and ``ddof=0`` will return
the simple average. See the notes for the details. The default value
is ``None``.
.. versionadded:: 1.5
fweights : array_like, int, optional
        1-D array of integer frequency weights; the number of times each
observation vector should be repeated.
.. versionadded:: 1.10
aweights : array_like, optional
1-D array of observation vector weights. These relative weights are
typically large for observations considered "important" and smaller for
observations considered less "important". If ``ddof=0`` the array of
weights can be used to assign probabilities to observation vectors.
.. versionadded:: 1.10
Returns
-------
out : ndarray
The covariance matrix of the variables.
See Also
--------
corrcoef : Normalized covariance matrix
Notes
-----
Assume that the observations are in the columns of the observation
array `m` and let ``f = fweights`` and ``a = aweights`` for brevity. The
steps to compute the weighted covariance are as follows::
>>> w = f * a
>>> v1 = np.sum(w)
>>> v2 = np.sum(w * a)
>>> m -= np.sum(m * w, axis=1, keepdims=True) / v1
>>> cov = np.dot(m * w, m.T) * v1 / (v1**2 - ddof * v2)
Note that when ``a == 1``, the normalization factor
``v1 / (v1**2 - ddof * v2)`` goes over to ``1 / (np.sum(f) - ddof)``
as it should.
Examples
--------
Consider two variables, :math:`x_0` and :math:`x_1`, which
correlate perfectly, but in opposite directions:
>>> x = np.array([[0, 2], [1, 1], [2, 0]]).T
>>> x
array([[0, 1, 2],
[2, 1, 0]])
Note how :math:`x_0` increases while :math:`x_1` decreases. The covariance
matrix shows this clearly:
>>> np.cov(x)
array([[ 1., -1.],
[-1., 1.]])
Note that element :math:`C_{0,1}`, which shows the correlation between
:math:`x_0` and :math:`x_1`, is negative.
Further, note how `x` and `y` are combined:
>>> x = [-2.1, -1, 4.3]
>>> y = [3, 1.1, 0.12]
>>> X = np.vstack((x,y))
>>> print np.cov(X)
[[ 11.71 -4.286 ]
[ -4.286 2.14413333]]
>>> print np.cov(x, y)
[[ 11.71 -4.286 ]
[ -4.286 2.14413333]]
>>> print np.cov(x)
11.71
"""
# Check inputs
if ddof is not None and ddof != int(ddof):
raise ValueError(
"ddof must be integer")
# Handles complex arrays too
m = np.asarray(m)
if y is None:
dtype = np.result_type(m, np.float64)
else:
y = np.asarray(y)
dtype = np.result_type(m, y, np.float64)
X = array(m, ndmin=2, dtype=dtype)
if rowvar == 0 and X.shape[0] != 1:
X = X.T
if X.shape[0] == 0:
return np.array([]).reshape(0, 0)
if y is not None:
y = array(y, copy=False, ndmin=2, dtype=dtype)
if rowvar == 0 and y.shape[0] != 1:
y = y.T
X = np.vstack((X, y))
if ddof is None:
if bias == 0:
ddof = 1
else:
ddof = 0
# Get the product of frequencies and weights
w = None
if fweights is not None:
fweights = np.asarray(fweights, dtype=np.float)
if not np.all(fweights == np.around(fweights)):
raise TypeError(
"fweights must be integer")
if fweights.ndim > 1:
raise RuntimeError(
"cannot handle multidimensional fweights")
if fweights.shape[0] != X.shape[1]:
raise RuntimeError(
"incompatible numbers of samples and fweights")
if any(fweights < 0):
raise ValueError(
"fweights cannot be negative")
w = fweights
if aweights is not None:
aweights = np.asarray(aweights, dtype=np.float)
if aweights.ndim > 1:
raise RuntimeError(
"cannot handle multidimensional aweights")
if aweights.shape[0] != X.shape[1]:
raise RuntimeError(
"incompatible numbers of samples and aweights")
if any(aweights < 0):
raise ValueError(
"aweights cannot be negative")
if w is None:
w = aweights
else:
w *= aweights
avg, w_sum = average(X, axis=1, weights=w, returned=True)
w_sum = w_sum[0]
# Determine the normalization
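    # In the weighted case this follows the docstring recipe: with v1 = sum(w)
    # and v2 = sum(w * aweights), dividing by fact = v1 - ddof * v2 / v1 is
    # the same as multiplying by v1 / (v1**2 - ddof * v2).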
if w is None:
fact = float(X.shape[1] - ddof)
elif ddof == 0:
fact = w_sum
elif aweights is None:
fact = w_sum - ddof
else:
fact = w_sum - ddof*sum(w*aweights)/w_sum
if fact <= 0:
warnings.warn("Degrees of freedom <= 0 for slice", RuntimeWarning)
fact = 0.0
X -= avg[:, None]
if w is None:
X_T = X.T
else:
X_T = (X*w).T
return (dot(X, X_T.conj())/fact).squeeze()
def corrcoef(x, y=None, rowvar=1, bias=np._NoValue, ddof=np._NoValue):
"""
Return Pearson product-moment correlation coefficients.
Please refer to the documentation for `cov` for more detail. The
relationship between the correlation coefficient matrix, `R`, and the
covariance matrix, `C`, is
.. math:: R_{ij} = \\frac{ C_{ij} } { \\sqrt{ C_{ii} * C_{jj} } }
The values of `R` are between -1 and 1, inclusive.
Parameters
----------
x : array_like
A 1-D or 2-D array containing multiple variables and observations.
Each row of `x` represents a variable, and each column a single
observation of all those variables. Also see `rowvar` below.
y : array_like, optional
An additional set of variables and observations. `y` has the same
shape as `x`.
rowvar : int, optional
If `rowvar` is non-zero (default), then each row represents a
variable, with observations in the columns. Otherwise, the relationship
is transposed: each column represents a variable, while the rows
contain observations.
bias : _NoValue, optional
        Has no effect, do not use.
.. deprecated:: 1.10.0
ddof : _NoValue, optional
        Has no effect, do not use.
.. deprecated:: 1.10.0
Returns
-------
R : ndarray
The correlation coefficient matrix of the variables.
See Also
--------
cov : Covariance matrix
Notes
-----
This function accepts but discards arguments `bias` and `ddof`. This is
for backwards compatibility with previous versions of this function. These
arguments had no effect on the return values of the function and can be
safely ignored in this and previous versions of numpy.
"""
if bias is not np._NoValue or ddof is not np._NoValue:
# 2015-03-15, 1.10
        warnings.warn('bias and ddof have no effect and are deprecated',
DeprecationWarning)
c = cov(x, y, rowvar)
try:
d = diag(c)
except ValueError: # scalar covariance
# nan if incorrect value (nan, inf, 0), 1 otherwise
return c / c
return c / sqrt(multiply.outer(d, d))
def blackman(M):
"""
Return the Blackman window.
The Blackman window is a taper formed by using the first three
terms of a summation of cosines. It was designed to have close to the
minimal leakage possible. It is close to optimal, only slightly worse
than a Kaiser window.
Parameters
----------
M : int
Number of points in the output window. If zero or less, an empty
array is returned.
Returns
-------
out : ndarray
The window, with the maximum value normalized to one (the value one
appears only if the number of samples is odd).
See Also
--------
bartlett, hamming, hanning, kaiser
Notes
-----
The Blackman window is defined as
.. math:: w(n) = 0.42 - 0.5 \\cos(2\\pi n/M) + 0.08 \\cos(4\\pi n/M)
Most references to the Blackman window come from the signal processing
literature, where it is used as one of many windowing functions for
smoothing values. It is also known as an apodization (which means
"removing the foot", i.e. smoothing discontinuities at the beginning
and end of the sampled signal) or tapering function. It is known as a
"near optimal" tapering function, almost as good (by some measures)
as the kaiser window.
References
----------
Blackman, R.B. and Tukey, J.W., (1958) The measurement of power spectra,
Dover Publications, New York.
Oppenheim, A.V., and R.W. Schafer. Discrete-Time Signal Processing.
Upper Saddle River, NJ: Prentice-Hall, 1999, pp. 468-471.
Examples
--------
>>> np.blackman(12)
array([ -1.38777878e-17, 3.26064346e-02, 1.59903635e-01,
4.14397981e-01, 7.36045180e-01, 9.67046769e-01,
9.67046769e-01, 7.36045180e-01, 4.14397981e-01,
1.59903635e-01, 3.26064346e-02, -1.38777878e-17])
Plot the window and the frequency response:
>>> from numpy.fft import fft, fftshift
>>> window = np.blackman(51)
>>> plt.plot(window)
[<matplotlib.lines.Line2D object at 0x...>]
>>> plt.title("Blackman window")
<matplotlib.text.Text object at 0x...>
>>> plt.ylabel("Amplitude")
<matplotlib.text.Text object at 0x...>
>>> plt.xlabel("Sample")
<matplotlib.text.Text object at 0x...>
>>> plt.show()
>>> plt.figure()
<matplotlib.figure.Figure object at 0x...>
>>> A = fft(window, 2048) / 25.5
>>> mag = np.abs(fftshift(A))
>>> freq = np.linspace(-0.5, 0.5, len(A))
>>> response = 20 * np.log10(mag)
>>> response = np.clip(response, -100, 100)
>>> plt.plot(freq, response)
[<matplotlib.lines.Line2D object at 0x...>]
>>> plt.title("Frequency response of Blackman window")
<matplotlib.text.Text object at 0x...>
>>> plt.ylabel("Magnitude [dB]")
<matplotlib.text.Text object at 0x...>
>>> plt.xlabel("Normalized frequency [cycles per sample]")
<matplotlib.text.Text object at 0x...>
>>> plt.axis('tight')
(-0.5, 0.5, -100.0, ...)
>>> plt.show()
"""
if M < 1:
return array([])
if M == 1:
return ones(1, float)
n = arange(0, M)
return 0.42 - 0.5*cos(2.0*pi*n/(M-1)) + 0.08*cos(4.0*pi*n/(M-1))
def bartlett(M):
"""
Return the Bartlett window.
The Bartlett window is very similar to a triangular window, except
that the end points are at zero. It is often used in signal
processing for tapering a signal, without generating too much
ripple in the frequency domain.
Parameters
----------
M : int
Number of points in the output window. If zero or less, an
empty array is returned.
Returns
-------
out : array
The triangular window, with the maximum value normalized to one
(the value one appears only if the number of samples is odd), with
the first and last samples equal to zero.
See Also
--------
blackman, hamming, hanning, kaiser
Notes
-----
The Bartlett window is defined as
.. math:: w(n) = \\frac{2}{M-1} \\left(
\\frac{M-1}{2} - \\left|n - \\frac{M-1}{2}\\right|
\\right)
Most references to the Bartlett window come from the signal
processing literature, where it is used as one of many windowing
functions for smoothing values. Note that convolution with this
window produces linear interpolation. It is also known as an
apodization (which means"removing the foot", i.e. smoothing
discontinuities at the beginning and end of the sampled signal) or
tapering function. The fourier transform of the Bartlett is the product
of two sinc functions.
Note the excellent discussion in Kanasewich.
References
----------
.. [1] M.S. Bartlett, "Periodogram Analysis and Continuous Spectra",
Biometrika 37, 1-16, 1950.
.. [2] E.R. Kanasewich, "Time Sequence Analysis in Geophysics",
The University of Alberta Press, 1975, pp. 109-110.
.. [3] A.V. Oppenheim and R.W. Schafer, "Discrete-Time Signal
Processing", Prentice-Hall, 1999, pp. 468-471.
.. [4] Wikipedia, "Window function",
http://en.wikipedia.org/wiki/Window_function
.. [5] W.H. Press, B.P. Flannery, S.A. Teukolsky, and W.T. Vetterling,
"Numerical Recipes", Cambridge University Press, 1986, page 429.
Examples
--------
>>> np.bartlett(12)
array([ 0. , 0.18181818, 0.36363636, 0.54545455, 0.72727273,
0.90909091, 0.90909091, 0.72727273, 0.54545455, 0.36363636,
0.18181818, 0. ])
Plot the window and its frequency response (requires SciPy and matplotlib):
>>> from numpy.fft import fft, fftshift
>>> window = np.bartlett(51)
>>> plt.plot(window)
[<matplotlib.lines.Line2D object at 0x...>]
>>> plt.title("Bartlett window")
<matplotlib.text.Text object at 0x...>
>>> plt.ylabel("Amplitude")
<matplotlib.text.Text object at 0x...>
>>> plt.xlabel("Sample")
<matplotlib.text.Text object at 0x...>
>>> plt.show()
>>> plt.figure()
<matplotlib.figure.Figure object at 0x...>
>>> A = fft(window, 2048) / 25.5
>>> mag = np.abs(fftshift(A))
>>> freq = np.linspace(-0.5, 0.5, len(A))
>>> response = 20 * np.log10(mag)
>>> response = np.clip(response, -100, 100)
>>> plt.plot(freq, response)
[<matplotlib.lines.Line2D object at 0x...>]
>>> plt.title("Frequency response of Bartlett window")
<matplotlib.text.Text object at 0x...>
>>> plt.ylabel("Magnitude [dB]")
<matplotlib.text.Text object at 0x...>
>>> plt.xlabel("Normalized frequency [cycles per sample]")
<matplotlib.text.Text object at 0x...>
>>> plt.axis('tight')
(-0.5, 0.5, -100.0, ...)
>>> plt.show()
"""
if M < 1:
return array([])
if M == 1:
return ones(1, float)
n = arange(0, M)
return where(less_equal(n, (M-1)/2.0), 2.0*n/(M-1), 2.0 - 2.0*n/(M-1))
def hanning(M):
"""
Return the Hanning window.
The Hanning window is a taper formed by using a weighted cosine.
Parameters
----------
M : int
Number of points in the output window. If zero or less, an
empty array is returned.
Returns
-------
out : ndarray, shape(M,)
The window, with the maximum value normalized to one (the value
one appears only if `M` is odd).
See Also
--------
bartlett, blackman, hamming, kaiser
Notes
-----
The Hanning window is defined as
.. math:: w(n) = 0.5 - 0.5cos\\left(\\frac{2\\pi{n}}{M-1}\\right)
\\qquad 0 \\leq n \\leq M-1
The Hanning was named for Julius von Hann, an Austrian meteorologist.
It is also known as the Cosine Bell. Some authors prefer that it be
called a Hann window, to help avoid confusion with the very similar
Hamming window.
Most references to the Hanning window come from the signal processing
literature, where it is used as one of many windowing functions for
smoothing values. It is also known as an apodization (which means
"removing the foot", i.e. smoothing discontinuities at the beginning
and end of the sampled signal) or tapering function.
References
----------
.. [1] Blackman, R.B. and Tukey, J.W., (1958) The measurement of power
spectra, Dover Publications, New York.
.. [2] E.R. Kanasewich, "Time Sequence Analysis in Geophysics",
The University of Alberta Press, 1975, pp. 106-108.
.. [3] Wikipedia, "Window function",
http://en.wikipedia.org/wiki/Window_function
.. [4] W.H. Press, B.P. Flannery, S.A. Teukolsky, and W.T. Vetterling,
"Numerical Recipes", Cambridge University Press, 1986, page 425.
Examples
--------
>>> np.hanning(12)
array([ 0. , 0.07937323, 0.29229249, 0.57115742, 0.82743037,
0.97974649, 0.97974649, 0.82743037, 0.57115742, 0.29229249,
0.07937323, 0. ])
Plot the window and its frequency response:
>>> from numpy.fft import fft, fftshift
>>> window = np.hanning(51)
>>> plt.plot(window)
[<matplotlib.lines.Line2D object at 0x...>]
>>> plt.title("Hann window")
<matplotlib.text.Text object at 0x...>
>>> plt.ylabel("Amplitude")
<matplotlib.text.Text object at 0x...>
>>> plt.xlabel("Sample")
<matplotlib.text.Text object at 0x...>
>>> plt.show()
>>> plt.figure()
<matplotlib.figure.Figure object at 0x...>
>>> A = fft(window, 2048) / 25.5
>>> mag = np.abs(fftshift(A))
>>> freq = np.linspace(-0.5, 0.5, len(A))
>>> response = 20 * np.log10(mag)
>>> response = np.clip(response, -100, 100)
>>> plt.plot(freq, response)
[<matplotlib.lines.Line2D object at 0x...>]
>>> plt.title("Frequency response of the Hann window")
<matplotlib.text.Text object at 0x...>
>>> plt.ylabel("Magnitude [dB]")
<matplotlib.text.Text object at 0x...>
>>> plt.xlabel("Normalized frequency [cycles per sample]")
<matplotlib.text.Text object at 0x...>
>>> plt.axis('tight')
(-0.5, 0.5, -100.0, ...)
>>> plt.show()
"""
if M < 1:
return array([])
if M == 1:
return ones(1, float)
n = arange(0, M)
return 0.5 - 0.5*cos(2.0*pi*n/(M-1))
def hamming(M):
"""
Return the Hamming window.
The Hamming window is a taper formed by using a weighted cosine.
Parameters
----------
M : int
Number of points in the output window. If zero or less, an
empty array is returned.
Returns
-------
out : ndarray
The window, with the maximum value normalized to one (the value
one appears only if the number of samples is odd).
See Also
--------
bartlett, blackman, hanning, kaiser
Notes
-----
The Hamming window is defined as
.. math:: w(n) = 0.54 - 0.46cos\\left(\\frac{2\\pi{n}}{M-1}\\right)
\\qquad 0 \\leq n \\leq M-1
The Hamming was named for R. W. Hamming, an associate of J. W. Tukey
and is described in Blackman and Tukey. It was recommended for
smoothing the truncated autocovariance function in the time domain.
Most references to the Hamming window come from the signal processing
literature, where it is used as one of many windowing functions for
smoothing values. It is also known as an apodization (which means
"removing the foot", i.e. smoothing discontinuities at the beginning
and end of the sampled signal) or tapering function.
References
----------
.. [1] Blackman, R.B. and Tukey, J.W., (1958) The measurement of power
spectra, Dover Publications, New York.
.. [2] E.R. Kanasewich, "Time Sequence Analysis in Geophysics", The
University of Alberta Press, 1975, pp. 109-110.
.. [3] Wikipedia, "Window function",
http://en.wikipedia.org/wiki/Window_function
.. [4] W.H. Press, B.P. Flannery, S.A. Teukolsky, and W.T. Vetterling,
"Numerical Recipes", Cambridge University Press, 1986, page 425.
Examples
--------
>>> np.hamming(12)
array([ 0.08 , 0.15302337, 0.34890909, 0.60546483, 0.84123594,
0.98136677, 0.98136677, 0.84123594, 0.60546483, 0.34890909,
0.15302337, 0.08 ])
Plot the window and the frequency response:
>>> from numpy.fft import fft, fftshift
>>> window = np.hamming(51)
>>> plt.plot(window)
[<matplotlib.lines.Line2D object at 0x...>]
>>> plt.title("Hamming window")
<matplotlib.text.Text object at 0x...>
>>> plt.ylabel("Amplitude")
<matplotlib.text.Text object at 0x...>
>>> plt.xlabel("Sample")
<matplotlib.text.Text object at 0x...>
>>> plt.show()
>>> plt.figure()
<matplotlib.figure.Figure object at 0x...>
>>> A = fft(window, 2048) / 25.5
>>> mag = np.abs(fftshift(A))
>>> freq = np.linspace(-0.5, 0.5, len(A))
>>> response = 20 * np.log10(mag)
>>> response = np.clip(response, -100, 100)
>>> plt.plot(freq, response)
[<matplotlib.lines.Line2D object at 0x...>]
>>> plt.title("Frequency response of Hamming window")
<matplotlib.text.Text object at 0x...>
>>> plt.ylabel("Magnitude [dB]")
<matplotlib.text.Text object at 0x...>
>>> plt.xlabel("Normalized frequency [cycles per sample]")
<matplotlib.text.Text object at 0x...>
>>> plt.axis('tight')
(-0.5, 0.5, -100.0, ...)
>>> plt.show()
"""
if M < 1:
return array([])
if M == 1:
return ones(1, float)
n = arange(0, M)
return 0.54 - 0.46*cos(2.0*pi*n/(M-1))
## Code from cephes for i0
_i0A = [
-4.41534164647933937950E-18,
3.33079451882223809783E-17,
-2.43127984654795469359E-16,
1.71539128555513303061E-15,
-1.16853328779934516808E-14,
7.67618549860493561688E-14,
-4.85644678311192946090E-13,
2.95505266312963983461E-12,
-1.72682629144155570723E-11,
9.67580903537323691224E-11,
-5.18979560163526290666E-10,
2.65982372468238665035E-9,
-1.30002500998624804212E-8,
6.04699502254191894932E-8,
-2.67079385394061173391E-7,
1.11738753912010371815E-6,
-4.41673835845875056359E-6,
1.64484480707288970893E-5,
-5.75419501008210370398E-5,
1.88502885095841655729E-4,
-5.76375574538582365885E-4,
1.63947561694133579842E-3,
-4.32430999505057594430E-3,
1.05464603945949983183E-2,
-2.37374148058994688156E-2,
4.93052842396707084878E-2,
-9.49010970480476444210E-2,
1.71620901522208775349E-1,
-3.04682672343198398683E-1,
6.76795274409476084995E-1
]
_i0B = [
-7.23318048787475395456E-18,
-4.83050448594418207126E-18,
4.46562142029675999901E-17,
3.46122286769746109310E-17,
-2.82762398051658348494E-16,
-3.42548561967721913462E-16,
1.77256013305652638360E-15,
3.81168066935262242075E-15,
-9.55484669882830764870E-15,
-4.15056934728722208663E-14,
1.54008621752140982691E-14,
3.85277838274214270114E-13,
7.18012445138366623367E-13,
-1.79417853150680611778E-12,
-1.32158118404477131188E-11,
-3.14991652796324136454E-11,
1.18891471078464383424E-11,
4.94060238822496958910E-10,
3.39623202570838634515E-9,
2.26666899049817806459E-8,
2.04891858946906374183E-7,
2.89137052083475648297E-6,
6.88975834691682398426E-5,
3.36911647825569408990E-3,
8.04490411014108831608E-1
]
def _chbevl(x, vals):
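    # Evaluate the Chebyshev series with coefficients `vals` at `x` using the
    # Clenshaw recurrence; this mirrors the Cephes `chbevl` routine that the
    # coefficient tables above come from.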
b0 = vals[0]
b1 = 0.0
for i in range(1, len(vals)):
b2 = b1
b1 = b0
b0 = x*b1 - b2 + vals[i]
return 0.5*(b0 - b2)
def _i0_1(x):
return exp(x) * _chbevl(x/2.0-2, _i0A)
def _i0_2(x):
return exp(x) * _chbevl(32.0/x - 2.0, _i0B) / sqrt(x)
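# The two helpers above implement the Cephes two-interval expansion of i0:
# _i0_1 handles 0 <= x <= 8 with a Chebyshev series in x/2 - 2, and _i0_2
# handles x > 8 with a series in 32/x - 2; both are scaled by exp(x), with an
# additional 1/sqrt(x) factor on the large-x branch.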
def i0(x):
"""
Modified Bessel function of the first kind, order 0.
Usually denoted :math:`I_0`. This function does broadcast, but will *not*
"up-cast" int dtype arguments unless accompanied by at least one float or
complex dtype argument (see Raises below).
Parameters
----------
x : array_like, dtype float or complex
Argument of the Bessel function.
Returns
-------
out : ndarray, shape = x.shape, dtype = x.dtype
The modified Bessel function evaluated at each of the elements of `x`.
Raises
------
TypeError: array cannot be safely cast to required type
If argument consists exclusively of int dtypes.
See Also
--------
scipy.special.iv, scipy.special.ive
Notes
-----
We use the algorithm published by Clenshaw [1]_ and referenced by
Abramowitz and Stegun [2]_, for which the function domain is
partitioned into the two intervals [0,8] and (8,inf), and Chebyshev
polynomial expansions are employed in each interval. Relative error on
the domain [0,30] using IEEE arithmetic is documented [3]_ as having a
peak of 5.8e-16 with an rms of 1.4e-16 (n = 30000).
References
----------
.. [1] C. W. Clenshaw, "Chebyshev series for mathematical functions", in
*National Physical Laboratory Mathematical Tables*, vol. 5, London:
Her Majesty's Stationery Office, 1962.
.. [2] M. Abramowitz and I. A. Stegun, *Handbook of Mathematical
Functions*, 10th printing, New York: Dover, 1964, pp. 379.
http://www.math.sfu.ca/~cbm/aands/page_379.htm
.. [3] http://kobesearch.cpan.org/htdocs/Math-Cephes/Math/Cephes.html
Examples
--------
>>> np.i0([0.])
array(1.0)
>>> np.i0([0., 1. + 2j])
array([ 1.00000000+0.j , 0.18785373+0.64616944j])
"""
x = atleast_1d(x).copy()
y = empty_like(x)
ind = (x < 0)
x[ind] = -x[ind]
ind = (x <= 8.0)
y[ind] = _i0_1(x[ind])
ind2 = ~ind
y[ind2] = _i0_2(x[ind2])
return y.squeeze()
## End of cephes code for i0
def kaiser(M, beta):
"""
Return the Kaiser window.
The Kaiser window is a taper formed by using a Bessel function.
Parameters
----------
M : int
Number of points in the output window. If zero or less, an
empty array is returned.
beta : float
Shape parameter for window.
Returns
-------
out : array
The window, with the maximum value normalized to one (the value
one appears only if the number of samples is odd).
See Also
--------
bartlett, blackman, hamming, hanning
Notes
-----
The Kaiser window is defined as
.. math:: w(n) = I_0\\left( \\beta \\sqrt{1-\\frac{4n^2}{(M-1)^2}}
\\right)/I_0(\\beta)
with
.. math:: \\quad -\\frac{M-1}{2} \\leq n \\leq \\frac{M-1}{2},
where :math:`I_0` is the modified zeroth-order Bessel function.
The Kaiser was named for Jim Kaiser, who discovered a simple
approximation to the DPSS window based on Bessel functions. The Kaiser
window is a very good approximation to the Digital Prolate Spheroidal
Sequence, or Slepian window, which is the transform which maximizes the
energy in the main lobe of the window relative to total energy.
The Kaiser can approximate many other windows by varying the beta
parameter.
==== =======================
beta Window shape
==== =======================
0 Rectangular
5 Similar to a Hamming
6 Similar to a Hanning
8.6 Similar to a Blackman
==== =======================
A beta value of 14 is probably a good starting point. Note that as beta
gets large, the window narrows, and so the number of samples needs to be
large enough to sample the increasingly narrow spike, otherwise NaNs will
get returned.
Most references to the Kaiser window come from the signal processing
literature, where it is used as one of many windowing functions for
smoothing values. It is also known as an apodization (which means
"removing the foot", i.e. smoothing discontinuities at the beginning
and end of the sampled signal) or tapering function.
References
----------
.. [1] J. F. Kaiser, "Digital Filters" - Ch 7 in "Systems analysis by
digital computer", Editors: F.F. Kuo and J.F. Kaiser, p 218-285.
John Wiley and Sons, New York, (1966).
.. [2] E.R. Kanasewich, "Time Sequence Analysis in Geophysics", The
University of Alberta Press, 1975, pp. 177-178.
.. [3] Wikipedia, "Window function",
http://en.wikipedia.org/wiki/Window_function
Examples
--------
>>> np.kaiser(12, 14)
array([ 7.72686684e-06, 3.46009194e-03, 4.65200189e-02,
2.29737120e-01, 5.99885316e-01, 9.45674898e-01,
9.45674898e-01, 5.99885316e-01, 2.29737120e-01,
4.65200189e-02, 3.46009194e-03, 7.72686684e-06])
Plot the window and the frequency response:
>>> from numpy.fft import fft, fftshift
>>> window = np.kaiser(51, 14)
>>> plt.plot(window)
[<matplotlib.lines.Line2D object at 0x...>]
>>> plt.title("Kaiser window")
<matplotlib.text.Text object at 0x...>
>>> plt.ylabel("Amplitude")
<matplotlib.text.Text object at 0x...>
>>> plt.xlabel("Sample")
<matplotlib.text.Text object at 0x...>
>>> plt.show()
>>> plt.figure()
<matplotlib.figure.Figure object at 0x...>
>>> A = fft(window, 2048) / 25.5
>>> mag = np.abs(fftshift(A))
>>> freq = np.linspace(-0.5, 0.5, len(A))
>>> response = 20 * np.log10(mag)
>>> response = np.clip(response, -100, 100)
>>> plt.plot(freq, response)
[<matplotlib.lines.Line2D object at 0x...>]
>>> plt.title("Frequency response of Kaiser window")
<matplotlib.text.Text object at 0x...>
>>> plt.ylabel("Magnitude [dB]")
<matplotlib.text.Text object at 0x...>
>>> plt.xlabel("Normalized frequency [cycles per sample]")
<matplotlib.text.Text object at 0x...>
>>> plt.axis('tight')
(-0.5, 0.5, -100.0, ...)
>>> plt.show()
"""
from numpy.dual import i0
if M == 1:
return np.array([1.])
n = arange(0, M)
alpha = (M-1)/2.0
return i0(beta * sqrt(1-((n-alpha)/alpha)**2.0))/i0(float(beta))
def sinc(x):
"""
Return the sinc function.
The sinc function is :math:`\\sin(\\pi x)/(\\pi x)`.
Parameters
----------
x : ndarray
        Array (possibly multi-dimensional) of values for which to
calculate ``sinc(x)``.
Returns
-------
out : ndarray
``sinc(x)``, which has the same shape as the input.
Notes
-----
``sinc(0)`` is the limit value 1.
The name sinc is short for "sine cardinal" or "sinus cardinalis".
The sinc function is used in various signal processing applications,
including in anti-aliasing, in the construction of a Lanczos resampling
filter, and in interpolation.
For bandlimited interpolation of discrete-time signals, the ideal
interpolation kernel is proportional to the sinc function.
References
----------
.. [1] Weisstein, Eric W. "Sinc Function." From MathWorld--A Wolfram Web
Resource. http://mathworld.wolfram.com/SincFunction.html
.. [2] Wikipedia, "Sinc function",
http://en.wikipedia.org/wiki/Sinc_function
Examples
--------
>>> x = np.linspace(-4, 4, 41)
>>> np.sinc(x)
array([ -3.89804309e-17, -4.92362781e-02, -8.40918587e-02,
-8.90384387e-02, -5.84680802e-02, 3.89804309e-17,
6.68206631e-02, 1.16434881e-01, 1.26137788e-01,
8.50444803e-02, -3.89804309e-17, -1.03943254e-01,
-1.89206682e-01, -2.16236208e-01, -1.55914881e-01,
3.89804309e-17, 2.33872321e-01, 5.04551152e-01,
7.56826729e-01, 9.35489284e-01, 1.00000000e+00,
9.35489284e-01, 7.56826729e-01, 5.04551152e-01,
2.33872321e-01, 3.89804309e-17, -1.55914881e-01,
-2.16236208e-01, -1.89206682e-01, -1.03943254e-01,
-3.89804309e-17, 8.50444803e-02, 1.26137788e-01,
1.16434881e-01, 6.68206631e-02, 3.89804309e-17,
-5.84680802e-02, -8.90384387e-02, -8.40918587e-02,
-4.92362781e-02, -3.89804309e-17])
>>> plt.plot(x, np.sinc(x))
[<matplotlib.lines.Line2D object at 0x...>]
>>> plt.title("Sinc Function")
<matplotlib.text.Text object at 0x...>
>>> plt.ylabel("Amplitude")
<matplotlib.text.Text object at 0x...>
>>> plt.xlabel("X")
<matplotlib.text.Text object at 0x...>
>>> plt.show()
It works in 2-D as well:
>>> x = np.linspace(-4, 4, 401)
>>> xx = np.outer(x, x)
>>> plt.imshow(np.sinc(xx))
<matplotlib.image.AxesImage object at 0x...>
"""
x = np.asanyarray(x)
y = pi * where(x == 0, 1.0e-20, x)
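    # where() substitutes a tiny nonzero value for exact zeros so the division
    # below is safe; at those points sin(y)/y evaluates to 1.0 to double
    # precision, which is the limiting value of sinc at 0.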
return sin(y)/y
def msort(a):
"""
Return a copy of an array sorted along the first axis.
Parameters
----------
a : array_like
Array to be sorted.
Returns
-------
sorted_array : ndarray
Array of the same type and shape as `a`.
See Also
--------
sort
Notes
-----
``np.msort(a)`` is equivalent to ``np.sort(a, axis=0)``.
"""
b = array(a, subok=True, copy=True)
b.sort(0)
return b
def _ureduce(a, func, **kwargs):
"""
Internal Function.
Call `func` with `a` as first argument swapping the axes to use extended
axis on functions that don't support it natively.
Returns result and a.shape with axis dims set to 1.
Parameters
----------
a : array_like
Input array or object that can be converted to an array.
func : callable
        Reduction function capable of receiving an axis argument.
        It is called with `a` as first argument followed by `kwargs`.
kwargs : keyword arguments
additional keyword arguments to pass to `func`.
Returns
-------
result : tuple
Result of func(a, **kwargs) and a.shape with axis dims set to 1
which can be used to reshape the result to the same shape a ufunc with
keepdims=True would produce.
"""
a = np.asanyarray(a)
axis = kwargs.get('axis', None)
if axis is not None:
keepdim = list(a.shape)
nd = a.ndim
try:
axis = operator.index(axis)
if axis >= nd or axis < -nd:
raise IndexError("axis %d out of bounds (%d)" % (axis, a.ndim))
keepdim[axis] = 1
except TypeError:
sax = set()
for x in axis:
if x >= nd or x < -nd:
raise IndexError("axis %d out of bounds (%d)" % (x, nd))
if x in sax:
raise ValueError("duplicate value in axis")
sax.add(x % nd)
keepdim[x] = 1
keep = sax.symmetric_difference(frozenset(range(nd)))
nkeep = len(keep)
# swap axis that should not be reduced to front
for i, s in enumerate(sorted(keep)):
a = a.swapaxes(i, s)
# merge reduced axis
a = a.reshape(a.shape[:nkeep] + (-1,))
kwargs['axis'] = -1
else:
keepdim = [1] * a.ndim
r = func(a, **kwargs)
return r, keepdim
def median(a, axis=None, out=None, overwrite_input=False, keepdims=False):
"""
Compute the median along the specified axis.
Returns the median of the array elements.
Parameters
----------
a : array_like
Input array or object that can be converted to an array.
axis : int or sequence of int, optional
Axis along which the medians are computed. The default (axis=None)
is to compute the median along a flattened version of the array.
A sequence of axes is supported since version 1.9.0.
out : ndarray, optional
Alternative output array in which to place the result. It must have
the same shape and buffer length as the expected output, but the
type (of the output) will be cast if necessary.
overwrite_input : bool, optional
If True, then allow use of memory of input array (a) for
calculations. The input array will be modified by the call to
median. This will save memory when you do not need to preserve the
contents of the input array. Treat the input as undefined, but it
will probably be fully or partially sorted. Default is False. Note
that, if `overwrite_input` is True and the input is not already an
ndarray, an error will be raised.
keepdims : bool, optional
If this is set to True, the axes which are reduced are left
in the result as dimensions with size one. With this option,
the result will broadcast correctly against the original `arr`.
.. versionadded:: 1.9.0
Returns
-------
median : ndarray
A new array holding the result (unless `out` is specified, in which
case that array is returned instead). If the input contains
integers, or floats of smaller precision than 64, then the output
data-type is float64. Otherwise, the output data-type is the same
as that of the input.
See Also
--------
mean, percentile
Notes
-----
Given a vector V of length N, the median of V is the middle value of
a sorted copy of V, ``V_sorted`` - i.e., ``V_sorted[(N-1)/2]``, when N is
odd. When N is even, it is the average of the two middle values of
``V_sorted``.
Examples
--------
>>> a = np.array([[10, 7, 4], [3, 2, 1]])
>>> a
array([[10, 7, 4],
[ 3, 2, 1]])
>>> np.median(a)
3.5
>>> np.median(a, axis=0)
array([ 6.5, 4.5, 2.5])
>>> np.median(a, axis=1)
array([ 7., 2.])
>>> m = np.median(a, axis=0)
>>> out = np.zeros_like(m)
>>> np.median(a, axis=0, out=m)
array([ 6.5, 4.5, 2.5])
>>> m
array([ 6.5, 4.5, 2.5])
>>> b = a.copy()
>>> np.median(b, axis=1, overwrite_input=True)
array([ 7., 2.])
>>> assert not np.all(a==b)
>>> b = a.copy()
>>> np.median(b, axis=None, overwrite_input=True)
3.5
>>> assert not np.all(a==b)
"""
r, k = _ureduce(a, func=_median, axis=axis, out=out,
overwrite_input=overwrite_input)
if keepdims:
return r.reshape(k)
else:
return r
def _median(a, axis=None, out=None, overwrite_input=False):
    # can't reasonably be implemented in terms of percentile as we have to
# call mean to not break astropy
a = np.asanyarray(a)
# Set the partition indexes
if axis is None:
sz = a.size
else:
sz = a.shape[axis]
if sz % 2 == 0:
szh = sz // 2
kth = [szh - 1, szh]
else:
kth = [(sz - 1) // 2]
# Check if the array contains any nan's
if np.issubdtype(a.dtype, np.inexact):
kth.append(-1)
if overwrite_input:
if axis is None:
part = a.ravel()
part.partition(kth)
else:
a.partition(kth, axis=axis)
part = a
else:
part = partition(a, kth, axis=axis)
if part.shape == ():
# make 0-D arrays work
return part.item()
if axis is None:
axis = 0
indexer = [slice(None)] * part.ndim
index = part.shape[axis] // 2
if part.shape[axis] % 2 == 1:
# index with slice to allow mean (below) to work
indexer[axis] = slice(index, index+1)
else:
indexer[axis] = slice(index-1, index+1)
# Check if the array contains any nan's
if np.issubdtype(a.dtype, np.inexact):
# warn and return nans like mean would
rout = mean(part[indexer], axis=axis, out=out)
part = np.rollaxis(part, axis, part.ndim)
n = np.isnan(part[..., -1])
if rout.ndim == 0:
if n == True:
warnings.warn("Invalid value encountered in median",
RuntimeWarning)
if out is not None:
out[...] = a.dtype.type(np.nan)
rout = out
else:
rout = a.dtype.type(np.nan)
elif np.count_nonzero(n.ravel()) > 0:
warnings.warn("Invalid value encountered in median for" +
" %d results" % np.count_nonzero(n.ravel()),
RuntimeWarning)
rout[n] = np.nan
return rout
else:
# if there are no nans
# Use mean in odd and even case to coerce data type
# and check, use out array.
return mean(part[indexer], axis=axis, out=out)
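# Worked example of the partition indices chosen above (illustrative):
#
#     sz = 6 (even)  ->  kth = [2, 3]  (median is the mean of the two middle values)
#     sz = 5 (odd)   ->  kth = [2]     (median is the single middle value)
#
# For inexact dtypes, -1 is appended so that any NaN (which partitions to the
# end) lands in the last position and can be detected afterwards.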
def percentile(a, q, axis=None, out=None,
overwrite_input=False, interpolation='linear', keepdims=False):
"""
Compute the qth percentile of the data along the specified axis.
Returns the qth percentile of the array elements.
Parameters
----------
a : array_like
Input array or object that can be converted to an array.
q : float in range of [0,100] (or sequence of floats)
Percentile to compute which must be between 0 and 100 inclusive.
axis : int or sequence of int, optional
Axis along which the percentiles are computed. The default (None)
is to compute the percentiles along a flattened version of the array.
A sequence of axes is supported since version 1.9.0.
out : ndarray, optional
Alternative output array in which to place the result. It must
have the same shape and buffer length as the expected output,
but the type (of the output) will be cast if necessary.
overwrite_input : bool, optional
If True, then allow use of memory of input array `a` for
calculations. The input array will be modified by the call to
percentile. This will save memory when you do not need to preserve
the contents of the input array. In this case you should not make
any assumptions about the content of the passed in array `a` after
this function completes -- treat it as undefined. Default is False.
Note that, if the `a` input is not already an array this parameter
will have no effect, `a` will be converted to an array internally
regardless of the value of this parameter.
interpolation : {'linear', 'lower', 'higher', 'midpoint', 'nearest'}
This optional parameter specifies the interpolation method to use,
when the desired quantile lies between two data points `i` and `j`:
* linear: `i + (j - i) * fraction`, where `fraction` is the
fractional part of the index surrounded by `i` and `j`.
* lower: `i`.
* higher: `j`.
* nearest: `i` or `j` whichever is nearest.
* midpoint: (`i` + `j`) / 2.
.. versionadded:: 1.9.0
keepdims : bool, optional
If this is set to True, the axes which are reduced are left
in the result as dimensions with size one. With this option,
the result will broadcast correctly against the original array `a`.
.. versionadded:: 1.9.0
Returns
-------
percentile : scalar or ndarray
If a single percentile `q` is given and axis=None a scalar is
returned. If multiple percentiles `q` are given an array holding
the result is returned. The results are listed in the first axis.
        If `out` is specified, that array is returned
        instead. If the input contains integers, or floats of smaller
precision than 64, then the output data-type is float64. Otherwise,
the output data-type is the same as that of the input.
See Also
--------
mean, median
Notes
-----
Given a vector V of length N, the q-th percentile of V is the q-th ranked
value in a sorted copy of V. The values and distances of the two
nearest neighbors as well as the `interpolation` parameter will
determine the percentile if the normalized ranking does not match q
exactly. This function is the same as the median if ``q=50``, the same
as the minimum if ``q=0`` and the same as the maximum if ``q=100``.
Examples
--------
>>> a = np.array([[10, 7, 4], [3, 2, 1]])
>>> a
array([[10, 7, 4],
[ 3, 2, 1]])
>>> np.percentile(a, 50)
array([ 3.5])
>>> np.percentile(a, 50, axis=0)
array([[ 6.5, 4.5, 2.5]])
>>> np.percentile(a, 50, axis=1)
array([[ 7.],
[ 2.]])
>>> m = np.percentile(a, 50, axis=0)
>>> out = np.zeros_like(m)
>>> np.percentile(a, 50, axis=0, out=m)
array([[ 6.5, 4.5, 2.5]])
>>> m
array([[ 6.5, 4.5, 2.5]])
>>> b = a.copy()
>>> np.percentile(b, 50, axis=1, overwrite_input=True)
array([[ 7.],
[ 2.]])
>>> assert not np.all(a==b)
>>> b = a.copy()
>>> np.percentile(b, 50, axis=None, overwrite_input=True)
array([ 3.5])
"""
q = array(q, dtype=np.float64, copy=True)
r, k = _ureduce(a, func=_percentile, q=q, axis=axis, out=out,
overwrite_input=overwrite_input,
interpolation=interpolation)
if keepdims:
if q.ndim == 0:
return r.reshape(k)
else:
return r.reshape([len(q)] + k)
else:
return r
def _percentile(a, q, axis=None, out=None,
overwrite_input=False, interpolation='linear', keepdims=False):
a = asarray(a)
if q.ndim == 0:
        # Do not allow 0-d arrays because the following code fails for scalars
zerod = True
q = q[None]
else:
zerod = False
# avoid expensive reductions, relevant for arrays with < O(1000) elements
if q.size < 10:
for i in range(q.size):
if q[i] < 0. or q[i] > 100.:
raise ValueError("Percentiles must be in the range [0,100]")
q[i] /= 100.
else:
# faster than any()
if np.count_nonzero(q < 0.) or np.count_nonzero(q > 100.):
raise ValueError("Percentiles must be in the range [0,100]")
q /= 100.
    # prepare a for partitioning
if overwrite_input:
if axis is None:
ap = a.ravel()
else:
ap = a
else:
if axis is None:
ap = a.flatten()
else:
ap = a.copy()
if axis is None:
axis = 0
Nx = ap.shape[axis]
indices = q * (Nx - 1)
# round fractional indices according to interpolation method
if interpolation == 'lower':
indices = floor(indices).astype(intp)
elif interpolation == 'higher':
indices = ceil(indices).astype(intp)
elif interpolation == 'midpoint':
indices = floor(indices) + 0.5
elif interpolation == 'nearest':
indices = around(indices).astype(intp)
elif interpolation == 'linear':
pass # keep index as fraction and interpolate
else:
raise ValueError(
"interpolation can only be 'linear', 'lower' 'higher', "
"'midpoint', or 'nearest'")
n = np.array(False, dtype=bool) # check for nan's flag
if indices.dtype == intp: # take the points along axis
# Check if the array contains any nan's
if np.issubdtype(a.dtype, np.inexact):
indices = concatenate((indices, [-1]))
ap.partition(indices, axis=axis)
# ensure axis with qth is first
ap = np.rollaxis(ap, axis, 0)
axis = 0
# Check if the array contains any nan's
if np.issubdtype(a.dtype, np.inexact):
indices = indices[:-1]
n = np.isnan(ap[-1:, ...])
if zerod:
indices = indices[0]
r = take(ap, indices, axis=axis, out=out)
else: # weight the points above and below the indices
indices_below = floor(indices).astype(intp)
indices_above = indices_below + 1
indices_above[indices_above > Nx - 1] = Nx - 1
# Check if the array contains any nan's
if np.issubdtype(a.dtype, np.inexact):
indices_above = concatenate((indices_above, [-1]))
weights_above = indices - indices_below
weights_below = 1.0 - weights_above
weights_shape = [1, ] * ap.ndim
weights_shape[axis] = len(indices)
weights_below.shape = weights_shape
weights_above.shape = weights_shape
ap.partition(concatenate((indices_below, indices_above)), axis=axis)
# ensure axis with qth is first
ap = np.rollaxis(ap, axis, 0)
weights_below = np.rollaxis(weights_below, axis, 0)
weights_above = np.rollaxis(weights_above, axis, 0)
axis = 0
# Check if the array contains any nan's
if np.issubdtype(a.dtype, np.inexact):
indices_above = indices_above[:-1]
n = np.isnan(ap[-1:, ...])
x1 = take(ap, indices_below, axis=axis) * weights_below
x2 = take(ap, indices_above, axis=axis) * weights_above
# ensure axis with qth is first
x1 = np.rollaxis(x1, axis, 0)
x2 = np.rollaxis(x2, axis, 0)
if zerod:
x1 = x1.squeeze(0)
x2 = x2.squeeze(0)
if out is not None:
r = add(x1, x2, out=out)
else:
r = add(x1, x2)
if np.any(n):
warnings.warn("Invalid value encountered in median",
RuntimeWarning)
if zerod:
if ap.ndim == 1:
if out is not None:
out[...] = a.dtype.type(np.nan)
r = out
else:
r = a.dtype.type(np.nan)
else:
r[..., n.squeeze(0)] = a.dtype.type(np.nan)
else:
if r.ndim == 1:
r[:] = a.dtype.type(np.nan)
else:
r[..., n.repeat(q.size, 0)] = a.dtype.type(np.nan)
return r
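# Worked example of the linear-interpolation branch above (illustrative), for
# the sorted data [1, 2, 3, 4, 5, 6] (Nx = 6) and q = 35:
#
#     indices       = 0.35 * (6 - 1) = 1.75
#     indices_below = 1, indices_above = 2
#     weights_below = 0.25, weights_above = 0.75
#     result        = 2 * 0.25 + 3 * 0.75 = 2.75
#
# which is what np.percentile([1, 2, 3, 4, 5, 6], 35) returns.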
def trapz(y, x=None, dx=1.0, axis=-1):
"""
Integrate along the given axis using the composite trapezoidal rule.
Integrate `y` (`x`) along given axis.
Parameters
----------
y : array_like
Input array to integrate.
x : array_like, optional
If `x` is None, then spacing between all `y` elements is `dx`.
dx : scalar, optional
If `x` is None, spacing given by `dx` is assumed. Default is 1.
axis : int, optional
Specify the axis.
Returns
-------
trapz : float
Definite integral as approximated by trapezoidal rule.
See Also
--------
sum, cumsum
Notes
-----
    Image [2]_ illustrates the trapezoidal rule -- y-axis locations of points
    will be taken from the `y` array; by default the x-axis distances between
    points will be 1.0, alternatively they can be provided with the `x` array
    or with the `dx` scalar. The return value will be equal to the combined
    area under the red lines.
References
----------
.. [1] Wikipedia page: http://en.wikipedia.org/wiki/Trapezoidal_rule
.. [2] Illustration image:
http://en.wikipedia.org/wiki/File:Composite_trapezoidal_rule_illustration.png
Examples
--------
>>> np.trapz([1,2,3])
4.0
>>> np.trapz([1,2,3], x=[4,6,8])
8.0
>>> np.trapz([1,2,3], dx=2)
8.0
>>> a = np.arange(6).reshape(2, 3)
>>> a
array([[0, 1, 2],
[3, 4, 5]])
>>> np.trapz(a, axis=0)
array([ 1.5, 2.5, 3.5])
>>> np.trapz(a, axis=1)
array([ 2., 8.])
"""
y = asanyarray(y)
if x is None:
d = dx
else:
x = asanyarray(x)
if x.ndim == 1:
d = diff(x)
# reshape to correct shape
shape = [1]*y.ndim
shape[axis] = d.shape[0]
d = d.reshape(shape)
else:
d = diff(x, axis=axis)
nd = len(y.shape)
slice1 = [slice(None)]*nd
slice2 = [slice(None)]*nd
slice1[axis] = slice(1, None)
slice2[axis] = slice(None, -1)
try:
ret = (d * (y[slice1] + y[slice2]) / 2.0).sum(axis)
except ValueError:
# Operations didn't work, cast to ndarray
d = np.asarray(d)
y = np.asarray(y)
ret = add.reduce(d * (y[slice1]+y[slice2])/2.0, axis)
return ret
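# Worked check of the docstring examples above (illustrative): for y = [1, 2, 3]
# with unit spacing, the composite trapezoidal rule gives
# (1 + 2)/2 + (2 + 3)/2 = 1.5 + 2.5 = 4.0, and with dx=2 both terms double,
# giving 8.0.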
#always succeed
def add_newdoc(place, obj, doc):
"""Adds documentation to obj which is in module place.
If doc is a string add it to obj as a docstring
If doc is a tuple, then the first element is interpreted as
an attribute of obj and the second as the docstring
(method, docstring)
If doc is a list, then each element of the list should be a
sequence of length two --> [(method1, docstring1),
(method2, docstring2), ...]
This routine never raises an error.
    This routine cannot modify read-only docstrings, such as those
    found on new-style classes or built-in functions. Because this
    routine never raises an error, the caller must check manually
    that the docstrings were changed.
"""
try:
new = getattr(__import__(place, globals(), {}, [obj]), obj)
if isinstance(doc, str):
add_docstring(new, doc.strip())
elif isinstance(doc, tuple):
add_docstring(getattr(new, doc[0]), doc[1].strip())
elif isinstance(doc, list):
for val in doc:
add_docstring(getattr(new, val[0]), val[1].strip())
except:
pass
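# Illustrative calls showing the three accepted forms of `doc` (the module and
# attribute names below are hypothetical, not taken from the original source):
#
#     add_newdoc('mymodule', 'MyClass', 'Class docstring.')
#     add_newdoc('mymodule', 'MyClass', ('method_a', 'Docstring for method_a.'))
#     add_newdoc('mymodule', 'MyClass', [('method_a', 'doc a'),
#                                        ('method_b', 'doc b')])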
# Based on scitools meshgrid
def meshgrid(*xi, **kwargs):
"""
Return coordinate matrices from coordinate vectors.
Make N-D coordinate arrays for vectorized evaluations of
N-D scalar/vector fields over N-D grids, given
one-dimensional coordinate arrays x1, x2,..., xn.
.. versionchanged:: 1.9
1-D and 0-D cases are allowed.
Parameters
----------
x1, x2,..., xn : array_like
1-D arrays representing the coordinates of a grid.
indexing : {'xy', 'ij'}, optional
Cartesian ('xy', default) or matrix ('ij') indexing of output.
See Notes for more details.
.. versionadded:: 1.7.0
sparse : bool, optional
If True a sparse grid is returned in order to conserve memory.
Default is False.
.. versionadded:: 1.7.0
copy : bool, optional
        If False, views into the original arrays are returned in order to
conserve memory. Default is True. Please note that
``sparse=False, copy=False`` will likely return non-contiguous
arrays. Furthermore, more than one element of a broadcast array
may refer to a single memory location. If you need to write to the
arrays, make copies first.
.. versionadded:: 1.7.0
Returns
-------
X1, X2,..., XN : ndarray
        For vectors `x1`, `x2`,..., `xn` with lengths ``Ni=len(xi)``,
return ``(N1, N2, N3,...Nn)`` shaped arrays if indexing='ij'
or ``(N2, N1, N3,...Nn)`` shaped arrays if indexing='xy'
with the elements of `xi` repeated to fill the matrix along
the first dimension for `x1`, the second for `x2` and so on.
Notes
-----
This function supports both indexing conventions through the indexing
keyword argument. Giving the string 'ij' returns a meshgrid with
matrix indexing, while 'xy' returns a meshgrid with Cartesian indexing.
In the 2-D case with inputs of length M and N, the outputs are of shape
(N, M) for 'xy' indexing and (M, N) for 'ij' indexing. In the 3-D case
with inputs of length M, N and P, outputs are of shape (N, M, P) for
'xy' indexing and (M, N, P) for 'ij' indexing. The difference is
illustrated by the following code snippet::
xv, yv = meshgrid(x, y, sparse=False, indexing='ij')
for i in range(nx):
for j in range(ny):
# treat xv[i,j], yv[i,j]
xv, yv = meshgrid(x, y, sparse=False, indexing='xy')
for i in range(nx):
for j in range(ny):
# treat xv[j,i], yv[j,i]
In the 1-D and 0-D case, the indexing and sparse keywords have no effect.
See Also
--------
index_tricks.mgrid : Construct a multi-dimensional "meshgrid"
using indexing notation.
index_tricks.ogrid : Construct an open multi-dimensional "meshgrid"
using indexing notation.
Examples
--------
>>> nx, ny = (3, 2)
>>> x = np.linspace(0, 1, nx)
>>> y = np.linspace(0, 1, ny)
>>> xv, yv = meshgrid(x, y)
>>> xv
array([[ 0. , 0.5, 1. ],
[ 0. , 0.5, 1. ]])
>>> yv
array([[ 0., 0., 0.],
[ 1., 1., 1.]])
>>> xv, yv = meshgrid(x, y, sparse=True) # make sparse output arrays
>>> xv
array([[ 0. , 0.5, 1. ]])
>>> yv
array([[ 0.],
[ 1.]])
    `meshgrid` is very useful for evaluating functions on a grid.
>>> x = np.arange(-5, 5, 0.1)
>>> y = np.arange(-5, 5, 0.1)
>>> xx, yy = meshgrid(x, y, sparse=True)
>>> z = np.sin(xx**2 + yy**2) / (xx**2 + yy**2)
>>> h = plt.contourf(x,y,z)
"""
ndim = len(xi)
copy_ = kwargs.pop('copy', True)
sparse = kwargs.pop('sparse', False)
indexing = kwargs.pop('indexing', 'xy')
if kwargs:
raise TypeError("meshgrid() got an unexpected keyword argument '%s'"
% (list(kwargs)[0],))
if indexing not in ['xy', 'ij']:
raise ValueError(
"Valid values for `indexing` are 'xy' and 'ij'.")
s0 = (1,) * ndim
output = [np.asanyarray(x).reshape(s0[:i] + (-1,) + s0[i + 1::])
for i, x in enumerate(xi)]
shape = [x.size for x in output]
if indexing == 'xy' and ndim > 1:
# switch first and second axis
output[0].shape = (1, -1) + (1,)*(ndim - 2)
output[1].shape = (-1, 1) + (1,)*(ndim - 2)
shape[0], shape[1] = shape[1], shape[0]
if sparse:
if copy_:
return [x.copy() for x in output]
else:
return output
else:
# Return the full N-D matrix (not only the 1-D vector)
if copy_:
mult_fact = np.ones(shape, dtype=int)
return [x * mult_fact for x in output]
else:
return np.broadcast_arrays(*output)
def delete(arr, obj, axis=None):
"""
Return a new array with sub-arrays along an axis deleted. For a one
dimensional array, this returns those entries not returned by
`arr[obj]`.
Parameters
----------
arr : array_like
Input array.
obj : slice, int or array of ints
Indicate which sub-arrays to remove.
axis : int, optional
The axis along which to delete the subarray defined by `obj`.
If `axis` is None, `obj` is applied to the flattened array.
Returns
-------
out : ndarray
A copy of `arr` with the elements specified by `obj` removed. Note
that `delete` does not occur in-place. If `axis` is None, `out` is
a flattened array.
See Also
--------
insert : Insert elements into an array.
append : Append elements at the end of an array.
Notes
-----
Often it is preferable to use a boolean mask. For example:
>>> mask = np.ones(len(arr), dtype=bool)
>>> mask[[0,2,4]] = False
>>> result = arr[mask,...]
Is equivalent to `np.delete(arr, [0,2,4], axis=0)`, but allows further
use of `mask`.
Examples
--------
>>> arr = np.array([[1,2,3,4], [5,6,7,8], [9,10,11,12]])
>>> arr
array([[ 1, 2, 3, 4],
[ 5, 6, 7, 8],
[ 9, 10, 11, 12]])
>>> np.delete(arr, 1, 0)
array([[ 1, 2, 3, 4],
[ 9, 10, 11, 12]])
>>> np.delete(arr, np.s_[::2], 1)
array([[ 2, 4],
[ 6, 8],
[10, 12]])
>>> np.delete(arr, [1,3,5], None)
array([ 1, 3, 5, 7, 8, 9, 10, 11, 12])
"""
wrap = None
if type(arr) is not ndarray:
try:
wrap = arr.__array_wrap__
except AttributeError:
pass
arr = asarray(arr)
ndim = arr.ndim
if axis is None:
if ndim != 1:
arr = arr.ravel()
ndim = arr.ndim
axis = ndim - 1
if ndim == 0:
# 2013-09-24, 1.9
warnings.warn(
"in the future the special handling of scalars will be removed "
"from delete and raise an error", DeprecationWarning)
if wrap:
return wrap(arr)
else:
return arr.copy()
slobj = [slice(None)]*ndim
N = arr.shape[axis]
newshape = list(arr.shape)
if isinstance(obj, slice):
start, stop, step = obj.indices(N)
xr = range(start, stop, step)
numtodel = len(xr)
if numtodel <= 0:
if wrap:
return wrap(arr.copy())
else:
return arr.copy()
# Invert if step is negative:
if step < 0:
step = -step
start = xr[-1]
stop = xr[0] + 1
newshape[axis] -= numtodel
new = empty(newshape, arr.dtype, arr.flags.fnc)
# copy initial chunk
if start == 0:
pass
else:
slobj[axis] = slice(None, start)
new[slobj] = arr[slobj]
        # copy end chunk
if stop == N:
pass
else:
slobj[axis] = slice(stop-numtodel, None)
slobj2 = [slice(None)]*ndim
slobj2[axis] = slice(stop, None)
new[slobj] = arr[slobj2]
# copy middle pieces
if step == 1:
pass
else: # use array indexing.
keep = ones(stop-start, dtype=bool)
keep[:stop-start:step] = False
slobj[axis] = slice(start, stop-numtodel)
slobj2 = [slice(None)]*ndim
slobj2[axis] = slice(start, stop)
arr = arr[slobj2]
slobj2[axis] = keep
new[slobj] = arr[slobj2]
if wrap:
return wrap(new)
else:
return new
_obj = obj
obj = np.asarray(obj)
# After removing the special handling of booleans and out of
# bounds values, the conversion to the array can be removed.
if obj.dtype == bool:
warnings.warn(
"in the future insert will treat boolean arrays and array-likes "
"as boolean index instead of casting it to integer", FutureWarning)
obj = obj.astype(intp)
if isinstance(_obj, (int, long, integer)):
# optimization for a single value
obj = obj.item()
if (obj < -N or obj >= N):
raise IndexError(
"index %i is out of bounds for axis %i with "
"size %i" % (obj, axis, N))
if (obj < 0):
obj += N
newshape[axis] -= 1
new = empty(newshape, arr.dtype, arr.flags.fnc)
slobj[axis] = slice(None, obj)
new[slobj] = arr[slobj]
slobj[axis] = slice(obj, None)
slobj2 = [slice(None)]*ndim
slobj2[axis] = slice(obj+1, None)
new[slobj] = arr[slobj2]
else:
if obj.size == 0 and not isinstance(_obj, np.ndarray):
obj = obj.astype(intp)
if not np.can_cast(obj, intp, 'same_kind'):
# obj.size = 1 special case always failed and would just
# give superfluous warnings.
# 2013-09-24, 1.9
warnings.warn(
"using a non-integer array as obj in delete will result in an "
"error in the future", DeprecationWarning)
obj = obj.astype(intp)
keep = ones(N, dtype=bool)
# Test if there are out of bound indices, this is deprecated
inside_bounds = (obj < N) & (obj >= -N)
if not inside_bounds.all():
# 2013-09-24, 1.9
warnings.warn(
"in the future out of bounds indices will raise an error "
"instead of being ignored by `numpy.delete`.",
DeprecationWarning)
obj = obj[inside_bounds]
positive_indices = obj >= 0
if not positive_indices.all():
warnings.warn(
"in the future negative indices will not be ignored by "
"`numpy.delete`.", FutureWarning)
obj = obj[positive_indices]
keep[obj, ] = False
slobj[axis] = keep
new = arr[slobj]
if wrap:
return wrap(new)
else:
return new
def insert(arr, obj, values, axis=None):
"""
Insert values along the given axis before the given indices.
Parameters
----------
arr : array_like
Input array.
obj : int, slice or sequence of ints
Object that defines the index or indices before which `values` is
inserted.
.. versionadded:: 1.8.0
Support for multiple insertions when `obj` is a single scalar or a
sequence with one element (similar to calling insert multiple
times).
values : array_like
Values to insert into `arr`. If the type of `values` is different
from that of `arr`, `values` is converted to the type of `arr`.
`values` should be shaped so that ``arr[...,obj,...] = values``
is legal.
axis : int, optional
Axis along which to insert `values`. If `axis` is None then `arr`
is flattened first.
Returns
-------
out : ndarray
A copy of `arr` with `values` inserted. Note that `insert`
does not occur in-place: a new array is returned. If
`axis` is None, `out` is a flattened array.
See Also
--------
append : Append elements at the end of an array.
concatenate : Join a sequence of arrays along an existing axis.
delete : Delete elements from an array.
Notes
-----
    Note that for higher dimensional inserts `obj=0` behaves very differently
    from `obj=[0]`, just as `arr[:,0,:] = values` is different from
    `arr[:,[0],:] = values`.
Examples
--------
>>> a = np.array([[1, 1], [2, 2], [3, 3]])
>>> a
array([[1, 1],
[2, 2],
[3, 3]])
>>> np.insert(a, 1, 5)
array([1, 5, 1, 2, 2, 3, 3])
>>> np.insert(a, 1, 5, axis=1)
array([[1, 5, 1],
[2, 5, 2],
[3, 5, 3]])
Difference between sequence and scalars:
>>> np.insert(a, [1], [[1],[2],[3]], axis=1)
array([[1, 1, 1],
[2, 2, 2],
[3, 3, 3]])
>>> np.array_equal(np.insert(a, 1, [1, 2, 3], axis=1),
... np.insert(a, [1], [[1],[2],[3]], axis=1))
True
>>> b = a.flatten()
>>> b
array([1, 1, 2, 2, 3, 3])
>>> np.insert(b, [2, 2], [5, 6])
array([1, 1, 5, 6, 2, 2, 3, 3])
>>> np.insert(b, slice(2, 4), [5, 6])
array([1, 1, 5, 2, 6, 2, 3, 3])
>>> np.insert(b, [2, 2], [7.13, False]) # type casting
array([1, 1, 7, 0, 2, 2, 3, 3])
>>> x = np.arange(8).reshape(2, 4)
>>> idx = (1, 3)
>>> np.insert(x, idx, 999, axis=1)
array([[ 0, 999, 1, 2, 999, 3],
[ 4, 999, 5, 6, 999, 7]])
"""
wrap = None
if type(arr) is not ndarray:
try:
wrap = arr.__array_wrap__
except AttributeError:
pass
arr = asarray(arr)
ndim = arr.ndim
if axis is None:
if ndim != 1:
arr = arr.ravel()
ndim = arr.ndim
axis = ndim - 1
else:
if ndim > 0 and (axis < -ndim or axis >= ndim):
raise IndexError(
"axis %i is out of bounds for an array of "
"dimension %i" % (axis, ndim))
if (axis < 0):
axis += ndim
if (ndim == 0):
# 2013-09-24, 1.9
warnings.warn(
"in the future the special handling of scalars will be removed "
"from insert and raise an error", DeprecationWarning)
arr = arr.copy()
arr[...] = values
if wrap:
return wrap(arr)
else:
return arr
slobj = [slice(None)]*ndim
N = arr.shape[axis]
newshape = list(arr.shape)
if isinstance(obj, slice):
# turn it into a range object
indices = arange(*obj.indices(N), **{'dtype': intp})
else:
# need to copy obj, because indices will be changed in-place
indices = np.array(obj)
if indices.dtype == bool:
# See also delete
warnings.warn(
"in the future insert will treat boolean arrays and "
"array-likes as a boolean index instead of casting it to "
"integer", FutureWarning)
indices = indices.astype(intp)
# Code after warning period:
#if obj.ndim != 1:
# raise ValueError('boolean array argument obj to insert '
# 'must be one dimensional')
#indices = np.flatnonzero(obj)
elif indices.ndim > 1:
raise ValueError(
"index array argument obj to insert must be one dimensional "
"or scalar")
if indices.size == 1:
index = indices.item()
if index < -N or index > N:
raise IndexError(
"index %i is out of bounds for axis %i with "
"size %i" % (obj, axis, N))
if (index < 0):
index += N
# There are some object array corner cases here, but we cannot avoid
# that:
values = array(values, copy=False, ndmin=arr.ndim, dtype=arr.dtype)
if indices.ndim == 0:
# broadcasting is very different here, since a[:,0,:] = ... behaves
            # very differently from a[:,[0],:] = ...! This changes values so that
            # it works like the second case. (here a[:,0:1,:])
values = np.rollaxis(values, 0, (axis % values.ndim) + 1)
numnew = values.shape[axis]
newshape[axis] += numnew
new = empty(newshape, arr.dtype, arr.flags.fnc)
slobj[axis] = slice(None, index)
new[slobj] = arr[slobj]
slobj[axis] = slice(index, index+numnew)
new[slobj] = values
slobj[axis] = slice(index+numnew, None)
slobj2 = [slice(None)] * ndim
slobj2[axis] = slice(index, None)
new[slobj] = arr[slobj2]
if wrap:
return wrap(new)
return new
elif indices.size == 0 and not isinstance(obj, np.ndarray):
# Can safely cast the empty list to intp
indices = indices.astype(intp)
if not np.can_cast(indices, intp, 'same_kind'):
# 2013-09-24, 1.9
warnings.warn(
"using a non-integer array as obj in insert will result in an "
"error in the future", DeprecationWarning)
indices = indices.astype(intp)
indices[indices < 0] += N
numnew = len(indices)
order = indices.argsort(kind='mergesort') # stable sort
indices[order] += np.arange(numnew)
newshape[axis] += numnew
old_mask = ones(newshape[axis], dtype=bool)
old_mask[indices] = False
new = empty(newshape, arr.dtype, arr.flags.fnc)
slobj2 = [slice(None)]*ndim
slobj[axis] = indices
slobj2[axis] = old_mask
new[slobj] = values
new[slobj2] = arr
if wrap:
return wrap(new)
return new
def append(arr, values, axis=None):
"""
Append values to the end of an array.
Parameters
----------
arr : array_like
Values are appended to a copy of this array.
values : array_like
These values are appended to a copy of `arr`. It must be of the
correct shape (the same shape as `arr`, excluding `axis`). If
`axis` is not specified, `values` can be any shape and will be
flattened before use.
axis : int, optional
The axis along which `values` are appended. If `axis` is not
given, both `arr` and `values` are flattened before use.
Returns
-------
append : ndarray
A copy of `arr` with `values` appended to `axis`. Note that
`append` does not occur in-place: a new array is allocated and
filled. If `axis` is None, `out` is a flattened array.
See Also
--------
insert : Insert elements into an array.
delete : Delete elements from an array.
Examples
--------
>>> np.append([1, 2, 3], [[4, 5, 6], [7, 8, 9]])
array([1, 2, 3, 4, 5, 6, 7, 8, 9])
When `axis` is specified, `values` must have the correct shape.
>>> np.append([[1, 2, 3], [4, 5, 6]], [[7, 8, 9]], axis=0)
array([[1, 2, 3],
[4, 5, 6],
[7, 8, 9]])
>>> np.append([[1, 2, 3], [4, 5, 6]], [7, 8, 9], axis=0)
Traceback (most recent call last):
...
ValueError: arrays must have same number of dimensions
"""
arr = asanyarray(arr)
if axis is None:
if arr.ndim != 1:
arr = arr.ravel()
values = ravel(values)
axis = arr.ndim-1
return concatenate((arr, values), axis=axis)
| gpl-2.0 |
msabramo/kallithea | kallithea/controllers/admin/my_account.py | 2 | 10917 | # -*- coding: utf-8 -*-
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
kallithea.controllers.admin.my_account
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
my account controller for Kallithea admin
This file was forked by the Kallithea project in July 2014.
Original author and date, and relevant copyright and licensing information is below:
:created_on: August 20, 2013
:author: marcink
:copyright: (c) 2013 RhodeCode GmbH, and others.
:license: GPLv3, see LICENSE.md for more details.
"""
import logging
import traceback
import formencode
from sqlalchemy import func
from formencode import htmlfill
from pylons import request, tmpl_context as c, url
from pylons.controllers.util import redirect
from pylons.i18n.translation import _
from kallithea import EXTERN_TYPE_INTERNAL
from kallithea.lib import helpers as h
from kallithea.lib.auth import LoginRequired, NotAnonymous, AuthUser
from kallithea.lib.base import BaseController, render
from kallithea.lib.utils2 import generate_api_key, safe_int
from kallithea.lib.compat import json
from kallithea.model.db import Repository, \
UserEmailMap, UserApiKeys, User, UserFollowing
from kallithea.model.forms import UserForm, PasswordChangeForm
from kallithea.model.user import UserModel
from kallithea.model.repo import RepoModel
from kallithea.model.api_key import ApiKeyModel
from kallithea.model.meta import Session
log = logging.getLogger(__name__)
class MyAccountController(BaseController):
"""REST Controller styled on the Atom Publishing Protocol"""
# To properly map this controller, ensure your config/routing.py
# file has a resource setup:
# map.resource('setting', 'settings', controller='admin/settings',
# path_prefix='/admin', name_prefix='admin_')
@LoginRequired()
@NotAnonymous()
def __before__(self):
super(MyAccountController, self).__before__()
def __load_data(self):
c.user = User.get(self.authuser.user_id)
if c.user.username == User.DEFAULT_USER:
h.flash(_("You can't edit this user since it's"
" crucial for entire application"), category='warning')
return redirect(url('users'))
c.EXTERN_TYPE_INTERNAL = EXTERN_TYPE_INTERNAL
def _load_my_repos_data(self, watched=False):
if watched:
admin = False
repos_list = [x.follows_repository for x in
Session().query(UserFollowing).filter(
UserFollowing.user_id ==
self.authuser.user_id).all()]
else:
admin = True
repos_list = Session().query(Repository)\
.filter(Repository.user_id ==
self.authuser.user_id)\
.order_by(func.lower(Repository.repo_name)).all()
repos_data = RepoModel().get_repos_as_dict(repos_list=repos_list,
admin=admin)
#json used to render the grid
return json.dumps(repos_data)
def my_account(self):
"""
GET /_admin/my_account Displays info about my account
"""
# url('my_account')
c.active = 'profile'
self.__load_data()
c.perm_user = AuthUser(user_id=self.authuser.user_id,
ip_addr=self.ip_addr)
c.extern_type = c.user.extern_type
c.extern_name = c.user.extern_name
defaults = c.user.get_dict()
update = False
if request.POST:
_form = UserForm(edit=True,
old_data={'user_id': self.authuser.user_id,
'email': self.authuser.email})()
form_result = {}
try:
post_data = dict(request.POST)
post_data['new_password'] = ''
post_data['password_confirmation'] = ''
form_result = _form.to_python(post_data)
# skip updating those attrs for my account
skip_attrs = ['admin', 'active', 'extern_type', 'extern_name',
'new_password', 'password_confirmation']
#TODO: plugin should define if username can be updated
if c.extern_type != EXTERN_TYPE_INTERNAL:
# forbid updating username for external accounts
skip_attrs.append('username')
UserModel().update(self.authuser.user_id, form_result,
skip_attrs=skip_attrs)
h.flash(_('Your account was updated successfully'),
category='success')
Session().commit()
update = True
except formencode.Invalid, errors:
return htmlfill.render(
render('admin/my_account/my_account.html'),
defaults=errors.value,
errors=errors.error_dict or {},
prefix_error=False,
encoding="UTF-8",
force_defaults=False)
except Exception:
log.error(traceback.format_exc())
h.flash(_('Error occurred during update of user %s') \
% form_result.get('username'), category='error')
if update:
return redirect('my_account')
return htmlfill.render(
render('admin/my_account/my_account.html'),
defaults=defaults,
encoding="UTF-8",
force_defaults=False)
def my_account_password(self):
c.active = 'password'
self.__load_data()
if request.POST:
_form = PasswordChangeForm(self.authuser.username)()
try:
form_result = _form.to_python(request.POST)
UserModel().update(self.authuser.user_id, form_result)
Session().commit()
h.flash(_("Successfully updated password"), category='success')
except formencode.Invalid as errors:
return htmlfill.render(
render('admin/my_account/my_account.html'),
defaults=errors.value,
errors=errors.error_dict or {},
prefix_error=False,
encoding="UTF-8",
force_defaults=False)
except Exception:
log.error(traceback.format_exc())
h.flash(_('Error occurred during update of user password'),
category='error')
return render('admin/my_account/my_account.html')
def my_account_repos(self):
c.active = 'repos'
self.__load_data()
#json used to render the grid
c.data = self._load_my_repos_data()
return render('admin/my_account/my_account.html')
def my_account_watched(self):
c.active = 'watched'
self.__load_data()
#json used to render the grid
c.data = self._load_my_repos_data(watched=True)
return render('admin/my_account/my_account.html')
def my_account_perms(self):
c.active = 'perms'
self.__load_data()
c.perm_user = AuthUser(user_id=self.authuser.user_id,
ip_addr=self.ip_addr)
return render('admin/my_account/my_account.html')
def my_account_emails(self):
c.active = 'emails'
self.__load_data()
c.user_email_map = UserEmailMap.query()\
.filter(UserEmailMap.user == c.user).all()
return render('admin/my_account/my_account.html')
def my_account_emails_add(self):
email = request.POST.get('new_email')
try:
UserModel().add_extra_email(self.authuser.user_id, email)
Session().commit()
h.flash(_("Added email %s to user") % email, category='success')
except formencode.Invalid, error:
msg = error.error_dict['email']
h.flash(msg, category='error')
except Exception:
log.error(traceback.format_exc())
h.flash(_('An error occurred during email saving'),
category='error')
return redirect(url('my_account_emails'))
def my_account_emails_delete(self):
email_id = request.POST.get('del_email_id')
user_model = UserModel()
user_model.delete_extra_email(self.authuser.user_id, email_id)
Session().commit()
h.flash(_("Removed email from user"), category='success')
return redirect(url('my_account_emails'))
def my_account_api_keys(self):
c.active = 'api_keys'
self.__load_data()
show_expired = True
c.lifetime_values = [
(str(-1), _('forever')),
(str(5), _('5 minutes')),
(str(60), _('1 hour')),
(str(60 * 24), _('1 day')),
(str(60 * 24 * 30), _('1 month')),
]
c.lifetime_options = [(c.lifetime_values, _("Lifetime"))]
c.user_api_keys = ApiKeyModel().get_api_keys(self.authuser.user_id,
show_expired=show_expired)
return render('admin/my_account/my_account.html')
def my_account_api_keys_add(self):
lifetime = safe_int(request.POST.get('lifetime'), -1)
description = request.POST.get('description')
ApiKeyModel().create(self.authuser.user_id, description, lifetime)
Session().commit()
h.flash(_("Api key successfully created"), category='success')
return redirect(url('my_account_api_keys'))
def my_account_api_keys_delete(self):
api_key = request.POST.get('del_api_key')
user_id = self.authuser.user_id
if request.POST.get('del_api_key_builtin'):
user = User.get(user_id)
if user:
user.api_key = generate_api_key(user.username)
Session().add(user)
Session().commit()
h.flash(_("Api key successfully reset"), category='success')
elif api_key:
ApiKeyModel().delete(api_key, self.authuser.user_id)
Session().commit()
h.flash(_("Api key successfully deleted"), category='success')
return redirect(url('my_account_api_keys'))
| gpl-3.0 |
Observer-Wu/phantomjs | src/qt/qtwebkit/Tools/Scripts/webkitpy/tool/commands/sheriffbot.py | 121 | 3076 | # Copyright (c) 2009 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import logging
from webkitpy.tool.bot.sheriff import Sheriff
from webkitpy.tool.bot.irc_command import commands as irc_commands
from webkitpy.tool.bot.ircbot import IRCBot
from webkitpy.tool.commands.queues import AbstractQueue
from webkitpy.tool.commands.stepsequence import StepSequenceErrorHandler
_log = logging.getLogger(__name__)
class SheriffBot(AbstractQueue, StepSequenceErrorHandler):
name = "webkitbot"
watchers = AbstractQueue.watchers + [
"abarth@webkit.org",
"eric@webkit.org",
]
# AbstractQueue methods
def begin_work_queue(self):
AbstractQueue.begin_work_queue(self)
self._sheriff = Sheriff(self._tool, self)
self._irc_bot = IRCBot(self.name, self._tool, self._sheriff, irc_commands)
self._tool.ensure_irc_connected(self._irc_bot.irc_delegate())
def work_item_log_path(self, failure_map):
return None
def _is_old_failure(self, revision):
return self._tool.status_server.svn_revision(revision)
def next_work_item(self):
self._irc_bot.process_pending_messages()
return
def process_work_item(self, failure_map):
return True
def handle_unexpected_error(self, failure_map, message):
_log.error(message)
# StepSequenceErrorHandler methods
@classmethod
def handle_script_error(cls, tool, state, script_error):
# Ideally we would post some information to IRC about what went wrong
# here, but we don't have the IRC password in the child process.
pass
| bsd-3-clause |
frankyrumple/ope | admin_app/gluon/__init__.py | 21 | 2775 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
This file is part of the web2py Web Framework
Copyrighted by Massimo Di Pierro <mdipierro@cs.depaul.edu>
License: LGPLv3 (http://www.gnu.org/licenses/lgpl.html)
Web2Py framework modules
========================
"""
__all__ = ['A', 'B', 'BEAUTIFY', 'BODY', 'BR', 'CAT', 'CENTER', 'CLEANUP', 'CODE', 'CRYPT', 'DAL', 'DIV', 'EM', 'EMBED', 'FIELDSET', 'FORM', 'Field', 'H1', 'H2', 'H3', 'H4', 'H5', 'H6', 'HEAD', 'HR', 'HTML', 'HTTP', 'I', 'IFRAME', 'IMG', 'INPUT', 'IS_ALPHANUMERIC', 'IS_DATE', 'IS_DATETIME', 'IS_DATETIME_IN_RANGE', 'IS_DATE_IN_RANGE', 'IS_DECIMAL_IN_RANGE', 'IS_EMAIL', 'IS_LIST_OF_EMAILS', 'IS_EMPTY_OR', 'IS_EQUAL_TO', 'IS_EXPR', 'IS_FLOAT_IN_RANGE', 'IS_IMAGE', 'IS_JSON', 'IS_INT_IN_RANGE', 'IS_IN_DB', 'IS_IN_SET', 'IS_IPV4', 'IS_LENGTH', 'IS_LIST_OF', 'IS_LOWER', 'IS_MATCH', 'IS_NOT_EMPTY', 'IS_NOT_IN_DB', 'IS_NULL_OR', 'IS_SLUG', 'IS_STRONG', 'IS_TIME', 'IS_UPLOAD_FILENAME', 'IS_UPPER', 'IS_URL', 'LABEL', 'LEGEND', 'LI', 'LINK', 'LOAD', 'MARKMIN', 'MENU', 'META', 'OBJECT', 'OL', 'ON', 'OPTGROUP', 'OPTION', 'P', 'PRE', 'SCRIPT', 'SELECT', 'SPAN', 'SQLFORM', 'SQLTABLE', 'STRONG', 'STYLE', 'TABLE', 'TAG', 'TBODY', 'TD', 'TEXTAREA', 'TFOOT', 'TH', 'THEAD', 'TITLE', 'TR', 'TT', 'UL', 'URL', 'XHTML', 'XML', 'redirect', 'current', 'embed64']
#: add pydal to sys.modules
import os
import sys
try:
sys.path.append(os.path.join(
os.path.dirname(os.path.abspath(__file__)), "packages", "dal"))
import pydal
sys.modules['pydal'] = pydal
except ImportError:
raise RuntimeError(
"web2py depends on pydal, which apparently you have not installed.\n" +
"Probably you cloned the repository using git without '--recursive'" +
"\nTo fix this, please run (from inside your web2py folder):\n\n" +
" git submodule update --init --recursive\n\n" +
"You can also download a complete copy from http://www.web2py.com."
)
from globals import current
from html import *
from validators import *
from http import redirect, HTTP
from dal import DAL, Field
from sqlhtml import SQLFORM, SQLTABLE
from compileapp import LOAD
# Dummy code to enable code completion in IDE's.
if 0:
from globals import Request, Response, Session
from cache import Cache
from languages import translator
from tools import Auth, Crud, Mail, Service, PluginManager
# API objects
request = Request()
response = Response()
session = Session()
cache = Cache(request)
T = translator(request)
# Objects commonly defined in application model files
# (names are conventions only -- not part of API)
db = DAL()
auth = Auth(db)
crud = Crud(db)
mail = Mail()
service = Service()
plugins = PluginManager()
| mit |
unicefuganda/mics | survey/views/surveys.py | 2 | 2608 | from django.core.exceptions import ObjectDoesNotExist
from django.http import HttpResponse, HttpResponseRedirect
from django.shortcuts import render
from django.contrib import messages
from django.contrib.auth.decorators import permission_required
from survey.models.surveys import Survey
from survey.forms.surveys import SurveyForm
from survey.views.custom_decorators import handle_object_does_not_exist
@permission_required('auth.can_view_batches')
def index(request):
surveys = Survey.objects.all().order_by('created')
context = {'surveys': surveys, 'request': request,
'survey_form': SurveyForm()}
return render(request, 'surveys/index.html',
context)
@permission_required('auth.can_view_batches')
def new(request):
response = None
survey_form = SurveyForm()
if request.method == 'POST':
survey_form = SurveyForm(request.POST)
if survey_form.is_valid():
Survey.save_sample_size(survey_form)
messages.success(request, 'Survey successfully added.')
response = HttpResponseRedirect('/surveys/')
context = {'survey_form': survey_form,
'title': "New Survey",
'button_label': 'Create',
'id': 'add-survey-form',
'action': "/surveys/new/",
'cancel_url': '/surveys/',
}
return response or render(request, 'surveys/new.html', context)
@handle_object_does_not_exist(message="Survey does not exist.")
@permission_required('auth.can_view_batches')
def edit(request, survey_id):
survey = Survey.objects.get(id=survey_id)
survey_form = SurveyForm(instance=survey)
if request.method == 'POST':
survey_form = SurveyForm(instance=survey, data=request.POST)
if survey_form.is_valid():
Survey.save_sample_size(survey_form)
messages.success(request, 'Survey successfully edited.')
return HttpResponseRedirect('/surveys/')
context = {'survey_form': survey_form,
'title': "Edit Survey",
'button_label': 'Save',
'id': 'edit-survey-form',
'cancel_url': '/surveys/',
'action': '/surveys/%s/edit/' %survey_id
}
return render(request, 'surveys/new.html', context)
@handle_object_does_not_exist(message="Survey does not exist.")
@permission_required('auth.can_view_batches')
def delete(request, survey_id):
survey = Survey.objects.get(id=survey_id)
messages.error(request, "Survey cannot be deleted.")
return HttpResponseRedirect('/surveys/') | bsd-3-clause |
Thingee/cinder | cinder/db/sqlalchemy/migrate_repo/versions/011_add_bootable_column.py | 11 | 1537 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from sqlalchemy import Boolean, Column, MetaData, Table
def upgrade(migrate_engine):
"""Add bootable column to volumes."""
meta = MetaData()
meta.bind = migrate_engine
volumes = Table('volumes', meta, autoload=True)
bootable = Column('bootable', Boolean)
volumes.create_column(bootable)
volumes.update().values(bootable=False).execute()
glance_metadata = Table('volume_glance_metadata', meta, autoload=True)
glance_items = list(glance_metadata.select().execute())
for item in glance_items:
volumes.update().\
where(volumes.c.id == item['volume_id']).\
values(bootable=True).execute()
def downgrade(migrate_engine):
"""Remove bootable column to volumes."""
meta = MetaData()
meta.bind = migrate_engine
volumes = Table('volumes', meta, autoload=True)
bootable = volumes.columns.bootable
#bootable = Column('bootable', Boolean)
volumes.drop_column(bootable)
| apache-2.0 |
Oliver2213/NVDAYoutube-dl | addon/globalPlugins/nvdaYoutubeDL/youtube_dl/extractor/wsj.py | 23 | 3399 | # encoding: utf-8
from __future__ import unicode_literals
from .common import InfoExtractor
from ..utils import (
int_or_none,
unified_strdate,
)
class WSJIE(InfoExtractor):
_VALID_URL = r'https?://video-api\.wsj\.com/api-video/player/iframe\.html\?guid=(?P<id>[a-zA-Z0-9-]+)'
IE_DESC = 'Wall Street Journal'
_TEST = {
'url': 'http://video-api.wsj.com/api-video/player/iframe.html?guid=1BD01A4C-BFE8-40A5-A42F-8A8AF9898B1A',
'md5': '9747d7a6ebc2f4df64b981e1dde9efa9',
'info_dict': {
'id': '1BD01A4C-BFE8-40A5-A42F-8A8AF9898B1A',
'ext': 'mp4',
'upload_date': '20150202',
'uploader_id': 'jdesai',
'creator': 'jdesai',
'categories': list, # a long list
'duration': 90,
'title': 'Bills Coach Rex Ryan Updates His Old Jets Tattoo',
},
}
def _real_extract(self, url):
video_id = self._match_id(url)
bitrates = [128, 174, 264, 320, 464, 664, 1264]
api_url = (
'http://video-api.wsj.com/api-video/find_all_videos.asp?'
'type=guid&count=1&query=%s&'
'fields=hls,adZone,thumbnailList,guid,state,secondsUntilStartTime,'
'author,description,name,linkURL,videoStillURL,duration,videoURL,'
'adCategory,catastrophic,linkShortURL,doctypeID,youtubeID,'
'titletag,rssURL,wsj-section,wsj-subsection,allthingsd-section,'
'allthingsd-subsection,sm-section,sm-subsection,provider,'
'formattedCreationDate,keywords,keywordsOmniture,column,editor,'
'emailURL,emailPartnerID,showName,omnitureProgramName,'
'omnitureVideoFormat,linkRelativeURL,touchCastID,'
'omniturePublishDate,%s') % (
video_id, ','.join('video%dkMP4Url' % br for br in bitrates))
info = self._download_json(api_url, video_id)['items'][0]
# Thumbnails are conveniently in the correct format already
thumbnails = info.get('thumbnailList')
creator = info.get('author')
uploader_id = info.get('editor')
categories = info.get('keywords')
duration = int_or_none(info.get('duration'))
upload_date = unified_strdate(
info.get('formattedCreationDate'), day_first=False)
title = info.get('name', info.get('titletag'))
formats = [{
'format_id': 'f4m',
'format_note': 'f4m (meta URL)',
'url': info['videoURL'],
}]
if info.get('hls'):
formats.extend(self._extract_m3u8_formats(
info['hls'], video_id, ext='mp4',
preference=0, entry_protocol='m3u8_native'))
for br in bitrates:
field = 'video%dkMP4Url' % br
if info.get(field):
formats.append({
'format_id': 'mp4-%d' % br,
'container': 'mp4',
'tbr': br,
'url': info[field],
})
self._sort_formats(formats)
return {
'id': video_id,
'formats': formats,
'thumbnails': thumbnails,
'creator': creator,
'uploader_id': uploader_id,
'duration': duration,
'upload_date': upload_date,
'title': title,
'categories': categories,
}
| gpl-2.0 |
ioannistsanaktsidis/invenio | modules/bibformat/lib/elements/bfe_photos.py | 18 | 5831 | # -*- coding: utf-8 -*-
##
## This file is part of Invenio.
## Copyright (C) 2008, 2009, 2010, 2011 CERN.
##
## Invenio is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 2 of the
## License, or (at your option) any later version.
##
## Invenio is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with Invenio; if not, write to the Free Software Foundation, Inc.,
## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""BibFormat element - Print photos of the record (if bibdoc file)
"""
import cgi
from invenio.bibdocfile import BibRecDocs
from invenio.urlutils import (create_html_link,
get_relative_url)
def format_element(bfo, separator=" ", style='', img_style='', text_style='font-size:small',
print_links='yes', max_photos='', show_comment='yes',
img_max_width='250px', display_all_version_links='yes'):
"""
Lists the photos of a record. Display the icon version, linked to
its original version.
This element works for photos appended to a record as BibDoc
files, for which a preview icon has been generated. If there are
several formats for one photo, use the first one found.
@param separator: separator between each photo
@param print_links: if 'yes', print links to the original photo
@param style: style attributes of the whole image block. Eg: "padding:2px;border:1px"
@param img_style: style attributes of the images. Eg: "width:50px;border:none"
@param text_style: style attributes of the text. Eg: "font-size:small"
@param max_photos: the maximum number of photos to display
@param show_comment: if 'yes', display the comment of each photo
@param display_all_version_links: if 'yes', print links to additional (sub)formats
"""
photos = []
bibarchive = BibRecDocs(bfo.recID)
bibdocs = bibarchive.list_bibdocs()
if max_photos.isdigit():
max_photos = int(max_photos)
else:
max_photos = len(bibdocs)
for doc in bibdocs[:max_photos]:
found_icons = []
found_url = ''
for docfile in doc.list_latest_files():
if docfile.is_icon():
found_icons.append((
docfile.get_size(),
get_relative_url(docfile.get_url())
))
else:
found_url = get_relative_url(docfile.get_url())
found_icons.sort()
if found_icons:
additional_links = ''
name = bibarchive.get_docname(doc.id)
comment = doc.list_latest_files()[0].get_comment()
preview_url = None
if len(found_icons) > 1:
preview_url = get_relative_url(found_icons[1][1])
additional_urls = [(docfile.get_size(), get_relative_url(docfile.get_url()), \
docfile.get_superformat(), docfile.get_subformat()) \
for docfile in doc.list_latest_files() if not docfile.is_icon()]
additional_urls.sort()
additional_links = [create_html_link(url, urlargd={}, \
linkattrd={'style': 'font-size:x-small'}, \
link_label="%s %s (%s)" % (format.strip('.').upper(), subformat, format_size(size))) \
for (size, url, format, subformat) in additional_urls]
img = '<img src="%(icon_url)s" alt="%(name)s" style="max-width:%(img_max_width)s;_width:%(img_max_width)s;%(img_style)s" />' % \
{'icon_url': cgi.escape(get_relative_url(found_icons[0][1]), True),
'name': cgi.escape(name, True),
'img_style': img_style,
'img_max_width': img_max_width}
if print_links.lower() == 'yes':
img = '<a href="%s">%s</a>' % (cgi.escape(preview_url or found_url, True), img)
if display_all_version_links.lower() == 'yes' and additional_links:
img += '<br />' + ' '.join(additional_links) + '<br />'
if show_comment.lower() == 'yes' and comment:
img += '<div style="margin-auto;text-align:center;%(text_style)s">%(comment)s</div>' % \
{'comment': comment.replace('\n', '<br/>'),
'text_style': text_style}
img = '<div style="vertical-align: middle;text-align:center;display:inline-block;display: -moz-inline-stack;zoom: 1;*display: inline;max-width:%(img_max_width)s;_width:%(img_max_width)s;text-align:center;%(style)s">%(img)s</div>' % \
{'img_max_width': img_max_width,
'style': style,
'img': img}
photos.append(img)
return '<div>' + separator.join(photos) + '</div>'
def escape_values(bfo):
"""
Called by BibFormat in order to check if output of this element
should be escaped.
"""
return 0
def format_size(size):
"""
Get human-readable string for the given size in Bytes
"""
if size < 1024:
return "%d byte%s" % (size, size != 1 and 's' or '')
elif size < 1024 * 1024:
return "%.1f KB" % (size / 1024)
elif size < 1024 * 1024 * 1024:
return "%.1f MB" % (size / (1024 * 1024))
else:
return "%.1f GB" % (size / (1024 * 1024 * 1024))
| gpl-2.0 |
hsaputra/tensorflow | tensorflow/examples/adding_an_op/zero_out_2_test.py | 111 | 1988 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Test for version 2 of the zero_out op."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from tensorflow.examples.adding_an_op import zero_out_grad_2 # pylint: disable=unused-import
from tensorflow.examples.adding_an_op import zero_out_op_2
class ZeroOut2Test(tf.test.TestCase):
def test(self):
with self.test_session():
result = zero_out_op_2.zero_out([5, 4, 3, 2, 1])
self.assertAllEqual(result.eval(), [5, 0, 0, 0, 0])
def test_2d(self):
with self.test_session():
result = zero_out_op_2.zero_out([[6, 5, 4], [3, 2, 1]])
self.assertAllEqual(result.eval(), [[6, 0, 0], [0, 0, 0]])
def test_grad(self):
with self.test_session():
shape = (5,)
x = tf.constant([5, 4, 3, 2, 1], dtype=tf.float32)
y = zero_out_op_2.zero_out(x)
err = tf.test.compute_gradient_error(x, shape, y, shape)
self.assertLess(err, 1e-4)
def test_grad_2d(self):
with self.test_session():
shape = (2, 3)
x = tf.constant([[6, 5, 4], [3, 2, 1]], dtype=tf.float32)
y = zero_out_op_2.zero_out(x)
err = tf.test.compute_gradient_error(x, shape, y, shape)
self.assertLess(err, 1e-4)
if __name__ == '__main__':
tf.test.main()
| apache-2.0 |
klmitch/nova | nova/tests/functional/compute/test_resource_tracker.py | 1 | 30750 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
import os
import fixtures
import mock
import os_resource_classes as orc
import os_traits
from oslo_utils.fixture import uuidsentinel as uuids
import yaml
from nova.compute import power_state
from nova.compute import resource_tracker
from nova.compute import task_states
from nova.compute import utils as compute_utils
from nova.compute import vm_states
from nova import conf
from nova import context
from nova import objects
from nova import test
from nova.tests import fixtures as nova_fixtures
from nova.tests.functional import fixtures as func_fixtures
from nova.tests.functional import integrated_helpers
from nova.tests.unit import fake_notifier
from nova.virt import driver as virt_driver
CONF = conf.CONF
VCPU = orc.VCPU
MEMORY_MB = orc.MEMORY_MB
DISK_GB = orc.DISK_GB
COMPUTE_HOST = 'compute-host'
class IronicResourceTrackerTest(test.TestCase):
"""Tests the behaviour of the resource tracker with regards to the
transitional period between adding support for custom resource classes in
the placement API and integrating inventory and allocation records for
Ironic baremetal nodes with those custom resource classes.
"""
FLAVOR_FIXTURES = {
'CUSTOM_SMALL_IRON': objects.Flavor(
name='CUSTOM_SMALL_IRON',
flavorid=42,
vcpus=4,
memory_mb=4096,
root_gb=1024,
swap=0,
ephemeral_gb=0,
extra_specs={},
),
'CUSTOM_BIG_IRON': objects.Flavor(
name='CUSTOM_BIG_IRON',
flavorid=43,
vcpus=16,
memory_mb=65536,
root_gb=1024,
swap=0,
ephemeral_gb=0,
extra_specs={},
),
}
COMPUTE_NODE_FIXTURES = {
uuids.cn1: objects.ComputeNode(
uuid=uuids.cn1,
hypervisor_hostname='cn1',
hypervisor_type='ironic',
hypervisor_version=0,
cpu_info="",
host=COMPUTE_HOST,
vcpus=4,
vcpus_used=0,
cpu_allocation_ratio=1.0,
memory_mb=4096,
memory_mb_used=0,
ram_allocation_ratio=1.0,
local_gb=1024,
local_gb_used=0,
disk_allocation_ratio=1.0,
),
uuids.cn2: objects.ComputeNode(
uuid=uuids.cn2,
hypervisor_hostname='cn2',
hypervisor_type='ironic',
hypervisor_version=0,
cpu_info="",
host=COMPUTE_HOST,
vcpus=4,
vcpus_used=0,
cpu_allocation_ratio=1.0,
memory_mb=4096,
memory_mb_used=0,
ram_allocation_ratio=1.0,
local_gb=1024,
local_gb_used=0,
disk_allocation_ratio=1.0,
),
uuids.cn3: objects.ComputeNode(
uuid=uuids.cn3,
hypervisor_hostname='cn3',
hypervisor_type='ironic',
hypervisor_version=0,
cpu_info="",
host=COMPUTE_HOST,
vcpus=16,
vcpus_used=0,
cpu_allocation_ratio=1.0,
memory_mb=65536,
memory_mb_used=0,
ram_allocation_ratio=1.0,
local_gb=2048,
local_gb_used=0,
disk_allocation_ratio=1.0,
),
}
INSTANCE_FIXTURES = {
uuids.instance1: objects.Instance(
uuid=uuids.instance1,
flavor=FLAVOR_FIXTURES['CUSTOM_SMALL_IRON'],
vm_state=vm_states.BUILDING,
task_state=task_states.SPAWNING,
power_state=power_state.RUNNING,
project_id='project',
user_id=uuids.user,
),
}
def _set_client(self, client):
"""Set up embedded report clients to use the direct one from the
interceptor.
"""
self.report_client = client
self.rt.reportclient = client
def setUp(self):
super(IronicResourceTrackerTest, self).setUp()
self.flags(
reserved_host_memory_mb=0,
cpu_allocation_ratio=1.0,
ram_allocation_ratio=1.0,
disk_allocation_ratio=1.0,
)
self.ctx = context.RequestContext('user', 'project')
driver = mock.MagicMock(autospec=virt_driver.ComputeDriver)
driver.node_is_available.return_value = True
def fake_upt(provider_tree, nodename, allocations=None):
inventory = {
'CUSTOM_SMALL_IRON': {
'total': 1,
'reserved': 0,
'min_unit': 1,
'max_unit': 1,
'step_size': 1,
'allocation_ratio': 1.0,
},
}
provider_tree.update_inventory(nodename, inventory)
driver.update_provider_tree.side_effect = fake_upt
self.driver_mock = driver
self.rt = resource_tracker.ResourceTracker(COMPUTE_HOST, driver)
self.instances = self.create_fixtures()
def create_fixtures(self):
for flavor in self.FLAVOR_FIXTURES.values():
# Clone the object so the class variable isn't
# modified by reference.
flavor = flavor.obj_clone()
flavor._context = self.ctx
flavor.obj_set_defaults()
flavor.create()
# We create some compute node records in the Nova cell DB to simulate
# data before adding integration for Ironic baremetal nodes with the
# placement API...
for cn in self.COMPUTE_NODE_FIXTURES.values():
# Clone the object so the class variable isn't
# modified by reference.
cn = cn.obj_clone()
cn._context = self.ctx
cn.obj_set_defaults()
cn.create()
instances = {}
for instance in self.INSTANCE_FIXTURES.values():
# Clone the object so the class variable isn't
# modified by reference.
instance = instance.obj_clone()
instance._context = self.ctx
instance.obj_set_defaults()
instance.create()
instances[instance.uuid] = instance
return instances
@mock.patch('nova.compute.utils.is_volume_backed_instance',
new=mock.Mock(return_value=False))
@mock.patch('nova.objects.compute_node.ComputeNode.save', new=mock.Mock())
def test_node_stats_isolation(self):
"""Regression test for bug 1784705 introduced in Ocata.
The ResourceTracker.stats field is meant to track per-node stats
so this test registers three compute nodes with a single RT where
each node has unique stats, and then makes sure that after updating
usage for an instance, the nodes still have their unique stats and
nothing is leaked from node to node.
"""
self.useFixture(func_fixtures.PlacementFixture())
# Before the resource tracker is "initialized", we shouldn't have
# any compute nodes or stats in the RT's cache...
self.assertEqual(0, len(self.rt.compute_nodes))
self.assertEqual(0, len(self.rt.stats))
# Now "initialize" the resource tracker. This is what
# nova.compute.manager.ComputeManager does when "initializing" the
# nova-compute service. Do this in a predictable order so cn1 is
# first and cn3 is last.
for cn in sorted(self.COMPUTE_NODE_FIXTURES.values(),
key=lambda _cn: _cn.hypervisor_hostname):
nodename = cn.hypervisor_hostname
# Fake that each compute node has unique extra specs stats and
# the RT makes sure those are unique per node.
stats = {'node:%s' % nodename: nodename}
self.driver_mock.get_available_resource.return_value = {
'hypervisor_hostname': nodename,
'hypervisor_type': 'ironic',
'hypervisor_version': 0,
'vcpus': cn.vcpus,
'vcpus_used': cn.vcpus_used,
'memory_mb': cn.memory_mb,
'memory_mb_used': cn.memory_mb_used,
'local_gb': cn.local_gb,
'local_gb_used': cn.local_gb_used,
'numa_topology': None,
'resource_class': None, # Act like admin hasn't set yet...
'stats': stats,
}
self.rt.update_available_resource(self.ctx, nodename)
self.assertEqual(3, len(self.rt.compute_nodes))
self.assertEqual(3, len(self.rt.stats))
def _assert_stats():
# Make sure each compute node has a unique set of stats and
# they don't accumulate across nodes.
for _cn in self.rt.compute_nodes.values():
node_stats_key = 'node:%s' % _cn.hypervisor_hostname
self.assertIn(node_stats_key, _cn.stats)
node_stat_count = 0
for stat in _cn.stats:
if stat.startswith('node:'):
node_stat_count += 1
self.assertEqual(1, node_stat_count, _cn.stats)
_assert_stats()
# Now "spawn" an instance to the first compute node by calling the
# RT's instance_claim().
cn1_obj = self.COMPUTE_NODE_FIXTURES[uuids.cn1]
cn1_nodename = cn1_obj.hypervisor_hostname
inst = self.instances[uuids.instance1]
with self.rt.instance_claim(self.ctx, inst, cn1_nodename, {}):
_assert_stats()
class TestUpdateComputeNodeReservedAndAllocationRatio(
integrated_helpers.ProviderUsageBaseTestCase):
"""Tests reflecting reserved and allocation ratio inventory from
nova-compute to placement
"""
compute_driver = 'fake.FakeDriver'
@staticmethod
def _get_reserved_host_values_from_config():
return {
'VCPU': CONF.reserved_host_cpus,
'MEMORY_MB': CONF.reserved_host_memory_mb,
'DISK_GB': compute_utils.convert_mb_to_ceil_gb(
CONF.reserved_host_disk_mb)
}
def _assert_reserved_inventory(self, inventories):
reserved = self._get_reserved_host_values_from_config()
for rc, res in reserved.items():
self.assertIn('reserved', inventories[rc])
self.assertEqual(res, inventories[rc]['reserved'],
'Unexpected resource provider inventory '
'reserved value for %s' % rc)
def test_update_inventory_reserved_and_allocation_ratio_from_conf(self):
# Start a compute service which should create a corresponding resource
# provider in the placement service.
compute_service = self._start_compute('fake-host')
# Assert the compute node resource provider exists in placement with
# the default reserved and allocation ratio values from config.
rp_uuid = self._get_provider_uuid_by_host('fake-host')
inventories = self._get_provider_inventory(rp_uuid)
# The default allocation ratio config values are all 0.0 and get
# defaulted to real values in the ComputeNode object, so we need to
# check our defaults against what is in the ComputeNode object.
ctxt = context.get_admin_context()
# Note that the CellDatabases fixture usage means we don't need to
# target the context to cell1 even though the compute_nodes table is
# in the cell1 database.
cn = objects.ComputeNode.get_by_uuid(ctxt, rp_uuid)
ratios = {
'VCPU': cn.cpu_allocation_ratio,
'MEMORY_MB': cn.ram_allocation_ratio,
'DISK_GB': cn.disk_allocation_ratio
}
for rc, ratio in ratios.items():
self.assertIn(rc, inventories)
self.assertIn('allocation_ratio', inventories[rc])
self.assertEqual(ratio, inventories[rc]['allocation_ratio'],
'Unexpected allocation ratio for %s' % rc)
self._assert_reserved_inventory(inventories)
# Now change the configuration values, restart the compute service,
# and ensure the changes are reflected in the resource provider
# inventory records. We use 2.0 since disk_allocation_ratio defaults
# to 1.0.
self.flags(cpu_allocation_ratio=2.0)
self.flags(ram_allocation_ratio=2.0)
self.flags(disk_allocation_ratio=2.0)
self.flags(reserved_host_cpus=2)
self.flags(reserved_host_memory_mb=1024)
self.flags(reserved_host_disk_mb=8192)
self.restart_compute_service(compute_service)
# The ratios should now come from config overrides rather than the
# defaults in the ComputeNode object.
ratios = {
'VCPU': CONF.cpu_allocation_ratio,
'MEMORY_MB': CONF.ram_allocation_ratio,
'DISK_GB': CONF.disk_allocation_ratio
}
attr_map = {
'VCPU': 'cpu',
'MEMORY_MB': 'ram',
'DISK_GB': 'disk',
}
cn = objects.ComputeNode.get_by_uuid(ctxt, rp_uuid)
inventories = self._get_provider_inventory(rp_uuid)
for rc, ratio in ratios.items():
# Make sure the config is what we expect.
self.assertEqual(2.0, ratio,
'Unexpected config allocation ratio for %s' % rc)
# Make sure the values in the DB are updated.
self.assertEqual(
ratio, getattr(cn, '%s_allocation_ratio' % attr_map[rc]),
'Unexpected ComputeNode allocation ratio for %s' % rc)
# Make sure the values in placement are updated.
self.assertEqual(ratio, inventories[rc]['allocation_ratio'],
'Unexpected resource provider inventory '
'allocation ratio for %s' % rc)
# The reserved host values should also come from config.
self._assert_reserved_inventory(inventories)
def test_allocation_ratio_create_with_initial_allocation_ratio(self):
        # The xxx_allocation_ratio options are set to None by default, and we use
        # 16.1/1.6/1.1 since the initial cpu/ram/disk ratios default to 16.0/1.5/1.0.
self.flags(initial_cpu_allocation_ratio=16.1)
self.flags(initial_ram_allocation_ratio=1.6)
self.flags(initial_disk_allocation_ratio=1.1)
# Start a compute service which should create a corresponding resource
# provider in the placement service.
self._start_compute('fake-host')
# Assert the compute node resource provider exists in placement with
# the default reserved and allocation ratio values from config.
rp_uuid = self._get_provider_uuid_by_host('fake-host')
inventories = self._get_provider_inventory(rp_uuid)
ctxt = context.get_admin_context()
# Note that the CellDatabases fixture usage means we don't need to
# target the context to cell1 even though the compute_nodes table is
# in the cell1 database.
cn = objects.ComputeNode.get_by_uuid(ctxt, rp_uuid)
ratios = {
'VCPU': cn.cpu_allocation_ratio,
'MEMORY_MB': cn.ram_allocation_ratio,
'DISK_GB': cn.disk_allocation_ratio
}
initial_ratio_conf = {
'VCPU': CONF.initial_cpu_allocation_ratio,
'MEMORY_MB': CONF.initial_ram_allocation_ratio,
'DISK_GB': CONF.initial_disk_allocation_ratio
}
for rc, ratio in ratios.items():
self.assertIn(rc, inventories)
self.assertIn('allocation_ratio', inventories[rc])
# Check the allocation_ratio values come from the new
# CONF.initial_xxx_allocation_ratio
self.assertEqual(initial_ratio_conf[rc], ratio,
'Unexpected allocation ratio for %s' % rc)
# Check the initial allocation ratio is updated to inventories
self.assertEqual(ratio, inventories[rc]['allocation_ratio'],
'Unexpected allocation ratio for %s' % rc)
def test_allocation_ratio_overwritten_from_config(self):
        # NOTE(yikun): This test case includes the steps below:
# 1. Overwrite the allocation_ratio via the placement API directly -
# run the RT.update_available_resource periodic and assert the
# allocation ratios are not overwritten from config.
#
# 2. Set the CONF.*_allocation_ratio, run the periodic, and assert
# that the config overwrites what was set via the placement API.
compute_service = self._start_compute('fake-host')
rp_uuid = self._get_provider_uuid_by_host('fake-host')
ctxt = context.get_admin_context()
rt = compute_service.manager.rt
inv = self.placement.get(
'/resource_providers/%s/inventories' % rp_uuid).body
ratios = {'VCPU': 16.1, 'MEMORY_MB': 1.6, 'DISK_GB': 1.1}
for rc, ratio in ratios.items():
inv['inventories'][rc]['allocation_ratio'] = ratio
# Overwrite the allocation_ratio via the placement API directly
self._update_inventory(rp_uuid, inv)
inv = self._get_provider_inventory(rp_uuid)
# Check inventories is updated to ratios
for rc, ratio in ratios.items():
self.assertIn(rc, inv)
self.assertIn('allocation_ratio', inv[rc])
self.assertEqual(ratio, inv[rc]['allocation_ratio'],
'Unexpected allocation ratio for %s' % rc)
# Make sure xxx_allocation_ratio is None by default
self.assertIsNone(CONF.cpu_allocation_ratio)
self.assertIsNone(CONF.ram_allocation_ratio)
self.assertIsNone(CONF.disk_allocation_ratio)
# run the RT.update_available_resource periodic
rt.update_available_resource(ctxt, 'fake-host')
# assert the allocation ratios are not overwritten from config
inv = self._get_provider_inventory(rp_uuid)
for rc, ratio in ratios.items():
self.assertIn(rc, inv)
self.assertIn('allocation_ratio', inv[rc])
self.assertEqual(ratio, inv[rc]['allocation_ratio'],
'Unexpected allocation ratio for %s' % rc)
# set the CONF.*_allocation_ratio
self.flags(cpu_allocation_ratio=15.9)
self.flags(ram_allocation_ratio=1.4)
self.flags(disk_allocation_ratio=0.9)
# run the RT.update_available_resource periodic
rt.update_available_resource(ctxt, 'fake-host')
inv = self._get_provider_inventory(rp_uuid)
ratios = {
'VCPU': CONF.cpu_allocation_ratio,
'MEMORY_MB': CONF.ram_allocation_ratio,
'DISK_GB': CONF.disk_allocation_ratio
}
# assert that the config overwrites what was set via the placement API.
for rc, ratio in ratios.items():
self.assertIn(rc, inv)
self.assertIn('allocation_ratio', inv[rc])
self.assertEqual(ratio, inv[rc]['allocation_ratio'],
'Unexpected allocation ratio for %s' % rc)
class TestProviderConfig(integrated_helpers.ProviderUsageBaseTestCase):
"""Tests for adding inventories and traits to resource providers using
provider config files described in spec provider-config-file.
"""
compute_driver = 'fake.FakeDriver'
BASE_CONFIG = {
"meta": {
"schema_version": "1.0"
},
"providers": []
}
EMPTY_PROVIDER = {
"identification": {
},
"inventories": {
"additional": []
},
"traits": {
"additional": []
}
}
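    # For orientation only (not part of the original test): a config generated by
    # _create_config_entry("fake-host", id_method="name") below would look roughly
    # like the following YAML, assuming the normalize_name helpers turn "fake-host"
    # into CUSTOM_FAKE_HOST:
    #
    #   meta:
    #     schema_version: '1.0'
    #   providers:
    #   - identification:
    #       name: fake-host
    #     inventories:
    #       additional:
    #       - CUSTOM_FAKE_HOST:
    #           total: 100
    #           reserved: 0
    #           min_unit: 1
    #           max_unit: 10
    #           step_size: 1
    #           allocation_ratio: 1
    #     traits:
    #       additional:
    #       - CUSTOM_FAKE_HOST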
def setUp(self):
super().setUp()
# make a new temp dir and configure nova-compute to look for provider
# config files there
self.pconf_loc = self.useFixture(fixtures.TempDir()).path
self.flags(provider_config_location=self.pconf_loc, group='compute')
def _create_config_entry(self, id_value, id_method="uuid", cfg_file=None):
"""Adds an entry in the config file for the provider using the
requested identification method [uuid, name] with additional traits
and inventories.
"""
# if an existing config file was not passed, create a new one
if not cfg_file:
cfg_file = copy.deepcopy(self.BASE_CONFIG)
provider = copy.deepcopy(self.EMPTY_PROVIDER)
# create identification method
provider['identification'] = {id_method: id_value}
# create entries for additional traits and inventories using values
# unique to this provider entry
provider['inventories']['additional'].append({
orc.normalize_name(id_value): {
"total": 100,
"reserved": 0,
"min_unit": 1,
"max_unit": 10,
"step_size": 1,
"allocation_ratio": 1
}
})
provider['traits']['additional'].append(
os_traits.normalize_name(id_value))
# edit cfg_file in place, but return it in case this is the first call
cfg_file['providers'].append(provider)
return cfg_file
def _assert_inventory_and_traits(self, provider, config):
"""Asserts that the inventory and traits on the provider include those
defined in the provided config file. If the provider was identified
explicitly, also asserts that the $COMPUTE_NODE values are not included
on the provider.
Testing for specific inventory values is done in depth in unit tests
so here we are just checking for keys.
"""
# retrieve actual inventory and traits for the provider
actual_inventory = list(
self._get_provider_inventory(provider['uuid']).keys())
actual_traits = self._get_provider_traits(provider['uuid'])
# search config file data for expected inventory and traits
# since we also want to check for unexpected inventory,
# we also need to track compute node entries
expected_inventory, expected_traits = [], []
cn_expected_inventory, cn_expected_traits = [], []
for p_config in config['providers']:
_pid = p_config['identification']
# check for explicit uuid/name match
if _pid.get("uuid") == provider['uuid'] \
or _pid.get("name") == provider['name']:
expected_inventory = list(p_config.get(
"inventories", {}).get("additional", [])[0].keys())
expected_traits = p_config.get(
"traits", {}).get("additional", [])
# check for uuid==$COMPUTE_NODE match
elif _pid.get("uuid") == "$COMPUTE_NODE":
cn_expected_inventory = list(p_config.get(
"inventories", {}).get("additional", [])[0].keys())
cn_expected_traits = p_config.get(
"traits", {}).get("additional", [])
# if expected inventory or traits are found,
# test that they all exist in the actual inventory/traits
missing_inventory, missing_traits = None, None
unexpected_inventory, unexpected_traits = None, None
if expected_inventory or expected_traits:
missing_inventory = [key for key in expected_inventory
if key not in actual_inventory]
missing_traits = [key for key in expected_traits
if key not in actual_traits]
# if $COMPUTE_NODE values are also found,
# test that they do not exist
if cn_expected_inventory or cn_expected_traits:
unexpected_inventory = [
key for key in actual_inventory
if key in cn_expected_inventory and key
not in expected_inventory]
                unexpected_traits = [
trait for trait in cn_expected_traits
if trait in actual_traits and trait
not in expected_traits]
# if no explicit values were found, test for $COMPUTE_NODE values
elif cn_expected_inventory or cn_expected_traits:
missing_inventory = [key for key in cn_expected_inventory
if key not in actual_inventory]
missing_traits = [trait for trait in cn_expected_traits
if trait not in actual_traits]
# if no values were found, the test is broken
else:
self.fail("No expected values were found, the test is broken.")
self.assertFalse(missing_inventory,
msg="Missing inventory: %s" % missing_inventory)
self.assertFalse(unexpected_inventory,
msg="Unexpected inventory: %s" % unexpected_inventory)
self.assertFalse(missing_traits,
msg="Missing traits: %s" % missing_traits)
self.assertFalse(unexpected_traits,
msg="Unexpected traits: %s" % unexpected_traits)
def _place_config_file(self, file_name, file_data):
"""Creates a file in the provider config directory using file_name and
dumps file_data to it in yaml format.
NOTE: The file name should end in ".yaml" for Nova to recognize and
load it.
"""
with open(os.path.join(self.pconf_loc, file_name), "w") as open_file:
yaml.dump(file_data, open_file)
def test_single_config_file(self):
"""Tests that additional inventories and traits defined for a provider
are applied to the correct provider.
"""
# create a config file with both explicit name and uuid=$COMPUTE_NODE
config = self._create_config_entry("fake-host", id_method="name")
self._place_config_file("provider_config1.yaml", config)
# start nova-compute
self._start_compute("fake-host")
# test that only inventory from the explicit entry exists
provider = self._get_resource_provider_by_uuid(
self._get_provider_uuid_by_host("fake-host"))
self._assert_inventory_and_traits(provider, config)
def test_multiple_config_files(self):
"""This performs the same test as test_single_config_file but splits
the configurations into separate files.
"""
# create a config file with uuid=$COMPUTE_NODE
config1 = self._create_config_entry("$COMPUTE_NODE", id_method="uuid")
self._place_config_file("provider_config1.yaml", config1)
# create a second config file with explicit name
config2 = self._create_config_entry("fake-host", id_method="name")
self._place_config_file("provider_config2.yaml", config2)
# start nova-compute
self._start_compute("fake-host")
# test that only inventory from the explicit entry exists
provider1 = self._get_resource_provider_by_uuid(
self._get_provider_uuid_by_host("fake-host"))
self._assert_inventory_and_traits(provider1, config2)
def test_multiple_compute_nodes(self):
"""This test mimics an ironic-like environment with multiple compute
nodes. Some nodes will be updated with the uuid=$COMPUTE_NODE provider
config entries and others will use explicit name matching.
"""
# get some uuids to use as compute host names
provider_names = [uuids.cn2, uuids.cn3, uuids.cn4,
uuids.cn5, uuids.cn6, uuids.cn7]
# create config file with $COMPUTE_NODE entry
config = self._create_config_entry("$COMPUTE_NODE", id_method="uuid")
# add three explicit name entries
for provider_name in provider_names[-3:]:
self._create_config_entry(provider_name, id_method="name",
cfg_file=config)
self._place_config_file("provider.yaml", config)
# start the compute services
for provider_name in provider_names:
self._start_compute(provider_name)
# test for expected inventory and traits on each provider
for provider_name in provider_names:
self._assert_inventory_and_traits(
self._get_resource_provider_by_uuid(
self._get_provider_uuid_by_host(provider_name)),
config)
def test_end_to_end(self):
"""This test emulates a full end to end test showing that without this
feature a vm cannot be spawning using a custom trait and then start a
compute service that provides that trait.
"""
self.useFixture(nova_fixtures.NeutronFixture(self))
self.useFixture(nova_fixtures.GlanceFixture(self))
# Start nova services.
self.api = self.useFixture(nova_fixtures.OSAPIFixture(
api_version='v2.1')).admin_api
self.api.microversion = 'latest'
fake_notifier.stub_notifier(self)
self.addCleanup(fake_notifier.reset)
self.start_service('conductor')
# start nova-compute that will not have the additional trait.
self._start_compute("fake-host-1")
node_name = "fake-host-2"
# create a config file with explicit name
provider_config = self._create_config_entry(
node_name, id_method="name")
self._place_config_file("provider_config.yaml", provider_config)
self._create_flavor(
name='CUSTOM_Flavor', id=42, vcpu=4, memory_mb=4096,
disk=1024, swap=0, extra_spec={
f"trait:{os_traits.normalize_name(node_name)}": "required"
})
self._create_server(
flavor_id=42, expected_state='ERROR',
networks=[{'port': self.neutron.port_1['id']}])
# start compute node that will report the custom trait.
self._start_compute("fake-host-2")
self._create_server(
flavor_id=42, expected_state='ACTIVE',
networks=[{'port': self.neutron.port_1['id']}])
| apache-2.0 |
yehudagale/fuzzyJoiner | old/TripletLossFacenetLSTM-angular.py | 1 | 21847 | import numpy as np
import tensorflow as tf
import random as random
# import cntk as C
# """
# The below is necessary in Python 3.2.3 onwards to
# have reproducible behavior for certain hash-based operations.
# See these references for further details:
# https://docs.python.org/3.4/using/cmdline.html#envvar-PYTHONHASHSEED
# https://github.com/keras-team/keras/issues/2280#issuecomment-306959926
import os
os.environ['PYTHONHASHSEED'] = '0'
# The below is necessary for starting Numpy generated random numbers
# in a well-defined initial state.
np.random.seed(42)
# The below is necessary for starting core Python generated random numbers
# in a well-defined state.
random.seed(12345)
# Force TensorFlow to use single thread.
# Multiple threads are a potential source of
# non-reproducible results.
# For further details, see: https://stackoverflow.com/questions/42022950/which-seeds-have-to-be-set-where-to-realize-100-reproducibility-of-training-res
session_conf = tf.ConfigProto(intra_op_parallelism_threads=1, inter_op_parallelism_threads=1)
# import theano as T
from keras import backend as K
# The below tf.set_random_seed() will make random number generation
# in the TensorFlow backend have a well-defined initial state.
# For further details, see: https://www.tensorflow.org/api_docs/python/tf/set_random_seed
tf.set_random_seed(1234)
sess = tf.Session(graph=tf.get_default_graph(), config=session_conf)
K.set_session(sess)
# """
# from keras import backend as K
from keras.preprocessing.text import Tokenizer
from keras.preprocessing.sequence import pad_sequences
from keras.layers import Dense, Input, Flatten, Dropout, Lambda, GRU, Activation
from keras.layers.wrappers import Bidirectional
from keras.layers import Conv1D, MaxPooling1D, Embedding
from keras.models import Model, model_from_json, Sequential
from embeddings import KazumaCharEmbedding
from annoy import AnnoyIndex
from keras.callbacks import ModelCheckpoint, EarlyStopping
from names_cleanser import NameDataCleanser
import sys
import statistics
import argparse
#must fix
MAX_NB_WORDS = 140000
EMBEDDING_DIM = 100
MAX_SEQUENCE_LENGTH = 10
MARGIN=1
ALPHA=30
USE_GRU=True
DEBUG = False
DEBUG_DATA_LENGTH = 100
DEBUG_ANN = False
USE_ANGULAR_LOSS=True
LOSS_FUNCTION=None
TRAIN_NEIGHBOR_LEN=20
TEST_NEIGHBOR_LEN=20
EMBEDDING_TYPE = 'Kazuma'
NUM_LAYERS = 3
USE_L2_NORM = True
def f1score(positive, negative):
    # Count a triplet as correct when the anchor-positive distance is no larger than
    # the anchor-negative distance; otherwise it counts as both a false positive and
    # a false negative for this F1 estimate.
    true_positive = 0.0
    false_positive = 0
    false_negative = 0
    for i in range(len(positive)):
        if positive[i] <= negative[i]:
            true_positive += 1
        else:
            false_negative += 1
            false_positive += 1
    print('tp' + str(true_positive))
    print('fp' + str(false_positive))
    print('fn' + str(false_negative))
    fscore = (2 * true_positive) / ((2 * true_positive) + false_negative + false_positive)
    return fscore
def get_embedding_layer(tokenizer):
word_index = tokenizer.word_index
num_words = len(word_index) + 1
embedding_matrix = np.zeros((num_words, EMBEDDING_DIM))
print('about to get kz')
kz = KazumaCharEmbedding()
print('got kz')
for word, i in word_index.items():
if i >= MAX_NB_WORDS:
continue
embedding_vector = kz.emb(word)
if embedding_vector is not None:
if sum(embedding_vector) == 0:
print("failed to find embedding for:" + word)
# words not found in embedding index will be all-zeros.
embedding_matrix[i] = embedding_vector
print("Number of words:" + str(num_words))
embedding_layer = Embedding(num_words,
EMBEDDING_DIM,
weights=[embedding_matrix],
input_length=MAX_SEQUENCE_LENGTH,
trainable=False)
return embedding_layer
def get_sequences(texts, tokenizer):
sequences = {}
sequences['anchor'] = tokenizer.texts_to_sequences(texts['anchor'])
sequences['anchor'] = pad_sequences(sequences['anchor'], maxlen=MAX_SEQUENCE_LENGTH)
sequences['negative'] = tokenizer.texts_to_sequences(texts['negative'])
sequences['negative'] = pad_sequences(sequences['negative'], maxlen=MAX_SEQUENCE_LENGTH)
sequences['positive'] = tokenizer.texts_to_sequences(texts['positive'])
sequences['positive'] = pad_sequences(sequences['positive'], maxlen=MAX_SEQUENCE_LENGTH)
return sequences
def read_entities(filepath):
entities = []
with open(filepath) as fl:
for line in fl:
entities.append(line)
return entities
def read_file(file_path):
texts = {'anchor':[], 'negative':[], 'positive':[]}
fl = open(file_path, 'r')
i = 0
for line in fl:
line_array = line.split("|")
texts['anchor'].append(line_array[0])
texts['positive'].append(line_array[1])
texts['negative'].append(line_array[2])
i += 1
if i > DEBUG_DATA_LENGTH and DEBUG:
break
return texts
def split(entities, test_split = 0.2):
if DEBUG:
ents = entities[0:DEBUG_DATA_LENGTH]
else:
random.shuffle(entities)
ents = entities
num_validation_samples = int(test_split * len(ents))
return ents[:-num_validation_samples], ents[-num_validation_samples:]
"""
define a single objective function based on angular loss instead of triplet loss
"""
def angular_loss(y_true, y_pred):
alpha = K.constant(ALPHA)
a_p = y_pred[:,0,0]
n_c = y_pred[:,1,0]
    # theano is only referenced in a commented-out import above, so use tf.tan here
    return K.mean(K.maximum(K.constant(0), K.square(a_p) - K.constant(4) * K.square(tf.tan(alpha)) * K.square(n_c)))
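# For reference (illustrative, matching the distance helpers defined further down):
# with anchor a, positive p, negative n and c = (a + p) / 2, the loss above is
#   mean(max(0, ||a - p||^2 - 4 * tan^2(ALPHA) * ||n - c||^2))
# where y_pred[:,0,0] carries the a-p distance and y_pred[:,1,0] the n-c distance
# stacked by the 'stacked_dists' layer in build_model.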
"""
Facenet triplet loss function: https://arxiv.org/pdf/1503.03832.pdf
"""
def schroff_triplet_loss(y_true, y_pred):
margin = K.constant(MARGIN)
return K.mean(K.maximum(K.constant(0), K.square(y_pred[:,0,0]) - K.square(y_pred[:,1,0]) + margin))
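# Worked example for the loss above (illustrative, assuming MARGIN == 1): if the
# anchor-positive distance y_pred[:,0,0] is 0.2 and the anchor-negative distance
# y_pred[:,1,0] is 0.9, the per-triplet term is max(0, 0.2**2 - 0.9**2 + 1) = 0.23;
# it only drops to 0 once the squared negative distance exceeds the squared positive
# distance by at least the margin.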
def triplet_loss(y_true, y_pred):
# margin = K.constant(MARGIN)
# return K.mean(K.square(y_pred[:,0,0]) + K.square(margin - y_pred[:,1,0]))
margin = K.constant(MARGIN)
return K.mean(K.square(y_pred[:,0,0]) + K.square(margin - y_pred[:,1,0]))
# return K.mean(K.square(y_pred[:,0,0]) + K.square(margin - y_pred[:,1,0]) + K.square(margin - y_pred[:,2,0]))
# the following triplet loss function is from: Deep Metric Learning with Improved Triplet Loss for
# Face clustering in Videos
def improved_loss(y_true, y_pred):
margin = K.constant(MARGIN)
lambda_p = 0.02
threshold = 0.1
a_p_distance = y_pred[:,0,0]
a_n_distance = y_pred[:,1,0]
p_n_distance = y_pred[:,2,0]
phi = a_p_distance - ((a_n_distance + p_n_distance) / 2) + margin
psi = a_p_distance - threshold
return K.maximum(K.constant(0), phi) + lambda_p * K.maximum(K.constant(0), psi)
def accuracy(y_true, y_pred):
return K.mean(y_pred[:,0,0] < y_pred[:,1,0])
def l2Norm(x):
return K.l2_normalize(x, axis=-1)
def tanhNorm(x):
square_sum = K.sum(K.square(x), axis=-1, keepdims=True)
dist = K.sqrt(K.maximum(square_sum, K.epsilon()))
tanh = K.tanh(dist)
scale = tanh / dist
return x * scale
def euclidean_distance(vects):
x, y = vects
return K.sqrt(K.maximum(K.sum(K.square(x - y), axis=1, keepdims=True), K.epsilon()))
def n_c_angular_distance(vects):
x_a, x_p, x_n = vects
return K.sqrt(K.maximum(K.sum(K.square(x_n - ((x_a + x_p) / K.constant(2))), axis=1, keepdims=True), K.epsilon()))
def a_p_angular_distance(vects):
x_a, x_p, x_n = vects
return K.sqrt(K.maximum(K.sum(K.square(x_a - x_p), axis=1, keepdims=True), K.epsilon()))
def build_unique_entities(entity2same):
unique_text = []
entity2index = {}
for key in entity2same:
entity2index[key] = len(unique_text)
unique_text.append(key)
vals = entity2same[key]
for v in vals:
entity2index[v] = len(unique_text)
unique_text.append(v)
return unique_text, entity2index
def generate_triplets_from_ANN(model, sequences, entity2unique, entity2same, unique_text, test):
predictions = model.predict(sequences)
t = AnnoyIndex(len(predictions[0]), metric='euclidean') # Length of item vector that will be indexed
t.set_seed(123)
for i in range(len(predictions)):
# print(predictions[i])
v = predictions[i]
t.add_item(i, v)
t.build(100) # 100 trees
match = 0
no_match = 0
accuracy = 0
total = 0
triplets = {}
pos_distances = []
neg_distances = []
triplets['anchor'] = []
triplets['positive'] = []
triplets['negative'] = []
if test:
NNlen = TEST_NEIGHBOR_LEN
else:
NNlen = TRAIN_NEIGHBOR_LEN
for key in entity2same:
index = entity2unique[key]
nearest = t.get_nns_by_vector(predictions[index], NNlen)
nearest_text = set([unique_text[i] for i in nearest])
expected_text = set(entity2same[key])
# annoy has this annoying habit of returning the queried item back as a nearest neighbor. Remove it.
if key in nearest_text:
nearest_text.remove(key)
# print("query={} names = {} true_match = {}".format(unique_text[index], nearest_text, expected_text))
overlap = expected_text.intersection(nearest_text)
# collect up some statistics on how well we did on the match
m = len(overlap)
match += m
# since we asked for only x nearest neighbors, and we get at most x-1 neighbors that are not the same as key (!)
# make sure we adjust our estimate of no match appropriately
no_match += min(len(expected_text), NNlen - 1) - m
# sample only the negatives that are true negatives
# that is, they are not in the expected set - sampling only 'semi-hard negatives is not defined here'
# positives = expected_text - nearest_text
positives = expected_text
negatives = nearest_text - expected_text
# print(key + str(expected_text) + str(nearest_text))
for i in negatives:
for j in positives:
dist_pos = t.get_distance(index, entity2unique[j])
pos_distances.append(dist_pos)
dist_neg = t.get_distance(index, entity2unique[i])
neg_distances.append(dist_neg)
if dist_pos < dist_neg:
accuracy += 1
total += 1
# print(key + "|" + j + "|" + i)
# print(dist_pos)
# print(dist_neg)
triplets['anchor'].append(key)
triplets['positive'].append(j)
triplets['negative'].append(i)
print("mean positive distance:" + str(statistics.mean(pos_distances)))
print("stdev positive distance:" + str(statistics.stdev(pos_distances)))
print("max positive distance:" + str(max(pos_distances)))
print("mean neg distance:" + str(statistics.mean(neg_distances)))
print("stdev neg distance:" + str(statistics.stdev(neg_distances)))
print("max neg distance:" + str(max(neg_distances)))
print("Accuracy in the ANN for triplets that obey the distance func:" + str(accuracy / total))
if test:
return match/(match + no_match)
else:
return triplets, match/(match + no_match)
def generate_names(entities, limit_pairs=False):
num_names = 4
names_generator = NameDataCleanser(0, num_names, limit_pairs=limit_pairs)
entity2same = {}
for entity in entities:
ret = names_generator.cleanse_data(entity)
if ret and len(ret) >= num_names:
entity2same[ret[0]] = ret[1:]
return entity2same
def embedded_representation_model(embedding_layer):
seq = Sequential()
seq.add(embedding_layer)
seq.add(Flatten())
return seq
def get_hidden_layer(name, net, is_last):
if USE_GRU:
if is_last:
return GRU(128, activation='relu', name=name)(net)
else:
return GRU(128, return_sequences=True, activation='relu', name=name)(net)
else:
return Dense(128, activation='relu', name=name)(net)
def build_model(embedder):
main_input = Input(shape=(MAX_SEQUENCE_LENGTH,))
net = embedder(main_input)
net = GRU(128, return_sequences=True, activation='relu', name='embed')(net)
net = GRU(128, return_sequences=True, activation='relu', name='embed2')(net)
net = GRU(128, return_sequences=True, activation='relu', name='embed2a')(net)
net = GRU(128, activation='relu', name='embed3')(net)
"""
for i in range(0, NUM_LAYERS):
net = get_hidden_layer('embed' + str(i), net, False)
net = get_hidden_layer('embed_last', net, True)
"""
# if USE_L2_NORM:
net = Lambda(l2Norm, output_shape=[128])(net)
base_model = Model(embedder.input, net, name='triplet_model')
base_model.summary()
input_shape=(MAX_SEQUENCE_LENGTH,)
input_anchor = Input(shape=input_shape, name='input_anchor')
input_positive = Input(shape=input_shape, name='input_pos')
input_negative = Input(shape=input_shape, name='input_neg')
net_anchor = base_model(input_anchor)
net_positive = base_model(input_positive)
net_negative = base_model(input_negative)
positive_dist = Lambda(euclidean_distance, name='pos_dist', output_shape=(1,))([net_anchor, net_positive])
negative_dist = Lambda(euclidean_distance, name='neg_dist', output_shape=(1,))([net_anchor, net_negative])
if USE_ANGULAR_LOSS:
n_c = Lambda(n_c_angular_distance, name='nc_angular_dist')([net_anchor, net_positive, net_negative])
a_p = Lambda(a_p_angular_distance, name='ap_angular_dist')([net_anchor, net_positive, net_negative])
stacked_dists = Lambda(
lambda vects: K.stack(vects, axis=1),
name='stacked_dists', output_shape=(3, 1)
)([a_p, n_c])
model = Model([input_anchor, input_positive, input_negative], stacked_dists, name='triple_siamese')
model.compile(optimizer="rmsprop", loss=angular_loss, metrics=[accuracy])
else:
exemplar_negative_dist = Lambda(euclidean_distance, name='exemplar_neg_dist', output_shape=(1,))([net_positive, net_negative])
stacked_dists = Lambda(
# lambda vects: C.splice(*vects, axis=C.Axis.new_leading_axis()).eval(vects),
lambda vects: K.stack(vects, axis=1),
name='stacked_dists', output_shape=(3, 1)
)([positive_dist, negative_dist, exemplar_negative_dist])
model = Model([input_anchor, input_positive, input_negative], stacked_dists, name='triple_siamese')
model.compile(optimizer="rmsprop", loss=modified_loss, metrics=[accuracy])
test_positive_model = Model([input_anchor, input_positive, input_negative], positive_dist)
test_negative_model = Model([input_anchor, input_positive, input_negative], negative_dist)
inter_model = Model(input_anchor, net_anchor)
print("output_shapes")
model.summary()
# print(positive_dist.output_shape)
# print(negative_dist.output_shape)
# print(exemplar_negative_dist)
# print(neg_dist.output_shape)
return model, test_positive_model, test_negative_model, inter_model
parser = argparse.ArgumentParser(description='Run fuzzy join algorithm')
parser.add_argument('--debug_sample_size', type=int,
help='sample size for debug run')
parser.add_argument('--margin', type=int,
help='margin')
parser.add_argument('--loss_function', type=str,
help='loss function type: triplet-loss, improved-triplet-loss, modified-loss, or angular-loss')
parser.add_argument('--use_l2_norm', type=bool,
help='whether to add a l2 norm')
parser.add_argument('--test_neighbor_len', type=int,
help='size of the neighborhood for testing')
parser.add_argument('--train_neighbor_len', type=int,
help='size of the neighborhood for training')
parser.add_argument('--embedding_type', type=str, help='encoding type to use for input: Kazuma (for Kazuma character embedding) or one-hot')
parser.add_argument('--use_GRU', type=bool,
help='use GRU or default to MLP')
parser.add_argument('--num_layers', type=int,
help='num_layers to use. Minimum is 2')
parser.add_argument('--input', type=str, help='Input file')
args = parser.parse_args()
"""
LOSS_FUNCTION = None
if args.loss_function == 'triplet-loss':
LOSS_FUNCTION=schroff_triplet_loss
elif args.loss_function == 'improved_triplet_loss':
    LOSS_FUNCTION=improved_loss
elif args.loss_function == 'modified_loss':
LOSS_FUNCTION=triplet_loss
elif args.loss_function == 'angular-loss':
    USE_ANGULAR_LOSS = True
LOSS_FUNCTION = angular_loss
print('Loss function: ' + args.loss_function)
if args.debug_sample_size:
DEBUG=True
DEBUG_DATA_LENGTH=args.debug_sample_size
print('Debug data length:' + str(DEBUG_DATA_LENGTH))
MARGIN = args.margin
print('Margin:' + str(MARGIN))
TRAIN_NEIGHBOR_LEN = args.train_neighbor_len
TEST_NEIGHBOR_LEN = args.test_neighbor_len
print('Train neighbor length: ' + str(TRAIN_NEIGHBOR_LEN))
print('Test neighbor length: ' + str(TEST_NEIGHBOR_LEN))
USE_L2_NORM = args.use_l2_norm
print('Use L2Norm: ' + str(USE_L2_NORM))
EMBEDDING_TYPE = args.embedding_type
print('Embedding type: ' + EMBEDDING_TYPE)
USE_GRU = args.use_GRU
print('Use GRU: ' + str(args.use_GRU))
NUM_LAYERS = args.num_layers - 1
print('Num layers: ' + str(NUM_LAYERS))
"""
# read all entities and create positive parts of a triplet
entities = read_entities(args.input)
train, test = split(entities, test_split = .20)
print("TRAIN")
print(train)
print("TEST")
print(test)
entity2same_train = generate_names(train)
entity2same_test = generate_names(test, limit_pairs=True)
print(entity2same_train)
print(entity2same_test)
# change the default behavior of the tokenizer to ignore all punctuation except , - and . which are important
# clues for entity names
tokenizer = Tokenizer(num_words=MAX_NB_WORDS, lower=True, filters='!"#$%&()*+/:;<=>?@[\]^_`{|}~', split=" ")
# build a set of data structures useful for annoy, the set of unique entities (unique_text),
# a mapping of entities in texts to an index in unique_text, a mapping of entities to other same entities, and the actual
# vectorized representation of the text. These structures will be used iteratively as we build up the model
# so we need to create them once for re-use
unique_text, entity2unique = build_unique_entities(entity2same_train)
unique_text_test, entity2unique_test = build_unique_entities(entity2same_test)
print("train text len:" + str(len(unique_text)))
print("test text len:" + str(len(unique_text_test)))
tokenizer.fit_on_texts(unique_text + unique_text_test)
sequences = tokenizer.texts_to_sequences(unique_text)
sequences = pad_sequences(sequences, maxlen=MAX_SEQUENCE_LENGTH)
sequences_test = tokenizer.texts_to_sequences(unique_text_test)
sequences_test = pad_sequences(sequences_test, maxlen=MAX_SEQUENCE_LENGTH)
# build models
embedder = get_embedding_layer(tokenizer)
model, test_positive_model, test_negative_model, inter_model = build_model(embedder)
embedder_model = embedded_representation_model(embedder)
if DEBUG_ANN:
generate_triplets_from_ANN(embedder_model, sequences_test, entity2unique_test, entity2same_test, unique_text_test, True)
sys.exit()
test_data, test_match_stats = generate_triplets_from_ANN(embedder_model, sequences_test, entity2unique_test, entity2same_test, unique_text_test, False)
test_seq = get_sequences(test_data, tokenizer)
print("Test stats:" + str(test_match_stats))
match_stats = 0
# num_iter = 100
num_iter = 1
counter = 0
current_model = embedder_model
prev_match_stats = 0
while test_match_stats < .9 and counter < num_iter:
counter += 1
train_data, match_stats = generate_triplets_from_ANN(current_model, sequences, entity2unique, entity2same_train, unique_text, False)
print("Match stats:" + str(match_stats))
number_of_names = len(train_data['anchor'])
# print(train_data['anchor'])
print("number of names" + str(number_of_names))
Y_train = np.random.randint(2, size=(1,2,number_of_names)).T
filepath="weights.best.hdf5"
checkpoint = ModelCheckpoint(filepath, monitor='val_accuracy', verbose=1, save_best_only=True, mode='max')
early_stop = EarlyStopping(monitor='val_accuracy', patience=1, mode='max')
callbacks_list = [checkpoint, early_stop]
train_seq = get_sequences(train_data, tokenizer)
    # train for up to 100 epochs per outer iteration; the EarlyStopping callback above
    # is meant to cut this short, since this loop can run many times
model.fit([train_seq['anchor'], train_seq['positive'], train_seq['negative']], Y_train, epochs=100, batch_size=40, callbacks=callbacks_list, validation_split=0.2)
current_model = inter_model
# print some statistics on this epoch
print("training data predictions")
positives = test_positive_model.predict([train_seq['anchor'], train_seq['positive'], train_seq['negative']])
negatives = test_negative_model.predict([train_seq['anchor'], train_seq['positive'], train_seq['negative']])
print("f1score for train is: {}".format(f1score(positives, negatives)))
print("test data predictions")
positives = test_positive_model.predict([test_seq['anchor'], test_seq['positive'], test_seq['negative']])
negatives = test_negative_model.predict([test_seq['anchor'], test_seq['positive'], test_seq['negative']])
print("f1score for test is: {}".format(f1score(positives, negatives)))
test_match_stats = generate_triplets_from_ANN(current_model, sequences_test, entity2unique_test, entity2same_test, unique_text_test, True)
print("Test stats:" + str(test_match_stats))
| epl-1.0 |
AsylumConnect/asylum-connect-catalog | app/__init__.py | 2 | 2981 | import os
from flask import Flask
from flask.ext.mail import Mail
from flask.ext.sqlalchemy import SQLAlchemy
from flask.ext.login import LoginManager
from flask.ext.assets import Environment
from flask.ext.wtf import CsrfProtect
from flask.ext.compress import Compress
from flask.ext.rq import RQ
from config import config
from assets import (app_css, app_js, vendor_css, vendor_js, asylum_css,
asylum_js, asylum_scss)
basedir = os.path.abspath(os.path.dirname(__file__))
mail = Mail()
db = SQLAlchemy()
csrf = CsrfProtect()
compress = Compress()
# Set up Flask-Login
login_manager = LoginManager()
# TODO: Ideally this should be strong, but that led to bugs. Once this is
# fixed, switch protection mode back to 'strong'
login_manager.session_protection = 'basic'
login_manager.login_view = 'account.login'
def create_app(config_name):
app = Flask(__name__)
app.config.from_object(config[config_name])
config[config_name].init_app(app)
# Set up extensions
mail.init_app(app)
db.init_app(app)
login_manager.init_app(app)
csrf.init_app(app)
compress.init_app(app)
RQ(app)
# Register Jinja template functions
from utils import register_template_utils
register_template_utils(app)
# Set up asset pipeline
assets_env = Environment(app)
dirs = ['assets/styles', 'assets/scripts']
for path in dirs:
assets_env.append_path(os.path.join(basedir, path))
assets_env.url_expire = True
assets_env.register('app_css', app_css)
assets_env.register('app_js', app_js)
assets_env.register('vendor_css', vendor_css)
assets_env.register('vendor_js', vendor_js)
assets_env.register('asylum_css', asylum_css)
assets_env.register('asylum_scss', asylum_scss)
assets_env.register('asylum_js', asylum_js)
# Configure SSL if platform supports it
if not app.debug and not app.testing and not app.config['SSL_DISABLE']:
from flask.ext.sslify import SSLify
SSLify(app)
# Create app blueprints
from main import main as main_blueprint
app.register_blueprint(main_blueprint)
from account import account as account_blueprint
app.register_blueprint(account_blueprint, url_prefix='/account')
from admin import admin as admin_blueprint
app.register_blueprint(admin_blueprint, url_prefix='/admin')
from bulk_resource import bulk_resource as bulk_resource_blueprint
app.register_blueprint(
bulk_resource_blueprint, url_prefix='/bulk-resource')
from descriptor import descriptor as descriptor_blueprint
app.register_blueprint(descriptor_blueprint, url_prefix='/descriptor')
from single_resource import single_resource as single_resource_blueprint
app.register_blueprint(
single_resource_blueprint, url_prefix='/single-resource')
from suggestion import suggestion as suggestion_blueprint
app.register_blueprint(suggestion_blueprint, url_prefix='/suggestion')
return app
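# Illustrative only (not part of this module): a typical entry point such as
# manage.py would call the factory roughly like this, assuming a FLASK_CONFIG
# environment variable naming one of the keys in `config`:
#
#   app = create_app(os.getenv('FLASK_CONFIG') or 'default')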
| mit |
hynekcer/django | django/contrib/gis/forms/fields.py | 504 | 4316 | from __future__ import unicode_literals
from django import forms
from django.contrib.gis.geos import GEOSException, GEOSGeometry
from django.utils.translation import ugettext_lazy as _
from .widgets import OpenLayersWidget
class GeometryField(forms.Field):
"""
This is the basic form field for a Geometry. Any textual input that is
accepted by GEOSGeometry is accepted by this form. By default,
this includes WKT, HEXEWKB, WKB (in a buffer), and GeoJSON.
"""
widget = OpenLayersWidget
geom_type = 'GEOMETRY'
default_error_messages = {
'required': _('No geometry value provided.'),
'invalid_geom': _('Invalid geometry value.'),
'invalid_geom_type': _('Invalid geometry type.'),
'transform_error': _('An error occurred when transforming the geometry '
'to the SRID of the geometry form field.'),
}
def __init__(self, **kwargs):
# Pop out attributes from the database field, or use sensible
# defaults (e.g., allow None).
self.srid = kwargs.pop('srid', None)
self.geom_type = kwargs.pop('geom_type', self.geom_type)
super(GeometryField, self).__init__(**kwargs)
self.widget.attrs['geom_type'] = self.geom_type
def to_python(self, value):
"""
Transforms the value to a Geometry object.
"""
if value in self.empty_values:
return None
if not isinstance(value, GEOSGeometry):
try:
value = GEOSGeometry(value)
except (GEOSException, ValueError, TypeError):
raise forms.ValidationError(self.error_messages['invalid_geom'], code='invalid_geom')
# Try to set the srid
if not value.srid:
try:
value.srid = self.widget.map_srid
except AttributeError:
if self.srid:
value.srid = self.srid
return value
def clean(self, value):
"""
Validates that the input value can be converted to a Geometry
object (which is returned). A ValidationError is raised if
the value cannot be instantiated as a Geometry.
"""
geom = super(GeometryField, self).clean(value)
if geom is None:
return geom
# Ensuring that the geometry is of the correct type (indicated
# using the OGC string label).
if str(geom.geom_type).upper() != self.geom_type and not self.geom_type == 'GEOMETRY':
raise forms.ValidationError(self.error_messages['invalid_geom_type'], code='invalid_geom_type')
# Transforming the geometry if the SRID was set.
if self.srid and self.srid != -1 and self.srid != geom.srid:
try:
geom.transform(self.srid)
except GEOSException:
raise forms.ValidationError(
self.error_messages['transform_error'], code='transform_error')
return geom
def has_changed(self, initial, data):
""" Compare geographic value of data with its initial value. """
try:
data = self.to_python(data)
initial = self.to_python(initial)
except forms.ValidationError:
return True
# Only do a geographic comparison if both values are available
if initial and data:
data.transform(initial.srid)
# If the initial value was not added by the browser, the geometry
# provided may be slightly different, the first time it is saved.
# The comparison is done with a very low tolerance.
return not initial.equals_exact(data, tolerance=0.000001)
else:
# Check for change of state of existence
return bool(initial) != bool(data)
class GeometryCollectionField(GeometryField):
geom_type = 'GEOMETRYCOLLECTION'
class PointField(GeometryField):
geom_type = 'POINT'
class MultiPointField(GeometryField):
geom_type = 'MULTIPOINT'
class LineStringField(GeometryField):
geom_type = 'LINESTRING'
class MultiLineStringField(GeometryField):
geom_type = 'MULTILINESTRING'
class PolygonField(GeometryField):
geom_type = 'POLYGON'
class MultiPolygonField(GeometryField):
geom_type = 'MULTIPOLYGON'
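# A minimal usage sketch (illustrative, not part of the original module): per the
# GeometryField docstring, WKT input such as 'POINT(30 10)' validates directly.
#
#   class PlaceForm(forms.Form):
#       location = PointField(srid=4326)
#
#   form = PlaceForm({'location': 'POINT(30 10)'})
#   form.is_valid()  # True when GEOS parses the value and the geometry type matches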
| bsd-3-clause |
cyandterry/Python-Study | Ninja/Leetcode/99_Recover_Binary_Search_Tree.py | 2 | 1570 | """
Two elements of a binary search tree (BST) are swapped by mistake.
Recover the tree without changing its structure.
Note:
A solution using O(n) space is pretty straight forward. Could you devise a constant space solution?
"""
# Definition for a binary tree node
# class TreeNode:
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution:
# @param root, a tree node
# @return a tree node
def recoverTree(self, root):
self.last = None
self.wrongs = [None, None]
self.recover_helper(root)
self.wrongs[0].val, self.wrongs[1].val = self.wrongs[1].val, self.wrongs[0].val
return root
def recover_helper(self, root):
if not root:
return
self.recover_helper(root.left)
if self.last and self.last.val > root.val:
if not self.wrongs[0]:
self.wrongs[0] = self.last
self.wrongs[1] = root
self.last = root
self.recover_helper(root.right)
# Note:
# 1. This is a standard inorder traversal.
# 2. wrongs[1] is updated on every inversion, but wrongs[0] is only set once.
#    Reason: imagine [1,2,3,4,5,6] with 3 and 6 swapped, giving [1,2,6,4,5,3].
#    The inorder scan finds two inversions, 6 > 4 and 5 > 3, so the first inversion
#    should set wrongs[0] = last and the second should set wrongs[1] = root.
#    However, wrongs[1] is also set on the first inversion because with [1,2,4,3,5,6]
#    the swapped values 4 and 3 are adjacent, so there is only one inversion and both
#    entries must be filled at the same time.
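# A minimal, hypothetical usage sketch (TreeNode is only shown commented out above,
# so it is repeated here for illustration):
#
#   class TreeNode:
#       def __init__(self, x):
#           self.val = x
#           self.left = None
#           self.right = None
#
#   root = TreeNode(2)
#   root.left, root.right = TreeNode(3), TreeNode(1)  # BST [1,2,3] with 1 and 3 swapped
#   Solution().recoverTree(root)
#   assert (root.left.val, root.val, root.right.val) == (1, 2, 3)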
| mit |
gautamMalu/rootfs_xen_arndale | usr/lib/python2.7/lib-tk/tkFont.py | 36 | 6094 | # Tkinter font wrapper
#
# written by Fredrik Lundh, February 1998
#
# FIXME: should add 'displayof' option where relevant (actual, families,
# measure, and metrics)
#
__version__ = "0.9"
import Tkinter
# weight/slant
NORMAL = "normal"
ROMAN = "roman"
BOLD = "bold"
ITALIC = "italic"
def nametofont(name):
"""Given the name of a tk named font, returns a Font representation.
"""
return Font(name=name, exists=True)
class Font:
"""Represents a named font.
Constructor options are:
font -- font specifier (name, system font, or (family, size, style)-tuple)
name -- name to use for this font configuration (defaults to a unique name)
exists -- does a named font by this name already exist?
Creates a new named font if False, points to the existing font if True.
Raises _Tkinter.TclError if the assertion is false.
the following are ignored if font is specified:
family -- font 'family', e.g. Courier, Times, Helvetica
size -- font size in points
weight -- font thickness: NORMAL, BOLD
slant -- font slant: ROMAN, ITALIC
underline -- font underlining: false (0), true (1)
overstrike -- font strikeout: false (0), true (1)
"""
def _set(self, kw):
options = []
for k, v in kw.items():
options.append("-"+k)
options.append(str(v))
return tuple(options)
def _get(self, args):
options = []
for k in args:
options.append("-"+k)
return tuple(options)
def _mkdict(self, args):
options = {}
for i in range(0, len(args), 2):
options[args[i][1:]] = args[i+1]
return options
def __init__(self, root=None, font=None, name=None, exists=False, **options):
if not root:
root = Tkinter._default_root
tk = getattr(root, 'tk', root)
if font:
# get actual settings corresponding to the given font
font = tk.splitlist(tk.call("font", "actual", font))
else:
font = self._set(options)
if not name:
name = "font" + str(id(self))
self.name = name
if exists:
self.delete_font = False
# confirm font exists
if self.name not in tk.splitlist(tk.call("font", "names")):
raise Tkinter._tkinter.TclError, "named font %s does not already exist" % (self.name,)
# if font config info supplied, apply it
if font:
tk.call("font", "configure", self.name, *font)
else:
# create new font (raises TclError if the font exists)
tk.call("font", "create", self.name, *font)
self.delete_font = True
self._tk = tk
self._split = tk.splitlist
self._call = tk.call
def __str__(self):
return self.name
def __eq__(self, other):
return isinstance(other, Font) and self.name == other.name
def __getitem__(self, key):
return self.cget(key)
def __setitem__(self, key, value):
self.configure(**{key: value})
def __del__(self):
try:
if self.delete_font:
self._call("font", "delete", self.name)
except (KeyboardInterrupt, SystemExit):
raise
except Exception:
pass
def copy(self):
"Return a distinct copy of the current font"
return Font(self._tk, **self.actual())
def actual(self, option=None):
"Return actual font attributes"
if option:
return self._call("font", "actual", self.name, "-"+option)
else:
return self._mkdict(
self._split(self._call("font", "actual", self.name))
)
def cget(self, option):
"Get font attribute"
return self._call("font", "config", self.name, "-"+option)
def config(self, **options):
"Modify font attributes"
if options:
self._call("font", "config", self.name,
*self._set(options))
else:
return self._mkdict(
self._split(self._call("font", "config", self.name))
)
configure = config
def measure(self, text):
"Return text width"
return int(self._call("font", "measure", self.name, text))
def metrics(self, *options):
"""Return font metrics.
For best performance, create a dummy widget
using this font before calling this method."""
if options:
return int(
self._call("font", "metrics", self.name, self._get(options))
)
else:
res = self._split(self._call("font", "metrics", self.name))
options = {}
for i in range(0, len(res), 2):
options[res[i][1:]] = int(res[i+1])
return options
def families(root=None):
"Get font families (as a tuple)"
if not root:
root = Tkinter._default_root
return root.tk.splitlist(root.tk.call("font", "families"))
def names(root=None):
"Get names of defined fonts (as a tuple)"
if not root:
root = Tkinter._default_root
return root.tk.splitlist(root.tk.call("font", "names"))
# --------------------------------------------------------------------
# test stuff
if __name__ == "__main__":
root = Tkinter.Tk()
# create a font
f = Font(family="times", size=30, weight=NORMAL)
print f.actual()
print f.actual("family")
print f.actual("weight")
print f.config()
print f.cget("family")
print f.cget("weight")
print names()
print f.measure("hello"), f.metrics("linespace")
print f.metrics()
f = Font(font=("Courier", 20, "bold"))
print f.measure("hello"), f.metrics("linespace")
w = Tkinter.Label(root, text="Hello, world", font=f)
w.pack()
w = Tkinter.Button(root, text="Quit!", command=root.destroy)
w.pack()
fb = Font(font=w["font"]).copy()
fb.config(weight=BOLD)
w.config(font=fb)
Tkinter.mainloop()
| gpl-2.0 |
krsjoseph/youtube-dl | youtube_dl/extractor/tvc.py | 113 | 3902 | # coding: utf-8
from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..utils import (
clean_html,
int_or_none,
)
class TVCIE(InfoExtractor):
_VALID_URL = r'http://(?:www\.)?tvc\.ru/video/iframe/id/(?P<id>\d+)'
_TEST = {
'url': 'http://www.tvc.ru/video/iframe/id/74622/isPlay/false/id_stat/channel/?acc_video_id=/channel/brand/id/17/show/episodes/episode_id/39702',
'md5': 'bbc5ff531d1e90e856f60fc4b3afd708',
'info_dict': {
'id': '74622',
'ext': 'mp4',
'title': 'События. "События". Эфир от 22.05.2015 14:30',
'thumbnail': 're:^https?://.*\.jpg$',
'duration': 1122,
},
}
@classmethod
def _extract_url(cls, webpage):
mobj = re.search(
r'<iframe[^>]+?src=(["\'])(?P<url>(?:http:)?//(?:www\.)?tvc\.ru/video/iframe/id/[^"]+)\1', webpage)
if mobj:
return mobj.group('url')
def _real_extract(self, url):
video_id = self._match_id(url)
video = self._download_json(
'http://www.tvc.ru/video/json/id/%s' % video_id, video_id)
formats = []
for info in video.get('path', {}).get('quality', []):
video_url = info.get('url')
if not video_url:
continue
format_id = self._search_regex(
r'cdnvideo/([^/]+?)(?:-[^/]+?)?/', video_url,
'format id', default=None)
formats.append({
'url': video_url,
'format_id': format_id,
'width': int_or_none(info.get('width')),
'height': int_or_none(info.get('height')),
'tbr': int_or_none(info.get('bitrate')),
})
self._sort_formats(formats)
return {
'id': video_id,
'title': video['title'],
'thumbnail': video.get('picture'),
'duration': int_or_none(video.get('duration')),
'formats': formats,
}
class TVCArticleIE(InfoExtractor):
_VALID_URL = r'http://(?:www\.)?tvc\.ru/(?!video/iframe/id/)(?P<id>[^?#]+)'
_TESTS = [{
'url': 'http://www.tvc.ru/channel/brand/id/29/show/episodes/episode_id/39702/',
'info_dict': {
'id': '74622',
'ext': 'mp4',
'title': 'События. "События". Эфир от 22.05.2015 14:30',
'description': 'md5:ad7aa7db22903f983e687b8a3e98c6dd',
'thumbnail': 're:^https?://.*\.jpg$',
'duration': 1122,
},
}, {
'url': 'http://www.tvc.ru/news/show/id/69944',
'info_dict': {
'id': '75399',
'ext': 'mp4',
'title': 'Эксперты: в столице встал вопрос о максимально безопасных остановках',
'description': 'md5:f2098f71e21f309e89f69b525fd9846e',
'thumbnail': 're:^https?://.*\.jpg$',
'duration': 278,
},
}, {
'url': 'http://www.tvc.ru/channel/brand/id/47/show/episodes#',
'info_dict': {
'id': '2185',
'ext': 'mp4',
'title': 'Ещё не поздно. Эфир от 03.08.2013',
'description': 'md5:51fae9f3f8cfe67abce014e428e5b027',
'thumbnail': 're:^https?://.*\.jpg$',
'duration': 3316,
},
}]
def _real_extract(self, url):
webpage = self._download_webpage(url, self._match_id(url))
return {
'_type': 'url_transparent',
'ie_key': 'TVC',
'url': self._og_search_video_url(webpage),
'title': clean_html(self._og_search_title(webpage)),
'description': clean_html(self._og_search_description(webpage)),
'thumbnail': self._og_search_thumbnail(webpage),
}
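# Illustrative sketch (editor addition, not part of youtube-dl): these
# extractor classes are normally driven through the public YoutubeDL API
# rather than instantiated directly. The URL below comes from the _TEST
# metadata above; the option dict is a minimal assumption.
def _example_tvc_extract():
    import youtube_dl
    ydl = youtube_dl.YoutubeDL({'quiet': True})
    # extract_info() resolves the matching extractor (TVCIE here) and returns
    # the info dict built by _real_extract() without downloading the video.
    return ydl.extract_info(
        'http://www.tvc.ru/video/iframe/id/74622/isPlay/false/id_stat/channel/',
        download=False)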
| unlicense |
lupyuen/RaspberryPiImage | home/pi/GrovePi/Software/Python/others/temboo/Library/Instagram/SearchUsers.py | 5 | 3764 | # -*- coding: utf-8 -*-
###############################################################################
#
# SearchUsers
# Allows you to search for users by name.
#
# Python versions 2.6, 2.7, 3.x
#
# Copyright 2014, Temboo Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
# either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
#
#
###############################################################################
from temboo.core.choreography import Choreography
from temboo.core.choreography import InputSet
from temboo.core.choreography import ResultSet
from temboo.core.choreography import ChoreographyExecution
import json
class SearchUsers(Choreography):
def __init__(self, temboo_session):
"""
Create a new instance of the SearchUsers Choreo. A TembooSession object, containing a valid
set of Temboo credentials, must be supplied.
"""
super(SearchUsers, self).__init__(temboo_session, '/Library/Instagram/SearchUsers')
def new_input_set(self):
return SearchUsersInputSet()
def _make_result_set(self, result, path):
return SearchUsersResultSet(result, path)
def _make_execution(self, session, exec_id, path):
return SearchUsersChoreographyExecution(session, exec_id, path)
class SearchUsersInputSet(InputSet):
"""
An InputSet with methods appropriate for specifying the inputs to the SearchUsers
Choreo. The InputSet object is used to specify input parameters when executing this Choreo.
"""
def set_AccessToken(self, value):
"""
Set the value of the AccessToken input for this Choreo. ((conditional, string) The access token retrieved during the OAuth 2.0 process. Required unless you provide the ClientID.)
"""
super(SearchUsersInputSet, self)._set_input('AccessToken', value)
def set_ClientID(self, value):
"""
Set the value of the ClientID input for this Choreo. ((conditional, string) The Client ID provided by Instagram after registering your application. Required unless you provide an AccessToken.)
"""
super(SearchUsersInputSet, self)._set_input('ClientID', value)
def set_Count(self, value):
"""
Set the value of the Count input for this Choreo. ((optional, integer) The number of results to return.)
"""
super(SearchUsersInputSet, self)._set_input('Count', value)
def set_Query(self, value):
"""
Set the value of the Query input for this Choreo. ((required, string) The query string to use for the user search.)
"""
super(SearchUsersInputSet, self)._set_input('Query', value)
class SearchUsersResultSet(ResultSet):
"""
A ResultSet with methods tailored to the values returned by the SearchUsers Choreo.
The ResultSet object is used to retrieve the results of a Choreo execution.
"""
def getJSONFromString(self, str):
return json.loads(str)
def get_Response(self):
"""
Retrieve the value for the "Response" output from this Choreo execution. ((json) The response from Instagram.)
"""
return self._output.get('Response', None)
class SearchUsersChoreographyExecution(ChoreographyExecution):
def _make_result_set(self, response, path):
return SearchUsersResultSet(response, path)
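# Illustrative sketch (editor addition, not generated by Temboo): typical use
# of the SearchUsers Choreo defined above. The session credentials and the
# Instagram client id are placeholders, and the TembooSession import path
# follows the Temboo Python SDK conventions (an assumption here).
def _example_search_users():
    from temboo.core.session import TembooSession
    session = TembooSession('ACCOUNT_NAME', 'APP_KEY_NAME', 'APP_KEY_VALUE')
    choreo = SearchUsers(session)
    inputs = choreo.new_input_set()
    inputs.set_ClientID('YOUR_INSTAGRAM_CLIENT_ID')
    inputs.set_Query('python')
    # execute_with_results() runs the Choreo and returns a SearchUsersResultSet.
    results = choreo.execute_with_results(inputs)
    return results.getJSONFromString(results.get_Response())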
| apache-2.0 |
LLNL/spack | lib/spack/external/_pytest/pytester.py | 11 | 40498 | """ (disabled by default) support for testing pytest and pytest plugins. """
from __future__ import absolute_import, division, print_function
import codecs
import gc
import os
import platform
import re
import subprocess
import sys
import time
import traceback
from fnmatch import fnmatch
from weakref import WeakKeyDictionary
from _pytest.capture import MultiCapture, SysCapture
from _pytest._code import Source
import py
import pytest
from _pytest.main import Session, EXIT_OK
from _pytest.assertion.rewrite import AssertionRewritingHook
def pytest_addoption(parser):
# group = parser.getgroup("pytester", "pytester (self-tests) options")
parser.addoption('--lsof',
action="store_true", dest="lsof", default=False,
help=("run FD checks if lsof is available"))
parser.addoption('--runpytest', default="inprocess", dest="runpytest",
choices=("inprocess", "subprocess", ),
help=("run pytest sub runs in tests using an 'inprocess' "
"or 'subprocess' (python -m main) method"))
def pytest_configure(config):
# This might be called multiple times. Only take the first.
global _pytest_fullpath
try:
_pytest_fullpath
except NameError:
_pytest_fullpath = os.path.abspath(pytest.__file__.rstrip("oc"))
_pytest_fullpath = _pytest_fullpath.replace("$py.class", ".py")
if config.getvalue("lsof"):
checker = LsofFdLeakChecker()
if checker.matching_platform():
config.pluginmanager.register(checker)
class LsofFdLeakChecker(object):
def get_open_files(self):
out = self._exec_lsof()
open_files = self._parse_lsof_output(out)
return open_files
def _exec_lsof(self):
pid = os.getpid()
return py.process.cmdexec("lsof -Ffn0 -p %d" % pid)
def _parse_lsof_output(self, out):
def isopen(line):
return line.startswith('f') and ("deleted" not in line and
'mem' not in line and "txt" not in line and 'cwd' not in line)
open_files = []
for line in out.split("\n"):
if isopen(line):
fields = line.split('\0')
fd = fields[0][1:]
filename = fields[1][1:]
if filename.startswith('/'):
open_files.append((fd, filename))
return open_files
def matching_platform(self):
try:
py.process.cmdexec("lsof -v")
except (py.process.cmdexec.Error, UnicodeDecodeError):
# cmdexec may raise UnicodeDecodeError on Windows systems
# with locale other than english:
# https://bitbucket.org/pytest-dev/py/issues/66
return False
else:
return True
@pytest.hookimpl(hookwrapper=True, tryfirst=True)
def pytest_runtest_protocol(self, item):
lines1 = self.get_open_files()
yield
if hasattr(sys, "pypy_version_info"):
gc.collect()
lines2 = self.get_open_files()
new_fds = set([t[0] for t in lines2]) - set([t[0] for t in lines1])
leaked_files = [t for t in lines2 if t[0] in new_fds]
if leaked_files:
error = []
error.append("***** %s FD leakage detected" % len(leaked_files))
error.extend([str(f) for f in leaked_files])
error.append("*** Before:")
error.extend([str(f) for f in lines1])
error.append("*** After:")
error.extend([str(f) for f in lines2])
error.append(error[0])
error.append("*** function %s:%s: %s " % item.location)
error.append("See issue #2366")
item.warn('', "\n".join(error))
# XXX copied from execnet's conftest.py - needs to be merged
winpymap = {
'python2.7': r'C:\Python27\python.exe',
'python2.6': r'C:\Python26\python.exe',
'python3.1': r'C:\Python31\python.exe',
'python3.2': r'C:\Python32\python.exe',
'python3.3': r'C:\Python33\python.exe',
'python3.4': r'C:\Python34\python.exe',
'python3.5': r'C:\Python35\python.exe',
}
def getexecutable(name, cache={}):
try:
return cache[name]
except KeyError:
executable = py.path.local.sysfind(name)
if executable:
import subprocess
popen = subprocess.Popen([str(executable), "--version"],
universal_newlines=True, stderr=subprocess.PIPE)
out, err = popen.communicate()
if name == "jython":
if not err or "2.5" not in err:
executable = None
if "2.5.2" in err:
executable = None # http://bugs.jython.org/issue1790
elif popen.returncode != 0:
# Handle pyenv's 127.
executable = None
cache[name] = executable
return executable
@pytest.fixture(params=['python2.6', 'python2.7', 'python3.3', "python3.4",
'pypy', 'pypy3'])
def anypython(request):
name = request.param
executable = getexecutable(name)
if executable is None:
if sys.platform == "win32":
executable = winpymap.get(name, None)
if executable:
executable = py.path.local(executable)
if executable.check():
return executable
pytest.skip("no suitable %s found" % (name,))
return executable
# used at least by pytest-xdist plugin
@pytest.fixture
def _pytest(request):
""" Return a helper which offers a gethookrecorder(hook)
method which returns a HookRecorder instance which helps
to make assertions about called hooks.
"""
return PytestArg(request)
class PytestArg:
def __init__(self, request):
self.request = request
def gethookrecorder(self, hook):
hookrecorder = HookRecorder(hook._pm)
self.request.addfinalizer(hookrecorder.finish_recording)
return hookrecorder
def get_public_names(values):
"""Only return names from iterator values without a leading underscore."""
return [x for x in values if x[0] != "_"]
class ParsedCall:
def __init__(self, name, kwargs):
self.__dict__.update(kwargs)
self._name = name
def __repr__(self):
d = self.__dict__.copy()
del d['_name']
return "<ParsedCall %r(**%r)>" % (self._name, d)
class HookRecorder:
"""Record all hooks called in a plugin manager.
This wraps all the hook calls in the plugin manager, recording
each call before propagating the normal calls.
"""
def __init__(self, pluginmanager):
self._pluginmanager = pluginmanager
self.calls = []
def before(hook_name, hook_impls, kwargs):
self.calls.append(ParsedCall(hook_name, kwargs))
def after(outcome, hook_name, hook_impls, kwargs):
pass
self._undo_wrapping = pluginmanager.add_hookcall_monitoring(before, after)
def finish_recording(self):
self._undo_wrapping()
def getcalls(self, names):
if isinstance(names, str):
names = names.split()
return [call for call in self.calls if call._name in names]
def assert_contains(self, entries):
__tracebackhide__ = True
i = 0
entries = list(entries)
backlocals = sys._getframe(1).f_locals
while entries:
name, check = entries.pop(0)
for ind, call in enumerate(self.calls[i:]):
if call._name == name:
print("NAMEMATCH", name, call)
if eval(check, backlocals, call.__dict__):
print("CHECKERMATCH", repr(check), "->", call)
else:
print("NOCHECKERMATCH", repr(check), "-", call)
continue
i += ind + 1
break
print("NONAMEMATCH", name, "with", call)
else:
pytest.fail("could not find %r check %r" % (name, check))
def popcall(self, name):
__tracebackhide__ = True
for i, call in enumerate(self.calls):
if call._name == name:
del self.calls[i]
return call
lines = ["could not find call %r, in:" % (name,)]
lines.extend([" %s" % str(x) for x in self.calls])
pytest.fail("\n".join(lines))
def getcall(self, name):
values = self.getcalls(name)
assert len(values) == 1, (name, values)
return values[0]
# functionality for test reports
def getreports(self,
names="pytest_runtest_logreport pytest_collectreport"):
return [x.report for x in self.getcalls(names)]
def matchreport(self, inamepart="",
names="pytest_runtest_logreport pytest_collectreport", when=None):
""" return a testreport whose dotted import path matches """
values = []
for rep in self.getreports(names=names):
try:
if not when and rep.when != "call" and rep.passed:
# setup/teardown passing reports - let's ignore those
continue
except AttributeError:
pass
if when and getattr(rep, 'when', None) != when:
continue
if not inamepart or inamepart in rep.nodeid.split("::"):
values.append(rep)
if not values:
raise ValueError("could not find test report matching %r: "
"no test reports at all!" % (inamepart,))
if len(values) > 1:
raise ValueError(
"found 2 or more testreports matching %r: %s" % (inamepart, values))
return values[0]
def getfailures(self,
names='pytest_runtest_logreport pytest_collectreport'):
return [rep for rep in self.getreports(names) if rep.failed]
def getfailedcollections(self):
return self.getfailures('pytest_collectreport')
def listoutcomes(self):
passed = []
skipped = []
failed = []
for rep in self.getreports(
"pytest_collectreport pytest_runtest_logreport"):
if rep.passed:
if getattr(rep, "when", None) == "call":
passed.append(rep)
elif rep.skipped:
skipped.append(rep)
elif rep.failed:
failed.append(rep)
return passed, skipped, failed
def countoutcomes(self):
return [len(x) for x in self.listoutcomes()]
def assertoutcome(self, passed=0, skipped=0, failed=0):
realpassed, realskipped, realfailed = self.listoutcomes()
assert passed == len(realpassed)
assert skipped == len(realskipped)
assert failed == len(realfailed)
def clear(self):
self.calls[:] = []
@pytest.fixture
def linecomp(request):
return LineComp()
@pytest.fixture(name='LineMatcher')
def LineMatcher_fixture(request):
return LineMatcher
@pytest.fixture
def testdir(request, tmpdir_factory):
return Testdir(request, tmpdir_factory)
rex_outcome = re.compile(r"(\d+) ([\w-]+)")
class RunResult:
"""The result of running a command.
Attributes:
:ret: The return value.
:outlines: List of lines captured from stdout.
:errlines: List of lines captured from stderr.
:stdout: :py:class:`LineMatcher` of stdout, use ``stdout.str()`` to
reconstruct stdout or the commonly used
``stdout.fnmatch_lines()`` method.
:stderr: :py:class:`LineMatcher` of stderr.
:duration: Duration in seconds.
"""
def __init__(self, ret, outlines, errlines, duration):
self.ret = ret
self.outlines = outlines
self.errlines = errlines
self.stdout = LineMatcher(outlines)
self.stderr = LineMatcher(errlines)
self.duration = duration
def parseoutcomes(self):
""" Return a dictionary of outcomestring->num from parsing
the terminal output that the test process produced."""
for line in reversed(self.outlines):
if 'seconds' in line:
outcomes = rex_outcome.findall(line)
if outcomes:
d = {}
for num, cat in outcomes:
d[cat] = int(num)
return d
raise ValueError("Pytest terminal report not found")
def assert_outcomes(self, passed=0, skipped=0, failed=0, error=0):
""" assert that the specified outcomes appear with the respective
numbers (0 means it didn't occur) in the text output from a test run."""
d = self.parseoutcomes()
obtained = {
'passed': d.get('passed', 0),
'skipped': d.get('skipped', 0),
'failed': d.get('failed', 0),
'error': d.get('error', 0),
}
assert obtained == dict(passed=passed, skipped=skipped, failed=failed, error=error)
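# Illustrative sketch (editor addition, not part of pytest): how a plugin's
# own test suite typically consumes a RunResult through the ``testdir``
# fixture defined below. The inline test module is invented for demonstration.
def _example_runresult_usage(testdir):
    testdir.makepyfile("""
        def test_ok():
            assert 1
    """)
    result = testdir.runpytest()
    # assert_outcomes() parses the terminal summary line via parseoutcomes().
    result.assert_outcomes(passed=1)
    result.stdout.fnmatch_lines(["*1 passed*"])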
class Testdir:
"""Temporary test directory with tools to test/run pytest itself.
This is based on the ``tmpdir`` fixture but provides a number of
methods which aid with testing pytest itself. Unless
:py:meth:`chdir` is used all methods will use :py:attr:`tmpdir` as
current working directory.
Attributes:
:tmpdir: The :py:class:`py.path.local` instance of the temporary
directory.
:plugins: A list of plugins to use with :py:meth:`parseconfig` and
:py:meth:`runpytest`. Initially this is an empty list but
plugins can be added to the list. The type of items to add to
the list depends on the method which uses them so refer to them
for details.
"""
def __init__(self, request, tmpdir_factory):
self.request = request
self._mod_collections = WeakKeyDictionary()
# XXX remove duplication with tmpdir plugin
basetmp = tmpdir_factory.ensuretemp("testdir")
name = request.function.__name__
for i in range(100):
try:
tmpdir = basetmp.mkdir(name + str(i))
except py.error.EEXIST:
continue
break
self.tmpdir = tmpdir
self.plugins = []
self._savesyspath = (list(sys.path), list(sys.meta_path))
self._savemodulekeys = set(sys.modules)
self.chdir() # always chdir
self.request.addfinalizer(self.finalize)
method = self.request.config.getoption("--runpytest")
if method == "inprocess":
self._runpytest_method = self.runpytest_inprocess
elif method == "subprocess":
self._runpytest_method = self.runpytest_subprocess
def __repr__(self):
return "<Testdir %r>" % (self.tmpdir,)
def finalize(self):
"""Clean up global state artifacts.
Some methods modify the global interpreter state and this
tries to clean this up. It does not remove the temporary
directory however so it can be looked at after the test run
has finished.
"""
sys.path[:], sys.meta_path[:] = self._savesyspath
if hasattr(self, '_olddir'):
self._olddir.chdir()
self.delete_loaded_modules()
def delete_loaded_modules(self):
"""Delete modules that have been loaded during a test.
This allows the interpreter to catch module changes in case
the module is re-imported.
"""
for name in set(sys.modules).difference(self._savemodulekeys):
# some zope modules used by twisted-related tests keep internal
# state and can't be deleted; we had some trouble in the past
# with zope.interface for example
if not name.startswith("zope"):
del sys.modules[name]
def make_hook_recorder(self, pluginmanager):
"""Create a new :py:class:`HookRecorder` for a PluginManager."""
assert not hasattr(pluginmanager, "reprec")
pluginmanager.reprec = reprec = HookRecorder(pluginmanager)
self.request.addfinalizer(reprec.finish_recording)
return reprec
def chdir(self):
"""Cd into the temporary directory.
This is done automatically upon instantiation.
"""
old = self.tmpdir.chdir()
if not hasattr(self, '_olddir'):
self._olddir = old
def _makefile(self, ext, args, kwargs, encoding="utf-8"):
items = list(kwargs.items())
if args:
source = py.builtin._totext("\n").join(
map(py.builtin._totext, args)) + py.builtin._totext("\n")
basename = self.request.function.__name__
items.insert(0, (basename, source))
ret = None
for name, value in items:
p = self.tmpdir.join(name).new(ext=ext)
p.dirpath().ensure_dir()
source = Source(value)
def my_totext(s, encoding="utf-8"):
if py.builtin._isbytes(s):
s = py.builtin._totext(s, encoding=encoding)
return s
source_unicode = "\n".join([my_totext(line) for line in source.lines])
source = py.builtin._totext(source_unicode)
content = source.strip().encode(encoding) # + "\n"
# content = content.rstrip() + "\n"
p.write(content, "wb")
if ret is None:
ret = p
return ret
def makefile(self, ext, *args, **kwargs):
"""Create a new file in the testdir.
ext: The extension the file should use, including the dot.
E.g. ".py".
args: All args will be treated as strings and joined using
newlines. The result will be written as contents to the
file. The name of the file will be based on the test
function requesting this fixture.
E.g. "testdir.makefile('.txt', 'line1', 'line2')"
kwargs: Each keyword is the name of a file, while the value of
it will be written as contents of the file.
E.g. "testdir.makefile('.ini', pytest='[pytest]\naddopts=-rs\n')"
"""
return self._makefile(ext, args, kwargs)
def makeconftest(self, source):
"""Write a contest.py file with 'source' as contents."""
return self.makepyfile(conftest=source)
def makeini(self, source):
"""Write a tox.ini file with 'source' as contents."""
return self.makefile('.ini', tox=source)
def getinicfg(self, source):
"""Return the pytest section from the tox.ini config file."""
p = self.makeini(source)
return py.iniconfig.IniConfig(p)['pytest']
def makepyfile(self, *args, **kwargs):
"""Shortcut for .makefile() with a .py extension."""
return self._makefile('.py', args, kwargs)
def maketxtfile(self, *args, **kwargs):
"""Shortcut for .makefile() with a .txt extension."""
return self._makefile('.txt', args, kwargs)
def syspathinsert(self, path=None):
"""Prepend a directory to sys.path, defaults to :py:attr:`tmpdir`.
This is undone automatically after the test.
"""
if path is None:
path = self.tmpdir
sys.path.insert(0, str(path))
# a call to syspathinsert() usually means that the caller
# wants to import some dynamically created files.
# with python3 we thus invalidate import caches.
self._possibly_invalidate_import_caches()
def _possibly_invalidate_import_caches(self):
# invalidate caches if we can (py33 and above)
try:
import importlib
except ImportError:
pass
else:
if hasattr(importlib, "invalidate_caches"):
importlib.invalidate_caches()
def mkdir(self, name):
"""Create a new (sub)directory."""
return self.tmpdir.mkdir(name)
def mkpydir(self, name):
"""Create a new python package.
This creates a (sub)directory with an empty ``__init__.py``
file so that it is recognised as a python package.
"""
p = self.mkdir(name)
p.ensure("__init__.py")
return p
Session = Session
def getnode(self, config, arg):
"""Return the collection node of a file.
:param config: :py:class:`_pytest.config.Config` instance, see
:py:meth:`parseconfig` and :py:meth:`parseconfigure` to
create the configuration.
:param arg: A :py:class:`py.path.local` instance of the file.
"""
session = Session(config)
assert '::' not in str(arg)
p = py.path.local(arg)
config.hook.pytest_sessionstart(session=session)
res = session.perform_collect([str(p)], genitems=False)[0]
config.hook.pytest_sessionfinish(session=session, exitstatus=EXIT_OK)
return res
def getpathnode(self, path):
"""Return the collection node of a file.
This is like :py:meth:`getnode` but uses
:py:meth:`parseconfigure` to create the (configured) pytest
Config instance.
:param path: A :py:class:`py.path.local` instance of the file.
"""
config = self.parseconfigure(path)
session = Session(config)
x = session.fspath.bestrelpath(path)
config.hook.pytest_sessionstart(session=session)
res = session.perform_collect([x], genitems=False)[0]
config.hook.pytest_sessionfinish(session=session, exitstatus=EXIT_OK)
return res
def genitems(self, colitems):
"""Generate all test items from a collection node.
This recurses into the collection node and returns a list of
all the test items contained within.
"""
session = colitems[0].session
result = []
for colitem in colitems:
result.extend(session.genitems(colitem))
return result
def runitem(self, source):
"""Run the "test_func" Item.
The calling test instance (the class which contains the test
method) must provide a ``.getrunner()`` method which should
return a runner which can run the test protocol for a single
item, like e.g. :py:func:`_pytest.runner.runtestprotocol`.
"""
# used from runner functional tests
item = self.getitem(source)
# the test class where we are called from wants to provide the runner
testclassinstance = self.request.instance
runner = testclassinstance.getrunner()
return runner(item)
def inline_runsource(self, source, *cmdlineargs):
"""Run a test module in process using ``pytest.main()``.
This run writes "source" into a temporary file and runs
``pytest.main()`` on it, returning a :py:class:`HookRecorder`
instance for the result.
:param source: The source code of the test module.
:param cmdlineargs: Any extra command line arguments to use.
:return: :py:class:`HookRecorder` instance of the result.
"""
p = self.makepyfile(source)
values = list(cmdlineargs) + [p]
return self.inline_run(*values)
def inline_genitems(self, *args):
"""Run ``pytest.main(['--collectonly'])`` in-process.
Returns a tuple of the collected items and a
:py:class:`HookRecorder` instance.
This runs the :py:func:`pytest.main` function to run all of
pytest inside the test process itself like
:py:meth:`inline_run`. However the return value is a tuple of
the collection items and a :py:class:`HookRecorder` instance.
"""
rec = self.inline_run("--collect-only", *args)
items = [x.item for x in rec.getcalls("pytest_itemcollected")]
return items, rec
def inline_run(self, *args, **kwargs):
"""Run ``pytest.main()`` in-process, returning a HookRecorder.
This runs the :py:func:`pytest.main` function to run all of
pytest inside the test process itself. This means it can
return a :py:class:`HookRecorder` instance which gives more
detailed results from that run than can be obtained by matching
stdout/stderr from :py:meth:`runpytest`.
:param args: Any command line arguments to pass to
:py:func:`pytest.main`.
:param plugin: (keyword-only) Extra plugin instances the
``pytest.main()`` instance should use.
:return: A :py:class:`HookRecorder` instance.
"""
# When running py.test inline any plugins active in the main
# test process are already imported. So this disables the
# warning which will trigger to say they can no longer be
# re-written, which is fine as they are already re-written.
orig_warn = AssertionRewritingHook._warn_already_imported
def revert():
AssertionRewritingHook._warn_already_imported = orig_warn
self.request.addfinalizer(revert)
AssertionRewritingHook._warn_already_imported = lambda *a: None
rec = []
class Collect:
def pytest_configure(x, config):
rec.append(self.make_hook_recorder(config.pluginmanager))
plugins = kwargs.get("plugins") or []
plugins.append(Collect())
ret = pytest.main(list(args), plugins=plugins)
self.delete_loaded_modules()
if len(rec) == 1:
reprec = rec.pop()
else:
class reprec:
pass
reprec.ret = ret
# typically we reraise keyboard interrupts from the child run
# because it's our user requesting interruption of the testing
if ret == 2 and not kwargs.get("no_reraise_ctrlc"):
calls = reprec.getcalls("pytest_keyboard_interrupt")
if calls and calls[-1].excinfo.type == KeyboardInterrupt:
raise KeyboardInterrupt()
return reprec
def runpytest_inprocess(self, *args, **kwargs):
""" Return result of running pytest in-process, providing a similar
interface to what self.runpytest() provides. """
if kwargs.get("syspathinsert"):
self.syspathinsert()
now = time.time()
capture = MultiCapture(Capture=SysCapture)
capture.start_capturing()
try:
try:
reprec = self.inline_run(*args, **kwargs)
except SystemExit as e:
class reprec:
ret = e.args[0]
except Exception:
traceback.print_exc()
class reprec:
ret = 3
finally:
out, err = capture.readouterr()
capture.stop_capturing()
sys.stdout.write(out)
sys.stderr.write(err)
res = RunResult(reprec.ret,
out.split("\n"), err.split("\n"),
time.time() - now)
res.reprec = reprec
return res
def runpytest(self, *args, **kwargs):
""" Run pytest inline or in a subprocess, depending on the command line
option "--runpytest" and return a :py:class:`RunResult`.
"""
args = self._ensure_basetemp(args)
return self._runpytest_method(*args, **kwargs)
def _ensure_basetemp(self, args):
args = [str(x) for x in args]
for x in args:
if str(x).startswith('--basetemp'):
# print("basedtemp exists: %s" %(args,))
break
else:
args.append("--basetemp=%s" % self.tmpdir.dirpath('basetemp'))
# print("added basetemp: %s" %(args,))
return args
def parseconfig(self, *args):
"""Return a new pytest Config instance from given commandline args.
This invokes the pytest bootstrapping code in _pytest.config
to create a new :py:class:`_pytest.core.PluginManager` and
call the pytest_cmdline_parse hook to create new
:py:class:`_pytest.config.Config` instance.
If :py:attr:`plugins` has been populated they should be plugin
modules which will be registered with the PluginManager.
"""
args = self._ensure_basetemp(args)
import _pytest.config
config = _pytest.config._prepareconfig(args, self.plugins)
# we don't know what the test will do with this half-setup config
# object and thus we make sure it gets unconfigured properly in any
# case (otherwise capturing could still be active, for example)
self.request.addfinalizer(config._ensure_unconfigure)
return config
def parseconfigure(self, *args):
"""Return a new pytest configured Config instance.
This returns a new :py:class:`_pytest.config.Config` instance
like :py:meth:`parseconfig`, but also calls the
pytest_configure hook.
"""
config = self.parseconfig(*args)
config._do_configure()
self.request.addfinalizer(config._ensure_unconfigure)
return config
def getitem(self, source, funcname="test_func"):
"""Return the test item for a test function.
This writes the source to a python file and runs pytest's
collection on the resulting module, returning the test item
for the requested function name.
:param source: The module source.
:param funcname: The name of the test function for which the
Item must be returned.
"""
items = self.getitems(source)
for item in items:
if item.name == funcname:
return item
assert 0, "%r item not found in module:\n%s\nitems: %s" % (
funcname, source, items)
def getitems(self, source):
"""Return all test items collected from the module.
This writes the source to a python file and runs pytest's
collection on the resulting module, returning all test items
contained within.
"""
modcol = self.getmodulecol(source)
return self.genitems([modcol])
def getmodulecol(self, source, configargs=(), withinit=False):
"""Return the module collection node for ``source``.
This writes ``source`` to a file using :py:meth:`makepyfile`
and then runs the pytest collection on it, returning the
collection node for the test module.
:param source: The source code of the module to collect.
:param configargs: Any extra arguments to pass to
:py:meth:`parseconfigure`.
:param withinit: Whether to also write a ``__init__.py`` file
to the temporary directory to ensure it is a package.
"""
kw = {self.request.function.__name__: Source(source).strip()}
path = self.makepyfile(**kw)
if withinit:
self.makepyfile(__init__="#")
self.config = config = self.parseconfigure(path, *configargs)
node = self.getnode(config, path)
return node
def collect_by_name(self, modcol, name):
"""Return the collection node for name from the module collection.
This will search a module collection node for a collection
node matching the given name.
:param modcol: A module collection node, see
:py:meth:`getmodulecol`.
:param name: The name of the node to return.
"""
if modcol not in self._mod_collections:
self._mod_collections[modcol] = list(modcol.collect())
for colitem in self._mod_collections[modcol]:
if colitem.name == name:
return colitem
def popen(self, cmdargs, stdout, stderr, **kw):
"""Invoke subprocess.Popen.
This calls subprocess.Popen making sure the current working
directory is the PYTHONPATH.
You probably want to use :py:meth:`run` instead.
"""
env = os.environ.copy()
env['PYTHONPATH'] = os.pathsep.join(filter(None, [
str(os.getcwd()), env.get('PYTHONPATH', '')]))
kw['env'] = env
popen = subprocess.Popen(cmdargs, stdin=subprocess.PIPE, stdout=stdout, stderr=stderr, **kw)
popen.stdin.close()
return popen
def run(self, *cmdargs):
"""Run a command with arguments.
Run a process using subprocess.Popen saving the stdout and
stderr.
Returns a :py:class:`RunResult`.
"""
return self._run(*cmdargs)
def _run(self, *cmdargs):
cmdargs = [str(x) for x in cmdargs]
p1 = self.tmpdir.join("stdout")
p2 = self.tmpdir.join("stderr")
print("running:", ' '.join(cmdargs))
print(" in:", str(py.path.local()))
f1 = codecs.open(str(p1), "w", encoding="utf8")
f2 = codecs.open(str(p2), "w", encoding="utf8")
try:
now = time.time()
popen = self.popen(cmdargs, stdout=f1, stderr=f2,
close_fds=(sys.platform != "win32"))
ret = popen.wait()
finally:
f1.close()
f2.close()
f1 = codecs.open(str(p1), "r", encoding="utf8")
f2 = codecs.open(str(p2), "r", encoding="utf8")
try:
out = f1.read().splitlines()
err = f2.read().splitlines()
finally:
f1.close()
f2.close()
self._dump_lines(out, sys.stdout)
self._dump_lines(err, sys.stderr)
return RunResult(ret, out, err, time.time() - now)
def _dump_lines(self, lines, fp):
try:
for line in lines:
print(line, file=fp)
except UnicodeEncodeError:
print("couldn't print to %s because of encoding" % (fp,))
def _getpytestargs(self):
# we cannot use "(sys.executable,script)"
# because on windows the script is e.g. a pytest.exe
return (sys.executable, _pytest_fullpath,) # noqa
def runpython(self, script):
"""Run a python script using sys.executable as interpreter.
Returns a :py:class:`RunResult`.
"""
return self.run(sys.executable, script)
def runpython_c(self, command):
"""Run python -c "command", return a :py:class:`RunResult`."""
return self.run(sys.executable, "-c", command)
def runpytest_subprocess(self, *args, **kwargs):
"""Run pytest as a subprocess with given arguments.
Any plugins added to the :py:attr:`plugins` list will be added
using the ``-p`` command line option. Additionally
``--basetemp`` is used to put any temporary files and directories
in a numbered directory prefixed with "runpytest-" so they do
not conflict with the normal numbered pytest location for
temporary files and directories.
Returns a :py:class:`RunResult`.
"""
p = py.path.local.make_numbered_dir(prefix="runpytest-",
keep=None, rootdir=self.tmpdir)
args = ('--basetemp=%s' % p, ) + args
# for x in args:
# if '--confcutdir' in str(x):
# break
# else:
# pass
# args = ('--confcutdir=.',) + args
plugins = [x for x in self.plugins if isinstance(x, str)]
if plugins:
args = ('-p', plugins[0]) + args
args = self._getpytestargs() + args
return self.run(*args)
def spawn_pytest(self, string, expect_timeout=10.0):
"""Run pytest using pexpect.
This makes sure to use the right pytest and sets up the
temporary directory locations.
The pexpect child is returned.
"""
basetemp = self.tmpdir.mkdir("temp-pexpect")
invoke = " ".join(map(str, self._getpytestargs()))
cmd = "%s --basetemp=%s %s" % (invoke, basetemp, string)
return self.spawn(cmd, expect_timeout=expect_timeout)
def spawn(self, cmd, expect_timeout=10.0):
"""Run a command using pexpect.
The pexpect child is returned.
"""
pexpect = pytest.importorskip("pexpect", "3.0")
if hasattr(sys, 'pypy_version_info') and '64' in platform.machine():
pytest.skip("pypy-64 bit not supported")
if sys.platform.startswith("freebsd"):
pytest.xfail("pexpect does not work reliably on freebsd")
logfile = self.tmpdir.join("spawn.out").open("wb")
child = pexpect.spawn(cmd, logfile=logfile)
self.request.addfinalizer(logfile.close)
child.timeout = expect_timeout
return child
def getdecoded(out):
try:
return out.decode("utf-8")
except UnicodeDecodeError:
return "INTERNAL not-utf8-decodeable, truncated string:\n%s" % (
py.io.saferepr(out),)
class LineComp:
def __init__(self):
self.stringio = py.io.TextIO()
def assert_contains_lines(self, lines2):
""" assert that lines2 are contained (linearly) in lines1.
return a list of extralines found.
"""
__tracebackhide__ = True
val = self.stringio.getvalue()
self.stringio.truncate(0)
self.stringio.seek(0)
lines1 = val.split("\n")
return LineMatcher(lines1).fnmatch_lines(lines2)
class LineMatcher:
"""Flexible matching of text.
This is a convenience class to test large texts like the output of
commands.
The constructor takes a list of lines without their trailing
newlines, i.e. ``text.splitlines()``.
"""
def __init__(self, lines):
self.lines = lines
self._log_output = []
def str(self):
"""Return the entire original text."""
return "\n".join(self.lines)
def _getlines(self, lines2):
if isinstance(lines2, str):
lines2 = Source(lines2)
if isinstance(lines2, Source):
lines2 = lines2.strip().lines
return lines2
def fnmatch_lines_random(self, lines2):
"""Check lines exist in the output.
The argument is a list of lines which have to occur in the
output, in any order. Each line can contain glob wildcards.
"""
lines2 = self._getlines(lines2)
for line in lines2:
for x in self.lines:
if line == x or fnmatch(x, line):
self._log("matched: ", repr(line))
break
else:
self._log("line %r not found in output" % line)
raise ValueError(self._log_text)
def get_lines_after(self, fnline):
"""Return all lines following the given line in the text.
The given line can contain glob wildcards.
"""
for i, line in enumerate(self.lines):
if fnline == line or fnmatch(line, fnline):
return self.lines[i + 1:]
raise ValueError("line %r not found in output" % fnline)
def _log(self, *args):
self._log_output.append(' '.join((str(x) for x in args)))
@property
def _log_text(self):
return '\n'.join(self._log_output)
def fnmatch_lines(self, lines2):
"""Search the text for matching lines.
The argument is a list of lines which have to match and can
use glob wildcards. If they do not match a pytest.fail() is
called. The matches and non-matches are also printed on
stdout.
"""
lines2 = self._getlines(lines2)
lines1 = self.lines[:]
nextline = None
extralines = []
__tracebackhide__ = True
for line in lines2:
nomatchprinted = False
while lines1:
nextline = lines1.pop(0)
if line == nextline:
self._log("exact match:", repr(line))
break
elif fnmatch(nextline, line):
self._log("fnmatch:", repr(line))
self._log(" with:", repr(nextline))
break
else:
if not nomatchprinted:
self._log("nomatch:", repr(line))
nomatchprinted = True
self._log(" and:", repr(nextline))
extralines.append(nextline)
else:
self._log("remains unmatched: %r" % (line,))
pytest.fail(self._log_text)
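# Illustrative sketch (editor addition): matching captured output with glob
# patterns as described in the class docstring. The sample lines are invented.
def _example_linematcher():
    matcher = LineMatcher(["collected 1 item", "test_foo.py .", "1 passed"])
    # fnmatch_lines() checks the patterns in order and fails the test otherwise.
    matcher.fnmatch_lines(["collected * item*", "* passed*"])
    # get_lines_after() returns everything following the first matching line.
    return matcher.get_lines_after("collected * item*")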
| lgpl-2.1 |
lmorchard/django-allauth | allauth/utils.py | 36 | 8688 | import base64
import re
import unicodedata
import json
from django.core.exceptions import ImproperlyConfigured
from django.core.validators import validate_email, ValidationError
from django.core import urlresolvers
from django.contrib.sites.models import Site
from django.db.models import FieldDoesNotExist
from django.db.models.fields import (DateTimeField, DateField,
EmailField, TimeField,
BinaryField)
from django.utils import six, dateparse
from django.utils.six.moves.urllib.parse import urlsplit
from django.core.serializers.json import DjangoJSONEncoder
try:
from django.utils.encoding import force_text, force_bytes
except ImportError:
from django.utils.encoding import force_unicode as force_text
try:
import importlib
except:
from django.utils import importlib
def _generate_unique_username_base(txts, regex=None):
username = None
regex = regex or '[^\w\s@+.-]'
for txt in txts:
if not txt:
continue
username = unicodedata.normalize('NFKD', force_text(txt))
username = username.encode('ascii', 'ignore').decode('ascii')
username = force_text(re.sub(regex, '', username).lower())
# Django allows for '@' in usernames in order to accommodate
# projects wanting to use e-mail as the username. In allauth we don't
# use this, we already have a proper place for putting e-mail
# addresses (EmailAddress), so let's not use the full e-mail
# address and only take the part leading up to the '@'.
username = username.split('@')[0]
username = username.strip()
username = re.sub('\s+', '_', username)
if username:
break
return username or 'user'
def get_username_max_length():
from .account.app_settings import USER_MODEL_USERNAME_FIELD
if USER_MODEL_USERNAME_FIELD is not None:
User = get_user_model()
max_length = User._meta.get_field(USER_MODEL_USERNAME_FIELD).max_length
else:
max_length = 0
return max_length
def generate_unique_username(txts, regex=None):
from .account.app_settings import USER_MODEL_USERNAME_FIELD
username = _generate_unique_username_base(txts, regex)
User = get_user_model()
max_length = get_username_max_length()
i = 0
while True:
try:
if i:
pfx = str(i + 1)
else:
pfx = ''
ret = username[0:max_length - len(pfx)] + pfx
query = {USER_MODEL_USERNAME_FIELD + '__iexact': ret}
User.objects.get(**query)
i += 1
except User.DoesNotExist:
return ret
def valid_email_or_none(email):
ret = None
try:
if email:
validate_email(email)
if len(email) <= EmailField().max_length:
ret = email
except ValidationError:
pass
return ret
def email_address_exists(email, exclude_user=None):
from .account import app_settings as account_settings
from .account.models import EmailAddress
emailaddresses = EmailAddress.objects
if exclude_user:
emailaddresses = emailaddresses.exclude(user=exclude_user)
ret = emailaddresses.filter(email__iexact=email).exists()
if not ret:
email_field = account_settings.USER_MODEL_EMAIL_FIELD
if email_field:
users = get_user_model().objects
if exclude_user:
users = users.exclude(pk=exclude_user.pk)
ret = users.filter(**{email_field+'__iexact': email}).exists()
return ret
def import_attribute(path):
assert isinstance(path, six.string_types)
pkg, attr = path.rsplit('.', 1)
ret = getattr(importlib.import_module(pkg), attr)
return ret
def import_callable(path_or_callable):
if not hasattr(path_or_callable, '__call__'):
ret = import_attribute(path_or_callable)
else:
ret = path_or_callable
return ret
try:
from django.contrib.auth import get_user_model
except ImportError:
# To keep compatibility with Django 1.4
def get_user_model():
from . import app_settings
from django.db.models import get_model
try:
app_label, model_name = app_settings.USER_MODEL.split('.')
except ValueError:
raise ImproperlyConfigured("AUTH_USER_MODEL must be of the"
" form 'app_label.model_name'")
user_model = get_model(app_label, model_name)
if user_model is None:
raise ImproperlyConfigured("AUTH_USER_MODEL refers to model"
" '%s' that has not been installed"
% app_settings.USER_MODEL)
return user_model
def get_current_site(request=None):
"""Wrapper around ``Site.objects.get_current`` to handle ``Site`` lookups
by request in Django >= 1.8.
:param request: optional request object
:type request: :class:`django.http.HttpRequest`
"""
# >= django 1.8
if request and hasattr(Site.objects, '_get_site_by_request'):
site = Site.objects.get_current(request=request)
else:
site = Site.objects.get_current()
return site
def resolve_url(to):
"""
Subset of django.shortcuts.resolve_url (that one is 1.5+)
"""
try:
return urlresolvers.reverse(to)
except urlresolvers.NoReverseMatch:
# If this doesn't "feel" like a URL, re-raise.
if '/' not in to and '.' not in to:
raise
# Finally, fall back and assume it's a URL
return to
def serialize_instance(instance):
"""
Since Django 1.6 items added to the session are no longer pickled,
but JSON encoded by default. We are storing partially complete models
in the session (user, account, token, ...). We cannot use standard
Django serialization, as these models are not "complete" yet.
Serialization will start complaining about missing relations et al.
"""
data = {}
for k, v in instance.__dict__.items():
if k.startswith('_') or callable(v):
continue
try:
if isinstance(instance._meta.get_field(k), BinaryField):
v = force_text(base64.b64encode(v))
except FieldDoesNotExist:
pass
data[k] = v
return json.loads(json.dumps(data, cls=DjangoJSONEncoder))
def deserialize_instance(model, data):
ret = model()
for k, v in data.items():
if v is not None:
try:
f = model._meta.get_field(k)
if isinstance(f, DateTimeField):
v = dateparse.parse_datetime(v)
elif isinstance(f, TimeField):
v = dateparse.parse_time(v)
elif isinstance(f, DateField):
v = dateparse.parse_date(v)
elif isinstance(f, BinaryField):
v = force_bytes(
base64.b64decode(
force_bytes(v)))
except FieldDoesNotExist:
pass
setattr(ret, k, v)
return ret
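# Illustrative sketch (editor addition, not part of allauth): round-tripping a
# partially populated model instance through the session-safe helpers above.
# Any concrete model (e.g. a social token) stored in the session would do.
def _example_serialize_roundtrip(instance):
    data = serialize_instance(instance)  # plain, JSON-serializable dict
    restored = deserialize_instance(instance.__class__, data)  # unsaved model instance
    return data, restored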
def set_form_field_order(form, fields_order):
if hasattr(form.fields, 'keyOrder'):
form.fields.keyOrder = fields_order
else:
# Python 2.7+
from collections import OrderedDict
assert isinstance(form.fields, OrderedDict)
form.fields = OrderedDict((f, form.fields[f])
for f in fields_order)
def build_absolute_uri(request, location, protocol=None):
"""request.build_absolute_uri() helper
Like request.build_absolute_uri, but gracefully handling
the case where request is None.
"""
from .account import app_settings as account_settings
if request is None:
site = get_current_site()
bits = urlsplit(location)
if not (bits.scheme and bits.netloc):
uri = '{proto}://{domain}{url}'.format(
proto=account_settings.DEFAULT_HTTP_PROTOCOL,
domain=site.domain,
url=location)
else:
uri = location
else:
uri = request.build_absolute_uri(location)
if protocol:
uri = protocol + ':' + uri.partition(':')[2]
return uri
def get_form_class(forms, form_id, default_form):
form_class = forms.get(form_id, default_form)
if isinstance(form_class, six.string_types):
form_class = import_attribute(form_class)
return form_class
def get_request_param(request, param, default=None):
return request.POST.get(param) or request.GET.get(param, default)
| mit |
samlaudev/LeetCode | Python/Insert Delete GetRandom O(1) - Duplicates allowed/Solution.py | 1 | 2970 | # Problem: Insert Delete GetRandom O(1) - Duplicates allowed
#
# Design a data structure that supports all following operations in average O(1) time.
#
# Note: Duplicate elements are allowed.
# 1. insert(val): Inserts an item val to the collection.
# 2. remove(val): Removes an item val from the collection if present.
# 3. getRandom: Returns a random element from the current collection of elements. The probability of each element being returned is linearly related to the number of copies of that value the collection contains.
#
# Example:
#
# // Init an empty collection.
# RandomizedCollection collection = new RandomizedCollection();
#
# // Inserts 1 to the collection. Returns true as the collection did not contain 1.
# collection.insert(1);
#
# // Inserts another 1 to the collection. Returns false as the collection contained 1. Collection now contains [1,1].
# collection.insert(1);
#
# // Inserts 2 to the collection, returns true. Collection now contains [1,1,2].
# collection.insert(2);
#
# // getRandom should return 1 with the probability 2/3, and returns 2 with the probability 1/3.
# collection.getRandom();
#
# // Removes 1 from the collection, returns true. Collection now contains [1,2].
# collection.remove(1);
#
# // getRandom should return 1 and 2 both equally likely.
# collection.getRandom();
#
################################################################################
from random import randint
from collections import defaultdict
class RandomizedCollection(object):
def __init__(self):
"""
Initialize your data structure here.
"""
self.__list = []
self.__used = defaultdict(list)
def insert(self, val):
"""
Inserts a value to the collection. Returns true if the collection did not already contain the specified element.
:type val: int
:rtype: bool
"""
has = val in self.__used
self.__list += val,
self.__used[val] += len(self.__list) - 1,
return not has
def remove(self, val):
"""
Removes a value from the collection. Returns true if the collection contained the specified element.
:type val: int
:rtype: bool
"""
if val not in self.__used:
return False
last = self.__list.pop()
self.__used[last].remove(len(self.__list))
if val != last:
index = self.__used[val].pop()
self.__used[last].append(index)
self.__list[index] = last
if not self.__used[val]:
del self.__used[val]
return True
def getRandom(self):
"""
Get a random element from the collection.
:rtype: int
"""
return self.__list[randint(0, len(self.__list) - 1)]
# Your RandomizedCollection object will be instantiated and called as such:
# obj = RandomizedCollection()
# param_1 = obj.insert(val)
# param_2 = obj.remove(val)
# param_3 = obj.getRandom()
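# Illustrative sketch (editor addition): exercising the collection as in the
# problem statement above; getRandom() results vary from run to run by design.
if __name__ == '__main__':
    collection = RandomizedCollection()
    print(collection.insert(1))    # True  - 1 was not present yet
    print(collection.insert(1))    # False - collection is now [1, 1]
    print(collection.insert(2))    # True  - collection is now [1, 1, 2]
    print(collection.getRandom())  # 1 with probability 2/3, 2 with probability 1/3
    print(collection.remove(1))    # True  - collection is now [1, 2]
    print(collection.getRandom())  # 1 and 2 equally likely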
| mit |
robovm/robovm-studio | python/lib/Lib/sets.py | 132 | 19624 | """Classes to represent arbitrary sets (including sets of sets).
This module implements sets using dictionaries whose values are
ignored. The usual operations (union, intersection, deletion, etc.)
are provided as both methods and operators.
Important: sets are not sequences! While they support 'x in s',
'len(s)', and 'for x in s', none of those operations are unique for
sequences; for example, mappings support all three as well. The
characteristic operation for sequences is subscripting with small
integers: s[i], for i in range(len(s)). Sets don't support
subscripting at all. Also, sequences allow multiple occurrences and
their elements have a definite order; sets on the other hand don't
record multiple occurrences and don't remember the order of element
insertion (which is why they don't support s[i]).
The following classes are provided:
BaseSet -- All the operations common to both mutable and immutable
sets. This is an abstract class, not meant to be directly
instantiated.
Set -- Mutable sets, subclass of BaseSet; not hashable.
ImmutableSet -- Immutable sets, subclass of BaseSet; hashable.
An iterable argument is mandatory to create an ImmutableSet.
_TemporarilyImmutableSet -- A wrapper around a Set, hashable,
giving the same hash value as the immutable set equivalent
would have. Do not use this class directly.
Only hashable objects can be added to a Set. In particular, you cannot
really add a Set as an element to another Set; if you try, what is
actually added is an ImmutableSet built from it (it compares equal to
the one you tried adding).
When you ask if `x in y' where x is a Set and y is a Set or
ImmutableSet, x is wrapped into a _TemporarilyImmutableSet z, and
what's tested is actually `z in y'.
"""
# Code history:
#
# - Greg V. Wilson wrote the first version, using a different approach
# to the mutable/immutable problem, and inheriting from dict.
#
# - Alex Martelli modified Greg's version to implement the current
# Set/ImmutableSet approach, and make the data an attribute.
#
# - Guido van Rossum rewrote much of the code, made some API changes,
# and cleaned up the docstrings.
#
# - Raymond Hettinger added a number of speedups and other
# improvements.
from __future__ import generators
try:
from itertools import ifilter, ifilterfalse
except ImportError:
# Code to make the module run under Py2.2
def ifilter(predicate, iterable):
if predicate is None:
def predicate(x):
return x
for x in iterable:
if predicate(x):
yield x
def ifilterfalse(predicate, iterable):
if predicate is None:
def predicate(x):
return x
for x in iterable:
if not predicate(x):
yield x
try:
True, False
except NameError:
True, False = (0==0, 0!=0)
__all__ = ['BaseSet', 'Set', 'ImmutableSet']
class BaseSet(object):
"""Common base class for mutable and immutable sets."""
__slots__ = ['_data']
# Constructor
def __init__(self):
"""This is an abstract class."""
# Don't call this from a concrete subclass!
if self.__class__ is BaseSet:
raise TypeError, ("BaseSet is an abstract class. "
"Use Set or ImmutableSet.")
# Standard protocols: __len__, __repr__, __str__, __iter__
def __len__(self):
"""Return the number of elements of a set."""
return len(self._data)
def __repr__(self):
"""Return string representation of a set.
This looks like 'Set([<list of elements>])'.
"""
return self._repr()
# __str__ is the same as __repr__
__str__ = __repr__
def _repr(self, sorted=False):
elements = self._data.keys()
if sorted:
elements.sort()
return '%s(%r)' % (self.__class__.__name__, elements)
def __iter__(self):
"""Return an iterator over the elements or a set.
This is the keys iterator for the underlying dict.
"""
return self._data.iterkeys()
# Three-way comparison is not supported. However, because __eq__ is
# tried before __cmp__, if Set x == Set y, x.__eq__(y) returns True and
# then cmp(x, y) returns 0 (Python doesn't actually call __cmp__ in this
# case).
def __cmp__(self, other):
raise TypeError, "can't compare sets using cmp()"
# Equality comparisons using the underlying dicts. Mixed-type comparisons
# are allowed here, where Set == z for non-Set z always returns False,
# and Set != z always True. This allows expressions like "x in y" to
# give the expected result when y is a sequence of mixed types, not
# raising a pointless TypeError just because y contains a Set, or x is
# a Set and y contains a non-set ("in" invokes only __eq__).
# Subtle: it would be nicer if __eq__ and __ne__ could return
# NotImplemented instead of True or False. Then the other comparand
# would get a chance to determine the result, and if the other comparand
# also returned NotImplemented then it would fall back to object address
# comparison (which would always return False for __eq__ and always
# True for __ne__). However, that doesn't work, because this type
# *also* implements __cmp__: if, e.g., __eq__ returns NotImplemented,
# Python tries __cmp__ next, and the __cmp__ here then raises TypeError.
def __eq__(self, other):
if isinstance(other, BaseSet):
return self._data == other._data
else:
return False
def __ne__(self, other):
if isinstance(other, BaseSet):
return self._data != other._data
else:
return True
# Copying operations
def copy(self):
"""Return a shallow copy of a set."""
result = self.__class__()
result._data.update(self._data)
return result
__copy__ = copy # For the copy module
def __deepcopy__(self, memo):
"""Return a deep copy of a set; used by copy module."""
# This pre-creates the result and inserts it in the memo
# early, in case the deep copy recurses into another reference
# to this same set. A set can't be an element of itself, but
# it can certainly contain an object that has a reference to
# itself.
from copy import deepcopy
result = self.__class__()
memo[id(self)] = result
data = result._data
value = True
for elt in self:
data[deepcopy(elt, memo)] = value
return result
# Standard set operations: union, intersection, both differences.
# Each has an operator version (e.g. __or__, invoked with |) and a
# method version (e.g. union).
# Subtle: Each pair requires distinct code so that the outcome is
# correct when the type of other isn't suitable. For example, if
# we did "union = __or__" instead, then Set().union(3) would return
# NotImplemented instead of raising TypeError (albeit that *why* it
# raises TypeError as-is is also a bit subtle).
def __or__(self, other):
"""Return the union of two sets as a new set.
(I.e. all elements that are in either set.)
"""
if not isinstance(other, BaseSet):
return NotImplemented
return self.union(other)
def union(self, other):
"""Return the union of two sets as a new set.
(I.e. all elements that are in either set.)
"""
result = self.__class__(self)
result._update(other)
return result
def __and__(self, other):
"""Return the intersection of two sets as a new set.
(I.e. all elements that are in both sets.)
"""
if not isinstance(other, BaseSet):
return NotImplemented
return self.intersection(other)
def intersection(self, other):
"""Return the intersection of two sets as a new set.
(I.e. all elements that are in both sets.)
"""
if not isinstance(other, BaseSet):
other = Set(other)
if len(self) <= len(other):
little, big = self, other
else:
little, big = other, self
common = ifilter(big._data.has_key, little)
return self.__class__(common)
def __xor__(self, other):
"""Return the symmetric difference of two sets as a new set.
(I.e. all elements that are in exactly one of the sets.)
"""
if not isinstance(other, BaseSet):
return NotImplemented
return self.symmetric_difference(other)
def symmetric_difference(self, other):
"""Return the symmetric difference of two sets as a new set.
(I.e. all elements that are in exactly one of the sets.)
"""
result = self.__class__()
data = result._data
value = True
selfdata = self._data
try:
otherdata = other._data
except AttributeError:
otherdata = Set(other)._data
for elt in ifilterfalse(otherdata.has_key, selfdata):
data[elt] = value
for elt in ifilterfalse(selfdata.has_key, otherdata):
data[elt] = value
return result
def __sub__(self, other):
"""Return the difference of two sets as a new Set.
(I.e. all elements that are in this set and not in the other.)
"""
if not isinstance(other, BaseSet):
return NotImplemented
return self.difference(other)
def difference(self, other):
"""Return the difference of two sets as a new Set.
(I.e. all elements that are in this set and not in the other.)
"""
result = self.__class__()
data = result._data
try:
otherdata = other._data
except AttributeError:
otherdata = Set(other)._data
value = True
for elt in ifilterfalse(otherdata.has_key, self):
data[elt] = value
return result
# Membership test
def __contains__(self, element):
"""Report whether an element is a member of a set.
(Called in response to the expression `element in self'.)
"""
try:
return element in self._data
except TypeError:
transform = getattr(element, "__as_temporarily_immutable__", None)
if transform is None:
raise # re-raise the TypeError exception we caught
return transform() in self._data
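    # Illustrative sketch (not part of the original module): the transform
    # fallback above lets a mutable Set be looked up in a set of
    # ImmutableSets without copying it first, e.g.
    #   >>> outer = Set([ImmutableSet([1, 2])])
    #   >>> Set([1, 2]) in outer     # -> True, via __as_temporarily_immutable__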
# Subset and superset test
def issubset(self, other):
"""Report whether another set contains this set."""
self._binary_sanity_check(other)
if len(self) > len(other): # Fast check for obvious cases
return False
for elt in ifilterfalse(other._data.has_key, self):
return False
return True
def issuperset(self, other):
"""Report whether this set contains another set."""
self._binary_sanity_check(other)
if len(self) < len(other): # Fast check for obvious cases
return False
for elt in ifilterfalse(self._data.has_key, other):
return False
return True
# Inequality comparisons using the is-subset relation.
__le__ = issubset
__ge__ = issuperset
def __lt__(self, other):
self._binary_sanity_check(other)
return len(self) < len(other) and self.issubset(other)
def __gt__(self, other):
self._binary_sanity_check(other)
return len(self) > len(other) and self.issuperset(other)
# Assorted helpers
def _binary_sanity_check(self, other):
# Check that the other argument to a binary operation is also
# a set, raising a TypeError otherwise.
if not isinstance(other, BaseSet):
raise TypeError, "Binary operation only permitted between sets"
def _compute_hash(self):
# Calculate hash code for a set by xor'ing the hash codes of
# the elements. This ensures that the hash code does not depend
# on the order in which elements are added to the set. This is
# not called __hash__ because a BaseSet should not be hashable;
# only an ImmutableSet is hashable.
result = 0
for elt in self:
result ^= hash(elt)
return result
def _update(self, iterable):
# The main loop for update() and the subclass __init__() methods.
data = self._data
# Use the fast update() method when a dictionary is available.
if isinstance(iterable, BaseSet):
data.update(iterable._data)
return
value = True
if type(iterable) in (list, tuple, xrange):
# Optimized: we know that __iter__() and next() can't
# raise TypeError, so we can move 'try:' out of the loop.
it = iter(iterable)
while True:
try:
for element in it:
data[element] = value
return
except TypeError:
transform = getattr(element, "__as_immutable__", None)
if transform is None:
raise # re-raise the TypeError exception we caught
data[transform()] = value
else:
# Safe: only catch TypeError where intended
for element in iterable:
try:
data[element] = value
except TypeError:
transform = getattr(element, "__as_immutable__", None)
if transform is None:
raise # re-raise the TypeError exception we caught
data[transform()] = value
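# Illustrative sketch (not part of the original module): the __as_immutable__
# fallback in _update() (and in Set.add() below) means a mutable Set used as
# an element is silently stored as a frozen ImmutableSet snapshot, e.g.
#   >>> inner = Set([1])
#   >>> outer = Set([inner])
#   >>> list(outer)[0]               # -> ImmutableSet([1])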
class ImmutableSet(BaseSet):
"""Immutable set class."""
__slots__ = ['_hashcode']
# BaseSet + hashing
def __init__(self, iterable=None):
"""Construct an immutable set from an optional iterable."""
self._hashcode = None
self._data = {}
if iterable is not None:
self._update(iterable)
def __hash__(self):
if self._hashcode is None:
self._hashcode = self._compute_hash()
return self._hashcode
def __getstate__(self):
return self._data, self._hashcode
def __setstate__(self, state):
self._data, self._hashcode = state
class Set(BaseSet):
""" Mutable set class."""
__slots__ = []
# BaseSet + operations requiring mutability; no hashing
def __init__(self, iterable=None):
"""Construct a set from an optional iterable."""
self._data = {}
if iterable is not None:
self._update(iterable)
def __getstate__(self):
        # Only the internal data dict needs to be pickled; __setstate__ restores it.
return self._data,
def __setstate__(self, data):
self._data, = data
def __hash__(self):
"""A Set cannot be hashed."""
# We inherit object.__hash__, so we must deny this explicitly
raise TypeError, "Can't hash a Set, only an ImmutableSet."
# In-place union, intersection, differences.
# Subtle: The xyz_update() functions deliberately return None,
# as do all mutating operations on built-in container types.
# The __xyz__ spellings have to return self, though.
def __ior__(self, other):
"""Update a set with the union of itself and another."""
self._binary_sanity_check(other)
self._data.update(other._data)
return self
def union_update(self, other):
"""Update a set with the union of itself and another."""
self._update(other)
def __iand__(self, other):
"""Update a set with the intersection of itself and another."""
self._binary_sanity_check(other)
self._data = (self & other)._data
return self
def intersection_update(self, other):
"""Update a set with the intersection of itself and another."""
if isinstance(other, BaseSet):
self &= other
else:
self._data = (self.intersection(other))._data
def __ixor__(self, other):
"""Update a set with the symmetric difference of itself and another."""
self._binary_sanity_check(other)
self.symmetric_difference_update(other)
return self
def symmetric_difference_update(self, other):
"""Update a set with the symmetric difference of itself and another."""
data = self._data
value = True
if not isinstance(other, BaseSet):
other = Set(other)
if self is other:
self.clear()
for elt in other:
if elt in data:
del data[elt]
else:
data[elt] = value
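    # Illustrative sketch (not part of the original module) of why the
    # self-check above clears the set first:
    #   >>> s = Set([1, 2])
    #   >>> s ^= s
    #   >>> s                        # -> Set([]), as expected for A ^ A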
def __isub__(self, other):
"""Remove all elements of another set from this set."""
self._binary_sanity_check(other)
self.difference_update(other)
return self
def difference_update(self, other):
"""Remove all elements of another set from this set."""
data = self._data
if not isinstance(other, BaseSet):
other = Set(other)
if self is other:
self.clear()
for elt in ifilter(data.has_key, other):
del data[elt]
# Python dict-like mass mutations: update, clear
def update(self, iterable):
"""Add all values from an iterable (such as a list or file)."""
self._update(iterable)
def clear(self):
"""Remove all elements from this set."""
self._data.clear()
# Single-element mutations: add, remove, discard
def add(self, element):
"""Add an element to a set.
This has no effect if the element is already present.
"""
try:
self._data[element] = True
except TypeError:
transform = getattr(element, "__as_immutable__", None)
if transform is None:
raise # re-raise the TypeError exception we caught
self._data[transform()] = True
def remove(self, element):
"""Remove an element from a set; it must be a member.
If the element is not a member, raise a KeyError.
"""
try:
del self._data[element]
except TypeError:
transform = getattr(element, "__as_temporarily_immutable__", None)
if transform is None:
raise # re-raise the TypeError exception we caught
del self._data[transform()]
def discard(self, element):
"""Remove an element from a set if it is a member.
If the element is not a member, do nothing.
"""
try:
self.remove(element)
except KeyError:
pass
def pop(self):
"""Remove and return an arbitrary set element."""
return self._data.popitem()[0]
def __as_immutable__(self):
# Return a copy of self as an immutable set
return ImmutableSet(self)
def __as_temporarily_immutable__(self):
# Return self wrapped in a temporarily immutable set
return _TemporarilyImmutableSet(self)
class _TemporarilyImmutableSet(BaseSet):
    # Wrap a mutable set as if it were temporarily immutable.
# This only supplies hashing and equality comparisons.
def __init__(self, set):
self._set = set
self._data = set._data # Needed by ImmutableSet.__eq__()
def __hash__(self):
return self._set._compute_hash()
| apache-2.0 |
NoUsername/PrivateNotesExperimental | notes/views.py | 7 | 4286 | #
# Copyright (c) 2009 Brad Taylor <brad@getcoded.net>
#
# This program is free software: you can redistribute it and/or modify it under
# the terms of the GNU Affero General Public License as published by the Free
# Software Foundation, either version 3 of the License, or (at your option) any
# later version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU Affero General Public License for more
# details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
from django.http import HttpResponseRedirect, HttpResponseForbidden, Http404
from django.shortcuts import render_to_response, get_object_or_404
from django.core.exceptions import ObjectDoesNotExist
from django.core.urlresolvers import reverse
from django.core.paginator import Paginator
from django.contrib.auth.models import User
from django.template import RequestContext
from django.db.models import Q
from snowy.notes.utils import note_to_html
from snowy.notes.models import *
def note_index(request, username,
template_name='notes/note_index.html'):
author = get_object_or_404(User, username=username)
enabled = author.is_active
# TODO: retrieve the last open note from the user
last_modified = Note.objects.user_viewable(request.user, author) \
.order_by('-user_modified')
if last_modified.count() > 0:
return HttpResponseRedirect(last_modified[0].get_absolute_url())
# TODO: Instruction page to tell user to either sync or create a new note
return render_to_response(template_name,
{'author': author,
'enabled': enabled,
                               # Django 1.1 templates do not support the == operator, so
                               # we do the comparison here and pass the result along
'author_is_user': username==request.user.username},
context_instance=RequestContext(request))
def note_list(request, username,
template_name='notes/note_list.html'):
author = get_object_or_404(User, username=username)
notes = Note.objects.user_viewable(request.user, author)
return render_to_response(template_name,
{'notes': notes,
'author': author},
context_instance=RequestContext(request))
def notebook_detail(request, username, notebook_id, slug,
template_name='notes/note_list.html'):
author = get_object_or_404(User, username=username)
notebook = get_object_or_404(NoteTag, pk=notebook_id)
notes = Note.objects.user_viewable(request.user, author).filter(tags__in=[notebook]).order_by("-modified")
return render_to_response(template_name,
{'notes': notes,
'author': author,
'username': username,
'notebook': notebook},
context_instance=RequestContext(request))
def note_detail(request, username, note_id, slug='',
template_name='notes/note_detail.html'):
author = get_object_or_404(User, username=username)
note = get_object_or_404(Note, pk=note_id, author=author)
if request.user != author and note.permissions == 0:
return HttpResponseForbidden()
if note.slug != slug:
return HttpResponseRedirect(note.get_absolute_url())
body = note_to_html(note, author)
try:
# Get the notebook name, if any
notebook = note.tags.get(is_notebook=True)
if notebook:
notebook = notebook.get_name_for_display()
except ObjectDoesNotExist:
notebook = []
return render_to_response(template_name,
{'title': note.title, 'note': note,
'notebook': notebook, 'body': body,
'request': request, 'author': author},
context_instance=RequestContext(request))
| agpl-3.0 |
gsmaxwell/phase_offset_rx | gnuradio-core/src/python/gnuradio/gr/qa_nlog10.py | 19 | 1497 | #!/usr/bin/env python
#
# Copyright 2005,2007,2010 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
from gnuradio import gr, gr_unittest
class test_nlog10(gr_unittest.TestCase):
def setUp (self):
self.tb = gr.top_block ()
def tearDown (self):
self.tb = None
def test_001(self):
src_data = (-10, 0, 10, 100, 1000, 10000, 100000)
expected_result = (-180, -180, 10, 20, 30, 40, 50)
src = gr.vector_source_f(src_data)
op = gr.nlog10_ff(10)
dst = gr.vector_sink_f()
self.tb.connect (src, op, dst)
self.tb.run()
result_data = dst.data()
self.assertFloatTuplesAlmostEqual (expected_result, result_data)
if __name__ == '__main__':
gr_unittest.run(test_nlog10, "test_nlog10.xml")
| gpl-3.0 |
grevutiu-gabriel/sympy | sympy/matrices/expressions/tests/test_inverse.py | 92 | 1261 | from sympy.core import symbols
from sympy.matrices.expressions import MatrixSymbol, Inverse
from sympy.matrices import eye, Identity, ShapeError
from sympy.utilities.pytest import raises
from sympy import refine, Q
n, m, l = symbols('n m l', integer=True)
A = MatrixSymbol('A', n, m)
B = MatrixSymbol('B', m, l)
C = MatrixSymbol('C', n, n)
D = MatrixSymbol('D', n, n)
E = MatrixSymbol('E', m, n)
def test_inverse():
raises(ShapeError, lambda: Inverse(A))
raises(ShapeError, lambda: Inverse(A*B))
assert Inverse(C).shape == (n, n)
assert Inverse(A*E).shape == (n, n)
assert Inverse(E*A).shape == (m, m)
assert Inverse(C).inverse() == C
assert isinstance(Inverse(Inverse(C)), Inverse)
assert C.inverse().inverse() == C
assert C.inverse()*C == Identity(C.rows)
assert Identity(n).inverse() == Identity(n)
assert (3*Identity(n)).inverse() == Identity(n)/3
# Simplifies Muls if possible (i.e. submatrices are square)
assert (C*D).inverse() == D.I*C.I
# But still works when not possible
assert isinstance((A*E).inverse(), Inverse)
assert Inverse(eye(3)).doit() == eye(3)
assert Inverse(eye(3)).doit(deep=False) == eye(3)
def test_refine():
assert refine(C.I, Q.orthogonal(C)) == C.T
| bsd-3-clause |
ArtsiomCh/tensorflow | tensorflow/contrib/learn/python/learn/estimators/composable_model_test.py | 77 | 6392 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for ComposableModel classes."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.framework.python.ops import variables as contrib_variables
from tensorflow.contrib.layers.python.layers import feature_column
from tensorflow.contrib.learn.python.learn.datasets import base
from tensorflow.contrib.learn.python.learn.estimators import composable_model
from tensorflow.contrib.learn.python.learn.estimators import estimator
from tensorflow.contrib.learn.python.learn.estimators import head as head_lib
from tensorflow.contrib.learn.python.learn.estimators import model_fn as model_fn_lib
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.ops import state_ops
from tensorflow.python.platform import test
def _iris_input_fn():
iris = base.load_iris()
return {
'feature': constant_op.constant(
iris.data, dtype=dtypes.float32)
}, constant_op.constant(
iris.target, shape=[150, 1], dtype=dtypes.int32)
def _base_model_fn(features, labels, mode, params):
model = params['model']
feature_columns = params['feature_columns']
head = params['head']
if mode == model_fn_lib.ModeKeys.TRAIN:
logits = model.build_model(features, feature_columns, is_training=True)
elif mode == model_fn_lib.ModeKeys.EVAL:
logits = model.build_model(features, feature_columns, is_training=False)
else:
raise NotImplementedError
def _train_op_fn(loss):
global_step = contrib_variables.get_global_step()
assert global_step
train_step = model.get_train_step(loss)
with ops.control_dependencies(train_step):
with ops.get_default_graph().colocate_with(global_step):
return state_ops.assign_add(global_step, 1).op
return head.create_model_fn_ops(
features=features,
mode=mode,
labels=labels,
train_op_fn=_train_op_fn,
logits=logits)
def _linear_estimator(head, feature_columns):
return estimator.Estimator(
model_fn=_base_model_fn,
params={
'model':
composable_model.LinearComposableModel(
num_label_columns=head.logits_dimension),
'feature_columns':
feature_columns,
'head':
head
})
def _joint_linear_estimator(head, feature_columns):
return estimator.Estimator(
model_fn=_base_model_fn,
params={
'model':
composable_model.LinearComposableModel(
num_label_columns=head.logits_dimension, _joint_weights=True),
'feature_columns':
feature_columns,
'head':
head
})
def _dnn_estimator(head, feature_columns, hidden_units):
return estimator.Estimator(
model_fn=_base_model_fn,
params={
'model':
composable_model.DNNComposableModel(
num_label_columns=head.logits_dimension,
hidden_units=hidden_units),
'feature_columns':
feature_columns,
'head':
head
})
class ComposableModelTest(test.TestCase):
def testLinearModel(self):
"""Tests that loss goes down with training."""
def input_fn():
return {
'age':
constant_op.constant([1]),
'language':
sparse_tensor.SparseTensor(
values=['english'], indices=[[0, 0]], dense_shape=[1, 1])
}, constant_op.constant([[1]])
language = feature_column.sparse_column_with_hash_bucket('language', 100)
age = feature_column.real_valued_column('age')
head = head_lib.multi_class_head(n_classes=2)
classifier = _linear_estimator(head, feature_columns=[age, language])
classifier.fit(input_fn=input_fn, steps=1000)
loss1 = classifier.evaluate(input_fn=input_fn, steps=1)['loss']
classifier.fit(input_fn=input_fn, steps=2000)
loss2 = classifier.evaluate(input_fn=input_fn, steps=1)['loss']
self.assertLess(loss2, loss1)
self.assertLess(loss2, 0.01)
def testJointLinearModel(self):
"""Tests that loss goes down with training."""
def input_fn():
return {
'age':
sparse_tensor.SparseTensor(
values=['1'], indices=[[0, 0]], dense_shape=[1, 1]),
'language':
sparse_tensor.SparseTensor(
values=['english'], indices=[[0, 0]], dense_shape=[1, 1])
}, constant_op.constant([[1]])
language = feature_column.sparse_column_with_hash_bucket('language', 100)
age = feature_column.sparse_column_with_hash_bucket('age', 2)
head = head_lib.multi_class_head(n_classes=2)
classifier = _joint_linear_estimator(head, feature_columns=[age, language])
classifier.fit(input_fn=input_fn, steps=1000)
loss1 = classifier.evaluate(input_fn=input_fn, steps=1)['loss']
classifier.fit(input_fn=input_fn, steps=2000)
loss2 = classifier.evaluate(input_fn=input_fn, steps=1)['loss']
self.assertLess(loss2, loss1)
self.assertLess(loss2, 0.01)
def testDNNModel(self):
"""Tests multi-class classification using matrix data as input."""
cont_features = [feature_column.real_valued_column('feature', dimension=4)]
head = head_lib.multi_class_head(n_classes=3)
classifier = _dnn_estimator(
head, feature_columns=cont_features, hidden_units=[3, 3])
classifier.fit(input_fn=_iris_input_fn, steps=1000)
classifier.evaluate(input_fn=_iris_input_fn, steps=100)
if __name__ == '__main__':
test.main()
| apache-2.0 |
cancro7/gem5 | util/encode_inst_dep_trace.py | 13 | 8604 | #!/usr/bin/env python
# Copyright (c) 2015 ARM Limited
# All rights reserved
#
# The license below extends only to copyright in the software and shall
# not be construed as granting a license to any other intellectual
# property including but not limited to intellectual property relating
# to a hardware implementation of the functionality of the software
# licensed hereunder. You may use the software subject to the license
# terms below provided that you ensure that this notice is replicated
# unmodified and in its entirety in all distributions of the software,
# modified or unmodified, in source code or in binary form.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Copyright 2008 Google Inc. All rights reserved.
# http://code.google.com/p/protobuf/
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Radhika Jagtap
#
# This script converts ASCII traces of the instruction dependency
# graph to protobuf format.
#
# The ASCII trace format uses one line per instruction with the format
# instruction sequence number, (optional) pc, (optional) weight, type,
# (optional) flags, (optional) physical addr, (optional) size, comp delay,
# (repeated) order dependencies comma-separated, and (repeated) register
# dependencies comma-separated.
#
# examples:
# seq_num,[pc],[weight,]type,[p_addr,size,flags,]comp_delay:[rob_dep]:
# [reg_dep]
# 1,35652,1,COMP,8500::
# 2,35656,1,COMP,0:,1:
# 3,35660,1,LOAD,1748752,4,74,500:,2:
# 4,35660,1,COMP,0:,3:
# 5,35664,1,COMP,3000::,4
# 6,35666,1,STORE,1748752,4,74,1000:,3:,4,5
# 7,35666,1,COMP,3000::,4
# 8,35670,1,STORE,1748748,4,74,0:,6,3:,7
# 9,35670,1,COMP,500::,7
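#
# As a reading aid (not part of the original script), the third example line
# above, "3,35660,1,LOAD,1748752,4,74,500:,2:", encodes seq_num=3, pc=35660,
# weight=1, type=LOAD, p_addr=1748752, size=4, flags=74, comp_delay=500,
# one order (rob) dependency on seq_num 2, and no register dependencies.
#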
import protolib
import sys
# Import the instruction dependency record proto definitions. If they are not
# found, attempt
# to generate them automatically. This assumes that the script is
# executed from the gem5 root.
try:
import inst_dep_record_pb2
except:
print "Did not find proto definition, attempting to generate"
from subprocess import call
error = call(['protoc', '--python_out=util', '--proto_path=src/proto',
'src/proto/inst_dep_record.proto'])
if not error:
import inst_dep_record_pb2
print "Generated proto definitions for instruction dependency record"
else:
print "Failed to import proto definitions"
exit(-1)
DepRecord = inst_dep_record_pb2.InstDepRecord
def main():
if len(sys.argv) != 3:
print "Usage: ", sys.argv[0], " <ASCII input> <protobuf output>"
exit(-1)
# Open the file in write mode
proto_out = open(sys.argv[2], 'wb')
# Open the file in read mode
try:
ascii_in = open(sys.argv[1], 'r')
except IOError:
print "Failed to open ", sys.argv[1], " for reading"
exit(-1)
# Write the magic number in 4-byte Little Endian, similar to what
# is done in src/proto/protoio.cc
proto_out.write("gem5")
    # Add the instruction dependency record header
header = inst_dep_record_pb2.InstDepRecordHeader()
header.obj_id = "Converted ASCII trace " + sys.argv[1]
# Assume the default tick rate
header.tick_freq = 1000000000
header.window_size = 120
protolib.encodeMessage(proto_out, header)
print "Creating enum name,value lookup from proto"
enumValues = {}
for namestr, valdesc in DepRecord.DESCRIPTOR.enum_values_by_name.items():
print '\t', namestr, valdesc.number
enumValues[namestr] = valdesc.number
num_records = 0
    # For each line in the ASCII trace, create a dependency record message and
    # write it to the encoded output
for line in ascii_in:
inst_info_str, rob_dep_str, reg_dep_str = (line.strip()).split(':')
inst_info_list = inst_info_str.split(',')
dep_record = DepRecord()
dep_record.seq_num = long(inst_info_list[0])
dep_record.pc = long(inst_info_list[1])
dep_record.weight = long(inst_info_list[2])
# If the type is not one of the enum values, it should be a key error
try:
dep_record.type = enumValues[inst_info_list[3]]
except KeyError:
print "Seq. num", dep_record.seq_num, "has unsupported type", \
inst_info_list[3]
exit(-1)
if dep_record.type == DepRecord.INVALID:
print "Seq. num", dep_record.seq_num, "is of INVALID type"
exit(-1)
        # If the instruction is a load or store, record the physical addr,
        # size and flags in addition to the computation delay
if dep_record.type in [DepRecord.LOAD, DepRecord.STORE]:
p_addr, size, flags, comp_delay = inst_info_list[4:8]
dep_record.p_addr = long(p_addr)
dep_record.size = int(size)
dep_record.flags = int(flags)
dep_record.comp_delay = long(comp_delay)
else:
comp_delay = inst_info_list[4]
dep_record.comp_delay = long(comp_delay)
# Parse the register and order dependencies both of which are
# repeated fields. An empty list is valid.
rob_deps = rob_dep_str.strip().split(',')
for a_dep in rob_deps:
# if the string is empty, split(',') returns 1 item: ''
# if the string is ",4", split(',') returns 2 items: '', '4'
# long('') gives error, so check if the item is non-empty
if a_dep:
dep_record.rob_dep.append(long(a_dep))
reg_deps = reg_dep_str.split(',')
for a_dep in reg_deps:
if a_dep:
dep_record.reg_dep.append(long(a_dep))
protolib.encodeMessage(proto_out, dep_record)
num_records += 1
print "Converted", num_records, "records."
# We're done
ascii_in.close()
proto_out.close()
if __name__ == "__main__":
main()
| bsd-3-clause |
mcgachey/edx-platform | common/lib/capa/capa/registry.py | 201 | 1921 | """A registry for finding classes based on tags in the class."""
class TagRegistry(object):
"""
A registry mapping tags to handlers.
(A dictionary with some extra error checking.)
"""
def __init__(self):
self._mapping = {}
def register(self, cls):
"""
Register cls as a supported tag type. It is expected to define cls.tags as a list of tags
that it implements.
        If an already-registered type has registered one of those tags, this raises ValueError.
        If there are no tags in cls.tags, this also raises ValueError.
"""
# Do all checks and complain before changing any state.
if len(cls.tags) == 0:
raise ValueError("No tags specified for class {0}".format(cls.__name__))
for tag in cls.tags:
if tag in self._mapping:
other_cls = self._mapping[tag]
if cls == other_cls:
# registering the same class multiple times seems silly, but ok
continue
raise ValueError(
"Tag {0} already registered by class {1}."
" Can't register for class {2}".format(
tag,
other_cls.__name__,
cls.__name__,
)
)
# Ok, should be good to change state now.
for t in cls.tags:
self._mapping[t] = cls
# Returning the cls means we can use this as a decorator.
return cls
def registered_tags(self):
"""
Get a list of all the tags that have been registered.
"""
return self._mapping.keys()
def get_class_for_tag(self, tag):
"""
For any tag in registered_tags(), returns the corresponding class. Otherwise, will raise
KeyError.
"""
return self._mapping[tag]
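# A minimal usage sketch (not part of this module; the handler class and tag
# name are hypothetical):
#
#   registry = TagRegistry()
#
#   @registry.register
#   class CustomResponse(object):
#       tags = ['customresponse']
#
#   registry.get_class_for_tag('customresponse')   # -> CustomResponse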
| agpl-3.0 |
egabancho/invenio | invenio/legacy/search_engine/__init__.py | 1 | 332135 | # -*- coding: utf-8 -*-
## This file is part of Invenio.
## Copyright (C) 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011, 2012, 2013, 2014 CERN.
##
## Invenio is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 2 of the
## License, or (at your option) any later version.
##
## Invenio is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with Invenio; if not, write to the Free Software Foundation, Inc.,
## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
# pylint: disable=C0301,W0703
"""Invenio Search Engine in mod_python."""
__lastupdated__ = """$Date$"""
__revision__ = "$Id$"
## import general modules:
import cgi
import cStringIO
import copy
import os
import re
import time
import string
import urllib
import urlparse
import zlib
import sys
try:
## import optional module:
import numpy
CFG_NUMPY_IMPORTABLE = True
except ImportError:
CFG_NUMPY_IMPORTABLE = False
if sys.hexversion < 0x2040000:
# pylint: disable=W0622
from sets import Set as set
# pylint: enable=W0622
from six import iteritems, string_types
## import Invenio stuff:
from invenio.base.globals import cfg
from invenio.config import \
CFG_CERN_SITE, \
CFG_INSPIRE_SITE, \
CFG_SCOAP3_SITE, \
CFG_OAI_ID_FIELD, \
CFG_WEBCOMMENT_ALLOW_REVIEWS, \
CFG_WEBSEARCH_CALL_BIBFORMAT, \
CFG_WEBSEARCH_CREATE_SIMILARLY_NAMED_AUTHORS_LINK_BOX, \
CFG_WEBSEARCH_FIELDS_CONVERT, \
CFG_WEBSEARCH_NB_RECORDS_TO_SORT, \
CFG_WEBSEARCH_SEARCH_CACHE_SIZE, \
CFG_WEBSEARCH_SEARCH_CACHE_TIMEOUT, \
CFG_WEBSEARCH_USE_MATHJAX_FOR_FORMATS, \
CFG_WEBSEARCH_USE_ALEPH_SYSNOS, \
CFG_WEBSEARCH_DEF_RECORDS_IN_GROUPS, \
CFG_WEBSEARCH_FULLTEXT_SNIPPETS, \
CFG_WEBSEARCH_DISPLAY_NEAREST_TERMS, \
CFG_WEBSEARCH_WILDCARD_LIMIT, \
CFG_WEBSEARCH_IDXPAIRS_FIELDS,\
CFG_WEBSEARCH_IDXPAIRS_EXACT_SEARCH, \
CFG_BIBUPLOAD_SERIALIZE_RECORD_STRUCTURE, \
CFG_BIBUPLOAD_EXTERNAL_SYSNO_TAG, \
CFG_BIBRANK_SHOW_DOWNLOAD_GRAPHS, \
CFG_WEBSEARCH_SYNONYM_KBRS, \
CFG_SITE_LANG, \
CFG_SITE_NAME, \
CFG_LOGDIR, \
CFG_SITE_URL, \
CFG_ACCESS_CONTROL_LEVEL_ACCOUNTS, \
CFG_SOLR_URL, \
CFG_WEBSEARCH_DETAILED_META_FORMAT, \
CFG_SITE_RECORD, \
CFG_WEBSEARCH_PREV_NEXT_HIT_LIMIT, \
CFG_WEBSEARCH_VIEWRESTRCOLL_POLICY, \
CFG_BIBSORT_BUCKETS, \
CFG_BIBSORT_ENABLED, \
CFG_XAPIAN_ENABLED, \
CFG_BIBINDEX_CHARS_PUNCTUATION, \
CFG_BASE_URL, \
CFG_BIBFORMAT_HIDDEN_TAGS
try:
from invenio.config import CFG_BIBSORT_DEFAULT_FIELD, \
CFG_BIBSORT_DEFAULT_FIELD_ORDER
except ImportError:
CFG_BIBSORT_DEFAULT_FIELD = 'latest first'
CFG_BIBSORT_DEFAULT_FIELD_ORDER = 'd'
from invenio.modules.search.errors import \
InvenioWebSearchUnknownCollectionError, \
InvenioWebSearchWildcardLimitError
from invenio.legacy.bibrecord import (get_fieldvalues,
get_fieldvalues_alephseq_like)
from .utils import record_exists
from invenio.legacy.bibrecord import create_record, record_xml_output
from invenio.legacy.bibrank.record_sorter import (
get_bibrank_methods,
is_method_valid,
rank_records as rank_records_bibrank,
rank_by_citations)
from invenio.legacy.bibrank.downloads_similarity import register_page_view_event, calculate_reading_similarity_list
from invenio.legacy.bibindex.engine_stemmer import stem
from invenio.modules.indexer.tokenizers.BibIndexDefaultTokenizer import BibIndexDefaultTokenizer
from invenio.modules.indexer.tokenizers.BibIndexCJKTokenizer import BibIndexCJKTokenizer, is_there_any_CJK_character_in_text
from invenio.legacy.bibindex.engine_utils import author_name_requires_phrase_search, \
get_field_tags
from invenio.legacy.bibindex.engine_washer import wash_index_term, lower_index_term, wash_author_name
from invenio.legacy.bibindex.engine_config import CFG_BIBINDEX_SYNONYM_MATCH_TYPE
from invenio.legacy.bibindex.adminlib import get_idx_indexer
from invenio.modules.formatter import format_record, format_records, get_output_format_content_type, create_excel
from invenio.legacy.bibrank.downloads_grapher import create_download_history_graph_and_box
from invenio.modules.knowledge.api import get_kbr_values
from invenio.legacy.miscutil.data_cacher import DataCacher
from invenio.legacy.websearch_external_collections import print_external_results_overview, perform_external_collection_search
from invenio.modules.access.control import acc_get_action_id
from invenio.modules.access.local_config import VIEWRESTRCOLL, \
CFG_ACC_GRANT_AUTHOR_RIGHTS_TO_EMAILS_IN_TAGS, \
CFG_ACC_GRANT_VIEWER_RIGHTS_TO_EMAILS_IN_TAGS
from invenio.legacy.websearch.adminlib import get_detailed_page_tabs, get_detailed_page_tabs_counts
from intbitset import intbitset
from invenio.legacy.dbquery import DatabaseError, deserialize_via_marshal, InvenioDbQueryWildcardLimitError
from invenio.modules.access.engine import acc_authorize_action
from invenio.ext.logging import register_exception
from invenio.ext.cache import cache
from invenio.utils.text import encode_for_xml, wash_for_utf8, strip_accents
from invenio.utils.html import get_mathjax_header
from invenio.utils.html import nmtoken_from_string
from invenio.legacy import bibrecord
import invenio.legacy.template
webstyle_templates = invenio.legacy.template.load('webstyle')
webcomment_templates = invenio.legacy.template.load('webcomment')
websearch_templates = invenio.legacy.template.load('websearch')
from invenio.legacy.bibrank.citation_searcher import calculate_cited_by_list, \
calculate_co_cited_with_list, get_records_with_num_cites, \
get_refersto_hitset, get_citedby_hitset, get_cited_by_list, \
get_refers_to_list, get_citers_log
from invenio.legacy.bibrank.citation_grapher import create_citation_history_graph_and_box
from invenio.legacy.bibrank.selfcites_searcher import get_self_cited_by_list, \
get_self_cited_by, \
get_self_refers_to_list
from invenio.legacy.dbquery import run_sql, run_sql_with_limit, \
wash_table_column_name, get_table_update_time
from invenio.legacy.webuser import getUid, collect_user_info, session_param_set
from invenio.legacy.webpage import pageheaderonly, pagefooteronly, create_error_box, write_warning
from invenio.base.i18n import gettext_set_language
from invenio.legacy.search_engine.query_parser import SearchQueryParenthesisedParser, \
SpiresToInvenioSyntaxConverter
from invenio.utils import apache
from invenio.legacy.miscutil.solrutils_bibindex_searcher import solr_get_bitset
from invenio.legacy.miscutil.xapianutils_bibindex_searcher import xapian_get_bitset
from invenio.modules.search import services
from invenio.legacy.websearch_external_collections import calculate_hosted_collections_results, do_calculate_hosted_collections_results
from invenio.legacy.websearch_external_collections.config import CFG_HOSTED_COLLECTION_TIMEOUT_ANTE_SEARCH
from invenio.legacy.websearch_external_collections.config import CFG_HOSTED_COLLECTION_TIMEOUT_POST_SEARCH
from invenio.legacy.websearch_external_collections.config import CFG_EXTERNAL_COLLECTION_MAXRESULTS
from invenio.legacy.bibauthorid.config import LIMIT_TO_COLLECTIONS as BIBAUTHORID_LIMIT_TO_COLLECTIONS
VIEWRESTRCOLL_ID = acc_get_action_id(VIEWRESTRCOLL)
## global vars:
cfg_nb_browse_seen_records = 100 # limit of the number of records to check when browsing certain collection
cfg_nicely_ordered_collection_list = 0 # do we propose collection list nicely ordered or alphabetical?
## precompile some often-used regexp for speed reasons:
re_word = re.compile(r'[\s]')
re_quotes = re.compile('[\'\"]')
re_doublequote = re.compile('\"')
re_logical_and = re.compile(r'\sand\s', re.I)
re_logical_or = re.compile(r'\sor\s', re.I)
re_logical_not = re.compile(r'\snot\s', re.I)
re_operators = re.compile(r'\s([\+\-\|])\s')
re_pattern_wildcards_after_spaces = re.compile(r'(\s)[\*\%]+')
re_pattern_single_quotes = re.compile("'(.*?)'")
re_pattern_double_quotes = re.compile("\"(.*?)\"")
re_pattern_parens_quotes = re.compile(r'[\'\"]{1}[^\'\"]*(\([^\'\"]*\))[^\'\"]*[\'\"]{1}')
re_pattern_regexp_quotes = re.compile(r"\/(.*?)\/")
re_pattern_spaces_after_colon = re.compile(r'(:\s+)')
re_pattern_short_words = re.compile(r'([\s\"]\w{1,3})[\*\%]+')
re_pattern_space = re.compile("__SPACE__")
re_pattern_today = re.compile(r"\$TODAY\$")
re_pattern_parens = re.compile(r'\([^\)]+\s+[^\)]+\)')
re_punctuation_followed_by_space = re.compile(CFG_BIBINDEX_CHARS_PUNCTUATION + r'\s')
## em possible values
EM_REPOSITORY = {"body": "B",
                 "header": "H",
                 "footer": "F",
                 "search_box": "S",
                 "see_also_box": "L",
                 "basket": "K",
                 "alert": "A",
                 "search_info": "I",
                 "overview": "O",
                 "all_portalboxes": "P",
                 "te_portalbox": "Pte",
                 "tp_portalbox": "Ptp",
                 "np_portalbox": "Pnp",
                 "ne_portalbox": "Pne",
                 "lt_portalbox": "Plt",
                 "rt_portalbox": "Prt",
                 "search_services": "SER"}
class RestrictedCollectionDataCacher(DataCacher):
def __init__(self):
def cache_filler():
ret = []
res = run_sql("""SELECT DISTINCT ar.value
FROM accROLE_accACTION_accARGUMENT raa JOIN accARGUMENT ar ON raa.id_accARGUMENT = ar.id
WHERE ar.keyword = 'collection' AND raa.id_accACTION = %s""", (VIEWRESTRCOLL_ID,), run_on_slave=True)
for coll in res:
ret.append(coll[0])
return ret
def timestamp_verifier():
return max(get_table_update_time('accROLE_accACTION_accARGUMENT'), get_table_update_time('accARGUMENT'))
DataCacher.__init__(self, cache_filler, timestamp_verifier)
def collection_restricted_p(collection, recreate_cache_if_needed=True):
if recreate_cache_if_needed:
restricted_collection_cache.recreate_cache_if_needed()
return collection in restricted_collection_cache.cache
try:
restricted_collection_cache.is_ok_p
except NameError:
restricted_collection_cache = RestrictedCollectionDataCacher()
def ziplist(*lists):
"""Just like zip(), but returns lists of lists instead of lists of tuples
Example:
zip([f1, f2, f3], [p1, p2, p3], [op1, op2, '']) =>
[(f1, p1, op1), (f2, p2, op2), (f3, p3, '')]
ziplist([f1, f2, f3], [p1, p2, p3], [op1, op2, '']) =>
[[f1, p1, op1], [f2, p2, op2], [f3, p3, '']]
FIXME: This is handy to have, and should live somewhere else, like
miscutil.really_useful_functions or something.
XXX: Starting in python 2.6, the same can be achieved (faster) by
using itertools.izip_longest(); when the minimum recommended Python
is bumped, we should use that instead.
"""
def l(*items):
return list(items)
return map(l, *lists)
def get_permitted_restricted_collections(user_info, recreate_cache_if_needed=True):
"""Return a list of collection that are restricted but for which the user
is authorized."""
if recreate_cache_if_needed:
restricted_collection_cache.recreate_cache_if_needed()
ret = []
auths = acc_authorize_action(
user_info,
'viewrestrcoll',
batch_args=True,
collection=restricted_collection_cache.cache
)
for collection, auth in zip(restricted_collection_cache.cache, auths):
if auth[0] == 0:
ret.append(collection)
return ret
def get_all_restricted_recids():
"""
Return the set of all the restricted recids, i.e. the ids of those records
which belong to at least one restricted collection.
"""
ret = intbitset()
for collection in restricted_collection_cache.cache:
ret |= get_collection_reclist(collection)
return ret
def get_restricted_collections_for_recid(recid, recreate_cache_if_needed=True):
"""
Return the list of restricted collection names to which recid belongs.
"""
if recreate_cache_if_needed:
restricted_collection_cache.recreate_cache_if_needed()
collection_reclist_cache.recreate_cache_if_needed()
return [collection for collection in restricted_collection_cache.cache if recid in get_collection_reclist(collection, recreate_cache_if_needed=False)]
def is_user_owner_of_record(user_info, recid):
"""
    Check if the user is the owner of the record, i.e. he is the submitter
    and/or belongs to an owner-like group authorized to 'see' the record.
@param user_info: the user_info dictionary that describe the user.
@type user_info: user_info dictionary
@param recid: the record identifier.
@type recid: positive integer
@return: True if the user is 'owner' of the record; False otherwise
@rtype: bool
"""
authorized_emails_or_group = []
for tag in CFG_ACC_GRANT_AUTHOR_RIGHTS_TO_EMAILS_IN_TAGS:
authorized_emails_or_group.extend(get_fieldvalues(recid, tag))
for email_or_group in authorized_emails_or_group:
if email_or_group in user_info['group']:
return True
email = email_or_group.strip().lower()
if user_info['email'].strip().lower() == email:
return True
if CFG_CERN_SITE:
#the egroup might be in the form egroup@cern.ch
if email_or_group.replace('@cern.ch', ' [CERN]') in user_info['group']:
return True
return False
###FIXME: This method needs to be refactored
def is_user_viewer_of_record(user_info, recid):
"""
    Check if the user is allowed to view the record based on the MARC tags
inside CFG_ACC_GRANT_VIEWER_RIGHTS_TO_EMAILS_IN_TAGS
i.e. his email is inside the 506__m tag or he is inside an e-group listed
in the 506__m tag
@param user_info: the user_info dictionary that describe the user.
@type user_info: user_info dictionary
@param recid: the record identifier.
@type recid: positive integer
@return: True if the user is 'allow to view' the record; False otherwise
@rtype: bool
"""
authorized_emails_or_group = []
for tag in CFG_ACC_GRANT_VIEWER_RIGHTS_TO_EMAILS_IN_TAGS:
authorized_emails_or_group.extend(get_fieldvalues(recid, tag))
for email_or_group in authorized_emails_or_group:
if email_or_group in user_info['group']:
return True
email = email_or_group.strip().lower()
if user_info['email'].strip().lower() == email:
return True
return False
def check_user_can_view_record(user_info, recid):
"""
Check if the user is authorized to view the given recid. The function
grants access in two cases: either user has author rights on this
record, or he has view rights to the primary collection this record
belongs to.
@param user_info: the user_info dictionary that describe the user.
@type user_info: user_info dictionary
@param recid: the record identifier.
@type recid: positive integer
@return: (0, ''), when authorization is granted, (>0, 'message') when
authorization is not granted
@rtype: (int, string)
"""
policy = CFG_WEBSEARCH_VIEWRESTRCOLL_POLICY.strip().upper()
if isinstance(recid, str):
recid = int(recid)
## At this point, either webcoll has not yet run or there are some
    ## restricted collections. Let's see first if the user owns the record.
if is_user_owner_of_record(user_info, recid):
## Perfect! It's authorized then!
return (0, '')
if is_user_viewer_of_record(user_info, recid):
## Perfect! It's authorized then!
return (0, '')
restricted_collections = get_restricted_collections_for_recid(recid, recreate_cache_if_needed=False)
if not restricted_collections and record_public_p(recid):
## The record is public and not part of any restricted collection
return (0, '')
if restricted_collections:
## If there are restricted collections the user must be authorized to all/any of them (depending on the policy)
auth_code, auth_msg = 0, ''
for collection in restricted_collections:
(auth_code, auth_msg) = acc_authorize_action(user_info, VIEWRESTRCOLL, collection=collection)
if auth_code and policy != 'ANY':
## Ouch! the user is not authorized to this collection
return (auth_code, auth_msg)
elif auth_code == 0 and policy == 'ANY':
## Good! At least one collection is authorized
return (0, '')
## Depending on the policy, the user will be either authorized or not
return auth_code, auth_msg
if is_record_in_any_collection(recid, recreate_cache_if_needed=False):
## the record is not in any restricted collection
return (0, '')
elif record_exists(recid) > 0:
## We are in the case where webcoll has not run.
## Let's authorize SUPERADMIN
(auth_code, auth_msg) = acc_authorize_action(user_info, VIEWRESTRCOLL, collection=None)
if auth_code == 0:
return (0, '')
else:
## Too bad. Let's print a nice message:
return (1, """The record you are trying to access has just been
submitted to the system and needs to be assigned to the
proper collections. It is currently restricted for security reasons
            until the assignment is fully completed. Please come back later to
properly access this record.""")
else:
        ## The record either does not exist or has been deleted.
## Let's handle these situations outside of this code.
return (0, '')
class IndexStemmingDataCacher(DataCacher):
"""
Provides cache for stemming information for word/phrase indexes.
This class is not to be used directly; use function
get_index_stemming_language() instead.
"""
def __init__(self):
def cache_filler():
try:
res = run_sql("""SELECT id, stemming_language FROM idxINDEX""")
except DatabaseError:
# database problems, return empty cache
return {}
return dict(res)
def timestamp_verifier():
return get_table_update_time('idxINDEX')
DataCacher.__init__(self, cache_filler, timestamp_verifier)
try:
index_stemming_cache.is_ok_p
except Exception:
index_stemming_cache = IndexStemmingDataCacher()
def get_index_stemming_language(index_id, recreate_cache_if_needed=True):
"""Return stemming langugage for given index."""
if recreate_cache_if_needed:
index_stemming_cache.recreate_cache_if_needed()
return index_stemming_cache.cache[index_id]
class FieldTokenizerDataCacher(DataCacher):
"""
Provides cache for tokenizer information for fields corresponding to indexes.
This class is not to be used directly; use function
get_field_tokenizer_type() instead.
"""
def __init__(self):
def cache_filler():
try:
res = run_sql("""SELECT fld.code, ind.tokenizer FROM idxINDEX AS ind, field AS fld, idxINDEX_field AS indfld WHERE ind.id = indfld.id_idxINDEX AND indfld.id_field = fld.id""")
except DatabaseError:
# database problems, return empty cache
return {}
return dict(res)
def timestamp_verifier():
return get_table_update_time('idxINDEX')
DataCacher.__init__(self, cache_filler, timestamp_verifier)
try:
field_tokenizer_cache.is_ok_p
except Exception:
field_tokenizer_cache = FieldTokenizerDataCacher()
def get_field_tokenizer_type(field_name, recreate_cache_if_needed=True):
"""Return tokenizer type for given field corresponding to an index if applicable."""
if recreate_cache_if_needed:
field_tokenizer_cache.recreate_cache_if_needed()
tokenizer = None
try:
tokenizer = field_tokenizer_cache.cache[field_name]
except KeyError:
return None
return tokenizer
class CollectionRecListDataCacher(DataCacher):
"""
Provides cache for collection reclist hitsets. This class is not
to be used directly; use function get_collection_reclist() instead.
"""
def __init__(self):
def cache_filler():
ret = {}
res = run_sql("SELECT name FROM collection")
for name in res:
ret[name[0]] = None # this will be filled later during runtime by calling get_collection_reclist(coll)
return ret
def timestamp_verifier():
return get_table_update_time('collection')
DataCacher.__init__(self, cache_filler, timestamp_verifier)
try:
if not collection_reclist_cache.is_ok_p:
raise Exception
except Exception:
collection_reclist_cache = CollectionRecListDataCacher()
def get_collection_reclist(coll, recreate_cache_if_needed=True):
"""Return hitset of recIDs that belong to the collection 'coll'."""
if recreate_cache_if_needed:
collection_reclist_cache.recreate_cache_if_needed()
if coll not in collection_reclist_cache.cache:
return intbitset() # collection does not exist; return empty set
if not collection_reclist_cache.cache[coll]:
# collection's reclist not in the cache yet, so calculate it
# and fill the cache:
reclist = intbitset()
query = "SELECT nbrecs,reclist FROM collection WHERE name=%s"
res = run_sql(query, (coll, ), 1)
if res and res[0][1]:
reclist = intbitset(res[0][1])
collection_reclist_cache.cache[coll] = reclist
# finally, return reclist:
return collection_reclist_cache.cache[coll]
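# Usage sketch (the collection name below is hypothetical, not from this module):
#   get_collection_reclist('Articles')   # -> intbitset of recIDs, cached per collection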
def get_available_output_formats(visible_only=False):
"""
Return the list of available output formats. When visible_only is
True, returns only those output formats that have visibility flag
set to 1.
"""
formats = []
query = "SELECT code,name FROM format"
if visible_only:
query += " WHERE visibility='1'"
query += " ORDER BY name ASC"
res = run_sql(query)
if res:
# propose found formats:
for code, name in res:
formats.append({'value': code,
'text': name
})
else:
formats.append({'value': 'hb',
'text': "HTML brief"
})
return formats
# Flask cache for search results.
from invenio.modules.search.cache import search_results_cache, get_search_results_cache_key
class CollectionI18nNameDataCacher(DataCacher):
"""
Provides cache for I18N collection names. This class is not to be
used directly; use function get_coll_i18nname() instead.
"""
def __init__(self):
def cache_filler():
ret = {}
try:
res = run_sql("SELECT c.name,cn.ln,cn.value FROM collectionname AS cn, collection AS c WHERE cn.id_collection=c.id AND cn.type='ln'") # ln=long name
except Exception:
# database problems
return {}
for c, ln, i18nname in res:
if i18nname:
if c not in ret:
ret[c] = {}
ret[c][ln] = i18nname
return ret
def timestamp_verifier():
return get_table_update_time('collectionname')
DataCacher.__init__(self, cache_filler, timestamp_verifier)
try:
if not collection_i18nname_cache.is_ok_p:
raise Exception
except Exception:
collection_i18nname_cache = CollectionI18nNameDataCacher()
def get_coll_i18nname(c, ln=CFG_SITE_LANG, verify_cache_timestamp=True):
"""
Return nicely formatted collection name (of the name type `ln'
(=long name)) for collection C in language LN.
This function uses collection_i18nname_cache, but it verifies
whether the cache is up-to-date first by default. This
verification step is performed by checking the DB table update
time. So, if you call this function 1000 times, it can get very
slow because it will do 1000 table update time verifications, even
though collection names change not that often.
Hence the parameter VERIFY_CACHE_TIMESTAMP which, when set to
False, will assume the cache is already up-to-date. This is
useful namely in the generation of collection lists for the search
results page.
"""
if verify_cache_timestamp:
collection_i18nname_cache.recreate_cache_if_needed()
out = c
try:
out = collection_i18nname_cache.cache[c][ln]
except KeyError:
pass # translation in LN does not exist
return out
class FieldI18nNameDataCacher(DataCacher):
"""
Provides cache for I18N field names. This class is not to be used
directly; use function get_field_i18nname() instead.
"""
def __init__(self):
def cache_filler():
ret = {}
try:
res = run_sql("SELECT f.name,fn.ln,fn.value FROM fieldname AS fn, field AS f WHERE fn.id_field=f.id AND fn.type='ln'") # ln=long name
except Exception:
# database problems, return empty cache
return {}
for f, ln, i18nname in res:
if i18nname:
if f not in ret:
ret[f] = {}
ret[f][ln] = i18nname
return ret
def timestamp_verifier():
return get_table_update_time('fieldname')
DataCacher.__init__(self, cache_filler, timestamp_verifier)
try:
if not field_i18nname_cache.is_ok_p:
raise Exception
except Exception:
field_i18nname_cache = FieldI18nNameDataCacher()
def get_field_i18nname(f, ln=CFG_SITE_LANG, verify_cache_timestamp=True):
"""
Return nicely formatted field name (of type 'ln', 'long name') for
field F in language LN.
If VERIFY_CACHE_TIMESTAMP is set to True, then verify DB timestamp
and field I18N name cache timestamp and refresh cache from the DB
if needed. Otherwise don't bother checking DB timestamp and
return the cached value. (This is useful when get_field_i18nname
is called inside a loop.)
"""
if verify_cache_timestamp:
field_i18nname_cache.recreate_cache_if_needed()
out = f
try:
out = field_i18nname_cache.cache[f][ln]
except KeyError:
pass # translation in LN does not exist
return out
def get_alphabetically_ordered_collection_list(level=0, ln=CFG_SITE_LANG):
"""Returns nicely ordered (score respected) list of collections, more exactly list of tuples
(collection name, printable collection name).
Suitable for create_search_box()."""
out = []
res = run_sql("SELECT name FROM collection ORDER BY name ASC")
for c_name in res:
c_name = c_name[0]
# make a nice printable name (e.g. truncate c_printable for
# long collection names in given language):
c_printable_fullname = get_coll_i18nname(c_name, ln, False)
c_printable = wash_index_term(c_printable_fullname, 30, False)
if c_printable != c_printable_fullname:
c_printable = c_printable + "..."
if level:
c_printable = " " + level * '-' + " " + c_printable
out.append([c_name, c_printable])
return out
def get_nicely_ordered_collection_list(collid=1, level=0, ln=CFG_SITE_LANG):
"""Returns nicely ordered (score respected) list of collections, more exactly list of tuples
(collection name, printable collection name).
Suitable for create_search_box()."""
colls_nicely_ordered = []
res = run_sql("""SELECT c.name,cc.id_son FROM collection_collection AS cc, collection AS c
WHERE c.id=cc.id_son AND cc.id_dad=%s ORDER BY score ASC""", (collid, ))
for c, cid in res:
# make a nice printable name (e.g. truncate c_printable for
# long collection names in given language):
c_printable_fullname = get_coll_i18nname(c, ln, False)
c_printable = wash_index_term(c_printable_fullname, 30, False)
if c_printable != c_printable_fullname:
c_printable = c_printable + "..."
if level:
c_printable = " " + level * '-' + " " + c_printable
colls_nicely_ordered.append([c, c_printable])
colls_nicely_ordered = colls_nicely_ordered + get_nicely_ordered_collection_list(cid, level+1, ln=ln)
return colls_nicely_ordered
def get_index_id_from_field(field):
"""
Return index id with name corresponding to FIELD, or the first
index id where the logical field code named FIELD is indexed.
Return zero in case there is no index defined for this field.
Example: field='author', output=4.
"""
out = 0
if not field:
field = 'global' # empty string field means 'global' index (field 'anyfield')
# first look in the index table:
res = run_sql("""SELECT id FROM idxINDEX WHERE name=%s""", (field,))
if res:
out = res[0][0]
return out
# not found in the index table, now look in the field table:
res = run_sql("""SELECT w.id FROM idxINDEX AS w, idxINDEX_field AS wf, field AS f
WHERE f.code=%s AND wf.id_field=f.id AND w.id=wf.id_idxINDEX
LIMIT 1""", (field,))
if res:
out = res[0][0]
return out
def get_words_from_pattern(pattern):
"""
Returns list of whitespace-separated words from pattern, removing any
trailing punctuation-like signs from words in pattern.
"""
words = {}
# clean trailing punctuation signs inside pattern
pattern = re_punctuation_followed_by_space.sub(' ', pattern)
for word in pattern.split():
if word not in words:
words[word] = 1
return words.keys()
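# Illustrative sketch (not part of the original module): punctuation followed
# by whitespace is stripped and duplicates are dropped, e.g.
#   get_words_from_pattern("muon, kaon muon")   # -> ['muon', 'kaon'] (order not guaranteed)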
def create_basic_search_units(req, p, f, m=None, of='hb'):
"""Splits search pattern and search field into a list of independently searchable units.
- A search unit consists of '(operator, pattern, field, type, hitset)' tuples where
'operator' is set union (|), set intersection (+) or set exclusion (-);
'pattern' is either a word (e.g. muon*) or a phrase (e.g. 'nuclear physics');
'field' is either a code like 'title' or MARC tag like '100__a';
'type' is the search type ('w' for word file search, 'a' for access file search).
- Optionally, the function accepts the match type argument 'm'.
If it is set (e.g. from advanced search interface), then it
performs this kind of matching. If it is not set, then a guess is made.
'm' can have values: 'a'='all of the words', 'o'='any of the words',
'p'='phrase/substring', 'r'='regular expression',
'e'='exact value'.
- Warnings are printed on req (when not None) in case of HTML output formats."""
opfts = [] # will hold (o,p,f,t,h) units
# FIXME: quick hack for the journal index
if f == 'journal':
opfts.append(['+', p, f, 'w'])
return opfts
## check arguments: is desired matching type set?
if m:
## A - matching type is known; good!
if m == 'e':
# A1 - exact value:
opfts.append(['+', p, f, 'a']) # '+' since we have only one unit
elif m == 'p':
# A2 - phrase/substring:
opfts.append(['+', "%" + p + "%", f, 'a']) # '+' since we have only one unit
elif m == 'r':
# A3 - regular expression:
opfts.append(['+', p, f, 'r']) # '+' since we have only one unit
elif m == 'a' or m == 'w':
# A4 - all of the words:
p = strip_accents(p) # strip accents for 'w' mode, FIXME: delete when not needed
for word in get_words_from_pattern(p):
opfts.append(['+', word, f, 'w']) # '+' in all units
elif m == 'o':
# A5 - any of the words:
p = strip_accents(p) # strip accents for 'w' mode, FIXME: delete when not needed
for word in get_words_from_pattern(p):
if len(opfts)==0:
opfts.append(['+', word, f, 'w']) # '+' in the first unit
else:
opfts.append(['|', word, f, 'w']) # '|' in further units
else:
if of.startswith("h"):
write_warning("Matching type '%s' is not implemented yet." % cgi.escape(m), "Warning", req=req)
opfts.append(['+', "%" + p + "%", f, 'w'])
else:
## B - matching type is not known: let us try to determine it by some heuristics
if f and p[0] == '"' and p[-1] == '"':
## B0 - does 'p' start and end by double quote, and is 'f' defined? => doing ACC search
opfts.append(['+', p[1:-1], f, 'a'])
elif f in ('author', 'firstauthor', 'exactauthor', 'exactfirstauthor', 'authorityauthor') and author_name_requires_phrase_search(p):
## B1 - do we search in author, and does 'p' contain space/comma/dot/etc?
## => doing washed ACC search
opfts.append(['+', p, f, 'a'])
elif f and p[0] == "'" and p[-1] == "'":
## B0bis - does 'p' start and end by single quote, and is 'f' defined? => doing ACC search
opfts.append(['+', '%' + p[1:-1] + '%', f, 'a'])
elif f and p[0] == "/" and p[-1] == "/":
## B0ter - does 'p' start and end by a slash, and is 'f' defined? => doing regexp search
opfts.append(['+', p[1:-1], f, 'r'])
elif f and p.find(',') >= 0:
## B1 - does 'p' contain comma, and is 'f' defined? => doing ACC search
opfts.append(['+', p, f, 'a'])
elif f and str(f[0:2]).isdigit():
## B2 - does 'f' exist and starts by two digits? => doing ACC search
opfts.append(['+', p, f, 'a'])
else:
## B3 - doing WRD search, but maybe ACC too
# search units are separated by spaces unless the space is within single or double quotes
# so, let us replace temporarily any space within quotes by '__SPACE__'
p = re_pattern_single_quotes.sub(lambda x: "'"+x.group(1).replace(' ', '__SPACE__')+"'", p)
p = re_pattern_double_quotes.sub(lambda x: "\""+x.group(1).replace(' ', '__SPACE__')+"\"", p)
p = re_pattern_regexp_quotes.sub(lambda x: "/"+x.group(1).replace(' ', '__SPACE__')+"/", p)
# and spaces after colon as well:
p = re_pattern_spaces_after_colon.sub(lambda x: x.group(1).replace(' ', '__SPACE__'), p)
# wash argument:
p = re_logical_and.sub(" ", p)
p = re_logical_or.sub(" |", p)
p = re_logical_not.sub(" -", p)
p = re_operators.sub(r' \1', p)
for pi in p.split(): # iterate through separated units (or items, as "pi" stands for "p item")
pi = re_pattern_space.sub(" ", pi) # replace back '__SPACE__' by ' '
# firstly, determine set operator
if pi[0] == '+' or pi[0] == '-' or pi[0] == '|':
oi = pi[0]
pi = pi[1:]
else:
# okay, there is no operator, so let us decide what to do by default
oi = '+' # by default we are doing set intersection...
# secondly, determine search pattern and field:
if pi.find(":") > 0:
fi, pi = pi.split(":", 1)
fi = wash_field(fi)
# test whether fi is a real index code or a MARC-tag defined code:
if fi in get_fieldcodes() or '00' <= fi[:2] <= '99':
pass
else:
# it is not, so join it back:
fi, pi = f, fi + ":" + pi
else:
fi, pi = f, pi
# wash 'fi' argument:
fi = wash_field(fi)
# wash 'pi' argument:
pi = pi.strip() # strip any leading/trailing spaces
if re_quotes.match(pi):
# B3a - quotes are found => do ACC search (phrase search)
if pi[0] == '"' and pi[-1] == '"':
pi = pi.replace('"', '') # remove quote signs
opfts.append([oi, pi, fi, 'a'])
elif pi[0] == "'" and pi[-1] == "'":
pi = pi.replace("'", "") # remove quote signs
opfts.append([oi, "%" + pi + "%", fi, 'a'])
else: # unbalanced quotes, so fall back to WRD query:
opfts.append([oi, pi, fi, 'w'])
elif pi.startswith('/') and pi.endswith('/'):
# B3b - pi has slashes around => do regexp search
opfts.append([oi, pi[1:-1], fi, 'r'])
elif fi and len(fi) > 1 and str(fi[0]).isdigit() and str(fi[1]).isdigit():
# B3c - fi exists and starts by two digits => do ACC search
opfts.append([oi, pi, fi, 'a'])
elif fi and not get_index_id_from_field(fi) and get_field_name(fi):
# B3d - logical field fi exists but there is no WRD index for fi => try ACC search
opfts.append([oi, pi, fi, 'a'])
else:
# B3e - general case => do WRD search
pi = strip_accents(pi) # strip accents for 'w' mode, FIXME: delete when not needed
for pii in get_words_from_pattern(pi):
opfts.append([oi, pii, fi, 'w'])
## sanity check:
for i in range(0, len(opfts)):
try:
pi = opfts[i][1]
if pi == '*':
if of.startswith("h"):
write_warning("Ignoring standalone wildcard word.", "Warning", req=req)
del opfts[i]
if pi == '' or pi == ' ':
fi = opfts[i][2]
if fi:
if of.startswith("h"):
write_warning("Ignoring empty <em>%s</em> search term." % fi, "Warning", req=req)
del opfts[i]
except:
pass
## replace old logical field names if applicable:
if CFG_WEBSEARCH_FIELDS_CONVERT:
opfts = [[o, p, wash_field(f), t] for o, p, f, t in opfts]
## return search units:
return opfts
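# Illustrative sketch (added commentary): for an unquoted query the heuristic
# branch B3 splits the pattern into word units, while a quoted, fielded part
# becomes a single phrase (ACC) unit -- assuming 'title' is a known field code:
#
#     create_basic_search_units(None, 'muon decay title:"nuclear physics"', '')
#     # -> [['+', 'muon', '', 'w'],
#     #     ['+', 'decay', '', 'w'],
#     #     ['+', 'nuclear physics', 'title', 'a']]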
def page_start(req, of, cc, aas, ln, uid, title_message=None,
description='', keywords='', recID=-1, tab='', p='', em=''):
"""
Start page according to given output format.
@param title_message: title of the page, not escaped for HTML
@param description: description of the page, not escaped for HTML
@param keywords: keywords of the page, not escaped for HTML
"""
_ = gettext_set_language(ln)
if not req or isinstance(req, cStringIO.OutputType):
return # we were called from CLI
if not title_message:
title_message = _("Search Results")
content_type = get_output_format_content_type(of)
if of.startswith('x'):
if of == 'xr':
# we are doing RSS output
req.content_type = "application/rss+xml"
req.send_http_header()
req.write("""<?xml version="1.0" encoding="UTF-8"?>\n""")
else:
# we are doing XML output:
req.content_type = get_output_format_content_type(of, 'text/xml')
req.send_http_header()
req.write("""<?xml version="1.0" encoding="UTF-8"?>\n""")
elif of.startswith('t') or str(of[0:3]).isdigit():
# we are doing plain text output:
req.content_type = "text/plain"
req.send_http_header()
elif of == "intbitset":
req.content_type = "application/octet-stream"
req.send_http_header()
elif of == "recjson":
req.content_type = "application/json"
req.send_http_header()
elif of == "id":
pass # nothing to do, we shall only return list of recIDs
elif content_type == 'text/html':
# we are doing HTML output:
req.content_type = "text/html"
req.send_http_header()
if not description:
description = "%s %s." % (cc, _("Search Results"))
if not keywords:
keywords = "%s, WebSearch, %s" % (get_coll_i18nname(CFG_SITE_NAME, ln, False), get_coll_i18nname(cc, ln, False))
## generate RSS URL:
argd = {}
if req.args:
argd = cgi.parse_qs(req.args)
rssurl = websearch_templates.build_rss_url(argd)
## add MathJax if displaying single records (FIXME: eventually find
## a better place for this code)
if of.lower() in CFG_WEBSEARCH_USE_MATHJAX_FOR_FORMATS:
metaheaderadd = get_mathjax_header(req.is_https())
else:
metaheaderadd = ''
# Add metadata in meta tags for Google scholar-esque harvesting...
# only if we have a detailed meta format and we are looking at a
# single record
if recID != -1 and CFG_WEBSEARCH_DETAILED_META_FORMAT and \
record_exists(recID) == 1:
metaheaderadd += format_record(recID,
CFG_WEBSEARCH_DETAILED_META_FORMAT,
ln=ln)
## generate navtrail:
navtrail = create_navtrail_links(cc, aas, ln)
if navtrail != '':
navtrail += ' > '
if (tab != '' or ((of != '' or of.lower() != 'hd') and of != 'hb')) and \
recID != -1:
# If we are not in information tab in HD format, customize
# the nav. trail to have a link back to main record. (Due
# to the way perform_request_search() works, hb
# (lowercase) is equal to hd)
navtrail += ' <a class="navtrail" href="%s/%s/%s">%s</a>' % \
(CFG_BASE_URL, CFG_SITE_RECORD, recID, cgi.escape(title_message))
if (of != '' or of.lower() != 'hd') and of != 'hb':
# Export
format_name = of
query = "SELECT name FROM format WHERE code=%s"
res = run_sql(query, (of,))
if res:
format_name = res[0][0]
navtrail += ' > ' + format_name
else:
# Discussion, citations, etc. tabs
tab_label = get_detailed_page_tabs(cc, ln=ln)[tab]['label']
navtrail += ' > ' + _(tab_label)
else:
navtrail += cgi.escape(title_message)
if p:
# we are serving search/browse results pages, so insert pattern:
navtrail += ": " + cgi.escape(p)
title_message = p + " - " + title_message
body_css_classes = []
if cc:
# we know the collection, so let's allow page styles based on cc.
# Collection names may not satisfy the rules for CSS classes, which
# are roughly: -?[_a-zA-Z]+[_a-zA-Z0-9-]*
# However, it isn't clear what we should do about names containing
# numbers, so we leave them to fail. Everything else becomes "_".
css = nmtoken_from_string(cc).replace('.', '_').replace('-', '_').replace(':', '_')
body_css_classes.append(css)
## finally, print page header:
if em == '' or EM_REPOSITORY["header"] in em:
req.write(pageheaderonly(req=req, title=title_message,
navtrail=navtrail,
description=description,
keywords=keywords,
metaheaderadd=metaheaderadd,
uid=uid,
language=ln,
navmenuid='search',
navtrail_append_title_p=0,
rssurl=rssurl,
body_css_classes=body_css_classes))
req.write(websearch_templates.tmpl_search_pagestart(ln=ln))
else:
req.content_type = content_type
req.send_http_header()
def page_end(req, of="hb", ln=CFG_SITE_LANG, em=""):
"End page according to given output format: e.g. close XML tags, add HTML footer, etc."
if of == "id":
return [] # empty recID list
if of == "intbitset":
return intbitset()
if not req:
return # we were called from CLI
if of.startswith('h'):
req.write(websearch_templates.tmpl_search_pageend(ln = ln)) # pagebody end
if em == "" or EM_REPOSITORY["footer"] in em:
req.write(pagefooteronly(lastupdated=__lastupdated__, language=ln, req=req))
return
def create_add_to_search_pattern(p, p1, f1, m1, op1):
"""Create the search pattern """
if not p1:
return p
init_search_pattern = p
# operation: AND, OR, AND NOT
if op1 == 'a' and p: # we don't want '+' at the beginning of the query
op = ' +'
elif op1 == 'o':
op = ' |'
elif op1 == 'n':
op = ' -'
else:
op = ''
# field
field = ''
if f1:
field = f1 + ':'
# type of search
pattern = p1
start = '('
end = ')'
if m1 == 'e':
start = end = '"'
elif m1 == 'p':
start = end = "'"
elif m1 == 'r':
start = end = '/'
else: # m1 == 'o' or m1 =='a'
words = p1.strip().split(' ')
if len(words) == 1:
start = end = ''
pattern = field + words[0]
elif m1 == 'o':
pattern = ' |'.join([field + word for word in words])
else:
pattern = ' '.join([field + word for word in words])
# avoid having field:(word1 word2) since this does not currently work correctly
return init_search_pattern + op + start + pattern + end
if not pattern:
return ''
# avoid having field:(word1 word2) since this does not currently work correctly
return init_search_pattern + op + field + start + pattern + end
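# Illustrative sketch (added commentary): appending an 'AND' exact-phrase
# clause on a field to an existing pattern:
#
#     create_add_to_search_pattern('ellis', 'nuclear physics', 'title', 'e', 'a')
#     # -> 'ellis +title:"nuclear physics"'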
def create_page_title_search_pattern_info(p, p1, p2, p3):
"""Create the search pattern bit for the page <title> web page
HTML header. Basically combine p and (p1,p2,p3) together so that
the page header may be filled whether we are in the Simple Search
or Advanced Search interface contexts."""
out = ""
if p:
out = p
else:
out = p1
if p2:
out += ' ' + p2
if p3:
out += ' ' + p3
return out
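# Illustrative sketch (added commentary):
#
#     create_page_title_search_pattern_info('', 'ellis', 'muon', '')   # -> 'ellis muon'
#     create_page_title_search_pattern_info('higgs', 'ellis', '', '')  # -> 'higgs'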
def create_inputdate_box(name="d1", selected_year=0, selected_month=0, selected_day=0, ln=CFG_SITE_LANG):
"Produces 'From Date', 'Until Date' kind of selection box. Suitable for search options."
_ = gettext_set_language(ln)
box = ""
# day
box += """<select name="%sd">""" % name
box += """<option value="">%s""" % _("any day")
for day in range(1, 32):
box += """<option value="%02d"%s>%02d""" % (day, is_selected(day, selected_day), day)
box += """</select>"""
# month
box += """<select name="%sm">""" % name
box += """<option value="">%s""" % _("any month")
# trailing space in May distinguishes short/long form of the month name
for mm, month in [(1, _("January")), (2, _("February")), (3, _("March")), (4, _("April")),
(5, _("May ")), (6, _("June")), (7, _("July")), (8, _("August")),
(9, _("September")), (10, _("October")), (11, _("November")), (12, _("December"))]:
box += """<option value="%02d"%s>%s""" % (mm, is_selected(mm, selected_month), month.strip())
box += """</select>"""
# year
box += """<select name="%sy">""" % name
box += """<option value="">%s""" % _("any year")
this_year = int(time.strftime("%Y", time.localtime()))
for year in range(this_year-20, this_year+1):
box += """<option value="%d"%s>%d""" % (year, is_selected(year, selected_year), year)
box += """</select>"""
return box
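# Illustrative usage (added commentary): create_inputdate_box('d1', 2011, 3)
# returns the day/month/year <select> boxes named d1d/d1m/d1y with March 2011
# pre-selected and 'any day' left as the default.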
def create_search_box(cc, colls, p, f, rg, sf, so, sp, rm, of, ot, aas,
ln, p1, f1, m1, op1, p2, f2, m2, op2, p3, f3,
m3, sc, pl, d1y, d1m, d1d, d2y, d2m, d2d, dt, jrec, ec,
action="", em=""):
"""Create search box for 'search again in the results page' functionality."""
if em != "" and EM_REPOSITORY["search_box"] not in em:
if EM_REPOSITORY["body"] in em and cc != CFG_SITE_NAME:
return '''
<h1 class="headline">%(ccname)s</h1>''' % {'ccname' : cgi.escape(cc), }
else:
return ""
# load the right message language
_ = gettext_set_language(ln)
# some computations
cc_intl = get_coll_i18nname(cc, ln, False)
cc_colID = get_colID(cc)
colls_nicely_ordered = []
if cfg_nicely_ordered_collection_list:
colls_nicely_ordered = get_nicely_ordered_collection_list(ln=ln)
else:
colls_nicely_ordered = get_alphabetically_ordered_collection_list(ln=ln)
colls_nice = []
for (cx, cx_printable) in colls_nicely_ordered:
if not cx.startswith("Unnamed collection"):
colls_nice.append({'value': cx,
'text': cx_printable
})
coll_selects = []
if colls and colls[0] != CFG_SITE_NAME:
# some collections are defined, so print these first, and only then print 'add another collection' heading:
for c in colls:
if c:
temp = []
temp.append({'value': CFG_SITE_NAME,
'text': '*** %s ***' % (CFG_SCOAP3_SITE and _("any publisher or journal") or _("any public collection"))
})
# this field is used to remove the current collection from the ones to be searched.
temp.append({'value': '',
'text': '*** %s ***' % (CFG_SCOAP3_SITE and _("remove this publisher or journal") or _("remove this collection"))
})
for val in colls_nice:
# print collection:
if not cx.startswith("Unnamed collection"):
temp.append({'value': val['value'],
'text': val['text'],
'selected' : (c == re.sub(r"^[\s\-]*", "", val['value']))
})
coll_selects.append(temp)
coll_selects.append([{'value': '',
'text' : '*** %s ***' % (CFG_SCOAP3_SITE and _("add another publisher or journal") or _("add another collection"))
}] + colls_nice)
else: # we searched in CFG_SITE_NAME, so print 'any public collection' heading
coll_selects.append([{'value': CFG_SITE_NAME,
'text' : '*** %s ***' % (CFG_SCOAP3_SITE and _("any publisher or journal") or _("any public collection"))
}] + colls_nice)
## ranking methods
ranks = [{
'value' : '',
'text' : "- %s %s -" % (_("OR").lower(), _("rank by")),
}]
for (code, name) in get_bibrank_methods(cc_colID, ln):
# propose found rank methods:
ranks.append({
'value': code,
'text': name,
})
formats = get_available_output_formats(visible_only=True)
# show collections in the search box? (not if there is only one
# collection defined, and not if we are in light search)
show_colls = True
show_title = True
if len(collection_reclist_cache.cache.keys()) == 1 or \
aas == -1:
show_colls = False
show_title = False
if cc == CFG_SITE_NAME:
show_title = False
if CFG_INSPIRE_SITE:
show_title = False
return websearch_templates.tmpl_search_box(
ln = ln,
aas = aas,
cc_intl = cc_intl,
cc = cc,
ot = ot,
sp = sp,
action = action,
fieldslist = get_searchwithin_fields(ln=ln, colID=cc_colID),
f1 = f1,
f2 = f2,
f3 = f3,
m1 = m1,
m2 = m2,
m3 = m3,
p1 = p1,
p2 = p2,
p3 = p3,
op1 = op1,
op2 = op2,
rm = rm,
p = p,
f = f,
coll_selects = coll_selects,
d1y = d1y, d2y = d2y, d1m = d1m, d2m = d2m, d1d = d1d, d2d = d2d,
dt = dt,
sort_fields = get_sortby_fields(ln=ln, colID=cc_colID),
sf = sf,
so = so,
ranks = ranks,
sc = sc,
rg = rg,
formats = formats,
of = of,
pl = pl,
jrec = jrec,
ec = ec,
show_colls = show_colls,
show_title = show_title and (em=="" or EM_REPOSITORY["body"] in em)
)
def create_exact_author_browse_help_link(p=None, p1=None, p2=None, p3=None, f=None, f1=None, f2=None, f3=None,
rm=None, cc=None, ln=None, jrec=None, rg=None, aas=0, action=""):
"""Creates a link to help switch from author to exact author while browsing"""
if action == 'browse':
search_fields = (f, f1, f2, f3)
if 'author' in search_fields or 'firstauthor' in search_fields:
def add_exact(field):
if field == 'author' or field == 'firstauthor':
return 'exact' + field
return field
fe, f1e, f2e, f3e = [add_exact(field) for field in search_fields]
link_name = f or f1
link_name = (link_name == 'firstauthor' and 'exact first author') or 'exact author'
return websearch_templates.tmpl_exact_author_browse_help_link(p=p, p1=p1, p2=p2, p3=p3, f=fe, f1=f1e, f2=f2e, f3=f3e,
rm=rm, cc=cc, ln=ln, jrec=jrec, rg=rg, aas=aas, action=action,
link_name=link_name)
return ""
def create_navtrail_links(cc=CFG_SITE_NAME, aas=0, ln=CFG_SITE_LANG, self_p=1, tab=''):
"""Creates navigation trail links, i.e. links to collection
ancestors (except Home collection). If aas==1, then links to
Advanced Search interfaces; otherwise Simple Search.
"""
dads = []
for dad in get_coll_ancestors(cc):
if dad != CFG_SITE_NAME: # exclude Home collection
dads.append((dad, get_coll_i18nname(dad, ln, False)))
if self_p and cc != CFG_SITE_NAME:
dads.append((cc, get_coll_i18nname(cc, ln, False)))
return websearch_templates.tmpl_navtrail_links(
aas=aas, ln=ln, dads=dads)
def get_searchwithin_fields(ln='en', colID=None):
"""Retrieves the fields name used in the 'search within' selection box for the collection ID colID."""
res = None
if colID:
res = run_sql("""SELECT f.code,f.name FROM field AS f, collection_field_fieldvalue AS cff
WHERE cff.type='sew' AND cff.id_collection=%s AND cff.id_field=f.id
ORDER BY cff.score DESC, f.name ASC""", (colID,))
if not res:
res = run_sql("SELECT code,name FROM field ORDER BY name ASC")
fields = [{
'value' : '',
'text' : get_field_i18nname("any field", ln, False)
}]
for field_code, field_name in res:
if field_code and field_code != "anyfield":
fields.append({'value': field_code,
'text': get_field_i18nname(field_name, ln, False)
})
return fields
def get_sortby_fields(ln='en', colID=None):
"""Retrieves the fields name used in the 'sort by' selection box for the collection ID colID."""
_ = gettext_set_language(ln)
res = None
if colID:
res = run_sql("""SELECT DISTINCT(f.code),f.name FROM field AS f, collection_field_fieldvalue AS cff
WHERE cff.type='soo' AND cff.id_collection=%s AND cff.id_field=f.id
ORDER BY cff.score DESC, f.name ASC""", (colID,))
if not res:
# no sort fields defined for this colID, try to take Home collection:
res = run_sql("""SELECT DISTINCT(f.code),f.name FROM field AS f, collection_field_fieldvalue AS cff
WHERE cff.type='soo' AND cff.id_collection=%s AND cff.id_field=f.id
ORDER BY cff.score DESC, f.name ASC""", (1,))
if not res:
# no sort fields defined for the Home collection, take all sort fields defined wherever they are:
res = run_sql("""SELECT DISTINCT(f.code),f.name FROM field AS f, collection_field_fieldvalue AS cff
WHERE cff.type='soo' AND cff.id_field=f.id
ORDER BY cff.score DESC, f.name ASC""",)
fields = [{
'value': '',
'text': _(CFG_BIBSORT_DEFAULT_FIELD)
}]
for field_code, field_name in res:
if field_code and field_code != "anyfield":
fields.append({'value': field_code,
'text': get_field_i18nname(field_name, ln, False)
})
return fields
def create_andornot_box(name='op', value='', ln='en'):
"Returns HTML code for the AND/OR/NOT selection box."
_ = gettext_set_language(ln)
out = """
<select name="%s">
<option value="a"%s>%s
<option value="o"%s>%s
<option value="n"%s>%s
</select>
""" % (name,
is_selected('a', value), _("AND"),
is_selected('o', value), _("OR"),
is_selected('n', value), _("AND NOT"))
return out
def create_matchtype_box(name='m', value='', ln='en'):
"Returns HTML code for the 'match type' selection box."
_ = gettext_set_language(ln)
out = """
<select name="%s">
<option value="a"%s>%s
<option value="o"%s>%s
<option value="e"%s>%s
<option value="p"%s>%s
<option value="r"%s>%s
</select>
""" % (name,
is_selected('a', value), _("All of the words:"),
is_selected('o', value), _("Any of the words:"),
is_selected('e', value), _("Exact phrase:"),
is_selected('p', value), _("Partial phrase:"),
is_selected('r', value), _("Regular expression:"))
return out
def is_selected(var, fld):
"Checks if the two are equal, and if yes, returns ' selected'. Useful for select boxes."
if type(var) is int and type(fld) is int:
if var == fld:
return " selected"
elif str(var) == str(fld):
return " selected"
elif fld and len(fld)==3 and fld[0] == "w" and var == fld[1:]:
return " selected"
return ""
def wash_colls(cc, c, split_colls=0, verbose=0):
"""Wash collection list by checking whether user has deselected
anything under 'Narrow search'. Checks also if cc is a list or not.
Return list of cc, colls_to_display, colls_to_search since the list
of collections to display is different from that to search in.
This is because users might have chosen 'split by collection'
functionality.
The behaviour of "collections to display" depends solely on whether
the user has deselected a particular collection: e.g. if they started
from the 'Articles and Preprints' page and deselected 'Preprints',
then the collection to display is 'Articles'. If they did not deselect
anything, then the collection to display is 'Articles & Preprints'.
The behaviour of "collections to search in" depends on the
'split_colls' parameter:
* if it is equal to 1, then we can wash the colls list down
and search solely in the collection the user started from;
* if it is equal to 0, then we are splitting to the first level
of collections, i.e. collections as they appear on the page
we started to search from;
The function raises the exception
InvenioWebSearchUnknownCollectionError
if cc or one of the c collections is not known.
"""
colls_out = []
colls_out_for_display = []
# list to hold the hosted collections to be searched and displayed
hosted_colls_out = []
debug = ""
if verbose:
debug += "<br />"
debug += "<br />1) --- initial parameters ---"
debug += "<br />cc : %s" % cc
debug += "<br />c : %s" % c
debug += "<br />"
# check what type is 'cc':
if type(cc) is list:
for ci in cc:
if ci in collection_reclist_cache.cache:
# yes this collection is real, so use it:
cc = ci
break
else:
# check once if cc is real:
if cc not in collection_reclist_cache.cache:
if cc:
raise InvenioWebSearchUnknownCollectionError(cc)
else:
cc = CFG_SITE_NAME # cc is not set, so replace it with Home collection
# check type of 'c' argument:
if type(c) is list:
colls = c
else:
colls = [c]
if verbose:
debug += "<br />2) --- after check for the integrity of cc and the being or not c a list ---"
debug += "<br />cc : %s" % cc
debug += "<br />c : %s" % c
debug += "<br />"
# remove all 'unreal' collections:
colls_real = []
for coll in colls:
if coll in collection_reclist_cache.cache:
colls_real.append(coll)
else:
if coll:
raise InvenioWebSearchUnknownCollectionError(coll)
colls = colls_real
if verbose:
debug += "<br />3) --- keeping only the real colls of c ---"
debug += "<br />colls : %s" % colls
debug += "<br />"
# check if some real collections remain:
if len(colls)==0:
colls = [cc]
if verbose:
debug += "<br />4) --- in case no colls were left we use cc directly ---"
debug += "<br />colls : %s" % colls
debug += "<br />"
# then let us check the list of non-restricted "real" sons of 'cc' and compare it to 'coll':
res = run_sql("""SELECT c.name FROM collection AS c,
collection_collection AS cc,
collection AS ccc
WHERE c.id=cc.id_son AND cc.id_dad=ccc.id
AND ccc.name=%s AND cc.type='r'""", (cc,))
# list that holds all the non restricted sons of cc that are also not hosted collections
l_cc_nonrestricted_sons_and_nonhosted_colls = []
res_hosted = run_sql("""SELECT c.name FROM collection AS c,
collection_collection AS cc,
collection AS ccc
WHERE c.id=cc.id_son AND cc.id_dad=ccc.id
AND ccc.name=%s AND cc.type='r'
AND (c.dbquery NOT LIKE 'hostedcollection:%%' OR c.dbquery IS NULL)""", (cc,))
for row_hosted in res_hosted:
l_cc_nonrestricted_sons_and_nonhosted_colls.append(row_hosted[0])
l_cc_nonrestricted_sons_and_nonhosted_colls.sort()
l_cc_nonrestricted_sons = []
l_c = colls[:]
for row in res:
if not collection_restricted_p(row[0]):
l_cc_nonrestricted_sons.append(row[0])
l_c.sort()
l_cc_nonrestricted_sons.sort()
if l_cc_nonrestricted_sons == l_c:
colls_out_for_display = [cc] # yep, washing permitted, it is sufficient to display 'cc'
# the following elif is a hack that preserves the above functionality when we start searching from
# the frontpage with some hosted collections deselected (either by default or manually)
elif set(l_cc_nonrestricted_sons_and_nonhosted_colls).issubset(set(l_c)):
colls_out_for_display = colls
split_colls = 0
else:
colls_out_for_display = colls # nope, we need to display all 'colls' successively
# remove duplicates:
#colls_out_for_display_nondups=filter(lambda x, colls_out_for_display=colls_out_for_display: colls_out_for_display[x-1] not in colls_out_for_display[x:], range(1, len(colls_out_for_display)+1))
#colls_out_for_display = map(lambda x, colls_out_for_display=colls_out_for_display:colls_out_for_display[x-1], colls_out_for_display_nondups)
#colls_out_for_display = list(set(colls_out_for_display))
#remove duplicates while preserving the order
set_out = set()
colls_out_for_display = [coll for coll in colls_out_for_display if coll not in set_out and not set_out.add(coll)]
if verbose:
debug += "<br />5) --- decide whether colls_out_for_diplay should be colls or is it sufficient for it to be cc; remove duplicates ---"
debug += "<br />colls_out_for_display : %s" % colls_out_for_display
debug += "<br />"
# FIXME: The below quoted part of the code has been commented out
# because it prevents searching in individual restricted daughter
# collections when both parent and all its public daughter
# collections were asked for, in addition to some restricted
# daughter collections. The removal was introduced for hosted
# collections, so we may want to double check in this context.
# the following piece of code takes care of removing collections whose ancestors are going to be searched anyway
# list to hold the collections to be removed
#colls_to_be_removed = []
# first calculate the collections that can safely be removed
#for coll in colls_out_for_display:
# for ancestor in get_coll_ancestors(coll):
# #if ancestor in colls_out_for_display: colls_to_be_removed.append(coll)
# if ancestor in colls_out_for_display and not is_hosted_collection(coll): colls_to_be_removed.append(coll)
# secondly remove the collections
#for coll in colls_to_be_removed:
# colls_out_for_display.remove(coll)
if verbose:
debug += "<br />6) --- remove collections that have ancestors about to be search, unless they are hosted ---"
debug += "<br />colls_out_for_display : %s" % colls_out_for_display
debug += "<br />"
# calculate the hosted collections to be searched.
if colls_out_for_display == [cc]:
if is_hosted_collection(cc):
hosted_colls_out.append(cc)
else:
for coll in get_coll_sons(cc):
if is_hosted_collection(coll):
hosted_colls_out.append(coll)
else:
for coll in colls_out_for_display:
if is_hosted_collection(coll):
hosted_colls_out.append(coll)
if verbose:
debug += "<br />7) --- calculate the hosted_colls_out ---"
debug += "<br />hosted_colls_out : %s" % hosted_colls_out
debug += "<br />"
# second, let us decide on collection splitting:
if split_colls == 0:
# type A - no sons are wanted
colls_out = colls_out_for_display
else:
# type B - sons (first-level descendants) are wanted
for coll in colls_out_for_display:
coll_sons = get_coll_sons(coll)
if coll_sons == []:
colls_out.append(coll)
else:
for coll_son in coll_sons:
if not is_hosted_collection(coll_son):
colls_out.append(coll_son)
#else:
# colls_out = colls_out + coll_sons
# remove duplicates:
#colls_out_nondups=filter(lambda x, colls_out=colls_out: colls_out[x-1] not in colls_out[x:], range(1, len(colls_out)+1))
#colls_out = map(lambda x, colls_out=colls_out:colls_out[x-1], colls_out_nondups)
#colls_out = list(set(colls_out))
#remove duplicates while preserving the order
set_out = set()
colls_out = [coll for coll in colls_out if coll not in set_out and not set_out.add(coll)]
if verbose:
debug += "<br />8) --- calculate the colls_out; remove duplicates ---"
debug += "<br />colls_out : %s" % colls_out
debug += "<br />"
# remove the hosted collections from the collections to be searched
if hosted_colls_out:
for coll in hosted_colls_out:
try:
colls_out.remove(coll)
except ValueError:
# in case coll was not found in colls_out
pass
if verbose:
debug += "<br />9) --- remove the hosted_colls from the colls_out ---"
debug += "<br />colls_out : %s" % colls_out
return (cc, colls_out_for_display, colls_out, hosted_colls_out, debug)
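# Illustrative sketch (added commentary, collection names hypothetical): with
# a parent 'Articles & Preprints' holding sons 'Articles' and 'Preprints',
# deselecting 'Preprints' and keeping split_colls=0 would give roughly
#
#     wash_colls('Articles & Preprints', ['Articles'], split_colls=0)
#     # -> ('Articles & Preprints', ['Articles'], ['Articles'], [], '')
#
# i.e. (washed cc, colls to display, colls to search, hosted colls, debug text).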
def get_synonym_terms(term, kbr_name, match_type, use_memoise=False):
"""
Return list of synonyms for TERM by looking in KBR_NAME in
MATCH_TYPE style.
@param term: search-time term or index-time term
@type term: str
@param kbr_name: knowledge base name
@type kbr_name: str
@param match_type: specifies how the term matches against the KBR
before doing the lookup. Could be 'exact' (default),
'leading_to_comma', or 'leading_to_number'.
@type match_type: str
@param use_memoise: can we memoise while doing lookups?
@type use_memoise: bool
@return: list of term synonyms
@rtype: list of strings
"""
dterms = {}
## exact match is default:
term_for_lookup = term
term_remainder = ''
## but maybe match different term:
if match_type == CFG_BIBINDEX_SYNONYM_MATCH_TYPE['leading_to_comma']:
mmm = re.match(r'^(.*?)(\s*,.*)$', term)
if mmm:
term_for_lookup = mmm.group(1)
term_remainder = mmm.group(2)
elif match_type == CFG_BIBINDEX_SYNONYM_MATCH_TYPE['leading_to_number']:
mmm = re.match(r'^(.*?)(\s*\d.*)$', term)
if mmm:
term_for_lookup = mmm.group(1)
term_remainder = mmm.group(2)
## FIXME: workaround: escaping SQL wild-card signs, since KBR's
## exact search does a LIKE query and would otherwise match everything:
term_for_lookup = term_for_lookup.replace('%', '\\%')
## OK, now find synonyms:
for kbr_values in get_kbr_values(kbr_name,
searchkey=term_for_lookup,
searchtype='e',
use_memoise=use_memoise):
for kbr_value in kbr_values:
dterms[kbr_value + term_remainder] = 1
## return list of term synonyms:
return dterms.keys()
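# Illustrative sketch (added commentary, journal code hypothetical): with
# match_type 'leading_to_number' the lookup key for 'PHRVD 69' is 'PHRVD',
# and the remainder ' 69' is appended back to every synonym found in the
# knowledge base.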
def wash_output_format(output_format):
"""Wash output format OUTPUT_FORMAT. Currently only prevents input like
'of=9' for the backwards-compatible format that prints certain fields
only. (For this task, 'of=tm' is preferred.)"""
if str(output_format[0:3]).isdigit() and len(output_format) != 6:
# asked to print MARC tags, but not enough digits,
# so let's switch back to the HTML brief default
return 'hb'
else:
return output_format
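# Illustrative sketch (added commentary):
#
#     wash_output_format('9')        # -> 'hb' (too few digits for a MARC tag)
#     wash_output_format('245__a')   # -> '245__a'
#     wash_output_format('hx')       # -> 'hx'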
def wash_pattern(p):
"""Wash pattern passed by URL. Check for sanity of the wildcard by
removing wildcards if they are appended to extremely short words
(1-3 letters). TODO: instead of this approximate treatment, it
would be much better to introduce a time limit, e.g. to kill a
query if it does not finish in 10 seconds."""
# strip accents:
# p = strip_accents(p) # FIXME: when available, strip accents all the time
# add leading/trailing whitespace for the two following wildcard-sanity checking regexps:
p = " " + p + " "
# replace spaces within quotes by __SPACE__ temporarily:
p = re_pattern_single_quotes.sub(lambda x: "'"+x.group(1).replace(' ', '__SPACE__')+"'", p)
p = re_pattern_double_quotes.sub(lambda x: "\""+x.group(1).replace(' ', '__SPACE__')+"\"", p)
p = re_pattern_regexp_quotes.sub(lambda x: "/"+x.group(1).replace(' ', '__SPACE__')+"/", p)
# get rid of unquoted wildcards after spaces:
p = re_pattern_wildcards_after_spaces.sub("\\1", p)
# get rid of extremely short words (1-3 letters with wildcards):
#p = re_pattern_short_words.sub("\\1", p)
# replace back __SPACE__ by spaces:
p = re_pattern_space.sub(" ", p)
# replace special terms:
p = re_pattern_today.sub(time.strftime("%Y-%m-%d", time.localtime()), p)
# remove unnecessary whitespace:
p = p.strip()
# remove potentially wrong UTF-8 characters:
p = wash_for_utf8(p)
return p
def wash_field(f):
"""Wash field passed by URL."""
if f:
# get rid of unnecessary whitespace and make it lowercase
# (e.g. Author -> author) to better suit iPhone etc input
# mode:
f = f.strip().lower()
# wash legacy 'f' field names, e.g. replace 'wau' or `au' by
# 'author', if applicable:
if CFG_WEBSEARCH_FIELDS_CONVERT:
f = CFG_WEBSEARCH_FIELDS_CONVERT.get(f, f)
return f
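# Illustrative sketch (added commentary, assuming CFG_WEBSEARCH_FIELDS_CONVERT
# maps the legacy code 'au' to 'author'):
#
#     wash_field(' Author ')   # -> 'author'
#     wash_field('au')         # -> 'author'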
def wash_dates(d1="", d1y=0, d1m=0, d1d=0, d2="", d2y=0, d2m=0, d2d=0):
"""
Take user-submitted date arguments D1 (full datetime string) or
the (D1Y, D1M, D1D) year, month, day tuple, and D2 or (D2Y, D2M, D2D),
and return a pair of datetime strings in the YYYY-MM-DD HH:MM:SS
format suitable for time-restricted searching.
Note that when both D1 and (D1Y, D1M, D1D) parameters are present,
the precedence goes to D1. Ditto for D2*.
Note that when (D1Y, D1M, D1D) are taken into account, some values
may be missing and are completed e.g. to 01 or 12 according to
whether it is the starting or the ending date.
"""
datetext1, datetext2 = "", ""
# sanity checking:
if d1 == "" and d1y == 0 and d1m == 0 and d1d == 0 and d2 == "" and d2y == 0 and d2m == 0 and d2d == 0:
return ("", "") # nothing selected, so return empty values
# wash first (starting) date:
if d1:
# full datetime string takes precedence:
datetext1 = d1
else:
# okay, first date passed as (year,month,day):
if d1y:
datetext1 += "%04d" % d1y
else:
datetext1 += "0000"
if d1m:
datetext1 += "-%02d" % d1m
else:
datetext1 += "-01"
if d1d:
datetext1 += "-%02d" % d1d
else:
datetext1 += "-01"
datetext1 += " 00:00:00"
# wash second (ending) date:
if d2:
# full datetime string takes precedence:
datetext2 = d2
else:
# okay, second date passed as (year,month,day):
if d2y:
datetext2 += "%04d" % d2y
else:
datetext2 += "9999"
if d2m:
datetext2 += "-%02d" % d2m
else:
datetext2 += "-12"
if d2d:
datetext2 += "-%02d" % d2d
else:
datetext2 += "-31" # NOTE: perhaps we should add max(datenumber) in
# given month, but for our querying it's not
# needed, 31 will always do
datetext2 += " 00:00:00"
# okay, return constructed YYYY-MM-DD HH:MM:SS datetexts:
return (datetext1, datetext2)
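# Illustrative sketch (added commentary): missing components are padded towards
# the start or the end of the interval:
#
#     wash_dates(d1y=2011)          # -> ('2011-01-01 00:00:00', '9999-12-31 00:00:00')
#     wash_dates(d1='2011-03-02')   # -> ('2011-03-02', '9999-12-31 00:00:00')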
def is_hosted_collection(coll):
"""Check if the given collection is a hosted one; i.e. its dbquery starts with hostedcollection:
Returns True if it is, False if it's not or if the result is empty or if the query failed"""
res = run_sql("SELECT dbquery FROM collection WHERE name=%s", (coll, ))
if not res or not res[0][0]:
return False
try:
return res[0][0].startswith("hostedcollection:")
except IndexError:
return False
def get_colID(c):
"Return collection ID for collection name C. Return None if no match found."
colID = None
res = run_sql("SELECT id FROM collection WHERE name=%s", (c,), 1)
if res:
colID = res[0][0]
return colID
def get_coll_normalised_name(c):
"""Returns normalised collection name (case sensitive) for collection name
C (case insensitive).
Returns None if no match found."""
res = run_sql("SELECT name FROM collection WHERE name=%s", (c,))
if res:
return res[0][0]
else:
return None
def get_coll_ancestors(coll):
"Returns a list of ancestors for collection 'coll'."
coll_ancestors = []
coll_ancestor = coll
while 1:
res = run_sql("""SELECT c.name FROM collection AS c
LEFT JOIN collection_collection AS cc ON c.id=cc.id_dad
LEFT JOIN collection AS ccc ON ccc.id=cc.id_son
WHERE ccc.name=%s ORDER BY cc.id_dad ASC LIMIT 1""",
(coll_ancestor,))
if res:
coll_name = res[0][0]
coll_ancestors.append(coll_name)
coll_ancestor = coll_name
else:
break
# ancestors found, return reversed list:
coll_ancestors.reverse()
return coll_ancestors
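# Illustrative sketch (added commentary, hypothetical collection tree
# Home > Articles & Preprints > Articles):
#
#     get_coll_ancestors('Articles')   # -> ['Home', 'Articles & Preprints']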
def get_coll_sons(coll, coll_type='r', public_only=1):
"""Return a list of sons (first-level descendants) of type 'coll_type' for collection 'coll'.
If coll_type = '*', both regular and virtual collections will be returned.
If public_only, then return only non-restricted son collections.
"""
coll_sons = []
if coll_type == '*':
coll_type_query = " IN ('r', 'v')"
query_params = (coll, )
else:
coll_type_query = "=%s"
query_params = (coll_type, coll)
query = "SELECT c.name FROM collection AS c "\
"LEFT JOIN collection_collection AS cc ON c.id=cc.id_son "\
"LEFT JOIN collection AS ccc ON ccc.id=cc.id_dad "\
"WHERE cc.type%s AND ccc.name=%%s" % coll_type_query
query += " ORDER BY cc.score ASC"
res = run_sql(query, query_params)
for name in res:
if not public_only or not collection_restricted_p(name[0]):
coll_sons.append(name[0])
return coll_sons
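# Illustrative sketch (added commentary, same hypothetical tree as above):
#
#     get_coll_sons('Articles & Preprints')          # -> ['Articles', 'Preprints']
#     get_coll_sons('Articles & Preprints', '*', 0)  # regular and virtual sons,
#                                                    # restricted ones included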
class CollectionAllChildrenDataCacher(DataCacher):
"""Cache for all children of a collection (regular & virtual, public & private)"""
def __init__(self):
def cache_filler():
def get_all_children(coll, coll_type='r', public_only=1, d_internal_coll_sons=None):
"""Return a list of all children of type 'coll_type' for collection 'coll'.
If public_only, then return only non-restricted child collections.
If coll_type='*', then return both regular and virtual collections.
d_internal_coll_sons is an internal dictionary used in recursion for
minimizing the number of database calls and should not be used outside
this scope.
"""
if not d_internal_coll_sons:
d_internal_coll_sons = {}
children = []
if coll not in d_internal_coll_sons:
d_internal_coll_sons[coll] = get_coll_sons(coll, coll_type, public_only)
for child in d_internal_coll_sons[coll]:
children.append(child)
children.extend(get_all_children(child, coll_type, public_only, d_internal_coll_sons)[0])
return children, d_internal_coll_sons
ret = {}
d_internal_coll_sons = None
collections = collection_reclist_cache.cache.keys()
for collection in collections:
ret[collection], d_internal_coll_sons = get_all_children(collection, '*', public_only=0, d_internal_coll_sons=d_internal_coll_sons)
return ret
def timestamp_verifier():
return max(get_table_update_time('collection'), get_table_update_time('collection_collection'))
DataCacher.__init__(self, cache_filler, timestamp_verifier)
try:
if not collection_allchildren_cache.is_ok_p:
raise Exception
except Exception:
collection_allchildren_cache = CollectionAllChildrenDataCacher()
def get_collection_allchildren(coll, recreate_cache_if_needed=True):
"""Returns the list of all children of a collection."""
if recreate_cache_if_needed:
collection_allchildren_cache.recreate_cache_if_needed()
if coll not in collection_allchildren_cache.cache:
return [] # collection does not exist; return empty list
return collection_allchildren_cache.cache[coll]
def get_coll_real_descendants(coll, coll_type='_', get_hosted_colls=True):
"""Return a list of all descendants of collection 'coll' that are defined by a 'dbquery'.
IOW, we need to decompose compound collections like "A & B" into "A" and "B" provided
that "A & B" has no associated database query defined.
"""
coll_sons = []
res = run_sql("""SELECT c.name,c.dbquery FROM collection AS c
LEFT JOIN collection_collection AS cc ON c.id=cc.id_son
LEFT JOIN collection AS ccc ON ccc.id=cc.id_dad
WHERE ccc.name=%s AND cc.type LIKE %s ORDER BY cc.score ASC""",
(coll, coll_type,))
for name, dbquery in res:
if dbquery: # this is 'real' collection, so return it:
if get_hosted_colls:
coll_sons.append(name)
else:
if not dbquery.startswith("hostedcollection:"):
coll_sons.append(name)
else: # this is 'composed' collection, so recurse:
coll_sons.extend(get_coll_real_descendants(name))
return coll_sons
def browse_pattern_phrases(req, colls, p, f, rg, ln=CFG_SITE_LANG):
"""Returns either biliographic phrases or words indexes."""
## is p enclosed in quotes? (coming from exact search)
if p.startswith('"') and p.endswith('"'):
p = p[1:-1]
## okay, "real browse" follows:
## FIXME: the maths in the get_nearest_terms_in_bibxxx is just a test
if not f and p.find(":") > 0: # does 'p' contain ':'?
f, p = p.split(":", 1)
## do we search in words indexes?
# FIXME uncomment this
#if not f:
# return browse_in_bibwords(req, p, f)
coll_hitset = intbitset()
for coll_name in colls:
coll_hitset |= get_collection_reclist(coll_name)
index_id = get_index_id_from_field(f)
if index_id != 0:
browsed_phrases_in_colls = get_nearest_terms_in_idxphrase_with_collection(p, index_id, rg/2, rg/2, coll_hitset)
else:
browsed_phrases = get_nearest_terms_in_bibxxx(p, f, (rg+1)/2+1, (rg-1)/2+1)
while not browsed_phrases:
# try again and again with shorter and shorter pattern:
try:
p = p[:-1]
browsed_phrases = get_nearest_terms_in_bibxxx(p, f, (rg+1)/2+1, (rg-1)/2+1)
except:
register_exception(req=req, alert_admin=True)
# probably there are no hits at all:
#req.write(_("No values found."))
return []
## try to check hits in these particular collection selection:
browsed_phrases_in_colls = []
if 0:
for phrase in browsed_phrases:
phrase_hitset = intbitset()
phrase_hitsets = search_pattern("", phrase, f, 'e')
for coll in colls:
phrase_hitset.union_update(phrase_hitsets[coll])
if len(phrase_hitset) > 0:
# okay, this phrase has some hits in colls, so add it:
browsed_phrases_in_colls.append([phrase, len(phrase_hitset)])
## were there hits in collections?
if browsed_phrases_in_colls == []:
if browsed_phrases != []:
#write_warning(req, """<p>No match close to <em>%s</em> found in given collections.
#Please try different term.<p>Displaying matches in any collection...""" % p_orig)
## try to get nbhits for these phrases in any collection:
for phrase in browsed_phrases:
nbhits = get_nbhits_in_bibxxx(phrase, f, coll_hitset)
if nbhits > 0:
browsed_phrases_in_colls.append([phrase, nbhits])
return browsed_phrases_in_colls
def browse_pattern(req, colls, p, f, rg, ln=CFG_SITE_LANG):
"""Displays either biliographic phrases or words indexes."""
# load the right message language
_ = gettext_set_language(ln)
browsed_phrases_in_colls = browse_pattern_phrases(req, colls, p, f, rg, ln)
if len(browsed_phrases_in_colls) == 0:
req.write(_("No values found."))
return
## display results now:
out = websearch_templates.tmpl_browse_pattern(
f=f,
fn=get_field_i18nname(get_field_name(f) or f, ln, False),
ln=ln,
browsed_phrases_in_colls=browsed_phrases_in_colls,
colls=colls,
rg=rg,
)
req.write(out)
return
def browse_in_bibwords(req, p, f, ln=CFG_SITE_LANG):
"""Browse inside words indexes."""
if not p:
return
_ = gettext_set_language(ln)
urlargd = {}
urlargd.update(req.argd)
urlargd['action'] = 'search'
nearest_box = create_nearest_terms_box(urlargd, p, f, 'w', ln=ln, intro_text_p=0)
req.write(websearch_templates.tmpl_search_in_bibwords(
p = p,
f = f,
ln = ln,
nearest_box = nearest_box
))
return
def search_pattern(req=None, p=None, f=None, m=None, ap=0, of="id", verbose=0, ln=CFG_SITE_LANG, display_nearest_terms_box=True, wl=0):
"""Search for complex pattern 'p' within field 'f' according to
matching type 'm'. Return hitset of recIDs.
The function uses multi-stage searching algorithm in case of no
exact match found. See the Search Internals document for
detailed description.
The 'ap' argument governs whether alternative patterns are to
be used in case there is no direct hit for (p,f,m). For
example, whether to replace non-alphanumeric characters by
spaces if it would give some hits. See the Search Internals
document for a detailed description. (ap=0 forbids the
alternative pattern usage, ap=1 permits it.)
'ap' is also internally used for allowing hidden tag search
(for requests coming from webcoll, for example). In this
case ap=-9
The 'of' argument governs whether or not to print some
information to the user in case no match is found. (Usually it
prints the information for HTML formats, otherwise it is
silent.)
The 'verbose' argument controls the level of debugging information
to be printed (0=least, 9=most).
All the parameters are assumed to have been previously washed.
This function is suitable as a mid-level API.
"""
_ = gettext_set_language(ln)
hitset_empty = intbitset()
# sanity check:
if not p:
hitset_full = intbitset(trailing_bits=1)
hitset_full.discard(0)
# no pattern, so return all universe
return hitset_full
# search stage 1: break up arguments into basic search units:
if verbose and of.startswith("h"):
t1 = os.times()[4]
basic_search_units = create_basic_search_units(req, p, f, m, of)
if verbose and of.startswith("h"):
t2 = os.times()[4]
write_warning("Search stage 1: basic search units are: %s" % cgi.escape(repr(basic_search_units)), req=req)
write_warning("Search stage 1: execution took %.2f seconds." % (t2 - t1), req=req)
# search stage 2: do search for each search unit and verify hit presence:
if verbose and of.startswith("h"):
t1 = os.times()[4]
basic_search_units_hitsets = []
#prepare hiddenfield-related..
myhiddens = cfg['CFG_BIBFORMAT_HIDDEN_TAGS']
can_see_hidden = False
if req:
user_info = collect_user_info(req)
can_see_hidden = user_info.get('precached_canseehiddenmarctags', False)
if not req and ap == -9: # special request, coming from webcoll
can_see_hidden = True
if can_see_hidden:
myhiddens = []
if CFG_INSPIRE_SITE and of.startswith('h'):
# fulltext/caption search warnings for INSPIRE:
fields_to_be_searched = [f for dummy_o, p, f, m in basic_search_units]
if 'fulltext' in fields_to_be_searched:
write_warning(_("Full-text search is currently available for all arXiv papers, many theses, a few report series and some journal articles"), req=req)
elif 'caption' in fields_to_be_searched:
write_warning(_("Warning: figure caption search is only available for a subset of papers mostly from %(x_range_from_year)s-%(x_range_to_year)s.") %
{'x_range_from_year': '2008',
'x_range_to_year': '2012'}, req=req)
for idx_unit in xrange(len(basic_search_units)):
bsu_o, bsu_p, bsu_f, bsu_m = basic_search_units[idx_unit]
if bsu_f and len(bsu_f) < 2:
if of.startswith("h"):
write_warning(_("There is no index %(x_name)s. Searching for %(x_text)s in all fields.", x_name=bsu_f, x_text=bsu_p), req=req)
bsu_f = ''
bsu_m = 'w'
if of.startswith("h") and verbose:
write_warning(_('Instead searching %(x_name)s.', x_name=str([bsu_o, bsu_p, bsu_f, bsu_m])), req=req)
try:
basic_search_unit_hitset = search_unit(bsu_p, bsu_f, bsu_m, wl)
except InvenioWebSearchWildcardLimitError as excp:
basic_search_unit_hitset = excp.res
if of.startswith("h"):
write_warning(_("Search term too generic, displaying only partial results..."), req=req)
# FIXME: print warning if we use native full-text indexing
if bsu_f == 'fulltext' and bsu_m != 'w' and of.startswith('h') and not CFG_SOLR_URL:
write_warning(_("No phrase index available for fulltext yet, looking for word combination..."), req=req)
#check that the user is allowed to search with this tag
#if he/she tries it
if bsu_f and len(bsu_f) > 1 and bsu_f[0].isdigit() and bsu_f[1].isdigit():
for htag in myhiddens:
ltag = len(htag)
samelenfield = bsu_f[0:ltag]
if samelenfield == htag: #user searches by a hidden tag
#we won't show you anything..
basic_search_unit_hitset = intbitset()
if verbose >= 9 and of.startswith("h"):
write_warning("Pattern %s hitlist omitted since \
it queries in a hidden tag %s" %
(cgi.escape(repr(bsu_p)), repr(myhiddens)), req=req)
display_nearest_terms_box = False #..and stop spying, too.
if verbose >= 9 and of.startswith("h"):
write_warning("Search stage 1: pattern %s gave hitlist %s" % (cgi.escape(bsu_p), basic_search_unit_hitset), req=req)
if len(basic_search_unit_hitset) > 0 or \
ap<1 or \
bsu_o in ("|", "-") or \
((idx_unit+1)<len(basic_search_units) and basic_search_units[idx_unit+1][0]=="|"):
# stage 2-1: this basic search unit is retained, since
# either the hitset is non-empty, or the approximate
# pattern treatment is switched off, or the search unit
# was joined by an OR operator to preceding/following
# units so we do not require that it exists
basic_search_units_hitsets.append(basic_search_unit_hitset)
else:
# stage 2-2: no hits found for this search unit, try to replace non-alphanumeric chars inside pattern:
if re.search(r'[^a-zA-Z0-9\s\:]', bsu_p) and bsu_f != 'refersto' and bsu_f != 'citedby':
if bsu_p.startswith('"') and bsu_p.endswith('"'): # is it ACC query?
bsu_pn = re.sub(r'[^a-zA-Z0-9\s\:]+', "*", bsu_p)
else: # it is WRD query
bsu_pn = re.sub(r'[^a-zA-Z0-9\s\:]+', " ", bsu_p)
if verbose and of.startswith('h') and req:
write_warning("Trying (%s,%s,%s)" % (cgi.escape(bsu_pn), cgi.escape(bsu_f), cgi.escape(bsu_m)), req=req)
basic_search_unit_hitset = search_pattern(req=None, p=bsu_pn, f=bsu_f, m=bsu_m, of="id", ln=ln, wl=wl)
if len(basic_search_unit_hitset) > 0:
# we retain the new unit instead
if of.startswith('h'):
write_warning(_("No exact match found for %(x_query1)s, using %(x_query2)s instead...") %
{'x_query1': "<em>" + cgi.escape(bsu_p) + "</em>",
'x_query2': "<em>" + cgi.escape(bsu_pn) + "</em>"}, req=req)
basic_search_units[idx_unit][1] = bsu_pn
basic_search_units_hitsets.append(basic_search_unit_hitset)
else:
# stage 2-3: no hits found either, propose nearest indexed terms:
if of.startswith('h') and display_nearest_terms_box:
if req:
if bsu_f == "recid":
write_warning(_("Requested record does not seem to exist."), req=req)
else:
write_warning(create_nearest_terms_box(req.argd, bsu_p, bsu_f, bsu_m, ln=ln), req=req)
return hitset_empty
else:
# stage 2-3: no hits found either, propose nearest indexed terms:
if of.startswith('h') and display_nearest_terms_box:
if req:
if bsu_f == "recid":
write_warning(_("Requested record does not seem to exist."), req=req)
else:
write_warning(create_nearest_terms_box(req.argd, bsu_p, bsu_f, bsu_m, ln=ln), req=req)
return hitset_empty
if verbose and of.startswith("h"):
t2 = os.times()[4]
for idx_unit in range(0, len(basic_search_units)):
write_warning("Search stage 2: basic search unit %s gave %d hits." %
(basic_search_units[idx_unit][1:], len(basic_search_units_hitsets[idx_unit])), req=req)
write_warning("Search stage 2: execution took %.2f seconds." % (t2 - t1), req=req)
# search stage 3: apply boolean query for each search unit:
if verbose and of.startswith("h"):
t1 = os.times()[4]
# let the initial set be the complete universe:
hitset_in_any_collection = intbitset(trailing_bits=1)
hitset_in_any_collection.discard(0)
for idx_unit in xrange(len(basic_search_units)):
this_unit_operation = basic_search_units[idx_unit][0]
this_unit_hitset = basic_search_units_hitsets[idx_unit]
if this_unit_operation == '+':
hitset_in_any_collection.intersection_update(this_unit_hitset)
elif this_unit_operation == '-':
hitset_in_any_collection.difference_update(this_unit_hitset)
elif this_unit_operation == '|':
hitset_in_any_collection.union_update(this_unit_hitset)
else:
if of.startswith("h"):
write_warning("Invalid set operation %s." % cgi.escape(this_unit_operation), "Error", req=req)
if len(hitset_in_any_collection) == 0:
# no hits found, propose alternative boolean query:
if of.startswith('h') and display_nearest_terms_box:
nearestterms = []
for idx_unit in range(0, len(basic_search_units)):
bsu_o, bsu_p, bsu_f, bsu_m = basic_search_units[idx_unit]
if bsu_p.startswith("%") and bsu_p.endswith("%"):
bsu_p = "'" + bsu_p[1:-1] + "'"
bsu_nbhits = len(basic_search_units_hitsets[idx_unit])
# create a similar query, but with the basic search unit only
argd = {}
argd.update(req.argd)
argd['p'] = bsu_p
argd['f'] = bsu_f
nearestterms.append((bsu_p, bsu_nbhits, argd))
text = websearch_templates.tmpl_search_no_boolean_hits(
ln=ln, nearestterms=nearestterms)
write_warning(text, req=req)
if verbose and of.startswith("h"):
t2 = os.times()[4]
write_warning("Search stage 3: boolean query gave %d hits." % len(hitset_in_any_collection), req=req)
write_warning("Search stage 3: execution took %.2f seconds." % (t2 - t1), req=req)
return hitset_in_any_collection
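# Illustrative sketch (added commentary): a typical mid-level call returning an
# intbitset of record IDs; boolean operators in the pattern are rewritten into
# +/-/| units by create_basic_search_units() above:
#
#     hits = search_pattern(None, 'ellis AND muon', of='id')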
def search_pattern_parenthesised(req=None, p=None, f=None, m=None, ap=0, of="id", verbose=0, ln=CFG_SITE_LANG, display_nearest_terms_box=True, wl=0):
"""Search for complex pattern 'p' containing parenthesis within field 'f' according to
matching type 'm'. Return hitset of recIDs.
For more details on the parameters see 'search_pattern'
"""
_ = gettext_set_language(ln)
spires_syntax_converter = SpiresToInvenioSyntaxConverter()
spires_syntax_query = False
# if the pattern uses SPIRES search syntax, convert it to Invenio syntax
if spires_syntax_converter.is_applicable(p):
spires_syntax_query = True
p = spires_syntax_converter.convert_query(p)
# sanity check: do not call parenthesised parser for search terms
# like U(1) but still call it for searches like ('U(1)' | 'U(2)'):
if not re_pattern_parens.search(re_pattern_parens_quotes.sub('_', p)):
return search_pattern(req, p, f, m, ap, of, verbose, ln, display_nearest_terms_box=display_nearest_terms_box, wl=wl)
# Try searching with parentheses
try:
parser = SearchQueryParenthesisedParser()
# get a hitset with all recids
result_hitset = intbitset(trailing_bits=1)
# parse the query. The result is list of [op1, expr1, op2, expr2, ..., opN, exprN]
parsing_result = parser.parse_query(p)
if verbose and of.startswith("h"):
write_warning("Search stage 1: search_pattern_parenthesised() searched %s." % repr(p), req=req)
write_warning("Search stage 1: search_pattern_parenthesised() returned %s." % repr(parsing_result), req=req)
# go through every pattern
# calculate hitset for it
# combine pattern's hitset with the result using the corresponding operator
for index in xrange(0, len(parsing_result)-1, 2):
current_operator = parsing_result[index]
current_pattern = parsing_result[index+1]
if CFG_INSPIRE_SITE and spires_syntax_query:
# setting ap=0 to turn off approximate matching for 0 results.
# Doesn't work well in combinations.
# FIXME: The right fix involves collecting statuses for each
# hitset, then showing a nearest terms box exactly once,
# outside this loop.
ap = 0
display_nearest_terms_box = False
# obtain a hitset for the current pattern
current_hitset = search_pattern(req, current_pattern, f, m, ap, of, verbose, ln, display_nearest_terms_box=display_nearest_terms_box, wl=wl)
# combine the current hitset with resulting hitset using the current operator
if current_operator == '+':
result_hitset = result_hitset & current_hitset
elif current_operator == '-':
result_hitset = result_hitset - current_hitset
elif current_operator == '|':
result_hitset = result_hitset | current_hitset
else:
assert False, "Unknown operator in search_pattern_parenthesised()"
return result_hitset
# If searching with parentheses fails, perform the search ignoring parentheses
except SyntaxError:
write_warning(_("Search syntax misunderstood. Ignoring all parentheses in the query. If this doesn't help, please check your search and try again."), req=req)
# remove the parentheses from the query. The current implementation removes all parentheses,
# but it could be improved to remove only those that are not inside quotes
p = p.replace('(', ' ')
p = p.replace(')', ' ')
return search_pattern(req, p, f, m, ap, of, verbose, ln, display_nearest_terms_box=display_nearest_terms_box, wl=wl)
def search_unit(p, f=None, m=None, wl=0, ignore_synonyms=None):
"""Search for basic search unit defined by pattern 'p' and field
'f' and matching type 'm'. Return hitset of recIDs.
All the parameters are assumed to have been previously washed.
'p' is assumed to be already a ``basic search unit'' so that it
is searched as such and is not broken up in any way. Only
wildcard and span queries are being detected inside 'p'.
If CFG_WEBSEARCH_SYNONYM_KBRS is set and we are searching in
one of the indexes that has defined runtime synonym knowledge
base, then look up there and automatically enrich search
results with results for synonyms.
In case the wildcard limit (wl) is greater than 0 and this limit
is reached an InvenioWebSearchWildcardLimitError will be raised.
In case you want to call this function with no limit for the
wildcard queries, wl should be 0.
Parameter 'ignore_synonyms' is a list of terms for which we
should not try to further find a synonym.
This function is suitable as a low-level API.
"""
## create empty output results set:
hitset = intbitset()
if not p: # sanity checking
return hitset
tokenizer = get_field_tokenizer_type(f)
hitset_cjk = intbitset()
if tokenizer == "BibIndexCJKTokenizer":
if is_there_any_CJK_character_in_text(p):
cjk_tok = BibIndexCJKTokenizer()
chars = cjk_tok.tokenize_for_words(p)
for char in chars:
hitset_cjk |= search_unit_in_bibwords(char, f, wl)
## eventually look up runtime synonyms:
hitset_synonyms = intbitset()
if (f or 'anyfield') in CFG_WEBSEARCH_SYNONYM_KBRS:
if ignore_synonyms is None:
ignore_synonyms = []
ignore_synonyms.append(p)
for p_synonym in get_synonym_terms(p,
CFG_WEBSEARCH_SYNONYM_KBRS[f or 'anyfield'][0],
CFG_WEBSEARCH_SYNONYM_KBRS[f or 'anyfield'][1]):
if p_synonym != p and \
not p_synonym in ignore_synonyms:
hitset_synonyms |= search_unit(p_synonym, f, m, wl,
ignore_synonyms)
## look up hits:
if f == 'fulltext' and get_idx_indexer('fulltext') == 'SOLR' and CFG_SOLR_URL:
# redirect to Solr
try:
return search_unit_in_solr(p, f, m)
except:
# There were troubles with getting full-text search
# results from Solr. Let us alert the admin of these
# problems and let us simply return empty results to the
# end user.
register_exception()
return hitset
elif f == 'fulltext' and get_idx_indexer('fulltext') == 'XAPIAN' and CFG_XAPIAN_ENABLED:
# redirect to Xapian
try:
return search_unit_in_xapian(p, f, m)
except:
# There were troubles with getting full-text search
# results from Xapian. Let us alert the admin of these
# problems and let us simply return empty results to the
# end user.
register_exception()
return hitset
if f == 'datecreated':
hitset = search_unit_in_bibrec(p, p, 'c')
elif f == 'datemodified':
hitset = search_unit_in_bibrec(p, p, 'm')
elif f == 'refersto':
# we are doing search by the citation count
hitset = search_unit_refersto(p)
elif f == 'referstoexcludingselfcites':
# we are doing search by the citation count
hitset = search_unit_refersto_excluding_selfcites(p)
elif f == 'cataloguer':
# we are doing search by the cataloguer nickname
hitset = search_unit_in_record_history(p)
elif f == 'rawref':
from invenio.legacy.refextract.api import search_from_reference
field, pattern = search_from_reference(p)
return search_unit(pattern, field)
elif f == 'citedby':
# we are doing search by the citation count
hitset = search_unit_citedby(p)
elif f == 'collection':
# we are doing search by the collection name or MARC field
hitset = search_unit_collection(p, m, wl=wl)
elif f == 'tag':
module_found = False
try:
from invenio.modules.tags.search_units import search_unit_in_tags
module_found = True
except:
# WebTag module is disabled, so ignore 'tag' selector
pass
if module_found:
return search_unit_in_tags(p)
elif f == 'citedbyexcludingselfcites':
# we are doing search by the citation count
hitset = search_unit_citedby_excluding_selfcites(p)
elif m == 'a' or m == 'r' or f == 'subject':
# we are doing either phrase search or regexp search
if f == 'fulltext':
# FIXME: workaround for not having phrase index yet
return search_pattern(None, p, f, 'w')
index_id = get_index_id_from_field(f)
if index_id != 0:
if m == 'a' and index_id in get_idxpair_field_ids():
# for exact matches on the admin-configured fields we search in the pair tables
hitset = search_unit_in_idxpairs(p, f, m, wl)
else:
hitset = search_unit_in_idxphrases(p, f, m, wl)
else:
hitset = search_unit_in_bibxxx(p, f, m, wl)
# if not hitset and m == 'a' and (p[0] != '%' and p[-1] != '%'):
# #if we have no results by doing exact matching, do partial matching
# #for removing the distinction between simple and double quotes
# hitset = search_unit_in_bibxxx('%' + p + '%', f, m, wl)
elif p.startswith("cited:"):
# we are doing search by the citation count
hitset = search_unit_by_times_cited(p[6:])
elif p.startswith("citedexcludingselfcites:"):
# we are doing search by the citation count
hitset = search_unit_by_times_cited(p[len("citedexcludingselfcites:"):], exclude_selfcites=True)
else:
# we are doing bibwords search by default
hitset = search_unit_in_bibwords(p, f, wl=wl)
## merge synonym results and return total:
hitset |= hitset_synonyms
hitset |= hitset_cjk
return hitset
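# Illustrative usage of search_unit() -- a sketch only; the patterns and field
# names below are hypothetical examples, not taken from this module:
#
#     hits_w = search_unit('ellis', f='author', m='w')        # word search
#     hits_a = search_unit('ellis, j', f='author', m='a')     # exact phrase
#     hits_r = search_unit('^ellis, j.*', f='author', m='r')  # regexp search
#     combined = (hits_w & hits_a) | hits_r                   # intbitset algebra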
def get_idxpair_field_ids():
"""Returns the list of ids for the fields that idxPAIRS should be used on"""
index_dict = dict(run_sql("SELECT name, id FROM idxINDEX"))
return [index_dict[field] for field in index_dict if field in cfg['CFG_WEBSEARCH_IDXPAIRS_FIELDS']]
def search_unit_in_bibwords(word, f, decompress=zlib.decompress, wl=0):
"""Searches for 'word' inside bibwordsX table for field 'f' and returns hitset of recIDs."""
hitset = intbitset() # will hold output result set
set_used = 0 # not-yet-used flag, to be able to circumvent set operations
limit_reached = 0 # flag for knowing if the query limit has been reached
# if no field is specified, search in the global index.
f = f or 'anyfield'
index_id = get_index_id_from_field(f)
if index_id:
bibwordsX = "idxWORD%02dF" % index_id
stemming_language = get_index_stemming_language(index_id)
else:
return intbitset() # word index f does not exist
# wash 'word' argument and run query:
if f.endswith('count') and word.endswith('+'):
# field count query of the form N+ so transform N+ to N->99999:
word = word[:-1] + '->99999'
word = word.replace('*', '%') # we now use '*' as the truncation character
words = word.split("->", 1) # check for span query
if len(words) == 2:
word0 = re_word.sub('', words[0])
word1 = re_word.sub('', words[1])
if stemming_language:
word0 = lower_index_term(word0)
word1 = lower_index_term(word1)
# We remove trailing truncation character before stemming
if word0.endswith('%'):
word0 = stem(word0[:-1], stemming_language) + '%'
else:
word0 = stem(word0, stemming_language)
if word1.endswith('%'):
word1 = stem(word1[:-1], stemming_language) + '%'
else:
word1 = stem(word1, stemming_language)
word0_washed = wash_index_term(word0)
word1_washed = wash_index_term(word1)
if f.endswith('count'):
# field count query; convert to integers in order
# to have numerical behaviour for 'BETWEEN n1 AND n2' query
try:
word0_washed = int(word0_washed)
word1_washed = int(word1_washed)
except ValueError:
pass
try:
res = run_sql_with_limit("SELECT term,hitlist FROM %s WHERE term BETWEEN %%s AND %%s" % bibwordsX,
(word0_washed, word1_washed), wildcard_limit=wl)
except InvenioDbQueryWildcardLimitError as excp:
res = excp.res
limit_reached = 1 # set the limit reached flag to true
else:
if f == 'journal':
pass # FIXME: quick hack for the journal index
else:
word = re_word.sub('', word)
if stemming_language:
word = lower_index_term(word)
# We remove trailing truncation character before stemming
if word.endswith('%'):
word = stem(word[:-1], stemming_language) + '%'
else:
word = stem(word, stemming_language)
if word.find('%') >= 0: # do we have wildcard in the word?
if f == 'journal':
# FIXME: quick hack for the journal index
# FIXME: we can run a sanity check here for all indexes
res = ()
else:
try:
res = run_sql_with_limit("SELECT term,hitlist FROM %s WHERE term LIKE %%s" % bibwordsX,
(wash_index_term(word),), wildcard_limit = wl)
except InvenioDbQueryWildcardLimitError as excp:
res = excp.res
limit_reached = 1 # set the limit reached flag to true
else:
res = run_sql("SELECT term,hitlist FROM %s WHERE term=%%s" % bibwordsX,
(wash_index_term(word),))
# fill the result set:
for word, hitlist in res:
hitset_bibwrd = intbitset(hitlist)
# add the results:
if set_used:
hitset.union_update(hitset_bibwrd)
else:
hitset = hitset_bibwrd
set_used = 1
#check to see if the query limit was reached
if limit_reached:
#raise an exception, so we can print a nice message to the user
raise InvenioWebSearchWildcardLimitError(hitset)
# okay, return result set:
return hitset
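# Sketch of the pattern washing performed above (assumed examples; stemming
# and index-term washing may further alter the terms):
#
#     'muon*'         -> term LIKE 'muon%'                 ('*' is the truncation char)
#     'alpha->gamma'  -> term BETWEEN 'alpha' AND 'gamma'  (span query)
#     '2+' on a *count index -> '2->99999'                 (open-ended field count)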
def search_unit_in_idxpairs(p, f, search_type, wl=0):
"""Searches for pair 'p' inside idxPAIR table for field 'f' and
returns hitset of recIDs found."""
limit_reached = 0 # flag for knowing if the query limit has been reached
do_exact_search = True # flag to know when it makes sense to try to do exact matching
result_set = intbitset()
#determine the idxPAIR table to read from
index_id = get_index_id_from_field(f)
if not index_id:
return intbitset()
stemming_language = get_index_stemming_language(index_id)
pairs_tokenizer = BibIndexDefaultTokenizer(stemming_language)
idxpair_table_washed = wash_table_column_name("idxPAIR%02dF" % index_id)
if p.startswith("%") and p.endswith("%"):
p = p[1:-1]
original_pattern = p
p = string.replace(p, '*', '%') # we now use '*' as the truncation character
queries_releated_vars = [] # contains tuples of (query_addons, query_params, use_query_limit)
#is it a span query?
ps = p.split("->", 1)
if len(ps) == 2 and not (ps[0].endswith(' ') or ps[1].startswith(' ')):
#so we are dealing with a span query
pairs_left = pairs_tokenizer.tokenize_for_pairs(ps[0])
pairs_right = pairs_tokenizer.tokenize_for_pairs(ps[1])
if not pairs_left or not pairs_right:
# we are not actually dealing with pairs but with words
return search_unit_in_bibwords(original_pattern, f, wl=wl)
elif len(pairs_left) != len(pairs_right):
# it is kind of hard to know what the user actually wanted
# we have to handle e.g. foo bar baz -> qux xyz, so let's switch to phrase search
return search_unit_in_idxphrases(original_pattern, f, search_type, wl)
elif len(pairs_left) > 1 and \
len(pairs_right) > 1 and \
pairs_left[:-1] != pairs_right[:-1]:
# again we have something like: foo bar baz -> abc xyz qux
# so we'd better switch to phrase
return search_unit_in_idxphrases(original_pattern, f, search_type, wl)
else:
# finally, we can treat the search using idxPairs
# at this step we have either: foo bar -> abc xyz
# or foo bar abc -> foo bar xyz
queries_releated_vars = [("BETWEEN %s AND %s", (pairs_left[-1], pairs_right[-1]), True)]
for pair in pairs_left[:-1]:  # should be equal to pairs_right[:-1]
queries_releated_vars.append(("= %s", (pair, ), False))
do_exact_search = False # no exact search for span queries
elif p.find('%') > -1:
#tokenizing p will remove the '%', so we have to make sure it stays
replacement = 'xxxxxxxxxx'  # hopefully this will not clash with anything in the future
p = string.replace(p, '%', replacement)
pairs = pairs_tokenizer.tokenize_for_pairs(p)
if not pairs:
# we are not actually dealing with pairs but with words
return search_unit_in_bibwords(original_pattern, f, wl=wl)
queries_releated_vars = []
for pair in pairs:
if string.find(pair, replacement) > -1:
pair = string.replace(pair, replacement, '%') #we replace back the % sign
queries_releated_vars.append(("LIKE %s", (pair, ), True))
else:
queries_releated_vars.append(("= %s", (pair, ), False))
do_exact_search = False
else:
#normal query
pairs = pairs_tokenizer.tokenize_for_pairs(p)
if not pairs:
# we are not actually dealing with pairs but with words
return search_unit_in_bibwords(original_pattern, f, wl=wl)
queries_releated_vars = []
for pair in pairs:
queries_releated_vars.append(("= %s", (pair, ), False))
first_results = 1 # flag to know if it's the first set of results or not
for query_var in queries_releated_vars:
query_addons = query_var[0]
query_params = query_var[1]
use_query_limit = query_var[2]
if use_query_limit:
try:
res = run_sql_with_limit("SELECT term, hitlist FROM %s WHERE term %s"
% (idxpair_table_washed, query_addons), query_params, wildcard_limit=wl) #kwalitee:disable=sql
except InvenioDbQueryWildcardLimitError as excp:
res = excp.res
limit_reached = 1 # set the limit reached flag to true
else:
res = run_sql("SELECT term, hitlist FROM %s WHERE term %s"
% (idxpair_table_washed, query_addons), query_params) #kwalitee:disable=sql
if not res:
return intbitset()
for pair, hitlist in res:
hitset_idxpairs = intbitset(hitlist)
if first_results:
result_set = hitset_idxpairs
first_results = 0
else:
result_set.intersection_update(hitset_idxpairs)
#check to see if the query limit was reached
if limit_reached:
#raise an exception, so we can print a nice message to the user
raise InvenioWebSearchWildcardLimitError(result_set)
# check if we need to eliminate the false positives
if cfg['CFG_WEBSEARCH_IDXPAIRS_EXACT_SEARCH'] and do_exact_search:
# we need to eliminate the false positives
idxphrase_table_washed = wash_table_column_name("idxPHRASE%02dR" % index_id)
not_exact_search = intbitset()
for recid in result_set:
res = run_sql("SELECT termlist FROM %s WHERE id_bibrec %s" %(idxphrase_table_washed, '=%s'), (recid, )) #kwalitee:disable=sql
if res:
termlist = deserialize_via_marshal(res[0][0])
if not [term for term in termlist if term.lower().find(p.lower()) > -1]:
not_exact_search.add(recid)
else:
not_exact_search.add(recid)
# remove the recs that are false positives from the final result
result_set.difference_update(not_exact_search)
return result_set
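# Hedged sketch of the pair-based matching above. tokenize_for_pairs() is
# assumed to emit adjacent word pairs, e.g. (hypothetical):
#
#     'higgs boson decay' -> ['higgs boson', 'boson decay']
#
# One BETWEEN/LIKE/= condition is issued per pair and the resulting hitsets
# are intersected; when CFG_WEBSEARCH_IDXPAIRS_EXACT_SEARCH is enabled, the
# candidates are re-checked against idxPHRASE%02dR to drop false positives.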
def search_unit_in_idxphrases(p, f, search_type, wl=0):
"""Searches for phrase 'p' inside idxPHRASE*F table for field 'f' and returns hitset of recIDs found.
The search type is defined by 'search_type' (e.g. 'r' for a regexp search)."""
# call word search method in some cases:
if f.endswith('count'):
return search_unit_in_bibwords(p, f, wl=wl)
hitset = intbitset() # will hold output result set
set_used = 0 # not-yet-used flag, to be able to circumvent set operations
limit_reached = 0 # flag for knowing if the query limit has been reached
use_query_limit = False # flag for knowing if to limit the query results or not
# deduce in which idxPHRASE table we will search:
idxphraseX = "idxPHRASE%02dF" % get_index_id_from_field("anyfield")
if f:
index_id = get_index_id_from_field(f)
if index_id:
idxphraseX = "idxPHRASE%02dF" % index_id
else:
return intbitset() # phrase index f does not exist
# detect query type (exact phrase, partial phrase, regexp):
if search_type == 'r':
query_addons = "REGEXP %s"
query_params = (p,)
use_query_limit = True
else:
p = p.replace('*', '%') # we now use '*' as the truncation character
ps = p.split("->", 1) # check for span query:
if len(ps) == 2 and not (ps[0].endswith(' ') or ps[1].startswith(' ')):
query_addons = "BETWEEN %s AND %s"
query_params = (ps[0], ps[1])
use_query_limit = True
else:
if p.find('%') > -1:
query_addons = "LIKE %s"
query_params = (p,)
use_query_limit = True
else:
query_addons = "= %s"
query_params = (p,)
# special washing for fuzzy author index:
if f in ('author', 'firstauthor', 'exactauthor', 'exactfirstauthor', 'authorityauthor'):
query_params_washed = ()
for query_param in query_params:
query_params_washed += (wash_author_name(query_param),)
query_params = query_params_washed
# perform search:
if use_query_limit:
try:
res = run_sql_with_limit("SELECT term,hitlist FROM %s WHERE term %s" % (idxphraseX, query_addons),
query_params, wildcard_limit=wl)
except InvenioDbQueryWildcardLimitError as excp:
res = excp.res
limit_reached = 1 # set the limit reached flag to true
else:
res = run_sql("SELECT term,hitlist FROM %s WHERE term %s" % (idxphraseX, query_addons), query_params)
# fill the result set:
for dummy_word, hitlist in res:
hitset_bibphrase = intbitset(hitlist)
# add the results:
if set_used:
hitset.union_update(hitset_bibphrase)
else:
hitset = hitset_bibphrase
set_used = 1
#check to see if the query limit was reached
if limit_reached:
#raise an exception, so we can print a nice message to the user
raise InvenioWebSearchWildcardLimitError(hitset)
# okay, return result set:
return hitset
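# The query-type detection above maps, roughly (assumed examples):
#
#     search_type == 'r'        -> term REGEXP %s
#     'alpha->gamma'            -> term BETWEEN %s AND %s   (span query)
#     'quark*' (i.e. 'quark%')  -> term LIKE %s             (truncation)
#     anything else             -> term = %s                (exact phrase)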
def search_unit_in_bibxxx(p, f, type, wl=0):
"""Searches for pattern 'p' inside bibxxx tables for field 'f' and returns hitset of recIDs found.
The search type is defined by 'type' (e.g. equals to 'r' for a regexp search)."""
# call word search method in some cases:
if f == 'journal' or f.endswith('count'):
return search_unit_in_bibwords(p, f, wl=wl)
limit_reached = 0 # flag for knowing if the query limit has been reached
use_query_limit = False # flag for knowing if to limit the query results or not
query_addons = "" # will hold additional SQL code for the query
query_params = () # will hold parameters for the query (their number may vary depending on TYPE argument)
# wash arguments:
f = string.replace(f, '*', '%') # replace truncation char '*' in field definition
if type == 'r':
query_addons = "REGEXP %s"
query_params = (p,)
use_query_limit = True
else:
p = string.replace(p, '*', '%') # we now use '*' as the truncation character
ps = string.split(p, "->", 1) # check for span query:
if len(ps) == 2 and not (ps[0].endswith(' ') or ps[1].startswith(' ')):
query_addons = "BETWEEN %s AND %s"
query_params = (ps[0], ps[1])
use_query_limit = True
else:
if string.find(p, '%') > -1:
query_addons = "LIKE %s"
query_params = (p,)
use_query_limit = True
else:
query_addons = "= %s"
query_params = (p,)
# construct 'tl' which defines the tag list (MARC tags) to search in:
tl = []
if len(f) >= 2 and str(f[0]).isdigit() and str(f[1]).isdigit():
tl.append(f) # 'f' seems to be okay as it starts by two digits
else:
# deduce desired MARC tags on the basis of chosen 'f'
tl = get_field_tags(f)
if not tl:
# f index does not exist, nevermind
pass
# okay, start search:
l = [] # will hold list of recID that matched
for t in tl:
# deduce into which bibxxx table we will search:
digit1, digit2 = int(t[0]), int(t[1])
bx = "bib%d%dx" % (digit1, digit2)
bibx = "bibrec_bib%d%dx" % (digit1, digit2)
# construct and run query:
if t == "001":
if query_addons.find('BETWEEN') > -1 or query_addons.find('=') > -1:
# verify that the params are integers (to avoid returning record 123 when searching for 123foo)
try:
query_params = tuple(int(param) for param in query_params)
except ValueError:
return intbitset()
if use_query_limit:
try:
res = run_sql_with_limit("SELECT id FROM bibrec WHERE id %s" % query_addons,
query_params, wildcard_limit=wl)
except InvenioDbQueryWildcardLimitError as excp:
res = excp.res
limit_reached = 1 # set the limit reached flag to true
else:
res = run_sql("SELECT id FROM bibrec WHERE id %s" % query_addons,
query_params)
else:
query = "SELECT bibx.id_bibrec FROM %s AS bx LEFT JOIN %s AS bibx ON bx.id=bibx.id_bibxxx WHERE bx.value %s" % \
(bx, bibx, query_addons)
if len(t) != 6 or t[-1:]=='%':
# wildcard query, or only the beginning of field 't'
# is defined, so add wildcard character:
query += " AND bx.tag LIKE %s"
query_params_and_tag = query_params + (t + '%',)
else:
# exact query for 't':
query += " AND bx.tag=%s"
query_params_and_tag = query_params + (t,)
if use_query_limit:
try:
res = run_sql_with_limit(query, query_params_and_tag, wildcard_limit=wl)
except InvenioDbQueryWildcardLimitError as excp:
res = excp.res
limit_reached = 1 # set the limit reached flag to true
else:
res = run_sql(query, query_params_and_tag)
# fill the result set:
for id_bibrec in res:
if id_bibrec[0]:
l.append(id_bibrec[0])
# check no of hits found:
nb_hits = len(l)
# okay, return result set:
hitset = intbitset(l)
#check to see if the query limit was reached
if limit_reached:
#raise an exception, so we can print a nice message to the user
raise InvenioWebSearchWildcardLimitError(hitset)
return hitset
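# Hedged sketch of the tag-list deduction above: a field code is mapped to
# MARC tags via get_field_tags(); the values below are hypothetical:
#
#     f = 'author'  -> tl = ['100__a', '700__a']
#     f = '245__a'  -> tl = ['245__a']     # already a concrete MARC tag
#     t = '001'     -> searched directly against bibrec.id (integer-checked above)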
def search_unit_in_solr(p, f=None, m=None):
"""
Query a Solr index and return an intbitset corresponding
to the result. Parameters (p,f,m) are usual search unit ones.
"""
if m and (m == 'a' or m == 'r'): # phrase/regexp query
if p.startswith('%') and p.endswith('%'):
p = p[1:-1] # fix for partial phrase
p = '"' + p + '"'
return solr_get_bitset(f, p)
def search_unit_in_xapian(p, f=None, m=None):
"""
Query a Xapian index and return an intbitset corresponding
to the result. Parameters (p,f,m) are usual search unit ones.
"""
if m and (m == 'a' or m == 'r'): # phrase/regexp query
if p.startswith('%') and p.endswith('%'):
p = p[1:-1] # fix for partial phrase
p = '"' + p + '"'
return xapian_get_bitset(f, p)
def search_unit_in_bibrec(datetext1, datetext2, search_type='c'):
"""
Return hitset of recIDs found that were either created or modified
(according to the 'search_type' arg being 'c' or 'm') from datetext1 until datetext2, inclusive.
Does not pay attention to pattern, collection, anything. Useful
to intersect later on with the 'real' query.
"""
hitset = intbitset()
if search_type and search_type.startswith("m"):
search_type = "modification_date"
else:
search_type = "creation_date" # by default we are searching for creation dates
parts = datetext1.split('->')
if len(parts) > 1 and datetext1 == datetext2:
datetext1 = parts[0]
datetext2 = parts[1]
if datetext1 == datetext2:
res = run_sql("SELECT id FROM bibrec WHERE %s LIKE %%s" % (search_type,),
(datetext1 + '%',))
else:
res = run_sql("SELECT id FROM bibrec WHERE %s>=%%s AND %s<=%%s" % (search_type, search_type),
(datetext1, datetext2))
for row in res:
hitset += row[0]
return hitset
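# Assumed date patterns accepted above:
#
#     '2012-06'                 -> creation_date LIKE '2012-06%'
#     '2012-01-01->2012-12-31'  -> creation_date >= ... AND <= ... (span query)
#     search_type 'm' switches the column to modification_date.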
def search_unit_by_times_cited(p, exclude_selfcites=False):
"""
Return hitset of recIDs found that are cited P times.
Usually P looks like '10->23'.
"""
numstr = '"'+p+'"'
# We may need the ids of all records in order to find the records
# that do _not_ have any cites; this is needed only when p is 0,
# starts with '0->' or ends with '->0'.
allrecs = []
if p == 0 or p == "0" or \
p.startswith("0->") or p.endswith("->0"):
allrecs = intbitset(run_sql("SELECT id FROM bibrec"))
return get_records_with_num_cites(numstr, allrecs,
exclude_selfcites=exclude_selfcites)
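# Assumed citation-count patterns handled above:
#
#     '10'     -> records cited exactly 10 times
#     '10->23' -> records cited between 10 and 23 times
#     '0->5'   -> includes uncited records, hence the full bibrec id scan above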
def search_unit_refersto(query):
"""
Search for records satisfying the query (e.g. author:ellis) and
return list of records that refer to (i.e. cite) these records.
"""
if query:
ahitset = search_pattern(p=query)
return get_refersto_hitset(ahitset)
else:
return intbitset([])
def search_unit_refersto_excluding_selfcites(query):
"""
Search for records satisfying the query (e.g. author:ellis) and
return list of records that refer to (i.e. cite) these records, excluding self-citations.
"""
if query:
ahitset = search_pattern(p=query)
citers = intbitset()
citations = get_cited_by_list(ahitset)
selfcitations = get_self_cited_by_list(ahitset)
for cites, selfcites in zip(citations, selfcitations):
# cites is in the form [(citee, citers), ...]
citers += cites[1] - selfcites[1]
return citers
else:
return intbitset([])
def search_unit_in_record_history(query):
"""
Return hitset of recIDs that were modified by the given cataloguer
"""
if query:
try:
cataloguer_name, modification_date = query.split(":")
except ValueError:
cataloguer_name = query
modification_date = ""
if modification_date:
spires_syntax_converter = SpiresToInvenioSyntaxConverter()
modification_date = spires_syntax_converter.convert_date(modification_date)
parts = modification_date.split('->', 1)
if len(parts) > 1:
start_date, end_date = parts
res = run_sql("SELECT id_bibrec FROM hstRECORD WHERE job_person=%s AND job_date>=%s AND job_date<=%s",
(cataloguer_name, start_date, end_date))
else:
res = run_sql("SELECT id_bibrec FROM hstRECORD WHERE job_person=%s AND job_date LIKE %s",
(cataloguer_name, modification_date + '%',))
return intbitset(res)
else:
sql = "SELECT id_bibrec FROM hstRECORD WHERE job_person=%s"
res = intbitset(run_sql(sql, (cataloguer_name,)))
return res
else:
return intbitset([])
def search_unit_citedby(query):
"""
Search for records satisfying the query (e.g. author:ellis) and
return list of records cited by these records.
"""
if query:
ahitset = search_pattern(p=query)
if ahitset:
return get_citedby_hitset(ahitset)
else:
return intbitset([])
else:
return intbitset([])
def search_unit_collection(query, m, wl=None):
"""
Search for records satisfying the query (e.g. collection:"BOOK" or
collection:"Books") and return list of records in the collection.
"""
if len(query):
ahitset = get_collection_reclist(query)
if not ahitset:
return search_unit_in_bibwords(query, 'collection', wl=wl)
return ahitset
else:
return intbitset([])
def search_unit_citedby_excluding_selfcites(query):
"""
Search for records satisfying the query (e.g. author:ellis) and
return list of records cited by these records, excluding self-citations.
"""
if query:
ahitset = search_pattern(p=query)
citees = intbitset()
references = get_refers_to_list(ahitset)
selfreferences = get_self_refers_to_list(ahitset)
for refs, selfrefs in zip(references, selfreferences):
# refs is in the form [(citer, citees), ...]
citees += refs[1] - selfrefs[1]
return citees
else:
return intbitset([])
def get_records_that_can_be_displayed(user_info,
hitset_in_any_collection,
current_coll=CFG_SITE_NAME,
colls=None,
permitted_restricted_collections=None):
"""
Return records that can be displayed.
"""
records_that_can_be_displayed = intbitset()
if colls is None:
colls = [current_coll]
# let's get the restricted collections the user has rights to view
if permitted_restricted_collections is None:
permitted_restricted_collections = user_info.get('precached_permitted_restricted_collections', [])
policy = CFG_WEBSEARCH_VIEWRESTRCOLL_POLICY.strip().upper()
current_coll_children = get_collection_allchildren(current_coll) # real & virtual
# add all restricted collections, that the user has access to, and are under the current collection
# do not use set here, in order to maintain a specific order:
# children of 'cc' (real, virtual, restricted), rest of 'c' that are not cc's children
colls_to_be_displayed = [coll for coll in current_coll_children if coll in colls or coll in permitted_restricted_collections]
colls_to_be_displayed.extend([coll for coll in colls if coll not in colls_to_be_displayed])
if policy == 'ANY':# the user needs to have access to at least one collection that restricts the records
# we need this to be able to remove records that are in both a public and a restricted collection
permitted_recids = intbitset()
notpermitted_recids = intbitset()
for collection in restricted_collection_cache.cache:
if collection in permitted_restricted_collections:
permitted_recids |= get_collection_reclist(collection)
else:
notpermitted_recids |= get_collection_reclist(collection)
records_that_can_be_displayed = hitset_in_any_collection - (notpermitted_recids - permitted_recids)
else:  # the user needs to have access to all collections that restrict the records
notpermitted_recids = intbitset()
for collection in restricted_collection_cache.cache:
if collection not in permitted_restricted_collections:
notpermitted_recids |= get_collection_reclist(collection)
records_that_can_be_displayed = hitset_in_any_collection - notpermitted_recids
if records_that_can_be_displayed.is_infinite():
# We should not return infinite results for user.
records_that_can_be_displayed = intbitset()
for coll in colls_to_be_displayed:
records_that_can_be_displayed |= get_collection_reclist(coll)
return records_that_can_be_displayed
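# Minimal sketch of the two restriction policies above, with hypothetical
# collection names:
#
#     permitted    = get_collection_reclist('Restricted A')   # user may view
#     notpermitted = get_collection_reclist('Restricted B')   # user may not view
#     'ANY' policy: hits - (notpermitted - permitted)   # one accessible restricted coll suffices
#     otherwise   : hits - notpermitted                 # every restricting coll must be permitted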
def intersect_results_with_collrecs(req, hitset_in_any_collection, colls, of="hb", verbose=0, ln=CFG_SITE_LANG, display_nearest_terms_box=True):
"""Return dict of hitsets given by intersection of hitset with the collection universes."""
_ = gettext_set_language(ln)
# search stage 4: intersect with the collection universe
if verbose and of.startswith("h"):
t1 = os.times()[4]
results = {} # all final results
results_nbhits = 0
# calculate the list of recids (restricted or not) that the user has the right to access and that we should display (only those)
if not req or isinstance(req, cStringIO.OutputType): # called from CLI
user_info = {}
for coll in colls:
results[coll] = hitset_in_any_collection & get_collection_reclist(coll)
results_nbhits += len(results[coll])
records_that_can_be_displayed = hitset_in_any_collection
permitted_restricted_collections = []
else:
user_info = collect_user_info(req)
# let's get the restricted collections the user has rights to view
if user_info['guest'] == '1':
## For guest users that are actually authorized to some restricted
## collection (by virtue of the IP address in a FireRole rule)
## we explicitly build the list of permitted_restricted_collections
permitted_restricted_collections = get_permitted_restricted_collections(user_info)
else:
permitted_restricted_collections = user_info.get('precached_permitted_restricted_collections', [])
# let's build the list of both public and restricted
# child collections of the collection from which the user
# started his/her search. This list of child collections will be
# used in the warning proposing a search in those collections
try:
current_coll = req.argd['cc'] # current_coll: coll from which user started his/her search
except:
from flask import request
current_coll = request.args.get('cc', CFG_SITE_NAME) # current_coll: coll from which user started his/her search
current_coll_children = get_collection_allchildren(current_coll) # real & virtual
# add all restricted collections, that the user has access to, and are under the current collection
# do not use set here, in order to maintain a specific order:
# children of 'cc' (real, virtual, restricted), rest of 'c' that are not cc's children
colls_to_be_displayed = [coll for coll in current_coll_children if coll in colls or coll in permitted_restricted_collections]
colls_to_be_displayed.extend([coll for coll in colls if coll not in colls_to_be_displayed])
records_that_can_be_displayed = get_records_that_can_be_displayed(
user_info,
hitset_in_any_collection,
current_coll,
colls,
permitted_restricted_collections)
for coll in colls_to_be_displayed:
results[coll] = results.get(coll, intbitset()) | (records_that_can_be_displayed & get_collection_reclist(coll))
results_nbhits += len(results[coll])
if results_nbhits == 0:
# no hits found, try to search in Home and restricted and/or hidden collections:
results = {}
results_in_Home = records_that_can_be_displayed & get_collection_reclist(CFG_SITE_NAME)
results_in_restricted_collections = intbitset()
results_in_hidden_collections = intbitset()
for coll in permitted_restricted_collections:
if not get_coll_ancestors(coll): # hidden collection
results_in_hidden_collections.union_update(records_that_can_be_displayed & get_collection_reclist(coll))
else:
results_in_restricted_collections.union_update(records_that_can_be_displayed & get_collection_reclist(coll))
# in this way, we do not count twice records that are both in the Home collection and in a restricted collection
total_results = len(results_in_Home.union(results_in_restricted_collections))
if total_results > 0:
# some hits found in Home and/or restricted collections, so propose this search:
if of.startswith("h") and display_nearest_terms_box:
url = websearch_templates.build_search_url(req.argd, cc=CFG_SITE_NAME, c=[])
len_colls_to_display = len(colls_to_be_displayed)
# trim the list of collections to first two, since it might get very large
write_warning(_("No match found in collection %(x_collection)s. Other collections gave %(x_url_open)s%(x_nb_hits)d hits%(x_url_close)s.") %
{'x_collection': '<em>' +
string.join([get_coll_i18nname(coll, ln, False) for coll in colls_to_be_displayed[:2]], ', ') +
(len_colls_to_display > 2 and ' et al' or '') + '</em>',
'x_url_open': '<a class="nearestterms" href="%s">' % (url),
'x_nb_hits': total_results,
'x_url_close': '</a>'}, req=req)
# display the whole list of collections in an HTML comment
if len_colls_to_display > 2:
write_warning("<!--No match found in collection <em>%(x_collection)s</em>.-->" %
{'x_collection': string.join([get_coll_i18nname(coll, ln, False) for coll in colls_to_be_displayed], ', ')},
req=req)
else:
# no hits found: either the user is looking for a document he/she has no rights to see,
# or the user is looking for a hidden document:
if of.startswith("h") and display_nearest_terms_box:
if len(results_in_hidden_collections) > 0:
write_warning(_("No public collection matched your query. "
"If you were looking for a hidden document, please type "
"the correct URL for this record."), req=req)
else:
write_warning(_("No public collection matched your query. "
"If you were looking for a non-public document, please choose "
"the desired restricted collection first."), req=req)
if verbose and of.startswith("h"):
t2 = os.times()[4]
write_warning("Search stage 4: intersecting with collection universe gave %d hits." % results_nbhits, req=req)
write_warning("Search stage 4: execution took %.2f seconds." % (t2 - t1), req=req)
return results
def intersect_results_with_hitset(req, results, hitset, ap=0, aptext="", of="hb"):
"""Return intersection of search 'results' (a dict of hitsets
with collection as key) with the 'hitset', i.e. apply
'hitset' intersection to each collection within search
'results'.
If the final set would be empty and 'ap'
(approximate pattern) is true, then print the 'aptext' warning
and return the original 'results' set unchanged. If 'ap' is
false, return an empty results set.
"""
if ap:
results_ap = copy.deepcopy(results)
else:
results_ap = {} # will return empty dict in case of no hits found
nb_total = 0
final_results = {}
for coll in results.keys():
final_results[coll] = results[coll].intersection(hitset)
nb_total += len(final_results[coll])
if nb_total == 0:
if of.startswith("h"):
write_warning(aptext, req=req)
final_results = results_ap
return final_results
def create_similarly_named_authors_link_box(author_name, ln=CFG_SITE_LANG):
"""Return a box similar to ``Not satisfied...'' one by proposing
author searches for similar names. Namely, take AUTHOR_NAME
and the first initial of the first name (after the comma) and look
into author index whether authors with e.g. middle names exist.
Useful mainly for CERN Library that sometimes contains name
forms like Ellis-N, Ellis-Nick, Ellis-Nicolas all denoting the
same person. The box isn't proposed if no similarly named
authors are found to exist.
"""
# return nothing if not configured:
if CFG_WEBSEARCH_CREATE_SIMILARLY_NAMED_AUTHORS_LINK_BOX == 0:
return ""
# return empty box if there is no initial:
if re.match(r'[^ ,]+, [^ ]', author_name) is None:
return ""
# firstly find name comma initial:
author_name_to_search = re.sub(r'^([^ ,]+, +[^ ,]).*$', '\\1', author_name)
# secondly search for similar name forms:
similar_author_names = {}
for name in author_name_to_search, strip_accents(author_name_to_search):
for tag in get_field_tags("author"):
# deduce into which bibxxx table we will search:
digit1, digit2 = int(tag[0]), int(tag[1])
bx = "bib%d%dx" % (digit1, digit2)
if len(tag) != 6 or tag[-1:] == '%':
# only the beginning of field 't' is defined, so add wildcard character:
res = run_sql("""SELECT bx.value FROM %s AS bx
WHERE bx.value LIKE %%s AND bx.tag LIKE %%s""" % bx,
(name + "%", tag + "%"))
else:
res = run_sql("""SELECT bx.value FROM %s AS bx
WHERE bx.value LIKE %%s AND bx.tag=%%s""" % bx,
(name + "%", tag))
for row in res:
similar_author_names[row[0]] = 1
# remove the original name and sort the list:
try:
del similar_author_names[author_name]
except KeyError:
pass
# thirdly print the box:
out = ""
if similar_author_names:
out_authors = similar_author_names.keys()
out_authors.sort()
tmp_authors = []
for out_author in out_authors:
nbhits = get_nbhits_in_bibxxx(out_author, "author")
if nbhits:
tmp_authors.append((out_author, nbhits))
out += websearch_templates.tmpl_similar_author_names(
authors=tmp_authors, ln=ln)
return out
def create_nearest_terms_box(urlargd, p, f, t='w', n=5, ln=CFG_SITE_LANG, intro_text_p=True):
"""Return text box containing list of 'n' nearest terms above/below 'p'
for the field 'f' for matching type 't' (words/phrases) in
language 'ln'.
Propose new searches according to `urlargs' with the new words.
If `intro_text_p' is true, then display the introductory message,
otherwise print only the nearest terms in the box content.
"""
# load the right message language
_ = gettext_set_language(ln)
if not CFG_WEBSEARCH_DISPLAY_NEAREST_TERMS:
return _("Your search did not match any records. Please try again.")
nearest_terms = []
if not p: # sanity check
p = "."
if p.startswith('%') and p.endswith('%'):
p = p[1:-1] # fix for partial phrase
index_id = get_index_id_from_field(f)
if f == 'fulltext':
if CFG_SOLR_URL:
return _("No match found, please enter different search terms.")
else:
# FIXME: workaround for not having native phrase index yet
t = 'w'
# special indexes:
if f == 'refersto' or f == 'referstoexcludingselfcites':
return _("There are no records referring to %(x_rec)s.", x_rec=cgi.escape(p))
if f == 'cataloguer':
return _("There are no records modified by %(x_rec)s.", x_rec=cgi.escape(p))
if f == 'citedby' or f == 'citedbyexcludingselfcites':
return _("There are no records cited by %(x_rec)s.", x_rec=cgi.escape(p))
# look for nearest terms:
if t == 'w':
nearest_terms = get_nearest_terms_in_bibwords(p, f, n, n)
if not nearest_terms:
return _("No word index is available for %(x_name)s.",
x_name=('<em>' + cgi.escape(get_field_i18nname(get_field_name(f) or f, ln, False)) + '</em>'))
else:
nearest_terms = []
if index_id:
nearest_terms = get_nearest_terms_in_idxphrase(p, index_id, n, n)
if f == 'datecreated' or f == 'datemodified':
nearest_terms = get_nearest_terms_in_bibrec(p, f, n, n)
if not nearest_terms:
nearest_terms = get_nearest_terms_in_bibxxx(p, f, n, n)
if not nearest_terms:
return _("No phrase index is available for %(x_name)s.",
x_name=('<em>' + cgi.escape(get_field_i18nname(get_field_name(f) or f, ln, False)) + '</em>'))
terminfo = []
for term in nearest_terms:
if t == 'w':
hits = get_nbhits_in_bibwords(term, f)
else:
if index_id:
hits = get_nbhits_in_idxphrases(term, f)
elif f == 'datecreated' or f == 'datemodified':
hits = get_nbhits_in_bibrec(term, f)
else:
hits = get_nbhits_in_bibxxx(term, f)
argd = {}
argd.update(urlargd)
# check which fields contained the requested parameter, and replace it.
for px, dummy_fx in ('p', 'f'), ('p1', 'f1'), ('p2', 'f2'), ('p3', 'f3'):
if px in argd:
argd_px = argd[px]
if t == 'w':
# p was stripped of accents, so do the same here:
argd_px = strip_accents(argd_px)
#argd[px] = string.replace(argd_px, p, term, 1)
#we need something similar, but case insensitive
pattern_index = string.find(argd_px.lower(), p.lower())
if pattern_index > -1:
argd[px] = argd_px[:pattern_index] + term + argd_px[pattern_index+len(p):]
break
#this is doing exactly the same as:
#argd[px] = re.sub('(?i)' + re.escape(p), term, argd_px, 1)
#but is ~4x faster (2us vs. 8.25us)
terminfo.append((term, hits, argd))
intro = ""
if intro_text_p: # add full leading introductory text
if f:
intro = _("Search term %(x_term)s inside index %(x_index)s did not match any record. Nearest terms in any collection are:") % \
{'x_term': "<em>" + cgi.escape(p.startswith("%") and p.endswith("%") and p[1:-1] or p) + "</em>",
'x_index': "<em>" + cgi.escape(get_field_i18nname(get_field_name(f) or f, ln, False)) + "</em>"}
else:
intro = _("Search term %(x_name)s did not match any record. Nearest terms in any collection are:",
x_name=("<em>" + cgi.escape(p.startswith("%") and p.endswith("%") and p[1:-1] or p) + "</em>"))
return websearch_templates.tmpl_nearest_term_box(p=p, ln=ln, f=f, terminfo=terminfo,
intro=intro)
def get_nearest_terms_in_bibwords(p, f, n_below, n_above):
"""Return list of +n -n nearest terms to word `p' in index for field `f'."""
nearest_words = [] # will hold the (sorted) list of nearest words to return
# deduce into which bibwordsX table we will search:
bibwordsX = "idxWORD%02dF" % get_index_id_from_field("anyfield")
if f:
index_id = get_index_id_from_field(f)
if index_id:
bibwordsX = "idxWORD%02dF" % index_id
else:
return nearest_words
# firstly try to get `n' closest words above `p':
res = run_sql("SELECT term FROM %s WHERE term<%%s ORDER BY term DESC LIMIT %%s" % bibwordsX,
(p, n_above))
for row in res:
nearest_words.append(row[0])
nearest_words.reverse()
# secondly insert given word `p':
nearest_words.append(p)
# finally try to get `n' closest words below `p':
res = run_sql("SELECT term FROM %s WHERE term>%%s ORDER BY term ASC LIMIT %%s" % bibwordsX,
(p, n_below))
for row in res:
nearest_words.append(row[0])
return nearest_words
def get_nearest_terms_in_idxphrase(p, index_id, n_below, n_above):
"""Browse (-n_above, +n_below) closest bibliographic phrases
for the given pattern p in the given field idxPHRASE table,
regardless of collection.
Return list of [phrase1, phrase2, ... , phrase_n]."""
if CFG_INSPIRE_SITE and index_id in (3, 15): # FIXME: workaround due to new fuzzy index
return [p]
idxphraseX = "idxPHRASE%02dF" % index_id
res_above = run_sql("SELECT term FROM %s WHERE term<%%s ORDER BY term DESC LIMIT %%s" % idxphraseX, (p, n_above))
res_above = [x[0] for x in res_above]
res_above.reverse()
res_below = run_sql("SELECT term FROM %s WHERE term>=%%s ORDER BY term ASC LIMIT %%s" % idxphraseX, (p, n_below))
res_below = [x[0] for x in res_below]
return res_above + res_below
def get_nearest_terms_in_idxphrase_with_collection(p, index_id, n_below, n_above, collection):
"""Browse (-n_above, +n_below) closest bibliographic phrases
for the given pattern p in the given field idxPHRASE table,
considering the collection (intbitset).
Return list of [(phrase1, hitset), (phrase2, hitset), ... , (phrase_n, hitset)]."""
idxphraseX = "idxPHRASE%02dF" % index_id
res_above = run_sql("SELECT term,hitlist FROM %s WHERE term<%%s ORDER BY term DESC LIMIT %%s" % idxphraseX, (p, n_above * 3))
res_above = [(term, intbitset(hitlist) & collection) for term, hitlist in res_above]
res_above = [(term, len(hitlist)) for term, hitlist in res_above if hitlist]
res_below = run_sql("SELECT term,hitlist FROM %s WHERE term>=%%s ORDER BY term ASC LIMIT %%s" % idxphraseX, (p, n_below * 3))
res_below = [(term, intbitset(hitlist) & collection) for term, hitlist in res_below]
res_below = [(term, len(hitlist)) for term, hitlist in res_below if hitlist]
res_above.reverse()
return res_above[-n_above:] + res_below[:n_below]
def get_nearest_terms_in_bibxxx(p, f, n_below, n_above):
"""Browse (-n_above, +n_below) closest bibliographic phrases
for the given pattern p in the given field f, regardless
of collection.
Return list of [phrase1, phrase2, ... , phrase_n]."""
## determine browse field:
if not f and string.find(p, ":") > 0: # does 'p' contain ':'?
f, p = string.split(p, ":", 1)
# FIXME: quick hack for the journal index
if f == 'journal':
return get_nearest_terms_in_bibwords(p, f, n_below, n_above)
## We are going to fetch 2*max(n_below, n_above) values from bibXXx.
## This is needed to work around MySQL UTF-8 sorting troubles in 4.0.x.
## Proper solution is to use MySQL 4.1.x or our own idxPHRASE in the future.
index_id = get_index_id_from_field(f)
if index_id:
return get_nearest_terms_in_idxphrase(p, index_id, n_below, n_above)
n_fetch = 2*max(n_below, n_above)
## construct 'tl' which defines the tag list (MARC tags) to search in:
tl = []
if str(f[0]).isdigit() and str(f[1]).isdigit():
tl.append(f) # 'f' seems to be okay as it starts by two digits
else:
# deduce desired MARC tags on the basis of chosen 'f'
tl = get_field_tags(f)
## start browsing to fetch list of hits:
browsed_phrases = {} # will hold {phrase1: 1, phrase2: 1, ..., phraseN: 1} dict of browsed phrases (to make them unique)
# always add self to the results set:
browsed_phrases[p.startswith("%") and p.endswith("%") and p[1:-1] or p] = 1
for t in tl:
# deduce into which bibxxx table we will search:
digit1, digit2 = int(t[0]), int(t[1])
bx = "bib%d%dx" % (digit1, digit2)
# firstly try to get `n' closest phrases above `p':
if len(t) != 6 or t[-1:] == '%': # only the beginning of field 't' is defined, so add wildcard character:
res = run_sql("""SELECT bx.value FROM %s AS bx
WHERE bx.value<%%s AND bx.tag LIKE %%s
ORDER BY bx.value DESC LIMIT %%s""" % bx,
(p, t + "%", n_fetch))
else:
res = run_sql("""SELECT bx.value FROM %s AS bx
WHERE bx.value<%%s AND bx.tag=%%s
ORDER BY bx.value DESC LIMIT %%s""" % bx,
(p, t, n_fetch))
for row in res:
browsed_phrases[row[0]] = 1
# secondly try to get `n' closest phrases equal to or below `p':
if len(t) != 6 or t[-1:]=='%': # only the beginning of field 't' is defined, so add wildcard character:
res = run_sql("""SELECT bx.value FROM %s AS bx
WHERE bx.value>=%%s AND bx.tag LIKE %%s
ORDER BY bx.value ASC LIMIT %%s""" % bx,
(p, t + "%", n_fetch))
else:
res = run_sql("""SELECT bx.value FROM %s AS bx
WHERE bx.value>=%%s AND bx.tag=%%s
ORDER BY bx.value ASC LIMIT %%s""" % bx,
(p, t, n_fetch))
for row in res:
browsed_phrases[row[0]] = 1
# select first n words only: (this is needed as we were searching
# in many different tables and so aren't sure we have more than n
# words right; this of course won't be needed when we shall have
# one ACC table only for given field):
phrases_out = browsed_phrases.keys()
phrases_out.sort(lambda x, y: cmp(string.lower(strip_accents(x)),
string.lower(strip_accents(y))))
# find position of self:
try:
idx_p = phrases_out.index(p)
except ValueError:
idx_p = len(phrases_out)/2
# return n_above and n_below:
return phrases_out[max(0, idx_p-n_above):idx_p+n_below]
def get_nearest_terms_in_bibrec(p, f, n_below, n_above):
"""Return list of nearest terms and counts from bibrec table.
p is usually a date, and f either datecreated or datemodified.
Note: the below/above counts are approximate and not strictly respected.
"""
col = 'creation_date'
if f == 'datemodified':
col = 'modification_date'
res_above = run_sql("""SELECT DATE_FORMAT(%s,'%%%%Y-%%%%m-%%%%d %%%%H:%%%%i:%%%%s')
FROM bibrec WHERE %s < %%s
ORDER BY %s DESC LIMIT %%s""" % (col, col, col),
(p, n_above))
res_below = run_sql("""SELECT DATE_FORMAT(%s,'%%%%Y-%%%%m-%%%%d %%%%H:%%%%i:%%%%s')
FROM bibrec WHERE %s > %%s
ORDER BY %s ASC LIMIT %%s""" % (col, col, col),
(p, n_below))
out = set([])
for row in res_above:
out.add(row[0])
for row in res_below:
out.add(row[0])
out_list = list(out)
out_list.sort()
return list(out_list)
def get_nbhits_in_bibrec(term, f):
"""Return number of hits in bibrec table. term is usually a date,
and f is either 'datecreated' or 'datemodified'."""
col = 'creation_date'
if f == 'datemodified':
col = 'modification_date'
res = run_sql("SELECT COUNT(*) FROM bibrec WHERE %s LIKE %%s" % (col,),
(term + '%',))
return res[0][0]
def get_nbhits_in_bibwords(word, f):
"""Return number of hits for word 'word' inside words index for field 'f'."""
out = 0
# deduce into which bibwordsX table we will search:
bibwordsX = "idxWORD%02dF" % get_index_id_from_field("anyfield")
if f:
index_id = get_index_id_from_field(f)
if index_id:
bibwordsX = "idxWORD%02dF" % index_id
else:
return 0
if word:
res = run_sql("SELECT hitlist FROM %s WHERE term=%%s" % bibwordsX,
(word,))
for hitlist in res:
out += len(intbitset(hitlist[0]))
return out
def get_nbhits_in_idxphrases(word, f):
"""Return number of hits for word 'word' inside phrase index for field 'f'."""
out = 0
# deduce into which idxPHRASE table we will search:
idxphraseX = "idxPHRASE%02dF" % get_index_id_from_field("anyfield")
if f:
index_id = get_index_id_from_field(f)
if index_id:
idxphraseX = "idxPHRASE%02dF" % index_id
else:
return 0
if word:
res = run_sql("SELECT hitlist FROM %s WHERE term=%%s" % idxphraseX,
(word,))
for hitlist in res:
out += len(intbitset(hitlist[0]))
return out
def get_nbhits_in_bibxxx(p, f, in_hitset=None):
"""Return number of hits for word 'word' inside words index for field 'f'."""
## determine browse field:
if not f and string.find(p, ":") > 0: # does 'p' contain ':'?
f, p = string.split(p, ":", 1)
# FIXME: quick hack for the journal index
if f == 'journal':
return get_nbhits_in_bibwords(p, f)
## construct 'tl' which defines the tag list (MARC tags) to search in:
tl = []
if str(f[0]).isdigit() and str(f[1]).isdigit():
tl.append(f) # 'f' seems to be okay as it starts by two digits
else:
# deduce desired MARC tags on the basis of chosen 'f'
tl = get_field_tags(f)
# start searching:
recIDs = {} # will hold dict of {recID1: 1, recID2: 1, ..., } (unique recIDs, therefore)
for t in tl:
# deduce into which bibxxx table we will search:
digit1, digit2 = int(t[0]), int(t[1])
bx = "bib%d%dx" % (digit1, digit2)
bibx = "bibrec_bib%d%dx" % (digit1, digit2)
if len(t) != 6 or t[-1:]=='%': # only the beginning of field 't' is defined, so add wildcard character:
res = run_sql("""SELECT bibx.id_bibrec FROM %s AS bibx, %s AS bx
WHERE bx.value=%%s AND bx.tag LIKE %%s
AND bibx.id_bibxxx=bx.id""" % (bibx, bx),
(p, t + "%"))
else:
res = run_sql("""SELECT bibx.id_bibrec FROM %s AS bibx, %s AS bx
WHERE bx.value=%%s AND bx.tag=%%s
AND bibx.id_bibxxx=bx.id""" % (bibx, bx),
(p, t))
for row in res:
recIDs[row[0]] = 1
if in_hitset is None:
nbhits = len(recIDs)
else:
nbhits = len(intbitset(recIDs.keys()).intersection(in_hitset))
return nbhits
def get_mysql_recid_from_aleph_sysno(sysno):
"""Returns DB's recID for ALEPH sysno passed in the argument (e.g. "002379334CER").
Returns None in case of failure."""
out = None
res = run_sql("""SELECT bb.id_bibrec FROM bibrec_bib97x AS bb, bib97x AS b
WHERE b.value=%s AND b.tag='970__a' AND bb.id_bibxxx=b.id""",
(sysno,))
if res:
out = res[0][0]
return out
def guess_primary_collection_of_a_record(recID):
"""Return primary collection name a record recid belongs to, by
testing 980 identifier.
May lead to bad guesses when a collection is defined dynamically
via dbquery.
In that case, return 'CFG_SITE_NAME'."""
out = CFG_SITE_NAME
dbcollids = get_fieldvalues(recID, "980__a")
for dbcollid in dbcollids:
variants = ("collection:" + dbcollid,
'collection:"' + dbcollid + '"',
"980__a:" + dbcollid,
'980__a:"' + dbcollid + '"',
'980:' + dbcollid ,
'980:"' + dbcollid + '"')
res = run_sql("SELECT name FROM collection WHERE dbquery IN (%s,%s,%s,%s,%s,%s)", variants)
if res:
out = res[0][0]
break
if CFG_CERN_SITE:
recID = int(recID)
# dirty hack for ATLAS collections at CERN:
if out in ('ATLAS Communications', 'ATLAS Internal Notes'):
for alternative_collection in ('ATLAS Communications Physics',
'ATLAS Communications General',
'ATLAS Internal Notes Physics',
'ATLAS Internal Notes General',):
if recID in get_collection_reclist(alternative_collection):
return alternative_collection
# dirty hack for FP
FP_collections = {'DO': ['Current Price Enquiries', 'Archived Price Enquiries'],
'IT': ['Current Invitation for Tenders', 'Archived Invitation for Tenders'],
'MS': ['Current Market Surveys', 'Archived Market Surveys']}
fp_coll_ids = [coll for coll in dbcollids if coll in FP_collections]
for coll in fp_coll_ids:
for coll_name in FP_collections[coll]:
if recID in get_collection_reclist(coll_name):
return coll_name
return out
_re_collection_url = re.compile('/collection/(.+)')
def guess_collection_of_a_record(recID, referer=None, recreate_cache_if_needed=True):
"""Return collection name a record recid belongs to, by first testing
the referer URL if provided and otherwise returning the
primary collection."""
if referer:
dummy, hostname, path, dummy, query, dummy = urlparse.urlparse(referer)
#requests can come from different invenio installations, with different collections
if CFG_SITE_URL.find(hostname) < 0:
return guess_primary_collection_of_a_record(recID)
g = _re_collection_url.match(path)
if g:
name = urllib.unquote_plus(g.group(1))
# check whether this collection actually exists (also normalise the name case-insensitively)
name = get_coll_normalised_name(name)
if name and recID in get_collection_reclist(name):
return name
elif path.startswith('/search'):
if recreate_cache_if_needed:
collection_reclist_cache.recreate_cache_if_needed()
query = cgi.parse_qs(query)
for name in query.get('cc', []) + query.get('c', []):
name = get_coll_normalised_name(name)
if name and recID in get_collection_reclist(name, recreate_cache_if_needed=False):
return name
return guess_primary_collection_of_a_record(recID)
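# Hedged example of the referer-based guess above (the URL is hypothetical):
#
#     referer = 'http://localhost/collection/Preprints'
#     -> the path matches /collection/(.+); the name is unquoted and normalised,
#        and accepted only if recID is actually in that collection's reclist;
#        otherwise we fall back to the primary-collection guess.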
def is_record_in_any_collection(recID, recreate_cache_if_needed=True):
"""Return True if the record belongs to at least one collection. This is a
good, although not perfect, indicator to guess if webcoll has already run
after this record has been entered into the system.
"""
if recreate_cache_if_needed:
collection_reclist_cache.recreate_cache_if_needed()
for name in collection_reclist_cache.cache.keys():
if recID in get_collection_reclist(name, recreate_cache_if_needed=False):
return True
return False
def get_all_collections_of_a_record(recID, recreate_cache_if_needed=True):
"""Return all the collection names a record belongs to.
Note this function is O(n_collections)."""
ret = []
if recreate_cache_if_needed:
collection_reclist_cache.recreate_cache_if_needed()
for name in collection_reclist_cache.cache.keys():
if recID in get_collection_reclist(name, recreate_cache_if_needed=False):
ret.append(name)
return ret
def get_tag_name(tag_value, prolog="", epilog=""):
"""Return tag name from the known tag value, by looking up the 'tag' table.
Return empty string in case of failure.
Example: input='100__%', output='first author'."""
out = ""
res = run_sql("SELECT name FROM tag WHERE value=%s", (tag_value,))
if res:
out = prolog + res[0][0] + epilog
return out
def get_fieldcodes():
"""Returns a list of field codes that may have been passed as 'search options' in URL.
Example: output=['subject','division']."""
out = []
res = run_sql("SELECT DISTINCT(code) FROM field")
for row in res:
out.append(row[0])
return out
def get_field_name(code):
"""Return the corresponding field_name given the field code.
e.g. reportnumber -> report number."""
res = run_sql("SELECT name FROM field WHERE code=%s", (code, ))
if res:
return res[0][0]
else:
return ""
def get_fieldvalues_alephseq_like(recID, tags_in, can_see_hidden=False):
"""Return buffer of ALEPH sequential-like textual format with fields found
in the list TAGS_IN for record RECID.
If can_see_hidden is True, just print everything. Otherwise hide fields
from CFG_BIBFORMAT_HIDDEN_TAGS.
"""
out = ""
if type(tags_in) is not list:
tags_in = [tags_in]
if len(tags_in) == 1 and len(tags_in[0]) == 6:
## case A: one concrete subfield asked, so print its value if found
## (use with care: can mislead if field has multiple occurrences)
out += string.join(get_fieldvalues(recID, tags_in[0]), "\n")
else:
## case B: print our "text MARC" format; works safely all the time
# find out which tags to output:
dict_of_tags_out = {}
if not tags_in:
for i in range(0, 10):
for j in range(0, 10):
dict_of_tags_out["%d%d%%" % (i, j)] = 1
else:
for tag in tags_in:
if len(tag) == 0:
for i in range(0, 10):
for j in range(0, 10):
dict_of_tags_out["%d%d%%" % (i, j)] = 1
elif len(tag) == 1:
for j in range(0, 10):
dict_of_tags_out["%s%d%%" % (tag, j)] = 1
elif len(tag) < 5:
dict_of_tags_out["%s%%" % tag] = 1
else:
dict_of_tags_out[tag[0:5]] = 1
tags_out = dict_of_tags_out.keys()
tags_out.sort()
# search all bibXXx tables as needed:
for tag in tags_out:
digits = tag[0:2]
try:
intdigits = int(digits)
if intdigits < 0 or intdigits > 99:
raise ValueError
except ValueError:
# invalid tag value asked for
continue
if tag.startswith("001") or tag.startswith("00%"):
if out:
out += "\n"
out += "%09d %s %d" % (recID, "001__", recID)
bx = "bib%sx" % digits
bibx = "bibrec_bib%sx" % digits
query = "SELECT b.tag,b.value,bb.field_number FROM %s AS b, %s AS bb "\
"WHERE bb.id_bibrec=%%s AND b.id=bb.id_bibxxx AND b.tag LIKE %%s"\
"ORDER BY bb.field_number, b.tag ASC" % (bx, bibx)
res = run_sql(query, (recID, str(tag)+'%'))
# go through fields:
field_number_old = -999
field_old = ""
for row in res:
field, value, field_number = row[0], row[1], row[2]
ind1, ind2 = field[3], field[4]
printme = True
# check whether this tag is among the configured hidden fields
if not can_see_hidden:
for htag in CFG_BIBFORMAT_HIDDEN_TAGS:
ltag = len(htag)
samelenfield = field[0:ltag]
if samelenfield == htag:
printme = False
if ind1 == "_":
ind1 = ""
if ind2 == "_":
ind2 = ""
# print field tag
if printme:
if field_number != field_number_old or field[:-1] != field_old[:-1]:
if out:
out += "\n"
out += "%09d %s " % (recID, field[:5])
field_number_old = field_number
field_old = field
# print subfield value
if field[0:2] == "00" and field[-1:] == "_":
out += value
else:
out += "$$%s%s" % (field[-1:], value)
return out
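# Assumed shape of the ALEPH-sequential-like output built above (record id,
# tags and values are made up for illustration):
#
#     000001234 001__ 1234
#     000001234 100__ $$aEllis, J$$uCERN
#     000001234 245__ $$aA hypothetical title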
def get_merged_recid(recID):
""" Return the record ID of the record with
which the given record has been merged.
@param recID: deleted record recID
@type recID: int
@return: merged record recID
@rtype: int or None
"""
merged_recid = None
for val in get_fieldvalues(recID, "970__d"):
try:
merged_recid = int(val)
break
except ValueError:
pass
return merged_recid
def record_empty(recID):
"""
Is this record empty, e.g. has only 001, waiting for integration?
@param recID: the record identifier.
@type recID: int
@return: 1 if the record is empty, 0 otherwise.
@rtype: int
"""
return bibrecord.record_empty(get_record(recID))
def record_public_p(recID, recreate_cache_if_needed=True):
"""Return 1 if the record is public, i.e. if it can be found in the Home collection.
Return 0 otherwise.
"""
return recID in get_collection_reclist(CFG_SITE_NAME, recreate_cache_if_needed=recreate_cache_if_needed)
def get_creation_date(recID, fmt="%Y-%m-%d"):
"Returns the creation date of the record 'recID'."
out = ""
res = run_sql("SELECT DATE_FORMAT(creation_date,%s) FROM bibrec WHERE id=%s", (fmt, recID), 1)
if res:
out = res[0][0]
return out
def get_modification_date(recID, fmt="%Y-%m-%d"):
"Returns the date of last modification for the record 'recID'."
out = ""
res = run_sql("SELECT DATE_FORMAT(modification_date,%s) FROM bibrec WHERE id=%s", (fmt, recID), 1)
if res:
out = res[0][0]
return out
def print_search_info(p, f, sf, so, sp, rm, of, ot, collection=CFG_SITE_NAME, nb_found=-1, jrec=1, rg=CFG_WEBSEARCH_DEF_RECORDS_IN_GROUPS,
aas=0, ln=CFG_SITE_LANG, p1="", p2="", p3="", f1="", f2="", f3="", m1="", m2="", m3="", op1="", op2="",
sc=1, pl_in_url="",
d1y=0, d1m=0, d1d=0, d2y=0, d2m=0, d2d=0, dt="",
cpu_time=-1, middle_only=0, em=""):
"""Prints stripe with the information on 'collection' and 'nb_found' results and CPU time.
Also, prints navigation links (beg/next/prev/end) inside the results set.
If middle_only is set to 1, it will only print the middle box information (beg/next/prev/end/etc.) links.
This is suitable for displaying navigation links at the bottom of the search results page."""
if em != '' and EM_REPOSITORY["search_info"] not in em:
return ""
# sanity check:
if jrec < 1:
jrec = 1
if jrec > nb_found:
jrec = max(nb_found-rg+1, 1)
return websearch_templates.tmpl_print_search_info(
ln = ln,
collection = collection,
aas = aas,
collection_name = get_coll_i18nname(collection, ln, False),
collection_id = get_colID(collection),
middle_only = middle_only,
rg = rg,
nb_found = nb_found,
sf = sf,
so = so,
rm = rm,
of = of,
ot = ot,
p = p,
f = f,
p1 = p1,
p2 = p2,
p3 = p3,
f1 = f1,
f2 = f2,
f3 = f3,
m1 = m1,
m2 = m2,
m3 = m3,
op1 = op1,
op2 = op2,
pl_in_url = pl_in_url,
d1y = d1y,
d1m = d1m,
d1d = d1d,
d2y = d2y,
d2m = d2m,
d2d = d2d,
dt = dt,
jrec = jrec,
sc = sc,
sp = sp,
all_fieldcodes = get_fieldcodes(),
cpu_time = cpu_time,
)
def print_hosted_search_info(p, f, sf, so, sp, rm, of, ot, collection=CFG_SITE_NAME, nb_found=-1, jrec=1, rg=CFG_WEBSEARCH_DEF_RECORDS_IN_GROUPS,
aas=0, ln=CFG_SITE_LANG, p1="", p2="", p3="", f1="", f2="", f3="", m1="", m2="", m3="", op1="", op2="",
sc=1, pl_in_url="",
d1y=0, d1m=0, d1d=0, d2y=0, d2m=0, d2d=0, dt="",
cpu_time=-1, middle_only=0, em=""):
"""Prints stripe with the information on 'collection' and 'nb_found' results and CPU time.
Also, prints navigation links (beg/next/prev/end) inside the results set.
If middle_only is set to 1, it will only print the middle box information (beg/next/prev/end/etc.) links.
This is suitable for displaying navigation links at the bottom of the search results page."""
if em != '' and EM_REPOSITORY["search_info"] not in em:
return ""
# sanity check:
if jrec < 1:
jrec = 1
if jrec > nb_found:
jrec = max(nb_found-rg+1, 1)
return websearch_templates.tmpl_print_hosted_search_info(
ln = ln,
collection = collection,
aas = aas,
collection_name = get_coll_i18nname(collection, ln, False),
collection_id = get_colID(collection),
middle_only = middle_only,
rg = rg,
nb_found = nb_found,
sf = sf,
so = so,
rm = rm,
of = of,
ot = ot,
p = p,
f = f,
p1 = p1,
p2 = p2,
p3 = p3,
f1 = f1,
f2 = f2,
f3 = f3,
m1 = m1,
m2 = m2,
m3 = m3,
op1 = op1,
op2 = op2,
pl_in_url = pl_in_url,
d1y = d1y,
d1m = d1m,
d1d = d1d,
d2y = d2y,
d2m = d2m,
d2d = d2d,
dt = dt,
jrec = jrec,
sc = sc,
sp = sp,
all_fieldcodes = get_fieldcodes(),
cpu_time = cpu_time,
)
def print_results_overview(colls, results_final_nb_total, results_final_nb, cpu_time, ln=CFG_SITE_LANG, ec=[], hosted_colls_potential_results_p=False, em=""):
"""Prints results overview box with links to particular collections below."""
if em != "" and EM_REPOSITORY["overview"] not in em:
return ""
new_colls = []
for coll in colls:
new_colls.append({
'id': get_colID(coll),
'code': coll,
'name': get_coll_i18nname(coll, ln, False),
})
return websearch_templates.tmpl_print_results_overview(
ln = ln,
results_final_nb_total = results_final_nb_total,
results_final_nb = results_final_nb,
cpu_time = cpu_time,
colls = new_colls,
ec = ec,
hosted_colls_potential_results_p = hosted_colls_potential_results_p,
)
def print_hosted_results(url_and_engine, ln=CFG_SITE_LANG, of=None, req=None, no_records_found=False, search_timed_out=False, limit=CFG_EXTERNAL_COLLECTION_MAXRESULTS, em = ""):
"""Prints the full results of a hosted collection"""
if of.startswith("h"):
if no_records_found:
return "<br />No results found."
if search_timed_out:
return "<br />The search engine did not respond in time."
return websearch_templates.tmpl_print_hosted_results(
url_and_engine=url_and_engine,
ln=ln,
of=of,
req=req,
limit=limit,
display_body = em == "" or EM_REPOSITORY["body"] in em,
display_add_to_basket = em == "" or EM_REPOSITORY["basket"] in em)
class BibSortDataCacher(DataCacher):
"""
Cache holding all structures created by bibsort
(data_dict_ordered and bucket_data).
"""
def __init__(self, method_name):
self.method_name = method_name
self.method_id = 0
res = run_sql("""SELECT id from bsrMETHOD where name = %s""", (self.method_name,))
if res and res[0]:
self.method_id = res[0][0]
else:
self.method_id = 0
def cache_filler():
method_id = self.method_id
alldicts = {}
if self.method_id == 0:
return {}
try:
res_data = run_sql("""SELECT data_dict_ordered from bsrMETHODDATA \
where id_bsrMETHOD = %s""", (method_id,))
res_buckets = run_sql("""SELECT bucket_no, bucket_data from bsrMETHODDATABUCKET\
where id_bsrMETHOD = %s""", (method_id,))
except Exception:
# database problems, return empty cache
return {}
try:
data_dict_ordered = deserialize_via_marshal(res_data[0][0])
except IndexError:
data_dict_ordered = {}
alldicts['data_dict_ordered'] = data_dict_ordered # recid: weight
if not res_buckets:
alldicts['bucket_data'] = {}
return alldicts
for row in res_buckets:
bucket_no = row[0]
try:
bucket_data = intbitset(row[1])
except IndexError:
bucket_data = intbitset([])
alldicts.setdefault('bucket_data', {})[bucket_no] = bucket_data
return alldicts
def timestamp_verifier():
method_id = self.method_id
res = run_sql("""SELECT last_updated from bsrMETHODDATA where id_bsrMETHOD = %s""", (method_id,))
try:
update_time_methoddata = str(res[0][0])
except IndexError:
update_time_methoddata = '1970-01-01 00:00:00'
res = run_sql("""SELECT max(last_updated) from bsrMETHODDATABUCKET where id_bsrMETHOD = %s""", (method_id,))
try:
update_time_buckets = str(res[0][0])
except IndexError:
update_time_buckets = '1970-01-01 00:00:00'
return max(update_time_methoddata, update_time_buckets)
DataCacher.__init__(self, cache_filler, timestamp_verifier)
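# Hedged illustration (added for clarity, not part of the original module):
# once filled, BibSortDataCacher.cache is expected to look roughly like
#
#   {'data_dict_ordered': {recid: weight, ...},          # e.g. {10: 1, 11: 2}
#    'bucket_data': {bucket_no: intbitset([...]), ...}}  # e.g. {1: intbitset([10, 11])}
#
# i.e. a per-method ordering weight for every recid, plus the recids grouped
# into CFG_BIBSORT_BUCKETS buckets; the concrete values shown are hypothetical.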
def get_sorting_methods():
res = run_sql("""SELECT m.name, m.definition
FROM bsrMETHOD m, bsrMETHODDATA md
WHERE m.id = md.id_bsrMETHOD""")
return dict(res)
SORTING_METHODS = get_sorting_methods()
CACHE_SORTED_DATA = {}
for sorting_method in SORTING_METHODS:
try:
CACHE_SORTED_DATA[sorting_method].is_ok_p
except KeyError:
CACHE_SORTED_DATA[sorting_method] = BibSortDataCacher(sorting_method)
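# Hedged illustration (hypothetical entries): SORTING_METHODS maps a bsrMETHOD
# name to its definition string, whose prefix drives the dispatch performed in
# rank_records() and sort_records() below, e.g.
#
#   {'title':    'FIELD:title',     # sort by a logical field
#    'year':     'MARC:260__c',     # sort by explicit MARC tags
#    'citation': 'RNK:citation'}    # rank with a BibRank method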
def get_tags_from_sort_fields(sort_fields):
"""Given a list of sort_fields, return the tags associated with it and
also the name of the field that has no tags associated, to be able to
display a message to the user."""
tags = []
if not sort_fields:
return [], ''
for sort_field in sort_fields:
if sort_field and (len(sort_field) > 1 and str(sort_field[0:2]).isdigit()):
# sort_field starts with two digits, so this is probably a MARC tag already
tags.append(sort_field)
else:
# let us check the 'field' table
field_tags = get_field_tags(sort_field)
if field_tags:
tags.extend(field_tags)
else:
return [], sort_field
return tags, ''
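# Hedged usage sketch (hypothetical values, not part of the original module):
# a field starting with two digits is taken as a MARC tag as-is, a known field
# code is expanded through get_field_tags(), and an unknown field code is
# reported back so the caller can warn the user.
def _example_get_tags_from_sort_fields():
    tags, unknown_field = get_tags_from_sort_fields(['100__a', 'title'])
    # tags would contain '100__a' plus the MARC tags configured for 'title';
    # unknown_field is '' on success, or the offending field name otherwise.
    return tags, unknown_field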
def rank_records(req, rank_method_code, rank_limit_relevance, hitset_global, pattern=None, verbose=0, sort_order='d', of='hb', ln=CFG_SITE_LANG, rg=None, jrec=None, field='', sorting_methods=SORTING_METHODS):
"""Initial entry point for ranking records, acts like a dispatcher.
(i) rank_method_code is in bsrMETHOD, bibsort buckets can be used;
(ii) rank_method_code is not in bsrMETHOD, use bibrank.
"""
# Special case: sorting by citations is fast because we store the
# ranking dictionary in memory, so we do not use bibsort buckets.
if CFG_BIBSORT_ENABLED and sorting_methods and rank_method_code != 'citation':
for sort_method in sorting_methods:
definition = sorting_methods[sort_method]
if definition.startswith('RNK') and \
definition.replace('RNK:', '').strip().lower() == rank_method_code.lower():
solution_recs, solution_scores = \
sort_records_bibsort(req, hitset_global, sort_method,
'', sort_order, verbose, of, ln,
rg, jrec, 'r')
comment = ''
if verbose > 0:
comment = 'find_citations retlist %s' % [[solution_recs[i], solution_scores[i]] for i in range(len(solution_recs))]
return solution_recs, solution_scores, '(', ')', comment
if rank_method_code.lower() == 'citation':
related_to = []
else:
related_to = pattern
solution_recs, solution_scores, prefix, suffix, comment = \
rank_records_bibrank(rank_method_code=rank_method_code,
rank_limit_relevance=rank_limit_relevance,
hitset=hitset_global,
verbose=verbose,
field=field,
related_to=related_to,
rg=rg,
jrec=jrec)
# Solution recs can be None in case of error or other cases
# which should all be changed to return an empty list.
if solution_recs and sort_order == 'd':
solution_recs.reverse()
solution_scores.reverse()
return solution_recs, solution_scores, prefix, suffix, comment
def sort_records_latest(recIDs, jrec, rg, sort_order):
if sort_order == 'd':
recIDs.reverse()
return slice_records(recIDs, jrec, rg)
def sort_or_rank_records(req, recIDs, rm, sf, so, sp, p, verbose=0, of='hb',
ln=CFG_SITE_LANG, rg=None, jrec=None, field='',
sorting_methods=SORTING_METHODS):
"""Sort or rank records.
Entry point for deciding to either sort or rank records."""
if rm:
ranking_result = rank_records(req, rm, 0, recIDs, p, verbose, so,
of, ln, rg, jrec, field,
sorting_methods=sorting_methods)
if ranking_result[0]:
return ranking_result[0] # ranked recids
elif sf or (CFG_BIBSORT_ENABLED and SORTING_METHODS):
return sort_records(req, recIDs, sf, so, sp, verbose, of, ln, rg, jrec)
return recIDs.tolist()
def sort_records(req, recIDs, sort_field='', sort_order='a', sort_pattern='', verbose=0, of='hb', ln=CFG_SITE_LANG, rg=None, jrec=None, sorting_methods=SORTING_METHODS):
"""Initial entry point for sorting records, acts like a dispatcher.
(i) sort_field is in bsrMETHOD, and thus BibSort has already sorted the data for this field, so we can use the cache;
(ii) sort_field is not in bsrMETHOD, and thus the cache does not contain any information regarding this sorting method"""
_ = gettext_set_language(ln)
# bibsort does not handle sort_pattern for now, use bibxxx
if sort_pattern:
return sort_records_bibxxx(req, recIDs, None, sort_field, sort_order,
sort_pattern, verbose, of, ln, rg, jrec)
# decide whether BibSort buckets can be used; otherwise fall back to old-fashioned sorting
use_sorting_buckets = CFG_BIBSORT_ENABLED and sorting_methods
# Default sorting
if not sort_field:
if use_sorting_buckets:
return sort_records_bibsort(req, recIDs, CFG_BIBSORT_DEFAULT_FIELD, sort_field, CFG_BIBSORT_DEFAULT_FIELD_ORDER, verbose, of, ln, rg, jrec)
else:
return sort_records_latest(recIDs, jrec, rg, sort_order)
sort_fields = sort_field.split(",")
if len(sort_fields) == 1:
# we have only one sorting_field, check if it is treated by BibSort
for sort_method in sorting_methods:
definition = sorting_methods[sort_method]
if use_sorting_buckets and \
((definition.startswith('FIELD') and
definition.replace('FIELD:', '').strip().lower() == sort_fields[0].lower()) or
sort_method == sort_fields[0]):
#use BibSort
return sort_records_bibsort(req, recIDs, sort_method, sort_field, sort_order, verbose, of, ln, rg, jrec)
#deduce sorting MARC tag out of the 'sort_field' argument:
tags, error_field = get_tags_from_sort_fields(sort_fields)
if error_field:
if use_sorting_buckets:
return sort_records_bibsort(req, recIDs, CFG_BIBSORT_DEFAULT_FIELD, sort_field, sort_order, verbose, of, ln, rg, jrec)
else:
if of.startswith('h'):
write_warning(_("Sorry, %(x_option)s does not seem to be a valid sort option. The records will not be sorted.", x_option=cgi.escape(error_field)), "Error", req=req)
return slice_records(recIDs, jrec, rg)
elif tags:
for sort_method in sorting_methods:
definition = sorting_methods[sort_method]
if definition.startswith('MARC') \
and definition.replace('MARC:', '').strip().split(',') == tags \
and use_sorting_buckets:
#this list of tags has a designated method in BibSort, so use it
return sort_records_bibsort(req, recIDs, sort_method, sort_field, sort_order, verbose, of, ln, rg, jrec)
#we do not have this sort_field in the BibSort tables -> do the old-fashioned sorting
return sort_records_bibxxx(req, recIDs, tags, sort_field, sort_order, sort_pattern, verbose, of, ln, rg, jrec)
else:
return slice_records(recIDs, jrec, rg)
def sort_records_bibsort(req, recIDs, sort_method, sort_field='', sort_order='d', verbose=0, of='hb', ln=CFG_SITE_LANG, rg=None, jrec=1, sort_or_rank='s', sorting_methods=SORTING_METHODS):
"""This function orders the recIDs list, based on a sorting method(sort_field) using the BibSortDataCacher for speed"""
_ = gettext_set_language(ln)
if not jrec:
jrec = 1
#sanity check
if sort_method not in sorting_methods:
if sort_or_rank == 'r':
return rank_records_bibrank(rank_method_code=sort_method,
rank_limit_relevance=0,
hitset=recIDs,
verbose=verbose)
else:
return sort_records_bibxxx(req, recIDs, None, sort_field, sort_order, '', verbose, of, ln, rg, jrec)
if verbose >= 3 and of.startswith('h'):
write_warning("Sorting (using BibSort cache) by method %s (definition %s)."
% (cgi.escape(repr(sort_method)), cgi.escape(repr(sorting_methods[sort_method]))), req=req)
#we should return sorted records up to irec_max (exclusive)
dummy, irec_max = get_interval_for_records_to_sort(len(recIDs), jrec, rg)
solution = intbitset()
input_recids = intbitset(recIDs)
CACHE_SORTED_DATA[sort_method].recreate_cache_if_needed()
sort_cache = CACHE_SORTED_DATA[sort_method].cache
bucket_numbers = sort_cache['bucket_data'].keys()
#check if all buckets have been constructed
if len(bucket_numbers) != CFG_BIBSORT_BUCKETS:
if verbose > 3 and of.startswith('h'):
write_warning("Not all buckets have been constructed.. switching to old fashion sorting.", req=req)
if sort_or_rank == 'r':
return rank_records_bibrank(rank_method_code=sort_method,
rank_limit_relevance=0,
hitset=recIDs,
verbose=verbose)
else:
return sort_records_bibxxx(req, recIDs, None, sort_field,
sort_order, '', verbose, of, ln, rg,
jrec)
if sort_order == 'd':
bucket_numbers.reverse()
for bucket_no in bucket_numbers:
solution.union_update(
input_recids & sort_cache['bucket_data'][bucket_no]
)
if len(solution) >= irec_max:
break
dict_solution = {}
missing_records = intbitset()
for recid in solution:
try:
dict_solution[recid] = sort_cache['data_dict_ordered'][recid]
except KeyError:
# recid is in buckets, but not in the bsrMETHODDATA,
# maybe because the value has been deleted, but the change has not
# yet been propagated to the buckets
missing_records.add(recid)
# check if there are recids that are not in any bucket -> to be added at
# the end/top, ordered by insertion date
if len(solution) < irec_max:
#some records have not yet been inserted in the bibsort structures,
#or some records have no value for the sort_method
missing_records += input_recids - solution
reverse = sort_order == 'd'
if sort_method.strip().lower() == CFG_BIBSORT_DEFAULT_FIELD and reverse:
# If we want to sort the records on their insertion date, add the
# missing records at the top.
solution = sorted(missing_records, reverse=True) + \
sorted(dict_solution, key=dict_solution.__getitem__, reverse=True)
else:
solution = sorted(dict_solution, key=dict_solution.__getitem__,
reverse=reverse) + sorted(missing_records)
# Only keep the records we are going to display
solution = slice_records(solution, jrec, rg)
if sort_or_rank == 'r':
# We need the recids, with their ranking score
return solution, [dict_solution.get(record, 0) for record in solution]
else:
return solution
def slice_records(recIDs, jrec, rg):
if not jrec:
jrec = 1
if rg:
recIDs = recIDs[jrec-1:jrec-1+rg]
else:
recIDs = recIDs[jrec-1:]
return recIDs
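# Minimal usage sketch (hypothetical values): jrec is 1-based and rg is the page
# size, so jrec=11 with rg=10 keeps the second page of ten records.
def _example_slice_records():
    recids = list(range(1, 101))                  # recIDs 1..100
    return slice_records(recids, jrec=11, rg=10)  # -> [11, 12, ..., 20]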
def sort_records_bibxxx(req, recIDs, tags, sort_field='', sort_order='d', sort_pattern='', verbose=0, of='hb', ln=CFG_SITE_LANG, rg=None, jrec=None):
"""OLD FASHION SORTING WITH NO CACHE, for sort fields that are not run in BibSort
Sort records in 'recIDs' list according sort field 'sort_field' in order 'sort_order'.
If more than one instance of 'sort_field' is found for a given record, try to choose that that is given by
'sort pattern', for example "sort by report number that starts by CERN-PS".
Note that 'sort_field' can be field code like 'author' or MARC tag like '100__a' directly."""
_ = gettext_set_language(ln)
## check arguments:
if not sort_field:
return slice_records(recIDs, jrec, rg)
if len(recIDs) > CFG_WEBSEARCH_NB_RECORDS_TO_SORT:
if of.startswith('h'):
write_warning(_("Sorry, sorting is allowed on sets of up to %(x_name)d records only. Using default sort order.", x_name=CFG_WEBSEARCH_NB_RECORDS_TO_SORT), "Warning", req=req)
return slice_records(recIDs, jrec, rg)
recIDs_dict = {}
recIDs_out = []
if not tags:
# tags have not been computed yet
sort_fields = sort_field.split(',')
tags, error_field = get_tags_from_sort_fields(sort_fields)
if error_field:
if of.startswith('h'):
write_warning(_("Sorry, %(x_name)s does not seem to be a valid sort option. The records will not be sorted.", x_name=cgi.escape(error_field)), "Error", req=req)
return slice_records(recIDs, jrec, rg)
if verbose >= 3 and of.startswith('h'):
write_warning("Sorting by tags %s." % cgi.escape(repr(tags)), req=req)
if sort_pattern:
write_warning("Sorting preferentially by %s." % cgi.escape(sort_pattern), req=req)
## check if we have sorting tag defined:
if tags:
# fetch the necessary field values:
for recID in recIDs:
val = "" # will hold value for recID according to which sort
vals = [] # will hold all values found in sorting tag for recID
for tag in tags:
if CFG_CERN_SITE and tag == '773__c':
# CERN hack: journal sorting
# 773__c contains page numbers, e.g. 3-13, and we want to sort by 3, and numerically:
vals.extend(["%050s" % x.split("-", 1)[0] for x in get_fieldvalues(recID, tag)])
else:
vals.extend(get_fieldvalues(recID, tag))
if sort_pattern:
# try to pick that tag value that corresponds to sort pattern
bingo = 0
for v in vals:
if v.lower().startswith(sort_pattern.lower()): # bingo!
bingo = 1
val = v
break
if not bingo: # sort_pattern not present, so add other vals after spaces
val = sort_pattern + " " + ''.join(vals)
else:
# no sort pattern defined, so join them all together
val = ''.join(vals)
val = strip_accents(val.lower()) # sort values regardless of accents and case
if val in recIDs_dict:
recIDs_dict[val].append(recID)
else:
recIDs_dict[val] = [recID]
# create output array:
for k in sorted(recIDs_dict.keys()):
recIDs_out.extend(recIDs_dict[k])
# ascending or descending?
if sort_order == 'd':
recIDs_out.reverse()
recIDs = recIDs_out
# return only up to the maximum that we need
return slice_records(recIDs, jrec, rg)
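# Hedged illustration (hypothetical values): with several report numbers on one
# record, sort_pattern='CERN-PS' makes sort_records_bibxxx() sort by the value
# starting with that prefix (e.g. 'CERN-PS-99-037' rather than 'hep-ph/9905221');
# when no value matches, the pattern is prepended before joining all the values.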
def get_interval_for_records_to_sort(nb_found, jrec=None, rg=None):
"""calculates in which interval should the sorted records be
a value of 'rg=-9999' means to print all records: to be used with care."""
if not jrec:
jrec = 1
if not rg:
#return all
return jrec-1, nb_found
if rg == -9999: # print all records
rg = nb_found
else:
rg = abs(rg)
if jrec < 1: # sanity checks
jrec = 1
if jrec > nb_found:
jrec = max(nb_found-rg+1, 1)
# will sort records from irec_min to irec_max excluded
irec_min = jrec - 1
irec_max = irec_min + rg
if irec_min < 0:
irec_min = 0
if irec_max > nb_found:
irec_max = nb_found
return irec_min, irec_max
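# Minimal usage sketch (hypothetical values): for 250 hits, jrec=11 and rg=10
# the records with indices [10, 20) are to be sorted (irec_max is exclusive).
def _example_get_interval_for_records_to_sort():
    return get_interval_for_records_to_sort(250, jrec=11, rg=10)  # -> (10, 20)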
def print_records(req, recIDs, jrec=1, rg=CFG_WEBSEARCH_DEF_RECORDS_IN_GROUPS, format='hb', ot='', ln=CFG_SITE_LANG,
relevances=[], relevances_prologue="(", relevances_epilogue="%%)",
decompress=zlib.decompress, search_pattern='', print_records_prologue_p=True,
print_records_epilogue_p=True, verbose=0, tab='', sf='', so='d', sp='',
rm='', em='', nb_found=-1):
"""
Prints list of records 'recIDs' formatted according to 'format' in
groups of 'rg' starting from 'jrec'.
Assumes that the input list 'recIDs' is sorted in reverse order,
so it counts records from tail to head.
A value of 'rg=-9999' means to print all records: to be used with care.
Print also list of RELEVANCES for each record (if defined), in
between RELEVANCE_PROLOGUE and RELEVANCE_EPILOGUE.
Print prologue and/or epilogue specific to 'format' if
'print_records_prologue_p' and/or 'print_records_epilogue_p' are
True.
'sf' is sort field and 'rm' is ranking method that are passed here
only for proper linking purposes: e.g. when a certain ranking
method or a certain sort field was selected, keep it selected in
any dynamic search links that may be printed.
"""
if em != "" and EM_REPOSITORY["body"] not in em:
return
# load the right message language
_ = gettext_set_language(ln)
# sanity checking:
if req is None:
return
# get user_info (for formatting based on user)
if isinstance(req, cStringIO.OutputType):
user_info = {}
else:
user_info = collect_user_info(req)
if nb_found == -1:
nb_found = len(recIDs)
if nb_found:
if not rg or rg == -9999: # print all records
rg = nb_found
else:
rg = abs(rg)
if jrec < 1: # sanity checks
jrec = 1
if jrec > nb_found:
jrec = max(nb_found-rg+1, 1)
# will print records from irec_max to irec_min excluded:
irec_max = nb_found - jrec
irec_min = nb_found - jrec - rg
if irec_min < 0:
irec_min = -1
if irec_max >= nb_found:
irec_max = nb_found - 1
#req.write("%s:%d-%d" % (recIDs, irec_min, irec_max))
if len(recIDs) > rg and rg != -9999:
recIDs = slice_records(recIDs, jrec, rg)
if format.startswith('x'):
# print header if needed
if print_records_prologue_p:
print_records_prologue(req, format)
if ot:
# asked to print some filtered fields only, so call print_record() on the fly:
for recid in recIDs:
x = print_record(recid,
format,
ot=ot,
ln=ln,
search_pattern=search_pattern,
user_info=user_info,
verbose=verbose,
sf=sf,
so=so,
sp=sp,
rm=rm)
req.write(x)
if x:
req.write('\n')
else:
format_records(recIDs,
format,
ln=ln,
search_pattern=search_pattern,
record_separator="\n",
user_info=user_info,
req=req)
# print footer if needed
if print_records_epilogue_p:
print_records_epilogue(req, format)
elif format.startswith('t') or str(format[0:3]).isdigit():
# we are doing plain text output:
for recid in recIDs:
x = print_record(recid, format, ot, ln, search_pattern=search_pattern,
user_info=user_info, verbose=verbose, sf=sf, so=so, sp=sp, rm=rm)
req.write(x)
if x:
req.write('\n')
elif format.startswith('recjson'):
# we are doing recjson output:
req.write('[')
for idx, recid in enumerate(recIDs):
if idx > 0:
req.write(',')
req.write(print_record(recid, format, ot, ln,
search_pattern=search_pattern,
user_info=user_info, verbose=verbose,
sf=sf, so=so, sp=sp, rm=rm))
req.write(']')
elif format == 'excel':
create_excel(recIDs=recIDs, req=req, ot=ot, user_info=user_info)
else:
# we are doing HTML output:
if format == 'hp' or format.startswith("hb_") or format.startswith("hd_"):
# portfolio and on-the-fly formats:
for recid in recIDs:
req.write(print_record(recid,
format,
ot=ot,
ln=ln,
search_pattern=search_pattern,
user_info=user_info,
verbose=verbose,
sf=sf,
so=so,
sp=sp,
rm=rm))
elif format.startswith("hb"):
# HTML brief format:
display_add_to_basket = True
if user_info:
if user_info['email'] == 'guest':
if CFG_ACCESS_CONTROL_LEVEL_ACCOUNTS > 4:
display_add_to_basket = False
else:
if not user_info['precached_usebaskets']:
display_add_to_basket = False
if em != "" and EM_REPOSITORY["basket"] not in em:
display_add_to_basket = False
req.write(websearch_templates.tmpl_record_format_htmlbrief_header(ln=ln))
for irec, recid in enumerate(recIDs):
row_number = jrec+irec
if relevances and relevances[irec]:
relevance = relevances[irec]
else:
relevance = ''
record = print_record(recid,
format,
ot=ot,
ln=ln,
search_pattern=search_pattern,
user_info=user_info,
verbose=verbose,
sf=sf,
so=so,
sp=sp,
rm=rm)
req.write(websearch_templates.tmpl_record_format_htmlbrief_body(
ln=ln,
recid=recid,
row_number=row_number,
relevance=relevance,
record=record,
relevances_prologue=relevances_prologue,
relevances_epilogue=relevances_epilogue,
display_add_to_basket=display_add_to_basket
))
req.write(websearch_templates.tmpl_record_format_htmlbrief_footer(
ln=ln,
display_add_to_basket=display_add_to_basket))
elif format.startswith("hd"):
# HTML detailed format:
referer = user_info.get('referer', '')
for recid in recIDs:
if record_exists(recid) == -1:
write_warning(_("The record has been deleted."), req=req)
merged_recid = get_merged_recid(recid)
if merged_recid:
write_warning(_("The record %(x_rec)d replaces it.", x_rec=merged_recid), req=req)
continue
unordered_tabs = get_detailed_page_tabs(get_colID(guess_collection_of_a_record(recid, referer, False)),
recid, ln=ln)
ordered_tabs_id = [(tab_id, values['order']) for (tab_id, values) in iteritems(unordered_tabs)]
ordered_tabs_id.sort(lambda x, y: cmp(x[1], y[1]))
link_ln = ''
if ln != CFG_SITE_LANG:
link_ln = '?ln=%s' % ln
recid_to_display = recid # Record ID used to build the URL.
if CFG_WEBSEARCH_USE_ALEPH_SYSNOS:
try:
recid_to_display = get_fieldvalues(recid,
CFG_BIBUPLOAD_EXTERNAL_SYSNO_TAG)[0]
except IndexError:
# No external sysno is available, keep using
# internal recid.
pass
tabs = [(unordered_tabs[tab_id]['label'],
'%s/%s/%s/%s%s' % (CFG_BASE_URL, CFG_SITE_RECORD, recid_to_display, tab_id, link_ln),
tab_id == tab,
unordered_tabs[tab_id]['enabled'])
for (tab_id, dummy_order) in ordered_tabs_id
if unordered_tabs[tab_id]['visible'] is True]
tabs_counts = get_detailed_page_tabs_counts(recid)
citedbynum = tabs_counts['Citations']
references = tabs_counts['References']
discussions = tabs_counts['Discussions']
# load content
if tab == 'usage':
req.write(webstyle_templates.detailed_record_container_top(recid,
tabs,
ln,
citationnum=citedbynum,
referencenum=references,
discussionnum=discussions))
r = calculate_reading_similarity_list(recid, "downloads")
downloadsimilarity = None
downloadhistory = None
#if r:
# downloadsimilarity = r
if CFG_BIBRANK_SHOW_DOWNLOAD_GRAPHS:
downloadhistory = create_download_history_graph_and_box(recid, ln)
r = calculate_reading_similarity_list(recid, "pageviews")
viewsimilarity = None
if r:
viewsimilarity = r
content = websearch_templates.tmpl_detailed_record_statistics(recid,
ln,
downloadsimilarity=downloadsimilarity,
downloadhistory=downloadhistory,
viewsimilarity=viewsimilarity)
req.write(content)
req.write(webstyle_templates.detailed_record_container_bottom(recid,
tabs,
ln))
elif tab == 'citations':
req.write(webstyle_templates.detailed_record_container_top(recid,
tabs,
ln,
citationnum=citedbynum,
referencenum=references,
discussionnum=discussions))
req.write(websearch_templates.tmpl_detailed_record_citations_prologue(recid, ln))
# Citing
citinglist = calculate_cited_by_list(recid)
req.write(websearch_templates.tmpl_detailed_record_citations_citing_list(recid,
ln,
citinglist,
sf=sf,
so=so,
sp=sp,
rm=rm))
# Self-cited
selfcited = get_self_cited_by(recid)
selfcited = rank_by_citations(selfcited, verbose=verbose)
selfcited = reversed(selfcited[0])
# use a fresh loop variable so the enclosing 'recid' is not clobbered
selfcited = [rid for rid, dummy in selfcited]
req.write(websearch_templates.tmpl_detailed_record_citations_self_cited(recid,
ln, selfcited=selfcited, citinglist=citinglist))
# Co-cited
s = calculate_co_cited_with_list(recid)
cociting = None
if s:
cociting = s
req.write(websearch_templates.tmpl_detailed_record_citations_co_citing(recid,
ln,
cociting=cociting))
# Citation history, if needed
citationhistory = None
if citinglist:
citationhistory = create_citation_history_graph_and_box(recid, ln)
#debug
if verbose > 3:
write_warning("Citation graph debug: " +
str(len(citationhistory)), req=req)
req.write(websearch_templates.tmpl_detailed_record_citations_citation_history(ln, citationhistory))
# Citation log
entries = get_citers_log(recid)
req.write(websearch_templates.tmpl_detailed_record_citations_citation_log(ln, entries))
req.write(websearch_templates.tmpl_detailed_record_citations_epilogue(recid, ln))
req.write(webstyle_templates.detailed_record_container_bottom(recid,
tabs,
ln))
elif tab == 'references':
req.write(webstyle_templates.detailed_record_container_top(recid,
tabs,
ln,
citationnum=citedbynum,
referencenum=references,
discussionnum=discussions))
req.write(format_record(recid, 'HDREF', ln=ln, user_info=user_info, verbose=verbose, force_2nd_pass=True))
req.write(webstyle_templates.detailed_record_container_bottom(recid,
tabs,
ln))
elif tab == 'keywords':
from invenio.legacy.bibclassify.webinterface import main_page
main_page(req, recid, tabs, ln,
webstyle_templates)
elif tab == 'plots':
req.write(webstyle_templates.detailed_record_container_top(recid,
tabs,
ln))
content = websearch_templates.tmpl_record_plots(recID=recid,
ln=ln)
req.write(content)
req.write(webstyle_templates.detailed_record_container_bottom(recid,
tabs,
ln))
elif tab == 'hepdata':
req.write(webstyle_templates.detailed_record_container_top(recid,
tabs,
ln,
include_jquery=True,
include_mathjax=True))
from invenio.utils import hepdata as hepdatautils
from invenio.utils.hepdata import display as hepdatadisplayutils
data = hepdatautils.retrieve_data_for_record(recid)
if data:
content = websearch_templates.tmpl_record_hepdata(data, recid, True)
else:
content = websearch_templates.tmpl_record_no_hepdata()
req.write(content)
req.write(webstyle_templates.detailed_record_container_bottom(recid,
tabs,
ln))
else:
# Metadata tab
req.write(webstyle_templates.detailed_record_container_top(
recid,
tabs,
ln,
show_short_rec_p=False,
citationnum=citedbynum,
referencenum=references,
discussionnum=discussions))
creationdate = None
modificationdate = None
if record_exists(recid) == 1:
creationdate = get_creation_date(recid)
modificationdate = get_modification_date(recid)
content = print_record(recid, format, ot, ln,
search_pattern=search_pattern,
user_info=user_info, verbose=verbose,
sf=sf, so=so, sp=sp, rm=rm)
content = websearch_templates.tmpl_detailed_record_metadata(
recID=recid,
ln=ln,
format=format,
creationdate=creationdate,
modificationdate=modificationdate,
content=content)
# display of the next-hit/previous-hit/back-to-search links
# on the detailed record pages
content += websearch_templates.tmpl_display_back_to_search(req,
recid,
ln)
req.write(content)
req.write(webstyle_templates.detailed_record_container_bottom(recid,
tabs,
ln,
creationdate=creationdate,
modificationdate=modificationdate,
show_short_rec_p=False))
if len(tabs) > 0:
# Add the mini box at bottom of the page
if CFG_WEBCOMMENT_ALLOW_REVIEWS:
from invenio.modules.comments.api import get_mini_reviews
reviews = get_mini_reviews(recid=recid, ln=ln)
else:
reviews = ''
actions = format_record(recid, 'HDACT', ln=ln, user_info=user_info, verbose=verbose)
files = format_record(recid, 'HDFILE', ln=ln, user_info=user_info, verbose=verbose)
req.write(webstyle_templates.detailed_record_mini_panel(recid,
ln,
format,
files=files,
reviews=reviews,
actions=actions))
else:
# Other formats
for recid in recIDs:
req.write(print_record(recid, format, ot, ln,
search_pattern=search_pattern,
user_info=user_info, verbose=verbose,
sf=sf, so=so, sp=sp, rm=rm))
else:
write_warning(_("Use different search terms."), req=req)
def print_records_prologue(req, format, cc=None):
"""
Print the appropriate prologue for list of records in the given
format.
"""
prologue = "" # no prologue needed for HTML or Text formats
if format.startswith('xm'):
prologue = websearch_templates.tmpl_xml_marc_prologue()
elif format.startswith('xn'):
prologue = websearch_templates.tmpl_xml_nlm_prologue()
elif format.startswith('xw'):
prologue = websearch_templates.tmpl_xml_refworks_prologue()
elif format.startswith('xr'):
prologue = websearch_templates.tmpl_xml_rss_prologue(cc=cc)
elif format.startswith('xe8x'):
prologue = websearch_templates.tmpl_xml_endnote_8x_prologue()
elif format.startswith('xe'):
prologue = websearch_templates.tmpl_xml_endnote_prologue()
elif format.startswith('xo'):
prologue = websearch_templates.tmpl_xml_mods_prologue()
elif format.startswith('xp'):
prologue = websearch_templates.tmpl_xml_podcast_prologue(cc=cc)
elif format.startswith('x'):
prologue = websearch_templates.tmpl_xml_default_prologue()
req.write(prologue)
def print_records_epilogue(req, format):
"""
Print the appropriate epilogue for list of records in the given
format.
"""
epilogue = "" # no epilogue needed for HTML or Text formats
if format.startswith('xm'):
epilogue = websearch_templates.tmpl_xml_marc_epilogue()
elif format.startswith('xn'):
epilogue = websearch_templates.tmpl_xml_nlm_epilogue()
elif format.startswith('xw'):
epilogue = websearch_templates.tmpl_xml_refworks_epilogue()
elif format.startswith('xr'):
epilogue = websearch_templates.tmpl_xml_rss_epilogue()
elif format.startswith('xe8x'):
epilogue = websearch_templates.tmpl_xml_endnote_8x_epilogue()
elif format.startswith('xe'):
epilogue = websearch_templates.tmpl_xml_endnote_epilogue()
elif format.startswith('xo'):
epilogue = websearch_templates.tmpl_xml_mods_epilogue()
elif format.startswith('xp'):
epilogue = websearch_templates.tmpl_xml_podcast_epilogue()
elif format.startswith('x'):
epilogue = websearch_templates.tmpl_xml_default_epilogue()
req.write(epilogue)
def get_record(recid):
"""Directly the record object corresponding to the recid."""
if CFG_BIBUPLOAD_SERIALIZE_RECORD_STRUCTURE:
value = run_sql("SELECT value FROM bibfmt WHERE id_bibrec=%s AND FORMAT='recstruct'", (recid, ))
if value:
try:
val = value[0][0]
except IndexError:
### In case it does not exist, let's build it!
pass
else:
return deserialize_via_marshal(val)
return create_record(print_record(recid, 'xm'))[0]
def print_record(recID, format='hb', ot='', ln=CFG_SITE_LANG, decompress=zlib.decompress,
search_pattern=None, user_info=None, verbose=0, sf='', so='d',
sp='', rm='', brief_links=True):
"""
Prints record 'recID' formatted according to 'format'.
'sf' is sort field and 'rm' is ranking method that are passed here
only for proper linking purposes: e.g. when a certain ranking
method or a certain sort field was selected, keep it selected in
any dynamic search links that may be printed.
"""
if format == 'recstruct':
return get_record(recID)
#check from user information if the user has the right to see hidden fields/tags in the
#records as well
can_see_hidden = False
if user_info:
can_see_hidden = user_info.get('precached_canseehiddenmarctags', False)
if format == 'recjson':
import json
from invenio.modules.records.api import get_record as get_recjson
ot = ot if ot and len(ot) else None
return json.dumps(get_recjson(recID).dumps(
keywords=ot, filter_hidden=not can_see_hidden))
_ = gettext_set_language(ln)
# The 'attribute this paper' link is shown only if the session states it should and
# the record is included in the collections to which bibauthorid is limited.
if user_info:
display_claim_this_paper = (user_info.get("precached_viewclaimlink", False) and
recID in intbitset.union(*[get_collection_reclist(x)
for x in BIBAUTHORID_LIMIT_TO_COLLECTIONS]))
else:
display_claim_this_paper = False
can_edit_record = False
if check_user_can_edit_record(user_info, recID):
can_edit_record = True
out = ""
# sanity check:
record_exist_p = record_exists(recID)
if record_exist_p == 0: # doesn't exist
return out
# We must still check some special formats, but these
# should disappear when BibFormat improves.
if not (format.lower().startswith('t')
or format.lower().startswith('hm')
or str(format[0:3]).isdigit()
or ot):
# Unspecified format is hd
if format == '':
format = 'hd'
if record_exist_p == -1 and get_output_format_content_type(format) == 'text/html':
# HTML output displays a default value for deleted records.
# Other formats have to deal with it.
out += _("The record has been deleted.")
# was record deleted-but-merged ?
merged_recid = get_merged_recid(recID)
if merged_recid:
out += ' ' + _("The record %(x_rec)d replaces it.", x_rec=merged_recid)
else:
out += call_bibformat(recID, format, ln, search_pattern=search_pattern,
user_info=user_info, verbose=verbose)
# at the end of HTML brief mode, print the "Detailed record" functionality:
if brief_links and format.lower().startswith('hb') and \
format.lower() != 'hb_p':
out += websearch_templates.tmpl_print_record_brief_links(ln=ln,
recID=recID,
sf=sf,
so=so,
sp=sp,
rm=rm,
display_claim_link=display_claim_this_paper,
display_edit_link=can_edit_record)
return out
if format == "marcxml" or format == "oai_dc":
out += " <record>\n"
out += " <header>\n"
for oai_id in get_fieldvalues(recID, CFG_OAI_ID_FIELD):
out += " <identifier>%s</identifier>\n" % oai_id
out += " <datestamp>%s</datestamp>\n" % get_modification_date(recID)
out += " </header>\n"
out += " <metadata>\n"
if format.startswith("xm") or format == "marcxml":
# look for detailed format existence:
query = "SELECT value FROM bibfmt WHERE id_bibrec=%s AND format=%s"
res = run_sql(query, (recID, format), 1)
if res and record_exist_p == 1 and not ot:
# record 'recID' is formatted in 'format', and we are not
# asking for field-filtered output; so print it:
out += "%s" % decompress(res[0][0])
elif ot:
# field-filtered output was asked for; print only some fields
record = get_record(recID)
if not can_see_hidden:
for tag in cfg['CFG_BIBFORMAT_HIDDEN_TAGS']:
del record[tag]
ot = list(set(ot) - set(cfg['CFG_BIBFORMAT_HIDDEN_TAGS']))
out += record_xml_output(record, ot)
else:
# record 'recID' is not formatted in 'format' or we ask
# for field-filtered output -- they are not in "bibfmt"
# table; so fetch all the data from "bibXXx" tables:
if format == "marcxml":
out += """ <record xmlns="http://www.loc.gov/MARC21/slim">\n"""
out += " <controlfield tag=\"001\">%d</controlfield>\n" % int(recID)
elif format.startswith("xm"):
out += """ <record>\n"""
out += " <controlfield tag=\"001\">%d</controlfield>\n" % int(recID)
if record_exist_p == -1:
# deleted record, so display only OAI ID and 980:
oai_ids = get_fieldvalues(recID, CFG_OAI_ID_FIELD)
if oai_ids:
out += "<datafield tag=\"%s\" ind1=\"%s\" ind2=\"%s\"><subfield code=\"%s\">%s</subfield></datafield>\n" % \
(CFG_OAI_ID_FIELD[0:3], CFG_OAI_ID_FIELD[3:4], CFG_OAI_ID_FIELD[4:5], CFG_OAI_ID_FIELD[5:6], oai_ids[0])
out += "<datafield tag=\"980\" ind1=\"\" ind2=\"\"><subfield code=\"c\">DELETED</subfield></datafield>\n"
else:
# controlfields
query = "SELECT b.tag,b.value,bb.field_number FROM bib00x AS b, bibrec_bib00x AS bb "\
"WHERE bb.id_bibrec=%s AND b.id=bb.id_bibxxx AND b.tag LIKE '00%%' "\
"ORDER BY bb.field_number, b.tag ASC"
res = run_sql(query, (recID, ))
for row in res:
field, value = row[0], row[1]
value = encode_for_xml(value)
out += """ <controlfield tag="%s">%s</controlfield>\n""" % \
(encode_for_xml(field[0:3]), value)
# datafields
i = 1 # Do not process bib00x and bibrec_bib00x, as
# they are controlfields. So start at bib01x and
# bibrec_bib01x (and set i = 0 at the end of
# the first loop)
for digit1 in range(0, 10):
for digit2 in range(i, 10):
bx = "bib%d%dx" % (digit1, digit2)
bibx = "bibrec_bib%d%dx" % (digit1, digit2)
query = "SELECT b.tag,b.value,bb.field_number FROM %s AS b, %s AS bb "\
"WHERE bb.id_bibrec=%%s AND b.id=bb.id_bibxxx AND b.tag LIKE %%s"\
"ORDER BY bb.field_number, b.tag ASC" % (bx, bibx)
res = run_sql(query, (recID, str(digit1)+str(digit2)+'%'))
field_number_old = -999
field_old = ""
for row in res:
field, value, field_number = row[0], row[1], row[2]
ind1, ind2 = field[3], field[4]
if ind1 == "_" or ind1 == "":
ind1 = " "
if ind2 == "_" or ind2 == "":
ind2 = " "
# print field tag, unless hidden
printme = True
if not can_see_hidden:
for htag in cfg['CFG_BIBFORMAT_HIDDEN_TAGS']:
ltag = len(htag)
samelenfield = field[0:ltag]
if samelenfield == htag:
printme = False
if printme:
if field_number != field_number_old or field[:-1] != field_old[:-1]:
if field_number_old != -999:
out += """ </datafield>\n"""
out += """ <datafield tag="%s" ind1="%s" ind2="%s">\n""" % \
(encode_for_xml(field[0:3]), encode_for_xml(ind1), encode_for_xml(ind2))
field_number_old = field_number
field_old = field
# print subfield value
value = encode_for_xml(value)
out += """ <subfield code="%s">%s</subfield>\n""" % \
(encode_for_xml(field[-1:]), value)
# all fields/subfields printed in this run, so close the tag:
if field_number_old != -999:
out += """ </datafield>\n"""
i = 0 # Subsequent digit1 loops should start again at digit2=0, i.e. at bibX0x and bibrec_bibX0x
# we are at the end of printing the record:
out += " </record>\n"
elif format == "xd" or format == "oai_dc":
# XML Dublin Core format, possibly OAI -- select only some bibXXx fields:
out += """ <dc xmlns="http://purl.org/dc/elements/1.1/"
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xsi:schemaLocation="http://purl.org/dc/elements/1.1/
http://www.openarchives.org/OAI/1.1/dc.xsd">\n"""
if record_exist_p == -1:
out += ""
else:
for f in get_fieldvalues(recID, "041__a"):
out += " <language>%s</language>\n" % f
for f in get_fieldvalues(recID, "100__a"):
out += " <creator>%s</creator>\n" % encode_for_xml(f)
for f in get_fieldvalues(recID, "700__a"):
out += " <creator>%s</creator>\n" % encode_for_xml(f)
for f in get_fieldvalues(recID, "245__a"):
out += " <title>%s</title>\n" % encode_for_xml(f)
for f in get_fieldvalues(recID, "65017a"):
out += " <subject>%s</subject>\n" % encode_for_xml(f)
for f in get_fieldvalues(recID, "8564_u"):
if f.split('.')[-1] == 'png':  # skip image links
continue
out += " <identifier>%s</identifier>\n" % encode_for_xml(f)
for f in get_fieldvalues(recID, "520__a"):
out += " <description>%s</description>\n" % encode_for_xml(f)
out += " <date>%s</date>\n" % get_creation_date(recID)
out += " </dc>\n"
elif len(format) == 6 and str(format[0:3]).isdigit():
# user has asked to print some fields only
if format == "001":
out += "<!--%s-begin-->%s<!--%s-end-->\n" % (format, recID, format)
else:
vals = get_fieldvalues(recID, format)
for val in vals:
out += "<!--%s-begin-->%s<!--%s-end-->\n" % (format, val, format)
elif format.startswith('t'):
## user directly asked for some tags to be displayed only
if record_exist_p == -1:
out += get_fieldvalues_alephseq_like(recID, ["001", CFG_OAI_ID_FIELD, "980"], can_see_hidden)
else:
out += get_fieldvalues_alephseq_like(recID, ot, can_see_hidden)
elif format == "hm":
if record_exist_p == -1:
out += "\n<pre style=\"margin: 1em 0px;\">" + cgi.escape(get_fieldvalues_alephseq_like(recID, ["001", CFG_OAI_ID_FIELD, "980"], can_see_hidden)) + "</pre>"
else:
out += "\n<pre style=\"margin: 1em 0px;\">" + cgi.escape(get_fieldvalues_alephseq_like(recID, ot, can_see_hidden)) + "</pre>"
elif format.startswith("h") and ot:
## user directly asked for some tags to be displayed only
if record_exist_p == -1:
out += "\n<pre>" + get_fieldvalues_alephseq_like(recID, ["001", CFG_OAI_ID_FIELD, "980"], can_see_hidden) + "</pre>"
else:
out += "\n<pre>" + get_fieldvalues_alephseq_like(recID, ot, can_see_hidden) + "</pre>"
elif format == "hd":
# HTML detailed format
if record_exist_p == -1:
out += _("The record has been deleted.")
else:
# look for detailed format existence:
query = "SELECT value FROM bibfmt WHERE id_bibrec=%s AND format=%s"
res = run_sql(query, (recID, format), 1)
if res:
# record 'recID' is formatted in 'format', so print it
out += "%s" % decompress(res[0][0])
else:
# record 'recID' is not formatted in 'format', so try to call BibFormat on the fly or use default format:
out_record_in_format = call_bibformat(recID, format, ln, search_pattern=search_pattern,
user_info=user_info, verbose=verbose)
if out_record_in_format:
out += out_record_in_format
else:
out += websearch_templates.tmpl_print_record_detailed(
ln = ln,
recID = recID,
)
elif format.startswith("hb_") or format.startswith("hd_"):
# underscore means that HTML brief/detailed formats should be called on-the-fly; suitable for testing formats
if record_exist_p == -1:
out += _("The record has been deleted.")
else:
out += call_bibformat(recID, format, ln, search_pattern=search_pattern,
user_info=user_info, verbose=verbose)
elif format.startswith("hx"):
# BibTeX format, called on the fly:
if record_exist_p == -1:
out += _("The record has been deleted.")
else:
out += call_bibformat(recID, format, ln, search_pattern=search_pattern,
user_info=user_info, verbose=verbose)
elif format.startswith("hs"):
# for citation/download similarity navigation links:
if record_exist_p == -1:
out += _("The record has been deleted.")
else:
out += '<a href="%s">' % websearch_templates.build_search_url(recid=recID, ln=ln)
# firstly, title:
titles = get_fieldvalues(recID, "245__a")
if titles:
for title in titles:
out += "<strong>%s</strong>" % title
else:
# usual title not found, try conference title:
titles = get_fieldvalues(recID, "111__a")
if titles:
for title in titles:
out += "<strong>%s</strong>" % title
else:
# just print record ID:
out += "<strong>%s %d</strong>" % (get_field_i18nname("record ID", ln, False), recID)
out += "</a>"
# secondly, authors:
authors = get_fieldvalues(recID, "100__a") + get_fieldvalues(recID, "700__a")
if authors:
out += " - %s" % authors[0]
if len(authors) > 1:
out += " <em>et al</em>"
# thirdly publication info:
publinfos = get_fieldvalues(recID, "773__s")
if not publinfos:
publinfos = get_fieldvalues(recID, "909C4s")
if not publinfos:
publinfos = get_fieldvalues(recID, "037__a")
if not publinfos:
publinfos = get_fieldvalues(recID, "088__a")
if publinfos:
out += " - %s" % publinfos[0]
else:
# fourthly publication year (if not publication info):
years = get_fieldvalues(recID, "773__y")
if not years:
years = get_fieldvalues(recID, "909C4y")
if not years:
years = get_fieldvalues(recID, "260__c")
if years:
out += " (%s)" % years[0]
else:
# HTML brief format by default
if record_exist_p == -1:
out += _("The record has been deleted.")
else:
query = "SELECT value FROM bibfmt WHERE id_bibrec=%s AND format=%s"
res = run_sql(query, (recID, format))
if res:
# record 'recID' is formatted in 'format', so print it
out += "%s" % decompress(res[0][0])
else:
# record 'recID' is not formatted in 'format', so try to call BibFormat on the fly, or use the default format:
if CFG_WEBSEARCH_CALL_BIBFORMAT:
out_record_in_format = call_bibformat(recID, format, ln, search_pattern=search_pattern,
user_info=user_info, verbose=verbose)
if out_record_in_format:
out += out_record_in_format
else:
out += websearch_templates.tmpl_print_record_brief(
ln = ln,
recID = recID,
)
else:
out += websearch_templates.tmpl_print_record_brief(
ln = ln,
recID = recID,
)
# at the end of HTML brief mode, print the "Detailed record" functionality:
if format == 'hp' or format.startswith("hb_") or format.startswith("hd_"):
pass # do nothing for portfolio and on-the-fly formats
else:
out += websearch_templates.tmpl_print_record_brief_links(ln=ln,
recID=recID,
sf=sf,
so=so,
sp=sp,
rm=rm,
display_claim_link=display_claim_this_paper,
display_edit_link=can_edit_record)
# print record closing tags, if needed:
if format == "marcxml" or format == "oai_dc":
out += " </metadata>\n"
out += " </record>\n"
return out
def call_bibformat(recID, format="HD", ln=CFG_SITE_LANG, search_pattern=None, user_info=None, verbose=0):
"""
Calls BibFormat and returns formatted record.
BibFormat will decide by itself if old or new BibFormat must be used.
"""
from invenio.modules.formatter.utils import get_pdf_snippets
keywords = []
if search_pattern is not None:
for unit in create_basic_search_units(None, str(search_pattern), None):
bsu_o, bsu_p, bsu_f, bsu_m = unit[0], unit[1], unit[2], unit[3]
if (bsu_o != '-' and bsu_f in [None, 'fulltext']):
if bsu_m == 'a' and bsu_p.startswith('%') and bsu_p.endswith('%'):
# remove leading and trailing `%' representing partial phrase search
keywords.append(bsu_p[1:-1])
else:
keywords.append(bsu_p)
out = format_record(recID,
of=format,
ln=ln,
search_pattern=keywords,
user_info=user_info,
verbose=verbose)
if CFG_WEBSEARCH_FULLTEXT_SNIPPETS and user_info and \
'fulltext' in user_info['uri'].lower():
# check snippets only if URL contains fulltext
# FIXME: make it work for CLI too, via new function arg
if keywords:
snippets = ''
try:
snippets = get_pdf_snippets(recID, keywords, user_info)
except:
register_exception()
if snippets:
out += snippets
return out
def log_query(hostname, query_args, uid=-1):
"""
Log query into the query and user_query tables.
Return id_query or None in case of problems.
"""
id_query = None
if uid >= 0:
# log the query only if uid is reasonable
res = run_sql("SELECT id FROM query WHERE urlargs=%s", (query_args,), 1)
try:
id_query = res[0][0]
except IndexError:
id_query = run_sql("INSERT INTO query (type, urlargs) VALUES ('r', %s)", (query_args,))
if id_query:
run_sql("INSERT INTO user_query (id_user, id_query, hostname, date) VALUES (%s, %s, %s, %s)",
(uid, id_query, hostname,
time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())))
return id_query
def log_query_info(action, p, f, colls, nb_records_found_total=-1):
"""Write some info to the log file for later analysis."""
try:
log = open(CFG_LOGDIR + "/search.log", "a")
log.write(time.strftime("%Y%m%d%H%M%S#", time.localtime()))
log.write(action+"#")
log.write(p+"#")
log.write(f+"#")
for coll in colls[:-1]:
log.write("%s," % coll)
log.write("%s#" % colls[-1])
log.write("%d" % nb_records_found_total)
log.write("\n")
log.close()
except:
pass
return
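# Hedged illustration (hypothetical values): each call appends one '#'-separated
# line to CFG_LOGDIR/search.log of the form
#
#   20130101123456#<action>#ellis#author#Theses,Books#42
#
# i.e. timestamp, action, pattern, field, comma-separated collections, hit count.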
def clean_dictionary(dictionary, list_of_items):
"""Returns a copy of the dictionary with all the items
in the list_of_items as empty strings"""
out_dictionary = dictionary.copy()
out_dictionary.update((item, '') for item in list_of_items)
return out_dictionary
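# Minimal usage sketch (hypothetical values): clean_dictionary() leaves its input
# untouched and returns a copy with the listed keys blanked out.
def _example_clean_dictionary():
    args = {'p': 'ellis', 'f': 'author', 'rg': 10}
    return clean_dictionary(args, ['p', 'f'])  # -> {'p': '', 'f': '', 'rg': 10}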
### CALLABLES
def perform_request_search(req=None, cc=CFG_SITE_NAME, c=None, p="", f="", rg=None, sf="", so="a", sp="", rm="", of="id", ot="", aas=0,
p1="", f1="", m1="", op1="", p2="", f2="", m2="", op2="", p3="", f3="", m3="", sc=0, jrec=0,
recid=-1, recidb=-1, sysno="", id=-1, idb=-1, sysnb="", action="", d1="",
d1y=0, d1m=0, d1d=0, d2="", d2y=0, d2m=0, d2d=0, dt="", verbose=0, ap=0, ln=CFG_SITE_LANG, ec=None, tab="",
wl=0, em=""):
"""Perform search or browse request, without checking for
authentication. Return list of recIDs found, if of=id.
Otherwise create web page.
The arguments are as follows:
req - mod_python Request class instance.
cc - current collection (e.g. "ATLAS"). The collection the
user started to search/browse from.
c - collection list (e.g. ["Theses", "Books"]). The
collections user may have selected/deselected when
starting to search from 'cc'.
p - pattern to search for (e.g. "ellis and muon or kaon").
f - field to search within (e.g. "author").
rg - records in groups of (e.g. "10"). Defines how many hits
per collection in the search results page are
displayed. (Note that `rg' is ignored in case of `of=id'.)
sf - sort field (e.g. "title").
so - sort order ("a"=ascending, "d"=descending).
sp - sort pattern (e.g. "CERN-") -- in case there are more
values in a sort field, this argument tells which one
to prefer
rm - ranking method (e.g. "jif"). Defines whether results
should be ranked by some known ranking method.
of - output format (e.g. "hb"). Usually starting "h" means
HTML output (and "hb" for HTML brief, "hd" for HTML
detailed), "x" means XML output, "t" means plain text
output, "id" means no output at all but to return list
of recIDs found, "intbitset" means to return an intbitset
representation of the recIDs found (no sorting or ranking
will be performed). (Suitable for high-level API.)
ot - output only these MARC tags (e.g. "100,700,909C0b").
Useful if only some fields are to be shown in the
output, e.g. to let the library control which fields are exposed.
em - output only part of the page.
aas - advanced search ("0" means no, "1" means yes). Whether
search was called from within the advanced search
interface.
p1 - first pattern to search for in the advanced search
interface. Much like 'p'.
f1 - first field to search within in the advanced search
interface. Much like 'f'.
m1 - first matching type in the advanced search interface.
("a" all of the words, "o" any of the words, "e" exact
phrase, "p" partial phrase, "r" regular expression).
op1 - first operator, to join the first and the second unit
in the advanced search interface. ("a" add, "o" or,
"n" not).
p2 - second pattern to search for in the advanced search
interface. Much like 'p'.
f2 - second field to search within in the advanced search
interface. Much like 'f'.
m2 - second matching type in the advanced search interface.
("a" all of the words, "o" any of the words, "e" exact
phrase, "p" partial phrase, "r" regular expression).
op2 - second operator, to join the second and the third unit
in the advanced search interface. ("a" add, "o" or,
"n" not).
p3 - third pattern to search for in the advanced search
interface. Much like 'p'.
f3 - third field to search within in the advanced search
interface. Much like 'f'.
m3 - third matching type in the advanced search interface.
("a" all of the words, "o" any of the words, "e" exact
phrase, "p" partial phrase, "r" regular expression).
sc - split by collection ("0" no, "1" yes). Governs whether
we want to present the results in a single huge list,
or split by collection.
jrec - jump to record (e.g. "234"). Used for navigation
inside the search results. (Note that `jrec' is ignored
in case of `of=id'.)
recid - display record ID (e.g. "20000"). Do not
search/browse but go straight away to the Detailed
record page for the given recID.
recidb - display record ID bis (e.g. "20010"). If greater than
'recid', then display records from recid to recidb.
Useful for example for dumping records from the
database for reformatting.
sysno - display old system SYS number (e.g. ""). If you
migrate to Invenio from another system, and store your
old SYS call numbers, you can use them instead of recid
if you wish so.
id - the same as recid, in case recid is not set. For
backwards compatibility.
idb - the same as recid, in case recidb is not set. For
backwards compatibility.
sysnb - the same as sysno, in case sysno is not set. For
backwards compatibility.
action - action to do. "SEARCH" for searching, "Browse" for
browsing. Default is to search.
d1 - first datetime in full YYYY-mm-dd HH:MM:SS format
(e.g. "1998-08-23 12:34:56"). Useful for search limits
on creation/modification date (see 'dt' argument
below). Note that 'd1' takes precedence over d1y, d1m,
d1d if these are defined.
d1y - first date's year (e.g. "1998"). Useful for search
limits on creation/modification date.
d1m - first date's month (e.g. "08"). Useful for search
limits on creation/modification date.
d1d - first date's day (e.g. "23"). Useful for search
limits on creation/modification date.
d2 - second datetime in full YYYY-mm-dd HH:MM:SS format
(e.g. "1998-09-02 12:34:56"). Useful for search limits
on creation/modification date (see 'dt' argument
below). Note that 'd2' takes precedence over d2y, d2m,
d2d if these are defined.
d2y - second date's year (e.g. "1998"). Useful for search
limits on creation/modification date.
d2m - second date's month (e.g. "09"). Useful for search
limits on creation/modification date.
d2d - second date's day (e.g. "02"). Useful for search
limits on creation/modification date.
dt - first and second date's type (e.g. "c"). Specifies
whether to search in creation dates ("c") or in
modification dates ("m"). When dt is not set and d1*
and d2* are set, the default is "c".
verbose - verbose level (0=min, 9=max). Useful to print some
internal information on the searching process in case
something goes wrong.
ap - alternative patterns (0=no, 1=yes). In case no exact
match is found, the search engine can try alternative
patterns e.g. to replace non-alphanumeric characters by
a boolean query. ap defines if this is wanted.
ln - language of the search interface (e.g. "en"). Useful
for internationalization.
ec - list of external search engines to search as well
(e.g. "SPIRES HEP").
wl - wildcard limit (e.g. 100): wildcard queries will be
limited to at most 100 results
"""
kwargs = prs_wash_arguments(req=req, cc=cc, c=c, p=p, f=f, rg=rg, sf=sf, so=so, sp=sp, rm=rm, of=of, ot=ot, aas=aas,
p1=p1, f1=f1, m1=m1, op1=op1, p2=p2, f2=f2, m2=m2, op2=op2, p3=p3, f3=f3, m3=m3, sc=sc, jrec=jrec,
recid=recid, recidb=recidb, sysno=sysno, id=id, idb=idb, sysnb=sysnb, action=action, d1=d1,
d1y=d1y, d1m=d1m, d1d=d1d, d2=d2, d2y=d2y, d2m=d2m, d2d=d2d, dt=dt, verbose=verbose, ap=ap, ln=ln, ec=ec,
tab=tab, wl=wl, em=em)
return prs_perform_search(kwargs=kwargs, **kwargs)
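# Hedged usage sketch (not part of the original module): with of='id' the call
# returns the list of matching recIDs directly, which is the typical high-level
# API use; the pattern, field and collection below are hypothetical.
def _example_perform_request_search():
    return perform_request_search(p='ellis', f='author', cc=CFG_SITE_NAME, of='id')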
def prs_perform_search(kwargs=None, **dummy):
"""Internal call which does the search, it is calling standard Invenio;
Unless you know what you are doing, don't use this call as an API
"""
# separately because we can call it independently
out = prs_wash_arguments_colls(kwargs=kwargs, **kwargs)
if not out:
return out
return prs_search(kwargs=kwargs, **kwargs)
def prs_wash_arguments_colls(kwargs=None, of=None, req=None, cc=None, c=None, sc=None, verbose=None,
aas=None, ln=None, em="", **dummy):
"""
Check and wash collection list argument before we start searching.
If there are troubles, e.g. a collection is not defined, print
warning to the browser.
@return: True if collection list is OK, and various False values
(empty string, empty list) if there was an error.
"""
# raise an exception when trying to print out html from the cli
if of.startswith("h"):
assert req
# for every search engine request asking for an HTML output, we
# first regenerate cache of collection and field I18N names if
# needed; so that later we won't bother checking timestamps for
# I18N names at all:
if of.startswith("h"):
collection_i18nname_cache.recreate_cache_if_needed()
field_i18nname_cache.recreate_cache_if_needed()
try:
(cc, colls_to_display, colls_to_search, hosted_colls, wash_colls_debug) = wash_colls(cc, c, sc, verbose) # which colls to search and to display?
kwargs['colls_to_display'] = colls_to_display
kwargs['colls_to_search'] = colls_to_search
kwargs['hosted_colls'] = hosted_colls
kwargs['wash_colls_debug'] = wash_colls_debug
except InvenioWebSearchUnknownCollectionError as exc:
colname = exc.colname
if of.startswith("h"):
page_start(req, of, cc, aas, ln, getUid(req),
websearch_templates.tmpl_collection_not_found_page_title(colname, ln))
req.write(websearch_templates.tmpl_collection_not_found_page_body(colname, ln))
page_end(req, of, ln, em)
return ''
elif of == "id":
return []
elif of == "intbitset":
return intbitset()
elif of == "recjson":
return []
elif of.startswith("x"):
# Print empty, but valid XML
print_records_prologue(req, of)
print_records_epilogue(req, of)
page_end(req, of, ln, em)
return ''
else:
page_end(req, of, ln, em)
return ''
return True
def prs_wash_arguments(req=None, cc=CFG_SITE_NAME, c=None, p="", f="", rg=CFG_WEBSEARCH_DEF_RECORDS_IN_GROUPS,
sf="", so="d", sp="", rm="", of="id", ot="", aas=0,
p1="", f1="", m1="", op1="", p2="", f2="", m2="", op2="", p3="", f3="", m3="",
sc=0, jrec=0, recid=-1, recidb=-1, sysno="", id=-1, idb=-1, sysnb="", action="", d1="",
d1y=0, d1m=0, d1d=0, d2="", d2y=0, d2m=0, d2d=0, dt="", verbose=0, ap=0, ln=CFG_SITE_LANG,
ec=None, tab="", uid=None, wl=0, em="", **dummy):
"""
Set default values and wash/check the remaining arguments for the PRS call.
"""
# wash output format:
of = wash_output_format(of)
# wash all arguments requiring special care
p = wash_pattern(p)
f = wash_field(f)
p1 = wash_pattern(p1)
f1 = wash_field(f1)
p2 = wash_pattern(p2)
f2 = wash_field(f2)
p3 = wash_pattern(p3)
f3 = wash_field(f3)
(d1y, d1m, d1d, d2y, d2m, d2d) = map(int, (d1y, d1m, d1d, d2y, d2m, d2d))
datetext1, datetext2 = wash_dates(d1, d1y, d1m, d1d, d2, d2y, d2m, d2d)
# wash ranking method:
if not is_method_valid(None, rm):
rm = ""
# backwards compatibility: id, idb, sysnb -> recid, recidb, sysno (if applicable)
if sysnb != "" and sysno == "":
sysno = sysnb
if id > 0 and recid == -1:
recid = id
if idb > 0 and recidb == -1:
recidb = idb
# TODO: deduce passed search limiting criteria (if applicable)
pl, pl_in_url = "", "" # no limits by default
if action != "browse" and req and not isinstance(req, (cStringIO.OutputType, dict)) \
and getattr(req, 'args', None): # we do not want to add options while browsing or while calling via command-line
fieldargs = cgi.parse_qs(req.args)
for fieldcode in get_fieldcodes():
if fieldcode in fieldargs:
for val in fieldargs[fieldcode]:
pl += "+%s:\"%s\" " % (fieldcode, val)
pl_in_url += "&%s=%s" % (urllib.quote(fieldcode), urllib.quote(val))
# deduce recid from sysno argument (if applicable):
if sysno: # ALEPH SYS number was passed, so deduce DB recID for the record:
recid = get_mysql_recid_from_aleph_sysno(sysno)
if recid is None:
recid = 0 # use recid 0 to indicate that this sysno does not exist
# deduce collection we are in (if applicable):
if recid > 0:
referer = None
if req:
referer = req.headers_in.get('Referer')
cc = guess_collection_of_a_record(recid, referer)
# deduce user id (if applicable):
if uid is None:
try:
uid = getUid(req)
except:
uid = 0
_ = gettext_set_language(ln)
if aas == 2: #add-to-search interface
p = create_add_to_search_pattern(p, p1, f1, m1, op1)
default_addtosearch_args = websearch_templates.restore_search_args_to_default(['p1', 'f1', 'm1', 'op1'])
if req:
req.argd.update(default_addtosearch_args)
req.argd['p'] = p
kwargs = {'req': req, 'cc': cc, 'c': c, 'p': p, 'f': f, 'rg': rg, 'sf': sf,
'so': so, 'sp': sp, 'rm': rm, 'of': of, 'ot': ot, 'aas': aas,
'p1': p1, 'f1': f1, 'm1': m1, 'op1': op1, 'p2': p2, 'f2': f2,
'm2': m2, 'op2': op2, 'p3': p3, 'f3': f3, 'm3': m3, 'sc': sc,
'jrec': jrec, 'recid': recid, 'recidb': recidb, 'sysno': sysno,
'id': id, 'idb': idb, 'sysnb': sysnb, 'action': action, 'd1': d1,
'd1y': d1y, 'd1m': d1m, 'd1d': d1d, 'd2': d2, 'd2y': d2y,
'd2m': d2m, 'd2d': d2d, 'dt': dt, 'verbose': verbose, 'ap': ap,
'ln': ln, 'ec': ec, 'tab': tab, 'wl': wl, 'em': em,
'datetext1': datetext1, 'datetext2': datetext2, 'uid': uid,
'pl': pl, 'pl_in_url': pl_in_url, '_': _,
'selected_external_collections_infos': None,
}
kwargs.update(**dummy)
return kwargs
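# --- Illustrative sketch (not part of the original Invenio module) -----------
# Shows the shape of the `pl` / `pl_in_url` limits built above from extra
# field-code URL arguments. The field code 'author' and the value 'Ellis' are
# assumptions made purely for illustration; `pl` is later applied as an extra
# search-pattern limit, while `pl_in_url` keeps the limit in generated links.
def _example_field_code_limits():
    fieldargs = {'author': ['Ellis']}
    pl, pl_in_url = "", ""
    for fieldcode, values in fieldargs.items():
        for val in values:
            pl += "+%s:\"%s\" " % (fieldcode, val)
            pl_in_url += "&%s=%s" % (urllib.quote(fieldcode), urllib.quote(val))
    return pl, pl_in_url   # ('+author:"Ellis" ', '&author=Ellis')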
def prs_search(kwargs=None, recid=0, req=None, cc=None, p=None, p1=None, p2=None, p3=None,
f=None, ec=None, verbose=None, ln=None, selected_external_collections_infos=None,
action=None, rm=None, of=None, em=None,
**dummy):
"""
This function writes various bits into the req object as the search
proceeds (so that pieces of the page are rendered even before the
search has ended).
"""
## 0 - start output
if recid >= 0: # recid can be 0 if deduced from sysno and if such sysno does not exist
output = prs_detailed_record(kwargs=kwargs, **kwargs)
if output is not None:
return output
elif action == "browse":
## 2 - browse needed
of = 'hb'
output = prs_browse(kwargs=kwargs, **kwargs)
if output is not None:
return output
elif rm and p.startswith("recid:"):
## 3-ter - similarity search (or old-style citation search) needed
output = prs_search_similar_records(kwargs=kwargs, **kwargs)
if output is not None:
return output
elif p.startswith("cocitedwith:"): #WAS EXPERIMENTAL
## 3-terter - cited by search needed
output = prs_search_cocitedwith(kwargs=kwargs, **kwargs)
if output is not None:
return output
else:
## 3 - common search needed
output = prs_search_common(kwargs=kwargs, **kwargs)
if output is not None:
return output
# External searches
if of.startswith("h"):
if not of in ['hcs', 'hcs2']:
perform_external_collection_search_with_em(req, cc, [p, p1, p2, p3], f, ec, verbose,
ln, selected_external_collections_infos, em=em)
return page_end(req, of, ln, em)
def prs_detailed_record(kwargs=None, req=None, of=None, cc=None, aas=None, ln=None, uid=None, recid=None, recidb=None,
p=None, verbose=None, tab=None, sf=None, so=None, sp=None, rm=None, ot=None, _=None, em=None,
**dummy):
"""Formats and prints one record"""
## 1 - detailed record display
title, description, keywords = \
websearch_templates.tmpl_record_page_header_content(req, recid, ln)
if req is not None and req.method != 'HEAD':
page_start(req, of, cc, aas, ln, uid, title, description, keywords, recid, tab, em)
# Default format is hb but we are in detailed -> change 'of'
if of == "hb":
of = "hd"
if record_exists(recid):
if recidb <= recid: # sanity check
recidb = recid + 1
if of in ["id", "intbitset"]:
result = [recidx for recidx in range(recid, recidb) if record_exists(recidx)]
if of == "intbitset":
return intbitset(result)
else:
return result
else:
print_records(req, range(recid, recidb), -1, -9999, of, ot, ln,
search_pattern=p, verbose=verbose, tab=tab, sf=sf,
so=so, sp=sp, rm=rm, em=em, nb_found=len(range(recid, recidb)))
if req and of.startswith("h"): # register detailed record page view event
client_ip_address = str(req.remote_ip)
register_page_view_event(recid, uid, client_ip_address)
else: # record does not exist
if of == "id":
return []
elif of == "intbitset":
return intbitset()
elif of == "recjson":
return []
elif of.startswith("x"):
# Print empty, but valid XML
print_records_prologue(req, of)
print_records_epilogue(req, of)
elif of.startswith("h"):
if req.header_only:
raise apache.SERVER_RETURN(apache.HTTP_NOT_FOUND)
else:
write_warning(_("Requested record does not seem to exist."), req=req)
def prs_browse(kwargs=None, req=None, of=None, cc=None, aas=None, ln=None, uid=None, _=None, p=None,
p1=None, p2=None, p3=None, colls_to_display=None, f=None, rg=None, sf=None,
so=None, sp=None, rm=None, ot=None, f1=None, m1=None, op1=None,
f2=None, m2=None, op2=None, f3=None, m3=None, sc=None, pl=None,
d1y=None, d1m=None, d1d=None, d2y=None, d2m=None, d2d=None,
dt=None, jrec=None, ec=None, action=None,
colls_to_search=None, verbose=None, em=None, **dummy):
page_start(req, of, cc, aas, ln, uid, _("Browse"), p=create_page_title_search_pattern_info(p, p1, p2, p3), em=em)
req.write(create_search_box(cc, colls_to_display, p, f, rg, sf, so, sp, rm, of, ot, aas, ln, p1, f1, m1, op1,
p2, f2, m2, op2, p3, f3, m3, sc, pl, d1y, d1m, d1d, d2y, d2m, d2d, dt, jrec, ec, action,
em
))
write_warning(create_exact_author_browse_help_link(p, p1, p2, p3, f, f1, f2, f3,
rm, cc, ln, jrec, rg, aas, action),
req=req)
try:
if aas == 1 or (p1 or p2 or p3):
browse_pattern(req, colls_to_search, p1, f1, rg, ln)
browse_pattern(req, colls_to_search, p2, f2, rg, ln)
browse_pattern(req, colls_to_search, p3, f3, rg, ln)
else:
browse_pattern(req, colls_to_search, p, f, rg, ln)
except KeyboardInterrupt:
# This happens usually from the command line
# The error handling we want is different
raise
except:
register_exception(req=req, alert_admin=True)
if of.startswith("h"):
req.write(create_error_box(req, verbose=verbose, ln=ln))
elif of.startswith("x"):
# Print empty, but valid XML
print_records_prologue(req, of)
print_records_epilogue(req, of)
return page_end(req, of, ln, em)
def prs_search_similar_records(kwargs=None, req=None, of=None, cc=None, pl_in_url=None, ln=None, uid=None, _=None, p=None,
p1=None, p2=None, p3=None, colls_to_display=None, f=None, rg=None, sf=None,
so=None, sp=None, rm=None, ot=None, aas=None, f1=None, m1=None, op1=None,
f2=None, m2=None, op2=None, f3=None, m3=None, sc=None, pl=None,
d1y=None, d1m=None, d1d=None, d2y=None, d2m=None, d2d=None,
dt=None, jrec=None, ec=None, action=None, em=None,
verbose=None, **dummy):
if req and req.method != 'HEAD':
page_start(req, of, cc, aas, ln, uid, _("Search Results"), p=create_page_title_search_pattern_info(p, p1, p2, p3),
em=em)
if of.startswith("h"):
req.write(create_search_box(cc, colls_to_display, p, f, rg, sf, so, sp, rm, of, ot, aas, ln, p1, f1, m1, op1,
p2, f2, m2, op2, p3, f3, m3, sc, pl, d1y, d1m, d1d, d2y, d2m, d2d, dt, jrec, ec, action,
em
))
recid = p[6:]
if record_exists(recid) != 1:
# record does not exist
if of.startswith("h"):
if req.header_only:
raise apache.SERVER_RETURN(apache.HTTP_NOT_FOUND)
else:
write_warning(_("Requested record does not seem to exist."), req=req)
if of == "id":
return []
if of == "intbitset":
return intbitset()
elif of == "recjson":
return []
elif of.startswith("x"):
# Print empty, but valid XML
print_records_prologue(req, of)
print_records_epilogue(req, of)
else:
# the record does exist, so find records similar to it
t1 = os.times()[4]
(results_similar_recIDs,
results_similar_relevances,
results_similar_relevances_prologue,
results_similar_relevances_epilogue,
results_similar_comments) = \
rank_records_bibrank(rank_method_code=rm,
rank_limit_relevance=0,
hitset=get_collection_reclist(cc),
related_to=[p],
verbose=verbose,
field=f,
rg=rg,
jrec=jrec)
if results_similar_recIDs:
t2 = os.times()[4]
cpu_time = t2 - t1
if of.startswith("h"):
req.write(print_search_info(p, f, sf, so, sp, rm, of, ot, cc, len(results_similar_recIDs),
jrec, rg, aas, ln, p1, p2, p3, f1, f2, f3, m1, m2, m3, op1, op2,
sc, pl_in_url,
d1y, d1m, d1d, d2y, d2m, d2d, dt, cpu_time, em=em))
write_warning(results_similar_comments, req=req)
print_records(req, results_similar_recIDs, jrec, rg, of, ot, ln,
results_similar_relevances,
results_similar_relevances_prologue,
results_similar_relevances_epilogue,
search_pattern=p, verbose=verbose, sf=sf, so=so,
sp=sp, rm=rm, em=em,
nb_found=len(results_similar_recIDs))
elif of == "id":
return results_similar_recIDs
elif of == "intbitset":
return intbitset(results_similar_recIDs)
elif of.startswith("x"):
print_records(req, results_similar_recIDs, jrec, rg, of, ot, ln,
results_similar_relevances,
results_similar_relevances_prologue,
results_similar_relevances_epilogue,
search_pattern=p, verbose=verbose, sf=sf, so=so,
sp=sp, rm=rm, em=em,
nb_found=len(results_similar_recIDs))
else:
# rank_records failed and returned some error message to display:
if of.startswith("h"):
write_warning(results_similar_relevances_prologue, req=req)
write_warning(results_similar_relevances_epilogue, req=req)
write_warning(results_similar_comments, req=req)
if of == "id":
return []
elif of == "intbitset":
return intbitset()
elif of == "recjson":
return []
elif of.startswith("x"):
# Print empty, but valid XML
print_records_prologue(req, of)
print_records_epilogue(req, of)
def prs_search_cocitedwith(kwargs=None, req=None, of=None, cc=None, pl_in_url=None, ln=None, uid=None, _=None, p=None,
p1=None, p2=None, p3=None, colls_to_display=None, f=None, rg=None, sf=None,
so=None, sp=None, rm=None, ot=None, aas=None, f1=None, m1=None, op1=None,
f2=None, m2=None, op2=None, f3=None, m3=None, sc=None, pl=None,
d1y=None, d1m=None, d1d=None, d2y=None, d2m=None, d2d=None,
dt=None, jrec=None, ec=None, action=None,
verbose=None, em=None, **dummy):
page_start(req, of, cc, aas, ln, uid, _("Search Results"), p=create_page_title_search_pattern_info(p, p1, p2, p3),
em=em)
if of.startswith("h"):
req.write(create_search_box(cc, colls_to_display, p, f, rg, sf, so, sp, rm, of, ot, aas, ln, p1, f1, m1, op1,
p2, f2, m2, op2, p3, f3, m3, sc, pl, d1y, d1m, d1d, d2y, d2m, d2d, dt, jrec, ec, action,
em
))
recID = p[12:]
if record_exists(recID) != 1:
# record does not exist
if of.startswith("h"):
write_warning(_("Requested record does not seem to exist."), req=req)
if of == "id":
return []
elif of == "intbitset":
return intbitset()
elif of == "recjson":
return []
elif of.startswith("x"):
# Print empty, but valid XML
print_records_prologue(req, of)
print_records_epilogue(req, of)
else:
# the record does exist, so find records co-cited with it:
t1 = os.times()[4]
results_cocited_recIDs = [x[0] for x in calculate_co_cited_with_list(int(recID))]
if results_cocited_recIDs:
t2 = os.times()[4]
cpu_time = t2 - t1
if of.startswith("h"):
req.write(print_search_info(p, f, sf, so, sp, rm, of, ot, CFG_SITE_NAME, len(results_cocited_recIDs),
jrec, rg, aas, ln, p1, p2, p3, f1, f2, f3, m1, m2, m3, op1, op2,
sc, pl_in_url,
d1y, d1m, d1d, d2y, d2m, d2d, dt, cpu_time, em=em))
print_records(req, results_cocited_recIDs, jrec, rg, of, ot, ln,
search_pattern=p, verbose=verbose, sf=sf, so=so,
sp=sp, rm=rm, em=em,
nb_found=len(results_cocited_recIDs))
elif of == "id":
return results_cocited_recIDs
elif of == "intbitset":
return intbitset(results_cocited_recIDs)
elif of.startswith("x"):
print_records(req, results_cocited_recIDs, jrec, rg, of, ot, ln,
search_pattern=p, verbose=verbose, sf=sf, so=so,
sp=sp, rm=rm, em=em,
nb_found=len(results_cocited_recIDs))
else:
# no co-cited records were found, so there is nothing to display:
if of.startswith("h"):
write_warning("nothing found", req=req)
if of == "id":
return []
elif of == "intbitset":
return intbitset()
elif of == "recjson":
return []
elif of.startswith("x"):
# Print empty, but valid XML
print_records_prologue(req, of)
print_records_epilogue(req, of)
def prs_search_hosted_collections(kwargs=None, req=None, of=None, ln=None, _=None, p=None,
p1=None, p2=None, p3=None, hosted_colls=None, f=None,
colls_to_search=None, hosted_colls_actual_or_potential_results_p=None,
verbose=None, **dummy):
hosted_colls_results = hosted_colls_timeouts = hosted_colls_true_results = None
# search into the hosted collections only if the output format is html or xml
if hosted_colls and (of.startswith("h") or of.startswith("x")) and not p.startswith("recid:"):
# hosted_colls_results : the hosted collections' searches that did not timeout
# hosted_colls_timeouts : the hosted collections' searches that timed out and will be searched later on again
(hosted_colls_results, hosted_colls_timeouts) = calculate_hosted_collections_results(req, [p, p1, p2, p3], f, hosted_colls, verbose, ln, CFG_HOSTED_COLLECTION_TIMEOUT_ANTE_SEARCH)
# successful searches
if hosted_colls_results:
hosted_colls_true_results = []
for result in hosted_colls_results:
# if the number of results is None or 0 (or False) then just do nothing
if result[1] is None or result[1] is False:
# these are the searches that returned no results or zero results
if verbose:
write_warning("Hosted collections (perform_search_request): %s returned no results" % result[0][1].name, req=req)
else:
# these are the searches that actually returned results on time
hosted_colls_true_results.append(result)
if verbose:
write_warning("Hosted collections (perform_search_request): %s returned %s results in %s seconds" % (result[0][1].name, result[1], result[2]), req=req)
else:
if verbose:
write_warning("Hosted collections (perform_search_request): there were no hosted collections results to be printed at this time", req=req)
if hosted_colls_timeouts:
if verbose:
for timeout in hosted_colls_timeouts:
write_warning("Hosted collections (perform_search_request): %s timed out and will be searched again later" % timeout[0][1].name, req=req)
# we need to know later whether there were any hosted collections to be searched, even if in the end they were not searched
elif hosted_colls and ((not (of.startswith("h") or of.startswith("x"))) or p.startswith("recid:")):
(hosted_colls_results, hosted_colls_timeouts) = (None, None)
else:
if verbose:
write_warning("Hosted collections (perform_search_request): there were no hosted collections to be searched", req=req)
## let's define some useful boolean variables:
# True means there are actual or potential hosted collections results to be printed
kwargs['hosted_colls_actual_or_potential_results_p'] = not (not hosted_colls or not ((hosted_colls_results and hosted_colls_true_results) or hosted_colls_timeouts))
# True means there are hosted collections timeouts to take care of later
# (useful for more accurate printing of results later)
kwargs['hosted_colls_potential_results_p'] = not (not hosted_colls or not hosted_colls_timeouts)
# True means we only have hosted collections to deal with
kwargs['only_hosted_colls_actual_or_potential_results_p'] = not colls_to_search and hosted_colls_actual_or_potential_results_p
kwargs['hosted_colls_results'] = hosted_colls_results
kwargs['hosted_colls_timeouts'] = hosted_colls_timeouts
kwargs['hosted_colls_true_results'] = hosted_colls_true_results
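# --- Illustrative sketch (not part of the original Invenio module) -----------
# The double-negated expressions above reduce to plain truth tests; this is an
# assumed, behaviour-preserving rewrite added only to make the flags readable,
# not code taken from upstream.
def _example_hosted_colls_flags(hosted_colls, hosted_colls_results,
                                hosted_colls_true_results, hosted_colls_timeouts):
    # "actual or potential" results: some hosted collections were selected and
    # either real results came back in time or some searches timed out.
    actual_or_potential = bool(hosted_colls and
                               ((hosted_colls_results and hosted_colls_true_results)
                                or hosted_colls_timeouts))
    # "potential only": hosted collections were selected and some timed out.
    potential_only = bool(hosted_colls and hosted_colls_timeouts)
    return actual_or_potential, potential_only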
def prs_advanced_search(results_in_any_collection, kwargs=None, req=None, of=None,
cc=None, ln=None, _=None, p=None, p1=None, p2=None, p3=None,
f=None, f1=None, m1=None, op1=None, f2=None, m2=None,
op2=None, f3=None, m3=None, ap=None, ec=None,
selected_external_collections_infos=None, verbose=None,
wl=None, em=None, **dummy):
len_results_p1 = 0
len_results_p2 = 0
len_results_p3 = 0
try:
results_in_any_collection.union_update(search_pattern_parenthesised(req, p1, f1, m1, ap=ap, of=of, verbose=verbose, ln=ln, wl=wl))
len_results_p1 = len(results_in_any_collection)
if len_results_p1 == 0:
if of.startswith("h"):
perform_external_collection_search_with_em(req, cc, [p, p1, p2, p3], f, ec,
verbose, ln, selected_external_collections_infos, em=em)
elif of.startswith("x"):
# Print empty, but valid XML
print_records_prologue(req, of)
print_records_epilogue(req, of)
return page_end(req, of, ln, em)
if p2:
results_tmp = search_pattern_parenthesised(req, p2, f2, m2, ap=ap, of=of, verbose=verbose, ln=ln, wl=wl)
len_results_p2 = len(results_tmp)
if op1 == "a": # add
results_in_any_collection.intersection_update(results_tmp)
elif op1 == "o": # or
results_in_any_collection.union_update(results_tmp)
elif op1 == "n": # not
results_in_any_collection.difference_update(results_tmp)
else:
if of.startswith("h"):
write_warning("Invalid set operation %s." % cgi.escape(op1), "Error", req=req)
if len(results_in_any_collection) == 0:
if of.startswith("h"):
if len_results_p2:
#each individual query returned results, but the boolean operation did not
nearestterms = []
nearest_search_args = req.argd.copy()
if p1:
nearestterms.append((p1, len_results_p1, clean_dictionary(nearest_search_args, ['p2', 'f2', 'm2', 'p3', 'f3', 'm3'])))
nearestterms.append((p2, len_results_p2, clean_dictionary(nearest_search_args, ['p1', 'f1', 'm1', 'p3', 'f3', 'm3'])))
write_warning(websearch_templates.tmpl_search_no_boolean_hits(ln=ln, nearestterms=nearestterms), req=req)
perform_external_collection_search_with_em(req, cc, [p, p1, p2, p3], f, ec, verbose,
ln, selected_external_collections_infos, em=em)
elif of.startswith("x"):
# Print empty, but valid XML
print_records_prologue(req, of)
print_records_epilogue(req, of)
if p3:
results_tmp = search_pattern_parenthesised(req, p3, f3, m3, ap=ap, of=of, verbose=verbose, ln=ln, wl=wl)
len_results_p3 = len(results_tmp)
if op2 == "a": # add
results_in_any_collection.intersection_update(results_tmp)
elif op2 == "o": # or
results_in_any_collection.union_update(results_tmp)
elif op2 == "n": # not
results_in_any_collection.difference_update(results_tmp)
else:
if of.startswith("h"):
write_warning("Invalid set operation %s." % cgi.escape(op2), "Error", req=req)
if len(results_in_any_collection) == 0 and len_results_p3 and of.startswith("h"):
#each individual query returned results but the boolean operation did not
nearestterms = []
nearest_search_args = req.argd.copy()
if p1:
nearestterms.append((p1, len_results_p1, clean_dictionary(nearest_search_args, ['p2', 'f2', 'm2', 'p3', 'f3', 'm3'])))
if p2:
nearestterms.append((p2, len_results_p2, clean_dictionary(nearest_search_args, ['p1', 'f1', 'm1', 'p3', 'f3', 'm3'])))
nearestterms.append((p3, len_results_p3, clean_dictionary(nearest_search_args, ['p1', 'f1', 'm1', 'p2', 'f2', 'm2'])))
write_warning(websearch_templates.tmpl_search_no_boolean_hits(ln=ln, nearestterms=nearestterms), req=req)
except KeyboardInterrupt:
# This happens usually from the command line
# The error handling we want is different
raise
except:
register_exception(req=req, alert_admin=True)
if of.startswith("h"):
req.write(create_error_box(req, verbose=verbose, ln=ln))
perform_external_collection_search_with_em(req, cc, [p, p1, p2, p3], f, ec, verbose,
ln, selected_external_collections_infos, em=em)
elif of.startswith("x"):
# Print empty, but valid XML
print_records_prologue(req, of)
print_records_epilogue(req, of)
return page_end(req, of, ln, em)
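# --- Illustrative sketch (not part of the original Invenio module) -----------
# How the advanced-search operators map onto the intbitset operations used
# above ("a" = and/intersection, "o" = or/union, "n" = and-not/difference).
# The record IDs are made up for illustration.
def _example_boolean_ops():
    hits_p1 = intbitset([1, 2, 3])
    hits_p2 = intbitset([2, 3, 4])
    anded = intbitset(hits_p1)
    anded.intersection_update(hits_p2)    # op == "a" -> intbitset([2, 3])
    ored = intbitset(hits_p1)
    ored.union_update(hits_p2)            # op == "o" -> intbitset([1, 2, 3, 4])
    notted = intbitset(hits_p1)
    notted.difference_update(hits_p2)     # op == "n" -> intbitset([1])
    return anded, ored, notted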
def prs_simple_search(results_in_any_collection, kwargs=None, req=None, of=None, cc=None, ln=None, p=None, f=None,
p1=None, p2=None, p3=None, ec=None, verbose=None, selected_external_collections_infos=None,
only_hosted_colls_actual_or_potential_results_p=None, query_representation_in_cache=None,
ap=None, hosted_colls_actual_or_potential_results_p=None, wl=None, em=None,
**dummy):
try:
results_in_cache = intbitset().fastload(
search_results_cache.get(query_representation_in_cache))
except:
results_in_cache = None
if results_in_cache is not None:
# the query was found in the cache, so reuse the cached results:
results_in_any_collection.union_update(results_in_cache)
if verbose and of.startswith("h"):
write_warning("Search stage 0: query found in cache, reusing cached results.", req=req)
else:
try:
# added the display_nearest_terms_box parameter to avoid printing out the "Nearest terms in any collection"
# recommendations when there are results only in the hosted collections. Also added the if clause to avoid
# searching in case we know we only have actual or potential hosted collections results
if not only_hosted_colls_actual_or_potential_results_p:
results_in_any_collection.union_update(search_pattern_parenthesised(req, p, f, ap=ap, of=of, verbose=verbose, ln=ln,
display_nearest_terms_box=not hosted_colls_actual_or_potential_results_p,
wl=wl))
except:
register_exception(req=req, alert_admin=True)
if of.startswith("h"):
req.write(create_error_box(req, verbose=verbose, ln=ln))
perform_external_collection_search_with_em(req, cc, [p, p1, p2, p3], f, ec, verbose,
ln, selected_external_collections_infos, em=em)
return page_end(req, of, ln, em)
def prs_intersect_results_with_collrecs(results_final, results_in_any_collection,
kwargs=None, colls_to_search=None,
req=None, of=None, ln=None,
cc=None, p=None, p1=None, p2=None, p3=None, f=None,
ec=None, verbose=None, selected_external_collections_infos=None,
em=None, **dummy):
display_nearest_terms_box = not kwargs['hosted_colls_actual_or_potential_results_p']
try:
# added the display_nearest_terms_box parameter to avoid printing out the "Nearest terms in any collection"
# recommendations when there are results only in the hosted collections. Also added the if clause to avoid
# searching in case we know since the last stage that we have no results in any collection
if len(results_in_any_collection) != 0:
results_final.update(intersect_results_with_collrecs(req, results_in_any_collection, colls_to_search, of,
verbose, ln, display_nearest_terms_box=display_nearest_terms_box))
except:
register_exception(req=req, alert_admin=True)
if of.startswith("h"):
req.write(create_error_box(req, verbose=verbose, ln=ln))
perform_external_collection_search_with_em(req, cc, [p, p1, p2, p3], f, ec, verbose,
ln, selected_external_collections_infos, em=em)
return page_end(req, of, ln, em)
def prs_store_results_in_cache(query_representation_in_cache, results_in_any_collection, req=None, verbose=None, of=None, **dummy):
if CFG_WEBSEARCH_SEARCH_CACHE_SIZE > 0:
search_results_cache.set(query_representation_in_cache,
results_in_any_collection.fastdump(),
timeout=CFG_WEBSEARCH_SEARCH_CACHE_TIMEOUT)
search_results_cache.set(query_representation_in_cache + '::cc',
dummy.get('cc', CFG_SITE_NAME),
timeout=CFG_WEBSEARCH_SEARCH_CACHE_TIMEOUT)
if req:
from flask import request
req = request
search_results_cache.set(query_representation_in_cache + '::p',
req.values.get('p', ''),
timeout=CFG_WEBSEARCH_SEARCH_CACHE_TIMEOUT)
if verbose and of.startswith("h"):
write_warning("Search stage 3: storing query results in cache.", req=req)
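# --- Illustrative sketch (not part of the original Invenio module) -----------
# The cache stores the hit set as an opaque fastdump() blob; reading it back
# with fastload() (as done in prs_simple_search above) restores the same set.
# The record IDs are made up for illustration.
def _example_cache_roundtrip():
    hits = intbitset([11, 17, 42])
    blob = hits.fastdump()                 # serialized blob suitable for the cache
    restored = intbitset().fastload(blob)  # fastload() returns the populated set
    return list(restored)                  # [11, 17, 42]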
def prs_apply_search_limits(results_final, kwargs=None, req=None, of=None, cc=None, ln=None, _=None,
p=None, p1=None, p2=None, p3=None, f=None, pl=None, ap=None, dt=None,
ec=None, selected_external_collections_infos=None,
hosted_colls_actual_or_potential_results_p=None,
datetext1=None, datetext2=None, verbose=None, wl=None, em=None,
**dummy):
if datetext1 != "" and results_final != {}:
if verbose and of.startswith("h"):
write_warning("Search stage 5: applying time etc limits, from %s until %s..." % (datetext1, datetext2), req=req)
try:
results_temp = intersect_results_with_hitset(
req,
results_final,
search_unit_in_bibrec(datetext1, datetext2, dt),
ap,
aptext= _("No match within your time limits, "
"discarding this condition..."),
of=of)
if results_temp:
results_final.update(results_temp)
else:
results_final.clear()
except:
register_exception(req=req, alert_admin=True)
if of.startswith("h"):
req.write(create_error_box(req, verbose=verbose, ln=ln))
perform_external_collection_search_with_em(req, cc, [p, p1, p2, p3], f, ec, verbose,
ln, selected_external_collections_infos, em=em)
return page_end(req, of, ln, em)
if results_final == {} and not hosted_colls_actual_or_potential_results_p:
if of.startswith("h"):
perform_external_collection_search_with_em(req, cc, [p, p1, p2, p3], f, ec, verbose,
ln, selected_external_collections_infos, em=em)
#if of.startswith("x"):
# # Print empty, but valid XML
# print_records_prologue(req, of)
# print_records_epilogue(req, of)
return page_end(req, of, ln, em)
if pl and results_final != {}:
pl = wash_pattern(pl)
if verbose and of.startswith("h"):
write_warning("Search stage 5: applying search pattern limit %s..." % cgi.escape(pl), req=req)
try:
results_temp = intersect_results_with_hitset(
req,
results_final,
search_pattern_parenthesised(req, pl, ap=0, ln=ln, wl=wl),
ap,
aptext=_("No match within your search limits, "
"discarding this condition..."),
of=of)
if results_temp:
results_final.update(results_temp)
else:
results_final.clear()
except:
register_exception(req=req, alert_admin=True)
if of.startswith("h"):
req.write(create_error_box(req, verbose=verbose, ln=ln))
perform_external_collection_search_with_em(req, cc, [p, p1, p2, p3], f, ec, verbose,
ln, selected_external_collections_infos, em=em)
return page_end(req, of, ln, em)
if results_final == {} and not hosted_colls_actual_or_potential_results_p:
if of.startswith("h"):
perform_external_collection_search_with_em(req, cc, [p, p1, p2, p3], f, ec, verbose,
ln, selected_external_collections_infos, em=em)
if of.startswith("x"):
# Print empty, but valid XML
print_records_prologue(req, of)
print_records_epilogue(req, of)
return page_end(req, of, ln, em)
def prs_split_into_collections(kwargs=None, results_final=None, colls_to_search=None, hosted_colls_results=None,
cpu_time=0, results_final_nb_total=None, hosted_colls_actual_or_potential_results_p=None,
hosted_colls_true_results=None, hosted_colls_timeouts=None, **dummy):
results_final_nb_total = 0
results_final_nb = {} # will hold number of records found in each collection
# (in simple dict to display overview more easily)
for coll in results_final.keys():
results_final_nb[coll] = len(results_final[coll])
#results_final_nb_total += results_final_nb[coll]
# Now let us calculate results_final_nb_total more precisely,
# in order to get the total number of "distinct" hits across
# searched collections; this is useful because a record might
# have been attributed to more than one primary collection; so
# we have to avoid counting it multiple times. The price to
# pay for this accuracy of results_final_nb_total is somewhat
# increased CPU time.
if len(results_final) == 1:
# only one collection; no need to union them
results_final_for_all_selected_colls = results_final.values()[0]
results_final_nb_total = results_final_nb.values()[0]
else:
# okay, some work ahead to union hits across collections:
results_final_for_all_selected_colls = intbitset()
for coll in results_final.keys():
results_final_for_all_selected_colls.union_update(results_final[coll])
results_final_nb_total = len(results_final_for_all_selected_colls)
#if hosted_colls and (of.startswith("h") or of.startswith("x")):
if hosted_colls_actual_or_potential_results_p:
if hosted_colls_results:
for result in hosted_colls_true_results:
colls_to_search.append(result[0][1].name)
results_final_nb[result[0][1].name] = result[1]
results_final_nb_total += result[1]
cpu_time += result[2]
if hosted_colls_timeouts:
for timeout in hosted_colls_timeouts:
colls_to_search.append(timeout[1].name)
# use -963 as a special number to identify the collections that timed out
results_final_nb[timeout[1].name] = -963
kwargs['results_final_nb'] = results_final_nb
kwargs['results_final_nb_total'] = results_final_nb_total
kwargs['results_final_for_all_selected_colls'] = results_final_for_all_selected_colls
kwargs['cpu_time'] = cpu_time #rca TODO: check where the cpu_time is used, this line was missing
return (results_final_nb, results_final_nb_total, results_final_for_all_selected_colls)
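# --- Illustrative sketch (not part of the original Invenio module) -----------
# Why the total above is computed from the union rather than by summing the
# per-collection counts: a record attributed to more than one primary
# collection must only be counted once. Collection names and record IDs are
# made up for illustration.
def _example_distinct_total():
    results_final = {'Articles': intbitset([1, 2, 3]),
                     'Preprints': intbitset([3, 4])}
    union = intbitset()
    for hits in results_final.values():
        union.union_update(hits)
    return len(union)   # 4, not 5: record 3 is counted once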
def prs_summarize_records(kwargs=None, req=None, p=None, f=None, aas=None,
p1=None, p2=None, p3=None, f1=None, f2=None, f3=None, op1=None, op2=None,
ln=None, results_final_for_all_selected_colls=None, of='hcs', **dummy):
# feed the current search to be summarized:
from invenio.legacy.search_engine.summarizer import summarize_records
search_p = p
search_f = f
if not p and (aas == 1 or p1 or p2 or p3):
op_d = {'n': ' and not ', 'a': ' and ', 'o': ' or ', '': ''}
triples = ziplist([f1, f2, f3], [p1, p2, p3], [op1, op2, ''])
triples_len = len(triples)
for i in range(triples_len):
fi, pi, oi = triples[i] # e.g. ['author', 'ellis', 'a']
if i < triples_len-1 and not triples[i+1][1]: # if p2 empty
triples[i+1][0] = '' # f2 must be too
oi = '' # and o1
if ' ' in pi:
pi = '"'+pi+'"'
if fi:
fi = fi + ':'
search_p += fi + pi + op_d[oi]
search_f = ''
summarize_records(results_final_for_all_selected_colls, of, ln, search_p, search_f, req)
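# --- Illustrative sketch (not part of the original Invenio module) -----------
# Shape of the pattern string rebuilt above from the advanced-search boxes:
# fields are prefixed with ':' and multi-word patterns are quoted. The field
# names and patterns below are made up for illustration.
def _example_summary_pattern():
    op_d = {'n': ' and not ', 'a': ' and ', 'o': ' or ', '': ''}
    search_p = ''
    for fi, pi, oi in [['author', 'ellis', 'a'], ['title', 'muon decay', '']]:
        if ' ' in pi:
            pi = '"' + pi + '"'
        if fi:
            fi = fi + ':'
        search_p += fi + pi + op_d[oi]
    return search_p   # 'author:ellis and title:"muon decay"'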
def prs_print_records(kwargs=None, results_final=None, req=None, of=None, cc=None, pl_in_url=None,
ln=None, _=None, p=None, p1=None, p2=None, p3=None, f=None, rg=None, sf=None,
so=None, sp=None, rm=None, ot=None, aas=None, f1=None, m1=None, op1=None,
f2=None, m2=None, op2=None, f3=None, m3=None, sc=None, d1y=None, d1m=None,
d1d=None, d2y=None, d2m=None, d2d=None, dt=None, jrec=None, colls_to_search=None,
hosted_colls_actual_or_potential_results_p=None, hosted_colls_results=None,
hosted_colls_true_results=None, hosted_colls_timeouts=None, results_final_nb=None,
cpu_time=None, verbose=None, em=None, **dummy):
if len(colls_to_search) > 1:
cpu_time = -1 # we do not want to have search time printed on each collection
print_records_prologue(req, of, cc=cc)
results_final_colls = []
wlqh_results_overlimit = 0
for coll in colls_to_search:
if coll in results_final and len(results_final[coll]):
if of.startswith("h"):
req.write(print_search_info(p, f, sf, so, sp, rm, of, ot, coll, results_final_nb[coll],
jrec, rg, aas, ln, p1, p2, p3, f1, f2, f3, m1, m2, m3, op1, op2,
sc, pl_in_url,
d1y, d1m, d1d, d2y, d2m, d2d, dt, cpu_time, em=em))
results_final_recIDs = list(results_final[coll])
results_final_nb_found = len(results_final_recIDs)
results_final_relevances = []
results_final_relevances_prologue = ""
results_final_relevances_epilogue = ""
if rm: # do we have to rank?
results_final_recIDs_ranked, results_final_relevances, results_final_relevances_prologue, results_final_relevances_epilogue, results_final_comments = \
rank_records(req, rm, 0, results_final[coll],
string.split(p) + string.split(p1) +
string.split(p2) + string.split(p3), verbose, so, of, ln, rg, jrec, kwargs['f'])
if of.startswith("h"):
write_warning(results_final_comments, req=req)
if results_final_recIDs_ranked:
results_final_recIDs = results_final_recIDs_ranked
else:
# rank_records failed and returned some error message to display:
write_warning(results_final_relevances_prologue, req=req)
write_warning(results_final_relevances_epilogue, req=req)
else:
results_final_recIDs = sort_records(req, results_final_recIDs, sf, so, sp, verbose, of, ln, rg, jrec)
if len(results_final_recIDs) < CFG_WEBSEARCH_PREV_NEXT_HIT_LIMIT:
results_final_colls.append(results_final_recIDs)
else:
wlqh_results_overlimit = 1
print_records(req, results_final_recIDs, jrec, rg, of, ot, ln,
results_final_relevances,
results_final_relevances_prologue,
results_final_relevances_epilogue,
search_pattern=p,
print_records_prologue_p=False,
print_records_epilogue_p=False,
verbose=verbose,
sf=sf,
so=so,
sp=sp,
rm=rm,
em=em,
nb_found=results_final_nb_found)
if of.startswith("h"):
req.write(print_search_info(p, f, sf, so, sp, rm, of, ot, coll, results_final_nb[coll],
jrec, rg, aas, ln, p1, p2, p3, f1, f2, f3, m1, m2, m3, op1, op2,
sc, pl_in_url,
d1y, d1m, d1d, d2y, d2m, d2d, dt, cpu_time, 1, em=em))
if req and not isinstance(req, cStringIO.OutputType):
# store the last search results page
session_param_set(req, 'websearch-last-query', req.unparsed_uri)
if wlqh_results_overlimit:
results_final_colls = None
# store list of results if user wants to display hits
# in a single list, or store list of collections of records
# if user displays hits split by collections:
session_param_set(req, 'websearch-last-query-hits', results_final_colls)
#if hosted_colls and (of.startswith("h") or of.startswith("x")):
if hosted_colls_actual_or_potential_results_p:
if hosted_colls_results:
# TODO: add a verbose message here
for result in hosted_colls_true_results:
if of.startswith("h"):
req.write(print_hosted_search_info(p, f, sf, so, sp, rm, of, ot, result[0][1].name, results_final_nb[result[0][1].name],
jrec, rg, aas, ln, p1, p2, p3, f1, f2, f3, m1, m2, m3, op1, op2,
sc, pl_in_url,
d1y, d1m, d1d, d2y, d2m, d2d, dt, cpu_time, em=em))
req.write(print_hosted_results(url_and_engine=result[0], ln=ln, of=of, req=req, limit=rg, em=em))
if of.startswith("h"):
req.write(print_hosted_search_info(p, f, sf, so, sp, rm, of, ot, result[0][1].name, results_final_nb[result[0][1].name],
jrec, rg, aas, ln, p1, p2, p3, f1, f2, f3, m1, m2, m3, op1, op2,
sc, pl_in_url,
d1y, d1m, d1d, d2y, d2m, d2d, dt, cpu_time, 1))
if hosted_colls_timeouts:
# TODO: add a verbose message here
# TODO: check if verbose messages still work when dealing with (re)calculations of timeouts
(hosted_colls_timeouts_results, hosted_colls_timeouts_timeouts) = do_calculate_hosted_collections_results(req, ln, None, verbose, None, hosted_colls_timeouts, CFG_HOSTED_COLLECTION_TIMEOUT_POST_SEARCH)
if hosted_colls_timeouts_results:
for result in hosted_colls_timeouts_results:
if result[1] is None or result[1] is False:
## these are the searches that returned no results or zero results
## also print a nearest terms box, in case this is the only
## collection being searched and it returns no results?
if of.startswith("h"):
req.write(print_hosted_search_info(p, f, sf, so, sp, rm, of, ot, result[0][1].name, -963,
jrec, rg, aas, ln, p1, p2, p3, f1, f2, f3, m1, m2, m3, op1, op2,
sc, pl_in_url,
d1y, d1m, d1d, d2y, d2m, d2d, dt, cpu_time))
req.write(print_hosted_results(url_and_engine=result[0], ln=ln, of=of, req=req, no_records_found=True, limit=rg, em=em))
req.write(print_hosted_search_info(p, f, sf, so, sp, rm, of, ot, result[0][1].name, -963,
jrec, rg, aas, ln, p1, p2, p3, f1, f2, f3, m1, m2, m3, op1, op2,
sc, pl_in_url,
d1y, d1m, d1d, d2y, d2m, d2d, dt, cpu_time, 1))
else:
# these are the searches that actually returned results on time
if of.startswith("h"):
req.write(print_hosted_search_info(p, f, sf, so, sp, rm, of, ot, result[0][1].name, result[1],
jrec, rg, aas, ln, p1, p2, p3, f1, f2, f3, m1, m2, m3, op1, op2,
sc, pl_in_url,
d1y, d1m, d1d, d2y, d2m, d2d, dt, cpu_time))
req.write(print_hosted_results(url_and_engine=result[0], ln=ln, of=of, req=req, limit=rg, em=em))
if of.startswith("h"):
req.write(print_hosted_search_info(p, f, sf, so, sp, rm, of, ot, result[0][1].name, result[1],
jrec, rg, aas, ln, p1, p2, p3, f1, f2, f3, m1, m2, m3, op1, op2,
sc, pl_in_url,
d1y, d1m, d1d, d2y, d2m, d2d, dt, cpu_time, 1))
if hosted_colls_timeouts_timeouts:
for timeout in hosted_colls_timeouts_timeouts:
if of.startswith("h"):
req.write(print_hosted_search_info(p, f, sf, so, sp, rm, of, ot, timeout[1].name, -963,
jrec, rg, aas, ln, p1, p2, p3, f1, f2, f3, m1, m2, m3, op1, op2,
sc, pl_in_url,
d1y, d1m, d1d, d2y, d2m, d2d, dt, cpu_time))
req.write(print_hosted_results(url_and_engine=timeout[0], ln=ln, of=of, req=req, search_timed_out=True, limit=rg, em=em))
req.write(print_hosted_search_info(p, f, sf, so, sp, rm, of, ot, timeout[1].name, -963,
jrec, rg, aas, ln, p1, p2, p3, f1, f2, f3, m1, m2, m3, op1, op2,
sc, pl_in_url,
d1y, d1m, d1d, d2y, d2m, d2d, dt, cpu_time, 1))
print_records_epilogue(req, of)
if f == "author" and of.startswith("h"):
req.write(create_similarly_named_authors_link_box(p, ln))
def prs_log_query(kwargs=None, req=None, uid=None, of=None, ln=None, p=None, f=None,
colls_to_search=None, results_final_nb_total=None, em=None, **dummy):
# FIXME move query logging to signal receiver
# log query:
try:
from flask.ext.login import current_user
if req:
from flask import request
req = request
id_query = log_query(req.host,
'&'.join(map(lambda (k,v): k+'='+v, request.values.iteritems(multi=True))),
uid)
#id_query = log_query(req.remote_host, req.args, uid)
#of = request.values.get('of', 'hb')
if of.startswith("h") and id_query and (em == '' or EM_REPOSITORY["alert"] in em):
if not of in ['hcs', 'hcs2']:
# display alert/RSS teaser for non-summary formats:
display_email_alert_part = True
if current_user:
if current_user['email'] == 'guest':
if CFG_ACCESS_CONTROL_LEVEL_ACCOUNTS > 4:
display_email_alert_part = False
else:
if not current_user['precached_usealerts']:
display_email_alert_part = False
from flask import flash
flash(websearch_templates.tmpl_alert_rss_teaser_box_for_query(id_query, \
ln=ln, display_email_alert_part=display_email_alert_part), 'search-results-after')
except:
# do not log query if req is None (used by CLI interface)
pass
log_query_info("ss", p, f, colls_to_search, results_final_nb_total)
def prs_search_common(kwargs=None, req=None, of=None, cc=None, ln=None, uid=None, _=None, p=None,
p1=None, p2=None, p3=None, colls_to_display=None, f=None, rg=None, sf=None,
so=None, sp=None, rm=None, ot=None, aas=None, f1=None, m1=None, op1=None,
f2=None, m2=None, op2=None, f3=None, m3=None, sc=None, pl=None,
d1y=None, d1m=None, d1d=None, d2y=None, d2m=None, d2d=None,
dt=None, jrec=None, ec=None, action=None, colls_to_search=None, wash_colls_debug=None,
verbose=None, wl=None, em=None, **dummy):
query_representation_in_cache = get_search_results_cache_key(**kwargs)
page_start(req, of, cc, aas, ln, uid, p=create_page_title_search_pattern_info(p, p1, p2, p3), em=em)
if of.startswith("h") and verbose and wash_colls_debug:
write_warning("wash_colls debugging info : %s" % wash_colls_debug, req=req)
prs_search_hosted_collections(kwargs=kwargs, **kwargs)
if of.startswith("h"):
req.write(create_search_box(cc, colls_to_display, p, f, rg, sf, so, sp, rm, of, ot, aas, ln, p1, f1, m1, op1,
p2, f2, m2, op2, p3, f3, m3, sc, pl, d1y, d1m, d1d, d2y, d2m, d2d, dt, jrec, ec, action,
em
))
# WebSearch services
if jrec <= 1 and \
(em == "" or EM_REPOSITORY["search_services"] in em):
user_info = collect_user_info(req)
# display only on first search page, and only if wanted
# when 'em' param set.
for answer_relevance, answer_html in services.get_answers(
req, user_info, of, cc, colls_to_search, p, f, ln):
req.write('<div class="searchservicebox">')
req.write(answer_html)
if verbose > 8:
write_warning("Service relevance: %i" % answer_relevance, req=req)
req.write('</div>')
t1 = os.times()[4]
results_in_any_collection = intbitset()
if aas == 2 and not (p2 or p3):
## 3A add-to-search
output = prs_simple_search(results_in_any_collection, kwargs=kwargs, **kwargs)
if output is not None:
return output
elif aas == 1 or (p1 or p2 or p3):
## 3B - advanced search
output = prs_advanced_search(results_in_any_collection, kwargs=kwargs, **kwargs)
if output is not None:
return output
else:
## 3C - simple search
output = prs_simple_search(results_in_any_collection, kwargs=kwargs, **kwargs)
if output is not None:
return output
if len(results_in_any_collection) == 0 and not kwargs['hosted_colls_actual_or_potential_results_p']:
if of.startswith("x"):
# Print empty, but valid XML
print_records_prologue(req, of)
print_records_epilogue(req, of)
return None
# store this search query results into search results cache if needed:
prs_store_results_in_cache(query_representation_in_cache, results_in_any_collection, **kwargs)
# search stage 4 and 5: intersection with collection universe and sorting/limiting
try:
output = prs_intersect_with_colls_and_apply_search_limits(results_in_any_collection, kwargs=kwargs, **kwargs)
if output is not None:
return output
except KeyboardInterrupt:
# This happens usually from the command line
# The error handling we want is different
raise
except: # no results to display
return None
t2 = os.times()[4]
cpu_time = t2 - t1
kwargs['cpu_time'] = cpu_time
## search stage 6: display results:
return prs_display_results(kwargs=kwargs, **kwargs)
def prs_intersect_with_colls_and_apply_search_limits(results_in_any_collection,
kwargs=None, req=None, of=None,
**dummy):
# search stage 4: intersection with collection universe:
results_final = {}
output = prs_intersect_results_with_collrecs(results_final, results_in_any_collection, kwargs, **kwargs)
if output is not None:
return output
# another external search if we still don't have something
if results_final == {} and not kwargs['hosted_colls_actual_or_potential_results_p']:
if of.startswith("x"):
# Print empty, but valid XML
print_records_prologue(req, of)
print_records_epilogue(req, of)
kwargs['results_final'] = results_final
raise Exception
# search stage 5: apply search option limits and restrictions:
output = prs_apply_search_limits(results_final, kwargs=kwargs, **kwargs)
kwargs['results_final'] = results_final
if output is not None:
return output
def prs_display_results(kwargs=None, results_final=None, req=None, of=None, sf=None,
so=None, sp=None, verbose=None, p=None, p1=None, p2=None, p3=None,
cc=None, ln=None, _=None, ec=None, colls_to_search=None, rm=None, cpu_time=None,
f=None, em=None, jrec=None, rg=None, **dummy
):
## search stage 6: display results:
# split result set into collections
(results_final_nb, results_final_nb_total, results_final_for_all_selected_colls) = prs_split_into_collections(kwargs=kwargs, **kwargs)
# we continue past this point only if there is a hosted collection that has timed out and might offer potential results
if results_final_nb_total == 0 and not kwargs['hosted_colls_potential_results_p']:
if of.startswith("h"):
write_warning("No match found, please enter different search terms.", req=req)
elif of.startswith("x"):
# Print empty, but valid XML
print_records_prologue(req, of)
print_records_epilogue(req, of)
else:
prs_log_query(kwargs=kwargs, **kwargs)
# yes, some hits found: good!
# collection list may have changed due to not-exact-match-found policy so check it out:
for coll in results_final.keys():
if coll not in colls_to_search:
colls_to_search.append(coll)
# print results overview:
if of == "intbitset":
#return the result as an intbitset
return results_final_for_all_selected_colls
elif of == "id":
# we have been asked to return list of recIDs
recIDs = list(results_final_for_all_selected_colls)
if rm: # do we have to rank?
results_final_for_all_colls_rank_records_output = rank_records(req, rm, 0, results_final_for_all_selected_colls,
p.split() + p1.split() +
p2.split() + p3.split(), verbose, so, of, ln, kwargs['rg'], kwargs['jrec'], kwargs['f'])
if results_final_for_all_colls_rank_records_output[0]:
recIDs = results_final_for_all_colls_rank_records_output[0]
elif sf or (CFG_BIBSORT_ENABLED and SORTING_METHODS): # do we have to sort?
recIDs = sort_records(req, recIDs, sf, so, sp, verbose, of, ln)
return slice_records(recIDs, jrec, rg)
elif of.startswith("h"):
if of not in ['hcs', 'hcs2', 'hcv', 'htcv', 'tlcv']:
# added the hosted_colls_potential_results_p parameter to help print out the overview more accurately
req.write(print_results_overview(colls_to_search, results_final_nb_total, results_final_nb, cpu_time,
ln, ec, hosted_colls_potential_results_p=kwargs['hosted_colls_potential_results_p'], em=em))
kwargs['selected_external_collections_infos'] = print_external_results_overview(req, cc, [p, p1, p2, p3],
f, ec, verbose, ln, print_overview=em == "" or EM_REPOSITORY["overview"] in em)
# print number of hits found for XML outputs:
if of.startswith("x") or of == 'mobb':
req.write("<!-- Search-Engine-Total-Number-Of-Results: %s -->\n" % kwargs['results_final_nb_total'])
# print records:
if of in ['hcs', 'hcs2']:
prs_summarize_records(kwargs=kwargs, **kwargs)
elif of in ['hcv', 'htcv', 'tlcv'] and CFG_INSPIRE_SITE:
from invenio.legacy.search_engine.cvifier import cvify_records
cvify_records(results_final_for_all_selected_colls, of, req, so)
else:
prs_print_records(kwargs=kwargs, **kwargs)
# this is a copy of the prs_display_results with output parts removed, needed for external modules
def prs_rank_results(kwargs=None, results_final=None, req=None, colls_to_search=None,
sf=None, so=None, sp=None, of=None, rm=None, p=None, p1=None, p2=None, p3=None,
verbose=None, **dummy
):
## search stage 6: display results:
# split result set into collections
dummy_results_final_nb, dummy_results_final_nb_total, results_final_for_all_selected_colls = prs_split_into_collections(kwargs=kwargs, **kwargs)
# yes, some hits found: good!
# collection list may have changed due to not-exact-match-found policy so check it out:
for coll in results_final.keys():
if coll not in colls_to_search:
colls_to_search.append(coll)
# we have been asked to return list of recIDs
recIDs = list(results_final_for_all_selected_colls)
if rm: # do we have to rank?
results_final_for_all_colls_rank_records_output = rank_records(req, rm, 0, results_final_for_all_selected_colls,
p.split() + p1.split() +
p2.split() + p3.split(), verbose, so, of, field=kwargs['f'])
if results_final_for_all_colls_rank_records_output[0]:
recIDs = results_final_for_all_colls_rank_records_output[0]
elif sf or (CFG_BIBSORT_ENABLED and SORTING_METHODS): # do we have to sort?
recIDs = sort_records(req, recIDs, sf, so, sp, verbose, of)
return recIDs
def perform_request_cache(req, action="show"):
"""Manipulates the search engine cache."""
req.content_type = "text/html"
req.send_http_header()
req.write("<html>")
out = ""
out += "<h1>Search Cache</h1>"
req.write(out)
# show collection reclist cache:
out = "<h3>Collection reclist cache</h3>"
out += "- collection table last updated: %s" % get_table_update_time('collection')
out += "<br />- reclist cache timestamp: %s" % collection_reclist_cache.timestamp
out += "<br />- reclist cache contents:"
out += "<blockquote>"
for coll in collection_reclist_cache.cache.keys():
if collection_reclist_cache.cache[coll]:
out += "%s (%d)<br />" % (coll, len(collection_reclist_cache.cache[coll]))
out += "</blockquote>"
req.write(out)
# show field i18nname cache:
out = "<h3>Field I18N names cache</h3>"
out += "- fieldname table last updated: %s" % get_table_update_time('fieldname')
out += "<br />- i18nname cache timestamp: %s" % field_i18nname_cache.timestamp
out += "<br />- i18nname cache contents:"
out += "<blockquote>"
for field in field_i18nname_cache.cache.keys():
for ln in field_i18nname_cache.cache[field].keys():
out += "%s, %s = %s<br />" % (field, ln, field_i18nname_cache.cache[field][ln])
out += "</blockquote>"
req.write(out)
# show collection i18nname cache:
out = "<h3>Collection I18N names cache</h3>"
out += "- collectionname table last updated: %s" % get_table_update_time('collectionname')
out += "<br />- i18nname cache timestamp: %s" % collection_i18nname_cache.timestamp
out += "<br />- i18nname cache contents:"
out += "<blockquote>"
for coll in collection_i18nname_cache.cache.keys():
for ln in collection_i18nname_cache.cache[coll].keys():
out += "%s, %s = %s<br />" % (coll, ln, collection_i18nname_cache.cache[coll][ln])
out += "</blockquote>"
req.write(out)
req.write("</html>")
return "\n"
def perform_request_log(req, date=""):
"""Display search log information for given date."""
req.content_type = "text/html"
req.send_http_header()
req.write("<html>")
req.write("<h1>Search Log</h1>")
if date: # case A: display stats for a day
yyyymmdd = string.atoi(date)
req.write("<p><big><strong>Date: %d</strong></big><p>" % yyyymmdd)
req.write("""<table border="1">""")
req.write("<tr><td><strong>%s</strong></td><td><strong>%s</strong></td><td><strong>%s</strong></td><td><strong>%s</strong></td><td><strong>%s</strong></td><td><strong>%s</strong></td></tr>" % ("No.", "Time", "Pattern", "Field", "Collection", "Number of Hits"))
# read file:
p = os.popen("grep ^%d %s/search.log" % (yyyymmdd, CFG_LOGDIR), 'r')
lines = p.readlines()
p.close()
# process lines:
i = 0
for line in lines:
try:
datetime, dummy_aas, p, f, c, nbhits = line.split("#")
i += 1
req.write("<tr><td align=\"right\">#%d</td><td>%s:%s:%s</td><td>%s</td><td>%s</td><td>%s</td><td>%s</td></tr>"
% (i, datetime[8:10], datetime[10:12], datetime[12:], p, f, c, nbhits))
except:
pass # ignore any malformed log lines
req.write("</table>")
else: # case B: display summary stats per day
yyyymm01 = int(time.strftime("%Y%m01", time.localtime()))
yyyymmdd = int(time.strftime("%Y%m%d", time.localtime()))
req.write("""<table border="1">""")
req.write("<tr><td><strong>%s</strong></td><td><strong>%s</strong></tr>" % ("Day", "Number of Queries"))
for day in range(yyyymm01, yyyymmdd + 1):
p = os.popen("grep -c ^%d %s/search.log" % (day, CFG_LOGDIR), 'r')
for line in p.readlines():
req.write("""<tr><td>%s</td><td align="right"><a href="%s/search/log?date=%d">%s</a></td></tr>""" %
(day, CFG_SITE_URL, day, line))
p.close()
req.write("</table>")
req.write("</html>")
return "\n"
def get_all_field_values(tag):
"""
Return all existing values stored for a given tag.
@param tag: the full tag, e.g. 909C0b
@type tag: string
@return: the list of values
@rtype: list of strings
"""
table = 'bib%02dx' % int(tag[:2])
return [row[0] for row in run_sql("SELECT DISTINCT(value) FROM %s WHERE tag=%%s" % table, (tag, ))]
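# --- Illustrative sketch (not part of the original Invenio module) -----------
# The first two digits of the MARC tag select the bibXXx table, so for the
# docstring example tag '909C0b' the query above runs against table 'bib90x'
# (sketch only; the actual SQL is issued by run_sql above).
def _example_tag_to_table(tag='909C0b'):
    return 'bib%02dx' % int(tag[:2])   # -> 'bib90x'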
def get_most_popular_field_values(recids, tags, exclude_values=None, count_repetitive_values=True, split_by=0):
"""
Analyze RECIDS and look for TAGS and return most popular values
and the frequency with which they occur sorted according to
descending frequency.
If a value is found in EXCLUDE_VALUES, then do not count it.
If COUNT_REPETITIVE_VALUES is True, then we count every occurrence
of value in the tags. If False, then we count the value only once
regardless of the number of times it may appear in a record.
(But, if the same value occurs in another record, we count it, of
course.)
@return: list of tuples containing tag and its frequency
Example:
>>> get_most_popular_field_values(range(11,20), '980__a')
[('PREPRINT', 10), ('THESIS', 7), ...]
>>> get_most_popular_field_values(range(11,20), ('100__a', '700__a'))
[('Ellis, J', 10), ('Ellis, N', 7), ...]
>>> get_most_popular_field_values(range(11,20), ('100__a', '700__a'), ('Ellis, J'))
[('Ellis, N', 7), ...]
"""
def _get_most_popular_field_values_helper_sorter(val1, val2):
"""Compare VAL1 and VAL2 according to, firstly, frequency, then
secondly, alphabetically."""
compared_via_frequencies = cmp(valuefreqdict[val2],
valuefreqdict[val1])
if compared_via_frequencies == 0:
return cmp(val1.lower(), val2.lower())
else:
return compared_via_frequencies
valuefreqdict = {}
## sanity check:
if not exclude_values:
exclude_values = []
if isinstance(tags, string_types):
tags = (tags,)
## find values to count:
vals_to_count = []
displaytmp = {}
if count_repetitive_values:
# counting technique A: can look up many records at once: (very fast)
for tag in tags:
vals_to_count.extend(get_fieldvalues(recids, tag, sort=False,
split_by=split_by))
else:
# counting technique B: must count record-by-record: (slow)
for recid in recids:
vals_in_rec = []
for tag in tags:
for val in get_fieldvalues(recid, tag, False):
vals_in_rec.append(val)
# do not count repetitive values within this record
# (even across various tags, so need to unify again):
dtmp = {}
for val in vals_in_rec:
dtmp[val.lower()] = 1
displaytmp[val.lower()] = val
vals_in_rec = dtmp.keys()
vals_to_count.extend(vals_in_rec)
## are we to exclude some of the found values?
for val in vals_to_count:
if val not in exclude_values:
if val in valuefreqdict:
valuefreqdict[val] += 1
else:
valuefreqdict[val] = 1
## sort by descending frequency of values:
if not CFG_NUMPY_IMPORTABLE:
## original version
out = []
vals = valuefreqdict.keys()
vals.sort(_get_most_popular_field_values_helper_sorter)
for val in vals:
tmpdisplv = ''
if val in displaytmp:
tmpdisplv = displaytmp[val]
else:
tmpdisplv = val
out.append((tmpdisplv, valuefreqdict[val]))
return out
else:
f = [] # frequencies
n = [] # original names
ln = [] # lowercased names
## build lists within one iteration
for (val, freq) in iteritems(valuefreqdict):
f.append(-1 * freq)
if val in displaytmp:
n.append(displaytmp[val])
else:
n.append(val)
ln.append(val.lower())
## sort by frequency (desc) and then by lowercased name.
return [(n[i], -1 * f[i]) for i in numpy.lexsort([ln, f])]
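# --- Illustrative sketch (not part of the original Invenio module) -----------
# numpy.lexsort sorts by its *last* key first, so the negated frequencies `f`
# are the primary key (ascending -freq == descending freq) and the lowercased
# names `ln` break ties alphabetically. The names and frequencies are made up
# for illustration; this assumes numpy is importable, as checked above.
def _example_lexsort_order():
    f = [-10, -7, -10]                       # negated frequencies
    ln = ['ellis, j', 'ellis, n', 'aaltonen, t']
    order = numpy.lexsort([ln, f])           # -> array([2, 0, 1])
    return [(ln[i], -f[i]) for i in order]   # [('aaltonen, t', 10), ('ellis, j', 10), ('ellis, n', 7)]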
def profile(p="", f="", c=CFG_SITE_NAME):
"""Profile search time."""
import profile as pyprofile
import pstats
pyprofile.run("perform_request_search(p='%s',f='%s', c='%s')" % (p, f, c), "perform_request_search_profile")
p = pstats.Stats("perform_request_search_profile")
p.strip_dirs().sort_stats("cumulative").print_stats()
return 0
def perform_external_collection_search_with_em(req, current_collection, pattern_list, field,
external_collection, verbosity_level=0, lang=CFG_SITE_LANG,
selected_external_collections_infos=None, em=""):
perform_external_collection_search(req, current_collection, pattern_list, field, external_collection,
verbosity_level, lang, selected_external_collections_infos,
print_overview=em == "" or EM_REPOSITORY["overview"] in em,
print_search_info=em == "" or EM_REPOSITORY["search_info"] in em,
print_see_also_box=em == "" or EM_REPOSITORY["see_also_box"] in em,
print_body=em == "" or EM_REPOSITORY["body"] in em)
@cache.memoize(timeout=5)
def get_fulltext_terms_from_search_pattern(search_pattern):
keywords = []
if search_pattern is not None:
for unit in create_basic_search_units(None, search_pattern.encode('utf-8'), None):
bsu_o, bsu_p, bsu_f, bsu_m = unit[0], unit[1], unit[2], unit[3]
if (bsu_o != '-' and bsu_f in [None, 'fulltext']):
if bsu_m == 'a' and bsu_p.startswith('%') and bsu_p.endswith('%'):
# remove leading and trailing '%' representing partial phrase search
keywords.append(bsu_p[1:-1])
else:
keywords.append(bsu_p)
return keywords
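# --- Illustrative sketch (not part of the original Invenio module) -----------
# Each basic search unit is an (operator, pattern, field, match-type) tuple as
# unpacked above; a partial-phrase fulltext unit such as
# ('+', '%quark gluon%', 'fulltext', 'a') would be stripped of its surrounding
# '%' and contribute the keyword 'quark gluon'. The concrete unit is an assumed
# example, not output captured from create_basic_search_units().
def _example_strip_partial_phrase(bsu_p='%quark gluon%'):
    # mirrors the branch above for match type 'a' with a %...% pattern
    return bsu_p[1:-1]   # -> 'quark gluon'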
def check_user_can_edit_record(req, recid):
""" Check if user has authorization to modify a collection
the recid belongs to
"""
record_collections = get_all_collections_of_a_record(recid)
if not record_collections:
# Check if user has access to all collections
auth_code, auth_message = acc_authorize_action(req, 'runbibedit',
collection='')
if auth_code == 0:
return True
else:
for collection in record_collections:
auth_code, auth_message = acc_authorize_action(req, 'runbibedit',
collection=collection)
if auth_code == 0:
return True
return False
| gpl-2.0 |
shubhdev/edx-platform | lms/djangoapps/verify_student/tests/test_views.py | 3 | 87809 | # encoding: utf-8
"""
Tests of verify_student views.
"""
import json
import urllib
from datetime import timedelta, datetime
from uuid import uuid4
import ddt
import httpretty
import mock
import pytz
from bs4 import BeautifulSoup
from mock import patch, Mock, ANY
from django.conf import settings
from django.core.urlresolvers import reverse
from django.core.exceptions import ObjectDoesNotExist
from django.core import mail
from django.test import TestCase
from django.test.client import Client, RequestFactory
from django.test.utils import override_settings
from django.utils import timezone
from opaque_keys.edx.locations import SlashSeparatedCourseKey
from opaque_keys.edx.locator import CourseLocator
from opaque_keys.edx.keys import UsageKey
from course_modes.models import CourseMode
from course_modes.tests.factories import CourseModeFactory
from courseware.url_helpers import get_redirect_url
from commerce.tests import TEST_PAYMENT_DATA, TEST_API_URL, TEST_API_SIGNING_KEY
from embargo.test_utils import restrict_course
from microsite_configuration import microsite
from openedx.core.djangoapps.user_api.accounts.api import get_account_settings
from shoppingcart.models import Order, CertificateItem
from student.tests.factories import UserFactory, CourseEnrollmentFactory
from student.models import CourseEnrollment
from util.date_utils import get_default_time_display
from util.testing import UrlResetMixin
from verify_student.views import (
checkout_with_ecommerce_service,
render_to_response, PayAndVerifyView, EVENT_NAME_USER_ENTERED_INCOURSE_REVERIFY_VIEW,
EVENT_NAME_USER_SUBMITTED_INCOURSE_REVERIFY, _send_email, _compose_message_reverification_email
)
from verify_student.models import (
SoftwareSecurePhotoVerification, VerificationCheckpoint,
InCourseReverificationConfiguration, VerificationStatus
)
from xmodule.modulestore.tests.django_utils import ModuleStoreTestCase
from xmodule.modulestore.tests.factories import CourseFactory, ItemFactory
from xmodule.modulestore.django import modulestore
from xmodule.modulestore import ModuleStoreEnum
from xmodule.modulestore.tests.factories import check_mongo_calls
def mock_render_to_response(*args, **kwargs):
return render_to_response(*args, **kwargs)
render_mock = Mock(side_effect=mock_render_to_response)
PAYMENT_DATA_KEYS = {'payment_processor_name', 'payment_page_url', 'payment_form_data'}
class StartView(TestCase):
def start_url(self, course_id=""):
return "/verify_student/{0}".format(urllib.quote(course_id))
def test_start_new_verification(self):
"""
Test the case where the user has no pending `PhotoVerificationAttempts`,
but is just starting their first.
"""
user = UserFactory.create(username="rusty", password="test")
self.client.login(username="rusty", password="test")
def must_be_logged_in(self):
self.assertHttpForbidden(self.client.get(self.start_url()))
@ddt.ddt
class TestPayAndVerifyView(UrlResetMixin, ModuleStoreTestCase):
"""
Tests for the payment and verification flow views.
"""
MIN_PRICE = 12
USERNAME = "test_user"
PASSWORD = "test_password"
NOW = datetime.now(pytz.UTC)
YESTERDAY = NOW - timedelta(days=1)
TOMORROW = NOW + timedelta(days=1)
@mock.patch.dict(settings.FEATURES, {'EMBARGO': True})
def setUp(self):
super(TestPayAndVerifyView, self).setUp('embargo')
self.user = UserFactory.create(username=self.USERNAME, password=self.PASSWORD)
result = self.client.login(username=self.USERNAME, password=self.PASSWORD)
self.assertTrue(result, msg="Could not log in")
@ddt.data("verified", "professional")
def test_start_flow_not_verified(self, course_mode):
course = self._create_course(course_mode)
self._enroll(course.id, "honor")
response = self._get_page('verify_student_start_flow', course.id)
self._assert_displayed_mode(response, course_mode)
self._assert_steps_displayed(
response,
PayAndVerifyView.PAYMENT_STEPS + PayAndVerifyView.VERIFICATION_STEPS,
PayAndVerifyView.MAKE_PAYMENT_STEP
)
self._assert_messaging(response, PayAndVerifyView.FIRST_TIME_VERIFY_MSG)
self._assert_requirements_displayed(response, [
PayAndVerifyView.PHOTO_ID_REQ,
PayAndVerifyView.WEBCAM_REQ,
])
self._assert_upgrade_session_flag(False)
@ddt.data("no-id-professional")
def test_start_flow_with_no_id_professional(self, course_mode):
course = self._create_course(course_mode)
# by default enrollment is honor
self._enroll(course.id, "honor")
response = self._get_page('verify_student_start_flow', course.id)
self._assert_displayed_mode(response, course_mode)
self._assert_steps_displayed(
response,
PayAndVerifyView.PAYMENT_STEPS,
PayAndVerifyView.MAKE_PAYMENT_STEP
)
self._assert_messaging(response, PayAndVerifyView.FIRST_TIME_VERIFY_MSG)
self._assert_requirements_displayed(response, [])
@ddt.data("expired", "denied")
def test_start_flow_expired_or_denied_verification(self, verification_status):
course = self._create_course("verified")
self._enroll(course.id, "verified")
self._set_verification_status(verification_status)
response = self._get_page('verify_student_start_flow', course.id)
# Expect the same content as when the user has not verified
self._assert_steps_displayed(
response,
[PayAndVerifyView.INTRO_STEP] + PayAndVerifyView.VERIFICATION_STEPS,
PayAndVerifyView.INTRO_STEP
)
self._assert_messaging(response, PayAndVerifyView.FIRST_TIME_VERIFY_MSG)
self._assert_requirements_displayed(response, [
PayAndVerifyView.PHOTO_ID_REQ,
PayAndVerifyView.WEBCAM_REQ,
])
@ddt.data(
("verified", "submitted"),
("verified", "approved"),
("verified", "error"),
("professional", "submitted"),
("no-id-professional", None),
)
@ddt.unpack
def test_start_flow_already_verified(self, course_mode, verification_status):
course = self._create_course(course_mode)
self._enroll(course.id, "honor")
self._set_verification_status(verification_status)
response = self._get_page('verify_student_start_flow', course.id)
self._assert_displayed_mode(response, course_mode)
self._assert_steps_displayed(
response,
PayAndVerifyView.PAYMENT_STEPS,
PayAndVerifyView.MAKE_PAYMENT_STEP
)
self._assert_messaging(response, PayAndVerifyView.FIRST_TIME_VERIFY_MSG)
self._assert_requirements_displayed(response, [])
@ddt.data("verified", "professional")
def test_start_flow_already_paid(self, course_mode):
course = self._create_course(course_mode)
self._enroll(course.id, course_mode)
response = self._get_page('verify_student_start_flow', course.id)
self._assert_displayed_mode(response, course_mode)
self._assert_steps_displayed(
response,
[PayAndVerifyView.INTRO_STEP] + PayAndVerifyView.VERIFICATION_STEPS,
PayAndVerifyView.INTRO_STEP
)
self._assert_messaging(response, PayAndVerifyView.FIRST_TIME_VERIFY_MSG)
self._assert_requirements_displayed(response, [
PayAndVerifyView.PHOTO_ID_REQ,
PayAndVerifyView.WEBCAM_REQ,
])
def test_start_flow_not_enrolled(self):
course = self._create_course("verified")
self._set_verification_status("submitted")
response = self._get_page('verify_student_start_flow', course.id)
# This shouldn't happen if the student has been auto-enrolled,
# but if they somehow end up on this page without enrolling,
# treat them as if they need to pay
response = self._get_page('verify_student_start_flow', course.id)
self._assert_steps_displayed(
response,
PayAndVerifyView.PAYMENT_STEPS,
PayAndVerifyView.MAKE_PAYMENT_STEP
)
self._assert_requirements_displayed(response, [])
def test_start_flow_unenrolled(self):
course = self._create_course("verified")
self._set_verification_status("submitted")
self._enroll(course.id, "verified")
self._unenroll(course.id)
# If unenrolled, treat them like they haven't paid at all
# (we assume that they've gotten a refund or didn't pay initially)
response = self._get_page('verify_student_start_flow', course.id)
self._assert_steps_displayed(
response,
PayAndVerifyView.PAYMENT_STEPS,
PayAndVerifyView.MAKE_PAYMENT_STEP
)
self._assert_requirements_displayed(response, [])
@ddt.data(
("verified", "submitted"),
("verified", "approved"),
("professional", "submitted")
)
@ddt.unpack
def test_start_flow_already_verified_and_paid(self, course_mode, verification_status):
course = self._create_course(course_mode)
self._enroll(course.id, course_mode)
self._set_verification_status(verification_status)
response = self._get_page(
'verify_student_start_flow',
course.id,
expected_status_code=302
)
self._assert_redirects_to_dashboard(response)
def test_verify_now(self):
# We've already paid, and now we're trying to verify
course = self._create_course("verified")
self._enroll(course.id, "verified")
response = self._get_page('verify_student_verify_now', course.id)
self._assert_messaging(response, PayAndVerifyView.VERIFY_NOW_MSG)
# Expect that *all* steps are displayed,
# but we start after the payment step (because it's already completed).
self._assert_steps_displayed(
response,
PayAndVerifyView.PAYMENT_STEPS + PayAndVerifyView.VERIFICATION_STEPS,
PayAndVerifyView.FACE_PHOTO_STEP
)
# These will be hidden from the user anyway since they're starting
# after the payment step.
self._assert_requirements_displayed(response, [
PayAndVerifyView.PHOTO_ID_REQ,
PayAndVerifyView.WEBCAM_REQ,
])
def test_verify_now_already_verified(self):
course = self._create_course("verified")
self._enroll(course.id, "verified")
self._set_verification_status("submitted")
# Already verified, so if we somehow end up here,
# redirect immediately to the dashboard
response = self._get_page(
'verify_student_verify_now',
course.id,
expected_status_code=302
)
self._assert_redirects_to_dashboard(response)
def test_verify_now_user_details(self):
course = self._create_course("verified")
self._enroll(course.id, "verified")
response = self._get_page('verify_student_verify_now', course.id)
self._assert_user_details(response, self.user.profile.name)
@ddt.data(
"verify_student_verify_now",
"verify_student_payment_confirmation"
)
def test_verify_now_not_enrolled(self, page_name):
course = self._create_course("verified")
response = self._get_page(page_name, course.id, expected_status_code=302)
self._assert_redirects_to_start_flow(response, course.id)
@ddt.data(
"verify_student_verify_now",
"verify_student_payment_confirmation"
)
def test_verify_now_unenrolled(self, page_name):
course = self._create_course("verified")
self._enroll(course.id, "verified")
self._unenroll(course.id)
response = self._get_page(page_name, course.id, expected_status_code=302)
self._assert_redirects_to_start_flow(response, course.id)
@ddt.data(
"verify_student_verify_now",
"verify_student_payment_confirmation"
)
def test_verify_now_not_paid(self, page_name):
course = self._create_course("verified")
self._enroll(course.id, "honor")
response = self._get_page(page_name, course.id, expected_status_code=302)
self._assert_redirects_to_upgrade(response, course.id)
def test_verify_later(self):
""" The deprecated verify-later page should redirect to the verification start page. """
course = self._create_course("verified")
course_key = course.id
self._enroll(course_key, "verified")
response = self._get_page("verify_student_verify_later", course_key, expected_status_code=301)
self._assert_redirects_to_verify_start(response, course_key, 301)
def test_payment_confirmation(self):
course = self._create_course("verified")
self._enroll(course.id, "verified")
response = self._get_page('verify_student_payment_confirmation', course.id)
self._assert_messaging(response, PayAndVerifyView.PAYMENT_CONFIRMATION_MSG)
# Expect that *all* steps are displayed,
# but we start at the payment confirmation step
self._assert_steps_displayed(
response,
PayAndVerifyView.PAYMENT_STEPS + PayAndVerifyView.VERIFICATION_STEPS,
PayAndVerifyView.PAYMENT_CONFIRMATION_STEP,
)
# These will be hidden from the user anyway since they're starting
# after the payment step. We're already including the payment
# steps, so it's easier to include these as well.
self._assert_requirements_displayed(response, [
PayAndVerifyView.PHOTO_ID_REQ,
PayAndVerifyView.WEBCAM_REQ,
])
def test_payment_cannot_skip(self):
"""
Simple test to verify that certain steps cannot be skipped. This test sets up
a scenario where the user should be on the MAKE_PAYMENT_STEP, but is trying to
skip it. Despite setting the parameter, the current step should still be
MAKE_PAYMENT_STEP.
"""
course = self._create_course("verified")
response = self._get_page(
'verify_student_start_flow',
course.id,
skip_first_step=True
)
self._assert_messaging(response, PayAndVerifyView.FIRST_TIME_VERIFY_MSG)
# Expect that *all* steps are displayed,
# but we start on the first verify step
self._assert_steps_displayed(
response,
PayAndVerifyView.PAYMENT_STEPS + PayAndVerifyView.VERIFICATION_STEPS,
PayAndVerifyView.MAKE_PAYMENT_STEP,
)
def test_payment_confirmation_already_verified(self):
course = self._create_course("verified")
self._enroll(course.id, "verified")
self._set_verification_status("submitted")
response = self._get_page('verify_student_payment_confirmation', course.id)
# Other pages would redirect to the dashboard at this point,
# because the user has paid and verified. However, we want
# the user to see the confirmation page even if there
# isn't anything for them to do here except return
# to the dashboard.
self._assert_steps_displayed(
response,
PayAndVerifyView.PAYMENT_STEPS,
PayAndVerifyView.PAYMENT_CONFIRMATION_STEP,
)
def test_payment_confirmation_already_verified_skip_first_step(self):
course = self._create_course("verified")
self._enroll(course.id, "verified")
self._set_verification_status("submitted")
response = self._get_page(
'verify_student_payment_confirmation',
course.id,
skip_first_step=True
)
# There are no other steps, so stay on the
# payment confirmation step
self._assert_steps_displayed(
response,
PayAndVerifyView.PAYMENT_STEPS,
PayAndVerifyView.PAYMENT_CONFIRMATION_STEP,
)
@ddt.data(
(YESTERDAY, True),
(TOMORROW, False)
)
@ddt.unpack
def test_payment_confirmation_course_details(self, course_start, show_courseware_url):
course = self._create_course("verified", course_start=course_start)
self._enroll(course.id, "verified")
response = self._get_page('verify_student_payment_confirmation', course.id)
courseware_url = (
reverse("course_root", kwargs={'course_id': unicode(course.id)})
if show_courseware_url else ""
)
self._assert_course_details(
response,
unicode(course.id),
course.display_name,
course.start_datetime_text(),
courseware_url
)
@ddt.data("verified", "professional")
def test_upgrade(self, course_mode):
course = self._create_course(course_mode)
self._enroll(course.id, "honor")
response = self._get_page('verify_student_upgrade_and_verify', course.id)
self._assert_displayed_mode(response, course_mode)
self._assert_steps_displayed(
response,
PayAndVerifyView.PAYMENT_STEPS + PayAndVerifyView.VERIFICATION_STEPS,
PayAndVerifyView.MAKE_PAYMENT_STEP
)
self._assert_messaging(response, PayAndVerifyView.UPGRADE_MSG)
self._assert_requirements_displayed(response, [
PayAndVerifyView.PHOTO_ID_REQ,
PayAndVerifyView.WEBCAM_REQ,
])
self._assert_upgrade_session_flag(True)
def test_upgrade_already_verified(self):
course = self._create_course("verified")
self._enroll(course.id, "honor")
self._set_verification_status("submitted")
response = self._get_page('verify_student_upgrade_and_verify', course.id)
self._assert_steps_displayed(
response,
PayAndVerifyView.PAYMENT_STEPS,
PayAndVerifyView.MAKE_PAYMENT_STEP
)
self._assert_messaging(response, PayAndVerifyView.UPGRADE_MSG)
self._assert_requirements_displayed(response, [])
def test_upgrade_already_paid(self):
course = self._create_course("verified")
self._enroll(course.id, "verified")
# If we've already paid, then the upgrade messaging
# won't make much sense. Redirect them to the
# "verify later" page instead.
response = self._get_page(
'verify_student_upgrade_and_verify',
course.id,
expected_status_code=302
)
self._assert_redirects_to_verify_start(response, course.id)
def test_upgrade_already_verified_and_paid(self):
course = self._create_course("verified")
self._enroll(course.id, "verified")
self._set_verification_status("submitted")
# Already verified and paid, so redirect to the dashboard
response = self._get_page(
'verify_student_upgrade_and_verify',
course.id,
expected_status_code=302
)
self._assert_redirects_to_dashboard(response)
def test_upgrade_not_enrolled(self):
course = self._create_course("verified")
response = self._get_page(
'verify_student_upgrade_and_verify',
course.id,
expected_status_code=302
)
self._assert_redirects_to_start_flow(response, course.id)
def test_upgrade_unenrolled(self):
course = self._create_course("verified")
self._enroll(course.id, "verified")
self._unenroll(course.id)
response = self._get_page(
'verify_student_upgrade_and_verify',
course.id,
expected_status_code=302
)
self._assert_redirects_to_start_flow(response, course.id)
@ddt.data([], ["honor"], ["honor", "audit"])
def test_no_verified_mode_for_course(self, modes_available):
course = self._create_course(*modes_available)
pages = [
'verify_student_start_flow',
'verify_student_verify_now',
'verify_student_upgrade_and_verify',
]
for page_name in pages:
self._get_page(
page_name,
course.id,
expected_status_code=404
)
@ddt.data([], ["no-id-professional", "professional"], ["honor", "audit"])
def test_no_id_professional_entry_point(self, modes_available):
course = self._create_course(*modes_available)
if "no-id-professional" in modes_available or "professional" in modes_available:
self._get_page("verify_student_start_flow", course.id, expected_status_code=200)
else:
self._get_page("verify_student_start_flow", course.id, expected_status_code=404)
@ddt.data(
"verify_student_start_flow",
"verify_student_verify_now",
"verify_student_upgrade_and_verify",
)
def test_require_login(self, url_name):
self.client.logout()
course = self._create_course("verified")
response = self._get_page(url_name, course.id, expected_status_code=302)
original_url = reverse(url_name, kwargs={'course_id': unicode(course.id)})
login_url = u"{login_url}?next={original_url}".format(
login_url=reverse('accounts_login'),
original_url=original_url
)
self.assertRedirects(response, login_url)
@ddt.data(
"verify_student_start_flow",
"verify_student_verify_now",
"verify_student_upgrade_and_verify",
)
def test_no_such_course(self, url_name):
non_existent_course = CourseLocator(course="test", org="test", run="test")
self._get_page(
url_name,
non_existent_course,
expected_status_code=404
)
def test_account_not_active(self):
self.user.is_active = False
self.user.save()
course = self._create_course("verified")
response = self._get_page('verify_student_start_flow', course.id)
self._assert_steps_displayed(
response,
PayAndVerifyView.PAYMENT_STEPS + PayAndVerifyView.VERIFICATION_STEPS,
PayAndVerifyView.MAKE_PAYMENT_STEP
)
self._assert_requirements_displayed(response, [
PayAndVerifyView.ACCOUNT_ACTIVATION_REQ,
PayAndVerifyView.PHOTO_ID_REQ,
PayAndVerifyView.WEBCAM_REQ,
])
def test_no_contribution(self):
# Do NOT specify a contribution for the course in a session var.
course = self._create_course("verified")
response = self._get_page("verify_student_start_flow", course.id)
self._assert_contribution_amount(response, "")
def test_contribution_other_course(self):
# Specify a contribution amount for another course in the session
course = self._create_course("verified")
other_course_id = CourseLocator(org="other", run="test", course="test")
self._set_contribution("12.34", other_course_id)
# Expect that the contribution amount is NOT pre-filled,
response = self._get_page("verify_student_start_flow", course.id)
self._assert_contribution_amount(response, "")
def test_contribution(self):
# Specify a contribution amount for this course in the session
course = self._create_course("verified")
self._set_contribution("12.34", course.id)
# Expect that the contribution amount is pre-filled,
response = self._get_page("verify_student_start_flow", course.id)
self._assert_contribution_amount(response, "12.34")
def test_verification_deadline(self):
# Set a deadline on the course mode
course = self._create_course("verified")
mode = CourseMode.objects.get(
course_id=course.id,
mode_slug="verified"
)
expiration = datetime(2999, 1, 2, tzinfo=pytz.UTC)
mode.expiration_datetime = expiration
mode.save()
# Expect that the expiration date is set
response = self._get_page("verify_student_start_flow", course.id)
data = self._get_page_data(response)
self.assertEqual(data['verification_deadline'], "Jan 02, 2999 at 00:00 UTC")
def test_course_mode_expired(self):
course = self._create_course("verified")
mode = CourseMode.objects.get(
course_id=course.id,
mode_slug="verified"
)
expiration = datetime(1999, 1, 2, tzinfo=pytz.UTC)
mode.expiration_datetime = expiration
mode.save()
# Need to be enrolled
self._enroll(course.id, "verified")
# The course mode has expired, so expect an explanation
# to the student that the deadline has passed
response = self._get_page("verify_student_verify_now", course.id)
self.assertContains(response, "verification deadline")
self.assertContains(response, "Jan 02, 1999 at 00:00 UTC")
@mock.patch.dict(settings.FEATURES, {'EMBARGO': True})
def test_embargo_restrict(self):
course = self._create_course("verified")
with restrict_course(course.id) as redirect_url:
# Simulate that we're embargoed from accessing this
# course based on our IP address.
response = self._get_page('verify_student_start_flow', course.id, expected_status_code=302)
self.assertRedirects(response, redirect_url)
@mock.patch.dict(settings.FEATURES, {'EMBARGO': True})
def test_embargo_allow(self):
course = self._create_course("verified")
self._get_page('verify_student_start_flow', course.id)
def _create_course(self, *course_modes, **kwargs):
"""Create a new course with the specified course modes. """
course = CourseFactory.create()
if kwargs.get('course_start'):
course.start = kwargs.get('course_start')
modulestore().update_item(course, ModuleStoreEnum.UserID.test)
mode_kwargs = {}
if kwargs.get('sku'):
mode_kwargs['sku'] = kwargs['sku']
for course_mode in course_modes:
min_price = (0 if course_mode in ["honor", "audit"] else self.MIN_PRICE)
CourseModeFactory(
course_id=course.id,
mode_slug=course_mode,
mode_display_name=course_mode,
min_price=min_price,
**mode_kwargs
)
return course
def _enroll(self, course_key, mode):
"""Enroll the user in a course. """
CourseEnrollmentFactory.create(
user=self.user,
course_id=course_key,
mode=mode
)
def _unenroll(self, course_key):
"""Unenroll the user from a course. """
CourseEnrollment.unenroll(self.user, course_key)
def _set_verification_status(self, status):
"""Set the user's photo verification status. """
attempt = SoftwareSecurePhotoVerification.objects.create(user=self.user)
if status in ["submitted", "approved", "expired", "denied", "error"]:
attempt.mark_ready()
attempt.submit()
if status in ["approved", "expired"]:
attempt.approve()
elif status == "denied":
attempt.deny("Denied!")
elif status == "error":
attempt.system_error("Error!")
if status == "expired":
days_good_for = settings.VERIFY_STUDENT["DAYS_GOOD_FOR"]
attempt.created_at = datetime.now(pytz.UTC) - timedelta(days=(days_good_for + 1))
attempt.save()
def _set_contribution(self, amount, course_id):
"""Set the contribution amount pre-filled in a session var. """
session = self.client.session
session["donation_for_course"] = {
unicode(course_id): amount
}
session.save()
def _get_page(self, url_name, course_key, expected_status_code=200, skip_first_step=False):
"""Retrieve one of the verification pages. """
url = reverse(url_name, kwargs={"course_id": unicode(course_key)})
if skip_first_step:
url += "?skip-first-step=1"
response = self.client.get(url)
self.assertEqual(response.status_code, expected_status_code)
return response
def _assert_displayed_mode(self, response, expected_mode):
"""Check whether a course mode is displayed. """
response_dict = self._get_page_data(response)
self.assertEqual(response_dict['course_mode_slug'], expected_mode)
def _assert_steps_displayed(self, response, expected_steps, expected_current_step):
"""Check whether steps in the flow are displayed to the user. """
response_dict = self._get_page_data(response)
self.assertEqual(response_dict['current_step'], expected_current_step)
self.assertEqual(expected_steps, [
step['name'] for step in
response_dict['display_steps']
])
def _assert_messaging(self, response, expected_message):
"""Check the messaging on the page. """
response_dict = self._get_page_data(response)
self.assertEqual(response_dict['message_key'], expected_message)
def _assert_requirements_displayed(self, response, requirements):
"""Check that requirements are displayed on the page. """
response_dict = self._get_page_data(response)
for req, displayed in response_dict['requirements'].iteritems():
if req in requirements:
self.assertTrue(displayed, msg="Expected '{req}' requirement to be displayed".format(req=req))
else:
self.assertFalse(displayed, msg="Expected '{req}' requirement to be hidden".format(req=req))
def _assert_course_details(self, response, course_key, display_name, start_text, url):
"""Check the course information on the page. """
response_dict = self._get_page_data(response)
self.assertEqual(response_dict['course_key'], course_key)
self.assertEqual(response_dict['course_name'], display_name)
self.assertEqual(response_dict['course_start_date'], start_text)
self.assertEqual(response_dict['courseware_url'], url)
def _assert_user_details(self, response, full_name):
"""Check the user detail information on the page. """
response_dict = self._get_page_data(response)
self.assertEqual(response_dict['full_name'], full_name)
def _assert_contribution_amount(self, response, expected_amount):
"""Check the pre-filled contribution amount. """
response_dict = self._get_page_data(response)
self.assertEqual(response_dict['contribution_amount'], expected_amount)
def _get_page_data(self, response):
"""Retrieve the data attributes rendered on the page. """
soup = BeautifulSoup(response.content)
pay_and_verify_div = soup.find(id="pay-and-verify-container")
return {
'full_name': pay_and_verify_div['data-full-name'],
'course_key': pay_and_verify_div['data-course-key'],
'course_name': pay_and_verify_div['data-course-name'],
'course_start_date': pay_and_verify_div['data-course-start-date'],
'courseware_url': pay_and_verify_div['data-courseware-url'],
'course_mode_name': pay_and_verify_div['data-course-mode-name'],
'course_mode_slug': pay_and_verify_div['data-course-mode-slug'],
'display_steps': json.loads(pay_and_verify_div['data-display-steps']),
'current_step': pay_and_verify_div['data-current-step'],
'requirements': json.loads(pay_and_verify_div['data-requirements']),
'message_key': pay_and_verify_div['data-msg-key'],
'contribution_amount': pay_and_verify_div['data-contribution-amount'],
'verification_deadline': pay_and_verify_div['data-verification-deadline']
}
def _assert_upgrade_session_flag(self, is_upgrade):
"""Check that the session flag for attempting an upgrade is set. """
self.assertEqual(self.client.session.get('attempting_upgrade'), is_upgrade)
def _assert_redirects_to_dashboard(self, response):
"""Check that the page redirects to the student dashboard. """
self.assertRedirects(response, reverse('dashboard'))
def _assert_redirects_to_start_flow(self, response, course_id):
"""Check that the page redirects to the start of the payment/verification flow. """
url = reverse('verify_student_start_flow', kwargs={'course_id': unicode(course_id)})
self.assertRedirects(response, url)
def _assert_redirects_to_verify_start(self, response, course_id, status_code=302):
"""Check that the page redirects to the "verify later" part of the flow. """
url = reverse('verify_student_verify_now', kwargs={'course_id': unicode(course_id)})
self.assertRedirects(response, url, status_code)
def _assert_redirects_to_upgrade(self, response, course_id):
"""Check that the page redirects to the "upgrade" part of the flow. """
url = reverse('verify_student_upgrade_and_verify', kwargs={'course_id': unicode(course_id)})
self.assertRedirects(response, url)
def test_course_upgrade_page_with_unicode_and_special_values_in_display_name(self):
"""Check the course information on the page. """
mode_display_name = u"Introduction à l'astrophysique"
course = CourseFactory.create(display_name=mode_display_name)
for course_mode in ["honor", "verified"]:
min_price = (self.MIN_PRICE if course_mode != "honor" else 0)
CourseModeFactory(
course_id=course.id,
mode_slug=course_mode,
mode_display_name=mode_display_name,
min_price=min_price
)
self._enroll(course.id, "honor")
response_dict = self._get_page_data(self._get_page('verify_student_start_flow', course.id))
self.assertEqual(response_dict['course_name'], mode_display_name)
@httpretty.activate
@override_settings(ECOMMERCE_API_URL=TEST_API_URL, ECOMMERCE_API_SIGNING_KEY=TEST_API_SIGNING_KEY)
def test_processors_api(self):
"""
Check that when working with a product being processed by the
ecommerce api, we correctly call to that api for the list of
available payment processors.
"""
# setting a nonempty sku on the course will trigger calls to
# the ecommerce api to get payment processors.
course = self._create_course("verified", sku='nonempty-sku')
self._enroll(course.id, "honor")
# mock out the payment processors endpoint
httpretty.register_uri(
httpretty.GET,
"{}/payment/processors/".format(TEST_API_URL),
body=json.dumps(['foo', 'bar']),
content_type="application/json",
)
# make the server request
response = self._get_page('verify_student_start_flow', course.id)
self.assertEqual(response.status_code, 200)
# ensure the mock api call was made. NOTE: the following line
# approximates the check - if the headers were empty it means
# there was no last request.
self.assertNotEqual(httpretty.last_request().headers, {})
class CheckoutTestMixin(object):
"""
Mixin implementing test methods that should behave identically regardless
of which backend is used (shoppingcart or ecommerce service). Subclasses
immediately follow for each backend, which inherit from TestCase and
define methods needed to customize test parameters, and patch the
appropriate checkout method.
Though the view endpoint under test is named 'create_order' for backward-
compatibility, the effect of using this endpoint is to choose a specific product
(i.e. course mode) and trigger immediate checkout.
"""
def setUp(self):
""" Create a user and course. """
super(CheckoutTestMixin, self).setUp()
self.user = UserFactory.create(username="test", password="test")
self.course = CourseFactory.create()
for mode, min_price in (('audit', 0), ('honor', 0), ('verified', 100)):
CourseModeFactory(mode_slug=mode, course_id=self.course.id, min_price=min_price, sku=self.make_sku())
self.client.login(username="test", password="test")
def _assert_checked_out(
self,
post_params,
patched_create_order,
expected_course_key,
expected_mode_slug,
expected_status_code=200
):
"""
DRY helper.
Ensures that checkout functions were invoked as
expected during execution of the create_order endpoint.
"""
post_params.setdefault('processor', None)
response = self.client.post(reverse('verify_student_create_order'), post_params)
self.assertEqual(response.status_code, expected_status_code)
if expected_status_code == 200:
# ensure we called checkout at all
self.assertTrue(patched_create_order.called)
# ensure checkout args were correct
args = self._get_checkout_args(patched_create_order)
self.assertEqual(args['user'], self.user)
self.assertEqual(args['course_key'], expected_course_key)
self.assertEqual(args['course_mode'].slug, expected_mode_slug)
# ensure response data was correct
data = json.loads(response.content)
self.assertEqual(set(data.keys()), PAYMENT_DATA_KEYS)
else:
self.assertFalse(patched_create_order.called)
def test_create_order(self, patched_create_order):
# Create an order
params = {
'course_id': unicode(self.course.id),
'contribution': 100,
}
self._assert_checked_out(params, patched_create_order, self.course.id, 'verified')
def test_create_order_prof_ed(self, patched_create_order):
# Create a prof ed course
course = CourseFactory.create()
CourseModeFactory(mode_slug="professional", course_id=course.id, min_price=10, sku=self.make_sku())
# Create an order for a prof ed course
params = {'course_id': unicode(course.id)}
self._assert_checked_out(params, patched_create_order, course.id, 'professional')
def test_create_order_no_id_professional(self, patched_create_order):
# Create a no-id-professional ed course
course = CourseFactory.create()
CourseModeFactory(mode_slug="no-id-professional", course_id=course.id, min_price=10, sku=self.make_sku())
# Create an order for a prof ed course
params = {'course_id': unicode(course.id)}
self._assert_checked_out(params, patched_create_order, course.id, 'no-id-professional')
def test_create_order_for_multiple_paid_modes(self, patched_create_order):
# Create a no-id-professional ed course
course = CourseFactory.create()
CourseModeFactory(mode_slug="no-id-professional", course_id=course.id, min_price=10, sku=self.make_sku())
CourseModeFactory(mode_slug="professional", course_id=course.id, min_price=10, sku=self.make_sku())
# Create an order for a prof ed course
params = {'course_id': unicode(course.id)}
# TODO jsa - is this the intended behavior?
self._assert_checked_out(params, patched_create_order, course.id, 'no-id-professional')
def test_create_order_bad_donation_amount(self, patched_create_order):
# Create an order
params = {
'course_id': unicode(self.course.id),
'contribution': '99.9'
}
self._assert_checked_out(params, patched_create_order, None, None, expected_status_code=400)
def test_create_order_good_donation_amount(self, patched_create_order):
# Create an order
params = {
'course_id': unicode(self.course.id),
'contribution': '100.0'
}
self._assert_checked_out(params, patched_create_order, self.course.id, 'verified')
def test_old_clients(self, patched_create_order):
# ensure the response to a request from a stale js client is modified so as
# not to break behavior in the browser.
# (XCOM-214) remove after release.
expected_payment_data = TEST_PAYMENT_DATA.copy()
expected_payment_data['payment_form_data'].update({'foo': 'bar'})
patched_create_order.return_value = expected_payment_data
# there is no 'processor' parameter in the post payload, so the response should only contain payment form data.
params = {'course_id': unicode(self.course.id), 'contribution': 100}
response = self.client.post(reverse('verify_student_create_order'), params)
self.assertEqual(response.status_code, 200)
self.assertTrue(patched_create_order.called)
# ensure checkout args were correct
args = self._get_checkout_args(patched_create_order)
self.assertEqual(args['user'], self.user)
self.assertEqual(args['course_key'], self.course.id)
self.assertEqual(args['course_mode'].slug, 'verified')
# ensure response data was correct
data = json.loads(response.content)
self.assertEqual(data, {'foo': 'bar'})
@patch('verify_student.views.checkout_with_shoppingcart', return_value=TEST_PAYMENT_DATA)
class TestCreateOrderShoppingCart(CheckoutTestMixin, ModuleStoreTestCase):
""" Test view behavior when the shoppingcart is used. """
def make_sku(self):
""" Checkout is handled by shoppingcart when the course mode's sku is empty. """
return ''
def _get_checkout_args(self, patched_create_order):
""" Assuming patched_create_order was called, return a mapping containing the call arguments."""
return dict(zip(('request', 'user', 'course_key', 'course_mode', 'amount'), patched_create_order.call_args[0]))
@override_settings(ECOMMERCE_API_URL=TEST_API_URL, ECOMMERCE_API_SIGNING_KEY=TEST_API_SIGNING_KEY)
@patch('verify_student.views.checkout_with_ecommerce_service', return_value=TEST_PAYMENT_DATA)
class TestCreateOrderEcommerceService(CheckoutTestMixin, ModuleStoreTestCase):
""" Test view behavior when the ecommerce service is used. """
def make_sku(self):
""" Checkout is handled by the ecommerce service when the course mode's sku is nonempty. """
return uuid4().hex.decode('ascii')
def _get_checkout_args(self, patched_create_order):
""" Assuming patched_create_order was called, return a mapping containing the call arguments."""
return dict(zip(('user', 'course_key', 'course_mode', 'processor'), patched_create_order.call_args[0]))
class TestCheckoutWithEcommerceService(ModuleStoreTestCase):
"""
Ensures correct behavior in the function `checkout_with_ecommerce_service`.
"""
@httpretty.activate
@override_settings(ECOMMERCE_API_URL=TEST_API_URL, ECOMMERCE_API_SIGNING_KEY=TEST_API_SIGNING_KEY)
def test_create_basket(self):
"""
Check that when working with a product being processed by the
ecommerce api, we correctly call to that api to create a basket.
"""
user = UserFactory.create(username="test-username")
course_mode = CourseModeFactory(sku="test-sku")
expected_payment_data = {'foo': 'bar'}
# mock out the payment processors endpoint
httpretty.register_uri(
httpretty.POST,
"{}/baskets/".format(TEST_API_URL),
body=json.dumps({'payment_data': expected_payment_data}),
content_type="application/json",
)
# call the function
actual_payment_data = checkout_with_ecommerce_service(user, 'dummy-course-key', course_mode, 'test-processor')
# check the api call
self.assertEqual(json.loads(httpretty.last_request().body), {
'products': [{'sku': 'test-sku'}],
'checkout': True,
'payment_processor_name': 'test-processor',
})
# check the response
self.assertEqual(actual_payment_data, expected_payment_data)
class TestCreateOrderView(ModuleStoreTestCase):
"""
Tests for the create_order view of verified course enrollment process.
"""
def setUp(self):
super(TestCreateOrderView, self).setUp()
self.user = UserFactory.create(username="rusty", password="test")
self.client.login(username="rusty", password="test")
self.course_id = 'Robot/999/Test_Course'
self.course = CourseFactory.create(org='Robot', number='999', display_name='Test Course')
verified_mode = CourseMode(
course_id=SlashSeparatedCourseKey("Robot", "999", 'Test_Course'),
mode_slug="verified",
mode_display_name="Verified Certificate",
min_price=50
)
verified_mode.save()
course_mode_post_data = {
'certificate_mode': 'Select Certificate',
'contribution': 50,
'contribution-other-amt': '',
'explain': ''
}
self.client.post(
reverse("course_modes_choose", kwargs={'course_id': self.course_id}),
course_mode_post_data
)
@patch.dict(settings.FEATURES, {'AUTOMATIC_VERIFY_STUDENT_IDENTITY_FOR_TESTING': True})
def test_invalid_amount(self):
response = self._create_order('1.a', self.course_id, expect_status_code=400)
self.assertIn('Selected price is not valid number.', response.content)
@patch.dict(settings.FEATURES, {'AUTOMATIC_VERIFY_STUDENT_IDENTITY_FOR_TESTING': True})
def test_invalid_mode(self):
# Create a course that does not have a verified mode
course_id = 'Fake/999/Test_Course'
CourseFactory.create(org='Fake', number='999', display_name='Test Course')
response = self._create_order('50', course_id, expect_status_code=400)
self.assertIn('This course doesn\'t support paid certificates', response.content)
@patch.dict(settings.FEATURES, {'AUTOMATIC_VERIFY_STUDENT_IDENTITY_FOR_TESTING': True})
def test_create_order_fail_with_get(self):
create_order_post_data = {
'contribution': 50,
'course_id': self.course_id,
}
# Use the wrong HTTP method
response = self.client.get(reverse('verify_student_create_order'), create_order_post_data)
self.assertEqual(response.status_code, 405)
@patch.dict(settings.FEATURES, {'AUTOMATIC_VERIFY_STUDENT_IDENTITY_FOR_TESTING': True})
def test_create_order_success(self):
response = self._create_order(50, self.course_id)
json_response = json.loads(response.content)
self.assertIsNotNone(json_response['payment_form_data'].get('orderNumber')) # TODO not canonical
# Verify that the order exists and is configured correctly
order = Order.objects.get(user=self.user)
self.assertEqual(order.status, 'paying')
item = CertificateItem.objects.get(order=order)
self.assertEqual(item.status, 'paying')
self.assertEqual(item.course_id, self.course.id)
self.assertEqual(item.mode, 'verified')
def _create_order(self, contribution, course_id, expect_success=True, expect_status_code=200):
"""Create a new order.
Arguments:
contribution (int): The contribution amount.
course_id (CourseKey): The course to purchase.
Keyword Arguments:
expect_success (bool): If True, verify that the response was successful.
expect_status_code (int): The expected HTTP status code
Returns:
HttpResponse
"""
url = reverse('verify_student_create_order')
data = {
'contribution': contribution,
'course_id': course_id,
'processor': None,
}
response = self.client.post(url, data)
self.assertEqual(response.status_code, expect_status_code)
if expect_status_code == 200:
json_response = json.loads(response.content)
if expect_success:
self.assertEqual(set(json_response.keys()), PAYMENT_DATA_KEYS)
else:
self.assertFalse(json_response['success'])
return response
@ddt.ddt
@patch.dict(settings.FEATURES, {'AUTOMATIC_VERIFY_STUDENT_IDENTITY_FOR_TESTING': True})
class TestSubmitPhotosForVerification(TestCase):
"""
Tests for submitting photos for verification.
"""
USERNAME = "test_user"
PASSWORD = "test_password"
IMAGE_DATA = "abcd,1234"
FULL_NAME = u"Ḟüḷḷ Ṅäṁë"
def setUp(self):
super(TestSubmitPhotosForVerification, self).setUp()
self.user = UserFactory.create(username=self.USERNAME, password=self.PASSWORD)
result = self.client.login(username=self.USERNAME, password=self.PASSWORD)
self.assertTrue(result, msg="Could not log in")
def test_submit_photos(self):
# Submit the photos
self._submit_photos(
face_image=self.IMAGE_DATA,
photo_id_image=self.IMAGE_DATA
)
# Verify that the attempt is created in the database
attempt = SoftwareSecurePhotoVerification.objects.get(user=self.user)
self.assertEqual(attempt.status, "submitted")
# Verify that the user's name wasn't changed
self._assert_user_name(self.user.profile.name)
def test_submit_photos_and_change_name(self):
# Submit the photos, along with a name change
self._submit_photos(
face_image=self.IMAGE_DATA,
photo_id_image=self.IMAGE_DATA,
full_name=self.FULL_NAME
)
# Check that the user's name was changed in the database
self._assert_user_name(self.FULL_NAME)
@ddt.data('face_image', 'photo_id_image')
def test_invalid_image_data(self, invalid_param):
params = {
'face_image': self.IMAGE_DATA,
'photo_id_image': self.IMAGE_DATA
}
params[invalid_param] = ""
response = self._submit_photos(expected_status_code=400, **params)
self.assertEqual(response.content, "Image data is not valid.")
def test_invalid_name(self):
response = self._submit_photos(
face_image=self.IMAGE_DATA,
photo_id_image=self.IMAGE_DATA,
full_name="a",
expected_status_code=400
)
self.assertEqual(response.content, "Name must be at least 2 characters long.")
@ddt.data('face_image', 'photo_id_image')
def test_missing_required_params(self, missing_param):
params = {
'face_image': self.IMAGE_DATA,
'photo_id_image': self.IMAGE_DATA
}
del params[missing_param]
response = self._submit_photos(expected_status_code=400, **params)
self.assertEqual(
response.content,
"Missing required parameters: {missing}".format(missing=missing_param)
)
def _submit_photos(self, face_image=None, photo_id_image=None, full_name=None, expected_status_code=200):
"""Submit photos for verification.
Keyword Arguments:
face_image (str): The base-64 encoded face image data.
photo_id_image (str): The base-64 encoded ID image data.
full_name (unicode): The full name of the user, if the user is changing it.
expected_status_code (int): The expected response status code.
Returns:
HttpResponse
"""
url = reverse("verify_student_submit_photos")
params = {}
if face_image is not None:
params['face_image'] = face_image
if photo_id_image is not None:
params['photo_id_image'] = photo_id_image
if full_name is not None:
params['full_name'] = full_name
response = self.client.post(url, params)
self.assertEqual(response.status_code, expected_status_code)
if expected_status_code == 200:
# Verify that photo submission confirmation email was sent
self.assertEqual(len(mail.outbox), 1)
self.assertEqual("Verification photos received", mail.outbox[0].subject)
else:
# Verify that photo submission confirmation email was not sent
self.assertEqual(len(mail.outbox), 0)
return response
def _assert_user_name(self, full_name):
"""Check the user's name.
Arguments:
full_name (unicode): The user's full name.
Raises:
AssertionError
"""
account_settings = get_account_settings(self.user)
self.assertEqual(account_settings['name'], full_name)
class TestPhotoVerificationResultsCallback(ModuleStoreTestCase):
"""
Tests for the results_callback view.
"""
def setUp(self):
super(TestPhotoVerificationResultsCallback, self).setUp()
self.course = CourseFactory.create(org='Robot', number='999', display_name='Test Course')
self.course_id = self.course.id
self.user = UserFactory.create()
self.attempt = SoftwareSecurePhotoVerification(
status="submitted",
user=self.user
)
self.attempt.save()
self.receipt_id = self.attempt.receipt_id
self.client = Client()
def mocked_has_valid_signature(method, headers_dict, body_dict, access_key, secret_key): # pylint: disable=no-self-argument, unused-argument
"""
Used as a side effect when mocking `verify_student.ssencrypt.has_valid_signature`.
"""
return True
def test_invalid_json(self):
"""
Test for invalid JSON being posted by Software Secure.
"""
data = {"Testing invalid"}
response = self.client.post(
reverse('verify_student_results_callback'),
data=data,
content_type='application/json',
HTTP_AUTHORIZATION='test BBBBBBBBBBBBBBBBBBBB: testing',
HTTP_DATE='testdate'
)
self.assertIn('Invalid JSON', response.content)
self.assertEqual(response.status_code, 400)
def test_invalid_dict(self):
"""
Test for an invalid dictionary being posted by Software Secure.
"""
data = '"\\"Test\\tTesting"'
response = self.client.post(
reverse('verify_student_results_callback'),
data=data,
content_type='application/json',
HTTP_AUTHORIZATION='test BBBBBBBBBBBBBBBBBBBB:testing',
HTTP_DATE='testdate'
)
self.assertIn('JSON should be dict', response.content)
self.assertEqual(response.status_code, 400)
@mock.patch('verify_student.ssencrypt.has_valid_signature', mock.Mock(side_effect=mocked_has_valid_signature))
def test_invalid_access_key(self):
"""
Test for invalid access key.
"""
data = {
"EdX-ID": self.receipt_id,
"Result": "Testing",
"Reason": "Testing",
"MessageType": "Testing"
}
json_data = json.dumps(data)
response = self.client.post(
reverse('verify_student_results_callback'),
data=json_data,
content_type='application/json',
HTTP_AUTHORIZATION='test testing:testing',
HTTP_DATE='testdate'
)
self.assertIn('Access key invalid', response.content)
self.assertEqual(response.status_code, 400)
@mock.patch('verify_student.ssencrypt.has_valid_signature', mock.Mock(side_effect=mocked_has_valid_signature))
def test_wrong_edx_id(self):
"""
Test for a wrong ID on a Software Secure verification attempt.
"""
data = {
"EdX-ID": "Invalid-Id",
"Result": "Testing",
"Reason": "Testing",
"MessageType": "Testing"
}
json_data = json.dumps(data)
response = self.client.post(
reverse('verify_student_results_callback'),
data=json_data,
content_type='application/json',
HTTP_AUTHORIZATION='test BBBBBBBBBBBBBBBBBBBB:testing',
HTTP_DATE='testdate'
)
self.assertIn('edX ID Invalid-Id not found', response.content)
self.assertEqual(response.status_code, 400)
@mock.patch('verify_student.ssencrypt.has_valid_signature', mock.Mock(side_effect=mocked_has_valid_signature))
def test_pass_result(self):
"""
Test for verification passed.
"""
data = {
"EdX-ID": self.receipt_id,
"Result": "PASS",
"Reason": "",
"MessageType": "You have been verified."
}
json_data = json.dumps(data)
response = self.client.post(
reverse('verify_student_results_callback'), data=json_data,
content_type='application/json',
HTTP_AUTHORIZATION='test BBBBBBBBBBBBBBBBBBBB:testing',
HTTP_DATE='testdate'
)
attempt = SoftwareSecurePhotoVerification.objects.get(receipt_id=self.receipt_id)
self.assertEqual(attempt.status, u'approved')
self.assertEquals(response.content, 'OK!')
@mock.patch('verify_student.ssencrypt.has_valid_signature', mock.Mock(side_effect=mocked_has_valid_signature))
def test_fail_result(self):
"""
Test for failed verification.
"""
data = {
"EdX-ID": self.receipt_id,
"Result": 'FAIL',
"Reason": 'Invalid photo',
"MessageType": 'Your photo doesn\'t meet standards.'
}
json_data = json.dumps(data)
response = self.client.post(
reverse('verify_student_results_callback'),
data=json_data,
content_type='application/json',
HTTP_AUTHORIZATION='test BBBBBBBBBBBBBBBBBBBB:testing',
HTTP_DATE='testdate'
)
attempt = SoftwareSecurePhotoVerification.objects.get(receipt_id=self.receipt_id)
self.assertEqual(attempt.status, u'denied')
self.assertEqual(attempt.error_code, u'Your photo doesn\'t meet standards.')
self.assertEqual(attempt.error_msg, u'"Invalid photo"')
self.assertEquals(response.content, 'OK!')
@mock.patch('verify_student.ssencrypt.has_valid_signature', mock.Mock(side_effect=mocked_has_valid_signature))
def test_system_fail_result(self):
"""
Test for a system failure result from Software Secure.
"""
data = {"EdX-ID": self.receipt_id,
"Result": 'SYSTEM FAIL',
"Reason": 'Memory overflow',
"MessageType": 'You must retry the verification.'}
json_data = json.dumps(data)
response = self.client.post(
reverse('verify_student_results_callback'),
data=json_data,
content_type='application/json',
HTTP_AUTHORIZATION='test BBBBBBBBBBBBBBBBBBBB:testing',
HTTP_DATE='testdate'
)
attempt = SoftwareSecurePhotoVerification.objects.get(receipt_id=self.receipt_id)
self.assertEqual(attempt.status, u'must_retry')
self.assertEqual(attempt.error_code, u'You must retry the verification.')
self.assertEqual(attempt.error_msg, u'"Memory overflow"')
self.assertEquals(response.content, 'OK!')
@mock.patch('verify_student.ssencrypt.has_valid_signature', mock.Mock(side_effect=mocked_has_valid_signature))
def test_unknown_result(self):
"""
Test for an unknown Software Secure result.
"""
data = {
"EdX-ID": self.receipt_id,
"Result": 'Unknown',
"Reason": 'Unknown reason',
"MessageType": 'Unknown message'
}
json_data = json.dumps(data)
response = self.client.post(
reverse('verify_student_results_callback'),
data=json_data,
content_type='application/json',
HTTP_AUTHORIZATION='test BBBBBBBBBBBBBBBBBBBB:testing',
HTTP_DATE='testdate'
)
self.assertIn('Result Unknown not understood', response.content)
@mock.patch('verify_student.ssencrypt.has_valid_signature', mock.Mock(side_effect=mocked_has_valid_signature))
def test_in_course_reverify_disabled(self):
"""
Test that a passing callback is handled when in-course re-verification is disabled:
no status email is sent and no re-verification status is recorded.
"""
data = {
"EdX-ID": self.receipt_id,
"Result": "PASS",
"Reason": "",
"MessageType": "You have been verified."
}
json_data = json.dumps(data)
response = self.client.post(
reverse('verify_student_results_callback'), data=json_data,
content_type='application/json',
HTTP_AUTHORIZATION='test BBBBBBBBBBBBBBBBBBBB:testing',
HTTP_DATE='testdate'
)
attempt = SoftwareSecurePhotoVerification.objects.get(receipt_id=self.receipt_id)
self.assertEqual(attempt.status, u'approved')
self.assertEquals(response.content, 'OK!')
# Verify that no status email was sent (in-course re-verification is disabled)
self.assertEqual(len(mail.outbox), 0)
user_status = VerificationStatus.objects.filter(user=self.user).count()
self.assertEqual(user_status, 0)
@mock.patch('verify_student.ssencrypt.has_valid_signature', mock.Mock(side_effect=mocked_has_valid_signature))
def test_pass_in_course_reverify_result(self):
"""
Test a passing callback when in-course re-verification is enabled.
"""
self.create_reverification_xblock()
incourse_reverify_enabled = InCourseReverificationConfiguration.current()
incourse_reverify_enabled.enabled = True
incourse_reverify_enabled.save()
data = {
"EdX-ID": self.receipt_id,
"Result": "PASS",
"Reason": "",
"MessageType": "You have been verified."
}
json_data = json.dumps(data)
response = self.client.post(
reverse('verify_student_results_callback'), data=json_data,
content_type='application/json',
HTTP_AUTHORIZATION='test BBBBBBBBBBBBBBBBBBBB:testing',
HTTP_DATE='testdate'
)
attempt = SoftwareSecurePhotoVerification.objects.get(receipt_id=self.receipt_id)
self.assertEqual(attempt.status, u'approved')
self.assertEquals(response.content, 'OK!')
# Verify that photo re-verification status email was sent
self.assertEqual(len(mail.outbox), 1)
self.assertEqual("Re-verification Status", mail.outbox[0].subject)
@mock.patch('verify_student.views._send_email')
@mock.patch('verify_student.ssencrypt.has_valid_signature', mock.Mock(side_effect=mocked_has_valid_signature))
def test_reverification_on_callback(self, mock_send_email):
"""
Test software secure callback flow for re-verification.
"""
# Create the 'edx-reverification-block' in course tree
self.create_reverification_xblock()
# create dummy data for software secure photo verification result callback
data = {
"EdX-ID": self.receipt_id,
"Result": "PASS",
"Reason": "",
"MessageType": "You have been verified."
}
json_data = json.dumps(data)
response = self.client.post(
reverse('verify_student_results_callback'),
data=json_data,
content_type='application/json',
HTTP_AUTHORIZATION='test BBBBBBBBBBBBBBBBBBBB:testing',
HTTP_DATE='testdate'
)
self.assertEqual(response.content, 'OK!')
# now check that '_send_email' method is called on result callback
# with required parameters
subject = "Re-verification Status"
mock_send_email.assert_called_once_with(self.user.id, subject, ANY)
def create_reverification_xblock(self):
"""
Create the reverification XBlock.
"""
# Create the 'edx-reverification-block' in course tree
section = ItemFactory.create(parent=self.course, category='chapter', display_name='Test Section')
subsection = ItemFactory.create(parent=section, category='sequential', display_name='Test Subsection')
vertical = ItemFactory.create(parent=subsection, category='vertical', display_name='Test Unit')
reverification = ItemFactory.create(
parent=vertical,
category='edx-reverification-block',
display_name='Test Verification Block'
)
# Create checkpoint
checkpoint = VerificationCheckpoint(course_id=self.course_id, checkpoint_location=reverification.location)
checkpoint.save()
# Add a re-verification attempt
checkpoint.add_verification_attempt(self.attempt)
# Add a re-verification attempt status for the user
VerificationStatus.add_verification_status(checkpoint, self.user, "submitted")
class TestReverifyView(ModuleStoreTestCase):
"""
Tests for the reverification views.
"""
def setUp(self):
super(TestReverifyView, self).setUp()
self.user = UserFactory.create(username="rusty", password="test")
self.user.profile.name = u"Røøsty Bøøgins"
self.user.profile.save()
self.client.login(username="rusty", password="test")
self.course = CourseFactory.create(org='MITx', number='999', display_name='Robot Super Course')
self.course_key = self.course.id
@patch('verify_student.views.render_to_response', render_mock)
def test_reverify_get(self):
url = reverse('verify_student_reverify')
response = self.client.get(url)
self.assertEquals(response.status_code, 200)
((_template, context), _kwargs) = render_mock.call_args # pylint: disable=unpacking-non-sequence
self.assertFalse(context['error'])
@patch('verify_student.views.render_to_response', render_mock)
def test_reverify_post_failure(self):
url = reverse('verify_student_reverify')
response = self.client.post(url, {'face_image': '',
'photo_id_image': ''})
self.assertEquals(response.status_code, 200)
((template, context), _kwargs) = render_mock.call_args # pylint: disable=unpacking-non-sequence
self.assertIn('photo_reverification', template)
self.assertTrue(context['error'])
@patch.dict(settings.FEATURES, {'AUTOMATIC_VERIFY_STUDENT_IDENTITY_FOR_TESTING': True})
def test_reverify_post_success(self):
url = reverse('verify_student_reverify')
response = self.client.post(url, {'face_image': ',',
'photo_id_image': ','})
self.assertEquals(response.status_code, 302)
try:
verification_attempt = SoftwareSecurePhotoVerification.objects.get(user=self.user)
self.assertIsNotNone(verification_attempt)
except ObjectDoesNotExist:
self.fail('No verification object generated')
((template, context), _kwargs) = render_mock.call_args # pylint: disable=unpacking-non-sequence
self.assertIn('photo_reverification', template)
self.assertTrue(context['error'])
class TestInCourseReverifyView(ModuleStoreTestCase):
"""
Tests for the incourse reverification views.
"""
IMAGE_DATA = "abcd,1234"
def build_course(self):
"""
Build up a course tree with a reverification XBlock.
"""
# pylint: disable=attribute-defined-outside-init
self.course_key = SlashSeparatedCourseKey("Robot", "999", "Test_Course")
self.course = CourseFactory.create(org='Robot', number='999', display_name='Test Course')
# Create the course modes
for mode in ('audit', 'honor', 'verified'):
min_price = 0 if mode in ["honor", "audit"] else 1
CourseModeFactory(mode_slug=mode, course_id=self.course_key, min_price=min_price)
# Create the 'edx-reverification-block' in course tree
section = ItemFactory.create(parent=self.course, category='chapter', display_name='Test Section')
subsection = ItemFactory.create(parent=section, category='sequential', display_name='Test Subsection')
vertical = ItemFactory.create(parent=subsection, category='vertical', display_name='Test Unit')
self.reverification = ItemFactory.create(
parent=vertical,
category='edx-reverification-block',
display_name='Test Verification Block'
)
self.section_location = section.location
self.subsection_location = subsection.location
self.vertical_location = vertical.location
self.reverification_location = unicode(self.reverification.location)
self.reverification_assessment = self.reverification.related_assessment
def setUp(self):
super(TestInCourseReverifyView, self).setUp()
self.build_course()
self.user = UserFactory.create(username="rusty", password="test")
self.client.login(username="rusty", password="test")
# Enroll the user in the verified mode to emulate a verified learner
CourseEnrollment.enroll(self.user, self.course_key, mode="verified")
self.config = InCourseReverificationConfiguration(enabled=True)
self.config.save()
# Mock and patch the analytics tracker for BI events
analytics_patcher = patch('verify_student.views.analytics')
self.mock_tracker = analytics_patcher.start()
self.addCleanup(analytics_patcher.stop)
@patch.dict(settings.FEATURES, {'AUTOMATIC_VERIFY_STUDENT_IDENTITY_FOR_TESTING': True})
def test_incourse_reverify_feature_flag_get(self):
self.config.enabled = False
self.config.save()
response = self.client.get(self._get_url(self.course_key, self.reverification_location))
self.assertEquals(response.status_code, 404)
@patch.dict(settings.FEATURES, {'AUTOMATIC_VERIFY_STUDENT_IDENTITY_FOR_TESTING': True})
def test_incourse_reverify_invalid_course_get(self):
response = self.client.get(self._get_url("invalid/course/key", self.reverification_location))
self.assertEquals(response.status_code, 404)
@patch.dict(settings.FEATURES, {'AUTOMATIC_VERIFY_STUDENT_IDENTITY_FOR_TESTING': True})
def test_incourse_reverify_invalid_checkpoint_get(self):
response = self.client.get(self._get_url(self.course_key, "invalid_checkpoint"))
self.assertEquals(response.status_code, 404)
@patch.dict(settings.FEATURES, {'AUTOMATIC_VERIFY_STUDENT_IDENTITY_FOR_TESTING': True})
def test_incourse_reverify_initial_redirect_get(self):
self._create_checkpoint()
response = self.client.get(self._get_url(self.course_key, self.reverification_location))
url = reverse('verify_student_verify_now', kwargs={"course_id": unicode(self.course_key)})
self.assertRedirects(response, url)
@override_settings(SEGMENT_IO_LMS_KEY="foobar")
@patch.dict(settings.FEATURES, {'AUTOMATIC_VERIFY_STUDENT_IDENTITY_FOR_TESTING': True, 'SEGMENT_IO_LMS': True})
def test_incourse_reverify_get(self):
"""
Test incourse reverification.
"""
self._create_checkpoint()
self._create_initial_verification()
response = self.client.get(self._get_url(self.course_key, self.reverification_location))
self.assertEquals(response.status_code, 200)
# Verify that the Google Analytics event fires when the user enters
# the in-course reverification view
self.mock_tracker.track.assert_called_once_with( # pylint: disable=no-member
self.user.id, # pylint: disable=no-member
EVENT_NAME_USER_ENTERED_INCOURSE_REVERIFY_VIEW,
{
'category': "verification",
'label': unicode(self.course_key),
'checkpoint': self.reverification_assessment
},
context={
'Google Analytics':
{'clientId': None}
}
)
self.mock_tracker.reset_mock()
@patch.dict(settings.FEATURES, {'AUTOMATIC_VERIFY_STUDENT_IDENTITY_FOR_TESTING': True})
def test_checkpoint_post(self):
"""Verify that POST requests including an invalid checkpoint location
results in a 400 response.
"""
response = self.client.post(self._get_url(self.course_key, self.reverification_location))
self.assertEquals(response.status_code, 400)
@patch.dict(settings.FEATURES, {'AUTOMATIC_VERIFY_STUDENT_IDENTITY_FOR_TESTING': True})
def test_incourse_reverify_initial_redirect_post(self):
self._create_checkpoint()
response = self.client.post(self._get_url(self.course_key, self.reverification_location))
url = reverse('verify_student_verify_now', kwargs={"course_id": unicode(self.course_key)})
self.assertRedirects(response, url)
@patch.dict(settings.FEATURES, {'AUTOMATIC_VERIFY_STUDENT_IDENTITY_FOR_TESTING': True})
def test_incourse_reverify_index_error_post(self):
self._create_checkpoint()
self._create_initial_verification()
response = self.client.post(self._get_url(self.course_key, self.reverification_location), {"face_image": ""})
self.assertEqual(response.status_code, 400)
@override_settings(SEGMENT_IO_LMS_KEY="foobar")
@patch.dict(settings.FEATURES, {'AUTOMATIC_VERIFY_STUDENT_IDENTITY_FOR_TESTING': True, 'SEGMENT_IO_LMS': True})
def test_incourse_reverify_post(self):
self._create_checkpoint()
self._create_initial_verification()
response = self.client.post(
self._get_url(self.course_key, self.reverification_location),
{"face_image": self.IMAGE_DATA}
)
self.assertEqual(response.status_code, 200)
# Verify that the Google Analytics event fires after successfully
# submitting the photo verification
self.mock_tracker.track.assert_called_once_with( # pylint: disable=no-member
self.user.id,
EVENT_NAME_USER_SUBMITTED_INCOURSE_REVERIFY,
{
'category': "verification",
'label': unicode(self.course_key),
'checkpoint': self.reverification_assessment
},
context={
'Google Analytics':
{'clientId': None}
}
)
self.mock_tracker.reset_mock()
@patch.dict(settings.FEATURES, {'AUTOMATIC_VERIFY_STUDENT_IDENTITY_FOR_TESTING': True})
def test_incourse_reverify_feature_flag_post(self):
self.config.enabled = False
self.config.save()
response = self.client.post(self._get_url(self.course_key, self.reverification_location))
self.assertEquals(response.status_code, 404)
def _create_checkpoint(self):
"""
Helper method for creating a reverification checkpoint.
"""
checkpoint = VerificationCheckpoint(course_id=self.course_key, checkpoint_location=self.reverification_location)
checkpoint.save()
def _create_initial_verification(self):
"""
Helper method for initial verification.
"""
attempt = SoftwareSecurePhotoVerification(user=self.user)
attempt.mark_ready()
attempt.save()
attempt.submit()
def _get_url(self, course_key, checkpoint_location):
"""
Construct the reverification url.
Arguments:
course_key (unicode): The ID of the course
checkpoint_location (str): Location of verification checkpoint
Returns:
url
"""
return reverse(
'verify_student_incourse_reverify',
kwargs={
"course_id": unicode(course_key),
"usage_id": checkpoint_location
}
)
class TestEmailMessageWithCustomICRVBlock(ModuleStoreTestCase):
"""
Test email sending on re-verification
"""
def build_course(self):
"""
Build up a course tree with a Reverification xBlock.
"""
self.course_key = SlashSeparatedCourseKey("Robot", "999", "Test_Course")
self.course = CourseFactory.create(org='Robot', number='999', display_name='Test Course')
self.due_date = datetime(2015, 6, 22, tzinfo=pytz.UTC)
self.allowed_attempts = 1
# Create the course modes
for mode in ('audit', 'honor', 'verified'):
min_price = 0 if mode in ["honor", "audit"] else 1
CourseModeFactory(mode_slug=mode, course_id=self.course_key, min_price=min_price)
# Create the 'edx-reverification-block' in course tree
section = ItemFactory.create(parent=self.course, category='chapter', display_name='Test Section')
subsection = ItemFactory.create(parent=section, category='sequential', display_name='Test Subsection')
vertical = ItemFactory.create(parent=subsection, category='vertical', display_name='Test Unit')
self.reverification = ItemFactory.create(
parent=vertical,
category='edx-reverification-block',
display_name='Test Verification Block',
metadata={'attempts': self.allowed_attempts, 'due': self.due_date}
)
self.section_location = section.location
self.subsection_location = subsection.location
self.vertical_location = vertical.location
self.reverification_location = unicode(self.reverification.location)
self.assessment = self.reverification.related_assessment
self.re_verification_link = reverse(
'verify_student_incourse_reverify',
args=(
unicode(self.course_key),
self.reverification_location
)
)
def setUp(self):
"""
Setup method for testing photo verification email messages.
"""
super(TestEmailMessageWithCustomICRVBlock, self).setUp()
self.build_course()
self.check_point = VerificationCheckpoint.objects.create(
course_id=self.course.id, checkpoint_location=self.reverification_location
)
self.check_point.add_verification_attempt(SoftwareSecurePhotoVerification.objects.create(user=self.user))
VerificationStatus.add_verification_status(
checkpoint=self.check_point,
user=self.user,
status='submitted'
)
self.attempt = SoftwareSecurePhotoVerification.objects.filter(user=self.user)
location_id = VerificationStatus.get_location_id(self.attempt)
usage_key = UsageKey.from_string(location_id)
redirect_url = get_redirect_url(self.course_key, usage_key.replace(course_key=self.course_key))
self.request = RequestFactory().get('/url')
self.course_link = self.request.build_absolute_uri(redirect_url)
def test_approved_email_message(self):
subject, body = _compose_message_reverification_email(
self.course.id, self.user.id, self.reverification_location, "approved", self.request
)
self.assertIn(
"We have successfully verified your identity for the {assessment} "
"assessment in the {course_name} course.".format(
assessment=self.assessment,
course_name=self.course.display_name_with_default
),
body
)
self.check_courseware_link_exists(body)
self.assertIn("Re-verification Status", subject)
def test_denied_email_message_with_valid_due_date_and_attempts_allowed(self):
subject, body = _compose_message_reverification_email(
self.course.id, self.user.id, self.reverification_location, "denied", self.request
)
self.assertIn(
"We could not verify your identity for the {assessment} assessment "
"in the {course_name} course. You have used "
"{used_attempts} out of {allowed_attempts} attempts to "
"verify your identity.".format(
course_name=self.course.display_name_with_default,
assessment=self.assessment,
used_attempts=1,
allowed_attempts=self.allowed_attempts + 1
),
body
)
self.assertIn(
"You must verify your identity before the assessment "
"closes on {due_date}".format(
due_date=get_default_time_display(self.due_date)
),
body
)
reverify_link = self.request.build_absolute_uri(self.re_verification_link)
self.assertIn(
"To try to verify your identity again, select the following link:",
body
)
self.assertIn(reverify_link, body)
self.assertIn("Re-verification Status", subject)
def test_denied_email_message_with_due_date_and_no_attempts(self):
""" Denied email message if due date is still open but user has no
attempts available.
"""
VerificationStatus.add_verification_status(
checkpoint=self.check_point,
user=self.user,
status='submitted'
)
__, body = _compose_message_reverification_email(
self.course.id, self.user.id, self.reverification_location, "denied", self.request
)
self.assertIn(
"We could not verify your identity for the {assessment} assessment "
"in the {course_name} course. You have used "
"{used_attempts} out of {allowed_attempts} attempts to "
"verify your identity, and verification is no longer "
"possible".format(
course_name=self.course.display_name_with_default,
assessment=self.assessment,
used_attempts=2,
allowed_attempts=self.allowed_attempts + 1
),
body
)
self.check_courseware_link_exists(body)
def test_denied_email_message_with_close_verification_dates(self):
# Due date given and expired
return_value = datetime(2016, 1, 1, tzinfo=timezone.utc)
with patch.object(timezone, 'now', return_value=return_value):
__, body = _compose_message_reverification_email(
self.course.id, self.user.id, self.reverification_location, "denied", self.request
)
self.assertIn(
"We could not verify your identity for the {assessment} assessment "
"in the {course_name} course. You have used "
"{used_attempts} out of {allowed_attempts} attempts to "
"verify your identity, and verification is no longer "
"possible".format(
course_name=self.course.display_name_with_default,
assessment=self.assessment,
used_attempts=1,
allowed_attempts=self.allowed_attempts + 1
),
body
)
def test_check_num_queries(self):
# Fetch the re-verification block and check the number of mongo calls it takes
with check_mongo_calls(1):
ver_block = modulestore().get_item(self.reverification.location)
# Expect that the verification block is fetched
self.assertIsNotNone(ver_block)
def check_courseware_link_exists(self, body):
"""Checking courseware url and signature information of EDX"""
self.assertIn(
"To go to the courseware, select the following link:",
body
)
self.assertIn(
"{course_link}".format(
course_link=self.course_link
),
body
)
self.assertIn("Thanks,", body)
self.assertIn(
"The {platform_name} team".format(
platform_name=settings.PLATFORM_NAME
),
body
)
class TestEmailMessageWithDefaultICRVBlock(ModuleStoreTestCase):
"""
Test for In-course Re-verification
"""
def build_course(self):
"""
Build up a course tree with a Reverification xBlock.
"""
self.course_key = SlashSeparatedCourseKey("Robot", "999", "Test_Course")
self.course = CourseFactory.create(org='Robot', number='999', display_name='Test Course')
# Create the course modes
for mode in ('audit', 'honor', 'verified'):
min_price = 0 if mode in ["honor", "audit"] else 1
CourseModeFactory(mode_slug=mode, course_id=self.course_key, min_price=min_price)
# Create the 'edx-reverification-block' in course tree
section = ItemFactory.create(parent=self.course, category='chapter', display_name='Test Section')
subsection = ItemFactory.create(parent=section, category='sequential', display_name='Test Subsection')
vertical = ItemFactory.create(parent=subsection, category='vertical', display_name='Test Unit')
self.reverification = ItemFactory.create(
parent=vertical,
category='edx-reverification-block',
display_name='Test Verification Block'
)
self.section_location = section.location
self.subsection_location = subsection.location
self.vertical_location = vertical.location
self.reverification_location = unicode(self.reverification.location)
self.assessment = self.reverification.related_assessment
self.re_verification_link = reverse(
'verify_student_incourse_reverify',
args=(
unicode(self.course_key),
self.reverification_location
)
)
def setUp(self):
super(TestEmailMessageWithDefaultICRVBlock, self).setUp()
self.build_course()
self.check_point = VerificationCheckpoint.objects.create(
course_id=self.course.id, checkpoint_location=self.reverification_location
)
self.check_point.add_verification_attempt(SoftwareSecurePhotoVerification.objects.create(user=self.user))
self.attempt = SoftwareSecurePhotoVerification.objects.filter(user=self.user)
self.request = RequestFactory().get('/url')
def test_denied_email_message_with_no_attempt_allowed(self):
VerificationStatus.add_verification_status(
checkpoint=self.check_point,
user=self.user,
status='submitted'
)
__, body = _compose_message_reverification_email(
self.course.id, self.user.id, self.reverification_location, "denied", self.request
)
self.assertIn(
"We could not verify your identity for the {assessment} assessment "
"in the {course_name} course. You have used "
"{used_attempts} out of {allowed_attempts} attempts to "
"verify your identity, and verification is no longer "
"possible".format(
course_name=self.course.display_name_with_default,
assessment=self.assessment,
used_attempts=1,
allowed_attempts=1
),
body
)
def test_error_on_compose_email(self):
resp = _compose_message_reverification_email(
self.course.id, self.user.id, self.reverification_location, "denied", True
)
self.assertIsNone(resp)
| agpl-3.0 |
ashray/VTK-EVM | Filters/Core/Testing/Python/TestSynchronizedTemplates2D.py | 20 | 1438 | #!/usr/bin/env python
import vtk
from vtk.test import Testing
from vtk.util.misc import vtkGetDataRoot
VTK_DATA_ROOT = vtkGetDataRoot()
# Create the RenderWindow, Renderer and both Actors
#
ren1 = vtk.vtkRenderer()
renWin = vtk.vtkRenderWindow()
renWin.SetMultiSamples(0)
renWin.AddRenderer(ren1)
iren = vtk.vtkRenderWindowInteractor()
iren.SetRenderWindow(renWin)
reader = vtk.vtkPNGReader()
reader.SetFileName("" + str(VTK_DATA_ROOT) + "/Data/fullhead15.png")
iso = vtk.vtkContourFilter()
iso.SetInputConnection(reader.GetOutputPort())
iso.GenerateValues(12,500,1150)
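# GenerateValues(numContours, rangeStart, rangeEnd) asks the contour filter
# for evenly spaced isovalues; here 12 contour values spanning the 500-1150
# scalar range, so the test renders a family of nested contours rather than
# a single threshold.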
isoMapper = vtk.vtkPolyDataMapper()
isoMapper.SetInputConnection(iso.GetOutputPort())
isoMapper.ScalarVisibilityOff()
isoActor = vtk.vtkActor()
isoActor.SetMapper(isoMapper)
isoActor.GetProperty().SetColor(0,0,0)
outline = vtk.vtkOutlineFilter()
outline.SetInputConnection(reader.GetOutputPort())
outlineMapper = vtk.vtkPolyDataMapper()
outlineMapper.SetInputConnection(outline.GetOutputPort())
outlineActor = vtk.vtkActor()
outlineActor.SetMapper(outlineMapper)
outlineProp = outlineActor.GetProperty()
#eval $outlineProp SetColor 0 0 0
# Add the actors to the renderer, set the background and size
#
ren1.AddActor(outlineActor)
ren1.AddActor(isoActor)
ren1.SetBackground(0.8,0.8,1)
renWin.SetSize(400,400)
ren1.ResetCamera()
ren1.GetActiveCamera().Zoom(1.3)
iren.Initialize()
# prevent the tk window from showing up then start the event loop
# --- end of script --
| bsd-3-clause |
danakj/chromium | third_party/pycoverage/coverage/files.py | 209 | 10724 | """File wrangling."""
from coverage.backward import to_string
from coverage.misc import CoverageException
import fnmatch, os, os.path, re, sys
import ntpath, posixpath
class FileLocator(object):
"""Understand how filenames work."""
def __init__(self):
# The absolute path to our current directory.
self.relative_dir = os.path.normcase(abs_file(os.curdir) + os.sep)
# Cache of results of calling the canonical_filename() method, to
# avoid duplicating work.
self.canonical_filename_cache = {}
def relative_filename(self, filename):
"""Return the relative form of `filename`.
The filename will be relative to the current directory when the
`FileLocator` was constructed.
"""
fnorm = os.path.normcase(filename)
if fnorm.startswith(self.relative_dir):
filename = filename[len(self.relative_dir):]
return filename
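# For example (paths assumed for illustration): with the locator constructed
# in /home/user/project, relative_filename("/home/user/project/src/app.py")
# returns "src/app.py", while a path outside that directory is returned
# unchanged.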
def canonical_filename(self, filename):
"""Return a canonical filename for `filename`.
An absolute path with no redundant components and normalized case.
"""
if filename not in self.canonical_filename_cache:
if not os.path.isabs(filename):
for path in [os.curdir] + sys.path:
if path is None:
continue
f = os.path.join(path, filename)
if os.path.exists(f):
filename = f
break
cf = abs_file(filename)
self.canonical_filename_cache[filename] = cf
return self.canonical_filename_cache[filename]
def get_zip_data(self, filename):
"""Get data from `filename` if it is a zip file path.
Returns the string data read from the zip file, or None if no zip file
could be found or `filename` isn't in it. The data returned will be
an empty string if the file is empty.
"""
import zipimport
markers = ['.zip'+os.sep, '.egg'+os.sep]
for marker in markers:
if marker in filename:
parts = filename.split(marker)
try:
zi = zipimport.zipimporter(parts[0]+marker[:-1])
except zipimport.ZipImportError:
continue
try:
data = zi.get_data(parts[1])
except IOError:
continue
return to_string(data)
return None
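# Illustrative sketch (file names assumed): for "plugins.egg/pkg/mod.py" the
# ".egg/" marker splits the path into the importer root "plugins.egg" and the
# archive-relative name "pkg/mod.py", whose bytes are read via zipimport;
# paths without a ".zip/" or ".egg/" component fall through and return None.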
if sys.platform == 'win32':
def actual_path(path):
"""Get the actual path of `path`, including the correct case."""
if path in actual_path.cache:
return actual_path.cache[path]
head, tail = os.path.split(path)
if not tail:
actpath = head
elif not head:
actpath = tail
else:
head = actual_path(head)
if head in actual_path.list_cache:
files = actual_path.list_cache[head]
else:
try:
files = os.listdir(head)
except OSError:
files = []
actual_path.list_cache[head] = files
normtail = os.path.normcase(tail)
for f in files:
if os.path.normcase(f) == normtail:
tail = f
break
actpath = os.path.join(head, tail)
actual_path.cache[path] = actpath
return actpath
actual_path.cache = {}
actual_path.list_cache = {}
else:
def actual_path(filename):
"""The actual path for non-Windows platforms."""
return filename
def abs_file(filename):
"""Return the absolute normalized form of `filename`."""
path = os.path.expandvars(os.path.expanduser(filename))
path = os.path.abspath(os.path.realpath(path))
path = actual_path(path)
return path
def isabs_anywhere(filename):
"""Is `filename` an absolute path on any OS?"""
return ntpath.isabs(filename) or posixpath.isabs(filename)
def prep_patterns(patterns):
"""Prepare the file patterns for use in a `FnmatchMatcher`.
If a pattern starts with a wildcard, it is used as a pattern
as-is. If it does not start with a wildcard, then it is made
absolute with the current directory.
If `patterns` is None, an empty list is returned.
"""
prepped = []
for p in patterns or []:
if p.startswith("*") or p.startswith("?"):
prepped.append(p)
else:
prepped.append(abs_file(p))
return prepped
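# Quick sketch of the behaviour (patterns assumed for illustration):
# prep_patterns(["*/tests/*", "src/app.py"]) keeps the first entry verbatim
# because it starts with a wildcard, and rewrites the second through
# abs_file() so it matches the absolute paths used elsewhere.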
class TreeMatcher(object):
"""A matcher for files in a tree."""
def __init__(self, directories):
self.dirs = directories[:]
def __repr__(self):
return "<TreeMatcher %r>" % self.dirs
def info(self):
"""A list of strings for displaying when dumping state."""
return self.dirs
def add(self, directory):
"""Add another directory to the list we match for."""
self.dirs.append(directory)
def match(self, fpath):
"""Does `fpath` indicate a file in one of our trees?"""
for d in self.dirs:
if fpath.startswith(d):
if fpath == d:
# This is the same file!
return True
if fpath[len(d)] == os.sep:
# This is a file in the directory
return True
return False
class FnmatchMatcher(object):
"""A matcher for files by filename pattern."""
def __init__(self, pats):
self.pats = pats[:]
def __repr__(self):
return "<FnmatchMatcher %r>" % self.pats
def info(self):
"""A list of strings for displaying when dumping state."""
return self.pats
def match(self, fpath):
"""Does `fpath` match one of our filename patterns?"""
for pat in self.pats:
if fnmatch.fnmatch(fpath, pat):
return True
return False
def sep(s):
"""Find the path separator used in this string, or os.sep if none."""
sep_match = re.search(r"[\\/]", s)
if sep_match:
the_sep = sep_match.group(0)
else:
the_sep = os.sep
return the_sep
class PathAliases(object):
"""A collection of aliases for paths.
When combining data files from remote machines, often the paths to source
code are different, for example, due to OS differences, or because of
serialized checkouts on continuous integration machines.
A `PathAliases` object tracks a list of pattern/result pairs, and can
map a path through those aliases to produce a unified path.
`locator` is a FileLocator that is used to canonicalize the results.
"""
def __init__(self, locator=None):
self.aliases = []
self.locator = locator
def add(self, pattern, result):
"""Add the `pattern`/`result` pair to the list of aliases.
`pattern` is an `fnmatch`-style pattern. `result` is a simple
string. When mapping paths, if a path starts with a match against
`pattern`, then that match is replaced with `result`. This models
isomorphic source trees being rooted at different places on two
different machines.
`pattern` can't end with a wildcard component, since that would
match an entire tree, and not just its root.
"""
# The pattern can't end with a wildcard component.
pattern = pattern.rstrip(r"\/")
if pattern.endswith("*"):
raise CoverageException("Pattern must not end with wildcards.")
pattern_sep = sep(pattern)
# The pattern is meant to match a filepath. Let's make it absolute
# unless it already is, or is meant to match any prefix.
if not pattern.startswith('*') and not isabs_anywhere(pattern):
pattern = abs_file(pattern)
pattern += pattern_sep
# Make a regex from the pattern. fnmatch always adds a \Z or $ to
# match the whole string, which we don't want.
regex_pat = fnmatch.translate(pattern).replace(r'\Z(', '(')
if regex_pat.endswith("$"):
regex_pat = regex_pat[:-1]
# We want */a/b.py to match on Windows too, so change slash to match
# either separator.
regex_pat = regex_pat.replace(r"\/", r"[\\/]")
# We want case-insensitive matching, so add that flag.
regex = re.compile(r"(?i)" + regex_pat)
# Normalize the result: it must end with a path separator.
result_sep = sep(result)
result = result.rstrip(r"\/") + result_sep
self.aliases.append((regex, result, pattern_sep, result_sep))
def map(self, path):
"""Map `path` through the aliases.
`path` is checked against all of the patterns. The first pattern to
match is used to replace the root of the path with the result root.
Only one pattern is ever used. If no patterns match, `path` is
returned unchanged.
The separator style in the result is made to match that of the result
in the alias.
"""
for regex, result, pattern_sep, result_sep in self.aliases:
m = regex.match(path)
if m:
new = path.replace(m.group(0), result)
if pattern_sep != result_sep:
new = new.replace(pattern_sep, result_sep)
if self.locator:
new = self.locator.canonical_filename(new)
return new
return path
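# Minimal usage sketch (example paths assumed):
#
#     aliases = PathAliases()
#     aliases.add("/jenkins/build/*/src", "./src")
#     aliases.map("/jenkins/build/42/src/pkg/mod.py")   # -> "./src/pkg/mod.py"
#
# Only the first matching alias is applied; paths that match nothing are
# returned untouched.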
def find_python_files(dirname):
"""Yield all of the importable Python files in `dirname`, recursively.
To be importable, the files have to be in a directory with a __init__.py,
except for `dirname` itself, which isn't required to have one. The
assumption is that `dirname` was specified directly, so the user knows
best, but subdirectories are checked for a __init__.py to be sure we only
find the importable files.
"""
for i, (dirpath, dirnames, filenames) in enumerate(os.walk(dirname)):
if i > 0 and '__init__.py' not in filenames:
# If a directory doesn't have __init__.py, then it isn't
# importable and neither are its files
del dirnames[:]
continue
for filename in filenames:
# We're only interested in files that look like reasonable Python
# files: Must end with .py or .pyw, and must not have certain funny
# characters that probably mean they are editor junk.
if re.match(r"^[^.#~!$@%^&*()+=,]+\.pyw?$", filename):
yield os.path.join(dirpath, filename)
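# Sketch of the walk (directory layout assumed for illustration):
#
#     proj/main.py          -> yielded (the root itself needs no __init__.py)
#     proj/pkg/__init__.py  -> yielded, along with proj/pkg/util.py
#     proj/scripts/tool.py  -> skipped: scripts/ has no __init__.py, so the
#                              whole subtree is pruned as non-importable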
| bsd-3-clause |
mozman/ezdxf | tests/test_05_tools/test_510_byte_stream.py | 1 | 1134 | # Copyright (c) 2020, Manfred Moitzi
# License: MIT License
import pytest
import struct
from ezdxf.tools.binarydata import ByteStream
def test_init():
bs = ByteStream(b'ABCDABC\x00')
assert bs.index == 0
assert len(bs.buffer) == 8
def test_read_ps():
bs = ByteStream(b'ABCDABC\x00')
s = bs.read_padded_string()
assert s == 'ABCDABC'
assert bs.index == 8
assert bs.has_data is False
def test_read_ps_align():
bs = ByteStream(b'ABCD\x00')
s = bs.read_padded_string()
assert s == 'ABCD'
assert bs.index == 8
assert bs.has_data is False
def test_read_pus():
bs = ByteStream(b'A\x00B\x00C\x00D\x00\x00\x00')
s = bs.read_padded_unicode_string()
assert s == 'ABCD'
assert bs.index == 12
assert bs.has_data is False
def test_read_doubles():
data = struct.pack('3d', 1.0, 2.0, 3.0)
bs = ByteStream(data)
x = bs.read_struct('d')[0]
y = bs.read_struct('d')[0]
z = bs.read_struct('d')[0]
assert (x, y, z) == (1.0, 2.0, 3.0)
assert bs.index == 24
assert bs.has_data is False
if __name__ == '__main__':
pytest.main([__file__])
| mit |
zvolsky/praha | languages/default.py | 180 | 4428 | # coding: utf8
{
'!langcode!': 'en-us',
'!langname!': 'English (US)',
'%s %%(shop)': '%s %%(shop)',
'%s %%(shop[0])': '%s %%(shop[0])',
'%s %%{quark[0]}': '%s %%{quark[0]}',
'%s %%{shop[0]}': '%s %%{shop[0]}',
'%s %%{shop}': '%s %%{shop}',
'%Y-%m-%d': '%Y-%m-%d',
'%Y-%m-%d %H:%M:%S': '%Y-%m-%d %H:%M:%S',
'@markmin\x01**Hello World**': '**Hello World**',
'About': 'About',
'Access Control': 'Access Control',
'Administrative Interface': 'Administrative Interface',
'Ajax Recipes': 'Ajax Recipes',
'Are you sure you want to delete this object?': 'Are you sure you want to delete this object?',
'Buy this book': 'Buy this book',
'Cannot be empty': 'Cannot be empty',
'Check to delete': 'Check to delete',
'Client IP': 'Client IP',
'Community': 'Community',
'Components and Plugins': 'Components and Plugins',
'Controller': 'Controller',
'Copyright': 'Copyright',
'Created By': 'Created By',
'Created On': 'Created On',
'customize me!': 'customize me!',
'Database': 'Database',
'DB Model': 'DB Model',
'Demo': 'Demo',
'Deployment Recipes': 'Deployment Recipes',
'Description': 'Description',
'Documentation': 'Documentation',
"Don't know what to do?": "Don't know what to do?",
'Download': 'Download',
'E-mail': 'E-mail',
'Email and SMS': 'Email and SMS',
'enter an integer between %(min)g and %(max)g': 'enter an integer between %(min)g and %(max)g',
'enter date and time as %(format)s': 'enter date and time as %(format)s',
'Errors': 'Errors',
'FAQ': 'FAQ',
'First name': 'First name',
'Forms and Validators': 'Forms and Validators',
'Free Applications': 'Free Applications',
'Group %(group_id)s created': 'Group %(group_id)s created',
'Group ID': 'Group ID',
'Group uniquely assigned to user %(id)s': 'Group uniquely assigned to user %(id)s',
'Groups': 'Groups',
'Hello World': 'Hello World',
'Hello World ## comment': 'Hello World ',
'Hello World## comment': 'Hello World',
'Home': 'Home',
'How did you get here?': 'How did you get here?',
'Introduction': 'Introduction',
'Invalid email': 'Invalid email',
'Is Active': 'Is Active',
'Last name': 'Last name',
'Layout': 'Layout',
'Layout Plugins': 'Layout Plugins',
'Layouts': 'Layouts',
'Live Chat': 'Live Chat',
'Logged in': 'Logged in',
'Logged out': 'Logged out',
'Login': 'Login',
'Logout': 'Logout',
'Lost Password': 'Lost Password',
'Lost password?': 'Lost password?',
'Menu Model': 'Menu Model',
'Modified By': 'Modified By',
'Modified On': 'Modified On',
'My Sites': 'My Sites',
'Name': 'Name',
'Object or table name': 'Object or table name',
'Online examples': 'Online examples',
'Origin': 'Origin',
'Other Plugins': 'Other Plugins',
'Other Recipes': 'Other Recipes',
'Overview': 'Overview',
'Password': 'Password',
"Password fields don't match": "Password fields don't match",
'please input your password again': 'please input your password again',
'Plugins': 'Plugins',
'Powered by': 'Powered by',
'Preface': 'Preface',
'Profile': 'Profile',
'Python': 'Python',
'Quick Examples': 'Quick Examples',
'Recipes': 'Recipes',
'Record ID': 'Record ID',
'Register': 'Register',
'Registration identifier': 'Registration identifier',
'Registration key': 'Registration key',
'Registration successful': 'Registration successful',
'Remember me (for 30 days)': 'Remember me (for 30 days)',
'Reset Password key': 'Reset Password key',
'Role': 'Role',
'Semantic': 'Semantic',
'Services': 'Services',
'Stylesheet': 'Stylesheet',
'Support': 'Support',
'The Core': 'The Core',
'The output of the file is a dictionary that was rendered by the view %s': 'The output of the file is a dictionary that was rendered by the view %s',
'The Views': 'The Views',
'This App': 'This App',
'Timestamp': 'Timestamp',
'Twitter': 'Twitter',
'User %(id)s Logged-in': 'User %(id)s Logged-in',
'User %(id)s Logged-out': 'User %(id)s Logged-out',
'User %(id)s Registered': 'User %(id)s Registered',
'User ID': 'User ID',
'value already in database or empty': 'value already in database or empty',
'Verify Password': 'Verify Password',
'Videos': 'Videos',
'View': 'View',
'Welcome': 'Welcome',
'Welcome to web2py!': 'Welcome to web2py!',
'Which called the function %s located in the file %s': 'Which called the function %s located in the file %s',
'You are successfully running web2py': 'You are successfully running web2py',
'You can modify this application and adapt it to your needs': 'You can modify this application and adapt it to your needs',
'You visited the url %s': 'You visited the url %s',
}
| agpl-3.0 |
abyssxsy/gnuradio | gr-digital/python/digital/qa_correlate_and_sync.py | 54 | 3786 | #!/usr/bin/env python
#
# Copyright 2013 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
import math
import pmt
from gnuradio import gr, gr_unittest, digital, blocks, filter
def make_parabolic_pulse_shape(sps, N=1, scale=1):
# Make L taps on each side.
L = int(float(N)/2 * sps)
n_taps = 2 * L + 1
taps = []
for tap_i in xrange(n_taps):
x = float(tap_i - L)/sps/N
if abs(x) > 1:
taps.append(0)
else:
taps.append(1-x*x)
return [tap*scale for tap in taps]
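# Worked example (values follow directly from the code above): with sps=4 and
# N=1 we get L=2, i.e. 5 taps at x = -0.5, -0.25, 0, 0.25, 0.5, giving
# [0.75, 0.9375, 1.0, 0.9375, 0.75] -- a symmetric parabola peaking at the
# centre tap, multiplied by `scale`.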
class test_correlate_and_sync(gr_unittest.TestCase):
def test_001(self):
# We're using a really simple preamble so that the correlation
# is straightforward.
preamble = [0, 0, 0, 1, 0, 0, 0]
# Our pulse shape has this width (in units of symbols).
pulse_width = 1.5
# The number of filters to use for resampling.
n_filters = 12
sps = 3
data = [0]*10 + preamble + [0]*40
src = blocks.vector_source_c(data)
# We want to generate taps with a sampling rate of sps=n_filters for resampling
# purposes.
pulse_shape = make_parabolic_pulse_shape(sps=n_filters, N=0.5, scale=35)
# Create our resampling filter to generate the data for the correlator.
shape = filter.pfb_arb_resampler_ccf(sps, pulse_shape, n_filters)
# Generate the correlator block itself.
correlator = digital.correlate_and_sync_cc(preamble, pulse_shape, sps, n_filters)
# Connect it all up and go.
snk = blocks.vector_sink_c()
null = blocks.null_sink(gr.sizeof_gr_complex)
tb = gr.top_block()
tb.connect(src, shape, correlator, snk)
tb.connect((correlator, 1), null)
tb.run()
# Look at the tags. Retrieve the timing offset.
data = snk.data()
offset = None
timing_error = None
for tag in snk.tags():
key = pmt.symbol_to_string(tag.key)
if key == "time_est":
offset = tag.offset
timing_error = pmt.to_double(tag.value)
if offset is None:
raise ValueError("No tags found.")
# Detect where the middle of the preamble is.
# Assume we have only one peak and that it is symmetric.
sum_id = 0
sum_d = 0
for i, d in enumerate(data):
sum_id += i*abs(d)
sum_d += abs(d)
data_i = sum_id/sum_d
if offset is not None:
diff = data_i-offset
remainder = -(diff%sps)
if remainder < -sps/2.0:
remainder += sps
tol = 0.2
difference = timing_error - remainder
difference = difference % sps
if abs(difference) >= tol:
print("Tag gives timing estimate of {0}. QA calculates it as {1}. Tolerance is {2}".format(timing_error, remainder, tol))
self.assertTrue(abs(difference) < tol)
if __name__ == '__main__':
gr_unittest.run(test_correlate_and_sync, "test_correlate_and_sync.xml")
| gpl-3.0 |
robinro/ansible | lib/ansible/modules/cloud/amazon/rds_subnet_group.py | 71 | 5419 | #!/usr/bin/python
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['stableinterface'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: rds_subnet_group
version_added: "1.5"
short_description: manage RDS database subnet groups
description:
- Creates, modifies, and deletes RDS database subnet groups. This module has a dependency on python-boto >= 2.5.
options:
state:
description:
- Specifies whether the subnet should be present or absent.
required: true
default: present
aliases: []
choices: [ 'present' , 'absent' ]
name:
description:
- Database subnet group identifier.
required: true
default: null
aliases: []
description:
description:
- Database subnet group description. Only set when a new group is added.
required: false
default: null
aliases: []
subnets:
description:
- List of subnet IDs that make up the database subnet group.
required: false
default: null
aliases: []
author: "Scott Anderson (@tastychutney)"
extends_documentation_fragment:
- aws
- ec2
'''
EXAMPLES = '''
# Add or change a subnet group
- rds_subnet_group:
state: present
name: norwegian-blue
description: My Fancy Ex Parrot Subnet Group
subnets:
- subnet-aaaaaaaa
- subnet-bbbbbbbb
# Remove a subnet group
- rds_subnet_group:
state: absent
name: norwegian-blue
'''
try:
import boto.rds
from boto.exception import BotoServerError
HAS_BOTO = True
except ImportError:
HAS_BOTO = False
def main():
argument_spec = ec2_argument_spec()
argument_spec.update(dict(
state = dict(required=True, choices=['present', 'absent']),
name = dict(required=True),
description = dict(required=False),
subnets = dict(required=False, type='list'),
)
)
module = AnsibleModule(argument_spec=argument_spec)
if not HAS_BOTO:
module.fail_json(msg='boto required for this module')
state = module.params.get('state')
group_name = module.params.get('name').lower()
group_description = module.params.get('description')
group_subnets = module.params.get('subnets') or {}
if state == 'present':
for required in ['name', 'description', 'subnets']:
if not module.params.get(required):
module.fail_json(msg = str("Parameter %s required for state='present'" % required))
else:
for not_allowed in ['description', 'subnets']:
if module.params.get(not_allowed):
module.fail_json(msg = str("Parameter %s not allowed for state='absent'" % not_allowed))
# Retrieve any AWS settings from the environment.
region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module)
if not region:
module.fail_json(msg = str("Either region or AWS_REGION or EC2_REGION environment variable or boto config aws_region or ec2_region must be set."))
try:
conn = connect_to_aws(boto.rds, region, **aws_connect_kwargs)
except boto.exception.BotoServerError as e:
module.fail_json(msg = e.error_message)
try:
changed = False
exists = False
try:
matching_groups = conn.get_all_db_subnet_groups(group_name, max_records=100)
exists = len(matching_groups) > 0
except BotoServerError as e:
if e.error_code != 'DBSubnetGroupNotFoundFault':
module.fail_json(msg = e.error_message)
if state == 'absent':
if exists:
conn.delete_db_subnet_group(group_name)
changed = True
else:
if not exists:
new_group = conn.create_db_subnet_group(group_name, desc=group_description, subnet_ids=group_subnets)
changed = True
else:
# Sort the subnet groups before we compare them
matching_groups[0].subnet_ids.sort()
group_subnets.sort()
if (matching_groups[0].name != group_name or
matching_groups[0].description != group_description or
matching_groups[0].subnet_ids != group_subnets):
changed_group = conn.modify_db_subnet_group(group_name, description=group_description, subnet_ids=group_subnets)
changed = True
except BotoServerError as e:
module.fail_json(msg = e.error_message)
module.exit_json(changed=changed)
# import module snippets
from ansible.module_utils.basic import *
from ansible.module_utils.ec2 import *
if __name__ == '__main__':
main()
| gpl-3.0 |
alindt/Cinnamon | files/usr/lib/cinnamon-settings/modules/cs_default.py | 1 | 14784 | #!/usr/bin/env python
from SettingsWidgets import *
PREF_MEDIA_AUTORUN_NEVER = "autorun-never"
PREF_MEDIA_AUTORUN_X_CONTENT_START_APP = "autorun-x-content-start-app"
PREF_MEDIA_AUTORUN_X_CONTENT_IGNORE = "autorun-x-content-ignore"
PREF_MEDIA_AUTORUN_X_CONTENT_OPEN_FOLDER = "autorun-x-content-open-folder"
CUSTOM_ITEM_ASK = "cc-item-ask"
CUSTOM_ITEM_DO_NOTHING = "cc-item-do-nothing"
CUSTOM_ITEM_OPEN_FOLDER = "cc-item-open-folder"
MEDIA_HANDLING_SCHEMA = "org.cinnamon.desktop.media-handling"
PREF_CONTENT_TYPE = 0
PREF_GEN_CONTENT_TYPE = 1
PREF_LABEL = 2
DEF_CONTENT_TYPE = 0
DEF_LABEL = 1
DEF_HEADING = 2
preferred_app_defs = [
# for web, we need to support text/html,
# application/xhtml+xml and x-scheme-handler/https,
# hence the "*" pattern
( "x-scheme-handler/http", "x-scheme-handler/http", _("_Web") ),
( "x-scheme-handler/mailto", "x-scheme-handler/mailto", _("_Mail") ),
( "text/plain", "text", _("Text") ), #TODO: Add mnemonic once we're out of M16 release to preserve i18n for now
# 1st mimetype is to let us find apps
# 2nd mimetype is to set default handler for (so we handle all of that type, not just a specific format)
( "audio/x-vorbis+ogg", "audio", _("M_usic") ),
( "video/x-ogm+ogg", "video", _("_Video") ),
( "image/jpeg", "image", _("_Photos") )
]
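# Illustration of how a tuple is consumed (based on DefaultAppChooserButton
# below): for ("audio/x-vorbis+ogg", "audio", _("M_usic")) the first mimetype
# populates the app chooser with candidate players, while the second, generic
# string is matched against the chosen application's supported types so the
# app becomes the default handler for every matching audio format.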
removable_media_defs = [
( "x-content/audio-cdda", _("CD _audio") , _("Select an application for audio CDs")),
( "x-content/video-dvd", _("_DVD video"), _("Select an application for video DVDs") ),
( "x-content/audio-player", _("_Music player"), _("Select an application to run when a music player is connected") ),
( "x-content/image-dcf", _("_Photos"), _("Select an application to run when a camera is connected") ),
( "x-content/unix-software", _("_Software"), _("Select an application for software CDs") )
]
other_defs = [
# translators: these strings are duplicates of shared-mime-info
# strings, just here to fix capitalization of the English originals.
# If the shared-mime-info translation works for your language,
# simply leave these untranslated.
( "x-content/audio-dvd", _("audio DVD") ),
( "x-content/blank-bd", _("blank Blu-ray disc") ),
( "x-content/blank-cd", _("blank CD disc") ),
( "x-content/blank-dvd", _("blank DVD disc") ),
( "x-content/blank-hddvd", _("blank HD DVD disc") ),
( "x-content/video-bluray", _("Blu-ray video disc") ),
( "x-content/ebook-reader", _("e-book reader") ),
( "x-content/video-hddvd", _("HD DVD video disc") ),
( "x-content/image-picturecd", _("Picture CD") ),
( "x-content/video-svcd", _("Super Video CD") ),
( "x-content/video-vcd", _("Video CD") ),
( "x-content/win32-software", _("Windows software") ),
( "x-content/software", _("Software") )
]
class ColumnBox(Gtk.VBox):
def __init__(self, title, content):
super(ColumnBox, self).__init__()
label = Gtk.Label("")
label.set_markup('<b>%s\n</b>' % title)
label.set_alignment(0.5, 0.5)
self.set_homogeneous(False)
self.pack_start(label, False, False, 0)
self.pack_end(content, True, True, 0)
class ButtonTable(Gtk.Table):
def __init__(self, lines):
super(ButtonTable, self).__init__(lines, 2, False)
self.set_row_spacings(8)
self.set_col_spacings(15)
self.attach(Gtk.Label(""), 2, 3, 0, lines, Gtk.AttachOptions.EXPAND|Gtk.AttachOptions.FILL, 0, 0, 0)
self.row = 0
def addRow(self, label, button):
if label:
label = MnemonicLabel(label, button)
self.attach(label, 0, 1, self.row, self.row+1, Gtk.AttachOptions.EXPAND|Gtk.AttachOptions.FILL, 0, 0, 0)
self.attach(button, 1, 2, self.row, self.row+1, Gtk.AttachOptions.FILL, 0, 0, 0)
self.row += 1
def forgetRow(self):
self.row -= 1
class MnemonicLabel(Gtk.Label):
def __init__(self, text, widget):
super(MnemonicLabel, self).__init__("")
self.set_text_with_mnemonic(text)
self.set_alignment(1, 0.5)
self.get_style_context().add_class("dim-label")
self.set_mnemonic_widget(widget)
class DefaultAppChooserButton(Gtk.AppChooserButton):
def __init__(self, content_type, gen_content_type):
super(DefaultAppChooserButton, self).__init__(content_type=content_type)
self.content_type = content_type
self.generic_content_type = gen_content_type
self.set_show_default_item(True)
self.connect("changed", self.onChanged)
def onChanged(self, button):
info = button.get_app_info()
if info:
types = info.get_supported_types()
for t in types:
if self.generic_content_type in t:
if not info.set_as_default_for_type(t):
print "Failed to set '%s' as the default application for '%s'" % (info.get_name(), self.generic_content_type)
if self.content_type == "x-scheme-handler/http":
if info.set_as_default_for_type ("x-scheme-handler/https") == False:
print "Failed to set '%s' as the default application for '%s'" % (info.get_name(), "x-scheme-handler/https")
class CustomAppChooserButton(Gtk.AppChooserButton):
def __init__(self, media_settings, content_type, heading=None):
super(CustomAppChooserButton, self).__init__(content_type=content_type)
self.media_settings = media_settings
content_type = self.get_content_type()
self.set_show_default_item(True)
#fetch preferences for this content type
(pref_start_app, pref_ignore, pref_open_folder) = self.getPreferences()
pref_ask = not pref_start_app and not pref_ignore and not pref_open_folder
info = self.get_app_info()
#append the separator only if we have >= 1 apps in the chooser
if info:
self.append_separator()
icon = Gio.ThemedIcon.new("gtk-dialog-question")
self.append_custom_item(CUSTOM_ITEM_ASK, _("Ask what to do"), icon)
icon = Gio.ThemedIcon.new("gtk-directory")
self.append_custom_item(CUSTOM_ITEM_OPEN_FOLDER, _("Open folder"), icon)
icon = Gio.ThemedIcon.new("gtk-cancel")
self.append_custom_item(CUSTOM_ITEM_DO_NOTHING, _("Do nothing"), icon)
self.set_show_dialog_item(True)
self.set_heading(heading)
if pref_ask:
self.set_active_custom_item(CUSTOM_ITEM_ASK)
elif pref_ignore:
self.set_active_custom_item(CUSTOM_ITEM_DO_NOTHING)
elif pref_open_folder:
self.set_active_custom_item(CUSTOM_ITEM_OPEN_FOLDER)
self.connect("changed", self.onChanged)
self.connect("custom-item-activated", self.onCustomItemActivated)
def onChanged(self, button):
info = self.get_app_info()
if info:
content_type = self.get_content_type()
self.setPreferences(True, False, False)
info.set_as_default_for_type(content_type)
def onCustomItemActivated(self, button, item):
content_type = self.get_content_type()
if item == CUSTOM_ITEM_ASK:
self.setPreferences(False, False, False)
elif item == CUSTOM_ITEM_OPEN_FOLDER:
self.setPreferences(False, False, True)
elif item == CUSTOM_ITEM_DO_NOTHING:
self.setPreferences(False, True, False)
def getPreference(self, settings_key):
strv = self.media_settings.get_strv(settings_key)
return strv != None and self.get_content_type() in strv
def getPreferences(self):
pref_start_app = self.getPreference( PREF_MEDIA_AUTORUN_X_CONTENT_START_APP)
pref_ignore = self.getPreference(PREF_MEDIA_AUTORUN_X_CONTENT_IGNORE)
pref_open_folder = self.getPreference(PREF_MEDIA_AUTORUN_X_CONTENT_OPEN_FOLDER)
return (pref_start_app, pref_ignore, pref_open_folder)
def setPreference(self, pref_value, settings_key):
array = self.media_settings.get_strv(settings_key)
content_type = self.get_content_type()
array = [ v for v in array if v != content_type ]
if pref_value:
array.append(content_type)
self.media_settings.set_strv(settings_key, array)
def setPreferences(self, pref_start_app, pref_ignore, pref_open_folder):
self.setPreference(pref_start_app, PREF_MEDIA_AUTORUN_X_CONTENT_START_APP)
self.setPreference(pref_ignore, PREF_MEDIA_AUTORUN_X_CONTENT_IGNORE)
self.setPreference(pref_open_folder, PREF_MEDIA_AUTORUN_X_CONTENT_OPEN_FOLDER)
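# Sketch of the resulting gsettings state (content type assumed for
# illustration): choosing "Open folder" for "x-content/image-dcf" removes that
# type from the start-app and ignore string arrays and appends it to
# "autorun-x-content-open-folder", which might then hold e.g.
# ['x-content/audio-cdda', 'x-content/image-dcf'].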
class OtherTypeDialog(Gtk.Dialog):
def __init__(self, media_settings):
super(OtherTypeDialog, self).__init__(_("Other Media"),
None,
0,
(_("Close"), Gtk.ResponseType.OK))
self.set_default_size(350, 100)
self.media_settings = media_settings
list_store = Gtk.ListStore(str, str)
list_store.set_sort_column_id (1, Gtk.SortType.ASCENDING)
self.type_combo = Gtk.ComboBox.new_with_model(list_store)
self.application_combo = None
content_types = Gio.content_types_get_registered()
for content_type in content_types:
if self.acceptContentType(content_type):
list_store.append([self.getDescription(content_type), content_type])
renderer = Gtk.CellRendererText()
self.type_combo.pack_start(renderer, True)
self.type_combo.add_attribute (renderer,"text", 0)
self.type_combo.set_active(False)
table = ButtonTable(2)
table.addRow(_("_Type:"), self.type_combo)
self.table = table
self.vbox.pack_start(ColumnBox(_("Select how other media should be handled"), table), True, True, 2)
self.vbox.show()
self.type_combo.connect("changed", self.onTypeComboChanged)
def acceptContentType(self, content_type):
if not content_type.startswith("x-content/"):
return False
for d in removable_media_defs:
if Gio.content_type_is_a(content_type, d[DEF_CONTENT_TYPE]):
return False
return True
def getDescription(self, content_type):
description = None
for d in other_defs:
if content_type == d[DEF_CONTENT_TYPE]:
s = d[DEF_LABEL]
if s == _(s):
description = Gio.content_type_get_description(content_type)
else:
description = s
break
if description == None:
print "Content type '%s' is missing from the info panel" % content_type
return Gio.content_type_get_description(content_type)
return description
def doShow(self, topLevel):
self.set_transient_for(topLevel)
self.set_modal(True)
self.connect("response", self.onResponse)
self.connect("delete-event", self.onDelete)
self.onTypeComboChanged(self.type_combo)
self.present()
self.show_all()
def onDelete(self, *args):
return self.hide_on_delete()
def doHide(self):
self.hide()
if self.application_combo != None:
self.application_combo.destroy()
self.application_combo = None
self.table.forgetRow()
def onResponse(self, dialog, response):
self.doHide()
def onTypeComboChanged(self, type_combo):
iter = type_combo.get_active_iter()
if not iter:
return
model = type_combo.get_model()
if not model:
return
x_content_type = model.get_value(iter, 1)
heading = model.get_value(iter, 0)
action_container = Gtk.HBox()
if self.application_combo != None:
self.application_combo.destroy()
self.table.forgetRow()
self.application_combo = CustomAppChooserButton(self.media_settings, x_content_type, heading)
self.application_combo.show()
self.table.addRow(_("_Action:"), self.application_combo)
class Module:
def __init__(self, content_box):
keywords = _("media, defaults, applications, programs, removable, browser, email, calendar, music, videos, photos, images, cd, autostart")
advanced = False
sidePage = SidePage(_("Applications & Removable Media"), "default-applications.svg", keywords, advanced, content_box)
self.sidePage = sidePage
self.name = "default"
self.category = "prefs"
hbox = Gtk.HBox()
hbox.set_homogeneous(True)
sidePage.add_widget(hbox, False)
hbox.pack_start(self.setupDefaultApps(), False, False, 0)
hbox.pack_start(self.setupMedia(), False, False, 0)
def setupDefaultApps(self):
table = ButtonTable(len(preferred_app_defs))
for d in preferred_app_defs:
table.addRow(d[PREF_LABEL], DefaultAppChooserButton(d[PREF_CONTENT_TYPE], d[PREF_GEN_CONTENT_TYPE]))
return ColumnBox(_("Default Applications"), table)
def onMoreClicked(self, button):
self.other_type_dialog.doShow(button.get_toplevel())
def setupMedia(self):
self.media_settings = Gio.Settings.new(MEDIA_HANDLING_SCHEMA)
self.other_type_dialog = OtherTypeDialog(self.media_settings)
hbox = Gtk.VBox()
hboxToggle = Gtk.VBox()
hbox.add(hboxToggle)
table = ButtonTable(len(removable_media_defs)+1)
hboxToggle.add(table)
for d in removable_media_defs:
table.addRow(d[DEF_LABEL], CustomAppChooserButton(self.media_settings, d[DEF_CONTENT_TYPE], d[DEF_HEADING]))
more = Gtk.Button.new_with_mnemonic(_("_Other Media..."))
more.connect("clicked", self.onMoreClicked)
table.addRow(None, more)
never = Gtk.CheckButton.new_with_mnemonic(_("_Never prompt or start programs on media insertion"))
hbox.add(never)
self.media_settings.bind(PREF_MEDIA_AUTORUN_NEVER, never, "active", Gio.SettingsBindFlags.DEFAULT)
self.media_settings.bind(PREF_MEDIA_AUTORUN_NEVER, hboxToggle, "sensitive", Gio.SettingsBindFlags.INVERT_BOOLEAN)
return ColumnBox(_("Select how media should be handled"), hbox)
| gpl-2.0 |
andrewtholt/My-amforth-6.1 | avr8/devices/at90usb647/device.py | 5 | 13009 | # Partname: AT90USB647
# generated automatically, do not edit
MCUREGS = {
'WDTCSR': '&96',
'WDTCSR_WDIF': '$80',
'WDTCSR_WDIE': '$40',
'WDTCSR_WDP': '$27',
'WDTCSR_WDCE': '$10',
'WDTCSR_WDE': '$08',
'PORTA': '&34',
'DDRA': '&33',
'PINA': '&32',
'PORTB': '&37',
'DDRB': '&36',
'PINB': '&35',
'PORTC': '&40',
'DDRC': '&39',
'PINC': '&38',
'PORTD': '&43',
'DDRD': '&42',
'PIND': '&41',
'PORTE': '&46',
'DDRE': '&45',
'PINE': '&44',
'PORTF': '&49',
'DDRF': '&48',
'PINF': '&47',
'SREG': '&95',
'SREG_I': '$80',
'SREG_T': '$40',
'SREG_H': '$20',
'SREG_S': '$10',
'SREG_V': '$08',
'SREG_N': '$04',
'SREG_Z': '$02',
'SREG_C': '$01',
'SP': '&93',
'MCUCR': '&85',
'MCUCR_JTD': '$80',
'MCUCR_PUD': '$10',
'MCUCR_IVSEL': '$02',
'MCUCR_IVCE': '$01',
'MCUSR': '&84',
'MCUSR_JTRF': '$10',
'MCUSR_WDRF': '$08',
'MCUSR_BORF': '$04',
'MCUSR_EXTRF': '$02',
'MCUSR_PORF': '$01',
'XMCRA': '&116',
'XMCRA_SRE': '$80',
'XMCRA_SRL': '$70',
'XMCRA_SRW1': '$0C',
'XMCRA_SRW0': '$03',
'XMCRB': '&117',
'XMCRB_XMBK': '$80',
'XMCRB_XMM': '$07',
'OSCCAL': '&102',
'CLKPR': '&97',
'CLKPR_CLKPCE': '$80',
'CLKPR_CLKPS': '$0F',
'SMCR': '&83',
'SMCR_SM': '$0E',
'SMCR_SE': '$01',
'EIND': '&92',
'RAMPZ': '&91',
'GPIOR2': '&75',
'GPIOR2_GPIOR': '$FF',
'GPIOR1': '&74',
'GPIOR1_GPIOR': '$FF',
'GPIOR0': '&62',
'GPIOR0_GPIOR07': '$80',
'GPIOR0_GPIOR06': '$40',
'GPIOR0_GPIOR05': '$20',
'GPIOR0_GPIOR04': '$10',
'GPIOR0_GPIOR03': '$08',
'GPIOR0_GPIOR02': '$04',
'GPIOR0_GPIOR01': '$02',
'GPIOR0_GPIOR00': '$01',
'PRR1': '&101',
'PRR1_PRUSB': '$80',
'PRR1_PRTIM3': '$08',
'PRR1_PRUSART1': '$01',
'PRR0': '&100',
'PRR0_PRTWI': '$80',
'PRR0_PRTIM2': '$40',
'PRR0_PRTIM0': '$20',
'PRR0_PRTIM1': '$08',
'PRR0_PRSPI': '$04',
'PRR0_PRADC': '$01',
'TWAMR': '&189',
'TWAMR_TWAM': '$FE',
'TWBR': '&184',
'TWCR': '&188',
'TWCR_TWINT': '$80',
'TWCR_TWEA': '$40',
'TWCR_TWSTA': '$20',
'TWCR_TWSTO': '$10',
'TWCR_TWWC': '$08',
'TWCR_TWEN': '$04',
'TWCR_TWIE': '$01',
'TWSR': '&185',
'TWSR_TWS': '$F8',
'TWSR_TWPS': '$03',
'TWDR': '&187',
'TWAR': '&186',
'TWAR_TWA': '$FE',
'TWAR_TWGCE': '$01',
'SPCR': '&76',
'SPCR_SPIE': '$80',
'SPCR_SPE': '$40',
'SPCR_DORD': '$20',
'SPCR_MSTR': '$10',
'SPCR_CPOL': '$08',
'SPCR_CPHA': '$04',
'SPCR_SPR': '$03',
'SPSR': '&77',
'SPSR_SPIF': '$80',
'SPSR_WCOL': '$40',
'SPSR_SPI2X': '$01',
'SPDR': '&78',
'UDR1': '&206',
'UCSR1A': '&200',
'UCSR1A_RXC1': '$80',
'UCSR1A_TXC1': '$40',
'UCSR1A_UDRE1': '$20',
'UCSR1A_FE1': '$10',
'UCSR1A_DOR1': '$08',
'UCSR1A_UPE1': '$04',
'UCSR1A_U2X1': '$02',
'UCSR1A_MPCM1': '$01',
'UCSR1B': '&201',
'UCSR1B_RXCIE1': '$80',
'UCSR1B_TXCIE1': '$40',
'UCSR1B_UDRIE1': '$20',
'UCSR1B_RXEN1': '$10',
'UCSR1B_TXEN1': '$08',
'UCSR1B_UCSZ12': '$04',
'UCSR1B_RXB81': '$02',
'UCSR1B_TXB81': '$01',
'UCSR1C': '&202',
'UCSR1C_UMSEL1': '$C0',
'UCSR1C_UPM1': '$30',
'UCSR1C_USBS1': '$08',
'UCSR1C_UCSZ1': '$06',
'UCSR1C_UCPOL1': '$01',
'UBRR1': '&204',
'UEINT': '&244',
'UEBCHX': '&243',
'UEBCLX': '&242',
'UEDATX': '&241',
'UEIENX': '&240',
'UEIENX_FLERRE': '$80',
'UEIENX_NAKINE': '$40',
'UEIENX_NAKOUTE': '$10',
'UEIENX_RXSTPE': '$08',
'UEIENX_RXOUTE': '$04',
'UEIENX_STALLEDE': '$02',
'UEIENX_TXINE': '$01',
'UESTA1X': '&239',
'UESTA1X_CTRLDIR': '$04',
'UESTA1X_CURRBK': '$03',
'UESTA0X': '&238',
'UESTA0X_CFGOK': '$80',
'UESTA0X_OVERFI': '$40',
'UESTA0X_UNDERFI': '$20',
'UESTA0X_DTSEQ': '$0C',
'UESTA0X_NBUSYBK': '$03',
'UECFG1X': '&237',
'UECFG1X_EPSIZE': '$70',
'UECFG1X_EPBK': '$0C',
'UECFG1X_ALLOC': '$02',
'UECFG0X': '&236',
'UECFG0X_EPTYPE': '$C0',
'UECFG0X_EPDIR': '$01',
'UECONX': '&235',
'UECONX_STALLRQ': '$20',
'UECONX_STALLRQC': '$10',
'UECONX_RSTDT': '$08',
'UECONX_EPEN': '$01',
'UERST': '&234',
'UERST_EPRST': '$7F',
'UENUM': '&233',
'UEINTX': '&232',
'UEINTX_FIFOCON': '$80',
'UEINTX_NAKINI': '$40',
'UEINTX_RWAL': '$20',
'UEINTX_NAKOUTI': '$10',
'UEINTX_RXSTPI': '$08',
'UEINTX_RXOUTI': '$04',
'UEINTX_STALLEDI': '$02',
'UEINTX_TXINI': '$01',
'UDMFN': '&230',
'UDMFN_FNCERR': '$10',
'UDFNUM': '&228',
'UDADDR': '&227',
'UDADDR_ADDEN': '$80',
'UDADDR_UADD': '$7F',
'UDIEN': '&226',
'UDIEN_UPRSME': '$40',
'UDIEN_EORSME': '$20',
'UDIEN_WAKEUPE': '$10',
'UDIEN_EORSTE': '$08',
'UDIEN_SOFE': '$04',
'UDIEN_SUSPE': '$01',
'UDINT': '&225',
'UDINT_UPRSMI': '$40',
'UDINT_EORSMI': '$20',
'UDINT_WAKEUPI': '$10',
'UDINT_EORSTI': '$08',
'UDINT_SOFI': '$04',
'UDINT_SUSPI': '$01',
'UDCON': '&224',
'UDCON_LSM': '$04',
'UDCON_RMWKUP': '$02',
'UDCON_DETACH': '$01',
'OTGINT': '&223',
'OTGINT_STOI': '$20',
'OTGINT_HNPERRI': '$10',
'OTGINT_ROLEEXI': '$08',
'OTGINT_BCERRI': '$04',
'OTGINT_VBERRI': '$02',
'OTGINT_SRPI': '$01',
'OTGIEN': '&222',
'OTGIEN_STOE': '$20',
'OTGIEN_HNPERRE': '$10',
'OTGIEN_ROLEEXE': '$08',
'OTGIEN_BCERRE': '$04',
'OTGIEN_VBERRE': '$02',
'OTGIEN_SRPE': '$01',
'OTGCON': '&221',
'OTGCON_HNPREQ': '$20',
'OTGCON_SRPREQ': '$10',
'OTGCON_SRPSEL': '$08',
'OTGCON_VBUSHWC': '$04',
'OTGCON_VBUSREQ': '$02',
'OTGCON_VBUSRQC': '$01',
'OTGTCON': '&249',
'OTGTCON_OTGTCON_7': '$80',
'OTGTCON_PAGE': '$60',
'OTGTCON_VALUE_2': '$07',
'USBINT': '&218',
'USBINT_IDTI': '$02',
'USBINT_VBUSTI': '$01',
'USBSTA': '&217',
'USBSTA_SPEED': '$08',
'USBSTA_ID': '$02',
'USBSTA_VBUS': '$01',
'USBCON': '&216',
'USBCON_USBE': '$80',
'USBCON_HOST': '$40',
'USBCON_FRZCLK': '$20',
'USBCON_OTGPADE': '$10',
'USBCON_IDTE': '$02',
'USBCON_VBUSTE': '$01',
'UHWCON': '&215',
'UHWCON_UIMOD': '$80',
'UHWCON_UIDE': '$40',
'UHWCON_UVCONE': '$10',
'UHWCON_UVREGE': '$01',
'UPERRX': '&245',
'UPERRX_COUNTER': '$60',
'UPERRX_CRC16': '$10',
'UPERRX_TIMEOUT': '$08',
'UPERRX_PID': '$04',
'UPERRX_DATAPID': '$02',
'UPERRX_DATATGL': '$01',
'UPINT': '&248',
'UPBCHX': '&247',
'UPBCLX': '&246',
'UPDATX': '&175',
'UPIENX': '&174',
'UPIENX_FLERRE': '$80',
'UPIENX_NAKEDE': '$40',
'UPIENX_PERRE': '$10',
'UPIENX_TXSTPE': '$08',
'UPIENX_TXOUTE': '$04',
'UPIENX_RXSTALLE': '$02',
'UPIENX_RXINE': '$01',
'UPCFG2X': '&173',
'UPSTAX': '&172',
'UPSTAX_CFGOK': '$80',
'UPSTAX_OVERFI': '$40',
'UPSTAX_UNDERFI': '$20',
'UPSTAX_DTSEQ': '$0C',
'UPSTAX_NBUSYK': '$03',
'UPCFG1X': '&171',
'UPCFG1X_PSIZE': '$70',
'UPCFG1X_PBK': '$0C',
'UPCFG1X_ALLOC': '$02',
'UPCFG0X': '&170',
'UPCFG0X_PTYPE': '$C0',
'UPCFG0X_PTOKEN': '$30',
'UPCFG0X_PEPNUM': '$0F',
'UPCONX': '&169',
'UPCONX_PFREEZE': '$40',
'UPCONX_INMODE': '$20',
'UPCONX_RSTDT': '$08',
'UPCONX_PEN': '$01',
'UPRST': '&168',
'UPRST_PRST': '$7F',
'UPNUM': '&167',
'UPINTX': '&166',
'UPINTX_FIFOCON': '$80',
'UPINTX_NAKEDI': '$40',
'UPINTX_RWAL': '$20',
'UPINTX_PERRI': '$10',
'UPINTX_TXSTPI': '$08',
'UPINTX_TXOUTI': '$04',
'UPINTX_RXSTALLI': '$02',
'UPINTX_RXINI': '$01',
'UPINRQX': '&165',
'UHFLEN': '&164',
'UHFNUM': '&162',
'UHADDR': '&161',
'UHIEN': '&160',
'UHIEN_HWUPE': '$40',
'UHIEN_HSOFE': '$20',
'UHIEN_RXRSME': '$10',
'UHIEN_RSMEDE': '$08',
'UHIEN_RSTE': '$04',
'UHIEN_DDISCE': '$02',
'UHIEN_DCONNE': '$01',
'UHINT': '&159',
'UHINT_UHUPI': '$40',
'UHINT_HSOFI': '$20',
'UHINT_RXRSMI': '$10',
'UHINT_RSMEDI': '$08',
'UHINT_RSTI': '$04',
'UHINT_DDISCI': '$02',
'UHINT_DCONNI': '$01',
'UHCON': '&158',
'UHCON_RESUME': '$04',
'UHCON_RESET': '$02',
'UHCON_SOFEN': '$01',
'SPMCSR': '&87',
'SPMCSR_SPMIE': '$80',
'SPMCSR_RWWSB': '$40',
'SPMCSR_SIGRD': '$20',
'SPMCSR_RWWSRE': '$10',
'SPMCSR_BLBSET': '$08',
'SPMCSR_PGWRT': '$04',
'SPMCSR_PGERS': '$02',
'SPMCSR_SPMEN': '$01',
'EEAR': '&65',
'EEDR': '&64',
'EECR': '&63',
'EECR_EEPM': '$30',
'EECR_EERIE': '$08',
'EECR_EEMPE': '$04',
'EECR_EEPE': '$02',
'EECR_EERE': '$01',
'OCR0B': '&72',
'OCR0A': '&71',
'TCNT0': '&70',
'TCCR0B': '&69',
'TCCR0B_FOC0A': '$80',
'TCCR0B_FOC0B': '$40',
'TCCR0B_WGM02': '$08',
'TCCR0B_CS0': '$07',
'TCCR0A': '&68',
'TCCR0A_COM0A': '$C0',
'TCCR0A_COM0B': '$30',
'TCCR0A_WGM0': '$03',
'TIMSK0': '&110',
'TIMSK0_OCIE0B': '$04',
'TIMSK0_OCIE0A': '$02',
'TIMSK0_TOIE0': '$01',
'TIFR0': '&53',
'TIFR0_OCF0B': '$04',
'TIFR0_OCF0A': '$02',
'TIFR0_TOV0': '$01',
'GTCCR': '&67',
'GTCCR_TSM': '$80',
'GTCCR_PSRSYNC': '$01',
'TIMSK2': '&112',
'TIMSK2_OCIE2B': '$04',
'TIMSK2_OCIE2A': '$02',
'TIMSK2_TOIE2': '$01',
'TIFR2': '&55',
'TIFR2_OCF2B': '$04',
'TIFR2_OCF2A': '$02',
'TIFR2_TOV2': '$01',
'TCCR2A': '&176',
'TCCR2A_COM2A': '$C0',
'TCCR2A_COM2B': '$30',
'TCCR2A_WGM2': '$03',
'TCCR2B': '&177',
'TCCR2B_FOC2A': '$80',
'TCCR2B_FOC2B': '$40',
'TCCR2B_WGM22': '$08',
'TCCR2B_CS2': '$07',
'TCNT2': '&178',
'OCR2B': '&180',
'OCR2A': '&179',
'ASSR': '&182',
'ASSR_EXCLK': '$40',
'ASSR_AS2': '$20',
'ASSR_TCN2UB': '$10',
'ASSR_OCR2AUB': '$08',
'ASSR_OCR2BUB': '$04',
'ASSR_TCR2AUB': '$02',
'ASSR_TCR2BUB': '$01',
'TCCR3A': '&144',
'TCCR3A_COM3A': '$C0',
'TCCR3A_COM3B': '$30',
'TCCR3A_COM3C': '$0C',
'TCCR3A_WGM3': '$03',
'TCCR3B': '&145',
'TCCR3B_ICNC3': '$80',
'TCCR3B_ICES3': '$40',
'TCCR3B_WGM3': '$18',
'TCCR3B_CS3': '$07',
'TCCR3C': '&146',
'TCCR3C_FOC3A': '$80',
'TCCR3C_FOC3B': '$40',
'TCCR3C_FOC3C': '$20',
'TCNT3': '&148',
'OCR3A': '&152',
'OCR3B': '&154',
'OCR3C': '&156',
'ICR3': '&150',
'TIMSK3': '&113',
'TIMSK3_ICIE3': '$20',
'TIMSK3_OCIE3C': '$08',
'TIMSK3_OCIE3B': '$04',
'TIMSK3_OCIE3A': '$02',
'TIMSK3_TOIE3': '$01',
'TIFR3': '&56',
'TIFR3_ICF3': '$20',
'TIFR3_OCF3C': '$08',
'TIFR3_OCF3B': '$04',
'TIFR3_OCF3A': '$02',
'TIFR3_TOV3': '$01',
'TCCR1A': '&128',
'TCCR1A_COM1A': '$C0',
'TCCR1A_COM1B': '$30',
'TCCR1A_COM1C': '$0C',
'TCCR1A_WGM1': '$03',
'TCCR1B': '&129',
'TCCR1B_ICNC1': '$80',
'TCCR1B_ICES1': '$40',
'TCCR1B_WGM1': '$18',
'TCCR1B_CS1': '$07',
'TCCR1C': '&130',
'TCCR1C_FOC1A': '$80',
'TCCR1C_FOC1B': '$40',
'TCCR1C_FOC1C': '$20',
'TCNT1': '&132',
'OCR1A': '&136',
'OCR1B': '&138',
'OCR1C': '&140',
'ICR1': '&134',
'TIMSK1': '&111',
'TIMSK1_ICIE1': '$20',
'TIMSK1_OCIE1C': '$08',
'TIMSK1_OCIE1B': '$04',
'TIMSK1_OCIE1A': '$02',
'TIMSK1_TOIE1': '$01',
'TIFR1': '&54',
'TIFR1_ICF1': '$20',
'TIFR1_OCF1C': '$08',
'TIFR1_OCF1B': '$04',
'TIFR1_OCF1A': '$02',
'TIFR1_TOV1': '$01',
'OCDR': '&81',
'EICRA': '&105',
'EICRA_ISC3': '$C0',
'EICRA_ISC2': '$30',
'EICRA_ISC1': '$0C',
'EICRA_ISC0': '$03',
'EICRB': '&106',
'EICRB_ISC7': '$C0',
'EICRB_ISC6': '$30',
'EICRB_ISC5': '$0C',
'EICRB_ISC4': '$03',
'EIMSK': '&61',
'EIMSK_INT': '$FF',
'EIFR': '&60',
'EIFR_INTF': '$FF',
'PCMSK0': '&107',
'PCIFR': '&59',
'PCIFR_PCIF0': '$01',
'PCICR': '&104',
'PCICR_PCIE0': '$01',
'ADMUX': '&124',
'ADMUX_REFS': '$C0',
'ADMUX_ADLAR': '$20',
'ADMUX_MUX': '$1F',
'ADCSRA': '&122',
'ADCSRA_ADEN': '$80',
'ADCSRA_ADSC': '$40',
'ADCSRA_ADATE': '$20',
'ADCSRA_ADIF': '$10',
'ADCSRA_ADIE': '$08',
'ADCSRA_ADPS': '$07',
'ADC': '&120',
'ADCSRB': '&123',
'ADCSRB_ADHSM': '$80',
'ADCSRB_ADTS': '$07',
'DIDR0': '&126',
'DIDR0_ADC7D': '$80',
'DIDR0_ADC6D': '$40',
'DIDR0_ADC5D': '$20',
'DIDR0_ADC4D': '$10',
'DIDR0_ADC3D': '$08',
'DIDR0_ADC2D': '$04',
'DIDR0_ADC1D': '$02',
'DIDR0_ADC0D': '$01',
'ACSR': '&80',
'ACSR_ACD': '$80',
'ACSR_ACBG': '$40',
'ACSR_ACO': '$20',
'ACSR_ACI': '$10',
'ACSR_ACIE': '$08',
'ACSR_ACIC': '$04',
'ACSR_ACIS': '$03',
'DIDR1': '&127',
'DIDR1_AIN1D': '$02',
'DIDR1_AIN0D': '$01',
'PLLCSR': '&73',
'PLLCSR_PLLP': '$1C',
'PLLCSR_PLLE': '$02',
'PLLCSR_PLOCK': '$01',
'INT0Addr': '2',
'INT1Addr': '4',
'INT2Addr': '6',
'INT3Addr': '8',
'INT4Addr': '10',
'INT5Addr': '12',
'INT6Addr': '14',
'INT7Addr': '16',
'PCINT0Addr': '18',
'USB_GENAddr': '20',
'USB_COMAddr': '22',
'WDTAddr': '24',
'TIMER2_COMPAAddr': '26',
'TIMER2_COMPBAddr': '28',
'TIMER2_OVFAddr': '30',
'TIMER1_CAPTAddr': '32',
'TIMER1_COMPAAddr': '34',
'TIMER1_COMPBAddr': '36',
'TIMER1_COMPCAddr': '38',
'TIMER1_OVFAddr': '40',
'TIMER0_COMPAAddr': '42',
'TIMER0_COMPBAddr': '44',
'TIMER0_OVFAddr': '46',
'SPI__STCAddr': '48',
'USART1__RXAddr': '50',
'USART1__UDREAddr': '52',
'USART1__TXAddr': '54',
'ANALOG_COMPAddr': '56',
'ADCAddr': '58',
'EE_READYAddr': '60',
'TIMER3_CAPTAddr': '62',
'TIMER3_COMPAAddr': '64',
'TIMER3_COMPBAddr': '66',
'TIMER3_COMPCAddr': '68',
'TIMER3_OVFAddr': '70',
'TWIAddr': '72',
'SPM_READYAddr': '74'
} | gpl-3.0 |
bugzPDX/airmozilla | airmozilla/manage/views/dashboard.py | 1 | 5401 | import datetime
from django.contrib.auth.models import User
from django.shortcuts import render
from django.utils import timezone
from django.db.models import Sum
from jsonview.decorators import json_view
from airmozilla.main.models import (
Event,
SuggestedEvent,
Picture,
EventRevision,
)
from airmozilla.comments.models import Comment
from .decorators import staff_required
@staff_required
def dashboard(request):
"""Management home / explanation page."""
return render(request, 'manage/dashboard.html')
@staff_required
@json_view
def dashboard_data(request):
context = {}
now = timezone.now()
today = now.replace(hour=0, minute=0, second=0, microsecond=0)
tomorrow = today + datetime.timedelta(days=1)
yesterday = today - datetime.timedelta(days=1)
this_week = today - datetime.timedelta(days=today.weekday())
next_week = this_week + datetime.timedelta(days=7)
last_week = this_week - datetime.timedelta(days=7)
this_month = today.replace(day=1)
next_month = this_month
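    # Step forward one day at a time until the month changes; the result is the first day of next month.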
while next_month.month == this_month.month:
next_month += datetime.timedelta(days=1)
last_month = (this_month - datetime.timedelta(days=1)).replace(day=1)
this_year = this_month.replace(month=1)
next_year = this_year.replace(year=this_year.year + 1)
last_year = this_year.replace(year=this_year.year - 1)
context['groups'] = []
def get_counts(qs, key):
counts = {}
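        # Count rows in each calendar bucket by filtering the queryset on the given datetime field.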
def make_filter(gte=None, lt=None):
filter = {}
if gte is not None:
filter['%s__gte' % key] = gte
if lt is not None:
filter['%s__lt' % key] = lt
return filter
counts['today'] = qs.filter(
**make_filter(gte=today, lt=tomorrow)
).count()
counts['yesterday'] = qs.filter(
**make_filter(gte=yesterday, lt=today)).count()
counts['this_week'] = qs.filter(
**make_filter(gte=this_week, lt=next_week)).count()
counts['last_week'] = qs.filter(
**make_filter(gte=last_week, lt=this_week)).count()
counts['this_month'] = qs.filter(
**make_filter(gte=this_month, lt=next_month)).count()
counts['last_month'] = qs.filter(
**make_filter(gte=last_month, lt=this_month)).count()
counts['this_year'] = qs.filter(
**make_filter(gte=this_year, lt=next_year)).count()
counts['last_year'] = qs.filter(
**make_filter(gte=last_year, lt=this_year)).count()
counts['ever'] = qs.count()
return counts
# Events
events = Event.objects.exclude(status=Event.STATUS_REMOVED)
counts = get_counts(events, 'start_time')
context['groups'].append({
'name': 'New Events',
'counts': counts
})
# Suggested Events
counts = get_counts(SuggestedEvent.objects.all(), 'created')
context['groups'].append({
'name': 'Requested Events',
'counts': counts
})
# Users
counts = get_counts(User.objects.all(), 'date_joined')
context['groups'].append({
'name': 'New Users',
'counts': counts
})
# Comments
counts = get_counts(Comment.objects.all(), 'created')
context['groups'].append({
'name': 'Comments',
'counts': counts
})
# Event revisions
counts = get_counts(EventRevision.objects.all(), 'created')
context['groups'].append({
'name': 'Event Revisions',
'counts': counts
})
# Pictures
counts = get_counts(Picture.objects.all(), 'created')
context['groups'].append({
'name': 'Pictures',
'counts': counts
})
def get_duration_totals(qs):
key = 'start_time'
def make_filter(gte=None, lt=None):
filter = {}
if gte is not None:
filter['%s__gte' % key] = gte
if lt is not None:
filter['%s__lt' % key] = lt
return filter
counts = {}
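        # Sum the duration column (stored in seconds) and render it as hours, minutes or seconds.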
def sum(elements):
seconds = elements.aggregate(Sum('duration'))['duration__sum']
seconds = seconds or 0 # in case it's None
minutes = seconds / 60
hours = minutes / 60
if hours > 1:
return "%dh" % hours
elif minutes > 1:
return "%dm" % minutes
return "%ds" % seconds
counts['today'] = sum(qs.filter(**make_filter(gte=today)))
counts['yesterday'] = sum(qs.filter(
**make_filter(gte=yesterday, lt=today)))
counts['this_week'] = sum(qs.filter(**make_filter(gte=this_week)))
counts['last_week'] = sum(qs.filter(
**make_filter(gte=last_week, lt=this_week)))
counts['this_month'] = sum(qs.filter(**make_filter(gte=this_month)))
counts['last_month'] = sum(qs.filter(
**make_filter(gte=last_month, lt=this_month)))
counts['this_year'] = sum(qs.filter(**make_filter(gte=this_year)))
counts['last_year'] = sum(qs.filter(
**make_filter(gte=last_year, lt=this_year)))
counts['ever'] = sum(qs)
return counts
# Exceptional
counts = get_duration_totals(Event.objects.exclude(duration__isnull=True))
context['groups'].append({
'name': 'Total Event Durations',
'counts': counts
})
return context
| bsd-3-clause |
jakevdp/scipy | scipy/signal/tests/test_cont2discrete.py | 38 | 12406 | from __future__ import division, print_function, absolute_import
import numpy as np
from numpy.testing import TestCase, run_module_suite, \
assert_array_almost_equal, assert_almost_equal, \
assert_allclose
import warnings
from scipy.signal import cont2discrete as c2d
from scipy.signal import dlsim, ss2tf, ss2zpk, lsim2, lti
# Author: Jeffrey Armstrong <jeff@approximatrix.com>
# March 29, 2011
class TestC2D(TestCase):
def test_zoh(self):
ac = np.eye(2)
bc = 0.5 * np.ones((2, 1))
cc = np.array([[0.75, 1.0], [1.0, 1.0], [1.0, 0.25]])
dc = np.array([[0.0], [0.0], [-0.33]])
ad_truth = 1.648721270700128 * np.eye(2)
bd_truth = 0.324360635350064 * np.ones((2, 1))
# c and d in discrete should be equal to their continuous counterparts
dt_requested = 0.5
ad, bd, cd, dd, dt = c2d((ac, bc, cc, dc), dt_requested, method='zoh')
assert_array_almost_equal(ad_truth, ad)
assert_array_almost_equal(bd_truth, bd)
assert_array_almost_equal(cc, cd)
assert_array_almost_equal(dc, dd)
assert_almost_equal(dt_requested, dt)
def test_gbt(self):
ac = np.eye(2)
bc = 0.5 * np.ones((2, 1))
cc = np.array([[0.75, 1.0], [1.0, 1.0], [1.0, 0.25]])
dc = np.array([[0.0], [0.0], [-0.33]])
dt_requested = 0.5
alpha = 1.0 / 3.0
ad_truth = 1.6 * np.eye(2)
bd_truth = 0.3 * np.ones((2, 1))
cd_truth = np.array([[0.9, 1.2],
[1.2, 1.2],
[1.2, 0.3]])
dd_truth = np.array([[0.175],
[0.2],
[-0.205]])
ad, bd, cd, dd, dt = c2d((ac, bc, cc, dc), dt_requested,
method='gbt', alpha=alpha)
assert_array_almost_equal(ad_truth, ad)
assert_array_almost_equal(bd_truth, bd)
assert_array_almost_equal(cd_truth, cd)
assert_array_almost_equal(dd_truth, dd)
def test_euler(self):
ac = np.eye(2)
bc = 0.5 * np.ones((2, 1))
cc = np.array([[0.75, 1.0], [1.0, 1.0], [1.0, 0.25]])
dc = np.array([[0.0], [0.0], [-0.33]])
dt_requested = 0.5
ad_truth = 1.5 * np.eye(2)
bd_truth = 0.25 * np.ones((2, 1))
cd_truth = np.array([[0.75, 1.0],
[1.0, 1.0],
[1.0, 0.25]])
dd_truth = dc
ad, bd, cd, dd, dt = c2d((ac, bc, cc, dc), dt_requested,
method='euler')
assert_array_almost_equal(ad_truth, ad)
assert_array_almost_equal(bd_truth, bd)
assert_array_almost_equal(cd_truth, cd)
assert_array_almost_equal(dd_truth, dd)
assert_almost_equal(dt_requested, dt)
def test_backward_diff(self):
ac = np.eye(2)
bc = 0.5 * np.ones((2, 1))
cc = np.array([[0.75, 1.0], [1.0, 1.0], [1.0, 0.25]])
dc = np.array([[0.0], [0.0], [-0.33]])
dt_requested = 0.5
ad_truth = 2.0 * np.eye(2)
bd_truth = 0.5 * np.ones((2, 1))
cd_truth = np.array([[1.5, 2.0],
[2.0, 2.0],
[2.0, 0.5]])
dd_truth = np.array([[0.875],
[1.0],
[0.295]])
ad, bd, cd, dd, dt = c2d((ac, bc, cc, dc), dt_requested,
method='backward_diff')
assert_array_almost_equal(ad_truth, ad)
assert_array_almost_equal(bd_truth, bd)
assert_array_almost_equal(cd_truth, cd)
assert_array_almost_equal(dd_truth, dd)
def test_bilinear(self):
ac = np.eye(2)
bc = 0.5 * np.ones((2, 1))
cc = np.array([[0.75, 1.0], [1.0, 1.0], [1.0, 0.25]])
dc = np.array([[0.0], [0.0], [-0.33]])
dt_requested = 0.5
ad_truth = (5.0 / 3.0) * np.eye(2)
bd_truth = (1.0 / 3.0) * np.ones((2, 1))
cd_truth = np.array([[1.0, 4.0 / 3.0],
[4.0 / 3.0, 4.0 / 3.0],
[4.0 / 3.0, 1.0 / 3.0]])
dd_truth = np.array([[0.291666666666667],
[1.0 / 3.0],
[-0.121666666666667]])
ad, bd, cd, dd, dt = c2d((ac, bc, cc, dc), dt_requested,
method='bilinear')
assert_array_almost_equal(ad_truth, ad)
assert_array_almost_equal(bd_truth, bd)
assert_array_almost_equal(cd_truth, cd)
assert_array_almost_equal(dd_truth, dd)
assert_almost_equal(dt_requested, dt)
# Same continuous system again, but change sampling rate
ad_truth = 1.4 * np.eye(2)
bd_truth = 0.2 * np.ones((2, 1))
cd_truth = np.array([[0.9, 1.2], [1.2, 1.2], [1.2, 0.3]])
dd_truth = np.array([[0.175], [0.2], [-0.205]])
dt_requested = 1.0 / 3.0
ad, bd, cd, dd, dt = c2d((ac, bc, cc, dc), dt_requested,
method='bilinear')
assert_array_almost_equal(ad_truth, ad)
assert_array_almost_equal(bd_truth, bd)
assert_array_almost_equal(cd_truth, cd)
assert_array_almost_equal(dd_truth, dd)
assert_almost_equal(dt_requested, dt)
def test_transferfunction(self):
numc = np.array([0.25, 0.25, 0.5])
denc = np.array([0.75, 0.75, 1.0])
numd = np.array([[1.0 / 3.0, -0.427419169438754, 0.221654141101125]])
dend = np.array([1.0, -1.351394049721225, 0.606530659712634])
dt_requested = 0.5
num, den, dt = c2d((numc, denc), dt_requested, method='zoh')
assert_array_almost_equal(numd, num)
assert_array_almost_equal(dend, den)
assert_almost_equal(dt_requested, dt)
def test_zerospolesgain(self):
zeros_c = np.array([0.5, -0.5])
poles_c = np.array([1.j / np.sqrt(2), -1.j / np.sqrt(2)])
k_c = 1.0
zeros_d = [1.23371727305860, 0.735356894461267]
        poles_d = [0.938148335039729 + 0.346233593780536j,
                   0.938148335039729 - 0.346233593780536j]
k_d = 1.0
dt_requested = 0.5
zeros, poles, k, dt = c2d((zeros_c, poles_c, k_c), dt_requested,
method='zoh')
assert_array_almost_equal(zeros_d, zeros)
        assert_array_almost_equal(poles_d, poles)
assert_almost_equal(k_d, k)
assert_almost_equal(dt_requested, dt)
def test_gbt_with_sio_tf_and_zpk(self):
"""Test method='gbt' with alpha=0.25 for tf and zpk cases."""
# State space coefficients for the continuous SIO system.
A = -1.0
B = 1.0
C = 1.0
D = 0.5
# The continuous transfer function coefficients.
cnum, cden = ss2tf(A, B, C, D)
# Continuous zpk representation
cz, cp, ck = ss2zpk(A, B, C, D)
h = 1.0
alpha = 0.25
# Explicit formulas, in the scalar case.
Ad = (1 + (1 - alpha) * h * A) / (1 - alpha * h * A)
Bd = h * B / (1 - alpha * h * A)
Cd = C / (1 - alpha * h * A)
Dd = D + alpha * C * Bd
# Convert the explicit solution to tf
dnum, dden = ss2tf(Ad, Bd, Cd, Dd)
# Compute the discrete tf using cont2discrete.
c2dnum, c2dden, dt = c2d((cnum, cden), h, method='gbt', alpha=alpha)
assert_allclose(dnum, c2dnum)
assert_allclose(dden, c2dden)
# Convert explicit solution to zpk.
dz, dp, dk = ss2zpk(Ad, Bd, Cd, Dd)
# Compute the discrete zpk using cont2discrete.
c2dz, c2dp, c2dk, dt = c2d((cz, cp, ck), h, method='gbt', alpha=alpha)
assert_allclose(dz, c2dz)
assert_allclose(dp, c2dp)
assert_allclose(dk, c2dk)
def test_discrete_approx(self):
"""
Test that the solution to the discrete approximation of a continuous
system actually approximates the solution to the continuous system.
This is an indirect test of the correctness of the implementation
of cont2discrete.
"""
def u(t):
return np.sin(2.5 * t)
a = np.array([[-0.01]])
b = np.array([[1.0]])
c = np.array([[1.0]])
d = np.array([[0.2]])
x0 = 1.0
t = np.linspace(0, 10.0, 101)
dt = t[1] - t[0]
u1 = u(t)
# Use lsim2 to compute the solution to the continuous system.
t, yout, xout = lsim2((a, b, c, d), T=t, U=u1, X0=x0,
rtol=1e-9, atol=1e-11)
# Convert the continuous system to a discrete approximation.
dsys = c2d((a, b, c, d), dt, method='bilinear')
# Use dlsim with the pairwise averaged input to compute the output
# of the discrete system.
u2 = 0.5 * (u1[:-1] + u1[1:])
t2 = t[:-1]
td2, yd2, xd2 = dlsim(dsys, u=u2.reshape(-1, 1), t=t2, x0=x0)
# ymid is the average of consecutive terms of the "exact" output
# computed by lsim2. This is what the discrete approximation
# actually approximates.
ymid = 0.5 * (yout[:-1] + yout[1:])
assert_allclose(yd2.ravel(), ymid, rtol=1e-4)
def test_simo_tf(self):
# See gh-5753
tf = ([[1, 0], [1, 1]], [1, 1])
num, den, dt = c2d(tf, 0.01)
self.assertEqual(dt, 0.01) # sanity check
assert_allclose(den, [1, -0.990404983], rtol=1e-3)
assert_allclose(num, [[1, -1], [1, -0.99004983]], rtol=1e-3)
def test_multioutput(self):
ts = 0.01 # time step
tf = ([[1, -3], [1, 5]], [1, 1])
num, den, dt = c2d(tf, ts)
tf1 = (tf[0][0], tf[1])
num1, den1, dt1 = c2d(tf1, ts)
tf2 = (tf[0][1], tf[1])
num2, den2, dt2 = c2d(tf2, ts)
# Sanity checks
self.assertEqual(dt, dt1)
self.assertEqual(dt, dt2)
# Check that we get the same results
assert_allclose(num, np.vstack((num1, num2)), rtol=1e-13)
# Single input, so the denominator should
# not be multidimensional like the numerator
assert_allclose(den, den1, rtol=1e-13)
assert_allclose(den, den2, rtol=1e-13)
class TestC2dLti(TestCase):
def test_c2d_ss(self):
# StateSpace
A = np.array([[-0.3, 0.1], [0.2, -0.7]])
B = np.array([[0], [1]])
C = np.array([[1, 0]])
D = 0
A_res = np.array([[0.985136404135682, 0.004876671474795],
[0.009753342949590, 0.965629718236502]])
B_res = np.array([[0.000122937599964], [0.049135527547844]])
sys_ssc = lti(A, B, C, D)
sys_ssd = sys_ssc.to_discrete(0.05)
assert_allclose(sys_ssd.A, A_res)
assert_allclose(sys_ssd.B, B_res)
assert_allclose(sys_ssd.C, C)
assert_allclose(sys_ssd.D, D)
def test_c2d_tf(self):
sys = lti([0.5, 0.3], [1.0, 0.4])
sys = sys.to_discrete(0.005)
# Matlab results
num_res = np.array([0.5, -0.485149004980066])
den_res = np.array([1.0, -0.980198673306755])
# Somehow a lot of numerical errors
assert_allclose(sys.den, den_res, atol=0.02)
assert_allclose(sys.num, num_res, atol=0.02)
class TestC2dLti(TestCase):
def test_c2d_ss(self):
# StateSpace
A = np.array([[-0.3, 0.1], [0.2, -0.7]])
B = np.array([[0], [1]])
C = np.array([[1, 0]])
D = 0
A_res = np.array([[0.985136404135682, 0.004876671474795],
[0.009753342949590, 0.965629718236502]])
B_res = np.array([[0.000122937599964], [0.049135527547844]])
sys_ssc = lti(A, B, C, D)
sys_ssd = sys_ssc.to_discrete(0.05)
assert_allclose(sys_ssd.A, A_res)
assert_allclose(sys_ssd.B, B_res)
assert_allclose(sys_ssd.C, C)
assert_allclose(sys_ssd.D, D)
def test_c2d_tf(self):
sys = lti([0.5, 0.3], [1.0, 0.4])
sys = sys.to_discrete(0.005)
# Matlab results
num_res = np.array([0.5, -0.485149004980066])
den_res = np.array([1.0, -0.980198673306755])
# Somehow a lot of numerical errors
assert_allclose(sys.den, den_res, atol=0.02)
assert_allclose(sys.num, num_res, atol=0.02)
if __name__ == "__main__":
run_module_suite()
| bsd-3-clause |
cesarmarinhorj/phantomjs | src/qt/qtwebkit/Tools/Scripts/webkitpy/tool/bot/stylequeuetask.py | 127 | 2897 | # Copyright (c) 2012 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from webkitpy.tool.bot.patchanalysistask import PatchAnalysisTask, PatchAnalysisTaskDelegate, UnableToApplyPatch
class StyleQueueTaskDelegate(PatchAnalysisTaskDelegate):
def parent_command(self):
return "style-queue"
class StyleQueueTask(PatchAnalysisTask):
def validate(self):
self._patch = self._delegate.refetch_patch(self._patch)
if self._patch.is_obsolete():
return False
if self._patch.bug().is_closed():
return False
if self._patch.review() == "-":
return False
return True
def _check_style(self):
return self._run_command([
"check-style-local",
"--non-interactive",
"--quiet",
],
"Style checked",
"Patch did not pass style check")
def _apply_watch_list(self):
return self._run_command([
"apply-watchlist-local",
self._patch.bug_id(),
],
"Watchlist applied",
"Unabled to apply watchlist")
def run(self):
if not self._clean():
return False
if not self._update():
return False
if not self._apply():
raise UnableToApplyPatch(self._patch)
self._apply_watch_list()
if not self._check_style():
return self.report_failure()
return True
| bsd-3-clause |
holachek/ecosense | app/webassets/filter/handlebars.py | 3 | 2658 | import subprocess
from os import path
from webassets.exceptions import FilterError
from webassets.filter.jst import JSTemplateFilter
__all__ = ('Handlebars',)
class Handlebars(JSTemplateFilter):
"""Compile `Handlebars <http://handlebarsjs.com/>`_ templates.
This filter assumes that the ``handlebars`` executable is in the path.
Otherwise, you may define a ``HANDLEBARS_BIN`` setting.
.. note::
Use this filter if you want to precompile Handlebars templates.
If compiling them in the browser is acceptable, you may use the
JST filter, which needs no external dependency.
.. warning::
Currently, this filter is not compatible with input filters. Any
filters that would run during the input-stage will simply be
ignored. Input filters tend to be other compiler-style filters,
so this is unlikely to be an issue.
"""
# TODO: We should fix the warning above. Either, me make this filter
# support input-processing (we'd have to work with the hunks given to
# us, rather than the original source files), or make webassets raise
# an error if the handlebars filter is combined with an input filter.
# I'm unsure about the best API design. We could support open()
# returning ``True`` to indicate "no input filters allowed" (
# surprisingly hard to implement) Or, use an attribute to declare
# as much.
name = 'handlebars'
options = {
'binary': 'HANDLEBARS_BIN',
'extra_args': 'HANDLEBARS_EXTRA_ARGS',
'root': 'HANDLEBARS_ROOT',
}
max_debug_level = None
def process_templates(self, out, hunks, **kw):
templates = [info['source_path'] for _, info in hunks]
if self.root is True:
root = self.get_config('directory')
elif self.root:
root = path.join(self.get_config('directory'), self.root)
else:
root = self._find_base_path(templates)
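        # Assemble the handlebars command: optional template root (-r), any extra args, then the template paths.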
args = [self.binary or 'handlebars']
if root:
args.extend(['-r', root])
if self.extra_args:
args.extend(self.extra_args)
args.extend(templates)
proc = subprocess.Popen(
args, stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
stdout, stderr = proc.communicate()
if proc.returncode != 0:
raise FilterError(('handlebars: subprocess had error: stderr=%s, '+
'stdout=%s, returncode=%s') % (
stderr, stdout, proc.returncode))
out.write(stdout.decode('utf-8').strip() + ';')
| mit |
pmazurek/ansible-modules-extras | cloud/profitbricks/profitbricks_nic.py | 132 | 8769 | #!/usr/bin/python
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = '''
---
module: profitbricks_nic
short_description: Create or Remove a NIC.
description:
    - This module allows you to create or remove a NIC. This module has a dependency on profitbricks >= 1.0.0
version_added: "2.0"
options:
datacenter:
description:
- The datacenter in which to operate.
required: true
server:
description:
- The server name or ID.
required: true
name:
description:
- The name or ID of the NIC. This is only required on deletes, but not on create.
required: true
lan:
description:
- The LAN to place the NIC on. You can pass a LAN that doesn't exist and it will be created. Required on create.
required: true
subscription_user:
description:
      - The ProfitBricks username. Overrides the PB_SUBSCRIPTION_ID environment variable.
required: false
subscription_password:
description:
      - The ProfitBricks password. Overrides the PB_PASSWORD environment variable.
required: false
wait:
description:
- wait for the operation to complete before returning
required: false
default: "yes"
choices: [ "yes", "no" ]
wait_timeout:
description:
- how long before wait gives up, in seconds
default: 600
state:
description:
- Indicate desired state of the resource
required: false
default: 'present'
choices: ["present", "absent"]
requirements: [ "profitbricks" ]
author: Matt Baldwin (baldwin@stackpointcloud.com)
'''
EXAMPLES = '''
# Create a NIC
- profitbricks_nic:
datacenter: Tardis One
server: node002
lan: 2
wait_timeout: 500
state: present
# Remove a NIC
- profitbricks_nic:
datacenter: Tardis One
server: node002
name: 7341c2454f
wait_timeout: 500
state: absent
'''
import re
import uuid
import time
HAS_PB_SDK = True
try:
from profitbricks.client import ProfitBricksService, NIC
except ImportError:
HAS_PB_SDK = False
uuid_match = re.compile(
'[\w]{8}-[\w]{4}-[\w]{4}-[\w]{4}-[\w]{12}', re.I)
def _wait_for_completion(profitbricks, promise, wait_timeout, msg):
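    # Poll the request status every 5 seconds until it reports DONE, raise on FAILED, and time out after `wait_timeout` seconds.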
if not promise: return
wait_timeout = time.time() + wait_timeout
while wait_timeout > time.time():
time.sleep(5)
operation_result = profitbricks.get_request(
request_id=promise['requestId'],
status=True)
if operation_result['metadata']['status'] == "DONE":
return
elif operation_result['metadata']['status'] == "FAILED":
raise Exception(
'Request failed to complete ' + msg + ' "' + str(
promise['requestId']) + '" to complete.')
raise Exception(
'Timed out waiting for async operation ' + msg + ' "' + str(
promise['requestId']
) + '" to complete.')
def create_nic(module, profitbricks):
"""
Creates a NIC.
module : AnsibleModule object
profitbricks: authenticated profitbricks object.
Returns:
True if the nic creates, false otherwise
"""
datacenter = module.params.get('datacenter')
server = module.params.get('server')
lan = module.params.get('lan')
name = module.params.get('name')
wait = module.params.get('wait')
wait_timeout = module.params.get('wait_timeout')
# Locate UUID for Datacenter
if not (uuid_match.match(datacenter)):
datacenter_list = profitbricks.list_datacenters()
for d in datacenter_list['items']:
dc = profitbricks.get_datacenter(d['id'])
if datacenter == dc['properties']['name']:
datacenter = d['id']
break
# Locate UUID for Server
if not (uuid_match.match(server)):
server_list = profitbricks.list_servers(datacenter)
for s in server_list['items']:
if server == s['properties']['name']:
server = s['id']
break
try:
n = NIC(
name=name,
lan=lan
)
nic_response = profitbricks.create_nic(datacenter, server, n)
if wait:
_wait_for_completion(profitbricks, nic_response,
wait_timeout, "create_nic")
return nic_response
except Exception as e:
module.fail_json(msg="failed to create the NIC: %s" % str(e))
def delete_nic(module, profitbricks):
"""
Removes a NIC
module : AnsibleModule object
profitbricks: authenticated profitbricks object.
Returns:
True if the NIC was removed, false otherwise
"""
datacenter = module.params.get('datacenter')
server = module.params.get('server')
name = module.params.get('name')
# Locate UUID for Datacenter
if not (uuid_match.match(datacenter)):
datacenter_list = profitbricks.list_datacenters()
for d in datacenter_list['items']:
dc = profitbricks.get_datacenter(d['id'])
if datacenter == dc['properties']['name']:
datacenter = d['id']
break
# Locate UUID for Server
server_found = False
if not (uuid_match.match(server)):
server_list = profitbricks.list_servers(datacenter)
for s in server_list['items']:
if server == s['properties']['name']:
server_found = True
server = s['id']
break
if not server_found:
return False
# Locate UUID for NIC
nic_found = False
if not (uuid_match.match(name)):
nic_list = profitbricks.list_nics(datacenter, server)
for n in nic_list['items']:
if name == n['properties']['name']:
nic_found = True
name = n['id']
break
if not nic_found:
return False
try:
nic_response = profitbricks.delete_nic(datacenter, server, name)
return nic_response
except Exception as e:
module.fail_json(msg="failed to remove the NIC: %s" % str(e))
def main():
module = AnsibleModule(
argument_spec=dict(
datacenter=dict(),
server=dict(),
name=dict(default=str(uuid.uuid4()).replace('-','')[:10]),
lan=dict(),
subscription_user=dict(),
subscription_password=dict(),
wait=dict(type='bool', default=True),
wait_timeout=dict(type='int', default=600),
state=dict(default='present'),
)
)
if not HAS_PB_SDK:
module.fail_json(msg='profitbricks required for this module')
if not module.params.get('subscription_user'):
module.fail_json(msg='subscription_user parameter is required')
if not module.params.get('subscription_password'):
module.fail_json(msg='subscription_password parameter is required')
if not module.params.get('datacenter'):
module.fail_json(msg='datacenter parameter is required')
if not module.params.get('server'):
module.fail_json(msg='server parameter is required')
subscription_user = module.params.get('subscription_user')
subscription_password = module.params.get('subscription_password')
profitbricks = ProfitBricksService(
username=subscription_user,
password=subscription_password)
state = module.params.get('state')
if state == 'absent':
if not module.params.get('name'):
module.fail_json(msg='name parameter is required')
try:
(changed) = delete_nic(module, profitbricks)
module.exit_json(changed=changed)
except Exception as e:
module.fail_json(msg='failed to set nic state: %s' % str(e))
elif state == 'present':
if not module.params.get('lan'):
module.fail_json(msg='lan parameter is required')
try:
(nic_dict) = create_nic(module, profitbricks)
module.exit_json(nics=nic_dict)
except Exception as e:
module.fail_json(msg='failed to set nic state: %s' % str(e))
from ansible.module_utils.basic import *
main() | gpl-3.0 |
mathemage/h2o-3 | h2o-docs/src/product/conf.py | 2 | 10519 | # -*- coding: utf-8 -*-
#
# documentation documentation build configuration file, created by
# sphinx-quickstart on Tue Apr 26 14:35:07 2016.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
import recommonmark
from recommonmark.transform import AutoStructify
from recommonmark.parser import CommonMarkParser
import sphinx_rtd_theme
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinxcontrib.osexample',
'sphinx.ext.mathjax',
'sphinx.ext.todo'
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
# source_suffix = ['.rst', '.md']
source_suffix = ['.md', '.rst']
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'H2O'
copyright = u'2016-2017 H2O.ai'
author = u'h2o'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
if os.path.exists("project_version"):
f = open("project_version", "r")
version = f.readline().strip()
else:
version = "AnonDeveloperBuild"
print "version is " + str(version)
# The full version, including alpha/beta/rc tags.
release = version
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = [
# Exclude rst files that aren't ready to be included yet.
"Advanced-Topics.rst",
"Computing-Frame-Works.rst",
"Configure-Environment.rst",
"Data-Science-Pipeline.rst",
"Getting-Started.rst",
"Trouble-Shooting.rst",
"ensembles.rst",
"pojo-quick-start.rst",
"hadoop.rst",
"ibm-dsx.rst",
"azure.rst",
"docker.rst",
"aws.rst",
"data-sources.rst",
# Exclude directories that were part of the old docs that might still need to be purged.
"flow",
"upgrade",
"howto",
"tutorials"
]
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = "sphinx_rtd_theme"
# Use a built-in theme with RTD.
# html_style = 'spinx_rtd_theme/static/css/theme.css'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
html_theme_options = {"logo_only":"true"}
# Add any paths that contain custom themes here, relative to this directory.
html_theme_path = ["sphinx_rtd_theme/source/css"]
# The name for this set of Sphinx documents.
# "<project> v<release> documentation" by default.
# html_title = u'version no'
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
html_logo = "images/logo.png"
# The name of an image file (relative to this directory) to use as a favicon of
# the docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
html_favicon = "images/favicon.ico"
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['sphinx_rtd_theme/static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not None, a 'Last updated on:' timestamp is inserted at every page
# bottom, using the given strftime format.
# The empty string is equivalent to '%b %d, %Y'.
html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr', 'zh'
#html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# 'ja' uses this config value.
# 'zh' user can custom change `jieba` dictionary path.
#html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'documentationdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
# Latex figure (float) alignment
#'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'documentation.tex', u'documentation Documentation',
u'h2o', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'documentation', u'documentation Documentation',
[author], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'documentation', u'documentation Documentation',
author, 'documentation', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
| apache-2.0 |
sebastianwelsh/artifacts | tests/reader_test.py | 1 | 8292 | # -*- coding: utf-8 -*-
"""Tests for the artifact definitions readers."""
import io
import os
import unittest
from artifacts import definitions
from artifacts import errors
from artifacts import reader
class YamlArtifactsReaderTest(unittest.TestCase):
"""Class to test the YAML artifacts reader."""
def testReadFileObject(self):
"""Tests the ReadFileObject function."""
artifact_reader = reader.YamlArtifactsReader()
test_file = os.path.join('test_data', 'definitions.yaml')
with open(test_file, 'rb') as file_object:
artifact_definitions = list(artifact_reader.ReadFileObject(file_object))
self.assertEqual(len(artifact_definitions), 7)
# Artifact with file source type.
artifact_definition = artifact_definitions[0]
self.assertEqual(artifact_definition.name, 'SecurityEventLogEvtx')
expected_description = (
'Windows Security Event log for Vista or later systems.')
self.assertEqual(artifact_definition.description, expected_description)
self.assertEqual(len(artifact_definition.sources), 1)
source_type = artifact_definition.sources[0]
self.assertNotEqual(source_type, None)
self.assertEqual(
source_type.type_indicator, definitions.TYPE_INDICATOR_FILE)
expected_paths = sorted([
'%%environ_systemroot%%\\System32\\winevt\\Logs\\Security.evtx'])
self.assertEqual(sorted(source_type.paths), expected_paths)
self.assertEqual(len(artifact_definition.conditions), 1)
expected_condition = 'os_major_version >= 6'
self.assertEqual(artifact_definition.conditions[0], expected_condition)
self.assertEqual(len(artifact_definition.labels), 1)
self.assertEqual(artifact_definition.labels[0], 'Logs')
self.assertEqual(len(artifact_definition.supported_os), 1)
self.assertEqual(artifact_definition.supported_os[0], 'Windows')
self.assertEqual(len(artifact_definition.urls), 1)
expected_url = (
'http://www.forensicswiki.org/wiki/Windows_XML_Event_Log_(EVTX)')
self.assertEqual(artifact_definition.urls[0], expected_url)
# Artifact with Windows Registry key source type.
artifact_definition = artifact_definitions[1]
self.assertEqual(
artifact_definition.name, 'AllUsersProfileEnvironmentVariable')
self.assertEqual(len(artifact_definition.sources), 1)
source_type = artifact_definition.sources[0]
self.assertNotEqual(source_type, None)
self.assertEqual(
source_type.type_indicator,
definitions.TYPE_INDICATOR_WINDOWS_REGISTRY_KEY)
expected_keys = sorted([
('HKEY_LOCAL_MACHINE\\Software\\Microsoft\\Windows NT\\CurrentVersion\\'
'ProfileList\\ProfilesDirectory'),
('HKEY_LOCAL_MACHINE\\Software\\Microsoft\\Windows NT\\CurrentVersion\\'
'ProfileList\\AllUsersProfile')])
self.assertEqual(sorted(source_type.keys), expected_keys)
# Artifact with Windows Registry value source type.
artifact_definition = artifact_definitions[2]
self.assertEqual(artifact_definition.name, 'CurrentControlSet')
self.assertEqual(len(artifact_definition.sources), 1)
source_type = artifact_definition.sources[0]
self.assertNotEqual(source_type, None)
self.assertEqual(
source_type.type_indicator,
definitions.TYPE_INDICATOR_WINDOWS_REGISTRY_VALUE)
self.assertEqual(len(source_type.key_value_pairs), 1)
key_value_pair = source_type.key_value_pairs[0]
expected_key = 'HKEY_LOCAL_MACHINE\\SYSTEM\\Select'
self.assertEqual(key_value_pair['key'], expected_key)
self.assertEqual(key_value_pair['value'], 'Current')
# Artifact with WMI query source type.
artifact_definition = artifact_definitions[3]
self.assertEqual(artifact_definition.name, 'WMIProfileUsersHomeDir')
expected_provides = sorted(['users.homedir'])
self.assertEqual(sorted(artifact_definition.provides), expected_provides)
self.assertEqual(len(artifact_definition.sources), 1)
source_type = artifact_definition.sources[0]
self.assertNotEqual(source_type, None)
self.assertEqual(
source_type.type_indicator, definitions.TYPE_INDICATOR_WMI_QUERY)
expected_query = (
'SELECT * FROM Win32_UserProfile WHERE SID=\'%%users.sid%%\'')
self.assertEqual(source_type.query, expected_query)
# Artifact with artifact definition source type.
artifact_definition = artifact_definitions[4]
self.assertEqual(artifact_definition.name, 'EventLogs')
self.assertEqual(len(artifact_definition.sources), 1)
source_type = artifact_definition.sources[0]
self.assertNotEqual(source_type, None)
self.assertEqual(
source_type.type_indicator, definitions.TYPE_INDICATOR_ARTIFACT)
# Artifact with command definition source type.
artifact_definition = artifact_definitions[5]
self.assertEqual(artifact_definition.name, 'RedhatPackagesList')
self.assertEqual(len(artifact_definition.sources), 1)
source_type = artifact_definition.sources[0]
self.assertNotEqual(source_type, None)
self.assertEqual(
source_type.type_indicator, definitions.TYPE_INDICATOR_COMMAND)
# Artifact with COMMAND definition collector definition.
artifact_definition = artifact_definitions[5]
self.assertEqual(artifact_definition.name, 'RedhatPackagesList')
self.assertEqual(len(artifact_definition.sources), 1)
collector_definition = artifact_definition.sources[0]
self.assertNotEqual(collector_definition, None)
self.assertEqual(
collector_definition.type_indicator,
definitions.TYPE_INDICATOR_COMMAND)
def testBadKey(self):
"""Tests top level keys are correct."""
artifact_reader = reader.YamlArtifactsReader()
file_object = io.StringIO(initial_value=u"""name: BadKey
doc: bad extra key.
sources:
- type: ARTIFACT
attributes:
names:
- 'SystemEventLogEvtx'
extra_key: 'wrong'
labels: [Logs]
supported_os: [Windows]
""")
with self.assertRaises(errors.FormatError):
_ = list(artifact_reader.ReadFileObject(file_object))
def testMissingSources(self):
"""Tests sources is present."""
artifact_reader = reader.YamlArtifactsReader()
file_object = io.StringIO(initial_value=u"""name: BadSources
doc: must have one sources.
labels: [Logs]
supported_os: [Windows]
""")
with self.assertRaises(errors.FormatError):
_ = list(artifact_reader.ReadFileObject(file_object))
def testBadSupportedOS(self):
"""Tests supported_os is checked correctly."""
artifact_reader = reader.YamlArtifactsReader()
file_object = io.StringIO(initial_value=u"""name: BadSupportedOS
doc: supported_os should be an array of strings.
sources:
- type: ARTIFACT
attributes:
names:
- 'SystemEventLogEvtx'
labels: [Logs]
supported_os: Windows
""")
with self.assertRaises(errors.FormatError):
_ = list(artifact_reader.ReadFileObject(file_object))
def testBadLabels(self):
"""Tests labels is checked correctly."""
artifact_reader = reader.YamlArtifactsReader()
file_object = io.StringIO(initial_value=u"""name: BadLabel
doc: badlabel.
sources:
- type: ARTIFACT
attributes:
names:
- 'SystemEventLogEvtx'
labels: Logs
supported_os: [Windows]
""")
with self.assertRaises(errors.FormatError):
_ = list(artifact_reader.ReadFileObject(file_object))
def testMissingDoc(self):
"""Tests doc is required."""
artifact_reader = reader.YamlArtifactsReader()
file_object = io.StringIO(initial_value=u"""name: NoDoc
sources:
- type: ARTIFACT
attributes:
names:
- 'SystemEventLogEvtx'
""")
with self.assertRaises(errors.FormatError):
_ = list(artifact_reader.ReadFileObject(file_object))
def testReadFile(self):
"""Tests the ReadFile function."""
artifact_reader = reader.YamlArtifactsReader()
test_file = os.path.join('test_data', 'definitions.yaml')
artifact_definitions = list(artifact_reader.ReadFile(test_file))
self.assertEqual(len(artifact_definitions), 7)
def testReadDirectory(self):
"""Tests the ReadDirectory function."""
artifact_reader = reader.YamlArtifactsReader()
artifact_definitions = list(artifact_reader.ReadDirectory('test_data'))
self.assertEqual(len(artifact_definitions), 7)
if __name__ == '__main__':
unittest.main()
| apache-2.0 |
uclouvain/osis | ddd/logic/application/test/use_case/write/test_renew_multiple_attributions_service.py | 1 | 9080 | # ############################################################################
# OSIS stands for Open Student Information System. It's an application
# designed to manage the core business of higher education institutions,
# such as universities, faculties, institutes and professional schools.
# The core business involves the administration of students, teachers,
# courses, programs and so on.
#
# Copyright (C) 2015-2021 Université catholique de Louvain (http://www.uclouvain.be)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# A copy of this license - GNU General Public License - is available
# at the root of the source code of this program. If not,
# see http://www.gnu.org/licenses/.
# ############################################################################
import copy
import datetime
from decimal import Decimal
import mock
import uuid
from django.test import TestCase
from attribution.models.enums.function import Functions
from base.ddd.utils.business_validator import MultipleBusinessExceptions
from base.models.enums.vacant_declaration_type import VacantDeclarationType
from ddd.logic.application.commands import RenewMultipleAttributionsCommand
from ddd.logic.application.domain.builder.applicant_identity_builder import ApplicantIdentityBuilder
from ddd.logic.application.domain.model.applicant import Applicant
from ddd.logic.application.domain.model.application import Application, ApplicationIdentity
from ddd.logic.application.domain.model.application_calendar import ApplicationCalendar, ApplicationCalendarIdentity
from ddd.logic.application.domain.model._attribution import Attribution
from ddd.logic.application.domain.model._allocation_entity import AllocationEntity
from ddd.logic.application.domain.model.vacant_course import VacantCourse, VacantCourseIdentity
from ddd.logic.application.domain.validator.exceptions import AttributionAboutToExpireNotFound, \
VolumesAskedShouldBeLowerOrEqualToVolumeAvailable, ApplicationAlreadyExistsException
from ddd.logic.learning_unit.domain.model.learning_unit import LearningUnitIdentity
from ddd.logic.shared_kernel.academic_year.builder.academic_year_identity_builder import AcademicYearIdentityBuilder
from infrastructure.application.repository.applicant_in_memory import ApplicantInMemoryRepository
from infrastructure.application.repository.application_calendar_in_memory import ApplicationCalendarInMemoryRepository
from infrastructure.application.repository.application_in_memory import ApplicationInMemoryRepository
from infrastructure.application.repository.vacant_course_in_memory import VacantCourseInMemoryRepository
from infrastructure.messages_bus import message_bus_instance
class TestRenewMultipleAttributionsService(TestCase):
@classmethod
def setUpTestData(cls):
today = datetime.date.today()
cls.application_calendar = ApplicationCalendar(
entity_id=ApplicationCalendarIdentity(uuid=uuid.uuid4()),
authorized_target_year=AcademicYearIdentityBuilder.build_from_year(year=2019),
start_date=today - datetime.timedelta(days=5),
end_date=today + datetime.timedelta(days=10),
)
cls.attribution_about_to_expire = Attribution(
course_id=LearningUnitIdentity(
code='LDROI1200',
academic_year=AcademicYearIdentityBuilder.build_from_year(year=2018)
),
course_title="Introduction au droit",
function=Functions.HOLDER,
end_year=AcademicYearIdentityBuilder.build_from_year(year=2018),
start_year=AcademicYearIdentityBuilder.build_from_year(year=2017),
lecturing_volume=Decimal(5),
practical_volume=None,
is_substitute=False
)
cls.global_id = '123456789'
cls.applicant = Applicant(
entity_id=ApplicantIdentityBuilder.build_from_global_id(global_id=cls.global_id),
first_name="Thomas",
last_name="Durant",
attributions=[cls.attribution_about_to_expire]
)
cls.vacant_course = VacantCourse(
entity_id=VacantCourseIdentity(
code='LDROI1200',
academic_year=cls.application_calendar.authorized_target_year
),
lecturing_volume_available=Decimal(10),
practical_volume_available=Decimal(50),
title='Introduction au droit',
vacant_declaration_type=VacantDeclarationType.RESEVED_FOR_INTERNS,
is_in_team=False,
allocation_entity=AllocationEntity(code='DRT')
)
def setUp(self) -> None:
self.applicant_repository = ApplicantInMemoryRepository([self.applicant])
self.application_calendar_repository = ApplicationCalendarInMemoryRepository([self.application_calendar])
self.vacant_course_repository = VacantCourseInMemoryRepository([self.vacant_course])
self.application_repository = ApplicationInMemoryRepository([])
message_bus_patcher = mock.patch.multiple(
'infrastructure.messages_bus',
ApplicationRepository=lambda: self.application_repository,
ApplicantRepository=lambda: self.applicant_repository,
VacantCourseRepository=lambda: self.vacant_course_repository,
ApplicationCalendarRepository=lambda: self.application_calendar_repository
)
message_bus_patcher.start()
self.addCleanup(message_bus_patcher.stop)
self.message_bus = message_bus_instance
def test_assert_renewal_is_correctly_processed(self):
cmd = RenewMultipleAttributionsCommand(global_id=self.global_id, renew_codes=['LDROI1200'])
self.message_bus.invoke(cmd)
self.assertEqual(
len(self.application_repository.search(applicant_id=self.applicant.entity_id)),
1
)
def test_renewal_multiple_case_one_not_about_to_renewal_assert_applications_not_created_at_all(self):
cmd = RenewMultipleAttributionsCommand(global_id=self.global_id, renew_codes=['LDROI1200', 'LDROI2500'])
with self.assertRaises(MultipleBusinessExceptions) as cm:
self.message_bus.invoke(cmd)
exceptions_raised = cm.exception.exceptions
self.assertTrue(
any([
exception for exception in exceptions_raised
if isinstance(exception, AttributionAboutToExpireNotFound)
])
)
self.assertEqual(
len(self.application_repository.search(applicant_id=self.applicant.entity_id)),
0
)
def test_renewal_multiple_case_vacant_course_with_less_availability_than_attribution_assert_raise_exception(self):
vacant_course_with_less_availability = copy.deepcopy(self.vacant_course)
vacant_course_with_less_availability.practical_volume_available = Decimal(1)
vacant_course_with_less_availability.lecturing_volume_available = Decimal(1)
self.vacant_course_repository = VacantCourseInMemoryRepository([vacant_course_with_less_availability])
cmd = RenewMultipleAttributionsCommand(global_id=self.global_id, renew_codes=['LDROI1200'])
with self.assertRaises(MultipleBusinessExceptions) as cm:
self.message_bus.invoke(cmd)
exceptions_raised = cm.exception.exceptions
self.assertTrue(
any(
isinstance(exception, VolumesAskedShouldBeLowerOrEqualToVolumeAvailable)
for exception in exceptions_raised
)
)
def test_renewal_multiple_case_already_applied_on_course_assert_raise_exception(self):
application = Application(
entity_id=ApplicationIdentity(uuid=uuid.uuid4()),
applicant_id=self.applicant.entity_id,
vacant_course_id=self.vacant_course.entity_id,
lecturing_volume=self.vacant_course.lecturing_volume_available,
practical_volume=self.vacant_course.practical_volume_available,
remark='',
course_summary='',
)
self.application_repository = ApplicationInMemoryRepository([application])
cmd = RenewMultipleAttributionsCommand(global_id=self.global_id, renew_codes=['LDROI1200'])
with self.assertRaises(MultipleBusinessExceptions) as cm:
self.message_bus.invoke(cmd)
exceptions_raised = cm.exception.exceptions
self.assertTrue(
any(
isinstance(exception, ApplicationAlreadyExistsException)
for exception in exceptions_raised
)
)
| agpl-3.0 |
amyshi188/osf.io | website/project/decorators.py | 8 | 12504 | # -*- coding: utf-8 -*-
import functools
import httplib as http
from furl import furl
from flask import request
from modularodm import Q
from modularodm.exceptions import ModularOdmException
from framework import status
from framework.auth import Auth, cas
from framework.flask import redirect # VOL-aware redirect
from framework.exceptions import HTTPError
from framework.auth.decorators import collect_auth
from framework.mongo.utils import get_or_http_error
from website.models import Node
from website import settings
_load_node_or_fail = lambda pk: get_or_http_error(Node, pk)
def _kwargs_to_nodes(kwargs):
"""Retrieve project and component objects from keyword arguments.
:param dict kwargs: Dictionary of keyword arguments
:return: Tuple of parent and node
"""
node = kwargs.get('node') or kwargs.get('project')
parent = kwargs.get('parent')
if node:
return parent, node
pid = kwargs.get('pid')
nid = kwargs.get('nid')
if pid and nid:
node = _load_node_or_fail(nid)
parent = _load_node_or_fail(pid)
elif pid and not nid:
node = _load_node_or_fail(pid)
elif nid and not pid:
node = _load_node_or_fail(nid)
elif not pid and not nid:
raise HTTPError(
http.NOT_FOUND,
data={
'message_short': 'Node not found',
'message_long': 'No Node with that primary key could be found',
}
)
return parent, node
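# Illustration (hypothetical ids): called with kwargs={'pid': 'abc12', 'nid': 'def34'}
# this resolves both primary keys and returns (parent, node); with only 'pid'
# it returns (None, node); with neither id present a 404 HTTPError is raised.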
def _inject_nodes(kwargs):
kwargs['parent'], kwargs['node'] = _kwargs_to_nodes(kwargs)
def must_not_be_rejected(func):
"""Ensures approval/disapproval requests can't reach Sanctions that have
already been rejected.
"""
@functools.wraps(func)
def wrapped(*args, **kwargs):
node = get_or_http_error(Node, kwargs.get('nid', kwargs.get('pid')), allow_deleted=True)
if node.sanction and node.sanction.is_rejected:
raise HTTPError(http.GONE, data=dict(
message_long='This registration has been rejected'
))
return func(*args, **kwargs)
return wrapped
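# Note: the lookup above allows deleted nodes so a sanction on a deleted
# registration can still be resolved, and a rejected sanction answers with
# 410 GONE instead of letting the approval/disapproval request proceed.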
def must_be_valid_project(func=None, retractions_valid=False):
""" Ensures permissions to retractions are never implicitly granted. """
# TODO: Check private link
def must_be_valid_project_inner(func):
@functools.wraps(func)
def wrapped(*args, **kwargs):
_inject_nodes(kwargs)
if getattr(kwargs['node'], 'is_collection', True):
raise HTTPError(
http.NOT_FOUND
)
if not retractions_valid and getattr(kwargs['node'].retraction, 'is_retracted', False):
raise HTTPError(
http.BAD_REQUEST,
data=dict(message_long='Viewing withdrawn registrations is not permitted')
)
else:
return func(*args, **kwargs)
return wrapped
if func:
return must_be_valid_project_inner(func)
return must_be_valid_project_inner
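# Usage sketch (hypothetical view functions): because `func` is optional the
# decorator works both bare and parametrised:
#
#     @must_be_valid_project
#     def project_overview(**kwargs): ...
#
#     @must_be_valid_project(retractions_valid=True)
#     def view_retraction(**kwargs): ...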
def must_be_public_registration(func):
@functools.wraps(func)
def wrapped(*args, **kwargs):
_inject_nodes(kwargs)
node = kwargs['node']
if not node.is_public or not node.is_registration:
raise HTTPError(
http.BAD_REQUEST,
data=dict(message_long='Must be a public registration to view')
)
return func(*args, **kwargs)
return wrapped
def must_not_be_registration(func):
@functools.wraps(func)
def wrapped(*args, **kwargs):
_inject_nodes(kwargs)
node = kwargs['node']
if node.is_registration and not node.archiving:
raise HTTPError(
http.BAD_REQUEST,
data={
'message_short': 'Registered Nodes are immutable',
'message_long': "The operation you're trying to do cannot be applied to registered Nodes, which are immutable",
}
)
return func(*args, **kwargs)
return wrapped
def must_be_registration(func):
@functools.wraps(func)
def wrapped(*args, **kwargs):
_inject_nodes(kwargs)
node = kwargs['node']
if not node.is_registration:
raise HTTPError(
http.BAD_REQUEST,
data={
'message_short': 'Registered Nodes only',
'message_long': 'This view is restricted to registered Nodes only',
}
)
return func(*args, **kwargs)
return wrapped
def check_can_access(node, user, key=None, api_node=None):
"""View helper that returns whether a given user can access a node.
If ``user`` is None, returns False.
:rtype: boolean
:raises: HTTPError (403) if user cannot access the node
"""
if user is None:
return False
if not node.can_view(Auth(user=user)) and api_node != node:
if key in node.private_link_keys_deleted:
status.push_status_message('The view-only links you used are expired.', trust=False)
raise HTTPError(http.FORBIDDEN)
return True
def check_key_expired(key, node, url):
"""check if key expired if is return url with args so it will push status message
else return url
:param str key: the private link key passed in
:param Node node: the node object wants to access
:param str url: the url redirect to
:return: url with pushed message added if key expired else just url
"""
if key in node.private_link_keys_deleted:
url = furl(url).add({'status': 'expired'}).url
return url
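# Example (assumed values): if 'abc' is in node.private_link_keys_deleted,
# check_key_expired('abc', node, 'https://osf.io/xyz/') returns
# 'https://osf.io/xyz/?status=expired'; otherwise the url comes back unchanged.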
def _must_be_contributor_factory(include_public, include_view_only_anon=True):
"""Decorator factory for authorization wrappers. Decorators verify whether
the current user is a contributor on the current project, or optionally
whether the current project is public.
:param bool include_public: Check whether current project is public
:param bool include_view_only_anon: Whether anonymized view-only links may grant access
:return: Authorization decorator
"""
def wrapper(func):
@functools.wraps(func)
def wrapped(*args, **kwargs):
response = None
_inject_nodes(kwargs)
node = kwargs['node']
kwargs['auth'] = Auth.from_kwargs(request.args.to_dict(), kwargs)
user = kwargs['auth'].user
key = request.args.get('view_only', '').strip('/')
# if the user is not logged in, check whether the view-only key is valid or another privilege applies
kwargs['auth'].private_key = key
link_anon = None
if not include_view_only_anon:
from website.models import PrivateLink
try:
link_anon = PrivateLink.find_one(Q('key', 'eq', key)).anonymous
except ModularOdmException:
pass
if not node.is_public or not include_public:
if not include_view_only_anon and link_anon:
if not check_can_access(node=node, user=user):
raise HTTPError(http.UNAUTHORIZED)
elif key not in node.private_link_keys_active:
if not check_can_access(node=node, user=user, key=key):
redirect_url = check_key_expired(key=key, node=node, url=request.url)
if request.headers.get('Content-Type') == 'application/json':
raise HTTPError(http.UNAUTHORIZED)
else:
response = redirect(cas.get_login_url(redirect_url))
return response or func(*args, **kwargs)
return wrapped
return wrapper
# Create authorization decorators
must_be_contributor = _must_be_contributor_factory(False)
must_be_contributor_or_public = _must_be_contributor_factory(True)
must_be_contributor_or_public_but_not_anonymized = _must_be_contributor_factory(include_public=True, include_view_only_anon=False)
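# Usage sketch (hypothetical view): stacking these decorators on a view
# injects `parent`, `node` and `auth` into the keyword arguments:
#
#     @must_be_valid_project
#     @must_be_contributor_or_public
#     def node_dashboard(auth, node, **kwargs):
#         return {'title': node.title}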
def must_have_addon(addon_name, model):
"""Decorator factory that ensures that a given addon has been added to
the target node. The decorated function will throw a 400 (Bad Request) if the
required addon is not found. Must be applied after a decorator that adds
`node` and `project` to the target function's keyword arguments, such as
`must_be_contributor`.
:param str addon_name: Name of addon
:param str model: Name of model
:returns: Decorator function
"""
def wrapper(func):
@functools.wraps(func)
@collect_auth
def wrapped(*args, **kwargs):
if model == 'node':
_inject_nodes(kwargs)
owner = kwargs['node']
elif model == 'user':
auth = kwargs.get('auth')
owner = auth.user if auth else None
if owner is None:
raise HTTPError(http.UNAUTHORIZED)
else:
raise HTTPError(http.BAD_REQUEST)
addon = owner.get_addon(addon_name)
if addon is None:
raise HTTPError(http.BAD_REQUEST)
kwargs['{0}_addon'.format(model)] = addon
return func(*args, **kwargs)
return wrapped
return wrapper
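# Usage sketch (hypothetical view): with model='node' the addon instance is
# injected as `node_addon`, with model='user' as `user_addon`:
#
#     @must_be_valid_project
#     @must_be_contributor
#     @must_have_addon('wiki', 'node')
#     def project_wiki_home(node_addon, **kwargs): ...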
def must_be_addon_authorizer(addon_name):
"""
:param str addon_name: Name of addon
:returns: Decorator function
"""
def wrapper(func):
@functools.wraps(func)
@collect_auth
def wrapped(*args, **kwargs):
node_addon = kwargs.get('node_addon')
if not node_addon:
_inject_nodes(kwargs)
node = kwargs['node']
node_addon = node.get_addon(addon_name)
if not node_addon:
raise HTTPError(http.BAD_REQUEST)
if not node_addon.user_settings:
raise HTTPError(http.BAD_REQUEST)
auth = kwargs.get('auth')
user = kwargs.get('user') or (auth.user if auth else None)
if node_addon.user_settings.owner != user:
raise HTTPError(http.FORBIDDEN)
return func(*args, **kwargs)
return wrapped
return wrapper
def must_have_permission(permission):
"""Decorator factory for checking permissions. Checks that user is logged
in and has necessary permissions for node. Node must be passed in keyword
arguments to view function.
:param str permission: The required permission (e.g. 'write')
:returns: Decorator function for checking permissions
:raises: HTTPError(http.UNAUTHORIZED) if not logged in
:raises: HTTPError(http.FORBIDDEN) if missing permissions
"""
def wrapper(func):
@functools.wraps(func)
def wrapped(*args, **kwargs):
# Ensure `project` and `node` kwargs
_inject_nodes(kwargs)
node = kwargs['node']
kwargs['auth'] = Auth.from_kwargs(request.args.to_dict(), kwargs)
user = kwargs['auth'].user
# User must be logged in
if user is None:
raise HTTPError(http.UNAUTHORIZED)
# User must have permissions
if not node.has_permission(user, permission):
raise HTTPError(http.FORBIDDEN)
# Call view function
return func(*args, **kwargs)
# Return decorated function
return wrapped
# Return decorator
return wrapper
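# Usage sketch (hypothetical view; the 'write' string is the same one used by
# must_have_write_permission_or_public_wiki below):
#
#     @must_be_valid_project
#     @must_have_permission('write')
#     def edit_node_settings(auth, node, **kwargs): ...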
def must_have_write_permission_or_public_wiki(func):
""" Checks if user has write permission or wiki is public and publicly editable. """
@functools.wraps(func)
def wrapped(*args, **kwargs):
# Ensure `project` and `node` kwargs
_inject_nodes(kwargs)
wiki = kwargs['node'].get_addon('wiki')
if wiki and wiki.is_publicly_editable:
return func(*args, **kwargs)
else:
return must_have_permission('write')(func)(*args, **kwargs)
# Return decorated function
return wrapped
def http_error_if_disk_saving_mode(func):
@functools.wraps(func)
def wrapper(*args, **kwargs):
_inject_nodes(kwargs)
node = kwargs['node']
if settings.DISK_SAVING_MODE:
raise HTTPError(
http.METHOD_NOT_ALLOWED,
redirect_url=node.url
)
return func(*args, **kwargs)
return wrapper
| apache-2.0 |
batxes/4c2vhic | SHH_WT_models_highres/SHH_WT_models_highres_final_output_0.1_-0.1_5000/mtx1_models/SHH_WT_models_highres1537.py | 4 | 88211 | import _surface
import chimera
try:
import chimera.runCommand
except:
pass
from VolumePath import markerset as ms
try:
from VolumePath import Marker_Set, Link
new_marker_set=Marker_Set
except:
from VolumePath import volume_path_dialog
d= volume_path_dialog(True)
new_marker_set= d.new_marker_set
marker_sets={}
surf_sets={}
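# Each block below follows the same pattern: look up or create a named marker
# set for one particle, then place a single spherical marker at the model
# coordinates with an (r, g, b) colour tuple and a radius taken from the model.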
if "particle_0 geometry" not in marker_sets:
s=new_marker_set('particle_0 geometry')
marker_sets["particle_0 geometry"]=s
s= marker_sets["particle_0 geometry"]
mark=s.place_marker((2622.54, 4501.07, 4731.13), (0.7, 0.7, 0.7), 182.271)
if "particle_1 geometry" not in marker_sets:
s=new_marker_set('particle_1 geometry')
marker_sets["particle_1 geometry"]=s
s= marker_sets["particle_1 geometry"]
mark=s.place_marker((3001.17, 4573.73, 4981.75), (0.7, 0.7, 0.7), 258.199)
if "particle_2 geometry" not in marker_sets:
s=new_marker_set('particle_2 geometry')
marker_sets["particle_2 geometry"]=s
s= marker_sets["particle_2 geometry"]
mark=s.place_marker((2927.4, 4419.43, 4674.83), (0.7, 0.7, 0.7), 123.897)
if "particle_3 geometry" not in marker_sets:
s=new_marker_set('particle_3 geometry')
marker_sets["particle_3 geometry"]=s
s= marker_sets["particle_3 geometry"]
mark=s.place_marker((2873.72, 4819.31, 4662.13), (0.7, 0.7, 0.7), 146.739)
if "particle_4 geometry" not in marker_sets:
s=new_marker_set('particle_4 geometry')
marker_sets["particle_4 geometry"]=s
s= marker_sets["particle_4 geometry"]
mark=s.place_marker((2821.33, 5273.57, 4518.94), (0.7, 0.7, 0.7), 179.098)
if "particle_5 geometry" not in marker_sets:
s=new_marker_set('particle_5 geometry')
marker_sets["particle_5 geometry"]=s
s= marker_sets["particle_5 geometry"]
mark=s.place_marker((2896.92, 4797.45, 4134.98), (0.7, 0.7, 0.7), 148.854)
if "particle_6 geometry" not in marker_sets:
s=new_marker_set('particle_6 geometry')
marker_sets["particle_6 geometry"]=s
s= marker_sets["particle_6 geometry"]
mark=s.place_marker((2863.26, 4437.63, 3712.34), (0.7, 0.7, 0.7), 196.357)
if "particle_7 geometry" not in marker_sets:
s=new_marker_set('particle_7 geometry')
marker_sets["particle_7 geometry"]=s
s= marker_sets["particle_7 geometry"]
mark=s.place_marker((2409.84, 4737.06, 3540.45), (0.7, 0.7, 0.7), 166.873)
if "particle_8 geometry" not in marker_sets:
s=new_marker_set('particle_8 geometry')
marker_sets["particle_8 geometry"]=s
s= marker_sets["particle_8 geometry"]
mark=s.place_marker((1977.4, 5138.29, 3323.27), (0.7, 0.7, 0.7), 95.4711)
if "particle_9 geometry" not in marker_sets:
s=new_marker_set('particle_9 geometry')
marker_sets["particle_9 geometry"]=s
s= marker_sets["particle_9 geometry"]
mark=s.place_marker((2282.94, 4850.7, 3237.65), (0.7, 0.7, 0.7), 185.401)
if "particle_10 geometry" not in marker_sets:
s=new_marker_set('particle_10 geometry')
marker_sets["particle_10 geometry"]=s
s= marker_sets["particle_10 geometry"]
mark=s.place_marker((2747.16, 4562.42, 3383.89), (0.7, 0.7, 0.7), 151.984)
if "particle_11 geometry" not in marker_sets:
s=new_marker_set('particle_11 geometry')
marker_sets["particle_11 geometry"]=s
s= marker_sets["particle_11 geometry"]
mark=s.place_marker((3232.97, 4128.74, 3549.09), (0.7, 0.7, 0.7), 185.612)
if "particle_12 geometry" not in marker_sets:
s=new_marker_set('particle_12 geometry')
marker_sets["particle_12 geometry"]=s
s= marker_sets["particle_12 geometry"]
mark=s.place_marker((3720.88, 4074.3, 3688.03), (0.7, 0.7, 0.7), 210.273)
if "particle_13 geometry" not in marker_sets:
s=new_marker_set('particle_13 geometry')
marker_sets["particle_13 geometry"]=s
s= marker_sets["particle_13 geometry"]
mark=s.place_marker((4118.42, 4167.71, 3751.64), (0.7, 0.7, 0.7), 106.892)
if "particle_14 geometry" not in marker_sets:
s=new_marker_set('particle_14 geometry')
marker_sets["particle_14 geometry"]=s
s= marker_sets["particle_14 geometry"]
mark=s.place_marker((4623.94, 4045.75, 3713), (0.7, 0.7, 0.7), 202.025)
if "particle_15 geometry" not in marker_sets:
s=new_marker_set('particle_15 geometry')
marker_sets["particle_15 geometry"]=s
s= marker_sets["particle_15 geometry"]
mark=s.place_marker((5153.17, 3750.23, 3934.83), (0.7, 0.7, 0.7), 192.169)
if "particle_16 geometry" not in marker_sets:
s=new_marker_set('particle_16 geometry')
marker_sets["particle_16 geometry"]=s
s= marker_sets["particle_16 geometry"]
mark=s.place_marker((5580.5, 3358.61, 4265.69), (0.7, 0.7, 0.7), 241.11)
if "particle_17 geometry" not in marker_sets:
s=new_marker_set('particle_17 geometry')
marker_sets["particle_17 geometry"]=s
s= marker_sets["particle_17 geometry"]
mark=s.place_marker((5659.18, 2961.84, 4535.4), (0.7, 0.7, 0.7), 128.465)
if "particle_18 geometry" not in marker_sets:
s=new_marker_set('particle_18 geometry')
marker_sets["particle_18 geometry"]=s
s= marker_sets["particle_18 geometry"]
mark=s.place_marker((5736.59, 2774.98, 4987.43), (0.7, 0.7, 0.7), 217.38)
if "particle_19 geometry" not in marker_sets:
s=new_marker_set('particle_19 geometry')
marker_sets["particle_19 geometry"]=s
s= marker_sets["particle_19 geometry"]
mark=s.place_marker((6040.2, 2770.91, 5619.55), (0.7, 0.7, 0.7), 184.555)
if "particle_20 geometry" not in marker_sets:
s=new_marker_set('particle_20 geometry')
marker_sets["particle_20 geometry"]=s
s= marker_sets["particle_20 geometry"]
mark=s.place_marker((5586.75, 2663.52, 5176.26), (0.7, 0.7, 0.7), 140.055)
if "particle_21 geometry" not in marker_sets:
s=new_marker_set('particle_21 geometry')
marker_sets["particle_21 geometry"]=s
s= marker_sets["particle_21 geometry"]
mark=s.place_marker((5592.64, 2635.41, 4741.14), (0.7, 0.7, 0.7), 169.708)
if "particle_22 geometry" not in marker_sets:
s=new_marker_set('particle_22 geometry')
marker_sets["particle_22 geometry"]=s
s= marker_sets["particle_22 geometry"]
mark=s.place_marker((5824.99, 2514.48, 4413.85), (0.7, 0.7, 0.7), 184.639)
if "particle_23 geometry" not in marker_sets:
s=new_marker_set('particle_23 geometry')
marker_sets["particle_23 geometry"]=s
s= marker_sets["particle_23 geometry"]
mark=s.place_marker((5639.08, 2271.91, 4313.39), (0.7, 0.7, 0.7), 119.286)
if "particle_24 geometry" not in marker_sets:
s=new_marker_set('particle_24 geometry')
marker_sets["particle_24 geometry"]=s
s= marker_sets["particle_24 geometry"]
mark=s.place_marker((5546.95, 2135.83, 4568.81), (0.7, 0.7, 0.7), 147.754)
if "particle_25 geometry" not in marker_sets:
s=new_marker_set('particle_25 geometry')
marker_sets["particle_25 geometry"]=s
s= marker_sets["particle_25 geometry"]
mark=s.place_marker((5382.13, 2331.03, 4782.1), (0.7, 0.7, 0.7), 171.4)
if "particle_26 geometry" not in marker_sets:
s=new_marker_set('particle_26 geometry')
marker_sets["particle_26 geometry"]=s
s= marker_sets["particle_26 geometry"]
mark=s.place_marker((5177.88, 2585.3, 4503.05), (0.7, 0.7, 0.7), 156.341)
if "particle_27 geometry" not in marker_sets:
s=new_marker_set('particle_27 geometry')
marker_sets["particle_27 geometry"]=s
s= marker_sets["particle_27 geometry"]
mark=s.place_marker((4622.29, 2696.77, 4333.32), (0.7, 0.7, 0.7), 186.501)
if "particle_28 geometry" not in marker_sets:
s=new_marker_set('particle_28 geometry')
marker_sets["particle_28 geometry"]=s
s= marker_sets["particle_28 geometry"]
mark=s.place_marker((4124.84, 2735.95, 4122.4), (0.7, 0.7, 0.7), 308.325)
if "particle_29 geometry" not in marker_sets:
s=new_marker_set('particle_29 geometry')
marker_sets["particle_29 geometry"]=s
s= marker_sets["particle_29 geometry"]
mark=s.place_marker((3935.02, 2984.42, 3872.3), (0.7, 0.7, 0.7), 138.617)
if "particle_30 geometry" not in marker_sets:
s=new_marker_set('particle_30 geometry')
marker_sets["particle_30 geometry"]=s
s= marker_sets["particle_30 geometry"]
mark=s.place_marker((3883.91, 3028.44, 3584.26), (0.7, 0.7, 0.7), 130.03)
if "particle_31 geometry" not in marker_sets:
s=new_marker_set('particle_31 geometry')
marker_sets["particle_31 geometry"]=s
s= marker_sets["particle_31 geometry"]
mark=s.place_marker((4134.9, 2875.95, 3740.36), (0.7, 0.7, 0.7), 156.552)
if "particle_32 geometry" not in marker_sets:
s=new_marker_set('particle_32 geometry')
marker_sets["particle_32 geometry"]=s
s= marker_sets["particle_32 geometry"]
mark=s.place_marker((3982.74, 2957.42, 4002.64), (0.7, 0.7, 0.7), 183.244)
if "particle_33 geometry" not in marker_sets:
s=new_marker_set('particle_33 geometry')
marker_sets["particle_33 geometry"]=s
s= marker_sets["particle_33 geometry"]
mark=s.place_marker((3876.22, 2979.86, 4247.17), (0.7, 0.7, 0.7), 181.382)
if "particle_34 geometry" not in marker_sets:
s=new_marker_set('particle_34 geometry')
marker_sets["particle_34 geometry"]=s
s= marker_sets["particle_34 geometry"]
mark=s.place_marker((3831.39, 2833.42, 4367.83), (0.7, 0.7, 0.7), 101.943)
if "particle_35 geometry" not in marker_sets:
s=new_marker_set('particle_35 geometry')
marker_sets["particle_35 geometry"]=s
s= marker_sets["particle_35 geometry"]
mark=s.place_marker((3858.63, 2782, 4730.19), (1, 0.7, 0), 138.913)
if "particle_36 geometry" not in marker_sets:
s=new_marker_set('particle_36 geometry')
marker_sets["particle_36 geometry"]=s
s= marker_sets["particle_36 geometry"]
mark=s.place_marker((3396.69, 1807.54, 4530.27), (0.7, 0.7, 0.7), 221.737)
if "particle_37 geometry" not in marker_sets:
s=new_marker_set('particle_37 geometry')
marker_sets["particle_37 geometry"]=s
s= marker_sets["particle_37 geometry"]
mark=s.place_marker((2781.3, 1347.36, 4070.49), (0.7, 0.7, 0.7), 256.38)
if "particle_38 geometry" not in marker_sets:
s=new_marker_set('particle_38 geometry')
marker_sets["particle_38 geometry"]=s
s= marker_sets["particle_38 geometry"]
mark=s.place_marker((2069.36, 1398.95, 3814.18), (0.7, 0.7, 0.7), 221.694)
if "particle_39 geometry" not in marker_sets:
s=new_marker_set('particle_39 geometry')
marker_sets["particle_39 geometry"]=s
s= marker_sets["particle_39 geometry"]
mark=s.place_marker((1748.24, 2030.03, 3573.22), (0.7, 0.7, 0.7), 259.341)
if "particle_40 geometry" not in marker_sets:
s=new_marker_set('particle_40 geometry')
marker_sets["particle_40 geometry"]=s
s= marker_sets["particle_40 geometry"]
mark=s.place_marker((2340.31, 2436.49, 3289.01), (0.7, 0.7, 0.7), 117.89)
if "particle_41 geometry" not in marker_sets:
s=new_marker_set('particle_41 geometry')
marker_sets["particle_41 geometry"]=s
s= marker_sets["particle_41 geometry"]
mark=s.place_marker((3121.19, 2687.81, 3314.21), (0.7, 0.7, 0.7), 116.071)
if "particle_42 geometry" not in marker_sets:
s=new_marker_set('particle_42 geometry')
marker_sets["particle_42 geometry"]=s
s= marker_sets["particle_42 geometry"]
mark=s.place_marker((3387.7, 2708.68, 3689.14), (0.7, 0.7, 0.7), 268.224)
if "particle_43 geometry" not in marker_sets:
s=new_marker_set('particle_43 geometry')
marker_sets["particle_43 geometry"]=s
s= marker_sets["particle_43 geometry"]
mark=s.place_marker((3087.54, 2709.98, 3782.84), (0.7, 0.7, 0.7), 386.918)
if "particle_44 geometry" not in marker_sets:
s=new_marker_set('particle_44 geometry')
marker_sets["particle_44 geometry"]=s
s= marker_sets["particle_44 geometry"]
mark=s.place_marker((2537.52, 2902.72, 3581.96), (0.7, 0.7, 0.7), 121.316)
if "particle_45 geometry" not in marker_sets:
s=new_marker_set('particle_45 geometry')
marker_sets["particle_45 geometry"]=s
s= marker_sets["particle_45 geometry"]
mark=s.place_marker((2137.32, 2951.42, 3743.93), (0.7, 0.7, 0.7), 138.363)
if "particle_46 geometry" not in marker_sets:
s=new_marker_set('particle_46 geometry')
marker_sets["particle_46 geometry"]=s
s= marker_sets["particle_46 geometry"]
mark=s.place_marker((2660.7, 2793.44, 4210.09), (1, 0.7, 0), 175.207)
if "particle_47 geometry" not in marker_sets:
s=new_marker_set('particle_47 geometry')
marker_sets["particle_47 geometry"]=s
s= marker_sets["particle_47 geometry"]
mark=s.place_marker((1950.58, 2878.68, 4224.32), (0.7, 0.7, 0.7), 131.468)
if "particle_48 geometry" not in marker_sets:
s=new_marker_set('particle_48 geometry')
marker_sets["particle_48 geometry"]=s
s= marker_sets["particle_48 geometry"]
mark=s.place_marker((1242.87, 2805.05, 4402.5), (0.7, 0.7, 0.7), 287.894)
if "particle_49 geometry" not in marker_sets:
s=new_marker_set('particle_49 geometry')
marker_sets["particle_49 geometry"]=s
s= marker_sets["particle_49 geometry"]
mark=s.place_marker((1535.45, 2450.02, 4111.98), (0.7, 0.7, 0.7), 88.1109)
if "particle_50 geometry" not in marker_sets:
s=new_marker_set('particle_50 geometry')
marker_sets["particle_50 geometry"]=s
s= marker_sets["particle_50 geometry"]
mark=s.place_marker((2084.47, 2421.1, 3866), (0.7, 0.7, 0.7), 145.385)
if "particle_51 geometry" not in marker_sets:
s=new_marker_set('particle_51 geometry')
marker_sets["particle_51 geometry"]=s
s= marker_sets["particle_51 geometry"]
mark=s.place_marker((2197.7, 2415.94, 3703.26), (0.7, 0.7, 0.7), 155.452)
if "particle_52 geometry" not in marker_sets:
s=new_marker_set('particle_52 geometry')
marker_sets["particle_52 geometry"]=s
s= marker_sets["particle_52 geometry"]
mark=s.place_marker((1579.57, 2313.04, 3698.24), (0.7, 0.7, 0.7), 145.512)
if "particle_53 geometry" not in marker_sets:
s=new_marker_set('particle_53 geometry')
marker_sets["particle_53 geometry"]=s
s= marker_sets["particle_53 geometry"]
mark=s.place_marker((1064.99, 2264.51, 3638.19), (0.7, 0.7, 0.7), 99.9972)
if "particle_54 geometry" not in marker_sets:
s=new_marker_set('particle_54 geometry')
marker_sets["particle_54 geometry"]=s
s= marker_sets["particle_54 geometry"]
mark=s.place_marker((633.587, 2333.73, 3527.82), (0.7, 0.7, 0.7), 327.529)
if "particle_55 geometry" not in marker_sets:
s=new_marker_set('particle_55 geometry')
marker_sets["particle_55 geometry"]=s
s= marker_sets["particle_55 geometry"]
mark=s.place_marker((1041.44, 2563.2, 3095.62), (0.7, 0.7, 0.7), 137.983)
if "particle_56 geometry" not in marker_sets:
s=new_marker_set('particle_56 geometry')
marker_sets["particle_56 geometry"]=s
s= marker_sets["particle_56 geometry"]
mark=s.place_marker((1535.41, 2464.94, 3033.43), (0.7, 0.7, 0.7), 83.3733)
if "particle_57 geometry" not in marker_sets:
s=new_marker_set('particle_57 geometry')
marker_sets["particle_57 geometry"]=s
s= marker_sets["particle_57 geometry"]
mark=s.place_marker((2092.48, 2346.83, 3078.49), (0.7, 0.7, 0.7), 101.562)
if "particle_58 geometry" not in marker_sets:
s=new_marker_set('particle_58 geometry')
marker_sets["particle_58 geometry"]=s
s= marker_sets["particle_58 geometry"]
mark=s.place_marker((2590.48, 2303.42, 3242.67), (0.7, 0.7, 0.7), 165.689)
if "particle_59 geometry" not in marker_sets:
s=new_marker_set('particle_59 geometry')
marker_sets["particle_59 geometry"]=s
s= marker_sets["particle_59 geometry"]
mark=s.place_marker((2553.55, 2162, 3513.95), (0.7, 0.7, 0.7), 136.925)
if "particle_60 geometry" not in marker_sets:
s=new_marker_set('particle_60 geometry')
marker_sets["particle_60 geometry"]=s
s= marker_sets["particle_60 geometry"]
mark=s.place_marker((2490.22, 2132.38, 3601.02), (0.7, 0.7, 0.7), 123.389)
if "particle_61 geometry" not in marker_sets:
s=new_marker_set('particle_61 geometry')
marker_sets["particle_61 geometry"]=s
s= marker_sets["particle_61 geometry"]
mark=s.place_marker((2117.35, 2046.94, 3361.97), (0.7, 0.7, 0.7), 184.47)
if "particle_62 geometry" not in marker_sets:
s=new_marker_set('particle_62 geometry')
marker_sets["particle_62 geometry"]=s
s= marker_sets["particle_62 geometry"]
mark=s.place_marker((1480.92, 1757.19, 2976.89), (0.7, 0.7, 0.7), 148.473)
if "particle_63 geometry" not in marker_sets:
s=new_marker_set('particle_63 geometry')
marker_sets["particle_63 geometry"]=s
s= marker_sets["particle_63 geometry"]
mark=s.place_marker((689.69, 1334.26, 2579.46), (0.7, 0.7, 0.7), 241.406)
if "particle_64 geometry" not in marker_sets:
s=new_marker_set('particle_64 geometry')
marker_sets["particle_64 geometry"]=s
s= marker_sets["particle_64 geometry"]
mark=s.place_marker((976.855, 1916.98, 2592.11), (0.7, 0.7, 0.7), 182.736)
if "particle_65 geometry" not in marker_sets:
s=new_marker_set('particle_65 geometry')
marker_sets["particle_65 geometry"]=s
s= marker_sets["particle_65 geometry"]
mark=s.place_marker((1220.02, 2228.78, 2819.81), (0.7, 0.7, 0.7), 166.62)
if "particle_66 geometry" not in marker_sets:
s=new_marker_set('particle_66 geometry')
marker_sets["particle_66 geometry"]=s
s= marker_sets["particle_66 geometry"]
mark=s.place_marker((1376.31, 2050.63, 3016.05), (0.7, 0.7, 0.7), 113.872)
if "particle_67 geometry" not in marker_sets:
s=new_marker_set('particle_67 geometry')
marker_sets["particle_67 geometry"]=s
s= marker_sets["particle_67 geometry"]
mark=s.place_marker((1645.23, 2142.44, 3180.63), (0.7, 0.7, 0.7), 110.065)
if "particle_68 geometry" not in marker_sets:
s=new_marker_set('particle_68 geometry')
marker_sets["particle_68 geometry"]=s
s= marker_sets["particle_68 geometry"]
mark=s.place_marker((1781.16, 2214.46, 3517.61), (0.7, 0.7, 0.7), 150.08)
if "particle_69 geometry" not in marker_sets:
s=new_marker_set('particle_69 geometry')
marker_sets["particle_69 geometry"]=s
s= marker_sets["particle_69 geometry"]
mark=s.place_marker((1807.92, 2264.33, 3971.9), (0.7, 0.7, 0.7), 118.525)
if "particle_70 geometry" not in marker_sets:
s=new_marker_set('particle_70 geometry')
marker_sets["particle_70 geometry"]=s
s= marker_sets["particle_70 geometry"]
mark=s.place_marker((1729.22, 2140.14, 4479.25), (0.7, 0.7, 0.7), 163.955)
if "particle_71 geometry" not in marker_sets:
s=new_marker_set('particle_71 geometry')
marker_sets["particle_71 geometry"]=s
s= marker_sets["particle_71 geometry"]
mark=s.place_marker((1425.62, 1972.37, 4600.21), (0.7, 0.7, 0.7), 170.131)
if "particle_72 geometry" not in marker_sets:
s=new_marker_set('particle_72 geometry')
marker_sets["particle_72 geometry"]=s
s= marker_sets["particle_72 geometry"]
mark=s.place_marker((1025.19, 1885.07, 3967.46), (0.7, 0.7, 0.7), 78.2127)
if "particle_73 geometry" not in marker_sets:
s=new_marker_set('particle_73 geometry')
marker_sets["particle_73 geometry"]=s
s= marker_sets["particle_73 geometry"]
mark=s.place_marker((619.698, 1917.54, 3244.38), (0.7, 0.7, 0.7), 251.896)
if "particle_74 geometry" not in marker_sets:
s=new_marker_set('particle_74 geometry')
marker_sets["particle_74 geometry"]=s
s= marker_sets["particle_74 geometry"]
mark=s.place_marker((386.637, 2131.66, 2646.02), (0.7, 0.7, 0.7), 167.55)
if "particle_75 geometry" not in marker_sets:
s=new_marker_set('particle_75 geometry')
marker_sets["particle_75 geometry"]=s
s= marker_sets["particle_75 geometry"]
mark=s.place_marker((415.567, 2385.02, 2320.36), (0.7, 0.7, 0.7), 167.846)
if "particle_76 geometry" not in marker_sets:
s=new_marker_set('particle_76 geometry')
marker_sets["particle_76 geometry"]=s
s= marker_sets["particle_76 geometry"]
mark=s.place_marker((395.139, 1865.61, 2278.62), (0.7, 0.7, 0.7), 259.68)
if "particle_77 geometry" not in marker_sets:
s=new_marker_set('particle_77 geometry')
marker_sets["particle_77 geometry"]=s
s= marker_sets["particle_77 geometry"]
mark=s.place_marker((332.267, 1589.14, 2649.98), (0.7, 0.7, 0.7), 80.2854)
if "particle_78 geometry" not in marker_sets:
s=new_marker_set('particle_78 geometry')
marker_sets["particle_78 geometry"]=s
s= marker_sets["particle_78 geometry"]
mark=s.place_marker((138.136, 1669.99, 2646.53), (0.7, 0.7, 0.7), 82.4427)
if "particle_79 geometry" not in marker_sets:
s=new_marker_set('particle_79 geometry')
marker_sets["particle_79 geometry"]=s
s= marker_sets["particle_79 geometry"]
mark=s.place_marker((-65.2647, 1558.05, 2383.17), (0.7, 0.7, 0.7), 212.811)
if "particle_80 geometry" not in marker_sets:
s=new_marker_set('particle_80 geometry')
marker_sets["particle_80 geometry"]=s
s= marker_sets["particle_80 geometry"]
mark=s.place_marker((476.828, 1316.04, 1986.52), (0.7, 0.7, 0.7), 176.391)
if "particle_81 geometry" not in marker_sets:
s=new_marker_set('particle_81 geometry')
marker_sets["particle_81 geometry"]=s
s= marker_sets["particle_81 geometry"]
mark=s.place_marker((1179.25, 1529.73, 2044.64), (0.7, 0.7, 0.7), 99.3204)
if "particle_82 geometry" not in marker_sets:
s=new_marker_set('particle_82 geometry')
marker_sets["particle_82 geometry"]=s
s= marker_sets["particle_82 geometry"]
mark=s.place_marker((1652.38, 1660.41, 2409.21), (0.7, 0.7, 0.7), 166.62)
if "particle_83 geometry" not in marker_sets:
s=new_marker_set('particle_83 geometry')
marker_sets["particle_83 geometry"]=s
s= marker_sets["particle_83 geometry"]
mark=s.place_marker((1854.45, 1493.06, 2571.16), (0.7, 0.7, 0.7), 102.831)
if "particle_84 geometry" not in marker_sets:
s=new_marker_set('particle_84 geometry')
marker_sets["particle_84 geometry"]=s
s= marker_sets["particle_84 geometry"]
mark=s.place_marker((1175.42, 1083.76, 2107.35), (0.7, 0.7, 0.7), 65.0997)
if "particle_85 geometry" not in marker_sets:
s=new_marker_set('particle_85 geometry')
marker_sets["particle_85 geometry"]=s
s= marker_sets["particle_85 geometry"]
mark=s.place_marker((999.43, 1533.31, 2266.25), (0.7, 0.7, 0.7), 92.1294)
if "particle_86 geometry" not in marker_sets:
s=new_marker_set('particle_86 geometry')
marker_sets["particle_86 geometry"]=s
s= marker_sets["particle_86 geometry"]
mark=s.place_marker((1121.21, 1998.55, 2506.58), (0.7, 0.7, 0.7), 194.791)
if "particle_87 geometry" not in marker_sets:
s=new_marker_set('particle_87 geometry')
marker_sets["particle_87 geometry"]=s
s= marker_sets["particle_87 geometry"]
mark=s.place_marker((1176.04, 2371.76, 2538.63), (0.7, 0.7, 0.7), 120.766)
if "particle_88 geometry" not in marker_sets:
s=new_marker_set('particle_88 geometry')
marker_sets["particle_88 geometry"]=s
s= marker_sets["particle_88 geometry"]
mark=s.place_marker((765.149, 2231, 2143.44), (0.7, 0.7, 0.7), 217.803)
if "particle_89 geometry" not in marker_sets:
s=new_marker_set('particle_89 geometry')
marker_sets["particle_89 geometry"]=s
s= marker_sets["particle_89 geometry"]
mark=s.place_marker((762.845, 1872.66, 2374.3), (0.7, 0.7, 0.7), 115.775)
if "particle_90 geometry" not in marker_sets:
s=new_marker_set('particle_90 geometry')
marker_sets["particle_90 geometry"]=s
s= marker_sets["particle_90 geometry"]
mark=s.place_marker((854.46, 1819.08, 2789.29), (0.7, 0.7, 0.7), 115.648)
if "particle_91 geometry" not in marker_sets:
s=new_marker_set('particle_91 geometry')
marker_sets["particle_91 geometry"]=s
s= marker_sets["particle_91 geometry"]
mark=s.place_marker((1166.35, 1927.9, 2808.37), (0.7, 0.7, 0.7), 83.8386)
if "particle_92 geometry" not in marker_sets:
s=new_marker_set('particle_92 geometry')
marker_sets["particle_92 geometry"]=s
s= marker_sets["particle_92 geometry"]
mark=s.place_marker((1212.1, 2208.45, 2581.17), (0.7, 0.7, 0.7), 124.32)
if "particle_93 geometry" not in marker_sets:
s=new_marker_set('particle_93 geometry')
marker_sets["particle_93 geometry"]=s
s= marker_sets["particle_93 geometry"]
mark=s.place_marker((1155.2, 2633.07, 2492.88), (0.7, 0.7, 0.7), 185.993)
if "particle_94 geometry" not in marker_sets:
s=new_marker_set('particle_94 geometry')
marker_sets["particle_94 geometry"]=s
s= marker_sets["particle_94 geometry"]
mark=s.place_marker((704.075, 3049.18, 2603.4), (0.7, 0.7, 0.7), 238.826)
if "particle_95 geometry" not in marker_sets:
s=new_marker_set('particle_95 geometry')
marker_sets["particle_95 geometry"]=s
s= marker_sets["particle_95 geometry"]
mark=s.place_marker((239.176, 3076.59, 2883.63), (0.7, 0.7, 0.7), 128.465)
if "particle_96 geometry" not in marker_sets:
s=new_marker_set('particle_96 geometry')
marker_sets["particle_96 geometry"]=s
s= marker_sets["particle_96 geometry"]
mark=s.place_marker((465.025, 2526.89, 3149.83), (0.7, 0.7, 0.7), 203.209)
if "particle_97 geometry" not in marker_sets:
s=new_marker_set('particle_97 geometry')
marker_sets["particle_97 geometry"]=s
s= marker_sets["particle_97 geometry"]
mark=s.place_marker((864.585, 2241, 3016.08), (0.7, 0.7, 0.7), 160.486)
if "particle_98 geometry" not in marker_sets:
s=new_marker_set('particle_98 geometry')
marker_sets["particle_98 geometry"]=s
s= marker_sets["particle_98 geometry"]
mark=s.place_marker((812.334, 2485.22, 2769.71), (0.7, 0.7, 0.7), 149.277)
if "particle_99 geometry" not in marker_sets:
s=new_marker_set('particle_99 geometry')
marker_sets["particle_99 geometry"]=s
s= marker_sets["particle_99 geometry"]
mark=s.place_marker((271.916, 2438.06, 2714.12), (0.7, 0.7, 0.7), 35.7435)
if "particle_100 geometry" not in marker_sets:
s=new_marker_set('particle_100 geometry')
marker_sets["particle_100 geometry"]=s
s= marker_sets["particle_100 geometry"]
mark=s.place_marker((1120.24, 2056.5, 3086.76), (0.7, 0.7, 0.7), 98.3898)
if "particle_101 geometry" not in marker_sets:
s=new_marker_set('particle_101 geometry')
marker_sets["particle_101 geometry"]=s
s= marker_sets["particle_101 geometry"]
mark=s.place_marker((2113.76, 1801.15, 3300.46), (0.7, 0.7, 0.7), 188.404)
if "particle_102 geometry" not in marker_sets:
s=new_marker_set('particle_102 geometry')
marker_sets["particle_102 geometry"]=s
s= marker_sets["particle_102 geometry"]
mark=s.place_marker((2561.87, 1871.67, 3096.24), (0.7, 0.7, 0.7), 110.318)
if "particle_103 geometry" not in marker_sets:
s=new_marker_set('particle_103 geometry')
marker_sets["particle_103 geometry"]=s
s= marker_sets["particle_103 geometry"]
mark=s.place_marker((2256.69, 1803.62, 2863.51), (0.7, 0.7, 0.7), 127.534)
if "particle_104 geometry" not in marker_sets:
s=new_marker_set('particle_104 geometry')
marker_sets["particle_104 geometry"]=s
s= marker_sets["particle_104 geometry"]
mark=s.place_marker((1890.22, 1814.26, 2781.06), (0.7, 0.7, 0.7), 91.368)
if "particle_105 geometry" not in marker_sets:
s=new_marker_set('particle_105 geometry')
marker_sets["particle_105 geometry"]=s
s= marker_sets["particle_105 geometry"]
mark=s.place_marker((1516.28, 1908.1, 2785.08), (0.7, 0.7, 0.7), 131.045)
if "particle_106 geometry" not in marker_sets:
s=new_marker_set('particle_106 geometry')
marker_sets["particle_106 geometry"]=s
s= marker_sets["particle_106 geometry"]
mark=s.place_marker((1205.77, 2131.44, 2944.69), (0.7, 0.7, 0.7), 143.608)
if "particle_107 geometry" not in marker_sets:
s=new_marker_set('particle_107 geometry')
marker_sets["particle_107 geometry"]=s
s= marker_sets["particle_107 geometry"]
mark=s.place_marker((1076.96, 2470.27, 2797.59), (0.7, 0.7, 0.7), 135.783)
if "particle_108 geometry" not in marker_sets:
s=new_marker_set('particle_108 geometry')
marker_sets["particle_108 geometry"]=s
s= marker_sets["particle_108 geometry"]
mark=s.place_marker((994.354, 2750.03, 2631.63), (0.7, 0.7, 0.7), 92.5947)
if "particle_109 geometry" not in marker_sets:
s=new_marker_set('particle_109 geometry')
marker_sets["particle_109 geometry"]=s
s= marker_sets["particle_109 geometry"]
mark=s.place_marker((1208.81, 2720.28, 2455.03), (0.7, 0.7, 0.7), 150.123)
if "particle_110 geometry" not in marker_sets:
s=new_marker_set('particle_110 geometry')
marker_sets["particle_110 geometry"]=s
s= marker_sets["particle_110 geometry"]
mark=s.place_marker((1383.15, 2571.72, 2340.34), (0.7, 0.7, 0.7), 121.57)
if "particle_111 geometry" not in marker_sets:
s=new_marker_set('particle_111 geometry')
marker_sets["particle_111 geometry"]=s
s= marker_sets["particle_111 geometry"]
mark=s.place_marker((1285.99, 2625.89, 2028.83), (0.7, 0.7, 0.7), 104.777)
if "particle_112 geometry" not in marker_sets:
s=new_marker_set('particle_112 geometry')
marker_sets["particle_112 geometry"]=s
s= marker_sets["particle_112 geometry"]
mark=s.place_marker((1700.63, 2574.55, 2059.23), (0.7, 0.7, 0.7), 114.844)
if "particle_113 geometry" not in marker_sets:
s=new_marker_set('particle_113 geometry')
marker_sets["particle_113 geometry"]=s
s= marker_sets["particle_113 geometry"]
mark=s.place_marker((2151.15, 2550.04, 2101.75), (0.7, 0.7, 0.7), 150.588)
if "particle_114 geometry" not in marker_sets:
s=new_marker_set('particle_114 geometry')
marker_sets["particle_114 geometry"]=s
s= marker_sets["particle_114 geometry"]
mark=s.place_marker((2290.34, 2287.08, 2398.73), (0.7, 0.7, 0.7), 103.55)
if "particle_115 geometry" not in marker_sets:
s=new_marker_set('particle_115 geometry')
marker_sets["particle_115 geometry"]=s
s= marker_sets["particle_115 geometry"]
mark=s.place_marker((2268.23, 1746.8, 2531.29), (0.7, 0.7, 0.7), 215.392)
if "particle_116 geometry" not in marker_sets:
s=new_marker_set('particle_116 geometry')
marker_sets["particle_116 geometry"]=s
s= marker_sets["particle_116 geometry"]
mark=s.place_marker((2419.2, 1224.3, 2664.68), (0.7, 0.7, 0.7), 99.9126)
if "particle_117 geometry" not in marker_sets:
s=new_marker_set('particle_117 geometry')
marker_sets["particle_117 geometry"]=s
s= marker_sets["particle_117 geometry"]
mark=s.place_marker((2264.59, 601.545, 2411.49), (0.7, 0.7, 0.7), 99.7857)
if "particle_118 geometry" not in marker_sets:
s=new_marker_set('particle_118 geometry')
marker_sets["particle_118 geometry"]=s
s= marker_sets["particle_118 geometry"]
mark=s.place_marker((1947.82, 165.77, 2295.8), (0.7, 0.7, 0.7), 109.98)
if "particle_119 geometry" not in marker_sets:
s=new_marker_set('particle_119 geometry')
marker_sets["particle_119 geometry"]=s
s= marker_sets["particle_119 geometry"]
mark=s.place_marker((2071.35, 629.353, 2184.27), (0.7, 0.7, 0.7), 102.831)
if "particle_120 geometry" not in marker_sets:
s=new_marker_set('particle_120 geometry')
marker_sets["particle_120 geometry"]=s
s= marker_sets["particle_120 geometry"]
mark=s.place_marker((2059.58, 997.692, 2291.31), (0.7, 0.7, 0.7), 103.593)
if "particle_121 geometry" not in marker_sets:
s=new_marker_set('particle_121 geometry')
marker_sets["particle_121 geometry"]=s
s= marker_sets["particle_121 geometry"]
mark=s.place_marker((1891.4, 1429.91, 2379.14), (0.7, 0.7, 0.7), 173.472)
if "particle_122 geometry" not in marker_sets:
s=new_marker_set('particle_122 geometry')
marker_sets["particle_122 geometry"]=s
s= marker_sets["particle_122 geometry"]
mark=s.place_marker((1393.43, 1645.94, 2404.64), (0.7, 0.7, 0.7), 113.575)
if "particle_123 geometry" not in marker_sets:
s=new_marker_set('particle_123 geometry')
marker_sets["particle_123 geometry"]=s
s= marker_sets["particle_123 geometry"]
mark=s.place_marker((1215.59, 2089.8, 2480.27), (0.7, 0.7, 0.7), 128.296)
if "particle_124 geometry" not in marker_sets:
s=new_marker_set('particle_124 geometry')
marker_sets["particle_124 geometry"]=s
s= marker_sets["particle_124 geometry"]
mark=s.place_marker((1022.08, 2515.65, 2444.69), (0.7, 0.7, 0.7), 145.004)
if "particle_125 geometry" not in marker_sets:
s=new_marker_set('particle_125 geometry')
marker_sets["particle_125 geometry"]=s
s= marker_sets["particle_125 geometry"]
mark=s.place_marker((1007.19, 3065.08, 2443.63), (0.7, 0.7, 0.7), 148.261)
if "particle_126 geometry" not in marker_sets:
s=new_marker_set('particle_126 geometry')
marker_sets["particle_126 geometry"]=s
s= marker_sets["particle_126 geometry"]
mark=s.place_marker((659.498, 3624.37, 2459.99), (0.7, 0.7, 0.7), 127.704)
if "particle_127 geometry" not in marker_sets:
s=new_marker_set('particle_127 geometry')
marker_sets["particle_127 geometry"]=s
s= marker_sets["particle_127 geometry"]
mark=s.place_marker((236.76, 4028.55, 2607.56), (0.7, 0.7, 0.7), 129.607)
if "particle_128 geometry" not in marker_sets:
s=new_marker_set('particle_128 geometry')
marker_sets["particle_128 geometry"]=s
s= marker_sets["particle_128 geometry"]
mark=s.place_marker((176.597, 3573.19, 2772.82), (0.7, 0.7, 0.7), 139.759)
if "particle_129 geometry" not in marker_sets:
s=new_marker_set('particle_129 geometry')
marker_sets["particle_129 geometry"]=s
s= marker_sets["particle_129 geometry"]
mark=s.place_marker((391.794, 2986.61, 2977.6), (0.7, 0.7, 0.7), 118.567)
if "particle_130 geometry" not in marker_sets:
s=new_marker_set('particle_130 geometry')
marker_sets["particle_130 geometry"]=s
s= marker_sets["particle_130 geometry"]
mark=s.place_marker((495.731, 2651.75, 2759.48), (0.7, 0.7, 0.7), 136.164)
if "particle_131 geometry" not in marker_sets:
s=new_marker_set('particle_131 geometry')
marker_sets["particle_131 geometry"]=s
s= marker_sets["particle_131 geometry"]
mark=s.place_marker((794.781, 2395.42, 2572.71), (0.7, 0.7, 0.7), 121.655)
if "particle_132 geometry" not in marker_sets:
s=new_marker_set('particle_132 geometry')
marker_sets["particle_132 geometry"]=s
s= marker_sets["particle_132 geometry"]
mark=s.place_marker((1079.69, 2283.42, 2292.91), (0.7, 0.7, 0.7), 127.492)
if "particle_133 geometry" not in marker_sets:
s=new_marker_set('particle_133 geometry')
marker_sets["particle_133 geometry"]=s
s= marker_sets["particle_133 geometry"]
mark=s.place_marker((1048.26, 2155.36, 1896.93), (0.7, 0.7, 0.7), 138.617)
if "particle_134 geometry" not in marker_sets:
s=new_marker_set('particle_134 geometry')
marker_sets["particle_134 geometry"]=s
s= marker_sets["particle_134 geometry"]
mark=s.place_marker((1347.29, 2160.97, 1700.58), (0.7, 0.7, 0.7), 120.766)
if "particle_135 geometry" not in marker_sets:
s=new_marker_set('particle_135 geometry')
marker_sets["particle_135 geometry"]=s
s= marker_sets["particle_135 geometry"]
mark=s.place_marker((1593.19, 2009.26, 1763.74), (0.7, 0.7, 0.7), 145.893)
if "particle_136 geometry" not in marker_sets:
s=new_marker_set('particle_136 geometry')
marker_sets["particle_136 geometry"]=s
s= marker_sets["particle_136 geometry"]
mark=s.place_marker((1697.88, 2080.68, 2217.66), (0.7, 0.7, 0.7), 185.02)
if "particle_137 geometry" not in marker_sets:
s=new_marker_set('particle_137 geometry')
marker_sets["particle_137 geometry"]=s
s= marker_sets["particle_137 geometry"]
mark=s.place_marker((2037.41, 2253.56, 2586.55), (0.7, 0.7, 0.7), 221.314)
if "particle_138 geometry" not in marker_sets:
s=new_marker_set('particle_138 geometry')
marker_sets["particle_138 geometry"]=s
s= marker_sets["particle_138 geometry"]
mark=s.place_marker((2413.06, 2534.72, 2764.72), (0.7, 0.7, 0.7), 165.139)
if "particle_139 geometry" not in marker_sets:
s=new_marker_set('particle_139 geometry')
marker_sets["particle_139 geometry"]=s
s= marker_sets["particle_139 geometry"]
mark=s.place_marker((2369.92, 2542.28, 2887.35), (0.7, 0.7, 0.7), 179.437)
if "particle_140 geometry" not in marker_sets:
s=new_marker_set('particle_140 geometry')
marker_sets["particle_140 geometry"]=s
s= marker_sets["particle_140 geometry"]
mark=s.place_marker((2125.59, 2196.36, 2841.98), (0.7, 0.7, 0.7), 137.898)
if "particle_141 geometry" not in marker_sets:
s=new_marker_set('particle_141 geometry')
marker_sets["particle_141 geometry"]=s
s= marker_sets["particle_141 geometry"]
mark=s.place_marker((1884.39, 1937.33, 2766.66), (0.7, 0.7, 0.7), 124.658)
if "particle_142 geometry" not in marker_sets:
s=new_marker_set('particle_142 geometry')
marker_sets["particle_142 geometry"]=s
s= marker_sets["particle_142 geometry"]
mark=s.place_marker((1769.56, 1882.54, 2446.33), (0.7, 0.7, 0.7), 97.7553)
if "particle_143 geometry" not in marker_sets:
s=new_marker_set('particle_143 geometry')
marker_sets["particle_143 geometry"]=s
s= marker_sets["particle_143 geometry"]
mark=s.place_marker((1654.62, 1761.79, 2182.19), (0.7, 0.7, 0.7), 92.9331)
if "particle_144 geometry" not in marker_sets:
s=new_marker_set('particle_144 geometry')
marker_sets["particle_144 geometry"]=s
s= marker_sets["particle_144 geometry"]
mark=s.place_marker((1454.25, 1560.16, 1979.24), (0.7, 0.7, 0.7), 123.135)
if "particle_145 geometry" not in marker_sets:
s=new_marker_set('particle_145 geometry')
marker_sets["particle_145 geometry"]=s
s= marker_sets["particle_145 geometry"]
mark=s.place_marker((1722.67, 1613.56, 2262.55), (0.7, 0.7, 0.7), 125.716)
if "particle_146 geometry" not in marker_sets:
s=new_marker_set('particle_146 geometry')
marker_sets["particle_146 geometry"]=s
s= marker_sets["particle_146 geometry"]
mark=s.place_marker((1864.9, 1852.38, 2420.83), (0.7, 0.7, 0.7), 127.534)
if "particle_147 geometry" not in marker_sets:
s=new_marker_set('particle_147 geometry')
marker_sets["particle_147 geometry"]=s
s= marker_sets["particle_147 geometry"]
mark=s.place_marker((1700.63, 2090.08, 2498.3), (0.7, 0.7, 0.7), 94.9212)
if "particle_148 geometry" not in marker_sets:
s=new_marker_set('particle_148 geometry')
marker_sets["particle_148 geometry"]=s
s= marker_sets["particle_148 geometry"]
mark=s.place_marker((1970.18, 2320.13, 2769.81), (0.7, 0.7, 0.7), 137.644)
if "particle_149 geometry" not in marker_sets:
s=new_marker_set('particle_149 geometry')
marker_sets["particle_149 geometry"]=s
s= marker_sets["particle_149 geometry"]
mark=s.place_marker((2142.81, 2590.63, 2921.45), (0.7, 0.7, 0.7), 149.277)
if "particle_150 geometry" not in marker_sets:
s=new_marker_set('particle_150 geometry')
marker_sets["particle_150 geometry"]=s
s= marker_sets["particle_150 geometry"]
mark=s.place_marker((2188.93, 2650.74, 2575.04), (0.7, 0.7, 0.7), 103.677)
if "particle_151 geometry" not in marker_sets:
s=new_marker_set('particle_151 geometry')
marker_sets["particle_151 geometry"]=s
s= marker_sets["particle_151 geometry"]
mark=s.place_marker((2014.9, 2764.52, 2131.97), (0.7, 0.7, 0.7), 99.6588)
if "particle_152 geometry" not in marker_sets:
s=new_marker_set('particle_152 geometry')
marker_sets["particle_152 geometry"]=s
s= marker_sets["particle_152 geometry"]
mark=s.place_marker((1872.7, 2904.77, 1815.17), (0.7, 0.7, 0.7), 134.133)
if "particle_153 geometry" not in marker_sets:
s=new_marker_set('particle_153 geometry')
marker_sets["particle_153 geometry"]=s
s= marker_sets["particle_153 geometry"]
mark=s.place_marker((2065.07, 2647.77, 1890.05), (0.7, 0.7, 0.7), 173.007)
if "particle_154 geometry" not in marker_sets:
s=new_marker_set('particle_154 geometry')
marker_sets["particle_154 geometry"]=s
s= marker_sets["particle_154 geometry"]
mark=s.place_marker((2360.24, 2560.58, 2370.17), (0.7, 0.7, 0.7), 141.028)
if "particle_155 geometry" not in marker_sets:
s=new_marker_set('particle_155 geometry')
marker_sets["particle_155 geometry"]=s
s= marker_sets["particle_155 geometry"]
mark=s.place_marker((2521.03, 2417.09, 2790.03), (0.7, 0.7, 0.7), 161.121)
if "particle_156 geometry" not in marker_sets:
s=new_marker_set('particle_156 geometry')
marker_sets["particle_156 geometry"]=s
s= marker_sets["particle_156 geometry"]
mark=s.place_marker((2271.29, 2378.68, 3035.4), (0.7, 0.7, 0.7), 119.582)
if "particle_157 geometry" not in marker_sets:
s=new_marker_set('particle_157 geometry')
marker_sets["particle_157 geometry"]=s
s= marker_sets["particle_157 geometry"]
mark=s.place_marker((1921.36, 2277.87, 2853.41), (0.7, 0.7, 0.7), 137.094)
if "particle_158 geometry" not in marker_sets:
s=new_marker_set('particle_158 geometry')
marker_sets["particle_158 geometry"]=s
s= marker_sets["particle_158 geometry"]
mark=s.place_marker((1569.94, 1979.71, 2643.1), (0.7, 0.7, 0.7), 149.234)
if "particle_159 geometry" not in marker_sets:
s=new_marker_set('particle_159 geometry')
marker_sets["particle_159 geometry"]=s
s= marker_sets["particle_159 geometry"]
mark=s.place_marker((1788.05, 1619.44, 2732.05), (0.7, 0.7, 0.7), 151.011)
if "particle_160 geometry" not in marker_sets:
s=new_marker_set('particle_160 geometry')
marker_sets["particle_160 geometry"]=s
s= marker_sets["particle_160 geometry"]
mark=s.place_marker((2254.85, 1476.63, 2871.68), (0.7, 0.7, 0.7), 184.216)
if "particle_161 geometry" not in marker_sets:
s=new_marker_set('particle_161 geometry')
marker_sets["particle_161 geometry"]=s
s= marker_sets["particle_161 geometry"]
mark=s.place_marker((2490.01, 1519.96, 2536.69), (0.7, 0.7, 0.7), 170.596)
if "particle_162 geometry" not in marker_sets:
s=new_marker_set('particle_162 geometry')
marker_sets["particle_162 geometry"]=s
s= marker_sets["particle_162 geometry"]
mark=s.place_marker((2077.33, 1324.47, 2098.18), (0.7, 0.7, 0.7), 215.603)
if "particle_163 geometry" not in marker_sets:
s=new_marker_set('particle_163 geometry')
marker_sets["particle_163 geometry"]=s
s= marker_sets["particle_163 geometry"]
mark=s.place_marker((1448.99, 1016.35, 1560.04), (0.7, 0.7, 0.7), 79.0164)
if "particle_164 geometry" not in marker_sets:
s=new_marker_set('particle_164 geometry')
marker_sets["particle_164 geometry"]=s
s= marker_sets["particle_164 geometry"]
mark=s.place_marker((1447.34, 1224.12, 1284.52), (0.7, 0.7, 0.7), 77.2821)
if "particle_165 geometry" not in marker_sets:
s=new_marker_set('particle_165 geometry')
marker_sets["particle_165 geometry"]=s
s= marker_sets["particle_165 geometry"]
mark=s.place_marker((1551.52, 1550.44, 1380.45), (0.7, 0.7, 0.7), 188.658)
if "particle_166 geometry" not in marker_sets:
s=new_marker_set('particle_166 geometry')
marker_sets["particle_166 geometry"]=s
s= marker_sets["particle_166 geometry"]
mark=s.place_marker((1768.09, 1683.08, 1206.49), (0.7, 0.7, 0.7), 115.437)
if "particle_167 geometry" not in marker_sets:
s=new_marker_set('particle_167 geometry')
marker_sets["particle_167 geometry"]=s
s= marker_sets["particle_167 geometry"]
mark=s.place_marker((1982.46, 1838.62, 1752.99), (0.7, 0.7, 0.7), 88.4916)
if "particle_168 geometry" not in marker_sets:
s=new_marker_set('particle_168 geometry')
marker_sets["particle_168 geometry"]=s
s= marker_sets["particle_168 geometry"]
mark=s.place_marker((2198.83, 1998.3, 2320.88), (0.7, 0.7, 0.7), 108.88)
if "particle_169 geometry" not in marker_sets:
s=new_marker_set('particle_169 geometry')
marker_sets["particle_169 geometry"]=s
s= marker_sets["particle_169 geometry"]
mark=s.place_marker((2190.67, 1966.85, 2672.38), (0.7, 0.7, 0.7), 172.119)
if "particle_170 geometry" not in marker_sets:
s=new_marker_set('particle_170 geometry')
marker_sets["particle_170 geometry"]=s
s= marker_sets["particle_170 geometry"]
mark=s.place_marker((1895.86, 1749.31, 2345.52), (0.7, 0.7, 0.7), 139.505)
if "particle_171 geometry" not in marker_sets:
s=new_marker_set('particle_171 geometry')
marker_sets["particle_171 geometry"]=s
s= marker_sets["particle_171 geometry"]
mark=s.place_marker((1591.82, 1544.87, 2020.88), (0.7, 0.7, 0.7), 92.7639)
if "particle_172 geometry" not in marker_sets:
s=new_marker_set('particle_172 geometry')
marker_sets["particle_172 geometry"]=s
s= marker_sets["particle_172 geometry"]
mark=s.place_marker((1528.94, 1450.7, 2235.44), (0.7, 0.7, 0.7), 89.8452)
if "particle_173 geometry" not in marker_sets:
s=new_marker_set('particle_173 geometry')
marker_sets["particle_173 geometry"]=s
s= marker_sets["particle_173 geometry"]
mark=s.place_marker((1783.1, 1461.63, 2119.04), (0.7, 0.7, 0.7), 149.446)
if "particle_174 geometry" not in marker_sets:
s=new_marker_set('particle_174 geometry')
marker_sets["particle_174 geometry"]=s
s= marker_sets["particle_174 geometry"]
mark=s.place_marker((1847.58, 1471.08, 1787.08), (0.7, 0.7, 0.7), 126.858)
if "particle_175 geometry" not in marker_sets:
s=new_marker_set('particle_175 geometry')
marker_sets["particle_175 geometry"]=s
s= marker_sets["particle_175 geometry"]
mark=s.place_marker((1520.89, 1431.7, 1714.26), (0.7, 0.7, 0.7), 106.046)
if "particle_176 geometry" not in marker_sets:
s=new_marker_set('particle_176 geometry')
marker_sets["particle_176 geometry"]=s
s= marker_sets["particle_176 geometry"]
mark=s.place_marker((1152.83, 1419.03, 2073.99), (0.7, 0.7, 0.7), 156.298)
if "particle_177 geometry" not in marker_sets:
s=new_marker_set('particle_177 geometry')
marker_sets["particle_177 geometry"]=s
s= marker_sets["particle_177 geometry"]
mark=s.place_marker((698.124, 1563.13, 2460.02), (0.7, 0.7, 0.7), 231.212)
if "particle_178 geometry" not in marker_sets:
s=new_marker_set('particle_178 geometry')
marker_sets["particle_178 geometry"]=s
s= marker_sets["particle_178 geometry"]
mark=s.place_marker((775.789, 1632.42, 3011.33), (0.7, 0.7, 0.7), 88.4916)
if "particle_179 geometry" not in marker_sets:
s=new_marker_set('particle_179 geometry')
marker_sets["particle_179 geometry"]=s
s= marker_sets["particle_179 geometry"]
mark=s.place_marker((1192.45, 1685.06, 3227.69), (0.7, 0.7, 0.7), 111.334)
if "particle_180 geometry" not in marker_sets:
s=new_marker_set('particle_180 geometry')
marker_sets["particle_180 geometry"]=s
s= marker_sets["particle_180 geometry"]
mark=s.place_marker((1800.21, 1794.22, 3131.01), (0.7, 0.7, 0.7), 127.619)
if "particle_181 geometry" not in marker_sets:
s=new_marker_set('particle_181 geometry')
marker_sets["particle_181 geometry"]=s
s= marker_sets["particle_181 geometry"]
mark=s.place_marker((2255.57, 1850.98, 3056.35), (0.7, 0.7, 0.7), 230.746)
if "particle_182 geometry" not in marker_sets:
s=new_marker_set('particle_182 geometry')
marker_sets["particle_182 geometry"]=s
s= marker_sets["particle_182 geometry"]
mark=s.place_marker((1963.63, 1725.33, 2819.67), (0.7, 0.7, 0.7), 124.573)
if "particle_183 geometry" not in marker_sets:
s=new_marker_set('particle_183 geometry')
marker_sets["particle_183 geometry"]=s
s= marker_sets["particle_183 geometry"]
mark=s.place_marker((1517.44, 1438.73, 2496.89), (0.7, 0.7, 0.7), 124.489)
if "particle_184 geometry" not in marker_sets:
s=new_marker_set('particle_184 geometry')
marker_sets["particle_184 geometry"]=s
s= marker_sets["particle_184 geometry"]
mark=s.place_marker((1527.95, 1431.05, 2110.6), (0.7, 0.7, 0.7), 196.61)
if "particle_185 geometry" not in marker_sets:
s=new_marker_set('particle_185 geometry')
marker_sets["particle_185 geometry"]=s
s= marker_sets["particle_185 geometry"]
mark=s.place_marker((1694.71, 1409.22, 2246.94), (0.7, 0.7, 0.7), 134.049)
if "particle_186 geometry" not in marker_sets:
s=new_marker_set('particle_186 geometry')
marker_sets["particle_186 geometry"]=s
s= marker_sets["particle_186 geometry"]
mark=s.place_marker((1674.33, 1140.63, 2391.12), (0.7, 0.7, 0.7), 141.493)
if "particle_187 geometry" not in marker_sets:
s=new_marker_set('particle_187 geometry')
marker_sets["particle_187 geometry"]=s
s= marker_sets["particle_187 geometry"]
mark=s.place_marker((1487.69, 777.056, 2378.3), (0.7, 0.7, 0.7), 172.203)
if "particle_188 geometry" not in marker_sets:
s=new_marker_set('particle_188 geometry')
marker_sets["particle_188 geometry"]=s
s= marker_sets["particle_188 geometry"]
mark=s.place_marker((1752.35, 1276.02, 2061.65), (0.7, 0.7, 0.7), 271.354)
if "particle_189 geometry" not in marker_sets:
s=new_marker_set('particle_189 geometry')
marker_sets["particle_189 geometry"]=s
s= marker_sets["particle_189 geometry"]
mark=s.place_marker((1708.94, 1783.94, 1954.89), (0.7, 0.7, 0.7), 97.0785)
if "particle_190 geometry" not in marker_sets:
s=new_marker_set('particle_190 geometry')
marker_sets["particle_190 geometry"]=s
s= marker_sets["particle_190 geometry"]
mark=s.place_marker((1485.58, 2146.14, 1936.5), (0.7, 0.7, 0.7), 151.857)
if "particle_191 geometry" not in marker_sets:
s=new_marker_set('particle_191 geometry')
marker_sets["particle_191 geometry"]=s
s= marker_sets["particle_191 geometry"]
mark=s.place_marker((1430.81, 2714.79, 1791.46), (0.7, 0.7, 0.7), 199.233)
if "particle_192 geometry" not in marker_sets:
s=new_marker_set('particle_192 geometry')
marker_sets["particle_192 geometry"]=s
s= marker_sets["particle_192 geometry"]
mark=s.place_marker((1825.17, 3079.98, 2010.05), (0.7, 0.7, 0.7), 118.863)
if "particle_193 geometry" not in marker_sets:
s=new_marker_set('particle_193 geometry')
marker_sets["particle_193 geometry"]=s
s= marker_sets["particle_193 geometry"]
mark=s.place_marker((1796.77, 3497.05, 2140.99), (0.7, 0.7, 0.7), 172.415)
if "particle_194 geometry" not in marker_sets:
s=new_marker_set('particle_194 geometry')
marker_sets["particle_194 geometry"]=s
s= marker_sets["particle_194 geometry"]
mark=s.place_marker((1468.19, 3876.25, 2083.91), (0.7, 0.7, 0.7), 134.26)
if "particle_195 geometry" not in marker_sets:
s=new_marker_set('particle_195 geometry')
marker_sets["particle_195 geometry"]=s
s= marker_sets["particle_195 geometry"]
mark=s.place_marker((781.992, 4412.77, 1751.36), (0.7, 0.7, 0.7), 139.548)
if "particle_196 geometry" not in marker_sets:
s=new_marker_set('particle_196 geometry')
marker_sets["particle_196 geometry"]=s
s= marker_sets["particle_196 geometry"]
mark=s.place_marker((452.374, 4014.27, 1944.13), (0.7, 0.7, 0.7), 196.526)
if "particle_197 geometry" not in marker_sets:
s=new_marker_set('particle_197 geometry')
marker_sets["particle_197 geometry"]=s
s= marker_sets["particle_197 geometry"]
mark=s.place_marker((637.816, 3308.4, 2240.97), (0.7, 0.7, 0.7), 136.206)
if "particle_198 geometry" not in marker_sets:
s=new_marker_set('particle_198 geometry')
marker_sets["particle_198 geometry"]=s
s= marker_sets["particle_198 geometry"]
mark=s.place_marker((1279.52, 2790.81, 2773.94), (0.7, 0.7, 0.7), 152.322)
if "particle_199 geometry" not in marker_sets:
s=new_marker_set('particle_199 geometry')
marker_sets["particle_199 geometry"]=s
s= marker_sets["particle_199 geometry"]
mark=s.place_marker((1808.85, 2461.01, 2977.05), (0.7, 0.7, 0.7), 126.054)
if "particle_200 geometry" not in marker_sets:
s=new_marker_set('particle_200 geometry')
marker_sets["particle_200 geometry"]=s
s= marker_sets["particle_200 geometry"]
mark=s.place_marker((1674.69, 2148.75, 2763.12), (0.7, 0.7, 0.7), 164.378)
if "particle_201 geometry" not in marker_sets:
s=new_marker_set('particle_201 geometry')
marker_sets["particle_201 geometry"]=s
s= marker_sets["particle_201 geometry"]
mark=s.place_marker((1523.26, 2055.97, 2364.47), (0.7, 0.7, 0.7), 122.205)
if "particle_202 geometry" not in marker_sets:
s=new_marker_set('particle_202 geometry')
marker_sets["particle_202 geometry"]=s
s= marker_sets["particle_202 geometry"]
mark=s.place_marker((1548.72, 2072.17, 1925.03), (0.7, 0.7, 0.7), 134.979)
if "particle_203 geometry" not in marker_sets:
s=new_marker_set('particle_203 geometry')
marker_sets["particle_203 geometry"]=s
s= marker_sets["particle_203 geometry"]
mark=s.place_marker((1832.95, 2290.67, 1876.74), (0.7, 0.7, 0.7), 136.375)
if "particle_204 geometry" not in marker_sets:
s=new_marker_set('particle_204 geometry')
marker_sets["particle_204 geometry"]=s
s= marker_sets["particle_204 geometry"]
mark=s.place_marker((1673.51, 2093.81, 2024.59), (0.7, 0.7, 0.7), 151.688)
if "particle_205 geometry" not in marker_sets:
s=new_marker_set('particle_205 geometry')
marker_sets["particle_205 geometry"]=s
s= marker_sets["particle_205 geometry"]
mark=s.place_marker((1675.7, 1825.47, 1962.75), (0.7, 0.7, 0.7), 116.156)
if "particle_206 geometry" not in marker_sets:
s=new_marker_set('particle_206 geometry')
marker_sets["particle_206 geometry"]=s
s= marker_sets["particle_206 geometry"]
mark=s.place_marker((2153.89, 2029.27, 2418.73), (0.7, 0.7, 0.7), 122.839)
if "particle_207 geometry" not in marker_sets:
s=new_marker_set('particle_207 geometry')
marker_sets["particle_207 geometry"]=s
s= marker_sets["particle_207 geometry"]
mark=s.place_marker((2235.17, 2245.35, 2877.83), (0.7, 0.7, 0.7), 164.716)
if "particle_208 geometry" not in marker_sets:
s=new_marker_set('particle_208 geometry')
marker_sets["particle_208 geometry"]=s
s= marker_sets["particle_208 geometry"]
mark=s.place_marker((1376.82, 2385.23, 2701.41), (0.7, 0.7, 0.7), 303.672)
if "particle_209 geometry" not in marker_sets:
s=new_marker_set('particle_209 geometry')
marker_sets["particle_209 geometry"]=s
s= marker_sets["particle_209 geometry"]
mark=s.place_marker((572.381, 2564.06, 2044.17), (0.7, 0.7, 0.7), 220.298)
if "particle_210 geometry" not in marker_sets:
s=new_marker_set('particle_210 geometry')
marker_sets["particle_210 geometry"]=s
s= marker_sets["particle_210 geometry"]
mark=s.place_marker((843.404, 2113.46, 1730.02), (0.7, 0.7, 0.7), 175.883)
if "particle_211 geometry" not in marker_sets:
s=new_marker_set('particle_211 geometry')
marker_sets["particle_211 geometry"]=s
s= marker_sets["particle_211 geometry"]
mark=s.place_marker((1062.67, 1499.51, 1741.59), (0.7, 0.7, 0.7), 233.581)
if "particle_212 geometry" not in marker_sets:
s=new_marker_set('particle_212 geometry')
marker_sets["particle_212 geometry"]=s
s= marker_sets["particle_212 geometry"]
mark=s.place_marker((1170.37, 1068.62, 2353.16), (0.7, 0.7, 0.7), 231.127)
if "particle_213 geometry" not in marker_sets:
s=new_marker_set('particle_213 geometry')
marker_sets["particle_213 geometry"]=s
s= marker_sets["particle_213 geometry"]
mark=s.place_marker((1544.28, 550.082, 2402.98), (0.7, 0.7, 0.7), 247.413)
if "particle_214 geometry" not in marker_sets:
s=new_marker_set('particle_214 geometry')
marker_sets["particle_214 geometry"]=s
s= marker_sets["particle_214 geometry"]
mark=s.place_marker((2016.7, 131.159, 2145.32), (0.7, 0.7, 0.7), 200.206)
if "particle_215 geometry" not in marker_sets:
s=new_marker_set('particle_215 geometry')
marker_sets["particle_215 geometry"]=s
s= marker_sets["particle_215 geometry"]
mark=s.place_marker((2089.25, 306.163, 1736.72), (0.7, 0.7, 0.7), 150.419)
if "particle_216 geometry" not in marker_sets:
s=new_marker_set('particle_216 geometry')
marker_sets["particle_216 geometry"]=s
s= marker_sets["particle_216 geometry"]
mark=s.place_marker((2299.11, 672.865, 2179), (0.7, 0.7, 0.7), 140.14)
if "particle_217 geometry" not in marker_sets:
s=new_marker_set('particle_217 geometry')
marker_sets["particle_217 geometry"]=s
s= marker_sets["particle_217 geometry"]
mark=s.place_marker((2376.54, 703.679, 2632.16), (0.7, 0.7, 0.7), 132.949)
if "particle_218 geometry" not in marker_sets:
s=new_marker_set('particle_218 geometry')
marker_sets["particle_218 geometry"]=s
s= marker_sets["particle_218 geometry"]
mark=s.place_marker((2467.3, 890.504, 2955.67), (0.7, 0.7, 0.7), 141.113)
if "particle_219 geometry" not in marker_sets:
s=new_marker_set('particle_219 geometry')
marker_sets["particle_219 geometry"]=s
s= marker_sets["particle_219 geometry"]
mark=s.place_marker((2116.27, 972.414, 3046.99), (0.7, 0.7, 0.7), 171.526)
if "particle_220 geometry" not in marker_sets:
s=new_marker_set('particle_220 geometry')
marker_sets["particle_220 geometry"]=s
s= marker_sets["particle_220 geometry"]
mark=s.place_marker((1517.88, 975.384, 2825.64), (0.7, 0.7, 0.7), 326.937)
if "particle_221 geometry" not in marker_sets:
s=new_marker_set('particle_221 geometry')
marker_sets["particle_221 geometry"]=s
s= marker_sets["particle_221 geometry"]
mark=s.place_marker((1173.27, 1341.07, 2476.2), (0.7, 0.7, 0.7), 92.0871)
if "particle_222 geometry" not in marker_sets:
s=new_marker_set('particle_222 geometry')
marker_sets["particle_222 geometry"]=s
s= marker_sets["particle_222 geometry"]
mark=s.place_marker((1226, 1824.86, 2682.16), (0.7, 0.7, 0.7), 210.273)
if "particle_223 geometry" not in marker_sets:
s=new_marker_set('particle_223 geometry')
marker_sets["particle_223 geometry"]=s
s= marker_sets["particle_223 geometry"]
mark=s.place_marker((1810.16, 2083.16, 3081.64), (0.7, 0.7, 0.7), 122.628)
if "particle_224 geometry" not in marker_sets:
s=new_marker_set('particle_224 geometry')
marker_sets["particle_224 geometry"]=s
s= marker_sets["particle_224 geometry"]
mark=s.place_marker((2049.68, 2035.17, 3159.45), (0.7, 0.7, 0.7), 109.176)
if "particle_225 geometry" not in marker_sets:
s=new_marker_set('particle_225 geometry')
marker_sets["particle_225 geometry"]=s
s= marker_sets["particle_225 geometry"]
mark=s.place_marker((2002.8, 1804.82, 2963.79), (0.7, 0.7, 0.7), 142.213)
if "particle_226 geometry" not in marker_sets:
s=new_marker_set('particle_226 geometry')
marker_sets["particle_226 geometry"]=s
s= marker_sets["particle_226 geometry"]
mark=s.place_marker((1738.84, 1691.62, 2865.3), (0.7, 0.7, 0.7), 250.078)
if "particle_227 geometry" not in marker_sets:
s=new_marker_set('particle_227 geometry')
marker_sets["particle_227 geometry"]=s
s= marker_sets["particle_227 geometry"]
mark=s.place_marker((1917.55, 1933.98, 2491.22), (0.7, 0.7, 0.7), 123.558)
if "particle_228 geometry" not in marker_sets:
s=new_marker_set('particle_228 geometry')
marker_sets["particle_228 geometry"]=s
s= marker_sets["particle_228 geometry"]
mark=s.place_marker((2394.2, 2036.54, 2413.8), (0.7, 0.7, 0.7), 235.992)
if "particle_229 geometry" not in marker_sets:
s=new_marker_set('particle_229 geometry')
marker_sets["particle_229 geometry"]=s
s= marker_sets["particle_229 geometry"]
mark=s.place_marker((2743.73, 2306.21, 2185.16), (0.7, 0.7, 0.7), 172.373)
if "particle_230 geometry" not in marker_sets:
s=new_marker_set('particle_230 geometry')
marker_sets["particle_230 geometry"]=s
s= marker_sets["particle_230 geometry"]
mark=s.place_marker((2653.33, 2390.4, 1745.59), (0.7, 0.7, 0.7), 152.322)
if "particle_231 geometry" not in marker_sets:
s=new_marker_set('particle_231 geometry')
marker_sets["particle_231 geometry"]=s
s= marker_sets["particle_231 geometry"]
mark=s.place_marker((2469.39, 2451.22, 1483.41), (0.7, 0.7, 0.7), 196.653)
if "particle_232 geometry" not in marker_sets:
s=new_marker_set('particle_232 geometry')
marker_sets["particle_232 geometry"]=s
s= marker_sets["particle_232 geometry"]
mark=s.place_marker((2740.18, 2154.97, 1540.15), (0.7, 0.7, 0.7), 134.091)
if "particle_233 geometry" not in marker_sets:
s=new_marker_set('particle_233 geometry')
marker_sets["particle_233 geometry"]=s
s= marker_sets["particle_233 geometry"]
mark=s.place_marker((3053.3, 2257.37, 1725.01), (0.7, 0.7, 0.7), 180.325)
if "particle_234 geometry" not in marker_sets:
s=new_marker_set('particle_234 geometry')
marker_sets["particle_234 geometry"]=s
s= marker_sets["particle_234 geometry"]
mark=s.place_marker((2715.89, 2207.82, 2029.94), (0.7, 0.7, 0.7), 218.437)
if "particle_235 geometry" not in marker_sets:
s=new_marker_set('particle_235 geometry')
marker_sets["particle_235 geometry"]=s
s= marker_sets["particle_235 geometry"]
mark=s.place_marker((2310.08, 1985.85, 2077.72), (0.7, 0.7, 0.7), 148.008)
if "particle_236 geometry" not in marker_sets:
s=new_marker_set('particle_236 geometry')
marker_sets["particle_236 geometry"]=s
s= marker_sets["particle_236 geometry"]
mark=s.place_marker((1896.13, 1585.3, 1798.31), (0.7, 0.7, 0.7), 191.873)
if "particle_237 geometry" not in marker_sets:
s=new_marker_set('particle_237 geometry')
marker_sets["particle_237 geometry"]=s
s= marker_sets["particle_237 geometry"]
mark=s.place_marker((1707.7, 1077.61, 1691.5), (0.7, 0.7, 0.7), 138.575)
if "particle_238 geometry" not in marker_sets:
s=new_marker_set('particle_238 geometry')
marker_sets["particle_238 geometry"]=s
s= marker_sets["particle_238 geometry"]
mark=s.place_marker((1823.02, 831.902, 1345.52), (0.7, 0.7, 0.7), 161.205)
if "particle_239 geometry" not in marker_sets:
s=new_marker_set('particle_239 geometry')
marker_sets["particle_239 geometry"]=s
s= marker_sets["particle_239 geometry"]
mark=s.place_marker((1738.01, 1243.53, 1422.1), (0.7, 0.7, 0.7), 288.021)
if "particle_240 geometry" not in marker_sets:
s=new_marker_set('particle_240 geometry')
marker_sets["particle_240 geometry"]=s
s= marker_sets["particle_240 geometry"]
mark=s.place_marker((2363.18, 1563.59, 1450.89), (0.7, 0.7, 0.7), 227.405)
if "particle_241 geometry" not in marker_sets:
s=new_marker_set('particle_241 geometry')
marker_sets["particle_241 geometry"]=s
s= marker_sets["particle_241 geometry"]
mark=s.place_marker((2646.24, 1927.1, 1692.43), (0.7, 0.7, 0.7), 126.519)
if "particle_242 geometry" not in marker_sets:
s=new_marker_set('particle_242 geometry')
marker_sets["particle_242 geometry"]=s
s= marker_sets["particle_242 geometry"]
mark=s.place_marker((2451.37, 2040.8, 1492.39), (0.7, 0.7, 0.7), 117.975)
if "particle_243 geometry" not in marker_sets:
s=new_marker_set('particle_243 geometry')
marker_sets["particle_243 geometry"]=s
s= marker_sets["particle_243 geometry"]
mark=s.place_marker((2398.47, 2157.14, 1885.29), (0.7, 0.7, 0.7), 200.883)
if "particle_244 geometry" not in marker_sets:
s=new_marker_set('particle_244 geometry')
marker_sets["particle_244 geometry"]=s
s= marker_sets["particle_244 geometry"]
mark=s.place_marker((2649.25, 2103.01, 2062.58), (0.7, 0.7, 0.7), 158.794)
if "particle_245 geometry" not in marker_sets:
s=new_marker_set('particle_245 geometry')
marker_sets["particle_245 geometry"]=s
s= marker_sets["particle_245 geometry"]
mark=s.place_marker((2827.85, 1846.12, 2128.08), (0.7, 0.7, 0.7), 115.86)
if "particle_246 geometry" not in marker_sets:
s=new_marker_set('particle_246 geometry')
marker_sets["particle_246 geometry"]=s
s= marker_sets["particle_246 geometry"]
mark=s.place_marker((2916.32, 1971.48, 2326.64), (0.7, 0.7, 0.7), 133.034)
if "particle_247 geometry" not in marker_sets:
s=new_marker_set('particle_247 geometry')
marker_sets["particle_247 geometry"]=s
s= marker_sets["particle_247 geometry"]
mark=s.place_marker((2973.11, 2443.52, 2368.5), (0.7, 0.7, 0.7), 314.627)
if "particle_248 geometry" not in marker_sets:
s=new_marker_set('particle_248 geometry')
marker_sets["particle_248 geometry"]=s
s= marker_sets["particle_248 geometry"]
mark=s.place_marker((2740.45, 2357.2, 2092.73), (0.7, 0.7, 0.7), 115.352)
if "particle_249 geometry" not in marker_sets:
s=new_marker_set('particle_249 geometry')
marker_sets["particle_249 geometry"]=s
s= marker_sets["particle_249 geometry"]
mark=s.place_marker((2617.43, 2048.49, 1819.37), (0.7, 0.7, 0.7), 180.621)
if "particle_250 geometry" not in marker_sets:
s=new_marker_set('particle_250 geometry')
marker_sets["particle_250 geometry"]=s
s= marker_sets["particle_250 geometry"]
mark=s.place_marker((2756.97, 1734.43, 1926.12), (0.7, 0.7, 0.7), 126.265)
if "particle_251 geometry" not in marker_sets:
s=new_marker_set('particle_251 geometry')
marker_sets["particle_251 geometry"]=s
s= marker_sets["particle_251 geometry"]
mark=s.place_marker((2822.78, 1579.74, 2264.45), (0.7, 0.7, 0.7), 133.541)
if "particle_252 geometry" not in marker_sets:
s=new_marker_set('particle_252 geometry')
marker_sets["particle_252 geometry"]=s
s= marker_sets["particle_252 geometry"]
mark=s.place_marker((2789.92, 1264.97, 2557.17), (0.7, 0.7, 0.7), 171.019)
if "particle_253 geometry" not in marker_sets:
s=new_marker_set('particle_253 geometry')
marker_sets["particle_253 geometry"]=s
s= marker_sets["particle_253 geometry"]
mark=s.place_marker((2657.19, 924.987, 2702.08), (0.7, 0.7, 0.7), 115.437)
if "particle_254 geometry" not in marker_sets:
s=new_marker_set('particle_254 geometry')
marker_sets["particle_254 geometry"]=s
s= marker_sets["particle_254 geometry"]
mark=s.place_marker((2720.09, 1120.29, 2451.79), (0.7, 0.7, 0.7), 158.583)
if "particle_255 geometry" not in marker_sets:
s=new_marker_set('particle_255 geometry')
marker_sets["particle_255 geometry"]=s
s= marker_sets["particle_255 geometry"]
mark=s.place_marker((2683.69, 1565.25, 2617.88), (0.7, 0.7, 0.7), 192)
if "particle_256 geometry" not in marker_sets:
s=new_marker_set('particle_256 geometry')
marker_sets["particle_256 geometry"]=s
s= marker_sets["particle_256 geometry"]
mark=s.place_marker((2801.07, 1985.05, 2600.21), (0.7, 0.7, 0.7), 150.165)
if "particle_257 geometry" not in marker_sets:
s=new_marker_set('particle_257 geometry')
marker_sets["particle_257 geometry"]=s
s= marker_sets["particle_257 geometry"]
mark=s.place_marker((2873.37, 1849.18, 2818.41), (0.7, 0.7, 0.7), 157.567)
if "particle_258 geometry" not in marker_sets:
s=new_marker_set('particle_258 geometry')
marker_sets["particle_258 geometry"]=s
s= marker_sets["particle_258 geometry"]
mark=s.place_marker((2801.4, 1933.59, 2839.12), (0.7, 0.7, 0.7), 199.36)
if "particle_259 geometry" not in marker_sets:
s=new_marker_set('particle_259 geometry')
marker_sets["particle_259 geometry"]=s
s= marker_sets["particle_259 geometry"]
mark=s.place_marker((2444.82, 1717.85, 2627.78), (0.7, 0.7, 0.7), 105.369)
if "particle_260 geometry" not in marker_sets:
s=new_marker_set('particle_260 geometry')
marker_sets["particle_260 geometry"]=s
s= marker_sets["particle_260 geometry"]
mark=s.place_marker((2187.33, 1742.3, 2535.87), (0.7, 0.7, 0.7), 118.651)
if "particle_261 geometry" not in marker_sets:
s=new_marker_set('particle_261 geometry')
marker_sets["particle_261 geometry"]=s
s= marker_sets["particle_261 geometry"]
mark=s.place_marker((2470.27, 2087.38, 2564.15), (0.7, 0.7, 0.7), 219.664)
if "particle_262 geometry" not in marker_sets:
s=new_marker_set('particle_262 geometry')
marker_sets["particle_262 geometry"]=s
s= marker_sets["particle_262 geometry"]
mark=s.place_marker((2929.61, 2402.98, 2765.81), (0.7, 0.7, 0.7), 196.018)
if "particle_263 geometry" not in marker_sets:
s=new_marker_set('particle_263 geometry')
marker_sets["particle_263 geometry"]=s
s= marker_sets["particle_263 geometry"]
mark=s.place_marker((3259.73, 2711.49, 2955.63), (0.7, 0.7, 0.7), 218.141)
if "particle_264 geometry" not in marker_sets:
s=new_marker_set('particle_264 geometry')
marker_sets["particle_264 geometry"]=s
s= marker_sets["particle_264 geometry"]
mark=s.place_marker((3159.97, 2641.46, 3289.28), (0.7, 0.7, 0.7), 181.636)
if "particle_265 geometry" not in marker_sets:
s=new_marker_set('particle_265 geometry')
marker_sets["particle_265 geometry"]=s
s= marker_sets["particle_265 geometry"]
mark=s.place_marker((2941.8, 2445.57, 3302.79), (0.7, 0.7, 0.7), 195.003)
if "particle_266 geometry" not in marker_sets:
s=new_marker_set('particle_266 geometry')
marker_sets["particle_266 geometry"]=s
s= marker_sets["particle_266 geometry"]
mark=s.place_marker((3176.17, 2542.69, 3298.16), (0.7, 0.7, 0.7), 139.209)
if "particle_267 geometry" not in marker_sets:
s=new_marker_set('particle_267 geometry')
marker_sets["particle_267 geometry"]=s
s= marker_sets["particle_267 geometry"]
mark=s.place_marker((3236.26, 2493.29, 3334.52), (0.7, 0.7, 0.7), 189.885)
if "particle_268 geometry" not in marker_sets:
s=new_marker_set('particle_268 geometry')
marker_sets["particle_268 geometry"]=s
s= marker_sets["particle_268 geometry"]
mark=s.place_marker((3342.44, 2352.24, 3124.94), (0.7, 0.7, 0.7), 267.674)
if "particle_269 geometry" not in marker_sets:
s=new_marker_set('particle_269 geometry')
marker_sets["particle_269 geometry"]=s
s= marker_sets["particle_269 geometry"]
mark=s.place_marker((3482.05, 2164.07, 2603.57), (0.7, 0.7, 0.7), 196.568)
if "particle_270 geometry" not in marker_sets:
s=new_marker_set('particle_270 geometry')
marker_sets["particle_270 geometry"]=s
s= marker_sets["particle_270 geometry"]
mark=s.place_marker((3539.18, 1872.26, 2822.6), (0.7, 0.7, 0.7), 192.423)
if "particle_271 geometry" not in marker_sets:
s=new_marker_set('particle_271 geometry')
marker_sets["particle_271 geometry"]=s
s= marker_sets["particle_271 geometry"]
mark=s.place_marker((3654.04, 2010.91, 3148.77), (1, 0.7, 0), 202.405)
if "particle_272 geometry" not in marker_sets:
s=new_marker_set('particle_272 geometry')
marker_sets["particle_272 geometry"]=s
s= marker_sets["particle_272 geometry"]
mark=s.place_marker((3545.41, 1617.52, 2420.38), (0.7, 0.7, 0.7), 135.529)
if "particle_273 geometry" not in marker_sets:
s=new_marker_set('particle_273 geometry')
marker_sets["particle_273 geometry"]=s
s= marker_sets["particle_273 geometry"]
mark=s.place_marker((3435.3, 1006.37, 1708.62), (0.7, 0.7, 0.7), 114.21)
if "particle_274 geometry" not in marker_sets:
s=new_marker_set('particle_274 geometry')
marker_sets["particle_274 geometry"]=s
s= marker_sets["particle_274 geometry"]
mark=s.place_marker((3107.01, 1014.06, 1732.87), (0.7, 0.7, 0.7), 159.133)
if "particle_275 geometry" not in marker_sets:
s=new_marker_set('particle_275 geometry')
marker_sets["particle_275 geometry"]=s
s= marker_sets["particle_275 geometry"]
mark=s.place_marker((2888.16, 1357.05, 1812.11), (0.7, 0.7, 0.7), 144.412)
if "particle_276 geometry" not in marker_sets:
s=new_marker_set('particle_276 geometry')
marker_sets["particle_276 geometry"]=s
s= marker_sets["particle_276 geometry"]
mark=s.place_marker((2756.93, 1646.57, 1864.7), (0.7, 0.7, 0.7), 70.8525)
if "particle_277 geometry" not in marker_sets:
s=new_marker_set('particle_277 geometry')
marker_sets["particle_277 geometry"]=s
s= marker_sets["particle_277 geometry"]
mark=s.place_marker((2834.94, 1997.43, 2368.07), (0.7, 0.7, 0.7), 141.874)
if "particle_278 geometry" not in marker_sets:
s=new_marker_set('particle_278 geometry')
marker_sets["particle_278 geometry"]=s
s= marker_sets["particle_278 geometry"]
mark=s.place_marker((3027.73, 2273.44, 2885.27), (0.7, 0.7, 0.7), 217.337)
if "particle_279 geometry" not in marker_sets:
s=new_marker_set('particle_279 geometry')
marker_sets["particle_279 geometry"]=s
s= marker_sets["particle_279 geometry"]
mark=s.place_marker((3072.7, 2335, 2870.46), (0.7, 0.7, 0.7), 237.641)
if "particle_280 geometry" not in marker_sets:
s=new_marker_set('particle_280 geometry')
marker_sets["particle_280 geometry"]=s
s= marker_sets["particle_280 geometry"]
mark=s.place_marker((2933.92, 2430.39, 2433.35), (0.7, 0.7, 0.7), 229.393)
if "particle_281 geometry" not in marker_sets:
s=new_marker_set('particle_281 geometry')
marker_sets["particle_281 geometry"]=s
s= marker_sets["particle_281 geometry"]
mark=s.place_marker((3286.35, 2911.51, 2530.48), (0.7, 0.7, 0.7), 349.906)
if "particle_282 geometry" not in marker_sets:
s=new_marker_set('particle_282 geometry')
marker_sets["particle_282 geometry"]=s
s= marker_sets["particle_282 geometry"]
mark=s.place_marker((3805.74, 3062.29, 2710.34), (0.7, 0.7, 0.7), 162.347)
if "particle_283 geometry" not in marker_sets:
s=new_marker_set('particle_283 geometry')
marker_sets["particle_283 geometry"]=s
s= marker_sets["particle_283 geometry"]
mark=s.place_marker((3952.09, 3123.12, 2676.58), (0.7, 0.7, 0.7), 194.072)
if "particle_284 geometry" not in marker_sets:
s=new_marker_set('particle_284 geometry')
marker_sets["particle_284 geometry"]=s
s= marker_sets["particle_284 geometry"]
mark=s.place_marker((3944.05, 3215.18, 2518.38), (0.7, 0.7, 0.7), 242.21)
if "particle_285 geometry" not in marker_sets:
s=new_marker_set('particle_285 geometry')
marker_sets["particle_285 geometry"]=s
s= marker_sets["particle_285 geometry"]
mark=s.place_marker((4034.33, 3075.73, 2026.06), (0.7, 0.7, 0.7), 320.93)
if "particle_286 geometry" not in marker_sets:
s=new_marker_set('particle_286 geometry')
marker_sets["particle_286 geometry"]=s
s= marker_sets["particle_286 geometry"]
mark=s.place_marker((4387.76, 3206.88, 1599.39), (0.7, 0.7, 0.7), 226.432)
if "particle_287 geometry" not in marker_sets:
s=new_marker_set('particle_287 geometry')
marker_sets["particle_287 geometry"]=s
s= marker_sets["particle_287 geometry"]
mark=s.place_marker((4233.71, 3426.83, 1812.26), (0.7, 0.7, 0.7), 125.208)
if "particle_288 geometry" not in marker_sets:
s=new_marker_set('particle_288 geometry')
marker_sets["particle_288 geometry"]=s
s= marker_sets["particle_288 geometry"]
mark=s.place_marker((3858.91, 3766.99, 2186.87), (0.7, 0.7, 0.7), 197.837)
if "particle_289 geometry" not in marker_sets:
s=new_marker_set('particle_289 geometry')
marker_sets["particle_289 geometry"]=s
s= marker_sets["particle_289 geometry"]
mark=s.place_marker((3687.44, 4375.9, 2009.21), (0.7, 0.7, 0.7), 167.804)
if "particle_290 geometry" not in marker_sets:
s=new_marker_set('particle_290 geometry')
marker_sets["particle_290 geometry"]=s
s= marker_sets["particle_290 geometry"]
mark=s.place_marker((3649.74, 5064.67, 1537.11), (0.7, 0.7, 0.7), 136.84)
if "particle_291 geometry" not in marker_sets:
s=new_marker_set('particle_291 geometry')
marker_sets["particle_291 geometry"]=s
s= marker_sets["particle_291 geometry"]
mark=s.place_marker((3748.77, 4865.97, 1164.35), (0.7, 0.7, 0.7), 85.7421)
if "particle_292 geometry" not in marker_sets:
s=new_marker_set('particle_292 geometry')
marker_sets["particle_292 geometry"]=s
s= marker_sets["particle_292 geometry"]
mark=s.place_marker((4196.9, 3752.53, 1900.74), (1, 0.7, 0), 256)
if "particle_293 geometry" not in marker_sets:
s=new_marker_set('particle_293 geometry')
marker_sets["particle_293 geometry"]=s
s= marker_sets["particle_293 geometry"]
mark=s.place_marker((4156.19, 4798.06, 2260.97), (0.7, 0.7, 0.7), 138.702)
if "particle_294 geometry" not in marker_sets:
s=new_marker_set('particle_294 geometry')
marker_sets["particle_294 geometry"]=s
s= marker_sets["particle_294 geometry"]
mark=s.place_marker((4190.45, 5181.88, 2560.01), (0.7, 0.7, 0.7), 140.732)
if "particle_295 geometry" not in marker_sets:
s=new_marker_set('particle_295 geometry')
marker_sets["particle_295 geometry"]=s
s= marker_sets["particle_295 geometry"]
mark=s.place_marker((4328.47, 4974.96, 2420.46), (0.7, 0.7, 0.7), 81.3006)
if "particle_296 geometry" not in marker_sets:
s=new_marker_set('particle_296 geometry')
marker_sets["particle_296 geometry"]=s
s= marker_sets["particle_296 geometry"]
mark=s.place_marker((4528.7, 5093.07, 2050.47), (0.7, 0.7, 0.7), 133.837)
if "particle_297 geometry" not in marker_sets:
s=new_marker_set('particle_297 geometry')
marker_sets["particle_297 geometry"]=s
s= marker_sets["particle_297 geometry"]
mark=s.place_marker((4449.91, 4469.78, 2084.06), (0.7, 0.7, 0.7), 98.3475)
if "particle_298 geometry" not in marker_sets:
s=new_marker_set('particle_298 geometry')
marker_sets["particle_298 geometry"]=s
s= marker_sets["particle_298 geometry"]
mark=s.place_marker((4241.03, 3777.5, 2436.82), (0.7, 0.7, 0.7), 297.623)
if "particle_299 geometry" not in marker_sets:
s=new_marker_set('particle_299 geometry')
marker_sets["particle_299 geometry"]=s
s= marker_sets["particle_299 geometry"]
mark=s.place_marker((4085.35, 3409.3, 2417.04), (0.7, 0.7, 0.7), 212.938)
if "particle_300 geometry" not in marker_sets:
s=new_marker_set('particle_300 geometry')
marker_sets["particle_300 geometry"]=s
s= marker_sets["particle_300 geometry"]
mark=s.place_marker((4262.43, 3458.13, 2539.61), (0.7, 0.7, 0.7), 154.183)
if "particle_301 geometry" not in marker_sets:
s=new_marker_set('particle_301 geometry')
marker_sets["particle_301 geometry"]=s
s= marker_sets["particle_301 geometry"]
mark=s.place_marker((4549.39, 3303.75, 2276.16), (0.7, 0.7, 0.7), 180.832)
if "particle_302 geometry" not in marker_sets:
s=new_marker_set('particle_302 geometry')
marker_sets["particle_302 geometry"]=s
s= marker_sets["particle_302 geometry"]
mark=s.place_marker((4586.74, 3144.68, 1935.48), (0.7, 0.7, 0.7), 122.332)
if "particle_303 geometry" not in marker_sets:
s=new_marker_set('particle_303 geometry')
marker_sets["particle_303 geometry"]=s
s= marker_sets["particle_303 geometry"]
mark=s.place_marker((4476.82, 3033.57, 1594.45), (0.7, 0.7, 0.7), 209.047)
if "particle_304 geometry" not in marker_sets:
s=new_marker_set('particle_304 geometry')
marker_sets["particle_304 geometry"]=s
s= marker_sets["particle_304 geometry"]
mark=s.place_marker((4857.86, 2895.76, 1656.56), (0.7, 0.7, 0.7), 126.985)
if "particle_305 geometry" not in marker_sets:
s=new_marker_set('particle_305 geometry')
marker_sets["particle_305 geometry"]=s
s= marker_sets["particle_305 geometry"]
mark=s.place_marker((5223.57, 2977.99, 1482.94), (0.7, 0.7, 0.7), 122.205)
if "particle_306 geometry" not in marker_sets:
s=new_marker_set('particle_306 geometry')
marker_sets["particle_306 geometry"]=s
s= marker_sets["particle_306 geometry"]
mark=s.place_marker((5345.75, 3208.49, 1486.13), (0.7, 0.7, 0.7), 107.95)
if "particle_307 geometry" not in marker_sets:
s=new_marker_set('particle_307 geometry')
marker_sets["particle_307 geometry"]=s
s= marker_sets["particle_307 geometry"]
mark=s.place_marker((4942.13, 3132.57, 1931.07), (0.7, 0.7, 0.7), 182.567)
if "particle_308 geometry" not in marker_sets:
s=new_marker_set('particle_308 geometry')
marker_sets["particle_308 geometry"]=s
s= marker_sets["particle_308 geometry"]
mark=s.place_marker((4409.55, 3170.13, 2274.28), (0.7, 0.7, 0.7), 185.274)
if "particle_309 geometry" not in marker_sets:
s=new_marker_set('particle_309 geometry')
marker_sets["particle_309 geometry"]=s
s= marker_sets["particle_309 geometry"]
mark=s.place_marker((3959.43, 3159.81, 2261.88), (0.7, 0.7, 0.7), 413.567)
if "particle_310 geometry" not in marker_sets:
s=new_marker_set('particle_310 geometry')
marker_sets["particle_310 geometry"]=s
s= marker_sets["particle_310 geometry"]
mark=s.place_marker((3892.53, 3145.53, 2483.55), (0.7, 0.7, 0.7), 240.01)
if "particle_311 geometry" not in marker_sets:
s=new_marker_set('particle_311 geometry')
marker_sets["particle_311 geometry"]=s
s= marker_sets["particle_311 geometry"]
mark=s.place_marker((3898.19, 3128.6, 2442.22), (0.7, 0.7, 0.7), 238.995)
if "particle_312 geometry" not in marker_sets:
s=new_marker_set('particle_312 geometry')
marker_sets["particle_312 geometry"]=s
s= marker_sets["particle_312 geometry"]
mark=s.place_marker((3955.01, 3522.66, 2642.06), (0.7, 0.7, 0.7), 203.674)
if "particle_313 geometry" not in marker_sets:
s=new_marker_set('particle_313 geometry')
marker_sets["particle_313 geometry"]=s
s= marker_sets["particle_313 geometry"]
mark=s.place_marker((4076.64, 4172.26, 2905.16), (0.7, 0.7, 0.7), 266.744)
if "particle_314 geometry" not in marker_sets:
s=new_marker_set('particle_314 geometry')
marker_sets["particle_314 geometry"]=s
s= marker_sets["particle_314 geometry"]
mark=s.place_marker((4413.96, 3863.54, 2967.52), (0.7, 0.7, 0.7), 147.585)
if "particle_315 geometry" not in marker_sets:
s=new_marker_set('particle_315 geometry')
marker_sets["particle_315 geometry"]=s
s= marker_sets["particle_315 geometry"]
mark=s.place_marker((4257.81, 3411.24, 2783.5), (0.7, 0.7, 0.7), 249.485)
if "particle_316 geometry" not in marker_sets:
s=new_marker_set('particle_316 geometry')
marker_sets["particle_316 geometry"]=s
s= marker_sets["particle_316 geometry"]
mark=s.place_marker((3824.5, 3143.16, 2515.86), (0.7, 0.7, 0.7), 119.371)
if "particle_317 geometry" not in marker_sets:
s=new_marker_set('particle_317 geometry')
marker_sets["particle_317 geometry"]=s
s= marker_sets["particle_317 geometry"]
mark=s.place_marker((3495.67, 3034.95, 1783.33), (0.7, 0.7, 0.7), 155.875)
if "particle_318 geometry" not in marker_sets:
s=new_marker_set('particle_318 geometry')
marker_sets["particle_318 geometry"]=s
s= marker_sets["particle_318 geometry"]
mark=s.place_marker((3467.04, 3319.09, 1138.39), (0.7, 0.7, 0.7), 189.419)
if "particle_319 geometry" not in marker_sets:
s=new_marker_set('particle_319 geometry')
marker_sets["particle_319 geometry"]=s
s= marker_sets["particle_319 geometry"]
mark=s.place_marker((3324.76, 3694.78, 1255.71), (0.7, 0.7, 0.7), 137.475)
if "particle_320 geometry" not in marker_sets:
s=new_marker_set('particle_320 geometry')
marker_sets["particle_320 geometry"]=s
s= marker_sets["particle_320 geometry"]
mark=s.place_marker((3097.41, 3802.68, 1584.38), (0.7, 0.7, 0.7), 176.179)
if "particle_321 geometry" not in marker_sets:
s=new_marker_set('particle_321 geometry')
marker_sets["particle_321 geometry"]=s
s= marker_sets["particle_321 geometry"]
mark=s.place_marker((2864.86, 4059.96, 1764.75), (0.7, 0.7, 0.7), 138.829)
if "particle_322 geometry" not in marker_sets:
s=new_marker_set('particle_322 geometry')
marker_sets["particle_322 geometry"]=s
s= marker_sets["particle_322 geometry"]
mark=s.place_marker((2736.56, 4401.35, 1797.06), (0.7, 0.7, 0.7), 148.727)
if "particle_323 geometry" not in marker_sets:
s=new_marker_set('particle_323 geometry')
marker_sets["particle_323 geometry"]=s
s= marker_sets["particle_323 geometry"]
mark=s.place_marker((2648.14, 4857.37, 1636.07), (0.7, 0.7, 0.7), 230.323)
if "particle_324 geometry" not in marker_sets:
s=new_marker_set('particle_324 geometry')
marker_sets["particle_324 geometry"]=s
s= marker_sets["particle_324 geometry"]
mark=s.place_marker((3039.68, 4350.6, 1534.41), (0.7, 0.7, 0.7), 175.376)
if "particle_325 geometry" not in marker_sets:
s=new_marker_set('particle_325 geometry')
marker_sets["particle_325 geometry"]=s
s= marker_sets["particle_325 geometry"]
mark=s.place_marker((3238.43, 3878.26, 1595.24), (0.7, 0.7, 0.7), 161.163)
if "particle_326 geometry" not in marker_sets:
s=new_marker_set('particle_326 geometry')
marker_sets["particle_326 geometry"]=s
s= marker_sets["particle_326 geometry"]
mark=s.place_marker((2922.78, 3672.01, 1283.49), (0.7, 0.7, 0.7), 125.885)
if "particle_327 geometry" not in marker_sets:
s=new_marker_set('particle_327 geometry')
marker_sets["particle_327 geometry"]=s
s= marker_sets["particle_327 geometry"]
mark=s.place_marker((2771.34, 3522.17, 871.084), (0.7, 0.7, 0.7), 206.635)
if "particle_328 geometry" not in marker_sets:
s=new_marker_set('particle_328 geometry')
marker_sets["particle_328 geometry"]=s
s= marker_sets["particle_328 geometry"]
mark=s.place_marker((2591.23, 3438.03, 1277.23), (0.7, 0.7, 0.7), 151.392)
if "particle_329 geometry" not in marker_sets:
s=new_marker_set('particle_329 geometry')
marker_sets["particle_329 geometry"]=s
s= marker_sets["particle_329 geometry"]
mark=s.place_marker((2473.38, 3492.09, 1612.91), (0.7, 0.7, 0.7), 173.388)
if "particle_330 geometry" not in marker_sets:
s=new_marker_set('particle_330 geometry')
marker_sets["particle_330 geometry"]=s
s= marker_sets["particle_330 geometry"]
mark=s.place_marker((2446.81, 3820.67, 1594.22), (0.7, 0.7, 0.7), 135.825)
if "particle_331 geometry" not in marker_sets:
s=new_marker_set('particle_331 geometry')
marker_sets["particle_331 geometry"]=s
s= marker_sets["particle_331 geometry"]
mark=s.place_marker((2456.01, 4220.86, 1431.79), (0.7, 0.7, 0.7), 186.839)
if "particle_332 geometry" not in marker_sets:
s=new_marker_set('particle_332 geometry')
marker_sets["particle_332 geometry"]=s
s= marker_sets["particle_332 geometry"]
mark=s.place_marker((2486.59, 4645.34, 1223.87), (0.7, 0.7, 0.7), 121.189)
if "particle_333 geometry" not in marker_sets:
s=new_marker_set('particle_333 geometry')
marker_sets["particle_333 geometry"]=s
s= marker_sets["particle_333 geometry"]
mark=s.place_marker((2706.54, 4281.72, 1277.57), (0.7, 0.7, 0.7), 102.916)
if "particle_334 geometry" not in marker_sets:
s=new_marker_set('particle_334 geometry')
marker_sets["particle_334 geometry"]=s
s= marker_sets["particle_334 geometry"]
mark=s.place_marker((3090.38, 3819.46, 1464.01), (0.7, 0.7, 0.7), 212.769)
if "particle_335 geometry" not in marker_sets:
s=new_marker_set('particle_335 geometry')
marker_sets["particle_335 geometry"]=s
s= marker_sets["particle_335 geometry"]
mark=s.place_marker((3293.02, 3275.97, 1809.6), (0.7, 0.7, 0.7), 173.092)
if "particle_336 geometry" not in marker_sets:
s=new_marker_set('particle_336 geometry')
marker_sets["particle_336 geometry"]=s
s= marker_sets["particle_336 geometry"]
mark=s.place_marker((3616.14, 2888.19, 1869.23), (0.7, 0.7, 0.7), 264.502)
if "particle_337 geometry" not in marker_sets:
s=new_marker_set('particle_337 geometry')
marker_sets["particle_337 geometry"]=s
s= marker_sets["particle_337 geometry"]
mark=s.place_marker((4063.38, 2673.71, 1608.49), (0.7, 0.7, 0.7), 208.666)
if "particle_338 geometry" not in marker_sets:
s=new_marker_set('particle_338 geometry')
marker_sets["particle_338 geometry"]=s
s= marker_sets["particle_338 geometry"]
mark=s.place_marker((4535.83, 2712.13, 1479.01), (0.7, 0.7, 0.7), 186.797)
if "particle_339 geometry" not in marker_sets:
s=new_marker_set('particle_339 geometry')
marker_sets["particle_339 geometry"]=s
s= marker_sets["particle_339 geometry"]
mark=s.place_marker((4851.02, 2695.02, 1870.3), (0.7, 0.7, 0.7), 255.534)
if "particle_340 geometry" not in marker_sets:
s=new_marker_set('particle_340 geometry')
marker_sets["particle_340 geometry"]=s
s= marker_sets["particle_340 geometry"]
mark=s.place_marker((5089.52, 3029.52, 1975.18), (0.7, 0.7, 0.7), 153.126)
if "particle_341 geometry" not in marker_sets:
s=new_marker_set('particle_341 geometry')
marker_sets["particle_341 geometry"]=s
s= marker_sets["particle_341 geometry"]
mark=s.place_marker((5246.75, 2889.12, 1635.75), (0.7, 0.7, 0.7), 165.816)
if "particle_342 geometry" not in marker_sets:
s=new_marker_set('particle_342 geometry')
marker_sets["particle_342 geometry"]=s
s= marker_sets["particle_342 geometry"]
mark=s.place_marker((5052.35, 2568.47, 1748.65), (0.7, 0.7, 0.7), 134.429)
if "particle_343 geometry" not in marker_sets:
s=new_marker_set('particle_343 geometry')
marker_sets["particle_343 geometry"]=s
s= marker_sets["particle_343 geometry"]
mark=s.place_marker((4662.01, 2520.53, 1690.3), (0.7, 0.7, 0.7), 178.971)
if "particle_344 geometry" not in marker_sets:
s=new_marker_set('particle_344 geometry')
marker_sets["particle_344 geometry"]=s
s= marker_sets["particle_344 geometry"]
mark=s.place_marker((4328.01, 2796.1, 1382.62), (0.7, 0.7, 0.7), 189.969)
if "particle_345 geometry" not in marker_sets:
s=new_marker_set('particle_345 geometry')
marker_sets["particle_345 geometry"]=s
s= marker_sets["particle_345 geometry"]
mark=s.place_marker((4275.7, 2934.29, 760.898), (0.7, 0.7, 0.7), 121.359)
if "particle_346 geometry" not in marker_sets:
s=new_marker_set('particle_346 geometry')
marker_sets["particle_346 geometry"]=s
s= marker_sets["particle_346 geometry"]
mark=s.place_marker((3839.28, 3257.52, 612.817), (0.7, 0.7, 0.7), 187.262)
if "particle_347 geometry" not in marker_sets:
s=new_marker_set('particle_347 geometry')
marker_sets["particle_347 geometry"]=s
s= marker_sets["particle_347 geometry"]
mark=s.place_marker((3262.4, 3439.16, 903.102), (0.7, 0.7, 0.7), 164.335)
if "particle_348 geometry" not in marker_sets:
s=new_marker_set('particle_348 geometry')
marker_sets["particle_348 geometry"]=s
s= marker_sets["particle_348 geometry"]
mark=s.place_marker((3148.21, 3847.48, 1199.1), (0.7, 0.7, 0.7), 138.363)
if "particle_349 geometry" not in marker_sets:
s=new_marker_set('particle_349 geometry')
marker_sets["particle_349 geometry"]=s
s= marker_sets["particle_349 geometry"]
mark=s.place_marker((2973.89, 4174.58, 1271.65), (0.7, 0.7, 0.7), 138.49)
if "particle_350 geometry" not in marker_sets:
s=new_marker_set('particle_350 geometry')
marker_sets["particle_350 geometry"]=s
s= marker_sets["particle_350 geometry"]
mark=s.place_marker((2673.75, 3991.98, 1242.66), (0.7, 0.7, 0.7), 116.325)
if "particle_351 geometry" not in marker_sets:
s=new_marker_set('particle_351 geometry')
marker_sets["particle_351 geometry"]=s
s= marker_sets["particle_351 geometry"]
mark=s.place_marker((2854.8, 3561.24, 1193.44), (0.7, 0.7, 0.7), 106.511)
if "particle_352 geometry" not in marker_sets:
s=new_marker_set('particle_352 geometry')
marker_sets["particle_352 geometry"]=s
s= marker_sets["particle_352 geometry"]
mark=s.place_marker((3303.53, 3238.9, 1243.64), (0.7, 0.7, 0.7), 151.096)
if "particle_353 geometry" not in marker_sets:
s=new_marker_set('particle_353 geometry')
marker_sets["particle_353 geometry"]=s
s= marker_sets["particle_353 geometry"]
mark=s.place_marker((3932.74, 2998.71, 1165.2), (0.7, 0.7, 0.7), 240.856)
if "particle_354 geometry" not in marker_sets:
s=new_marker_set('particle_354 geometry')
marker_sets["particle_354 geometry"]=s
s= marker_sets["particle_354 geometry"]
mark=s.place_marker((4442.85, 2908.38, 1123.77), (0.7, 0.7, 0.7), 149.7)
if "particle_355 geometry" not in marker_sets:
s=new_marker_set('particle_355 geometry')
marker_sets["particle_355 geometry"]=s
s= marker_sets["particle_355 geometry"]
mark=s.place_marker((4640.95, 2681.84, 1289.52), (0.7, 0.7, 0.7), 165.943)
if "particle_356 geometry" not in marker_sets:
s=new_marker_set('particle_356 geometry')
marker_sets["particle_356 geometry"]=s
s= marker_sets["particle_356 geometry"]
mark=s.place_marker((4463.32, 2647.54, 1875.73), (0.7, 0.7, 0.7), 178.971)
if "particle_357 geometry" not in marker_sets:
s=new_marker_set('particle_357 geometry')
marker_sets["particle_357 geometry"]=s
s= marker_sets["particle_357 geometry"]
mark=s.place_marker((4357.82, 2367.04, 2582.59), (0.7, 0.7, 0.7), 154.945)
for k in surf_sets.keys():
chimera.openModels.add([surf_sets[k]])
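# Added illustrative sketch (not part of the generated script): every marker
# above is written out by the same four-line pattern, so the same geometry
# could be driven from a data table.  new_marker_set, marker_sets and
# place_marker are the script's own helpers; PARTICLES is a hypothetical
# placeholder for the (index, center, color, radius) tuples listed above.
PARTICLES = []  # e.g. [(174, (1847.58, 1471.08, 1787.08), (0.7, 0.7, 0.7), 126.858)]
for idx, center, color, radius in PARTICLES:
    name = "particle_%d geometry" % idx
    if name not in marker_sets:
        marker_sets[name] = new_marker_set(name)
    s = marker_sets[name]
    mark = s.place_marker(center, color, radius)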
| gpl-3.0 |
MaizerGomes/youtube-dl | youtube_dl/extractor/wimp.py | 65 | 1839 | from __future__ import unicode_literals
import re
from .common import InfoExtractor
from .youtube import YoutubeIE
class WimpIE(InfoExtractor):
_VALID_URL = r'http://(?:www\.)?wimp\.com/([^/]+)/'
_TESTS = [{
'url': 'http://www.wimp.com/maruexhausted/',
'md5': 'f1acced123ecb28d9bb79f2479f2b6a1',
'info_dict': {
'id': 'maruexhausted',
'ext': 'flv',
'title': 'Maru is exhausted.',
'description': 'md5:57e099e857c0a4ea312542b684a869b8',
}
}, {
# youtube video
'url': 'http://www.wimp.com/clowncar/',
'info_dict': {
'id': 'cG4CEr2aiSg',
'ext': 'mp4',
'title': 'Basset hound clown car...incredible!',
'description': 'md5:8d228485e0719898c017203f900b3a35',
'uploader': 'Gretchen Hoey',
'uploader_id': 'gretchenandjeff1',
'upload_date': '20140303',
},
'add_ie': ['Youtube'],
}]
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
video_id = mobj.group(1)
webpage = self._download_webpage(url, video_id)
video_url = self._search_regex(
[r"[\"']file[\"']\s*[:,]\s*[\"'](.+?)[\"']", r"videoId\s*:\s*[\"']([^\"']+)[\"']"],
webpage, 'video URL')
if YoutubeIE.suitable(video_url):
self.to_screen('Found YouTube video')
return {
'_type': 'url',
'url': video_url,
'ie_key': YoutubeIE.ie_key(),
}
return {
'id': video_id,
'url': video_url,
'title': self._og_search_title(webpage),
'thumbnail': self._og_search_thumbnail(webpage),
'description': self._og_search_description(webpage),
}
| unlicense |
AlperSaltabas/OR_Tools_Google_API | examples/python/production.py | 34 | 2848 | # Copyright 2011 Hakan Kjellerstrand hakank@bonetmail.com
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Production planning problem in Google or-tools.
From the OPL model production.mod.
This model was created by Hakan Kjellerstrand (hakank@bonetmail.com)
Also see my other Google CP Solver models:
http://www.hakank.org/google_or_tools/
"""
import sys
from ortools.linear_solver import pywraplp
def main(sol='GLPK'):
# Create the solver.
# using GLPK
if sol == 'GLPK':
solver = pywraplp.Solver('CoinsGridGLPK',
pywraplp.Solver.GLPK_LINEAR_PROGRAMMING)
else:
# Using CLP
solver = pywraplp.Solver('CoinsGridCLP',
pywraplp.Solver.CLP_LINEAR_PROGRAMMING)
#
# data
#
kluski = 0
capellini = 1
fettucine = 2
products = ['kluski', 'capellini', 'fettucine']
num_products = len(products)
flour = 0
eggs = 1
resources = ['flour', 'eggs']
num_resources = len(resources)
consumption = [[0.5, 0.2], [0.4, 0.4], [0.3, 0.6]]
capacity = [20, 40]
demand = [100, 200, 300]
inside_cost = [0.6, 0.8, 0.3]
outside_cost = [0.8, 0.9, 0.4]
#
# declare variables
#
inside = [solver.NumVar(0, 10000, 'inside[%i]' % p)
for p in range(num_products)]
outside = [solver.NumVar(0, 10000, 'outside[%i]' % p)
for p in range(num_products)]
# to minimize
z = solver.Sum([inside_cost[p] * inside[p] + outside_cost[p] * outside[p]
for p in range(num_products)])
#
# constraints
#
for r in range(num_resources):
solver.Add(solver.Sum(
[consumption[p][r] * inside[p]
for p in range(num_products)]) <= capacity[r])
for p in range(num_products):
solver.Add(inside[p] + outside[p] >= demand[p])
objective = solver.Minimize(z)
solver.Solve()
print
print 'z = ', solver.Objective().Value()
for p in range(num_products):
print products[p], ': inside:', inside[p].SolutionValue(), '(ReducedCost:', inside[p].ReducedCost(), ')',
print 'outside:', outside[p].SolutionValue(), ' (ReducedCost:', outside[p].ReducedCost(), ')'
print
if __name__ == '__main__':
sol = 'CBC'
if len(sys.argv) > 1:
sol = sys.argv[1]
if sol != 'GLPK' and sol != 'CBC':
print 'Solver must be either GLPK or CBC'
sys.exit(1)
main(sol)
| apache-2.0 |
allmightyspiff/softlayer-python | tests/api_tests.py | 2 | 12260 | """
SoftLayer.tests.api_tests
~~~~~~~~~~~~~~~~~~~~~~~~~
:license: MIT, see LICENSE for more details.
"""
from unittest import mock as mock
import SoftLayer
import SoftLayer.API
from SoftLayer import testing
from SoftLayer import transports
class Initialization(testing.TestCase):
def test_init(self):
client = SoftLayer.Client(username='doesnotexist',
api_key='issurelywrong',
timeout=10,
endpoint_url='http://example.com/v3/xmlrpc/')
self.assertIsInstance(client.auth, SoftLayer.BasicAuthentication)
self.assertEqual(client.auth.username, 'doesnotexist')
self.assertEqual(client.auth.api_key, 'issurelywrong')
self.assertIsNotNone(client.transport)
self.assertIsInstance(client.transport, transports.XmlRpcTransport)
self.assertEqual(client.transport.timeout, 10)
def test_init_with_rest_url(self):
client = SoftLayer.Client(username='doesnotexist',
api_key='issurelywrong',
timeout=10,
endpoint_url='http://example.com/v3/rest/')
self.assertIsInstance(client.auth, SoftLayer.BasicHTTPAuthentication)
self.assertEqual(client.auth.username, 'doesnotexist')
self.assertEqual(client.auth.api_key, 'issurelywrong')
self.assertIsNotNone(client.transport)
self.assertIsInstance(client.transport, transports.RestTransport)
self.assertEqual(client.transport.endpoint_url,
'http://example.com/v3/rest')
self.assertEqual(client.transport.timeout, 10)
@mock.patch('SoftLayer.config.get_client_settings')
def test_env(self, get_client_settings):
auth = mock.Mock()
get_client_settings.return_value = {
'timeout': 10,
'endpoint_url': 'http://endpoint_url/',
}
client = SoftLayer.Client(auth=auth)
self.assertEqual(client.auth.get_headers(), auth.get_headers())
self.assertEqual(client.transport.timeout, 10)
self.assertEqual(client.transport.endpoint_url, 'http://endpoint_url')
class ClientMethods(testing.TestCase):
def test_repr(self):
client = SoftLayer.Client(
username='doesnotexist',
api_key='issurelywrong'
)
self.assertIn("Client", repr(client))
def test_service_repr(self):
client = SoftLayer.Client(
username='doesnotexist',
api_key='issurelywrong'
)
self.assertIn("Service", repr(client['SERVICE']))
def test_len(self):
client = SoftLayer.Client(
username='doesnotexist',
api_key='issurelywrong'
)
self.assertEqual(len(client), 0)
class APIClient(testing.TestCase):
def test_simple_call(self):
mock = self.set_mock('SoftLayer_SERVICE', 'METHOD')
mock.return_value = {"test": "result"}
resp = self.client['SERVICE'].METHOD()
self.assertEqual(resp, {"test": "result"})
self.assert_called_with('SoftLayer_SERVICE', 'METHOD',
mask=None,
filter=None,
identifier=None,
args=tuple(),
limit=None,
offset=None,
)
def test_simple_call_2(self):
mock = self.set_mock('SoftLayer_SERVICE', 'METHOD')
mock.return_value = {"test": "result"}
resp = self.client.call('SERVICE', 'METHOD', {'networkComponents': [{'maxSpeed': 100}]})
self.assertEqual(resp, {"test": "result"})
self.assert_called_with('SoftLayer_SERVICE', 'METHOD',
mask=None, filter=None, identifier=None,
args=({'networkComponents': [{'maxSpeed': 100}]},), limit=None, offset=None,
)
def test_verify_request_false(self):
client = SoftLayer.BaseClient(transport=self.mocks)
mock = self.set_mock('SoftLayer_SERVICE', 'METHOD')
mock.return_value = {"test": "result"}
resp = client.call('SERVICE', 'METHOD', verify=False)
self.assertEqual(resp, {"test": "result"})
self.assert_called_with('SoftLayer_SERVICE', 'METHOD', verify=False)
def test_verify_request_true(self):
client = SoftLayer.BaseClient(transport=self.mocks)
mock = self.set_mock('SoftLayer_SERVICE', 'METHOD')
mock.return_value = {"test": "result"}
resp = client.call('SERVICE', 'METHOD', verify=True)
self.assertEqual(resp, {"test": "result"})
self.assert_called_with('SoftLayer_SERVICE', 'METHOD', verify=True)
def test_verify_request_not_specified(self):
client = SoftLayer.BaseClient(transport=self.mocks)
mock = self.set_mock('SoftLayer_SERVICE', 'METHOD')
mock.return_value = {"test": "result"}
resp = client.call('SERVICE', 'METHOD')
self.assertEqual(resp, {"test": "result"})
self.assert_called_with('SoftLayer_SERVICE', 'METHOD', verify=None)
@mock.patch('SoftLayer.API.BaseClient.iter_call')
def test_iterate(self, _iter_call):
self.client['SERVICE'].METHOD(iter=True)
_iter_call.assert_called_with('SERVICE', 'METHOD')
@mock.patch('SoftLayer.API.BaseClient.iter_call')
def test_service_iter_call(self, _iter_call):
self.client['SERVICE'].iter_call('METHOD', 'ARG')
_iter_call.assert_called_with('SERVICE', 'METHOD', 'ARG')
@mock.patch('SoftLayer.API.BaseClient.iter_call')
def test_service_iter_call_with_chunk(self, _iter_call):
self.client['SERVICE'].iter_call('METHOD', 'ARG', chunk=2)
_iter_call.assert_called_with('SERVICE', 'METHOD', 'ARG', chunk=2)
@mock.patch('SoftLayer.API.BaseClient.call')
def test_iter_call(self, _call):
# chunk=100, no limit
_call.side_effect = [
transports.SoftLayerListResult(range(100), 125),
transports.SoftLayerListResult(range(100, 125), 125)
]
result = list(self.client.iter_call('SERVICE', 'METHOD', iter=True))
self.assertEqual(list(range(125)), result)
_call.assert_has_calls([
mock.call('SERVICE', 'METHOD', limit=100, iter=False, offset=0),
mock.call('SERVICE', 'METHOD', limit=100, iter=False, offset=100),
])
_call.reset_mock()
# chunk=100, no limit. Requires one extra request.
_call.side_effect = [
transports.SoftLayerListResult(range(100), 201),
transports.SoftLayerListResult(range(100, 200), 201),
transports.SoftLayerListResult([], 201)
]
result = list(self.client.iter_call('SERVICE', 'METHOD', iter=True))
self.assertEqual(list(range(200)), result)
_call.assert_has_calls([
mock.call('SERVICE', 'METHOD', limit=100, iter=False, offset=0),
mock.call('SERVICE', 'METHOD', limit=100, iter=False, offset=100),
mock.call('SERVICE', 'METHOD', limit=100, iter=False, offset=200),
])
_call.reset_mock()
# chunk=25, limit=30
_call.side_effect = [
transports.SoftLayerListResult(range(0, 25), 30),
transports.SoftLayerListResult(range(25, 30), 30)
]
result = list(self.client.iter_call(
'SERVICE', 'METHOD', iter=True, limit=25))
self.assertEqual(list(range(30)), result)
_call.assert_has_calls([
mock.call('SERVICE', 'METHOD', iter=False, limit=25, offset=0),
mock.call('SERVICE', 'METHOD', iter=False, limit=25, offset=25),
])
_call.reset_mock()
# A non-list was returned
_call.side_effect = ["test"]
result = list(self.client.iter_call('SERVICE', 'METHOD', iter=True))
self.assertEqual(["test"], result)
_call.assert_has_calls([
mock.call('SERVICE', 'METHOD', iter=False, limit=100, offset=0),
])
_call.reset_mock()
_call.side_effect = [
transports.SoftLayerListResult(range(0, 25), 30),
transports.SoftLayerListResult(range(25, 30), 30)
]
result = list(self.client.iter_call('SERVICE', 'METHOD', 'ARG',
iter=True,
limit=25,
offset=12))
self.assertEqual(list(range(30)), result)
_call.assert_has_calls([
mock.call('SERVICE', 'METHOD', 'ARG',
iter=False, limit=25, offset=12),
mock.call('SERVICE', 'METHOD', 'ARG',
iter=False, limit=25, offset=37),
])
# Chunk size of 0 is invalid
self.assertRaises(
AttributeError,
lambda: list(self.client.iter_call('SERVICE', 'METHOD',
iter=True, limit=0)))
def test_call_invalid_arguments(self):
self.assertRaises(
TypeError,
self.client.call, 'SERVICE', 'METHOD', invalid_kwarg='invalid')
def test_call_compression_disabled(self):
mocked = self.set_mock('SoftLayer_SERVICE', 'METHOD')
mocked.return_value = {}
self.client['SERVICE'].METHOD(compress=False)
calls = self.calls('SoftLayer_SERVICE', 'METHOD')
self.assertEqual(len(calls), 1)
headers = calls[0].transport_headers
self.assertEqual(headers.get('accept-encoding'), 'identity')
def test_call_compression_enabled(self):
mocked = self.set_mock('SoftLayer_SERVICE', 'METHOD')
mocked.return_value = {}
self.client['SERVICE'].METHOD(compress=True)
calls = self.calls('SoftLayer_SERVICE', 'METHOD')
self.assertEqual(len(calls), 1)
headers = calls[0].transport_headers
self.assertEqual(headers.get('accept-encoding'),
'gzip, deflate, compress')
def test_call_compression_override(self):
# raw_headers should override compress=False
mocked = self.set_mock('SoftLayer_SERVICE', 'METHOD')
mocked.return_value = {}
self.client['SERVICE'].METHOD(
compress=False,
raw_headers={'Accept-Encoding': 'gzip'})
calls = self.calls('SoftLayer_SERVICE', 'METHOD')
self.assertEqual(len(calls), 1)
headers = calls[0].transport_headers
self.assertEqual(headers.get('accept-encoding'), 'gzip')
def test_special_services(self):
# Tests for the special classes that don't need to start with SoftLayer_
self.client.call('BluePages_Search', 'findBluePagesProfile')
self.assert_called_with('BluePages_Search', 'findBluePagesProfile')
class UnauthenticatedAPIClient(testing.TestCase):
def set_up(self):
self.client = SoftLayer.Client(endpoint_url="ENDPOINT")
@mock.patch('SoftLayer.config.get_client_settings')
def test_init(self, get_client_settings):
get_client_settings.return_value = {}
client = SoftLayer.Client()
self.assertIsNone(client.auth)
@mock.patch('SoftLayer.config.get_client_settings')
def test_init_with_proxy(self, get_client_settings):
get_client_settings.return_value = {'proxy': 'http://localhost:3128'}
client = SoftLayer.Client()
self.assertEqual(client.transport.proxy, 'http://localhost:3128')
@mock.patch('SoftLayer.API.BaseClient.call')
def test_authenticate_with_password(self, _call):
_call.return_value = {
'userId': 12345,
'hash': 'TOKEN',
}
self.client.authenticate_with_password('USERNAME', 'PASSWORD')
_call.assert_called_with(
'User_Customer',
'getPortalLoginToken',
'USERNAME',
'PASSWORD',
None,
None)
self.assertIsNotNone(self.client.auth)
self.assertEqual(self.client.auth.user_id, 12345)
self.assertEqual(self.client.auth.auth_token, 'TOKEN')
| mit |
jelugbo/tundex | lms/djangoapps/instructor/tests/test_legacy_anon_csv.py | 27 | 2540 | """
Unit tests for instructor dashboard
Based on (and depends on) unit tests for courseware.
Notes for running by hand:
./manage.py lms --settings test test lms/djangoapps/instructor
"""
from django.test.utils import override_settings
# Need access to internal func to put users in the right group
from django.contrib.auth.models import User
from django.core.urlresolvers import reverse
from courseware.tests.helpers import LoginEnrollmentTestCase
from courseware.tests.modulestore_config import TEST_DATA_MIXED_MODULESTORE
import instructor.views.legacy
from student.roles import CourseStaffRole
from xmodule.modulestore.tests.django_utils import ModuleStoreTestCase
from xmodule.modulestore.django import modulestore, clear_existing_modulestores
from opaque_keys.edx.locations import SlashSeparatedCourseKey
from mock import Mock, patch
@override_settings(MODULESTORE=TEST_DATA_MIXED_MODULESTORE)
class TestInstructorDashboardAnonCSV(ModuleStoreTestCase, LoginEnrollmentTestCase):
'''
Check for download of csv
'''
# Note -- I copied this setUp from a similar test
def setUp(self):
clear_existing_modulestores()
self.toy = modulestore().get_course(SlashSeparatedCourseKey("edX", "toy", "2012_Fall"))
# Create two accounts
self.student = 'view@test.com'
self.instructor = 'view2@test.com'
self.password = 'foo'
self.create_account('u1', self.student, self.password)
self.create_account('u2', self.instructor, self.password)
self.activate_user(self.student)
self.activate_user(self.instructor)
CourseStaffRole(self.toy.id).add_users(User.objects.get(email=self.instructor))
self.logout()
self.login(self.instructor, self.password)
self.enroll(self.toy)
@patch.object(instructor.views.legacy, 'anonymous_id_for_user', Mock(return_value='42'))
@patch.object(instructor.views.legacy, 'unique_id_for_user', Mock(return_value='41'))
def test_download_anon_csv(self):
course = self.toy
url = reverse('instructor_dashboard_legacy', kwargs={'course_id': course.id.to_deprecated_string()})
response = self.client.post(url, {'action': 'Download CSV of all student anonymized IDs'})
self.assertEqual(response['Content-Type'], 'text/csv')
body = response.content.replace('\r', '')
self.assertEqual(
body,
('"User ID","Anonymized User ID","Course Specific Anonymized User ID"'
'\n"2","41","42"\n')
)
| agpl-3.0 |
matthewgall/dnsjson.com | app.py | 1 | 5654 | #!/usr/bin/env python3
import os, logging, argparse, json, datetime
import requests
import dns.resolver
from bottle import route, request, response, redirect, hook, error, default_app, view, static_file, template
def set_content_type(fn):
def _return_type(*args, **kwargs):
if request.headers.get('Accept') == "application/json":
response.headers['Content-Type'] = 'application/json'
if request.headers.get('Accept') == "text/plain":
response.headers['Content-Type'] = 'text/plain'
if request.method != 'OPTIONS':
return fn(*args, **kwargs)
return _return_type
def enable_cors(fn):
def _enable_cors(*args, **kwargs):
response.headers['Access-Control-Allow-Origin'] = '*'
response.headers['Access-Control-Allow-Methods'] = 'GET, POST, PUT, OPTIONS'
response.headers['Access-Control-Allow-Headers'] = 'Origin, Accept, Content-Type, X-Requested-With, X-CSRF-Token'
if request.method != 'OPTIONS':
return fn(*args, **kwargs)
return _enable_cors
def resolveDomain(domain, recordType, args):
records = []
if args.doh:
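# Editor's note (not in the original app): this branch assumes --resolver points
# at a DNS-over-HTTPS endpoint speaking the Google/Cloudflare-style JSON API
# (query parameters name/type, answers returned under the 'Answer' key), which
# is the shape the requests.get() call below relies on.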
try:
payload = {
'name': domain,
'type': recordType
}
data = requests.get("{}".format(args.resolver), params=payload)
for rec in data.json()['Answer']:
records.append(rec['data'])
except:
return records
return records
else:
try:
resolver = dns.resolver.Resolver()
resolver.nameservers = args.resolver.split(',')
if recordType in args.records.split(','):
lookup = resolver.resolve(domain, recordType)
for data in lookup:
if recordType in ['A', 'AAAA']:
records.append(data.address)
elif recordType in ['TXT']:
for rec in data.strings:
records.append(rec.decode("utf-8").replace('"', '').strip())
else:
records.append(str(data).replace('"', '').strip())
return records
except dns.resolver.NXDOMAIN:
return records
except dns.resolver.NoAnswer:
return records
except dns.exception.Timeout:
return records
except dns.resolver.NoNameservers:
return records
@error('404')
@error('403')
def returnError(code, msg, contentType="text/plain"):
response.status = int(code)
response.content_type = contentType
return template('error')
@route('/static/<filepath:path>')
def static(filepath):
return static_file(filepath, root='views/static')
@route('/servers')
def servers():
try:
response.content_type = 'text/plain'
return "\r\n".join(args.resolver.split(","))
except:
return "Unable to open servers file."
@route('/version')
def version():
try:
dirname, filename = os.path.split(os.path.abspath(__file__))
del filename
f = open(os.getenv('VERSION_PATH', dirname + '/.git/refs/heads/master'), 'r')
content = f.read()
response.content_type = 'text/plain'
return content
except:
return "Unable to open version file."
@route('/<record>')
def route_redirect(record):
return redirect("/{}/A".format(record))
@route('/<record>/<type>')
@route('/<record>/<type>.<ext>')
@set_content_type
@enable_cors
def loadRecord(record, type='A', ext='html'):
try:
if record == "":
raise ValueError
if not ext in ["html","txt", "text", "json"]:
raise ValueError
if type.upper() not in args.records.split(','):
raise ValueError
except ValueError:
return returnError(404, "Not Found", "text/html")
if ext in ["json"]:
response.content_type = 'application/json'
if ext in ["txt", "text"]:
response.content_type = 'text/plain'
# We make a request to get information
data = resolveDomain(record, type.upper(), args)
if response.content_type == 'application/json':
return json.dumps({
'results': {
'name': record,
'type': type.upper(),
'records': data,
}
})
elif response.content_type == "text/plain":
return "\r\n".join(data)
else:
return template('rec', {
'name': record,
'type': type.upper(),
'records': data,
'recTypes': args.records.split(',')
})
@route('/', ('GET', 'POST'))
def index():
if request.method == "POST":
recordName = request.forms.get('recordName', '')
recordType = request.forms.get('recordType', '')
if recordName != '' and recordType in args.records.split(','):
return redirect("/{}/{}".format(recordName, recordType))
else:
return returnError(404, "We were not able to figure out what you were asking for", "text/html")
return template("home", {
'recTypes': args.records.split(',')
})
if __name__ == '__main__':
parser = argparse.ArgumentParser()
# Server settings
parser.add_argument("-i", "--host", default=os.getenv('HOST', '127.0.0.1'), help="server ip")
parser.add_argument("-p", "--port", default=os.getenv('PORT', 5000), help="server port")
# Redis settings
parser.add_argument("--redis", default=os.getenv('REDIS', 'redis://localhost:6379/0'), help="redis connection string")
# Application settings
parser.add_argument("--doh", help="use DNS-over-HTTPS and treat --resolver as DNS-over-HTTPS capable (beta)", action="store_true")
parser.add_argument("--records", default=os.getenv('RECORDS', "A,AAAA,CAA,CNAME,DS,DNSKEY,MX,NS,NSEC,NSEC3,RRSIG,SOA,TXT"), help="supported records")
parser.add_argument("--resolver", default=os.getenv('RESOLVER', '8.8.8.8'), help="resolver address")
# Verbose mode
parser.add_argument("--verbose", "-v", help="increase output verbosity", action="store_true")
args = parser.parse_args()
if args.verbose:
logging.basicConfig(level=logging.DEBUG)
else:
logging.basicConfig(level=logging.INFO)
log = logging.getLogger(__name__)
try:
app = default_app()
app.run(host=args.host, port=args.port, server='tornado')
except:
log.error("Unable to start server on {}:{}".format(args.host, args.port)) | mit |
ventrixcode/yowsup | yowsup/env/env.py | 11 | 2576 | import abc
import logging
from six import with_metaclass
logger = logging.getLogger(__name__)
DEFAULT = "s40"
class YowsupEnvType(abc.ABCMeta):
def __init__(cls, name, bases, dct):
if name != "YowsupEnv":
YowsupEnv.registerEnv(cls)
super(YowsupEnvType, cls).__init__(name, bases, dct)
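# Illustrative sketch (not part of the original yowsup source): because this
# metaclass calls YowsupEnv.registerEnv() for every subclass, merely defining a
# concrete environment class against the YowsupEnv base class declared below is
# enough to register it. Shown commented out so the registry is not altered:
#
#   class YowsupEnvS40(YowsupEnv):        # hypothetical example subclass
#       def getVersion(self):
#           return "2.x"
#       # ... remaining abstract methods would be implemented here ...
#
# Its registry key would be "s40" ("yowsupenvs40" with "yowsupenv" stripped),
# so it could then be selected with YowsupEnv.setEnv("s40").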
class YowsupEnv(with_metaclass(YowsupEnvType, object)):
__metaclass__ = YowsupEnvType
__ENVS = {}
__CURR = None
_USERAGENT_STRING = "WhatsApp/{WHATSAPP_VERSION} {OS_NAME}/{OS_VERSION} Device/{MANUFACTURER}-{DEVICE_NAME}"
@classmethod
def registerEnv(cls, envCls):
envName = envCls.__name__.lower().replace("yowsupenv", "")
cls.__ENVS[envName] = envCls
logger.debug("registered env %s => %s" % (envName, envCls))
@classmethod
def setEnv(cls, envName):
if envName not in cls.__ENVS:
raise ValueError("%s env does not exist" % envName)
logger.debug("Current env changed to %s " % envName)
cls.__CURR = cls.__ENVS[envName]()
@classmethod
def getEnv(cls, envName):
if envName not in cls.__ENVS:
raise ValueError("%s env does not exist" % envName)
return cls.__ENVS[envName]()
@classmethod
def getRegisteredEnvs(cls):
return list(cls.__ENVS.keys())
@classmethod
def getCurrent(cls):
if cls.__CURR is None:
env = DEFAULT
envs = cls.getRegisteredEnvs()
if env not in envs:
env = envs[0]
logger.debug("Env not set, setting it to %s" % env)
cls.setEnv(env)
return cls.__CURR
@abc.abstractmethod
def getToken(self, phoneNumber):
pass
@abc.abstractmethod
def getVersion(self):
pass
@abc.abstractmethod
def getOSVersion(self):
pass
@abc.abstractmethod
def getOSName(self):
pass
@abc.abstractmethod
def getDeviceName(self):
pass
@abc.abstractmethod
def getManufacturer(self):
pass
@abc.abstractmethod
def isAxolotlEnabled(self):
pass
def getBuildVersion(self):
return ""
def getResource(self):
return self.getOSName() + "-" + self.getVersion()
def getUserAgent(self):
return self.__class__._USERAGENT_STRING.format(
WHATSAPP_VERSION = self.getVersion(),
OS_NAME = self.getOSName(),
OS_VERSION = self.getOSVersion(),
MANUFACTURER = self.getManufacturer(),
DEVICE_NAME = self.getDeviceName()
)
| gpl-3.0 |
Agent007/deep-learning | image-classification/helper.py | 155 | 5631 | import pickle
import numpy as np
import matplotlib.pyplot as plt
from sklearn.preprocessing import LabelBinarizer
def _load_label_names():
"""
Load the label names from file
"""
return ['airplane', 'automobile', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck']
def load_cfar10_batch(cifar10_dataset_folder_path, batch_id):
"""
Load a batch of the dataset
"""
with open(cifar10_dataset_folder_path + '/data_batch_' + str(batch_id), mode='rb') as file:
batch = pickle.load(file, encoding='latin1')
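# Descriptive note (added): each CIFAR-10 row is a flat 3072-value vector in
# channel-first order, so the reshape to (N, 3, 32, 32) followed by the
# transpose below yields channel-last (N, 32, 32, 3) images suitable for
# matplotlib's imshow and typical NHWC pipelines.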
features = batch['data'].reshape((len(batch['data']), 3, 32, 32)).transpose(0, 2, 3, 1)
labels = batch['labels']
return features, labels
def display_stats(cifar10_dataset_folder_path, batch_id, sample_id):
"""
Display Stats of the dataset
"""
batch_ids = list(range(1, 6))
if batch_id not in batch_ids:
print('Batch Id out of Range. Possible Batch Ids: {}'.format(batch_ids))
return None
features, labels = load_cfar10_batch(cifar10_dataset_folder_path, batch_id)
if not (0 <= sample_id < len(features)):
print('{} samples in batch {}. {} is out of range.'.format(len(features), batch_id, sample_id))
return None
print('\nStats of batch {}:'.format(batch_id))
print('Samples: {}'.format(len(features)))
print('Label Counts: {}'.format(dict(zip(*np.unique(labels, return_counts=True)))))
print('First 20 Labels: {}'.format(labels[:20]))
sample_image = features[sample_id]
sample_label = labels[sample_id]
label_names = _load_label_names()
print('\nExample of Image {}:'.format(sample_id))
print('Image - Min Value: {} Max Value: {}'.format(sample_image.min(), sample_image.max()))
print('Image - Shape: {}'.format(sample_image.shape))
print('Label - Label Id: {} Name: {}'.format(sample_label, label_names[sample_label]))
plt.axis('off')
plt.imshow(sample_image)
def _preprocess_and_save(normalize, one_hot_encode, features, labels, filename):
"""
Preprocess data and save it to file
"""
features = normalize(features)
labels = one_hot_encode(labels)
pickle.dump((features, labels), open(filename, 'wb'))
def preprocess_and_save_data(cifar10_dataset_folder_path, normalize, one_hot_encode):
"""
Preprocess Training and Validation Data
"""
n_batches = 5
valid_features = []
valid_labels = []
for batch_i in range(1, n_batches + 1):
features, labels = load_cfar10_batch(cifar10_dataset_folder_path, batch_i)
validation_count = int(len(features) * 0.1)
# Preprocess and save a batch of training data
_preprocess_and_save(
normalize,
one_hot_encode,
features[:-validation_count],
labels[:-validation_count],
'preprocess_batch_' + str(batch_i) + '.p')
# Use a portion of training batch for validation
valid_features.extend(features[-validation_count:])
valid_labels.extend(labels[-validation_count:])
# Preprocess and Save all validation data
_preprocess_and_save(
normalize,
one_hot_encode,
np.array(valid_features),
np.array(valid_labels),
'preprocess_validation.p')
with open(cifar10_dataset_folder_path + '/test_batch', mode='rb') as file:
batch = pickle.load(file, encoding='latin1')
# load the test data
test_features = batch['data'].reshape((len(batch['data']), 3, 32, 32)).transpose(0, 2, 3, 1)
test_labels = batch['labels']
# Preprocess and Save all test data
_preprocess_and_save(
normalize,
one_hot_encode,
np.array(test_features),
np.array(test_labels),
'preprocess_test.p')
def batch_features_labels(features, labels, batch_size):
"""
Split features and labels into batches
"""
for start in range(0, len(features), batch_size):
end = min(start + batch_size, len(features))
yield features[start:end], labels[start:end]
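# Usage sketch (added for illustration, names are hypothetical):
#   for batch_features, batch_labels in batch_features_labels(features, labels, 64):
#       train_step(batch_features, batch_labels)
# Each yielded pair holds at most 64 samples; the final batch may be smaller.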
def load_preprocess_training_batch(batch_id, batch_size):
"""
Load the Preprocessed Training data and return them in batches of <batch_size> or less
"""
filename = 'preprocess_batch_' + str(batch_id) + '.p'
features, labels = pickle.load(open(filename, mode='rb'))
# Return the training data in batches of size <batch_size> or less
return batch_features_labels(features, labels, batch_size)
def display_image_predictions(features, labels, predictions):
n_classes = 10
label_names = _load_label_names()
label_binarizer = LabelBinarizer()
label_binarizer.fit(range(n_classes))
label_ids = label_binarizer.inverse_transform(np.array(labels))
fig, axies = plt.subplots(nrows=4, ncols=2)
fig.tight_layout()
fig.suptitle('Softmax Predictions', fontsize=20, y=1.1)
n_predictions = 3
margin = 0.05
ind = np.arange(n_predictions)
width = (1. - 2. * margin) / n_predictions
for image_i, (feature, label_id, pred_indicies, pred_values) in enumerate(zip(features, label_ids, predictions.indices, predictions.values)):
pred_names = [label_names[pred_i] for pred_i in pred_indicies]
correct_name = label_names[label_id]
axies[image_i][0].imshow(feature)
axies[image_i][0].set_title(correct_name)
axies[image_i][0].set_axis_off()
axies[image_i][1].barh(ind + margin, pred_values[::-1], width)
axies[image_i][1].set_yticks(ind + margin)
axies[image_i][1].set_yticklabels(pred_names[::-1])
axies[image_i][1].set_xticks([0, 0.5, 1.0])
| mit |
anirudhSK/chromium | tools/perf/benchmarks/page_cycler.py | 1 | 3123 | # Copyright (c) 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from measurements import page_cycler
from telemetry import test
class PageCyclerBloat(test.Test):
test = page_cycler.PageCycler
page_set = 'page_sets/page_cycler/bloat.json'
options = {'pageset_repeat_iters': 10}
class PageCyclerDhtml(test.Test):
test = page_cycler.PageCycler
page_set = 'page_sets/page_cycler/dhtml.json'
options = {'pageset_repeat_iters': 10}
class PageCyclerIntlArFaHe(test.Test):
test = page_cycler.PageCycler
page_set = 'page_sets/intl_ar_fa_he.json'
options = {'pageset_repeat_iters': 10}
class PageCyclerIntlEsFrPtBr(test.Test):
test = page_cycler.PageCycler
page_set = 'page_sets/intl_es_fr_pt-BR.json'
options = {'pageset_repeat_iters': 10}
class PageCyclerIntlHiRu(test.Test):
test = page_cycler.PageCycler
page_set = 'page_sets/intl_hi_ru.json'
options = {'pageset_repeat_iters': 10}
@test.Disabled('win') # crbug.com/330909
class PageCyclerIntlJaZh(test.Test):
test = page_cycler.PageCycler
page_set = 'page_sets/intl_ja_zh.json'
options = {'pageset_repeat_iters': 10}
class PageCyclerIntlKoThVi(test.Test):
test = page_cycler.PageCycler
page_set = 'page_sets/intl_ko_th_vi.json'
options = {'pageset_repeat_iters': 10}
class PageCyclerMorejs(test.Test):
test = page_cycler.PageCycler
page_set = 'page_sets/page_cycler/morejs.json'
options = {'pageset_repeat_iters': 20,
'cold_load_percent': 50}
class PageCyclerMoz(test.Test):
test = page_cycler.PageCycler
page_set = 'page_sets/page_cycler/moz.json'
options = {'pageset_repeat_iters': 10}
class PageCyclerNetsimTop10(test.Test):
"""Measures load time of the top 10 sites under simulated cable network."""
tag = 'netsim'
test = page_cycler.PageCycler
page_set = 'page_sets/top_10.json'
options = {
'cold_load_percent': 100,
'extra_wpr_args': [
'--shaping_type=proxy',
'--net=cable'
],
'pageset_repeat_iters': 5,
}
def __init__(self):
super(PageCyclerNetsimTop10, self).__init__()
# TODO: This isn't quite right.
# This option will still apply to page cyclers that run after this one.
self.test.clear_cache_before_each_run = True
class PageCyclerTop10Mobile(test.Test):
test = page_cycler.PageCycler
page_set = 'page_sets/top_10_mobile.json'
options = {'pageset_repeat_iters': 10}
class PageCyclerKeyMobileSites(test.Test):
test = page_cycler.PageCycler
page_set = 'page_sets/key_mobile_sites.json'
options = {'pageset_repeat_iters': 10}
class PageCyclerToughLayoutCases(test.Test):
test = page_cycler.PageCycler
page_set = 'page_sets/tough_layout_cases.json'
options = {'pageset_repeat_iters': 10}
# crbug.com/273986: This test is really flaky on XP.
# crbug.com/341843: This test is always timing out on Android.
@test.Disabled('android', 'win')
class PageCyclerTypical25(test.Test):
test = page_cycler.PageCycler
page_set = 'page_sets/typical_25.json'
options = {'pageset_repeat_iters': 10}
| bsd-3-clause |
flotre/sickbeard-vfvo | lib/hachoir_parser/container/ogg.py | 90 | 11888 | #
# Ogg parser
# Author Julien Muchembled <jm AT jm10.no-ip.com>
# Created: 10 june 2006
#
from lib.hachoir_parser import Parser
from lib.hachoir_core.field import (Field, FieldSet, createOrphanField,
NullBits, Bit, Bits, Enum, Fragment, MissingField, ParserError,
UInt8, UInt16, UInt24, UInt32, UInt64,
RawBytes, String, PascalString32, NullBytes)
from lib.hachoir_core.stream import FragmentedStream, InputStreamError
from lib.hachoir_core.endian import LITTLE_ENDIAN, BIG_ENDIAN
from lib.hachoir_core.tools import humanDurationNanosec
from lib.hachoir_core.text_handler import textHandler, hexadecimal
MAX_FILESIZE = 1000 * 1024 * 1024
class XiphInt(Field):
"""
Positive integer with variable size. Values bigger than 254 are stored as
(255, 255, ..., rest): value is the sum of all bytes.
Example: 1000 is stored as (255, 255, 255, 235), total = 255*3+235 = 1000
"""
def __init__(self, parent, name, max_size=None, description=None):
Field.__init__(self, parent, name, size=0, description=description)
value = 0
addr = self.absolute_address
while max_size is None or self._size < max_size:
byte = parent.stream.readBits(addr, 8, LITTLE_ENDIAN)
value += byte
self._size += 8
if byte != 0xff:
break
addr += 8
self.createValue = lambda: value
class Lacing(FieldSet):
def createFields(self):
size = self.size
while size:
field = XiphInt(self, 'size[]', size)
yield field
size -= field.size
def parseVorbisComment(parent):
yield PascalString32(parent, 'vendor', charset="UTF-8")
yield UInt32(parent, 'count')
for index in xrange(parent["count"].value):
yield PascalString32(parent, 'metadata[]', charset="UTF-8")
if parent.current_size != parent.size:
yield UInt8(parent, "framing_flag")
PIXEL_FORMATS = {
0: "4:2:0",
2: "4:2:2",
3: "4:4:4",
}
def formatTimeUnit(field):
return humanDurationNanosec(field.value * 100)
def parseVideoHeader(parent):
yield NullBytes(parent, "padding[]", 2)
yield String(parent, "fourcc", 4)
yield UInt32(parent, "size")
yield textHandler(UInt64(parent, "time_unit", "Frame duration"), formatTimeUnit)
yield UInt64(parent, "sample_per_unit")
yield UInt32(parent, "default_len")
yield UInt32(parent, "buffer_size")
yield UInt16(parent, "bits_per_sample")
yield NullBytes(parent, "padding[]", 2)
yield UInt32(parent, "width")
yield UInt32(parent, "height")
yield NullBytes(parent, "padding[]", 4)
def parseTheoraHeader(parent):
yield UInt8(parent, "version_major")
yield UInt8(parent, "version_minor")
yield UInt8(parent, "version_revision")
yield UInt16(parent, "width", "Width*16 in pixel")
yield UInt16(parent, "height", "Height*16 in pixel")
yield UInt24(parent, "frame_width")
yield UInt24(parent, "frame_height")
yield UInt8(parent, "offset_x")
yield UInt8(parent, "offset_y")
yield UInt32(parent, "fps_num", "Frame per second numerator")
yield UInt32(parent, "fps_den", "Frame per second denominator")
yield UInt24(parent, "aspect_ratio_num", "Aspect ratio numerator")
yield UInt24(parent, "aspect_ratio_den", "Aspect ratio denominator")
yield UInt8(parent, "color_space")
yield UInt24(parent, "target_bitrate")
yield Bits(parent, "quality", 6)
yield Bits(parent, "gp_shift", 5)
yield Enum(Bits(parent, "pixel_format", 2), PIXEL_FORMATS)
yield Bits(parent, "spare_config", 3)
def parseVorbisHeader(parent):
yield UInt32(parent, "vorbis_version")
yield UInt8(parent, "audio_channels")
yield UInt32(parent, "audio_sample_rate")
yield UInt32(parent, "bitrate_maximum")
yield UInt32(parent, "bitrate_nominal")
yield UInt32(parent, "bitrate_minimum")
yield Bits(parent, "blocksize_0", 4)
yield Bits(parent, "blocksize_1", 4)
yield UInt8(parent, "framing_flag")
class Chunk(FieldSet):
tag_info = {
"vorbis": {
3: ("comment", parseVorbisComment),
1: ("vorbis_hdr", parseVorbisHeader),
}, "theora": {
128: ("theora_hdr", parseTheoraHeader),
129: ("comment", parseVorbisComment),
}, "video\0": {
1: ("video_hdr", parseVideoHeader),
},
}
def __init__(self, *args, **kw):
FieldSet.__init__(self, *args, **kw)
if 7*8 <= self.size:
try:
self._name, self.parser = self.tag_info[self["codec"].value][self["type"].value]
if self._name == "theora_hdr":
self.endian = BIG_ENDIAN
except KeyError:
self.parser = None
else:
self.parser = None
def createFields(self):
if 7*8 <= self.size:
yield UInt8(self, 'type')
yield String(self, 'codec', 6)
if self.parser:
for field in self.parser(self):
yield field
else:
size = (self.size - self.current_size) // 8
if size:
yield RawBytes(self, "raw", size)
class Packets:
def __init__(self, first):
self.first = first
def __iter__(self):
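# Descriptive note (added): page.segment_size already holds per-packet byte
# counts, since XiphInt merged the 255-continuation lacing values when each
# page was parsed. Walking the fragment chain page by page, a count whose
# packet continues from the previous page is folded into the running size;
# otherwise the finished packet size is yielded in bits and a new packet
# starts. The trailing size is yielded once the last page has been consumed.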
fragment = self.first
size = None
while fragment is not None:
page = fragment.parent
continued_packet = page["continued_packet"].value
for segment_size in page.segment_size:
if continued_packet:
size += segment_size
continued_packet = False
else:
if size:
yield size * 8
size = segment_size
fragment = fragment.next
if size:
yield size * 8
class Segments(Fragment):
def __init__(self, parent, *args, **kw):
Fragment.__init__(self, parent, *args, **kw)
if parent['last_page'].value:
next = None
else:
next = self.createNext
self.setLinks(parent.parent.streams.setdefault(parent['serial'].value, self), next)
def _createInputStream(self, **args):
if self.first is self:
return FragmentedStream(self, packets=Packets(self), tags=[("id","ogg_stream")], **args)
return Fragment._createInputStream(self, **args)
def _getData(self):
return self
def createNext(self):
parent = self.parent
index = parent.index
parent = parent.parent
first = self.first
try:
while True:
index += 1
next = parent[index][self.name]
if next.first is first:
return next
except MissingField:
pass
def createFields(self):
for segment_size in self.parent.segment_size:
if segment_size:
yield Chunk(self, "chunk[]", size=segment_size*8)
class OggPage(FieldSet):
MAGIC = "OggS"
def __init__(self, *args):
FieldSet.__init__(self, *args)
size = 27
self.lacing_size = self['lacing_size'].value
if self.lacing_size:
size += self.lacing_size
lacing = self['lacing']
self.segment_size = [ field.value for field in lacing ]
size += sum(self.segment_size)
self._size = size * 8
def createFields(self):
yield String(self, 'capture_pattern', 4, charset="ASCII")
if self['capture_pattern'].value != self.MAGIC:
self.warning('Invalid signature. An Ogg page must start with "%s".' % self.MAGIC)
yield UInt8(self, 'stream_structure_version')
yield Bit(self, 'continued_packet')
yield Bit(self, 'first_page')
yield Bit(self, 'last_page')
yield NullBits(self, 'unused', 5)
yield UInt64(self, 'abs_granule_pos')
yield textHandler(UInt32(self, 'serial'), hexadecimal)
yield UInt32(self, 'page')
yield textHandler(UInt32(self, 'checksum'), hexadecimal)
yield UInt8(self, 'lacing_size')
if self.lacing_size:
yield Lacing(self, "lacing", size=self.lacing_size*8)
yield Segments(self, "segments", size=self._size-self._current_size)
def validate(self):
if self['capture_pattern'].value != self.MAGIC:
return "Wrong signature"
if self['stream_structure_version'].value != 0:
return "Unknown structure version (%s)" % self['stream_structure_version'].value
return ""
class OggFile(Parser):
PARSER_TAGS = {
"id": "ogg",
"category": "container",
"file_ext": ("ogg", "ogm"),
"mime": (
u"application/ogg", u"application/x-ogg",
u"audio/ogg", u"audio/x-ogg",
u"video/ogg", u"video/x-ogg",
u"video/theora", u"video/x-theora",
),
"magic": ((OggPage.MAGIC, 0),),
"subfile": "skip",
"min_size": 28*8,
"description": "Ogg multimedia container"
}
endian = LITTLE_ENDIAN
def validate(self):
magic = OggPage.MAGIC
if self.stream.readBytes(0, len(magic)) != magic:
return "Invalid magic string"
# Validate first 3 pages
for index in xrange(3):
try:
page = self[index]
except MissingField:
if self.done:
return True
return "Unable to get page #%u" % index
except (InputStreamError, ParserError):
return "Unable to create page #%u" % index
err = page.validate()
if err:
return "Invalid page #%s: %s" % (index, err)
return True
def createMimeType(self):
if "theora_hdr" in self["page[0]/segments"]:
return u"video/theora"
elif "vorbis_hdr" in self["page[0]/segments"]:
return u"audio/vorbis"
else:
return u"application/ogg"
def createDescription(self):
if "theora_hdr" in self["page[0]"]:
return u"Ogg/Theora video"
elif "vorbis_hdr" in self["page[0]"]:
return u"Ogg/Vorbis audio"
else:
return u"Ogg multimedia container"
def createFields(self):
self.streams = {}
while not self.eof:
yield OggPage(self, "page[]")
def createLastPage(self):
start = self[0].size
end = MAX_FILESIZE * 8
if True:
# FIXME: This doesn't work on all files (eg. some Ogg/Theora)
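# Added note: the byte searched for after "OggS\0" (version 0) is the Ogg
# header_type flag, where 0x04 marks an end-of-stream page and 0x01 a
# continued packet, so "\5" and "\4" together cover the usual last-page cases.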
offset = self.stream.searchBytes("OggS\0\5", start, end)
if offset is None:
offset = self.stream.searchBytes("OggS\0\4", start, end)
if offset is None:
return None
return createOrphanField(self, offset, OggPage, "page")
else:
# Very slow version
page = None
while True:
offset = self.stream.searchBytes("OggS\0", start, end)
if offset is None:
break
page = createOrphanField(self, offset, OggPage, "page")
start += page.size
return page
def createContentSize(self):
page = self.createLastPage()
if page:
return page.absolute_address + page.size
else:
return None
class OggStream(Parser):
PARSER_TAGS = {
"id": "ogg_stream",
"category": "container",
"subfile": "skip",
"min_size": 7*8,
"description": "Ogg logical stream"
}
endian = LITTLE_ENDIAN
def validate(self):
return False
def createFields(self):
for size in self.stream.packets:
yield RawBytes(self, "packet[]", size//8)
| gpl-3.0 |
JimCircadian/ansible | lib/ansible/modules/cloud/amazon/cloudfront_distribution.py | 15 | 85998 | #!/usr/bin/python
# Copyright (c) 2017 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: cloudfront_distribution
short_description: create, update and delete aws cloudfront distributions.
description:
- Allows for easy creation, updating and deletion of CloudFront distributions.
requirements:
- boto3 >= 1.0.0
- python >= 2.6
version_added: "2.5"
author:
- Willem van Ketwich (@wilvk)
- Will Thames (@willthames)
extends_documentation_fragment:
- aws
- ec2
options:
state:
description:
- The desired state of the distribution
present - creates a new distribution or updates an existing distribution.
absent - deletes an existing distribution.
choices: ['present', 'absent']
default: 'present'
distribution_id:
description:
- The id of the cloudfront distribution. This parameter can be exchanged with I(alias) or I(caller_reference) and is used in conjunction with I(e_tag).
e_tag:
description:
- A unique identifier of a modified or existing distribution. Used in conjunction with I(distribution_id).
Is determined automatically if not specified.
caller_reference:
description:
- A unique identifier for creating and updating cloudfront distributions. Each caller reference must be unique across all distributions. e.g. a caller
reference used in a web distribution cannot be reused in a streaming distribution. This parameter can be used instead of I(distribution_id)
to reference an existing distribution. If not specified, this defaults to a datetime stamp of the format
'YYYY-MM-DDTHH:MM:SS.ffffff'.
tags:
description:
- Should be input as a dict() of key-value pairs.
Note that numeric keys or values must be wrapped in quotes. e.g. "Priority:" '1'
purge_tags:
description:
- Specifies whether existing tags will be removed before adding new tags. When I(purge_tags=yes), existing tags are removed and I(tags) are added, if
specified. If no tags are specified, it removes all existing tags for the distribution. When I(purge_tags=no), existing tags are kept and I(tags)
are added, if specified.
default: 'no'
type: bool
alias:
description:
- The name of an alias (CNAME) that is used in a distribution. This is used to effectively reference a distribution by its alias as an alias can only
be used by one distribution per AWS account. This variable avoids having to provide the I(distribution_id) as well as
the I(e_tag), or I(caller_reference) of an existing distribution.
aliases:
description:
- A I(list[]) of domain name aliases (CNAMEs) as strings to be used for the distribution. Each alias must be unique across all distribution for the AWS
account.
purge_aliases:
description:
- Specifies whether existing aliases will be removed before adding new aliases. When I(purge_aliases=yes), existing aliases are removed and I(aliases)
are added.
default: 'no'
type: bool
default_root_object:
description:
- A config element that specifies the path to request when the user requests the origin. e.g. if specified as 'index.html', this maps to
www.example.com/index.html when www.example.com is called by the user. This prevents the entire distribution origin from being exposed at the root.
default_origin_domain_name:
description:
- The domain name to use for an origin if no I(origins) have been specified. Should only be used on a first run of generating a distribution and not on
subsequent runs. Should not be used in conjunction with I(distribution_id), I(caller_reference) or I(alias).
default_origin_path:
description:
- The default origin path to specify for an origin if no I(origins) have been specified. Defaults to empty if not specified.
origins:
description:
- A config element that is a I(list[]) of complex origin objects to be specified for the distribution. Used for creating and updating distributions.
Each origin item comprises the attributes
I(id)
I(domain_name) (defaults to default_origin_domain_name if not specified)
I(origin_path) (defaults to default_origin_path if not specified)
I(custom_headers[])
I(header_name)
I(header_value)
I(s3_origin_access_identity_enabled)
I(custom_origin_config)
I(http_port)
I(https_port)
I(origin_protocol_policy)
I(origin_ssl_protocols[])
I(origin_read_timeout)
I(origin_keepalive_timeout)
purge_origins:
description: Whether to remove any origins that aren't listed in I(origins)
default: false
default_cache_behavior:
description:
- A config element that is a complex object specifying the default cache behavior of the distribution. If not specified, the I(target_origin_id) is
defined as the I(target_origin_id) of the first valid I(cache_behavior) in I(cache_behaviors) with defaults.
The default cache behavior comprises the attributes
I(target_origin_id)
I(forwarded_values)
I(query_string)
I(cookies)
I(forward)
I(whitelisted_names)
I(headers[])
I(query_string_cache_keys[])
I(trusted_signers)
I(enabled)
I(items[])
I(viewer_protocol_policy)
I(min_ttl)
I(allowed_methods)
I(items[])
I(cached_methods[])
I(smooth_streaming)
I(default_ttl)
I(max_ttl)
I(compress)
I(lambda_function_associations[])
I(lambda_function_arn)
I(event_type)
cache_behaviors:
description:
- A config element that is a I(list[]) of complex cache behavior objects to be specified for the distribution. The order
of the list is preserved across runs unless C(purge_cache_behaviors) is enabled.
Each cache behavior comprises the attributes
I(path_pattern)
I(target_origin_id)
I(forwarded_values)
I(query_string)
I(cookies)
I(forward)
I(whitelisted_names)
I(headers[])
I(query_string_cache_keys[])
I(trusted_signers)
I(enabled)
I(items[])
I(viewer_protocol_policy)
I(min_ttl)
I(allowed_methods)
I(items[])
I(cached_methods[])
I(smooth_streaming)
I(default_ttl)
I(max_ttl)
I(compress)
I(lambda_function_associations[])
purge_cache_behaviors:
description: Whether to remove any cache behaviors that aren't listed in I(cache_behaviors). This switch
also allows the reordering of cache_behaviors.
default: false
custom_error_responses:
description:
- A config element that is a I(list[]) of complex custom error responses to be specified for the distribution. This attribute configures custom http
error messages returned to the user.
Each custom error response object comprises the attributes
I(error_code)
I(response_page_path)
I(response_code)
I(error_caching_min_ttl)
purge_custom_error_responses:
description: Whether to remove any custom error responses that aren't listed in I(custom_error_responses)
default: false
comment:
description:
- A comment that describes the cloudfront distribution. If not specified, it defaults to a
generic message that it has been created with Ansible, and a datetime stamp.
logging:
description:
- A config element that is a complex object that defines logging for the distribution.
The logging object comprises the attributes
I(enabled)
I(include_cookies)
I(bucket)
I(prefix)
price_class:
description:
- A string that specifies the pricing class of the distribution. As per
U(https://aws.amazon.com/cloudfront/pricing/)
I(price_class=PriceClass_100) consists of the areas
United States
Canada
Europe
I(price_class=PriceClass_200) consists of the areas
United States
Canada
Europe
Hong Kong, Philippines, S. Korea, Singapore & Taiwan
Japan
India
I(price_class=PriceClass_All) consists of the areas
United States
Canada
Europe
Hong Kong, Philippines, S. Korea, Singapore & Taiwan
Japan
India
South America
Australia
choices: ['PriceClass_100', 'PriceClass_200', 'PriceClass_All']
default: aws defaults this to 'PriceClass_All'
enabled:
description:
- A boolean value that specifies whether the distribution is enabled or disabled.
default: 'yes'
type: bool
viewer_certificate:
description:
- A config element that is a complex object that specifies the encryption details of the distribution.
Comprises the following attributes
I(cloudfront_default_certificate)
I(iam_certificate_id)
I(acm_certificate_arn)
I(ssl_support_method)
I(minimum_protocol_version)
I(certificate)
I(certificate_source)
restrictions:
description:
- A config element that is a complex object that describes how a distribution should restrict its content.
The restriction object comprises the following attributes
I(geo_restriction)
I(restriction_type)
I(items[])
web_acl_id:
description:
- The id of a Web Application Firewall (WAF) Access Control List (ACL).
http_version:
description:
- The version of the http protocol to use for the distribution.
choices: [ 'http1.1', 'http2' ]
default: aws defaults this to 'http2'
ipv6_enabled:
description:
- Determines whether IPv6 support is enabled or not.
type: bool
default: 'no'
wait:
description:
- Specifies whether the module waits until the distribution has completed processing the creation or update.
type: bool
default: 'no'
wait_timeout:
description:
- Specifies the duration in seconds to wait for a timeout of a cloudfront create or update. Defaults to 1800 seconds (30 minutes).
default: 1800
'''
EXAMPLES = '''
# create a basic distribution with defaults and tags
- cloudfront_distribution:
state: present
default_origin_domain_name: www.my-cloudfront-origin.com
tags:
Name: example distribution
Project: example project
Priority: '1'
# update a distribution comment by distribution_id
- cloudfront_distribution:
state: present
distribution_id: E1RP5A2MJ8073O
comment: modified by ansible cloudfront.py
# update a distribution comment by caller_reference
- cloudfront_distribution:
state: present
caller_reference: my cloudfront distribution 001
comment: modified by ansible cloudfront.py
# update a distribution's aliases and comment using the distribution_id as a reference
- cloudfront_distribution:
state: present
distribution_id: E1RP5A2MJ8073O
comment: modified by cloudfront.py again
aliases: [ 'www.my-distribution-source.com', 'zzz.aaa.io' ]
# update a distribution's aliases and comment using an alias as a reference
- cloudfront_distribution:
state: present
caller_reference: my test distribution
comment: modified by cloudfront.py again
aliases:
- www.my-distribution-source.com
- zzz.aaa.io
# update a distribution's comment and aliases and tags and remove existing tags
- cloudfront_distribution:
state: present
distribution_id: E15BU8SDCGSG57
comment: modified by cloudfront.py again
aliases:
- tested.com
tags:
Project: distribution 1.2
purge_tags: yes
# create a distribution with an origin, logging and default cache behavior
- cloudfront_distribution:
state: present
caller_reference: unique test distribution id
origins:
- id: 'my test origin-000111'
domain_name: www.example.com
origin_path: /production
custom_headers:
- header_name: MyCustomHeaderName
header_value: MyCustomHeaderValue
default_cache_behavior:
target_origin_id: 'my test origin-000111'
forwarded_values:
query_string: true
cookies:
forward: all
headers:
- '*'
viewer_protocol_policy: allow-all
smooth_streaming: true
compress: true
allowed_methods:
items:
- GET
- HEAD
cached_methods:
- GET
- HEAD
logging:
enabled: true
include_cookies: false
bucket: mylogbucket.s3.amazonaws.com
prefix: myprefix/
enabled: false
comment: this is a cloudfront distribution with logging
# delete a distribution
- cloudfront_distribution:
state: absent
caller_reference: replaceable distribution
'''
RETURN = '''
active_trusted_signers:
description: Key pair IDs that CloudFront is aware of for each trusted signer
returned: always
type: complex
contains:
enabled:
description: Whether trusted signers are in use
returned: always
type: bool
sample: false
quantity:
description: Number of trusted signers
returned: always
type: int
sample: 1
items:
description: Number of trusted signers
returned: when there are trusted signers
type: list
sample:
- key_pair_id
aliases:
description: Aliases that refer to the distribution
returned: always
type: complex
contains:
items:
description: List of aliases
returned: always
type: list
sample:
- test.example.com
quantity:
description: Number of aliases
returned: always
type: int
sample: 1
arn:
description: Amazon Resource Name of the distribution
returned: always
type: string
sample: arn:aws:cloudfront::123456789012:distribution/E1234ABCDEFGHI
cache_behaviors:
description: Cloudfront cache behaviors
returned: always
type: complex
contains:
items:
description: List of cache behaviors
returned: always
type: complex
contains:
allowed_methods:
description: Methods allowed by the cache behavior
returned: always
type: complex
contains:
cached_methods:
description: Methods cached by the cache behavior
returned: always
type: complex
contains:
items:
description: List of cached methods
returned: always
type: list
sample:
- HEAD
- GET
quantity:
description: Count of cached methods
returned: always
type: int
sample: 2
items:
description: List of methods allowed by the cache behavior
returned: always
type: list
sample:
- HEAD
- GET
quantity:
description: Count of methods allowed by the cache behavior
returned: always
type: int
sample: 2
compress:
description: Whether compression is turned on for the cache behavior
returned: always
type: bool
sample: false
default_ttl:
description: Default Time to Live of the cache behavior
returned: always
type: int
sample: 86400
forwarded_values:
description: Values forwarded to the origin for this cache behavior
returned: always
type: complex
contains:
cookies:
description: Cookies to forward to the origin
returned: always
type: complex
contains:
forward:
description: Which cookies to forward to the origin for this cache behavior
returned: always
type: string
sample: none
whitelisted_names:
description: The names of the cookies to forward to the origin for this cache behavior
returned: when I(forward) is C(whitelist)
type: complex
contains:
quantity:
description: Count of cookies to forward
returned: always
type: int
sample: 1
items:
description: List of cookies to forward
returned: when list is not empty
type: list
sample: my_cookie
headers:
description: Which headers are used to vary on cache retrievals
returned: always
type: complex
contains:
quantity:
description: Count of headers to vary on
returned: always
type: int
sample: 1
items:
description: List of headers to vary on
returned: when list is not empty
type: list
sample:
- Host
query_string:
description: Whether the query string is used in cache lookups
returned: always
type: bool
sample: false
query_string_cache_keys:
description: Which query string keys to use in cache lookups
returned: always
type: complex
contains:
quantity:
description: Count of query string cache keys to use in cache lookups
returned: always
type: int
sample: 1
items:
description: List of query string cache keys to use in cache lookups
returned: when list is not empty
type: list
sample:
lambda_function_associations:
description: Lambda function associations for a cache behavior
returned: always
type: complex
contains:
quantity:
description: Count of lambda function associations
returned: always
type: int
sample: 1
items:
description: List of lambda function associations
returned: when list is not empty
type: list
sample:
- lambda_function_arn: arn:aws:lambda:123456789012:us-east-1/lambda/lambda-function
event_type: viewer-response
max_ttl:
description: Maximum Time to Live
returned: always
type: int
sample: 31536000
min_ttl:
description: Minimum Time to Live
returned: always
type: int
sample: 0
path_pattern:
description: Path pattern that determines this cache behavior
returned: always
type: string
sample: /path/to/files/*
smooth_streaming:
description: Whether smooth streaming is enabled
returned: always
type: bool
sample: false
target_origin_id:
description: Id of origin reference by this cache behavior
returned: always
type: string
sample: origin_abcd
trusted_signers:
description: Trusted signers
returned: always
type: complex
contains:
enabled:
description: Whether trusted signers are enabled for this cache behavior
returned: always
type: bool
sample: false
quantity:
description: Count of trusted signers
returned: always
type: int
sample: 1
viewer_protocol_policy:
description: Policy of how to handle http/https
returned: always
type: string
sample: redirect-to-https
quantity:
description: Count of cache behaviors
returned: always
type: int
sample: 1
caller_reference:
description: Idempotency reference given when creating cloudfront distribution
returned: always
type: string
sample: '1484796016700'
comment:
description: Any comments you want to include about the distribution
returned: always
type: string
sample: 'my first cloudfront distribution'
custom_error_responses:
description: Custom error responses to use for error handling
returned: always
type: complex
contains:
items:
description: List of custom error responses
returned: always
type: complex
contains:
error_caching_min_ttl:
description: Minimum time to cache this error response
returned: always
type: int
sample: 300
error_code:
description: Origin response code that triggers this error response
returned: always
type: int
sample: 500
response_code:
description: Response code to return to the requester
returned: always
type: string
sample: '500'
response_page_path:
description: Path that contains the error page to display
returned: always
type: string
sample: /errors/5xx.html
quantity:
description: Count of custom error response items
returned: always
type: int
sample: 1
default_cache_behavior:
description: Default cache behavior
returned: always
type: complex
contains:
allowed_methods:
description: Methods allowed by the cache behavior
returned: always
type: complex
contains:
cached_methods:
description: Methods cached by the cache behavior
returned: always
type: complex
contains:
items:
description: List of cached methods
returned: always
type: list
sample:
- HEAD
- GET
quantity:
description: Count of cached methods
returned: always
type: int
sample: 2
items:
description: List of methods allowed by the cache behavior
returned: always
type: list
sample:
- HEAD
- GET
quantity:
description: Count of methods allowed by the cache behavior
returned: always
type: int
sample: 2
compress:
description: Whether compression is turned on for the cache behavior
returned: always
type: bool
sample: false
default_ttl:
description: Default Time to Live of the cache behavior
returned: always
type: int
sample: 86400
forwarded_values:
description: Values forwarded to the origin for this cache behavior
returned: always
type: complex
contains:
cookies:
description: Cookies to forward to the origin
returned: always
type: complex
contains:
forward:
description: Which cookies to forward to the origin for this cache behavior
returned: always
type: string
sample: none
whitelisted_names:
description: The names of the cookies to forward to the origin for this cache behavior
returned: when I(forward) is C(whitelist)
type: complex
contains:
quantity:
description: Count of cookies to forward
returned: always
type: int
sample: 1
items:
description: List of cookies to forward
returned: when list is not empty
type: list
sample: my_cookie
headers:
description: Which headers are used to vary on cache retrievals
returned: always
type: complex
contains:
quantity:
description: Count of headers to vary on
returned: always
type: int
sample: 1
items:
description: List of headers to vary on
returned: when list is not empty
type: list
sample:
- Host
query_string:
description: Whether the query string is used in cache lookups
returned: always
type: bool
sample: false
query_string_cache_keys:
description: Which query string keys to use in cache lookups
returned: always
type: complex
contains:
quantity:
description: Count of query string cache keys to use in cache lookups
returned: always
type: int
sample: 1
items:
description: List of query string cache keys to use in cache lookups
returned: when list is not empty
type: list
sample:
lambda_function_associations:
description: Lambda function associations for a cache behavior
returned: always
type: complex
contains:
quantity:
description: Count of lambda function associations
returned: always
type: int
sample: 1
items:
description: List of lambda function associations
returned: when list is not empty
type: list
sample:
- lambda_function_arn: arn:aws:lambda:123456789012:us-east-1/lambda/lambda-function
event_type: viewer-response
max_ttl:
description: Maximum Time to Live
returned: always
type: int
sample: 31536000
min_ttl:
description: Minimum Time to Live
returned: always
type: int
sample: 0
path_pattern:
description: Path pattern that determines this cache behavior
returned: always
type: string
sample: /path/to/files/*
smooth_streaming:
description: Whether smooth streaming is enabled
returned: always
type: bool
sample: false
target_origin_id:
description: Id of origin reference by this cache behavior
returned: always
type: string
sample: origin_abcd
trusted_signers:
description: Trusted signers
returned: always
type: complex
contains:
enabled:
description: Whether trusted signers are enabled for this cache behavior
returned: always
type: bool
sample: false
quantity:
description: Count of trusted signers
returned: always
type: int
sample: 1
viewer_protocol_policy:
description: Policy of how to handle http/https
returned: always
type: string
sample: redirect-to-https
default_root_object:
description: The object that you want CloudFront to request from your origin (for example, index.html)
when a viewer requests the root URL for your distribution
returned: always
type: string
sample: ''
diff:
description: Difference between previous configuration and new configuration
returned: always
type: dict
sample: {}
domain_name:
description: Domain name of cloudfront distribution
returned: always
type: string
sample: d1vz8pzgurxosf.cloudfront.net
enabled:
description: Whether the cloudfront distribution is enabled or not
returned: always
type: bool
sample: true
http_version:
description: Version of HTTP supported by the distribution
returned: always
type: string
sample: http2
id:
description: Cloudfront distribution ID
returned: always
type: string
sample: E123456ABCDEFG
in_progress_invalidation_batches:
description: The number of invalidation batches currently in progress
returned: always
type: int
sample: 0
is_ipv6_enabled:
description: Whether IPv6 is enabled
returned: always
type: bool
sample: true
last_modified_time:
description: Date and time distribution was last modified
returned: always
type: string
sample: '2017-10-13T01:51:12.656000+00:00'
logging:
description: Logging information
returned: always
type: complex
contains:
bucket:
description: S3 bucket logging destination
returned: always
type: string
sample: logs-example-com.s3.amazonaws.com
enabled:
description: Whether logging is enabled
returned: always
type: bool
sample: true
include_cookies:
description: Whether to log cookies
returned: always
type: bool
sample: false
prefix:
description: Prefix added to logging object names
returned: always
type: string
sample: cloudfront/test
origins:
description: Origins in the cloudfront distribution
returned: always
type: complex
contains:
items:
description: List of origins
returned: always
type: complex
contains:
custom_headers:
description: Custom headers passed to the origin
returned: always
type: complex
contains:
quantity:
description: Count of headers
returned: always
type: int
sample: 1
custom_origin_config:
description: Configuration of the origin
returned: always
type: complex
contains:
http_port:
description: Port on which HTTP is listening
returned: always
type: int
sample: 80
https_port:
description: Port on which HTTPS is listening
returned: always
type: int
sample: 443
origin_keepalive_timeout:
description: Keep-alive timeout
returned: always
type: int
sample: 5
origin_protocol_policy:
description: Policy of which protocols are supported
returned: always
type: string
sample: https-only
origin_read_timeout:
description: Timeout for reads to the origin
returned: always
type: int
sample: 30
origin_ssl_protocols:
description: SSL protocols allowed by the origin
returned: always
type: complex
contains:
items:
description: List of SSL protocols
returned: always
type: list
sample:
- TLSv1
- TLSv1.1
- TLSv1.2
quantity:
description: Count of SSL protocols
returned: always
type: int
sample: 3
domain_name:
description: Domain name of the origin
returned: always
type: string
sample: test-origin.example.com
id:
description: ID of the origin
returned: always
type: string
sample: test-origin.example.com
origin_path:
description: Subdirectory to prefix the request from the S3 or HTTP origin
returned: always
type: string
sample: ''
quantity:
description: Count of origins
returned: always
type: int
sample: 1
price_class:
description: Price class of cloudfront distribution
returned: always
type: string
sample: PriceClass_All
restrictions:
description: Restrictions in use by Cloudfront
returned: always
type: complex
contains:
geo_restriction:
description: Controls the countries in which your content is distributed.
returned: always
type: complex
contains:
quantity:
description: Count of restrictions
returned: always
type: int
sample: 1
items:
description: List of country codes allowed or disallowed
returned: always
type: list
sample: xy
restriction_type:
description: Type of restriction
returned: always
type: string
sample: blacklist
status:
description: Status of the cloudfront distribution
returned: always
type: string
sample: InProgress
tags:
description: Distribution tags
returned: always
type: dict
sample:
Hello: World
viewer_certificate:
description: Certificate used by cloudfront distribution
returned: always
type: complex
contains:
acm_certificate_arn:
description: ARN of ACM certificate
returned: when certificate comes from ACM
type: string
sample: arn:aws:acm:us-east-1:123456789012:certificate/abcd1234-1234-1234-abcd-123456abcdef
certificate:
description: Reference to certificate
returned: always
type: string
sample: arn:aws:acm:us-east-1:123456789012:certificate/abcd1234-1234-1234-abcd-123456abcdef
certificate_source:
description: Where certificate comes from
returned: always
type: string
sample: acm
minimum_protocol_version:
description: Minimum SSL/TLS protocol supported by this distribution
returned: always
type: string
sample: TLSv1
ssl_support_method:
description: Support for pre-SNI browsers or not
returned: always
type: string
sample: sni-only
web_acl_id:
description: ID of Web Access Control List (from WAF service)
returned: always
type: string
sample: abcd1234-1234-abcd-abcd-abcd12345678
'''
from ansible.module_utils._text import to_text, to_native
from ansible.module_utils.aws.core import AnsibleAWSModule
from ansible.module_utils.aws.cloudfront_facts import CloudFrontFactsServiceManager
from ansible.module_utils.ec2 import get_aws_connection_info
from ansible.module_utils.ec2 import ec2_argument_spec, boto3_conn, compare_aws_tags
from ansible.module_utils.ec2 import camel_dict_to_snake_dict, ansible_dict_to_boto3_tag_list
from ansible.module_utils.ec2 import snake_dict_to_camel_dict, boto3_tag_list_to_ansible_dict
import datetime
try:
from collections import OrderedDict
except ImportError:
try:
from ordereddict import OrderedDict
except ImportError:
pass # caught by AnsibleAWSModule (as python 2.6 + boto3 => ordereddict is installed)
try:
import botocore
except ImportError:
pass
def change_dict_key_name(dictionary, old_key, new_key):
if old_key in dictionary:
dictionary[new_key] = dictionary.get(old_key)
dictionary.pop(old_key, None)
return dictionary
def merge_validation_into_config(config, validated_node, node_name):
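# Descriptive note (added): dict nodes from the validated input are overlaid
# onto the existing config entry (validated keys win on conflict), while list
# nodes are unioned with the existing list and de-duplicated via set(), so
# list ordering is not preserved.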
if validated_node is not None:
if isinstance(validated_node, dict):
config_node = config.get(node_name)
if config_node is not None:
config_node_items = list(config_node.items())
else:
config_node_items = []
config[node_name] = dict(config_node_items + list(validated_node.items()))
if isinstance(validated_node, list):
config[node_name] = list(set(config.get(node_name) + validated_node))
return config
def ansible_list_to_cloudfront_list(list_items=None, include_quantity=True):
if list_items is None:
list_items = []
if not isinstance(list_items, list):
raise ValueError('Expected a list, got a {0} with value {1}'.format(type(list_items).__name__, str(list_items)))
result = {}
if include_quantity:
result['quantity'] = len(list_items)
if len(list_items) > 0:
result['items'] = list_items
return result
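# Illustration (added, not part of the original module): the helper above turns
# a plain list into the {'quantity': N, 'items': [...]} shape CloudFront expects:
#   ansible_list_to_cloudfront_list(['example.com', 'www.example.com'])
#   -> {'quantity': 2, 'items': ['example.com', 'www.example.com']}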
def recursive_diff(dict1, dict2):
left = dict((k, v) for (k, v) in dict1.items() if k not in dict2)
right = dict((k, v) for (k, v) in dict2.items() if k not in dict1)
for k in (set(dict1.keys()) & set(dict2.keys())):
if isinstance(dict1[k], dict) and isinstance(dict2[k], dict):
result = recursive_diff(dict1[k], dict2[k])
if result:
left[k] = result[0]
right[k] = result[1]
elif dict1[k] != dict2[k]:
left[k] = dict1[k]
right[k] = dict2[k]
if left or right:
return left, right
else:
return None
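# Illustration (added): recursive_diff returns the differing subtrees of both
# dicts, or None when they are identical, e.g.
#   recursive_diff({'a': 1, 'b': {'c': 2}}, {'a': 1, 'b': {'c': 3}})
#   -> ({'b': {'c': 2}}, {'b': {'c': 3}})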
def create_distribution(client, module, config, tags):
try:
if not tags:
return client.create_distribution(DistributionConfig=config)['Distribution']
else:
distribution_config_with_tags = {
'DistributionConfig': config,
'Tags': {
'Items': tags
}
}
return client.create_distribution_with_tags(DistributionConfigWithTags=distribution_config_with_tags)['Distribution']
except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
module.fail_json_aws(e, msg="Error creating distribution")
def delete_distribution(client, module, distribution):
try:
return client.delete_distribution(Id=distribution['Distribution']['Id'], IfMatch=distribution['ETag'])
except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
module.fail_json_aws(e, msg="Error deleting distribution %s" % to_native(distribution['Distribution']))
def update_distribution(client, module, config, distribution_id, e_tag):
try:
return client.update_distribution(DistributionConfig=config, Id=distribution_id, IfMatch=e_tag)['Distribution']
except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
module.fail_json_aws(e, msg="Error updating distribution to %s" % to_native(config))
def tag_resource(client, module, arn, tags):
try:
return client.tag_resource(Resource=arn, Tags=dict(Items=tags))
except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
module.fail_json_aws(e, msg="Error tagging resource")
def untag_resource(client, module, arn, tag_keys):
try:
return client.untag_resource(Resource=arn, TagKeys=dict(Items=tag_keys))
except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
module.fail_json_aws(e, msg="Error untagging resource")
def list_tags_for_resource(client, module, arn):
try:
response = client.list_tags_for_resource(Resource=arn)
return boto3_tag_list_to_ansible_dict(response.get('Tags').get('Items'))
except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
module.fail_json_aws(e, msg="Error listing tags for resource")
def update_tags(client, module, existing_tags, valid_tags, purge_tags, arn):
changed = False
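# compare_aws_tags (from module_utils.ec2) returns the tags to add and the tag
# keys to remove; when purge_tags is false, existing tags absent from
# valid_tags are kept rather than scheduled for removal.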
to_add, to_remove = compare_aws_tags(existing_tags, valid_tags, purge_tags)
if to_remove:
untag_resource(client, module, arn, to_remove)
changed = True
if to_add:
tag_resource(client, module, arn, ansible_dict_to_boto3_tag_list(to_add))
changed = True
return changed
class CloudFrontValidationManager(object):
"""
Manages Cloudfront validations
"""
def __init__(self, module):
self.__cloudfront_facts_mgr = CloudFrontFactsServiceManager(module)
self.module = module
self.__default_distribution_enabled = True
self.__default_http_port = 80
self.__default_https_port = 443
self.__default_ipv6_enabled = False
self.__default_origin_ssl_protocols = [
'TLSv1',
'TLSv1.1',
'TLSv1.2'
]
self.__default_custom_origin_protocol_policy = 'match-viewer'
self.__default_custom_origin_read_timeout = 30
self.__default_custom_origin_keepalive_timeout = 5
self.__default_datetime_string = datetime.datetime.now().strftime('%Y-%m-%dT%H:%M:%S.%f')
self.__default_cache_behavior_min_ttl = 0
self.__default_cache_behavior_max_ttl = 31536000
self.__default_cache_behavior_default_ttl = 86400
self.__default_cache_behavior_compress = False
self.__default_cache_behavior_viewer_protocol_policy = 'allow-all'
self.__default_cache_behavior_smooth_streaming = False
self.__default_cache_behavior_forwarded_values_forward_cookies = 'none'
self.__default_cache_behavior_forwarded_values_query_string = True
self.__default_trusted_signers_enabled = False
self.__valid_price_classes = set([
'PriceClass_100',
'PriceClass_200',
'PriceClass_All'
])
self.__valid_origin_protocol_policies = set([
'http-only',
'match-viewer',
'https-only'
])
self.__valid_origin_ssl_protocols = set([
'SSLv3',
'TLSv1',
'TLSv1.1',
'TLSv1.2'
])
self.__valid_cookie_forwarding = set([
'none',
'whitelist',
'all'
])
self.__valid_viewer_protocol_policies = set([
'allow-all',
'https-only',
'redirect-to-https'
])
self.__valid_methods = set([
'GET',
'HEAD',
'POST',
'PUT',
'PATCH',
'OPTIONS',
'DELETE'
])
self.__valid_methods_cached_methods = [
set([
'GET',
'HEAD'
]),
set([
'GET',
'HEAD',
'OPTIONS'
])
]
self.__valid_methods_allowed_methods = [
self.__valid_methods_cached_methods[0],
self.__valid_methods_cached_methods[1],
self.__valid_methods
]
self.__valid_lambda_function_association_event_types = set([
'viewer-request',
'viewer-response',
'origin-request',
'origin-response'
])
self.__valid_viewer_certificate_ssl_support_methods = set([
'sni-only',
'vip'
])
self.__valid_viewer_certificate_minimum_protocol_versions = set([
'SSLv3',
'TLSv1',
'TLSv1_2016',
'TLSv1.1_2016',
'TLSv1.2_2018'
])
self.__valid_viewer_certificate_certificate_sources = set([
'cloudfront',
'iam',
'acm'
])
self.__valid_http_versions = set([
'http1.1',
'http2'
])
self.__s3_bucket_domain_identifier = '.s3.amazonaws.com'
def add_missing_key(self, dict_object, key_to_set, value_to_set):
if key_to_set not in dict_object and value_to_set is not None:
dict_object[key_to_set] = value_to_set
return dict_object
def add_key_else_change_dict_key(self, dict_object, old_key, new_key, value_to_set):
if old_key not in dict_object and value_to_set is not None:
dict_object[new_key] = value_to_set
else:
dict_object = change_dict_key_name(dict_object, old_key, new_key)
return dict_object
def add_key_else_validate(self, dict_object, key_name, attribute_name, value_to_set, valid_values, to_aws_list=False):
if key_name in dict_object:
self.validate_attribute_with_allowed_values(value_to_set, attribute_name, valid_values)
else:
if to_aws_list:
dict_object[key_name] = ansible_list_to_cloudfront_list(value_to_set)
elif value_to_set is not None:
dict_object[key_name] = value_to_set
return dict_object
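    # A small sketch of the three helpers above, with hypothetical dicts:
    #   add_missing_key({'id': 'o1'}, 'origin_path', '/img')
    #       -> {'id': 'o1', 'origin_path': '/img'}
    #   add_key_else_change_dict_key({'http_port': 8080}, 'http_port', 'h_t_t_p_port', 80)
    #       -> {'h_t_t_p_port': 8080}   (existing key renamed, default ignored)
    #   add_key_else_validate({}, 'price_class', 'price_class', 'PriceClass_All', valid_values)
    #       -> {'price_class': 'PriceClass_All'}   (missing key filled in, no validation needed)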
def validate_logging(self, logging):
try:
if logging is None:
return None
valid_logging = {}
if logging and not set(['enabled', 'include_cookies', 'bucket', 'prefix']).issubset(logging):
self.module.fail_json(msg="The logging parameters enabled, include_cookies, bucket and prefix must be specified.")
valid_logging['include_cookies'] = logging.get('include_cookies')
valid_logging['enabled'] = logging.get('enabled')
valid_logging['bucket'] = logging.get('bucket')
valid_logging['prefix'] = logging.get('prefix')
return valid_logging
except Exception as e:
self.module.fail_json_aws(e, msg="Error validating distribution logging")
def validate_is_list(self, list_to_validate, list_name):
if not isinstance(list_to_validate, list):
self.module.fail_json(msg='%s is of type %s. Must be a list.' % (list_name, type(list_to_validate).__name__))
def validate_required_key(self, key_name, full_key_name, dict_object):
if key_name not in dict_object:
self.module.fail_json(msg="%s must be specified." % full_key_name)
def validate_origins(self, client, config, origins, default_origin_domain_name,
default_origin_path, create_distribution, purge_origins=False):
try:
if origins is None:
if default_origin_domain_name is None and not create_distribution:
if purge_origins:
return None
else:
return ansible_list_to_cloudfront_list(config)
if default_origin_domain_name is not None:
origins = [{
'domain_name': default_origin_domain_name,
'origin_path': default_origin_path or ''
}]
else:
origins = []
self.validate_is_list(origins, 'origins')
if not origins and default_origin_domain_name is None and create_distribution:
self.module.fail_json(msg="Both origins[] and default_origin_domain_name have not been specified. Please specify at least one.")
all_origins = OrderedDict()
new_domains = list()
for origin in config:
all_origins[origin.get('domain_name')] = origin
for origin in origins:
origin = self.validate_origin(client, all_origins.get(origin.get('domain_name'), {}), origin, default_origin_path)
all_origins[origin['domain_name']] = origin
new_domains.append(origin['domain_name'])
if purge_origins:
for domain in list(all_origins.keys()):
if domain not in new_domains:
del(all_origins[domain])
return ansible_list_to_cloudfront_list(list(all_origins.values()))
except Exception as e:
self.module.fail_json_aws(e, msg="Error validating distribution origins")
def validate_s3_origin_configuration(self, client, existing_config, origin):
if origin['s3_origin_access_identity_enabled'] and existing_config.get('s3_origin_config', {}).get('origin_access_identity'):
return existing_config['s3_origin_config']['origin_access_identity']
if not origin['s3_origin_access_identity_enabled']:
return None
try:
comment = "access-identity-by-ansible-%s-%s" % (origin.get('domain_name'), self.__default_datetime_string)
cfoai_config = dict(CloudFrontOriginAccessIdentityConfig=dict(CallerReference=self.__default_datetime_string,
Comment=comment))
oai = client.create_cloud_front_origin_access_identity(**cfoai_config)['CloudFrontOriginAccessIdentity']['Id']
except Exception as e:
self.module.fail_json_aws(e, msg="Couldn't create Origin Access Identity for id %s" % origin['id'])
return "origin-access-identity/cloudfront/%s" % oai
def validate_origin(self, client, existing_config, origin, default_origin_path):
try:
origin = self.add_missing_key(origin, 'origin_path', existing_config.get('origin_path', default_origin_path or ''))
self.validate_required_key('origin_path', 'origins[].origin_path', origin)
origin = self.add_missing_key(origin, 'id', existing_config.get('id', self.__default_datetime_string))
if 'custom_headers' in origin and len(origin.get('custom_headers')) > 0:
for custom_header in origin.get('custom_headers'):
if 'header_name' not in custom_header or 'header_value' not in custom_header:
self.module.fail_json(msg="Both origins[].custom_headers.header_name and origins[].custom_headers.header_value must be specified.")
origin['custom_headers'] = ansible_list_to_cloudfront_list(origin.get('custom_headers'))
else:
origin['custom_headers'] = ansible_list_to_cloudfront_list()
if self.__s3_bucket_domain_identifier in origin.get('domain_name').lower():
if origin.get("s3_origin_access_identity_enabled") is not None:
s3_origin_config = self.validate_s3_origin_configuration(client, existing_config, origin)
if s3_origin_config:
oai = s3_origin_config
else:
oai = ""
origin["s3_origin_config"] = dict(origin_access_identity=oai)
del(origin["s3_origin_access_identity_enabled"])
if 'custom_origin_config' in origin:
self.module.fail_json(msg="s3_origin_access_identity_enabled and custom_origin_config are mutually exclusive")
else:
origin = self.add_missing_key(origin, 'custom_origin_config', existing_config.get('custom_origin_config', {}))
custom_origin_config = origin.get('custom_origin_config')
custom_origin_config = self.add_key_else_validate(custom_origin_config, 'origin_protocol_policy',
'origins[].custom_origin_config.origin_protocol_policy',
self.__default_custom_origin_protocol_policy, self.__valid_origin_protocol_policies)
custom_origin_config = self.add_missing_key(custom_origin_config, 'origin_read_timeout', self.__default_custom_origin_read_timeout)
custom_origin_config = self.add_missing_key(custom_origin_config, 'origin_keepalive_timeout', self.__default_custom_origin_keepalive_timeout)
custom_origin_config = self.add_key_else_change_dict_key(custom_origin_config, 'http_port', 'h_t_t_p_port', self.__default_http_port)
custom_origin_config = self.add_key_else_change_dict_key(custom_origin_config, 'https_port', 'h_t_t_p_s_port', self.__default_https_port)
if custom_origin_config.get('origin_ssl_protocols', {}).get('items'):
custom_origin_config['origin_ssl_protocols'] = custom_origin_config['origin_ssl_protocols']['items']
if custom_origin_config.get('origin_ssl_protocols'):
self.validate_attribute_list_with_allowed_list(custom_origin_config['origin_ssl_protocols'], 'origins[].origin_ssl_protocols',
self.__valid_origin_ssl_protocols)
else:
custom_origin_config['origin_ssl_protocols'] = self.__default_origin_ssl_protocols
custom_origin_config['origin_ssl_protocols'] = ansible_list_to_cloudfront_list(custom_origin_config['origin_ssl_protocols'])
return origin
except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
self.module.fail_json_aws(e, msg="Error validating distribution origin")
def validate_cache_behaviors(self, config, cache_behaviors, valid_origins, purge_cache_behaviors=False):
try:
if cache_behaviors is None and valid_origins is not None and purge_cache_behaviors is False:
return ansible_list_to_cloudfront_list(config)
all_cache_behaviors = OrderedDict()
# cache behaviors are order dependent so we don't preserve the existing ordering when purge_cache_behaviors
# is true (if purge_cache_behaviors is not true, we can't really know the full new order)
if not purge_cache_behaviors:
for behavior in config:
all_cache_behaviors[behavior['path_pattern']] = behavior
for cache_behavior in cache_behaviors:
valid_cache_behavior = self.validate_cache_behavior(all_cache_behaviors.get(cache_behavior.get('path_pattern'), {}),
cache_behavior, valid_origins)
all_cache_behaviors[cache_behavior['path_pattern']] = valid_cache_behavior
if purge_cache_behaviors:
for target_origin_id in set(all_cache_behaviors.keys()) - set([cb['path_pattern'] for cb in cache_behaviors]):
del(all_cache_behaviors[target_origin_id])
return ansible_list_to_cloudfront_list(list(all_cache_behaviors.values()))
except Exception as e:
self.module.fail_json_aws(e, msg="Error validating distribution cache behaviors")
def validate_cache_behavior(self, config, cache_behavior, valid_origins, is_default_cache=False):
if is_default_cache and cache_behavior is None:
cache_behavior = {}
if cache_behavior is None and valid_origins is not None:
return config
cache_behavior = self.validate_cache_behavior_first_level_keys(config, cache_behavior, valid_origins, is_default_cache)
cache_behavior = self.validate_forwarded_values(config, cache_behavior.get('forwarded_values'), cache_behavior)
cache_behavior = self.validate_allowed_methods(config, cache_behavior.get('allowed_methods'), cache_behavior)
cache_behavior = self.validate_lambda_function_associations(config, cache_behavior.get('lambda_function_associations'), cache_behavior)
cache_behavior = self.validate_trusted_signers(config, cache_behavior.get('trusted_signers'), cache_behavior)
return cache_behavior
def validate_cache_behavior_first_level_keys(self, config, cache_behavior, valid_origins, is_default_cache):
try:
cache_behavior = self.add_key_else_change_dict_key(cache_behavior, 'min_ttl', 'min_t_t_l',
config.get('min_t_t_l', self.__default_cache_behavior_min_ttl))
cache_behavior = self.add_key_else_change_dict_key(cache_behavior, 'max_ttl', 'max_t_t_l',
config.get('max_t_t_l', self.__default_cache_behavior_max_ttl))
cache_behavior = self.add_key_else_change_dict_key(cache_behavior, 'default_ttl', 'default_t_t_l',
config.get('default_t_t_l', self.__default_cache_behavior_default_ttl))
cache_behavior = self.add_missing_key(cache_behavior, 'compress', config.get('compress', self.__default_cache_behavior_compress))
target_origin_id = cache_behavior.get('target_origin_id', config.get('target_origin_id'))
if not target_origin_id:
target_origin_id = self.get_first_origin_id_for_default_cache_behavior(valid_origins)
if target_origin_id not in [origin['id'] for origin in valid_origins.get('items', [])]:
if is_default_cache:
cache_behavior_name = 'Default cache behavior'
else:
cache_behavior_name = 'Cache behavior for path %s' % cache_behavior['path_pattern']
self.module.fail_json(msg="%s has target_origin_id pointing to an origin that does not exist." %
cache_behavior_name)
cache_behavior['target_origin_id'] = target_origin_id
cache_behavior = self.add_key_else_validate(cache_behavior, 'viewer_protocol_policy', 'cache_behavior.viewer_protocol_policy',
config.get('viewer_protocol_policy',
self.__default_cache_behavior_viewer_protocol_policy),
self.__valid_viewer_protocol_policies)
cache_behavior = self.add_missing_key(cache_behavior, 'smooth_streaming',
config.get('smooth_streaming', self.__default_cache_behavior_smooth_streaming))
return cache_behavior
except Exception as e:
self.module.fail_json_aws(e, msg="Error validating distribution cache behavior first level keys")
def validate_forwarded_values(self, config, forwarded_values, cache_behavior):
try:
if not forwarded_values:
forwarded_values = dict()
existing_config = config.get('forwarded_values', {})
headers = forwarded_values.get('headers', existing_config.get('headers', {}).get('items'))
if headers:
headers.sort()
forwarded_values['headers'] = ansible_list_to_cloudfront_list(headers)
if 'cookies' not in forwarded_values:
forward = existing_config.get('cookies', {}).get('forward', self.__default_cache_behavior_forwarded_values_forward_cookies)
forwarded_values['cookies'] = {'forward': forward}
else:
existing_whitelist = existing_config.get('cookies', {}).get('whitelisted_names', {}).get('items')
whitelist = forwarded_values.get('cookies').get('whitelisted_names', existing_whitelist)
if whitelist:
self.validate_is_list(whitelist, 'forwarded_values.whitelisted_names')
forwarded_values['cookies']['whitelisted_names'] = ansible_list_to_cloudfront_list(whitelist)
cookie_forwarding = forwarded_values.get('cookies').get('forward', existing_config.get('cookies', {}).get('forward'))
self.validate_attribute_with_allowed_values(cookie_forwarding, 'cache_behavior.forwarded_values.cookies.forward',
self.__valid_cookie_forwarding)
forwarded_values['cookies']['forward'] = cookie_forwarding
query_string_cache_keys = forwarded_values.get('query_string_cache_keys', existing_config.get('query_string_cache_keys', {}).get('items', []))
self.validate_is_list(query_string_cache_keys, 'forwarded_values.query_string_cache_keys')
forwarded_values['query_string_cache_keys'] = ansible_list_to_cloudfront_list(query_string_cache_keys)
forwarded_values = self.add_missing_key(forwarded_values, 'query_string',
existing_config.get('query_string', self.__default_cache_behavior_forwarded_values_query_string))
cache_behavior['forwarded_values'] = forwarded_values
return cache_behavior
except Exception as e:
self.module.fail_json_aws(e, msg="Error validating forwarded values")
def validate_lambda_function_associations(self, config, lambda_function_associations, cache_behavior):
try:
if lambda_function_associations is not None:
self.validate_is_list(lambda_function_associations, 'lambda_function_associations')
for association in lambda_function_associations:
association = change_dict_key_name(association, 'lambda_function_arn', 'lambda_function_a_r_n')
self.validate_attribute_with_allowed_values(association.get('event_type'), 'cache_behaviors[].lambda_function_associations.event_type',
self.__valid_lambda_function_association_event_types)
cache_behavior['lambda_function_associations'] = ansible_list_to_cloudfront_list(lambda_function_associations)
else:
if 'lambda_function_associations' in config:
cache_behavior['lambda_function_associations'] = config.get('lambda_function_associations')
else:
cache_behavior['lambda_function_associations'] = ansible_list_to_cloudfront_list([])
return cache_behavior
except Exception as e:
self.module.fail_json_aws(e, msg="Error validating lambda function associations")
def validate_allowed_methods(self, config, allowed_methods, cache_behavior):
try:
if allowed_methods is not None:
self.validate_required_key('items', 'cache_behavior.allowed_methods.items[]', allowed_methods)
temp_allowed_items = allowed_methods.get('items')
self.validate_is_list(temp_allowed_items, 'cache_behavior.allowed_methods.items')
self.validate_attribute_list_with_allowed_list(temp_allowed_items, 'cache_behavior.allowed_methods.items[]',
self.__valid_methods_allowed_methods)
cached_items = allowed_methods.get('cached_methods')
if 'cached_methods' in allowed_methods:
self.validate_is_list(cached_items, 'cache_behavior.allowed_methods.cached_methods')
                    self.validate_attribute_list_with_allowed_list(cached_items, 'cache_behavior.allowed_methods.cached_methods[]',
self.__valid_methods_cached_methods)
# we don't care if the order of how cloudfront stores the methods differs - preserving existing
# order reduces likelihood of making unnecessary changes
if 'allowed_methods' in config and set(config['allowed_methods']['items']) == set(temp_allowed_items):
cache_behavior['allowed_methods'] = config['allowed_methods']
else:
cache_behavior['allowed_methods'] = ansible_list_to_cloudfront_list(temp_allowed_items)
if cached_items and set(cached_items) == set(config.get('allowed_methods', {}).get('cached_methods', {}).get('items', [])):
cache_behavior['allowed_methods']['cached_methods'] = config['allowed_methods']['cached_methods']
else:
cache_behavior['allowed_methods']['cached_methods'] = ansible_list_to_cloudfront_list(cached_items)
else:
if 'allowed_methods' in config:
cache_behavior['allowed_methods'] = config.get('allowed_methods')
return cache_behavior
except Exception as e:
self.module.fail_json_aws(e, msg="Error validating allowed methods")
def validate_trusted_signers(self, config, trusted_signers, cache_behavior):
try:
if trusted_signers is None:
trusted_signers = {}
if 'items' in trusted_signers:
valid_trusted_signers = ansible_list_to_cloudfront_list(trusted_signers.get('items'))
else:
valid_trusted_signers = dict(quantity=config.get('quantity', 0))
if 'items' in config:
valid_trusted_signers = dict(items=config['items'])
valid_trusted_signers['enabled'] = trusted_signers.get('enabled', config.get('enabled', self.__default_trusted_signers_enabled))
cache_behavior['trusted_signers'] = valid_trusted_signers
return cache_behavior
except Exception as e:
self.module.fail_json_aws(e, msg="Error validating trusted signers")
def validate_viewer_certificate(self, viewer_certificate):
try:
if viewer_certificate is None:
return None
if viewer_certificate.get('cloudfront_default_certificate') and viewer_certificate.get('ssl_support_method') is not None:
self.module.fail_json(msg="viewer_certificate.ssl_support_method should not be specified with viewer_certificate_cloudfront_default" +
"_certificate set to true.")
self.validate_attribute_with_allowed_values(viewer_certificate.get('ssl_support_method'), 'viewer_certificate.ssl_support_method',
self.__valid_viewer_certificate_ssl_support_methods)
self.validate_attribute_with_allowed_values(viewer_certificate.get('minimum_protocol_version'), 'viewer_certificate.minimum_protocol_version',
self.__valid_viewer_certificate_minimum_protocol_versions)
self.validate_attribute_with_allowed_values(viewer_certificate.get('certificate_source'), 'viewer_certificate.certificate_source',
self.__valid_viewer_certificate_certificate_sources)
viewer_certificate = change_dict_key_name(viewer_certificate, 'cloudfront_default_certificate', 'cloud_front_default_certificate')
viewer_certificate = change_dict_key_name(viewer_certificate, 'ssl_support_method', 's_s_l_support_method')
viewer_certificate = change_dict_key_name(viewer_certificate, 'iam_certificate_id', 'i_a_m_certificate_id')
viewer_certificate = change_dict_key_name(viewer_certificate, 'acm_certificate_arn', 'a_c_m_certificate_arn')
return viewer_certificate
except Exception as e:
self.module.fail_json_aws(e, msg="Error validating viewer certificate")
def validate_custom_error_responses(self, config, custom_error_responses, purge_custom_error_responses):
try:
if custom_error_responses is None and not purge_custom_error_responses:
return ansible_list_to_cloudfront_list(config)
self.validate_is_list(custom_error_responses, 'custom_error_responses')
result = list()
            existing_responses = dict((response['error_code'], response) for response in config)
for custom_error_response in custom_error_responses:
self.validate_required_key('error_code', 'custom_error_responses[].error_code', custom_error_response)
custom_error_response = change_dict_key_name(custom_error_response, 'error_caching_min_ttl', 'error_caching_min_t_t_l')
if 'response_code' in custom_error_response:
custom_error_response['response_code'] = str(custom_error_response['response_code'])
if custom_error_response['error_code'] in existing_responses:
del(existing_responses[custom_error_response['error_code']])
result.append(custom_error_response)
if not purge_custom_error_responses:
result.extend(existing_responses.values())
return ansible_list_to_cloudfront_list(result)
except Exception as e:
self.module.fail_json_aws(e, msg="Error validating custom error responses")
def validate_restrictions(self, config, restrictions, purge_restrictions=False):
try:
if restrictions is None:
if purge_restrictions:
return None
else:
return config
self.validate_required_key('geo_restriction', 'restrictions.geo_restriction', restrictions)
geo_restriction = restrictions.get('geo_restriction')
self.validate_required_key('restriction_type', 'restrictions.geo_restriction.restriction_type', geo_restriction)
existing_restrictions = config.get('geo_restriction', {}).get(geo_restriction['restriction_type'], {}).get('items', [])
geo_restriction_items = geo_restriction.get('items')
if not purge_restrictions:
geo_restriction_items.extend([rest for rest in existing_restrictions if
rest not in geo_restriction_items])
valid_restrictions = ansible_list_to_cloudfront_list(geo_restriction_items)
valid_restrictions['restriction_type'] = geo_restriction.get('restriction_type')
return {'geo_restriction': valid_restrictions}
except Exception as e:
self.module.fail_json_aws(e, msg="Error validating restrictions")
def validate_distribution_config_parameters(self, config, default_root_object, ipv6_enabled, http_version, web_acl_id):
try:
config['default_root_object'] = default_root_object or config.get('default_root_object', '')
config['is_i_p_v_6_enabled'] = ipv6_enabled or config.get('i_p_v_6_enabled', self.__default_ipv6_enabled)
if http_version is not None or config.get('http_version'):
self.validate_attribute_with_allowed_values(http_version, 'http_version', self.__valid_http_versions)
config['http_version'] = http_version or config.get('http_version')
if web_acl_id or config.get('web_a_c_l_id'):
config['web_a_c_l_id'] = web_acl_id or config.get('web_a_c_l_id')
return config
except Exception as e:
self.module.fail_json_aws(e, msg="Error validating distribution config parameters")
def validate_common_distribution_parameters(self, config, enabled, aliases, logging, price_class, purge_aliases=False):
try:
if config is None:
config = {}
if aliases is not None:
if not purge_aliases:
aliases.extend([alias for alias in config.get('aliases', {}).get('items', [])
if alias not in aliases])
config['aliases'] = ansible_list_to_cloudfront_list(aliases)
if logging is not None:
config['logging'] = self.validate_logging(logging)
config['enabled'] = enabled or config.get('enabled', self.__default_distribution_enabled)
if price_class is not None:
self.validate_attribute_with_allowed_values(price_class, 'price_class', self.__valid_price_classes)
config['price_class'] = price_class
return config
except Exception as e:
self.module.fail_json_aws(e, msg="Error validating common distribution parameters")
def validate_comment(self, config, comment):
config['comment'] = comment or config.get('comment', "Distribution created by Ansible with datetime stamp " + self.__default_datetime_string)
return config
def validate_caller_reference(self, caller_reference):
return caller_reference or self.__default_datetime_string
def get_first_origin_id_for_default_cache_behavior(self, valid_origins):
try:
if valid_origins is not None:
valid_origins_list = valid_origins.get('items')
if valid_origins_list is not None and isinstance(valid_origins_list, list) and len(valid_origins_list) > 0:
return str(valid_origins_list[0].get('id'))
self.module.fail_json(msg="There are no valid origins from which to specify a target_origin_id for the default_cache_behavior configuration.")
except Exception as e:
self.module.fail_json_aws(e, msg="Error getting first origin_id for default cache behavior")
def validate_attribute_list_with_allowed_list(self, attribute_list, attribute_list_name, allowed_list):
try:
self.validate_is_list(attribute_list, attribute_list_name)
if (isinstance(allowed_list, list) and set(attribute_list) not in allowed_list or
isinstance(allowed_list, set) and not set(allowed_list).issuperset(attribute_list)):
self.module.fail_json(msg='The attribute list {0} must be one of [{1}]'.format(attribute_list_name, ' '.join(str(a) for a in allowed_list)))
except Exception as e:
self.module.fail_json_aws(e, msg="Error validating attribute list with allowed value list")
def validate_attribute_with_allowed_values(self, attribute, attribute_name, allowed_list):
if attribute is not None and attribute not in allowed_list:
self.module.fail_json(msg='The attribute {0} must be one of [{1}]'.format(attribute_name, ' '.join(str(a) for a in allowed_list)))
def validate_distribution_from_caller_reference(self, caller_reference):
try:
distributions = self.__cloudfront_facts_mgr.list_distributions(False)
distribution_name = 'Distribution'
distribution_config_name = 'DistributionConfig'
distribution_ids = [dist.get('Id') for dist in distributions]
for distribution_id in distribution_ids:
config = self.__cloudfront_facts_mgr.get_distribution(distribution_id)
distribution = config.get(distribution_name)
if distribution is not None:
distribution_config = distribution.get(distribution_config_name)
if distribution_config is not None and distribution_config.get('CallerReference') == caller_reference:
distribution['DistributionConfig'] = distribution_config
return distribution
except Exception as e:
self.module.fail_json_aws(e, msg="Error validating distribution from caller reference")
def validate_distribution_from_aliases_caller_reference(self, distribution_id, aliases, caller_reference):
try:
if caller_reference is not None:
return self.validate_distribution_from_caller_reference(caller_reference)
else:
if aliases:
distribution_id = self.validate_distribution_id_from_alias(aliases)
if distribution_id:
return self.__cloudfront_facts_mgr.get_distribution(distribution_id)
return None
except Exception as e:
self.module.fail_json_aws(e, msg="Error validating distribution_id from alias, aliases and caller reference")
def validate_distribution_id_from_alias(self, aliases):
distributions = self.__cloudfront_facts_mgr.list_distributions(False)
if distributions:
for distribution in distributions:
distribution_aliases = distribution.get('Aliases', {}).get('Items', [])
if set(aliases) & set(distribution_aliases):
return distribution['Id']
return None
def wait_until_processed(self, client, wait_timeout, distribution_id, caller_reference):
if distribution_id is None:
            distribution_id = self.validate_distribution_from_caller_reference(caller_reference=caller_reference)['Id']
try:
waiter = client.get_waiter('distribution_deployed')
attempts = 1 + int(wait_timeout / 60)
waiter.wait(Id=distribution_id, WaiterConfig={'MaxAttempts': attempts})
except botocore.exceptions.WaiterError as e:
self.module.fail_json(msg="Timeout waiting for cloudfront action. Waited for {0} seconds before timeout. "
"Error: {1}".format(to_text(wait_timeout), to_native(e)))
except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
self.module.fail_json_aws(e, msg="Error getting distribution {0}".format(distribution_id))
def main():
argument_spec = ec2_argument_spec()
argument_spec.update(dict(
state=dict(choices=['present', 'absent'], default='present'),
caller_reference=dict(),
comment=dict(),
distribution_id=dict(),
e_tag=dict(),
tags=dict(type='dict', default={}),
purge_tags=dict(type='bool', default=False),
alias=dict(),
aliases=dict(type='list', default=[]),
purge_aliases=dict(type='bool', default=False),
default_root_object=dict(),
origins=dict(type='list'),
purge_origins=dict(type='bool', default=False),
default_cache_behavior=dict(type='dict'),
cache_behaviors=dict(type='list'),
purge_cache_behaviors=dict(type='bool', default=False),
custom_error_responses=dict(type='list'),
purge_custom_error_responses=dict(type='bool', default=False),
logging=dict(type='dict'),
price_class=dict(),
enabled=dict(type='bool'),
viewer_certificate=dict(type='dict'),
restrictions=dict(type='dict'),
web_acl_id=dict(),
http_version=dict(),
ipv6_enabled=dict(type='bool'),
default_origin_domain_name=dict(),
default_origin_path=dict(),
wait=dict(default=False, type='bool'),
wait_timeout=dict(default=1800, type='int')
))
result = {}
changed = True
module = AnsibleAWSModule(
argument_spec=argument_spec,
supports_check_mode=False,
mutually_exclusive=[
['distribution_id', 'alias'],
['default_origin_domain_name', 'distribution_id'],
['default_origin_domain_name', 'alias'],
]
)
region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module, boto3=True)
client = boto3_conn(module, conn_type='client', resource='cloudfront', region=region, endpoint=ec2_url, **aws_connect_kwargs)
validation_mgr = CloudFrontValidationManager(module)
state = module.params.get('state')
caller_reference = module.params.get('caller_reference')
comment = module.params.get('comment')
e_tag = module.params.get('e_tag')
tags = module.params.get('tags')
purge_tags = module.params.get('purge_tags')
distribution_id = module.params.get('distribution_id')
alias = module.params.get('alias')
aliases = module.params.get('aliases')
purge_aliases = module.params.get('purge_aliases')
default_root_object = module.params.get('default_root_object')
origins = module.params.get('origins')
purge_origins = module.params.get('purge_origins')
default_cache_behavior = module.params.get('default_cache_behavior')
cache_behaviors = module.params.get('cache_behaviors')
purge_cache_behaviors = module.params.get('purge_cache_behaviors')
custom_error_responses = module.params.get('custom_error_responses')
purge_custom_error_responses = module.params.get('purge_custom_error_responses')
logging = module.params.get('logging')
price_class = module.params.get('price_class')
enabled = module.params.get('enabled')
viewer_certificate = module.params.get('viewer_certificate')
restrictions = module.params.get('restrictions')
purge_restrictions = module.params.get('purge_restrictions')
web_acl_id = module.params.get('web_acl_id')
http_version = module.params.get('http_version')
ipv6_enabled = module.params.get('ipv6_enabled')
default_origin_domain_name = module.params.get('default_origin_domain_name')
default_origin_path = module.params.get('default_origin_path')
wait = module.params.get('wait')
wait_timeout = module.params.get('wait_timeout')
if alias and alias not in aliases:
aliases.append(alias)
distribution = validation_mgr.validate_distribution_from_aliases_caller_reference(distribution_id, aliases, caller_reference)
update = state == 'present' and distribution
create = state == 'present' and not distribution
delete = state == 'absent' and distribution
if not (update or create or delete):
module.exit_json(changed=False)
if update or delete:
config = distribution['Distribution']['DistributionConfig']
e_tag = distribution['ETag']
distribution_id = distribution['Distribution']['Id']
else:
config = dict()
if update:
config = camel_dict_to_snake_dict(config, reversible=True)
if create or update:
config = validation_mgr.validate_common_distribution_parameters(config, enabled, aliases, logging, price_class, purge_aliases)
config = validation_mgr.validate_distribution_config_parameters(config, default_root_object, ipv6_enabled, http_version, web_acl_id)
config['origins'] = validation_mgr.validate_origins(client, config.get('origins', {}).get('items', []), origins, default_origin_domain_name,
default_origin_path, create, purge_origins)
config['cache_behaviors'] = validation_mgr.validate_cache_behaviors(config.get('cache_behaviors', {}).get('items', []),
cache_behaviors, config['origins'], purge_cache_behaviors)
config['default_cache_behavior'] = validation_mgr.validate_cache_behavior(config.get('default_cache_behavior', {}),
default_cache_behavior, config['origins'], True)
config['custom_error_responses'] = validation_mgr.validate_custom_error_responses(config.get('custom_error_responses', {}).get('items', []),
custom_error_responses, purge_custom_error_responses)
valid_restrictions = validation_mgr.validate_restrictions(config.get('restrictions', {}), restrictions, purge_restrictions)
if valid_restrictions:
config['restrictions'] = valid_restrictions
valid_viewer_certificate = validation_mgr.validate_viewer_certificate(viewer_certificate)
config = merge_validation_into_config(config, valid_viewer_certificate, 'viewer_certificate')
config = validation_mgr.validate_comment(config, comment)
config = snake_dict_to_camel_dict(config, capitalize_first=True)
if create:
config['CallerReference'] = validation_mgr.validate_caller_reference(caller_reference)
result = create_distribution(client, module, config, ansible_dict_to_boto3_tag_list(tags))
result = camel_dict_to_snake_dict(result)
result['tags'] = list_tags_for_resource(client, module, result['arn'])
if delete:
if config['Enabled']:
config['Enabled'] = False
result = update_distribution(client, module, config, distribution_id, e_tag)
validation_mgr.wait_until_processed(client, wait_timeout, distribution_id, config.get('CallerReference'))
distribution = validation_mgr.validate_distribution_from_aliases_caller_reference(distribution_id, aliases, caller_reference)
# e_tag = distribution['ETag']
result = delete_distribution(client, module, distribution)
if update:
changed = config != distribution['Distribution']['DistributionConfig']
if changed:
result = update_distribution(client, module, config, distribution_id, e_tag)
else:
result = distribution['Distribution']
existing_tags = list_tags_for_resource(client, module, result['ARN'])
distribution['Distribution']['DistributionConfig']['tags'] = existing_tags
changed |= update_tags(client, module, existing_tags, tags, purge_tags, result['ARN'])
result = camel_dict_to_snake_dict(result)
result['distribution_config']['tags'] = config['tags'] = list_tags_for_resource(client, module, result['arn'])
result['diff'] = dict()
diff = recursive_diff(distribution['Distribution']['DistributionConfig'], config)
if diff:
result['diff']['before'] = diff[0]
result['diff']['after'] = diff[1]
if wait and (create or update):
validation_mgr.wait_until_processed(client, wait_timeout, distribution_id, config.get('CallerReference'))
if 'distribution_config' in result:
result.update(result['distribution_config'])
del(result['distribution_config'])
module.exit_json(changed=changed, **result)
if __name__ == '__main__':
main()
| gpl-3.0 |
samdoran/ansible | lib/ansible/utils/module_docs_fragments/eos.py | 87 | 5493 | #
# (c) 2015, Peter Sprygada <psprygada@ansible.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
class ModuleDocFragment(object):
# Standard files documentation fragment
DOCUMENTATION = """
options:
authorize:
description:
- Instructs the module to enter privileged mode on the remote device
before sending any commands. If not specified, the device will
attempt to execute all commands in non-privileged mode. If the value
is not specified in the task, the value of environment variable
C(ANSIBLE_NET_AUTHORIZE) will be used instead.
default: no
choices: ['yes', 'no']
auth_pass:
description:
- Specifies the password to use if required to enter privileged mode
on the remote device. If I(authorize) is false, then this argument
does nothing. If the value is not specified in the task, the value of
environment variable C(ANSIBLE_NET_AUTH_PASS) will be used instead.
default: none
provider:
description:
- A dict object containing connection details.
default: null
suboptions:
host:
description:
- Specifies the DNS host name or address for connecting to the remote
device over the specified transport. The value of host is used as
the destination address for the transport.
required: true
port:
description:
- Specifies the port to use when building the connection to the remote
device. This value applies to either I(cli) or I(eapi). The port
value will default to the appropriate transport common port if
none is provided in the task. (cli=22, http=80, https=443).
default: 0 (use common port)
username:
description:
- Configures the username to use to authenticate the connection to
the remote device. This value is used to authenticate
either the CLI login or the eAPI authentication depending on which
transport is used. If the value is not specified in the task, the
value of environment variable C(ANSIBLE_NET_USERNAME) will be used instead.
password:
description:
- Specifies the password to use to authenticate the connection to
the remote device. This is a common argument used for either I(cli)
or I(eapi) transports. If the value is not specified in the task, the
value of environment variable C(ANSIBLE_NET_PASSWORD) will be used instead.
default: null
timeout:
description:
- Specifies the timeout in seconds for communicating with the network device
for either connecting or sending commands. If the timeout is
exceeded before the operation is completed, the module will error.
default: 10
ssh_keyfile:
description:
- Specifies the SSH keyfile to use to authenticate the connection to
the remote device. This argument is only used for I(cli) transports.
If the value is not specified in the task, the value of environment
variable C(ANSIBLE_NET_SSH_KEYFILE) will be used instead.
authorize:
description:
- Instructs the module to enter privileged mode on the remote device
before sending any commands. If not specified, the device will
attempt to execute all commands in non-privileged mode. If the value
is not specified in the task, the value of environment variable
C(ANSIBLE_NET_AUTHORIZE) will be used instead.
default: no
choices: ['yes', 'no']
auth_pass:
description:
- Specifies the password to use if required to enter privileged mode
on the remote device. If I(authorize) is false, then this argument
does nothing. If the value is not specified in the task, the value of
environment variable C(ANSIBLE_NET_AUTH_PASS) will be used instead.
default: none
transport:
description:
- Configures the transport connection to use when connecting to the
remote device.
required: true
choices:
- eapi
- cli
default: cli
use_ssl:
description:
- Configures the I(transport) to use SSL if set to true only when the
C(transport=eapi). If the transport
argument is not eapi, this value is ignored.
default: yes
choices: ['yes', 'no']
validate_certs:
description:
- If C(no), SSL certificates will not be validated. This should only be used
on personally controlled sites using self-signed certificates. If the transport
argument is not eapi, this value is ignored.
choices: ['yes', 'no']
"""
| gpl-3.0 |
garyjyao1/ansible | lib/ansible/modules/core/cloud/amazon/iam.py | 29 | 28823 | #!/usr/bin/python
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = '''
---
module: iam
short_description: Manage IAM users, groups, roles and keys
description:
- Allows for the management of IAM users, groups, roles and access keys.
version_added: "2.0"
options:
iam_type:
description:
- Type of IAM resource
required: true
default: null
choices: [ "user", "group", "role"]
name:
description:
- Name of IAM resource to create or identify
required: true
new_name:
description:
- When state is update, will replace name with new_name on IAM resource
required: false
default: null
new_path:
description:
- When state is update, will replace the path with new_path on the IAM resource
required: false
default: null
state:
description:
- Whether to create, delete or update the IAM resource. Note, roles cannot be updated.
required: true
default: null
choices: [ "present", "absent", "update" ]
path:
description:
- When creating or updating, specify the desired path of the resource. If state is present, it will replace the current path to match what is passed in when they do not match.
required: false
default: "/"
access_key_state:
description:
- When type is user, it creates, removes, deactivates or activates a user's access key(s). Note that actions apply only to keys specified.
required: false
default: null
choices: [ "create", "remove", "active", "inactive"]
key_count:
description:
- When access_key_state is create it will ensure this quantity of keys are present. Defaults to 1.
required: false
default: '1'
access_key_ids:
description:
      - A list of the keys that you want impacted by the access_key_state parameter.
groups:
description:
- A list of groups the user should belong to. When update, will gracefully remove groups not listed.
required: false
default: null
password:
description:
      - When type is user and state is present, define the user's login password. Also works with update. Note that this always returns changed.
required: false
default: null
update_password:
required: false
default: always
choices: ['always', 'on_create']
description:
- C(always) will update passwords if they differ. C(on_create) will only set the password for newly created users.
aws_secret_key:
description:
- AWS secret key. If not set then the value of the AWS_SECRET_KEY environment variable is used.
required: false
default: null
aliases: [ 'ec2_secret_key', 'secret_key' ]
aws_access_key:
description:
- AWS access key. If not set then the value of the AWS_ACCESS_KEY environment variable is used.
required: false
default: null
aliases: [ 'ec2_access_key', 'access_key' ]
notes:
  - 'Currently boto does not support the removal of Managed Policies; the module will error out if your user/group/role has managed policies when you try to set state=absent. They will need to be removed manually.'
author:
- "Jonathan I. Davila (@defionscode)"
- "Paul Seiffert (@seiffert)"
extends_documentation_fragment: aws
'''
EXAMPLES = '''
# Basic user creation example
tasks:
- name: Create two new IAM users with API keys
iam:
iam_type: user
name: "{{ item }}"
state: present
password: "{{ temp_pass }}"
access_key_state: create
with_items:
- jcleese
- mpython
# Advanced example, create two new groups and add the pre-existing user
# jdavila to both groups.
task:
- name: Create Two Groups, Mario and Luigi
iam:
iam_type: group
name: "{{ item }}"
state: present
with_items:
- Mario
- Luigi
register: new_groups
- name:
iam:
iam_type: user
name: jdavila
state: update
groups: "{{ item.created_group.group_name }}"
with_items: new_groups.results
'''
import json
import itertools
import sys
try:
import boto
import boto.iam
import boto.ec2
HAS_BOTO = True
except ImportError:
HAS_BOTO = False
def boto_exception(err):
'''generic error message handler'''
if hasattr(err, 'error_message'):
error = err.error_message
elif hasattr(err, 'message'):
error = err.message
else:
error = '%s: %s' % (Exception, err)
return error
def create_user(module, iam, name, pwd, path, key_state, key_count):
key_qty = 0
keys = []
try:
user_meta = iam.create_user(
name, path).create_user_response.create_user_result.user
changed = True
if pwd is not None:
pwd = iam.create_login_profile(name, pwd)
if key_state in ['create']:
if key_count:
while key_count > key_qty:
keys.append(iam.create_access_key(
user_name=name).create_access_key_response.\
create_access_key_result.\
access_key)
key_qty += 1
else:
keys = None
except boto.exception.BotoServerError, err:
module.fail_json(changed=False, msg=str(err))
else:
user_info = dict(created_user=user_meta, password=pwd, access_keys=keys)
return (user_info, changed)
def delete_user(module, iam, name):
try:
current_keys = [ck['access_key_id'] for ck in
iam.get_all_access_keys(name).list_access_keys_result.access_key_metadata]
for key in current_keys:
iam.delete_access_key(key, name)
del_meta = iam.delete_user(name).delete_user_response
except boto.exception.BotoServerError, err:
error_msg = boto_exception(err)
if ('must detach all policies first') in error_msg:
for policy in iam.get_all_user_policies(name).list_user_policies_result.policy_names:
iam.delete_user_policy(name, policy)
try:
del_meta = iam.delete_user(name)
except boto.exception.BotoServerError, err:
error_msg = boto_exception(err)
if ('must detach all policies first') in error_msg:
module.fail_json(changed=changed, msg="All inline polices have been removed. Though it appears"
"that %s has Managed Polices. This is not "
"currently supported by boto. Please detach the polices "
"through the console and try again." % name)
else:
module.fail_json(changed=changed, msg=str(err))
else:
changed = True
return del_meta, name, changed
else:
changed = True
return del_meta, name, changed
def update_user(module, iam, name, new_name, new_path, key_state, key_count, keys, pwd, updated):
changed = False
name_change = False
if updated and new_name:
name = new_name
try:
current_keys, status = \
[ck['access_key_id'] for ck in
iam.get_all_access_keys(name).list_access_keys_result.access_key_metadata],\
[ck['status'] for ck in
iam.get_all_access_keys(name).list_access_keys_result.access_key_metadata]
key_qty = len(current_keys)
except boto.exception.BotoServerError, err:
error_msg = boto_exception(err)
if 'cannot be found' in error_msg and updated:
current_keys, status = \
[ck['access_key_id'] for ck in
iam.get_all_access_keys(new_name).list_access_keys_result.access_key_metadata],\
[ck['status'] for ck in
iam.get_all_access_keys(new_name).list_access_keys_result.access_key_metadata]
name = new_name
else:
module.fail_json(changed=False, msg=str(err))
updated_key_list = {}
if new_name or new_path:
c_path = iam.get_user(name).get_user_result.user['path']
if (name != new_name) or (c_path != new_path):
changed = True
try:
if not updated:
user = iam.update_user(
name, new_user_name=new_name, new_path=new_path).update_user_response.response_metadata
else:
user = iam.update_user(
name, new_path=new_path).update_user_response.response_metadata
user['updates'] = dict(
old_username=name, new_username=new_name, old_path=c_path, new_path=new_path)
except boto.exception.BotoServerError, err:
error_msg = boto_exception(err)
module.fail_json(changed=False, msg=str(err))
else:
if not updated:
name_change = True
if pwd:
try:
iam.update_login_profile(name, pwd)
changed = True
except boto.exception.BotoServerError:
try:
iam.create_login_profile(name, pwd)
changed = True
except boto.exception.BotoServerError, err:
error_msg = boto_exception(str(err))
if 'Password does not conform to the account password policy' in error_msg:
module.fail_json(changed=False, msg="Passsword doesn't conform to policy")
else:
module.fail_json(msg=error_msg)
if key_state == 'create':
try:
while key_count > key_qty:
new_key = iam.create_access_key(
user_name=name).create_access_key_response.create_access_key_result.access_key
key_qty += 1
changed = True
except boto.exception.BotoServerError, err:
module.fail_json(changed=False, msg=str(err))
if keys and key_state:
for access_key in keys:
if access_key in current_keys:
for current_key, current_key_state in zip(current_keys, status):
if key_state != current_key_state.lower():
try:
iam.update_access_key(
access_key, key_state.capitalize(), user_name=name)
except boto.exception.BotoServerError, err:
module.fail_json(changed=False, msg=str(err))
else:
changed = True
if key_state == 'remove':
try:
iam.delete_access_key(access_key, user_name=name)
except boto.exception.BotoServerError, err:
module.fail_json(changed=False, msg=str(err))
else:
changed = True
try:
final_keys, final_key_status = \
[ck['access_key_id'] for ck in
iam.get_all_access_keys(name).
list_access_keys_result.
access_key_metadata],\
[ck['status'] for ck in
iam.get_all_access_keys(name).
list_access_keys_result.
access_key_metadata]
except boto.exception.BotoServerError, err:
module.fail_json(changed=changed, msg=str(err))
for fk, fks in zip(final_keys, final_key_status):
updated_key_list.update({fk: fks})
return name_change, updated_key_list, changed
def set_users_groups(module, iam, name, groups, updated=None,
new_name=None):
""" Sets groups for a user, will purge groups not explictly passed, while
retaining pre-existing groups that also are in the new list.
"""
changed = False
if updated:
name = new_name
try:
orig_users_groups = [og['group_name'] for og in iam.get_groups_for_user(
name).list_groups_for_user_result.groups]
remove_groups = [
rg for rg in frozenset(orig_users_groups).difference(groups)]
new_groups = [
ng for ng in frozenset(groups).difference(orig_users_groups)]
except boto.exception.BotoServerError, err:
module.fail_json(changed=changed, msg=str(err))
else:
if len(orig_users_groups) > 0:
for new in new_groups:
iam.add_user_to_group(new, name)
for rm in remove_groups:
iam.remove_user_from_group(rm, name)
else:
for group in groups:
try:
iam.add_user_to_group(group, name)
except boto.exception.BotoServerError, err:
error_msg = boto_exception(err)
if ('The group with name %s cannot be found.' % group) in error_msg:
module.fail_json(changed=False, msg="Group %s doesn't exist" % group)
if len(remove_groups) > 0 or len(new_groups) > 0:
changed = True
return (groups, changed)
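# A minimal sketch of the group reconciliation performed above, assuming a user that
# currently belongs to ['admins', 'devs'] and a task that passes groups=['devs', 'qa']:
#   remove_groups -> ['admins']   (present before, not requested)
#   new_groups    -> ['qa']       (requested, not present before)
# 'devs' is left untouched, and changed is True because membership was modified.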
def create_group(module=None, iam=None, name=None, path=None):
changed = False
try:
iam.create_group(
name, path).create_group_response.create_group_result.group
except boto.exception.BotoServerError, err:
module.fail_json(changed=changed, msg=str(err))
else:
changed = True
return name, changed
def delete_group(module=None, iam=None, name=None):
changed = False
try:
iam.delete_group(name)
except boto.exception.BotoServerError, err:
error_msg = boto_exception(err)
if ('must detach all policies first') in error_msg:
for policy in iam.get_all_group_policies(name).list_group_policies_result.policy_names:
iam.delete_group_policy(name, policy)
try:
iam.delete_group(name)
except boto.exception.BotoServerError, err:
error_msg = boto_exception(err)
if ('must detach all policies first') in error_msg:
module.fail_json(changed=changed, msg="All inline polices have been removed. Though it appears"
"that %s has Managed Polices. This is not "
"currently supported by boto. Please detach the polices "
"through the console and try again." % name)
else:
module.fail_json(changed=changed, msg=str(err))
else:
changed = True
else:
changed = True
return changed, name
def update_group(module=None, iam=None, name=None, new_name=None, new_path=None):
changed = False
try:
current_group_path = iam.get_group(
name).get_group_response.get_group_result.group['path']
if new_path:
if current_group_path != new_path:
iam.update_group(name, new_path=new_path)
changed = True
if new_name:
if name != new_name:
iam.update_group(name, new_group_name=new_name, new_path=new_path)
changed = True
name = new_name
except boto.exception.BotoServerError, err:
module.fail_json(changed=changed, msg=str(err))
return changed, name, new_path, current_group_path
def create_role(module, iam, name, path, role_list, prof_list):
changed = False
try:
if name not in role_list:
changed = True
iam.create_role(
name, path=path).create_role_response.create_role_result.role.role_name
if name not in prof_list:
iam.create_instance_profile(name, path=path)
iam.add_role_to_instance_profile(name, name)
except boto.exception.BotoServerError, err:
module.fail_json(changed=changed, msg=str(err))
else:
updated_role_list = [rl['role_name'] for rl in iam.list_roles().list_roles_response.
list_roles_result.roles]
return changed, updated_role_list
def delete_role(module, iam, name, role_list, prof_list):
changed = False
try:
if name in role_list:
cur_ins_prof = [rp['instance_profile_name'] for rp in
iam.list_instance_profiles_for_role(name).
list_instance_profiles_for_role_result.
instance_profiles]
for profile in cur_ins_prof:
iam.remove_role_from_instance_profile(profile, name)
try:
iam.delete_role(name)
except boto.exception.BotoServerError, err:
error_msg = boto_exception(err)
if ('must detach all policies first') in error_msg:
for policy in iam.list_role_policies(name).list_role_policies_result.policy_names:
iam.delete_role_policy(name, policy)
try:
iam.delete_role(name)
except boto.exception.BotoServerError, err:
error_msg = boto_exception(err)
if ('must detach all policies first') in error_msg:
module.fail_json(changed=changed, msg="All inline polices have been removed. Though it appears"
"that %s has Managed Polices. This is not "
"currently supported by boto. Please detach the polices "
"through the console and try again." % name)
else:
module.fail_json(changed=changed, msg=str(err))
else:
changed = True
else:
changed = True
for prof in prof_list:
if name == prof:
iam.delete_instance_profile(name)
except boto.exception.BotoServerError, err:
module.fail_json(changed=changed, msg=str(err))
else:
updated_role_list = [rl['role_name'] for rl in iam.list_roles().list_roles_response.
list_roles_result.roles]
return changed, updated_role_list
def main():
argument_spec = ec2_argument_spec()
argument_spec.update(dict(
iam_type=dict(
default=None, required=True, choices=['user', 'group', 'role']),
groups=dict(type='list', default=None, required=False),
state=dict(
default=None, required=True, choices=['present', 'absent', 'update']),
password=dict(default=None, required=False, no_log=True),
update_password=dict(default='always', required=False, choices=['always', 'on_create']),
access_key_state=dict(default=None, required=False, choices=[
'active', 'inactive', 'create', 'remove',
'Active', 'Inactive', 'Create', 'Remove']),
access_key_ids=dict(type='list', default=None, required=False),
key_count=dict(type='int', default=1, required=False),
name=dict(default=None, required=False),
new_name=dict(default=None, required=False),
path=dict(default='/', required=False),
new_path=dict(default=None, required=False)
)
)
module = AnsibleModule(
argument_spec=argument_spec,
mutually_exclusive=[],
)
if not HAS_BOTO:
module.fail_json(msg='This module requires boto, please install it')
state = module.params.get('state').lower()
iam_type = module.params.get('iam_type').lower()
groups = module.params.get('groups')
name = module.params.get('name')
new_name = module.params.get('new_name')
password = module.params.get('password')
update_pw = module.params.get('update_password')
path = module.params.get('path')
new_path = module.params.get('new_path')
key_count = module.params.get('key_count')
key_state = module.params.get('access_key_state')
    key_ids = module.params.get('access_key_ids')
    if key_state:
        key_state = key_state.lower()
        if any([n in key_state for n in ['active', 'inactive']]) and not key_ids:
            module.fail_json(changed=False, msg="At least one access key has to be defined in order"
                                                " to use 'active' or 'inactive'")
if iam_type == 'user' and module.params.get('password') is not None:
pwd = module.params.get('password')
elif iam_type != 'user' and module.params.get('password') is not None:
module.fail_json(msg="a password is being specified when the iam_type "
"is not user. Check parameters")
else:
pwd = None
if iam_type != 'user' and (module.params.get('access_key_state') is not None or
module.params.get('access_key_ids') is not None):
module.fail_json(msg="the IAM type must be user, when IAM access keys "
"are being modified. Check parameters")
if iam_type == 'role' and state == 'update':
module.fail_json(changed=False, msg="iam_type: role, cannot currently be updated, "
"please specificy present or absent")
region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module)
try:
iam = boto.iam.connection.IAMConnection(**aws_connect_kwargs)
except boto.exception.NoAuthHandlerFound, e:
module.fail_json(msg=str(e))
result = {}
changed = False
try:
orig_group_list = [gl['group_name'] for gl in iam.get_all_groups().
list_groups_result.
groups]
orig_user_list = [ul['user_name'] for ul in iam.get_all_users().
list_users_result.
users]
orig_role_list = [rl['role_name'] for rl in iam.list_roles().list_roles_response.
list_roles_result.
roles]
orig_prof_list = [ap['instance_profile_name'] for ap in iam.list_instance_profiles().
list_instance_profiles_response.
list_instance_profiles_result.
instance_profiles]
except boto.exception.BotoServerError, err:
module.fail_json(msg=err.message)
if iam_type == 'user':
been_updated = False
user_groups = None
user_exists = any([n in [name, new_name] for n in orig_user_list])
if user_exists:
current_path = iam.get_user(name).get_user_result.user['path']
if not new_path and current_path != path:
new_path = path
path = current_path
if state == 'present' and not user_exists and not new_name:
(meta, changed) = create_user(
module, iam, name, password, path, key_state, key_count)
keys = iam.get_all_access_keys(name).list_access_keys_result.\
access_key_metadata
if groups:
(user_groups, changed) = set_users_groups(
module, iam, name, groups, been_updated, new_name)
module.exit_json(
user_meta=meta, groups=user_groups, keys=keys, changed=changed)
elif state in ['present', 'update'] and user_exists:
if update_pw == 'on_create':
password = None
if name not in orig_user_list and new_name in orig_user_list:
been_updated = True
name_change, key_list, user_changed = update_user(
module, iam, name, new_name, new_path, key_state, key_count, key_ids, password, been_updated)
if name_change and new_name:
orig_name = name
name = new_name
if groups:
user_groups, groups_changed = set_users_groups(
module, iam, name, groups, been_updated, new_name)
if groups_changed == user_changed:
changed = groups_changed
else:
changed = True
else:
changed = user_changed
if new_name and new_path:
module.exit_json(changed=changed, groups=user_groups, old_user_name=orig_name,
new_user_name=new_name, old_path=path, new_path=new_path, keys=key_list)
elif new_name and not new_path and not been_updated:
module.exit_json(
changed=changed, groups=user_groups, old_user_name=orig_name, new_user_name=new_name, keys=key_list)
elif new_name and not new_path and been_updated:
module.exit_json(
changed=changed, groups=user_groups, user_name=new_name, keys=key_list, key_state=key_state)
elif not new_name and new_path:
module.exit_json(
changed=changed, groups=user_groups, user_name=name, old_path=path, new_path=new_path, keys=key_list)
else:
module.exit_json(
changed=changed, groups=user_groups, user_name=name, keys=key_list)
elif state == 'update' and not user_exists:
module.fail_json(
msg="The user %s does not exit. No update made." % name)
elif state == 'absent':
if name in orig_user_list:
set_users_groups(module, iam, name, '')
del_meta, name, changed = delete_user(module, iam, name)
module.exit_json(
deletion_meta=del_meta, deleted_user=name, changed=changed)
else:
module.exit_json(
changed=False, msg="User %s is already absent from your AWS IAM users" % name)
elif iam_type == 'group':
group_exists = name in orig_group_list
if state == 'present' and not group_exists:
new_group, changed = create_group(iam=iam, name=name, path=path)
module.exit_json(changed=changed, group_name=new_group)
elif state in ['present', 'update'] and group_exists:
changed, updated_name, updated_path, cur_path = update_group(
iam=iam, name=name, new_name=new_name, new_path=new_path)
if new_path and new_name:
module.exit_json(changed=changed, old_group_name=name,
new_group_name=updated_name, old_path=cur_path,
new_group_path=updated_path)
if new_path and not new_name:
module.exit_json(changed=changed, group_name=name,
old_path=cur_path,
new_group_path=updated_path)
if not new_path and new_name:
module.exit_json(changed=changed, old_group_name=name,
new_group_name=updated_name, group_path=cur_path)
if not new_path and not new_name:
module.exit_json(
changed=changed, group_name=name, group_path=cur_path)
elif state == 'update' and not group_exists:
module.fail_json(
changed=changed, msg="Update Failed. Group %s doesn't seem to exit!" % name)
elif state == 'absent':
if name in orig_group_list:
removed_group, changed = delete_group(iam=iam, name=name)
module.exit_json(changed=changed, delete_group=removed_group)
else:
module.exit_json(changed=changed, msg="Group already absent")
elif iam_type == 'role':
role_list = []
if state == 'present':
changed, role_list = create_role(
module, iam, name, path, orig_role_list, orig_prof_list)
elif state == 'absent':
changed, role_list = delete_role(
module, iam, name, orig_role_list, orig_prof_list)
elif state == 'update':
module.fail_json(
changed=False, msg='Role update not currently supported by boto.')
module.exit_json(changed=changed, roles=role_list)
from ansible.module_utils.basic import *
from ansible.module_utils.ec2 import *
main()
| gpl-3.0 |
JanNash/sms-tools | lectures/06-Harmonic-model/plots-code/spectral-peaks.py | 22 | 1161 | import numpy as np
import matplotlib.pyplot as plt
from scipy.signal import hamming, triang, blackmanharris
import math
import sys, os, functools, time
sys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__)), '../../../software/models/'))
import dftModel as DFT
import utilFunctions as UF
(fs, x) = UF.wavread('../../../sounds/oboe-A4.wav')
N = 512*2
M = 511
t = -60
w = np.hamming(M)
start = int(.8*fs)
hN = N/2
hM = (M+1)/2
x1 = x[start:start+M]
mX, pX = DFT.dftAnal(x1, w, N)
ploc = UF.peakDetection(mX, t)
iploc, ipmag, ipphase = UF.peakInterp(mX, pX, ploc)
pmag = mX[ploc]
freqaxis = fs*np.arange(mX.size)/float(N)
plt.figure(1, figsize=(9, 6))
plt.subplot (2,1,1)
plt.plot(freqaxis, mX,'r', lw=1.5)
plt.axis([0,7000,-80,max(mX)+1])
plt.plot(fs * iploc / N, ipmag, marker='x', color='b', linestyle='', markeredgewidth=1.5)
plt.title('mX + peaks (oboe-A4.wav)')
plt.subplot (2,1,2)
plt.plot(freqaxis, pX,'c', lw=1.5)
plt.axis([0,7000, min(pX),10])
plt.plot(fs * iploc/N, ipphase, marker='x', color='b', linestyle='', markeredgewidth=1.5)
plt.title('pX + peaks')
plt.tight_layout()
plt.savefig('spectral-peaks.png')
plt.show()
| agpl-3.0 |
SYNchroACK/VulnPryer | shiploader.py | 3 | 7960 | #!/usr/bin/env python
import ConfigParser
from pymongo import MongoClient
import csv
import simplejson as json
import sys
import glob
import logging
import os
logger = logging.getLogger('vulnpryer.shiploader')
config = ConfigParser.ConfigParser()
config.read('vulnpryer.conf')
mongo_host = config.get('Mongo', 'hostname')
temp_directory = config.get('VulnDB', 'working_dir')
json_directory = config.get('VulnDB', 'json_dir')
# connect to our MongoDB instance
client = MongoClient(host=mongo_host)
db = client.vulndb
collection = db.osvdb
def _decode_list(data):
rv = []
for item in data:
if isinstance(item, unicode):
item = item.encode('utf-8')
elif isinstance(item, list):
item = _decode_list(item)
elif isinstance(item, dict):
item = _decode_dict(item)
rv.append(item)
return rv
def _decode_dict(data):
rv = {}
for key, value in data.iteritems():
if isinstance(key, unicode):
key = key.encode('utf-8')
if isinstance(value, unicode):
value = value.encode('utf-8')
elif isinstance(value, list):
value = _decode_list(value)
elif isinstance(value, dict):
value = _decode_dict(value)
rv[key] = value
return rv
def load_mongo(json_glob_pattern):
"""Load a pattern of JSON files to Mongo"""
path_to_json = json_directory + json_glob_pattern
for filename in sorted(glob.glob(path_to_json), key=os.path.getmtime):
logger.info("Working on: {}".format(filename))
json_data = open(filename).read()
try:
# auto-handling unicode object hook derived from
# http://stackoverflow.com/questions/956867/how-to-get-string-
# objects-instead-unicode-ones-from-json-in-python
data = json.loads(json_data, object_hook=_decode_dict)
except:
print sys.argv[0], " Unexpected error:", sys.exc_info()[1]
if data is None:
continue
for vulndb in data['results']:
logger.debug(json.dumps(vulndb, sort_keys=True, indent=4 * ' '))
vulndb['_id'] = vulndb['osvdb_id']
osvdb_id = collection.save(vulndb)
# osvdb_id = collection.insert(vulndb)
logger.debug("Saved: {} with MongoDB id: {}".format(
filename, osvdb_id))
logger.info("Mapping OSVDB entries to CVE IDs")
_map_osvdb_to_cve()
logger.info("Marking deprecated VUulnDB entries")
_mark_deprecated_entries()
def _map_osvdb_to_cve():
"""Add CVE_ID field to all OSVDB entries"""
results = db.osvdb.aggregate([
{"$unwind": "$ext_references"},
{"$match": {"ext_references.type": "CVE ID"}},
{"$project": {"CVE_ID": "$ext_references.value"}},
{"$group": {"_id": "$_id", "CVE_ID": {"$addToSet": "$CVE_ID"}}}
])
for entry in results['result']:
db.osvdb.update({"_id": entry['_id']}, {"$set":
{"CVE_ID": entry['CVE_ID']}})
logger.info("Adding CVEs to {}".format(entry['_id']))
def _mark_deprecated_entries():
"""Mark deprecated entries as such"""
logger.info("Marking deprecated entries based on title.")
db.osvdb.update(
{'title': {'$regex': '^DEPRECA'}},
{'$set': {'deprecated': True}},
upsert=False, multi=True
)
def _run_aggregation():
"""Set the classifications to a bogus array value if it's empty (size 0)
this keeps the unwind from dropping empty classification documents
alternate query based upon ext.references.type == 'CVE ID.'
"""
results = []
result_cursor = db.osvdb.aggregate([
{"$unwind": "$CVE_ID"},
{"$unwind": "$cvss_metrics"},
{"$project": {"CVE_ID": 1, "ext_references": 1,
"cvss_score":
"$cvss_metrics.calculated_cvss_base_score",
"classifications": {"$cond": {
"if": {"$eq": [{"$size":
"$classifications"}, 0]},
"then": ["bogus"],
"else": "$classifications"}}}},
{"$unwind": "$classifications"},
{"$unwind": "$ext_references"},
{"$group": {
"_id": {"_id": "$_id", "CVE_ID": {"$concat":
["CVE-", "$CVE_ID"]}},
"public_exploit": {"$sum": {"$cond": [
{"$eq": ["$classifications.name", "exploit_public"]}, 1, 0]}},
"private_exploit": {"$sum": {"$cond": [
{"$eq": ["$classifications.name", "exploit_private"]}, 1, 0]}},
"cvss_score": {"$max": "$cvss_score"},
"msp": {"$sum": {"$cond": [{"$eq": ["$ext_references.type",
"Metasploit URL"]}, 1, 0]}},
"edb": {"$sum": {"$cond": [{"$eq": ["$ext_references.type",
"Exploit Database"]}, 1, 0]}},
"network_vector": {"$sum": {"$cond": [{"$eq": [
"$classifications.name", "location_remote"]}, 1, 0]}},
"impact_integrity": {"$sum": {"$cond": [
{"$eq": ["$classifications.name",
"impact_integrity"]}, 1, 0]}},
"impact_confidential": {"$sum": {"$cond": [
{"$eq": ["$classifications.name",
"impact_confidential"]}, 1, 0]}}}},
{"$project": {"_id": 0, "OSVDB": "$_id._id",
"CVE_ID": "$_id.CVE_ID",
"public_exploit": 1, "private_exploit": 1,
"cvss_score": 1, "msp": 1, "edb": 1,
"network_vector": 1, "impact_integrity": 1,
"impact_confidentiality": "$impact_confidential"}}
], cursor={}
)
# comment out {"$match": {"network_vector": {"$gt": 0}}}
for doc in result_cursor:
results.append(doc)
logger.info("There are {} entries in this aggregation.".format(
len(results)))
logger.debug("The headers are: {}".format(results[0].keys()))
return results
def _calculate_mean_cvss():
"""Calcuate the mean CVSS score across all known vulnerabilities"""
results = db.osvdb.aggregate([
{"$unwind": "$cvss_metrics"},
{"$group": {
"_id": "null",
"avgCVSS": {"$avg": "$cvss_metrics.calculated_cvss_base_score"}
}}
])
logger.info("There are {} entries in this aggregation.".format(
len(results['result'])))
logger.debug("The headers are: {}".format(results['result'][0].keys()))
try:
avgCVSS = results['result'][0]['avgCVSS']
except:
avgCVSS = None
return avgCVSS
class _DictUnicodeProxy(object):
"""Create helper function for writing unicode to CSV"""
def __init__(self, d):
self.d = d
def __iter__(self):
return self.d.__iter__()
def get(self, item, default=None):
i = self.d.get(item, default)
if isinstance(i, list):
i = i[0]
if isinstance(i, unicode):
return i.encode('utf-8')
return i
def _write_vulndb(results, filename):
"""Dump output to CSV"""
csvfile = open(filename, 'wb')
# headers = ['CVE_ID', 'OSVDB', 'public_exploit', 'private_exploit',
# 'cvss_score', 'msp', 'edb', 'network_vector', 'impact_integrity',
# 'impact_confidentialit', 'network_vector']
headers = results[0].keys()
csvwriter = csv.DictWriter(csvfile, fieldnames=headers)
csvwriter.writeheader()
for result in results:
csvwriter.writerow(_DictUnicodeProxy(result))
csvfile.close()
def get_extract(extract_file):
results = _run_aggregation()
_write_vulndb(results, extract_file)
if __name__ == "__main__":
"""Read in all the json files"""
load_mongo("data_*.json")
get_extract(temp_directory + 'vulndb_export.csv')
| mit |
googleapis/googleapis-gen | google/ads/googleads/v7/googleads-py/google/ads/googleads/v7/services/services/offline_user_data_job_service/transports/base.py | 1 | 5564 | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import abc
import typing
import pkg_resources
import google.auth # type: ignore
from google.api_core import gapic_v1 # type: ignore
from google.api_core import retry as retries # type: ignore
from google.api_core import operations_v1 # type: ignore
from google.auth import credentials as ga_credentials # type: ignore
from google.ads.googleads.v7.resources.types import offline_user_data_job
from google.ads.googleads.v7.services.types import offline_user_data_job_service
from google.longrunning import operations_pb2 # type: ignore
try:
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(
gapic_version=pkg_resources.get_distribution(
'google-ads',
).version,
)
except pkg_resources.DistributionNotFound:
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo()
class OfflineUserDataJobServiceTransport(metaclass=abc.ABCMeta):
"""Abstract transport class for OfflineUserDataJobService."""
AUTH_SCOPES = (
'https://www.googleapis.com/auth/adwords',
)
def __init__(
self, *,
host: str = 'googleads.googleapis.com',
credentials: ga_credentials.Credentials = None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
) -> None:
"""Instantiate the transport.
Args:
host (Optional[str]):
The hostname to connect to.
credentials (Optional[google.auth.credentials.Credentials]): The
authorization credentials to attach to requests. These
credentials identify the application to the service; if none
are specified, the client will attempt to ascertain the
credentials from the environment.
client_info (google.api_core.gapic_v1.client_info.ClientInfo):
The client info used to send a user-agent string along with
API requests. If ``None``, then default info will be used.
Generally, you only need to set this if you're developing
your own client library.
"""
# Save the hostname. Default to port 443 (HTTPS) if none is specified.
if ':' not in host:
host += ':443'
self._host = host
# If no credentials are provided, then determine the appropriate
# defaults.
if credentials is None:
credentials, _ = google.auth.default(scopes=self.AUTH_SCOPES)
# Save the credentials.
self._credentials = credentials
# Lifted into its own function so it can be stubbed out during tests.
self._prep_wrapped_messages(client_info)
def _prep_wrapped_messages(self, client_info):
# Precomputed wrapped methods
self._wrapped_methods = {
self.create_offline_user_data_job: gapic_v1.method.wrap_method(
self.create_offline_user_data_job,
default_timeout=None,
client_info=client_info,
),
self.get_offline_user_data_job: gapic_v1.method.wrap_method(
self.get_offline_user_data_job,
default_timeout=None,
client_info=client_info,
),
self.add_offline_user_data_job_operations: gapic_v1.method.wrap_method(
self.add_offline_user_data_job_operations,
default_timeout=None,
client_info=client_info,
),
self.run_offline_user_data_job: gapic_v1.method.wrap_method(
self.run_offline_user_data_job,
default_timeout=None,
client_info=client_info,
),
}
@property
def operations_client(self) -> operations_v1.OperationsClient:
"""Return the client designed to process long-running operations."""
raise NotImplementedError
@property
def create_offline_user_data_job(self) -> typing.Callable[
[offline_user_data_job_service.CreateOfflineUserDataJobRequest],
offline_user_data_job_service.CreateOfflineUserDataJobResponse]:
raise NotImplementedError
@property
def get_offline_user_data_job(self) -> typing.Callable[
[offline_user_data_job_service.GetOfflineUserDataJobRequest],
offline_user_data_job.OfflineUserDataJob]:
raise NotImplementedError
@property
def add_offline_user_data_job_operations(self) -> typing.Callable[
[offline_user_data_job_service.AddOfflineUserDataJobOperationsRequest],
offline_user_data_job_service.AddOfflineUserDataJobOperationsResponse]:
raise NotImplementedError
@property
def run_offline_user_data_job(self) -> typing.Callable[
[offline_user_data_job_service.RunOfflineUserDataJobRequest],
operations_pb2.Operation]:
raise NotImplementedError
__all__ = (
'OfflineUserDataJobServiceTransport',
)
| apache-2.0 |
kenshay/ImageScripter | ProgramData/SystemFiles/Python/Lib/encodings/email/header.py | 255 | 22243 | # Copyright (C) 2002-2006 Python Software Foundation
# Author: Ben Gertzfield, Barry Warsaw
# Contact: email-sig@python.org
"""Header encoding and decoding functionality."""
__all__ = [
'Header',
'decode_header',
'make_header',
]
import re
import binascii
import email.quoprimime
import email.base64mime
from email.errors import HeaderParseError
from email.charset import Charset
NL = '\n'
SPACE = ' '
USPACE = u' '
SPACE8 = ' ' * 8
UEMPTYSTRING = u''
MAXLINELEN = 76
USASCII = Charset('us-ascii')
UTF8 = Charset('utf-8')
# Match encoded-word strings in the form =?charset?q?Hello_World?=
ecre = re.compile(r'''
=\? # literal =?
(?P<charset>[^?]*?) # non-greedy up to the next ? is the charset
\? # literal ?
(?P<encoding>[qb]) # either a "q" or a "b", case insensitive
\? # literal ?
(?P<encoded>.*?) # non-greedy up to the next ?= is the encoded string
\?= # literal ?=
(?=[ \t]|$) # whitespace or the end of the string
''', re.VERBOSE | re.IGNORECASE | re.MULTILINE)
# Field name regexp, including trailing colon, but not separating whitespace,
# according to RFC 2822. Character range is from tilde to exclamation mark.
# For use with .match()
fcre = re.compile(r'[\041-\176]+:$')
# Find a header embedded in a putative header value. Used to check for
# header injection attack.
_embeded_header = re.compile(r'\n[^ \t]+:')
# Helpers
_max_append = email.quoprimime._max_append
def decode_header(header):
"""Decode a message header value without converting charset.
Returns a list of (decoded_string, charset) pairs containing each of the
decoded parts of the header. Charset is None for non-encoded parts of the
header, otherwise a lower-case string containing the name of the character
set specified in the encoded string.
An email.errors.HeaderParseError may be raised when certain decoding error
occurs (e.g. a base64 decoding exception).
"""
# If no encoding, just return the header
header = str(header)
if not ecre.search(header):
return [(header, None)]
decoded = []
dec = ''
for line in header.splitlines():
# This line might not have an encoding in it
if not ecre.search(line):
decoded.append((line, None))
continue
parts = ecre.split(line)
while parts:
unenc = parts.pop(0).strip()
if unenc:
# Should we continue a long line?
if decoded and decoded[-1][1] is None:
decoded[-1] = (decoded[-1][0] + SPACE + unenc, None)
else:
decoded.append((unenc, None))
if parts:
charset, encoding = [s.lower() for s in parts[0:2]]
encoded = parts[2]
dec = None
if encoding == 'q':
dec = email.quoprimime.header_decode(encoded)
elif encoding == 'b':
paderr = len(encoded) % 4 # Postel's law: add missing padding
if paderr:
encoded += '==='[:4 - paderr]
try:
dec = email.base64mime.decode(encoded)
except binascii.Error:
# Turn this into a higher level exception. BAW: Right
# now we throw the lower level exception away but
# when/if we get exception chaining, we'll preserve it.
raise HeaderParseError
if dec is None:
dec = encoded
if decoded and decoded[-1][1] == charset:
decoded[-1] = (decoded[-1][0] + dec, decoded[-1][1])
else:
decoded.append((dec, charset))
del parts[0:3]
return decoded
def make_header(decoded_seq, maxlinelen=None, header_name=None,
continuation_ws=' '):
"""Create a Header from a sequence of pairs as returned by decode_header()
decode_header() takes a header value string and returns a sequence of
pairs of the format (decoded_string, charset) where charset is the string
name of the character set.
This function takes one of those sequence of pairs and returns a Header
instance. Optional maxlinelen, header_name, and continuation_ws are as in
the Header constructor.
"""
h = Header(maxlinelen=maxlinelen, header_name=header_name,
continuation_ws=continuation_ws)
for s, charset in decoded_seq:
# None means us-ascii but we can simply pass it on to h.append()
if charset is not None and not isinstance(charset, Charset):
charset = Charset(charset)
h.append(s, charset)
return h
class Header:
def __init__(self, s=None, charset=None,
maxlinelen=None, header_name=None,
continuation_ws=' ', errors='strict'):
"""Create a MIME-compliant header that can contain many character sets.
Optional s is the initial header value. If None, the initial header
value is not set. You can later append to the header with .append()
method calls. s may be a byte string or a Unicode string, but see the
.append() documentation for semantics.
Optional charset serves two purposes: it has the same meaning as the
charset argument to the .append() method. It also sets the default
character set for all subsequent .append() calls that omit the charset
argument. If charset is not provided in the constructor, the us-ascii
charset is used both as s's initial charset and as the default for
subsequent .append() calls.
The maximum line length can be specified explicit via maxlinelen. For
splitting the first line to a shorter value (to account for the field
header which isn't included in s, e.g. `Subject') pass in the name of
the field in header_name. The default maxlinelen is 76.
continuation_ws must be RFC 2822 compliant folding whitespace (usually
either a space or a hard tab) which will be prepended to continuation
lines.
errors is passed through to the .append() call.
"""
if charset is None:
charset = USASCII
if not isinstance(charset, Charset):
charset = Charset(charset)
self._charset = charset
self._continuation_ws = continuation_ws
cws_expanded_len = len(continuation_ws.replace('\t', SPACE8))
# BAW: I believe `chunks' and `maxlinelen' should be non-public.
self._chunks = []
if s is not None:
self.append(s, charset, errors)
if maxlinelen is None:
maxlinelen = MAXLINELEN
if header_name is None:
# We don't know anything about the field header so the first line
# is the same length as subsequent lines.
self._firstlinelen = maxlinelen
else:
# The first line should be shorter to take into account the field
# header. Also subtract off 2 extra for the colon and space.
self._firstlinelen = maxlinelen - len(header_name) - 2
# Second and subsequent lines should subtract off the length in
# columns of the continuation whitespace prefix.
self._maxlinelen = maxlinelen - cws_expanded_len
def __str__(self):
"""A synonym for self.encode()."""
return self.encode()
def __unicode__(self):
"""Helper for the built-in unicode function."""
uchunks = []
lastcs = None
for s, charset in self._chunks:
# We must preserve spaces between encoded and non-encoded word
# boundaries, which means for us we need to add a space when we go
# from a charset to None/us-ascii, or from None/us-ascii to a
# charset. Only do this for the second and subsequent chunks.
nextcs = charset
if uchunks:
if lastcs not in (None, 'us-ascii'):
if nextcs in (None, 'us-ascii'):
uchunks.append(USPACE)
nextcs = None
elif nextcs not in (None, 'us-ascii'):
uchunks.append(USPACE)
lastcs = nextcs
uchunks.append(unicode(s, str(charset)))
return UEMPTYSTRING.join(uchunks)
# Rich comparison operators for equality only. BAW: does it make sense to
# have or explicitly disable <, <=, >, >= operators?
def __eq__(self, other):
# other may be a Header or a string. Both are fine so coerce
# ourselves to a string, swap the args and do another comparison.
return other == self.encode()
def __ne__(self, other):
return not self == other
def append(self, s, charset=None, errors='strict'):
"""Append a string to the MIME header.
Optional charset, if given, should be a Charset instance or the name
of a character set (which will be converted to a Charset instance). A
value of None (the default) means that the charset given in the
constructor is used.
s may be a byte string or a Unicode string. If it is a byte string
(i.e. isinstance(s, str) is true), then charset is the encoding of
that byte string, and a UnicodeError will be raised if the string
cannot be decoded with that charset. If s is a Unicode string, then
charset is a hint specifying the character set of the characters in
the string. In this case, when producing an RFC 2822 compliant header
using RFC 2047 rules, the Unicode string will be encoded using the
following charsets in order: us-ascii, the charset hint, utf-8. The
first character set not to provoke a UnicodeError is used.
Optional `errors' is passed as the third argument to any unicode() or
ustr.encode() call.
"""
if charset is None:
charset = self._charset
elif not isinstance(charset, Charset):
charset = Charset(charset)
# If the charset is our faux 8bit charset, leave the string unchanged
if charset != '8bit':
# We need to test that the string can be converted to unicode and
# back to a byte string, given the input and output codecs of the
# charset.
if isinstance(s, str):
# Possibly raise UnicodeError if the byte string can't be
# converted to a unicode with the input codec of the charset.
incodec = charset.input_codec or 'us-ascii'
ustr = unicode(s, incodec, errors)
# Now make sure that the unicode could be converted back to a
# byte string with the output codec, which may be different
# than the input codec. Still, use the original byte string.
outcodec = charset.output_codec or 'us-ascii'
ustr.encode(outcodec, errors)
elif isinstance(s, unicode):
# Now we have to be sure the unicode string can be converted
# to a byte string with a reasonable output codec. We want to
# use the byte string in the chunk.
for charset in USASCII, charset, UTF8:
try:
outcodec = charset.output_codec or 'us-ascii'
s = s.encode(outcodec, errors)
break
except UnicodeError:
pass
else:
assert False, 'utf-8 conversion failed'
self._chunks.append((s, charset))
def _split(self, s, charset, maxlinelen, splitchars):
# Split up a header safely for use with encode_chunks.
splittable = charset.to_splittable(s)
encoded = charset.from_splittable(splittable, True)
elen = charset.encoded_header_len(encoded)
# If the line's encoded length fits, just return it
if elen <= maxlinelen:
return [(encoded, charset)]
# If we have undetermined raw 8bit characters sitting in a byte
# string, we really don't know what the right thing to do is. We
# can't really split it because it might be multibyte data which we
# could break if we split it between pairs. The least harm seems to
# be to not split the header at all, but that means they could go out
# longer than maxlinelen.
if charset == '8bit':
return [(s, charset)]
# BAW: I'm not sure what the right test here is. What we're trying to
# do is be faithful to RFC 2822's recommendation that ($2.2.3):
#
# "Note: Though structured field bodies are defined in such a way that
# folding can take place between many of the lexical tokens (and even
# within some of the lexical tokens), folding SHOULD be limited to
# placing the CRLF at higher-level syntactic breaks."
#
# For now, I can only imagine doing this when the charset is us-ascii,
# although it's possible that other charsets may also benefit from the
# higher-level syntactic breaks.
elif charset == 'us-ascii':
return self._split_ascii(s, charset, maxlinelen, splitchars)
# BAW: should we use encoded?
elif elen == len(s):
# We can split on _maxlinelen boundaries because we know that the
# encoding won't change the size of the string
splitpnt = maxlinelen
first = charset.from_splittable(splittable[:splitpnt], False)
last = charset.from_splittable(splittable[splitpnt:], False)
else:
# Binary search for split point
first, last = _binsplit(splittable, charset, maxlinelen)
# first is of the proper length so just wrap it in the appropriate
# chrome. last must be recursively split.
fsplittable = charset.to_splittable(first)
fencoded = charset.from_splittable(fsplittable, True)
chunk = [(fencoded, charset)]
return chunk + self._split(last, charset, self._maxlinelen, splitchars)
def _split_ascii(self, s, charset, firstlen, splitchars):
chunks = _split_ascii(s, firstlen, self._maxlinelen,
self._continuation_ws, splitchars)
return zip(chunks, [charset]*len(chunks))
def _encode_chunks(self, newchunks, maxlinelen):
# MIME-encode a header with many different charsets and/or encodings.
#
# Given a list of pairs (string, charset), return a MIME-encoded
# string suitable for use in a header field. Each pair may have
# different charsets and/or encodings, and the resulting header will
# accurately reflect each setting.
#
# Each encoding can be email.utils.QP (quoted-printable, for
# ASCII-like character sets like iso-8859-1), email.utils.BASE64
# (Base64, for non-ASCII like character sets like KOI8-R and
# iso-2022-jp), or None (no encoding).
#
# Each pair will be represented on a separate line; the resulting
# string will be in the format:
#
# =?charset1?q?Mar=EDa_Gonz=E1lez_Alonso?=\n
# =?charset2?b?SvxyZ2VuIEL2aW5n?="
chunks = []
for header, charset in newchunks:
if not header:
continue
if charset is None or charset.header_encoding is None:
s = header
else:
s = charset.header_encode(header)
# Don't add more folding whitespace than necessary
if chunks and chunks[-1].endswith(' '):
extra = ''
else:
extra = ' '
_max_append(chunks, s, maxlinelen, extra)
joiner = NL + self._continuation_ws
return joiner.join(chunks)
def encode(self, splitchars=';, '):
"""Encode a message header into an RFC-compliant format.
There are many issues involved in converting a given string for use in
an email header. Only certain character sets are readable in most
email clients, and as header strings can only contain a subset of
7-bit ASCII, care must be taken to properly convert and encode (with
Base64 or quoted-printable) header strings. In addition, there is a
75-character length limit on any given encoded header field, so
line-wrapping must be performed, even with double-byte character sets.
This method will do its best to convert the string to the correct
character set used in email, and encode and line wrap it safely with
the appropriate scheme for that character set.
If the given charset is not known or an error occurs during
conversion, this function will return the header untouched.
Optional splitchars is a string containing characters to split long
ASCII lines on, in rough support of RFC 2822's `highest level
syntactic breaks'. This doesn't affect RFC 2047 encoded lines.
"""
newchunks = []
maxlinelen = self._firstlinelen
lastlen = 0
for s, charset in self._chunks:
# The first bit of the next chunk should be just long enough to
# fill the next line. Don't forget the space separating the
# encoded words.
targetlen = maxlinelen - lastlen - 1
if targetlen < charset.encoded_header_len(''):
# Stick it on the next line
targetlen = maxlinelen
newchunks += self._split(s, charset, targetlen, splitchars)
lastchunk, lastcharset = newchunks[-1]
lastlen = lastcharset.encoded_header_len(lastchunk)
value = self._encode_chunks(newchunks, maxlinelen)
if _embeded_header.search(value):
raise HeaderParseError("header value appears to contain "
"an embedded header: {!r}".format(value))
return value
def _split_ascii(s, firstlen, restlen, continuation_ws, splitchars):
lines = []
maxlen = firstlen
for line in s.splitlines():
# Ignore any leading whitespace (i.e. continuation whitespace) already
# on the line, since we'll be adding our own.
line = line.lstrip()
if len(line) < maxlen:
lines.append(line)
maxlen = restlen
continue
# Attempt to split the line at the highest-level syntactic break
# possible. Note that we don't have a lot of smarts about field
# syntax; we just try to break on semi-colons, then commas, then
# whitespace.
for ch in splitchars:
if ch in line:
break
else:
# There's nothing useful to split the line on, not even spaces, so
# just append this line unchanged
lines.append(line)
maxlen = restlen
continue
# Now split the line on the character plus trailing whitespace
cre = re.compile(r'%s\s*' % ch)
if ch in ';,':
eol = ch
else:
eol = ''
joiner = eol + ' '
joinlen = len(joiner)
wslen = len(continuation_ws.replace('\t', SPACE8))
this = []
linelen = 0
for part in cre.split(line):
curlen = linelen + max(0, len(this)-1) * joinlen
partlen = len(part)
onfirstline = not lines
# We don't want to split after the field name, if we're on the
# first line and the field name is present in the header string.
if ch == ' ' and onfirstline and \
len(this) == 1 and fcre.match(this[0]):
this.append(part)
linelen += partlen
elif curlen + partlen > maxlen:
if this:
lines.append(joiner.join(this) + eol)
# If this part is longer than maxlen and we aren't already
# splitting on whitespace, try to recursively split this line
# on whitespace.
if partlen > maxlen and ch != ' ':
subl = _split_ascii(part, maxlen, restlen,
continuation_ws, ' ')
lines.extend(subl[:-1])
this = [subl[-1]]
else:
this = [part]
linelen = wslen + len(this[-1])
maxlen = restlen
else:
this.append(part)
linelen += partlen
# Put any left over parts on a line by themselves
if this:
lines.append(joiner.join(this))
return lines
def _binsplit(splittable, charset, maxlinelen):
i = 0
j = len(splittable)
while i < j:
# Invariants:
# 1. splittable[:k] fits for all k <= i (note that we *assume*,
# at the start, that splittable[:0] fits).
# 2. splittable[:k] does not fit for any k > j (at the start,
# this means we shouldn't look at any k > len(splittable)).
# 3. We don't know about splittable[:k] for k in i+1..j.
# 4. We want to set i to the largest k that fits, with i <= k <= j.
#
m = (i+j+1) >> 1 # ceiling((i+j)/2); i < m <= j
chunk = charset.from_splittable(splittable[:m], True)
chunklen = charset.encoded_header_len(chunk)
if chunklen <= maxlinelen:
# m is acceptable, so is a new lower bound.
i = m
else:
# m is not acceptable, so final i must be < m.
j = m - 1
# i == j. Invariant #1 implies that splittable[:i] fits, and
# invariant #2 implies that splittable[:i+1] does not fit, so i
# is what we're looking for.
first = charset.from_splittable(splittable[:i], False)
last = charset.from_splittable(splittable[i:], False)
return first, last
| gpl-3.0 |
antonblanchard/linux | tools/perf/python/twatch.py | 221 | 2515 | #! /usr/bin/python
# SPDX-License-Identifier: GPL-2.0-only
# -*- python -*-
# -*- coding: utf-8 -*-
# twatch - Experimental use of the perf python interface
# Copyright (C) 2011 Arnaldo Carvalho de Melo <acme@redhat.com>
#
import perf
def main(context_switch = 0, thread = -1):
cpus = perf.cpu_map()
threads = perf.thread_map(thread)
evsel = perf.evsel(type = perf.TYPE_SOFTWARE,
config = perf.COUNT_SW_DUMMY,
task = 1, comm = 1, mmap = 0, freq = 0,
wakeup_events = 1, watermark = 1,
sample_id_all = 1, context_switch = context_switch,
sample_type = perf.SAMPLE_PERIOD | perf.SAMPLE_TID | perf.SAMPLE_CPU)
"""What we want are just the PERF_RECORD_ lifetime events for threads,
using the default, PERF_TYPE_HARDWARE + PERF_COUNT_HW_CYCLES & freq=1
(the default), makes perf reenable irq_vectors:local_timer_entry, when
disabling nohz, not good for some use cases where all we want is to get
threads come and go... So use (perf.TYPE_SOFTWARE, perf.COUNT_SW_DUMMY,
freq=0) instead."""
evsel.open(cpus = cpus, threads = threads);
evlist = perf.evlist(cpus, threads)
evlist.add(evsel)
evlist.mmap()
while True:
evlist.poll(timeout = -1)
for cpu in cpus:
event = evlist.read_on_cpu(cpu)
if not event:
continue
print("cpu: {0}, pid: {1}, tid: {2} {3}".format(event.sample_cpu,
event.sample_pid,
event.sample_tid,
event))
if __name__ == '__main__':
"""
To test the PERF_RECORD_SWITCH record, pick a pid and replace
in the following line.
Example output:
cpu: 3, pid: 31463, tid: 31593 { type: context_switch, next_prev_pid: 31463, next_prev_tid: 31593, switch_out: 1 }
cpu: 1, pid: 31463, tid: 31489 { type: context_switch, next_prev_pid: 31463, next_prev_tid: 31489, switch_out: 1 }
cpu: 2, pid: 31463, tid: 31496 { type: context_switch, next_prev_pid: 31463, next_prev_tid: 31496, switch_out: 1 }
cpu: 3, pid: 31463, tid: 31491 { type: context_switch, next_prev_pid: 31463, next_prev_tid: 31491, switch_out: 0 }
It is possible as well to use event.misc & perf.PERF_RECORD_MISC_SWITCH_OUT
to figure out if this is a context switch in or out of the monitored threads.
If bored, please add command line option parsing support for these options :-)
"""
# main(context_switch = 1, thread = 31463)
main()
| gpl-2.0 |
harshadyeola/easyengine | ee/cli/plugins/stack_upgrade.py | 1 | 12752 | from cement.core.controller import CementBaseController, expose
from cement.core import handler, hook
from ee.core.logging import Log
from ee.core.variables import EEVariables
from ee.core.aptget import EEAptGet
from ee.core.apt_repo import EERepo
from ee.core.services import EEService
from ee.core.fileutils import EEFileUtils
from ee.core.shellexec import EEShellExec
from ee.core.git import EEGit
from ee.core.download import EEDownload
import configparser
import os
class EEStackUpgradeController(CementBaseController):
class Meta:
label = 'upgrade'
stacked_on = 'stack'
stacked_type = 'nested'
description = ('Upgrade stack safely')
arguments = [
(['--all'],
dict(help='Upgrade all stack', action='store_true')),
(['--web'],
dict(help='Upgrade web stack', action='store_true')),
(['--admin'],
dict(help='Upgrade admin tools stack', action='store_true')),
(['--mail'],
dict(help='Upgrade mail server stack', action='store_true')),
(['--mailscanner'],
dict(help='Upgrade mail scanner stack', action='store_true')),
(['--nginx'],
dict(help='Upgrade Nginx stack', action='store_true')),
(['--nginxmainline'],
dict(help='Upgrade Nginx Mainline stack', action='store_true')),
(['--php'],
dict(help='Upgrade PHP stack', action='store_true')),
(['--mysql'],
dict(help='Upgrade MySQL stack', action='store_true')),
(['--hhvm'],
dict(help='Upgrade HHVM stack', action='store_true')),
(['--postfix'],
dict(help='Upgrade Postfix stack', action='store_true')),
(['--wpcli'],
dict(help='Upgrade WPCLI', action='store_true')),
(['--redis'],
dict(help='Upgrade Redis', action='store_true')),
(['--php56'],
dict(help="Upgrade to PHP5.6 from PHP5.5",
action='store_true')),
(['--no-prompt'],
dict(help="Upgrade Packages without any prompt",
action='store_true')),
]
@expose(hide=True)
def upgrade_php56(self):
if EEVariables.ee_platform_distro == "ubuntu":
if os.path.isfile("/etc/apt/sources.list.d/ondrej-php5-5_6-{0}."
"list".format(EEVariables.ee_platform_codename)):
Log.error(self, "Unable to find PHP 5.5")
else:
if not(os.path.isfile(EEVariables.ee_repo_file_path) and
EEFileUtils.grep(self, EEVariables.ee_repo_file_path,
"php55")):
Log.error(self, "Unable to find PHP 5.5")
Log.info(self, "During PHP update process non nginx-cached"
" parts of your site may remain down.")
# Check prompt
if (not self.app.pargs.no_prompt):
start_upgrade = input("Do you want to continue:[y/N]")
if start_upgrade != "Y" and start_upgrade != "y":
Log.error(self, "Not starting PHP package update")
if EEVariables.ee_platform_distro == "ubuntu":
EERepo.remove(self, ppa="ppa:ondrej/php5")
EERepo.add(self, ppa=EEVariables.ee_php_repo)
else:
EEAptGet.remove(self, ["php5-xdebug"])
EEFileUtils.searchreplace(self, EEVariables.ee_repo_file_path,
"php55", "php56")
Log.info(self, "Updating apt-cache, please wait...")
EEAptGet.update(self)
Log.info(self, "Installing packages, please wait ...")
if EEVariables.ee_platform_codename == 'trusty':
EEAptGet.install(self, EEVariables.ee_php5_6 + EEVariables.ee_php_extra)
else:
EEAptGet.install(self, EEVariables.ee_php)
if EEVariables.ee_platform_distro == "debian":
EEShellExec.cmd_exec(self, "pecl install xdebug")
with open("/etc/php5/mods-available/xdebug.ini",
encoding='utf-8', mode='a') as myfile:
myfile.write(";zend_extension=/usr/lib/php5/20131226/"
"xdebug.so\n")
EEFileUtils.create_symlink(self, ["/etc/php5/mods-available/"
"xdebug.ini", "/etc/php5/fpm/conf.d"
"/20-xedbug.ini"])
Log.info(self, "Successfully upgraded from PHP 5.5 to PHP 5.6")
@expose(hide=True)
def default(self):
# All package update
if ((not self.app.pargs.php56)):
apt_packages = []
packages = []
if ((not self.app.pargs.web) and (not self.app.pargs.nginx) and
(not self.app.pargs.php) and (not self.app.pargs.mysql) and
(not self.app.pargs.postfix) and (not self.app.pargs.hhvm) and
(not self.app.pargs.mailscanner) and (not self.app.pargs.all)
and (not self.app.pargs.wpcli) and (not self.app.pargs.redis) and (not self.app.pargs.nginxmainline)):
self.app.pargs.web = True
if self.app.pargs.all:
self.app.pargs.web = True
self.app.pargs.mail = True
if self.app.pargs.web:
if EEAptGet.is_installed(self, 'nginx-custom'):
self.app.pargs.nginx = True
elif EEAptGet.is_installed(self, 'nginx-mainline'):
self.app.pargs.nginxmainline = True
else:
Log.info(self, "Nginx is not already installed")
self.app.pargs.php = True
self.app.pargs.mysql = True
self.app.pargs.postfix = True
self.app.pargs.wpcli = True
if self.app.pargs.mail:
self.app.pargs.nginx = True
self.app.pargs.php = True
self.app.pargs.mysql = True
self.app.pargs.wpcli = True
self.app.pargs.postfix = True
if EEAptGet.is_installed(self, 'dovecot-core'):
apt_packages = apt_packages + EEVariables.ee_mail
self.app.pargs.mailscanner = True
else:
Log.info(self, "Mail server is not installed")
if self.app.pargs.nginx :
if EEAptGet.is_installed(self, 'nginx-custom'):
apt_packages = apt_packages + EEVariables.ee_nginx
else:
Log.info(self, "Nginx Stable is not already installed")
if self.app.pargs.nginxmainline:
if EEAptGet.is_installed(self, 'nginx-mainline'):
apt_packages = apt_packages + EEVariables.ee_nginx_dev
else:
Log.info(self, "Nginx Mainline is not already installed")
if self.app.pargs.php:
if EEVariables.ee_platform_codename != 'trusty':
if EEAptGet.is_installed(self, 'php5-fpm'):
apt_packages = apt_packages + EEVariables.ee_php
else:
Log.info(self, "PHP is not installed")
else:
if EEAptGet.is_installed(self, 'php5.6-fpm'):
apt_packages = apt_packages + EEVariables.ee_php5_6 + EEVariables.ee_php_extra
else:
Log.info(self, "PHP 5.6 is not installed")
if EEAptGet.is_installed(self, 'php7.0-fpm'):
apt_packages = apt_packages + EEVariables.ee_php7_0 + EEVariables.ee_php_extra
else:
Log.info(self, "PHP 7.0 is not installed")
if self.app.pargs.hhvm:
if EEAptGet.is_installed(self, 'hhvm'):
apt_packages = apt_packages + EEVariables.ee_hhvm
else:
Log.info(self, "HHVM is not installed")
if self.app.pargs.mysql:
if EEAptGet.is_installed(self, 'mariadb-server'):
apt_packages = apt_packages + EEVariables.ee_mysql
else:
Log.info(self, "MariaDB is not installed")
if self.app.pargs.postfix:
if EEAptGet.is_installed(self, 'postfix'):
apt_packages = apt_packages + EEVariables.ee_postfix
else:
Log.info(self, "Postfix is not installed")
if self.app.pargs.redis:
if EEAptGet.is_installed(self, 'redis-server'):
apt_packages = apt_packages + EEVariables.ee_redis
else:
Log.info(self, "Redis is not installed")
if self.app.pargs.wpcli:
if os.path.isfile('/usr/bin/wp'):
packages = packages + [["https://github.com/wp-cli/wp-cli/"
"releases/download/v{0}/"
"wp-cli-{0}.phar"
"".format(EEVariables.ee_wp_cli),
"/usr/bin/wp",
"WP-CLI"]]
else:
Log.info(self, "WPCLI is not installed with EasyEngine")
if self.app.pargs.mailscanner:
if EEAptGet.is_installed(self, 'amavisd-new'):
apt_packages = (apt_packages + EEVariables.ee_mailscanner)
else:
Log.info(self, "MailScanner is not installed")
if len(packages) or len(apt_packages):
Log.info(self, "During package update process non nginx-cached"
" parts of your site may remain down")
# Check prompt
if (not self.app.pargs.no_prompt):
start_upgrade = input("Do you want to continue:[y/N]")
if start_upgrade != "Y" and start_upgrade != "y":
Log.error(self, "Not starting package update")
Log.info(self, "Updating packages, please wait...")
if len(apt_packages):
# apt-get update
EEAptGet.update(self)
# Update packages
EEAptGet.install(self, apt_packages)
# Post Actions after package updates
if (set(EEVariables.ee_nginx).issubset(set(apt_packages)) or
set(EEVariables.ee_nginx_dev).issubset(set(apt_packages))):
EEService.restart_service(self, 'nginx')
if EEVariables.ee_platform_codename != 'trusty':
if set(EEVariables.ee_php).issubset(set(apt_packages)):
EEService.restart_service(self, 'php5-fpm')
else:
if set(EEVariables.ee_php5_6).issubset(set(apt_packages)):
EEService.restart_service(self, 'php5.6-fpm')
if set(EEVariables.ee_php7_0).issubset(set(apt_packages)):
EEService.restart_service(self, 'php7.0-fpm')
if set(EEVariables.ee_hhvm).issubset(set(apt_packages)):
EEService.restart_service(self, 'hhvm')
if set(EEVariables.ee_postfix).issubset(set(apt_packages)):
EEService.restart_service(self, 'postfix')
if set(EEVariables.ee_mysql).issubset(set(apt_packages)):
EEService.restart_service(self, 'mysql')
if set(EEVariables.ee_mail).issubset(set(apt_packages)):
EEService.restart_service(self, 'dovecot')
if set(EEVariables.ee_redis).issubset(set(apt_packages)):
EEService.restart_service(self, 'redis-server')
if len(packages):
if self.app.pargs.wpcli:
EEFileUtils.remove(self,['/usr/bin/wp'])
Log.debug(self, "Downloading following: {0}".format(packages))
EEDownload.download(self, packages)
if self.app.pargs.wpcli:
EEFileUtils.chmod(self, "/usr/bin/wp", 0o775)
Log.info(self, "Successfully updated packages")
# PHP 5.5 to 5.6
elif (self.app.pargs.php56):
self.upgrade_php56()
else:
self.app.args.print_help()
| mit |
sahilTakiar/spark | python/pyspark/status.py | 159 | 3756 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from collections import namedtuple
__all__ = ["SparkJobInfo", "SparkStageInfo", "StatusTracker"]
class SparkJobInfo(namedtuple("SparkJobInfo", "jobId stageIds status")):
"""
Exposes information about Spark Jobs.
"""
class SparkStageInfo(namedtuple("SparkStageInfo",
"stageId currentAttemptId name numTasks numActiveTasks "
"numCompletedTasks numFailedTasks")):
"""
Exposes information about Spark Stages.
"""
class StatusTracker(object):
"""
Low-level status reporting APIs for monitoring job and stage progress.
These APIs intentionally provide very weak consistency semantics;
consumers of these APIs should be prepared to handle empty / missing
information. For example, a job's stage ids may be known but the status
API may not have any information about the details of those stages, so
`getStageInfo` could potentially return `None` for a valid stage id.
To limit memory usage, these APIs only provide information on recent
jobs / stages. These APIs will provide information for the last
`spark.ui.retainedStages` stages and `spark.ui.retainedJobs` jobs.
"""
def __init__(self, jtracker):
self._jtracker = jtracker
def getJobIdsForGroup(self, jobGroup=None):
"""
Return a list of all known jobs in a particular job group. If
`jobGroup` is None, then returns all known jobs that are not
associated with a job group.
The returned list may contain running, failed, and completed jobs,
and may vary across invocations of this method. This method does
not guarantee the order of the elements in its result.
"""
return list(self._jtracker.getJobIdsForGroup(jobGroup))
def getActiveStageIds(self):
"""
Returns an array containing the ids of all active stages.
"""
return sorted(list(self._jtracker.getActiveStageIds()))
def getActiveJobsIds(self):
"""
Returns an array containing the ids of all active jobs.
"""
return sorted((list(self._jtracker.getActiveJobIds())))
def getJobInfo(self, jobId):
"""
Returns a :class:`SparkJobInfo` object, or None if the job info
could not be found or was garbage collected.
"""
job = self._jtracker.getJobInfo(jobId)
if job is not None:
return SparkJobInfo(jobId, job.stageIds(), str(job.status()))
def getStageInfo(self, stageId):
"""
Returns a :class:`SparkStageInfo` object, or None if the stage
info could not be found or was garbage collected.
"""
stage = self._jtracker.getStageInfo(stageId)
if stage is not None:
# TODO: fetch them in batch for better performance
attrs = [getattr(stage, f)() for f in SparkStageInfo._fields[1:]]
return SparkStageInfo(stageId, *attrs)
| apache-2.0 |
tunneln/CarnotKE | jyhton/lib-python/2.7/lib2to3/fixes/fix_exitfunc.py | 291 | 2505 | """
Convert use of sys.exitfunc to use the atexit module.
"""
# Author: Benjamin Peterson
from lib2to3 import pytree, fixer_base
from lib2to3.fixer_util import Name, Attr, Call, Comma, Newline, syms
class FixExitfunc(fixer_base.BaseFix):
keep_line_order = True
BM_compatible = True
PATTERN = """
(
sys_import=import_name<'import'
('sys'
|
dotted_as_names< (any ',')* 'sys' (',' any)* >
)
>
|
expr_stmt<
power< 'sys' trailer< '.' 'exitfunc' > >
'=' func=any >
)
"""
def __init__(self, *args):
super(FixExitfunc, self).__init__(*args)
def start_tree(self, tree, filename):
super(FixExitfunc, self).start_tree(tree, filename)
self.sys_import = None
def transform(self, node, results):
# First, find the sys import. We'll just hope it's in global scope.
if "sys_import" in results:
if self.sys_import is None:
self.sys_import = results["sys_import"]
return
func = results["func"].clone()
func.prefix = u""
register = pytree.Node(syms.power,
Attr(Name(u"atexit"), Name(u"register"))
)
call = Call(register, [func], node.prefix)
node.replace(call)
if self.sys_import is None:
# That's interesting.
self.warning(node, "Can't find sys import; Please add an atexit "
"import at the top of your file.")
return
# Now add an atexit import after the sys import.
names = self.sys_import.children[1]
if names.type == syms.dotted_as_names:
names.append_child(Comma())
names.append_child(Name(u"atexit", u" "))
else:
containing_stmt = self.sys_import.parent
position = containing_stmt.children.index(self.sys_import)
stmt_container = containing_stmt.parent
new_import = pytree.Node(syms.import_name,
[Name(u"import"), Name(u"atexit", u" ")]
)
new = pytree.Node(syms.simple_stmt, [new_import])
containing_stmt.insert_child(position + 1, Newline())
containing_stmt.insert_child(position + 2, new)
| apache-2.0 |
noba3/KoTos | addons/plugin.video.filmibynaturex-2.5.7/mymoves/movie/Metadata.py | 1 | 2017 |
from common import XBMCInterfaceUtils, Logger
from metahandler import metahandlers # @UnresolvedImport
import sys
def retieveMovieInfoAndAddItem(request_obj, response_obj):
items = response_obj.get_item_list()
XBMCInterfaceUtils.callBackDialogProgressBar(getattr(sys.modules[__name__], '__addMovieInfo_in_item'), items, 'Retrieving MOVIE info', 'Failed to retrieve movie information, please try again later')
__metaget__ = None
def __addMovieInfo_in_item(item):
if item.get_next_action_name() == 'Movie_Streams':
year = unicode(item.get_moving_data()['movieYear'], errors='ignore').encode('utf-8')
title = unicode(item.get_moving_data()['movieTitle'], errors='ignore').encode('utf-8')
meta = None
try:
global __metaget__
if __metaget__ is None:
__metaget__ = metahandlers.MetaData()
meta = __metaget__.get_meta('movie', title, year=year)
except:
Logger.logDebug('Failed to load metahandler module')
xbmc_item = item.get_xbmc_list_item_obj()
if(meta is not None):
xbmc_item.setIconImage(meta['thumb_url'])
xbmc_item.setThumbnailImage(meta['cover_url'])
videoInfo = {'trailer_url':meta['trailer_url']}
for key, value in meta.items():
if type(value) is str:
value = unicode(value).encode('utf-8')
videoInfo[key] = value
xbmc_item.setInfo('video', videoInfo)
xbmc_item.setProperty('fanart_image', meta['backdrop_url'])
item.add_request_data('videoInfo', videoInfo)
contextMenuItems = []
contextMenuItems.append(('Movie Information', 'XBMC.Action(Info)'))
xbmc_item.addContextMenuItems(contextMenuItems, replaceItems=False)
else:
xbmc_item.setInfo('video', {'title':title, 'year':year})
item.add_request_data('videoInfo', {'title':title, 'year':year})
| gpl-2.0 |
shepdelacreme/ansible | lib/ansible/modules/cloud/ovirt/ovirt_tag.py | 66 | 8130 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright (c) 2016 Red Hat, Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: ovirt_tag
short_description: Module to manage tags in oVirt/RHV
version_added: "2.3"
author: "Ondra Machacek (@machacekondra)"
description:
- "This module manage tags in oVirt/RHV. It can also manage assignments
of those tags to entities."
options:
id:
description:
- "ID of the tag to manage."
version_added: "2.8"
name:
description:
- "Name of the tag to manage."
required: true
state:
description:
- "Should the tag be present/absent/attached/detached."
- "C(Note): I(attached) and I(detached) states are supported since version 2.4."
choices: ['present', 'absent', 'attached', 'detached']
default: present
description:
description:
- "Description of the tag to manage."
parent:
description:
- "Name of the parent tag."
vms:
description:
- "List of the VMs names, which should have assigned this tag."
hosts:
description:
- "List of the hosts names, which should have assigned this tag."
extends_documentation_fragment: ovirt
'''
EXAMPLES = '''
# Examples don't contain auth parameter for simplicity,
# look at ovirt_auth module to see how to reuse authentication:
# Create(if not exists) and assign tag to vms vm1 and vm2:
- ovirt_tag:
name: mytag
vms:
- vm1
- vm2
# Attach a tag to VM 'vm3', keeping the rest of the already attached tags on the VM:
- ovirt_tag:
name: mytag
state: attached
vms:
- vm3
# Detach a tag from VM 'vm3', keeping the rest of the already attached tags on the VM:
- ovirt_tag:
name: mytag
state: detached
vms:
- vm3
# To detach all VMs from tag:
- ovirt_tag:
name: mytag
vms: []
# Remove tag
- ovirt_tag:
state: absent
name: mytag
# Change Tag Name
- ovirt_tag:
id: 00000000-0000-0000-0000-000000000000
name: "new_tag_name"
'''
RETURN = '''
id:
description: ID of the tag which is managed
returned: On success if tag is found.
type: str
sample: 7de90f31-222c-436c-a1ca-7e655bd5b60c
tag:
description: "Dictionary of all the tag attributes. Tag attributes can be found on your oVirt/RHV instance
                  at the following url: http://ovirt.github.io/ovirt-engine-api-model/master/#types/tag."
returned: On success if tag is found.
type: dict
'''
import traceback
try:
import ovirtsdk4.types as otypes
except ImportError:
pass
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.ovirt import (
BaseModule,
check_sdk,
create_connection,
equal,
get_id_by_name,
ovirt_full_argument_spec,
)
class TagsModule(BaseModule):
def build_entity(self):
return otypes.Tag(
id=self._module.params['id'],
name=self._module.params['name'],
description=self._module.params['description'],
parent=otypes.Tag(
name=self._module.params['parent'],
) if self._module.params['parent'] else None,
)
def post_create(self, entity):
self.update_check(entity)
def _update_tag_assignments(self, entity, name):
if self._module.params[name] is None:
return
state = self.param('state')
entities_service = getattr(self._connection.system_service(), '%s_service' % name)()
current_vms = [
vm.name
for vm in entities_service.list(search='tag=%s' % self._module.params['name'])
]
# Assign tags:
if state in ['present', 'attached', 'detached']:
for entity_name in self._module.params[name]:
entity_id = get_id_by_name(entities_service, entity_name)
tags_service = entities_service.service(entity_id).tags_service()
current_tags = [tag.name for tag in tags_service.list()]
# Assign the tag:
if state in ['attached', 'present']:
if self._module.params['name'] not in current_tags:
if not self._module.check_mode:
tags_service.add(
tag=otypes.Tag(
name=self._module.params['name'],
),
)
self.changed = True
# Detach the tag:
elif state == 'detached':
if self._module.params['name'] in current_tags:
tag_id = get_id_by_name(tags_service, self.param('name'))
if not self._module.check_mode:
tags_service.tag_service(tag_id).remove()
self.changed = True
# Unassign tags:
if state == 'present':
for entity_name in [e for e in current_vms if e not in self._module.params[name]]:
if not self._module.check_mode:
entity_id = get_id_by_name(entities_service, entity_name)
tags_service = entities_service.service(entity_id).tags_service()
tag_id = get_id_by_name(tags_service, self.param('name'))
tags_service.tag_service(tag_id).remove()
self.changed = True
def _get_parent(self, entity):
parent = None
if entity.parent:
parent = self._connection.follow_link(entity.parent).name
return parent
def update_check(self, entity):
self._update_tag_assignments(entity, 'vms')
self._update_tag_assignments(entity, 'hosts')
return (
equal(self._module.params.get('description'), entity.description) and
equal(self._module.params.get('name'), entity.name) and
equal(self._module.params.get('parent'), self._get_parent(entity))
)
def main():
argument_spec = ovirt_full_argument_spec(
state=dict(
choices=['present', 'absent', 'attached', 'detached'],
default='present',
),
id=dict(default=None),
name=dict(required=True),
description=dict(default=None),
parent=dict(default=None),
vms=dict(default=None, type='list'),
hosts=dict(default=None, type='list'),
)
module = AnsibleModule(
argument_spec=argument_spec,
supports_check_mode=True,
)
check_sdk(module)
try:
auth = module.params.pop('auth')
connection = create_connection(auth)
tags_service = connection.system_service().tags_service()
tags_module = TagsModule(
connection=connection,
module=module,
service=tags_service,
)
state = module.params['state']
if state in ['present', 'attached', 'detached']:
ret = tags_module.create()
elif state == 'absent':
ret = tags_module.remove()
module.exit_json(**ret)
except Exception as e:
module.fail_json(msg=str(e), exception=traceback.format_exc())
finally:
connection.close(logout=auth.get('token') is None)
if __name__ == "__main__":
main()
| gpl-3.0 |
abramhindle/UnnaturalCodeFork | python/testdata/launchpad/lib/lp/soyuz/model/archivesubscriber.py | 1 | 9106 | # Copyright 2009-2011 Canonical Ltd. This software is licensed under the
# GNU Affero General Public License version 3 (see the file LICENSE).
"""Database class for table ArchiveSubscriber."""
__metaclass__ = type
__all__ = [
'ArchiveSubscriber',
]
from operator import itemgetter
import pytz
from storm.expr import (
And,
Desc,
Join,
LeftJoin,
)
from storm.locals import (
DateTime,
Int,
Reference,
Store,
Storm,
Unicode,
)
from storm.store import EmptyResultSet
from zope.component import getUtility
from zope.interface import implements
from lp.registry.interfaces.person import validate_person
from lp.registry.model.person import Person
from lp.registry.model.teammembership import TeamParticipation
from lp.services.database.constants import UTC_NOW
from lp.services.database.decoratedresultset import DecoratedResultSet
from lp.services.database.enumcol import DBEnum
from lp.services.identity.interfaces.emailaddress import EmailAddressStatus
from lp.services.identity.model.emailaddress import EmailAddress
from lp.soyuz.enums import ArchiveSubscriberStatus
from lp.soyuz.interfaces.archiveauthtoken import IArchiveAuthTokenSet
from lp.soyuz.interfaces.archivesubscriber import IArchiveSubscriber
from lp.soyuz.model.archiveauthtoken import ArchiveAuthToken
class ArchiveSubscriber(Storm):
"""See `IArchiveSubscriber`."""
implements(IArchiveSubscriber)
__storm_table__ = 'ArchiveSubscriber'
id = Int(primary=True)
archive_id = Int(name='archive', allow_none=False)
archive = Reference(archive_id, 'Archive.id')
registrant_id = Int(name='registrant', allow_none=False)
registrant = Reference(registrant_id, 'Person.id')
date_created = DateTime(
name='date_created', allow_none=False, tzinfo=pytz.UTC)
subscriber_id = Int(
name='subscriber', allow_none=False,
validator=validate_person)
subscriber = Reference(subscriber_id, 'Person.id')
date_expires = DateTime(
name='date_expires', allow_none=True, tzinfo=pytz.UTC)
status = DBEnum(
name='status', allow_none=False,
enum=ArchiveSubscriberStatus)
description = Unicode(name='description', allow_none=True)
date_cancelled = DateTime(
name='date_cancelled', allow_none=True, tzinfo=pytz.UTC)
cancelled_by_id = Int(name='cancelled_by', allow_none=True)
cancelled_by = Reference(cancelled_by_id, 'Person.id')
@property
def displayname(self):
"""See `IArchiveSubscriber`."""
return "%s's access to %s" % (
self.subscriber.displayname, self.archive.displayname)
def cancel(self, cancelled_by):
"""See `IArchiveSubscriber`."""
self.date_cancelled = UTC_NOW
self.cancelled_by = cancelled_by
self.status = ArchiveSubscriberStatus.CANCELLED
def getNonActiveSubscribers(self):
"""See `IArchiveSubscriber`."""
store = Store.of(self)
if self.subscriber.is_team:
# We get all the people who already have active tokens for
# this archive (for example, through separate subscriptions).
auth_token = LeftJoin(
ArchiveAuthToken,
And(ArchiveAuthToken.person_id == Person.id,
ArchiveAuthToken.archive_id == self.archive_id,
ArchiveAuthToken.date_deactivated == None))
team_participation = Join(
TeamParticipation,
TeamParticipation.personID == Person.id)
# Only return people with preferred email address set.
preferred_email = Join(
EmailAddress, EmailAddress.personID == Person.id)
# We want to get all participants who are themselves
# individuals, not teams:
non_active_subscribers = store.using(
Person, team_participation, preferred_email, auth_token).find(
(Person, EmailAddress),
EmailAddress.status == EmailAddressStatus.PREFERRED,
TeamParticipation.teamID == self.subscriber_id,
Person.teamowner == None,
# There is no existing archive auth token.
ArchiveAuthToken.person_id == None)
non_active_subscribers.order_by(Person.name)
return non_active_subscribers
else:
# Subscriber is not a team.
token_set = getUtility(IArchiveAuthTokenSet)
if token_set.getActiveTokenForArchiveAndPerson(
self.archive, self.subscriber) is not None:
# There are active tokens, so return an empty result
# set.
return EmptyResultSet()
# Otherwise return a result set containing only the
# subscriber and their preferred email address.
return store.find(
(Person, EmailAddress),
Person.id == self.subscriber_id,
EmailAddress.personID == Person.id,
EmailAddress.status == EmailAddressStatus.PREFERRED)
class ArchiveSubscriberSet:
"""See `IArchiveSubscriberSet`."""
def _getBySubscriber(self, subscriber, archive, current_only,
with_active_tokens):
"""Return all the subscriptions for a person.
:param subscriber: An `IPerson` for whom to return all
`ArchiveSubscriber` records.
:param archive: An optional `IArchive` which restricts
the results to that particular archive.
:param current_only: Whether the result should only include current
subscriptions (which is the default).
:param with_active_tokens: Indicates whether the tokens for the given
            subscriber's subscriptions should be included in the resultset.
By default the tokens are not included in the resultset.
^ """
# Grab the extra Storm expressions, for this query,
# depending on the params:
extra_exprs = self._getExprsForSubscriptionQueries(
archive, current_only)
origin = [
ArchiveSubscriber,
Join(
TeamParticipation,
TeamParticipation.teamID == ArchiveSubscriber.subscriber_id)]
if with_active_tokens:
result_row = (ArchiveSubscriber, ArchiveAuthToken)
# We need a left join with ArchiveSubscriber as
# the origin:
origin.append(
LeftJoin(
ArchiveAuthToken,
And(
ArchiveAuthToken.archive_id ==
ArchiveSubscriber.archive_id,
ArchiveAuthToken.person_id == subscriber.id,
ArchiveAuthToken.date_deactivated == None)))
else:
result_row = ArchiveSubscriber
# Set the main expression to find all the subscriptions for
# which the subscriber is a direct subscriber OR is a member
# of a subscribed team.
# Note: the subscription to the owner itself will also be
# part of the join as there is a TeamParticipation entry
# showing that each person is a member of the "team" that
# consists of themselves.
store = Store.of(subscriber)
return store.using(*origin).find(
result_row,
TeamParticipation.personID == subscriber.id,
*extra_exprs).order_by(Desc(ArchiveSubscriber.date_created))
def getBySubscriber(self, subscriber, archive=None, current_only=True):
"""See `IArchiveSubscriberSet`."""
return self._getBySubscriber(subscriber, archive, current_only, False)
def getBySubscriberWithActiveToken(self, subscriber, archive=None):
"""See `IArchiveSubscriberSet`."""
return self._getBySubscriber(subscriber, archive, True, True)
def getByArchive(self, archive, current_only=True):
"""See `IArchiveSubscriberSet`."""
extra_exprs = self._getExprsForSubscriptionQueries(
archive, current_only)
store = Store.of(archive)
result = store.using(ArchiveSubscriber,
Join(Person, ArchiveSubscriber.subscriber_id == Person.id)).find(
(ArchiveSubscriber, Person),
*extra_exprs).order_by(Person.name)
return DecoratedResultSet(result, itemgetter(0))
def _getExprsForSubscriptionQueries(self, archive=None,
current_only=True):
"""Return the Storm expressions required for the parameters.
Just to keep the code DRY.
"""
extra_exprs = []
# Restrict the results to the specified archive if requested:
if archive:
extra_exprs.append(ArchiveSubscriber.archive == archive)
# Restrict the results to only those subscriptions that are current
# if requested:
if current_only:
extra_exprs.append(
ArchiveSubscriber.status == ArchiveSubscriberStatus.CURRENT)
return extra_exprs
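# Usage sketch (not part of the original module): 'person' and 'ppa' below are
# assumed to be an IPerson and an IArchive obtained elsewhere; the call shows
# the typical read path through ArchiveSubscriberSet.
#
#     subscriptions = ArchiveSubscriberSet().getBySubscriber(person, archive=ppa)
#     for subscription in subscriptions:
#         print subscription.displayname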
| agpl-3.0 |
iffy/AutobahnPython | examples/twisted/wamp/rpc/errors/backend.py | 2 | 3487 | ###############################################################################
#
# The MIT License (MIT)
#
# Copyright (c) Tavendo GmbH
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
###############################################################################
import math
from os import environ
from twisted.internet.defer import inlineCallbacks
from autobahn import wamp
from autobahn.wamp.exception import ApplicationError
from autobahn.twisted.wamp import ApplicationSession, ApplicationRunner
@wamp.error(u"com.myapp.error1")
class AppError1(Exception):
"""
An application specific exception that is decorated with a WAMP URI,
and hence can be automapped by Autobahn.
"""
class Component(ApplicationSession):
"""
    Example WAMP application backend that raises exceptions.
"""
@inlineCallbacks
def onJoin(self, details):
print("session attached")
# raising standard exceptions
##
def sqrt(x):
if x == 0:
raise Exception("don't ask foolish questions;)")
else:
# this also will raise, if x < 0
return math.sqrt(x)
yield self.register(sqrt, u'com.myapp.sqrt')
# raising WAMP application exceptions
##
def checkname(name):
if name in ['foo', 'bar']:
raise ApplicationError("com.myapp.error.reserved")
if name.lower() != name.upper():
# forward positional arguments in exceptions
raise ApplicationError("com.myapp.error.mixed_case", name.lower(), name.upper())
if len(name) < 3 or len(name) > 10:
# forward keyword arguments in exceptions
raise ApplicationError("com.myapp.error.invalid_length", min=3, max=10)
yield self.register(checkname, 'com.myapp.checkname')
# defining and automapping WAMP application exceptions
##
self.define(AppError1)
def compare(a, b):
if a < b:
raise AppError1(b - a)
yield self.register(compare, 'com.myapp.compare')
print("procedures registered")
if __name__ == '__main__':
runner = ApplicationRunner(
environ.get("AUTOBAHN_DEMO_ROUTER", "ws://localhost:8080/ws"),
u"crossbardemo",
debug_wamp=False, # optional; log many WAMP details
debug=False, # optional; log even more details
)
runner.run(Component)
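# Caller-side sketch (not part of this example): a frontend session that calls
# com.myapp.compare would see AppError1 surface as an ApplicationError with the
# error URI u"com.myapp.error1", since the backend automaps it via self.define().
# The 'session' object and the argument values are assumptions for illustration:
#
#     try:
#         yield session.call(u'com.myapp.compare', 3, 17)
#     except ApplicationError as e:
#         # e.error == u'com.myapp.error1'; the forwarded difference (14) is in e.args
#         print("compare failed: {}".format(e))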
| mit |
cee1/cerbero-mac | test/test_cerbero_packages_packagesstore.py | 27 | 5247 | # cerbero - a multi-platform build system for Open Source software
# Copyright (C) 2012 Andoni Morales Alastruey <ylatuya@gmail.com>
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Library General Public
# License as published by the Free Software Foundation; either
# version 2 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Library General Public License for more details.
#
# You should have received a copy of the GNU Library General Public
# License along with this library; if not, write to the
# Free Software Foundation, Inc., 59 Temple Place - Suite 330,
# Boston, MA 02111-1307, USA.
import unittest
import tempfile
from cerbero.config import Platform
from cerbero.errors import PackageNotFoundError
from cerbero.packages.package import Package, MetaPackage, SDKPackage,\
InstallerPackage, App
from cerbero.packages.packagesstore import PackagesStore
from test import test_packages_common as common
PACKAGE = '''
class Package(package.Package):
name = 'test-package'
def test_imports(self):
Platform.WINDOWS
Distro.WINDOWS
DistroVersion.WINDOWS_7
Architecture.X86
'''
SDKPACKAGE = '''
class SDKPackage(package.SDKPackage):
name = 'test-package'
'''
INSTALLERPACKAGE = '''
class InstallerPackage(package.InstallerPackage):
name = 'test-package'
'''
class PackageTest(unittest.TestCase):
def setUp(self):
self.config = common.DummyConfig()
self.config.packages_dir = '/test'
self.config.target_platform = Platform.LINUX
self.store = PackagesStore(self.config, False)
def testAddPackage(self):
package = common.Package1(self.config, None, None)
self.assertEquals(len(self.store._packages), 0)
self.store.add_package(package)
self.assertEquals(len(self.store._packages), 1)
self.assertEquals(package, self.store._packages[package.name])
def testGetPackage(self):
package = common.Package1(self.config, None, None)
self.store.add_package(package)
self.assertEquals(package, self.store.get_package(package.name))
def testPackageNotFound(self):
self.failUnlessRaises(PackageNotFoundError, self.store.get_package,
'unknown')
def testPackagesList(self):
package = common.Package1(self.config, None, None)
metapackage = common.MetaPackage(self.config, None)
self.store.add_package(package)
self.store.add_package(metapackage)
l = sorted([package, metapackage], key=lambda x: x.name)
self.assertEquals(l, self.store.get_packages_list())
def testPackageDeps(self):
package = common.Package1(self.config, None, None)
package2 = common.Package2(self.config, None, None)
self.store.add_package(package)
self.store.add_package(package2)
self.assertEquals(package.deps,
[x.name for x in self.store.get_package_deps(package.name)])
def testMetaPackageDeps(self):
metapackage = common.MetaPackage(self.config, None)
self.store.add_package(metapackage)
        # the metapackage depends on packages that are not yet in the store
self.failUnlessRaises(PackageNotFoundError,
self.store.get_package_deps, metapackage.name)
for klass in [common.Package1, common.Package2, common.Package3,
common.Package4]:
p = klass(self.config, None, None)
self.store.add_package(p)
for klass in [common.MetaPackage]:
p = klass(self.config, None)
self.store.add_package(p)
deps = ['gstreamer-test-bindings', 'gstreamer-test1',
'gstreamer-test2', 'gstreamer-test3']
res = [x.name for x in self.store.get_package_deps(metapackage.name)]
self.assertEquals(sorted(deps), sorted(res))
def testLoadPackageFromFile(self):
package_file = tempfile.NamedTemporaryFile()
package_file.write(PACKAGE)
package_file.flush()
p = self.store._load_package_from_file(package_file.name)
self.assertIsInstance(p, Package)
self.assertEquals('test-package', p.name)
def testLoadMetaPackageFromFile(self):
for x, t in [(SDKPACKAGE, SDKPackage),
(INSTALLERPACKAGE, InstallerPackage)]:
package_file = tempfile.NamedTemporaryFile()
package_file.write(x)
package_file.flush()
p = self.store._load_package_from_file(package_file.name)
print p, type(p)
self.assertIsInstance(p, t)
self.assertEquals('test-package', p.name)
def testImports(self):
package_file = tempfile.NamedTemporaryFile()
package_file.write(PACKAGE)
package_file.flush()
p = self.store._load_package_from_file(package_file.name)
self.assertIsInstance(p, Package)
try:
p.test_imports()
except ImportError, e:
self.fail("Import error raised, %s", e)
| lgpl-2.1 |
glass-bead-labs/data-reflector | comment_tracker/github_comments.py | 2 | 3106 | import requests
import json
class GitHubComments:
key = '63f38f069567d2468f1be592301eb096acb2682b'
def __init__(self, owner, repo):
self.owner = owner
self.repo = repo
parameters = {'state': 'all', 'per_page': 100}
self.request_issues = requests.get('https://api.github.com/repos/' + self.owner +
'/' + self.repo + '/issues', auth=(GitHubComments.key, 'x-oauth-basic'), params=parameters).json()
parameters = {'per_page': 100, 'direction': 'desc', 'sort': 'created'}
self.request_repo_comments = requests.get('https://api.github.com/repos/'
+ self.owner + '/' + self.repo + '/issues/comments', auth=(GitHubComments.key, 'x-oauth-basic'), params=parameters).headers
self.all_comments = self.__get_all_comments()
def __get_all_comments(self):
# Returns a list with all comments in dictionaries (most recent to oldest).
parameters = {'state': 'all', 'per_page': 100}
comments_page = self.request_repo_comments['link'].split(';')[1].split(',')[1][2:-1]
        # Split on the final '=' so multi-digit "last" page numbers work.
        comments_page_URL, _, last_page = comments_page.rpartition('=')
        comments_page_URL += '='
        last_page_number = int(last_page)
all_comments = []
for i in range(1, last_page_number + 1):
comments_URL = comments_page_URL + str(i)
all_comments += requests.get(comments_URL, params=parameters).json()
return all_comments
def get_issue_with_index(self, i):
# Returns the number assigned to the issue when made.
return str(self.request_issues[i]['number'])
def get_issue_with_title(self, title):
# Returns the number assigned to the issue when made.
        for i in range(len(self.request_issues)):
if title == str(self.request_issues[i]['title']):
return str(self.request_issues[i]['number'])
def get_all_creators(self):
creators = []
for i in range(len(self.all_comments)):
if self.get_creator(i) not in creators:
creators.append(self.get_creator(i))
return creators
def get_comments_issue(self, num):
        # GET request to get all the comments specific to an issue.
parameters = {'per_page': 100, 'direction': 'desc', 'sort': 'created'}
return requests.get('https://api.github.com/repos/' + self.owner +
'/' + self.repo + '/issues/' + str(num) + '/comments', auth=(GitHubComments.key, 'x-oauth-basic'),
params=parameters).json()
def get_creator(self, i):
        # Returns the creator of a specific comment.
return str(self.all_comments[i]['user']['login'])
def get_message(self, i):
# Returns what the comment says.
return str(self.all_comments[i]['body'])
def get_comments_creator(self, creator):
        # Returns the indices of the comments that have a specific creator.
index = []
for i in range(len(self.all_comments)):
if self.get_creator(i) == creator:
index.append(i)
return index
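# Usage sketch: owner/repo below are assumptions, and real requests need network
# access plus a valid OAuth token in GitHubComments.key (the hardcoded one above
# may no longer be valid).
#
#     gh = GitHubComments('glass-bead-labs', 'data-reflector')
#     print(gh.get_all_creators())
#     for i in gh.get_comments_creator('some-user')[:5]:
#         print(gh.get_creator(i), gh.get_message(i))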
| isc |
ppizarror/Ned-For-Spod | bin/external/mechanize/_mozillacookiejar.py | 3 | 6323 | """Mozilla / Netscape cookie loading / saving.
Copyright 2002-2006 John J Lee <jjl@pobox.com>
Copyright 1997-1999 Gisle Aas (original libwww-perl code)
This code is free software; you can redistribute it and/or modify it
under the terms of the BSD or ZPL 2.1 licenses (see the file
COPYING.txt included with the distribution).
"""
import re, time, logging
from _clientcookie import reraise_unmasked_exceptions, FileCookieJar, Cookie, \
MISSING_FILENAME_TEXT, LoadError
debug = logging.getLogger("ClientCookie").debug
class MozillaCookieJar(FileCookieJar):
"""
WARNING: you may want to backup your browser's cookies file if you use
this class to save cookies. I *think* it works, but there have been
bugs in the past!
This class differs from CookieJar only in the format it uses to save and
load cookies to and from a file. This class uses the Mozilla/Netscape
`cookies.txt' format. lynx uses this file format, too.
Don't expect cookies saved while the browser is running to be noticed by
the browser (in fact, Mozilla on unix will overwrite your saved cookies if
you change them on disk while it's running; on Windows, you probably can't
save at all while the browser is running).
Note that the Mozilla/Netscape format will downgrade RFC2965 cookies to
Netscape cookies on saving.
In particular, the cookie version and port number information is lost,
together with information about whether or not Path, Port and Discard were
specified by the Set-Cookie2 (or Set-Cookie) header, and whether or not the
domain as set in the HTTP header started with a dot (yes, I'm aware some
domains in Netscape files start with a dot and some don't -- trust me, you
really don't want to know any more about this).
Note that though Mozilla and Netscape use the same format, they use
slightly different headers. The class saves cookies using the Netscape
header by default (Mozilla can cope with that).
"""
magic_re = "#( Netscape)? HTTP Cookie File"
header = """\
# Netscape HTTP Cookie File
# http://www.netscape.com/newsref/std/cookie_spec.html
# This is a generated file! Do not edit.
"""
def _really_load(self, f, filename, ignore_discard, ignore_expires):
now = time.time()
magic = f.readline()
if not re.search(self.magic_re, magic):
f.close()
raise LoadError(
"%s does not look like a Netscape format cookies file" %
filename)
try:
while 1:
line = f.readline()
if line == "": break
# last field may be absent, so keep any trailing tab
if line.endswith("\n"): line = line[:-1]
# skip comments and blank lines XXX what is $ for?
if (line.strip().startswith("#") or
line.strip().startswith("$") or
line.strip() == ""):
continue
domain, domain_specified, path, secure, expires, name, value = \
line.split("\t", 6)
secure = (secure == "TRUE")
domain_specified = (domain_specified == "TRUE")
if name == "":
name = value
value = None
initial_dot = domain.startswith(".")
if domain_specified != initial_dot:
raise LoadError("domain and domain specified flag don't "
"match in %s: %s" % (filename, line))
discard = False
if expires == "":
expires = None
discard = True
# assume path_specified is false
c = Cookie(0, name, value,
None, False,
domain, domain_specified, initial_dot,
path, False,
secure,
expires,
discard,
None,
None,
{})
if not ignore_discard and c.discard:
continue
if not ignore_expires and c.is_expired(now):
continue
self.set_cookie(c)
except:
reraise_unmasked_exceptions((IOError, LoadError))
raise LoadError("invalid Netscape format file %s: %s" %
(filename, line))
def save(self, filename=None, ignore_discard=False, ignore_expires=False):
if filename is None:
if self.filename is not None: filename = self.filename
else: raise ValueError(MISSING_FILENAME_TEXT)
f = open(filename, "w")
try:
debug("Saving Netscape cookies.txt file")
f.write(self.header)
now = time.time()
for cookie in self:
if not ignore_discard and cookie.discard:
debug(" Not saving %s: marked for discard", cookie.name)
continue
if not ignore_expires and cookie.is_expired(now):
debug(" Not saving %s: expired", cookie.name)
continue
if cookie.secure: secure = "TRUE"
else: secure = "FALSE"
if cookie.domain.startswith("."): initial_dot = "TRUE"
else: initial_dot = "FALSE"
if cookie.expires is not None:
expires = str(cookie.expires)
else:
expires = ""
if cookie.value is None:
# cookies.txt regards 'Set-Cookie: foo' as a cookie
# with no name, whereas cookielib regards it as a
# cookie with no value.
name = ""
value = cookie.name
else:
name = cookie.name
value = cookie.value
f.write(
"\t".join([cookie.domain, initial_dot, cookie.path,
secure, expires, name, value])+
"\n")
finally:
f.close()
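# Usage sketch (the cookies.txt path is an assumption; the file must already be
# in Netscape/Mozilla format or load() raises LoadError):
#
#     cj = MozillaCookieJar("cookies.txt")
#     cj.load(ignore_discard=True, ignore_expires=True)
#     # ... attach cj to an opener and make requests ...
#     cj.save(ignore_discard=True, ignore_expires=True)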
| gpl-2.0 |
openhatch/oh-mainline | vendor/packages/irc/irc/util.py | 19 | 1109 | from __future__ import division, absolute_import
import six
# from jaraco.util.itertools
def always_iterable(item):
"""
Given an object, always return an iterable. If the item is not
already iterable, return a tuple containing only the item.
>>> always_iterable([1,2,3])
[1, 2, 3]
>>> always_iterable('foo')
('foo',)
>>> always_iterable(None)
(None,)
>>> import itertools
>>> numbers = itertools.count(10)
>>> always_iterable(numbers) is numbers
True
"""
if isinstance(item, six.string_types) or not hasattr(item, '__iter__'):
item = item,
return item
def total_seconds(td):
"""
Python 2.7 adds a total_seconds method to timedelta objects.
See http://docs.python.org/library/datetime.html#datetime.timedelta.total_seconds
>>> import datetime
>>> total_seconds(datetime.timedelta(hours=24))
86400.0
"""
try:
result = td.total_seconds()
except AttributeError:
seconds = td.seconds + td.days * 24 * 3600
result = (td.microseconds + seconds * 10**6) / 10**6
return result
| agpl-3.0 |
ammarkhann/FinalSeniorCode | lib/python2.7/site-packages/django/contrib/sessions/middleware.py | 36 | 2664 | import time
from importlib import import_module
from django.conf import settings
from django.utils.cache import patch_vary_headers
from django.utils.http import cookie_date
class SessionMiddleware(object):
def __init__(self):
engine = import_module(settings.SESSION_ENGINE)
self.SessionStore = engine.SessionStore
def process_request(self, request):
session_key = request.COOKIES.get(settings.SESSION_COOKIE_NAME, None)
request.session = self.SessionStore(session_key)
def process_response(self, request, response):
"""
If request.session was modified, or if the configuration is to save the
session every time, save the changes and set a session cookie or delete
the session cookie if the session has been emptied.
"""
try:
accessed = request.session.accessed
modified = request.session.modified
empty = request.session.is_empty()
except AttributeError:
pass
else:
# First check if we need to delete this cookie.
# The session should be deleted only if the session is entirely empty
if settings.SESSION_COOKIE_NAME in request.COOKIES and empty:
response.delete_cookie(settings.SESSION_COOKIE_NAME,
domain=settings.SESSION_COOKIE_DOMAIN)
else:
if accessed:
patch_vary_headers(response, ('Cookie',))
if (modified or settings.SESSION_SAVE_EVERY_REQUEST) and not empty:
if request.session.get_expire_at_browser_close():
max_age = None
expires = None
else:
max_age = request.session.get_expiry_age()
expires_time = time.time() + max_age
expires = cookie_date(expires_time)
# Save the session data and refresh the client cookie.
# Skip session save for 500 responses, refs #3881.
if response.status_code != 500:
request.session.save()
response.set_cookie(settings.SESSION_COOKIE_NAME,
request.session.session_key, max_age=max_age,
expires=expires, domain=settings.SESSION_COOKIE_DOMAIN,
path=settings.SESSION_COOKIE_PATH,
secure=settings.SESSION_COOKIE_SECURE or None,
httponly=settings.SESSION_COOKIE_HTTPONLY or None)
return response
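# Usage sketch: in this (pre-1.10 style) Django, the middleware is activated by
# listing it in MIDDLEWARE_CLASSES; the second entry below is only an example:
#
#     MIDDLEWARE_CLASSES = [
#         'django.contrib.sessions.middleware.SessionMiddleware',
#         'django.middleware.common.CommonMiddleware',
#     ]
#
# Views can then read and write request.session like a dict; process_response()
# above saves the data and refreshes the session cookie as needed.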
| mit |