repo_name stringlengths 5 100 | path stringlengths 4 375 | copies stringclasses 991
values | size stringlengths 4 7 | content stringlengths 666 1M | license stringclasses 15
values |
|---|---|---|---|---|---|
RydrDojo/Ridr | pylotVenv/lib/python2.7/site-packages/pip/_vendor/requests/packages/chardet/langhungarianmodel.py | 2763 | 12536 | ######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Communicator client code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
# 255: Control characters that usually does not exist in any text
# 254: Carriage/Return
# 253: symbol (punctuation) that does not belong to word
# 252: 0 - 9
# Character Mapping Table:
# 256-entry map from an ISO-8859-2 (Latin-2) byte value to a character
# "order" used by the single-byte-charset prober: lower numbers mark more
# frequent Hungarian letters; 252-255 are the sentinel classes documented
# in the comment block above (digits, symbols, CR/LF, control characters).
Latin2_HungarianCharToOrderMap = (
255,255,255,255,255,255,255,255,255,255,254,255,255,254,255,255, # 00
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, # 10
253,253,253,253,253,253,253,253,253,253,253,253,253,253,253,253, # 20
252,252,252,252,252,252,252,252,252,252,253,253,253,253,253,253, # 30
253, 28, 40, 54, 45, 32, 50, 49, 38, 39, 53, 36, 41, 34, 35, 47,
46, 71, 43, 33, 37, 57, 48, 64, 68, 55, 52,253,253,253,253,253,
253, 2, 18, 26, 17, 1, 27, 12, 20, 9, 22, 7, 6, 13, 4, 8,
23, 67, 10, 5, 3, 21, 19, 65, 62, 16, 11,253,253,253,253,253,
159,160,161,162,163,164,165,166,167,168,169,170,171,172,173,174,
175,176,177,178,179,180,181,182,183,184,185,186,187,188,189,190,
191,192,193,194,195,196,197, 75,198,199,200,201,202,203,204,205,
79,206,207,208,209,210,211,212,213,214,215,216,217,218,219,220,
221, 51, 81,222, 78,223,224,225,226, 44,227,228,229, 61,230,231,
232,233,234, 58,235, 66, 59,236,237,238, 60, 69, 63,239,240,241,
82, 14, 74,242, 70, 80,243, 72,244, 15, 83, 77, 84, 30, 76, 85,
245,246,247, 25, 73, 42, 24,248,249,250, 31, 56, 29,251,252,253,
)
# Same character-to-order mapping as above, but for the windows-1250 code
# page; the two tables agree on ASCII and differ only in the upper half,
# where the accented-letter code points diverge between the encodings.
win1250HungarianCharToOrderMap = (
255,255,255,255,255,255,255,255,255,255,254,255,255,254,255,255, # 00
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, # 10
253,253,253,253,253,253,253,253,253,253,253,253,253,253,253,253, # 20
252,252,252,252,252,252,252,252,252,252,253,253,253,253,253,253, # 30
253, 28, 40, 54, 45, 32, 50, 49, 38, 39, 53, 36, 41, 34, 35, 47,
46, 72, 43, 33, 37, 57, 48, 64, 68, 55, 52,253,253,253,253,253,
253, 2, 18, 26, 17, 1, 27, 12, 20, 9, 22, 7, 6, 13, 4, 8,
23, 67, 10, 5, 3, 21, 19, 65, 62, 16, 11,253,253,253,253,253,
161,162,163,164,165,166,167,168,169,170,171,172,173,174,175,176,
177,178,179,180, 78,181, 69,182,183,184,185,186,187,188,189,190,
191,192,193,194,195,196,197, 76,198,199,200,201,202,203,204,205,
81,206,207,208,209,210,211,212,213,214,215,216,217,218,219,220,
221, 51, 83,222, 80,223,224,225,226, 44,227,228,229, 61,230,231,
232,233,234, 58,235, 66, 59,236,237,238, 60, 70, 63,239,240,241,
84, 14, 75,242, 71, 82,243, 73,244, 15, 85, 79, 86, 30, 77, 87,
245,246,247, 25, 74, 42, 24,248,249,250, 31, 56, 29,251,252,253,
)
# Model Table:
# total sequences: 100%
# first 512 sequences: 94.7368%
# first 1024 sequences:5.2623%
# rest sequences: 0.8894%
# negative sequences: 0.0009%
HungarianLangModel = (
0,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,1,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,
3,3,3,3,3,3,3,3,3,3,2,3,3,3,3,3,3,3,3,2,2,3,3,1,1,2,2,2,2,2,1,2,
3,2,2,3,3,3,3,3,2,3,3,3,3,3,3,1,2,3,3,3,3,2,3,3,1,1,3,3,0,1,1,1,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,
3,2,1,3,3,3,3,3,2,3,3,3,3,3,1,1,2,3,3,3,3,3,3,3,1,1,3,2,0,1,1,1,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,
3,3,3,3,3,3,3,3,3,3,3,1,1,2,3,3,3,1,3,3,3,3,3,1,3,3,2,2,0,3,2,3,
0,0,0,0,0,0,0,0,0,0,3,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,
3,3,3,3,3,3,2,3,3,3,2,3,3,2,3,3,3,3,3,2,3,3,2,2,3,2,3,2,0,3,2,2,
0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,1,0,
3,3,3,3,3,3,2,3,3,3,3,3,2,3,3,3,1,2,3,2,2,3,1,2,3,3,2,2,0,3,3,3,
0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,
3,3,3,3,3,3,3,3,3,3,2,2,3,3,3,3,3,3,2,3,3,3,3,2,3,3,3,3,0,2,3,2,
0,0,0,1,1,0,0,0,0,0,3,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,
3,3,3,3,3,3,3,3,3,3,3,1,1,1,3,3,2,1,3,2,2,3,2,1,3,2,2,1,0,3,3,1,
0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,
3,2,2,3,3,3,3,3,1,2,3,3,3,3,1,2,1,3,3,3,3,2,2,3,1,1,3,2,0,1,1,1,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,
3,3,3,3,3,3,3,3,2,2,3,3,3,3,3,2,1,3,3,3,3,3,2,2,1,3,3,3,0,1,1,2,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,1,0,
3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,2,3,3,3,2,3,3,2,3,3,3,2,0,3,2,3,
0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,1,0,
3,3,3,3,3,3,2,3,3,3,2,3,2,3,3,3,1,3,2,2,2,3,1,1,3,3,1,1,0,3,3,2,
0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,
3,3,3,3,3,3,3,2,3,3,3,2,3,2,3,3,3,2,3,3,3,3,3,1,2,3,2,2,0,2,2,2,
0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,
3,3,3,2,2,2,3,1,3,3,2,2,1,3,3,3,1,1,3,1,2,3,2,3,2,2,2,1,0,2,2,2,
0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,
3,1,1,3,3,3,3,3,1,2,3,3,3,3,1,2,1,3,3,3,2,2,3,2,1,0,3,2,0,1,1,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,1,1,3,3,3,3,3,1,2,3,3,3,3,1,1,0,3,3,3,3,0,2,3,0,0,2,1,0,1,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,3,3,3,3,2,2,3,3,2,2,2,2,3,3,0,1,2,3,2,3,2,2,3,2,1,2,0,2,2,2,
0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,
3,3,3,3,3,3,1,2,3,3,3,2,1,2,3,3,2,2,2,3,2,3,3,1,3,3,1,1,0,2,3,2,
0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,
3,3,3,1,2,2,2,2,3,3,3,1,1,1,3,3,1,1,3,1,1,3,2,1,2,3,1,1,0,2,2,2,
0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,
3,3,3,2,1,2,1,1,3,3,1,1,1,1,3,3,1,1,2,2,1,2,1,1,2,2,1,1,0,2,2,1,
0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,
3,3,3,1,1,2,1,1,3,3,1,0,1,1,3,3,2,0,1,1,2,3,1,0,2,2,1,0,0,1,3,2,
0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,
3,2,1,3,3,3,3,3,1,2,3,2,3,3,2,1,1,3,2,3,2,1,2,2,0,1,2,1,0,0,1,1,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,
3,3,3,3,2,2,2,2,3,1,2,2,1,1,3,3,0,3,2,1,2,3,2,1,3,3,1,1,0,2,1,3,
0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,
3,3,3,2,2,2,3,2,3,3,3,2,1,1,3,3,1,1,1,2,2,3,2,3,2,2,2,1,0,2,2,1,
0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,
1,0,0,3,3,3,3,3,0,0,3,3,2,3,0,0,0,2,3,3,1,0,1,2,0,0,1,1,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,1,2,3,3,3,3,3,1,2,3,3,2,2,1,1,0,3,3,2,2,1,2,2,1,0,2,2,0,1,1,1,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,2,2,1,3,1,2,3,3,2,2,1,1,2,2,1,1,1,1,3,2,1,1,1,1,2,1,0,1,2,1,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,1,0,0,0,0,0,0,0,0,0,
2,3,3,1,1,1,1,1,3,3,3,0,1,1,3,3,1,1,1,1,1,2,2,0,3,1,1,2,0,2,1,1,
0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,
3,1,0,1,2,1,2,2,0,1,2,3,1,2,0,0,0,2,1,1,1,1,1,2,0,0,1,1,0,0,0,0,
1,2,1,2,2,2,1,2,1,2,0,2,0,2,2,1,1,2,1,1,2,1,1,1,0,1,0,0,0,1,1,0,
1,1,1,2,3,2,3,3,0,1,2,2,3,1,0,1,0,2,1,2,2,0,1,1,0,0,1,1,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1,0,0,3,3,2,2,1,0,0,3,2,3,2,0,0,0,1,1,3,0,0,1,1,0,0,2,1,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,1,1,2,2,3,3,1,0,1,3,2,3,1,1,1,0,1,1,1,1,1,3,1,0,0,2,2,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,1,1,1,2,2,2,1,0,1,2,3,3,2,0,0,0,2,1,1,1,2,1,1,1,0,1,1,1,0,0,0,
1,2,2,2,2,2,1,1,1,2,0,2,1,1,1,1,1,2,1,1,1,1,1,1,0,1,1,1,0,0,1,1,
3,2,2,1,0,0,1,1,2,2,0,3,0,1,2,1,1,0,0,1,1,1,0,1,1,1,1,0,2,1,1,1,
2,2,1,1,1,2,1,2,1,1,1,1,1,1,1,2,1,1,1,2,3,1,1,1,1,1,1,1,1,1,0,1,
2,3,3,0,1,0,0,0,3,3,1,0,0,1,2,2,1,0,0,0,0,2,0,0,1,1,1,0,2,1,1,1,
2,1,1,1,1,1,1,2,1,1,0,1,1,0,1,1,1,0,1,2,1,1,0,1,1,1,1,1,1,1,0,1,
2,3,3,0,1,0,0,0,2,2,0,0,0,0,1,2,2,0,0,0,0,1,0,0,1,1,0,0,2,0,1,0,
2,1,1,1,1,2,1,1,1,1,1,1,1,2,1,1,1,1,1,1,1,1,1,2,0,1,1,1,1,1,0,1,
3,2,2,0,1,0,1,0,2,3,2,0,0,1,2,2,1,0,0,1,1,1,0,0,2,1,0,1,2,2,1,1,
2,1,1,1,1,1,1,2,1,1,1,1,1,1,0,2,1,0,1,1,0,1,1,1,0,1,1,2,1,1,0,1,
2,2,2,0,0,1,0,0,2,2,1,1,0,0,2,1,1,0,0,0,1,2,0,0,2,1,0,0,2,1,1,1,
2,1,1,1,1,2,1,2,1,1,1,2,2,1,1,2,1,1,1,2,1,1,1,1,1,1,1,1,1,1,0,1,
1,2,3,0,0,0,1,0,3,2,1,0,0,1,2,1,1,0,0,0,0,2,1,0,1,1,0,0,2,1,2,1,
1,1,0,0,0,1,0,1,1,1,1,1,2,0,0,1,0,0,0,2,0,0,1,1,1,1,1,1,1,1,0,1,
3,0,0,2,1,2,2,1,0,0,2,1,2,2,0,0,0,2,1,1,1,0,1,1,0,0,1,1,2,0,0,0,
1,2,1,2,2,1,1,2,1,2,0,1,1,1,1,1,1,1,1,1,2,1,1,0,0,1,1,1,1,0,0,1,
1,3,2,0,0,0,1,0,2,2,2,0,0,0,2,2,1,0,0,0,0,3,1,1,1,1,0,0,2,1,1,1,
2,1,0,1,1,1,0,1,1,1,1,1,1,1,0,2,1,0,0,1,0,1,1,0,1,1,1,1,1,1,0,1,
2,3,2,0,0,0,1,0,2,2,0,0,0,0,2,1,1,0,0,0,0,2,1,0,1,1,0,0,2,1,1,0,
2,1,1,1,1,2,1,2,1,2,0,1,1,1,0,2,1,1,1,2,1,1,1,1,0,1,1,1,1,1,0,1,
3,1,1,2,2,2,3,2,1,1,2,2,1,1,0,1,0,2,2,1,1,1,1,1,0,0,1,1,0,1,1,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
2,2,2,0,0,0,0,0,2,2,0,0,0,0,2,2,1,0,0,0,1,1,0,0,1,2,0,0,2,1,1,1,
2,2,1,1,1,2,1,2,1,1,0,1,1,1,1,2,1,1,1,2,1,1,1,1,0,1,2,1,1,1,0,1,
1,0,0,1,2,3,2,1,0,0,2,0,1,1,0,0,0,1,1,1,1,0,1,1,0,0,1,0,0,0,0,0,
1,2,1,2,1,2,1,1,1,2,0,2,1,1,1,0,1,2,0,0,1,1,1,0,0,0,0,0,0,0,0,0,
2,3,2,0,0,0,0,0,1,1,2,1,0,0,1,1,1,0,0,0,0,2,0,0,1,1,0,0,2,1,1,1,
2,1,1,1,1,1,1,2,1,0,1,1,1,1,0,2,1,1,1,1,1,1,0,1,0,1,1,1,1,1,0,1,
1,2,2,0,1,1,1,0,2,2,2,0,0,0,3,2,1,0,0,0,1,1,0,0,1,1,0,1,1,1,0,0,
1,1,0,1,1,1,1,1,1,1,1,2,1,1,1,1,1,1,1,2,1,1,1,0,0,1,1,1,0,1,0,1,
2,1,0,2,1,1,2,2,1,1,2,1,1,1,0,0,0,1,1,0,1,1,1,1,0,0,1,1,1,0,0,0,
1,2,2,2,2,2,1,1,1,2,0,2,1,1,1,1,1,1,1,1,1,1,1,1,0,1,1,0,0,0,1,0,
1,2,3,0,0,0,1,0,2,2,0,0,0,0,2,2,0,0,0,0,0,1,0,0,1,0,0,0,2,0,1,0,
2,1,1,1,1,1,0,2,0,0,0,1,2,1,1,1,1,0,1,2,0,1,0,1,0,1,1,1,0,1,0,1,
2,2,2,0,0,0,1,0,2,1,2,0,0,0,1,1,2,0,0,0,0,1,0,0,1,1,0,0,2,1,0,1,
2,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,2,0,1,1,1,1,1,0,1,
1,2,2,0,0,0,1,0,2,2,2,0,0,0,1,1,0,0,0,0,0,1,1,0,2,0,0,1,1,1,0,1,
1,0,1,1,1,1,1,1,0,1,1,1,1,0,0,1,0,0,1,1,0,1,0,1,1,1,1,1,0,0,0,1,
1,0,0,1,0,1,2,1,0,0,1,1,1,2,0,0,0,1,1,0,1,0,1,1,0,0,1,0,0,0,0,0,
0,2,1,2,1,1,1,1,1,2,0,2,0,1,1,0,1,2,1,0,1,1,1,0,0,0,0,0,0,1,0,0,
2,1,1,0,1,2,0,0,1,1,1,0,0,0,1,1,0,0,0,0,0,1,0,0,1,0,0,0,2,1,0,1,
2,2,1,1,1,1,1,2,1,1,0,1,1,1,1,2,1,1,1,2,1,1,0,1,0,1,1,1,1,1,0,1,
1,2,2,0,0,0,0,0,1,1,0,0,0,0,2,1,0,0,0,0,0,2,0,0,2,2,0,0,2,0,0,1,
2,1,1,1,1,1,1,1,0,1,1,0,1,1,0,1,0,0,0,1,1,1,1,0,0,1,1,1,1,0,0,1,
1,1,2,0,0,3,1,0,2,1,1,1,0,0,1,1,1,0,0,0,1,1,0,0,0,1,0,0,1,0,1,0,
1,2,1,0,1,1,1,2,1,1,0,1,1,1,1,1,0,0,0,1,1,1,1,1,0,1,0,0,0,1,0,0,
2,1,1,0,0,0,0,0,1,0,0,0,0,0,0,0,0,1,0,1,0,0,0,1,0,0,0,0,2,0,0,0,
2,1,1,1,1,1,1,1,1,1,0,1,1,1,1,1,1,1,1,1,2,1,1,0,0,1,1,1,1,1,0,1,
2,1,1,1,2,1,1,1,0,1,1,2,1,0,0,0,0,1,1,1,1,0,1,0,0,0,0,1,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1,1,0,1,1,1,1,1,0,0,1,1,2,1,0,0,0,1,1,0,0,0,1,1,0,0,1,0,1,0,0,0,
1,2,1,1,1,1,1,1,1,1,0,1,0,1,1,1,1,1,1,0,1,1,1,0,0,0,0,0,0,1,0,0,
2,0,0,0,1,1,1,1,0,0,1,1,0,0,0,0,0,1,1,1,2,0,0,1,0,0,1,0,1,0,0,0,
0,1,1,1,1,1,1,1,1,2,0,1,1,1,1,0,1,1,1,0,1,1,1,0,0,0,0,0,0,0,0,0,
1,0,0,1,1,1,1,1,0,0,2,1,0,1,0,0,0,1,0,1,0,0,0,0,0,0,1,0,0,0,0,0,
0,1,1,1,1,1,1,0,1,1,0,1,0,1,1,0,1,1,0,0,1,1,1,0,0,0,0,0,0,0,0,0,
1,0,0,1,1,1,0,0,0,0,1,0,2,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,
0,1,1,1,1,1,0,0,1,1,0,1,0,1,0,0,1,1,1,0,1,1,1,0,0,0,0,0,0,0,0,0,
0,0,0,1,0,0,0,0,0,0,1,1,2,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,1,1,1,0,1,0,0,1,1,0,1,0,1,1,0,1,1,1,0,1,1,1,0,0,0,0,0,0,0,0,0,
2,1,1,1,1,1,1,1,1,1,1,0,0,1,1,1,0,0,1,0,0,1,0,1,0,1,1,1,0,0,1,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1,0,0,1,1,1,1,0,0,0,1,1,1,0,0,0,0,1,1,1,0,0,0,0,0,0,0,0,0,0,0,0,
0,1,1,1,1,1,1,0,1,1,0,1,0,1,0,0,1,1,0,0,1,1,0,0,0,0,0,0,0,0,0,0,
)
# Model descriptor consumed by chardet's Hungarian single-byte prober for
# ISO-8859-2 input: ties the byte->order map to the shared bigram matrix.
Latin2HungarianModel = {
'charToOrderMap': Latin2_HungarianCharToOrderMap,
'precedenceMatrix': HungarianLangModel,
'mTypicalPositiveRatio': 0.947368,  # fraction of sequences rated "positive" in training data
'keepEnglishLetter': True,  # ASCII letters are significant in Hungarian text
'charsetName': "ISO-8859-2"
}
# Same model for windows-1250 encoded input; only the char map and the
# reported charset name differ from the ISO-8859-2 descriptor above.
Win1250HungarianModel = {
'charToOrderMap': win1250HungarianCharToOrderMap,
'precedenceMatrix': HungarianLangModel,
'mTypicalPositiveRatio': 0.947368,
'keepEnglishLetter': True,
'charsetName': "windows-1250"
}
# flake8: noqa
| mit |
tkinz27/ansible | lib/ansible/plugins/action/include_vars.py | 31 | 1836 | # (c) 2013-2014, Benno Joy <benno@ansible.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
from types import NoneType
from ansible.errors import AnsibleError
from ansible.parsing import DataLoader
from ansible.plugins.action import ActionBase
class ActionModule(ActionBase):
    """Action plugin backing the ``include_vars`` task.

    Resolves the variables file named by the task's raw parameters
    (relative to ``<role>/vars/`` when running inside a role), parses it
    with the loader, and returns its content as ``ansible_facts`` so the
    variables become available to the rest of the play.
    """

    # Nothing is copied to the managed host; everything happens locally.
    TRANSFERS_FILES = False

    def run(self, tmp=None, task_vars=None):
        # The original signature used a mutable default (task_vars=dict()),
        # which is shared between calls; normalize to a fresh dict instead.
        if task_vars is None:
            task_vars = dict()

        source = self._task.args.get('_raw_params')

        if self._task._role:
            # Inside a role, the file is looked up under <role_path>/vars/.
            source = self._loader.path_dwim_relative(
                self._task._role._role_path, 'vars', source)
        else:
            source = self._loader.path_dwim(source)

        # Guard clause: fail early when the resolved path does not exist.
        if not os.path.exists(source):
            return dict(failed=True, msg="Source file not found.", file=source)

        (data, show_content) = self._loader._get_file_contents(source)
        data = self._loader.load(data, show_content)
        if data is None:
            data = {}
        if not isinstance(data, dict):
            raise AnsibleError("%s must be stored as a dictionary/hash" % source)
        # _ansible_no_log mirrors show_content so vault-protected values
        # are not echoed into the task result.
        return dict(ansible_facts=data, _ansible_no_log=not show_content)
| gpl-3.0 |
bravo-zhang/spark | examples/src/main/python/mllib/logistic_regression.py | 51 | 1830 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
Logistic regression using MLlib.
This example requires NumPy (http://www.numpy.org/).
"""
from __future__ import print_function
import sys
from pyspark import SparkContext
from pyspark.mllib.regression import LabeledPoint
from pyspark.mllib.classification import LogisticRegressionWithSGD
def parsePoint(line):
    """Parse one space-separated line of numbers into an MLlib LabeledPoint.

    The first number is the label; a label of -1 is remapped to 0 since
    MLlib expects binary labels in {0, 1}. The remaining numbers become
    the feature vector.
    """
    numbers = [float(token) for token in line.split(' ')]
    label = 0 if numbers[0] == -1 else numbers[0]
    return LabeledPoint(label, numbers[1:])
if __name__ == "__main__":
    # Usage: logistic_regression <file> <iterations>
    if len(sys.argv) != 3:
        print("Usage: logistic_regression <file> <iterations>", file=sys.stderr)
        sys.exit(-1)
    sc = SparkContext(appName="PythonLR")
    # Each input line is "<label> <f1> <f2> ..."; parsePoint turns it into
    # a LabeledPoint (with -1 labels normalized to 0).
    points = sc.textFile(sys.argv[1]).map(parsePoint)
    iterations = int(sys.argv[2])
    # Train a binary classifier with stochastic gradient descent.
    model = LogisticRegressionWithSGD.train(points, iterations)
    print("Final weights: " + str(model.weights))
    print("Final intercept: " + str(model.intercept))
    sc.stop()
| apache-2.0 |
fevxie/odoo | openerp/addons/test_impex/tests/test_load.py | 350 | 44525 | # -*- coding: utf-8 -*-
import json
import pkgutil
import unittest2
import openerp.modules.registry
import openerp
from openerp.tests import common
from openerp.tools.misc import mute_logger
def message(msg, type='error', from_=0, to_=0, record=0, field='value', **kwargs):
    """Build the message dict that ``Model.load()`` reports for one import issue.

    Extra keyword arguments are merged in, but the named parameters always
    win over same-named entries in ``kwargs``.
    """
    result = dict(kwargs)
    result.update({
        'type': type,
        'rows': {'from': from_, 'to': to_},
        'record': record,
        'field': field,
        'message': msg,
    })
    return result
def moreaction(**kwargs):
    """Return the "See all possible values" act_window payload.

    ``kwargs`` are merged in, but the fixed action keys always take
    precedence over same-named entries in ``kwargs``.
    """
    action = {
        'type': 'ir.actions.act_window',
        'target': 'new',
        'view_mode': 'tree,form',
        'view_type': 'form',
        'views': [(False, 'tree'), (False, 'form')],
        'help': u"See all possible values",
    }
    merged = dict(kwargs)
    merged.update(action)
    return merged
def values(seq, field='value'):
    """Extract ``field`` from every record dict in ``seq``, preserving order."""
    extracted = []
    for entry in seq:
        extracted.append(entry[field])
    return extracted
class ImporterCase(common.TransactionCase):
    """Shared fixture for ``Model.load()`` import tests.

    Subclasses set :attr:`model_name`; the helpers wrap load/read/browse
    and external-id / translation bookkeeping so the individual test
    methods stay terse. All ORM calls run as the superuser.
    """
    # overridden by each concrete test case with the model to import into
    model_name = False

    def __init__(self, *args, **kwargs):
        super(ImporterCase, self).__init__(*args, **kwargs)
        self.model = None

    def setUp(self):
        super(ImporterCase, self).setUp()
        self.model = self.registry(self.model_name)
        # xid lookups are cached; clear so each test sees a clean slate
        self.registry('ir.model.data').clear_caches()

    def import_(self, fields, rows, context=None):
        """Run the importer under test and return its result dict."""
        return self.model.load(
            self.cr, openerp.SUPERUSER_ID, fields, rows, context=context)

    def read(self, fields=('value',), domain=(), context=None):
        """Read ``fields`` from every record of the model matching ``domain``."""
        return self.model.read(
            self.cr, openerp.SUPERUSER_ID,
            self.model.search(self.cr, openerp.SUPERUSER_ID, domain, context=context),
            fields=fields, context=context)

    def browse(self, domain=(), context=None):
        """Browse every record of the model matching ``domain``."""
        return self.model.browse(
            self.cr, openerp.SUPERUSER_ID,
            self.model.search(self.cr, openerp.SUPERUSER_ID, domain, context=context),
            context=context)

    def xid(self, record):
        """Return the external id of ``record``, creating one (from its
        display name, under the ``__test__`` module) when none exists."""
        ModelData = self.registry('ir.model.data')
        ids = ModelData.search(
            self.cr, openerp.SUPERUSER_ID,
            [('model', '=', record._name), ('res_id', '=', record.id)])
        if ids:
            d = ModelData.read(
                self.cr, openerp.SUPERUSER_ID, ids, ['name', 'module'])[0]
            if d['module']:
                return '%s.%s' % (d['module'], d['name'])
            return d['name']
        name = record.name_get()[0][1]
        # fix dotted name_get results, otherwise xid lookups blow up
        name = name.replace('.', '-')
        ModelData.create(self.cr, openerp.SUPERUSER_ID, {
            'name': name,
            'model': record._name,
            'res_id': record.id,
            'module': '__test__'
        })
        return '__test__.' + name

    def add_translations(self, name, type, code, *tnx):
        """Install ``(source, value)`` translation pairs ``tnx`` for language
        ``code``, creating the res.lang record on first use."""
        Lang = self.registry('res.lang')
        if not Lang.search(self.cr, openerp.SUPERUSER_ID, [('code', '=', code)]):
            Lang.create(self.cr, openerp.SUPERUSER_ID, {
                'name': code,
                'code': code,
                'translatable': True,
                'date_format': '%d.%m.%Y',
                'decimal_point': ',',
            })
        Translations = self.registry('ir.translation')
        for source, value in tnx:
            Translations.create(self.cr, openerp.SUPERUSER_ID, {
                'name': name,
                'lang': code,
                'type': type,
                'src': source,
                'value': value,
                'state': 'translated',
            })
class test_ids_stuff(ImporterCase):
    """Import via the special ``.id`` (database id) and ``id`` (xml id) columns."""
    model_name = 'export.integer'

    def test_create_with_id(self):
        # a raw database id can not be used to *create* a record
        result = self.import_(['.id', 'value'], [['42', '36']])
        self.assertIs(result['ids'], False)
        self.assertEqual(result['messages'], [{
            'type': 'error',
            'rows': {'from': 0, 'to': 0},
            'record': 0,
            'field': '.id',
            'message': u"Unknown database identifier '42'",
        }])

    def test_create_with_xid(self):
        # an unknown xml id creates the record and registers the xid
        result = self.import_(['id', 'value'], [['somexmlid', '42']])
        self.assertEqual(len(result['ids']), 1)
        self.assertFalse(result['messages'])
        self.assertEqual(
            'somexmlid',
            self.xid(self.browse()[0]))

    def test_update_with_id(self):
        # an existing database id updates the matching record in place
        id = self.model.create(self.cr, openerp.SUPERUSER_ID, {'value': 36})
        self.assertEqual(
            36,
            self.model.browse(self.cr, openerp.SUPERUSER_ID, id).value)
        result = self.import_(['.id', 'value'], [[str(id), '42']])
        self.assertEqual(len(result['ids']), 1)
        self.assertFalse(result['messages'])
        self.assertEqual(
            [42],  # updated value to imported
            values(self.read()))

    def test_update_with_xid(self):
        # re-importing the same xml id updates instead of duplicating
        self.import_(['id', 'value'], [['somexmlid', '36']])
        self.assertEqual([36], values(self.read()))
        self.import_(['id', 'value'], [['somexmlid', '1234567']])
        self.assertEqual([1234567], values(self.read()))
class test_boolean_field(ImporterCase):
    """Conversion of imported cells to booleans, incl. localized spellings."""
    model_name = 'export.boolean'

    def test_empty(self):
        self.assertEqual(
            self.import_(['value'], []),
            {'ids': [], 'messages': []})

    def test_exported(self):
        # round-trip of the exporter's own 'False'/'True' labels
        result = self.import_(['value'], [['False'], ['True'], ])
        self.assertEqual(len(result['ids']), 2)
        self.assertFalse(result['messages'])
        records = self.read()
        self.assertEqual([
            False,
            True,
        ], values(records))

    def test_falses(self):
        # translated "no"/"false" strings are recognized as False
        for lang, source, value in [('fr_FR', 'no', u'non'),
                                    ('de_DE', 'no', u'nein'),
                                    ('ru_RU', 'no', u'нет'),
                                    ('nl_BE', 'false', u'vals'),
                                    ('lt_LT', 'false', u'klaidingas')]:
            self.add_translations('test_import.py', 'code', lang, (source, value))
        falses = [[u'0'], [u'no'], [u'false'], [u'FALSE'], [u''],
                  [u'non'],  # no, fr
                  [u'nein'],  # no, de
                  [u'нет'],  # no, ru
                  [u'vals'],  # false, nl
                  [u'klaidingas'],  # false, lt,
                  ]
        result = self.import_(['value'], falses)
        self.assertFalse(result['messages'])
        self.assertEqual(len(result['ids']), len(falses))
        self.assertEqual([False] * len(falses), values(self.read()))

    def test_trues(self):
        # any unrecognized non-false token is treated as True, with a warning
        trues = [['None'], ['nil'], ['()'], ['f'], ['#f'],
                 # Problem: OpenOffice (and probably excel) output localized booleans
                 ['VRAI'], ['ok'], ['true'], ['yes'], ['1'], ]
        result = self.import_(['value'], trues)
        self.assertEqual(len(result['ids']), 10)
        self.assertEqual(result['messages'], [
            message(u"Unknown value '%s' for boolean field 'unknown', assuming 'yes'" % v[0],
                    moreinfo=u"Use '1' for yes and '0' for no",
                    type='warning', from_=i, to_=i, record=i)
            for i, v in enumerate(trues)
            if v[0] not in ('true', 'yes', '1')
        ])
        self.assertEqual(
            [True] * 10,
            values(self.read()))
class test_integer_field(ImporterCase):
    """Conversion of imported cells to integers, incl. range/format errors."""
    model_name = 'export.integer'

    def test_none(self):
        self.assertEqual(
            self.import_(['value'], []),
            {'ids': [], 'messages': []})

    def test_empty(self):
        # an empty cell yields False, not 0
        result = self.import_(['value'], [['']])
        self.assertEqual(len(result['ids']), 1)
        self.assertFalse(result['messages'])
        self.assertEqual(
            [False],
            values(self.read()))

    def test_zero(self):
        result = self.import_(['value'], [['0']])
        self.assertEqual(len(result['ids']), 1)
        self.assertFalse(result['messages'])
        result = self.import_(['value'], [['-0']])
        self.assertEqual(len(result['ids']), 1)
        self.assertFalse(result['messages'])
        self.assertEqual([False, False], values(self.read()))

    def test_positives(self):
        result = self.import_(['value'], [
            ['1'],
            ['42'],
            [str(2**31-1)],
            ['12345678']
        ])
        self.assertEqual(len(result['ids']), 4)
        self.assertFalse(result['messages'])
        self.assertEqual([
            1, 42, 2**31-1, 12345678
        ], values(self.read()))

    def test_negatives(self):
        result = self.import_(['value'], [
            ['-1'],
            ['-42'],
            [str(-(2**31 - 1))],
            [str(-(2**31))],
            ['-12345678']
        ])
        self.assertEqual(len(result['ids']), 5)
        self.assertFalse(result['messages'])
        self.assertEqual([
            -1, -42, -(2**31 - 1), -(2**31), -12345678
        ], values(self.read()))

    @mute_logger('openerp.sql_db', 'openerp.models')
    def test_out_of_range(self):
        # values beyond the 32-bit column range surface a database error
        result = self.import_(['value'], [[str(2**31)]])
        self.assertIs(result['ids'], False)
        self.assertEqual(result['messages'], [{
            'type': 'error',
            'rows': {'from': 0, 'to': 0},
            'record': 0,
            'message': "integer out of range\n"
        }])
        result = self.import_(['value'], [[str(-2**32)]])
        self.assertIs(result['ids'], False)
        self.assertEqual(result['messages'], [{
            'type': 'error',
            'rows': {'from': 0, 'to': 0},
            'record': 0,
            'message': "integer out of range\n"
        }])

    def test_nonsense(self):
        result = self.import_(['value'], [['zorglub']])
        self.assertIs(result['ids'], False)
        self.assertEqual(result['messages'], [{
            'type': 'error',
            'rows': {'from': 0, 'to': 0},
            'record': 0,
            'field': 'value',
            'message': u"'zorglub' does not seem to be an integer for field 'unknown'",
        }])
class test_float_field(ImporterCase):
    """Conversion of imported cells to floats."""
    model_name = 'export.float'

    def test_none(self):
        self.assertEqual(
            self.import_(['value'], []),
            {'ids': [], 'messages': []})

    def test_empty(self):
        # an empty cell yields False, not 0.0
        result = self.import_(['value'], [['']])
        self.assertEqual(len(result['ids']), 1)
        self.assertFalse(result['messages'])
        self.assertEqual(
            [False],
            values(self.read()))

    def test_zero(self):
        result = self.import_(['value'], [['0']])
        self.assertEqual(len(result['ids']), 1)
        self.assertFalse(result['messages'])
        result = self.import_(['value'], [['-0']])
        self.assertEqual(len(result['ids']), 1)
        self.assertFalse(result['messages'])
        self.assertEqual([False, False], values(self.read()))

    def test_positives(self):
        # includes values beyond 32-bit integer range and tiny fractions
        result = self.import_(['value'], [
            ['1'],
            ['42'],
            [str(2**31-1)],
            ['12345678'],
            [str(2**33)],
            ['0.000001'],
        ])
        self.assertEqual(len(result['ids']), 6)
        self.assertFalse(result['messages'])
        self.assertEqual([
            1, 42, 2**31-1, 12345678, 2.0**33, .000001
        ], values(self.read()))

    def test_negatives(self):
        result = self.import_(['value'], [
            ['-1'],
            ['-42'],
            [str(-2**31 + 1)],
            [str(-2**31)],
            ['-12345678'],
            [str(-2**33)],
            ['-0.000001'],
        ])
        self.assertEqual(len(result['ids']), 7)
        self.assertFalse(result['messages'])
        self.assertEqual([
            -1, -42, -(2**31 - 1), -(2**31), -12345678, -2.0**33, -.000001
        ], values(self.read()))

    def test_nonsense(self):
        result = self.import_(['value'], [['foobar']])
        self.assertIs(result['ids'], False)
        self.assertEqual(result['messages'], [
            message(u"'foobar' does not seem to be a number for field 'unknown'")])
class test_string_field(ImporterCase):
    """Import into a size-bounded char field: values are truncated."""
    model_name = 'export.string.bounded'

    def test_empty(self):
        result = self.import_(['value'], [['']])
        self.assertEqual(len(result['ids']), 1)
        self.assertFalse(result['messages'])
        self.assertEqual([False], values(self.read()))

    def test_imported(self):
        result = self.import_(['value'], [
            [u'foobar'],
            [u'foobarbaz'],
            [u'Með suð í eyrum við spilum endalaust'],
            [u"People 'get' types. They use them all the time. Telling "
             u"someone he can't pound a nail with a banana doesn't much "
             u"surprise him."]
        ])
        self.assertEqual(len(result['ids']), 4)
        self.assertFalse(result['messages'])
        # long inputs come back truncated to the field's size limit
        self.assertEqual([
            u"foobar",
            u"foobarbaz",
            u"Með suð í eyrum ",
            u"People 'get' typ",
        ], values(self.read()))
class test_unbound_string_field(ImporterCase):
    """Import into an unbounded char field: values are kept verbatim."""
    model_name = 'export.string'

    def test_imported(self):
        result = self.import_(['value'], [
            [u'í dag viðrar vel til loftárása'],
            # ackbar.jpg
            [u"If they ask you about fun, you tell them – fun is a filthy"
             u" parasite"]
        ])
        self.assertEqual(len(result['ids']), 2)
        self.assertFalse(result['messages'])
        self.assertEqual([
            u"í dag viðrar vel til loftárása",
            u"If they ask you about fun, you tell them – fun is a filthy parasite"
        ], values(self.read()))
class test_required_string_field(ImporterCase):
    """A required field with no value makes the whole import fail."""
    model_name = 'export.string.required'

    @mute_logger('openerp.sql_db', 'openerp.models')
    def test_empty(self):
        result = self.import_(['value'], [[]])
        self.assertEqual(result['messages'], [message(
            u"Missing required value for the field 'unknown' (value)")])
        self.assertIs(result['ids'], False)

    @mute_logger('openerp.sql_db', 'openerp.models')
    def test_not_provided(self):
        # omitting the column entirely fails the same way as an empty row
        result = self.import_(['const'], [['12']])
        self.assertEqual(result['messages'], [message(
            u"Missing required value for the field 'unknown' (value)")])
        self.assertIs(result['ids'], False)
class test_text(ImporterCase):
    """Import into a text field: long multi-line unicode is kept verbatim."""
    model_name = 'export.text'

    def test_empty(self):
        result = self.import_(['value'], [['']])
        self.assertEqual(len(result['ids']), 1)
        self.assertFalse(result['messages'])
        self.assertEqual([False], values(self.read()))

    def test_imported(self):
        s = (u"Breiðskífa er notað um útgefna hljómplötu sem inniheldur "
             u"stúdíóupptökur frá einum flytjanda. Breiðskífur eru oftast "
             u"milli 25-80 mínútur og er lengd þeirra oft miðuð við 33⅓ "
             u"snúninga 12 tommu vínylplötur (sem geta verið allt að 30 mín "
             u"hvor hlið).\n\nBreiðskífur eru stundum tvöfaldar og eru þær þá"
             u" gefnar út á tveimur geisladiskum eða tveimur vínylplötum.")
        result = self.import_(['value'], [[s]])
        self.assertEqual(len(result['ids']), 1)
        self.assertFalse(result['messages'])
        self.assertEqual([s], values(self.read()))
class test_selection(ImporterCase):
    """Import into a static selection field, by label, value or translation."""
    model_name = 'export.selection'
    # (source label, fr_FR translation) pairs installed by the tests
    translations_fr = [
        ("Foo", "tete"),
        ("Bar", "titi"),
        ("Qux", "toto"),
    ]

    def test_imported(self):
        # both the display label and the raw value are accepted
        result = self.import_(['value'], [
            ['Qux'],
            ['Bar'],
            ['Foo'],
            ['2'],
        ])
        self.assertEqual(len(result['ids']), 4)
        self.assertFalse(result['messages'])
        self.assertEqual([3, 2, 1, 2], values(self.read()))

    def test_imported_translated(self):
        # translated labels resolve when importing under the matching lang
        self.add_translations(
            'export.selection,value', 'selection', 'fr_FR', *self.translations_fr)
        result = self.import_(['value'], [
            ['toto'],
            ['tete'],
            ['titi'],
        ], context={'lang': 'fr_FR'})
        self.assertEqual(len(result['ids']), 3)
        self.assertFalse(result['messages'])
        self.assertEqual([3, 1, 2], values(self.read()))
        # the untranslated source label still works under fr_FR
        result = self.import_(['value'], [['Foo']], context={'lang': 'fr_FR'})
        self.assertEqual(len(result['ids']), 1)
        self.assertFalse(result['messages'])

    def test_invalid(self):
        result = self.import_(['value'], [['Baz']])
        self.assertIs(result['ids'], False)
        self.assertEqual(result['messages'], [message(
            u"Value 'Baz' not found in selection field 'unknown'",
            moreinfo="Foo Bar Qux 4".split())])
        result = self.import_(['value'], [[42]])
        self.assertIs(result['ids'], False)
        self.assertEqual(result['messages'], [message(
            u"Value '42' not found in selection field 'unknown'",
            moreinfo="Foo Bar Qux 4".split())])
class test_selection_with_default(ImporterCase):
    """Interaction of empty/omitted selection cells with the field default."""
    model_name = 'export.selection.withdefault'

    def test_empty(self):
        """ Empty cells should set corresponding field to False
        """
        result = self.import_(['value'], [['']])
        self.assertFalse(result['messages'])
        self.assertEqual(len(result['ids']), 1)
        self.assertEqual(
            values(self.read()),
            [False])

    def test_default(self):
        """ Non-provided cells should set corresponding field to default
        """
        result = self.import_(['const'], [['42']])
        self.assertFalse(result['messages'])
        self.assertEqual(len(result['ids']), 1)
        self.assertEqual(
            values(self.read()),
            [2])
class test_selection_function(ImporterCase):
    """Import into a selection field whose options come from a function."""
    model_name = 'export.selection.function'
    # (source label, fr_FR translation) pairs installed by the tests
    translations_fr = [
        ("Corge", "toto"),
        ("Grault", "titi"),
        ("Wheee", "tete"),
        ("Moog", "tutu"),
    ]

    def test_imported(self):
        """ import uses fields_get, so translates import label (may or may not
        be good news) *and* serializes the selection function to reverse it:
        import does not actually know that the selection field uses a function
        """
        # NOTE: conflict between a value and a label => pick first
        result = self.import_(['value'], [
            ['3'],
            ["Grault"],
        ])
        self.assertEqual(len(result['ids']), 2)
        self.assertFalse(result['messages'])
        self.assertEqual(
            [3, 1],
            values(self.read()))

    def test_translated(self):
        """ Expects output of selection function returns translated labels
        """
        self.add_translations(
            'export.selection,value', 'selection', 'fr_FR', *self.translations_fr)
        result = self.import_(['value'], [
            ['titi'],
            ['tete'],
        ], context={'lang': 'fr_FR'})
        self.assertFalse(result['messages'])
        self.assertEqual(len(result['ids']), 2)
        self.assertEqual(values(self.read()), [1, 2])
        result = self.import_(['value'], [['Wheee']], context={'lang': 'fr_FR'})
        self.assertFalse(result['messages'])
        self.assertEqual(len(result['ids']), 1)
class test_m2o(ImporterCase):
    """ Import tests for many2one fields: resolution by display name,
    external id (``value/id``), database id (``value/.id``) and the
    corresponding failure modes.
    """
    model_name = 'export.many2one'
    def test_by_name(self):
        """ m2o cells are matched against the name_get display name. """
        # create integer objects
        integer_id1 = self.registry('export.integer').create(
            self.cr, openerp.SUPERUSER_ID, {'value': 42})
        integer_id2 = self.registry('export.integer').create(
            self.cr, openerp.SUPERUSER_ID, {'value': 36})
        # get its name
        name1 = dict(self.registry('export.integer').name_get(
            self.cr, openerp.SUPERUSER_ID,[integer_id1]))[integer_id1]
        name2 = dict(self.registry('export.integer').name_get(
            self.cr, openerp.SUPERUSER_ID,[integer_id2]))[integer_id2]
        result = self.import_(['value'], [
            # import by name_get
            [name1],
            [name1],
            [name2],
        ])
        self.assertFalse(result['messages'])
        self.assertEqual(len(result['ids']), 3)
        # correct ids assigned to corresponding records
        self.assertEqual([
            (integer_id1, name1),
            (integer_id1, name1),
            (integer_id2, name2),],
            values(self.read()))
    def test_by_xid(self):
        """ m2o cells under a /id column are resolved as external ids. """
        ExportInteger = self.registry('export.integer')
        integer_id = ExportInteger.create(
            self.cr, openerp.SUPERUSER_ID, {'value': 42})
        xid = self.xid(ExportInteger.browse(
            self.cr, openerp.SUPERUSER_ID, [integer_id])[0])
        result = self.import_(['value/id'], [[xid]])
        self.assertFalse(result['messages'])
        self.assertEqual(len(result['ids']), 1)
        b = self.browse()
        self.assertEqual(42, b[0].value.value)
    def test_by_id(self):
        """ m2o cells under a /.id column are resolved as database ids. """
        integer_id = self.registry('export.integer').create(
            self.cr, openerp.SUPERUSER_ID, {'value': 42})
        result = self.import_(['value/.id'], [[integer_id]])
        self.assertFalse(result['messages'])
        self.assertEqual(len(result['ids']), 1)
        b = self.browse()
        self.assertEqual(42, b[0].value.value)
    def test_by_names(self):
        """ Several records sharing one display name yield a warning and
        resolve to the first match.
        """
        integer_id1 = self.registry('export.integer').create(
            self.cr, openerp.SUPERUSER_ID, {'value': 42})
        integer_id2 = self.registry('export.integer').create(
            self.cr, openerp.SUPERUSER_ID, {'value': 42})
        name1 = dict(self.registry('export.integer').name_get(
            self.cr, openerp.SUPERUSER_ID,[integer_id1]))[integer_id1]
        name2 = dict(self.registry('export.integer').name_get(
            self.cr, openerp.SUPERUSER_ID,[integer_id2]))[integer_id2]
        # names should be the same
        self.assertEqual(name1, name2)
        result = self.import_(['value'], [[name2]])
        self.assertEqual(
            result['messages'],
            [message(u"Found multiple matches for field 'unknown' (2 matches)",
                     type='warning')])
        self.assertEqual(len(result['ids']), 1)
        self.assertEqual([
            (integer_id1, name1)
        ], values(self.read()))
    def test_fail_by_implicit_id(self):
        """ Can't implicitly import records by id
        """
        # create integer objects
        integer_id1 = self.registry('export.integer').create(
            self.cr, openerp.SUPERUSER_ID, {'value': 42})
        integer_id2 = self.registry('export.integer').create(
            self.cr, openerp.SUPERUSER_ID, {'value': 36})
        # Because name_search all the things. Fallback schmallback
        result = self.import_(['value'], [
            # import by id, without specifying it
            [integer_id1],
            [integer_id2],
            [integer_id1],
        ])
        # each raw id is treated as a (nonexistent) name => one error per row
        self.assertEqual(result['messages'], [
            message(u"No matching record found for name '%s' in field 'unknown'" % id,
                    from_=index, to_=index, record=index,
                    moreinfo=moreaction(res_model='export.integer'))
            for index, id in enumerate([integer_id1, integer_id2, integer_id1])])
        self.assertIs(result['ids'], False)
    @mute_logger('openerp.sql_db')
    def test_fail_id_mistype(self):
        """ A non-numeric /.id cell is rejected as an invalid database id. """
        result = self.import_(['value/.id'], [["foo"]])
        self.assertEqual(result['messages'], [
            message(u"Invalid database id 'foo' for the field 'unknown'",
                    moreinfo=moreaction(res_model='ir.model.data',
                                        domain=[('model','=','export.integer')]))
        ])
        self.assertIs(result['ids'], False)
    def test_sub_field(self):
        """ Does not implicitly create the record, does not warn that you can't
        import m2o subfields (at all)...
        """
        result = self.import_(['value/value'], [['42']])
        self.assertEqual(result['messages'], [
            message(u"Can not create Many-To-One records indirectly, import "
                    u"the field separately")])
        self.assertIs(result['ids'], False)
    def test_fail_noids(self):
        """ Unresolvable name, external id and database id each produce a
        specific "no matching record" error.
        """
        result = self.import_(['value'], [['nameisnoexist:3']])
        self.assertEqual(result['messages'], [message(
            u"No matching record found for name 'nameisnoexist:3' "
            u"in field 'unknown'", moreinfo=moreaction(
                res_model='export.integer'))])
        self.assertIs(result['ids'], False)
        result = self.import_(['value/id'], [['noxidhere']])
        self.assertEqual(result['messages'], [message(
            u"No matching record found for external id 'noxidhere' "
            u"in field 'unknown'", moreinfo=moreaction(
                res_model='ir.model.data', domain=[('model','=','export.integer')]))])
        self.assertIs(result['ids'], False)
        result = self.import_(['value/.id'], [['66']])
        self.assertEqual(result['messages'], [message(
            u"No matching record found for database id '66' "
            u"in field 'unknown'", moreinfo=moreaction(
                res_model='ir.model.data', domain=[('model','=','export.integer')]))])
        self.assertIs(result['ids'], False)
    def test_fail_multiple(self):
        """ Providing both a name column and an external-id column for the
        same m2o is rejected as ambiguous.
        """
        result = self.import_(
            ['value', 'value/id'],
            [['somename', 'somexid']])
        self.assertEqual(result['messages'], [message(
            u"Ambiguous specification for field 'unknown', only provide one of "
            u"name, external id or database id")])
        self.assertIs(result['ids'], False)
class test_m2m(ImporterCase):
    """ Import tests for many2many fields: comma-separated lists of database
    ids, external ids or display names, plus overwrite-on-reimport.
    """
    model_name = 'export.many2many'
    # apparently, one and only thing which works is a
    # csv_internal_sep-separated list of ids, xids, or names (depending if
    # m2m/.id, m2m/id or m2m[/anythingelse]
    def test_ids(self):
        """ /.id column takes a comma-separated list of database ids. """
        id1 = self.registry('export.many2many.other').create(
            self.cr, openerp.SUPERUSER_ID, {'value': 3, 'str': 'record0'})
        id2 = self.registry('export.many2many.other').create(
            self.cr, openerp.SUPERUSER_ID, {'value': 44, 'str': 'record1'})
        id3 = self.registry('export.many2many.other').create(
            self.cr, openerp.SUPERUSER_ID, {'value': 84, 'str': 'record2'})
        id4 = self.registry('export.many2many.other').create(
            self.cr, openerp.SUPERUSER_ID, {'value': 9, 'str': 'record3'})
        id5 = self.registry('export.many2many.other').create(
            self.cr, openerp.SUPERUSER_ID, {'value': 99, 'str': 'record4'})
        result = self.import_(['value/.id'], [
            ['%d,%d' % (id1, id2)],
            ['%d,%d,%d' % (id1, id3, id4)],
            ['%d,%d,%d' % (id1, id2, id3)],
            ['%d' % id5]
        ])
        self.assertFalse(result['messages'])
        self.assertEqual(len(result['ids']), 4)
        ids = lambda records: [record.id for record in records]
        b = self.browse()
        self.assertEqual(ids(b[0].value), [id1, id2])
        self.assertEqual(values(b[0].value), [3, 44])
        self.assertEqual(ids(b[2].value), [id1, id2, id3])
        self.assertEqual(values(b[2].value), [3, 44, 84])
    def test_noids(self):
        """ An unknown database id in a /.id column is an error. """
        result = self.import_(['value/.id'], [['42']])
        self.assertEqual(result['messages'], [message(
            u"No matching record found for database id '42' in field "
            u"'unknown'", moreinfo=moreaction(
                res_model='ir.model.data', domain=[('model','=','export.many2many.other')]))])
        self.assertIs(result['ids'], False)
    def test_xids(self):
        """ /id column takes a comma-separated list of external ids. """
        M2O_o = self.registry('export.many2many.other')
        id1 = M2O_o.create(self.cr, openerp.SUPERUSER_ID, {'value': 3, 'str': 'record0'})
        id2 = M2O_o.create(self.cr, openerp.SUPERUSER_ID, {'value': 44, 'str': 'record1'})
        id3 = M2O_o.create(self.cr, openerp.SUPERUSER_ID, {'value': 84, 'str': 'record2'})
        id4 = M2O_o.create(self.cr, openerp.SUPERUSER_ID, {'value': 9, 'str': 'record3'})
        records = M2O_o.browse(self.cr, openerp.SUPERUSER_ID, [id1, id2, id3, id4])
        result = self.import_(['value/id'], [
            ['%s,%s' % (self.xid(records[0]), self.xid(records[1]))],
            ['%s' % self.xid(records[3])],
            ['%s,%s' % (self.xid(records[2]), self.xid(records[1]))],
        ])
        self.assertFalse(result['messages'])
        self.assertEqual(len(result['ids']), 3)
        b = self.browse()
        self.assertEqual(values(b[0].value), [3, 44])
        self.assertEqual(values(b[2].value), [44, 84])
    def test_noxids(self):
        """ An unknown external id in a /id column is an error. """
        result = self.import_(['value/id'], [['noxidforthat']])
        self.assertEqual(result['messages'], [message(
            u"No matching record found for external id 'noxidforthat' in field"
            u" 'unknown'", moreinfo=moreaction(
                res_model='ir.model.data', domain=[('model','=','export.many2many.other')]))])
        self.assertIs(result['ids'], False)
    def test_names(self):
        """ The bare field column takes a comma-separated list of names. """
        M2O_o = self.registry('export.many2many.other')
        id1 = M2O_o.create(self.cr, openerp.SUPERUSER_ID, {'value': 3, 'str': 'record0'})
        id2 = M2O_o.create(self.cr, openerp.SUPERUSER_ID, {'value': 44, 'str': 'record1'})
        id3 = M2O_o.create(self.cr, openerp.SUPERUSER_ID, {'value': 84, 'str': 'record2'})
        id4 = M2O_o.create(self.cr, openerp.SUPERUSER_ID, {'value': 9, 'str': 'record3'})
        records = M2O_o.browse(self.cr, openerp.SUPERUSER_ID, [id1, id2, id3, id4])
        name = lambda record: record.name_get()[0][1]
        result = self.import_(['value'], [
            ['%s,%s' % (name(records[1]), name(records[2]))],
            ['%s,%s,%s' % (name(records[0]), name(records[1]), name(records[2]))],
            ['%s,%s' % (name(records[0]), name(records[3]))],
        ])
        self.assertFalse(result['messages'])
        self.assertEqual(len(result['ids']), 3)
        b = self.browse()
        self.assertEqual(values(b[1].value), [3, 44, 84])
        self.assertEqual(values(b[2].value), [3, 9])
    def test_nonames(self):
        """ An unknown name in the bare field column is an error. """
        result = self.import_(['value'], [['wherethem2mhavenonames']])
        self.assertEqual(result['messages'], [message(
            u"No matching record found for name 'wherethem2mhavenonames' in "
            u"field 'unknown'", moreinfo=moreaction(
                res_model='export.many2many.other'))])
        self.assertIs(result['ids'], False)
    def test_import_to_existing(self):
        """ Re-importing over an existing xid replaces the m2m content. """
        M2O_o = self.registry('export.many2many.other')
        id1 = M2O_o.create(self.cr, openerp.SUPERUSER_ID, {'value': 3, 'str': 'record0'})
        id2 = M2O_o.create(self.cr, openerp.SUPERUSER_ID, {'value': 44, 'str': 'record1'})
        id3 = M2O_o.create(self.cr, openerp.SUPERUSER_ID, {'value': 84, 'str': 'record2'})
        id4 = M2O_o.create(self.cr, openerp.SUPERUSER_ID, {'value': 9, 'str': 'record3'})
        xid = 'myxid'
        result = self.import_(['id', 'value/.id'], [[xid, '%d,%d' % (id1, id2)]])
        self.assertFalse(result['messages'])
        self.assertEqual(len(result['ids']), 1)
        result = self.import_(['id', 'value/.id'], [[xid, '%d,%d' % (id3, id4)]])
        self.assertFalse(result['messages'])
        self.assertEqual(len(result['ids']), 1)
        b = self.browse()
        self.assertEqual(len(b), 1)
        # TODO: replacement of existing m2m values is correct?
        self.assertEqual(values(b[0].value), [84, 9])
class test_o2m(ImporterCase):
    """ Import tests for one2many fields: sub-records spread over following
    rows (blank parent columns), and m2m-style id lists.
    """
    model_name = 'export.one2many'
    def test_name_get(self):
        """ o2m cells are not resolved by display name. """
        s = u'Java is a DSL for taking large XML files and converting them ' \
            u'to stack traces'
        result = self.import_(
            ['const', 'value'],
            [['5', s]])
        self.assertEqual(result['messages'], [message(
            u"No matching record found for name '%s' in field 'unknown'" % s,
            moreinfo=moreaction(res_model='export.one2many.child'))])
        self.assertIs(result['ids'], False)
    def test_single(self):
        """ One parent row with one inline sub-record. """
        result = self.import_(['const', 'value/value'], [
            ['5', '63']
        ])
        self.assertFalse(result['messages'])
        self.assertEqual(len(result['ids']), 1)
        (b,) = self.browse()
        self.assertEqual(b.const, 5)
        self.assertEqual(values(b.value), [63])
    def test_multicore(self):
        """ Two fully-specified rows create two separate parent records. """
        result = self.import_(['const', 'value/value'], [
            ['5', '63'],
            ['6', '64'],
        ])
        self.assertFalse(result['messages'])
        self.assertEqual(len(result['ids']), 2)
        b1, b2 = self.browse()
        self.assertEqual(b1.const, 5)
        self.assertEqual(values(b1.value), [63])
        self.assertEqual(b2.const, 6)
        self.assertEqual(values(b2.value), [64])
    def test_multisub(self):
        """ Rows with blank parent columns attach to the previous parent. """
        result = self.import_(['const', 'value/value'], [
            ['5', '63'],
            ['', '64'],
            ['', '65'],
            ['', '66'],
        ])
        self.assertFalse(result['messages'])
        self.assertEqual(len(result['ids']), 1)
        (b,) = self.browse()
        self.assertEqual(values(b.value), [63, 64, 65, 66])
    def test_multi_subfields(self):
        """ Several sub-record columns may be interleaved with parent ones. """
        result = self.import_(['value/str', 'const', 'value/value'], [
            ['this', '5', '63'],
            ['is', '', '64'],
            ['the', '', '65'],
            ['rhythm', '', '66'],
        ])
        self.assertFalse(result['messages'])
        self.assertEqual(len(result['ids']), 1)
        (b,) = self.browse()
        self.assertEqual(values(b.value), [63, 64, 65, 66])
        self.assertEqual(
            values(b.value, 'str'),
            'this is the rhythm'.split())
    def test_link_inline(self):
        """ m2m-style specification for o2ms
        """
        id1 = self.registry('export.one2many.child').create(self.cr, openerp.SUPERUSER_ID, {
            'str': 'Bf', 'value': 109
        })
        id2 = self.registry('export.one2many.child').create(self.cr, openerp.SUPERUSER_ID, {
            'str': 'Me', 'value': 262
        })
        result = self.import_(['const', 'value/.id'], [
            ['42', '%d,%d' % (id1, id2)]
        ])
        self.assertFalse(result['messages'])
        self.assertEqual(len(result['ids']), 1)
        [b] = self.browse()
        self.assertEqual(b.const, 42)
        # automatically forces link between core record and o2ms
        self.assertEqual(values(b.value), [109, 262])
        self.assertEqual(values(b.value, field='parent_id'), [b, b])
    def test_link(self):
        """ O2M relating to an existing record (update) force a LINK_TO as well
        """
        O2M = self.registry('export.one2many.child')
        id1 = O2M.create(self.cr, openerp.SUPERUSER_ID, {
            'str': 'Bf', 'value': 109
        })
        id2 = O2M.create(self.cr, openerp.SUPERUSER_ID, {
            'str': 'Me', 'value': 262
        })
        result = self.import_(['const', 'value/.id'], [
            ['42', str(id1)],
            ['', str(id2)],
        ])
        self.assertFalse(result['messages'])
        self.assertEqual(len(result['ids']), 1)
        [b] = self.browse()
        self.assertEqual(b.const, 42)
        # automatically forces link between core record and o2ms
        self.assertEqual(values(b.value), [109, 262])
        self.assertEqual(values(b.value, field='parent_id'), [b, b])
    def test_link_2(self):
        """ Linking by /.id may be combined with updating other sub-fields. """
        O2M_c = self.registry('export.one2many.child')
        id1 = O2M_c.create(self.cr, openerp.SUPERUSER_ID, {
            'str': 'Bf', 'value': 109
        })
        id2 = O2M_c.create(self.cr, openerp.SUPERUSER_ID, {
            'str': 'Me', 'value': 262
        })
        result = self.import_(['const', 'value/.id', 'value/value'], [
            ['42', str(id1), '1'],
            ['', str(id2), '2'],
        ])
        self.assertFalse(result['messages'])
        self.assertEqual(len(result['ids']), 1)
        [b] = self.browse()
        self.assertEqual(b.const, 42)
        # the linked children were updated with the new values
        self.assertEqual(values(b.value), [1, 2])
        self.assertEqual(values(b.value, field='parent_id'), [b, b])
class test_o2m_multiple(ImporterCase):
    """ Import tests for records holding several one2many fields at once:
    continuation rows may fill either or both child columns.
    """
    model_name = 'export.one2many.multiple'
    def test_multi_mixed(self):
        """ Both child columns filled on the same continuation rows. """
        result = self.import_(['const', 'child1/value', 'child2/value'], [
            ['5', '11', '21'],
            ['', '12', '22'],
            ['', '13', '23'],
            ['', '14', ''],
        ])
        self.assertFalse(result['messages'])
        self.assertEqual(len(result['ids']), 1)
        [b] = self.browse()
        self.assertEqual(values(b.child1), [11, 12, 13, 14])
        self.assertEqual(values(b.child2), [21, 22, 23])
    def test_multi(self):
        """ Child columns filled on partially disjoint continuation rows. """
        result = self.import_(['const', 'child1/value', 'child2/value'], [
            ['5', '11', '21'],
            ['', '12', ''],
            ['', '13', ''],
            ['', '14', ''],
            ['', '', '22'],
            ['', '', '23'],
        ])
        self.assertFalse(result['messages'])
        self.assertEqual(len(result['ids']), 1)
        [b] = self.browse()
        self.assertEqual(values(b.child1), [11, 12, 13, 14])
        self.assertEqual(values(b.child2), [21, 22, 23])
    def test_multi_fullsplit(self):
        """ Child columns filled on fully disjoint continuation rows. """
        result = self.import_(['const', 'child1/value', 'child2/value'], [
            ['5', '11', ''],
            ['', '12', ''],
            ['', '13', ''],
            ['', '14', ''],
            ['', '', '21'],
            ['', '', '22'],
            ['', '', '23'],
        ])
        self.assertFalse(result['messages'])
        self.assertEqual(len(result['ids']), 1)
        [b] = self.browse()
        self.assertEqual(b.const, 5)
        self.assertEqual(values(b.child1), [11, 12, 13, 14])
        self.assertEqual(values(b.child2), [21, 22, 23])
class test_realworld(common.TransactionCase):
    """ Larger-scale import tests driven by JSON fixture files shipped next
    to this module.
    """
    def test_bigfile(self):
        # bulk-load a large contacts fixture; every row must import cleanly
        data = json.loads(pkgutil.get_data(self.__module__, 'contacts_big.json'))
        result = self.registry('res.partner').load(
            self.cr, openerp.SUPERUSER_ID,
            ['name', 'mobile', 'email', 'image'],
            data)
        self.assertFalse(result['messages'])
        self.assertEqual(len(result['ids']), len(data))
    def test_backlink(self):
        # fixture rows reference each other through parent_id and relational
        # columns; every row must still import cleanly
        data = json.loads(pkgutil.get_data(self.__module__, 'contacts.json'))
        result = self.registry('res.partner').load(
            self.cr, openerp.SUPERUSER_ID,
            ["name", "type", "street", "city", "country_id", "category_id",
             "supplier", "customer", "is_company", "parent_id"],
            data)
        self.assertFalse(result['messages'])
        self.assertEqual(len(result['ids']), len(data))
    def test_recursive_o2m(self):
        """ The content of the o2m field's dict needs to go through conversion
        as it may be composed of convertables or other relational fields
        """
        self.registry('ir.model.data').clear_caches()
        Model = self.registry('export.one2many.recursive')
        result = Model.load(self.cr, openerp.SUPERUSER_ID,
            ['value', 'child/const', 'child/child1/str', 'child/child2/value'],
            [
                ['4', '42', 'foo', '55'],
                ['', '43', 'bar', '56'],
                ['', '', 'baz', ''],
                ['', '55', 'qux', '57'],
                ['5', '99', 'wheee', ''],
                ['', '98', '', '12'],
            ],
            context=None)
        self.assertFalse(result['messages'])
        self.assertEqual(len(result['ids']), 2)
        b = Model.browse(self.cr, openerp.SUPERUSER_ID, result['ids'], context=None)
        self.assertEqual((b[0].value, b[1].value), (4, 5))
        self.assertEqual([child.str for child in b[0].child[1].child1],
                         ['bar', 'baz'])
        self.assertFalse(len(b[1].child[1].child1))
        self.assertEqual([child.value for child in b[1].child[1].child2],
                         [12])
class test_date(ImporterCase):
    """ Import tests for date fields (ISO yyyy-mm-dd format). """
    model_name = 'export.date'
    def test_empty(self):
        """ An empty import is a no-op: no ids, no messages. """
        self.assertEqual(
            self.import_(['value'], []),
            {'ids': [], 'messages': []})
    def test_basic(self):
        result = self.import_(['value'], [['2012-02-03']])
        self.assertFalse(result['messages'])
        self.assertEqual(len(result['ids']), 1)
    def test_invalid(self):
        """ A non-date string is rejected with a format hint. """
        result = self.import_(['value'], [['not really a date']])
        self.assertEqual(result['messages'], [
            message(u"'not really a date' does not seem to be a valid date "
                    u"for field 'unknown'",
                    moreinfo=u"Use the format '2012-12-31'")])
        self.assertIs(result['ids'], False)
class test_datetime(ImporterCase):
    """ Import tests for datetime fields, in particular how the imported
    wall-clock value is converted to the stored value depending on the
    timezone found in the context, on the user, or neither.
    """
    model_name = 'export.datetime'
    def test_empty(self):
        self.assertEqual(
            self.import_(['value'], []),
            {'ids': [], 'messages': []})
    def test_basic(self):
        result = self.import_(['value'], [['2012-02-03 11:11:11']])
        self.assertFalse(result['messages'])
        self.assertEqual(len(result['ids']), 1)
    def test_invalid(self):
        result = self.import_(['value'], [['not really a datetime']])
        self.assertEqual(result['messages'], [
            message(u"'not really a datetime' does not seem to be a valid "
                    u"datetime for field 'unknown'",
                    moreinfo=u"Use the format '2012-12-31 23:59:59'")])
        self.assertIs(result['ids'], False)
    def test_checktz1(self):
        """ Imported date should be interpreted as being in the tz provided by
        the context
        """
        # write dummy tz in user (Asia/Hovd UTC+0700), should be superseded by
        # context
        self.registry('res.users').write(
            self.cr, openerp.SUPERUSER_ID, [openerp.SUPERUSER_ID],
            {'tz': 'Asia/Hovd'})
        # UTC+1400
        result = self.import_(
            ['value'], [['2012-02-03 11:11:11']], {'tz': 'Pacific/Kiritimati'})
        self.assertFalse(result['messages'])
        self.assertEqual(
            values(self.read(domain=[('id', 'in', result['ids'])])),
            ['2012-02-02 21:11:11'])
        # UTC-0930
        result = self.import_(
            ['value'], [['2012-02-03 11:11:11']], {'tz': 'Pacific/Marquesas'})
        self.assertFalse(result['messages'])
        self.assertEqual(
            values(self.read(domain=[('id', 'in', result['ids'])])),
            ['2012-02-03 20:41:11'])
    def test_usertz(self):
        """ If the context does not hold a timezone, the importing user's tz
        should be used
        """
        # UTC +1000
        self.registry('res.users').write(
            self.cr, openerp.SUPERUSER_ID, [openerp.SUPERUSER_ID],
            {'tz': 'Asia/Yakutsk'})
        result = self.import_(
            ['value'], [['2012-02-03 11:11:11']])
        self.assertFalse(result['messages'])
        self.assertEqual(
            values(self.read(domain=[('id', 'in', result['ids'])])),
            ['2012-02-03 01:11:11'])
    def test_notz(self):
        """ If there is no tz either in the context or on the user, falls back
        to UTC
        """
        self.registry('res.users').write(
            self.cr, openerp.SUPERUSER_ID, [openerp.SUPERUSER_ID],
            {'tz': False})
        result = self.import_(['value'], [['2012-02-03 11:11:11']])
        self.assertFalse(result['messages'])
        self.assertEqual(
            values(self.read(domain=[('id', 'in', result['ids'])])),
            ['2012-02-03 11:11:11'])
class test_unique(ImporterCase):
    """ Import against a model with a unique constraint: duplicate rows must
    produce per-row error messages and abort the import.
    """
    model_name = 'export.unique'
    @mute_logger('openerp.sql_db')
    def test_unique(self):
        """ Rows 1 and 4 duplicate earlier values; each gets its own error. """
        result = self.import_(['value'], [
            ['1'],
            ['1'],
            ['2'],
            ['3'],
            ['3'],
        ])
        self.assertFalse(result['ids'])
        self.assertEqual(result['messages'], [
            dict(message=u"The value for the field 'value' already exists. "
                         u"This might be 'unknown' in the current model, "
                         u"or a field of the same name in an o2m.",
                 type='error', rows={'from': 1, 'to': 1},
                 record=1, field='value'),
            dict(message=u"The value for the field 'value' already exists. "
                         u"This might be 'unknown' in the current model, "
                         u"or a field of the same name in an o2m.",
                 type='error', rows={'from': 4, 'to': 4},
                 record=4, field='value'),
        ])
| agpl-3.0 |
pscholz/presto | bin/GBT350_drift_prep.py | 2 | 3968 | #!/usr/bin/env python
import sys, os, random, sigproc
import psr_utils as pu
def spigot_samples_per_file(spigot_filenm):
    """
    spigot_samples_per_file(spigot_filenm,):
        Return the number of complete samples present in the Spigot
        FITS file (header excluded; a trailing partial record is not
        counted as a sample).
    """
    hdrlen = 184320          # fixed byte size of the FITS header
    bytes_per_sample = 2048  # one 2048-byte record per sample
    # os.path.getsize replaces the opaque os.stat(...)[6] index lookup;
    # explicit floor division keeps Python-2 integer semantics under
    # Python 3 as well (the old int(a/b) relied on py2 int division).
    filelen = os.path.getsize(spigot_filenm)
    return (filelen - hdrlen) // bytes_per_sample
debug = 1
if __name__=="__main__":
    if (len(sys.argv) < 3):
        print "usage: GBT350_drift_prep.py NUM spigot_fits_files"
        print "    NUM is the 'beam' number in the scan.  It starts "
        print "    with 0 and goes to NMAX.  If NUM is < 0, NMAX"
        print "    is sent to STDOUT by the program."
        sys.exit()
    orig_N = 1728000 # Number of samples to analyze at a time (~141 sec)
    raw_N = 1900000 # Number of samples to step through .fits files
    overlap_factor = 0.5 # Overlap each orig_N samples by this fraction
    overlap_samples = int(orig_N * overlap_factor)
    nom_samps_per_file = 976896
    # Now see how much data we have to work with
    samples_per_file = []
    infilenms = sys.argv[2:]
    numinfiles = len(infilenms)
    for ii in range(numinfiles):
        samps = spigot_samples_per_file(infilenms[ii])
        # every file except the last must be full-length, otherwise the
        # sample bookkeeping below would be wrong
        if ((samps < nom_samps_per_file) and (ii < numinfiles-1)):
            print "Warning!  '%s' only has %d samples!"%\
                  (infilenms[ii], samps)
            print "    You need to fix that file!"
            sys.exit(-1)
        samples_per_file.append(samps)
    total_samples = sum(samples_per_file)
    num = int(sys.argv[1])
    nmax = total_samples/overlap_samples-1
    if num < 0:
        # negative NUM: just report the maximum beam index and stop
        print nmax
        sys.exit(0)
    if num > nmax:
        print "NUM > NMAX (%d)!  Exiting!"%nmax
        sys.exit(-1)
    # Now figure out which file is the first
    first_sample = num * overlap_samples
    accum_samples = 0
    for ii in range(len(samples_per_file)):
        next_accum_samples = accum_samples + samples_per_file[ii]
        if next_accum_samples > first_sample:
            first_filenm = infilenms[ii]
            # How much data to skip in the first file
            skip = first_sample - accum_samples
            # How many total files we need
            first_file_samples = samples_per_file[ii]-skip
            numfiles = (raw_N - first_file_samples) / nom_samps_per_file + 1
            # NOTE(review): the extra += 1 below is on top of the
            # unconditional + 1 above -- presumably intentional padding so
            # enough files are passed along; confirm against spigot2filterbank
            if ((raw_N - first_file_samples) % nom_samps_per_file):
                numfiles += 1
            if debug:
                print "first_filenum  = ", ii
                print "1st sample     = ", first_sample
                print "1st filenam    = ", infilenms[ii]
                print "skip           = ", skip
                print "1st_file_samps = ", first_file_samples
                print "numfiles       = ", numfiles
            break
        else:
            accum_samples += samples_per_file[ii]
    # Now make a command line option for spigot2filterbank
    tmpfilenm = "tmp%d.fil"%random.randint(0,2**30)
    cmd = "spigot2filterbank -skip %d -numout %d -o %s " % \
          (skip, raw_N, tmpfilenm)
    for goodfile in infilenms[ii:ii+numfiles]:
        cmd += "%s "%goodfile
    os.system(cmd)
    # Now read the header to determine what the correct filename
    # should be.  Use that to rename the fil file.
    filhdr, hdrlen = sigproc.read_header(tmpfilenm)
    MJDi = int(filhdr['tstart'])
    ra_rad = sigproc.ra2radians(filhdr['src_raj'])
    ra_string = pu.coord_to_string(*pu.rad_to_hms(ra_rad))
    dec_rad = sigproc.dec2radians(filhdr['src_dej'])
    dec_string = pu.coord_to_string(*pu.rad_to_dms(dec_rad))
    # build a JHHMM+DDMM-style coordinate tag for the output file name
    str_coords = "".join(ra_string.split(":")[:2])
    if dec_rad >= 0.0: str_coords += "+"
    str_coords += "".join(dec_string.split(":")[:2])
    filfilenm = "GBT350drift_%d_%s.fil" % (MJDi, str_coords)
    os.rename(tmpfilenm, filfilenm)
    print "Renamed '%s' to '%s'." % (tmpfilenm, filfilenm)
| gpl-2.0 |
jacenkow/inspire-next | inspirehep/dojson/conferences/model.py | 3 | 1040 | # -*- coding: utf-8 -*-
#
# This file is part of INSPIRE.
# Copyright (C) 2014, 2015, 2016 CERN.
#
# INSPIRE is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# INSPIRE is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with INSPIRE. If not, see <http://www.gnu.org/licenses/>.
#
# In applying this licence, CERN does not waive the privileges and immunities
# granted to it by virtue of its status as an Intergovernmental Organization
# or submit itself to any jurisdiction.
"""Conferences model definition."""
from ..schema import SchemaOverdo
# Overdo instance converting incoming MARC records according to the
# "conferences.json" JSON schema.
conferences = SchemaOverdo(schema="conferences.json")
| gpl-2.0 |
gangadhar-kadam/helpdesk-frappe | frappe/utils/response.py | 1 | 5416 | # Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# MIT License. See license.txt
from __future__ import unicode_literals
import json
import datetime
import mimetypes
import os
import frappe
from frappe import _
import frappe.model.document
import frappe.utils
import frappe.sessions
import werkzeug.utils
from werkzeug.local import LocalProxy
from werkzeug.wsgi import wrap_file
from werkzeug.wrappers import Response
from werkzeug.exceptions import NotFound, Forbidden
from frappe.core.doctype.file.file import check_file_permission
from frappe.website.render import render
def report_error(status_code):
	"""Build a JSON error response carrying *status_code*.

	The traceback is printed to the error log unless traceback reporting
	is disabled, or the error is a 404 and site logging is off.
	"""
	if (status_code!=404 or frappe.conf.logging) and not frappe.local.flags.disable_traceback:
		frappe.errprint(frappe.utils.get_traceback())
	response = build_response("json")
	response.status_code = status_code
	return response
def build_response(response_type=None):
	"""Dispatch to the builder matching the current response type.

	The type recorded on ``frappe.response`` wins; *response_type* is
	only a fallback when none was set by the request handler.
	"""
	# an empty "docs" collection carries no information -- drop it so it
	# is not serialized into the payload
	local_response = frappe.local.response
	if "docs" in local_response and not local_response.docs:
		del local_response["docs"]
	builders = {
		'csv': as_csv,
		'download': as_raw,
		'json': as_json,
		'page': as_page,
		'redirect': redirect
	}
	selected_type = frappe.response.get('type') or response_type
	return builders[selected_type]()
def as_csv():
	"""Return a CSV attachment response built from ``frappe.response``."""
	# spaces in the doctype would break the filename in the header
	safe_name = frappe.response['doctype'].replace(' ', '_')
	disposition = ("attachment; filename=\"%s.csv\"" % safe_name).encode("utf-8")
	csv_response = Response()
	csv_response.headers[b"Content-Type"] = b"text/csv; charset: utf-8"
	csv_response.headers[b"Content-Disposition"] = disposition
	csv_response.data = frappe.response['result']
	return csv_response
def as_raw():
	"""Return a raw file response built from ``frappe.response``."""
	filename = frappe.response['filename']
	# prefer an explicit content type, then guess from the filename,
	# finally fall back to a generic binary type
	content_type = (frappe.response.get("content_type")
		or mimetypes.guess_type(filename)[0]
		or b"application/unknown")
	raw_response = Response()
	raw_response.headers[b"Content-Type"] = content_type
	raw_response.headers[b"Content-Disposition"] = ("filename=\"%s\"" % filename.replace(' ', '_')).encode("utf-8")
	raw_response.data = frappe.response['filecontent']
	return raw_response
def as_json():
	"""Serialize ``frappe.local.response`` into a JSON response.

	Pending server/error messages are folded into the payload first; an
	``http_status_code`` entry is moved onto the HTTP response object.
	"""
	make_logs()
	response = Response()
	if frappe.local.response.http_status_code:
		response.status_code = frappe.local.response['http_status_code']
		# removed before dumping so it does not leak into the JSON body
		del frappe.local.response['http_status_code']
	response.headers[b"Content-Type"] = b"application/json; charset: utf-8"
	response.data = json.dumps(frappe.local.response, default=json_handler, separators=(',',':'))
	return response
def make_logs(response = None):
	"""make strings for msgprint and errprint"""
	if not response:
		response = frappe.local.response
	if frappe.error_log:
		# frappe.response['exc'] = json.dumps("\n".join([cstr(d) for d in frappe.error_log]))
		response['exc'] = json.dumps([frappe.utils.cstr(d) for d in frappe.local.error_log])
	if frappe.local.message_log:
		response['_server_messages'] = json.dumps([frappe.utils.cstr(d) for
			d in frappe.local.message_log])
	# NOTE(review): `and ... or False` -- `and` binds tighter than `or`,
	# so this is (debug_log and logging) or False; the trailing `or False`
	# is a no-op. Presumably intended as a plain conjunction -- confirm.
	if frappe.debug_log and frappe.conf.get("logging") or False:
		response['_debug_messages'] = json.dumps(frappe.local.debug_log)
def json_handler(obj):
	"""serialize non-serializable data for json"""
	# serialize date
	if isinstance(obj, (datetime.date, datetime.timedelta, datetime.datetime)):
		return unicode(obj)
	elif isinstance(obj, LocalProxy):
		# unwrap werkzeug proxies by stringifying the proxied object
		return unicode(obj)
	elif isinstance(obj, frappe.model.document.BaseDocument):
		doc = obj.as_dict(no_nulls=True)
		return doc
	else:
		# NOTE: Python-2-only raise syntax; would need porting for py3
		raise TypeError, """Object of type %s with value of %s is not JSON serializable""" % \
			(type(obj), repr(obj))
def as_page():
	"""Render the named website page as the response."""
	page_name = frappe.response['page_name']
	status = frappe.response.get("http_status_code")
	return render(page_name, http_status_code=status)
def redirect():
	"""Issue an HTTP redirect to ``frappe.response.location``."""
	return werkzeug.utils.redirect(frappe.response.location)
def download_backup(path):
	"""Stream a backup file at *path*, restricted to System Managers.

	Raises werkzeug ``Forbidden`` when the user lacks the required role.
	"""
	try:
		frappe.only_for(("System Manager", "Administrator"))
	except frappe.PermissionError:
		raise Forbidden(_("You need to be logged in and have System Manager Role to be able to access backups."))
	return send_private_file(path)
def download_private_file(path):
	"""Checks permissions and sends back private file"""
	try:
		check_file_permission(path)
	except frappe.PermissionError:
		raise Forbidden(_("You don't have permission to access this file"))
	# strip everything up to and including the "/private" prefix --
	# send_private_file re-roots the path under the private directory
	return send_private_file(path.split("/private", 1)[1])
def send_private_file(path):
	"""Build a response streaming the private file at *path*.

	When the proxy sets X-Use-X-Accel-Redirect, delegate the actual file
	transfer to nginx via an X-Accel-Redirect header; otherwise stream
	the file directly. Raises werkzeug ``NotFound`` if the file is
	missing.
	"""
	path = os.path.join(frappe.local.conf.get('private_path', 'private'), path.strip("/"))
	filename = os.path.basename(path)
	if frappe.local.request.headers.get('X-Use-X-Accel-Redirect'):
		path = '/protected/' + path
		response = Response()
		response.headers[b'X-Accel-Redirect'] = path
	else:
		filepath = frappe.utils.get_site_path(path)
		try:
			f = open(filepath, 'rb')
		except IOError:
			raise NotFound
		response = Response(wrap_file(frappe.local.request.environ, f), direct_passthrough=True)
	# no need for content disposition and force download. let browser handle its opening.
	# response.headers.add(b'Content-Disposition', b'attachment', filename=filename.encode("utf-8"))
	response.headers[b'Content-Type'] = mimetypes.guess_type(filename)[0] or b'application/octet-stream'
	return response
def handle_session_stopped():
	"""Return a 503 "site is updating" HTML page for stopped sessions."""
	response = Response("""<html>
							<body style="background-color: #EEE;">
									<h3 style="width: 900px; background-color: #FFF; border: 2px solid #AAA; padding: 20px; font-family: Arial; margin: 20px auto">
											Updating.
											We will be back in a few moments...
									</h3>
							</body>
							</html>""")
	response.status_code = 503
	response.content_type = 'text/html'
	return response
| mit |
megaserg/pants | tests/python/pants_test/backend/python/tasks/test_python_task.py | 17 | 4383 | # coding=utf-8
# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import subprocess
from contextlib import contextmanager
from textwrap import dedent
from pants.backend.python.tasks.python_task import PythonTask
from pants.util.contextutil import temporary_file_path
from pants_test.backend.python.tasks.python_task_test_base import PythonTaskTestBase
class PythonTaskTest(PythonTaskTestBase):
class NoopPythonTask(PythonTask):
def execute(self):
pass
@classmethod
def task_type(cls):
return cls.NoopPythonTask
def setUp(self):
super(PythonTaskTest, self).setUp()
self.requests = self.create_python_requirement_library('3rdparty/python/requests', 'requests',
requirements=['requests==2.6.0'])
self.six = self.create_python_requirement_library('3rdparty/python/six', 'six',
requirements=['six==1.9.0'])
self.library = self.create_python_library('src/python/lib', 'lib', {'lib.py': dedent("""
import six
def go():
six.print_('go', 'go', 'go!', sep='')
""")}, dependencies=['//3rdparty/python/six'])
self.binary = self.create_python_binary('src/python/bin', 'bin', 'lib.lib:go',
dependencies=['//src/python/lib'])
def rebind_targets(self):
# Creates new Target objects to ensure any cached fingerprints are reset and ready to be
# re-calculated.
self.reset_build_graph()
self.requests = self.target('3rdparty/python/requests')
self.six = self.target('3rdparty/python/six')
self.library = self.target('src/python/lib')
self.binary = self.target('src/python/bin')
@contextmanager
def cached_chroot(self):
python_task = self.create_task(self.context(target_roots=[self.binary]))
interpreter = python_task.select_interpreter_for_targets(self.binary.closure())
pex_info = self.binary.pexinfo
platforms = self.binary.platforms
chroot = python_task.cached_chroot(interpreter, pex_info, [self.binary], platforms)
with temporary_file_path() as pex:
chroot.dump()
chroot.package_pex(pex)
yield chroot, pex
def test_cached_chroot_reuse(self):
with self.cached_chroot() as (chroot1, pex1):
self.rebind_targets()
with self.cached_chroot() as (chroot2, pex2):
self.assertEqual(chroot1.path(), chroot2.path())
self.assertEqual(subprocess.check_output(pex1), subprocess.check_output(pex2))
# TODO(John Sirois): Test direct python_binary.source modification after moving
# PythonTaskTestBase to self.make_target
def test_cached_chroot_direct_dep_invalidation(self):
with self.cached_chroot() as (chroot1, pex1):
self.rebind_targets()
self.binary.inject_dependency(self.requests.address)
with self.cached_chroot() as (chroot2, pex2):
self.assertNotEqual(chroot1.path(), chroot2.path())
# Adding an unused requests dep does not change the behavior of the binary despite
# invalidating the chroot
self.assertEqual(subprocess.check_output(pex1), subprocess.check_output(pex2))
  def test_cached_chroot_transitive_source_invalidation(self):
    """Editing a transitive dep's source invalidates the chroot AND changes output."""
    with self.cached_chroot() as (chroot1, pex1):
      self.rebind_targets()
      # Append an extra print to lib.py ('ab' => append binary) so the binary's
      # observable output differs as well.
      self.create_file('src/python/lib/lib.py', mode='ab',
                       contents="  six.print_('Mad River Glen!')")
      with self.cached_chroot() as (chroot2, pex2):
        self.assertNotEqual(chroot1.path(), chroot2.path())
        self.assertNotEqual(subprocess.check_output(pex1), subprocess.check_output(pex2))
  def test_cached_chroot_transitive_dep_invalidation(self):
    """Adding a dependency to a transitive library also invalidates the chroot."""
    with self.cached_chroot() as (chroot1, pex1):
      self.rebind_targets()
      self.library.inject_dependency(self.requests.address)
      with self.cached_chroot() as (chroot2, pex2):
        self.assertNotEqual(chroot1.path(), chroot2.path())
        # Adding an unused requests dep does not change the behavior of the binary despite
        # invalidating the chroot
        self.assertEqual(subprocess.check_output(pex1), subprocess.check_output(pex2))
| apache-2.0 |
aspidites/django | django/contrib/gis/geos/mutable_list.py | 238 | 10705 | # Copyright (c) 2008-2009 Aryeh Leib Taurog, all rights reserved.
# Released under the New BSD license.
"""
This module contains a base type which provides list-style mutations
without specific data storage methods.
See also http://static.aryehleib.com/oldsite/MutableLists.html
Author: Aryeh Leib Taurog.
"""
from functools import total_ordering
from django.utils import six
from django.utils.six.moves import range
@total_ordering
class ListMixin(object):
    """
    A base class which provides complete list interface.
    Derived classes must call ListMixin's __init__() function
    and implement the following:

    function _get_single_external(self, i):
        Return single item with index i for general use.
        The index i will always satisfy 0 <= i < len(self).

    function _get_single_internal(self, i):
        Same as above, but for use within the class [Optional]
        Note that if _get_single_internal and _get_single_external return
        different types of objects, _set_list must distinguish
        between the two and handle each appropriately.

    function _set_list(self, length, items):
        Recreate the entire object.
        NOTE: items may be a generator which calls _get_single_internal.
        Therefore, it is necessary to cache the values in a temporary:
            temp = list(items)
        before clobbering the original storage.

    function _set_single(self, i, value):
        Set the single item at index i to value [Optional]
        If left undefined, all mutations will result in rebuilding
        the object using _set_list.

    function __len__(self):
        Return the length

    int _minlength:
        The minimum legal length [Optional]

    int _maxlength:
        The maximum legal length [Optional]

    type or tuple _allowed:
        A type or tuple of allowed item types [Optional]
    """
    _minlength = 0
    _maxlength = None

    # ### Python initialization and special list interface methods ###

    def __init__(self, *args, **kwargs):
        # Fill in the optional hooks with rebuild-everything fallbacks so the
        # rest of the class can call them unconditionally.
        if not hasattr(self, '_get_single_internal'):
            self._get_single_internal = self._get_single_external
        if not hasattr(self, '_set_single'):
            self._set_single = self._set_single_rebuild
            self._assign_extended_slice = self._assign_extended_slice_rebuild
        super(ListMixin, self).__init__(*args, **kwargs)

    def __getitem__(self, index):
        "Get the item(s) at the specified index/slice."
        if isinstance(index, slice):
            return [self._get_single_external(i) for i in range(*index.indices(len(self)))]
        else:
            index = self._checkindex(index)
            return self._get_single_external(index)

    def __delitem__(self, index):
        "Delete the item(s) at the specified index/slice."
        if not isinstance(index, six.integer_types + (slice,)):
            raise TypeError("%s is not a legal index" % index)
        # calculate new length and dimensions
        origLen = len(self)
        if isinstance(index, six.integer_types):
            index = self._checkindex(index)
            indexRange = [index]
        else:
            indexRange = range(*index.indices(origLen))
        newLen = origLen - len(indexRange)
        newItems = (self._get_single_internal(i)
                    for i in range(origLen)
                    if i not in indexRange)
        self._rebuild(newLen, newItems)

    def __setitem__(self, index, val):
        "Set the item(s) at the specified index/slice."
        if isinstance(index, slice):
            self._set_slice(index, val)
        else:
            index = self._checkindex(index)
            self._check_allowed((val,))
            self._set_single(index, val)

    # ### Special methods for arithmetic operations ###

    def __add__(self, other):
        'add another list-like object'
        return self.__class__(list(self) + list(other))

    def __radd__(self, other):
        'add to another list-like object'
        return other.__class__(list(other) + list(self))

    def __iadd__(self, other):
        'add another list-like object to self'
        self.extend(list(other))
        return self

    def __mul__(self, n):
        'multiply'
        return self.__class__(list(self) * n)

    def __rmul__(self, n):
        'multiply'
        return self.__class__(list(self) * n)

    def __imul__(self, n):
        'multiply'
        if n <= 0:
            del self[:]
        else:
            cache = list(self)
            for i in range(n - 1):
                self.extend(cache)
        return self

    def __eq__(self, other):
        # Element-wise comparison against any indexable sequence; lengths must
        # match for equality (checked last to avoid a len() on generators of
        # self during iteration).
        olen = len(other)
        for i in range(olen):
            try:
                c = self[i] == other[i]
            except IndexError:
                # self must be shorter
                return False
            if not c:
                return False
        return len(self) == olen

    def __lt__(self, other):
        # Lexicographic ordering; @total_ordering derives the remaining rich
        # comparisons from __eq__ and __lt__.
        olen = len(other)
        for i in range(olen):
            try:
                c = self[i] < other[i]
            except IndexError:
                # self must be shorter
                return True
            if c:
                return c
            elif other[i] < self[i]:
                return False
        return len(self) < olen

    # ### Public list interface Methods ###
    # ## Non-mutating ##

    def count(self, val):
        "Standard list count method"
        count = 0
        for i in self:
            if val == i:
                count += 1
        return count

    def index(self, val):
        "Standard list index method"
        for i in range(0, len(self)):
            if self[i] == val:
                return i
        raise ValueError('%s not found in object' % str(val))

    # ## Mutating ##

    def append(self, val):
        "Standard list append method"
        self[len(self):] = [val]

    def extend(self, vals):
        "Standard list extend method"
        self[len(self):] = vals

    def insert(self, index, val):
        "Standard list insert method"
        if not isinstance(index, six.integer_types):
            raise TypeError("%s is not a legal index" % index)
        self[index:index] = [val]

    def pop(self, index=-1):
        "Standard list pop method"
        result = self[index]
        del self[index]
        return result

    def remove(self, val):
        "Standard list remove method"
        del self[self.index(val)]

    def reverse(self):
        "Standard list reverse method"
        self[:] = self[-1::-1]

    def sort(self, cmp=None, key=None, reverse=False):
        "Standard list sort method"
        if key:
            # Decorate-sort-undecorate keeps `key` called exactly once per item.
            temp = [(key(v), v) for v in self]
            temp.sort(key=lambda x: x[0], reverse=reverse)
            self[:] = [v[1] for v in temp]
        else:
            temp = list(self)
            if cmp is not None:
                # list.sort() lost its `cmp` keyword in Python 3; adapt the
                # comparison function so the legacy argument keeps working on
                # both Python 2.7 and Python 3.
                from functools import cmp_to_key
                temp.sort(key=cmp_to_key(cmp), reverse=reverse)
            else:
                temp.sort(reverse=reverse)
            self[:] = temp

    # ### Private routines ###

    def _rebuild(self, newLen, newItems):
        # Enforce the optional length bounds before handing off to _set_list.
        if newLen < self._minlength:
            raise ValueError('Must have at least %d items' % self._minlength)
        if self._maxlength is not None and newLen > self._maxlength:
            raise ValueError('Cannot have more than %d items' % self._maxlength)
        self._set_list(newLen, newItems)

    def _set_single_rebuild(self, index, value):
        self._set_slice(slice(index, index + 1, 1), [value])

    def _checkindex(self, index, correct=True):
        # Validate an integer index, optionally normalizing negatives.
        length = len(self)
        if 0 <= index < length:
            return index
        if correct and -length <= index < 0:
            return index + length
        raise IndexError('invalid index: %s' % str(index))

    def _check_allowed(self, items):
        if hasattr(self, '_allowed'):
            if False in [isinstance(val, self._allowed) for val in items]:
                raise TypeError('Invalid type encountered in the arguments.')

    def _set_slice(self, index, values):
        "Assign values to a slice of the object"
        try:
            iter(values)
        except TypeError:
            raise TypeError('can only assign an iterable to a slice')
        self._check_allowed(values)
        origLen = len(self)
        valueList = list(values)
        start, stop, step = index.indices(origLen)
        # CAREFUL: index.step and step are not the same!
        # step will never be None
        if index.step is None:
            self._assign_simple_slice(start, stop, valueList)
        else:
            self._assign_extended_slice(start, stop, step, valueList)

    def _assign_extended_slice_rebuild(self, start, stop, step, valueList):
        'Assign an extended slice by rebuilding entire list'
        indexList = range(start, stop, step)
        # extended slice, only allow assigning slice of same size
        if len(valueList) != len(indexList):
            raise ValueError('attempt to assign sequence of size %d '
                             'to extended slice of size %d'
                             % (len(valueList), len(indexList)))
        # we're not changing the length of the sequence
        newLen = len(self)
        newVals = dict(zip(indexList, valueList))

        def newItems():
            for i in range(newLen):
                if i in newVals:
                    yield newVals[i]
                else:
                    yield self._get_single_internal(i)
        self._rebuild(newLen, newItems())

    def _assign_extended_slice(self, start, stop, step, valueList):
        'Assign an extended slice by re-assigning individual items'
        indexList = range(start, stop, step)
        # extended slice, only allow assigning slice of same size
        if len(valueList) != len(indexList):
            raise ValueError('attempt to assign sequence of size %d '
                             'to extended slice of size %d'
                             % (len(valueList), len(indexList)))
        for i, val in zip(indexList, valueList):
            self._set_single(i, val)

    def _assign_simple_slice(self, start, stop, valueList):
        'Assign a simple slice; Can assign slice of any length'
        origLen = len(self)
        stop = max(start, stop)
        newLen = origLen - stop + start + len(valueList)

        def newItems():
            for i in range(origLen + 1):
                if i == start:
                    for val in valueList:
                        yield val
                if i < origLen:
                    if i < start or i >= stop:
                        yield self._get_single_internal(i)
        self._rebuild(newLen, newItems())
| bsd-3-clause |
chujieyang/ice | python/test/Ice/exceptions/Client.py | 3 | 1219 | #!/usr/bin/env python
# **********************************************************************
#
# Copyright (c) 2003-2015 ZeroC, Inc. All rights reserved.
#
# This copy of Ice is licensed to you under the terms described in the
# ICE_LICENSE file included in this distribution.
#
# **********************************************************************
import os, sys, traceback
import Ice
# Locate the installed Slice definitions; the generated Test module is
# produced at runtime by loadSlice, so AllTests must be imported after it.
slice_dir = Ice.getSliceDir()
if not slice_dir:
    print(sys.argv[0] + ': Slice directory not found.')
    sys.exit(1)
Ice.loadSlice('"-I' + slice_dir + '" Test.ice')
import AllTests
def run(args, communicator):
    """Run the client-side exception test suite, then shut the server down.

    Returns True on success; any test failure surfaces as an exception that
    the top-level handler below converts into a non-zero exit status.
    """
    thrower = AllTests.allTests(communicator)
    thrower.shutdown()
    return True
# Predefine so the cleanup check below cannot raise NameError when
# Ice.initialize() (or property creation) fails before the assignment.
communicator = None
try:
    initData = Ice.InitializationData()
    initData.properties = Ice.createProperties(sys.argv)
    initData.properties.setProperty("Ice.MessageSizeMax", "10")
    initData.properties.setProperty("Ice.Warn.Connections", "0")
    communicator = Ice.initialize(sys.argv, initData)
    status = run(sys.argv, communicator)
except:
    # Top-level boundary of a test script: report the failure and fall
    # through so the communicator is still destroyed and we exit non-zero.
    traceback.print_exc()
    status = False
if communicator:
    try:
        communicator.destroy()
    except:
        traceback.print_exc()
        status = False
# sys.exit(False) -> exit code 0 on success, 1 on failure.
sys.exit(not status)
| gpl-2.0 |
jmarsik/mopidy | mopidy/internal/deprecation.py | 12 | 3804 | from __future__ import unicode_literals
import contextlib
import re
import warnings
# Messages used in deprecation warnings are collected here so we can target
# them easily when ignoring warnings.
# Keys are stable message identifiers passed to warn()/ignore(); values are
# the human-readable warning texts (ignore() also regex-escapes them to build
# warning filters).
_MESSAGES = {
    # Deprecated features mpd:
    'mpd.protocol.playback.pause:state_arg':
        'The use of pause command w/o the PAUSE argument is deprecated.',
    'mpd.protocol.current_playlist.playlist':
        'Do not use this, instead use playlistinfo',
    # Deprecated features in audio:
    'audio.emit_end_of_stream': 'audio.emit_end_of_stream() is deprecated',
    # Deprecated features in core libary:
    'core.library.find_exact': 'library.find_exact() is deprecated',
    'core.library.lookup:uri_arg':
        'library.lookup() "uri" argument is deprecated',
    'core.library.search:kwargs_query':
        'library.search() with "kwargs" as query is deprecated',
    'core.library.search:empty_query':
        'library.search() with empty "query" argument deprecated',
    # Deprecated features in core playback:
    'core.playback.get_mute': 'playback.get_mute() is deprecated',
    'core.playback.set_mute': 'playback.set_mute() is deprecated',
    'core.playback.get_volume': 'playback.get_volume() is deprecated',
    'core.playback.set_volume': 'playback.set_volume() is deprecated',
    'core.playback.play:tl_track_kwargs':
        'playback.play() with "tl_track" argument is pending deprecation use '
        '"tlid" instead',
    # Deprecated features in core playlists:
    'core.playlists.filter': 'playlists.filter() is deprecated',
    'core.playlists.get_playlists': 'playlists.get_playlists() is deprecated',
    # Deprecated features in core tracklist:
    'core.tracklist.add:tracks_arg':
        'tracklist.add() "tracks" argument is deprecated',
    'core.tracklist.add:uri_arg':
        'tracklist.add() "uri" argument is deprecated',
    'core.tracklist.filter:kwargs_criteria':
        'tracklist.filter() with "kwargs" as criteria is deprecated',
    'core.tracklist.remove:kwargs_criteria':
        'tracklist.remove() with "kwargs" as criteria is deprecated',
    'core.tracklist.eot_track':
        'tracklist.eot_track() is pending deprecation, use '
        'tracklist.get_eot_tlid()',
    'core.tracklist.next_track':
        'tracklist.next_track() is pending deprecation, use '
        'tracklist.get_next_tlid()',
    'core.tracklist.previous_track':
        'tracklist.previous_track() is pending deprecation, use '
        'tracklist.get_previous_tlid()',
    'models.immutable.copy':
        'ImmutableObject.copy() is deprecated, use ImmutableObject.replace()',
}
def warn(msg_id, pending=False):
    """Emit the deprecation warning registered under ``msg_id``.

    Unknown ids are used verbatim as the warning text. ``pending=True``
    selects PendingDeprecationWarning instead of DeprecationWarning.
    """
    category = PendingDeprecationWarning if pending else DeprecationWarning
    warnings.warn(_MESSAGES.get(msg_id, msg_id), category)
@contextlib.contextmanager
def ignore(ids=None):
    # Context manager that silences deprecation warnings while its body runs.
    # `ids` may be None (ignore every DeprecationWarning), a single message
    # id / message string, or a list of them.
    with warnings.catch_warnings():
        # NOTE(review): `basestring` is Python 2-only; consistent with the
        # Python 2 era of this module -- revisit if ported to Python 3.
        if isinstance(ids, basestring):
            ids = [ids]
        if ids:
            for msg_id in ids:
                # filterwarnings() treats the message as a regex, so the
                # literal text must be escaped before being used as a filter.
                msg = re.escape(_MESSAGES.get(msg_id, msg_id))
                warnings.filterwarnings('ignore', msg, DeprecationWarning)
        else:
            warnings.filterwarnings('ignore', category=DeprecationWarning)
        yield
def deprecated_property(
        getter=None, setter=None, message='Property is deprecated'):
    """Build a property wrapping an attribute slated for removal.

    During development this is a convenient single place to add logging,
    emit warnings, or ``assert False`` to catch lingering uses of the
    deprecated properties.

    Using inspect to find the call sites to emit proper warnings makes
    parallel execution of our test suite slower than serial execution, so
    no extra overhead is added here by default.
    """
    prop = property(fget=getter, fset=setter)
    return prop
| apache-2.0 |
cvast/arches | arches/app/utils/JSONResponse.py | 1 | 1289 | from io import StringIO
from django.http import HttpResponse
from arches.app.utils.betterJSONSerializer import JSONSerializer
class JSONResponse(HttpResponse):
    """An HttpResponse whose body is ``content`` serialized to JSON.

    Serializer-specific keyword arguments are popped from ``kwargs`` before
    the base response is initialized; all remaining args are forwarded.
    """

    # (kwarg name, serializer option name, default) triples. A value of None
    # -- whether defaulted or passed explicitly -- means "omit the option",
    # matching the original per-option `!= None` checks.
    _SERIALIZER_OPTIONS = (
        ('ensure_ascii', 'ensure_ascii', True),
        ('stream', 'stream', None),
        ('indent', 'indent', None),
        ('fields', 'selected_fields', None),
        ('use_natural_keys', 'use_natural_keys', None),
        ('geom_format', 'geom_format', None),
    )

    def __init__(self, content=b'', *args, **kwargs):
        kwargs['content_type'] = 'text/json'
        options = {}
        for kwarg, option, default in self._SERIALIZER_OPTIONS:
            value = kwargs.pop(kwarg, default)
            if value is not None:
                options[option] = value
        # NOTE(review): deliberately bypasses HttpResponse.__init__ and calls
        # its parent instead -- presumably to skip the base class's content
        # handling before the serialized body is assigned below; confirm
        # against the targeted Django version.
        super(HttpResponse, self).__init__(*args, **kwargs)
        # Content is a bytestring. See the `content` property methods.
        self.content = JSONSerializer().serialize(content, **options)
2014c2g9/c2g9 | wsgi/static/Brython2.1.0-20140419-113919/Lib/threading.py | 730 | 45641 | """Thread module emulating a subset of Java's threading model."""
import sys as _sys
import _thread
from time import sleep as _sleep
try:
from time import monotonic as _time
except ImportError:
from time import time as _time
from traceback import format_exc as _format_exc
from _weakrefset import WeakSet
# Note regarding PEP 8 compliant names
# This threading model was originally inspired by Java, and inherited
# the convention of camelCase function and method names from that
# language. Those original names are not in any imminent danger of
# being deprecated (even for Py3k),so this module provides them as an
# alias for the PEP 8 compliant names
# Note that using the new PEP 8 compliant names facilitates substitution
# with the multiprocessing module, which doesn't provide the old
# Java inspired names.
__all__ = ['active_count', 'Condition', 'current_thread', 'enumerate', 'Event',
'Lock', 'RLock', 'Semaphore', 'BoundedSemaphore', 'Thread', 'Barrier',
'Timer', 'ThreadError', 'setprofile', 'settrace', 'local', 'stack_size']
# Rename some stuff so "from threading import *" is safe
_start_new_thread = _thread.start_new_thread
_allocate_lock = _thread.allocate_lock
get_ident = _thread.get_ident
ThreadError = _thread.error
try:
_CRLock = _thread.RLock
except AttributeError:
_CRLock = None
TIMEOUT_MAX = _thread.TIMEOUT_MAX
del _thread
# Support for profile and trace hooks
_profile_hook = None
_trace_hook = None
def setprofile(func):
    """Set a profile function for all threads started from the threading module.

    The func will be passed to sys.setprofile() for each thread, before its
    run() method is called.
    """
    # Stored module-wide; threads started after this call pick it up.
    global _profile_hook
    _profile_hook = func
def settrace(func):
    """Set a trace function for all threads started from the threading module.

    The func will be passed to sys.settrace() for each thread, before its run()
    method is called.
    """
    # Stored module-wide; threads started after this call pick it up.
    global _trace_hook
    _trace_hook = func
# Synchronization classes
# Lock is the raw (non-reentrant) lock straight from _thread.
Lock = _allocate_lock
def RLock(*args, **kwargs):
    """Factory function that returns a new reentrant lock.

    A reentrant lock must be released by the thread that acquired it. Once a
    thread has acquired a reentrant lock, the same thread may acquire it again
    without blocking; the thread must release it once for each time it has
    acquired it.
    """
    # Prefer the C implementation when _thread provides one; otherwise fall
    # back to the pure-Python _RLock below.
    if _CRLock is None:
        return _PyRLock(*args, **kwargs)
    return _CRLock(*args, **kwargs)
class _RLock:
    """This class implements reentrant lock objects.

    A reentrant lock must be released by the thread that acquired it. Once a
    thread has acquired a reentrant lock, the same thread may acquire it
    again without blocking; the thread must release it once for each time it
    has acquired it.
    """
    def __init__(self):
        # _block: the underlying non-reentrant lock; _owner: ident of the
        # holding thread (None when free); _count: recursion depth.
        self._block = _allocate_lock()
        self._owner = None
        self._count = 0
    def __repr__(self):
        owner = self._owner
        try:
            # Show the thread name instead of the raw ident when possible.
            owner = _active[owner].name
        except KeyError:
            pass
        return "<%s owner=%r count=%d>" % (
            self.__class__.__name__, owner, self._count)
    def acquire(self, blocking=True, timeout=-1):
        """Acquire a lock, blocking or non-blocking.

        When invoked without arguments: if this thread already owns the lock,
        increment the recursion level by one, and return immediately. Otherwise,
        if another thread owns the lock, block until the lock is unlocked. Once
        the lock is unlocked (not owned by any thread), then grab ownership, set
        the recursion level to one, and return. If more than one thread is
        blocked waiting until the lock is unlocked, only one at a time will be
        able to grab ownership of the lock. There is no return value in this
        case.

        When invoked with the blocking argument set to true, do the same thing
        as when called without arguments, and return true.

        When invoked with the blocking argument set to false, do not block. If a
        call without an argument would block, return false immediately;
        otherwise, do the same thing as when called without arguments, and
        return true.

        When invoked with the floating-point timeout argument set to a positive
        value, block for at most the number of seconds specified by timeout
        and as long as the lock cannot be acquired. Return true if the lock has
        been acquired, false if the timeout has elapsed.
        """
        me = get_ident()
        # Fast path: re-entry by the owning thread just bumps the count.
        if self._owner == me:
            self._count = self._count + 1
            return 1
        rc = self._block.acquire(blocking, timeout)
        if rc:
            self._owner = me
            self._count = 1
        return rc
    __enter__ = acquire
    def release(self):
        """Release a lock, decrementing the recursion level.

        If after the decrement it is zero, reset the lock to unlocked (not owned
        by any thread), and if any other threads are blocked waiting for the
        lock to become unlocked, allow exactly one of them to proceed. If after
        the decrement the recursion level is still nonzero, the lock remains
        locked and owned by the calling thread.

        Only call this method when the calling thread owns the lock. A
        RuntimeError is raised if this method is called when the lock is
        unlocked.

        There is no return value.
        """
        if self._owner != get_ident():
            raise RuntimeError("cannot release un-acquired lock")
        self._count = count = self._count - 1
        if not count:
            # Fully released: clear ownership before unblocking waiters.
            self._owner = None
            self._block.release()
    def __exit__(self, t, v, tb):
        self.release()
    # Internal methods used by condition variables
    def _acquire_restore(self, state):
        # Reacquire and restore the (count, owner) pair saved by
        # _release_save() -- used by Condition.wait().
        self._block.acquire()
        self._count, self._owner = state
    def _release_save(self):
        if self._count == 0:
            raise RuntimeError("cannot release un-acquired lock")
        # Fully release regardless of recursion depth and hand the saved
        # state back so it can be restored later.
        count = self._count
        self._count = 0
        owner = self._owner
        self._owner = None
        self._block.release()
        return (count, owner)
    def _is_owned(self):
        return self._owner == get_ident()
# Pure-Python fallback used by the RLock() factory when _thread.RLock is
# unavailable.
_PyRLock = _RLock
class Condition:
    """Class that implements a condition variable.

    A condition variable allows one or more threads to wait until they are
    notified by another thread.

    If the lock argument is given and not None, it must be a Lock or RLock
    object, and it is used as the underlying lock. Otherwise, a new RLock object
    is created and used as the underlying lock.
    """
    def __init__(self, lock=None):
        if lock is None:
            lock = RLock()
        self._lock = lock
        # Export the lock's acquire() and release() methods
        self.acquire = lock.acquire
        self.release = lock.release
        # If the lock defines _release_save() and/or _acquire_restore(),
        # these override the default implementations (which just call
        # release() and acquire() on the lock). Ditto for _is_owned().
        try:
            self._release_save = lock._release_save
        except AttributeError:
            pass
        try:
            self._acquire_restore = lock._acquire_restore
        except AttributeError:
            pass
        try:
            self._is_owned = lock._is_owned
        except AttributeError:
            pass
        # One freshly-allocated, already-acquired lock per waiting thread.
        self._waiters = []
    def __enter__(self):
        return self._lock.__enter__()
    def __exit__(self, *args):
        return self._lock.__exit__(*args)
    def __repr__(self):
        return "<Condition(%s, %d)>" % (self._lock, len(self._waiters))
    def _release_save(self):
        self._lock.release()           # No state to save
    def _acquire_restore(self, x):
        self._lock.acquire()           # Ignore saved state
    def _is_owned(self):
        # Return True if lock is owned by current_thread.
        # This method is called only if __lock doesn't have _is_owned().
        # Heuristic: if a non-blocking acquire succeeds, nobody owned it.
        if self._lock.acquire(0):
            self._lock.release()
            return False
        else:
            return True
    def wait(self, timeout=None):
        """Wait until notified or until a timeout occurs.

        If the calling thread has not acquired the lock when this method is
        called, a RuntimeError is raised.

        This method releases the underlying lock, and then blocks until it is
        awakened by a notify() or notify_all() call for the same condition
        variable in another thread, or until the optional timeout occurs. Once
        awakened or timed out, it re-acquires the lock and returns.

        When the timeout argument is present and not None, it should be a
        floating point number specifying a timeout for the operation in seconds
        (or fractions thereof).

        When the underlying lock is an RLock, it is not released using its
        release() method, since this may not actually unlock the lock when it
        was acquired multiple times recursively. Instead, an internal interface
        of the RLock class is used, which really unlocks it even when it has
        been recursively acquired several times. Another internal interface is
        then used to restore the recursion level when the lock is reacquired.
        """
        if not self._is_owned():
            raise RuntimeError("cannot wait on un-acquired lock")
        # The waiter lock is acquired once here, so the second acquire below
        # blocks until notify() releases it.
        waiter = _allocate_lock()
        waiter.acquire()
        self._waiters.append(waiter)
        saved_state = self._release_save()
        try:    # restore state no matter what (e.g., KeyboardInterrupt)
            if timeout is None:
                waiter.acquire()
                gotit = True
            else:
                if timeout > 0:
                    gotit = waiter.acquire(True, timeout)
                else:
                    gotit = waiter.acquire(False)
                if not gotit:
                    # Timed out: withdraw our waiter (notify() may have
                    # removed it concurrently, hence the ValueError guard).
                    try:
                        self._waiters.remove(waiter)
                    except ValueError:
                        pass
            return gotit
        finally:
            self._acquire_restore(saved_state)
    def wait_for(self, predicate, timeout=None):
        """Wait until a condition evaluates to True.

        predicate should be a callable which result will be interpreted as a
        boolean value. A timeout may be provided giving the maximum time to
        wait.
        """
        endtime = None
        waittime = timeout
        result = predicate()
        while not result:
            if waittime is not None:
                if endtime is None:
                    endtime = _time() + waittime
                else:
                    # Shrink the remaining wait on each spurious wakeup.
                    waittime = endtime - _time()
                    if waittime <= 0:
                        break
            self.wait(waittime)
            result = predicate()
        return result
    def notify(self, n=1):
        """Wake up one or more threads waiting on this condition, if any.

        If the calling thread has not acquired the lock when this method is
        called, a RuntimeError is raised.

        This method wakes up at most n of the threads waiting for the condition
        variable; it is a no-op if no threads are waiting.
        """
        if not self._is_owned():
            raise RuntimeError("cannot notify on un-acquired lock")
        __waiters = self._waiters
        waiters = __waiters[:n]
        if not waiters:
            return
        for waiter in waiters:
            # Releasing the waiter's lock unblocks its wait() call.
            waiter.release()
            try:
                __waiters.remove(waiter)
            except ValueError:
                pass
    def notify_all(self):
        """Wake up all threads waiting on this condition.

        If the calling thread has not acquired the lock when this method
        is called, a RuntimeError is raised.
        """
        self.notify(len(self._waiters))
    # Deprecated camelCase alias retained for Java-style compatibility.
    notifyAll = notify_all
class Semaphore:
    """This class implements semaphore objects.

    Semaphores manage a counter representing the number of release() calls minus
    the number of acquire() calls, plus an initial value. The acquire() method
    blocks if necessary until it can return without making the counter
    negative. If not given, value defaults to 1.
    """
    # After Tim Peters' semaphore class, but not quite the same (no maximum)
    def __init__(self, value=1):
        if value < 0:
            raise ValueError("semaphore initial value must be >= 0")
        # The condition guards _value; waiters block on it when _value == 0.
        self._cond = Condition(Lock())
        self._value = value
    def acquire(self, blocking=True, timeout=None):
        """Acquire a semaphore, decrementing the internal counter by one.

        When invoked without arguments: if the internal counter is larger than
        zero on entry, decrement it by one and return immediately. If it is zero
        on entry, block, waiting until some other thread has called release() to
        make it larger than zero. This is done with proper interlocking so that
        if multiple acquire() calls are blocked, release() will wake exactly one
        of them up. The implementation may pick one at random, so the order in
        which blocked threads are awakened should not be relied on. There is no
        return value in this case.

        When invoked with blocking set to true, do the same thing as when called
        without arguments, and return true.

        When invoked with blocking set to false, do not block. If a call without
        an argument would block, return false immediately; otherwise, do the
        same thing as when called without arguments, and return true.

        When invoked with a timeout other than None, it will block for at
        most timeout seconds. If acquire does not complete successfully in
        that interval, return false. Return true otherwise.
        """
        if not blocking and timeout is not None:
            raise ValueError("can't specify timeout for non-blocking acquire")
        rc = False
        endtime = None
        with self._cond:
            while self._value == 0:
                if not blocking:
                    break
                if timeout is not None:
                    if endtime is None:
                        endtime = _time() + timeout
                    else:
                        # Recompute remaining time after each wakeup.
                        timeout = endtime - _time()
                        if timeout <= 0:
                            break
                self._cond.wait(timeout)
            else:
                # while/else: only reached when _value > 0 (no break).
                self._value = self._value - 1
                rc = True
        return rc
    __enter__ = acquire
    def release(self):
        """Release a semaphore, incrementing the internal counter by one.

        When the counter is zero on entry and another thread is waiting for it
        to become larger than zero again, wake up that thread.
        """
        with self._cond:
            self._value = self._value + 1
            self._cond.notify()
    def __exit__(self, t, v, tb):
        self.release()
class BoundedSemaphore(Semaphore):
    """Implements a bounded semaphore.

    A bounded semaphore checks to make sure its current value doesn't exceed its
    initial value. If it does, ValueError is raised. In most situations
    semaphores are used to guard resources with limited capacity.

    If the semaphore is released too many times it's a sign of a bug. If not
    given, value defaults to 1.

    Like regular semaphores, bounded semaphores manage a counter representing
    the number of release() calls minus the number of acquire() calls, plus an
    initial value. The acquire() method blocks if necessary until it can return
    without making the counter negative. If not given, value defaults to 1.
    """
    def __init__(self, value=1):
        Semaphore.__init__(self, value)
        # Remember the ceiling so release() can detect over-release.
        self._initial_value = value
    def release(self):
        """Release a semaphore, incrementing the internal counter by one.

        When the counter is zero on entry and another thread is waiting for it
        to become larger than zero again, wake up that thread.

        If the number of releases exceeds the number of acquires,
        raise a ValueError.
        """
        with self._cond:
            if self._value >= self._initial_value:
                raise ValueError("Semaphore released too many times")
            self._value += 1
            self._cond.notify()
class Event:
    """Class implementing event objects.

    Events manage a flag that can be set to true with the set() method and reset
    to false with the clear() method. The wait() method blocks until the flag is
    true. The flag is initially false.
    """
    # After Tim Peters' event class (without is_posted())
    def __init__(self):
        # _cond serializes access to _flag and wakes waiters on set().
        self._cond = Condition(Lock())
        self._flag = False
    def _reset_internal_locks(self):
        # private!  called by Thread._reset_internal_locks by _after_fork()
        self._cond.__init__()
    def is_set(self):
        """Return true if and only if the internal flag is true."""
        return self._flag
    # Deprecated camelCase alias retained for Java-style compatibility.
    isSet = is_set
    def set(self):
        """Set the internal flag to true.

        All threads waiting for it to become true are awakened. Threads
        that call wait() once the flag is true will not block at all.
        """
        self._cond.acquire()
        try:
            self._flag = True
            self._cond.notify_all()
        finally:
            self._cond.release()
    def clear(self):
        """Reset the internal flag to false.

        Subsequently, threads calling wait() will block until set() is called to
        set the internal flag to true again.
        """
        self._cond.acquire()
        try:
            self._flag = False
        finally:
            self._cond.release()
    def wait(self, timeout=None):
        """Block until the internal flag is true.

        If the internal flag is true on entry, return immediately. Otherwise,
        block until another thread calls set() to set the flag to true, or until
        the optional timeout occurs.

        When the timeout argument is present and not None, it should be a
        floating point number specifying a timeout for the operation in seconds
        (or fractions thereof).

        This method returns the internal flag on exit, so it will always return
        True except if a timeout is given and the operation times out.
        """
        self._cond.acquire()
        try:
            signaled = self._flag
            if not signaled:
                # Condition.wait() returns False on timeout; pass that through.
                signaled = self._cond.wait(timeout)
            return signaled
        finally:
            self._cond.release()
# A barrier class.  Inspired in part by the pthread_barrier_* api and
# the CyclicBarrier class from Java.  See
# http://sourceware.org/pthreads-win32/manual/pthread_barrier_init.html and
# http://java.sun.com/j2se/1.5.0/docs/api/java/util/concurrent/
# CyclicBarrier.html
# for information.
# We maintain two main states, 'filling' and 'draining' enabling the barrier
# to be cyclic.  Threads are not allowed into it until it has fully drained
# since the previous cycle.  In addition, a 'resetting' state exists which is
# similar to 'draining' except that threads leave with a BrokenBarrierError,
# and a 'broken' state in which all threads get the exception.
class Barrier:
    """Implements a Barrier.

    Useful for synchronizing a fixed number of threads at known synchronization
    points.  Threads block on 'wait()' and are simultaneously released once
    they have all made that call.
    """
    def __init__(self, parties, action=None, timeout=None):
        """Create a barrier, initialised to 'parties' threads.

        'action' is a callable which, when supplied, will be called by one of
        the threads after they have all entered the barrier and just prior to
        releasing them all.  If a 'timeout' is provided, it is used as the
        default for all subsequent 'wait()' calls.
        """
        self._cond = Condition(Lock())
        self._action = action
        self._timeout = timeout
        self._parties = parties
        self._state = 0 # 0 filling, 1 draining, -1 resetting, -2 broken
        self._count = 0 # number of threads currently inside wait()
    def wait(self, timeout=None):
        """Wait for the barrier.

        When the specified number of threads have started waiting, they are all
        simultaneously awoken. If an 'action' was provided for the barrier, one
        of the threads will have executed that callback prior to returning.
        Returns an individual index number from 0 to 'parties-1'.
        """
        if timeout is None:
            timeout = self._timeout
        with self._cond:
            self._enter() # Block while the barrier drains.
            index = self._count
            self._count += 1
            try:
                if index + 1 == self._parties:
                    # We release the barrier
                    self._release()
                else:
                    # We wait until someone releases us
                    self._wait(timeout)
                return index
            finally:
                self._count -= 1
                # Wake up any threads waiting for barrier to drain.
                self._exit()
    # Block until the barrier is ready for us, or raise an exception
    # if it is broken.
    def _enter(self):
        while self._state in (-1, 1):
            # It is draining or resetting, wait until done
            self._cond.wait()
        # See if the barrier is in a broken state.
        if self._state < 0:
            raise BrokenBarrierError
        assert self._state == 0
    # Optionally run the 'action' and release the threads waiting
    # in the barrier.
    def _release(self):
        try:
            if self._action:
                self._action()
            # enter draining state
            self._state = 1
            self._cond.notify_all()
        except:
            # An exception during the _action handler.  Break and reraise.
            self._break()
            raise
    # Wait in the barrier until we are released.  Raise an exception
    # if the barrier is reset or broken.
    def _wait(self, timeout):
        if not self._cond.wait_for(lambda : self._state != 0, timeout):
            # Timed out.  Break the barrier.
            self._break()
            raise BrokenBarrierError
        if self._state < 0:
            raise BrokenBarrierError
        assert self._state == 1
    # If we are the last thread to exit the barrier, signal any threads
    # waiting for the barrier to drain.
    def _exit(self):
        if self._count == 0:
            if self._state in (-1, 1):
                # Resetting or draining: the cycle is over, start filling.
                self._state = 0
                self._cond.notify_all()
    def reset(self):
        """Reset the barrier to the initial state.

        Any threads currently waiting will get the BrokenBarrier exception
        raised.
        """
        with self._cond:
            if self._count > 0:
                if self._state == 0:
                    # Reset the barrier, waking up threads.
                    self._state = -1
                elif self._state == -2:
                    # Was broken, set it to reset state
                    # which clears when the last thread exits.
                    self._state = -1
            else:
                self._state = 0
            self._cond.notify_all()
    def abort(self):
        """Place the barrier into a 'broken' state.

        Useful in case of error.  Any currently waiting threads and threads
        attempting to 'wait()' will have BrokenBarrierError raised.
        """
        with self._cond:
            self._break()
    def _break(self):
        # An internal error was detected.  The barrier is set to
        # a broken state, all parties awakened.
        self._state = -2
        self._cond.notify_all()
    @property
    def parties(self):
        """Return the number of threads required to trip the barrier."""
        return self._parties
    @property
    def n_waiting(self):
        """Return the number of threads currently waiting at the barrier."""
        # We don't need synchronization here since this is an ephemeral result
        # anyway.  It returns the correct value in the steady state.
        if self._state == 0:
            return self._count
        return 0
    @property
    def broken(self):
        """Return True if the barrier is in a broken state."""
        return self._state == -2
# Exception raised by the Barrier class.
class BrokenBarrierError(RuntimeError):
    """Raised when a Barrier is broken, reset, or a wait on it fails."""
# Helper to generate new thread names
_counter = 0
def _newname(template="Thread-%d"):
global _counter
_counter = _counter + 1
return template % _counter
# Active thread administration.
_active_limbo_lock = _allocate_lock()  # guards both _active and _limbo
_active = {}    # maps thread id to Thread object (threads that have begun running)
_limbo = {}     # threads that start() registered but that have not begun running yet
# For debug and leak testing
_dangling = WeakSet()  # weak references to every Thread ever constructed
# Main class for threads
class Thread:
    """A class that represents a thread of control.

    This class can be safely subclassed in a limited fashion. There are two ways
    to specify the activity: by passing a callable object to the constructor, or
    by overriding the run() method in a subclass.
    """
    __initialized = False
    # Need to store a reference to sys.exc_info for printing
    # out exceptions when a thread tries to use a global var. during interp.
    # shutdown and thus raises an exception about trying to perform some
    # operation on/with a NoneType
    __exc_info = _sys.exc_info
    # Keep sys.exc_clear too to clear the exception just before
    # allowing .join() to return.
    #XXX __exc_clear = _sys.exc_clear
    def __init__(self, group=None, target=None, name=None,
                 args=(), kwargs=None, *, daemon=None):
        """This constructor should always be called with keyword arguments. Arguments are:

        *group* should be None; reserved for future extension when a ThreadGroup
        class is implemented.

        *target* is the callable object to be invoked by the run()
        method. Defaults to None, meaning nothing is called.

        *name* is the thread name. By default, a unique name is constructed of
        the form "Thread-N" where N is a small decimal number.

        *args* is the argument tuple for the target invocation. Defaults to ().

        *kwargs* is a dictionary of keyword arguments for the target
        invocation. Defaults to {}.

        If a subclass overrides the constructor, it must make sure to invoke
        the base class constructor (Thread.__init__()) before doing anything
        else to the thread.
        """
        assert group is None, "group argument must be None for now"
        if kwargs is None:
            kwargs = {}
        self._target = target
        self._name = str(name or _newname())
        self._args = args
        self._kwargs = kwargs
        if daemon is not None:
            self._daemonic = daemon
        else:
            # Daemon status is inherited from the creating thread.
            self._daemonic = current_thread().daemon
        self._ident = None               # OS-level thread id, set once started
        self._started = Event()          # set when the OS thread begins running
        self._stopped = False            # True once run() has finished
        self._block = Condition(Lock())  # protects _stopped; used by join()
        self._initialized = True
        # sys.stderr is not stored in the class like
        # sys.exc_info since it can be changed between instances
        self._stderr = _sys.stderr
        _dangling.add(self)
    def _reset_internal_locks(self):
        # private!  Called by _after_fork() to reset our internal locks as
        # they may be in an invalid state leading to a deadlock or crash.
        if hasattr(self, '_block'):  # DummyThread deletes _block
            self._block.__init__()
        self._started._reset_internal_locks()
    def __repr__(self):
        assert self._initialized, "Thread.__init__() was not called"
        status = "initial"
        if self._started.is_set():
            status = "started"
        if self._stopped:
            status = "stopped"
        if self._daemonic:
            status += " daemon"
        if self._ident is not None:
            status += " %s" % self._ident
        return "<%s(%s, %s)>" % (self.__class__.__name__, self._name, status)
    def start(self):
        """Start the thread's activity.

        It must be called at most once per thread object. It arranges for the
        object's run() method to be invoked in a separate thread of control.

        This method will raise a RuntimeError if called more than once on the
        same thread object.
        """
        if not self._initialized:
            raise RuntimeError("thread.__init__() not called")
        if self._started.is_set():
            raise RuntimeError("threads can only be started once")
        with _active_limbo_lock:
            _limbo[self] = self
        try:
            _start_new_thread(self._bootstrap, ())
        except Exception:
            # Could not spawn an OS thread: undo the limbo registration.
            with _active_limbo_lock:
                del _limbo[self]
            raise
        self._started.wait()
    def run(self):
        """Method representing the thread's activity.

        You may override this method in a subclass. The standard run() method
        invokes the callable object passed to the object's constructor as the
        target argument, if any, with sequential and keyword arguments taken
        from the args and kwargs arguments, respectively.
        """
        try:
            if self._target:
                self._target(*self._args, **self._kwargs)
        finally:
            # Avoid a refcycle if the thread is running a function with
            # an argument that has a member that points to the thread.
            del self._target, self._args, self._kwargs
    def _bootstrap(self):
        # Wrapper around the real bootstrap code that ignores
        # exceptions during interpreter cleanup. Those typically
        # happen when a daemon thread wakes up at an unfortunate
        # moment, finds the world around it destroyed, and raises some
        # random exception *** while trying to report the exception in
        # _bootstrap_inner() below ***. Those random exceptions
        # don't help anybody, and they confuse users, so we suppress
        # them. We suppress them only when it appears that the world
        # indeed has already been destroyed, so that exceptions in
        # _bootstrap_inner() during normal business hours are properly
        # reported. Also, we only suppress them for daemonic threads;
        # if a non-daemonic encounters this, something else is wrong.
        try:
            self._bootstrap_inner()
        except:
            if self._daemonic and _sys is None:
                return
            raise
    def _set_ident(self):
        # Record the OS-level identifier of the calling thread.
        self._ident = get_ident()
    def _bootstrap_inner(self):
        try:
            self._set_ident()
            self._started.set()
            # Promote ourselves from limbo to the active-thread table.
            with _active_limbo_lock:
                _active[self._ident] = self
                del _limbo[self]
            if _trace_hook:
                _sys.settrace(_trace_hook)
            if _profile_hook:
                _sys.setprofile(_profile_hook)
            try:
                self.run()
            except SystemExit:
                pass
            except:
                # If sys.stderr is no more (most likely from interpreter
                # shutdown) use self._stderr. Otherwise still use sys (as in
                # _sys) in case sys.stderr was redefined since the creation of
                # self.
                if _sys:
                    _sys.stderr.write("Exception in thread %s:\n%s\n" %
                                      (self.name, _format_exc()))
                else:
                    # Do the best job possible w/o a huge amt. of code to
                    # approximate a traceback (code ideas from
                    # Lib/traceback.py)
                    exc_type, exc_value, exc_tb = self._exc_info()
                    try:
                        print((
                            "Exception in thread " + self.name +
                            " (most likely raised during interpreter shutdown):"), file=self._stderr)
                        print((
                            "Traceback (most recent call last):"), file=self._stderr)
                        while exc_tb:
                            print((
                                '  File "%s", line %s, in %s' %
                                (exc_tb.tb_frame.f_code.co_filename,
                                 exc_tb.tb_lineno,
                                 exc_tb.tb_frame.f_code.co_name)), file=self._stderr)
                            exc_tb = exc_tb.tb_next
                        print(("%s: %s" % (exc_type, exc_value)), file=self._stderr)
                    # Make sure that exc_tb gets deleted since it is a memory
                    # hog; deleting everything else is just for thoroughness
                    finally:
                        del exc_type, exc_value, exc_tb
            finally:
                # Prevent a race in
                # test_threading.test_no_refcycle_through_target when
                # the exception keeps the target alive past when we
                # assert that it's dead.
                #XXX self.__exc_clear()
                pass
        finally:
            with _active_limbo_lock:
                self._stop()
                try:
                    # We don't call self._delete() because it also
                    # grabs _active_limbo_lock.
                    del _active[get_ident()]
                except:
                    pass
    def _stop(self):
        # Mark this thread as stopped and wake any join()ing threads.
        self._block.acquire()
        self._stopped = True
        self._block.notify_all()
        self._block.release()
    def _delete(self):
        "Remove current thread from the dict of currently running threads."
        # Notes about running with _dummy_thread:
        #
        # Must take care to not raise an exception if _dummy_thread is being
        # used (and thus this module is being used as an instance of
        # dummy_threading).  _dummy_thread.get_ident() always returns -1 since
        # there is only one thread if _dummy_thread is being used.  Thus
        # len(_active) is always <= 1 here, and any Thread instance created
        # overwrites the (if any) thread currently registered in _active.
        #
        # An instance of _MainThread is always created by 'threading'.  This
        # gets overwritten the instant an instance of Thread is created; both
        # threads return -1 from _dummy_thread.get_ident() and thus have the
        # same key in the dict.  So when the _MainThread instance created by
        # 'threading' tries to clean itself up when atexit calls this method
        # it gets a KeyError if another Thread instance was created.
        #
        # This all means that KeyError from trying to delete something from
        # _active if dummy_threading is being used is a red herring.  But
        # since it isn't if dummy_threading is *not* being used then don't
        # hide the exception.
        try:
            with _active_limbo_lock:
                del _active[get_ident()]
                # There must not be any python code between the previous line
                # and after the lock is released.  Otherwise a tracing function
                # could try to acquire the lock again in the same thread, (in
                # current_thread()), and would block.
        except KeyError:
            if 'dummy_threading' not in _sys.modules:
                raise
    def join(self, timeout=None):
        """Wait until the thread terminates.

        This blocks the calling thread until the thread whose join() method is
        called terminates -- either normally or through an unhandled exception
        or until the optional timeout occurs.

        When the timeout argument is present and not None, it should be a
        floating point number specifying a timeout for the operation in seconds
        (or fractions thereof). As join() always returns None, you must call
        isAlive() after join() to decide whether a timeout happened -- if the
        thread is still alive, the join() call timed out.

        When the timeout argument is not present or None, the operation will
        block until the thread terminates.

        A thread can be join()ed many times.

        join() raises a RuntimeError if an attempt is made to join the current
        thread as that would cause a deadlock. It is also an error to join() a
        thread before it has been started and attempts to do so raises the same
        exception.
        """
        if not self._initialized:
            raise RuntimeError("Thread.__init__() not called")
        if not self._started.is_set():
            raise RuntimeError("cannot join thread before it is started")
        if self is current_thread():
            raise RuntimeError("cannot join current thread")
        self._block.acquire()
        try:
            if timeout is None:
                while not self._stopped:
                    self._block.wait()
            else:
                # Wait in bounded slices so we honor the overall deadline
                # even across spurious wakeups.
                deadline = _time() + timeout
                while not self._stopped:
                    delay = deadline - _time()
                    if delay <= 0:
                        break
                    self._block.wait(delay)
        finally:
            self._block.release()
    @property
    def name(self):
        """A string used for identification purposes only.

        It has no semantics. Multiple threads may be given the same name. The
        initial name is set by the constructor.
        """
        assert self._initialized, "Thread.__init__() not called"
        return self._name
    @name.setter
    def name(self, name):
        assert self._initialized, "Thread.__init__() not called"
        self._name = str(name)
    @property
    def ident(self):
        """Thread identifier of this thread or None if it has not been started.

        This is a nonzero integer. See the thread.get_ident() function. Thread
        identifiers may be recycled when a thread exits and another thread is
        created. The identifier is available even after the thread has exited.
        """
        assert self._initialized, "Thread.__init__() not called"
        return self._ident
    def is_alive(self):
        """Return whether the thread is alive.

        This method returns True just before the run() method starts until just
        after the run() method terminates. The module function enumerate()
        returns a list of all alive threads.
        """
        assert self._initialized, "Thread.__init__() not called"
        return self._started.is_set() and not self._stopped
    # Deprecated camelCase alias kept for backward compatibility.
    isAlive = is_alive
    @property
    def daemon(self):
        """A boolean value indicating whether this thread is a daemon thread.

        This must be set before start() is called, otherwise RuntimeError is
        raised. Its initial value is inherited from the creating thread; the
        main thread is not a daemon thread and therefore all threads created in
        the main thread default to daemon = False.

        The entire Python program exits when no alive non-daemon threads are
        left.
        """
        assert self._initialized, "Thread.__init__() not called"
        return self._daemonic
    @daemon.setter
    def daemon(self, daemonic):
        if not self._initialized:
            raise RuntimeError("Thread.__init__() not called")
        if self._started.is_set():
            raise RuntimeError("cannot set daemon status of active thread");
        self._daemonic = daemonic
    # The four methods below are deprecated camelCase wrappers around the
    # 'daemon' and 'name' properties, kept for backward compatibility.
    def isDaemon(self):
        return self.daemon
    def setDaemon(self, daemonic):
        self.daemon = daemonic
    def getName(self):
        return self.name
    def setName(self, name):
        self.name = name
# The timer class was contributed by Itamar Shtull-Trauring
class Timer(Thread):
    """Call a function after a specified number of seconds:

        t = Timer(30.0, f, args=None, kwargs=None)
        t.start()
        t.cancel()     # stop the timer's action if it's still waiting
    """
    def __init__(self, interval, function, args=None, kwargs=None):
        """Arm a timer that fires *function(*args, **kwargs)* after
        *interval* seconds unless cancel() is called first."""
        Thread.__init__(self)
        self.interval = interval
        self.function = function
        self.args = [] if args is None else args
        self.kwargs = {} if kwargs is None else kwargs
        self.finished = Event()
    def cancel(self):
        """Stop the timer if it hasn't finished yet."""
        self.finished.set()
    def run(self):
        # Sleep until the interval elapses or cancel() fires the event;
        # only invoke the callback if we were not cancelled.
        self.finished.wait(self.interval)
        if not self.finished.is_set():
            self.function(*self.args, **self.kwargs)
        self.finished.set()
# Special thread class to represent the main thread
# This is garbage collected through an exit handler
class _MainThread(Thread):
    """Thread object standing in for the interpreter's main thread."""
    def __init__(self):
        Thread.__init__(self, name="MainThread", daemon=False)
        # The main thread is already running: mark it started, record its
        # ident, and register it so current_thread() can find it.
        self._started.set()
        self._set_ident()
        with _active_limbo_lock:
            _active[self._ident] = self
    def _exitfunc(self):
        # Installed as threading._shutdown.  Mark the main thread stopped,
        # then wait for every remaining non-daemon thread before removing
        # ourselves from the active-thread table.
        self._stop()
        while True:
            pending = _pickSomeNonDaemonThread()
            if pending is None:
                break
            pending.join()
        self._delete()
def _pickSomeNonDaemonThread():
    """Return an arbitrary live non-daemon thread, or None if none remain."""
    return next((t for t in enumerate() if not t.daemon and t.is_alive()),
                None)
# Dummy thread class to represent threads not started here.
# These aren't garbage collected when they die, nor can they be waited for.
# If they invoke anything in threading.py that calls current_thread(), they
# leave an entry in the _active dict forever after.
# Their purpose is to return *something* from current_thread().
# They are marked as daemon threads so we won't wait for them
# when we exit (conform previous semantics).
class _DummyThread(Thread):
    """Placeholder Thread for threads of control not created by this module."""
    def __init__(self):
        Thread.__init__(self, name=_newname("Dummy-%d"), daemon=True)
        # Thread._block consumes an OS-level locking primitive, which
        # can never be used by a _DummyThread.  Since a _DummyThread
        # instance is immortal, that's bad, so release this resource.
        del self._block
        self._started.set()
        self._set_ident()
        with _active_limbo_lock:
            _active[self._ident] = self
    def _stop(self):
        # A dummy thread's lifetime is unknowable; it is never marked stopped.
        pass
    def join(self, timeout=None):
        # There is no OS thread we control, so joining can never succeed.
        assert False, "cannot join a dummy thread"
# Global API functions
def current_thread():
    """Return the current Thread object, corresponding to the caller's thread of control.

    If the caller's thread of control was not created through the threading
    module, a dummy thread object with limited functionality is returned.
    """
    found = _active.get(get_ident())
    if found is None:
        # Foreign thread: hand back a dummy, which registers itself in
        # _active as a side effect of its constructor.
        found = _DummyThread()
    return found
currentThread = current_thread    # deprecated camelCase alias
def active_count():
    """Return the number of Thread objects currently alive.

    The returned count is equal to the length of the list returned by
    enumerate().
    """
    with _active_limbo_lock:
        alive = len(_active) + len(_limbo)
    return alive
activeCount = active_count    # deprecated camelCase alias
def _enumerate():
    # Lock-free twin of enumerate() for internal callers that already hold
    # (or must not take) _active_limbo_lock.
    threads = list(_active.values())
    threads.extend(_limbo.values())
    return threads
def enumerate():
    """Return a list of all Thread objects currently alive.

    The list includes daemonic threads, dummy thread objects created by
    current_thread(), and the main thread. It excludes terminated threads and
    threads that have not yet been started.
    """
    # Same snapshot as _enumerate(), but taken under the lock.
    with _active_limbo_lock:
        return _enumerate()
# Re-export the low-level stack size control from _thread.
from _thread import stack_size
# Create the main thread object,
# and make it available for the interpreter
# (Py_Main) as threading._shutdown.
_shutdown = _MainThread()._exitfunc
# get thread-local implementation, either from the thread
# module, or from the python fallback
try:
    from _thread import _local as local
except ImportError:
    # Pure-Python fallback used when the C implementation is unavailable.
    from _threading_local import local
def _after_fork():
    """Clean up threading state in the child process after os.fork()."""
    # This function is called by Python/ceval.c:PyEval_ReInitThreads which
    # is called from PyOS_AfterFork.  Here we cleanup threading module state
    # that should not exist after a fork.
    # Reset _active_limbo_lock, in case we forked while the lock was held
    # by another (non-forked) thread.  http://bugs.python.org/issue874900
    global _active_limbo_lock
    _active_limbo_lock = _allocate_lock()
    # fork() only copied the current thread; clear references to others.
    new_active = {}
    current = current_thread()
    with _active_limbo_lock:
        for thread in _enumerate():
            # Any lock/condition variable may be currently locked or in an
            # invalid state, so we reinitialize them.
            thread._reset_internal_locks()
            if thread is current:
                # There is only one active thread. We reset the ident to
                # its new value since it can have changed.
                ident = get_ident()
                thread._ident = ident
                new_active[ident] = thread
            else:
                # All the others are already stopped.
                thread._stop()
        # Only the forking thread survives into the child process.
        _limbo.clear()
        _active.clear()
        _active.update(new_active)
        assert len(_active) == 1
| gpl-2.0 |
tashaxe/Red-DiscordBot | lib/pip/_vendor/requests/packages/chardet/langbulgarianmodel.py | 2965 | 12784 | ######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Communicator client code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
# 255: Control characters that usually does not exist in any text
# 254: Carriage/Return
# 253: symbol (punctuation) that does not belong to word
# 252: 0 - 9
# Character Mapping Table:
# this table is modified base on win1251BulgarianCharToOrderMap, so
# only number <64 is sure valid
# Generated table mapping ISO-8859-5 byte values to Bulgarian letter
# frequency-order indices (255 control, 254 CR/LF, 253 symbol, 252 digit;
# lower values = more frequent letters).  Do not edit by hand.
Latin5_BulgarianCharToOrderMap = (
255,255,255,255,255,255,255,255,255,255,254,255,255,254,255,255,  # 00
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,  # 10
253,253,253,253,253,253,253,253,253,253,253,253,253,253,253,253,  # 20
252,252,252,252,252,252,252,252,252,252,253,253,253,253,253,253,  # 30
253, 77, 90, 99,100, 72,109,107,101, 79,185, 81,102, 76, 94, 82,  # 40
110,186,108, 91, 74,119, 84, 96,111,187,115,253,253,253,253,253,  # 50
253, 65, 69, 70, 66, 63, 68,112,103, 92,194,104, 95, 86, 87, 71,  # 60
116,195, 85, 93, 97,113,196,197,198,199,200,253,253,253,253,253,  # 70
194,195,196,197,198,199,200,201,202,203,204,205,206,207,208,209,  # 80
210,211,212,213,214,215,216,217,218,219,220,221,222,223,224,225,  # 90
 81,226,227,228,229,230,105,231,232,233,234,235,236, 45,237,238,  # a0
 31, 32, 35, 43, 37, 44, 55, 47, 40, 59, 33, 46, 38, 36, 41, 30,  # b0
 39, 28, 34, 51, 48, 49, 53, 50, 54, 57, 61,239, 67,240, 60, 56,  # c0
  1, 18, 9, 20, 11, 3, 23, 15, 2, 26, 12, 10, 14, 6, 4, 13,  # d0
  7, 8, 5, 19, 29, 25, 22, 21, 27, 24, 17, 75, 52,241, 42, 16,  # e0
 62,242,243,244, 58,245, 98,246,247,248,249,250,251, 91,252,253,  # f0
)
# Generated table mapping windows-1251 byte values to Bulgarian letter
# frequency-order indices (same encoding of 255/254/253/252 sentinels as
# the Latin5 map above).  Do not edit by hand.
win1251BulgarianCharToOrderMap = (
255,255,255,255,255,255,255,255,255,255,254,255,255,254,255,255,  # 00
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,  # 10
253,253,253,253,253,253,253,253,253,253,253,253,253,253,253,253,  # 20
252,252,252,252,252,252,252,252,252,252,253,253,253,253,253,253,  # 30
253, 77, 90, 99,100, 72,109,107,101, 79,185, 81,102, 76, 94, 82,  # 40
110,186,108, 91, 74,119, 84, 96,111,187,115,253,253,253,253,253,  # 50
253, 65, 69, 70, 66, 63, 68,112,103, 92,194,104, 95, 86, 87, 71,  # 60
116,195, 85, 93, 97,113,196,197,198,199,200,253,253,253,253,253,  # 70
206,207,208,209,210,211,212,213,120,214,215,216,217,218,219,220,  # 80
221, 78, 64, 83,121, 98,117,105,222,223,224,225,226,227,228,229,  # 90
 88,230,231,232,233,122, 89,106,234,235,236,237,238, 45,239,240,  # a0
 73, 80,118,114,241,242,243,244,245, 62, 58,246,247,248,249,250,  # b0
 31, 32, 35, 43, 37, 44, 55, 47, 40, 59, 33, 46, 38, 36, 41, 30,  # c0
 39, 28, 34, 51, 48, 49, 53, 50, 54, 57, 61,251, 67,252, 60, 56,  # d0
  1, 18, 9, 20, 11, 3, 23, 15, 2, 26, 12, 10, 14, 6, 4, 13,  # e0
  7, 8, 5, 19, 29, 25, 22, 21, 27, 24, 17, 75, 52,253, 42, 16,  # f0
)
# Model Table:
# total sequences: 100%
# first 512 sequences: 96.9392%
# first 1024 sequences:3.0618%
# rest sequences: 0.2992%
# negative sequences: 0.0020%
BulgarianLangModel = (
0,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,2,3,3,3,3,3,3,3,3,2,3,3,3,3,3,
3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,0,3,3,3,2,2,3,2,2,1,2,2,
3,1,3,3,2,3,3,3,3,3,3,3,3,3,3,3,3,0,3,3,3,3,3,3,3,3,3,3,0,3,0,1,
0,0,0,0,0,0,0,0,0,0,1,0,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,
3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,2,3,2,3,3,3,3,3,3,3,3,0,3,1,0,
0,1,0,0,0,0,0,0,0,0,1,1,0,1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,
3,2,2,2,3,3,3,3,3,3,3,3,3,3,3,3,3,1,3,2,3,3,3,3,3,3,3,3,0,3,0,0,
0,0,0,0,0,0,0,0,0,0,1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,2,3,3,2,3,3,3,3,3,3,3,3,3,3,3,3,1,3,2,3,3,3,3,3,3,3,3,0,3,0,0,
0,0,0,0,0,0,0,0,0,0,1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,3,3,3,3,3,3,3,3,3,2,3,2,2,1,3,3,3,3,2,2,2,1,1,2,0,1,0,1,0,0,
0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,1,
3,3,3,3,3,3,3,2,3,2,2,3,3,1,1,2,3,3,2,3,3,3,3,2,1,2,0,2,0,3,0,0,
0,0,0,0,0,0,0,1,0,0,2,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,1,
3,3,3,3,3,3,3,1,3,3,3,3,3,2,3,2,3,3,3,3,3,2,3,3,1,3,0,3,0,2,0,0,
0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,1,
3,3,3,3,3,3,3,3,1,3,3,2,3,3,3,1,3,3,2,3,2,2,2,0,0,2,0,2,0,2,0,0,
0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,1,
3,3,3,3,3,3,3,3,3,0,3,3,3,2,2,3,3,3,1,2,2,3,2,1,1,2,0,2,0,0,0,0,
1,0,0,0,0,0,0,0,0,0,2,0,0,1,0,0,1,0,0,0,1,0,0,0,0,0,0,0,0,0,0,1,
3,3,3,3,3,3,3,2,3,3,1,2,3,2,2,2,3,3,3,3,3,2,2,3,1,2,0,2,1,2,0,0,
0,0,0,0,0,0,0,0,0,0,3,0,0,1,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,1,
3,3,3,3,3,1,3,3,3,3,3,2,3,3,3,2,3,3,2,3,2,2,2,3,1,2,0,1,0,1,0,0,
0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,1,
3,3,3,3,3,3,3,3,3,3,3,1,1,1,2,2,1,3,1,3,2,2,3,0,0,1,0,1,0,1,0,0,
0,0,0,1,0,0,0,0,1,0,2,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,1,
3,3,3,3,3,2,2,3,2,2,3,1,2,1,1,1,2,3,1,3,1,2,2,0,1,1,1,1,0,1,0,0,
0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,1,
3,3,3,3,3,1,3,2,2,3,3,1,2,3,1,1,3,3,3,3,1,2,2,1,1,1,0,2,0,2,0,1,
0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,1,
3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,1,2,2,3,3,3,2,2,1,1,2,0,2,0,1,0,0,
0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,1,
3,0,1,2,1,3,3,2,3,3,3,3,3,2,3,2,1,0,3,1,2,1,2,1,2,3,2,1,0,1,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1,1,1,2,3,3,3,3,3,3,3,3,3,3,3,3,0,0,3,1,3,3,2,3,3,2,2,2,0,1,0,0,
0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
2,3,3,3,3,0,3,3,3,3,3,2,1,1,2,1,3,3,0,3,1,1,1,1,3,2,0,1,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,1,
3,3,2,2,2,3,3,3,3,3,3,3,3,3,3,3,1,1,3,1,3,3,2,3,2,2,2,3,0,2,0,0,
0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,3,3,3,2,3,3,2,2,3,2,1,1,1,1,1,3,1,3,1,1,0,0,0,1,0,0,0,1,0,0,
0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,
3,3,3,3,3,2,3,2,0,3,2,0,3,0,2,0,0,2,1,3,1,0,0,1,0,0,0,1,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,
3,3,3,3,2,1,1,1,1,2,1,1,2,1,1,1,2,2,1,2,1,1,1,0,1,1,0,1,0,1,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,1,
3,3,3,3,2,1,3,1,1,2,1,3,2,1,1,0,1,2,3,2,1,1,1,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
2,3,3,3,3,2,2,1,0,1,0,0,1,0,0,0,2,1,0,3,0,0,1,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,
3,3,3,2,3,2,3,3,1,3,2,1,1,1,2,1,1,2,1,3,0,1,0,0,0,1,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,1,1,2,2,3,3,2,3,2,2,2,3,1,2,2,1,1,2,1,1,2,2,0,1,1,0,1,0,2,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,3,3,2,1,3,1,0,2,2,1,3,2,1,0,0,2,0,2,0,1,0,0,0,0,0,0,0,1,0,0,
0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,1,
3,3,3,3,3,3,1,2,0,2,3,1,2,3,2,0,1,3,1,2,1,1,1,0,0,1,0,0,2,2,2,3,
2,2,2,2,1,2,1,1,2,2,1,1,2,0,1,1,1,0,0,1,1,0,0,1,1,0,0,0,1,1,0,1,
3,3,3,3,3,2,1,2,2,1,2,0,2,0,1,0,1,2,1,2,1,1,0,0,0,1,0,1,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,1,
3,3,2,3,3,1,1,3,1,0,3,2,1,0,0,0,1,2,0,2,0,1,0,0,0,1,0,1,2,1,2,2,
1,1,1,1,1,1,1,2,2,2,1,1,1,1,1,1,1,0,1,2,1,1,1,0,0,0,0,0,1,1,0,0,
3,1,0,1,0,2,3,2,2,2,3,2,2,2,2,2,1,0,2,1,2,1,1,1,0,1,2,1,2,2,2,1,
1,1,2,2,2,2,1,2,1,1,0,1,2,1,2,2,2,1,1,1,0,1,1,1,1,2,0,1,0,0,0,0,
2,3,2,3,3,0,0,2,1,0,2,1,0,0,0,0,2,3,0,2,0,0,0,0,0,1,0,0,2,0,1,2,
2,1,2,1,2,2,1,1,1,2,1,1,1,0,1,2,2,1,1,1,1,1,0,1,1,1,0,0,1,2,0,0,
3,3,2,2,3,0,2,3,1,1,2,0,0,0,1,0,0,2,0,2,0,0,0,1,0,1,0,1,2,0,2,2,
1,1,1,1,2,1,0,1,2,2,2,1,1,1,1,1,1,1,0,1,1,1,0,0,0,0,0,0,1,1,0,0,
2,3,2,3,3,0,0,3,0,1,1,0,1,0,0,0,2,2,1,2,0,0,0,0,0,0,0,0,2,0,1,2,
2,2,1,1,1,1,1,2,2,2,1,0,2,0,1,0,1,0,0,1,0,1,0,0,1,0,0,0,0,1,0,0,
3,3,3,3,2,2,2,2,2,0,2,1,1,1,1,2,1,2,1,1,0,2,0,1,0,1,0,0,2,0,1,2,
1,1,1,1,1,1,1,2,2,1,1,0,2,0,1,0,2,0,0,1,1,1,0,0,2,0,0,0,1,1,0,0,
2,3,3,3,3,1,0,0,0,0,0,0,0,0,0,0,2,0,0,1,1,0,0,0,0,0,0,1,2,0,1,2,
2,2,2,1,1,2,1,1,2,2,2,1,2,0,1,1,1,1,1,1,0,1,1,1,1,0,0,1,1,1,0,0,
2,3,3,3,3,0,2,2,0,2,1,0,0,0,1,1,1,2,0,2,0,0,0,3,0,0,0,0,2,0,2,2,
1,1,1,2,1,2,1,1,2,2,2,1,2,0,1,1,1,0,1,1,1,1,0,2,1,0,0,0,1,1,0,0,
2,3,3,3,3,0,2,1,0,0,2,0,0,0,0,0,1,2,0,2,0,0,0,0,0,0,0,0,2,0,1,2,
1,1,1,2,1,1,1,1,2,2,2,0,1,0,1,1,1,0,0,1,1,1,0,0,1,0,0,0,0,1,0,0,
3,3,2,2,3,0,1,0,1,0,0,0,0,0,0,0,1,1,0,3,0,0,0,0,0,0,0,0,1,0,2,2,
1,1,1,1,1,2,1,1,2,2,1,2,2,1,0,1,1,1,1,1,0,1,0,0,1,0,0,0,1,1,0,0,
3,1,0,1,0,2,2,2,2,3,2,1,1,1,2,3,0,0,1,0,2,1,1,0,1,1,1,1,2,1,1,1,
1,2,2,1,2,1,2,2,1,1,0,1,2,1,2,2,1,1,1,0,0,1,1,1,2,1,0,1,0,0,0,0,
2,1,0,1,0,3,1,2,2,2,2,1,2,2,1,1,1,0,2,1,2,2,1,1,2,1,1,0,2,1,1,1,
1,2,2,2,2,2,2,2,1,2,0,1,1,0,2,1,1,1,1,1,0,0,1,1,1,1,0,1,0,0,0,0,
2,1,1,1,1,2,2,2,2,1,2,2,2,1,2,2,1,1,2,1,2,3,2,2,1,1,1,1,0,1,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
2,2,2,3,2,0,1,2,0,1,2,1,1,0,1,0,1,2,1,2,0,0,0,1,1,0,0,0,1,0,0,2,
1,1,0,0,1,1,0,1,1,1,1,0,2,0,1,1,1,0,0,1,1,0,0,0,0,1,0,0,0,1,0,0,
2,0,0,0,0,1,2,2,2,2,2,2,2,1,2,1,1,1,1,1,1,1,0,1,1,1,1,1,2,1,1,1,
1,2,2,2,2,1,1,2,1,2,1,1,1,0,2,1,2,1,1,1,0,2,1,1,1,1,0,1,0,0,0,0,
3,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,1,0,
1,1,0,1,0,1,1,1,1,1,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
2,2,2,3,2,0,0,0,0,1,0,0,0,0,0,0,1,1,0,2,0,0,0,0,0,0,0,0,1,0,1,2,
1,1,1,1,1,1,0,0,2,2,2,2,2,0,1,1,0,1,1,1,1,1,0,0,1,0,0,0,1,1,0,1,
2,3,1,2,1,0,1,1,0,2,2,2,0,0,1,0,0,1,1,1,1,0,0,0,0,0,0,0,1,0,1,2,
1,1,1,1,2,1,1,1,1,1,1,1,1,0,1,1,0,1,0,1,0,1,0,0,1,0,0,0,0,1,0,0,
2,2,2,2,2,0,0,2,0,0,2,0,0,0,0,0,0,1,0,1,0,0,0,0,0,0,0,0,2,0,2,2,
1,1,1,1,1,0,0,1,2,1,1,0,1,0,1,0,0,0,0,1,1,0,0,0,0,0,0,0,0,0,0,0,
1,2,2,2,2,0,0,2,0,1,1,0,0,0,1,0,0,2,0,2,0,0,0,0,0,0,0,0,0,0,1,1,
0,0,0,1,1,1,1,1,1,1,1,1,1,0,1,0,0,1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,
1,2,2,3,2,0,0,1,0,0,1,0,0,0,0,0,0,1,0,2,0,0,0,1,0,0,0,0,0,0,0,2,
1,1,0,0,1,0,0,0,1,1,0,0,1,0,1,1,0,0,0,1,1,0,0,0,0,0,0,0,0,0,0,0,
2,1,2,2,2,1,2,1,2,2,1,1,2,1,1,1,0,1,1,1,1,2,0,1,0,1,1,1,1,0,1,1,
1,1,2,1,1,1,1,1,1,0,0,1,2,1,1,1,1,1,1,0,0,1,1,1,0,0,0,0,0,0,0,0,
1,0,0,1,3,1,1,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
2,2,2,2,1,0,0,1,0,2,0,0,0,0,0,1,1,1,0,1,0,0,0,0,0,0,0,0,2,0,0,1,
0,2,0,1,0,0,1,1,2,0,1,0,1,0,1,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,
1,2,2,2,2,0,1,1,0,2,1,0,1,1,1,0,0,1,0,2,0,1,0,0,0,0,0,0,0,0,0,1,
0,1,0,0,1,0,0,0,1,1,0,0,1,0,0,1,0,0,0,1,1,0,0,0,0,0,0,0,0,0,0,0,
2,2,2,2,2,0,0,1,0,0,0,1,0,1,0,0,0,1,0,1,0,0,0,0,0,0,0,0,0,0,0,1,
0,1,0,1,1,1,0,0,1,1,1,0,1,0,0,0,0,0,0,1,1,0,0,0,0,0,0,0,0,0,0,0,
2,0,1,0,0,1,2,1,1,1,1,1,1,2,2,1,0,0,1,0,1,0,0,0,0,1,1,1,1,0,0,0,
1,1,2,1,1,1,1,0,0,0,1,1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
2,2,1,2,1,0,0,1,0,0,0,0,0,0,0,0,1,1,0,1,0,0,0,0,0,0,0,0,0,0,0,1,
0,0,0,0,0,0,0,0,1,1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1,0,0,1,2,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,1,0,0,0,
0,1,1,0,1,1,1,0,0,1,0,0,1,0,1,0,0,0,1,0,0,0,0,0,1,0,0,0,0,0,0,0,
1,0,1,0,0,1,1,1,1,1,1,1,1,1,1,1,0,0,1,0,2,0,0,2,0,1,0,0,1,0,0,1,
1,1,0,0,1,1,0,1,0,0,0,1,0,0,1,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,1,1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,1,0,
1,1,1,1,1,1,1,2,0,0,0,0,0,0,2,1,0,1,1,0,0,1,1,1,0,1,0,0,0,0,0,0,
2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1,0,0,1,1,1,1,1,1,1,1,1,1,1,1,1,1,0,1,0,1,1,0,1,1,1,1,1,0,1,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,
)
# Language-model descriptor consumed by chardet's SBCSCharSetProber for
# Bulgarian text in the ISO-8859-5 (Latin/Cyrillic) encoding.
Latin5BulgarianModel = {
  'charToOrderMap': Latin5_BulgarianCharToOrderMap,
  'precedenceMatrix': BulgarianLangModel,
  'mTypicalPositiveRatio': 0.969392,   # share of text covered by the matrix
  'keepEnglishLetter': False,
  'charsetName': "ISO-8859-5"
}
# Language-model descriptor consumed by chardet's SBCSCharSetProber for
# Bulgarian text in the windows-1251 encoding.
Win1251BulgarianModel = {
  'charToOrderMap': win1251BulgarianCharToOrderMap,
  'precedenceMatrix': BulgarianLangModel,
  'mTypicalPositiveRatio': 0.969392,   # share of text covered by the matrix
  'keepEnglishLetter': False,
  'charsetName': "windows-1251"
}
# flake8: noqa
| gpl-3.0 |
nirmeshk/oh-mainline | vendor/packages/celery/celery/tests/test_bin/test_celerybeat.py | 32 | 5444 | from __future__ import absolute_import
from __future__ import with_statement
import logging
import sys
from collections import defaultdict
from kombu.tests.utils import redirect_stdouts
from celery import beat
from celery import platforms
from celery.app import app_or_default
from celery.bin import celerybeat as celerybeat_bin
from celery.apps import beat as beatapp
from celery.tests.utils import AppCase
class MockedShelveModule(object):
    """Stand-in for the ``shelve`` module: every filename maps to a dict."""

    # One shared dict per distinct filename, created lazily on first open().
    shelves = defaultdict(dict)

    def open(self, filename, *args, **kwargs):
        # Extra positional/keyword arguments are accepted and ignored,
        # mirroring the real ``shelve.open`` signature.
        return self.shelves[filename]


mocked_shelve = MockedShelveModule()
class MockService(beat.Service):
    """beat.Service double that records start()/sync() calls on the class."""

    started = False   # flipped by start()
    in_sync = False   # flipped by sync()
    persistence = mocked_shelve  # avoid touching a real shelve file

    def start(self):
        self.__class__.started = True

    def sync(self):
        self.__class__.in_sync = True
class MockBeat(beatapp.Beat):
    """Beat app double whose run() only records that it was invoked."""

    running = False

    def run(self):
        self.__class__.running = True
class MockBeat2(beatapp.Beat):
    """Beat app double using MockService and no real signal handlers."""

    Service = MockService

    def install_sync_handler(self, b):
        # Installing real signal handlers would interfere with the test run.
        pass
class MockBeat3(beatapp.Beat):
    """Beat app double whose handler installation always fails."""

    Service = MockService

    def install_sync_handler(self, b):
        # Simulate a startup failure so critical-error logging can be tested.
        raise TypeError("xxx")
class test_Beat(AppCase):
    """Tests for the celerybeat application wrapper (celery.apps.beat.Beat)."""

    def test_loglevel_string(self):
        # String log levels must resolve to the numeric logging constants,
        # while already-numeric levels pass through unchanged.
        b = beatapp.Beat(loglevel="DEBUG")
        self.assertEqual(b.loglevel, logging.DEBUG)
        b2 = beatapp.Beat(loglevel=logging.DEBUG)
        self.assertEqual(b2.loglevel, logging.DEBUG)

    def test_init_loader(self):
        b = beatapp.Beat()
        b.init_loader()

    def test_process_title(self):
        b = beatapp.Beat()
        b.set_process_title()

    def test_run(self):
        b = MockBeat2()
        MockService.started = False
        b.run()
        self.assertTrue(MockService.started)

    def psig(self, fun, *args, **kwargs):
        """Run *fun* while capturing signal-handler registrations.

        Returns the mapping of signal name -> handler that *fun* installed.
        The real platforms.signals registry is always restored afterwards.
        """
        handlers = {}

        class Signals(platforms.Signals):

            def __setitem__(self, sig, handler):
                handlers[sig] = handler

        p, platforms.signals = platforms.signals, Signals()
        try:
            fun(*args, **kwargs)
            return handlers
        finally:
            platforms.signals = p

    def test_install_sync_handler(self):
        b = beatapp.Beat()
        clock = MockService()
        MockService.in_sync = False
        handlers = self.psig(b.install_sync_handler, clock)
        # The SIGINT handler must sync the scheduler, then exit the process.
        with self.assertRaises(SystemExit):
            handlers["SIGINT"]("SIGINT", object())
        self.assertTrue(MockService.in_sync)
        MockService.in_sync = False

    def test_setup_logging(self):
        try:
            # py3k
            delattr(sys.stdout, "logger")
        except AttributeError:
            pass
        b = beatapp.Beat()
        b.redirect_stdouts = False
        b.setup_logging()
        # With redirection disabled, stdout must not gain a .logger attribute.
        with self.assertRaises(AttributeError):
            sys.stdout.logger

    @redirect_stdouts
    def test_logs_errors(self, stdout, stderr):
        class MockLogger(object):
            _critical = []

            def debug(self, *args, **kwargs):
                pass

            def critical(self, msg, *args, **kwargs):
                self._critical.append(msg)

        logger = MockLogger()
        # MockBeat3.install_sync_handler raises, so startup must log critical.
        b = MockBeat3(socket_timeout=None)
        b.start_scheduler(logger)
        self.assertTrue(logger._critical)

    @redirect_stdouts
    def test_use_pidfile(self, stdout, stderr):
        from celery import platforms

        class create_pidlock(object):
            instance = [None]

            def __init__(self, file):
                self.file = file
                self.instance[0] = self

            def acquire(self):
                self.acquired = True

                class Object(object):

                    def release(self):
                        pass

                return Object()

        prev, platforms.create_pidlock = platforms.create_pidlock, \
                create_pidlock
        try:
            b = MockBeat2(pidfile="pidfilelockfilepid", socket_timeout=None)
            b.start_scheduler()
            self.assertTrue(create_pidlock.instance[0].acquired)
        finally:
            platforms.create_pidlock = prev
class MockDaemonContext(object):
    """Daemon-context double that records open/close on the class itself."""

    opened = False
    closed = False

    def __init__(self, *args, **kwargs):
        # Accept and ignore the real DaemonContext constructor arguments.
        pass

    def open(self):
        type(self).opened = True
        return self

    def close(self, *args):
        type(self).closed = True

    # Allow use as a context manager as well as via open()/close().
    __enter__ = open
    __exit__ = close
class test_div(AppCase):
    """Tests for the celerybeat command-line entry points."""

    def setup(self):
        # Swap in the mocks so no real beat service or daemon is started.
        self.prev, beatapp.Beat = beatapp.Beat, MockBeat
        # NOTE(review): celerybeat_bin.detached is saved in self.ctx but
        # never restored in teardown() -- confirm whether that leak between
        # tests is intentional.
        self.ctx, celerybeat_bin.detached = \
            celerybeat_bin.detached, MockDaemonContext

    def teardown(self):
        beatapp.Beat = self.prev

    def test_main(self):
        sys.argv = [sys.argv[0], "-s", "foo"]
        try:
            celerybeat_bin.main()
            self.assertTrue(MockBeat.running)
        finally:
            MockBeat.running = False

    def test_detach(self):
        cmd = celerybeat_bin.BeatCommand()
        cmd.app = app_or_default()
        cmd.run(detach=True)
        # The daemon context must be both entered and exited.
        self.assertTrue(MockDaemonContext.opened)
        self.assertTrue(MockDaemonContext.closed)

    def test_parse_options(self):
        cmd = celerybeat_bin.BeatCommand()
        cmd.app = app_or_default()
        options, args = cmd.parse_options("celerybeat", ["-s", "foo"])
        self.assertEqual(options.schedule, "foo")
| agpl-3.0 |
FelixZYY/gyp | test/ios/gyptest-archs.py | 136 | 1786 | #!/usr/bin/env python
# Copyright (c) 2013 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Verifies that device and simulator bundles are built correctly.
"""
import TestGyp
import TestMac
import collections
import sys
if sys.platform == 'darwin':
    test = TestGyp.TestGyp(formats=['ninja', 'xcode'])

    # (configuration, target, expected architectures) triples.
    test_cases = [
        ('Default', 'TestArch32Bits', ['i386']),
        ('Default-iphoneos', 'TestArch32Bits', ['armv7']),
    ]

    # Targets built with no explicit ARCHS are only exercised on older Xcode.
    if TestMac.Xcode.Version() < '0510':
        test_cases.extend([
            ('Default', 'TestNoArchs', ['i386']),
            ('Default-iphoneos', 'TestNoArchs', ['armv7'])])

    # 64-bit and multi-arch targets require Xcode >= 5.0.
    if TestMac.Xcode.Version() >= '0500':
        test_cases.extend([
            ('Default', 'TestArch64Bits', ['x86_64']),
            ('Default', 'TestMultiArchs', ['i386', 'x86_64']),
            ('Default-iphoneos', 'TestArch64Bits', ['arm64']),
            ('Default-iphoneos', 'TestMultiArchs', ['armv7', 'arm64'])])

    test.run_gyp('test-archs.gyp', chdir='app-bundle')
    for configuration, target, archs in test_cases:
        # Device builds are encoded as a '-iphoneos' suffix on the config name.
        is_device_build = configuration.endswith('-iphoneos')

        kwds = collections.defaultdict(list)
        if test.format == 'xcode':
            if is_device_build:
                configuration, sdk = configuration.split('-')
                kwds['arguments'].extend(['-sdk', sdk])
            if TestMac.Xcode.Version() < '0500':
                kwds['arguments'].extend(['-arch', archs[0]])

        test.set_configuration(configuration)
        filename = '%s.bundle/%s' % (target, target)
        test.build('test-archs.gyp', target, chdir='app-bundle', **kwds)
        result_file = test.built_file_path(filename, chdir='app-bundle')

        # The bundle binary must exist and contain exactly the expected archs.
        test.must_exist(result_file)
        TestMac.CheckFileType(test, result_file, archs)

    test.pass_test()
| bsd-3-clause |
mapado/haversine | tests/test_haversine.py | 1 | 1610 | from haversine import haversine, haversine_vector, Unit
from numpy.testing import assert_allclose
LYON = (45.7597, 4.8422)
PARIS = (48.8567, 2.3508)
LONDON = (51.509865, -0.118092)
NEW_YORK = (40.7033962, -74.2351462)
EXPECTED_LYON_PARIS = {Unit.KILOMETERS: 392.2172595594006,
Unit.METERS: 392217.2595594006,
Unit.MILES: 243.71250609539814,
Unit.NAUTICAL_MILES: 211.78037755311516,
Unit.FEET: 1286802.0326751503,
Unit.INCHES: 15441624.392102592}
def haversine_test_factory(unit):
    """Build a test asserting the Lyon-Paris distance for *unit*.

    The unit is exercised both as the Unit enum member and as its raw
    string value, and both forms must yield the expected distance.
    """
    def test():
        want = EXPECTED_LYON_PARIS[unit]
        got_enum = haversine(LYON, PARIS, unit=unit)
        assert got_enum == want
        assert isinstance(unit.value, str)
        got_str = haversine(LYON, PARIS, unit=unit.value)
        assert got_str == want
    return test
test_kilometers = haversine_test_factory(Unit.KILOMETERS)
test_meters = haversine_test_factory(Unit.METERS)
test_miles = haversine_test_factory(Unit.MILES)
test_nautical_miles = haversine_test_factory(Unit.NAUTICAL_MILES)
test_feet = haversine_test_factory(Unit.FEET)
test_inches = haversine_test_factory(Unit.INCHES)
def test_units_enum():
    """Every Unit member must have a conversion factor registered."""
    from haversine.haversine import _CONVERSIONS
    for unit in Unit:
        assert unit in _CONVERSIONS
def test_haversine_vector_comb():
expected = [[ 392.21725956, 343.37455271], [6163.43638211, 5586.48447423]]
assert_allclose( # See https://numpy.org/doc/stable/reference/generated/numpy.testing.assert_allclose.html#numpy.testing.assert_allclose
haversine_vector([LYON, LONDON], [PARIS, NEW_YORK], Unit.KILOMETERS, comb=True),
expected
) | mit |
croxis/SpaceDrive | spacedrive/renderpipeline/rplibs/colorama/initialise.py | 7 | 1999 | # Copyright Jonathan Hartley 2013. BSD 3-Clause license, see LICENSE file.
import atexit
import contextlib
import sys
from .ansitowin32 import AnsiToWin32
orig_stdout = None
orig_stderr = None
wrapped_stdout = None
wrapped_stderr = None
atexit_done = False
def reset_all():
    """Write the ANSI reset-all sequence to the original stdout."""
    if AnsiToWin32 is not None:    # Issue #74: objects might become None at exit
        AnsiToWin32(orig_stdout).reset_all()
def init(autoreset=False, convert=None, strip=None, wrap=True):
    """Install colorama's stdout/stderr wrappers.

    autoreset -- reset styling after every write.
    convert   -- force (True) / forbid (False) ANSI-to-Win32 conversion;
                 None lets the wrapper auto-detect.
    strip     -- force / forbid stripping of ANSI sequences; None auto-detects.
    wrap      -- whether to wrap sys.stdout / sys.stderr at all; must stay
                 True if any of the other options is set.
    """
    if not wrap and any([autoreset, convert, strip]):
        raise ValueError('wrap=False conflicts with any other arg=True')

    global wrapped_stdout, wrapped_stderr
    global orig_stdout, orig_stderr

    # Remember the current streams so deinit() can restore them later.
    orig_stdout = sys.stdout
    orig_stderr = sys.stderr

    # sys.stdout / sys.stderr can be None (e.g. pythonw on Windows).
    if sys.stdout is None:
        wrapped_stdout = None
    else:
        sys.stdout = wrapped_stdout = \
            wrap_stream(orig_stdout, convert, strip, autoreset, wrap)
    if sys.stderr is None:
        wrapped_stderr = None
    else:
        sys.stderr = wrapped_stderr = \
            wrap_stream(orig_stderr, convert, strip, autoreset, wrap)

    # Reset styling at interpreter exit, registering the atexit hook only
    # once even if init() is called repeatedly.
    global atexit_done
    if not atexit_done:
        atexit.register(reset_all)
        atexit_done = True
def deinit():
    """Restore the original, unwrapped stdout/stderr saved by init()."""
    for attr, stream in (('stdout', orig_stdout), ('stderr', orig_stderr)):
        if stream is not None:
            setattr(sys, attr, stream)
@contextlib.contextmanager
def colorama_text(*args, **kwargs):
    """Context manager: init() colorama on entry, deinit() on exit.

    All arguments are forwarded to init().
    """
    init(*args, **kwargs)
    try:
        yield
    finally:
        deinit()
def reinit():
    """Re-install the wrapped streams after a deinit()."""
    for attr, stream in (('stdout', wrapped_stdout),
                         ('stderr', wrapped_stderr)):
        if stream is not None:
            setattr(sys, attr, stream)
def wrap_stream(stream, convert, strip, autoreset, wrap):
    """Return *stream*, wrapped in AnsiToWin32 when wrapping is wanted.

    The wrapper is only used when AnsiToWin32 itself decides wrapping is
    needed (should_wrap()); otherwise the original stream is returned
    unchanged.
    """
    if wrap:
        wrapper = AnsiToWin32(stream,
            convert=convert, strip=strip, autoreset=autoreset)
        if wrapper.should_wrap():
            stream = wrapper.stream
    return stream
| mit |
276361270/sqlalchemy | examples/adjacency_list/adjacency_list.py | 32 | 3538 | from sqlalchemy import Column, ForeignKey, Integer, String, create_engine
from sqlalchemy.orm import Session, relationship, backref,\
joinedload_all
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm.collections import attribute_mapped_collection
Base = declarative_base()
class TreeNode(Base):
    """Adjacency-list tree node: each row references its parent row."""

    __tablename__ = 'tree'

    id = Column(Integer, primary_key=True)
    parent_id = Column(Integer, ForeignKey(id))
    name = Column(String(50), nullable=False)

    children = relationship("TreeNode",
                        # cascade deletions
                        cascade="all, delete-orphan",

                        # many to one + adjacency list - remote_side
                        # is required to reference the 'remote'
                        # column in the join condition.
                        backref=backref("parent", remote_side=id),

                        # children will be represented as a dictionary
                        # on the "name" attribute.
                        collection_class=attribute_mapped_collection('name'),
                    )

    def __init__(self, name, parent=None):
        self.name = name
        self.parent = parent

    def __repr__(self):
        return "TreeNode(name=%r, id=%r, parent_id=%r)" % (
            self.name,
            self.id,
            self.parent_id
        )

    def dump(self, _indent=0):
        # Indent proportionally to tree depth, then recurse into children.
        return " " * _indent + repr(self) + \
            "\n" + \
            "".join([
                c.dump(_indent + 1)
                for c in self.children.values()]
            )
if __name__ == '__main__':
engine = create_engine('sqlite://', echo=True)
def msg(msg, *args):
msg = msg % args
print("\n\n\n" + "-" * len(msg.split("\n")[0]))
print(msg)
print("-" * len(msg.split("\n")[0]))
msg("Creating Tree Table:")
Base.metadata.create_all(engine)
session = Session(engine)
node = TreeNode('rootnode')
TreeNode('node1', parent=node)
TreeNode('node3', parent=node)
node2 = TreeNode('node2')
TreeNode('subnode1', parent=node2)
node.children['node2'] = node2
TreeNode('subnode2', parent=node.children['node2'])
msg("Created new tree structure:\n%s", node.dump())
msg("flush + commit:")
session.add(node)
session.commit()
msg("Tree After Save:\n %s", node.dump())
TreeNode('node4', parent=node)
TreeNode('subnode3', parent=node.children['node4'])
TreeNode('subnode4', parent=node.children['node4'])
TreeNode('subsubnode1', parent=node.children['node4'].children['subnode3'])
# remove node1 from the parent, which will trigger a delete
# via the delete-orphan cascade.
del node.children['node1']
msg("Removed node1. flush + commit:")
session.commit()
msg("Tree after save:\n %s", node.dump())
msg("Emptying out the session entirely, "
"selecting tree on root, using eager loading to join four levels deep.")
session.expunge_all()
node = session.query(TreeNode).\
options(joinedload_all("children", "children",
"children", "children")).\
filter(TreeNode.name == "rootnode").\
first()
msg("Full Tree:\n%s", node.dump())
msg("Marking root node as deleted, flush + commit:")
session.delete(node)
session.commit()
| mit |
dsquareindia/scikit-learn | sklearn/svm/base.py | 19 | 34587 | from __future__ import print_function
import numpy as np
import scipy.sparse as sp
import warnings
from abc import ABCMeta, abstractmethod
from . import libsvm, liblinear
from . import libsvm_sparse
from ..base import BaseEstimator, ClassifierMixin
from ..preprocessing import LabelEncoder
from ..utils.multiclass import _ovr_decision_function
from ..utils import check_array, check_consistent_length, check_random_state
from ..utils import column_or_1d, check_X_y
from ..utils import compute_class_weight
from ..utils.extmath import safe_sparse_dot
from ..utils.validation import check_is_fitted
from ..utils.multiclass import check_classification_targets
from ..externals import six
from ..exceptions import ConvergenceWarning
from ..exceptions import NotFittedError
LIBSVM_IMPL = ['c_svc', 'nu_svc', 'one_class', 'epsilon_svr', 'nu_svr']
def _one_vs_one_coef(dual_coef, n_support, support_vectors):
    """Generate primal coefficients from dual coefficients
    for the one-vs-one multi class LibSVM in the case
    of a linear kernel."""
    # dual_coef has shape (n_class - 1, n_SV); each of the n*(n-1)/2
    # pairwise classifiers gets one primal weight vector.
    n_class = dual_coef.shape[0] + 1

    # Start/end offsets of each class's support vectors inside
    # support_vectors (which concatenates the SVs class by class).
    sv_locs = np.cumsum(np.hstack([[0], n_support]))

    coef = []
    for class1 in range(n_class):
        begin1, end1 = sv_locs[class1], sv_locs[class1 + 1]
        sv1 = support_vectors[begin1:end1, :]
        for class2 in range(class1 + 1, n_class):
            begin2, end2 = sv_locs[class2], sv_locs[class2 + 1]
            sv2 = support_vectors[begin2:end2, :]

            # Dual coefficients of class1's SVs in the (class1, class2)
            # sub-problem, and vice versa.
            alpha1 = dual_coef[class2 - 1, begin1:end1]
            alpha2 = dual_coef[class1, begin2:end2]

            # Primal weight vector for the class1-vs-class2 classifier.
            coef.append(safe_sparse_dot(alpha1, sv1)
                        + safe_sparse_dot(alpha2, sv2))
    return coef
class BaseLibSVM(six.with_metaclass(ABCMeta, BaseEstimator)):
"""Base class for estimators that use libsvm as backing library
This implements support vector machine classification and regression.
Parameter documentation is in the derived `SVC` class.
"""
# The order of these must match the integer values in LibSVM.
# XXX These are actually the same in the dense case. Need to factor
# this out.
_sparse_kernels = ["linear", "poly", "rbf", "sigmoid", "precomputed"]
@abstractmethod
def __init__(self, impl, kernel, degree, gamma, coef0,
tol, C, nu, epsilon, shrinking, probability, cache_size,
class_weight, verbose, max_iter, random_state):
if impl not in LIBSVM_IMPL: # pragma: no cover
raise ValueError("impl should be one of %s, %s was given" % (
LIBSVM_IMPL, impl))
if gamma == 0:
msg = ("The gamma value of 0.0 is invalid. Use 'auto' to set"
" gamma to a value of 1 / n_features.")
raise ValueError(msg)
self._impl = impl
self.kernel = kernel
self.degree = degree
self.gamma = gamma
self.coef0 = coef0
self.tol = tol
self.C = C
self.nu = nu
self.epsilon = epsilon
self.shrinking = shrinking
self.probability = probability
self.cache_size = cache_size
self.class_weight = class_weight
self.verbose = verbose
self.max_iter = max_iter
self.random_state = random_state
@property
def _pairwise(self):
# Used by cross_val_score.
return self.kernel == "precomputed"
def fit(self, X, y, sample_weight=None):
"""Fit the SVM model according to the given training data.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training vectors, where n_samples is the number of samples
and n_features is the number of features.
For kernel="precomputed", the expected shape of X is
(n_samples, n_samples).
y : array-like, shape (n_samples,)
Target values (class labels in classification, real numbers in
regression)
sample_weight : array-like, shape (n_samples,)
Per-sample weights. Rescale C per sample. Higher weights
force the classifier to put more emphasis on these points.
Returns
-------
self : object
Returns self.
Notes
------
If X and y are not C-ordered and contiguous arrays of np.float64 and
X is not a scipy.sparse.csr_matrix, X and/or y may be copied.
If X is a dense array, then the other methods will not support sparse
matrices as input.
"""
rnd = check_random_state(self.random_state)
sparse = sp.isspmatrix(X)
if sparse and self.kernel == "precomputed":
raise TypeError("Sparse precomputed kernels are not supported.")
self._sparse = sparse and not callable(self.kernel)
X, y = check_X_y(X, y, dtype=np.float64, order='C', accept_sparse='csr')
y = self._validate_targets(y)
sample_weight = np.asarray([]
if sample_weight is None
else sample_weight, dtype=np.float64)
solver_type = LIBSVM_IMPL.index(self._impl)
# input validation
if solver_type != 2 and X.shape[0] != y.shape[0]:
raise ValueError("X and y have incompatible shapes.\n" +
"X has %s samples, but y has %s." %
(X.shape[0], y.shape[0]))
if self.kernel == "precomputed" and X.shape[0] != X.shape[1]:
raise ValueError("X.shape[0] should be equal to X.shape[1]")
if sample_weight.shape[0] > 0 and sample_weight.shape[0] != X.shape[0]:
raise ValueError("sample_weight and X have incompatible shapes: "
"%r vs %r\n"
"Note: Sparse matrices cannot be indexed w/"
"boolean masks (use `indices=True` in CV)."
% (sample_weight.shape, X.shape))
if self.gamma == 'auto':
self._gamma = 1.0 / X.shape[1]
else:
self._gamma = self.gamma
kernel = self.kernel
if callable(kernel):
kernel = 'precomputed'
fit = self._sparse_fit if self._sparse else self._dense_fit
if self.verbose: # pragma: no cover
print('[LibSVM]', end='')
seed = rnd.randint(np.iinfo('i').max)
fit(X, y, sample_weight, solver_type, kernel, random_seed=seed)
# see comment on the other call to np.iinfo in this file
self.shape_fit_ = X.shape
# In binary case, we need to flip the sign of coef, intercept and
# decision function. Use self._intercept_ and self._dual_coef_ internally.
self._intercept_ = self.intercept_.copy()
self._dual_coef_ = self.dual_coef_
if self._impl in ['c_svc', 'nu_svc'] and len(self.classes_) == 2:
self.intercept_ *= -1
self.dual_coef_ = -self.dual_coef_
return self
def _validate_targets(self, y):
"""Validation of y and class_weight.
Default implementation for SVR and one-class; overridden in BaseSVC.
"""
# XXX this is ugly.
# Regression models should not have a class_weight_ attribute.
self.class_weight_ = np.empty(0)
return column_or_1d(y, warn=True).astype(np.float64)
    def _warn_from_fit_status(self):
        """Warn the user when libsvm stopped early at max_iter (status 1)."""
        assert self.fit_status_ in (0, 1)
        if self.fit_status_ == 1:
            warnings.warn('Solver terminated early (max_iter=%i).'
                          ' Consider pre-processing your data with'
                          ' StandardScaler or MinMaxScaler.'
                          % self.max_iter, ConvergenceWarning)
def _dense_fit(self, X, y, sample_weight, solver_type, kernel,
random_seed):
if callable(self.kernel):
# you must store a reference to X to compute the kernel in predict
# TODO: add keyword copy to copy on demand
self.__Xfit = X
X = self._compute_kernel(X)
if X.shape[0] != X.shape[1]:
raise ValueError("X.shape[0] should be equal to X.shape[1]")
libsvm.set_verbosity_wrap(self.verbose)
if six.PY2:
# In python2 ensure kernel is ascii bytes to prevent a TypeError
if isinstance(kernel, six.types.UnicodeType):
kernel = str(kernel)
if six.PY3:
# In python3 ensure kernel is utf8 unicode to prevent a TypeError
if isinstance(kernel, bytes):
kernel = str(kernel, 'utf8')
# we don't pass **self.get_params() to allow subclasses to
# add other parameters to __init__
self.support_, self.support_vectors_, self.n_support_, \
self.dual_coef_, self.intercept_, self.probA_, \
self.probB_, self.fit_status_ = libsvm.fit(
X, y,
svm_type=solver_type, sample_weight=sample_weight,
class_weight=self.class_weight_, kernel=kernel, C=self.C,
nu=self.nu, probability=self.probability, degree=self.degree,
shrinking=self.shrinking, tol=self.tol,
cache_size=self.cache_size, coef0=self.coef0,
gamma=self._gamma, epsilon=self.epsilon,
max_iter=self.max_iter, random_seed=random_seed)
self._warn_from_fit_status()
def _sparse_fit(self, X, y, sample_weight, solver_type, kernel,
random_seed):
X.data = np.asarray(X.data, dtype=np.float64, order='C')
X.sort_indices()
kernel_type = self._sparse_kernels.index(kernel)
libsvm_sparse.set_verbosity_wrap(self.verbose)
self.support_, self.support_vectors_, dual_coef_data, \
self.intercept_, self.n_support_, \
self.probA_, self.probB_, self.fit_status_ = \
libsvm_sparse.libsvm_sparse_train(
X.shape[1], X.data, X.indices, X.indptr, y, solver_type,
kernel_type, self.degree, self._gamma, self.coef0, self.tol,
self.C, self.class_weight_,
sample_weight, self.nu, self.cache_size, self.epsilon,
int(self.shrinking), int(self.probability), self.max_iter,
random_seed)
self._warn_from_fit_status()
if hasattr(self, "classes_"):
n_class = len(self.classes_) - 1
else: # regression
n_class = 1
n_SV = self.support_vectors_.shape[0]
dual_coef_indices = np.tile(np.arange(n_SV), n_class)
dual_coef_indptr = np.arange(0, dual_coef_indices.size + 1,
dual_coef_indices.size / n_class)
self.dual_coef_ = sp.csr_matrix(
(dual_coef_data, dual_coef_indices, dual_coef_indptr),
(n_class, n_SV))
def predict(self, X):
"""Perform regression on samples in X.
For an one-class model, +1 or -1 is returned.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
For kernel="precomputed", the expected shape of X is
(n_samples_test, n_samples_train).
Returns
-------
y_pred : array, shape (n_samples,)
"""
X = self._validate_for_predict(X)
predict = self._sparse_predict if self._sparse else self._dense_predict
return predict(X)
def _dense_predict(self, X):
n_samples, n_features = X.shape
X = self._compute_kernel(X)
if X.ndim == 1:
X = check_array(X, order='C')
kernel = self.kernel
if callable(self.kernel):
kernel = 'precomputed'
if X.shape[1] != self.shape_fit_[0]:
raise ValueError("X.shape[1] = %d should be equal to %d, "
"the number of samples at training time" %
(X.shape[1], self.shape_fit_[0]))
svm_type = LIBSVM_IMPL.index(self._impl)
return libsvm.predict(
X, self.support_, self.support_vectors_, self.n_support_,
self._dual_coef_, self._intercept_,
self.probA_, self.probB_, svm_type=svm_type, kernel=kernel,
degree=self.degree, coef0=self.coef0, gamma=self._gamma,
cache_size=self.cache_size)
def _sparse_predict(self, X):
# Precondition: X is a csr_matrix of dtype np.float64.
kernel = self.kernel
if callable(kernel):
kernel = 'precomputed'
kernel_type = self._sparse_kernels.index(kernel)
C = 0.0 # C is not useful here
return libsvm_sparse.libsvm_sparse_predict(
X.data, X.indices, X.indptr,
self.support_vectors_.data,
self.support_vectors_.indices,
self.support_vectors_.indptr,
self._dual_coef_.data, self._intercept_,
LIBSVM_IMPL.index(self._impl), kernel_type,
self.degree, self._gamma, self.coef0, self.tol,
C, self.class_weight_,
self.nu, self.epsilon, self.shrinking,
self.probability, self.n_support_,
self.probA_, self.probB_)
def _compute_kernel(self, X):
"""Return the data transformed by a callable kernel"""
if callable(self.kernel):
# in the case of precomputed kernel given as a function, we
# have to compute explicitly the kernel matrix
kernel = self.kernel(X, self.__Xfit)
if sp.issparse(kernel):
kernel = kernel.toarray()
X = np.asarray(kernel, dtype=np.float64, order='C')
return X
def _decision_function(self, X):
"""Distance of the samples X to the separating hyperplane.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Returns
-------
X : array-like, shape (n_samples, n_class * (n_class-1) / 2)
Returns the decision function of the sample for each class
in the model.
"""
# NOTE: _validate_for_predict contains check for is_fitted
# hence must be placed before any other attributes are used.
X = self._validate_for_predict(X)
X = self._compute_kernel(X)
if self._sparse:
dec_func = self._sparse_decision_function(X)
else:
dec_func = self._dense_decision_function(X)
# In binary case, we need to flip the sign of coef, intercept and
# decision function.
if self._impl in ['c_svc', 'nu_svc'] and len(self.classes_) == 2:
return -dec_func.ravel()
return dec_func
def _dense_decision_function(self, X):
X = check_array(X, dtype=np.float64, order="C")
kernel = self.kernel
if callable(kernel):
kernel = 'precomputed'
return libsvm.decision_function(
X, self.support_, self.support_vectors_, self.n_support_,
self._dual_coef_, self._intercept_,
self.probA_, self.probB_,
svm_type=LIBSVM_IMPL.index(self._impl),
kernel=kernel, degree=self.degree, cache_size=self.cache_size,
coef0=self.coef0, gamma=self._gamma)
def _sparse_decision_function(self, X):
X.data = np.asarray(X.data, dtype=np.float64, order='C')
kernel = self.kernel
if hasattr(kernel, '__call__'):
kernel = 'precomputed'
kernel_type = self._sparse_kernels.index(kernel)
return libsvm_sparse.libsvm_sparse_decision_function(
X.data, X.indices, X.indptr,
self.support_vectors_.data,
self.support_vectors_.indices,
self.support_vectors_.indptr,
self._dual_coef_.data, self._intercept_,
LIBSVM_IMPL.index(self._impl), kernel_type,
self.degree, self._gamma, self.coef0, self.tol,
self.C, self.class_weight_,
self.nu, self.epsilon, self.shrinking,
self.probability, self.n_support_,
self.probA_, self.probB_)
def _validate_for_predict(self, X):
check_is_fitted(self, 'support_')
X = check_array(X, accept_sparse='csr', dtype=np.float64, order="C")
if self._sparse and not sp.isspmatrix(X):
X = sp.csr_matrix(X)
if self._sparse:
X.sort_indices()
if sp.issparse(X) and not self._sparse and not callable(self.kernel):
raise ValueError(
"cannot use sparse input in %r trained on dense data"
% type(self).__name__)
n_samples, n_features = X.shape
if self.kernel == "precomputed":
if X.shape[1] != self.shape_fit_[0]:
raise ValueError("X.shape[1] = %d should be equal to %d, "
"the number of samples at training time" %
(X.shape[1], self.shape_fit_[0]))
elif n_features != self.shape_fit_[1]:
raise ValueError("X.shape[1] = %d should be equal to %d, "
"the number of features at training time" %
(n_features, self.shape_fit_[1]))
return X
    @property
    def coef_(self):
        """Feature weights of the fitted model (linear kernel only).

        Raises AttributeError for non-linear kernels, where primal
        coefficients do not exist. The returned array is marked read-only.
        """
        if self.kernel != 'linear':
            raise AttributeError('coef_ is only available when using a '
                                 'linear kernel')

        coef = self._get_coef()

        # coef_ being a read-only property, it's better to mark the value as
        # immutable to avoid hiding potential bugs for the unsuspecting user.
        if sp.issparse(coef):
            # sparse matrix do not have global flags
            coef.data.flags.writeable = False
        else:
            # regular dense array
            coef.flags.writeable = False
        return coef
    def _get_coef(self):
        # Primal coefficients for the linear kernel: dual coefficients
        # multiplied by the support vectors.
        return safe_sparse_dot(self._dual_coef_, self.support_vectors_)
class BaseSVC(six.with_metaclass(ABCMeta, BaseLibSVM, ClassifierMixin)):
"""ABC for LibSVM-based classifiers."""
@abstractmethod
def __init__(self, impl, kernel, degree, gamma, coef0, tol, C, nu,
shrinking, probability, cache_size, class_weight, verbose,
max_iter, decision_function_shape, random_state):
self.decision_function_shape = decision_function_shape
super(BaseSVC, self).__init__(
impl=impl, kernel=kernel, degree=degree, gamma=gamma, coef0=coef0,
tol=tol, C=C, nu=nu, epsilon=0., shrinking=shrinking,
probability=probability, cache_size=cache_size,
class_weight=class_weight, verbose=verbose, max_iter=max_iter,
random_state=random_state)
def _validate_targets(self, y):
y_ = column_or_1d(y, warn=True)
check_classification_targets(y)
cls, y = np.unique(y_, return_inverse=True)
self.class_weight_ = compute_class_weight(self.class_weight, cls, y_)
if len(cls) < 2:
raise ValueError(
"The number of classes has to be greater than one; got %d"
% len(cls))
self.classes_ = cls
return np.asarray(y, dtype=np.float64, order='C')
def decision_function(self, X):
"""Distance of the samples X to the separating hyperplane.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Returns
-------
X : array-like, shape (n_samples, n_classes * (n_classes-1) / 2)
Returns the decision function of the sample for each class
in the model.
If decision_function_shape='ovr', the shape is (n_samples,
n_classes)
"""
dec = self._decision_function(X)
if self.decision_function_shape == 'ovr' and len(self.classes_) > 2:
return _ovr_decision_function(dec < 0, -dec, len(self.classes_))
return dec
def predict(self, X):
"""Perform classification on samples in X.
For an one-class model, +1 or -1 is returned.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
For kernel="precomputed", the expected shape of X is
[n_samples_test, n_samples_train]
Returns
-------
y_pred : array, shape (n_samples,)
Class labels for samples in X.
"""
y = super(BaseSVC, self).predict(X)
return self.classes_.take(np.asarray(y, dtype=np.intp))
# Hacky way of getting predict_proba to raise an AttributeError when
# probability=False using properties. Do not use this in new code; when
# probabilities are not available depending on a setting, introduce two
# estimators.
def _check_proba(self):
if not self.probability:
raise AttributeError("predict_proba is not available when "
" probability=False")
if self._impl not in ('c_svc', 'nu_svc'):
raise AttributeError("predict_proba only implemented for SVC"
" and NuSVC")
@property
def predict_proba(self):
"""Compute probabilities of possible outcomes for samples in X.
The model need to have probability information computed at training
time: fit with attribute `probability` set to True.
Parameters
----------
X : array-like, shape (n_samples, n_features)
For kernel="precomputed", the expected shape of X is
[n_samples_test, n_samples_train]
Returns
-------
T : array-like, shape (n_samples, n_classes)
Returns the probability of the sample for each class in
the model. The columns correspond to the classes in sorted
order, as they appear in the attribute `classes_`.
Notes
-----
The probability model is created using cross validation, so
the results can be slightly different than those obtained by
predict. Also, it will produce meaningless results on very small
datasets.
"""
self._check_proba()
return self._predict_proba
def _predict_proba(self, X):
X = self._validate_for_predict(X)
if self.probA_.size == 0 or self.probB_.size == 0:
raise NotFittedError("predict_proba is not available when fitted "
"with probability=False")
pred_proba = (self._sparse_predict_proba
if self._sparse else self._dense_predict_proba)
return pred_proba(X)
@property
def predict_log_proba(self):
"""Compute log probabilities of possible outcomes for samples in X.
The model need to have probability information computed at training
time: fit with attribute `probability` set to True.
Parameters
----------
X : array-like, shape (n_samples, n_features)
For kernel="precomputed", the expected shape of X is
[n_samples_test, n_samples_train]
Returns
-------
T : array-like, shape (n_samples, n_classes)
Returns the log-probabilities of the sample for each class in
the model. The columns correspond to the classes in sorted
order, as they appear in the attribute `classes_`.
Notes
-----
The probability model is created using cross validation, so
the results can be slightly different than those obtained by
predict. Also, it will produce meaningless results on very small
datasets.
"""
self._check_proba()
return self._predict_log_proba
    def _predict_log_proba(self, X):
        # Log of the class-membership probabilities from predict_proba.
        return np.log(self.predict_proba(X))
def _dense_predict_proba(self, X):
X = self._compute_kernel(X)
kernel = self.kernel
if callable(kernel):
kernel = 'precomputed'
svm_type = LIBSVM_IMPL.index(self._impl)
pprob = libsvm.predict_proba(
X, self.support_, self.support_vectors_, self.n_support_,
self._dual_coef_, self._intercept_,
self.probA_, self.probB_,
svm_type=svm_type, kernel=kernel, degree=self.degree,
cache_size=self.cache_size, coef0=self.coef0, gamma=self._gamma)
return pprob
def _sparse_predict_proba(self, X):
X.data = np.asarray(X.data, dtype=np.float64, order='C')
kernel = self.kernel
if callable(kernel):
kernel = 'precomputed'
kernel_type = self._sparse_kernels.index(kernel)
return libsvm_sparse.libsvm_sparse_predict_proba(
X.data, X.indices, X.indptr,
self.support_vectors_.data,
self.support_vectors_.indices,
self.support_vectors_.indptr,
self._dual_coef_.data, self._intercept_,
LIBSVM_IMPL.index(self._impl), kernel_type,
self.degree, self._gamma, self.coef0, self.tol,
self.C, self.class_weight_,
self.nu, self.epsilon, self.shrinking,
self.probability, self.n_support_,
self.probA_, self.probB_)
    def _get_coef(self):
        # Reconstruct primal coefficients from the dual representation:
        # coef = dual_coef_ . support_vectors_ .
        if self.dual_coef_.shape[0] == 1:
            # binary classifier
            coef = safe_sparse_dot(self.dual_coef_, self.support_vectors_)
        else:
            # 1vs1 classifier
            coef = _one_vs_one_coef(self.dual_coef_, self.n_support_,
                                    self.support_vectors_)
            # _one_vs_one_coef returns one row per pairwise classifier;
            # stack them sparsely or densely to match the input format.
            if sp.issparse(coef[0]):
                coef = sp.vstack(coef).tocsr()
            else:
                coef = np.vstack(coef)
        return coef
def _get_liblinear_solver_type(multi_class, penalty, loss, dual):
"""Find the liblinear magic number for the solver.
This number depends on the values of the following attributes:
- multi_class
- penalty
- loss
- dual
The same number is also internally used by LibLinear to determine
which solver to use.
"""
# nested dicts containing level 1: available loss functions,
# level2: available penalties for the given loss function,
# level3: wether the dual solver is available for the specified
# combination of loss function and penalty
_solver_type_dict = {
'logistic_regression': {
'l1': {False: 6},
'l2': {False: 0, True: 7}},
'hinge': {
'l2': {True: 3}},
'squared_hinge': {
'l1': {False: 5},
'l2': {False: 2, True: 1}},
'epsilon_insensitive': {
'l2': {True: 13}},
'squared_epsilon_insensitive': {
'l2': {False: 11, True: 12}},
'crammer_singer': 4
}
if multi_class == 'crammer_singer':
return _solver_type_dict[multi_class]
elif multi_class != 'ovr':
raise ValueError("`multi_class` must be one of `ovr`, "
"`crammer_singer`, got %r" % multi_class)
_solver_pen = _solver_type_dict.get(loss, None)
if _solver_pen is None:
error_string = ("loss='%s' is not supported" % loss)
else:
_solver_dual = _solver_pen.get(penalty, None)
if _solver_dual is None:
error_string = ("The combination of penalty='%s' "
"and loss='%s' is not supported"
% (penalty, loss))
else:
solver_num = _solver_dual.get(dual, None)
if solver_num is None:
error_string = ("The combination of penalty='%s' and "
"loss='%s' are not supported when dual=%s"
% (penalty, loss, dual))
else:
return solver_num
raise ValueError('Unsupported set of arguments: %s, '
'Parameters: penalty=%r, loss=%r, dual=%r'
% (error_string, penalty, loss, dual))
def _fit_liblinear(X, y, C, fit_intercept, intercept_scaling, class_weight,
                   penalty, dual, verbose, max_iter, tol,
                   random_state=None, multi_class='ovr',
                   loss='logistic_regression', epsilon=0.1,
                   sample_weight=None):
    """Used by Logistic Regression (and CV) and LinearSVC.
    Preprocessing is done in this function before supplying it to liblinear.
    Parameters
    ----------
    X : {array-like, sparse matrix}, shape (n_samples, n_features)
        Training vector, where n_samples in the number of samples and
        n_features is the number of features.
    y : array-like, shape (n_samples,)
        Target vector relative to X
    C : float
        Inverse of cross-validation parameter. Lower the C, the more
        the penalization.
    fit_intercept : bool
        Whether or not to fit the intercept, that is to add a intercept
        term to the decision function.
    intercept_scaling : float
        LibLinear internally penalizes the intercept and this term is subject
        to regularization just like the other terms of the feature vector.
        In order to avoid this, one should increase the intercept_scaling.
        such that the feature vector becomes [x, intercept_scaling].
    class_weight : {dict, 'balanced'}, optional
        Weights associated with classes in the form ``{class_label: weight}``.
        If not given, all classes are supposed to have weight one. For
        multi-output problems, a list of dicts can be provided in the same
        order as the columns of y.
        The "balanced" mode uses the values of y to automatically adjust
        weights inversely proportional to class frequencies in the input data
        as ``n_samples / (n_classes * np.bincount(y))``
    penalty : str, {'l1', 'l2'}
        The norm of the penalty used in regularization.
    dual : bool
        Dual or primal formulation,
    verbose : int
        Set verbose to any positive number for verbosity.
    max_iter : int
        Number of iterations.
    tol : float
        Stopping condition.
    random_state : int seed, RandomState instance, or None (default)
        The seed of the pseudo random number generator to use when
        shuffling the data.
    multi_class : str, {'ovr', 'crammer_singer'}
        `ovr` trains n_classes one-vs-rest classifiers, while `crammer_singer`
        optimizes a joint objective over all classes.
        While `crammer_singer` is interesting from an theoretical perspective
        as it is consistent it is seldom used in practice and rarely leads to
        better accuracy and is more expensive to compute.
        If `crammer_singer` is chosen, the options loss, penalty and dual will
        be ignored.
    loss : str, {'logistic_regression', 'hinge', 'squared_hinge',
                 'epsilon_insensitive', 'squared_epsilon_insensitive}
        The loss function used to fit the model.
    epsilon : float, optional (default=0.1)
        Epsilon parameter in the epsilon-insensitive loss function. Note
        that the value of this parameter depends on the scale of the target
        variable y. If unsure, set epsilon=0.
    sample_weight : array-like, optional
        Weights assigned to each sample.
    Returns
    -------
    coef_ : ndarray, shape (n_features, n_features + 1)
        The coefficient vector got by minimizing the objective function.
    intercept_ : float
        The intercept term added to the vector.
    n_iter_ : int
        Maximum number of iterations run across all classes.
    """
    # Classification losses need integer-encoded labels and per-class
    # weights; the two regression losses use y as-is.
    if loss not in ['epsilon_insensitive', 'squared_epsilon_insensitive']:
        enc = LabelEncoder()
        y_ind = enc.fit_transform(y)
        classes_ = enc.classes_
        if len(classes_) < 2:
            raise ValueError("This solver needs samples of at least 2 classes"
                             " in the data, but the data contains only one"
                             " class: %r" % classes_[0])
        class_weight_ = compute_class_weight(class_weight, classes_, y)
    else:
        class_weight_ = np.empty(0, dtype=np.float64)
        y_ind = y
    liblinear.set_verbosity_wrap(verbose)
    rnd = check_random_state(random_state)
    if verbose:
        print('[LibLinear]', end='')
    # LinearSVC breaks when intercept_scaling is <= 0
    bias = -1.0
    if fit_intercept:
        if intercept_scaling <= 0:
            raise ValueError("Intercept scaling is %r but needs to be greater than 0."
                             " To disable fitting an intercept,"
                             " set fit_intercept=False." % intercept_scaling)
        else:
            bias = intercept_scaling
    # NOTE(review): liblinear.set_verbosity_wrap is called again below after
    # already being called above -- looks redundant but harmless; confirm.
    libsvm.set_verbosity_wrap(verbose)
    libsvm_sparse.set_verbosity_wrap(verbose)
    liblinear.set_verbosity_wrap(verbose)
    # LibLinear wants targets as doubles, even for classification
    y_ind = np.asarray(y_ind, dtype=np.float64).ravel()
    if sample_weight is None:
        sample_weight = np.ones(X.shape[0])
    else:
        sample_weight = np.array(sample_weight, dtype=np.float64, order='C')
        check_consistent_length(sample_weight, X)
    solver_type = _get_liblinear_solver_type(multi_class, penalty, loss, dual)
    raw_coef_, n_iter_ = liblinear.train_wrap(
        X, y_ind, sp.isspmatrix(X), solver_type, tol, bias, C,
        class_weight_, max_iter, rnd.randint(np.iinfo('i').max),
        epsilon, sample_weight)
    # Regarding rnd.randint(..) in the above signature:
    # seed for srand in range [0..INT_MAX); due to limitations in Numpy
    # on 32-bit platforms, we can't get to the UINT_MAX limit that
    # srand supports
    n_iter_ = max(n_iter_)
    if n_iter_ >= max_iter and verbose > 0:
        warnings.warn("Liblinear failed to converge, increase "
                      "the number of iterations.", ConvergenceWarning)
    # raw_coef_'s last column is the (scaled) intercept when one was fit.
    if fit_intercept:
        coef_ = raw_coef_[:, :-1]
        intercept_ = intercept_scaling * raw_coef_[:, -1]
    else:
        coef_ = raw_coef_
        intercept_ = 0.
    return coef_, intercept_, n_iter_
| bsd-3-clause |
AZtheAsian/zulip | confirmation/models.py | 13 | 5970 | # -*- coding: utf-8 -*-
# Copyright: (c) 2008, Jarek Zgoda <jarek.zgoda@gmail.com>
__revision__ = '$Id: models.py 28 2009-10-22 15:03:02Z jarek.zgoda $'
import re
from django.db import models
from django.core.urlresolvers import reverse
from django.core.mail import send_mail
from django.conf import settings
from django.template import loader, Context
from django.contrib.sites.models import Site
from django.contrib.contenttypes.models import ContentType
from django.contrib.contenttypes.fields import GenericForeignKey
from django.utils.translation import ugettext_lazy as _
from django.utils.timezone import now
from confirmation.util import get_status_field
from zerver.lib.utils import generate_random_token
from zerver.models import PreregistrationUser
from typing import Optional, Union, Any, Text
# Prefer the optional django-mailer app's queued send_mail when installed;
# otherwise keep Django's default send_mail imported above.
try:
    import mailer
    send_mail = mailer.send_mail
except ImportError:
    # no mailer app present, stick with default
    pass
# 40 lowercase hex digits -- the shape of keys from generate_random_token(40).
B16_RE = re.compile('^[a-f0-9]{40}$')
def check_key_is_valid(creation_key):
    # type: (Text) -> bool
    """Return True only if the realm creation key exists and is unexpired."""
    key_exists = RealmCreationKey.objects.filter(
        creation_key=creation_key).exists()
    if not key_exists:
        return False
    created = RealmCreationKey.objects.get(
        creation_key=creation_key).date_created
    age_in_days = (now() - created).days
    # Realm creation link expires after settings.REALM_CREATION_LINK_VALIDITY_DAYS
    return age_in_days <= settings.REALM_CREATION_LINK_VALIDITY_DAYS
def generate_key():
    # type: () -> Text
    # 40 characters, matching the B16_RE pattern checked on confirmation.
    return generate_random_token(40)
def generate_activation_url(key, host=None):
    # type: (Text, Optional[str]) -> Text
    """Build the absolute confirmation URL for the given key.

    Falls back to settings.EXTERNAL_HOST when no host is supplied.
    """
    if host is None:
        host = settings.EXTERNAL_HOST
    confirm_path = reverse('confirmation.views.confirm',
                           kwargs={'confirmation_key': key})
    return u'%s%s%s' % (settings.EXTERNAL_URI_SCHEME, host, confirm_path)
def generate_realm_creation_url():
    # type: () -> Text
    """Create and persist a new realm creation key, returning its URL."""
    key = generate_key()
    RealmCreationKey.objects.create(creation_key=key, date_created=now())
    create_path = reverse('zerver.views.create_realm',
                          kwargs={'creation_key': key})
    return u'%s%s%s' % (settings.EXTERNAL_URI_SCHEME,
                        settings.EXTERNAL_HOST,
                        create_path)
class ConfirmationManager(models.Manager):
    """Manager that creates, emails, and redeems confirmation links."""
    def confirm(self, confirmation_key):
        # type: (str) -> Union[bool, PreregistrationUser]
        """Redeem a key: activate its target object, or return False."""
        if B16_RE.search(confirmation_key):
            try:
                confirmation = self.get(confirmation_key=confirmation_key)
            except self.model.DoesNotExist:
                return False
            obj = confirmation.content_object
            # Flip the target model's status field to "active".
            status_field = get_status_field(obj._meta.app_label, obj._meta.model_name)
            setattr(obj, status_field, getattr(settings, 'STATUS_ACTIVE', 1))
            obj.save()
            return obj
        return False
    def get_link_for_object(self, obj, host=None):
        # type: (Union[ContentType, int], Optional[str]) -> Text
        """Create a Confirmation row for `obj` and return its activation URL."""
        key = generate_key()
        self.create(content_object=obj, date_sent=now(), confirmation_key=key)
        return generate_activation_url(key, host=host)
    def send_confirmation(self, obj, email_address, additional_context=None,
                          subject_template_path=None, body_template_path=None,
                          host=None):
        # type: (ContentType, Text, Optional[Dict[str, Any]], Optional[str], Optional[str], Optional[str]) -> Confirmation
        # NOTE(review): `Dict` appears in the type comment above but is not in
        # this file's `typing` import -- confirm against mypy.
        """Render and send the confirmation email; return the new row."""
        confirmation_key = generate_key()
        current_site = Site.objects.get_current()
        activate_url = generate_activation_url(confirmation_key, host=host)
        context = Context({
            'activate_url': activate_url,
            'current_site': current_site,
            'confirmation_key': confirmation_key,
            'target': obj,
            'days': getattr(settings, 'EMAIL_CONFIRMATION_DAYS', 10),
        })
        if additional_context is not None:
            context.update(additional_context)
        # Zephyr mirror realms use the special "mituser" template set.
        if obj.realm is not None and obj.realm.is_zephyr_mirror_realm:
            template_name = "mituser"
        else:
            template_name = obj._meta.model_name
        templates = [
            'confirmation/%s_confirmation_email_subject.txt' % (template_name,),
            'confirmation/confirmation_email_subject.txt',
        ]
        if subject_template_path:
            template = loader.get_template(subject_template_path)
        else:
            # select_template falls back to the generic subject template.
            template = loader.select_template(templates)
        subject = template.render(context).strip().replace(u'\n', u' ') # no newlines, please
        templates = [
            'confirmation/%s_confirmation_email_body.txt' % (template_name,),
            'confirmation/confirmation_email_body.txt',
        ]
        if body_template_path:
            template = loader.get_template(body_template_path)
        else:
            template = loader.select_template(templates)
        body = template.render(context)
        send_mail(subject, body, settings.DEFAULT_FROM_EMAIL, [email_address])
        return self.create(content_object=obj, date_sent=now(), confirmation_key=confirmation_key)
class Confirmation(models.Model):
    """A pending confirmation email, keyed to any target object."""
    # Generic FK lets one table confirm objects of any model (e.g.
    # PreregistrationUser).
    content_type = models.ForeignKey(ContentType)
    object_id = models.PositiveIntegerField()
    content_object = GenericForeignKey('content_type', 'object_id')
    date_sent = models.DateTimeField(_('sent'))
    # Matches the 40-char hex keys produced by generate_key().
    confirmation_key = models.CharField(_('activation key'), max_length=40)
    objects = ConfirmationManager()
    class Meta(object):
        verbose_name = _('confirmation email')
        verbose_name_plural = _('confirmation emails')
    def __unicode__(self):
        # type: () -> Text
        return _('confirmation email for %s') % (self.content_object,)
class RealmCreationKey(models.Model):
    """A key authorizing creation of a new realm; see check_key_is_valid."""
    creation_key = models.CharField(_('activation key'), max_length=40)
    date_created = models.DateTimeField(_('created'), default=now)
| apache-2.0 |
xunyou/vincent | vincent/core.py | 1 | 13550 | # -*- coding: utf-8 -*-
"""
Core: The core functionality for Vincent to map to Vega grammar
"""
from __future__ import (print_function, division)
import json
from string import Template
from pkg_resources import resource_string
try:
import pandas as pd
except ImportError:
pd = None
try:
import numpy as np
except ImportError:
np = None
from ._compat import str_types
def initialize_notebook():
    """Initialize the IPython notebook display elements.

    Injects the JavaScript loader for d3, topojson and Vega into the
    notebook. Returns the result of ``display`` (None) on success, or
    None immediately when IPython is not importable.
    """
    try:
        from IPython.core.display import display, HTML
    except ImportError:
        print("IPython Notebook could not be loaded.")
        # Bail out early: without IPython, the `display`/`HTML` calls at the
        # end of this function would raise a confusing NameError.
        return
    # Thanks to @jakevdp:
    # https://github.com/jakevdp/mpld3/blob/master/mpld3/_display.py#L85
    load_lib = """
    function vct_load_lib(url, callback){
      if(typeof d3 !== 'undefined' &&
         url === 'http://d3js.org/d3.v3.min.js'){
        callback()
      }
      var s = document.createElement('script');
      s.src = url;
      s.async = true;
      s.onreadystatechange = s.onload = callback;
      s.onerror = function(){
        console.warn("failed to load library " + url);
        };
      document.getElementsByTagName("head")[0].appendChild(s);
    };
    var vincent_event = new CustomEvent(
      "vincent_libs_loaded",
      {bubbles: true, cancelable: true}
    );
    """
    lib_urls = [
        "'http://d3js.org/d3.v3.min.js'",
        "'http://d3js.org/d3.geo.projection.v0.min.js'",
        "'http://wrobstory.github.io/d3-cloud/d3.layout.cloud.js'",
        "'http://wrobstory.github.io/vega/vega.v1.3.3.js'"
    ]
    get_lib = """vct_load_lib(%s, function(){
        %s
    });"""
    load_js = get_lib
    ipy_trigger = "window.dispatchEvent(vincent_event);"
    # Nest one loader callback per library so they load sequentially; the
    # innermost callback fires the vincent_libs_loaded event.
    for elem in lib_urls[:-1]:
        load_js = load_js % (elem, get_lib)
    load_js = load_js % (lib_urls[-1], ipy_trigger)
    html = """
    <script>
        %s
        function load_all_libs(){
            console.log('Loading Vincent libs...')
            %s
        };
        if(typeof define === "function" && define.amd){
            if (window['d3'] === undefined ||
                window['topojson'] === undefined){
                require.config(
                    {paths: {
                        d3: 'http://d3js.org/d3.v3.min',
                        topojson: 'http://d3js.org/topojson.v1.min'
                        }
                    }
                );
                require(["d3"], function(d3){
                    console.log('Loading Vincent from require.js...')
                    window.d3 = d3;
                    require(["topojson"], function(topojson){
                        window.topojson = topojson;
                        load_all_libs();
                    });
                });
            } else {
                load_all_libs();
            };
        }else{
            console.log('Require.js not found, loading manually...')
            load_all_libs();
        };
    </script>""" % (load_lib, load_js,)
    return display(HTML(html))
def _assert_is_type(name, value, value_type):
"""Assert that a value must be a given type."""
if not isinstance(value, value_type):
if type(value_type) is tuple:
types = ', '.join(t.__name__ for t in value_type)
raise ValueError('{0} must be one of ({1})'.format(name, types))
else:
raise ValueError('{0} must be {1}'
.format(name, value_type.__name__))
class ValidationError(Exception):
    """Exception raised with validation fails
    This exception is raised only when the ``validate`` functions of classes
    that inherit from ``FieldClass`` are called. It implies that the classes
    do not contain valid Vega JSON."""
    pass
class KeyedList(list):
    """A list that can optionally be indexed by the ``name`` attribute of
    its elements"""
    def __init__(self, attr_name='name', *args, **kwargs):
        # attr_name is the element attribute used as the string key.
        self.attr_name = attr_name
        list.__init__(self, *args, **kwargs)
    def get_keys(self):
        # Collect each element's key attribute; keys must be unique for
        # string indexing to be well-defined.
        keys = [getattr(x, self.attr_name) for x in self]
        if len(keys) != len(set(keys)):
            raise ValidationError('duplicate keys found')
        return keys
    def __getitem__(self, key):
        # String keys look elements up by attribute; anything else falls
        # through to normal list indexing (ints, slices).
        if isinstance(key, str_types):
            keys = self.get_keys()
            if key not in keys:
                raise KeyError(' "{0}" is an invalid key'.format(key))
            else:
                return self[keys.index(key)]
        else:
            return list.__getitem__(self, key)
    def __delitem__(self, key):
        if isinstance(key, str_types):
            keys = self.get_keys()
            if key not in keys:
                raise KeyError(' "{0}" is an invalid key'.format(key))
            else:
                list.__delitem__(self, keys.index(key))
        else:
            return list.__delitem__(self, key)
    def __setitem__(self, key, value):
        if isinstance(key, str_types):
            # The value's key attribute must exist and agree with the key
            # being assigned, so lookups stay consistent.
            if not hasattr(value, self.attr_name):
                raise ValidationError(
                    'object must have ' + self.attr_name + ' attribute')
            elif getattr(value, self.attr_name) != key:
                raise ValidationError(
                    "key must be equal to '" + self.attr_name +
                    "' attribute")
            keys = self.get_keys()
            # Assigning to a new string key appends; an existing key replaces.
            if key not in keys:
                self.append(value)
            else:
                list.__setitem__(self, keys.index(key), value)
        else:
            list.__setitem__(self, key, value)
def grammar(grammar_type=None, grammar_name=None):
    """Decorator to define properties that map to the ``grammar``
    dict. This dict is the canonical representation of the Vega grammar
    within Vincent.
    This decorator is intended for classes that map to some pre-defined JSON
    structure, such as axes, data, marks, scales, etc. It is assumed that this
    decorates functions with an instance of ``self.grammar``.
    Parameters
    ----------
    grammar_type : type or tuple of types, default None
        If the argument to the decorated function is not of the given types,
        then a ValueError is raised. No type checking is done if the type is
        None (default).
    grammar_name : string, default None
        An optional name to map to the internal ``grammar`` dict. If None
        (default), then the key for the dict is the name of the function
        being decorated. If not None, then it will be the name specified
        here. This is useful if the expected JSON field name is a Python
        keyword or has an un-Pythonic name.
    This should decorate a "validator" function that should return no value
    but raise an exception if the provided value is not valid Vega grammar. If
    the validator throws no exception, then the value is assigned to the
    ``grammar`` dict.
    The validator function should take only one argument - the value to be
    validated - so that no ``self`` argument is included; the validator
    should not modify the class.
    If no arguments are given, then no type-checking is done the property
    will be mapped to a field with the name of the decorated function.
    The doc string for the property is taken from the validator functions's
    doc string.
    """
    def grammar_creator(validator, name):
        # Build a property whose setter validates, getter reads, and deleter
        # removes the entry under `name` in the instance's grammar dict.
        def setter(self, value):
            if isinstance(grammar_type, (type, tuple)):
                _assert_is_type(validator.__name__, value, grammar_type)
            validator(value)
            self.grammar[name] = value
        def getter(self):
            return self.grammar.get(name, None)
        def deleter(self):
            if name in self.grammar:
                del self.grammar[name]
        return property(getter, setter, deleter, validator.__doc__)
    if isinstance(grammar_type, (type, tuple)):
        # If grammar_type is a type, return another decorator.
        def grammar_dec(validator):
            # Make sure to use the grammar name if it's there.
            if grammar_name:
                return grammar_creator(validator, grammar_name)
            else:
                return grammar_creator(validator, validator.__name__)
        return grammar_dec
    elif isinstance(grammar_name, str_types):
        # If grammar_name is a string, use that name and return another
        # decorator.
        def grammar_dec(validator):
            return grammar_creator(validator, grammar_name)
        return grammar_dec
    else:
        # Otherwise we assume that grammar_type is actually the function being
        # decorated (i.e. @grammar was used with no arguments).
        return grammar_creator(grammar_type, grammar_type.__name__)
class GrammarDict(dict):
    """The Vega Grammar. When called, obj.grammar returns a Python data
    structure for the Vega Grammar. When printed, obj.grammar returns a
    string representation."""

    def __init__(self, *args, **kwargs):
        """Initialize exactly like a plain dict."""
        dict.__init__(self, *args, **kwargs)

    def encoder(self, obj):
        """JSON fallback: serialize grammar-bearing objects via their
        ``grammar`` attribute (returns None for anything else)."""
        return getattr(obj, 'grammar', None)

    def __call__(self):
        """Return the Vega grammar as plain Python data, round-tripped
        through JSON so nested grammar objects are expanded."""
        return json.loads(str(self))

    def __str__(self):
        """Serialize the Vega grammar to a JSON string."""
        return json.dumps(self, default=self.encoder)
class GrammarClass(object):
    """Base class for objects that rely on an internal ``grammar`` dict. This
    dict contains the complete Vega grammar.
    This should be used as a superclass for classes that map to some JSON
    structure. The JSON content is stored in an internal dict named
    ``grammar``.
    """
    def __init__(self, **kwargs):
        """Initialize a GrammarClass
        **kwargs are attribute-value pairs that are set on initialization.
        These will generally be keys for the ``grammar`` dict. If the
        attribute does not already exist as a property, then a
        ``ValueError`` is raised.
        """
        self.grammar = GrammarDict()
        # sorted() makes assignment order deterministic across runs.
        for attr, value in sorted(kwargs.items()):
            if hasattr(self, attr):
                setattr(self, attr, value)
            else:
                raise ValueError('unknown keyword argument ' + attr)
    def validate(self):
        """Validate the contents of the object.
        This calls ``setattr`` for each of the class's grammar properties. It
        will catch ``ValueError``s raised by the grammar property's setters
        and re-raise them as :class:`ValidationError`.
        """
        for key, val in self.grammar.items():
            try:
                setattr(self, key, val)
            except ValueError as e:
                raise ValidationError('invalid contents: ' + e.args[0])
    def to_json(self, path=None, html_out=False,
                html_path='vega_template.html', validate=False,
                pretty_print=True):
        """Convert object to JSON
        Parameters
        ----------
        path: string, default None
            Path to write JSON out. If there is no path provided, JSON
            will be returned as a string to the console.
        html_out: boolean, default False
            If True, vincent will output an simple HTML scaffold to
            visualize the vega json output.
        html_path: string, default 'vega_template.html'
            Path for the html file (if html_out=True)
        validate : boolean
            If True, call the object's `validate` method before
            serializing. Default is False.
        pretty_print : boolean
            If True (default), JSON is printed in more-readable form with
            indentation and spaces.
        Returns
        -------
        string
            JSON serialization of the class's grammar properties.
        """
        if validate:
            self.validate()
        if pretty_print:
            dumps_args = {'indent': 2, 'separators': (',', ': ')}
        else:
            dumps_args = {}
        # Local fallback mirroring GrammarDict.encoder: expand nested
        # grammar-bearing objects during serialization.
        def encoder(obj):
            if hasattr(obj, 'grammar'):
                return obj.grammar
        if html_out:
            # NOTE(review): on Python 3, resource_string returns bytes, so
            # str(...) would yield "b'...'"; looks Python-2-oriented -- confirm.
            template = Template(
                str(resource_string('vincent', 'vega_template.html')))
            with open(html_path, 'w') as f:
                f.write(template.substitute(path=path))
        if path:
            with open(path, 'w') as f:
                json.dump(self.grammar, f, default=encoder, sort_keys=True,
                          **dumps_args)
        else:
            return json.dumps(self.grammar, default=encoder, sort_keys=True,
                              **dumps_args)
    def from_json(self):
        """Load object from JSON
        Not yet implemented.
        """
        raise NotImplementedError()
class LoadError(Exception):
    """Exception for errors on loading data from third-party objects"""
    pass
| mit |
uvacorpnet/transparent_nederlands | extract-organizations.py | 1 | 5108 | # Code to extract organization names from the 'waarde' field of Parlement.com position data
# @author Frank Takes - takes@uva.nl
# @dependencies: Python 2, Pandas
# @run code using: python details.csv
# after running once iconv -f utf8 -t ascii//TRANSLIT originalfile > details.csv replace all x with umlaut/accent/etc by plain x (with x in klinkers)
import sys
import pandas as pd
import string
from unidecode import unidecode
# some custom "dictionaries"
positions = [
' lid', '^lid', 'vicevoorzitter', 'vice voorzitter', 'vicepresident', 'plaatsvervangend voorzitter', 'algemeen voorzitter', 'voorzitter', 'columnist', 'permanent vertegenwoordiger', 'secretaris', 'bedrijfs economisch medewerker', 'wetenschappelijk medewerker', 'medewerker', 'medewerkster', 'penningmeester',
'vertegenwoordiger', 'medewerker', 'concern directeur', 'directeur', 'senior adviseur', 'organisatie adviseur', 'intern adviseur', 'adviseur', 'eindredacteur', 'gastdocent', 'fellow', 'manager', 'officier'
]
bodies = [
'dagelijks bestuur', 'raad van bestuur', 'algemeen bestuur', 'bestuur', 'raad van advies', 'raad van toezicht', 'raad van commissarissen', 'curatorium', 'regiegroep', 'comite van aanbeveling'
]
parties = [
'vvd', 'cda', 'pvda', 'd66', 'christenunie', 'groenlinks', 'lpf',
'jovd', 'jonge socialisten', 'cdja', 'volkspartij', 'kvp', 'arp',
'politiek leider',
'provinciale staten', 'eerste kamer', 'tweede kamer', 'parlement', #'gemeenteraad'
'partijbestuur', 'minister', 'formateur', 'informateur', 'raad van state', 'staatssecretaris',
'ambteloos', 'tijdelijk vervangen'
]
# read the data
df = pd.read_csv(str(sys.argv[1]),sep=",")
df2 = df
# print some stats before and after filtering non "neven" positions
print (df.shape)
df = df[df.rubriek >= 3500]
print (df.shape)
# capitalize some stuff that should be capitalized for proper filtering later
df['waarde'] = df['waarde'].str.replace('gemeente', 'Gemeente', case=True)
df['waarde'] = df['waarde'].str.replace('waarnemend burgemeester', 'Waarnemend Burgemeester', case=True)
df['waarde'] = df['waarde'].str.replace('burgemeester', 'Burgemeester', case=True)
# create a lower case version of the "waarde" column
df['waardelower'] = df['waarde'].str.lower()
# remove forbidded strings related to party/political positions
forbiddenstrings = parties
forbiddenregex = '|'.join(forbiddenstrings)
df = df[df['waardelower'].str.match('(.*(' + forbiddenregex + ').*)').str.len() == 0]
# some stats after filtering out political positions
print ("\n")
print (df.shape)
# filter the type of position. order seems important, "plaatsvervangend voorzitter" will not be filtered if it is preceded by "voorzitter"
typestring = positions + bodies
typeregex = '|'.join(typestring)
df['organisatie'] = df['waarde'].str.replace('(\S*(' + typeregex + ')\S*)', '', case=False)
# organization starts at first occurence of uppercase
df['organisatie'] = df['organisatie'].str.lstrip(string.lowercase + ' ,().')
df['organisatie'] = df['organisatie'].str.replace('(\(|\)|\'|\"|\.|\-|\/)', ' ')
df['waarde'] = df['waarde'].str.replace('(\(|\)|\'|\"|\.|\-|\/)', ' ')
df['waarde'] = df['waarde'].str.replace(' ', ' ')
df['organisatie'] = df['organisatie'].str.replace(' ', ' ')
# remove everything after the comma
def delete_after_comma(x):
    """Return `x` truncated at its first comma.

    A string with no comma, or with a comma at index 0, is returned
    unchanged (preserving the original `ind > 0` behavior). The original
    called x.find(",") twice; the computed index is now reused.
    """
    ind = x.find(",")
    if ind > 0:
        return x[:ind]
    return x
df['organisatie'] = df['organisatie'].str.strip()
# type is whatever remains after removing the previously extracted organization
df['positionbody'] = df.apply(lambda x: (x['waarde'].replace(x['organisatie'],"")).lower(), axis=1)
df["organisatie"] = df["organisatie"].apply(lambda x: delete_after_comma(x))
df["positionbody"] = df["positionbody"].apply(lambda x: delete_after_comma(x))
df['positionbody'] = df['positionbody'].str.replace('(\(|\)|\'|\"|\,|\.|\-|\/)', ' ')
df['positionbody'] = df['positionbody'].str.strip()
df['positionbody'] = df['positionbody'].str.replace(' ', ' ')
# filter the body by excluding the position
positionstring = positions
positionregex = '|'.join(positionstring)
df['body'] = df['positionbody'].str.replace('(^\S*(' + positionregex + ')\S*)', '', case=False)
# filter the position by excluding the body from the positionbody
df['position'] = df.apply(lambda x: (x['positionbody'].replace(x['body'],"")).lower(), axis=1)
# clean it all
df['body'] = df['body'].str.strip()
df['body'] = df['body'].str.replace(' ', ' ')
df['position'] = df['position'].str.strip()
df['position'] = df['position'].str.replace(' ', ' ')
# print some stats
print (df['positionbody'].value_counts()[:40])
print ("\n")
print (df['position'].value_counts()[:40])
print ("\n")
print (df['body'].value_counts()[:40])
print ("\n")
print (df['organisatie'].value_counts()[:40])
print ("\n")
# merge with original dataset again (from which we removed < 3500 and party/political positions)
df = df.combine_first(df2)
# output to csv
df.to_csv('detailsfiltered.csv', sep=',', columns=['b1-nummer', 'rubriek', 'position', 'body', 'positionbody', 'organisatie', 'waarde', 'datum', 'toelichting'], index=False)
| gpl-3.0 |
vipmike007/virt-test | virttest/arch.py | 17 | 2138 | import platform
from virttest import utils_misc
ARCH = platform.machine()

# These constants are identical on every supported architecture, so they are
# defined once instead of being duplicated in both branches below.
# From include/linux/sockios.h
SIOCSIFHWADDR = 0x8924
SIOCGIFHWADDR = 0x8927
SIOCGIFFLAGS = 0x8913
SIOCSIFFLAGS = 0x8914
SIOCGIFADDR = 0x8915
SIOCSIFADDR = 0x8916
SIOCGIFNETMASK = 0x891B
SIOCSIFNETMASK = 0x891C
SIOCGIFINDEX = 0x8933
SIOCBRADDIF = 0x89a2
SIOCBRDELIF = 0x89a3
SIOCBRADDBR = 0x89a0
SIOCBRDELBR = 0x89a1
# From linux/include/linux/if_tun.h (arch-independent flags)
IFF_MULTI_QUEUE = 0x0100
IFF_TAP = 0x0002
IFF_NO_PI = 0x1000
IFF_VNET_HDR = 0x4000
# From linux/include/linux/if.h
IFF_UP = 0x1
# From linux/netlink.h
NETLINK_ROUTE = 0
NLM_F_REQUEST = 1
NLM_F_ACK = 4
RTM_DELLINK = 17
NLMSG_ERROR = 2
# From linux/socket.h
AF_PACKET = 17

# Only the tun/tap ioctl request numbers differ between architectures: the
# ioctl read/write direction bits are encoded differently on powerpc.
if ARCH in ('ppc64', 'ppc64le'):
    # From linux/include/linux/if_tun.h
    TUNSETIFF = 0x800454ca
    TUNGETIFF = 0x400454d2
    TUNGETFEATURES = 0x400454cf
    TUNSETQUEUE = 0x800454d9
else:
    # From linux/include/linux/if_tun.h
    TUNSETIFF = 0x400454ca
    TUNGETIFF = 0x800454d2
    TUNGETFEATURES = 0x800454cf
    TUNSETQUEUE = 0x400454d9
def get_kvm_module_list():
    """Return the KVM kernel module names needed on this host.

    x86_64 needs the vendor-specific companion module (kvm-intel or
    kvm-amd); ppc64/ppc64le only need the core "kvm" module. Other
    architectures fall through and return None.
    """
    if ARCH == 'x86_64':
        vendor = utils_misc.get_cpu_vendor(verbose=False)
        suffix = {'GenuineIntel': 'intel', 'AuthenticAMD': 'amd'}[vendor]
        return ["kvm", "kvm-%s" % suffix]
    if ARCH in ('ppc64', 'ppc64le'):
        return ["kvm"]
| gpl-2.0 |
ahmetabdi/SickRage | lib/unidecode/x07f.py | 252 | 4664 | data = (
'Zhui ', # 0x00
'Zi ', # 0x01
'Ke ', # 0x02
'Xiang ', # 0x03
'Jian ', # 0x04
'Mian ', # 0x05
'Lan ', # 0x06
'Ti ', # 0x07
'Miao ', # 0x08
'Qi ', # 0x09
'Yun ', # 0x0a
'Hui ', # 0x0b
'Si ', # 0x0c
'Duo ', # 0x0d
'Duan ', # 0x0e
'Bian ', # 0x0f
'Xian ', # 0x10
'Gou ', # 0x11
'Zhui ', # 0x12
'Huan ', # 0x13
'Di ', # 0x14
'Lu ', # 0x15
'Bian ', # 0x16
'Min ', # 0x17
'Yuan ', # 0x18
'Jin ', # 0x19
'Fu ', # 0x1a
'Ru ', # 0x1b
'Zhen ', # 0x1c
'Feng ', # 0x1d
'Shuai ', # 0x1e
'Gao ', # 0x1f
'Chan ', # 0x20
'Li ', # 0x21
'Yi ', # 0x22
'Jian ', # 0x23
'Bin ', # 0x24
'Piao ', # 0x25
'Man ', # 0x26
'Lei ', # 0x27
'Ying ', # 0x28
'Suo ', # 0x29
'Mou ', # 0x2a
'Sao ', # 0x2b
'Xie ', # 0x2c
'Liao ', # 0x2d
'Shan ', # 0x2e
'Zeng ', # 0x2f
'Jiang ', # 0x30
'Qian ', # 0x31
'Zao ', # 0x32
'Huan ', # 0x33
'Jiao ', # 0x34
'Zuan ', # 0x35
'Fou ', # 0x36
'Xie ', # 0x37
'Gang ', # 0x38
'Fou ', # 0x39
'Que ', # 0x3a
'Fou ', # 0x3b
'Kaakeru ', # 0x3c
'Bo ', # 0x3d
'Ping ', # 0x3e
'Hou ', # 0x3f
'[?] ', # 0x40
'Gang ', # 0x41
'Ying ', # 0x42
'Ying ', # 0x43
'Qing ', # 0x44
'Xia ', # 0x45
'Guan ', # 0x46
'Zun ', # 0x47
'Tan ', # 0x48
'Chang ', # 0x49
'Qi ', # 0x4a
'Weng ', # 0x4b
'Ying ', # 0x4c
'Lei ', # 0x4d
'Tan ', # 0x4e
'Lu ', # 0x4f
'Guan ', # 0x50
'Wang ', # 0x51
'Wang ', # 0x52
'Gang ', # 0x53
'Wang ', # 0x54
'Han ', # 0x55
'[?] ', # 0x56
'Luo ', # 0x57
'Fu ', # 0x58
'Mi ', # 0x59
'Fa ', # 0x5a
'Gu ', # 0x5b
'Zhu ', # 0x5c
'Ju ', # 0x5d
'Mao ', # 0x5e
'Gu ', # 0x5f
'Min ', # 0x60
'Gang ', # 0x61
'Ba ', # 0x62
'Gua ', # 0x63
'Ti ', # 0x64
'Juan ', # 0x65
'Fu ', # 0x66
'Lin ', # 0x67
'Yan ', # 0x68
'Zhao ', # 0x69
'Zui ', # 0x6a
'Gua ', # 0x6b
'Zhuo ', # 0x6c
'Yu ', # 0x6d
'Zhi ', # 0x6e
'An ', # 0x6f
'Fa ', # 0x70
'Nan ', # 0x71
'Shu ', # 0x72
'Si ', # 0x73
'Pi ', # 0x74
'Ma ', # 0x75
'Liu ', # 0x76
'Ba ', # 0x77
'Fa ', # 0x78
'Li ', # 0x79
'Chao ', # 0x7a
'Wei ', # 0x7b
'Bi ', # 0x7c
'Ji ', # 0x7d
'Zeng ', # 0x7e
'Tong ', # 0x7f
'Liu ', # 0x80
'Ji ', # 0x81
'Juan ', # 0x82
'Mi ', # 0x83
'Zhao ', # 0x84
'Luo ', # 0x85
'Pi ', # 0x86
'Ji ', # 0x87
'Ji ', # 0x88
'Luan ', # 0x89
'Yang ', # 0x8a
'Mie ', # 0x8b
'Qiang ', # 0x8c
'Ta ', # 0x8d
'Mei ', # 0x8e
'Yang ', # 0x8f
'You ', # 0x90
'You ', # 0x91
'Fen ', # 0x92
'Ba ', # 0x93
'Gao ', # 0x94
'Yang ', # 0x95
'Gu ', # 0x96
'Qiang ', # 0x97
'Zang ', # 0x98
'Gao ', # 0x99
'Ling ', # 0x9a
'Yi ', # 0x9b
'Zhu ', # 0x9c
'Di ', # 0x9d
'Xiu ', # 0x9e
'Qian ', # 0x9f
'Yi ', # 0xa0
'Xian ', # 0xa1
'Rong ', # 0xa2
'Qun ', # 0xa3
'Qun ', # 0xa4
'Qian ', # 0xa5
'Huan ', # 0xa6
'Zui ', # 0xa7
'Xian ', # 0xa8
'Yi ', # 0xa9
'Yashinau ', # 0xaa
'Qiang ', # 0xab
'Xian ', # 0xac
'Yu ', # 0xad
'Geng ', # 0xae
'Jie ', # 0xaf
'Tang ', # 0xb0
'Yuan ', # 0xb1
'Xi ', # 0xb2
'Fan ', # 0xb3
'Shan ', # 0xb4
'Fen ', # 0xb5
'Shan ', # 0xb6
'Lian ', # 0xb7
'Lei ', # 0xb8
'Geng ', # 0xb9
'Nou ', # 0xba
'Qiang ', # 0xbb
'Chan ', # 0xbc
'Yu ', # 0xbd
'Gong ', # 0xbe
'Yi ', # 0xbf
'Chong ', # 0xc0
'Weng ', # 0xc1
'Fen ', # 0xc2
'Hong ', # 0xc3
'Chi ', # 0xc4
'Chi ', # 0xc5
'Cui ', # 0xc6
'Fu ', # 0xc7
'Xia ', # 0xc8
'Pen ', # 0xc9
'Yi ', # 0xca
'La ', # 0xcb
'Yi ', # 0xcc
'Pi ', # 0xcd
'Ling ', # 0xce
'Liu ', # 0xcf
'Zhi ', # 0xd0
'Qu ', # 0xd1
'Xi ', # 0xd2
'Xie ', # 0xd3
'Xiang ', # 0xd4
'Xi ', # 0xd5
'Xi ', # 0xd6
'Qi ', # 0xd7
'Qiao ', # 0xd8
'Hui ', # 0xd9
'Hui ', # 0xda
'Xiao ', # 0xdb
'Se ', # 0xdc
'Hong ', # 0xdd
'Jiang ', # 0xde
'Di ', # 0xdf
'Cui ', # 0xe0
'Fei ', # 0xe1
'Tao ', # 0xe2
'Sha ', # 0xe3
'Chi ', # 0xe4
'Zhu ', # 0xe5
'Jian ', # 0xe6
'Xuan ', # 0xe7
'Shi ', # 0xe8
'Pian ', # 0xe9
'Zong ', # 0xea
'Wan ', # 0xeb
'Hui ', # 0xec
'Hou ', # 0xed
'He ', # 0xee
'He ', # 0xef
'Han ', # 0xf0
'Ao ', # 0xf1
'Piao ', # 0xf2
'Yi ', # 0xf3
'Lian ', # 0xf4
'Qu ', # 0xf5
'[?] ', # 0xf6
'Lin ', # 0xf7
'Pen ', # 0xf8
'Qiao ', # 0xf9
'Ao ', # 0xfa
'Fan ', # 0xfb
'Yi ', # 0xfc
'Hui ', # 0xfd
'Xuan ', # 0xfe
'Dao ', # 0xff
)
| gpl-3.0 |
hsaputra/tensorflow | tensorflow/python/ops/distributions/normal.py | 72 | 9211 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""The Normal (Gaussian) distribution class."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn
from tensorflow.python.ops import random_ops
from tensorflow.python.ops.distributions import distribution
from tensorflow.python.ops.distributions import kullback_leibler
from tensorflow.python.ops.distributions import special_math
# Public API of this module.
__all__ = [
    "Normal",
    "NormalWithSoftplusScale",
]
class Normal(distribution.Distribution):
  """The Normal distribution with location `loc` and `scale` parameters.
  #### Mathematical details
  The probability density function (pdf) is,
  ```none
  pdf(x; mu, sigma) = exp(-0.5 (x - mu)**2 / sigma**2) / Z
  Z = (2 pi sigma**2)**0.5
  ```
  where `loc = mu` is the mean, `scale = sigma` is the std. deviation, and, `Z`
  is the normalization constant.
  The Normal distribution is a member of the [location-scale family](
  https://en.wikipedia.org/wiki/Location-scale_family), i.e., it can be
  constructed as,
  ```none
  X ~ Normal(loc=0, scale=1)
  Y = loc + scale * X
  ```
  #### Examples
  Examples of initialization of one or a batch of distributions.
  ```python
  # Define a single scalar Normal distribution.
  dist = tf.distributions.Normal(loc=0., scale=3.)
  # Evaluate the cdf at 1, returning a scalar.
  dist.cdf(1.)
  # Define a batch of two scalar valued Normals.
  # The first has mean 1 and standard deviation 11, the second 2 and 22.
  dist = tf.distributions.Normal(loc=[1, 2.], scale=[11, 22.])
  # Evaluate the pdf of the first distribution on 0, and the second on 1.5,
  # returning a length two tensor.
  dist.prob([0, 1.5])
  # Get 3 samples, returning a 3 x 2 tensor.
  dist.sample([3])
  ```
  Arguments are broadcast when possible.
  ```python
  # Define a batch of two scalar valued Normals.
  # Both have mean 1, but different standard deviations.
  dist = tf.distributions.Normal(loc=1., scale=[11, 22.])
  # Evaluate the pdf of both distributions on the same point, 3.0,
  # returning a length 2 tensor.
  dist.prob(3.0)
  ```
  """
  def __init__(self,
               loc,
               scale,
               validate_args=False,
               allow_nan_stats=True,
               name="Normal"):
    """Construct Normal distributions with mean and stddev `loc` and `scale`.
    The parameters `loc` and `scale` must be shaped in a way that supports
    broadcasting (e.g. `loc + scale` is a valid operation).
    Args:
      loc: Floating point tensor; the means of the distribution(s).
      scale: Floating point tensor; the stddevs of the distribution(s).
        Must contain only positive values.
      validate_args: Python `bool`, default `False`. When `True` distribution
        parameters are checked for validity despite possibly degrading runtime
        performance. When `False` invalid inputs may silently render incorrect
        outputs.
      allow_nan_stats: Python `bool`, default `True`. When `True`,
        statistics (e.g., mean, mode, variance) use the value "`NaN`" to
        indicate the result is undefined. When `False`, an exception is raised
        if one or more of the statistic's batch members are undefined.
      name: Python `str` name prefixed to Ops created by this class.
    Raises:
      TypeError: if `loc` and `scale` have different `dtype`.
    """
    # Capture the constructor arguments for the base class' `parameters`;
    # this must run before any other local variable is created, since
    # locals() snapshots everything in scope at this point.
    parameters = locals()
    with ops.name_scope(name, values=[loc, scale]):
      # When validate_args is set, fail at graph execution time if `scale`
      # contains non-positive values.
      with ops.control_dependencies([check_ops.assert_positive(scale)] if
                                    validate_args else []):
        self._loc = array_ops.identity(loc, name="loc")
        self._scale = array_ops.identity(scale, name="scale")
        # Raises TypeError when loc/scale dtypes disagree.
        check_ops.assert_same_float_dtype([self._loc, self._scale])
    super(Normal, self).__init__(
        dtype=self._scale.dtype,
        reparameterization_type=distribution.FULLY_REPARAMETERIZED,
        validate_args=validate_args,
        allow_nan_stats=allow_nan_stats,
        parameters=parameters,
        graph_parents=[self._loc, self._scale],
        name=name)
  @staticmethod
  def _param_shapes(sample_shape):
    """Shapes of `loc`/`scale` for a given sample shape (same for both)."""
    return dict(
        zip(("loc", "scale"), ([ops.convert_to_tensor(
            sample_shape, dtype=dtypes.int32)] * 2)))
  @property
  def loc(self):
    """Distribution parameter for the mean."""
    return self._loc
  @property
  def scale(self):
    """Distribution parameter for standard deviation."""
    return self._scale
  def _batch_shape_tensor(self):
    """Dynamic batch shape: broadcast of `loc` and `scale` shapes."""
    return array_ops.broadcast_dynamic_shape(
        array_ops.shape(self.loc),
        array_ops.shape(self.scale))
  def _batch_shape(self):
    """Static batch shape: broadcast of the static `loc`/`scale` shapes."""
    return array_ops.broadcast_static_shape(
        self.loc.get_shape(),
        self.scale.get_shape())
  def _event_shape_tensor(self):
    """Events are scalar, so the event shape is the empty vector."""
    return constant_op.constant([], dtype=dtypes.int32)
  def _event_shape(self):
    """Static event shape: scalar (rank-0) events."""
    return tensor_shape.scalar()
  def _sample_n(self, n, seed=None):
    """Draw `n` standard normal samples, then shift/scale them."""
    shape = array_ops.concat([[n], self.batch_shape_tensor()], 0)
    sampled = random_ops.random_normal(
        shape=shape, mean=0., stddev=1., dtype=self.loc.dtype, seed=seed)
    return sampled * self.scale + self.loc
  def _log_prob(self, x):
    """log pdf = unnormalized log pdf minus log of the normalizer Z."""
    return self._log_unnormalized_prob(x) - self._log_normalization()
  def _prob(self, x):
    return math_ops.exp(self._log_prob(x))
  def _log_cdf(self, x):
    # log_ndtr is numerically stabler than log(ndtr) in the tails.
    return special_math.log_ndtr(self._z(x))
  def _cdf(self, x):
    return special_math.ndtr(self._z(x))
  def _log_survival_function(self, x):
    # Survival function = 1 - CDF = CDF of the negated standardized value.
    return special_math.log_ndtr(-self._z(x))
  def _survival_function(self, x):
    return special_math.ndtr(-self._z(x))
  def _log_unnormalized_prob(self, x):
    """-0.5 * ((x - mu) / sigma)**2, i.e. the log pdf without log Z."""
    return -0.5 * math_ops.square(self._z(x))
  def _log_normalization(self):
    """log Z = 0.5 * log(2 pi) + log(sigma)."""
    return 0.5 * math.log(2. * math.pi) + math_ops.log(self.scale)
  def _entropy(self):
    # Use broadcasting rules to calculate the full broadcast scale.
    scale = self.scale * array_ops.ones_like(self.loc)
    return 0.5 * math.log(2. * math.pi * math.e) + math_ops.log(scale)
  def _mean(self):
    # Multiply by ones_like(scale) to get the full broadcast batch shape.
    return self.loc * array_ops.ones_like(self.scale)
  def _quantile(self, p):
    """Inverse CDF via the standard normal quantile function `ndtri`."""
    return self._inv_z(special_math.ndtri(p))
  def _stddev(self):
    # Multiply by ones_like(loc) to get the full broadcast batch shape.
    return self.scale * array_ops.ones_like(self.loc)
  def _mode(self):
    # For a Gaussian, the mode coincides with the mean.
    return self._mean()
  def _z(self, x):
    """Standardize input `x` to a unit normal."""
    with ops.name_scope("standardize", values=[x]):
      return (x - self.loc) / self.scale
  def _inv_z(self, z):
    """Reconstruct input `x` from a its normalized version."""
    with ops.name_scope("reconstruct", values=[z]):
      return z * self.scale + self.loc
class NormalWithSoftplusScale(Normal):
  """Normal with softplus applied to `scale`."""
  def __init__(self,
               loc,
               scale,
               validate_args=False,
               allow_nan_stats=True,
               name="NormalWithSoftplusScale"):
    """Construct a Normal whose stddev is `softplus(scale)`.
    Args: same as `Normal.__init__`, except that `scale` may take any real
      value; it is mapped through softplus to guarantee positivity.
    """
    # Snapshot the *raw* constructor arguments before transforming scale,
    # so `parameters` reports what the caller actually passed in.
    parameters = locals()
    with ops.name_scope(name, values=[scale]):
      super(NormalWithSoftplusScale, self).__init__(
          loc=loc,
          scale=nn.softplus(scale, name="softplus_scale"),
          validate_args=validate_args,
          allow_nan_stats=allow_nan_stats,
          name=name)
    # Override the parameters recorded by the parent constructor with the
    # pre-softplus ones captured above.
    self._parameters = parameters
@kullback_leibler.RegisterKL(Normal, Normal)
def _kl_normal_normal(n_a, n_b, name=None):
  """Calculate the batched KL divergence KL(n_a || n_b) with n_a and n_b Normal.

  Uses the closed form for two univariate Gaussians:
  `KL = (mu_a - mu_b)**2 / (2 sigma_b**2)
        + 0.5 * (sigma_a**2 / sigma_b**2 - 1 - log(sigma_a**2 / sigma_b**2))`.

  Args:
    n_a: instance of a Normal distribution object.
    n_b: instance of a Normal distribution object.
    name: (optional) Name to use for created operations.
      default is "kl_normal_normal".

  Returns:
    Batchwise KL(n_a || n_b)
  """
  with ops.name_scope(name, "kl_normal_normal", [n_a.loc, n_b.loc]):
    # Build the scalar constants in the distributions' dtype so the graph
    # ops type-check against the parameter tensors.
    const_one = constant_op.constant(1, dtype=n_a.dtype)
    const_two = constant_op.constant(2, dtype=n_a.dtype)
    const_half = constant_op.constant(0.5, dtype=n_a.dtype)
    var_a = math_ops.square(n_a.scale)
    var_b = math_ops.square(n_b.scale)
    var_ratio = var_a / var_b
    # Contribution from the difference of the means.
    mean_term = math_ops.square(n_a.loc - n_b.loc) / (const_two * var_b)
    # Contribution from the mismatch of the variances.
    scale_term = const_half * (var_ratio - const_one -
                               math_ops.log(var_ratio))
    return mean_term + scale_term
| apache-2.0 |
blademainer/intellij-community | python/testData/MockSdk3.4/python_stubs/sys.py | 100 | 17611 | # encoding: utf-8
# module sys
# from (built-in)
# by generator 1.135
"""
This module provides access to some objects used or maintained by the
interpreter and to functions that interact strongly with the interpreter.
Dynamic objects:
argv -- command line arguments; argv[0] is the script pathname if known
path -- module search path; path[0] is the script directory, else ''
modules -- dictionary of loaded modules
displayhook -- called to show results in an interactive session
excepthook -- called to handle any uncaught exception other than SystemExit
To customize printing in an interactive session or to install a custom
top-level exception handler, assign other functions to replace these.
stdin -- standard input file object; used by input()
stdout -- standard output file object; used by print()
stderr -- standard error object; used for error messages
By assigning other file objects (or objects that behave like files)
to these, it is possible to redirect all of the interpreter's I/O.
last_type -- type of last uncaught exception
last_value -- value of last uncaught exception
last_traceback -- traceback of last uncaught exception
These three are only available in an interactive session after a
traceback has been printed.
Static objects:
builtin_module_names -- tuple of module names built into this interpreter
copyright -- copyright notice pertaining to this interpreter
exec_prefix -- prefix used to find the machine-specific Python library
executable -- absolute path of the executable binary of the Python interpreter
float_info -- a struct sequence with information about the float implementation.
float_repr_style -- string indicating the style of repr() output for floats
hash_info -- a struct sequence with information about the hash algorithm.
hexversion -- version information encoded as a single integer
implementation -- Python implementation information.
int_info -- a struct sequence with information about the int implementation.
maxsize -- the largest supported length of containers.
maxunicode -- the value of the largest Unicode codepoint
platform -- platform identifier
prefix -- prefix used to find the Python library
thread_info -- a struct sequence with information about the thread implementation.
version -- the version of this interpreter as a string
version_info -- version information as a named tuple
dllhandle -- [Windows only] integer handle of the Python DLL
winver -- [Windows only] version number of the Python DLL
__stdin__ -- the original stdin; don't touch!
__stdout__ -- the original stdout; don't touch!
__stderr__ -- the original stderr; don't touch!
__displayhook__ -- the original displayhook; don't touch!
__excepthook__ -- the original excepthook; don't touch!
Functions:
displayhook() -- print an object to the screen, and save it in builtins._
excepthook() -- print an exception and its traceback to sys.stderr
exc_info() -- return thread-safe information about the current exception
exit() -- exit the interpreter by raising SystemExit
getdlopenflags() -- returns flags to be used for dlopen() calls
getprofile() -- get the global profiling function
getrefcount() -- return the reference count for an object (plus one :-)
getrecursionlimit() -- return the max recursion depth for the interpreter
getsizeof() -- return the size of an object in bytes
gettrace() -- get the global debug tracing function
setcheckinterval() -- control how often the interpreter checks for events
setdlopenflags() -- set the flags to be used for dlopen() calls
setprofile() -- set the global profiling function
setrecursionlimit() -- set the max recursion depth for the interpreter
settrace() -- set the global debug tracing function
"""
# no imports
# Variables with simple values
# NOTE(review): auto-generated stub values snapshotted from the generating
# interpreter (CPython 3.4.1, 32-bit Windows).  They are machine-specific
# placeholders for IDE introspection, not live data.
api_version = 1013
base_exec_prefix = 'C:\\Python34'
base_prefix = 'C:\\Python34'
byteorder = 'little'
copyright = 'Copyright (c) 2001-2014 Python Software Foundation.\nAll Rights Reserved.\n\nCopyright (c) 2000 BeOpen.com.\nAll Rights Reserved.\n\nCopyright (c) 1995-2001 Corporation for National Research Initiatives.\nAll Rights Reserved.\n\nCopyright (c) 1991-1995 Stichting Mathematisch Centrum, Amsterdam.\nAll Rights Reserved.'
dllhandle = 1705771008
dont_write_bytecode = False
executable = 'C:\\Python34\\python.exe'
exec_prefix = 'C:\\Python34'
float_repr_style = 'short'
hexversion = 50594288
maxsize = 2147483647
maxunicode = 1114111
platform = 'win32'
prefix = 'C:\\Python34'
version = '3.4.1 (v3.4.1:c0e311e010fc, May 18 2014, 10:38:22) [MSC v.1600 32 bit (Intel)]'
winver = '3.4'
_home = None
__egginsert = 1
__plen = 5
# functions
# NOTE(review): generated stubs below -- the docstrings mirror CPython 3.4's
# real `sys` module; the bodies only return placeholder values for the IDE.
def callstats(): # real signature unknown; restored from __doc__
    """
    callstats() -> tuple of integers
    Return a tuple of function call statistics, if CALL_PROFILE was defined
    when Python was built. Otherwise, return None.
    When enabled, this function returns detailed, implementation-specific
    details about the number of function calls executed. The return value is
    a 11-tuple where the entries in the tuple are counts of:
    0. all function calls
    1. calls to PyFunction_Type objects
    2. PyFunction calls that do not create an argument tuple
    3. PyFunction calls that do not create an argument tuple
    and bypass PyEval_EvalCodeEx()
    4. PyMethod calls
    5. PyMethod calls on bound methods
    6. PyType calls
    7. PyCFunction calls
    8. generator calls
    9. All other calls
    10. Number of stack pops performed by call_function()
    """
    return ()
def call_tracing(func, args): # real signature unknown; restored from __doc__
    """
    call_tracing(func, args) -> object
    Call func(*args), while tracing is enabled. The tracing state is
    saved, and restored afterwards. This is intended to be called from
    a debugger from a checkpoint, to recursively debug some other code.
    """
    return object()
def displayhook(p_object): # real signature unknown; restored from __doc__
    """
    displayhook(object) -> None
    Print an object to sys.stdout and also save it in builtins._
    """
    pass
def excepthook(exctype, value, traceback): # real signature unknown; restored from __doc__
    """
    excepthook(exctype, value, traceback) -> None
    Handle an exception by displaying it with a traceback on sys.stderr.
    """
    pass
def exc_info(): # real signature unknown; restored from __doc__
    """
    exc_info() -> (type, value, traceback)
    Return information about the most recent exception caught by an except
    clause in the current stack frame or in an older stack frame.
    """
    pass
def exit(status=None): # real signature unknown; restored from __doc__
    """
    exit([status])
    Exit the interpreter by raising SystemExit(status).
    If the status is omitted or None, it defaults to zero (i.e., success).
    If the status is an integer, it will be used as the system exit status.
    If it is another kind of object, it will be printed and the system
    exit status will be one (i.e., failure).
    """
    pass
# NOTE(review): generated stubs -- bodies return fixed placeholder values
# (0, "" etc.) matching the documented return type, never real data.
def getallocatedblocks(): # real signature unknown; restored from __doc__
    """
    getallocatedblocks() -> integer
    Return the number of memory blocks currently allocated, regardless of their
    size.
    """
    return 0
def getcheckinterval(): # real signature unknown; restored from __doc__
    """ getcheckinterval() -> current check interval; see setcheckinterval(). """
    pass
def getdefaultencoding(): # real signature unknown; restored from __doc__
    """
    getdefaultencoding() -> string
    Return the current default string encoding used by the Unicode
    implementation.
    """
    return ""
def getfilesystemencoding(): # real signature unknown; restored from __doc__
    """
    getfilesystemencoding() -> string
    Return the encoding used to convert Unicode filenames in
    operating system filenames.
    """
    return ""
def getprofile(): # real signature unknown; restored from __doc__
    """
    getprofile()
    Return the profiling function set with sys.setprofile.
    See the profiler chapter in the library manual.
    """
    pass
def getrecursionlimit(): # real signature unknown; restored from __doc__
    """
    getrecursionlimit()
    Return the current value of the recursion limit, the maximum depth
    of the Python interpreter stack. This limit prevents infinite
    recursion from causing an overflow of the C stack and crashing Python.
    """
    pass
def getrefcount(p_object): # real signature unknown; restored from __doc__
    """
    getrefcount(object) -> integer
    Return the reference count of object. The count returned is generally
    one higher than you might expect, because it includes the (temporary)
    reference as an argument to getrefcount().
    """
    return 0
# NOTE(review): generated stubs continued -- placeholder bodies only.
def getsizeof(p_object, default): # real signature unknown; restored from __doc__
    """
    getsizeof(object, default) -> int
    Return the size of object in bytes.
    """
    return 0
def getswitchinterval(): # real signature unknown; restored from __doc__
    """ getswitchinterval() -> current thread switch interval; see setswitchinterval(). """
    pass
def gettrace(): # real signature unknown; restored from __doc__
    """
    gettrace()
    Return the global debug tracing function set with sys.settrace.
    See the debugger chapter in the library manual.
    """
    pass
def getwindowsversion(): # real signature unknown; restored from __doc__
    """
    getwindowsversion()
    Return information about the running version of Windows as a named tuple.
    The members are named: major, minor, build, platform, service_pack,
    service_pack_major, service_pack_minor, suite_mask, and product_type. For
    backward compatibility, only the first 5 items are available by indexing.
    All elements are numbers, except service_pack which is a string. Platform
    may be 0 for win32s, 1 for Windows 9x/ME, 2 for Windows NT/2000/XP/Vista/7,
    3 for Windows CE. Product_type may be 1 for a workstation, 2 for a domain
    controller, 3 for a server.
    """
    pass
def intern(string): # real signature unknown; restored from __doc__
    """
    intern(string) -> string
    ``Intern'' the given string. This enters the string in the (global)
    table of interned strings whose purpose is to speed up dictionary lookups.
    Return the string itself or the previously interned string object with the
    same value.
    """
    return ""
# NOTE(review): generated stubs for the sys.set* family -- no-op bodies.
def setcheckinterval(n): # real signature unknown; restored from __doc__
    """
    setcheckinterval(n)
    Tell the Python interpreter to check for asynchronous events every
    n instructions. This also affects how often thread switches occur.
    """
    pass
def setprofile(function): # real signature unknown; restored from __doc__
    """
    setprofile(function)
    Set the profiling function. It will be called on each function call
    and return. See the profiler chapter in the library manual.
    """
    pass
def setrecursionlimit(n): # real signature unknown; restored from __doc__
    """
    setrecursionlimit(n)
    Set the maximum depth of the Python interpreter stack to n. This
    limit prevents infinite recursion from causing an overflow of the C
    stack and crashing Python. The highest possible limit is platform-
    dependent.
    """
    pass
def setswitchinterval(n): # real signature unknown; restored from __doc__
    """
    setswitchinterval(n)
    Set the ideal thread switching delay inside the Python interpreter
    The actual frequency of switching threads can be lower if the
    interpreter executes long sequences of uninterruptible code
    (this is implementation-specific and workload-dependent).
    The parameter must represent the desired switching delay in seconds
    A typical value is 0.005 (5 milliseconds).
    """
    pass
def settrace(function): # real signature unknown; restored from __doc__
    """
    settrace(function)
    Set the global debug tracing function. It will be called on each
    function call. See the debugger chapter in the library manual.
    """
    pass
# NOTE(review): generated stubs for internal/underscore sys functions and
# the dunder hook aliases -- placeholder bodies only.
def _clear_type_cache(): # real signature unknown; restored from __doc__
    """
    _clear_type_cache() -> None
    Clear the internal type lookup cache.
    """
    pass
def _current_frames(): # real signature unknown; restored from __doc__
    """
    _current_frames() -> dictionary
    Return a dictionary mapping each current thread T's thread id to T's
    current stack frame.
    This function should be used for specialized purposes only.
    """
    return {}
def _debugmallocstats(): # real signature unknown; restored from __doc__
    """
    _debugmallocstats()
    Print summary info to stderr about the state of
    pymalloc's structures.
    In Py_DEBUG mode, also perform some expensive internal consistency
    checks.
    """
    pass
def _getframe(depth=None): # real signature unknown; restored from __doc__
    """
    _getframe([depth]) -> frameobject
    Return a frame object from the call stack. If optional integer depth is
    given, return the frame object that many calls below the top of the stack.
    If that is deeper than the call stack, ValueError is raised. The default
    for depth is zero, returning the frame at the top of the call stack.
    This function should be used for internal and specialized
    purposes only.
    """
    pass
def __displayhook__(*args, **kwargs): # real signature unknown
    """
    displayhook(object) -> None
    Print an object to sys.stdout and also save it in builtins._
    """
    pass
def __excepthook__(*args, **kwargs): # real signature unknown
    """
    excepthook(exctype, value, traceback) -> None
    Handle an exception by displaying it with a traceback on sys.stderr.
    """
    pass
def __interactivehook__(): # reliably restored by inspect
    # no doc
    pass
# classes
from .object import object
# NOTE(review): generated stub of importlib's BuiltinImporter, exposed here
# under the module attribute name `__loader__`; method bodies are no-ops.
class __loader__(object):
    """
    Meta path import for built-in modules.
    All methods are either class or static methods to avoid the need to
    instantiate the class.
    """
    @classmethod
    def find_module(cls, *args, **kwargs): # real signature unknown
        """
        Find the built-in module.
        If 'path' is ever specified then the search is considered a failure.
        This method is deprecated. Use find_spec() instead.
        """
        pass
    @classmethod
    def find_spec(cls, *args, **kwargs): # real signature unknown
        pass
    @classmethod
    def get_code(cls, *args, **kwargs): # real signature unknown
        """ Return None as built-in modules do not have code objects. """
        pass
    @classmethod
    def get_source(cls, *args, **kwargs): # real signature unknown
        """ Return None as built-in modules do not have source code. """
        pass
    @classmethod
    def is_package(cls, *args, **kwargs): # real signature unknown
        """ Return False as built-in modules are never packages. """
        pass
    @classmethod
    def load_module(cls, *args, **kwargs): # real signature unknown
        """ Load a built-in module. """
        pass
    def module_repr(module): # reliably restored by inspect
        """
        Return repr for the module.
        The method is deprecated. The import machinery does the job itself.
        """
        pass
    def __init__(self, *args, **kwargs): # real signature unknown
        pass
    __weakref__ = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
    """list of weak references to the object (if defined)"""
    __dict__ = None # (!) real value is ''
# variables with complex values
# NOTE(review): generated snapshots of structured sys attributes; entries
# marked "(!) real value is ''" could not be serialized by the generator.
argv = [] # real value of type <class 'list'> skipped
builtin_module_names = () # real value of type <class 'tuple'> skipped
flags = (
    0,
    0,
    0,
    0,
    0,
    0,
    0,
    0,
    0,
    0,
    0,
    1,
    0,
)
float_info = (
    1.7976931348623157e+308,
    1024,
    308,
    2.2250738585072014e-308,
    -1021,
    -307,
    15,
    53,
    2.220446049250313e-16,
    2,
    1,
)
hash_info = (
    32,
    2147483647,
    314159,
    0,
    1000003,
    'siphash24',
    64,
    128,
    0,
)
implementation = None # (!) real value is ''
int_info = (
    15,
    2,
)
meta_path = [
    __loader__,
    None, # (!) real value is ''
    None, # (!) real value is ''
    None, # (!) real value is ''
]
modules = {} # real value of type <class 'dict'> skipped
path = [
    'C:\\work\\ultimate\\out\\classes\\production\\python-helpers',
    'C:\\Python34\\lib\\site-packages\\setuptools-4.0.1-py3.4.egg',
    'C:\\Windows\\system32\\python34.zip',
    'C:\\Python34\\DLLs',
    'C:\\Python34\\lib',
    'C:\\Python34',
    'C:\\Python34\\lib\\site-packages',
]
path_hooks = [
    None, # (!) real value is ''
    None, # (!) real value is ''
]
path_importer_cache = {} # real value of type <class 'dict'> skipped
stderr = None # (!) real value is ''
stdin = None # (!) real value is ''
stdout = None # (!) real value is ''
thread_info = (
    'nt',
    None,
    None,
)
version_info = (
    3,
    4,
    1,
    'final',
    0,
)
warnoptions = []
_mercurial = (
    'CPython',
    'v3.4.1',
    'c0e311e010fc',
)
_xoptions = {}
__spec__ = None # (!) real value is ''
__stderr__ = stderr
__stdin__ = stdin
__stdout__ = stdout
# intermittent names
exc_value = Exception()
exc_traceback=None
| apache-2.0 |
neeasade/qutebrowser | qutebrowser/browser/webkit/network/networkmanager.py | 4 | 20527 | # vim: ft=python fileencoding=utf-8 sts=4 sw=4 et:
# Copyright 2014-2016 Florian Bruhin (The Compiler) <mail@qutebrowser.org>
#
# This file is part of qutebrowser.
#
# qutebrowser is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# qutebrowser is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with qutebrowser. If not, see <http://www.gnu.org/licenses/>.
"""Our own QNetworkAccessManager."""
import os
import collections
import netrc
from PyQt5.QtCore import (pyqtSlot, pyqtSignal, PYQT_VERSION, QCoreApplication,
QUrl, QByteArray)
from PyQt5.QtNetwork import (QNetworkAccessManager, QNetworkReply, QSslError,
QSslSocket)
from qutebrowser.config import config
from qutebrowser.utils import (message, log, usertypes, utils, objreg, qtutils,
urlutils, debug)
from qutebrowser.browser.webkit.network import qutescheme, networkreply
from qutebrowser.browser.webkit.network import filescheme
# Sentinel error string; presumably used to recognize requests blocked by
# the host blocker (usage is outside this chunk -- TODO confirm).
HOSTBLOCK_ERROR_STRING = '%HOSTBLOCK%'
# Hashable key identifying a proxy (type/host/port).
ProxyId = collections.namedtuple('ProxyId', 'type, hostname, port')
# NOTE(review): looks like a ProxyId -> credentials cache for proxy auth;
# verify against on_proxy_authentication_required.
_proxy_auth_cache = {}
def _is_secure_cipher(cipher):
"""Check if a given SSL cipher (hopefully) isn't broken yet."""
tokens = [e.upper() for e in cipher.name().split('-')]
if cipher.usedBits() < 128:
# https://codereview.qt-project.org/#/c/75943/
return False
# OpenSSL should already protect against this in a better way
elif cipher.keyExchangeMethod() == 'DH' and os.name == 'nt':
# https://weakdh.org/
return False
elif cipher.encryptionMethod().upper().startswith('RC4'):
# http://en.wikipedia.org/wiki/RC4#Security
# https://codereview.qt-project.org/#/c/148906/
return False
elif cipher.encryptionMethod().upper().startswith('DES'):
# http://en.wikipedia.org/wiki/Data_Encryption_Standard#Security_and_cryptanalysis
return False
elif 'MD5' in tokens:
# http://www.win.tue.nl/hashclash/rogue-ca/
return False
# OpenSSL should already protect against this in a better way
# elif (('CBC3' in tokens or 'CBC' in tokens) and (cipher.protocol() not in
# [QSsl.TlsV1_0, QSsl.TlsV1_1, QSsl.TlsV1_2])):
# # http://en.wikipedia.org/wiki/POODLE
# return False
### These things should never happen as those are already filtered out by
### either the SSL libraries or Qt - but let's be sure.
elif cipher.authenticationMethod() in ['aNULL', 'NULL']:
# Ciphers without authentication.
return False
elif cipher.encryptionMethod() in ['eNULL', 'NULL']:
# Ciphers without encryption.
return False
elif 'EXP' in tokens or 'EXPORT' in tokens:
# Weak export-grade ciphers
return False
elif 'ADH' in tokens:
# No MITM protection
return False
### This *should* happen ;)
else:
return True
def init():
    """Disable insecure SSL ciphers on old Qt versions."""
    if qtutils.version_check('5.3.0'):
        candidates = QSslSocket.defaultCiphers()
        log.init.debug("Default Qt ciphers: {}".format(
            ', '.join(c.name() for c in candidates)))
    else:
        # Qt < 5.3 ships insecure defaults, so we filter the full supported
        # list instead - https://codereview.qt-project.org/#/c/75943/
        candidates = QSslSocket.supportedCiphers()
        log.init.debug("Supported Qt ciphers: {}".format(
            ', '.join(c.name() for c in candidates)))
    good_ciphers = []
    bad_ciphers = []
    for cipher in candidates:
        # Partition the candidate list in a single pass.
        bucket = good_ciphers if _is_secure_cipher(cipher) else bad_ciphers
        bucket.append(cipher)
    log.init.debug("Disabling bad ciphers: {}".format(
        ', '.join(c.name() for c in bad_ciphers)))
    QSslSocket.setDefaultCiphers(good_ciphers)
class SslError(QSslError):
    """A QSslError subclass which provides __hash__ on Qt < 5.4."""
    def __hash__(self):
        # Errors are stored in sets (see NetworkManager.on_ssl_errors), so
        # they must be hashable on all supported Qt versions.
        try:
            # Qt >= 5.4
            # pylint: disable=not-callable,useless-suppression
            return super().__hash__()
        except TypeError:
            # Qt < 5.4: QSslError has no __hash__, so hash the DER-encoded
            # certificate together with the error code instead.
            return hash((self.certificate().toDer(), self.error()))
    def __repr__(self):
        # Human-readable enum key plus the error string, for debug logs.
        return utils.get_repr(
            self, error=debug.qenum_key(QSslError, self.error()),
            string=self.errorString())
class NetworkManager(QNetworkAccessManager):
"""Our own QNetworkAccessManager.
Attributes:
adopted_downloads: If downloads are running with this QNAM but the
associated tab gets closed already, the NAM gets
reparented to the DownloadManager. This counts the
still running downloads, so the QNAM can clean
itself up when this reaches zero again.
_requests: Pending requests.
_scheme_handlers: A dictionary (scheme -> handler) of supported custom
schemes.
_win_id: The window ID this NetworkManager is associated with.
_tab_id: The tab ID this NetworkManager is associated with.
_rejected_ssl_errors: A {QUrl: [SslError]} dict of rejected errors.
_accepted_ssl_errors: A {QUrl: [SslError]} dict of accepted errors.
Signals:
shutting_down: Emitted when the QNAM is shutting down.
"""
shutting_down = pyqtSignal()
    def __init__(self, win_id, tab_id, parent=None):
        """Constructor.

        Args:
            win_id: The window ID this NetworkManager is associated with.
            tab_id: The tab ID, or None for a generic network manager
                    (e.g. one used by the DownloadManager - see _ask).
            parent: The parent QObject, or None.
        """
        log.init.debug("Initializing NetworkManager")
        with log.disable_qt_msghandler():
            # WORKAROUND for a hang when a message is printed - See:
            # http://www.riverbankcomputing.com/pipermail/pyqt/2014-November/035045.html
            super().__init__(parent)
        log.init.debug("NetworkManager init done")
        self.adopted_downloads = 0
        self._win_id = win_id
        self._tab_id = tab_id
        self._requests = []
        self._scheme_handlers = {
            'qute': qutescheme.QuteSchemeHandler(win_id),
            'file': filescheme.FileSchemeHandler(win_id),
        }
        self._set_cookiejar(private=config.get('general', 'private-browsing'))
        self._set_cache()
        self.sslErrors.connect(self.on_ssl_errors)
        # defaultdict(list) so new hosts start with an empty error list.
        self._rejected_ssl_errors = collections.defaultdict(list)
        self._accepted_ssl_errors = collections.defaultdict(list)
        self.authenticationRequired.connect(self.on_authentication_required)
        self.proxyAuthenticationRequired.connect(
            self.on_proxy_authentication_required)
        objreg.get('config').changed.connect(self.on_config_changed)
def _set_cookiejar(self, private=False):
"""Set the cookie jar of the NetworkManager correctly.
Args:
private: Whether we're currently in private browsing mode.
"""
if private:
cookie_jar = objreg.get('ram-cookie-jar')
else:
cookie_jar = objreg.get('cookie-jar')
# We have a shared cookie jar - we restore its parent so we don't
# take ownership of it.
self.setCookieJar(cookie_jar)
app = QCoreApplication.instance()
cookie_jar.setParent(app)
def _set_cache(self):
"""Set the cache of the NetworkManager correctly.
We can't switch the whole cache in private mode because QNAM would
delete the old cache.
"""
# We have a shared cache - we restore its parent so we don't take
# ownership of it.
app = QCoreApplication.instance()
cache = objreg.get('cache')
self.setCache(cache)
cache.setParent(app)
def _ask(self, text, mode, owner=None):
"""Ask a blocking question in the statusbar.
Args:
text: The text to display to the user.
mode: A PromptMode.
owner: An object which will abort the question if destroyed, or
None.
Return:
The answer the user gave or None if the prompt was cancelled.
"""
q = usertypes.Question()
q.text = text
q.mode = mode
self.shutting_down.connect(q.abort)
if owner is not None:
owner.destroyed.connect(q.abort)
# This might be a generic network manager, e.g. one belonging to a
# DownloadManager. In this case, just skip the webview thing.
if self._tab_id is not None:
tab = objreg.get('tab', scope='tab', window=self._win_id,
tab=self._tab_id)
tab.load_started.connect(q.abort)
bridge = objreg.get('message-bridge', scope='window',
window=self._win_id)
bridge.ask(q, blocking=True)
q.deleteLater()
return q.answer
def shutdown(self):
"""Abort all running requests."""
self.setNetworkAccessible(QNetworkAccessManager.NotAccessible)
for request in self._requests:
request.abort()
request.deleteLater()
self.shutting_down.emit()
@pyqtSlot('QNetworkReply*', 'QList<QSslError>')
def on_ssl_errors(self, reply, errors): # pragma: no mccabe
"""Decide if SSL errors should be ignored or not.
This slot is called on SSL/TLS errors by the self.sslErrors signal.
Args:
reply: The QNetworkReply that is encountering the errors.
errors: A list of errors.
"""
errors = [SslError(e) for e in errors]
ssl_strict = config.get('network', 'ssl-strict')
log.webview.debug("SSL errors {!r}, strict {}".format(
errors, ssl_strict))
try:
host_tpl = urlutils.host_tuple(reply.url())
except ValueError:
host_tpl = None
is_accepted = False
is_rejected = False
else:
is_accepted = set(errors).issubset(
self._accepted_ssl_errors[host_tpl])
is_rejected = set(errors).issubset(
self._rejected_ssl_errors[host_tpl])
log.webview.debug("Already accepted: {} / "
"rejected {}".format(is_accepted, is_rejected))
if (ssl_strict and ssl_strict != 'ask') or is_rejected:
return
elif is_accepted:
reply.ignoreSslErrors()
return
if ssl_strict == 'ask':
err_string = '\n'.join('- ' + err.errorString() for err in errors)
answer = self._ask('SSL errors - continue?\n{}'.format(err_string),
mode=usertypes.PromptMode.yesno, owner=reply)
log.webview.debug("Asked for SSL errors, answer {}".format(answer))
if answer:
reply.ignoreSslErrors()
err_dict = self._accepted_ssl_errors
else:
err_dict = self._rejected_ssl_errors
if host_tpl is not None:
err_dict[host_tpl] += errors
else:
log.webview.debug("ssl-strict is False, only warning about errors")
for err in errors:
# FIXME we might want to use warn here (non-fatal error)
# https://github.com/The-Compiler/qutebrowser/issues/114
message.error(self._win_id, 'SSL error: {}'.format(
err.errorString()))
reply.ignoreSslErrors()
self._accepted_ssl_errors[host_tpl] += errors
def clear_all_ssl_errors(self):
"""Clear all remembered SSL errors."""
self._accepted_ssl_errors.clear()
self._rejected_ssl_errors.clear()
@pyqtSlot(QUrl)
def clear_rejected_ssl_errors(self, url):
"""Clear the rejected SSL errors on a reload.
Args:
url: The URL to remove.
"""
try:
del self._rejected_ssl_errors[url]
except KeyError:
pass
    @pyqtSlot('QNetworkReply*', 'QAuthenticator*')
    def on_authentication_required(self, reply, authenticator):
        """Called when a website needs authentication.

        Tries credentials from ~/.netrc first; if none apply, prompts the
        user via the statusbar and fills in the authenticator.
        """
        user, password = None, None
        # Only consult netrc once per reply (the netrc_used marker), and
        # only when HOME is set - netrc raises OSError without it, which
        # we don't want to log.
        if not hasattr(reply, "netrc_used") and 'HOME' in os.environ:
            # We'll get an OSError by netrc if 'HOME' isn't available in
            # os.environ. We don't want to log that, so we prevent it
            # altogether.
            reply.netrc_used = True
            try:
                net = netrc.netrc()
                authenticators = net.authenticators(reply.url().host())
                if authenticators is not None:
                    (user, _account, password) = authenticators
            except FileNotFoundError:
                log.misc.debug("No .netrc file found")
            except OSError:
                log.misc.exception("Unable to read the netrc file")
            except netrc.NetrcParseError:
                log.misc.exception("Error when parsing the netrc file")
        if user is None:
            # netrc check failed - fall back to asking the user.
            answer = self._ask("Username ({}):".format(authenticator.realm()),
                               mode=usertypes.PromptMode.user_pwd,
                               owner=reply)
            if answer is not None:
                user, password = answer.user, answer.password
        # If the prompt was cancelled too, leave the authenticator alone
        # so the request fails with an authentication error.
        if user is not None:
            authenticator.setUser(user)
            authenticator.setPassword(password)
    @pyqtSlot('QNetworkProxy', 'QAuthenticator*')
    def on_proxy_authentication_required(self, proxy, authenticator):
        """Called when a proxy needs authentication.

        Reuses cached credentials for this proxy when available, otherwise
        prompts the user and caches the answer for subsequent requests.
        """
        proxy_id = ProxyId(proxy.type(), proxy.hostName(), proxy.port())
        if proxy_id in _proxy_auth_cache:
            # The user already authenticated against this proxy - reuse.
            user, password = _proxy_auth_cache[proxy_id]
            authenticator.setUser(user)
            authenticator.setPassword(password)
        else:
            answer = self._ask(
                "Proxy username ({}):".format(authenticator.realm()),
                mode=usertypes.PromptMode.user_pwd)
            if answer is not None:
                authenticator.setUser(answer.user)
                authenticator.setPassword(answer.password)
                # Remember the credentials so later requests through the
                # same proxy don't prompt again.
                _proxy_auth_cache[proxy_id] = answer
@config.change_filter('general', 'private-browsing')
def on_config_changed(self):
"""Set cookie jar when entering/leaving private browsing mode."""
private_browsing = config.get('general', 'private-browsing')
if private_browsing:
# switched from normal mode to private mode
self._set_cookiejar(private=True)
else:
# switched from private mode to normal mode
self._set_cookiejar()
@pyqtSlot()
def on_adopted_download_destroyed(self):
"""Check if we can clean up if an adopted download was destroyed.
See the description for adopted_downloads for details.
"""
self.adopted_downloads -= 1
log.downloads.debug("Adopted download destroyed, {} left.".format(
self.adopted_downloads))
assert self.adopted_downloads >= 0
if self.adopted_downloads == 0:
self.deleteLater()
@pyqtSlot(object) # DownloadItem
def adopt_download(self, download):
"""Adopt a new DownloadItem."""
self.adopted_downloads += 1
log.downloads.debug("Adopted download, {} adopted.".format(
self.adopted_downloads))
download.destroyed.connect(self.on_adopted_download_destroyed)
download.do_retry.connect(self.adopt_download)
def set_referer(self, req, current_url):
"""Set the referer header."""
referer_header_conf = config.get('network', 'referer-header')
try:
if referer_header_conf == 'never':
# Note: using ''.encode('ascii') sends a header with no value,
# instead of no header at all
req.setRawHeader('Referer'.encode('ascii'), QByteArray())
elif (referer_header_conf == 'same-domain' and
not urlutils.same_domain(req.url(), current_url)):
req.setRawHeader('Referer'.encode('ascii'), QByteArray())
# If refer_header_conf is set to 'always', we leave the header
# alone as QtWebKit did set it.
except urlutils.InvalidUrlError:
# req.url() or current_url can be invalid - this happens on
# https://www.playstation.com/ for example.
pass
    # WORKAROUND for:
    # http://www.riverbankcomputing.com/pipermail/pyqt/2014-September/034806.html
    #
    # By returning False, we provoke a TypeError because of a wrong return
    # type, which does *not* trigger a segfault but invoke our return handler
    # immediately.
    @utils.prevent_exceptions(False)
    def createRequest(self, op, req, outgoing_data):
        """Return a new QNetworkReply object.

        Extend QNetworkAccessManager::createRequest to save requests in
        self._requests and handle custom schemes.

        Args:
            op: Operation op
            req: const QNetworkRequest & req
            outgoing_data: QIODevice * outgoingData

        Return:
            A QNetworkReply.
        """
        # Custom schemes (registered handlers) get first shot at the
        # request; a handler returning None falls through to normal
        # processing.
        scheme = req.url().scheme()
        if scheme in self._scheme_handlers:
            result = self._scheme_handlers[scheme].createRequest(
                op, req, outgoing_data)
            if result is not None:
                return result
        # Host blocking: answer blocked GET requests with a synthetic
        # error reply instead of hitting the network.
        host_blocker = objreg.get('host-blocker')
        if (op == QNetworkAccessManager.GetOperation and
                host_blocker.is_blocked(req.url())):
            log.webview.info("Request to {} blocked by host blocker.".format(
                req.url().host()))
            return networkreply.ErrorNetworkReply(
                req, HOSTBLOCK_ERROR_STRING, QNetworkReply.ContentAccessDenied,
                self)
        # Do-Not-Track headers (both common spellings).
        if config.get('network', 'do-not-track'):
            dnt = '1'.encode('ascii')
        else:
            dnt = '0'.encode('ascii')
        req.setRawHeader('DNT'.encode('ascii'), dnt)
        req.setRawHeader('X-Do-Not-Track'.encode('ascii'), dnt)
        # Load custom headers
        custom_headers = config.get('network', 'custom-headers')
        if custom_headers is not None:
            for header, value in custom_headers.items():
                req.setRawHeader(header.encode('ascii'), value.encode('ascii'))
        # There are some scenarios where we can't figure out current_url:
        # - There's a generic NetworkManager, e.g. for downloads
        # - The download was in a tab which is now closed.
        current_url = QUrl()
        if self._tab_id is not None:
            try:
                tab = objreg.get('tab', scope='tab', window=self._win_id,
                                 tab=self._tab_id)
                current_url = tab.url()
            except (KeyError, RuntimeError, TypeError):
                # https://github.com/The-Compiler/qutebrowser/issues/889
                # Catching RuntimeError and TypeError because we could be in
                # the middle of the webpage shutdown here.
                current_url = QUrl()
        self.set_referer(req, current_url)
        accept_language = config.get('network', 'accept-language')
        if accept_language is not None:
            req.setRawHeader('Accept-Language'.encode('ascii'),
                             accept_language.encode('ascii'))
        if PYQT_VERSION < 0x050301:
            # WORKAROUND (remove this when we bump the requirements to 5.3.1)
            #
            # If we don't disable our message handler, we get a freeze if a
            # warning is printed due to a PyQt bug, e.g. when clicking a
            # currency on http://ch.mouser.com/localsites/
            #
            # See http://www.riverbankcomputing.com/pipermail/pyqt/2014-June/034420.html
            with log.disable_qt_msghandler():
                reply = super().createRequest(op, req, outgoing_data)
        else:
            reply = super().createRequest(op, req, outgoing_data)
        # Track the reply so shutdown() can abort it; drop it from the
        # list again once Qt destroys it.
        self._requests.append(reply)
        reply.destroyed.connect(self._requests.remove)
        return reply
| gpl-3.0 |
fengbeihong/tempest_automate_ironic | tempest/api/compute/base.py | 3 | 15027 | # Copyright 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import time
from oslo_log import log as logging
from oslo_utils import excutils
from tempest_lib.common.utils import data_utils
from tempest_lib import exceptions as lib_exc
from tempest import clients
from tempest.common import credentials
from tempest.common import fixed_network
from tempest import config
from tempest import exceptions
import tempest.test
CONF = config.CONF
LOG = logging.getLogger(__name__)
class BaseComputeTest(tempest.test.BaseTestCase):
    """Base test case class for all Compute API tests."""

    # API version checked in skip_checks(); subclasses may pin it.
    _api_version = 2
    force_tenant_isolation = False

    @classmethod
    def skip_checks(cls):
        """Skip the whole class when an unsupported API version is set."""
        super(BaseComputeTest, cls).skip_checks()
        if cls._api_version != 2:
            msg = ("Unexpected API version is specified (%s)" %
                   cls._api_version)
            raise exceptions.InvalidConfiguration(message=msg)

    @classmethod
    def setup_credentials(cls):
        """Obtain a client manager and check multi-user availability."""
        cls.set_network_resources()
        super(BaseComputeTest, cls).setup_credentials()
        # TODO(andreaf) WE should care also for the alt_manager here
        # but only once client lazy load in the manager is done
        cls.os = cls.get_client_manager()
        # Note that we put this here and not in skip_checks because in
        # the case of preprovisioned users we won't know if we can get
        # two distinct users until we go and lock them
        cls.multi_user = cls.check_multi_user()

    @classmethod
    def setup_clients(cls):
        """Expose all compute-related service clients as class attributes."""
        super(BaseComputeTest, cls).setup_clients()
        cls.servers_client = cls.os.servers_client
        cls.flavors_client = cls.os.flavors_client
        cls.images_client = cls.os.images_client
        cls.extensions_client = cls.os.extensions_client
        cls.floating_ips_client = cls.os.floating_ips_client
        cls.keypairs_client = cls.os.keypairs_client
        cls.security_groups_client = cls.os.security_groups_client
        cls.quotas_client = cls.os.quotas_client
        # NOTE(mriedem): os-quota-class-sets is v2 API only
        cls.quota_classes_client = cls.os.quota_classes_client
        # NOTE(mriedem): os-networks is v2 API only
        cls.networks_client = cls.os.networks_client
        cls.limits_client = cls.os.limits_client
        cls.volumes_extensions_client = cls.os.volumes_extensions_client
        cls.volumes_client = cls.os.volumes_client
        cls.interfaces_client = cls.os.interfaces_client
        cls.fixed_ips_client = cls.os.fixed_ips_client
        cls.availability_zone_client = cls.os.availability_zone_client
        cls.agents_client = cls.os.agents_client
        cls.aggregates_client = cls.os.aggregates_client
        cls.services_client = cls.os.services_client
        cls.instance_usages_audit_log_client = (
            cls.os.instance_usages_audit_log_client)
        cls.hypervisor_client = cls.os.hypervisor_client
        cls.certificates_client = cls.os.certificates_client
        cls.migrations_client = cls.os.migrations_client
        cls.security_group_default_rules_client = (
            cls.os.security_group_default_rules_client)

    @classmethod
    def resource_setup(cls):
        """Cache config values and initialize per-class resource registries."""
        super(BaseComputeTest, cls).resource_setup()
        cls.build_interval = CONF.compute.build_interval
        cls.build_timeout = CONF.compute.build_timeout
        cls.ssh_user = CONF.compute.ssh_user
        cls.image_ref = CONF.compute.image_ref
        cls.image_ref_alt = CONF.compute.image_ref_alt
        cls.flavor_ref = CONF.compute.flavor_ref
        cls.flavor_ref_alt = CONF.compute.flavor_ref_alt
        cls.image_ssh_user = CONF.compute.image_ssh_user
        cls.image_ssh_password = CONF.compute.image_ssh_password
        # Registries of resources created by helper methods below; they
        # are cleaned up in resource_cleanup().
        cls.servers = []
        cls.images = []
        cls.security_groups = []
        cls.server_groups = []

    @classmethod
    def resource_cleanup(cls):
        """Delete every resource registered during the test run."""
        cls.clear_images()
        cls.clear_servers()
        cls.clear_security_groups()
        cls.clear_server_groups()
        super(BaseComputeTest, cls).resource_cleanup()

    @classmethod
    def check_multi_user(cls):
        """Raise InvalidConfiguration unless two distinct users are available."""
        # We have a list of accounts now, so just checking if the list is gt 2
        if not cls.isolated_creds.is_multi_user():
            msg = "Not enough users available for multi-user testing"
            raise exceptions.InvalidConfiguration(msg)
        return True

    @classmethod
    def clear_servers(cls):
        """Delete all registered servers and wait for them to terminate."""
        LOG.debug('Clearing servers: %s', ','.join(
            server['id'] for server in cls.servers))
        # First issue all deletes, then wait - terminations run in parallel.
        for server in cls.servers:
            try:
                cls.servers_client.delete_server(server['id'])
            except lib_exc.NotFound:
                # Something else already cleaned up the server, nothing to be
                # worried about
                pass
            except Exception:
                LOG.exception('Deleting server %s failed' % server['id'])
        for server in cls.servers:
            try:
                cls.servers_client.wait_for_server_termination(server['id'])
            except Exception:
                LOG.exception('Waiting for deletion of server %s failed'
                              % server['id'])

    @classmethod
    def server_check_teardown(cls):
        """Check if the shared server is clean enough for the next test.

        Deletes the server when it's dirty; the setUp method is then
        responsible for creating a new one.  Exceptions raised here fail
        the test case, so this method is supposed to be used only by
        tearDown methods, when the shared server id is stored in the
        ``server_id`` attribute of the class.
        """
        if getattr(cls, 'server_id', None) is not None:
            try:
                cls.servers_client.wait_for_server_status(cls.server_id,
                                                          'ACTIVE')
            except Exception as exc:
                # Server never went back to ACTIVE - delete it so the
                # next test starts from a fresh one, then re-raise.
                LOG.exception(exc)
                cls.servers_client.delete_server(cls.server_id)
                cls.servers_client.wait_for_server_termination(cls.server_id)
                cls.server_id = None
                raise

    @classmethod
    def clear_images(cls):
        """Delete all registered images, ignoring already-deleted ones."""
        LOG.debug('Clearing images: %s', ','.join(cls.images))
        for image_id in cls.images:
            try:
                cls.images_client.delete_image(image_id)
            except lib_exc.NotFound:
                # The image may have already been deleted which is OK.
                pass
            except Exception:
                LOG.exception('Exception raised deleting image %s' % image_id)

    @classmethod
    def clear_security_groups(cls):
        """Delete all registered security groups, ignoring missing ones."""
        LOG.debug('Clearing security groups: %s', ','.join(
            str(sg['id']) for sg in cls.security_groups))
        for sg in cls.security_groups:
            try:
                cls.security_groups_client.delete_security_group(sg['id'])
            except lib_exc.NotFound:
                # The security group may have already been deleted which is OK.
                pass
            except Exception as exc:
                LOG.info('Exception raised deleting security group %s',
                         sg['id'])
                LOG.exception(exc)

    @classmethod
    def clear_server_groups(cls):
        """Delete all registered server groups, ignoring missing ones."""
        LOG.debug('Clearing server groups: %s', ','.join(cls.server_groups))
        for server_group_id in cls.server_groups:
            try:
                cls.servers_client.delete_server_group(server_group_id)
            except lib_exc.NotFound:
                # The server-group may have already been deleted which is OK.
                pass
            except Exception:
                LOG.exception('Exception raised deleting server-group %s',
                              server_group_id)

    @classmethod
    def create_test_server(cls, **kwargs):
        """Wrapper utility that returns a test server.

        Creates a server (or several, with min_count/max_count), registers
        them for cleanup, and optionally waits for a status given via the
        ``wait_until`` kwarg.  On a failed wait, created servers are
        deleted unless ``preserve_server_on_error`` is set.
        """
        name = data_utils.rand_name(cls.__name__ + "-instance")
        if 'name' in kwargs:
            name = kwargs.pop('name')
        flavor = kwargs.get('flavor', cls.flavor_ref)
        image_id = kwargs.get('image_id', cls.image_ref)
        # Inject the tenant network into the kwargs if one is required.
        kwargs = fixed_network.set_networks_kwarg(
            cls.get_tenant_network(), kwargs) or {}
        body = cls.servers_client.create_server(
            name, image_id, flavor, **kwargs)
        # handle the case of multiple servers
        servers = [body]
        if 'min_count' in kwargs or 'max_count' in kwargs:
            # Get servers created which name match with name param.
            b = cls.servers_client.list_servers()
            servers = [s for s in b['servers'] if s['name'].startswith(name)]
        if 'wait_until' in kwargs:
            for server in servers:
                try:
                    cls.servers_client.wait_for_server_status(
                        server['id'], kwargs['wait_until'])
                except Exception:
                    with excutils.save_and_reraise_exception():
                        if ('preserve_server_on_error' not in kwargs
                            or kwargs['preserve_server_on_error'] is False):
                            for server in servers:
                                try:
                                    cls.servers_client.delete_server(
                                        server['id'])
                                except Exception:
                                    pass
        cls.servers.extend(servers)
        return body

    @classmethod
    def create_security_group(cls, name=None, description=None):
        """Create a security group (random name/description by default)."""
        if name is None:
            name = data_utils.rand_name(cls.__name__ + "-securitygroup")
        if description is None:
            description = data_utils.rand_name('description')
        body = \
            cls.security_groups_client.create_security_group(name,
                                                             description)
        cls.security_groups.append(body)
        return body

    @classmethod
    def create_test_server_group(cls, name="", policy=None):
        """Create a server group (default policy: affinity) and register it."""
        if not name:
            name = data_utils.rand_name(cls.__name__ + "-Server-Group")
        if policy is None:
            policy = ['affinity']
        body = cls.servers_client.create_server_group(name, policy)
        cls.server_groups.append(body['id'])
        return body

    def wait_for(self, condition):
        """Repeatedly calls condition() until a timeout."""
        start_time = int(time.time())
        while True:
            try:
                condition()
            except Exception:
                pass
            else:
                return
            if int(time.time()) - start_time >= self.build_timeout:
                # Final attempt - let any exception propagate this time.
                condition()
                return
            time.sleep(self.build_interval)

    @staticmethod
    def _delete_volume(volumes_client, volume_id):
        """Deletes the given volume and waits for it to be gone."""
        try:
            volumes_client.delete_volume(volume_id)
            # TODO(mriedem): We should move the wait_for_resource_deletion
            # into the delete_volume method as a convenience to the caller.
            volumes_client.wait_for_resource_deletion(volume_id)
        except lib_exc.NotFound:
            LOG.warn("Unable to delete volume '%s' since it was not found. "
                     "Maybe it was already deleted?" % volume_id)

    @classmethod
    def prepare_instance_network(cls):
        """Request network resources needed for floating-IP SSH access."""
        if (CONF.compute.ssh_auth_method != 'disabled' and
                CONF.compute.ssh_connect_method == 'floating'):
            cls.set_network_resources(network=True, subnet=True, router=True,
                                      dhcp=True)

    @classmethod
    def create_image_from_server(cls, server_id, **kwargs):
        """Wrapper utility that returns an image created from the server."""
        name = data_utils.rand_name(cls.__name__ + "-image")
        if 'name' in kwargs:
            name = kwargs.pop('name')
        image = cls.images_client.create_image(server_id, name)
        image_id = data_utils.parse_image_id(image.response['location'])
        cls.images.append(image_id)
        if 'wait_until' in kwargs:
            cls.images_client.wait_for_image_status(image_id,
                                                    kwargs['wait_until'])
            image = cls.images_client.get_image(image_id)
            if kwargs['wait_until'] == 'ACTIVE':
                if kwargs.get('wait_for_server', True):
                    # Wait for the server to be usable again after the
                    # snapshot finished.
                    cls.servers_client.wait_for_server_status(server_id,
                                                              'ACTIVE')
        return image

    @classmethod
    def rebuild_server(cls, server_id, **kwargs):
        # Destroy an existing server and creates a new one
        if server_id:
            try:
                cls.servers_client.delete_server(server_id)
                cls.servers_client.wait_for_server_termination(server_id)
            except Exception:
                LOG.exception('Failed to delete server %s' % server_id)
        server = cls.create_test_server(wait_until='ACTIVE', **kwargs)
        cls.password = server['adminPass']
        return server['id']

    @classmethod
    def delete_volume(cls, volume_id):
        """Deletes the given volume and waits for it to be gone."""
        cls._delete_volume(cls.volumes_extensions_client, volume_id)
class BaseV2ComputeTest(BaseComputeTest):
    """Base test case class for Compute v2 API tests."""
    _api_version = 2
class BaseComputeAdminTest(BaseComputeTest):
    """Base test case class for Compute Admin API tests."""

    @classmethod
    def skip_checks(cls):
        """Skip admin tests when no admin credentials are configured."""
        super(BaseComputeAdminTest, cls).skip_checks()
        if not credentials.is_admin_available():
            msg = ("Missing Identity Admin API credentials in configuration.")
            raise cls.skipException(msg)

    @classmethod
    def setup_credentials(cls):
        """Create an additional client manager authenticated as admin."""
        super(BaseComputeAdminTest, cls).setup_credentials()
        creds = cls.isolated_creds.get_admin_creds()
        cls.os_adm = clients.Manager(credentials=creds)

    @classmethod
    def setup_clients(cls):
        """Expose admin-only service clients as class attributes."""
        super(BaseComputeAdminTest, cls).setup_clients()
        cls.availability_zone_admin_client = (
            cls.os_adm.availability_zone_client)
class BaseV2ComputeAdminTest(BaseComputeAdminTest):
    """Base test case class for Compute Admin V2 API tests."""
    # Pin the API version checked by BaseComputeTest.skip_checks().
    _api_version = 2
| apache-2.0 |
MyTunesFreeMusic/privacy-policy | beetsplug/ftintitle.py | 10 | 6188 | # -*- coding: utf-8 -*-
# This file is part of beets.
# Copyright 2016, Verrus, <github.com/Verrus/beets-plugin-featInTitle>
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
"""Moves "featured" artists to the title from the artist field.
"""
from __future__ import division, absolute_import, print_function
import re
from beets import plugins
from beets import ui
from beets.util import displayable_path
def split_on_feat(artist):
    """Given an artist string, split the "main" artist from any artist
    on the right-hand side of a string like "feat". Return the main
    artist, which is always a string, and the featuring artist, which
    may be a string or None if none is present.
    """
    # Only split on the first "feat" token (maxsplit=1).
    feat_re = re.compile(plugins.feat_tokens(), re.IGNORECASE)
    pieces = [piece.strip() for piece in feat_re.split(artist, 1)]
    if len(pieces) == 1:
        # No featuring marker at all.
        return pieces[0], None
    return tuple(pieces)
def contains_feat(title):
    """Determine whether the title contains a "featured" marker."""
    match = re.search(plugins.feat_tokens(), title, flags=re.IGNORECASE)
    return match is not None
def find_feat_part(artist, albumartist):
    """Attempt to find featured artists in the item's artist fields and
    return the results. Returns None if no featured artist found.
    """
    # Look for the album artist inside the artist field; if it's not
    # present, give up.
    pieces = artist.split(albumartist, 1)
    if len(pieces) <= 1:
        return None

    right_side = pieces[-1]
    if right_side != '':
        # Whatever follows the album artist probably contains the
        # featured artist - extract it.
        _, feat_part = split_on_feat(right_side)
        return feat_part

    # Nothing after the album artist: look for a featuring artist on
    # the left-hand side instead.
    left_main, _ = split_on_feat(pieces[0])
    return left_main if left_main else None
class FtInTitlePlugin(plugins.BeetsPlugin):
    """Beets plugin that moves featured artists into the title field."""

    def __init__(self):
        super(FtInTitlePlugin, self).__init__()
        self.config.add({
            'auto': True,
            'drop': False,
            'format': u'feat. {0}',
        })
        self._command = ui.Subcommand(
            'ftintitle',
            help=u'move featured artists to the title field')
        self._command.parser.add_option(
            u'-d', u'--drop', dest='drop',
            action='store_true', default=False,
            help=u'drop featuring from artists and ignore title update')
        # With auto enabled, run during import as well as on demand.
        if self.config['auto']:
            self.import_stages = [self.imported]

    def commands(self):
        """Return the `ftintitle` CLI subcommand."""
        def func(lib, opts, args):
            self.config.set_args(opts)
            drop_feat = self.config['drop'].get(bool)
            write = ui.should_write()
            for item in lib.items(ui.decargs(args)):
                self.ft_in_title(item, drop_feat)
                item.store()
                if write:
                    item.try_write()
        self._command.func = func
        return [self._command]

    def imported(self, session, task):
        """Import hook for moving featuring artist automatically.
        """
        drop_feat = self.config['drop'].get(bool)
        for item in task.imported_items():
            self.ft_in_title(item, drop_feat)
            item.store()

    def update_metadata(self, item, feat_part, drop_feat):
        """Choose how to add new artists to the title and set the new
        metadata. Also, print out messages about any changes that are made.
        If `drop_feat` is set, then do not add the artist to the title; just
        remove it from the artist field.
        """
        # In all cases, update the artist fields.
        self._log.info(u'artist: {0} -> {1}', item.artist, item.albumartist)
        item.artist = item.albumartist
        if item.artist_sort:
            # Just strip the featured artist from the sort name.
            item.artist_sort, _ = split_on_feat(item.artist_sort)
        # Only update the title if it does not already contain a featured
        # artist and if we do not drop featuring information.
        if not drop_feat and not contains_feat(item.title):
            feat_format = self.config['format'].as_str()
            new_format = feat_format.format(feat_part)
            new_title = u"{0} {1}".format(item.title, new_format)
            self._log.info(u'title: {0} -> {1}', item.title, new_title)
            item.title = new_title

    def ft_in_title(self, item, drop_feat):
        """Look for featured artists in the item's artist fields and move
        them to the title.
        """
        artist = item.artist.strip()
        albumartist = item.albumartist.strip()
        # Check whether there is a featured artist on this track and the
        # artist field does not exactly match the album artist field. In
        # that case, we attempt to move the featured artist to the title.
        _, featured = split_on_feat(artist)
        if featured and albumartist != artist and albumartist:
            self._log.info('{}', displayable_path(item.path))
            feat_part = None
            # Attempt to find the featured artist.
            feat_part = find_feat_part(artist, albumartist)
            # If we have a featuring artist, move it to the title.
            if feat_part:
                self.update_metadata(item, feat_part, drop_feat)
            else:
                self._log.info(u'no featuring artists found')
| mit |
hfp/tensorflow-xsmm | tensorflow/contrib/learn/python/learn/evaluable.py | 18 | 5277 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""`Evaluable` interface (deprecated).
This module and all its submodules are deprecated. See
[contrib/learn/README.md](https://www.tensorflow.org/code/tensorflow/contrib/learn/README.md)
for migration instructions.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import abc
import six
@six.add_metaclass(abc.ABCMeta)
class Evaluable(object):
  """Interface for objects that are evaluatable by, e.g., `Experiment`.

  THIS CLASS IS DEPRECATED. See
  [contrib/learn/README.md](https://www.tensorflow.org/code/tensorflow/contrib/learn/README.md)
  for general migration instructions.
  """

  # Abstract interface only: concrete estimators must implement both
  # members below.
  @abc.abstractproperty
  def model_dir(self):
    """Returns a path in which the eval process will look for checkpoints."""
    raise NotImplementedError

  @abc.abstractmethod
  def evaluate(self,
               x=None,
               y=None,
               input_fn=None,
               feed_fn=None,
               batch_size=None,
               steps=None,
               metrics=None,
               name=None,
               checkpoint_path=None,
               hooks=None):
    """Evaluates given model with provided evaluation data.

    Stop conditions - we evaluate on the given input data until one of the
    following:
    - If `steps` is provided, and `steps` batches of size `batch_size` are
    processed.
    - If `input_fn` is provided, and it raises an end-of-input
    exception (`OutOfRangeError` or `StopIteration`).
    - If `x` is provided, and all items in `x` have been processed.

    The return value is a dict containing the metrics specified in `metrics`, as
    well as an entry `global_step` which contains the value of the global step
    for which this evaluation was performed.

    Args:
      x: Matrix of shape [n_samples, n_features...] or dictionary of many
        matrices
        containing the input samples for fitting the model. Can be iterator that
        returns
        arrays of features or dictionary of array of features. If set,
        `input_fn` must
        be `None`.
      y: Vector or matrix [n_samples] or [n_samples, n_outputs] containing the
        label values (class labels in classification, real numbers in
        regression) or dictionary of multiple vectors/matrices. Can be iterator
        that returns array of targets or dictionary of array of targets. If set,
        `input_fn` must be `None`. Note: For classification, label values must
        be integers representing the class index (i.e. values from 0 to
        n_classes-1).
      input_fn: Input function returning a tuple of:
        features - Dictionary of string feature name to `Tensor` or `Tensor`.
        labels - `Tensor` or dictionary of `Tensor` with labels.
        If input_fn is set, `x`, `y`, and `batch_size` must be `None`. If
        `steps` is not provided, this should raise `OutOfRangeError` or
        `StopIteration` after the desired amount of data (e.g., one epoch) has
        been provided. See "Stop conditions" above for specifics.
      feed_fn: Function creating a feed dict every time it is called. Called
        once per iteration. Must be `None` if `input_fn` is provided.
      batch_size: minibatch size to use on the input, defaults to first
        dimension of `x`, if specified. Must be `None` if `input_fn` is
        provided.
      steps: Number of steps for which to evaluate model. If `None`, evaluate
        until `x` is consumed or `input_fn` raises an end-of-input exception.
        See "Stop conditions" above for specifics.
      metrics: Dict of metrics to run. If None, the default metric functions
        are used; if {}, no metrics are used. Otherwise, `metrics` should map
        friendly names for the metric to a `MetricSpec` object defining which
        model outputs to evaluate against which labels with which metric
        function.
        Metric ops should support streaming, e.g., returning `update_op` and
        `value` tensors. For example, see the options defined in
        `../../../metrics/python/ops/metrics_ops.py`.
      name: Name of the evaluation if user needs to run multiple evaluations on
        different data sets, such as on training data vs test data.
      checkpoint_path: Path of a specific checkpoint to evaluate. If `None`, the
        latest checkpoint in `model_dir` is used.
      hooks: List of `SessionRunHook` subclass instances. Used for callbacks
        inside the evaluation call.

    Returns:
      Returns `dict` with evaluation results.
    """
    raise NotImplementedError
| apache-2.0 |
dongjiaqiang/cassandra | pylib/cqlshlib/displaying.py | 20 | 3779 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from collections import defaultdict
# ANSI SGR escape sequences used to colorize cqlsh output.
# '0' resets attributes, '1' selects bold, '3x' picks the foreground color.
RED = '\033[0;1;31m'
GREEN = '\033[0;1;32m'
YELLOW = '\033[0;1;33m'
BLUE = '\033[0;1;34m'
MAGENTA = '\033[0;1;35m'
CYAN = '\033[0;1;36m'
WHITE = '\033[0;1;37m'
DARK_MAGENTA = '\033[0;35m'  # non-bold magenta
ANSI_RESET = '\033[0m'  # restore the terminal's default attributes
def colorme(bval, colormap, colorkey):
    """Wrap ``bval`` in the color assigned to ``colorkey``.

    Falls back to DEFAULT_VALUE_COLORS when ``colormap`` is None, and
    returns a FormattedValue carrying both the plain and colored forms.
    """
    cmap = colormap if colormap is not None else DEFAULT_VALUE_COLORS
    colored = cmap[colorkey] + bval + cmap['reset']
    return FormattedValue(bval, colored)
class FormattedValue:
    """A plain string paired with its colored form and terminal width.

    ``strval`` is the raw text, ``coloredval`` the same text with ANSI
    color codes, and ``displaywidth`` the number of terminal columns the
    text occupies (escape codes are zero-width, and some unicode
    characters are wider than one column).
    """

    def __init__(self, strval, coloredval=None, displaywidth=None):
        self.strval = strval
        self.coloredval = strval if coloredval is None else coloredval
        # displaywidth is useful for display of special unicode
        # characters whose on-screen width differs from len(strval).
        self.displaywidth = (len(strval) if displaywidth is None
                             else displaywidth)

    def __len__(self):
        return len(self.strval)

    def _pad(self, width, fill=' '):
        """Return the fill characters needed to reach ``width`` (or '')."""
        shortfall = width - self.displaywidth
        return fill * shortfall if shortfall > 0 else ''

    def ljust(self, width, fill=' ', color=False):
        """
        Similar to self.strval.ljust(width), but takes expected terminal
        display width into account for special characters, and does not
        take color escape codes into account.
        """
        return (self.color_ljust(width, fill=fill) if color
                else self.strval + self._pad(width, fill))

    def rjust(self, width, fill=' ', color=False):
        """
        Similar to self.strval.rjust(width), but takes expected terminal
        display width into account for special characters, and does not
        take color escape codes into account.
        """
        return (self.color_rjust(width, fill=fill) if color
                else self._pad(width, fill) + self.strval)

    def color_rjust(self, width, fill=' '):
        """
        Like rjust(width), but emit this value's colored representation;
        the escape codes don't count towards the width.
        """
        return self._pad(width, fill) + self.coloredval

    def color_ljust(self, width, fill=' '):
        """
        Like ljust(width), but emit this value's colored representation;
        the escape codes don't count towards the width.
        """
        return self.coloredval + self._pad(width, fill)
# Default mapping from CQL type name to the ANSI color used when
# rendering a value of that type; 'reset' restores the terminal default.
DEFAULT_VALUE_COLORS = dict(
    default=YELLOW,
    text=YELLOW,
    error=RED,
    blob=DARK_MAGENTA,
    timestamp=GREEN,
    date=GREEN,
    time=GREEN,
    int=GREEN,
    float=GREEN,
    decimal=GREEN,
    inet=GREEN,
    boolean=GREEN,
    uuid=GREEN,
    collection=BLUE,
    reset=ANSI_RESET,
)
# Colors for column headers: magenta by default, with special cases for
# error and blob columns.
COLUMN_NAME_COLORS = defaultdict(lambda: MAGENTA,
                                 error=RED,
                                 blob=DARK_MAGENTA,
                                 reset=ANSI_RESET,
                                 )
| apache-2.0 |
agx/gerrymander | gerrymander/format.py | 4 | 2398 | #
# Copyright (C) 2014 Red Hat, Inc
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import time
def format_date(then):
if then is None:
return ""
try:
now = time.time()
delta = now - then
return format_delta(delta)
except (TypeError, ValueError):
return ""
def format_delta(delta):
    """Render a duration in seconds as a coarse human friendly string.

    Only the largest applicable unit is reported ("3 days", "1 hour",
    "5 mins"); anything under a minute is "just now".

    :param delta: duration in seconds (int or float)
    :returns: a short English age string
    """
    # Use floor division: `delta` is typically a float (a time.time()
    # difference), and with true division the `== 1` singular checks
    # essentially never fired, so e.g. 1.04 days rendered as "1 days".
    days = int(delta // (60 * 60 * 24))
    hours = int(delta // (60 * 60))
    mins = int(delta // 60)
    if days == 1:
        return "%d day" % days
    elif days > 1:
        return "%d days" % days
    elif hours == 1:
        return "%d hour" % hours
    elif hours > 1:
        return "%d hours" % hours
    elif mins == 1:
        return "%d min" % mins
    elif mins > 1:
        return "%d mins" % mins
    else:
        return "just now"
# Numeric parameters of ANSI SGR ("Select Graphic Rendition") attributes.
STYLES = {
    "reset": 0,
    "bold": 1,
    "underline": 4,
    "blinkslow": 5,
    "blinkfast": 6,
    "reverse": 7
}

# Base color codes; add FOREGROUND or BACKGROUND to get the SGR number.
COLORS = {
    "grey": 0,
    "red": 1,
    "green": 2,
    "yellow": 3,
    "blue": 4,
    "magenta": 5,
    "cyan": 6,
    "white": 7
}

FOREGROUND = 30
BACKGROUND = 40

# Template for a single SGR escape sequence.
ESCAPE = '\033[%dm'


def format_color(text, usecolor=True, fg=None, bg=None, styles=()):
    """Wrap *text* in ANSI escape sequences for color/style, then reset.

    :param text: the string to decorate
    :param usecolor: when False, return *text* unchanged
    :param fg: optional foreground color name from COLORS
    :param bg: optional background color name from COLORS
    :param styles: iterable of style names from STYLES
    :raises Exception: on an unknown color or style name
    """
    # NOTE: default changed from a mutable [] to an immutable tuple; an
    # empty tuple iterates identically, so callers are unaffected.
    if not usecolor:
        return text
    bits = []
    if fg is not None:
        if fg not in COLORS:
            raise Exception("Unknown color %s" % fg)
        bits.append(ESCAPE % (FOREGROUND + COLORS[fg]))
    if bg is not None:
        if bg not in COLORS:
            raise Exception("Unknown color %s" % bg)
        bits.append(ESCAPE % (BACKGROUND + COLORS[bg]))
    for style in styles:
        if style not in STYLES:
            raise Exception("Unknown style %s" % style)
        bits.append(ESCAPE % (STYLES[style]))
    bits.append(text)
    # Always end with a reset so the decoration does not leak onward.
    bits.append(ESCAPE % STYLES["reset"])
    return "".join(bits)
def format_title(text):
    """Return *text* underlined with a dashed rule of equal width."""
    underline = "-" * len(text)
    return "%s\n%s\n" % (text, underline)
| apache-2.0 |
abiles/DBProject2014 | myClassManage/cocos2d/plugin/tools/toolsForGame/modifyManifest.py | 263 | 2068 | import sys, string, os
from xml.etree import ElementTree as ET
# Command line arguments: the game project's AndroidManifest.xml, a
# colon-separated plugin list, and the directory holding plugin sources.
manifestFile = sys.argv[1]
pluginStr = sys.argv[2]
pluginsDir = sys.argv[3]
# XML namespace used by Android attributes (android:name, etc.).
androidNS = 'http://schemas.android.com/apk/res/android'
# Per-plugin manifest fragment merged into the game project's manifest.
sourceCfgFile = '/android/ForManifest.xml'
def doModify(sourceFile, root):
    """Merge a plugin's ForManifest.xml fragment into the game manifest.

    sourceFile: path of the plugin's ForManifest.xml.
    root: root Element of the parsed AndroidManifest.xml; missing nodes
        are appended to it in place.
    Returns True when at least one node was added (so the manifest needs
    to be rewritten), False otherwise.
    """
    bRet = False
    sourceTree = ET.parse(sourceFile)
    sourceRoot = sourceTree.getroot()
    # get target content
    # NOTE(review): duplicate detection below is substring-based on the
    # raw manifest text re-read from disk, not on the parsed tree.
    f = open(manifestFile)
    targetContent = f.read()
    f.close()
    # check config for application
    appCfgNode = sourceRoot.find('applicationCfg')
    if appCfgNode is not None and len(appCfgNode) > 0:
        appKeyWord = appCfgNode.get('keyword')
        if appKeyWord != None and len(appKeyWord) > 0:
            # Only inject nodes when the keyword does not already appear
            # anywhere in the manifest text.
            keyIndex = targetContent.find(appKeyWord)
            if -1 == keyIndex:
                bRet = True
                for node in list(appCfgNode):
                    root.find('application').append(node)
    # check permission config
    perCfgNode = sourceRoot.find('permissionCfg')
    if perCfgNode is not None and len(perCfgNode) > 0:
        for oneNode in list(perCfgNode):
            # The android:name attribute identifies the permission.
            key = '{' + androidNS + '}name'
            perAttr = oneNode.get(key)
            if perAttr != None and len(perAttr) > 0:
                attrIndex = targetContent.find(perAttr)
                if -1 == attrIndex:
                    bRet = True
                    root.append(oneNode)
    return bRet
# parse file AndroidManifest.xml of game project
ET.register_namespace("android", androidNS)
targetTree = ET.parse(manifestFile)
targetRoot = targetTree.getroot()
# traverse all plugins
plugins = pluginStr.split(':')
for pluginName in plugins:
    # find the file 'ForManifest.xml'
    sourceXml = pluginsDir + '/' + pluginName + sourceCfgFile
    if not os.path.exists(sourceXml):
        # This plugin ships no manifest fragment; nothing to merge.
        continue
    # check & modify target xml
    haveChanged = doModify(sourceXml, targetRoot)
    if haveChanged:
        # Python 2 print statement: this script targets Python 2.
        print 'Modify AndroidManifest.xml for plugin ' + pluginName
        # Rewrite the manifest after each plugin that changed it.
        targetTree.write(manifestFile, 'UTF-8')
| mit |
aleksandr-bakanov/astropy | astropy/stats/spatial.py | 4 | 12862 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
This module implements functions and classes for spatial statistics.
"""
import numpy as np
import math
class RipleysKEstimator:
    """
    Estimators for Ripley's K function for two-dimensional spatial data.
    See [1]_, [2]_, [3]_, [4]_, [5]_ for detailed mathematical and
    practical aspects of those estimators.
    Parameters
    ----------
    area : float
        Area of study from which the points where observed.
    x_max, y_max : float, float, optional
        Maximum rectangular coordinates of the area of study.
        Required if ``mode == 'translation'`` or ``mode == ohser``.
    x_min, y_min : float, float, optional
        Minimum rectangular coordinates of the area of study.
        Required if ``mode == 'variable-width'`` or ``mode == ohser``.
    Examples
    --------
    >>> import numpy as np
    >>> from matplotlib import pyplot as plt # doctest: +SKIP
    >>> from astropy.stats import RipleysKEstimator
    >>> z = np.random.uniform(low=5, high=10, size=(100, 2))
    >>> Kest = RipleysKEstimator(area=25, x_max=10, y_max=10,
    ... x_min=5, y_min=5)
    >>> r = np.linspace(0, 2.5, 100)
    >>> plt.plot(r, Kest.poisson(r)) # doctest: +SKIP
    >>> plt.plot(r, Kest(data=z, radii=r, mode='none')) # doctest: +SKIP
    >>> plt.plot(r, Kest(data=z, radii=r, mode='translation')) # doctest: +SKIP
    >>> plt.plot(r, Kest(data=z, radii=r, mode='ohser')) # doctest: +SKIP
    >>> plt.plot(r, Kest(data=z, radii=r, mode='var-width')) # doctest: +SKIP
    >>> plt.plot(r, Kest(data=z, radii=r, mode='ripley')) # doctest: +SKIP
    References
    ----------
    .. [1] Peebles, P.J.E. *The large scale structure of the universe*.
       <http://adsabs.harvard.edu/cgi-bin/nph-bib_query?bibcode=1980lssu.book.....P&db_key=AST>
    .. [2] Spatial descriptive statistics.
       <https://en.wikipedia.org/wiki/Spatial_descriptive_statistics>
    .. [3] Package spatstat.
       <https://cran.r-project.org/web/packages/spatstat/spatstat.pdf>
    .. [4] Cressie, N.A.C. (1991). Statistics for Spatial Data,
       Wiley, New York.
    .. [5] Stoyan, D., Stoyan, H. (1992). Fractals, Random Shapes and
       Point Fields, Akademie Verlag GmbH, Chichester.
    """
    def __init__(self, area, x_max=None, y_max=None, x_min=None, y_min=None):
        # All attributes are validated by the property setters below.
        self.area = area
        self.x_max = x_max
        self.y_max = y_max
        self.x_min = x_min
        self.y_min = y_min
    @property
    def area(self):
        return self._area
    @area.setter
    def area(self, value):
        # The study area must be a strictly positive real number.
        if isinstance(value, (float, int)) and value > 0:
            self._area = value
        else:
            raise ValueError('area is expected to be a positive number. '
                             'Got {}.'.format(value))
    @property
    def y_max(self):
        return self._y_max
    @y_max.setter
    def y_max(self, value):
        if value is None or isinstance(value, (float, int)):
            self._y_max = value
        else:
            raise ValueError('y_max is expected to be a real number '
                             'or None. Got {}.'.format(value))
    @property
    def x_max(self):
        return self._x_max
    @x_max.setter
    def x_max(self, value):
        if value is None or isinstance(value, (float, int)):
            self._x_max = value
        else:
            raise ValueError('x_max is expected to be a real number '
                             'or None. Got {}.'.format(value))
    @property
    def y_min(self):
        return self._y_min
    @y_min.setter
    def y_min(self, value):
        if value is None or isinstance(value, (float, int)):
            self._y_min = value
        else:
            raise ValueError('y_min is expected to be a real number. '
                             'Got {}.'.format(value))
    @property
    def x_min(self):
        return self._x_min
    @x_min.setter
    def x_min(self, value):
        if value is None or isinstance(value, (float, int)):
            self._x_min = value
        else:
            raise ValueError('x_min is expected to be a real number. '
                             'Got {}.'.format(value))
    def __call__(self, data, radii, mode='none'):
        # Calling the estimator is shorthand for evaluate().
        return self.evaluate(data=data, radii=radii, mode=mode)
    def _pairwise_diffs(self, data):
        # Return (|dx|, |dy|) for every unordered pair of points, as an
        # (npts*(npts-1)/2, 2) array, filled one "row block" per point.
        npts = len(data)
        diff = np.zeros(shape=(npts * (npts - 1) // 2, 2), dtype=np.double)
        k = 0
        for i in range(npts - 1):
            size = npts - i - 1
            diff[k:k + size] = abs(data[i] - data[i+1:])
            k += size
        return diff
    def poisson(self, radii):
        """
        Evaluates the Ripley K function for the homogeneous Poisson process,
        also known as Complete State of Randomness (CSR).
        Parameters
        ----------
        radii : 1D array
            Set of distances in which Ripley's K function will be evaluated.
        Returns
        -------
        output : 1D array
            Ripley's K function evaluated at ``radii``.
        """
        # For CSR the expected K is simply the area of a disc of radius r.
        return np.pi * radii * radii
    def Lfunction(self, data, radii, mode='none'):
        """
        Evaluates the L function at ``radii``. For parameter description
        see ``evaluate`` method.
        """
        return np.sqrt(self.evaluate(data, radii, mode=mode) / np.pi)
    def Hfunction(self, data, radii, mode='none'):
        """
        Evaluates the H function at ``radii``. For parameter description
        see ``evaluate`` method.
        """
        return self.Lfunction(data, radii, mode=mode) - radii
    def evaluate(self, data, radii, mode='none'):
        """
        Evaluates the Ripley K estimator for a given set of values ``radii``.
        Parameters
        ----------
        data : 2D array
            Set of observed points in as a n by 2 array which will be used to
            estimate Ripley's K function.
        radii : 1D array
            Set of distances in which Ripley's K estimator will be evaluated.
            Usually, it's common to consider max(radii) < (area/2)**0.5.
        mode : str
            Keyword which indicates the method for edge effects correction.
            Available methods are 'none', 'translation', 'ohser', 'var-width',
            and 'ripley'.
            * 'none'
                this method does not take into account any edge effects
                whatsoever.
            * 'translation'
                computes the intersection of rectangular areas centered at
                the given points provided the upper bounds of the
                dimensions of the rectangular area of study. It assumes that
                all the points lie in a bounded rectangular region satisfying
                x_min < x_i < x_max; y_min < y_i < y_max. A detailed
                description of this method can be found on ref [4].
            * 'ohser'
                this method uses the isotropized set covariance function of
                the window of study as a weight to correct for
                edge-effects. A detailed description of this method can be
                found on ref [4].
            * 'var-width'
                this method considers the distance of each observed point to
                the nearest boundary of the study window as a factor to
                account for edge-effects. See [3] for a brief description of
                this method.
            * 'ripley'
                this method is known as Ripley's edge-corrected estimator.
                The weight for edge-correction is a function of the
                proportions of circumferences centered at each data point
                which crosses another data point of interest. See [3] for
                a detailed description of this method.
        Returns
        -------
        ripley : 1D array
            Ripley's K function estimator evaluated at ``radii``.
        """
        data = np.asarray(data)
        if not data.shape[1] == 2:
            raise ValueError('data must be an n by 2 array, where n is the '
                             'number of observed points.')
        npts = len(data)
        ripley = np.zeros(len(radii))
        if mode == 'none':
            # Plain pair counting, no edge correction.
            diff = self._pairwise_diffs(data)
            distances = np.hypot(diff[:, 0], diff[:, 1])
            for r in range(len(radii)):
                ripley[r] = (distances < radii[r]).sum()
            ripley = self.area * 2. * ripley / (npts * (npts - 1))
        # eq. 15.11 Stoyan book page 283
        elif mode == 'translation':
            # Weight each pair by the inverse of the area of the window
            # translated by the pair's displacement vector.
            diff = self._pairwise_diffs(data)
            distances = np.hypot(diff[:, 0], diff[:, 1])
            intersec_area = (((self.x_max - self.x_min) - diff[:, 0]) *
                             ((self.y_max - self.y_min) - diff[:, 1]))
            for r in range(len(radii)):
                dist_indicator = distances < radii[r]
                ripley[r] = ((1 / intersec_area) * dist_indicator).sum()
            ripley = (self.area**2 / (npts * (npts - 1))) * 2 * ripley
        # Stoyan book page 123 and eq 15.13
        elif mode == 'ohser':
            diff = self._pairwise_diffs(data)
            distances = np.hypot(diff[:, 0], diff[:, 1])
            # b is the aspect ratio (>= 1) of the rectangular window; the
            # piecewise expressions below build the isotropized set
            # covariance function used as the edge-correction weight.
            a = self.area
            b = max((self.y_max - self.y_min) / (self.x_max - self.x_min),
                    (self.x_max - self.x_min) / (self.y_max - self.y_min))
            x = distances / math.sqrt(a / b)
            u = np.sqrt((x * x - 1) * (x > 1))
            v = np.sqrt((x * x - b ** 2) * (x < math.sqrt(b ** 2 + 1)) * (x > b))
            c1 = np.pi - 2 * x * (1 + 1 / b) + x * x / b
            c2 = 2 * np.arcsin((1 / x) * (x > 1)) - 1 / b - 2 * (x - u)
            c3 = (2 * np.arcsin(((b - u * v) / (x * x))
                                * (x > b) * (x < math.sqrt(b ** 2 + 1)))
                  + 2 * u + 2 * v / b - b - (1 + x * x) / b)
            cov_func = ((a / np.pi) * (c1 * (x >= 0) * (x <= 1)
                                       + c2 * (x > 1) * (x <= b)
                                       + c3 * (b < x) * (x < math.sqrt(b ** 2 + 1))))
            for r in range(len(radii)):
                dist_indicator = distances < radii[r]
                ripley[r] = ((1 / cov_func) * dist_indicator).sum()
            ripley = (self.area**2 / (npts * (npts - 1))) * 2 * ripley
        # Cressie book eq 8.2.20 page 616
        elif mode == 'var-width':
            # lt_dist: distance from each point to the nearest window edge;
            # only pairs with dist < r < lt_dist[i] are counted.
            lt_dist = np.minimum(np.minimum(self.x_max - data[:, 0], self.y_max - data[:, 1]),
                                 np.minimum(data[:, 0] - self.x_min, data[:, 1] - self.y_min))
            for r in range(len(radii)):
                for i in range(npts):
                    for j in range(npts):
                        if i != j:
                            diff = abs(data[i] - data[j])
                            dist = math.sqrt((diff * diff).sum())
                            if dist < radii[r] < lt_dist[i]:
                                ripley[r] = ripley[r] + 1
                lt_dist_sum = (lt_dist > radii[r]).sum()
                if not lt_dist_sum == 0:
                    ripley[r] = ripley[r] / lt_dist_sum
            ripley = self.area * ripley / npts
        # Cressie book eq 8.4.22 page 640
        elif mode == 'ripley':
            # hor_dist/ver_dist hold, per pair, the first point's distance
            # to the nearest vertical/horizontal window edge.
            hor_dist = np.zeros(shape=(npts * (npts - 1)) // 2,
                                dtype=np.double)
            ver_dist = np.zeros(shape=(npts * (npts - 1)) // 2,
                                dtype=np.double)
            for k in range(npts - 1):
                min_hor_dist = min(self.x_max - data[k][0],
                                   data[k][0] - self.x_min)
                min_ver_dist = min(self.y_max - data[k][1],
                                   data[k][1] - self.y_min)
                # start/end index the row block of pairs whose first point
                # is k (matches the layout of _pairwise_diffs).
                start = (k * (2 * (npts - 1) - (k - 1))) // 2
                end = ((k + 1) * (2 * (npts - 1) - k)) // 2
                hor_dist[start: end] = min_hor_dist * np.ones(npts - 1 - k)
                ver_dist[start: end] = min_ver_dist * np.ones(npts - 1 - k)
            diff = self._pairwise_diffs(data)
            dist = np.hypot(diff[:, 0], diff[:, 1])
            dist_ind = dist <= np.hypot(hor_dist, ver_dist)
            # NOTE(review): coincident points (dist == 0) divide by zero in
            # the weight expressions below — confirm inputs are distinct.
            w1 = (1 - (np.arccos(np.minimum(ver_dist, dist) / dist) +
                       np.arccos(np.minimum(hor_dist, dist) / dist)) / np.pi)
            w2 = (3 / 4 - 0.5 * (
                np.arccos(ver_dist / dist * ~dist_ind) +
                np.arccos(hor_dist / dist * ~dist_ind)) / np.pi)
            weight = dist_ind * w1 + ~dist_ind * w2
            for r in range(len(radii)):
                ripley[r] = ((dist < radii[r]) / weight).sum()
            ripley = self.area * 2. * ripley / (npts * (npts - 1))
        else:
            raise ValueError(f'mode {mode} is not implemented.')
        return ripley
| bsd-3-clause |
aforalee/RRally | rally/deployment/engines/existing.py | 10 | 5354 | # Copyright 2013: Mirantis Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from rally.common import objects
from rally import consts
from rally.deployment import engine
@engine.configure(name="ExistingCloud")
class ExistingCloud(engine.Engine):
    """Just use an existing OpenStack deployment without deploying anything.

    To use ExistingCloud, you should put endpoint information to the config:

        {
            "type": "ExistingCloud",
            "auth_url": "http://localhost:5000/v2.0/",
            "region_name": "RegionOne",
            "endpoint_type": "public",
            "admin": {
                "username": "admin",
                "password": "password",
                "tenant_name": "demo"
            },
            "https_insecure": False,
            "https_cacert": "",
        }

    Or, using keystone v3 API endpoint:

        {
            "type": "ExistingCloud",
            "auth_url": "http://localhost:5000/v3/",
            "region_name": "RegionOne",
            "endpoint_type": "public",
            "admin": {
                "username": "admin",
                "password": "admin",
                "user_domain_name": "admin",
                "project_name": "admin",
                "project_domain_name": "admin",
            },
            "https_insecure": False,
            "https_cacert": "",
        }
    """

    CONFIG_SCHEMA = {
        "type": "object",
        "definitions": {
            "user": {
                "type": "object",
                "properties": {
                    "username": {"type": "string"},
                    "password": {"type": "string"},
                },
                "oneOf": [
                    {
                        # v2.0 authentication
                        "properties": {
                            "tenant_name": {"type": "string"},
                        },
                        "required": ["username", "password", "tenant_name"],
                    },
                    {
                        # Authentication in project scope
                        "properties": {
                            "user_domain_name": {"type": "string"},
                            "project_name": {"type": "string"},
                            "project_domain_name": {"type": "string"},
                        },
                        "required": ["username", "password", "project_name"],
                    }
                ]
            }
        },
        "properties": {
            "type": {"type": "string"},
            "auth_url": {"type": "string"},
            "region_name": {"type": "string"},
            "endpoint_type": {"type": "string",
                              "enum": [consts.EndpointType.ADMIN,
                                       consts.EndpointType.INTERNAL,
                                       consts.EndpointType.PUBLIC]},
            "https_insecure": {"type": "boolean"},
            "https_cacert": {"type": "string"},
        },
        "anyOf": [
            {
                "properties": {
                    "admin": {"$ref": "#/definitions/user"}
                },
                "required": ["type", "auth_url", "admin"]
            },
            {
                # BUG FIX: "users" was previously a sibling of "required"
                # instead of being nested under "properties", making it an
                # unknown (ignored) JSON-Schema keyword — the users list was
                # never validated against the user schema.
                "properties": {
                    "users": {
                        "type": "array",
                        "items": {"$ref": "#/definitions/user"}
                    }
                },
                "required": ["type", "auth_url", "users"]
            }
        ]
    }

    def _create_endpoint(self, common, user, permission):
        """Build an objects.Endpoint from deployment-wide settings plus
        one user's credentials.

        :param common: the whole deployment config (auth_url, region, ...)
        :param user: dict holding a single user's credentials
        :param permission: consts.EndpointPermission value for this user
        :returns: an objects.Endpoint instance
        """
        return objects.Endpoint(
            common["auth_url"], user["username"], user["password"],
            # v3 configs use "project_name"; fall back to v2.0 "tenant_name".
            tenant_name=user.get("project_name", user.get("tenant_name")),
            permission=permission,
            region_name=common.get("region_name"),
            endpoint_type=common.get("endpoint_type",
                                     consts.EndpointType.PUBLIC),
            endpoint=common.get("endpoint"),
            domain_name=user.get("domain_name"),
            user_domain_name=user.get("user_domain_name", "Default"),
            admin_domain_name=user.get("admin_domain_name", "Default"),
            project_domain_name=user.get("project_domain_name", "Default"),
            https_insecure=common.get("https_insecure", False),
            https_cacert=common.get("https_cacert")
        )

    def deploy(self):
        """Nothing is installed; just wrap the configured credentials.

        :returns: dict with the "admin" endpoint and a list of "users"
            endpoints built from the config
        """
        permissions = consts.EndpointPermission
        users = [self._create_endpoint(self.config, user, permissions.USER)
                 for user in self.config.get("users", [])]
        admin = self._create_endpoint(self.config,
                                      self.config.get("admin"),
                                      permissions.ADMIN)
        return {"admin": admin, "users": users}

    def cleanup(self):
        """Nothing was deployed, so there is nothing to clean up."""
        pass
| apache-2.0 |
egoid/baytree | lib/python2.7/site-packages/pip/_vendor/html5lib/treewalkers/base.py | 355 | 4939 | from __future__ import absolute_import, division, unicode_literals
from xml.dom import Node
from ..constants import namespaces, voidElements, spaceCharacters
__all__ = ["DOCUMENT", "DOCTYPE", "TEXT", "ELEMENT", "COMMENT", "ENTITY", "UNKNOWN",
           "TreeWalker", "NonRecursiveTreeWalker"]
# Node-type constants aliased from xml.dom so all tree walkers share one
# vocabulary regardless of the backing tree implementation.
DOCUMENT = Node.DOCUMENT_NODE
DOCTYPE = Node.DOCUMENT_TYPE_NODE
TEXT = Node.TEXT_NODE
ELEMENT = Node.ELEMENT_NODE
COMMENT = Node.COMMENT_NODE
ENTITY = Node.ENTITY_NODE
# Sentinel for node types not covered by the constants above.
UNKNOWN = "<#UNKNOWN#>"
# Collapse the iterable of space characters into a single string usable
# by str.lstrip/str.rstrip.
spaceCharacters = "".join(spaceCharacters)
class TreeWalker(object):
    """Base class for tree walkers.

    Subclasses implement ``__iter__`` to emit a stream of token dicts
    describing the document tree handed to the constructor; the helper
    methods here build the individual token dicts.
    """

    def __init__(self, tree):
        self.tree = tree

    def __iter__(self):
        raise NotImplementedError

    def error(self, msg):
        """Build a SerializeError token carrying *msg*."""
        return {"type": "SerializeError", "data": msg}

    def emptyTag(self, namespace, name, attrs, hasChildren=False):
        """Yield an EmptyTag token; void elements with children also
        produce an error token."""
        yield {"type": "EmptyTag", "name": name,
               "namespace": namespace,
               "data": attrs}
        if hasChildren:
            yield self.error("Void element has children")

    def startTag(self, namespace, name, attrs):
        """Build a StartTag token."""
        return {"type": "StartTag",
                "name": name,
                "namespace": namespace,
                "data": attrs}

    def endTag(self, namespace, name):
        """Build an EndTag token."""
        return {"type": "EndTag",
                "name": name,
                "namespace": namespace}

    def text(self, data):
        """Split *data* so leading/trailing whitespace come out as
        separate SpaceCharacters tokens around one Characters token."""
        stripped = data.lstrip(spaceCharacters)
        leading = data[:len(data) - len(stripped)]
        if leading:
            yield {"type": "SpaceCharacters", "data": leading}
        body = stripped.rstrip(spaceCharacters)
        trailing = stripped[len(body):]
        if body:
            yield {"type": "Characters", "data": body}
        if trailing:
            yield {"type": "SpaceCharacters", "data": trailing}

    def comment(self, data):
        """Build a Comment token."""
        return {"type": "Comment", "data": data}

    def doctype(self, name, publicId=None, systemId=None):
        """Build a Doctype token."""
        return {"type": "Doctype",
                "name": name,
                "publicId": publicId,
                "systemId": systemId}

    def entity(self, name):
        """Build an Entity token."""
        return {"type": "Entity", "name": name}

    def unknown(self, nodeType):
        """Build an error token for an unrecognized node type."""
        return self.error("Unknown node type: " + nodeType)
class NonRecursiveTreeWalker(TreeWalker):
    """TreeWalker that traverses the tree iteratively (no recursion),
    driven by four navigation primitives supplied by subclasses."""
    def getNodeDetails(self, node):
        raise NotImplementedError
    def getFirstChild(self, node):
        raise NotImplementedError
    def getNextSibling(self, node):
        raise NotImplementedError
    def getParentNode(self, node):
        raise NotImplementedError
    def __iter__(self):
        # Depth-first pre-order walk emitting serializer tokens.
        currentNode = self.tree
        while currentNode is not None:
            details = self.getNodeDetails(currentNode)
            type, details = details[0], details[1:]
            hasChildren = False
            if type == DOCTYPE:
                yield self.doctype(*details)
            elif type == TEXT:
                for token in self.text(*details):
                    yield token
            elif type == ELEMENT:
                namespace, name, attributes, hasChildren = details
                # HTML void elements are emitted as EmptyTag and their
                # (invalid) children are not descended into.
                if (not namespace or namespace == namespaces["html"]) and name in voidElements:
                    for token in self.emptyTag(namespace, name, attributes,
                                               hasChildren):
                        yield token
                    hasChildren = False
                else:
                    yield self.startTag(namespace, name, attributes)
            elif type == COMMENT:
                yield self.comment(details[0])
            elif type == ENTITY:
                yield self.entity(details[0])
            elif type == DOCUMENT:
                hasChildren = True
            else:
                yield self.unknown(details[0])
            if hasChildren:
                firstChild = self.getFirstChild(currentNode)
            else:
                firstChild = None
            if firstChild is not None:
                currentNode = firstChild
            else:
                # No children: walk back up, emitting EndTag tokens, until
                # a next sibling is found or the root is reached.
                while currentNode is not None:
                    details = self.getNodeDetails(currentNode)
                    type, details = details[0], details[1:]
                    if type == ELEMENT:
                        namespace, name, attributes, hasChildren = details
                        if (namespace and namespace != namespaces["html"]) or name not in voidElements:
                            yield self.endTag(namespace, name)
                    if self.tree is currentNode:
                        currentNode = None
                        break
                    nextSibling = self.getNextSibling(currentNode)
                    if nextSibling is not None:
                        currentNode = nextSibling
                        break
                    else:
                        currentNode = self.getParentNode(currentNode)
| mit |
tlakshman26/cinder-bug-fix-volume-conversion-full | cinder/tests/unit/backup/drivers/test_backup_nfs.py | 15 | 25451 | # Copyright (C) 2015 Tom Barron <tpb@dyncloud.net>
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Tests for Backup NFS driver.
"""
import bz2
import filecmp
import hashlib
import os
import shutil
import tempfile
import zlib
import mock
from os_brick.remotefs import remotefs as remotefs_brick
from oslo_config import cfg
from cinder.backup.drivers import nfs
from cinder import context
from cinder import db
from cinder import exception
from cinder.i18n import _
from cinder import objects
from cinder import test
from cinder import utils
CONF = cfg.CONF
# Fixture values shared by the test cases below.
FAKE_BACKUP_MOUNT_POINT_BASE = '/fake/mount-point-base'
FAKE_HOST = 'fake_host'
FAKE_EXPORT_PATH = 'fake/export/path'
FAKE_BACKUP_SHARE = '%s:/%s' % (FAKE_HOST, FAKE_EXPORT_PATH)
FAKE_BACKUP_PATH = os.path.join(FAKE_BACKUP_MOUNT_POINT_BASE,
                                FAKE_EXPORT_PATH)
# The backup id split into the two-character prefixes the driver uses to
# build the hierarchical container path (de/ad/deadbeef-whatever).
FAKE_BACKUP_ID_PART1 = 'de'
FAKE_BACKUP_ID_PART2 = 'ad'
FAKE_BACKUP_ID_REST = 'beef-whatever'
FAKE_BACKUP_ID = (FAKE_BACKUP_ID_PART1 + FAKE_BACKUP_ID_PART2 +
                  FAKE_BACKUP_ID_REST)
UPDATED_CONTAINER_NAME = os.path.join(FAKE_BACKUP_ID_PART1,
                                      FAKE_BACKUP_ID_PART2,
                                      FAKE_BACKUP_ID)
class BackupNFSShareTestCase(test.TestCase):
    """Tests for NFS backup driver configuration and mount handling."""
    def setUp(self):
        super(BackupNFSShareTestCase, self).setUp()
        self.ctxt = context.get_admin_context()
        # Silence driver logging during the tests.
        self.mock_object(nfs, 'LOG')
    def test_check_configuration_no_backup_share(self):
        # _check_configuration must raise when backup_share is unset.
        self.override_config('backup_share', None)
        self.mock_object(nfs.NFSBackupDriver, '_init_backup_repo_path',
                         mock.Mock(return_value=FAKE_BACKUP_PATH))
        # Patch _check_configuration so the constructor itself succeeds;
        # then call the real method on the instance.
        with mock.patch.object(nfs.NFSBackupDriver, '_check_configuration'):
            driver = nfs.NFSBackupDriver(self.ctxt)
        self.assertRaises(exception.ConfigNotFound,
                          driver._check_configuration)
    def test_init_backup_repo_path(self):
        # _init_backup_repo_path should mount the configured share and
        # return its local mount point.
        self.override_config('backup_share', FAKE_BACKUP_SHARE)
        self.override_config('backup_mount_point_base',
                             FAKE_BACKUP_MOUNT_POINT_BASE)
        mock_remotefsclient = mock.Mock()
        mock_remotefsclient.get_mount_point = mock.Mock(
            return_value=FAKE_BACKUP_PATH)
        self.mock_object(nfs.NFSBackupDriver, '_check_configuration')
        self.mock_object(remotefs_brick, 'RemoteFsClient',
                         mock.Mock(return_value=mock_remotefsclient))
        self.mock_object(utils, 'get_root_helper')
        # Patch the method during construction, then invoke it directly.
        with mock.patch.object(nfs.NFSBackupDriver, '_init_backup_repo_path'):
            driver = nfs.NFSBackupDriver(self.ctxt)
        path = driver._init_backup_repo_path()
        self.assertEqual(FAKE_BACKUP_PATH, path)
        utils.get_root_helper.called_once()
        mock_remotefsclient.mount.assert_called_once_with(FAKE_BACKUP_SHARE)
        mock_remotefsclient.get_mount_point.assert_called_once_with(
            FAKE_BACKUP_SHARE)
def fake_md5(arg):
    """Stand-in for hashlib.md5 returning a fixed digest.

    The argument is ignored; the returned object only supports
    hexdigest(), which always yields the same marker string.
    """
    class _FakeHash(object):
        def hexdigest(self):
            return 'fake-md5-sum'
    return _FakeHash()
class BackupNFSSwiftBasedTestCase(test.TestCase):
"""Test Cases for based on Swift tempest backup tests."""
def _create_volume_db_entry(self):
vol = {'id': '1234-5678-1234-8888',
'size': 1,
'status': 'available'}
return db.volume_create(self.ctxt, vol)['id']
def _create_backup_db_entry(self, container='test-container',
backup_id=123, parent_id=None):
backup = {'id': backup_id,
'size': 1,
'container': container,
'volume_id': '1234-5678-1234-8888',
'parent_id': parent_id,
'user_id': 'user-id',
'project_id': 'project-id',
}
return db.backup_create(self.ctxt, backup)['id']
def setUp(self):
super(BackupNFSSwiftBasedTestCase, self).setUp()
self.ctxt = context.get_admin_context()
self.stubs.Set(hashlib, 'md5', fake_md5)
self._create_volume_db_entry()
self.volume_file = tempfile.NamedTemporaryFile()
self.temp_dir = tempfile.mkdtemp()
self.addCleanup(self.volume_file.close)
self.override_config('backup_share', FAKE_BACKUP_SHARE)
self.override_config('backup_mount_point_base',
'/tmp')
self.override_config('backup_file_size', 52428800)
mock_remotefsclient = mock.Mock()
mock_remotefsclient.get_mount_point = mock.Mock(
return_value=self.temp_dir)
self.mock_object(remotefs_brick, 'RemoteFsClient',
mock.Mock(return_value=mock_remotefsclient))
# Remove tempdir.
self.addCleanup(shutil.rmtree, self.temp_dir)
for _i in range(0, 32):
self.volume_file.write(os.urandom(1024))
def test_backup_uncompressed(self):
self._create_backup_db_entry()
self.flags(backup_compression_algorithm='none')
service = nfs.NFSBackupDriver(self.ctxt)
self.volume_file.seek(0)
backup = objects.Backup.get_by_id(self.ctxt, 123)
service.backup(backup, self.volume_file)
def test_backup_bz2(self):
self._create_backup_db_entry()
self.flags(backup_compression_algorithm='bz2')
service = nfs.NFSBackupDriver(self.ctxt)
self.volume_file.seek(0)
backup = objects.Backup.get_by_id(self.ctxt, 123)
service.backup(backup, self.volume_file)
def test_backup_zlib(self):
self._create_backup_db_entry()
self.flags(backup_compression_algorithm='zlib')
service = nfs.NFSBackupDriver(self.ctxt)
self.volume_file.seek(0)
backup = objects.Backup.get_by_id(self.ctxt, 123)
service.backup(backup, self.volume_file)
def test_backup_default_container(self):
self._create_backup_db_entry(container=None,
backup_id=FAKE_BACKUP_ID)
service = nfs.NFSBackupDriver(self.ctxt)
self.volume_file.seek(0)
backup = objects.Backup.get_by_id(self.ctxt, FAKE_BACKUP_ID)
service.backup(backup, self.volume_file)
backup = objects.Backup.get_by_id(self.ctxt, FAKE_BACKUP_ID)
self.assertEqual(backup['container'], UPDATED_CONTAINER_NAME)
@mock.patch('cinder.backup.drivers.nfs.NFSBackupDriver.'
'_send_progress_end')
@mock.patch('cinder.backup.drivers.nfs.NFSBackupDriver.'
'_send_progress_notification')
def test_backup_default_container_notify(self, _send_progress,
_send_progress_end):
self._create_backup_db_entry(container=None)
# If the backup_object_number_per_notification is set to 1,
# the _send_progress method will be called for sure.
CONF.set_override("backup_object_number_per_notification", 1)
CONF.set_override("backup_enable_progress_timer", False)
service = nfs.NFSBackupDriver(self.ctxt)
self.volume_file.seek(0)
backup = objects.Backup.get_by_id(self.ctxt, 123)
service.backup(backup, self.volume_file)
self.assertTrue(_send_progress.called)
self.assertTrue(_send_progress_end.called)
# If the backup_object_number_per_notification is increased to
# another value, the _send_progress method will not be called.
_send_progress.reset_mock()
_send_progress_end.reset_mock()
CONF.set_override("backup_object_number_per_notification", 10)
service = nfs.NFSBackupDriver(self.ctxt)
self.volume_file.seek(0)
backup = objects.Backup.get_by_id(self.ctxt, 123)
service.backup(backup, self.volume_file)
self.assertFalse(_send_progress.called)
self.assertTrue(_send_progress_end.called)
# If the timer is enabled, the _send_progress will be called,
# since the timer can trigger the progress notification.
_send_progress.reset_mock()
_send_progress_end.reset_mock()
CONF.set_override("backup_object_number_per_notification", 10)
CONF.set_override("backup_enable_progress_timer", True)
service = nfs.NFSBackupDriver(self.ctxt)
self.volume_file.seek(0)
backup = objects.Backup.get_by_id(self.ctxt, 123)
service.backup(backup, self.volume_file)
self.assertTrue(_send_progress.called)
self.assertTrue(_send_progress_end.called)
def test_backup_custom_container(self):
container_name = 'fake99'
self._create_backup_db_entry(container=container_name)
service = nfs.NFSBackupDriver(self.ctxt)
self.volume_file.seek(0)
backup = objects.Backup.get_by_id(self.ctxt, 123)
service.backup(backup, self.volume_file)
backup = objects.Backup.get_by_id(self.ctxt, 123)
self.assertEqual(backup['container'], container_name)
def test_backup_shafile(self):
def _fake_generate_object_name_prefix(self, backup):
az = 'az_fake'
backup_name = '%s_backup_%s' % (az, backup['id'])
volume = 'volume_%s' % (backup['volume_id'])
prefix = volume + '_' + backup_name
return prefix
# Raise a pseudo exception.BackupDriverException.
self.stubs.Set(nfs.NFSBackupDriver,
'_generate_object_name_prefix',
_fake_generate_object_name_prefix)
container_name = self.temp_dir.replace(tempfile.gettempdir() + '/',
'', 1)
self._create_backup_db_entry(container=container_name)
service = nfs.NFSBackupDriver(self.ctxt)
self.volume_file.seek(0)
backup = objects.Backup.get_by_id(self.ctxt, 123)
service.backup(backup, self.volume_file)
backup = objects.Backup.get_by_id(self.ctxt, 123)
self.assertEqual(backup['container'], container_name)
# Verify sha contents
content1 = service._read_sha256file(backup)
self.assertEqual(32 * 1024 / content1['chunk_size'],
len(content1['sha256s']))
def test_backup_cmp_shafiles(self):
    """A no-op incremental backup reproduces the parent's sha256 list."""
    def _fake_generate_object_name_prefix(self, backup):
        # Deterministic prefix so the on-disk layout is predictable.
        az = 'az_fake'
        backup_name = '%s_backup_%s' % (az, backup['id'])
        volume = 'volume_%s' % (backup['volume_id'])
        prefix = volume + '_' + backup_name
        return prefix
    # Stub the object name prefix generator.
    self.stubs.Set(nfs.NFSBackupDriver,
                   '_generate_object_name_prefix',
                   _fake_generate_object_name_prefix)
    container_name = self.temp_dir.replace(tempfile.gettempdir() + '/',
                                           '', 1)
    self._create_backup_db_entry(container=container_name,
                                 backup_id=123)
    service = nfs.NFSBackupDriver(self.ctxt)
    self.volume_file.seek(0)
    backup = objects.Backup.get_by_id(self.ctxt, 123)
    service.backup(backup, self.volume_file)
    backup = objects.Backup.get_by_id(self.ctxt, 123)
    self.assertEqual(backup['container'], container_name)
    # Create incremental backup with no change to contents
    self._create_backup_db_entry(container=container_name, backup_id=124,
                                 parent_id=123)
    service = nfs.NFSBackupDriver(self.ctxt)
    self.volume_file.seek(0)
    deltabackup = objects.Backup.get_by_id(self.ctxt, 124)
    service.backup(deltabackup, self.volume_file)
    deltabackup = objects.Backup.get_by_id(self.ctxt, 124)
    self.assertEqual(deltabackup['container'], container_name)
    # Compare shas from both files: identical data -> identical hashes.
    content1 = service._read_sha256file(backup)
    content2 = service._read_sha256file(deltabackup)
    self.assertEqual(len(content1['sha256s']), len(content2['sha256s']))
    self.assertEqual(set(content1['sha256s']), set(content2['sha256s']))
def test_backup_delta_two_objects_change(self):
    """Changing two 1 KiB regions changes exactly those sha entries."""
    def _fake_generate_object_name_prefix(self, backup):
        # Deterministic prefix so the on-disk layout is predictable.
        az = 'az_fake'
        backup_name = '%s_backup_%s' % (az, backup['id'])
        volume = 'volume_%s' % (backup['volume_id'])
        prefix = volume + '_' + backup_name
        return prefix
    # Stub the object name prefix generator.
    self.stubs.Set(nfs.NFSBackupDriver,
                   '_generate_object_name_prefix',
                   _fake_generate_object_name_prefix)
    self.flags(backup_file_size=(8 * 1024))
    self.flags(backup_sha_block_size_bytes=1024)
    container_name = self.temp_dir.replace(tempfile.gettempdir() + '/',
                                           '', 1)
    self._create_backup_db_entry(container=container_name, backup_id=123)
    service = nfs.NFSBackupDriver(self.ctxt)
    self.volume_file.seek(0)
    backup = objects.Backup.get_by_id(self.ctxt, 123)
    service.backup(backup, self.volume_file)
    backup = objects.Backup.get_by_id(self.ctxt, 123)
    self.assertEqual(backup['container'], container_name)
    # Overwrite two 1 KiB sha blocks (at 16 KiB and 20 KiB), then take an
    # incremental backup.  (The previous comment claiming "no change to
    # contents" was inaccurate -- the writes below change the data.)
    self.volume_file.seek(16 * 1024)
    self.volume_file.write(os.urandom(1024))
    self.volume_file.seek(20 * 1024)
    self.volume_file.write(os.urandom(1024))
    self._create_backup_db_entry(container=container_name, backup_id=124,
                                 parent_id=123)
    service = nfs.NFSBackupDriver(self.ctxt)
    self.volume_file.seek(0)
    deltabackup = objects.Backup.get_by_id(self.ctxt, 124)
    service.backup(deltabackup, self.volume_file)
    deltabackup = objects.Backup.get_by_id(self.ctxt, 124)
    self.assertEqual(deltabackup['container'], container_name)
    content1 = service._read_sha256file(backup)
    content2 = service._read_sha256file(deltabackup)
    # Verify that two shas are changed at index 16 and 20 (1 KiB blocks).
    self.assertNotEqual(content1['sha256s'][16], content2['sha256s'][16])
    self.assertNotEqual(content1['sha256s'][20], content2['sha256s'][20])
def test_backup_delta_two_blocks_in_object_change(self):
    """Changing two sha blocks updates the corresponding sha entries.

    NOTE(review): this body is identical to
    test_backup_delta_two_objects_change; per its name it likely
    intended a larger backup_file_size so both changed blocks fall
    inside one backup object -- confirm against upstream history.
    """
    def _fake_generate_object_name_prefix(self, backup):
        # Deterministic prefix so the on-disk layout is predictable.
        az = 'az_fake'
        backup_name = '%s_backup_%s' % (az, backup['id'])
        volume = 'volume_%s' % (backup['volume_id'])
        prefix = volume + '_' + backup_name
        return prefix
    # Stub the object name prefix generator.
    self.stubs.Set(nfs.NFSBackupDriver,
                   '_generate_object_name_prefix',
                   _fake_generate_object_name_prefix)
    self.flags(backup_file_size=(8 * 1024))
    self.flags(backup_sha_block_size_bytes=1024)
    container_name = self.temp_dir.replace(tempfile.gettempdir() + '/',
                                           '', 1)
    self._create_backup_db_entry(container=container_name, backup_id=123)
    service = nfs.NFSBackupDriver(self.ctxt)
    self.volume_file.seek(0)
    backup = objects.Backup.get_by_id(self.ctxt, 123)
    service.backup(backup, self.volume_file)
    backup = objects.Backup.get_by_id(self.ctxt, 123)
    self.assertEqual(backup['container'], container_name)
    # Overwrite two 1 KiB sha blocks (at 16 KiB and 20 KiB), then take an
    # incremental backup.  (The previous "no change to contents" comment
    # was inaccurate.)
    self.volume_file.seek(16 * 1024)
    self.volume_file.write(os.urandom(1024))
    self.volume_file.seek(20 * 1024)
    self.volume_file.write(os.urandom(1024))
    self._create_backup_db_entry(container=container_name, backup_id=124,
                                 parent_id=123)
    service = nfs.NFSBackupDriver(self.ctxt)
    self.volume_file.seek(0)
    deltabackup = objects.Backup.get_by_id(self.ctxt, 124)
    service.backup(deltabackup, self.volume_file)
    deltabackup = objects.Backup.get_by_id(self.ctxt, 124)
    self.assertEqual(deltabackup['container'], container_name)
    # Verify that two shas are changed at index 16 and 20
    content1 = service._read_sha256file(backup)
    content2 = service._read_sha256file(deltabackup)
    self.assertNotEqual(content1['sha256s'][16], content2['sha256s'][16])
    self.assertNotEqual(content1['sha256s'][20], content2['sha256s'][20])
def test_backup_backup_metadata_fail(self):
    """Test of when an exception occurs in backup().

    In backup(), after an exception occurs in
    self._backup_metadata(), we want to check the process of an
    exception handler.
    """
    self._create_backup_db_entry()
    self.flags(backup_compression_algorithm='none')
    service = nfs.NFSBackupDriver(self.ctxt)
    self.volume_file.seek(0)
    backup = objects.Backup.get_by_id(self.ctxt, 123)

    def fake_backup_metadata(self, backup, object_meta):
        raise exception.BackupDriverException(message=_('fake'))

    # Raise a pseudo exception.BackupDriverException.
    self.stubs.Set(nfs.NFSBackupDriver, '_backup_metadata',
                   fake_backup_metadata)
    # We expect that an exception be notified directly.
    self.assertRaises(exception.BackupDriverException,
                      service.backup,
                      backup, self.volume_file)
def test_backup_backup_metadata_fail2(self):
    """Test of when an exception occurs in an exception handler.

    In backup(), after an exception occurs in
    self._backup_metadata(), we want to check the process when the
    second exception occurs in self.delete().
    """
    self._create_backup_db_entry()
    self.flags(backup_compression_algorithm='none')
    service = nfs.NFSBackupDriver(self.ctxt)
    self.volume_file.seek(0)
    backup = objects.Backup.get_by_id(self.ctxt, 123)

    def fake_backup_metadata(self, backup, object_meta):
        raise exception.BackupDriverException(message=_('fake'))

    # Raise a pseudo exception.BackupDriverException.
    self.stubs.Set(nfs.NFSBackupDriver, '_backup_metadata',
                   fake_backup_metadata)

    def fake_delete(self, backup):
        raise exception.BackupOperationError()

    # Raise a pseudo exception.BackupOperationError from the cleanup path.
    self.stubs.Set(nfs.NFSBackupDriver, 'delete', fake_delete)
    # We expect that the second exception is notified.
    self.assertRaises(exception.BackupOperationError,
                      service.backup,
                      backup, self.volume_file)
def test_restore_uncompressed(self):
    """Round-trip an uncompressed backup and compare file contents."""
    self.flags(backup_compression_algorithm='none')
    self.flags(backup_sha_block_size_bytes=32)
    self._create_backup_db_entry()
    driver = nfs.NFSBackupDriver(self.ctxt)
    self.volume_file.seek(0)
    backup = objects.Backup.get_by_id(self.ctxt, 123)
    driver.backup(backup, self.volume_file)
    with tempfile.NamedTemporaryFile() as restored_file:
        backup = objects.Backup.get_by_id(self.ctxt, 123)
        driver.restore(backup, '1234-5678-1234-8888', restored_file)
        # Restored bytes must match the original volume exactly.
        self.assertTrue(filecmp.cmp(self.volume_file.name,
                                    restored_file.name))
def test_restore_bz2(self):
    """Round-trip a bz2-compressed backup and compare file contents."""
    self._create_backup_db_entry()
    self.flags(backup_compression_algorithm='bz2')
    self.flags(backup_file_size=(1024 * 3))
    self.flags(backup_sha_block_size_bytes=1024)
    service = nfs.NFSBackupDriver(self.ctxt)
    self.volume_file.seek(0)
    backup = objects.Backup.get_by_id(self.ctxt, 123)
    service.backup(backup, self.volume_file)
    with tempfile.NamedTemporaryFile() as restored_file:
        backup = objects.Backup.get_by_id(self.ctxt, 123)
        service.restore(backup, '1234-5678-1234-8888', restored_file)
        # Restored bytes must match the original volume exactly.
        self.assertTrue(filecmp.cmp(self.volume_file.name,
                                    restored_file.name))
def test_restore_zlib(self):
    """Round-trip a zlib-compressed backup and compare file contents."""
    self._create_backup_db_entry()
    self.flags(backup_compression_algorithm='zlib')
    self.flags(backup_file_size=(1024 * 3))
    # PEP 8 fix: no spaces around '=' in keyword arguments.
    self.flags(backup_sha_block_size_bytes=1024)
    service = nfs.NFSBackupDriver(self.ctxt)
    self.volume_file.seek(0)
    backup = objects.Backup.get_by_id(self.ctxt, 123)
    service.backup(backup, self.volume_file)
    with tempfile.NamedTemporaryFile() as restored_file:
        backup = objects.Backup.get_by_id(self.ctxt, 123)
        service.restore(backup, '1234-5678-1234-8888', restored_file)
        # Restored bytes must match the original volume exactly.
        self.assertTrue(filecmp.cmp(self.volume_file.name,
                                    restored_file.name))
def test_restore_delta(self):
    """Restoring an incremental backup reproduces the modified volume."""
    def _fake_generate_object_name_prefix(self, backup):
        # Deterministic prefix so the on-disk layout is predictable.
        az = 'az_fake'
        backup_name = '%s_backup_%s' % (az, backup['id'])
        volume = 'volume_%s' % (backup['volume_id'])
        prefix = volume + '_' + backup_name
        return prefix
    # Stub the object name prefix generator.
    self.stubs.Set(nfs.NFSBackupDriver,
                   '_generate_object_name_prefix',
                   _fake_generate_object_name_prefix)
    # PEP 8 fix: no spaces around '=' in keyword arguments.
    self.flags(backup_file_size=(1024 * 8))
    self.flags(backup_sha_block_size_bytes=1024)
    container_name = self.temp_dir.replace(tempfile.gettempdir() + '/',
                                           '', 1)
    self._create_backup_db_entry(container=container_name, backup_id=123)
    service = nfs.NFSBackupDriver(self.ctxt)
    self.volume_file.seek(0)
    backup = objects.Backup.get_by_id(self.ctxt, 123)
    service.backup(backup, self.volume_file)
    # Overwrite two 1 KiB regions, then take an incremental backup.
    # (The previous "no change to contents" comment was inaccurate.)
    self.volume_file.seek(16 * 1024)
    self.volume_file.write(os.urandom(1024))
    self.volume_file.seek(20 * 1024)
    self.volume_file.write(os.urandom(1024))
    self._create_backup_db_entry(container=container_name, backup_id=124,
                                 parent_id=123)
    self.volume_file.seek(0)
    deltabackup = objects.Backup.get_by_id(self.ctxt, 124)
    service.backup(deltabackup, self.volume_file, True)
    deltabackup = objects.Backup.get_by_id(self.ctxt, 124)
    with tempfile.NamedTemporaryFile() as restored_file:
        backup = objects.Backup.get_by_id(self.ctxt, 124)
        service.restore(backup, '1234-5678-1234-8888',
                        restored_file)
        # Restored bytes must match the modified volume exactly.
        self.assertTrue(filecmp.cmp(self.volume_file.name,
                                    restored_file.name))
def test_delete(self):
    """Deleting an existing backup must complete without raising."""
    self._create_backup_db_entry()
    driver = nfs.NFSBackupDriver(self.ctxt)
    driver.delete(objects.Backup.get_by_id(self.ctxt, 123))
def test_get_compressor(self):
    """_get_compressor maps algorithm names to modules; unknown raises."""
    driver = nfs.NFSBackupDriver(self.ctxt)
    self.assertIsNone(driver._get_compressor('None'))
    self.assertEqual(driver._get_compressor('zlib'), zlib)
    self.assertEqual(driver._get_compressor('bz2'), bz2)
    self.assertRaises(ValueError, driver._get_compressor, 'fake')
def test_prepare_output_data_effective_compression(self):
    """Highly compressible input is zlib-compressed and shrinks."""
    service = nfs.NFSBackupDriver(self.ctxt)
    # Set up buffer of 128 zeroed bytes -- highly compressible.
    fake_data = buffer(bytearray(128))
    result = service._prepare_output_data(fake_data)
    self.assertEqual('zlib', result[0])
    # Bug fix: compare the compressed payload's length (result[1]), not
    # len(result) -- the (algorithm, data) tuple always has length 2, so
    # the old assertion passed vacuously.
    self.assertTrue(len(result[1]) < len(fake_data))
def test_prepare_output_data_no_compresssion(self):
    """With compression disabled the data is passed through unchanged."""
    # NOTE(review): method name carries a typo ('compresssion'); kept
    # as-is to avoid changing the test's public identifier.
    self.flags(backup_compression_algorithm='none')
    service = nfs.NFSBackupDriver(self.ctxt)
    # Set up buffer of 128 zeroed bytes
    fake_data = buffer(bytearray(128))
    result = service._prepare_output_data(fake_data)
    self.assertEqual('none', result[0])
    self.assertEqual(fake_data, result[1])
def test_prepare_output_data_ineffective_compression(self):
    """Data that doesn't shrink under compression is stored as 'none'."""
    service = nfs.NFSBackupDriver(self.ctxt)
    # Set up buffer of 128 zeroed bytes
    fake_data = buffer(bytearray(128))
    # Pre-compress so that compression in the driver will be ineffective.
    already_compressed_data = service.compressor.compress(fake_data)
    result = service._prepare_output_data(already_compressed_data)
    self.assertEqual('none', result[0])
    self.assertEqual(already_compressed_data, result[1])
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Test for the BigQuery side input example."""
import logging
import unittest
import apache_beam as beam
from apache_beam.examples.cookbook import bigquery_side_input
from apache_beam.testing.test_pipeline import TestPipeline
from apache_beam.testing.util import assert_that
from apache_beam.testing.util import equal_to
class BigQuerySideInputTest(unittest.TestCase):

  def test_create_groups(self):
    """create_groups pairs each group id with the surviving corpus/word."""
    with TestPipeline() as p:
      group_ids = p | 'CreateGroupIds' >> beam.Create(['A', 'B', 'C'])
      corpus = p | 'CreateCorpus' >> beam.Create(
          [{'f': 'corpus1'}, {'f': 'corpus2'}, {'f': 'corpus3'}])
      words = p | 'CreateWords' >> beam.Create(
          [{'f': 'word1'}, {'f': 'word2'}, {'f': 'word3'}])
      ignore_corpus = p | 'CreateIgnoreCorpus' >> beam.Create(['corpus1'])
      ignore_word = p | 'CreateIgnoreWord' >> beam.Create(['word1'])

      groups = bigquery_side_input.create_groups(
          group_ids, corpus, words, ignore_corpus, ignore_word)

      # 'corpus1'/'word1' are ignored, so every group gets corpus2/word2.
      assert_that(groups, equal_to(
          [('A', 'corpus2', 'word2'),
           ('B', 'corpus2', 'word2'),
           ('C', 'corpus2', 'word2')]))
if __name__ == '__main__':
    # Run this module's tests directly with INFO-level logging.
    logging.getLogger().setLevel(logging.INFO)
    unittest.main()
# -*- coding: utf-8 -*-
"""
itsdangerous
~~~~~~~~~~~~
A module that implements various functions to deal with untrusted
sources. Mainly useful for web applications.
:copyright: (c) 2011 by Armin Ronacher and the Django Software Foundation.
:license: BSD, see LICENSE for more details.
"""
import sys
import hmac
import zlib
import time
import base64
import hashlib
import operator
from datetime import datetime
# Python 2/3 compatibility shims: bind version-specific names once at
# import time so the rest of the module is version-agnostic.
PY2 = sys.version_info[0] == 2
if PY2:
    from itertools import izip
    text_type = unicode
    int_to_byte = chr
    number_types = (int, long, float)
else:
    from functools import reduce
    izip = zip
    text_type = str
    # Converts a small int (0-255) into a single big-endian byte.
    int_to_byte = operator.methodcaller('to_bytes', 1, 'big')
    number_types = (int, float)


# Prefer simplejson when installed; fall back to the stdlib json module.
try:
    import simplejson as json
except ImportError:
    import json
class _CompactJSON(object):
"""Wrapper around simplejson that strips whitespace.
"""
def loads(self, payload):
return json.loads(payload)
def dumps(self, obj):
return json.dumps(obj, separators=(',', ':'))
compact_json = _CompactJSON()
# 2011/01/01 in UTC
EPOCH = 1293840000
def want_bytes(s, encoding='utf-8', errors='strict'):
    """Coerce *s* to bytes, encoding text input with *encoding*/*errors*."""
    if isinstance(s, text_type):
        return s.encode(encoding, errors)
    return s
def is_text_serializer(serializer):
    """Checks whether a serializer generates text or binary."""
    return isinstance(serializer.dumps({}), text_type)
# Starting with 3.3 the standard library has a c-implementation for
# constant time string compares.
_builtin_constant_time_compare = getattr(hmac, 'compare_digest', None)


def constant_time_compare(val1, val2):
    """Return True if the two strings are equal, False otherwise.

    The time taken is independent of the number of characters that
    match.  Do not use this function for anything other than comparison
    against targets of known length.  This should really be implemented
    in C to get it completely right.
    """
    if _builtin_constant_time_compare is not None:
        return _builtin_constant_time_compare(val1, val2)
    # Pure-Python fallback: always scan a full-length operand so timing
    # does not reveal the position of the first mismatch.
    if len(val1) == len(val2):
        acc, scan = 0, val1
    else:
        acc, scan = 1, val2
    for x, y in izip(bytearray(scan), bytearray(val2)):
        acc |= x ^ y
    return acc == 0
class BadData(Exception):
    """Raised if bad data of any sort was encountered.  This is the
    base for all exceptions that itsdangerous is currently using.

    .. versionadded:: 0.15
    """
    # Human-readable error message; mirrored onto the instance in __init__.
    message = None

    def __init__(self, message):
        Exception.__init__(self, message)
        self.message = message

    def __str__(self):
        return text_type(self.message)

    # On Python 2, __str__ must return encoded bytes; the text version
    # above becomes __unicode__ and __str__ is redefined to encode it.
    if PY2:
        __unicode__ = __str__
        def __str__(self):
            return self.__unicode__().encode('utf-8')
class BadPayload(BadData):
    """Raised when a payload is loaded without checking the signature
    first and an exception happened as a result of that.  The original
    exception that caused it is stored on the instance as
    :attr:`original_error`.

    .. versionadded:: 0.15
    """

    def __init__(self, message, original_error=None):
        super(BadPayload, self).__init__(message)
        #: If available, the error that indicates why the payload
        #: was not valid.  This might be `None`.
        self.original_error = original_error
class BadSignature(BadData):
    """Raised if a signature does not match.  As of itsdangerous 0.14
    there are helpful attributes on the exception instances.  You can
    also catch down the baseclass :exc:`BadData`.
    """

    def __init__(self, message, payload=None):
        super(BadSignature, self).__init__(message)
        #: The payload that failed the signature test.  In some
        #: situations you might still want to inspect this, even if
        #: you know it was tampered with.
        #:
        #: .. versionadded:: 0.14
        self.payload = payload
class BadTimeSignature(BadSignature):
    """Raised for time based signatures that fail.  This is a subclass
    of :class:`BadSignature` so you can catch those down as well.
    """

    def __init__(self, message, payload=None, date_signed=None):
        super(BadTimeSignature, self).__init__(message, payload)
        #: If the signature expired this exposes the date of when the
        #: signature was created.  This can be helpful in order to
        #: tell the user how long a link has been gone stale.
        #:
        #: .. versionadded:: 0.14
        self.date_signed = date_signed
class SignatureExpired(BadTimeSignature):
    """Signature timestamp is older than the required max_age.  This is
    a subclass of :exc:`BadTimeSignature` so you can use the baseclass
    for catching the error.
    """
def base64_encode(string):
    """URL-safe base64 encode a single bytestring (tolerates being
    called with a unicode string).

    Padding '=' characters are stripped so the result is safe for URLs.
    """
    return base64.urlsafe_b64encode(want_bytes(string)).strip(b'=')
def base64_decode(string):
    """URL-safe base64 decode a single bytestring (tolerates being
    called with a unicode string).

    Missing '=' padding is restored before decoding; the result is
    always a bytestring.
    """
    raw = want_bytes(string, encoding='ascii', errors='ignore')
    padding = b'=' * (-len(raw) % 4)
    return base64.urlsafe_b64decode(raw + padding)
def int_to_bytes(num):
    """Serialize a non-negative integer to big-endian bytes (b'' for 0)."""
    assert num >= 0
    chunks = []
    while num:
        chunks.append(int_to_byte(num & 0xff))
        num >>= 8
    chunks.reverse()
    return b''.join(chunks)
def bytes_to_int(bytestr):
    """Interpret *bytestr* as a big-endian unsigned integer (0 for b'')."""
    result = 0
    for octet in bytearray(bytestr):
        result = (result << 8) | octet
    return result
class SigningAlgorithm(object):
    """Base class for signing algorithms.  Subclasses have to implement
    `get_signature` to provide signature generation functionality.
    """

    def get_signature(self, key, value):
        """Returns the signature for the given key and value."""
        raise NotImplementedError()

    def verify_signature(self, key, value, sig):
        """Verifies the given signature matches the expected signature."""
        expected = self.get_signature(key, value)
        # Constant-time comparison avoids leaking match position timing.
        return constant_time_compare(sig, expected)
class NoneAlgorithm(SigningAlgorithm):
    """Provides an algorithm that does not perform any signing and
    returns an empty signature.
    """

    def get_signature(self, key, value):
        # Unsigned mode: the signature is always the empty byte string.
        return b''
class HMACAlgorithm(SigningAlgorithm):
    """Provides signature generation using HMACs."""

    #: The digest method to use with the MAC algorithm.  Defaults to
    #: sha1 but any other function from the hashlib module may be used.
    default_digest_method = staticmethod(hashlib.sha1)

    def __init__(self, digest_method=None):
        self.digest_method = (self.default_digest_method
                              if digest_method is None else digest_method)

    def get_signature(self, key, value):
        return hmac.new(key, msg=value,
                        digestmod=self.digest_method).digest()
class Signer(object):
    """This class can sign bytes and unsign it and validate the signature
    provided.

    Salt can be used to namespace the hash, so that a signed string is only
    valid for a given namespace.  Leaving this at the default value or
    re-using a salt value across different parts of your application where
    the same signed value in one part can mean something different in
    another part is a security risk.

    See :ref:`the-salt` for an example of what the salt is doing and how
    you can utilize it.

    .. versionadded:: 0.14
       `key_derivation` and `digest_method` were added as arguments to the
       class constructor.

    .. versionadded:: 0.18
       `algorithm` was added as an argument to the class constructor.
    """

    #: The digest method to use for the signer.  This defaults to sha1 but
    #: can be changed for any other function in the hashlib module.
    #:
    #: .. versionchanged:: 0.14
    default_digest_method = staticmethod(hashlib.sha1)

    #: Controls how the key is derived.  The default is Django style
    #: concatenation.  Possible values are ``concat``, ``django-concat``
    #: and ``hmac``.  This is used for deriving a key from the secret key
    #: with an added salt.
    #:
    #: .. versionadded:: 0.14
    default_key_derivation = 'django-concat'

    def __init__(self, secret_key, salt=None, sep='.', key_derivation=None,
                 digest_method=None, algorithm=None):
        self.secret_key = want_bytes(secret_key)
        self.sep = sep
        self.salt = 'itsdangerous.Signer' if salt is None else salt
        if key_derivation is None:
            key_derivation = self.default_key_derivation
        self.key_derivation = key_derivation
        if digest_method is None:
            digest_method = self.default_digest_method
        self.digest_method = digest_method
        if algorithm is None:
            # Default algorithm: HMAC over the derived key.
            algorithm = HMACAlgorithm(self.digest_method)
        self.algorithm = algorithm

    def derive_key(self):
        """This method is called to derive the key.  If you're unhappy
        with the default key derivation choices you can override them
        here.  Keep in mind that the key derivation in itsdangerous is not
        intended to be used as a security method to make a complex key out
        of a short password.  Instead you should use large random secret
        keys.
        """
        salt = want_bytes(self.salt)
        if self.key_derivation == 'concat':
            return self.digest_method(salt + self.secret_key).digest()
        elif self.key_derivation == 'django-concat':
            return self.digest_method(salt + b'signer' +
                                      self.secret_key).digest()
        elif self.key_derivation == 'hmac':
            mac = hmac.new(self.secret_key, digestmod=self.digest_method)
            mac.update(salt)
            return mac.digest()
        elif self.key_derivation == 'none':
            # 'none' uses the secret key directly, without derivation.
            return self.secret_key
        else:
            raise TypeError('Unknown key derivation method')

    def get_signature(self, value):
        """Returns the signature for the given value."""
        value = want_bytes(value)
        key = self.derive_key()
        sig = self.algorithm.get_signature(key, value)
        return base64_encode(sig)

    def sign(self, value):
        """Signs the given string."""
        return value + want_bytes(self.sep) + self.get_signature(value)

    def verify_signature(self, value, sig):
        """Verifies the signature for the given value."""
        key = self.derive_key()
        sig = base64_decode(sig)
        return self.algorithm.verify_signature(key, value, sig)

    def unsign(self, signed_value):
        """Unsigns the given string."""
        signed_value = want_bytes(signed_value)
        sep = want_bytes(self.sep)
        if sep not in signed_value:
            raise BadSignature('No %r found in value' % self.sep)
        # The signature is everything after the *last* separator.
        value, sig = signed_value.rsplit(sep, 1)
        if self.verify_signature(value, sig):
            return value
        raise BadSignature('Signature %r does not match' % sig,
                           payload=value)

    def validate(self, signed_value):
        """Just validates the given signed value.  Returns `True` if the
        signature exists and is valid, `False` otherwise."""
        try:
            self.unsign(signed_value)
            return True
        except BadSignature:
            return False
class TimestampSigner(Signer):
    """Works like the regular :class:`Signer` but also records the time
    of the signing and can be used to expire signatures.  The unsign
    method can raise a :exc:`SignatureExpired` exception if the unsigning
    failed because the signature is expired.  This exception is a
    subclass of :exc:`BadSignature`.
    """

    def get_timestamp(self):
        """Returns the current timestamp.  This implementation returns
        the seconds since 1/1/2011.  The function must return an integer.
        """
        return int(time.time() - EPOCH)

    def timestamp_to_datetime(self, ts):
        """Used to convert the timestamp from `get_timestamp` into a
        datetime object.
        """
        return datetime.utcfromtimestamp(ts + EPOCH)

    def sign(self, value):
        """Signs the given string and also attaches a time information."""
        value = want_bytes(value)
        timestamp = base64_encode(int_to_bytes(self.get_timestamp()))
        sep = want_bytes(self.sep)
        value = value + sep + timestamp
        return value + sep + self.get_signature(value)

    def unsign(self, value, max_age=None, return_timestamp=False):
        """Works like the regular :meth:`~Signer.unsign` but can also
        validate the time.  See the base docstring of the class for
        the general behavior.  If `return_timestamp` is set to `True`
        the timestamp of the signature will be returned as naive
        :class:`datetime.datetime` object in UTC.
        """
        try:
            result = Signer.unsign(self, value)
            sig_error = None
        except BadSignature as e:
            sig_error = e
            result = e.payload or b''
        sep = want_bytes(self.sep)
        # If there is no timestamp in the result there is something
        # seriously wrong.  In case there was a signature error, we raise
        # that one directly, otherwise we have a weird situation in which
        # we shouldn't have come except someone uses a time-based
        # serializer on non-timestamp data, so catch that.
        # (Idiom fix: ``sep not in result`` instead of ``not sep in``.)
        if sep not in result:
            if sig_error:
                raise sig_error
            raise BadTimeSignature('timestamp missing', payload=result)
        value, timestamp = result.rsplit(sep, 1)
        try:
            timestamp = bytes_to_int(base64_decode(timestamp))
        except Exception:
            timestamp = None
        # Signature is *not* okay.  Raise a proper error now that we have
        # split the value and the timestamp.
        if sig_error is not None:
            raise BadTimeSignature(text_type(sig_error), payload=value,
                                   date_signed=timestamp)
        # Signature was okay but the timestamp is actually not there or
        # malformed.  Should not happen, but we handle it nonetheless.
        if timestamp is None:
            raise BadTimeSignature('Malformed timestamp', payload=value)
        # Check timestamp is not older than max_age.
        if max_age is not None:
            age = self.get_timestamp() - timestamp
            if age > max_age:
                raise SignatureExpired(
                    'Signature age %s > %s seconds' % (age, max_age),
                    payload=value,
                    date_signed=self.timestamp_to_datetime(timestamp))
        if return_timestamp:
            return value, self.timestamp_to_datetime(timestamp)
        return value

    def validate(self, signed_value, max_age=None):
        """Just validates the given signed value.  Returns `True` if the
        signature exists and is valid, `False` otherwise."""
        try:
            self.unsign(signed_value, max_age=max_age)
            return True
        except BadSignature:
            return False
class Serializer(object):
    """This class provides a serialization interface on top of the
    signer.  It provides a similar API to json/pickle and other modules
    but is slightly differently structured internally.  If you want to
    change the underlying implementation for parsing and loading you have
    to override the :meth:`load_payload` and :meth:`dump_payload`
    functions.

    This implementation uses simplejson if available for dumping and
    loading and will fall back to the standard library's json module if
    it's not available.

    Starting with 0.14 you do not need to subclass this class in order to
    switch out or customize the :class:`Signer`.  You can instead also
    pass a different class to the constructor as well as keyword
    arguments as dictionary that should be forwarded::

        s = Serializer(signer_kwargs={'key_derivation': 'hmac'})

    .. versionchanged:: 0.14:
       The `signer` and `signer_kwargs` parameters were added to the
       constructor.
    """

    #: If a serializer module or class is not passed to the constructor
    #: this one is picked up.  This currently defaults to :mod:`json`.
    default_serializer = json

    #: The default :class:`Signer` class that is being used by this
    #: serializer.
    #:
    #: .. versionadded:: 0.14
    default_signer = Signer

    def __init__(self, secret_key, salt=b'itsdangerous', serializer=None,
                 signer=None, signer_kwargs=None):
        self.secret_key = want_bytes(secret_key)
        self.salt = want_bytes(salt)
        if serializer is None:
            serializer = self.default_serializer
        self.serializer = serializer
        self.is_text_serializer = is_text_serializer(serializer)
        if signer is None:
            signer = self.default_signer
        self.signer = signer
        self.signer_kwargs = signer_kwargs or {}

    def load_payload(self, payload, serializer=None):
        """Loads the encoded object.  This function raises
        :class:`BadPayload` if the payload is not valid.  The
        `serializer` parameter can be used to override the serializer
        stored on the class.  The encoded payload is always byte based.
        """
        if serializer is None:
            serializer = self.serializer
            is_text = self.is_text_serializer
        else:
            is_text = is_text_serializer(serializer)
        try:
            if is_text:
                payload = payload.decode('utf-8')
            return serializer.loads(payload)
        except Exception as e:
            # Typo fix in the error message: "ocurred" -> "occurred".
            raise BadPayload('Could not load the payload because an '
                             'exception occurred on unserializing the data',
                             original_error=e)

    def dump_payload(self, obj):
        """Dumps the encoded object.  The return value is always a
        bytestring.  If the internal serializer is text based the value
        will automatically be encoded to utf-8.
        """
        return want_bytes(self.serializer.dumps(obj))

    def make_signer(self, salt=None):
        """A method that creates a new instance of the signer to be used.
        The default implementation uses the :class:`Signer` baseclass.
        """
        if salt is None:
            salt = self.salt
        return self.signer(self.secret_key, salt=salt, **self.signer_kwargs)

    def dumps(self, obj, salt=None):
        """Returns a signed string serialized with the internal
        serializer.  The return value can be either a byte or unicode
        string depending on the format of the internal serializer.
        """
        payload = want_bytes(self.dump_payload(obj))
        rv = self.make_signer(salt).sign(payload)
        if self.is_text_serializer:
            rv = rv.decode('utf-8')
        return rv

    def dump(self, obj, f, salt=None):
        """Like :meth:`dumps` but dumps into a file.  The file handle has
        to be compatible with what the internal serializer expects.
        """
        f.write(self.dumps(obj, salt))

    def loads(self, s, salt=None):
        """Reverse of :meth:`dumps`, raises :exc:`BadSignature` if the
        signature validation fails.
        """
        s = want_bytes(s)
        return self.load_payload(self.make_signer(salt).unsign(s))

    def load(self, f, salt=None):
        """Like :meth:`loads` but loads from a file."""
        return self.loads(f.read(), salt)

    def loads_unsafe(self, s, salt=None):
        """Like :meth:`loads` but without verifying the signature.  This
        is potentially very dangerous to use depending on how your
        serializer works.  The return value is ``(signature_okay,
        payload)`` instead of just the payload.  The first item will be a
        boolean that indicates if the signature is okay (``True``) or if
        it failed.  This function never fails.

        Use it for debugging only and if you know that your serializer
        module is not exploitable (eg: do not use it with a pickle
        serializer).

        .. versionadded:: 0.15
        """
        return self._loads_unsafe_impl(s, salt)

    def _loads_unsafe_impl(self, s, salt, load_kwargs=None,
                           load_payload_kwargs=None):
        """Lowlevel helper function to implement :meth:`loads_unsafe` in
        serializer subclasses.
        """
        try:
            return True, self.loads(s, salt=salt, **(load_kwargs or {}))
        except BadSignature as e:
            if e.payload is None:
                return False, None
            try:
                return False, self.load_payload(e.payload,
                                                **(load_payload_kwargs or {}))
            except BadPayload:
                return False, None

    def load_unsafe(self, f, *args, **kwargs):
        """Like :meth:`loads_unsafe` but loads from a file.

        .. versionadded:: 0.15
        """
        return self.loads_unsafe(f.read(), *args, **kwargs)
class TimedSerializer(Serializer):
    """Uses the :class:`TimestampSigner` instead of the default
    :meth:`Signer`.
    """

    default_signer = TimestampSigner

    def loads(self, s, max_age=None, return_timestamp=False, salt=None):
        """Reverse of :meth:`dumps`, raises :exc:`BadSignature` if the
        signature validation fails.  If a `max_age` is provided it will
        ensure the signature is not older than that time in seconds.  In
        case the signature is outdated, :exc:`SignatureExpired` is raised
        which is a subclass of :exc:`BadSignature`.  All arguments are
        forwarded to the signer's :meth:`~TimestampSigner.unsign` method.
        """
        signer = self.make_signer(salt)
        base64d, timestamp = signer.unsign(s, max_age,
                                           return_timestamp=True)
        payload = self.load_payload(base64d)
        return (payload, timestamp) if return_timestamp else payload

    def loads_unsafe(self, s, max_age=None, salt=None):
        # Forward max_age to loads(); payload decoding takes no extras.
        return self._loads_unsafe_impl(s, salt,
                                       load_kwargs={'max_age': max_age},
                                       load_payload_kwargs={})
class JSONWebSignatureSerializer(Serializer):
    """This serializer implements JSON Web Signature (JWS) support. Only
    supports the JWS Compact Serialization.
    """

    #: Supported signing algorithms keyed by their JWS ``alg`` name.
    #: ``'none'`` disables signing entirely and must be chosen explicitly.
    jws_algorithms = {
        'HS256': HMACAlgorithm(hashlib.sha256),
        'HS384': HMACAlgorithm(hashlib.sha384),
        'HS512': HMACAlgorithm(hashlib.sha512),
        'none': NoneAlgorithm(),
    }

    #: The default algorithm to use for signature generation
    default_algorithm = 'HS256'

    default_serializer = compact_json

    def __init__(self, secret_key, salt=None, serializer=None,
                 signer=None, signer_kwargs=None, algorithm_name=None):
        Serializer.__init__(self, secret_key, salt, serializer,
                            signer, signer_kwargs)
        if algorithm_name is None:
            algorithm_name = self.default_algorithm
        self.algorithm_name = algorithm_name
        self.algorithm = self.make_algorithm(algorithm_name)

    def load_payload(self, payload, return_header=False):
        """Decode a compact-serialized ``header.payload`` pair.

        Raises :exc:`BadPayload` when the ``.`` separator is missing, when
        base64 decoding fails, or when the header is not a JSON object.
        """
        payload = want_bytes(payload)
        if b'.' not in payload:
            raise BadPayload('No "." found in value')
        # Compact serialization: base64(header) '.' base64(payload).
        base64d_header, base64d_payload = payload.split(b'.', 1)
        try:
            json_header = base64_decode(base64d_header)
            json_payload = base64_decode(base64d_payload)
        except Exception as e:
            raise BadPayload('Could not base64 decode the payload because of '
                             'an exception', original_error=e)
        # The header is always JSON, regardless of the payload serializer.
        header = Serializer.load_payload(self, json_header,
                                         serializer=json)
        if not isinstance(header, dict):
            raise BadPayload('Header payload is not a JSON object')
        payload = Serializer.load_payload(self, json_payload)
        if return_header:
            return payload, header
        return payload

    def dump_payload(self, header, obj):
        # NOTE: signature intentionally differs from Serializer.dump_payload;
        # JWS encodes the header alongside the payload.
        base64d_header = base64_encode(self.serializer.dumps(header))
        base64d_payload = base64_encode(self.serializer.dumps(obj))
        return base64d_header + b'.' + base64d_payload

    def make_algorithm(self, algorithm_name):
        """Look up an algorithm object; raises NotImplementedError for
        names missing from :attr:`jws_algorithms`."""
        try:
            return self.jws_algorithms[algorithm_name]
        except KeyError:
            raise NotImplementedError('Algorithm not supported')

    def make_signer(self, salt=None, algorithm=None):
        if salt is None:
            salt = self.salt
        # Without a salt, skip key derivation entirely; otherwise leave the
        # signer's default derivation in place.
        key_derivation = 'none' if salt is None else None
        if algorithm is None:
            algorithm = self.algorithm
        return self.signer(self.secret_key, salt=salt, sep='.',
                           key_derivation=key_derivation, algorithm=algorithm)

    def make_header(self, header_fields):
        # Copy caller-supplied fields so the input dict is never mutated,
        # then force 'alg' to the configured algorithm.
        header = header_fields.copy() if header_fields else {}
        header['alg'] = self.algorithm_name
        return header

    def dumps(self, obj, salt=None, header_fields=None):
        """Like :meth:`~Serializer.dumps` but creates a JSON Web Signature. It
        also allows for specifying additional fields to be included in the JWS
        Header.
        """
        header = self.make_header(header_fields)
        signer = self.make_signer(salt, self.algorithm)
        return signer.sign(self.dump_payload(header, obj))

    def loads(self, s, salt=None, return_header=False):
        """Reverse of :meth:`dumps`. If requested via `return_header` it will
        return a tuple of payload and header.
        """
        payload, header = self.load_payload(
            self.make_signer(salt, self.algorithm).unsign(want_bytes(s)),
            return_header=True)
        # Reject tokens whose header claims a different algorithm than the
        # one this serializer was configured with.
        if header.get('alg') != self.algorithm_name:
            raise BadSignature('Algorithm mismatch')
        if return_header:
            return payload, header
        return payload

    def loads_unsafe(self, s, salt=None, return_header=False):
        kwargs = {'return_header': return_header}
        return self._loads_unsafe_impl(s, salt, kwargs, kwargs)
class TimedJSONWebSignatureSerializer(JSONWebSignatureSerializer):
    """Works like the regular :class:`JSONWebSignatureSerializer` but also
    records the time of the signing and can be used to expire signatures.
    JWS currently does not specify this behavior but it mentions a possible
    extension like this in the spec. The expiry date is encoded into the
    header similarly as specified in `draft-ietf-oauth-json-web-token
    <http://self-issued.info/docs/draft-ietf-oauth-json-web-token.html#expDef>`_.
    The unsign method can raise a :exc:`SignatureExpired` exception if the
    unsigning failed because the signature is expired. This exception is a
    subclass of :exc:`BadSignature`.
    """

    #: Default lifetime of a signature in seconds (one hour).
    DEFAULT_EXPIRES_IN = 3600

    def __init__(self, secret_key, expires_in=None, **kwargs):
        JSONWebSignatureSerializer.__init__(self, secret_key, **kwargs)
        if expires_in is None:
            expires_in = self.DEFAULT_EXPIRES_IN
        self.expires_in = expires_in

    def make_header(self, header_fields):
        header = JSONWebSignatureSerializer.make_header(self, header_fields)
        # Stamp issued-at ('iat') and expiry ('exp') as epoch seconds.
        iat = self.now()
        exp = iat + self.expires_in
        header['iat'] = iat
        header['exp'] = exp
        return header

    def loads(self, s, salt=None, return_header=False):
        payload, header = JSONWebSignatureSerializer.loads(
            self, s, salt, return_header=True)
        # A token without a positive numeric 'exp' claim is rejected outright.
        if 'exp' not in header:
            raise BadSignature('Missing expiry date', payload=payload)
        if not (isinstance(header['exp'], number_types)
                and header['exp'] > 0):
            raise BadSignature('expiry date is not an IntDate',
                               payload=payload)
        if header['exp'] < self.now():
            raise SignatureExpired('Signature expired', payload=payload,
                                   date_signed=self.get_issue_date(header))
        if return_header:
            return payload, header
        return payload

    def get_issue_date(self, header):
        # Returns None when 'iat' is absent or non-numeric.
        rv = header.get('iat')
        if isinstance(rv, number_types):
            return datetime.utcfromtimestamp(int(rv))

    def now(self):
        # Current time as integer epoch seconds; overridable in subclasses.
        return int(time.time())
class URLSafeSerializerMixin(object):
    """Mixin that makes a serializer emit URL-safe strings: the payload is
    base64 encoded, and zlib compressed beforehand when that makes it
    shorter.  A compressed payload is flagged with a leading ``'.'``.
    """

    def load_payload(self, payload):
        # A leading '.' marks a payload that was zlib compressed before
        # base64 encoding.
        was_compressed = payload.startswith(b'.')
        if was_compressed:
            payload = payload[1:]
        try:
            json = base64_decode(payload)
        except Exception as e:
            raise BadPayload('Could not base64 decode the payload because of '
                             'an exception', original_error=e)
        if was_compressed:
            try:
                json = zlib.decompress(json)
            except Exception as e:
                raise BadPayload('Could not zlib decompress the payload before '
                                 'decoding the payload', original_error=e)
        return super(URLSafeSerializerMixin, self).load_payload(json)

    def dump_payload(self, obj):
        json = super(URLSafeSerializerMixin, self).dump_payload(obj)
        compressed = zlib.compress(json)
        # Only keep the compressed form when it saves more than one byte
        # (one byte is spent on the leading '.' marker).
        if len(compressed) < (len(json) - 1):
            return b'.' + base64_encode(compressed)
        return base64_encode(json)
class URLSafeSerializer(URLSafeSerializerMixin, Serializer):
    """Works like :class:`Serializer` but dumps and loads into a URL
    safe string consisting of the upper and lowercase character of the
    alphabet as well as ``'_'``, ``'-'`` and ``'.'``.
    """
    # Compact JSON keeps the base64-encoded output as short as possible.
    default_serializer = compact_json
class URLSafeTimedSerializer(URLSafeSerializerMixin, TimedSerializer):
    """Works like :class:`TimedSerializer` but dumps and loads into a URL
    safe string consisting of the upper and lowercase character of the
    alphabet as well as ``'_'``, ``'-'`` and ``'.'``.
    """
    # Compact JSON keeps the base64-encoded output as short as possible.
    default_serializer = compact_json
| apache-2.0 |
dbs/rdflib | rdflib/plugins/sparql/results/txtresults.py | 3 | 1792 |
from rdflib import URIRef, BNode, Literal
from rdflib.query import ResultSerializer
def _termString(t, namespace_manager):
if t == None:
return "-"
if namespace_manager:
if isinstance(t, URIRef):
return namespace_manager.normalizeUri(t)
elif isinstance(t, BNode):
return t.n3()
elif isinstance(t, Literal):
return t._literal_n3(qname_callback=namespace_manager.normalizeUri)
else:
return t.n3()
class TXTResultSerializer(ResultSerializer):
    """
    A write only QueryResult serializer for text/ascii tables
    """
    def serialize(self, stream, encoding, namespace_manager = None):
        """
        Write a text table of SELECT query results to *stream*.

        NOTE(review): the empty-result path *returns* a string instead of
        writing to the stream, and *encoding* is unused -- confirm callers
        handle both behaviors.
        """
        def c(s, w):
            """
            center the string s in w wide string
            """
            w -= len(s)
            h1 = h2 = w // 2
            if w % 2: h2 += 1  # odd leftover space goes on the right
            return " " * h1 + s + " " * h2
        # Only SELECT results have tabular variable bindings to print.
        if self.result.type!='SELECT':
            raise Exception("Can only pretty print SELECT results!")
        if not self.result:
            return "(no results)\n"
        else:
            keys = sorted(self.result.vars)
            maxlen = [0] * len(keys)
            # Pre-render every cell so column widths can be computed first.
            b = [[_termString(r[k], namespace_manager) for k in keys] for r in self.result]
            for r in b:
                for i in range(len(keys)):
                    maxlen[i] = max(maxlen[i], len(r[i]))
            # Header row: variable names centered over their columns.
            stream.write(
                "|".join([c(k, maxlen[i]) for i, k in enumerate(keys)]) + "\n")
            # Separator sized to columns plus the '|' dividers.
            stream.write("-" * (len(maxlen)+sum(maxlen)) + "\n")
            # Body rows, sorted lexicographically, left-justified per column.
            for r in sorted(b):
                stream.write("|".join(
                    [t + " " * (i - len(t)) for i, t in zip(maxlen, r)]) + "\n")
| bsd-3-clause |
abzaloid/maps | django-project/lib/python2.7/site-packages/crispy_forms/tests/test_layout_objects.py | 10 | 14504 | # -*- coding: utf-8 -*-
import re
from django import forms
from django.template import loader, Context
from django.utils.translation import ugettext as _
from django.utils.translation import activate, deactivate
from .base import CrispyTestCase
from .forms import CheckboxesTestForm, TestForm
from crispy_forms.bootstrap import (
PrependedAppendedText, AppendedText, PrependedText, InlineRadios,
Tab, TabHolder, AccordionGroup, Accordion, Alert, InlineCheckboxes,
FieldWithButtons, StrictButton
)
from crispy_forms.helper import FormHelper
from crispy_forms.layout import (
Layout, HTML, Field, MultiWidgetField
)
from crispy_forms.utils import render_crispy_form
class TestLayoutObjects(CrispyTestCase):

    def test_multiwidget_field(self):
        # Each sub-widget of the MultiWidget gets its own attrs dict.
        template = loader.get_template_from_string(u"""
            {% load crispy_forms_tags %}
            {% crispy form %}
        """)
        test_form = TestForm()
        test_form.helper = FormHelper()
        test_form.helper.layout = Layout(
            MultiWidgetField(
                'datetime_field',
                attrs=(
                    {'rel': 'test_dateinput'},
                    {'rel': 'test_timeinput', 'style': 'width: 30px;', 'type': "hidden"}
                )
            )
        )
        c = Context({'form': test_form})
        html = template.render(c)
        self.assertEqual(html.count('class="dateinput'), 1)
        self.assertEqual(html.count('rel="test_dateinput"'), 1)
        self.assertEqual(html.count('rel="test_timeinput"'), 1)
        self.assertEqual(html.count('style="width: 30px;"'), 1)
        self.assertEqual(html.count('type="hidden"'), 1)

    def test_field_type_hidden(self):
        # Field(..., type="hidden") overrides the widget input type and
        # data_* kwargs become data-* attributes.
        template = loader.get_template_from_string(u"""
            {% load crispy_forms_tags %}
            {% crispy test_form %}
        """)
        test_form = TestForm()
        test_form.helper = FormHelper()
        test_form.helper.layout = Layout(
            Field('email', type="hidden", data_test=12),
            Field('datetime_field'),
        )
        c = Context({
            'test_form': test_form,
        })
        html = template.render(c)
        # Check form parameters
        self.assertEqual(html.count('data-test="12"'), 1)
        self.assertEqual(html.count('name="email"'), 1)
        self.assertEqual(html.count('class="dateinput'), 1)
        self.assertEqual(html.count('class="timeinput'), 1)

    def test_field_wrapper_class(self):
        # wrapper_class is appended to the template pack's group class.
        form = TestForm()
        form.helper = FormHelper()
        form.helper.layout = Layout(Field('email', wrapper_class="testing"))
        html = render_crispy_form(form)
        if self.current_template_pack == 'bootstrap':
            self.assertEqual(html.count('class="control-group testing"'), 1)
        elif self.current_template_pack == 'bootstrap3':
            self.assertEqual(html.count('class="form-group testing"'), 1)

    def test_html_with_carriage_returns(self):
        # Raw HTML blocks must keep their newlines when rendered.
        test_form = TestForm()
        test_form.helper = FormHelper()
        test_form.helper.layout = Layout(
            HTML("""
                if (a==b){
                    // some comment
                    a+1;
                    foo();
                }
            """)
        )
        html = render_crispy_form(test_form)
        if self.current_template_pack == 'uni_form':
            self.assertEqual(html.count('\n'), 22)
        else:
            self.assertEqual(html.count('\n'), 24)

    def test_i18n(self):
        # HTML content is run through gettext when a translation is active.
        activate('es')
        form = TestForm()
        form.helper = FormHelper()
        form.helper.layout = Layout(
            HTML(_("Enter a valid value."))
        )
        html = render_crispy_form(form)
        self.assertTrue(u"Introduzca un valor correcto" in html)
        deactivate()
class TestBootstrapLayoutObjects(CrispyTestCase):

    def test_custom_django_widget(self):
        class CustomRadioSelect(forms.RadioSelect):
            pass

        class CustomCheckboxSelectMultiple(forms.CheckboxSelectMultiple):
            pass

        # Make sure an inherited RadioSelect gets rendered as its parent
        form = CheckboxesTestForm()
        form.fields['inline_radios'].widget = CustomRadioSelect()
        form.helper = FormHelper()
        form.helper.layout = Layout('inline_radios')
        html = render_crispy_form(form)
        self.assertTrue('class="radio"' in html)

        # Make sure an inherited CheckboxSelectMultiple gets rendered as its parent
        form.fields['checkboxes'].widget = CustomCheckboxSelectMultiple()
        form.helper.layout = Layout('checkboxes')
        html = render_crispy_form(form)
        self.assertTrue('class="checkbox"' in html)

    def test_prepended_appended_text(self):
        # Add-on markup differs between bootstrap 2 and 3 template packs.
        test_form = TestForm()
        test_form.helper = FormHelper()
        test_form.helper.layout = Layout(
            PrependedAppendedText('email', '@', 'gmail.com'),
            AppendedText('password1', '#'),
            PrependedText('password2', '$'),
        )
        html = render_crispy_form(test_form)

        # Check form parameters
        if self.current_template_pack == 'bootstrap':
            self.assertEqual(html.count('<span class="add-on">@</span>'), 1)
            self.assertEqual(html.count('<span class="add-on">gmail.com</span>'), 1)
            self.assertEqual(html.count('<span class="add-on">#</span>'), 1)
            self.assertEqual(html.count('<span class="add-on">$</span>'), 1)
        if self.current_template_pack == 'bootstrap3':
            self.assertEqual(html.count('<span class="input-group-addon">@</span>'), 1)
            self.assertEqual(html.count('<span class="input-group-addon">gmail.com</span>'), 1)
            self.assertEqual(html.count('<span class="input-group-addon">#</span>'), 1)
            self.assertEqual(html.count('<span class="input-group-addon">$</span>'), 1)

    def test_inline_radios(self):
        # Two choices on the test form -> two inline radio inputs.
        test_form = CheckboxesTestForm()
        test_form.helper = FormHelper()
        test_form.helper.layout = Layout(
            InlineRadios('inline_radios')
        )
        html = render_crispy_form(test_form)
        if self.current_template_pack == 'bootstrap':
            self.assertEqual(html.count('radio inline"'), 2)
        elif self.current_template_pack == 'bootstrap3':
            self.assertEqual(html.count('radio-inline"'), 2)

    def test_accordion_and_accordiongroup(self):
        test_form = TestForm()
        test_form.helper = FormHelper()
        test_form.helper.layout = Layout(
            Accordion(
                AccordionGroup(
                    'one',
                    'first_name'
                ),
                AccordionGroup(
                    'two',
                    'password1',
                    'password2'
                )
            )
        )
        html = render_crispy_form(test_form)

        # Wrapper markup depends on the template pack; field rendering does not.
        if self.current_template_pack == 'bootstrap':
            self.assertEqual(html.count('<div class="accordion"'), 1)
            self.assertEqual(html.count('<div class="accordion-group">'), 2)
            self.assertEqual(html.count('<div class="accordion-heading">'), 2)
        else:
            self.assertEqual(html.count('<div class="panel panel-default"'), 2)
            self.assertEqual(html.count('<div class="panel-group"'), 1)
            self.assertEqual(html.count('<div class="panel-heading">'), 2)
        self.assertEqual(html.count('<div id="one"'), 1)
        self.assertEqual(html.count('<div id="two"'), 1)
        self.assertEqual(html.count('name="first_name"'), 1)
        self.assertEqual(html.count('name="password1"'), 1)
        self.assertEqual(html.count('name="password2"'), 1)

    def test_alert(self):
        # Default Alert is dismissible (renders a close button).
        test_form = TestForm()
        test_form.helper = FormHelper()
        test_form.helper.layout = Layout(
            Alert(content='Testing...')
        )
        html = render_crispy_form(test_form)
        self.assertEqual(html.count('<div class="alert"'), 1)
        self.assertEqual(html.count('<button type="button" class="close"'), 1)
        self.assertEqual(html.count('Testing...'), 1)

    def test_alert_block(self):
        # block=True adds the 'alert-block' modifier class.
        test_form = TestForm()
        test_form.helper = FormHelper()
        test_form.helper.layout = Layout(
            Alert(content='Testing...', block=True)
        )
        html = render_crispy_form(test_form)
        self.assertEqual(html.count('<div class="alert alert-block"'), 1)
        self.assertEqual(html.count('Testing...'), 1)

    def test_tab_and_tab_holder(self):
        test_form = TestForm()
        test_form.helper = FormHelper()
        test_form.helper.layout = Layout(
            TabHolder(
                Tab('one',
                    'first_name',
                    css_id="custom-name",
                    css_class="first-tab-class"
                ),
                Tab('two',
                    'password1',
                    'password2'
                )
            )
        )
        html = render_crispy_form(test_form)

        # First tab is active and honors the custom css_id/css_class.
        self.assertEqual(
            html.count(
                '<li class="tab-pane active"><a href="#custom-name" data-toggle="tab">One</a></li>'
            ),
            1
        )
        self.assertEqual(html.count('class="tab-pane first-tab-class active"'), 1)
        self.assertEqual(html.count('<li class="tab-pane'), 2)
        self.assertEqual(html.count('tab-pane'), 4)
        self.assertEqual(html.count('<div id="custom-name"'), 1)
        self.assertEqual(html.count('<div id="two"'), 1)
        self.assertEqual(html.count('name="first_name"'), 1)
        self.assertEqual(html.count('name="password1"'), 1)
        self.assertEqual(html.count('name="password2"'), 1)

    def test_tab_helper_reuse(self):
        # this is a proper form, according to the docs.
        # note that the helper is a class property here,
        # shared between all instances
        class TestForm(forms.Form):
            val1 = forms.CharField(required=False)
            val2 = forms.CharField(required=True)
            helper = FormHelper()
            helper.layout = Layout(
                TabHolder(
                    Tab('one', 'val1',),
                    Tab('two', 'val2',)
                )
            )

        # first render of form => everything is fine
        test_form = TestForm()
        html = render_crispy_form(test_form)

        # second render of form => first tab should be active,
        # but not duplicate class
        test_form = TestForm()
        html = render_crispy_form(test_form)
        self.assertEqual(html.count('class="tab-pane active active"'), 0)

        # render a new form, now with errors
        test_form = TestForm(data={'val1': 'foo'})
        html = render_crispy_form(test_form)
        # tab 1 should not be active
        self.assertEqual(html.count('<div id="one" \n class="tab-pane active'), 0)
        # tab 2 should be active
        self.assertEqual(html.count('<div id="two" \n class="tab-pane active'), 1)

    def test_radio_attrs(self):
        # Widget attrs survive crispy rendering of radios and checkboxes.
        form = CheckboxesTestForm()
        form.fields['inline_radios'].widget.attrs = {'class': "first"}
        form.fields['checkboxes'].widget.attrs = {'class': "second"}
        html = render_crispy_form(form)
        self.assertTrue('class="first"' in html)
        self.assertTrue('class="second"' in html)

    def test_field_with_buttons(self):
        form = TestForm()
        form.helper = FormHelper()
        form.helper.layout = Layout(
            FieldWithButtons(
                Field('password1', css_class="span4"),
                StrictButton("Go!", css_id="go-button"),
                StrictButton("No!", css_class="extra"),
                StrictButton("Test", type="submit", name="whatever", value="something"),
                css_class="extra",
                autocomplete="off"
            )
        )
        html = render_crispy_form(form)

        form_group_class = 'control-group'
        if self.current_template_pack == 'bootstrap3':
            form_group_class = 'form-group'

        self.assertEqual(html.count('class="%s extra"' % form_group_class), 1)
        self.assertEqual(html.count('autocomplete="off"'), 1)
        self.assertEqual(html.count('class="span4'), 1)
        self.assertEqual(html.count('id="go-button"'), 1)
        self.assertEqual(html.count("Go!"), 1)
        self.assertEqual(html.count("No!"), 1)
        self.assertEqual(html.count('class="btn"'), 2)
        self.assertEqual(html.count('class="btn extra"'), 1)
        self.assertEqual(html.count('type="submit"'), 1)
        self.assertEqual(html.count('name="whatever"'), 1)
        self.assertEqual(html.count('value="something"'), 1)

        if self.current_template_pack == 'bootstrap':
            self.assertEqual(html.count('class="input-append"'), 1)
        elif self.current_template_pack == 'bootstrap3':
            self.assertEqual(html.count('class="input-group-btn'), 1)

        # Make sure white spaces between buttons are there in bootstrap
        self.assertEqual(len(re.findall(r'</button> <', html)), 3)

    def test_hidden_fields(self):
        form = TestForm()
        # All fields hidden
        for field in form.fields:
            form.fields[field].widget = forms.HiddenInput()
        form.helper = FormHelper()
        form.helper.layout = Layout(
            AppendedText('password1', 'foo'),
            PrependedText('password2', 'bar'),
            PrependedAppendedText('email', 'bar'),
            InlineCheckboxes('first_name'),
            InlineRadios('last_name'),
        )
        html = render_crispy_form(form)
        # Hidden inputs render bare: no add-ons, no labels.
        self.assertEqual(html.count("<input"), 5)
        self.assertEqual(html.count('type="hidden"'), 5)
        self.assertEqual(html.count('<label'), 0)

    def test_multiplecheckboxes(self):
        test_form = CheckboxesTestForm()
        html = render_crispy_form(test_form)
        self.assertEqual(html.count('checked="checked"'), 6)

        # Wrapping the second layout field in InlineCheckboxes afterwards.
        test_form.helper = FormHelper(test_form)
        test_form.helper[1].wrap(InlineCheckboxes, inline=True)
        html = render_crispy_form(test_form)
        if self.current_template_pack == 'bootstrap':
            self.assertEqual(html.count('checkbox inline"'), 3)
            self.assertEqual(html.count('inline"'), 3)
        elif self.current_template_pack == 'bootstrap3':
            self.assertEqual(html.count('checkbox-inline"'), 3)
            self.assertEqual(html.count('inline="True"'), 4)
| mit |
manazhao/tf_recsys | tensorflow/python/kernel_tests/softplus_op_test.py | 82 | 4807 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for Softplus and SoftplusGrad."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import constant_op
from tensorflow.python.ops import gradient_checker
from tensorflow.python.ops import gradients_impl
from tensorflow.python.ops import nn_ops
import tensorflow.python.ops.nn_grad # pylint: disable=unused-import
from tensorflow.python.platform import test
class SoftplusTest(test.TestCase):

  def _npSoftplus(self, np_features):
    """NumPy reference: softplus(x) = log(1 + exp(x)), computed in a
    numerically stable way as logaddexp(0, x)."""
    np_features = np.asarray(np_features)
    zero = np.asarray(0).astype(np_features.dtype)
    return np.logaddexp(zero, np_features)

  def _testSoftplus(self, np_features, use_gpu=False):
    """Check the TF op against the NumPy reference on one input, and that
    the output is strictly positive with the expected shape."""
    np_softplus = self._npSoftplus(np_features)
    with self.test_session(use_gpu=use_gpu):
      softplus = nn_ops.softplus(np_features)
      tf_softplus = softplus.eval()
    self.assertAllCloseAccordingToType(np_softplus, tf_softplus)
    # softplus(x) > 0 for all finite x.
    self.assertTrue(np.all(tf_softplus > 0))
    self.assertShapeEqual(np_softplus, softplus)

  def testNumbers(self):
    """Exercise CPU and GPU kernels on ordinary values and on values near
    the float type's epsilon boundaries."""
    for t in [np.float16, np.float32, np.float64]:
      self._testSoftplus(
          np.array([[-9, 7, -5, 3, -1], [1, -3, 5, -7, 9]]).astype(t),
          use_gpu=False)
      self._testSoftplus(
          np.array([[-9, 7, -5, 3, -1], [1, -3, 5, -7, 9]]).astype(t),
          use_gpu=True)
      log_eps = np.log(np.finfo(t).eps)
      one = t(1)
      ten = t(10)
      self._testSoftplus(
          [
              log_eps, log_eps - one, log_eps + one, log_eps - ten,
              log_eps + ten, -log_eps, -log_eps - one, -log_eps + one,
              -log_eps - ten, -log_eps + ten
          ],
          use_gpu=False)
      # BUG FIX: a missing comma previously merged 'log_eps + ten' and
      # '-log_eps' into the single element 'log_eps + ten - log_eps',
      # so the GPU run covered different values than the CPU run above.
      self._testSoftplus(
          [
              log_eps, log_eps - one, log_eps + one, log_eps - ten,
              log_eps + ten, -log_eps, -log_eps - one, -log_eps + one,
              -log_eps - ten, -log_eps + ten
          ],
          use_gpu=True)

  def testGradient(self):
    """First-order gradient check against numerical differentiation."""
    with self.test_session():
      x = constant_op.constant(
          [-0.9, -0.7, -0.5, -0.3, -0.1, 0.1, 0.3, 0.5, 0.7, 0.9],
          shape=[2, 5],
          name="x")
      y = nn_ops.softplus(x, name="softplus")
      x_init = np.asarray(
          [[-0.9, -0.7, -0.5, -0.3, -0.1], [0.1, 0.3, 0.5, 0.7, 0.9]],
          dtype=np.float32,
          order="F")
      err = gradient_checker.compute_gradient_error(
          x, [2, 5], y, [2, 5], x_init_value=x_init)
    print("softplus (float) gradient err = ", err)
    self.assertLess(err, 1e-4)

  def testGradGrad(self):
    """Second-order gradient check."""
    with self.test_session():
      x = constant_op.constant(
          [-0.9, -0.7, -0.5, -0.3, -0.1, 0.1, 0.3, 0.5, 0.7, 0.9],
          shape=[2, 5],
          name="x")
      y = nn_ops.softplus(x, name="softplus")
      (grad,) = gradients_impl.gradients(y, x)
      x_init = np.asarray(
          [[-0.9, -0.7, -0.5, -0.3, -0.1], [0.1, 0.3, 0.5, 0.7, 0.9]],
          dtype=np.float32,
          order="F")
      err = gradient_checker.compute_gradient_error(
          x, [2, 5], grad, [2, 5], x_init_value=x_init)
    print("softplus (float) gradient of gradient err = ", err)
    self.assertLess(err, 5e-5)

  def testGradGradGrad(self):
    """Third-order gradient check."""
    with self.test_session():
      x = constant_op.constant(
          [-0.9, -0.7, -0.5, -0.3, -0.1, 0.1, 0.3, 0.5, 0.7, 0.9],
          shape=[2, 5],
          name="x")
      y = nn_ops.softplus(x, name="softplus")
      (grad,) = gradients_impl.gradients(y, x)
      (grad_grad,) = gradients_impl.gradients(grad, x)
      x_init = np.asarray(
          [[-0.9, -0.7, -0.5, -0.3, -0.1], [0.1, 0.3, 0.5, 0.7, 0.9]],
          dtype=np.float32,
          order="F")
      err = gradient_checker.compute_gradient_error(
          x, [2, 5], grad_grad, [2, 5], x_init_value=x_init)
    print("softplus (float) third-order gradient err = ", err)
    self.assertLess(err, 5e-5)

  def testWarnInts(self):
    # Running the op triggers address sanitizer errors, so we just make
    # sure the op can be constructed for an integer input without evaluating it.
    nn_ops.softplus(constant_op.constant(7))
# Allow running this test file directly: python softplus_op_test.py
if __name__ == "__main__":
  test.main()
| apache-2.0 |
163gal/Time-Line | timelinelib/wxgui/dialogs/eraseditor/view.py | 2 | 2804 | # Copyright (C) 2009, 2010, 2011, 2012, 2013, 2014, 2015 Rickard Lindberg, Roger Lindberg
#
# This file is part of Timeline.
#
# Timeline is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Timeline is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Timeline. If not, see <http://www.gnu.org/licenses/>.
import wx
from timelinelib.wxgui.dialogs.eraseditor.controller import ErasEditorDialogController
from timelinelib.wxgui.framework import Dialog
class ErasEditorDialog(Dialog):
    # NOTE: the docstring below is not documentation -- the Dialog framework
    # parses it as the widget layout definition (listbox 'lb_eras' plus the
    # Edit/Add/Remove/Close button row), so its content is functional and
    # must not be reworded.
    """
    <BoxSizerVertical>
        <ListBox name="lb_eras" border="ALL" proportion="1" height="250"
            event_EVT_LISTBOX_DCLICK="on_dclick"
        />
        <DialogButtonsEditAddRemoveCloseSizer border="LEFT|RIGHT|BOTTOM"
            event_EVT_BUTTON__ID_ADD="on_add"
            event_EVT_BUTTON__ID_REMOVE="on_remove"
            event_EVT_BUTTON__ID_EDIT="on_edit"
        />
    </BoxSizerVertical>
    """
    def __init__(self, parent, db, config):
        Dialog.__init__(self, ErasEditorDialogController, parent, {},
                        title=_("Edit Era's"), style=wx.DEFAULT_DIALOG_STYLE | wx.RESIZE_BORDER)
        self.controller.on_init(db, config)

    def SetEras(self, eras):
        # Populate the listbox; each era object is attached as client data.
        for era in eras:
            self.lb_eras.Append(era.get_name(), era)
        if len(eras) > 0:
            self.lb_eras.SetSelection(0)
        self._EnableDisableButtons()

    def GetSelectedEra(self):
        # Return the era object attached to the current listbox selection.
        return self.lb_eras.GetClientData(self.lb_eras.GetSelection())

    def UpdateEra(self, era):
        # Refresh the displayed name of the currently selected era.
        self.lb_eras.SetString(self.lb_eras.GetSelection(), era.get_name())

    def AppendEra(self, era):
        # Add a new era at the end of the list and select it.
        self.lb_eras.Append(era.get_name(), era)
        self.lb_eras.Select(self.lb_eras.GetCount() - 1)
        self._EnableDisableButtons()

    def RemoveEra(self, era):
        # Deletes the *selected* entry (the 'era' argument itself is unused
        # here -- presumably kept for interface symmetry; verify callers).
        # Afterwards select the successor, or the new last entry when the
        # deleted one was last.
        def select_era(inx):
            if self.lb_eras.GetCount() == inx:
                inx -= 1
            if inx >= 0:
                self.lb_eras.SetSelection(inx)
        inx = self.lb_eras.GetSelection()
        self.lb_eras.Delete(inx)
        select_era(inx)
        self._EnableDisableButtons()

    def _EnableDisableButtons(self):
        # Edit/Remove make no sense when the list is empty.
        if self.lb_eras.GetCount() == 0:
            self.btn_remove.Enable(False)
            self.btn_edit.Enable(False)
        else:
            self.btn_remove.Enable(True)
            self.btn_edit.Enable(True)
| gpl-3.0 |
iamOgunyinka/sproot | migrations/versions/6723848870f0_changed_examtaken_date_taken_from_date_.py | 1 | 2347 | """Changed ExamTaken.date_taken from Date to DateTime
Revision ID: 6723848870f0
Revises:
Create Date: 2017-11-24 14:25:44.434000
"""
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import mysql
# revision identifiers, used by Alembic.
revision = '6723848870f0'
down_revision = None
branch_labels = None
depends_on = None
def upgrade():
    """Apply the schema changes: widen ``exams_taken.date_taken`` to
    DateTime, store ``exams_taken.other_data`` as Text, and expose the
    MySQL ``TINYINT(1)`` flag columns as Boolean.
    """
    # ### commands auto generated by Alembic - please adjust! ###
    # (table, column, current type, new type, existing nullable)
    conversions = (
        ('courses', 'randomize_questions',
         mysql.TINYINT(display_width=1), sa.Boolean(), False),
        ('exams_taken', 'date_taken', sa.DATE(), sa.DateTime(), False),
        ('exams_taken', 'other_data', sa.BLOB(), sa.Text(), True),
        ('users', 'is_active_premium',
         mysql.TINYINT(display_width=1), sa.Boolean(), True),
        ('users', 'is_confirmed',
         mysql.TINYINT(display_width=1), sa.Boolean(), True),
    )
    for table, column, old_type, new_type, nullable in conversions:
        op.alter_column(table, column,
                        existing_type=old_type,
                        type_=new_type,
                        existing_nullable=nullable)
    # ### end Alembic commands ###
def downgrade():
    """Revert :func:`upgrade`: restore ``DATE``/``BLOB``/``TINYINT(1)``
    column types, in the reverse order of the upgrade.
    """
    # ### commands auto generated by Alembic - please adjust! ###
    # (table, column, current type, restored type, existing nullable)
    conversions = (
        ('users', 'is_confirmed',
         sa.Boolean(), mysql.TINYINT(display_width=1), True),
        ('users', 'is_active_premium',
         sa.Boolean(), mysql.TINYINT(display_width=1), True),
        ('exams_taken', 'other_data', sa.Text(), sa.BLOB(), True),
        ('exams_taken', 'date_taken', sa.DateTime(), sa.DATE(), False),
        ('courses', 'randomize_questions',
         sa.Boolean(), mysql.TINYINT(display_width=1), False),
    )
    for table, column, old_type, new_type, nullable in conversions:
        op.alter_column(table, column,
                        existing_type=old_type,
                        type_=new_type,
                        existing_nullable=nullable)
    # ### end Alembic commands ###
| apache-2.0 |
ericzundel/pants | tests/python/pants_test/bin/test_repro_ignore.py | 11 | 2229 | # coding=utf-8
# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import os
import unittest
from functools import partial
from pants.base.build_root import BuildRoot
from pants.bin.repro import Reproducer
from pants.fs.archive import TGZ
from pants.util.contextutil import pushd, temporary_dir
from pants_test.bin.repro_mixin import ReproMixin
from pants_test.subsystem.subsystem_util import global_subsystem_instance
class ReproOptionsTest(unittest.TestCase, ReproMixin):
  def test_ignore_dir(self):
    """Verify that passing --repro-ignore option ignores the directory"""
    # Buildroot is based on your cwd so we need to step into a fresh
    # directory for repro to look at.
    root_instance = BuildRoot()
    with temporary_dir() as build_root:
      with root_instance.temporary(build_root):
        with pushd(build_root):
          with temporary_dir() as capture_dir:
            # Lay out a fake workspace: .git and dist are ignored by
            # default, 'src' via the ignore option under test.
            add_file = partial(self.add_file, build_root)
            add_file('pants.ini', '')
            add_file('.git/foo', 'foo')
            add_file('dist/bar', 'bar')
            add_file('foo/bar', 'baz')
            add_file('src/test1', 'test1')
            add_file('src/test2', 'test1')

            repro_file = os.path.join(capture_dir, 'repro.tar.gz')
            options = {
              Reproducer.options_scope: dict(
                capture=repro_file,
                ignore=['src'],
              )}
            repro_sub = global_subsystem_instance(Reproducer, options=options)
            repro = repro_sub.create_repro()  # This is normally called in pants_exe.
            repro.capture(run_info_dict={})

            # Extract the captured tarball and check what made it in.
            extract_loc = os.path.join(capture_dir, 'extract')
            TGZ.extract(repro_file, extract_loc)

            assert_file = partial(self.assert_file, extract_loc)
            assert_file('foo/bar', 'baz')

            assert_not_exists = partial(self.assert_not_exists, extract_loc)
            assert_not_exists('.git')
            assert_not_exists('src')
| apache-2.0 |
styxit/HTPC-Manager | libs/cherrypy/test/test_routes.py | 42 | 2411 | import os
curdir = os.path.join(os.getcwd(), os.path.dirname(__file__))
import cherrypy
from cherrypy.test import helper
import nose
class RoutesDispatchTest(helper.CPWebCase):

    def setup_server():
        # The optional 'routes' package provides the mapper under test.
        try:
            import routes
        except ImportError:
            raise nose.SkipTest('Install routes to test RoutesDispatcher code')

        class Dummy:
            def index(self):
                return "I said good day!"

        class City:

            def __init__(self, name):
                self.name = name
                self.population = 10000

            def index(self, **kwargs):
                return "Welcome to %s, pop. %s" % (self.name, self.population)
            # Per-handler config: force a Content-Language response header.
            index._cp_config = {'tools.response_headers.on': True,
                                'tools.response_headers.headers': [('Content-Language', 'en-GB')]}

            def update(self, **kwargs):
                self.population = kwargs['pop']
                return "OK"

        d = cherrypy.dispatch.RoutesDispatcher()
        # GET /hounslow and GET /surbiton map to City.index; POST /surbiton
        # maps to City.update on the same 'surbiton' controller instance.
        d.connect(action='index', name='hounslow', route='/hounslow',
                  controller=City('Hounslow'))
        d.connect(name='surbiton', route='/surbiton', controller=City('Surbiton'),
                  action='index', conditions=dict(method=['GET']))
        d.mapper.connect('/surbiton', controller='surbiton',
                         action='update', conditions=dict(method=['POST']))
        d.connect('main', ':action', controller=Dummy())
        conf = {'/': {'request.dispatch': d}}
        cherrypy.tree.mount(root=None, config=conf)
    # The harness calls setup_server without an instance.
    setup_server = staticmethod(setup_server)

    def test_Routes_Dispatch(self):
        self.getPage("/hounslow")
        self.assertStatus("200 OK")
        self.assertBody("Welcome to Hounslow, pop. 10000")

        # Unmapped path falls through to a 404.
        self.getPage("/foo")
        self.assertStatus("404 Not Found")

        self.getPage("/surbiton")
        self.assertStatus("200 OK")
        self.assertBody("Welcome to Surbiton, pop. 10000")

        # POST updates the population on the shared controller instance...
        self.getPage("/surbiton", method="POST", body="pop=1327")
        self.assertStatus("200 OK")
        self.assertBody("OK")
        # ...which the subsequent GET observes, along with the header set
        # via index._cp_config.
        self.getPage("/surbiton")
        self.assertStatus("200 OK")
        self.assertHeader("Content-Language", "en-GB")
        self.assertBody("Welcome to Surbiton, pop. 1327")
| mit |
bop/foundation | lib/python2.7/site-packages/django/contrib/localflavor/id/forms.py | 87 | 7181 | """
ID-specific Form helpers
"""
from __future__ import absolute_import
import re
import time
from django.core.validators import EMPTY_VALUES
from django.forms import ValidationError
from django.forms.fields import Field, Select
from django.utils.translation import ugettext_lazy as _
from django.utils.encoding import smart_unicode
postcode_re = re.compile(r'^[1-9]\d{4}$')
phone_re = re.compile(r'^(\+62|0)[2-9]\d{7,10}$')
plate_re = re.compile(r'^(?P<prefix>[A-Z]{1,2}) ' + \
r'(?P<number>\d{1,5})( (?P<suffix>([A-Z]{1,3}|[1-9][0-9]{,2})))?$')
nik_re = re.compile(r'^\d{16}$')
class IDPostCodeField(Field):
    """
    An Indonesian post code field.

    http://id.wikipedia.org/wiki/Kode_pos
    """
    default_error_messages = {
        'invalid': _('Enter a valid post code'),
    }

    def clean(self, value):
        """Validate the post code and return it as a unicode string."""
        super(IDPostCodeField, self).clean(value)
        if value in EMPTY_VALUES:
            return u''

        cleaned = value.strip()
        if postcode_re.search(cleaned) is None:
            raise ValidationError(self.error_messages['invalid'])
        # Lowest assigned code is 10110.
        if int(cleaned) < 10110:
            raise ValidationError(self.error_messages['invalid'])
        # Codes starting with 1 must end in 0 (1xxx0).  The regex above
        # guarantees exactly five digits, so the last char is index 4.
        if cleaned.startswith('1') and not cleaned.endswith('0'):
            raise ValidationError(self.error_messages['invalid'])
        return u'%s' % (cleaned,)
class IDProvinceSelect(Select):
    """
    A Select widget whose choices are the provinces of Indonesia.
    """

    def __init__(self, attrs=None):
        # Import lazily so the (large) choice data is only loaded when a
        # widget is actually instantiated, see also #17275.
        from django.contrib.localflavor.id.id_choices import PROVINCE_CHOICES
        super(IDProvinceSelect, self).__init__(attrs, choices=PROVINCE_CHOICES)
class IDPhoneNumberField(Field):
    """
    An Indonesian telephone number field.

    http://id.wikipedia.org/wiki/Daftar_kode_telepon_di_Indonesia
    """
    default_error_messages = {
        'invalid': _('Enter a valid phone number'),
    }

    def clean(self, value):
        """Validate the number; returns the value exactly as entered."""
        super(IDPhoneNumberField, self).clean(value)
        if value in EMPTY_VALUES:
            return u''

        # Strip common separators (dashes, spaces, parentheses) before
        # matching, but return the original, un-normalised input.
        digits_only = re.sub(r'[\-\s\(\)]', '', smart_unicode(value))
        if not phone_re.search(digits_only):
            raise ValidationError(self.error_messages['invalid'])
        return smart_unicode(value)
class IDLicensePlatePrefixSelect(Select):
    """
    A Select widget whose choices are the Indonesian vehicle license
    plate prefix codes.

    http://id.wikipedia.org/wiki/Tanda_Nomor_Kendaraan_Bermotor
    """

    def __init__(self, attrs=None):
        # Import lazily so the choice data is only loaded when a widget
        # is actually instantiated, see also #17275.
        from django.contrib.localflavor.id.id_choices import LICENSE_PLATE_PREFIX_CHOICES
        super(IDLicensePlatePrefixSelect, self).__init__(attrs,
            choices=LICENSE_PLATE_PREFIX_CHOICES)
class IDLicensePlateField(Field):
    """
    An Indonesian vehicle license plate field.

    http://id.wikipedia.org/wiki/Tanda_Nomor_Kendaraan_Bermotor

    Plus: "B 12345 12"
    """
    default_error_messages = {
        'invalid': _('Enter a valid vehicle license plate number'),
    }

    def clean(self, value):
        """Validate a plate and return it upper-cased with whitespace
        collapsed, or u'' for empty input.

        Raises ValidationError for any invalid plate.
        """
        # Load data in memory only when it is required, see also #17275
        from django.contrib.localflavor.id.id_choices import LICENSE_PLATE_PREFIX_CHOICES
        super(IDLicensePlateField, self).clean(value)
        if value in EMPTY_VALUES:
            return u''

        plate_number = re.sub(r'\s+', ' ',
            smart_unicode(value.strip())).upper()

        matches = plate_re.search(plate_number)
        if matches is None:
            raise ValidationError(self.error_messages['invalid'])

        # Make sure prefix is in the list of known codes.
        prefix = matches.group('prefix')
        if prefix not in [choice[0] for choice in LICENSE_PLATE_PREFIX_CHOICES]:
            raise ValidationError(self.error_messages['invalid'])

        # Only Jakarta (prefix B) can have 3 letter suffix.
        suffix = matches.group('suffix')
        if suffix is not None and len(suffix) == 3 and prefix != 'B':
            raise ValidationError(self.error_messages['invalid'])

        # RI plates don't have suffix.
        if prefix == 'RI' and suffix is not None and suffix != '':
            raise ValidationError(self.error_messages['invalid'])

        # Number can't be zero.
        number = matches.group('number')
        if number == '0':
            raise ValidationError(self.error_messages['invalid'])

        # CD, CC and B 12345 12
        if len(number) == 5 or prefix in ('CD', 'CC'):
            # Suffix is required here and must be numeric.  The suffix
            # group is optional in plate_re, so it may be None; the old
            # code passed None straight to re.match(), which raises
            # TypeError instead of reporting a validation error.
            if suffix is None or re.match(r'^\d+$', suffix) is None:
                raise ValidationError(self.error_messages['invalid'])

            # Known codes range is 12-124
            if prefix in ('CD', 'CC') and not (12 <= int(number) <= 124):
                raise ValidationError(self.error_messages['invalid'])
            if len(number) == 5 and not (12 <= int(suffix) <= 124):
                raise ValidationError(self.error_messages['invalid'])
        else:
            # suffix must be non-numeric
            if suffix is not None and re.match(r'^[A-Z]{,3}$', suffix) is None:
                raise ValidationError(self.error_messages['invalid'])

        return plate_number
class IDNationalIdentityNumberField(Field):
    """
    An Indonesian national identity number (NIK/KTP#) field.

    http://id.wikipedia.org/wiki/Nomor_Induk_Kependudukan

    xx.xxxx.ddmmyy.xxxx - 16 digits (excl. dots)
    """
    default_error_messages = {
        'invalid': _('Enter a valid NIK/KTP number'),
    }

    def clean(self, value):
        # Returns the NIK formatted as xx.xxxx.ddmmyy.xxxx, or u'' for
        # empty input; raises ValidationError otherwise.
        super(IDNationalIdentityNumberField, self).clean(value)
        if value in EMPTY_VALUES:
            return u''

        # Drop dots and whitespace before validating the 16 digits.
        value = re.sub(r'[\s.]', '', smart_unicode(value))
        if not nik_re.search(value):
            raise ValidationError(self.error_messages['invalid'])
        if int(value) == 0:
            raise ValidationError(self.error_messages['invalid'])

        def valid_nik_date(year, month, day):
            # True when (year, month, day) is a real calendar date: a
            # round-trip through mktime/localtime must preserve it
            # (e.g. Feb 30 would normalise to a different date).
            try:
                t1 = (int(year), int(month), int(day), 0, 0, 0, 0, 0, -1)
                d = time.mktime(t1)
                t2 = time.localtime(d)
                if t1[:3] != t2[:3]:
                    return False
                else:
                    return True
            except (OverflowError, ValueError):
                return False

        # Digits 7-12 encode the birth date as ddmmyy.
        year = int(value[10:12])
        month = int(value[8:10])
        day = int(value[6:8])
        current_year = time.localtime().tm_year
        # Two-digit years below the current year's last two digits are
        # taken as 20xx, all others as 19xx.
        if year < int(str(current_year)[-2:]):
            if not valid_nik_date(2000 + int(year), month, day):
                raise ValidationError(self.error_messages['invalid'])
        elif not valid_nik_date(1900 + int(year), month, day):
            raise ValidationError(self.error_messages['invalid'])

        # The first six digits and the last four must not be all zeros.
        if value[:6] == '000000' or value[12:] == '0000':
            raise ValidationError(self.error_messages['invalid'])

        return '%s.%s.%s.%s' % (value[:2], value[2:6], value[6:12], value[12:])
| gpl-2.0 |
kantlove/flask-simple-page | Lib/site-packages/werkzeug/contrib/wrappers.py | 295 | 10331 | # -*- coding: utf-8 -*-
"""
werkzeug.contrib.wrappers
~~~~~~~~~~~~~~~~~~~~~~~~~
Extra wrappers or mixins contributed by the community. These wrappers can
be mixed in into request objects to add extra functionality.
Example::
from werkzeug.wrappers import Request as RequestBase
from werkzeug.contrib.wrappers import JSONRequestMixin
class Request(RequestBase, JSONRequestMixin):
pass
Afterwards this request object provides the extra functionality of the
:class:`JSONRequestMixin`.
:copyright: (c) 2014 by the Werkzeug Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
import codecs
try:
from simplejson import loads
except ImportError:
from json import loads
from werkzeug.exceptions import BadRequest
from werkzeug.utils import cached_property
from werkzeug.http import dump_options_header, parse_options_header
from werkzeug._compat import wsgi_decoding_dance
def is_known_charset(charset):
    """Return True if *charset* names a codec Python can look up."""
    try:
        codecs.lookup(charset)
    except LookupError:
        return False
    else:
        return True
class JSONRequestMixin(object):
    """Adds a ``json`` property to a request object that parses the body
    with simplejson when available.

    :exc:`~werkzeug.exceptions.BadRequest` is raised when the content-type
    is not json or when the body cannot be parsed as json.
    """

    @cached_property
    def json(self):
        """Get the result of simplejson.loads if possible."""
        content_type = self.environ.get('CONTENT_TYPE', '')
        if 'json' not in content_type:
            raise BadRequest('Not a JSON request')
        try:
            parsed = loads(self.data)
        except Exception:
            raise BadRequest('Unable to read JSON request')
        return parsed
class ProtobufRequestMixin(object):
    """Adds a :meth:`parse_protobuf` method to a request object for
    decoding the body with `protobuf`_ when possible.

    :exc:`~werkzeug.exceptions.BadRequest` is raised when the content-type
    is not protobuf or when the data cannot be parsed properly.

    .. _protobuf: http://code.google.com/p/protobuf/
    """

    #: by default the :class:`ProtobufRequestMixin` will raise a
    #: :exc:`~werkzeug.exceptions.BadRequest` if the object is not
    #: initialized.  You can bypass that check by setting this
    #: attribute to `False`.
    protobuf_check_initialization = True

    def parse_protobuf(self, proto_type):
        """Parse the data into an instance of proto_type."""
        content_type = self.environ.get('CONTENT_TYPE', '')
        if 'protobuf' not in content_type:
            raise BadRequest('Not a Protobuf request')

        message = proto_type()
        try:
            message.ParseFromString(self.data)
        except Exception:
            raise BadRequest("Unable to parse Protobuf request")

        # Reject messages whose required fields are unset, unless the
        # check has been disabled on the class.
        if self.protobuf_check_initialization and not message.IsInitialized():
            raise BadRequest("Partial Protobuf request")

        return message
class RoutingArgsRequestMixin(object):
    """This request mixin adds support for the wsgiorg routing args
    `specification`_.

    .. _specification: http://www.wsgi.org/wsgi/Specifications/routing_args
    """

    def _get_routing_args(self):
        # routing_args is a (positional, keyword) pair; return positional.
        return self.environ.get('wsgiorg.routing_args', (()))[0]

    def _set_routing_args(self, value):
        # Shallow requests must not mutate the shared WSGI environ.
        if self.shallow:
            raise RuntimeError('A shallow request tried to modify the WSGI '
                               'environment.  If you really want to do that, '
                               'set `shallow` to False.')
        self.environ['wsgiorg.routing_args'] = (value, self.routing_vars)

    routing_args = property(_get_routing_args, _set_routing_args, doc='''
        The positional URL arguments as `tuple`.''')
    # Drop the raw accessors from the class namespace; the property
    # object above keeps references to them.
    del _get_routing_args, _set_routing_args

    def _get_routing_vars(self):
        rv = self.environ.get('wsgiorg.routing_args')
        if rv is not None:
            return rv[1]
        # No routing args stored yet: lazily materialise an empty dict
        # and (for non-shallow requests) write it back into the environ.
        rv = {}
        if not self.shallow:
            self.routing_vars = rv
        return rv

    def _set_routing_vars(self, value):
        if self.shallow:
            raise RuntimeError('A shallow request tried to modify the WSGI '
                               'environment.  If you really want to do that, '
                               'set `shallow` to False.')
        self.environ['wsgiorg.routing_args'] = (self.routing_args, value)

    routing_vars = property(_get_routing_vars, _set_routing_vars, doc='''
        The keyword URL arguments as `dict`.''')
    # Same namespace cleanup as for routing_args above.
    del _get_routing_vars, _set_routing_vars
class ReverseSlashBehaviorRequestMixin(object):
    """Mixin that reverses the trailing-slash convention of
    :attr:`script_root` and :attr:`path` so that
    :func:`~urlparse.urljoin` can be applied to them directly.

    Because it changes the behavior of :class:`Request` this class has to
    be mixed in *before* the actual request class::

        class MyRequest(ReverseSlashBehaviorRequestMixin, Request):
            pass

    For an application mounted on `/application` and a request to
    `/application/foo/bar` the values differ as follows:

    +---------------+-------------------+---------------------+
    |               | normal behavior   | reverse behavior    |
    +===============+===================+=====================+
    | `script_root` | ``/application``  | ``/application/``   |
    +---------------+-------------------+---------------------+
    | `path`        | ``/foo/bar``      | ``foo/bar``         |
    +---------------+-------------------+---------------------+
    """

    @cached_property
    def path(self):
        """Requested path as unicode, without a leading slash."""
        raw_path = self.environ.get('PATH_INFO') or ''
        decoded = wsgi_decoding_dance(raw_path, self.charset,
                                      self.encoding_errors)
        return decoded.lstrip('/')

    @cached_property
    def script_root(self):
        """The root path of the script, including a trailing slash."""
        raw_root = self.environ.get('SCRIPT_NAME') or ''
        decoded = wsgi_decoding_dance(raw_root, self.charset,
                                      self.encoding_errors)
        return decoded.rstrip('/') + '/'
class DynamicCharsetRequestMixin(object):
""""If this mixin is mixed into a request class it will provide
a dynamic `charset` attribute. This means that if the charset is
transmitted in the content type headers it's used from there.
Because it changes the behavior or :class:`Request` this class has
to be mixed in *before* the actual request class::
class MyRequest(DynamicCharsetRequestMixin, Request):
pass
By default the request object assumes that the URL charset is the
same as the data charset. If the charset varies on each request
based on the transmitted data it's not a good idea to let the URLs
change based on that. Most browsers assume either utf-8 or latin1
for the URLs if they have troubles figuring out. It's strongly
recommended to set the URL charset to utf-8::
class MyRequest(DynamicCharsetRequestMixin, Request):
url_charset = 'utf-8'
.. versionadded:: 0.6
"""
#: the default charset that is assumed if the content type header
#: is missing or does not contain a charset parameter. The default
#: is latin1 which is what HTTP specifies as default charset.
#: You may however want to set this to utf-8 to better support
#: browsers that do not transmit a charset for incoming data.
default_charset = 'latin1'
def unknown_charset(self, charset):
"""Called if a charset was provided but is not supported by
the Python codecs module. By default latin1 is assumed then
to not lose any information, you may override this method to
change the behavior.
:param charset: the charset that was not found.
:return: the replacement charset.
"""
return 'latin1'
@cached_property
def charset(self):
"""The charset from the content type."""
header = self.environ.get('CONTENT_TYPE')
if header:
ct, options = parse_options_header(header)
charset = options.get('charset')
if charset:
if is_known_charset(charset):
return charset
return self.unknown_charset(charset)
return self.default_charset
class DynamicCharsetResponseMixin(object):
    """If this mixin is mixed into a response class it will provide
    a dynamic `charset` attribute.  This means that the charset is
    looked up and stored in the `Content-Type` header and updates
    itself automatically.  This also means a small performance hit but
    can be useful if you're working with different charsets on
    responses.

    Because the charset attribute is not a property at class-level, the
    default value is stored in `default_charset`.

    Because it changes the behavior of :class:`Response` this class has
    to be mixed in *before* the actual response class::

        class MyResponse(DynamicCharsetResponseMixin, Response):
            pass

    .. versionadded:: 0.6
    """

    #: the default charset.
    default_charset = 'utf-8'

    def _get_charset(self):
        # Prefer the charset parameter of the Content-Type header.
        header = self.headers.get('content-type')
        if header:
            charset = parse_options_header(header)[1].get('charset')
            if charset:
                return charset
        return self.default_charset

    def _set_charset(self, charset):
        # Rewrite the Content-Type header with the new charset parameter.
        header = self.headers.get('content-type')
        ct, options = parse_options_header(header)
        if not ct:
            raise TypeError('Cannot set charset if Content-Type '
                            'header is missing.')
        options['charset'] = charset
        self.headers['Content-Type'] = dump_options_header(ct, options)

    charset = property(_get_charset, _set_charset, doc="""
        The charset for the response.  It's stored inside the
        Content-Type header as a parameter.""")
    # Drop the raw accessors from the class namespace; the property
    # object above keeps references to them.
    del _get_charset, _set_charset
| mit |
fujita/ryu | ryu/lib/packet/packet.py | 4 | 6138 | # Copyright (C) 2012 Nippon Telegraph and Telephone Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import inspect
import struct
import base64
import six
from . import packet_base
from . import ethernet
from ryu import utils
from ryu.lib.stringify import StringifyMixin
# Packet class dictionary
# Import every module under ryu.lib.packet and collect all classes that
# subclass packet_base.PacketBase.  getmembers yields (name, class)
# pairs, so PKT_CLS_DICT maps protocol class name -> class; it is used
# by Packet.from_jsondict to turn names back into classes.
mod = inspect.getmembers(utils.import_module("ryu.lib.packet"),
                         lambda cls: (inspect.ismodule(cls)))
cls_list = []
for _, m in mod:
    cl = inspect.getmembers(m,
                            lambda cls: (
                                inspect.isclass(cls) and
                                issubclass(cls, packet_base.PacketBase)))
    cls_list.extend(list(cl))
PKT_CLS_DICT = dict(cls_list)
class Packet(StringifyMixin):
    """A packet decoder/encoder class.

    An instance is used to either decode or encode a single packet.

    *data* is a bytearray to describe a raw datagram to decode.
    When decoding, a Packet object is iteratable.
    Iterated values are protocol (ethernet, ipv4, ...) headers and the payload.
    Protocol headers are instances of subclass of packet_base.PacketBase.
    The payload is a bytearray. They are iterated in on-wire order.

    *data* should be omitted when encoding a packet.
    """

    # Ignore data field when outputting json representation.
    _base_attributes = ['data']

    def __init__(self, data=None, protocols=None, parse_cls=ethernet.ethernet):
        # *parse_cls* is the protocol class used to parse the outermost
        # header (Ethernet by default).
        super(Packet, self).__init__()
        self.data = data
        if protocols is None:
            self.protocols = []
        else:
            self.protocols = protocols
        if self.data:
            # Raw bytes supplied: decode immediately.
            self._parser(parse_cls)

    def _parser(self, cls):
        # Walk the protocol chain: each parser returns
        # (header, next_protocol_cls, remaining_bytes).  Stop when there
        # is no next class, the rest is all zero padding, or a parser
        # fails with struct.error (truncated data).
        rest_data = self.data
        while cls:
            # Ignores an empty buffer
            if not six.binary_type(rest_data).strip(b'\x00'):
                break
            try:
                proto, cls, rest_data = cls.parser(rest_data)
            except struct.error:
                break
            if proto:
                self.protocols.append(proto)
        # If rest_data is all padding, we ignore rest_data
        if rest_data and six.binary_type(rest_data).strip(b'\x00'):
            self.protocols.append(rest_data)

    def serialize(self):
        """Encode a packet and store the resulted bytearray in self.data.

        This method is legal only when encoding a packet.
        """
        self.data = bytearray()
        # Serialize from the innermost layer outwards, prepending each
        # header in front of the already-encoded payload in self.data.
        r = self.protocols[::-1]
        for i, p in enumerate(r):
            if isinstance(p, packet_base.PacketBase):
                if i == len(r) - 1:
                    # Outermost header: nothing outside it.
                    prev = None
                else:
                    # The header one layer outside p in on-wire order.
                    prev = r[i + 1]
                data = p.serialize(self.data, prev)
            else:
                # Raw payload: used as-is.
                data = six.binary_type(p)
            self.data = bytearray(data + self.data)

    @classmethod
    def from_jsondict(cls, dict_, decode_string=base64.b64decode,
                      **additional_args):
        # Rebuild a Packet from its json-dict form.  Each entry of
        # dict_['protocols'] maps a protocol class name (a PKT_CLS_DICT
        # key) to that protocol's own json dict.  *decode_string* and
        # **additional_args are accepted but unused here.
        protocols = []
        for proto in dict_['protocols']:
            for key, value in proto.items():
                if key in PKT_CLS_DICT:
                    pkt_cls = PKT_CLS_DICT[key]
                    protocols.append(pkt_cls.from_jsondict(value))
                else:
                    raise ValueError('unknown protocol name %s' % key)
        return cls(protocols=protocols)

    def add_protocol(self, proto):
        """Register a protocol *proto* for this packet.

        This method is legal only when encoding a packet.

        When encoding a packet, register a protocol (ethernet, ipv4, ...)
        header to add to this packet.
        Protocol headers should be registered in on-wire order before calling
        self.serialize.
        """
        self.protocols.append(proto)

    def get_protocols(self, protocol):
        """Returns a list of protocols that matches to the specified protocol.
        """
        # Accept either a protocol class or an instance of one.
        if isinstance(protocol, packet_base.PacketBase):
            protocol = protocol.__class__
        assert issubclass(protocol, packet_base.PacketBase)
        return [p for p in self.protocols if isinstance(p, protocol)]

    def get_protocol(self, protocol):
        """Returns the firstly found protocol that matches to the
        specified protocol.
        """
        result = self.get_protocols(protocol)
        if len(result) > 0:
            return result[0]
        return None

    def __div__(self, trailer):
        # pkt / proto appends proto to the packet (Python 2 operator).
        self.add_protocol(trailer)
        return self

    def __truediv__(self, trailer):
        # Python 3 spelling of the same operator.
        return self.__div__(trailer)

    def __iter__(self):
        return iter(self.protocols)

    def __getitem__(self, idx):
        return self.protocols[idx]

    def __setitem__(self, idx, item):
        self.protocols[idx] = item

    def __delitem__(self, idx):
        del self.protocols[idx]

    def __len__(self):
        return len(self.protocols)

    def __contains__(self, protocol):
        # A class tests membership by type; anything else by equality.
        if (inspect.isclass(protocol) and
                issubclass(protocol, packet_base.PacketBase)):
            return protocol in [p.__class__ for p in self.protocols]
        return protocol in self.protocols

    def __str__(self):
        return ', '.join(repr(protocol) for protocol in self.protocols)
    __repr__ = __str__  # note: str(list) uses __repr__ for elements
# XXX: Hack for preventing recursive import
# packet_base cannot import this module (it would be circular), so the
# proto / trailer operator for PacketBase is defined here and patched
# onto the class after both modules are loaded.
def _PacketBase__div__(self, trailer):
    # Build a new Packet containing self followed by trailer.
    pkt = Packet()
    pkt.add_protocol(self)
    pkt.add_protocol(trailer)
    return pkt

packet_base.PacketBase.__div__ = _PacketBase__div__
packet_base.PacketBase.__truediv__ = _PacketBase__div__
| apache-2.0 |
plxaye/chromium | src/remoting/tools/register_host.py | 49 | 3100 | #!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Registers new hosts in chromoting directory.
It asks for username/password and then writes these settings to config file.
"""
import base64
import getpass
import hashlib
import hmac
import json
import os
import random
import socket
import sys
import urllib
import urllib2
import gaia_auth
import keygen
def random_uuid():
  """Return a random identifier formatted like a UUID
  (8-4-4-4-12 lower-case hex digits)."""
  words = tuple(random.randrange(0, 65536) for _ in range(8))
  return "%04x%04x-%04x-%04x-%04x-%04x%04x%04x" % words
def main():
  """Interactively register this machine as a chromoting host.

  Python 2 script: prompts for Google account credentials and a host
  PIN, generates an RSA key pair, registers the host with the
  chromoting directory service, and writes ~/.ChromotingConfig.json.
  Returns a process exit code (0 on success, 1 on directory error).
  """
  server = 'www.googleapis.com'
  url = 'https://' + server + '/chromoting/v1/@me/hosts'

  settings_filepath = os.path.join(os.path.expanduser('~'),
                                   '.ChromotingConfig.json')

  print "Email:",
  email = raw_input()
  password = getpass.getpass("Password: ")

  chromoting_auth = gaia_auth.GaiaAuthenticator('chromoting')
  auth_token = chromoting_auth.authenticate(email, password)

  host_id = random_uuid()
  print "HostId:", host_id
  host_name = socket.gethostname()
  print "HostName:", host_name

  print "Generating RSA key pair...",
  (private_key, public_key) = keygen.generateRSAKeyPair()
  print "Done"

  # Keep prompting until we get a confirmed PIN of at least 4 characters.
  while 1:
    pin = getpass.getpass("Host PIN: ")
    if len(pin) < 4:
      print "PIN must be at least 4 characters long."
      continue
    pin2 = getpass.getpass("Confirm host PIN: ")
    if pin2 != pin:
      print "PINs didn't match. Please try again."
      continue
    break
  # Only an HMAC of the PIN (keyed by the host id) is stored, never the
  # PIN itself.
  host_secret_hash = "hmac:" + base64.b64encode(
      hmac.new(str(host_id), pin, hashlib.sha256).digest())

  params = { "data": {
      "hostId": host_id,
      "hostName": host_name,
      "publicKey": public_key,
      } }
  headers = {"Authorization": "GoogleLogin auth=" + auth_token,
             "Content-Type": "application/json" }
  request = urllib2.Request(url, json.dumps(params), headers)

  opener = urllib2.OpenerDirector()
  opener.add_handler(urllib2.HTTPDefaultErrorHandler())

  print
  print "Registering host with directory service..."
  try:
    res = urllib2.urlopen(request)
    data = res.read()
  except urllib2.HTTPError, err:
    print >> sys.stderr, "Directory returned error:", err
    print >> sys.stderr, err.fp.read()
    return 1
  print "Done"

  # Get token that the host will use to authenticate in talk network.
  authenticator = gaia_auth.GaiaAuthenticator('chromiumsync');
  auth_token = authenticator.authenticate(email, password)

  # Write settings file.
  os.umask(0066) # Set permission mask for created file (owner r/w only).
  settings_file = open(settings_filepath, 'w')
  config = {
      "xmpp_login" : email,
      "xmpp_auth_token" : auth_token,
      "host_id" : host_id,
      "host_name" : host_name,
      "host_secret_hash": host_secret_hash,
      "private_key" : private_key,
      }
  settings_file.write(json.dumps(config, indent=2))
  settings_file.close()

  print 'Configuration saved in', settings_filepath
  return 0


if __name__ == '__main__':
  sys.exit(main())
| apache-2.0 |
donkirkby/django | django/core/management/commands/startapp.py | 513 | 1040 | from importlib import import_module
from django.core.management.base import CommandError
from django.core.management.templates import TemplateCommand
class Command(TemplateCommand):
    help = ("Creates a Django app directory structure for the given app "
            "name in the current directory or optionally in the given "
            "directory.")
    missing_args_message = "You must provide an application name."

    def handle(self, **options):
        """Validate the requested app name, then delegate the actual
        directory creation to TemplateCommand."""
        app_name = options.pop('name')
        target = options.pop('directory')
        self.validate_name(app_name, "app")

        # Refuse names that are already importable: the new app would
        # collide with that module.
        try:
            import_module(app_name)
        except ImportError:
            # Not importable, so the name is safe to use.
            pass
        else:
            raise CommandError("%r conflicts with the name of an existing "
                               "Python module and cannot be used as an app "
                               "name. Please try another name." % app_name)

        super(Command, self).handle('app', app_name, target, **options)
| bsd-3-clause |
davidam/python-examples | webscraping/requests/downloadUrls.py | 1 | 1276 | # -*- coding: utf-8 -*-
from bs4 import BeautifulSoup
from pprint import pprint
import requests, random, re, os
from lxml import html
#https://elpais.com/hemeroteca/elpais/2017/01/01/m/portada.html
def downloadOneUrl(url, name):
    """Fetch *url* and save the response body to the file *name*."""
    res = requests.get(url)
    # Use a context manager so the file handle is always closed (the
    # previous version leaked the open handle).
    with open(name, "w") as out:
        out.write(res.text)
def downloadUrls(url, directory):
    """Fetch *url*, extract all links and print those pointing at elpais.com.

    Changes the working directory to *directory*, creating it first when
    needed.  (The previous version only chdir'd when the directory already
    existed, so on a fresh run output would land in the current directory
    instead of the one just created.)
    """
    if not os.path.exists(directory):
        os.makedirs(directory)
    os.chdir(directory)
    page = requests.get(url)
    tree = html.fromstring(page.content)
    hrefs = tree.xpath('//a//@href')
    pprint(hrefs)
    for h in hrefs:
        # Keep only links on an elpais domain (any scheme/subdomain).
        match = re.search(r'http[s]?://(www\.)?([a-z]*\.)?elpais\b', h)
        if match:
            print(h)
url = 'https://elpais.com/hemeroteca/elpais/2017/01/01/m/portada.html'
# Save the front page itself, then scan it for elpais.com links.
downloadOneUrl(url, "elpais-01.html")
# NOTE(review): downloadUrls' second argument is used as a directory
# name; passing the URL again here looks wrong -- verify intent.
downloadUrls(url, "https://elpais.com/hemeroteca/elpais/2017/01/01/m/portada.html")
#downloadUrls(url, "tmp")
| gpl-3.0 |
Shekharrajak/Django-facebook | docs/docs_env/Lib/site-packages/pip-1.0-py2.5.egg/pip/commands/install.py | 34 | 10230 | import os, sys
from pip.req import InstallRequirement, RequirementSet
from pip.req import parse_requirements
from pip.log import logger
from pip.locations import build_prefix, src_prefix
from pip.basecommand import Command
from pip.index import PackageFinder
from pip.exceptions import InstallationError
class InstallCommand(Command):
    """The ``pip install`` command: resolve the requested requirements,
    download them, and install them (or only download/bundle them,
    depending on options)."""

    name = 'install'
    usage = '%prog [OPTIONS] PACKAGE_NAMES...'
    summary = 'Install packages'
    # When True, run() creates a bundle instead of installing
    # (presumably set by a bundle-creating subclass -- see the
    # create_bundle branch in run()).
    bundle = False

    def __init__(self):
        # Register all command line options; the help strings below are
        # the authoritative documentation for each flag.
        super(InstallCommand, self).__init__()
        self.parser.add_option(
            '-e', '--editable',
            dest='editables',
            action='append',
            default=[],
            metavar='VCS+REPOS_URL[@REV]#egg=PACKAGE',
            help='Install a package directly from a checkout. Source will be checked '
                 'out into src/PACKAGE (lower-case) and installed in-place (using '
                 'setup.py develop). You can run this on an existing directory/checkout (like '
                 'pip install -e src/mycheckout). This option may be provided multiple times. '
                 'Possible values for VCS are: svn, git, hg and bzr.')
        self.parser.add_option(
            '-r', '--requirement',
            dest='requirements',
            action='append',
            default=[],
            metavar='FILENAME',
            help='Install all the packages listed in the given requirements file. '
                 'This option can be used multiple times.')
        self.parser.add_option(
            '-f', '--find-links',
            dest='find_links',
            action='append',
            default=[],
            metavar='URL',
            help='URL to look for packages at')
        self.parser.add_option(
            '-i', '--index-url', '--pypi-url',
            dest='index_url',
            metavar='URL',
            default='http://pypi.python.org/simple/',
            help='Base URL of Python Package Index (default %default)')
        self.parser.add_option(
            '--extra-index-url',
            dest='extra_index_urls',
            metavar='URL',
            action='append',
            default=[],
            help='Extra URLs of package indexes to use in addition to --index-url')
        self.parser.add_option(
            '--no-index',
            dest='no_index',
            action='store_true',
            default=False,
            help='Ignore package index (only looking at --find-links URLs instead)')
        self.parser.add_option(
            '-M', '--use-mirrors',
            dest='use_mirrors',
            action='store_true',
            default=False,
            help='Use the PyPI mirrors as a fallback in case the main index is down.')
        self.parser.add_option(
            '--mirrors',
            dest='mirrors',
            metavar='URL',
            action='append',
            default=[],
            help='Specific mirror URLs to query when --use-mirrors is used')
        self.parser.add_option(
            '-b', '--build', '--build-dir', '--build-directory',
            dest='build_dir',
            metavar='DIR',
            default=None,
            help='Unpack packages into DIR (default %s) and build from there' % build_prefix)
        self.parser.add_option(
            '-d', '--download', '--download-dir', '--download-directory',
            dest='download_dir',
            metavar='DIR',
            default=None,
            help='Download packages into DIR instead of installing them')
        self.parser.add_option(
            '--download-cache',
            dest='download_cache',
            metavar='DIR',
            default=None,
            help='Cache downloaded packages in DIR')
        self.parser.add_option(
            '--src', '--source', '--source-dir', '--source-directory',
            dest='src_dir',
            metavar='DIR',
            default=None,
            help='Check out --editable packages into DIR (default %s)' % src_prefix)
        self.parser.add_option(
            '-U', '--upgrade',
            dest='upgrade',
            action='store_true',
            help='Upgrade all packages to the newest available version')
        self.parser.add_option(
            '-I', '--ignore-installed',
            dest='ignore_installed',
            action='store_true',
            help='Ignore the installed packages (reinstalling instead)')
        self.parser.add_option(
            '--no-deps', '--no-dependencies',
            dest='ignore_dependencies',
            action='store_true',
            default=False,
            help='Ignore package dependencies')
        self.parser.add_option(
            '--no-install',
            dest='no_install',
            action='store_true',
            help="Download and unpack all packages, but don't actually install them")
        self.parser.add_option(
            '--no-download',
            dest='no_download',
            action="store_true",
            help="Don't download any packages, just install the ones already downloaded "
                 "(completes an install run with --no-install)")
        self.parser.add_option(
            '--install-option',
            dest='install_options',
            action='append',
            help="Extra arguments to be supplied to the setup.py install "
                 "command (use like --install-option=\"--install-scripts=/usr/local/bin\"). "
                 "Use multiple --install-option options to pass multiple options to setup.py install. "
                 "If you are using an option with a directory path, be sure to use absolute path.")
        self.parser.add_option(
            '--global-option',
            dest='global_options',
            action='append',
            help="Extra global options to be supplied to the setup.py"
                 "call before the install command")
        self.parser.add_option(
            '--user',
            dest='use_user_site',
            action='store_true',
            help='Install to user-site')

    def _build_package_finder(self, options, index_urls):
        """
        Create a package finder appropriate to this install command.

        This method is meant to be overridden by subclasses, not
        called directly.
        """
        return PackageFinder(find_links=options.find_links,
                             index_urls=index_urls,
                             use_mirrors=options.use_mirrors,
                             mirrors=options.mirrors)

    def run(self, options, args):
        """Build a RequirementSet from args/options, download and install
        it (subject to --no-download/--no-install/bundle), and return it.
        """
        # Fill in default working directories when not given explicitly.
        if not options.build_dir:
            options.build_dir = build_prefix
        if not options.src_dir:
            options.src_dir = src_prefix
        if options.download_dir:
            # --download implies: don't install, and re-fetch even when an
            # equal version is already installed.
            options.no_install = True
            options.ignore_installed = True
        options.build_dir = os.path.abspath(options.build_dir)
        options.src_dir = os.path.abspath(options.src_dir)
        install_options = options.install_options or []
        if options.use_user_site:
            install_options.append('--user')
        global_options = options.global_options or []
        index_urls = [options.index_url] + options.extra_index_urls
        if options.no_index:
            logger.notify('Ignoring indexes: %s' % ','.join(index_urls))
            index_urls = []

        finder = self._build_package_finder(options, index_urls)

        requirement_set = RequirementSet(
            build_dir=options.build_dir,
            src_dir=options.src_dir,
            download_dir=options.download_dir,
            download_cache=options.download_cache,
            upgrade=options.upgrade,
            ignore_installed=options.ignore_installed,
            ignore_dependencies=options.ignore_dependencies)
        # Collect requirements from positional args, -e flags and -r files.
        for name in args:
            requirement_set.add_requirement(
                InstallRequirement.from_line(name, None))
        for name in options.editables:
            requirement_set.add_requirement(
                InstallRequirement.from_editable(name, default_vcs=options.default_vcs))
        for filename in options.requirements:
            for req in parse_requirements(filename, finder=finder, options=options):
                requirement_set.add_requirement(req)
        if not requirement_set.has_requirements:
            if options.find_links:
                raise InstallationError('You must give at least one '
                    'requirement to %s (maybe you meant "pip install %s"?)'
                    % (self.name, " ".join(options.find_links)))
            raise InstallationError('You must give at least one requirement '
                'to %(name)s (see "pip help %(name)s")' % dict(name=self.name))

        # --user needs Python >= 2.6, and (for editables) distribute
        # rather than plain setuptools.
        if (options.use_user_site and
            sys.version_info < (2, 6)):
            raise InstallationError('--user is only supported in Python version 2.6 and newer')
        import setuptools
        if (options.use_user_site and
            requirement_set.has_editables and
            not getattr(setuptools, '_distribute', False)):
            raise InstallationError('--user --editable not supported with setuptools, use distribute')

        if not options.no_download:
            requirement_set.prepare_files(finder, force_root_egg_info=self.bundle, bundle=self.bundle)
        else:
            requirement_set.locate_files()

        if not options.no_install and not self.bundle:
            requirement_set.install(install_options, global_options)
            installed = ' '.join([req.name for req in
                                  requirement_set.successfully_installed])
            if installed:
                logger.notify('Successfully installed %s' % installed)
        elif not self.bundle:
            downloaded = ' '.join([req.name for req in
                                   requirement_set.successfully_downloaded])
            if downloaded:
                logger.notify('Successfully downloaded %s' % downloaded)
        elif self.bundle:
            requirement_set.create_bundle(self.bundle_filename)
            logger.notify('Created bundle in %s' % self.bundle_filename)
        # Clean up
        if not options.no_install:
            requirement_set.cleanup_files(bundle=self.bundle)
        return requirement_set
# Instantiation registers the command with pip's command registry as a
# side effect of Command.__init__.
InstallCommand()
| bsd-3-clause |
AkA84/edx-platform | lms/djangoapps/instructor/views/tools.py | 127 | 8635 | """
Tools for the instructor dashboard
"""
import dateutil
import json
from django.conf import settings
from django.contrib.auth.models import User
from django.http import HttpResponseBadRequest
from django.utils.timezone import utc
from django.utils.translation import ugettext as _
from courseware.models import StudentFieldOverride
from courseware.field_overrides import disable_overrides
from courseware.student_field_overrides import (
clear_override_for_user,
get_override_for_user,
override_field_for_user,
)
from xmodule.fields import Date
from xmodule.modulestore import ModuleStoreEnum
from xmodule.modulestore.django import modulestore
from opaque_keys.edx.keys import UsageKey
from bulk_email.models import CourseAuthorization
DATE_FIELD = Date()
class DashboardError(Exception):
    """
    Raised for errors arising from use of the instructor dashboard.
    """
    def response(self):
        """
        Build an HttpResponseBadRequest whose body is a JSON object with a
        single 'error' key describing this exception.
        """
        return HttpResponseBadRequest(json.dumps({'error': unicode(self)}))
def handle_dashboard_error(view):
    """
    Decorator which adds seamless DashboardError handling to a view. If a
    DashboardError is raised during view processing, an HttpResponseBadRequest
    is sent back to the client with JSON data about the error.
    """
    from functools import wraps

    @wraps(view)  # preserve the wrapped view's name/docstring for URL tooling
    def wrapper(request, course_id):
        """
        Invoke the view, converting any DashboardError into a 400 response.
        """
        try:
            return view(request, course_id=course_id)
        except DashboardError as error:  # 'as' syntax works on py2.6+ and py3
            return error.response()
    return wrapper
def bulk_email_is_enabled_for_course(course_id):
    """
    Staff can only send bulk email for a course if all the following conditions are true:
    1. Bulk email feature flag is on.
    2. It is a studio course.
    3. Bulk email is enabled for the course.
    """
    # Short-circuit so the per-course authorization query is skipped when
    # the global flag is off or the course is XML-based.
    return (
        settings.FEATURES['ENABLE_INSTRUCTOR_EMAIL'] is True
        and modulestore().get_modulestore_type(course_id) != ModuleStoreEnum.Type.xml
        and CourseAuthorization.instructor_email_enabled(course_id)
    )
def strip_if_string(value):
    """
    Strip surrounding whitespace when `value` is a string; return any other
    value unchanged.
    """
    return value.strip() if isinstance(value, basestring) else value
def get_student_from_identifier(unique_student_identifier):
    """
    Gets a student object using either an email address or username.
    Returns the student object associated with `unique_student_identifier`
    Raises User.DoesNotExist if no user object can be found.
    """
    identifier = strip_if_string(unique_student_identifier)
    # An identifier containing '@' is treated as an email address,
    # anything else as a username.
    if "@" in identifier:
        lookup = {'email': identifier}
    else:
        lookup = {'username': identifier}
    return User.objects.get(**lookup)
def require_student_from_identifier(unique_student_identifier):
    """
    Same as get_student_from_identifier() but will raise a DashboardError if
    the student does not exist.
    """
    try:
        return get_student_from_identifier(unique_student_identifier)
    except User.DoesNotExist:
        message = _("Could not find student matching identifier: {student_identifier}")
        raise DashboardError(
            message.format(student_identifier=unique_student_identifier)
        )
def parse_datetime(datestr):
    """
    Convert user input date string into an instance of `datetime.datetime` in
    UTC.  Raises DashboardError when the string cannot be parsed.
    """
    # `import dateutil` alone does not guarantee the `parser` submodule is
    # loaded; import it explicitly so the attribute access cannot fail.
    from dateutil import parser
    try:
        return parser.parse(datestr).replace(tzinfo=utc)
    except ValueError:
        raise DashboardError(_("Unable to parse date: ") + datestr)
def find_unit(course, url):
    """
    Finds the unit (block, module, whatever the terminology is) with the given
    url in the course tree and returns the unit. Raises DashboardError if no
    unit is found.
    """
    # Iterative pre-order depth-first walk of the course tree.
    stack = [course]
    while stack:
        node = stack.pop()
        if node.location.to_deprecated_string() == url:
            return node
        stack.extend(reversed(node.get_children()))
    raise DashboardError(_("Couldn't find module for url: {0}").format(url))
def get_units_with_due_date(course):
    """
    Returns all top level units which have due dates. Does not return
    descendents of those nodes.
    """
    units = []
    # Iterative pre-order walk; once a node has a due date its subtree is
    # not descended into.
    stack = [course]
    while stack:
        node = stack.pop()
        if getattr(node, 'due', None):
            units.append(node)
        else:
            stack.extend(reversed(node.get_children()))
    return units
def title_or_url(node):
    """
    Returns the `display_name` attribute of the passed in node of the course
    tree, if it has one. Otherwise returns the node's url.
    """
    return getattr(node, 'display_name', None) or node.location.to_deprecated_string()
def set_due_date_extension(course, unit, student, due_date):
    """
    Sets a due date extension. Raises DashboardError if the unit or extended
    due date is invalid.

    A truthy `due_date` grants/updates an extension; a falsy one removes an
    existing extension.  (`course` is currently unused in the body and kept
    for interface symmetry with the other dashboard helpers.)
    """
    if due_date:
        # Check that the new due date is valid:
        with disable_overrides():
            # Overrides are disabled so we compare against the unit's
            # *original* due date, not a previously granted extension.
            original_due_date = getattr(unit, 'due', None)
            if not original_due_date:
                raise DashboardError(_("Unit {0} has no due date to extend.").format(unit.location))
            if due_date < original_due_date:
                raise DashboardError(_("An extended due date must be later than the original due date."))
        override_field_for_user(student, unit, 'due', due_date)
    else:
        # We are deleting a due date extension. Check that it exists:
        if not get_override_for_user(student, unit, 'due'):
            raise DashboardError(_("No due date extension is set for that student and unit."))
        clear_override_for_user(student, unit, 'due')
def dump_module_extensions(course, unit):
    """
    Dumps data about students with due date extensions for a particular module,
    specified by 'url', in a particular course.
    """
    header = [_("Username"), _("Full Name"), _("Extended Due Date")]
    overrides = StudentFieldOverride.objects.filter(
        course_id=course.id,
        location=unit.location,
        field='due')
    data = []
    for override in overrides:
        extended_due = DATE_FIELD.from_json(json.loads(override.value))
        data.append(dict(zip(
            header,
            (override.student.username,
             override.student.profile.name,
             extended_due.strftime("%Y-%m-%d %H:%M")))))
    # Sort rows by username for a stable, readable listing.
    data.sort(key=lambda row: row[header[0]])
    return {
        "header": header,
        "title": _("Users with due date extensions for {0}").format(
            title_or_url(unit)),
        "data": data
    }
def dump_student_extensions(course, student):
    """
    Dumps data about the due date extensions granted for a particular student
    in a particular course.
    """
    header = [_("Unit"), _("Extended Due Date")]
    units_by_location = {u.location: u for u in get_units_with_due_date(course)}
    overrides = StudentFieldOverride.objects.filter(
        course_id=course.id,
        student=student,
        field='due')
    data = []
    for override in overrides:
        location = override.location.replace(course_key=course.id)
        unit = units_by_location.get(location)
        # Skip overrides for units that no longer carry a due date.
        if unit is None:
            continue
        extended_due = DATE_FIELD.from_json(json.loads(override.value))
        data.append(dict(zip(
            header,
            (title_or_url(unit), extended_due.strftime("%Y-%m-%d %H:%M")))))
    return {
        "header": header,
        "title": _("Due date extensions for {0} {1} ({2})").format(
            student.first_name, student.last_name, student.username),
        "data": data}
def add_block_ids(payload):
    """
    rather than manually parsing block_ids from module_ids on the client, pass the block_ids explicitly in the payload
    """
    for entry in payload.get('data', ()):
        if 'module_id' in entry:
            entry['block_id'] = UsageKey.from_string(entry['module_id']).block_id
| agpl-3.0 |
mfarhan12/pyrf | examples/calculate_channel_power.py | 1 | 1477 | #!/usr/bin/env python
from pyrf.devices.thinkrf import WSA
from pyrf.sweep_device import SweepDevice
from pyrf.numpy_util import calculate_channel_power
import sys
import time
import math
from matplotlib.pyplot import plot, figure, axis, xlabel, ylabel, show
import numpy as np
def smooth(list, degree=1):
    """
    Apply a crude video-bandwidth style smoothing to a power spectrum.

    Samples above a peak threshold (mean of the strongest ~0.5% of samples
    plus 5 dB) are kept as-is so peaks survive; every other sample is
    replaced by the mean of its +/- `degree` neighbourhood.

    :param list: sequence of power values; parameter name kept for backward
        compatibility even though it shadows the builtin
    :param degree: half-width of the smoothing window; fractional values
        (e.g. an RBW/VBW ratio) are truncated to an integer
    :returns: new Python list of smoothed values
    """
    values = list  # avoid further use of the shadowed builtin name
    degree = int(degree)  # callers pass float ratios; slice bounds must be ints
    # Peak threshold: mean of the top ~0.5% of samples, plus 5 dB.
    peak_threshold = np.mean(sorted(values)[int(0.995 * len(values)):-1]) + 5
    smoothed = []
    for n, value in enumerate(values):
        if value > peak_threshold:
            smoothed.append(value)  # preserve peaks
        else:
            start = max(0, n - degree)
            stop = min(len(values), n + degree)
            smoothed.append(np.mean(values[start:stop]))
    return smoothed
# Sweep configuration: 50 MHz - 27 GHz span at a 100 kHz resolution
# bandwidth, post-processed down to an effective 30 kHz video bandwidth.
START_FREQ = 50e6
STOP_FREQ = 27e9
RBW = 100e3
VBW = 30e3
# connect to wsa (first command-line argument is the device host/IP)
dut = WSA()
dut.connect(sys.argv[1])
dut.request_read_perm()
# declare sweep device
sd = SweepDevice(dut)
# read the spectral data (power spectrum across the full span)
fstart, fstop, spectra_data = sd.capture_power_spectrum(START_FREQ, STOP_FREQ, RBW,
                                                {'attenuator':0})
# apply the VBW algorithm (smoothing half-width is the RBW/VBW ratio)
spectra_data = smooth(spectra_data, max(1, RBW/VBW))
# calculate the channel power: dB -> linear amplitude, sum of squares,
# then back to dB
linear = np.power(10, np.divide(spectra_data,20))
channel_power = 10 * np.log10(np.sum(np.square(linear)))
print channel_power
fig = figure(1)
xvalues = np.linspace(fstart, fstop, len(spectra_data))
xlabel("Frequency")
ylabel("Amplitude")
# plot the smoothed spectrum trace
show() | bsd-3-clause |
Ballz0fSteel/Umeko | lib/pip/_vendor/distro.py | 330 | 38349 | # Copyright 2015,2016 Nir Cohen
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
The ``distro`` package (``distro`` stands for Linux Distribution) provides
information about the Linux distribution it runs on, such as a reliable
machine-readable distro ID, or version information.
It is a renewed alternative implementation for Python's original
:py:func:`platform.linux_distribution` function, but it provides much more
functionality. An alternative implementation became necessary because Python
3.5 deprecated this function, and Python 3.7 is expected to remove it
altogether. Its predecessor function :py:func:`platform.dist` was already
deprecated since Python 2.6 and is also expected to be removed in Python 3.7.
Still, there are many cases in which access to Linux distribution information
is needed. See `Python issue 1322 <https://bugs.python.org/issue1322>`_ for
more information.
"""
import os
import re
import sys
import json
import shlex
import logging
import subprocess
if not sys.platform.startswith('linux'):
raise ImportError('Unsupported platform: {0}'.format(sys.platform))
_UNIXCONFDIR = '/etc'
_OS_RELEASE_BASENAME = 'os-release'
#: Translation table for normalizing the "ID" attribute defined in os-release
#: files, for use by the :func:`distro.id` method.
#:
#: * Key: Value as defined in the os-release file, translated to lower case,
#: with blanks translated to underscores.
#:
#: * Value: Normalized value.
NORMALIZED_OS_ID = {}
#: Translation table for normalizing the "Distributor ID" attribute returned by
#: the lsb_release command, for use by the :func:`distro.id` method.
#:
#: * Key: Value as returned by the lsb_release command, translated to lower
#: case, with blanks translated to underscores.
#:
#: * Value: Normalized value.
NORMALIZED_LSB_ID = {
'enterpriseenterprise': 'oracle', # Oracle Enterprise Linux
'redhatenterpriseworkstation': 'rhel', # RHEL 6.7
}
#: Translation table for normalizing the distro ID derived from the file name
#: of distro release files, for use by the :func:`distro.id` method.
#:
#: * Key: Value as derived from the file name of a distro release file,
#: translated to lower case, with blanks translated to underscores.
#:
#: * Value: Normalized value.
NORMALIZED_DISTRO_ID = {
'redhat': 'rhel', # RHEL 6.x, 7.x
}
# Pattern for content of distro release file (reversed)
_DISTRO_RELEASE_CONTENT_REVERSED_PATTERN = re.compile(
r'(?:[^)]*\)(.*)\()? *(?:STL )?([\d.+\-a-z]*\d) *(?:esaeler *)?(.+)')
# Pattern for base file name of distro release file
_DISTRO_RELEASE_BASENAME_PATTERN = re.compile(
r'(\w+)[-_](release|version)$')
# Base file names to be ignored when searching for distro release file
_DISTRO_RELEASE_IGNORE_BASENAMES = (
'debian_version',
'lsb-release',
'oem-release',
_OS_RELEASE_BASENAME,
'system-release'
)
def linux_distribution(full_distribution_name=True):
    """
    Return ``(id_name, version, codename)`` for the current Linux
    distribution, interface-compatible with the original
    :py:func:`platform.linux_distribution` function.
    ``id_name`` is the result of :func:`distro.name` when
    *full_distribution_name* is true, otherwise of :func:`distro.id`.  The
    data may differ from the platform module's because more sources are
    consulted and distro IDs are normalized to reliable machine-readable
    values.
    """
    # Delegate to the module-global LinuxDistribution instance.
    return _distro.linux_distribution(full_distribution_name)
def id():
    """
    Return the distro ID of the current Linux distribution as a reliable,
    machine-readable string (e.g. "ubuntu", "debian", "rhel", "centos",
    "fedora", "sles", "opensuse", "amazon", "arch", "gentoo", ...).
    The ID is taken from the first non-empty of: the "ID" attribute of the
    os-release file, the "Distributor ID" attribute of the lsb_release
    command, and the first part of the distro release file name.  It is then
    lower-cased, has blanks translated to underscores, and is normalized via
    the NORMALIZED_* translation tables so that it stays stable even across
    incompatible changes in a distribution's release metadata.  To request
    additional reliable IDs, create an issue in the `distro issue tracker`_.
    """
    # Delegate to the module-global LinuxDistribution instance.
    return _distro.id()
def name(pretty=False):
    """
    Return the human-readable name of the current Linux distribution.
    With *pretty* false the bare name is returned (e.g. "CentOS Linux"),
    taken from the os-release "NAME" attribute, the lsb_release
    "Distributor ID" attribute, or the distro release file's "<name>" field,
    in that order.  With *pretty* true the version and codename are appended
    (e.g. "CentOS Linux 7.1.1503 (Core)"), preferring the os-release
    "PRETTY_NAME" and lsb_release "Description" attributes.
    """
    # Delegate to the module-global LinuxDistribution instance.
    return _distro.name(pretty)
def version(pretty=False, best=False):
    """
    Return the version of the current Linux distribution as a string.
    With *pretty* true the codename is appended in parentheses when it is
    non-empty (e.g. "7.0 (Maipo)").  With *best* false the first non-empty
    version in priority order — os-release "VERSION_ID", lsb_release
    "Release", the distro release file, then versions parsed out of
    "PRETTY_NAME"/"Description" — is returned; with *best* true the most
    precise version found across all of those sources is returned instead
    (different sources can expose different precisions, e.g. Debian 8.2 or
    CentOS 7.1).
    """
    # Delegate to the module-global LinuxDistribution instance.
    return _distro.version(pretty, best)
def version_parts(best=False):
    """
    Return the version of the current Linux distribution as a tuple
    ``(major, minor, build_number)`` with items as follows:
    * ``major``: The result of :func:`distro.major_version`.
    * ``minor``: The result of :func:`distro.minor_version`.
    * ``build_number``: The result of :func:`distro.build_number`.
    For a description of the *best* parameter, see the :func:`distro.version`
    method.
    """
    # Delegate to the module-global LinuxDistribution instance.
    return _distro.version_parts(best)
def major_version(best=False):
    """
    Return the major version of the current Linux distribution, as a string,
    if provided.
    Otherwise, the empty string is returned. The major version is the first
    part of the dot-separated version string.
    For a description of the *best* parameter, see the :func:`distro.version`
    method.
    """
    # Delegate to the module-global LinuxDistribution instance.
    return _distro.major_version(best)
def minor_version(best=False):
    """
    Return the minor version of the current Linux distribution, as a string,
    if provided.
    Otherwise, the empty string is returned. The minor version is the second
    part of the dot-separated version string.
    For a description of the *best* parameter, see the :func:`distro.version`
    method.
    """
    # Delegate to the module-global LinuxDistribution instance.
    return _distro.minor_version(best)
def build_number(best=False):
    """
    Return the build number of the current Linux distribution, as a string,
    if provided.
    Otherwise, the empty string is returned. The build number is the third part
    of the dot-separated version string.
    For a description of the *best* parameter, see the :func:`distro.version`
    method.
    """
    # Delegate to the module-global LinuxDistribution instance.
    return _distro.build_number(best)
def like():
    """
    Return a space-separated list of distro IDs of distributions that are
    closely related to the current Linux distribution in regards to packaging
    and programming interfaces, for example distributions the current
    distribution is a derivative from.
    **Lookup hierarchy:**
    This information item is only provided by the os-release file.
    For details, see the description of the "ID_LIKE" attribute in the
    `os-release man page
    <http://www.freedesktop.org/software/systemd/man/os-release.html>`_.
    """
    # Delegate to the module-global LinuxDistribution instance.
    return _distro.like()
def codename():
    """
    Return the codename for the release of the current Linux distribution,
    as a string.
    If the distribution does not have a codename, an empty string is returned.
    Note that the returned codename is not always really a codename. For
    example, openSUSE returns "x86_64". This function does not handle such
    cases in any special way and just returns the string it finds, if any.
    **Lookup hierarchy:**
    * the codename within the "VERSION" attribute of the os-release file, if
      provided,
    * the value of the "Codename" attribute returned by the lsb_release
      command,
    * the value of the "<codename>" field of the distro release file.
    """
    # Delegate to the module-global LinuxDistribution instance.
    return _distro.codename()
def info(pretty=False, best=False):
    """
    Return a dictionary summarizing the current Linux distribution with the
    fixed keys ``id``, ``version``, ``version_parts`` (itself a dict with
    ``major``, ``minor`` and ``build_number``), ``like`` and ``codename``.
    The structure and keys are always the same regardless of which data
    sources are available; each value is the result of the correspondingly
    named accessor function.
    For a description of the *pretty* and *best* parameters, see the
    :func:`distro.version` method.
    """
    # Delegate to the module-global LinuxDistribution instance.
    return _distro.info(pretty, best)
def os_release_info():
    """
    Return a dictionary containing key-value pairs for the information items
    from the os-release file data source of the current Linux distribution.
    See `os-release file`_ for details about these information items.
    """
    # Delegate to the module-global LinuxDistribution instance.
    return _distro.os_release_info()
def lsb_release_info():
    """
    Return a dictionary containing key-value pairs for the information items
    from the lsb_release command data source of the current Linux distribution.
    See `lsb_release command output`_ for details about these information
    items.
    """
    # Delegate to the module-global LinuxDistribution instance.
    return _distro.lsb_release_info()
def distro_release_info():
    """
    Return a dictionary containing key-value pairs for the information items
    from the distro release file data source of the current Linux distribution.
    See `distro release file`_ for details about these information items.
    """
    # Delegate to the module-global LinuxDistribution instance.
    return _distro.distro_release_info()
def os_release_attr(attribute):
    """
    Return a single named information item from the os-release file data source
    of the current Linux distribution.
    Parameters:
    * ``attribute`` (string): Key of the information item.
    Returns:
    * (string): Value of the information item, if the item exists.
      The empty string, if the item does not exist.
    See `os-release file`_ for details about these information items.
    """
    # Delegate to the module-global LinuxDistribution instance.
    return _distro.os_release_attr(attribute)
def lsb_release_attr(attribute):
    """
    Return a single named information item from the lsb_release command output
    data source of the current Linux distribution.
    Parameters:
    * ``attribute`` (string): Key of the information item.
    Returns:
    * (string): Value of the information item, if the item exists.
      The empty string, if the item does not exist.
    See `lsb_release command output`_ for details about these information
    items.
    """
    # Delegate to the module-global LinuxDistribution instance.
    return _distro.lsb_release_attr(attribute)
def distro_release_attr(attribute):
    """
    Return a single named information item from the distro release file
    data source of the current Linux distribution.
    Parameters:
    * ``attribute`` (string): Key of the information item.
    Returns:
    * (string): Value of the information item, if the item exists.
      The empty string, if the item does not exist.
    See `distro release file`_ for details about these information items.
    """
    # Delegate to the module-global LinuxDistribution instance.
    return _distro.distro_release_attr(attribute)
class LinuxDistribution(object):
"""
Provides information about a Linux distribution.
This package creates a private module-global instance of this class with
default initialization arguments, that is used by the
`consolidated accessor functions`_ and `single source accessor functions`_.
By using default initialization arguments, that module-global instance
returns data about the current Linux distribution (i.e. the distro this
package runs on).
Normally, it is not necessary to create additional instances of this class.
However, in situations where control is needed over the exact data sources
that are used, instances of this class can be created with a specific
distro release file, or a specific os-release file, or without invoking the
lsb_release command.
"""
def __init__(self,
include_lsb=True,
os_release_file='',
distro_release_file=''):
"""
The initialization method of this class gathers information from the
available data sources, and stores that in private instance attributes.
Subsequent access to the information items uses these private instance
attributes, so that the data sources are read only once.
Parameters:
* ``include_lsb`` (bool): Controls whether the
`lsb_release command output`_ is included as a data source.
If the lsb_release command is not available in the program execution
path, the data source for the lsb_release command will be empty.
* ``os_release_file`` (string): The path name of the
`os-release file`_ that is to be used as a data source.
An empty string (the default) will cause the default path name to
be used (see `os-release file`_ for details).
If the specified or defaulted os-release file does not exist, the
data source for the os-release file will be empty.
* ``distro_release_file`` (string): The path name of the
`distro release file`_ that is to be used as a data source.
An empty string (the default) will cause a default search algorithm
to be used (see `distro release file`_ for details).
If the specified distro release file does not exist, or if no default
distro release file can be found, the data source for the distro
release file will be empty.
Public instance attributes:
* ``os_release_file`` (string): The path name of the
`os-release file`_ that is actually used as a data source. The
empty string if no distro release file is used as a data source.
* ``distro_release_file`` (string): The path name of the
`distro release file`_ that is actually used as a data source. The
empty string if no distro release file is used as a data source.
Raises:
* :py:exc:`IOError`: Some I/O issue with an os-release file or distro
release file.
* :py:exc:`subprocess.CalledProcessError`: The lsb_release command had
some issue (other than not being available in the program execution
path).
* :py:exc:`UnicodeError`: A data source has unexpected characters or
uses an unexpected encoding.
"""
self.os_release_file = os_release_file or \
os.path.join(_UNIXCONFDIR, _OS_RELEASE_BASENAME)
self.distro_release_file = distro_release_file or '' # updated later
self._os_release_info = self._get_os_release_info()
self._lsb_release_info = self._get_lsb_release_info() \
if include_lsb else {}
self._distro_release_info = self._get_distro_release_info()
def __repr__(self):
"""Return repr of all info
"""
return \
"LinuxDistribution(" \
"os_release_file={0!r}, " \
"distro_release_file={1!r}, " \
"_os_release_info={2!r}, " \
"_lsb_release_info={3!r}, " \
"_distro_release_info={4!r})".format(
self.os_release_file,
self.distro_release_file,
self._os_release_info,
self._lsb_release_info,
self._distro_release_info)
def linux_distribution(self, full_distribution_name=True):
"""
Return information about the Linux distribution that is compatible
with Python's :func:`platform.linux_distribution`, supporting a subset
of its parameters.
For details, see :func:`distro.linux_distribution`.
"""
return (
self.name() if full_distribution_name else self.id(),
self.version(),
self.codename()
)
def id(self):
"""Return the distro ID of the Linux distribution, as a string.
For details, see :func:`distro.id`.
"""
def normalize(distro_id, table):
distro_id = distro_id.lower().replace(' ', '_')
return table.get(distro_id, distro_id)
distro_id = self.os_release_attr('id')
if distro_id:
return normalize(distro_id, NORMALIZED_OS_ID)
distro_id = self.lsb_release_attr('distributor_id')
if distro_id:
return normalize(distro_id, NORMALIZED_LSB_ID)
distro_id = self.distro_release_attr('id')
if distro_id:
return normalize(distro_id, NORMALIZED_DISTRO_ID)
return ''
def name(self, pretty=False):
"""
Return the name of the Linux distribution, as a string.
For details, see :func:`distro.name`.
"""
name = self.os_release_attr('name') \
or self.lsb_release_attr('distributor_id') \
or self.distro_release_attr('name')
if pretty:
name = self.os_release_attr('pretty_name') \
or self.lsb_release_attr('description')
if not name:
name = self.distro_release_attr('name')
version = self.version(pretty=True)
if version:
name = name + ' ' + version
return name or ''
    def version(self, pretty=False, best=False):
        """
        Return the version of the Linux distribution, as a string.
        For details, see :func:`distro.version`.
        """
        # Candidate versions in priority order (highest priority first).
        # The last two fall back to version numbers embedded in the pretty
        # name / description, parsed with the distro-release-file grammar.
        versions = [
            self.os_release_attr('version_id'),
            self.lsb_release_attr('release'),
            self.distro_release_attr('version_id'),
            self._parse_distro_release_content(
                self.os_release_attr('pretty_name')).get('version_id', ''),
            self._parse_distro_release_content(
                self.lsb_release_attr('description')).get('version_id', '')
        ]
        version = ''
        if best:
            # This algorithm uses the last version in priority order that has
            # the best precision. If the versions are not in conflict, that
            # does not matter; otherwise, using the last one instead of the
            # first one might be considered a surprise.
            for v in versions:
                if v.count(".") > version.count(".") or version == '':
                    version = v
        else:
            # First non-empty version in priority order wins.
            for v in versions:
                if v != '':
                    version = v
                    break
        if pretty and version and self.codename():
            version = u'{0} ({1})'.format(version, self.codename())
        return version
def version_parts(self, best=False):
"""
Return the version of the Linux distribution, as a tuple of version
numbers.
For details, see :func:`distro.version_parts`.
"""
version_str = self.version(best=best)
if version_str:
version_regex = re.compile(r'(\d+)\.?(\d+)?\.?(\d+)?')
matches = version_regex.match(version_str)
if matches:
major, minor, build_number = matches.groups()
return major, minor or '', build_number or ''
return '', '', ''
def major_version(self, best=False):
    """
    Return the major version number of the current distribution.
    For details, see :func:`distro.major_version`.
    """
    # First component of the (major, minor, build_number) tuple.
    return self.version_parts(best)[0]

def minor_version(self, best=False):
    """
    Return the minor version number of the Linux distribution.
    For details, see :func:`distro.minor_version`.
    """
    # Second component of the (major, minor, build_number) tuple.
    return self.version_parts(best)[1]

def build_number(self, best=False):
    """
    Return the build number of the Linux distribution.
    For details, see :func:`distro.build_number`.
    """
    # Third component of the (major, minor, build_number) tuple.
    return self.version_parts(best)[2]
def like(self):
    """
    Return the IDs of distributions that are like the Linux distribution.
    For details, see :func:`distro.like`.
    """
    # Taken verbatim from the os-release 'id_like' value; '' when absent.
    return self.os_release_attr('id_like') or ''
def codename(self):
    """
    Return the codename of the Linux distribution.
    For details, see :func:`distro.codename`.

    Data sources are consulted in priority order: os-release, lsb_release,
    distro release file. Returns '' when no source provides a codename.
    """
    for lookup in (self.os_release_attr,
                   self.lsb_release_attr,
                   self.distro_release_attr):
        value = lookup('codename')
        if value:
            return value
    return ''
def info(self, pretty=False, best=False):
    """
    Return certain machine-readable information about the Linux
    distribution as a dictionary.
    For details, see :func:`distro.info`.
    """
    return {
        'id': self.id(),
        'version': self.version(pretty, best),
        'version_parts': {
            'major': self.major_version(best),
            'minor': self.minor_version(best),
            'build_number': self.build_number(best),
        },
        'like': self.like(),
        'codename': self.codename(),
    }
def os_release_info(self):
    """
    Return a dictionary containing key-value pairs for the information
    items from the os-release file data source of the Linux distribution.
    For details, see :func:`distro.os_release_info`.
    """
    # Plain accessor over the cached parse result.
    return self._os_release_info

def lsb_release_info(self):
    """
    Return a dictionary containing key-value pairs for the information
    items from the lsb_release command data source of the Linux
    distribution.
    For details, see :func:`distro.lsb_release_info`.
    """
    # Plain accessor over the cached parse result.
    return self._lsb_release_info

def distro_release_info(self):
    """
    Return a dictionary containing key-value pairs for the information
    items from the distro release file data source of the Linux
    distribution.
    For details, see :func:`distro.distro_release_info`.
    """
    # Plain accessor over the cached parse result.
    return self._distro_release_info

def os_release_attr(self, attribute):
    """
    Return a single named information item from the os-release file data
    source of the Linux distribution, or '' when the item is absent.
    For details, see :func:`distro.os_release_attr`.
    """
    return self._os_release_info.get(attribute, '')

def lsb_release_attr(self, attribute):
    """
    Return a single named information item from the lsb_release command
    output data source of the Linux distribution, or '' when absent.
    For details, see :func:`distro.lsb_release_attr`.
    """
    return self._lsb_release_info.get(attribute, '')

def distro_release_attr(self, attribute):
    """
    Return a single named information item from the distro release file
    data source of the Linux distribution, or '' when absent.
    For details, see :func:`distro.distro_release_attr`.
    """
    return self._distro_release_info.get(attribute, '')
def _get_os_release_info(self):
    """
    Get the information items from the specified os-release file.

    Returns:
        A dictionary containing all information items, or an empty dict
        when the configured os-release file does not exist.
    """
    if os.path.isfile(self.os_release_file):
        with open(self.os_release_file) as release_file:
            return self._parse_os_release_content(release_file)
    return {}
@staticmethod
def _parse_os_release_content(lines):
    """
    Parse the lines of an os-release file.

    Parameters:

    * lines: Iterable through the lines in the os-release file.
             Each line must be a unicode string or a UTF-8 encoded byte
             string.

    Returns:
        A dictionary containing all information items, keyed by the
        lower-cased variable name; a derived 'codename' key is added when
        a VERSION value embeds one.
    """
    props = {}
    lexer = shlex.shlex(lines, posix=True)
    lexer.whitespace_split = True
    # The shlex module defines its `wordchars` variable using literals,
    # making it dependent on the encoding of the Python source file.
    # In Python 2.6 and 2.7, the shlex source file is encoded in
    # 'iso-8859-1', and the `wordchars` variable is defined as a byte
    # string. This causes a UnicodeDecodeError to be raised when the
    # parsed content is a unicode object. The following fix resolves that
    # (... but it should be fixed in shlex...):
    if sys.version_info[0] == 2 and isinstance(lexer.wordchars, bytes):
        lexer.wordchars = lexer.wordchars.decode('iso-8859-1')
    tokens = list(lexer)
    for token in tokens:
        # At this point, all shell-like parsing has been done (i.e.
        # comments processed, quotes and backslash escape sequences
        # processed, multi-line values assembled, trailing newlines
        # stripped, etc.), so the tokens are now either:
        # * variable assignments: var=value
        # * commands or their arguments (not allowed in os-release)
        if '=' in token:
            k, v = token.split('=', 1)
            if isinstance(v, bytes):
                v = v.decode('utf-8')
            props[k.lower()] = v
            if k == 'VERSION':
                # this handles cases in which the codename is in
                # the `(CODENAME)` (rhel, centos, fedora) format
                # or in the `, CODENAME` format (Ubuntu).
                codename = re.search(r'(\(\D+\))|,(\s+)?\D+', v)
                if codename:
                    codename = codename.group()
                    codename = codename.strip('()')
                    codename = codename.strip(',')
                    codename = codename.strip()
                    # codename appears within parentheses.
                    props['codename'] = codename
                else:
                    props['codename'] = ''
        else:
            # Ignore any tokens that are not variable assignments
            pass
    return props
def _get_lsb_release_info(self):
    """
    Get the information items from the lsb_release command output.

    Returns:
        A dictionary containing all information items; an empty dict when
        the command is not installed (exit code 127).

    Raises:
        subprocess.CalledProcessError: when lsb_release exits with any
        other non-zero code.
    """
    cmd = 'lsb_release -a'
    # shell=True is safe here: the command string is a fixed literal,
    # never built from external input.
    process = subprocess.Popen(
        cmd,
        shell=True,
        stdout=subprocess.PIPE,
        stderr=subprocess.PIPE)
    stdout, stderr = process.communicate()
    stdout, stderr = stdout.decode('utf-8'), stderr.decode('utf-8')
    code = process.returncode
    if code == 0:
        content = stdout.splitlines()
        return self._parse_lsb_release_content(content)
    elif code == 127:  # Command not found
        return {}
    else:
        # CalledProcessError grew extra constructor arguments over time,
        # hence the version-dependent raise forms.
        if sys.version_info[:2] >= (3, 5):
            raise subprocess.CalledProcessError(code, cmd, stdout, stderr)
        elif sys.version_info[:2] >= (2, 7):
            raise subprocess.CalledProcessError(code, cmd, stdout)
        elif sys.version_info[:2] == (2, 6):
            raise subprocess.CalledProcessError(code, cmd)
        # NOTE(review): on interpreters older than 2.6 none of the branches
        # match and this method implicitly returns None instead of a dict.
@staticmethod
def _parse_lsb_release_content(lines):
    """
    Parse the output of the lsb_release command.

    Parameters:

    * lines: Iterable through the lines of the lsb_release output.
             Each line must be a unicode string or a UTF-8 encoded byte
             string.

    Returns:
        A dictionary keyed by the lower-cased, underscore-joined label
        (text before the first colon) with the stripped value; lines
        without a colon are skipped.
    """
    props = {}
    for raw_line in lines:
        text = raw_line.decode('utf-8') if isinstance(raw_line, bytes) else raw_line
        parts = text.strip('\n').split(':', 1)
        if len(parts) != 2:
            # Ignore lines without a colon.
            continue
        label, value = parts
        props[label.replace(' ', '_').lower()] = value.strip()
    return props
def _get_distro_release_info(self):
    """
    Get the information items from the specified distro release file.

    If ``self.distro_release_file`` was set, that file is parsed;
    otherwise the system config dir is scanned for a matching release
    file, and the first usable one (in sorted basename order) wins.

    Returns:
        A dictionary containing all information items (possibly empty).
    """
    if self.distro_release_file:
        # If it was specified, we use it and parse what we can, even if
        # its file name or content does not match the expected pattern.
        distro_info = self._parse_distro_release_file(
            self.distro_release_file)
        basename = os.path.basename(self.distro_release_file)
        # The file name pattern for user-specified distro release files
        # is somewhat more tolerant (compared to when searching for the
        # file), because we want to use what was specified as best as
        # possible.
        match = _DISTRO_RELEASE_BASENAME_PATTERN.match(basename)
        if match:
            distro_info['id'] = match.group(1)
        return distro_info
    else:
        basenames = os.listdir(_UNIXCONFDIR)
        # We sort for repeatability in cases where there are multiple
        # distro specific files; e.g. CentOS, Oracle, Enterprise all
        # containing `redhat-release` on top of their own.
        basenames.sort()
        for basename in basenames:
            if basename in _DISTRO_RELEASE_IGNORE_BASENAMES:
                continue
            match = _DISTRO_RELEASE_BASENAME_PATTERN.match(basename)
            if match:
                filepath = os.path.join(_UNIXCONFDIR, basename)
                distro_info = self._parse_distro_release_file(filepath)
                if 'name' in distro_info:
                    # The name is always present if the pattern matches
                    self.distro_release_file = filepath
                    distro_info['id'] = match.group(1)
                    return distro_info
        return {}
def _parse_distro_release_file(self, filepath):
    """
    Parse a distro release file.

    Parameters:

    * filepath: Path name of the distro release file.

    Returns:
        A dictionary containing all information items; empty when the
        file does not exist.
    """
    if os.path.isfile(filepath):
        with open(filepath) as fp:
            # Only parse the first line. For instance, on SLES there
            # are multiple lines. We don't want them...
            return self._parse_distro_release_content(fp.readline())
    return {}
@staticmethod
def _parse_distro_release_content(line):
    """
    Parse a line from a distro release file.

    Parameters:
    * line: Line from the distro release file. Must be a unicode string
            or a UTF-8 encoded byte string.

    Returns:
        A dictionary with 'name' and, when present, 'version_id' and
        'codename' keys.
    """
    if isinstance(line, bytes):
        line = line.decode('utf-8')
    # The pattern is applied to the *reversed* line, and every matched
    # group is reversed back below; presumably this anchors the optional
    # trailing "version (codename)" parts — the pattern itself is defined
    # elsewhere in this module.
    matches = _DISTRO_RELEASE_CONTENT_REVERSED_PATTERN.match(
        line.strip()[::-1])
    distro_info = {}
    if matches:
        # regexp ensures non-None
        distro_info['name'] = matches.group(3)[::-1]
        if matches.group(2):
            distro_info['version_id'] = matches.group(2)[::-1]
        if matches.group(1):
            distro_info['codename'] = matches.group(1)[::-1]
    elif line:
        # No structured match: treat the whole line as the name.
        distro_info['name'] = line.strip()
    return distro_info
_distro = LinuxDistribution()
def main():
    """Command-line entry point: print distro information as text or,
    with --json, as machine-readable JSON (written via a stdout logger)."""
    import argparse

    logger = logging.getLogger(__name__)
    logger.setLevel(logging.DEBUG)
    logger.addHandler(logging.StreamHandler(sys.stdout))

    parser = argparse.ArgumentParser(description="Linux distro info tool")
    parser.add_argument(
        '--json', '-j',
        help="Output in machine readable format",
        action="store_true")
    args = parser.parse_args()

    if args.json:
        logger.info(json.dumps(info(), indent=4, sort_keys=True))
        return

    logger.info('Name: %s', name(pretty=True))
    distribution_version = version(pretty=True)
    if distribution_version:
        logger.info('Version: %s', distribution_version)
    distribution_codename = codename()
    if distribution_codename:
        logger.info('Codename: %s', distribution_codename)
| gpl-3.0 |
TigerZhang/redis-cerberus | test/cluster_launcher.py | 6 | 1319 | import os
import sys
import time
import tempfile
import subprocess
from cStringIO import StringIO
from redistrib import command
def launch():
    """Start four redis-server cluster nodes on ports 8800-8803 and join
    them into a single cluster via redistrib.

    NOTE: Python 2 code (``xrange``; a ``str`` is piped to
    ``Popen.communicate``, which would need bytes under Python 3).
    """
    # Per-node config rendered with .format(); pidfile/logfile/nodes.conf
    # all live in the system temp dir so kill() can find them later.
    template = '''
daemonize yes
port {port}
cluster-node-timeout 5000
pidfile {tmpdir}/redis_cluster_node-{port}.pid
logfile {tmpdir}/redis_cluster_node-{port}.log
save ""
appendonly no
cluster-enabled yes
cluster-config-file {tmpdir}/redis_cluster_node-{port}.conf
'''
    for i in xrange(4):
        # "-" makes redis-server read its configuration from stdin.
        p = subprocess.Popen(['redis-server', '-'], stdin=subprocess.PIPE)
        p.communicate(input=template.format(
            tmpdir=tempfile.gettempdir(), port=8800 + i))
    # Give the daemonized servers a moment to come up before clustering.
    time.sleep(1)
    command.start_cluster_on_multi(
        [('127.0.0.1', 8800 + i) for i in xrange(4)])
def kill():
    """Stop the four cluster nodes started by launch() and remove their
    cluster-config files.

    Reads each node's pidfile from the temp dir, signals the process via
    the external ``kill`` command, and deletes the node's config file.
    Missing files (node never started or already cleaned up) are ignored.
    """
    for i in xrange(4):
        try:
            with open('{tmpdir}/redis_cluster_node-{port}.pid'.format(
                    tmpdir=tempfile.gettempdir(), port=8800 + i), 'r') as f:
                pid = int(f.read())
                subprocess.call(['kill', str(pid)])
            os.remove('{tmpdir}/redis_cluster_node-{port}.conf'.format(
                tmpdir=tempfile.gettempdir(), port=8800 + i))
        except (IOError, OSError):
            # Best-effort cleanup: a missing pidfile/conf simply means the
            # node is not running.
            pass
if __name__ == '__main__':
    # Bug fix: the original compared sys.argv[0] (the script path) against
    # 'launch', so launch() was unreachable in practice. The sub-command is
    # the first real argument; anything else (including no argument) kills.
    if len(sys.argv) > 1 and sys.argv[1] == 'launch':
        launch()
    else:
        kill()
| mit |
TathagataChakraborti/resource-conflicts | PLANROB-2015/seq-sat-lama/Python-2.5.2/Lib/encodings/euc_kr.py | 816 | 1027 | #
# euc_kr.py: Python Unicode Codec for EUC_KR
#
# Written by Hye-Shik Chang <perky@FreeBSD.org>
#
import _codecs_kr, codecs
import _multibytecodec as mbc
codec = _codecs_kr.getcodec('euc_kr')
class Codec(codecs.Codec):
    """Stateless EUC-KR codec delegating to the C implementation."""
    encode = codec.encode
    decode = codec.decode

class IncrementalEncoder(mbc.MultibyteIncrementalEncoder,
                         codecs.IncrementalEncoder):
    """Incremental EUC-KR encoder (multibyte state machine)."""
    codec = codec  # module-level _codecs_kr codec object

class IncrementalDecoder(mbc.MultibyteIncrementalDecoder,
                         codecs.IncrementalDecoder):
    """Incremental EUC-KR decoder (multibyte state machine)."""
    codec = codec  # module-level _codecs_kr codec object

class StreamReader(Codec, mbc.MultibyteStreamReader, codecs.StreamReader):
    """Stream reader for EUC-KR."""
    codec = codec  # module-level _codecs_kr codec object

class StreamWriter(Codec, mbc.MultibyteStreamWriter, codecs.StreamWriter):
    """Stream writer for EUC-KR."""
    codec = codec  # module-level _codecs_kr codec object
def getregentry():
    """Return the CodecInfo entry used to register the 'euc_kr' codec."""
    entry = codecs.CodecInfo(
        name='euc_kr',
        encode=Codec().encode,
        decode=Codec().decode,
        incrementalencoder=IncrementalEncoder,
        incrementaldecoder=IncrementalDecoder,
        streamreader=StreamReader,
        streamwriter=StreamWriter,
    )
    return entry
| mit |
Plexxi/st2 | st2common/st2common/fields.py | 3 | 20188 | # Copyright 2020 The StackStorm Authors.
# Copyright 2019 Extreme Networks, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
NOTE: BaseList and BaseDict classes below are based on mongoengine code from
https://github.com/MongoEngine/mongoengine/blob/master/mongoengine/base/datastructures.py
Mongoengine is licensed under MIT.
"""
from __future__ import absolute_import
from typing import Optional
from typing import Union
import datetime
import calendar
import enum
import weakref
import orjson
from mongoengine import LongField
from mongoengine import BinaryField
from mongoengine.base.datastructures import mark_as_changed_wrapper
from mongoengine.base.datastructures import mark_key_as_changed_wrapper
from mongoengine.common import _import_class
from st2common.util import date as date_utils
from st2common.util import mongoescape
__all__ = ["ComplexDateTimeField"]
SECOND_TO_MICROSECONDS = 1000000
# Delimiter field used for actual JSON dict field binary value
JSON_DICT_FIELD_DELIMITER = b":"
class JSONDictFieldCompressionAlgorithmEnum(enum.Enum):
    """
    Enum which represents the compression algorithm (if any) used for a specific
    JSONDictField value. The single-byte values are stored in the field header.
    """

    NONE = b"n"  # value stored uncompressed
    ZSTANDARD = b"z"  # value compressed with zstandard
class JSONDictFieldSerializationFormatEnum(enum.Enum):
    """
    Enum which represents the serialization format used for a specific
    JSONDictField value. The single-byte value is stored in the field header.
    """

    ORJSON = b"o"  # value serialized with orjson
# Allowed values for the compression-algorithm byte of the field header.
VALID_JSON_DICT_COMPRESSION_ALGORITHMS = [
    JSONDictFieldCompressionAlgorithmEnum.NONE.value,
    JSONDictFieldCompressionAlgorithmEnum.ZSTANDARD.value,
]

# Allowed values for the serialization-format byte of the field header.
VALID_JSON_DICT_SERIALIZATION_FORMATS = [
    JSONDictFieldSerializationFormatEnum.ORJSON.value,
]
class ComplexDateTimeField(LongField):
    """
    Date time field which handles microseconds exactly and internally stores
    the timestamp as number of microseconds since the unix epoch.

    Note: We need to do that because mongoengine serializes this field as comma
    delimited string which breaks sorting.
    """

    def _convert_from_datetime(self, val):
        """
        Convert a `datetime` object to number of microseconds since epoch representation
        (which will be stored in MongoDB). This is the reverse function of
        `_convert_from_db`.
        """
        return self._datetime_to_microseconds_since_epoch(value=val)

    def _convert_from_db(self, value):
        """Convert a DB value (microseconds since epoch) to a tz-aware datetime."""
        return self._microseconds_since_epoch_to_datetime(data=value)

    def _microseconds_since_epoch_to_datetime(self, data):
        """
        Convert a number representation to a `datetime` object (the object you
        will manipulate). This is the reverse function of
        `_convert_from_datetime`.

        :param data: Number of microseconds since the epoch.
        :type data: ``int``
        """
        result = datetime.datetime.utcfromtimestamp(data // SECOND_TO_MICROSECONDS)
        microseconds_remainder = data % SECOND_TO_MICROSECONDS
        result = result.replace(microsecond=microseconds_remainder)
        result = date_utils.add_utc_tz(result)
        return result

    def _datetime_to_microseconds_since_epoch(self, value):
        """
        Convert datetime in UTC to number of microseconds from epoch.

        Note: datetime which is passed to the function needs to be in UTC timezone
        (e.g. as returned by ``datetime.datetime.utcnow``).

        :rtype: ``int``
        """
        # Verify that the value which is passed in contains UTC timezone
        # information.
        if not value.tzinfo or (value.tzinfo.utcoffset(value) != datetime.timedelta(0)):
            raise ValueError(
                "Value passed to this function needs to be in UTC timezone"
            )

        seconds = calendar.timegm(value.timetuple())
        microseconds_remainder = value.time().microsecond
        return int(seconds * SECOND_TO_MICROSECONDS) + microseconds_remainder

    def __get__(self, instance, owner):
        data = super(ComplexDateTimeField, self).__get__(instance, owner)

        if data is None:
            return None
        if isinstance(data, datetime.datetime):
            return data
        return self._convert_from_db(data)

    def __set__(self, instance, value):
        value = self._convert_from_datetime(value) if value else value
        return super(ComplexDateTimeField, self).__set__(instance, value)

    def validate(self, value):
        value = self.to_python(value)
        if not isinstance(value, datetime.datetime):
            self.error("Only datetime objects may used in a " "ComplexDateTimeField")

    def to_python(self, value):
        original_value = value
        try:
            return self._convert_from_db(value)
        except (TypeError, ValueError, OverflowError, OSError):
            # Narrowed from a bare ``except:`` which also swallowed SystemExit and
            # KeyboardInterrupt. Conversion failures (value already a datetime,
            # out-of-range timestamp, etc.) still fall back to returning the raw
            # value unchanged.
            return original_value

    def to_mongo(self, value):
        value = self.to_python(value)
        return self._convert_from_datetime(value)

    def prepare_query_value(self, op, value):
        return self._convert_from_datetime(value)
class BaseList(list):
    """
    Custom list class based on mongoengine.base.datastructures.BaseList which acts as a
    wrapper for list values of JSONDictField and allows us to track changes to the list
    items.

    Tracking changes to the list is important since it allows us to implement more efficient
    partial document updates - e.g. if field A on the model to be updated hasn't changed, the
    actual database save operation will only write out fields whose values have changed.

    This works in exactly the same manner as mongoengine DictField and DynamicField.
    """

    # Weakref proxy to the owning document (set when ``instance`` is a BaseDocument).
    _instance = None
    # Dotted name of this value inside the owning document.
    _name = None

    def __init__(self, list_items, instance, name):
        BaseDocument = _import_class("BaseDocument")

        if isinstance(instance, BaseDocument):
            self._instance = weakref.proxy(instance)
        self._name = name
        super().__init__(list_items)

    def __getitem__(self, key):
        # change index to positive value because MongoDB does not support negative one
        if isinstance(key, int) and key < 0:
            key = len(self) + key
        value = super().__getitem__(key)

        if isinstance(key, slice):
            # When receiving a slice operator, we don't convert the structure and bind
            # to parent's instance. This is buggy for now but would require more work to
            # be handled properly
            return value

        if isinstance(value, dict) and not isinstance(value, BaseDict):
            # Replace dict by BaseDict
            value = BaseDict(value, None, f"{self._name}.{key}")
            super().__setitem__(key, value)
            value._instance = self._instance
        elif isinstance(value, list) and not isinstance(value, BaseList):
            # Replace list by BaseList
            value = BaseList(value, None, f"{self._name}.{key}")
            super().__setitem__(key, value)
            value._instance = self._instance
        return value

    def __iter__(self):
        yield from super().__iter__()

    def __getstate__(self):
        # Bug fix: the original assigned ``self.instance`` (a brand new attribute) and
        # left the weakref proxy in ``self._instance`` intact - and that proxy is what
        # actually cannot be pickled. Clear the real attribute instead.
        self._instance = None
        return self

    def __setstate__(self, state):
        # NOTE(review): rebinding ``self`` has no effect on the unpickled object; this
        # mirrors the upstream mongoengine implementation. Pickle ignores the return
        # value of __setstate__.
        self = state
        return self

    def __setitem__(self, key, value):
        changed_key = key
        if isinstance(key, slice):
            # In case of slice, we don't bother to identify the exact elements being
            # updated; instead, we simply mark the whole list as changed.
            changed_key = None

        result = super().__setitem__(key, value)
        self._mark_as_changed(changed_key)
        return result

    append = mark_as_changed_wrapper(list.append)
    extend = mark_as_changed_wrapper(list.extend)
    insert = mark_as_changed_wrapper(list.insert)
    pop = mark_as_changed_wrapper(list.pop)
    remove = mark_as_changed_wrapper(list.remove)
    reverse = mark_as_changed_wrapper(list.reverse)
    sort = mark_as_changed_wrapper(list.sort)
    __delitem__ = mark_as_changed_wrapper(list.__delitem__)
    __iadd__ = mark_as_changed_wrapper(list.__iadd__)
    __imul__ = mark_as_changed_wrapper(list.__imul__)

    def _mark_as_changed(self, key=None):
        if hasattr(self._instance, "_mark_as_changed"):
            # Since our type is a special binary type, we always mark the top level dict
            # as changed since the whole dict needs to be saved at once; we can't update
            # just a single dict item.
            parent_key_name = self._name.split(".")[0]
            self._instance._mark_as_changed(parent_key_name)
class BaseDict(dict):
    """
    Custom dictionary class based on mongoengine.base.datastructures.BaseDict which acts as a
    wrapper for the dict value of JSONDictField and allows us to track changes to the dict.

    Tracking changes to the dict is important since it allows us to implement more efficient
    partial document updates - e.g. if field A on the model to be updated hasn't changed, the
    actual database save operation will only write out fields whose values have changed.

    This works in exactly the same manner as mongoengine DictField and DynamicField.
    """

    # Weakref proxy to the owning document (set when ``instance`` is a BaseDocument).
    _instance = None
    # Dotted name of this value inside the owning document.
    _name = None

    def __init__(self, dict_items, instance, name):
        BaseDocument = _import_class("BaseDocument")

        if isinstance(instance, BaseDocument):
            self._instance = weakref.proxy(instance)
        self._name = name
        super().__init__(dict_items)

    def get(self, key, default=None):
        try:
            return self.__getitem__(key)
        except KeyError:
            return default

    def __getitem__(self, key):
        value = super().__getitem__(key)

        if isinstance(value, dict) and not isinstance(value, BaseDict):
            value = BaseDict(value, None, f"{self._name}.{key}")
            super().__setitem__(key, value)
            value._instance = self._instance
        # We also need to return a wrapper class in case of a list to ensure updates to
        # the list items are correctly tracked
        elif isinstance(value, list) and not isinstance(value, BaseList):
            value = BaseList(value, None, f"{self._name}.{key}")
            super().__setitem__(key, value)
            value._instance = self._instance
        return value

    def __getstate__(self):
        # Bug fix: the original assigned ``self.instance`` (a brand new attribute) and
        # left the weakref proxy in ``self._instance`` intact - and that proxy is what
        # actually cannot be pickled. Clear the real attribute instead.
        self._instance = None
        return self

    def __setstate__(self, state):
        # NOTE(review): rebinding ``self`` has no effect on the unpickled object; this
        # mirrors the upstream mongoengine implementation. Pickle ignores the return
        # value of __setstate__.
        self = state
        return self

    __setitem__ = mark_key_as_changed_wrapper(dict.__setitem__)
    __delattr__ = mark_key_as_changed_wrapper(dict.__delattr__)
    __delitem__ = mark_key_as_changed_wrapper(dict.__delitem__)
    pop = mark_as_changed_wrapper(dict.pop)
    clear = mark_as_changed_wrapper(dict.clear)
    update = mark_as_changed_wrapper(dict.update)
    popitem = mark_as_changed_wrapper(dict.popitem)
    setdefault = mark_as_changed_wrapper(dict.setdefault)

    def _mark_as_changed(self, key=None):
        if hasattr(self._instance, "_mark_as_changed"):
            # Since our type is a special binary type, we always mark the top level dict
            # as changed since the whole dict needs to be saved at once; we can't update
            # just a single dict item.
            parent_key_name = self._name.split(".")[0]
            self._instance._mark_as_changed(parent_key_name)
class JSONDictField(BinaryField):
    """
    Custom field type which stores dictionaries as JSON serialized strings.

    This is done because storing large objects as JSON serialized strings is much more
    efficient on the serialize and unserialize paths compared to the EscapedDictField which
    needs to escape all the special values ($, .).

    The only downside is that to MongoDB those values are plain raw strings which means you
    can't query on actual dictionary field values. That's not an issue for us, because in
    places where we use it, those values are already treated as plain binary blobs by the
    database layer and we never directly query on those field values.

    In micro benchmarks we have seen speed ups of up to 10x on the write path and up to 6x on
    the read path. The change also scaled down which means it didn't add any additional
    overhead for very small results - in fact, it was also faster for small result
    dictionaries.

    More context and numbers are available at https://github.com/StackStorm/st2/pull/4846.

    NOTES, LIMITATIONS:

    This field type can only be used on dictionary values on which we don't perform direct
    database queries (aka filter on a specific dictionary item value or similar).

    Good examples of those are "result" field on ExecutionDB, LiveActionDB and
    TaskExecutionDB, "output" on WorkflowExecutionDB, etc.

    IMPLEMENTATION DETAILS:

    If the header is used, values are stored in the following format:
    <compression type>:<serialization type>:<serialized binary data>.

    For example:

    n:o:... - No compression, (or)json serialization
    z:o:... - Zstandard compression, (or)json serialization

    If the header is not used, the value is stored as a serialized JSON string of the input
    dictionary.
    """

    def __init__(self, *args, **kwargs):
        # True if we should use a field header which is a more future proof approach and
        # also allows us to support optional per-field compression, etc.
        # This option is only exposed so we can benchmark different approaches and how
        # much overhead using a header adds.
        self.use_header = kwargs.pop("use_header", False)
        self.compression_algorithm = kwargs.pop("compression_algorithm", "none")

        super(JSONDictField, self).__init__(*args, **kwargs)

    def to_mongo(self, value):
        """Serialize the dictionary ``value`` into bytes for storage."""
        if not isinstance(value, dict):
            raise ValueError(
                "value argument must be a dictionary (got: %s)" % type(value)
            )

        data = self._serialize_field_value(value)
        return data

    def to_python(self, value):
        """Deserialize a stored value into a dictionary (no-op if already a dict)."""
        if isinstance(value, dict):
            # Already parsed
            return value

        data = self.parse_field_value(value)
        return data

    def validate(self, value):
        """Validate by round-tripping through serialization, then defer to BinaryField."""
        value = self.to_mongo(value)
        return super(JSONDictField, self).validate(value)

    def parse_field_value(self, value: Optional[Union[bytes, dict]]) -> dict:
        """
        Parse provided binary field value and return parsed value (dictionary).

        For example:

        - (n, o, ...) - no compression, data is serialized using orjson
        - (z, o, ...) - zstandard compression, data is serialized using orjson
        """
        if not value:
            # NOTE(review): falls back to the field default; if the default is a shared
            # mutable dict, callers mutating the returned value would affect other
            # documents - confirm.
            return self.default

        if isinstance(value, dict):
            # Already deserialized
            return value

        if not self.use_header:
            return orjson.loads(value)

        split = value.split(JSON_DICT_FIELD_DELIMITER, 2)

        if len(split) != 3:
            raise ValueError(
                "Expected 3 values when splitting field value, got %s" % (len(split))
            )

        compression_algorithm = split[0]
        serialization_format = split[1]
        data = split[2]

        if compression_algorithm not in VALID_JSON_DICT_COMPRESSION_ALGORITHMS:
            raise ValueError(
                "Invalid or unsupported value for compression algorithm header "
                "value: %s" % (compression_algorithm)
            )

        if serialization_format not in VALID_JSON_DICT_SERIALIZATION_FORMATS:
            raise ValueError(
                "Invalid or unsupported value for serialization format header "
                "value: %s" % (serialization_format)
            )

        if (
            compression_algorithm
            == JSONDictFieldCompressionAlgorithmEnum.ZSTANDARD.value
        ):
            # NOTE: At this point zstandard is only a test dependency
            import zstandard

            data = zstandard.ZstdDecompressor().decompress(data)

        data = orjson.loads(data)
        return data

    def _serialize_field_value(self, value: dict) -> bytes:
        """
        Serialize and encode the provided field value.
        """
        # Orquesta workflows support toSet() YAQL operator which returns a set which used
        # to get serialized to list by mongoengine DictField.
        #
        # For backward compatibility reasons, we need to support serializing set to a
        # list as well.
        #
        # Based on micro benchmarks, using a default function adds very little overhead
        # (1%) so it should be safe to use default for every operation.
        #
        # If this turns out to be not true or it adds more overhead in other scenarios,
        # we should revisit this decision and only use "default" argument where needed
        # (aka Workflow models).
        def default(obj):
            if isinstance(obj, set):
                return list(obj)
            raise TypeError

        if not self.use_header:
            return orjson.dumps(value, default=default)

        data = orjson.dumps(value, default=default)

        if self.compression_algorithm == "zstandard":
            # NOTE: At this point zstandard is only a test dependency
            import zstandard

            compression_header = JSONDictFieldCompressionAlgorithmEnum.ZSTANDARD
            data = zstandard.ZstdCompressor().compress(data)
        else:
            compression_header = JSONDictFieldCompressionAlgorithmEnum.NONE

        return compression_header.value + b":" + b"o:" + data

    def __get__(self, instance, owner):
        """
        We return a custom wrapper over dict which tracks changes to the dictionary and
        allows us to only write the field to the database on update if the field value
        has changed - very important since it means much more efficient partial updates.
        """
        value = super().__get__(instance, owner)

        if isinstance(value, dict) and not isinstance(value, BaseDict):
            value = BaseDict(value, instance, self.name)

        # NOTE: It's important this attribute is set, since only this way mongoengine can
        # determine if the field has changed or not when determining if the value should
        # be written to the db or not
        if instance:
            instance._data[self.name] = value

        return value
class JSONDictEscapedFieldCompatibilityField(JSONDictField):
    """
    Special version of JSONDictField which takes care of compatibility between the old
    EscapedDictField / EscapedDynamicField format and the new one.

    On retrieval, if an old format is detected it's correctly de-serialized and on
    insertion we always insert data in the new format.
    """

    def to_mongo(self, value):
        """Serialize ``value`` for storage, upgrading legacy bytes values to the new
        header format when the field uses a header."""
        if isinstance(value, bytes):
            # Already serialized.
            # Bug fix: indexing bytes yields an int in Python 3, so the original
            # ``value[0] == b"{"`` comparison was always False (dead branch) and the
            # ``"n:o:" + value`` str + bytes concatenation would have raised TypeError.
            if value.startswith(b"{") and self.use_header:
                # Serialized, but doesn't contain the header prefix - add it (assume
                # migration from the format without a header).
                return b"n:o:" + value
            return value

        if not isinstance(value, dict):
            raise ValueError(
                "value argument must be a dictionary (got: %s)" % type(value)
            )

        return self._serialize_field_value(value)

    def to_python(self, value):
        """Deserialize a stored value, handling both the legacy escaped-dict format and
        the new bytes-based format."""
        if isinstance(value, dict):
            # Old format which used a native dict with escaped special characters
            # TODO: We can remove that once we assume there is no more old style data in
            # the database and save quite some time.
            value = mongoescape.unescape_chars(value)
            return value

        if isinstance(value, bytes):
            return self.parse_field_value(value)

        return value
| apache-2.0 |
SQbQxeKd3JHD8/simple_ConTeXt | tests/test_deep_dict.py | 2 | 3059 | import copy
import sys
import unittest
from typing import Dict, TypeVar
import hypothesis as hyp
import hypothesis.strategies as st
sys.path.insert(0, "../scripts")
import deep_dict # noqa
K = TypeVar("K")
V = TypeVar("V")
FEW_EXAMPLES = 15
class TestDeepDict(unittest.TestCase):
    """Property-based tests: each deep_dict operation, when given a
    single-element key path, must behave exactly like the corresponding
    operation on a flat dict."""

    @hyp.given(
        st.dictionaries(st.text(), st.integers()), st.text(), st.integers(),
    )
    @hyp.settings(max_examples=FEW_EXAMPLES)
    def test__set_same_when_flat(
        self, dict_a: Dict[K, V], key: K, val: V,
    ) -> None:
        # deep set with a one-element path == plain item assignment
        dict_b = copy.deepcopy(dict_a)
        dict_a[key] = val
        deep_dict.set_(dict_b, [key], val)
        self.assertEqual(dict_a, dict_b)

    @hyp.given(st.dictionaries(st.text(), st.integers()))
    @hyp.settings(max_examples=FEW_EXAMPLES)
    def test__get_same_when_flat(self, dict_: Dict[K, V]) -> None:
        # deep get with a one-element path == plain subscript
        for key in dict_:
            self.assertEqual(deep_dict.get(dict_, [key]), dict_[key])

    @hyp.given(st.dictionaries(st.text(), st.integers()), st.text())
    @hyp.settings(max_examples=FEW_EXAMPLES)
    def test__get_safe_same_when_flat(self, dict_: Dict[K, V], key: K) -> None:
        # deep get_safe == dict.get (None on missing key)
        self.assertEqual(deep_dict.get_safe(dict_, [key]), dict_.get(key))

    @hyp.given(st.dictionaries(st.text(), st.integers()), st.text())
    @hyp.settings(max_examples=FEW_EXAMPLES)
    def test__in_same_when_flat(self, dict_: Dict[K, V], key: K) -> None:
        # deep membership == `key in dict`
        self.assertEqual(deep_dict.in_(dict_, [key]), key in dict_)

    @hyp.given(st.dictionaries(st.text(), st.integers()))
    @hyp.settings(max_examples=FEW_EXAMPLES)
    def test__iter_same_when_flat(self, dict_: Dict[K, V]) -> None:
        # deep iteration yields ([key], value) pairs in items() order
        self.assertEqual(
            list(deep_dict.iter_(dict_)),
            [([k], v) for k, v in dict_.items()],
        )

    @hyp.given(
        st.dictionaries(st.text(), st.integers()),
        st.dictionaries(st.text(), st.integers()),
    )
    @hyp.settings(max_examples=FEW_EXAMPLES)
    def test__update_same_when_flat(
        self, dict_a: Dict[K, V], new_dict: Dict[K, V],
    ) -> None:
        # deep update == dict.update for flat inputs
        dict_b = copy.deepcopy(dict_a)
        deep_dict.update(dict_a, new_dict)
        dict_b.update(new_dict)
        self.assertEqual(dict_a, dict_b)

    @hyp.given(st.dictionaries(st.text(), st.integers()))
    @hyp.settings(max_examples=FEW_EXAMPLES)
    def test__del_same_when_flat(self, dict_a: Dict[K, V]) -> None:
        # deep delete of each existing key == `del dict[key]`
        keys = list(dict_a)
        dict_b = copy.deepcopy(dict_a)
        for key in keys:
            del dict_a[key]
            deep_dict.del_(dict_b, [key])
        self.assertEqual(dict_a, dict_b)

    @hyp.given(st.dictionaries(st.text(), st.integers()), st.text())
    @hyp.settings(max_examples=FEW_EXAMPLES)
    def test__del_safe_same_when_flat(
        self, dict_a: Dict[K, V], key: K,
    ) -> None:
        # deep del_safe == dict.pop(key, None): missing keys are ignored
        dict_b = copy.deepcopy(dict_a)
        dict_a.pop(key, None)  # type: ignore
        deep_dict.del_safe(dict_b, [key])
        self.assertEqual(dict_a, dict_b)
def main() -> None:
    """Run the test suite with minimal output."""
    unittest.main(verbosity=0)
if __name__ == "__main__":
main()
| mit |
Orochimarufan/youtube-dl | youtube_dl/extractor/laola1tv.py | 20 | 9454 | # coding: utf-8
from __future__ import unicode_literals
import json
import re
from .common import InfoExtractor
from ..utils import (
ExtractorError,
unified_strdate,
urlencode_postdata,
xpath_element,
xpath_text,
update_url_query,
js_to_json,
)
class Laola1TvEmbedIE(InfoExtractor):
    """Extractor for laola1.tv 'titanplayer' embed pages."""
    IE_NAME = 'laola1tv:embed'
    _VALID_URL = r'https?://(?:www\.)?laola1\.tv/titanplayer\.php\?.*?\bvideoid=(?P<id>\d+)'
    _TESTS = [{
        # flashvars.premium = "false";
        'url': 'https://www.laola1.tv/titanplayer.php?videoid=708065&type=V&lang=en&portal=int&customer=1024',
        'info_dict': {
            'id': '708065',
            'ext': 'mp4',
            'title': 'MA Long CHN - FAN Zhendong CHN',
            'uploader': 'ITTF - International Table Tennis Federation',
            'upload_date': '20161211',
        },
    }]

    def _extract_token_url(self, stream_access_url, video_id, data):
        # POSTs the subscription ("abo") payload as JSON; the response carries
        # the token URL under data/stream-access[0].
        return self._download_json(
            self._proto_relative_url(stream_access_url, 'https:'), video_id,
            headers={
                'Content-Type': 'application/json',
            }, data=json.dumps(data).encode())['data']['stream-access'][0]

    def _extract_formats(self, token_url, video_id):
        # The token XML gates stream access; any status other than '0' is an
        # error whose human-readable reason is in the 'comment' attribute.
        token_doc = self._download_xml(
            token_url, video_id, 'Downloading token',
            headers=self.geo_verification_headers())
        token_attrib = xpath_element(token_doc, './/token').attrib
        if token_attrib['status'] != '0':
            raise ExtractorError(
                'Token error: %s' % token_attrib['comment'], expected=True)
        formats = self._extract_akamai_formats(
            '%s?hdnea=%s' % (token_attrib['url'], token_attrib['auth']),
            video_id)
        self._sort_formats(formats)
        return formats

    def _real_extract(self, url):
        video_id = self._match_id(url)
        webpage = self._download_webpage(url, video_id)
        flash_vars = self._search_regex(
            r'(?s)flashvars\s*=\s*({.+?});', webpage, 'flash vars')

        def get_flashvar(x, *args, **kwargs):
            # Look the flashvar up inside the flashvars object literal first,
            # then fall back to a bare assignment anywhere in the page.
            flash_var = self._search_regex(
                r'%s\s*:\s*"([^"]+)"' % x,
                flash_vars, x, default=None)
            if not flash_var:
                flash_var = self._search_regex([
                    r'flashvars\.%s\s*=\s*"([^"]+)"' % x,
                    r'%s\s*=\s*"([^"]+)"' % x],
                    webpage, x, *args, **kwargs)
            return flash_var

        hd_doc = self._download_xml(
            'http://www.laola1.tv/server/hd_video.php', video_id, query={
                'play': get_flashvar('streamid'),
                'partner': get_flashvar('partnerid'),
                'portal': get_flashvar('portalid'),
                'lang': get_flashvar('sprache'),
                'v5ident': '',
            })

        # Shorthand for reading text fields from the hd_video XML document.
        _v = lambda x, **k: xpath_text(hd_doc, './/video/' + x, **k)
        title = _v('title', fatal=True)

        token_url = None
        premium = get_flashvar('premium', default=None)
        if premium:
            # Premium streams carry the token parameters directly in the page.
            token_url = update_url_query(
                _v('url', fatal=True), {
                    'timestamp': get_flashvar('timestamp'),
                    'auth': get_flashvar('auth'),
                })
        else:
            # Non-premium streams need a stream-access round trip posting the
            # required league-subscription ids.
            data_abo = urlencode_postdata(
                dict((i, v) for i, v in enumerate(_v('req_liga_abos').split(','))))
            stream_access_url = update_url_query(
                'https://club.laola1.tv/sp/laola1/api/v3/user/session/premium/player/stream-access', {
                    'videoId': _v('id'),
                    'target': self._search_regex(r'vs_target = (\d+);', webpage, 'vs target'),
                    'label': _v('label'),
                    'area': _v('area'),
                })
            token_url = self._extract_token_url(stream_access_url, video_id, data_abo)

        formats = self._extract_formats(token_url, video_id)

        categories_str = _v('meta_sports')
        categories = categories_str.split(',') if categories_str else []
        is_live = _v('islive') == 'true'

        return {
            'id': video_id,
            'title': self._live_title(title) if is_live else title,
            'upload_date': unified_strdate(_v('time_date')),
            'uploader': _v('meta_organisation'),
            'categories': categories,
            'is_live': is_live,
            'formats': formats,
        }
class Laola1TvBaseIE(Laola1TvEmbedIE):
    """Shared extraction logic for laola1.tv-powered portal pages."""

    def _extract_video(self, url):
        """Extract a video from a portal page via its `conf` player config.

        The page embeds a JS `conf` object pointing at a JSON player
        configuration, which in turn carries the stream-access parameters.
        """
        display_id = self._match_id(url)
        webpage = self._download_webpage(url, display_id)

        if 'Dieser Livestream ist bereits beendet.' in webpage:
            raise ExtractorError('This live stream has already finished.', expected=True)

        conf = self._parse_json(self._search_regex(
            r'(?s)conf\s*=\s*({.+?});', webpage, 'conf'),
            display_id,
            # shareurl contains unquoted JS; strip it before js_to_json.
            transform_source=lambda s: js_to_json(re.sub(r'shareurl:.+,', '', s)))
        video_id = conf['videoid']

        config = self._download_json(conf['configUrl'], video_id, query={
            'videoid': video_id,
            'partnerid': conf['partnerid'],
            'language': conf.get('language', ''),
            'portal': conf.get('portalid', ''),
        })
        error = config.get('error')
        if error:
            raise ExtractorError('%s said: %s' % (self.IE_NAME, error), expected=True)

        video_data = config['video']
        title = video_data['title']
        is_live = video_data.get('isLivestream') and video_data.get('isLive')
        # metaInformation may be absent; fall back to an empty dict so the
        # .get('sports') lookup below cannot raise AttributeError on None.
        meta = video_data.get('metaInformation') or {}
        sports = meta.get('sports')
        categories = sports.split(',') if sports else []

        token_url = self._extract_token_url(
            video_data['streamAccess'], video_id,
            video_data['abo']['required'])

        formats = self._extract_formats(token_url, video_id)

        return {
            'id': video_id,
            'display_id': display_id,
            'title': self._live_title(title) if is_live else title,
            'description': video_data.get('description'),
            'thumbnail': video_data.get('image'),
            'categories': categories,
            'formats': formats,
            'is_live': is_live,
        }
class Laola1TvIE(Laola1TvBaseIE):
    """Extractor for regular laola1.tv video and livestream pages."""
    IE_NAME = 'laola1tv'
    _VALID_URL = r'https?://(?:www\.)?laola1\.tv/[a-z]+-[a-z]+/[^/]+/(?P<id>[^/?#&]+)'
    _TESTS = [{
        'url': 'http://www.laola1.tv/de-de/video/straubing-tigers-koelner-haie/227883.html',
        'info_dict': {
            'id': '227883',
            'display_id': 'straubing-tigers-koelner-haie',
            'ext': 'flv',
            'title': 'Straubing Tigers - Kölner Haie',
            'upload_date': '20140912',
            'is_live': False,
            'categories': ['Eishockey'],
        },
        'params': {
            'skip_download': True,
        },
    }, {
        'url': 'http://www.laola1.tv/de-de/video/straubing-tigers-koelner-haie',
        'info_dict': {
            'id': '464602',
            'display_id': 'straubing-tigers-koelner-haie',
            'ext': 'flv',
            'title': 'Straubing Tigers - Kölner Haie',
            'upload_date': '20160129',
            'is_live': False,
            'categories': ['Eishockey'],
        },
        'params': {
            'skip_download': True,
        },
    }, {
        'url': 'http://www.laola1.tv/de-de/livestream/2016-03-22-belogorie-belgorod-trentino-diatec-lde',
        'info_dict': {
            'id': '487850',
            'display_id': '2016-03-22-belogorie-belgorod-trentino-diatec-lde',
            'ext': 'flv',
            'title': 'Belogorie BELGOROD - TRENTINO Diatec',
            'upload_date': '20160322',
            'uploader': 'CEV - Europäischer Volleyball Verband',
            'is_live': True,
            'categories': ['Volleyball'],
        },
        'params': {
            'skip_download': True,
        },
        'skip': 'This live stream has already finished.',
    }]

    def _real_extract(self, url):
        # All of the work is shared with other laola1.tv-powered portals.
        return self._extract_video(url)
class EHFTVIE(Laola1TvBaseIE):
    """Extractor for ehftv.com (European Handball Federation), a laola1.tv portal."""
    IE_NAME = 'ehftv'
    _VALID_URL = r'https?://(?:www\.)?ehftv\.com/[a-z]+(?:-[a-z]+)?/[^/]+/(?P<id>[^/?#&]+)'
    _TESTS = [{
        'url': 'https://www.ehftv.com/int/video/paris-saint-germain-handball-pge-vive-kielce/1166761',
        'info_dict': {
            'id': '1166761',
            'display_id': 'paris-saint-germain-handball-pge-vive-kielce',
            'ext': 'mp4',
            'title': 'Paris Saint-Germain Handball - PGE Vive Kielce',
            'is_live': False,
            'categories': ['Handball'],
        },
        'params': {
            'skip_download': True,
        },
    }]

    def _real_extract(self, url):
        # All of the work is shared with other laola1.tv-powered portals.
        return self._extract_video(url)
class ITTFIE(InfoExtractor):
    """Extractor for tv.ittf.com; delegates to the laola1.tv embed player."""
    _VALID_URL = r'https?://tv\.ittf\.com/video/[^/]+/(?P<id>\d+)'
    _TEST = {
        'url': 'https://tv.ittf.com/video/peng-wang-wei-matsudaira-kenta/951802',
        'only_matching': True,
    }

    def _real_extract(self, url):
        # Build the titanplayer URL with the fixed ITTF portal parameters and
        # hand extraction over to Laola1TvEmbedIE.
        player_query = {
            'videoid': self._match_id(url),
            'type': 'V',
            'lang': 'en',
            'portal': 'int',
            'customer': 1024,
        }
        embed_url = update_url_query(
            'https://www.laola1.tv/titanplayer.php', player_query)
        return self.url_result(embed_url, Laola1TvEmbedIE.ie_key())
| unlicense |
rtoma/Diamond | src/collectors/monit/test/testmonit.py | 36 | 2804 | #!/usr/bin/python
# coding=utf-8
################################################################################
from test import CollectorTestCase
from test import get_collector_config
from test import unittest
from mock import Mock
from mock import patch
from diamond.collector import Collector
from monit import MonitCollector
################################################################################
class TestMonitCollector(CollectorTestCase):
    """Tests for MonitCollector driven by canned monit XML status fixtures."""

    def setUp(self):
        # Collect in kilobytes so the fixture values asserted below line up.
        config = get_collector_config('MonitCollector',
                                      {'byte_unit': 'kilobyte', })

        self.collector = MonitCollector(config, None)

    def test_import(self):
        self.assertTrue(MonitCollector)

    @patch.object(Collector, 'publish')
    def test_should_work_with_real_data(self, publish_mock):
        # Serve the recorded monit status.xml instead of hitting the network.
        patch_urlopen = patch('urllib2.urlopen', Mock(
            return_value=self.getFixture('status.xml')))

        patch_urlopen.start()
        self.collector.collect()
        patch_urlopen.stop()

        # Expected per-process cpu/memory metrics parsed from the fixture.
        metrics = {
            'app_thin_8101.cpu.percent': 0.9,
            'app_thin_8101.memory.kilobyte_usage': 216104,
            'app_thin_8102.cpu.percent': 1.1,
            'app_thin_8102.memory.kilobyte_usage': 212736,
            'app_thin_8103.cpu.percent': 0.9,
            'app_thin_8103.memory.kilobyte_usage': 204948,
            'app_thin_8104.cpu.percent': 0.9,
            'app_thin_8104.memory.kilobyte_usage': 212464,
            'sshd.cpu.percent': 0.0,
            'sshd.memory.kilobyte_usage': 2588,
            'rsyslogd.cpu.percent': 0.0,
            'rsyslogd.memory.kilobyte_usage': 2664,
            'postfix.cpu.percent': 0.0,
            'postfix.memory.kilobyte_usage': 2304,
            'nginx.cpu.percent': 0.0,
            'nginx.memory.kilobyte_usage': 18684,
            'haproxy.cpu.percent': 0.0,
            'haproxy.memory.kilobyte_usage': 4040,
            'cron.cpu.percent': 0.0,
            'cron.memory.kilobyte_usage': 1036,
        }

        self.setDocExample(collector=self.collector.__class__.__name__,
                           metrics=metrics,
                           defaultpath=self.collector.config['path'])
        self.assertPublishedMany(publish_mock, metrics)

    @patch.object(Collector, 'publish')
    def test_should_fail_gracefully(self, publish_mock):
        # An empty status document must publish nothing rather than raise.
        patch_urlopen = patch(
            'urllib2.urlopen',
            Mock(
                return_value=self.getFixture(
                    'status_blank.xml')))

        patch_urlopen.start()
        self.collector.collect()
        patch_urlopen.stop()

        self.assertPublishedMany(publish_mock, {})
################################################################################
# Allow running this test module directly.
if __name__ == "__main__":
    unittest.main()
| mit |
st135yle/django-site | dbenv/lib/python3.4/site-packages/django/conf/locale/cs/formats.py | 504 | 1702 | # -*- encoding: utf-8 -*-
# This file is distributed under the same license as the Django package.
#
from __future__ import unicode_literals
# The *_FORMAT strings use the Django date format syntax,
# see http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
# Display formats (Django date-format syntax) for the Czech locale.
DATE_FORMAT = 'j. E Y'
TIME_FORMAT = 'G:i'
DATETIME_FORMAT = 'j. E Y G:i'
YEAR_MONTH_FORMAT = 'F Y'
MONTH_DAY_FORMAT = 'j. F'
SHORT_DATE_FORMAT = 'd.m.Y'
SHORT_DATETIME_FORMAT = 'd.m.Y G:i'
FIRST_DAY_OF_WEEK = 1  # Monday

# The *_INPUT_FORMATS strings use the Python strftime format syntax,
# see http://docs.python.org/library/datetime.html#strftime-strptime-behavior
# Input parsing tries each format in order; the first match wins.
DATE_INPUT_FORMATS = [
    '%d.%m.%Y', '%d.%m.%y',  # '05.01.2006', '05.01.06'
    '%d. %m. %Y', '%d. %m. %y',  # '5. 1. 2006', '5. 1. 06'
    # '%d. %B %Y', '%d. %b. %Y',  # '25. October 2006', '25. Oct. 2006'
]
# Kept ISO formats as one is in first position
TIME_INPUT_FORMATS = [
    '%H:%M:%S',  # '04:30:59'
    '%H.%M',  # '04.30'
    '%H:%M',  # '04:30'
]
DATETIME_INPUT_FORMATS = [
    '%d.%m.%Y %H:%M:%S',  # '05.01.2006 04:30:59'
    '%d.%m.%Y %H:%M:%S.%f',  # '05.01.2006 04:30:59.000200'
    '%d.%m.%Y %H.%M',  # '05.01.2006 04.30'
    '%d.%m.%Y %H:%M',  # '05.01.2006 04:30'
    '%d.%m.%Y',  # '05.01.2006'
    '%d. %m. %Y %H:%M:%S',  # '05. 01. 2006 04:30:59'
    '%d. %m. %Y %H:%M:%S.%f',  # '05. 01. 2006 04:30:59.000200'
    '%d. %m. %Y %H.%M',  # '05. 01. 2006 04.30'
    '%d. %m. %Y %H:%M',  # '05. 01. 2006 04:30'
    '%d. %m. %Y',  # '05. 01. 2006'
    '%Y-%m-%d %H.%M',  # '2006-01-05 04.30'
]
# Czech number formatting: decimal comma, thousands in groups of three
# separated by a non-breaking space.
DECIMAL_SEPARATOR = ','
THOUSAND_SEPARATOR = '\xa0'  # non-breaking space
NUMBER_GROUPING = 3
| mit |
BillBillBillBill/Tickeys-linux | tickeys/kivy_32/kivy/core/audio/audio_gstplayer.py | 40 | 2636 | '''
Audio Gstplayer
===============
.. versionadded:: 1.8.0
Implementation of a VideoBase with Kivy :class:`~kivy.lib.gstplayer.GstPlayer`
This player is the prefered player, using Gstreamer 1.0, working on both Python
2 and 3.
'''
from kivy.lib.gstplayer import GstPlayer, get_gst_version
from kivy.core.audio import Sound, SoundLoader
from kivy.logger import Logger
from kivy.compat import PY2
from kivy.clock import Clock
from os.path import realpath
if PY2:
from urllib import pathname2url
else:
from urllib.request import pathname2url
Logger.info('AudioGstplayer: Using Gstreamer {}'.format(
'.'.join(map(str, get_gst_version()))))
def _on_gstplayer_message(mtype, message):
    """Forward a GstPlayer message to the Kivy logger at its severity."""
    log_for_type = {
        'error': Logger.error,
        'warning': Logger.warning,
        'info': Logger.info,
    }.get(mtype)
    # Unknown message types are silently ignored, as before.
    if log_for_type is not None:
        log_for_type('AudioGstplayer: {}'.format(message))
class SoundGstplayer(Sound):
    """Sound implementation backed by Kivy's GstPlayer (Gstreamer 1.0)."""

    @staticmethod
    def extensions():
        # File extensions this backend can decode.
        return ('wav', 'ogg', 'mp3', 'm4a')

    def __init__(self, **kwargs):
        self.player = None
        super(SoundGstplayer, self).__init__(**kwargs)

    def _on_gst_eos_sync(self):
        # Called from the Gstreamer thread; defer to the main (Kivy) thread.
        Clock.schedule_once(self._on_gst_eos, 0)

    def _on_gst_eos(self, *dt):
        # End-of-stream: restart when looping, otherwise stop for real.
        if self.loop:
            self.player.stop()
            self.player.play()
        else:
            self.stop()

    def load(self):
        """(Re)create the underlying GstPlayer for the current filename."""
        self.unload()
        uri = self._get_uri()
        self.player = GstPlayer(uri, None, self._on_gst_eos_sync,
                                _on_gstplayer_message)
        self.player.load()

    def play(self):
        # we need to set the volume everytime, it seems that stopping + playing
        # the sound reset the volume.
        self.player.set_volume(self.volume)
        self.player.play()
        super(SoundGstplayer, self).play()

    def stop(self):
        self.player.stop()
        super(SoundGstplayer, self).stop()

    def unload(self):
        if self.player:
            self.player.unload()
            self.player = None

    def seek(self, position):
        # GstPlayer seeks by relative position in [0, 1], so normalize by the
        # total duration.
        self.player.seek(position / self.length)

    def get_pos(self):
        return self.player.get_position()

    def _get_length(self):
        return self.player.get_duration()

    def on_volume(self, instance, volume):
        self.player.set_volume(volume)

    def _get_uri(self):
        """Return a Gstreamer-compatible URI for the current filename."""
        uri = self.filename
        if not uri:
            return
        # Use the idiomatic `not in` (was: `not '://' in uri`); bare
        # filesystem paths are turned into file: URIs.
        if '://' not in uri:
            uri = 'file:' + pathname2url(realpath(uri))
        return uri
SoundLoader.register(SoundGstplayer)
| mit |
Medigate/cutiuta-server | cutiuta-server/env/lib/python3.4/site-packages/django/core/management/commands/flush.py | 115 | 3938 | from __future__ import unicode_literals
import sys
from importlib import import_module
from django.apps import apps
from django.core.management.base import BaseCommand, CommandError
from django.core.management.color import no_style
from django.core.management.sql import emit_post_migrate_signal, sql_flush
from django.db import DEFAULT_DB_ALIAS, connections, transaction
from django.utils import six
from django.utils.six.moves import input
class Command(BaseCommand):
    """`manage.py flush`: truncate all tables on the chosen database."""
    help = (
        'Removes ALL DATA from the database, including data added during '
        'migrations. Does not achieve a "fresh install" state.'
    )

    def add_arguments(self, parser):
        parser.add_argument(
            '--noinput', '--no-input',
            action='store_false', dest='interactive', default=True,
            help='Tells Django to NOT prompt the user for input of any kind.',
        )
        parser.add_argument(
            '--database', action='store', dest='database', default=DEFAULT_DB_ALIAS,
            help='Nominates a database to flush. Defaults to the "default" database.',
        )

    def handle(self, **options):
        database = options['database']
        connection = connections[database]
        verbosity = options['verbosity']
        interactive = options['interactive']
        # The following are stealth options used by Django's internals.
        reset_sequences = options.get('reset_sequences', True)
        allow_cascade = options.get('allow_cascade', False)
        inhibit_post_migrate = options.get('inhibit_post_migrate', False)

        self.style = no_style()

        # Import the 'management' module within each installed app, to register
        # dispatcher events.
        for app_config in apps.get_app_configs():
            try:
                import_module('.management', app_config.name)
            except ImportError:
                pass

        # Build the list of TRUNCATE/DELETE statements for Django's tables.
        sql_list = sql_flush(self.style, connection, only_django=True,
                             reset_sequences=reset_sequences,
                             allow_cascade=allow_cascade)

        if interactive:
            confirm = input("""You have requested a flush of the database.
This will IRREVERSIBLY DESTROY all data currently in the %r database,
and return each table to an empty state.
Are you sure you want to do this?

    Type 'yes' to continue, or 'no' to cancel: """ % connection.settings_dict['NAME'])
        else:
            confirm = 'yes'

        if confirm == 'yes':
            try:
                # Run all statements in one transaction, with a savepoint only
                # when the backend can roll back DDL.
                with transaction.atomic(using=database,
                                        savepoint=connection.features.can_rollback_ddl):
                    with connection.cursor() as cursor:
                        for sql in sql_list:
                            cursor.execute(sql)
            except Exception as e:
                new_msg = (
                    "Database %s couldn't be flushed. Possible reasons:\n"
                    "  * The database isn't running or isn't configured correctly.\n"
                    "  * At least one of the expected database tables doesn't exist.\n"
                    "  * The SQL was invalid.\n"
                    "Hint: Look at the output of 'django-admin sqlflush'. "
                    "That's the SQL this command wasn't able to run.\n"
                    "The full error: %s") % (connection.settings_dict['NAME'], e)
                six.reraise(CommandError, CommandError(new_msg), sys.exc_info()[2])

            # Empty sql_list may signify an empty database and post_migrate would then crash
            if sql_list and not inhibit_post_migrate:
                # Emit the post migrate signal. This allows individual applications to
                # respond as if the database had been migrated from scratch.
                emit_post_migrate_signal(verbosity, interactive, database)
        else:
            self.stdout.write("Flush cancelled.\n")
| gpl-3.0 |
tensorflow/probability | tensorflow_probability/python/internal/distribution_util.py | 1 | 58073 | # Copyright 2018 The TensorFlow Probability Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Utilities for probability distributions."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import hashlib
import numpy as np
import tensorflow.compat.v2 as tf
from tensorflow_probability.python.internal import assert_util
from tensorflow_probability.python.internal import dtype_util
from tensorflow_probability.python.internal import prefer_static as ps
from tensorflow_probability.python.internal import reparameterization
from tensorflow_probability.python.internal import tensorshape_util
from tensorflow.python.util import tf_inspect # pylint: disable=g-direct-tensorflow-import
def _convert_to_tensor(x, name, dtype=None):
  """Convert `x` to a tensor, passing a `None` input through unchanged."""
  if x is None:
    return None
  return tf.convert_to_tensor(x, name=name, dtype=dtype)
def mixture_stddev(mixture_weight_vector, mean_vector, stddev_vector):
  """Computes the standard deviation of a mixture distribution.

  Works for any component family, given each component's mean and standard
  deviation, via the law of total variance.

  Args:
    mixture_weight_vector: A Tensor with shape `batch_shape + [num_components]`
    mean_vector: A Tensor of mixture component means. Has shape `batch_shape +
      [num_components]`.
    stddev_vector: A Tensor of mixture component standard deviations. Has
      shape `batch_shape + [num_components]`.

  Returns:
    A 1D tensor of shape `batch_shape` representing the mixture's standard
    deviation.

  Raises:
    ValueError: If the shapes of the input tensors are not as expected.
  """
  if not tensorshape_util.is_compatible_with(mean_vector.shape,
                                             mixture_weight_vector.shape):
    raise ValueError('Expecting means to have same shape as mixture weights.')
  if not tensorshape_util.is_compatible_with(stddev_vector.shape,
                                             mixture_weight_vector.shape):
    raise ValueError('Expecting stddevs to have same shape as mixture weights.')

  # Overall mean, kept with a trailing singleton axis for broadcasting.
  mean_of_means = tf.reduce_sum(
      mixture_weight_vector * mean_vector, axis=-1, keepdims=True)
  centered_means = mean_vector - mean_of_means

  # Total stddev = hypot(between-component spread, within-component spread).
  between_term = _weighted_norm(mixture_weight_vector, centered_means)
  within_term = _weighted_norm(mixture_weight_vector, stddev_vector)
  return _hypot(between_term, within_term)
def _hypot(x, y):
  """Overflow-safe elementwise `sqrt(x**2 + y**2)`."""
  # Factor out the larger magnitude so the squares stay in range. When both
  # inputs are 0 the ratio is NaN, but multiply_no_nan maps NaN * 0 -> 0.
  # (More robust to overflow than tf.experimental.numpy.hypot.)
  scale = tf.maximum(tf.math.abs(x), tf.math.abs(y))
  unit_result = tf.sqrt(tf.square(x / scale) + tf.square(y / scale))
  return tf.math.multiply_no_nan(unit_result, scale)
def _weighted_norm(weights, xs):
  """`sqrt(sum_{axis=-1}(w_i * x_i**2))` without overflow in `x_i**2`.

  Unlike `tf.norm`, this supports weights and avoids overflow; the weights
  are assumed < 1, i.e., incapable of causing overflow themselves.
  """
  # Factor out the largest |x| per batch so squaring cannot overflow.
  scale = tf.reduce_max(tf.math.abs(xs), axis=-1)
  scaled_xs = xs / scale[..., tf.newaxis]
  unit_result = tf.sqrt(
      tf.reduce_sum(weights * tf.square(scaled_xs), axis=-1))
  return tf.math.multiply_no_nan(unit_result, scale)
def shapes_from_loc_and_scale(loc, scale, name='shapes_from_loc_and_scale'):
  """Infer distribution batch and event shapes from a location and scale.

  Location and scale family distributions determine their batch/event shape by
  broadcasting the `loc` and `scale` args.  This helper does that broadcast,
  statically if possible.

  Batch shape broadcasts as per the normal rules.
  We allow the `loc` event shape to broadcast up to that of `scale`.  We do not
  allow `scale`'s event shape to change.  Therefore, the last dimension of `loc`
  must either be size `1`, or the same as `scale.range_dimension`.

  See `MultivariateNormalLinearOperator` for a usage example.

  Args:
    loc:  `Tensor` (already converted to tensor) or `None`.  If `None`, or
      `rank(loc)==0`, both batch and event shape are determined by `scale`.
    scale:  A `LinearOperator` instance.
    name:  A string name to prepend to created ops.

  Returns:
    batch_shape:  `TensorShape` (if broadcast is done statically), or `Tensor`.
    event_shape:  `TensorShape` (if broadcast is done statically), or `Tensor`.

  Raises:
    ValueError:  If the last dimension of `loc` is determined statically to be
      different than the range of `scale`.
  """
  if loc is not None and tensorshape_util.rank(loc.shape) == 0:
    loc = None  # scalar loc is irrelevant to determining batch/event shape.
  with tf.name_scope(name):
    # Get event shape.
    # Prefer the static range dimension; fall back to the dynamic one.
    event_size = tf.compat.dimension_value(scale.range_dimension)
    if event_size is None:
      event_size = scale.range_dimension_tensor()
    event_size_ = tf.get_static_value(ps.convert_to_shape_tensor(event_size))
    loc_event_size_ = (None if loc is None
                       else tf.compat.dimension_value(loc.shape[-1]))

    if event_size_ is not None and loc_event_size_ is not None:
      # Static check that event shapes match.
      if loc_event_size_ != 1 and loc_event_size_ != event_size_:
        raise ValueError(
            'Event size of `scale` ({}) could not be broadcast up to that '
            'of `loc` ({}).'.format(event_size_, loc_event_size_))
    elif loc_event_size_ is not None and loc_event_size_ != 1:
      # `loc` pins the event size when `scale`'s is unknown statically.
      event_size_ = loc_event_size_

    if event_size_ is None:
      event_shape = event_size[tf.newaxis]
    else:
      event_shape = ps.convert_to_shape_tensor(
          np.reshape(event_size_, [1]), dtype=tf.int32, name='event_shape')

    # Get batch shape.
    batch_shape = scale.batch_shape
    if not tensorshape_util.is_fully_defined(batch_shape):
      batch_shape = scale.batch_shape_tensor()
    else:
      batch_shape = ps.convert_to_shape_tensor(batch_shape)
    if loc is not None:
      # Broadcast `scale`'s batch shape with `loc`'s (all but the last dim).
      loc_batch_shape = tensorshape_util.with_rank_at_least(loc.shape, 1)[:-1]
      if (tensorshape_util.rank(loc.shape) is None or
          not tensorshape_util.is_fully_defined(loc_batch_shape)):
        loc_batch_shape = tf.shape(loc)[:-1]
      else:
        loc_batch_shape = ps.convert_to_shape_tensor(
            loc_batch_shape, dtype=tf.int32, name='loc_batch_shape')
      # This is defined in the core util module.
      batch_shape = ps.broadcast_shape(batch_shape, loc_batch_shape)
      batch_shape = ps.convert_to_shape_tensor(
          batch_shape, dtype=tf.int32, name='batch_shape')

    return batch_shape, event_shape
def get_broadcast_shape(*tensors):
  """Compute the broadcast shape of all given tensors.

  Args:
    *tensors: One or more `Tensor` objects (already converted!).

  Returns:
    broadcast shape: Python list of integers when every shape is statically
      known, otherwise an `int32` shape `Tensor`.
  """
  # Fold the static shapes together first; only when the static result is not
  # fully known do we fall back to folding dynamic shapes.
  static_shape = functools.reduce(
      lambda acc, t: tf.broadcast_static_shape(acc, t.shape),
      tensors[1:], tensors[0].shape)
  if tensorshape_util.is_fully_defined(static_shape):
    return tensorshape_util.as_list(static_shape)
  return functools.reduce(
      lambda acc, t: tf.broadcast_dynamic_shape(acc, tf.shape(t)),
      tensors[1:], tf.shape(tensors[0]))
def shape_may_be_nontrivial(shape):
  """Returns `True` unless `shape` is statically known to describe a scalar."""
  num_elements = tf.get_static_value(tf.size(shape))
  if num_elements is None:
    # Unknown statically: conservatively assume it could be non-scalar.
    return True
  return num_elements >= 1
def is_diagonal_scale(scale):
  """Returns `True` if `scale` is a `LinearOperator` that is known to be diag.

  Args:
    scale:  `LinearOperator` instance.

  Returns:
    Python `bool`.

  Raises:
    TypeError:  If `scale` is not a `LinearOperator`.
  """
  if not isinstance(scale, tf.linalg.LinearOperator):
    raise TypeError('Expected argument `scale` to be instance of '
                    '`LinearOperator`. Found: `{}`.'.format(scale))
  # A single isinstance with a tuple replaces the chained `or` of three
  # separate isinstance calls.
  return isinstance(scale, (tf.linalg.LinearOperatorIdentity,
                            tf.linalg.LinearOperatorScaledIdentity,
                            tf.linalg.LinearOperatorDiag))
def maybe_check_scalar_distribution(distribution, expected_base_dtype,
                                    validate_args):
  """Helper which checks validity of a scalar `distribution` init arg.

  Valid here means:

  * `distribution` has scalar batch and event shapes.
  * `distribution` is `FULLY_REPARAMETERIZED`
  * `distribution` has expected dtype.

  Args:
    distribution:  `Distribution`-like object.
    expected_base_dtype:  `TensorFlow` `dtype`.
    validate_args:  Python `bool`.  Whether to do additional checks: (i)  check
      that reparameterization_type is `FULLY_REPARAMETERIZED`. (ii) add
      `tf.Assert` ops to the graph to enforce that distribution is scalar in the
      event that this cannot be determined statically.

  Returns:
    List of `tf.Assert` ops to run to enforce validity checks that could not
      be statically determined.  Empty if `not validate_args`.

  Raises:
    ValueError:  If validate_args and distribution is not FULLY_REPARAMETERIZED
    ValueError:  If distribution is statically determined to not have both
      scalar batch and scalar event shapes.
  """
  if distribution.dtype != expected_base_dtype:
    raise TypeError('dtype mismatch; '
                    'distribution.dtype=\'{}\' is not \'{}\''.format(
                        dtype_util.name(distribution.dtype),
                        dtype_util.name(expected_base_dtype)))

  # Although `reparameterization_type` is a static property, we guard it by
  # `validate_args`. This allows users to use a `distribution` which is not
  # reparameterized itself. However, we tacitly assume that although the
  # distribution is not reparameterized, it only depends on non-trainable
  # variables.
  if validate_args and (distribution.reparameterization_type !=
                        reparameterization.FULLY_REPARAMETERIZED):
    raise ValueError('Base distribution should be reparameterized or be '
                     'a function of non-trainable variables; '
                     'distribution.reparameterization_type = \'{}\' '
                     '!= \'FULLY_REPARAMETERIZED\'.'.format(
                         distribution.reparameterization_type))
  with tf.name_scope('check_distribution'):
    assertions = []

    def check_is_scalar(is_scalar, name):
      # Raise eagerly when scalar-ness is statically known to be violated;
      # otherwise (and only if validate_args) defer to a runtime assertion.
      is_scalar_ = tf.get_static_value(is_scalar)
      if is_scalar_ is not None:
        if not is_scalar_:
          raise ValueError('distribution must be scalar; '
                           'distribution.{}=False is not True'.format(name))
      elif validate_args:
        assertions.append(
            assert_util.assert_equal(
                is_scalar,
                True,
                message=('distribution must be scalar; '
                         'distribution.{}=False is not True'.format(name))))

    check_is_scalar(distribution.is_scalar_event(), 'is_scalar_event')
    check_is_scalar(distribution.is_scalar_batch(), 'is_scalar_batch')
    return assertions
def pad_mixture_dimensions(x, mixture_distribution, categorical_distribution,
                           event_ndims):
  """Pad dimensions of event tensors for mixture distributions.

  See `Mixture._sample_n` and `MixtureSameFamily._sample_n` for usage examples.

  Args:
    x: event tensor to pad.
    mixture_distribution: Base distribution of the mixture.
    categorical_distribution: `Categorical` distribution that mixes the base
      distribution.
    event_ndims: Integer specifying the number of event dimensions in the event
      tensor.

  Returns:
    A padded version of `x` that can broadcast with `categorical_distribution`.
  """
  with tf.name_scope('pad_mix_dims'):

    def _get_ndims(d):
      # Static batch rank when available, else the dynamic one.
      if tensorshape_util.rank(d.batch_shape) is not None:
        return tensorshape_util.rank(d.batch_shape)
      return tf.shape(d.batch_shape_tensor())[0]

    dist_batch_ndims = _get_ndims(mixture_distribution)
    cat_batch_ndims = _get_ndims(categorical_distribution)
    # Number of size-1 dims to insert so the categorical batch dims line up.
    pad_ndims = tf.where(categorical_distribution.is_scalar_batch(),
                         dist_batch_ndims, dist_batch_ndims - cat_batch_ndims)
    s = tf.shape(x)
    # Insert `pad_ndims` singleton dims before the mixture-components axis and
    # `event_ndims` singleton dims at the end.
    x = tf.reshape(
        x,
        shape=tf.concat([
            s[:-1],
            tf.ones([pad_ndims], dtype=tf.int32),
            s[-1:],
            tf.ones([event_ndims], dtype=tf.int32),
        ],
                        axis=0))
    return x
def pick_scalar_condition(pred, true_value, false_value, name=None):
  """Chooses between two values based on a scalar boolean predicate.

  Equivalent to a `tf.where` restricted to a scalar predicate, with the result
  computed statically when the predicate's value is known at graph-build time.
  Unlike `tf.cond`, both branches may be evaluated before the predicate's
  truth is known, so prefer `tf.cond` when a branch is expensive; prefer this
  when both branches are cheap (e.g., constants), where it is typically much
  faster on GPU and comparable on CPU.

  Args:
    pred: Scalar `bool` `Tensor` predicate.
    true_value: `Tensor` to return if `pred` is `True`.
    false_value: `Tensor` to return if `pred` is `False`. Must have the same
      shape as `true_value`.
    name: Python `str` name given to ops managed by this object.

  Returns:
    result: a `Tensor` (or `Tensor`-convertible Python value) equal to
      `true_value` if `pred` evaluates to `True` and `false_value` otherwise.
      If the condition can be evaluated statically, the result returned is one
      of the input Python values, with no graph side effects.
  """
  with tf.name_scope(name or 'pick_scalar_condition'):
    pred = tf.convert_to_tensor(pred, dtype_hint=tf.bool, name='pred')
    true_value = tf.convert_to_tensor(true_value, name='true_value')
    false_value = tf.convert_to_tensor(false_value, name='false_value')
    static_pred = tf.get_static_value(pred)
    if static_pred is not None:
      # Statically known: pick the value with no graph side effects.
      return true_value if static_pred else false_value
    return tf.where(pred, true_value, false_value)
def move_dimension(x, source_idx, dest_idx):
  """Move a single tensor dimension within its shape.

  This is a special case of `tf.transpose()`, which applies
  arbitrary permutations to tensor dimensions.

  Args:
    x: Tensor of rank `ndims`.
    source_idx: Integer index into `x.shape` (negative indexing is supported).
    dest_idx: Integer index into `x.shape` (negative indexing is supported).

  Returns:
    x_perm: Tensor of rank `ndims`, in which the dimension at original
     index `source_idx` has been moved to new index `dest_idx`, with
     all other dimensions retained in their original order.

  Example:

  ```python
  x = tf.placeholder(shape=[200, 30, 4, 1, 6])
  x_perm = _move_dimension(x, 1, 1) # no-op
  x_perm = _move_dimension(x, 0, 3) # result shape [30, 4, 1, 200, 6]
  x_perm = _move_dimension(x, 0, -2) # equivalent to previous
  x_perm = _move_dimension(x, 4, 2) # result shape [200, 30, 6, 4, 1]
  ```
  """
  dtype = dtype_util.common_dtype([source_idx, dest_idx],
                                  dtype_hint=tf.int32)
  ndims = ps.cast(ps.rank(x), dtype)
  source_idx = ps.convert_to_shape_tensor(source_idx, dtype=dtype)
  dest_idx = ps.convert_to_shape_tensor(dest_idx, dtype=dtype)

  # Handle negative indexing.
  source_idx = ps.where(source_idx < 0, ndims + source_idx, source_idx)
  dest_idx = ps.where(dest_idx < 0, ndims + dest_idx, dest_idx)

  # Construct the appropriate permutation of dimensions, depending
  # whether the source is before or after the destination.
  def move_left_permutation():
    # Permutation used when dest_idx < source_idx.
    return ps.concat([
        ps.range(0, dest_idx, dtype=dtype),
        [source_idx],
        ps.range(dest_idx, source_idx, dtype=dtype),
        ps.range(source_idx + 1, ndims, dtype=dtype)
    ], axis=0)

  def move_right_permutation():
    # Permutation used when source_idx < dest_idx.
    return ps.concat([
        ps.range(0, source_idx, dtype=dtype),
        ps.range(source_idx + 1, dest_idx + 1, dtype=dtype),
        [source_idx],
        ps.range(dest_idx + 1, ndims, dtype=dtype)
    ], axis=0)

  def x_permuted():
    return tf.transpose(
        a=x,
        perm=ps.cond(source_idx < dest_idx,
                     move_right_permutation,
                     move_left_permutation))

  # One final conditional to handle the special case where source
  # and destination indices are equal.
  return ps.cond(ps.equal(source_idx, dest_idx), lambda: x, x_permuted)
def assert_integer_form(x,
                        summarize=None,
                        message=None,
                        atol=None,
                        rtol=None,
                        name='assert_integer_form'):
  """Assert that x has integer components (or floats near integers).

  Args:
    x: Floating-point or integer `Tensor`.
    summarize: Print this many entries of each tensor.
    message: A string to prefix to the default message.
    atol: Tensor. Same dtype as, and broadcastable to, x. The absolute
      tolerance. Default is 10 * eps.
    rtol: Tensor. Same dtype as, and broadcastable to, x. The relative
      tolerance. Default is 10 * eps.
    name: A name for this operation (optional).
  Returns:
    Op raising `InvalidArgumentError` if `round(x) != x` within tolerance.
  """
  with tf.name_scope(name):
    x = tf.convert_to_tensor(x, name='x')
    if dtype_util.is_integer(x.dtype):
      # Integer dtypes trivially satisfy the check; emit a no-op.
      return tf.no_op()
    # Floating dtype: values must be close to their own rounding.
    return assert_util.assert_near(
        x,
        tf.round(x),
        atol=atol,
        rtol=rtol,
        summarize=summarize,
        message=message or '{} has non-integer components'.format(x),
        name=name)
def assert_casting_closed(x,
                          target_dtype,
                          summarize=None,
                          message=None,
                          name='assert_casting_closed'):
  """Assert that x is fixed under round-trip casting to `target_dtype`.

  Note that even when `target_dtype` is the integer dtype of the same width as
  the dtype of `x`, this is stronger than `assert_integer_form`. This is
  because any given floating-point format can represent integers outside the
  range of the equally wide integer format.

  Args:
    x: Floating-point `Tensor`
    target_dtype: A `tf.dtype` used to cast `x` to.
    summarize: Print this many entries of each tensor.
    message: A string to prefix to the default message.
    name: A name for this operation (optional).
  Returns:
    Op raising `InvalidArgumentError` if `cast(x, target_dtype) != x`.
  """
  with tf.name_scope(name):
    x = tf.convert_to_tensor(x, name='x')
    # Round-trip through target_dtype; any value the target cannot represent
    # exactly will come back changed.
    round_tripped = tf.cast(tf.cast(x, target_dtype), x.dtype)
    if message is None:
      message = 'Tensor values must be representable as {}.'.format(
          target_dtype)
    return assert_util.assert_equal(
        x,
        round_tripped,
        summarize=summarize,
        message=message,
        name=name)
def assert_symmetric(matrix):
  """Returns `matrix`, gated on an assertion that it (nearly) equals its
  own transpose (i.e., is symmetric)."""
  transposed = tf.linalg.matrix_transpose(matrix)
  symmetry_check = assert_util.assert_near(matrix, transposed)
  return with_dependencies([symmetry_check], matrix)
def assert_nondecreasing(x, summarize=None, message=None, name=None):
  """Assert (batched) elements in `x` are non decreasing."""
  with tf.name_scope(name or 'assert_non_decreasing'):
    if message is None:
      message = '`Tensor` contained decreasing values.'
    x = tf.convert_to_tensor(x)
    x_static = tf.get_static_value(x)
    if x_static is not None:
      # Value known at graph-construction time: check eagerly and surface
      # failures as a Python error rather than a graph op.
      if not np.all(x_static[..., :-1] <= x_static[..., 1:]):
        raise ValueError(message)
      return x
    # Dynamic case: compare each element against its right neighbor.
    return assert_util.assert_less_equal(
        x[..., :-1],
        x[..., 1:],
        summarize=summarize,
        message=message)
def assert_nonnegative_integer_form(
    x, atol=None, rtol=None, name='assert_nonnegative_integer_form'):
  """Assert x is a non-negative tensor, and optionally of integers."""
  with tf.name_scope(name):
    x = tf.convert_to_tensor(x, name='x')
    assertions = [
        assert_util.assert_non_negative(
            x, message='`{}` must be non-negative.'.format(x)),
    ]
    if dtype_util.is_integer(x.dtype):
      # Integer dtype cannot carry fractional parts; non-negativity suffices.
      return assertions
    assertions.append(
        assert_integer_form(
            x, atol=atol, rtol=rtol,
            message='`{}` cannot contain fractional components.'.format(x)))
    return assertions
def embed_check_nonnegative_integer_form(
    x, atol=None, rtol=None, name='embed_check_nonnegative_integer_form'):
  """Assert x is a non-negative tensor, and optionally of integers."""
  with tf.name_scope(name):
    x = tf.convert_to_tensor(x, name='x')
    checks = assert_nonnegative_integer_form(x, atol=atol, rtol=rtol)
    # Gate the returned tensor on the assertions so graph-mode consumers
    # cannot read it before the checks run.
    return with_dependencies(checks, x)
def same_dynamic_shape(a, b):
  """Returns whether a and b have the same dynamic shape.
  Args:
    a: `Tensor`
    b: `Tensor`
  Returns:
    `bool` `Tensor` representing if both tensors have the same shape.
  """
  a = tf.convert_to_tensor(a, name='a')
  b = tf.convert_to_tensor(b, name='b')
  # Here we can't just do tf.equal(a.shape, b.shape), since
  # static shape inference may break the equality comparison between
  # shape(a) and shape(b) in tf.equal.
  def all_shapes_equal():
    # Trick: [shape(a), shape(b)] equals [shape(b), shape(a)] elementwise
    # iff shape(a) == shape(b). Concatenating first guarantees the two
    # operands of tf.equal have identical (static) shapes.
    return tf.reduce_all(
        tf.equal(
            tf.concat([tf.shape(a), tf.shape(b)], 0),
            tf.concat([tf.shape(b), tf.shape(a)], 0)))
  # One of the shapes isn't fully defined, so we need to use the dynamic
  # shape. Ranks must match before elementwise comparison makes sense.
  return tf.cond(
      pred=tf.equal(tf.rank(a), tf.rank(b)),
      true_fn=all_shapes_equal,
      false_fn=lambda: tf.constant(False))
def maybe_get_static_value(x, dtype=None):
  """Helper which tries to return a static value.

  Given `x`, extract its value statically, optionally casting to a specific
  dtype. If this is not possible, None is returned.

  Args:
    x: `Tensor` for which to extract a value statically.
    dtype: Optional dtype to cast to.
  Returns:
    Statically inferred value if possible, otherwise None.
  """
  if x is None:
    return None
  try:
    # On success this yields an np.ndarray.
    static_x = tf.get_static_value(x)
  except TypeError:
    # Non-tensor inputs (e.g. plain Python values) pass through unchanged.
    static_x = x
  if static_x is None or dtype is None:
    return static_x
  return np.array(static_x, dtype)
def _is_known_unsigned_by_dtype(dt):
  """Helper returning True if dtype is known to be unsigned."""
  # `bool` is treated as unsigned: its smallest value is 0.
  return dtype_util.base_dtype(dt) in (tf.bool, tf.uint8, tf.uint16)
def _is_known_signed_by_dtype(dt):
  """Helper returning True if dtype is known to be signed."""
  return dtype_util.base_dtype(dt) in (
      tf.float16, tf.float32, tf.float64,
      tf.int8, tf.int16, tf.int32, tf.int64)
def _is_known_dtype(dt):
  """Helper returning True if dtype is known."""
  # A dtype is "known" iff it appears in either the signed or unsigned table.
  return _is_known_signed_by_dtype(dt) or _is_known_unsigned_by_dtype(dt)
def _largest_integer_by_dtype(dt):
  """Helper returning the largest integer exactly representable by dtype."""
  if not _is_known_dtype(dt):
    raise TypeError('Unrecognized dtype: {}'.format(dtype_util.name(dt)))
  numpy_dt = dtype_util.as_numpy_dtype(dt)
  if dtype_util.is_floating(dt):
    # 2**(mantissa_bits + 1) is the largest integer below which floats are
    # gap-free (every integer is exactly representable).
    return int(2**(np.finfo(numpy_dt).nmant + 1))
  if dtype_util.is_integer(dt):
    return np.iinfo(numpy_dt).max
  if dtype_util.base_dtype(dt) == tf.bool:
    return int(1)
  # We actually can't land here but keep the case for completeness.
  raise TypeError('Unrecognized dtype: {}'.format(dtype_util.name(dt)))
def _smallest_integer_by_dtype(dt):
  """Helper returning the smallest integer exactly representable by dtype."""
  if not _is_known_dtype(dt):
    raise TypeError('Unrecognized dtype: {}'.format(dtype_util.name(dt)))
  if _is_known_unsigned_by_dtype(dt):
    return 0
  # Signed types: treat the range as symmetric about zero. (For floats this
  # is exact; for ints it intentionally excludes the most-negative value.)
  return -_largest_integer_by_dtype(dt)
def _is_integer_like_by_dtype(dt):
  """Helper returning True if dtype.is_integer or is `bool`."""
  if not _is_known_dtype(dt):
    raise TypeError('Unrecognized dtype: {}'.format(dtype_util.name(dt)))
  return dtype_util.base_dtype(dt) == tf.bool or dtype_util.is_integer(dt)
def assert_categorical_event_shape(
    categorical_param, name='assert_check_categorical_event_shape'):
  """Embeds checks that categorical distributions don't have too many classes.
  A categorical-type distribution is one which, e.g., returns the class label
  rather than a one-hot encoding. E.g., `Categorical(probs)`.
  Since distributions output samples in the same dtype as the parameters, we
  must ensure that casting doesn't lose precision. That is, the
  `parameter.dtype` implies a maximum number of classes. However, since shape is
  `int32` and categorical variables are presumed to be indexes into a `Tensor`,
  we must also ensure that the number of classes is no larger than the largest
  possible `int32` index, i.e., `2**31-1`.
  In other words the number of classes, `K`, must satisfy the following
  condition:
  ```python
  K <= min(
      int(2**31 - 1), # Largest int32 as an index.
      {
          tf.float16: int(2**11), # Largest int as a float16.
          tf.float32: int(2**24),
          tf.float64: int(2**53),
      }.get(dtype_util.base_dtype(categorical_param.dtype), 0))
  ```
  Args:
    categorical_param: Floating-point `Tensor` representing parameters of
      distribution over categories. The rightmost shape is presumed to be the
      number of categories.
    name: A name for this operation (optional).
  Returns:
    assertions: Python `list` of assertions.
  Raises:
    TypeError: if `categorical_param` has an unknown `dtype`.
    ValueError: if we can statically identify `categorical_param` as being too
      large (for being closed under int32/float casting).
  """
  with tf.name_scope(name):
    x = tf.convert_to_tensor(categorical_param, name='categorical_param')
    # The size must not exceed both of:
    # - The largest possible int32 (since categorical values are presumed to be
    #   indexes into a Tensor).
    # - The largest possible integer exactly representable under the given
    #   floating-point dtype (since we need to cast to/from).
    #
    # The chosen floating-point thresholds are 2**(1 + mantissa_bits).
    # For more details, see:
    # https://en.wikipedia.org/wiki/Floating-point_arithmetic#Internal_representation
    x_dtype = dtype_util.base_dtype(x.dtype)
    max_event_size = (
        _largest_integer_by_dtype(x_dtype)
        if dtype_util.is_floating(x_dtype) else 0)
    if max_event_size == 0:
      raise TypeError('Unable to validate size of unrecognized dtype '
                      '({}).'.format(dtype_util.name(x_dtype)))
    try:
      x_shape_static = tensorshape_util.with_rank_at_least(x.shape, 1)
    except ValueError:
      raise ValueError('A categorical-distribution parameter must have '
                       'at least 1 dimension.')
    event_size = tf.compat.dimension_value(x_shape_static[-1])
    if event_size is not None:
      # Event size known statically: validate eagerly and raise Python
      # errors at graph-construction time; no runtime assertions needed.
      if event_size < 2:
        raise ValueError('A categorical-distribution parameter must have at '
                         'least 2 events.')
      if event_size > max_event_size:
        raise ValueError('Number of classes exceeds `dtype` precision, i.e., '
                         '{} implies shape ({}) cannot exceed {}.'.format(
                             dtype_util.name(x_dtype), event_size,
                             max_event_size))
      return []
    # Event size unknown statically: fall back to graph-time assertions.
    event_size = tf.shape(x, out_type=tf.int64, name='x_shape')[-1]
    return [
        assert_util.assert_rank_at_least(
            x,
            1,
            message=('A categorical-distribution parameter must have '
                     'at least 1 dimension.')),
        assert_util.assert_greater_equal(
            tf.shape(x)[-1],
            2,
            message=('A categorical-distribution parameter must have at '
                     'least 2 events.')),
        assert_util.assert_less_equal(
            event_size,
            tf.convert_to_tensor(max_event_size, dtype=tf.int64),
            message='Number of classes exceeds `dtype` precision, '
            'i.e., {} dtype cannot exceed {} shape.'.format(
                dtype_util.name(x_dtype), max_event_size)),
    ]
def embed_check_categorical_event_shape(
    categorical_param, name='embed_check_categorical_event_shape'):
  """Embeds checks that categorical distributions don't have too many classes.

  A categorical-type distribution is one which, e.g., returns the class label
  rather than a one-hot encoding. E.g., `Categorical(probs)`.

  Since distributions output samples in the same dtype as the parameters, we
  must ensure that casting doesn't lose precision: `parameter.dtype` implies a
  maximum number of classes. Additionally, because shapes are `int32` and
  categorical variables are presumed to index into a `Tensor`, the number of
  classes may not exceed the largest possible `int32` index, `2**31 - 1`.
  Hence the number of classes, `K`, must satisfy:

  ```python
  K <= min(
      int(2**31 - 1),  # Largest int32 as an index.
      {
          tf.float16: int(2**11),  # Largest int as a float16.
          tf.float32: int(2**24),
          tf.float64: int(2**53),
      }.get(dtype_util.base_dtype(categorical_param.dtype), 0))
  ```

  Args:
    categorical_param: Floating-point `Tensor` representing parameters of
      distribution over categories. The rightmost shape is presumed to be the
      number of categories.
    name: A name for this operation (optional).
  Returns:
    categorical_param: Input `Tensor` with appropriate assertions embedded.
  Raises:
    TypeError: if `categorical_param` has an unknown `dtype`.
    ValueError: if we can statically identify `categorical_param` as being too
      large (for being closed under int32/float casting).
  """
  with tf.name_scope(name):
    param = tf.convert_to_tensor(categorical_param, name='categorical_param')
    checks = assert_categorical_event_shape(param)
    # An empty list means everything was validated statically; otherwise gate
    # the returned tensor on the runtime assertions.
    return with_dependencies(checks, param) if checks else param
def embed_check_integer_casting_closed(x,
                                       target_dtype,
                                       assert_nonnegative=True,
                                       assert_positive=False,
                                       name='embed_check_casting_closed'):
  """Ensures integers remain unaffected despite casting to/from int/float types.
  Example integer-types: `uint8`, `int32`, `bool`.
  Example floating-types: `float32`, `float64`.
  The largest possible integer representable by an IEEE754 floating-point is
  `2**(1 + mantissa_bits)` yet the largest possible integer as an int-type is
  `2**(bits - 1) - 1`. This function ensures that a `Tensor` purporting to have
  integer-form values can be cast to some other type without loss of precision.
  The smallest representable integer is the negative of the largest
  representable integer, except for types: `uint8`, `uint16`, `bool`. For these
  types, the smallest representable integer is `0`.
  Args:
    x: `Tensor` representing integer-form values.
    target_dtype: TF `dtype` under which `x` should have identical values.
    assert_nonnegative: `bool` indicating `x` should contain nonnegative values.
    assert_positive: `bool` indicating `x` should contain positive values.
    name: A name for this operation (optional).
  Returns:
    x: Input `Tensor` with appropriate assertions embedded.
  Raises:
    TypeError: if `x` is neither integer- nor floating-type.
    TypeError: if `target_dtype` is neither integer- nor floating-type.
    TypeError: if neither `x` nor `target_dtype` are integer-type.
  """
  with tf.name_scope(name):
    x = tf.convert_to_tensor(x, name='x')
    # Dtype validation is static: these TypeErrors fire at graph-construction
    # time, before any runtime assertions are built.
    if (not _is_integer_like_by_dtype(x.dtype) and
        not dtype_util.is_floating(x.dtype)):
      raise TypeError('{}.dtype must be floating- or '
                      'integer-type.'.format(dtype_util.name(x.dtype)))
    if (not _is_integer_like_by_dtype(target_dtype) and
        not dtype_util.is_floating(target_dtype)):
      raise TypeError('target_dtype ({}) must be floating- or '
                      'integer-type.'.format(dtype_util.name(target_dtype)))
    if (not _is_integer_like_by_dtype(x.dtype) and
        not _is_integer_like_by_dtype(target_dtype)):
      raise TypeError('At least one of {}.dtype ({}) and target_dtype ({}) '
                      'must be integer-type.'.format(
                          x, dtype_util.name(x.dtype),
                          dtype_util.name(target_dtype)))
    assertions = []
    # Sign checks: `assert_positive` takes precedence over
    # `assert_nonnegative` when both are requested.
    if assert_positive:
      assertions += [
          assert_util.assert_positive(x, message='Elements must be positive.'),
      ]
    elif assert_nonnegative:
      assertions += [
          assert_util.assert_non_negative(
              x, message='Elements must be non-negative.'),
      ]
    if dtype_util.is_floating(x.dtype):
      # Being here means _is_integer_like_by_dtype(target_dtype) = True.
      # Since this check implies the magnitude check below, we need only it.
      assertions += [
          assert_casting_closed(
              x,
              target_dtype,
              message='Elements must be {}-equivalent.'.format(
                  dtype_util.name(target_dtype))),
      ]
    else:
      # Integer-to-integer/float cast: only range (never integrality) can be
      # violated, and only when the target range is strictly narrower.
      if (_largest_integer_by_dtype(x.dtype) >
          _largest_integer_by_dtype(target_dtype)):
        # Cast may lose integer precision.
        assertions += [
            assert_util.assert_less_equal(
                x,
                _largest_integer_by_dtype(target_dtype),
                message=('Elements cannot exceed {}.'.format(
                    _largest_integer_by_dtype(target_dtype)))),
        ]
      # A lower-bound check is redundant when non-negativity is asserted.
      if (not assert_nonnegative and (_smallest_integer_by_dtype(
          x.dtype) < _smallest_integer_by_dtype(target_dtype))):
        assertions += [
            assert_util.assert_greater_equal(
                x,
                _smallest_integer_by_dtype(target_dtype),
                message=('Elements cannot be smaller than {}.'.format(
                    _smallest_integer_by_dtype(target_dtype)))),
        ]
    if not assertions:
      return x
    return with_dependencies(assertions, x)
def rotate_transpose(x, shift, name='rotate_transpose'):
  """Circularly moves dims left or right.
  Effectively identical to:
  ```python
  numpy.transpose(x, numpy.roll(numpy.arange(len(x.shape)), shift))
  ```
  When `validate_args=False` additional graph-runtime checks are
  performed. These checks entail moving data from to GPU to CPU.
  Example:
  ```python
  x = tf.random.normal([1, 2, 3, 4]) # Tensor of shape [1, 2, 3, 4].
  rotate_transpose(x, -1).shape == [2, 3, 4, 1]
  rotate_transpose(x, -2).shape == [3, 4, 1, 2]
  rotate_transpose(x, 1).shape == [4, 1, 2, 3]
  rotate_transpose(x, 2).shape == [3, 4, 1, 2]
  rotate_transpose(x, 7).shape == rotate_transpose(x, 3).shape # [2, 3, 4, 1]
  rotate_transpose(x, -7).shape == rotate_transpose(x, -3).shape # [4, 1, 2, 3]
  ```
  Args:
    x: `Tensor`.
    shift: `Tensor`. Number of dimensions to transpose left (shift<0) or
      transpose right (shift>0).
    name: Python `str`. The name to give this op.
  Returns:
    rotated_x: Input `Tensor` with dimensions circularly rotated by shift.
  Raises:
    TypeError: if shift is not integer type.
  """
  with tf.name_scope(name):
    x = tf.convert_to_tensor(x, name='x')
    shift = ps.convert_to_shape_tensor(shift, name='shift')
    # We do not assign back to preserve constant-ness.
    assert_util.assert_integer(shift)
    shift_value_static = tf.get_static_value(shift)
    ndims = tensorshape_util.rank(x.shape)
    if ndims is not None and shift_value_static is not None:
      # Static fast path: rank and shift are known at graph-construction
      # time, so the permutation is a NumPy constant.
      if ndims < 2:
        return x
      # Reduce the shift modulo rank, preserving its sign.
      shift_value_static = np.sign(shift_value_static) * (
          abs(shift_value_static) % ndims)
      if shift_value_static == 0:
        return x
      perm = np.roll(np.arange(ndims), shift_value_static)
      return tf.transpose(a=x, perm=perm)
    else:
      # Consider if we always had a positive shift, and some specified
      # direction.
      # When shifting left we want the new array:
      # last(x, n-shift) + first(x, shift)
      # and if shifting right then we want:
      # last(x, shift) + first(x, n-shift)
      # Observe that last(a) == slice(a, n) and first(a) == slice(0, a).
      # Also, we can encode direction and shift as one: direction * shift.
      # Combining these facts, we have:
      # a = cond(shift<0, -shift, n-shift)
      # last(x, n-a) + first(x, a) == x[a:n] + x[0:a]
      # Finally, we transform shift by modulo length so it can be specified
      # independently from the array upon which it operates (like python).
      ndims = tf.rank(x)
      shift = tf.where(
          tf.less(shift, 0), -shift % ndims, ndims - shift % ndims)
      first = tf.range(0, shift)
      last = tf.range(shift, ndims)
      perm = tf.concat([last, first], 0)
      return tf.transpose(a=x, perm=perm)
def pick_vector(cond, true_vector, false_vector, name='pick_vector'):
  """Picks possibly different length row `Tensor`s based on condition.
  Value `Tensor`s should have exactly one dimension.
  If `cond` is a python Boolean or `tf.constant` then either `true_vector` or
  `false_vector` is immediately returned. I.e., no graph nodes are created and
  no validation happens.
  Args:
    cond: `Tensor`. Must have `dtype=tf.bool` and be scalar.
    true_vector: `Tensor` of one dimension. Returned when cond is `True`.
    false_vector: `Tensor` of one dimension. Returned when cond is `False`.
    name: Python `str`. The name to give this op.
  Example:
  ```python
  pick_vector(tf.less(0, 5), tf.range(10, 12), tf.range(15, 18)) # [10, 11]
  pick_vector(tf.less(5, 0), tf.range(10, 12), tf.range(15, 18)) # [15, 16, 17]
  ```
  Returns:
    true_or_false_vector: `Tensor`.
  Raises:
    TypeError: if `cond.dtype != tf.bool`
    TypeError: if `cond` is not a constant and
      `true_vector.dtype != false_vector.dtype`
  """
  with tf.name_scope(name):
    cond = tf.convert_to_tensor(cond, dtype_hint=tf.bool, name='cond')
    if cond.dtype != tf.bool:
      raise TypeError(
          '{}.dtype={} which is not {}'.format(cond, cond.dtype, tf.bool))
    true_vector = tf.convert_to_tensor(true_vector, name='true_vector')
    false_vector = tf.convert_to_tensor(false_vector, name='false_vector')
    if true_vector.dtype != false_vector.dtype:
      raise TypeError(
          '{}.dtype={} does not match {}.dtype={}'.format(
              true_vector, true_vector.dtype, false_vector, false_vector.dtype))
    cond_value_static = tf.get_static_value(cond)
    if cond_value_static is not None:
      # Statically-known condition: pick eagerly, building no graph nodes.
      return true_vector if cond_value_static else false_vector
    # Dynamic condition: tf.cond can't return different-length branches, so
    # concatenate both vectors and slice out the selected one. `begin` is 0
    # (true branch) or n (false branch); `size` is n or -1, where -1 means
    # "through the end of the concatenation".
    n = tf.shape(true_vector)[0]
    return tf.slice(
        tf.concat([true_vector, false_vector], 0), [tf.where(cond, 0, n)],
        [tf.where(cond, n, -1)])
def prefer_static_broadcast_shape(shape1,
                                  shape2,
                                  name='prefer_static_broadcast_shape'):
  """Convenience function which statically broadcasts shape when possible.

  Args:
    shape1: `1-D` integer `Tensor`. Already converted to tensor!
    shape2: `1-D` integer `Tensor`. Already converted to tensor!
    name: A string name to prepend to created ops.
  Returns:
    The broadcast shape, either as `TensorShape` (if broadcast can be done
    statically), or as a `Tensor`.
  """
  with tf.name_scope(name):
    def as_shape_tensor(s):
      # Canonicalize to an int32 shape tensor.
      return tf.convert_to_tensor(s, name='shape', dtype=tf.int32)
    def maybe_static_shape(s):
      # Returns a TensorShape when `s` is statically known, else None.
      if isinstance(s, tf.TensorShape):
        return s
      static_s = tf.get_static_value(as_shape_tensor(s))
      return None if static_s is None else tf.TensorShape(static_s)
    def dynamic_shape(s):
      if not isinstance(s, tf.TensorShape):
        return as_shape_tensor(s)
      if tensorshape_util.is_fully_defined(s):
        return as_shape_tensor(tensorshape_util.as_list(s))
      raise ValueError('Cannot broadcast from partially '
                       'defined `TensorShape`.')
    static1 = maybe_static_shape(shape1)
    static2 = maybe_static_shape(shape2)
    if static1 is not None and static2 is not None:
      # Both shapes known statically: broadcast at graph-construction time.
      return tf.broadcast_static_shape(static1, static2)
    return tf.broadcast_dynamic_shape(dynamic_shape(shape1),
                                      dynamic_shape(shape2))
def prefer_static_rank(x):
  """Return static rank of tensor `x` if available, else `tf.rank(x)`.
  Args:
    x: `Tensor` (already converted).
  Returns:
    Numpy array (if static rank is obtainable), else `Tensor`.
  """
  # Thin wrapper kept for backwards compatibility; `ps.rank` implements the
  # static-if-possible-else-dynamic logic.
  return ps.rank(x)
def prefer_static_shape(x):
  """Return static shape of tensor `x` if available, else `tf.shape(x)`.
  Args:
    x: `Tensor` (already converted).
  Returns:
    Numpy array (if static shape is obtainable), else `Tensor`.
  """
  # Thin wrapper kept for backwards compatibility; `ps.shape` implements the
  # static-if-possible-else-dynamic logic.
  return ps.shape(x)
def prefer_static_value(x):
  """Return static value of tensor `x` if available, else `x`.

  Args:
    x: `Tensor` (already converted).
  Returns:
    Numpy array (if static value is obtainable), else `Tensor`.
  """
  static_x = tf.get_static_value(x)
  return x if static_x is None else static_x
def gen_new_seed(seed, salt):
  """Generate a new seed, from the given seed and salt."""
  if seed is None:
    return None
  digest = hashlib.md5((str(seed) + salt).encode('utf-8')).hexdigest()
  # Take the first 8 hex digits (32 bits) and clear the sign bit so the
  # result is a non-negative int32.
  return int(digest[:8], 16) & 0x7FFFFFFF
# TODO(b/35290280): Add unit-tests.
def dimension_size(x, axis):
  """Returns the size of a specific dimension."""
  # Since tf.gather isn't 'constant-in, constant-out', we must first check the
  # static shape or fallback to dynamic shape.
  static_size = tf.compat.dimension_value(
      tensorshape_util.with_rank_at_least(x.shape, np.abs(axis))[axis])
  if static_size is not None:
    return static_size
  return tf.shape(x)[axis]
def process_quadrature_grid_and_probs(quadrature_grid_and_probs,
                                      dtype,
                                      validate_args,
                                      name=None):
  """Validates quadrature grid, probs or computes them as necessary.
  Args:
    quadrature_grid_and_probs: Python pair of `float`-like `Tensor`s
      representing the sample points and the corresponding (possibly
      normalized) weight. When `None`, defaults to:
      `np.polynomial.hermite.hermgauss(deg=8)`.
    dtype: The expected `dtype` of `grid` and `probs`.
    validate_args: Python `bool`, default `False`. When `True` distribution
      parameters are checked for validity despite possibly degrading runtime
      performance. When `False` invalid inputs may silently render incorrect
      outputs.
    name: Python `str` name prefixed to Ops created by this class.
  Returns:
    quadrature_grid_and_probs: Python pair of `float`-like `Tensor`s
      representing the sample points and the corresponding (possibly
      normalized) weight.
  Raises:
    ValueError: if `quadrature_grid_and_probs is not None` and
      `len(quadrature_grid_and_probs[0]) != len(quadrature_grid_and_probs[1])`
  """
  with tf.name_scope(name or 'process_quadrature_grid_and_probs'):
    if quadrature_grid_and_probs is None:
      # Default: degree-8 Gauss-Hermite quadrature, weights normalized to
      # sum to one (L1 normalization).
      grid, probs = np.polynomial.hermite.hermgauss(deg=8)
      grid = grid.astype(dtype_util.as_numpy_dtype(dtype))
      probs = probs.astype(dtype_util.as_numpy_dtype(dtype))
      probs /= np.linalg.norm(probs, ord=1, keepdims=True)
      grid = tf.convert_to_tensor(grid, name='grid', dtype=dtype)
      probs = tf.convert_to_tensor(probs, name='probs', dtype=dtype)
      return grid, probs
    grid, probs = tuple(quadrature_grid_and_probs)
    grid = tf.convert_to_tensor(grid, name='grid', dtype=dtype)
    probs = tf.convert_to_tensor(probs, name='unnormalized_probs', dtype=dtype)
    # Normalize user-supplied weights along the last axis (L1 norm).
    probs /= tf.norm(probs, ord=1, axis=-1, keepdims=True, name='probs')
    def _static_event_size(x):
      """Returns the static size of a specific dimension or `None`."""
      return tf.compat.dimension_value(
          tensorshape_util.with_rank_at_least(x.shape, 1)[-1])
    m, n = _static_event_size(probs), _static_event_size(grid)
    if m is not None and n is not None:
      # Both lengths known statically: validate eagerly in Python.
      if m != n:
        raise ValueError('`quadrature_grid_and_probs` must be a `tuple` of '
                         'same-length zero-th-dimension `Tensor`s '
                         '(saw lengths {}, {})'.format(m, n))
    elif validate_args:
      # Lengths unknown statically: emit a graph-time assertion on request.
      assertions = [
          assert_util.assert_equal(
              dimension_size(probs, axis=-1),
              dimension_size(grid, axis=-1),
              message=('`quadrature_grid_and_probs` must be a `tuple` of '
                       'same-length zero-th-dimension `Tensor`s')),
      ]
      with tf.control_dependencies(assertions):
        grid = tf.identity(grid)
        probs = tf.identity(probs)
    return grid, probs
def pad(x, axis, front=False, back=False, value=0, count=1, name=None):
  """Pads `value` to the front and/or back of a `Tensor` dim, `count` times.
  Args:
    x: `Tensor` input.
    axis: Scalar `int`-like `Tensor` representing the single dimension to pad.
      (Negative indexing is supported.)
    front: Python `bool`; if `True` the beginning of the `axis` dimension is
      padded with `value`, `count` times. If `False` no front padding is made.
    back: Python `bool`; if `True` the end of the `axis` dimension is padded
      with `value`, `count` times. If `False` no end padding is made.
    value: Scalar `int`-like `Tensor` representing the actual value added to the
      front and/or back of the `axis` dimension of `x`.
    count: Scalar `int`-like `Tensor` representing number of elements added to
      the front and/or back of the `axis` dimension of `x`. E.g., if `front =
      back = True` then `2 * count` elements are added.
    name: Python `str` name prefixed to Ops created by this function.
  Returns:
    pad: The padded version of input `x`.
  Raises:
    ValueError: if both `front` and `back` are `False`.
    TypeError: if `count` is not `int`-like.
  """
  with tf.name_scope(name or 'pad'):
    x = tf.convert_to_tensor(x, name='x')
    value = tf.convert_to_tensor(value, dtype=x.dtype, name='value')
    count = ps.convert_to_shape_tensor(count, name='count')
    if not dtype_util.is_integer(count.dtype):
      raise TypeError('`count.dtype` (`{}`) must be `int`-like.'.format(
          dtype_util.name(count.dtype)))
    if not front and not back:
      raise ValueError('At least one of `front`, `back` must be `True`.')
    # Prefer the statically-known rank when available.
    ndims = (
        tensorshape_util.rank(x.shape)
        if tensorshape_util.rank(x.shape) is not None else tf.rank(
            x, name='ndims'))
    axis = tf.convert_to_tensor(axis, name='axis')
    axis_ = tf.get_static_value(axis)
    if axis_ is not None:
      # Static axis: resolve negative indexing now and try to infer the
      # padded result's static shape.
      axis = axis_
      if axis < 0:
        axis = ndims + axis
      count_ = tf.get_static_value(count)
      if axis_ >= 0 or tensorshape_util.rank(x.shape) is not None:
        head = x.shape[:axis]
        mid_dim_value = tf.compat.dimension_value(x.shape[axis])
        if count_ is None or mid_dim_value is None:
          middle = tf.TensorShape(None)
        else:
          # Python bools add as ints, so `front + back` is 0, 1, or 2.
          middle = tf.TensorShape(mid_dim_value + count_ * (front + back))
        tail = x.shape[axis + 1:]
        final_shape = tensorshape_util.concatenate(
            head, tensorshape_util.concatenate(middle, tail))
      else:
        final_shape = None
    else:
      # Dynamic axis: resolve negative indexing in-graph; static result
      # shape is unknowable.
      axis = tf.where(axis < 0, ndims + axis, axis)
      final_shape = None
    # one_hot builds the `[ndims, 2]` paddings matrix for tf.pad: column 0
    # holds front-padding counts, column 1 back-padding counts. An index of
    # -1 is out of range and thus selects no row (no padding on that side).
    x = tf.pad(
        x,
        paddings=ps.one_hot(
            indices=ps.stack([axis if front else -1, axis if back else -1]),
            depth=ndims,
            axis=0,
            on_value=count,
            dtype=tf.int32),
        constant_values=value)
    if final_shape is not None:
      # Re-attach the statically-inferred shape lost by tf.pad.
      tensorshape_util.set_shape(x, final_shape)
    return x
def parent_frame_arguments():
  """Returns parent frame arguments.
  When called inside a function, returns a dictionary with the caller's function
  arguments. These are positional arguments and keyword arguments (**kwargs),
  while variable arguments (*varargs) are excluded.
  When called at global scope, this will return an empty dictionary, since there
  are no arguments.
  WARNING: If caller function argument names are overloaded before invoking
  this method, then values will reflect the overloaded value. For this reason,
  we recommend calling `parent_frame_arguments` at the beginning of the
  function.
  """
  # All arguments and the names used for *varargs, and **kwargs
  # NOTE(review): relies on protected `tf_inspect._inspect` internals and on
  # being called exactly one frame below the function of interest — fragile
  # if this call is ever wrapped in another helper.
  arg_names, variable_arg_name, keyword_arg_name, local_vars = (
      tf_inspect._inspect.getargvalues( # pylint: disable=protected-access
          # Get the first frame of the caller of this method.
          tf_inspect._inspect.stack()[1][0])) # pylint: disable=protected-access
  # Remove the *varargs, and flatten the **kwargs. Both are
  # nested lists.
  local_vars.pop(variable_arg_name, {})
  keyword_args = local_vars.pop(keyword_arg_name, {})
  final_args = {}
  # Copy over arguments and their values. In general, local_vars
  # may contain more than just the arguments, since this method
  # can be called anywhere in a function.
  for arg_name in arg_names:
    final_args[arg_name] = local_vars.pop(arg_name)
  final_args.update(keyword_args)
  return final_args
class AppendDocstring(object):
  """Helper class to promote private subclass docstring to public counterpart.

  Example:

  ```python
  class TransformedDistribution(Distribution):
    @AppendDocstring(
        additional_note='A special note!',
        kwargs_dict={'foo': 'An extra arg.'})
    def _prob(self, y, foo=None):
      pass
  ```

  Here the `AppendDocstring` decorator appends `additional_note` to the
  docstring of `prob` (not `_prob`) and adds a new `kwargs` section listing
  each dictionary item as a bullet-point.

  For a more detailed example, see `TransformedDistribution`.
  """
  def __init__(self, additional_note='', kwargs_dict=None):
    """Initializes the AppendDocstring object.

    Args:
      additional_note: Python string added as additional docstring to public
        version of function.
      kwargs_dict: Python string/string dictionary representing specific kwargs
        expanded from the **kwargs input.
    Raises:
      ValueError: if kwargs_dict.key contains whitespace.
      ValueError: if kwargs_dict.value contains newlines.
    """
    self._additional_note = additional_note
    if kwargs_dict:
      bullet_lines = []
      # Iterate keys in sorted order so the rendered section is deterministic.
      for key in sorted(kwargs_dict):
        if any(ch.isspace() for ch in key):
          raise ValueError('Parameter name \'%s\' contains whitespace.' % key)
        description = kwargs_dict[key].lstrip()
        if '\n' in description:
          raise ValueError(
              'Parameter description for \'%s\' contains newlines.' % key)
        bullet_lines.append('* `%s`: %s' % (key, description))
      self._additional_note += ('\n\n##### `kwargs`:\n\n'
                                + '\n'.join(bullet_lines))
  def __call__(self, fn):
    # Wrap `fn` so we can mutate the wrapper's __doc__ without touching `fn`.
    @functools.wraps(fn)
    def _fn(*args, **kwargs):
      return fn(*args, **kwargs)
    if _fn.__doc__ is None:
      _fn.__doc__ = self._additional_note
    else:
      _fn.__doc__ += '\n%s' % self._additional_note
    return _fn
def expand_to_vector(x, tensor_name=None, op_name=None, validate_args=False):
  """Transform a 0-D or 1-D `Tensor` to be 1-D.
  For user convenience, many parts of the TensorFlow Probability API accept
  inputs of rank 0 or 1 -- i.e., allowing an `event_shape` of `[5]` to be passed
  to the API as either `5` or `[5]`. This function can be used to transform
  such an argument to always be 1-D.
  NOTE: Python or NumPy values will be converted to `Tensor`s with standard type
  inference/conversion. In particular, an empty list or tuple will become an
  empty `Tensor` with dtype `float32`. Callers should convert values to
  `Tensor`s before calling this function if different behavior is desired
  (e.g. converting empty lists / other values to `Tensor`s with dtype `int32`).
  Args:
    x: A 0-D or 1-D `Tensor`.
    tensor_name: Python `str` name for `Tensor`s created by this function.
    op_name: Python `str` name for `Op`s created by this function.
    validate_args: Python `bool, default `False`. When `True`, arguments may be
      checked for validity at execution time, possibly degrading runtime
      performance. When `False`, invalid inputs may silently render incorrect
      outputs.
  Returns:
    vector: a 1-D `Tensor`.
  """
  with tf.name_scope(op_name or 'expand_to_vector'):
    x_orig = x
    x = ps.convert_to_shape_tensor(x, name='x')
    ndims = tensorshape_util.rank(x.shape)
    if ndims is None:
      # Rank unknown statically: decide scalar-vs-vector at runtime.
      # Maybe expand ndims from 0 to 1.
      if validate_args:
        x = with_dependencies([
            assert_util.assert_rank_at_most(
                x, 1, message='Input is neither scalar nor vector.')
        ], x)
      ndims = ps.rank(x)
      # Reshape to [1] when scalar, otherwise keep the existing shape.
      expanded_shape = pick_vector(
          ps.equal(ndims, 0), np.array([1], dtype=np.int32), ps.shape(x))
      return ps.reshape(x, expanded_shape)
    elif ndims == 0:
      # Definitely expand ndims from 0 to 1.
      # Reshape the original (pre-conversion) value so constant-ness is
      # preserved where possible.
      return ps.convert_to_shape_tensor(
          ps.reshape(x_orig, [1]), name=tensor_name)
    elif ndims != 1:
      raise ValueError('Input is neither scalar nor vector.')
    # ndims == 1
    return x
def with_dependencies(dependencies, output_tensor, name=None):
  """Produces the content of `output_tensor` only after `dependencies`.

  Returns `output_tensor`, but only after all operations in `dependencies`
  have run; useful when an externally-consumed output should wait on some
  other work. Note this gives no guarantee that `output_tensor` itself is
  evaluated after any of `dependencies` have run.

  See also `tf.tuple` and `tf.group`.

  Args:
    dependencies: Iterable of operations to run before this op finishes.
      `None` entries are ignored.
    output_tensor: A `Tensor` or `IndexedSlices` that will be returned.
    name: (Optional) A name for this operation.

  Returns:
    output_with_deps: Same as `output_tensor` but with embedded
      dependencies.

  Raises:
    TypeError: if `output_tensor` is not a `Tensor` or `IndexedSlices`.
  """
  if tf.executing_eagerly():
    # Eager mode has no graph, so control dependencies are meaningless.
    return output_tensor
  with tf.name_scope(name or 'control_dependency') as scope_name:
    non_none_deps = (d for d in dependencies if d is not None)
    with tf.control_dependencies(non_none_deps):
      result = tf.convert_to_tensor(output_tensor)
      if not isinstance(result, tf.Tensor):
        # IndexedSlices: attach the dependency to the values tensor only,
        # preserving indices and dense_shape as-is.
        return tf.IndexedSlices(
            tf.identity(result.values, name=scope_name),
            result.indices,
            result.dense_shape)
      return tf.identity(result, name=scope_name)
def is_distribution_instance(d):
  """Standardizes our definition of being a `tfd.Distribution`."""
  # Duck-typed: an *instance* (not a class) exposing both `log_prob` and
  # `sample` is treated as a distribution.
  if tf_inspect.isclass(d):
    return False
  return hasattr(d, 'log_prob') and hasattr(d, 'sample')
def extend_cdf_outside_support(x, computed_cdf, low=None, high=None):
  """Clamp a computed CDF to 0/1 outside a distribution's support interval.

  Useful when the natural CDF formula produces the wrong thing (e.g. `nan`
  from a special function called out of bounds) outside the support.

  Note that correct gradients may require the "double-where" trick: compute
  `computed_cdf` from a doctored input in which out-of-support values of
  `x` were replaced by a safe in-support value, so that no `nan` is
  produced. The doctored CDF values are then ignored here in the primal
  computation, and any avoided `nan`s will not pollute the gradients.

  Args:
    x: Tensor of input values at which the CDF is desired.
    computed_cdf: Tensor of values computed for the CDF. Must broadcast
      with `x`. Entries for `x` below/above the support are replaced with
      0 and 1 respectively.
    low: Tensor of lower bounds for the support. Must broadcast with `x`.
    high: Tensor of upper bounds for the support. Must broadcast with `x`.

  Returns:
    cdf: Tensor of corrected CDF values: 0 below the support, the computed
      value inside it, 1 at or above `high`.
  """
  corrected = computed_cdf
  # Keep the original predicates exactly (`x >= low`, `x < high`) so that
  # NaN comparisons resolve the same way as before.
  if low is not None:
    corrected = tf.where(x >= low, corrected, 0.)
  if high is not None:
    corrected = tf.where(x < high, corrected, 1.)
  return corrected
| apache-2.0 |
elifesciences/elife-metrics | src/article_metrics/scopus/citations.py | 1 | 5339 | import requests
import logging
from django.conf import settings
from article_metrics import models, handler, utils
from article_metrics.utils import first, flatten, simple_rate_limiter, lmap, lfilter, isint, ParseError, ensure
LOG = logging.getLogger(__name__)
# Scopus Search API endpoint (Elsevier developer portal).
URL = "https://api.elsevier.com/content/search/scopus"
# Client-side request rate cap, enforced on fetch_page via simple_rate_limiter.
MAX_PER_SECOND = 3
@simple_rate_limiter(MAX_PER_SECOND)  # throttle outbound calls to scopus
def fetch_page(api_key, doi_prefix, page=0, per_page=25):
    "fetches a page of scopus search results"
    query_params = {
        'query': 'DOI("%s/*")' % doi_prefix,
        'start': page,  # scopus answers with a 400 once we page past the end
        'count': per_page,
        'sort': 'citedby-count',
    }
    LOG.debug('calling scopus with params: %s', query_params)
    request_headers = {
        'Accept': 'application/json',
        'X-ELS-APIKey': api_key,
        'User-Agent': settings.USER_AGENT,
    }
    # API docs:
    # https://dev.elsevier.com/tecdoc_cited_by_in_scopus.html
    # http://api.elsevier.com/documentation/SCOPUSSearchAPI.wadl
    return handler.requests_get(URL, params=query_params, headers=request_headers)
def search(api_key=settings.SCOPUS_KEY, doi_prefix=settings.DOI_PREFIX):
    """searches scopus, returning a generator that will iterate through each
    page of results until all pages have been consumed.

    results are cached and expire daily.

    :param api_key: scopus API key (defaults to the configured key).
    :param doi_prefix: publisher DOI prefix to query for.
    """
    page = 0
    per_page = 25  # max per page
    data = fetch_page(api_key, doi_prefix, page=page, per_page=per_page).json()
    yield data['search-results']
    # NOTE(review): 'opensearch:totalResults' counts *results*, yet it is used
    # here as a page bound ("you can certainly query far far beyond
    # totalResults / per_page") -- behavior preserved as-is.
    total_pages = int(data['search-results']['opensearch:totalResults'])
    # I think we're capped at 10k/day? can't find their docs on this.
    # eLife tends to hit 0 citations at about the 2.5k mark.
    max_pages = 5000
    end_page = min(total_pages, max_pages)
    try:
        for page in range(page + 1, end_page):
            try:
                data = fetch_page(api_key, doi_prefix, page=page, per_page=per_page).json()
                yield data['search-results']
                # find the first entry in the search results with a usable
                # 'citedby-count'. typically the first entry, but some
                # results are missing the field. `entry` may be None.
                fltrfn = lambda d: 'citedby-count' in d and isint(d['citedby-count'])
                entry = first(lfilter(fltrfn, data['search-results']['entry']))
                # results are sorted by citation count, so exit early once
                # we start hitting 0 citations.
                if entry and int(entry['citedby-count']) == 0:
                    raise GeneratorExit("no more articles with citations")
                # every ten pages log our progress. guard on `entry`: it may
                # be None, which previously raised a TypeError here.
                if page % 10 == 0 and entry:
                    LOG.info("page %s of %s, last citation count: %s",
                             page, end_page, entry['citedby-count'])
            except requests.HTTPError as err:
                raise GeneratorExit(str(err))
    except GeneratorExit:
        return
@handler.capture_parse_error
def parse_entry(entry):
    """parses a single search result from scopus into a citation dict.

    Returns ``{'doi', 'num', 'source', 'source_id'}`` on success, or
    ``{'bad': entry}`` when the entry cannot be parsed."""
    try:
        citedby_link = first(lfilter(lambda d: d["@ref"] == "scopus-citedby", entry['link']))
        # previously a missing citedby link crashed with a TypeError on
        # `citedby_link['@href']`; treat it as a parse failure instead.
        ensure(citedby_link is not None, "entry is missing a 'scopus-citedby' link!", ParseError)
        ensure('prism:doi' in entry, "entry is missing 'doi'!", ParseError)
        ensure('citedby-count' in entry, "entry is missing 'citedby-count'!", ParseError)
        ensure(isint(entry['citedby-count']), "citedby count isn't an integer", ParseError)
        # 'prism:doi' is occasionally a list of {'$': doi} structs; keep the
        # first doi that maps to a valid msid.
        if isinstance(entry['prism:doi'], list):
            weird_key = "$"
            for struct in entry['prism:doi']:
                doi = struct[weird_key]
                if utils.doi2msid(doi, safe=True, allow_subresource=False):
                    entry['prism:doi'] = doi
                    break
        utils.doi2msid(entry['prism:doi'], allow_subresource=False)  # throws AssertionError
        return {
            'doi': entry['prism:doi'],
            'num': int(entry['citedby-count']),
            'source': models.SCOPUS,
            'source_id': citedby_link['@href']
        }
    # errors handled here won't be caught by handler.capture_parse_error
    except AssertionError:
        LOG.warning("discarding scopus citation: failed to parse doi", extra={'response': entry})
        return {'bad': entry}
    except ParseError:
        LOG.warning("discarding scopus citation: failed to parse entry", extra={'response': entry})
        return {'bad': entry}
def parse_results(search_result):
    """parses citation counts from a single page of scopus search results."""
    page_entries = search_result['entry']
    return lmap(parse_entry, page_entries)
def all_entries(search_result_list):
    """returns a flat list of citation 'entries' parsed from a *list* of
    search result pages."""
    parsed_pages = [parse_results(page) for page in search_result_list]
    return flatten(parsed_pages)
def is_abstract(entry):
    """True when the entry's DOI points at an article sub-resource.

    e.g. 10.7554/eLife.22757.001 has four dot-separated segments, whereas a
    full-article DOI like 10.7554/eLife.22757 has only three."""
    doi_segments = entry['doi'].split('.')
    return len(doi_segments) == 4
def not_abstract(entry):
    """Inverse of `is_abstract`: True for full-article citation entries."""
    return not is_abstract(entry)
#
#
#
def all_todays_entries():
    """Convenience wrapper: run a fresh scopus search and parse every entry."""
    todays_pages = list(search())
    # abstract sub-resource entries are deliberately *not* filtered out here;
    # apply `not_abstract` to the result if they should be excluded.
    return all_entries(todays_pages)
| gpl-3.0 |
psi-rking/psi4 | samples/python/cc54/input.py | 10 | 2940 | #! CCSD dipole with user-specified basis set
# CCSD dipole computation on water, with the Sadlej-pVTZ basis set supplied
# inline rather than taken from Psi4's basis library.
import psi4
# Route all Psi4 output to output.dat (False = overwrite, do not append).
psi4.set_output_file("output.dat", False)
# Water geometry as a Z-matrix: O-H bonds of 0.957, H-O-H angle of 104.5;
# the leading "0 1" line is charge/multiplicity (neutral singlet).
h2o = psi4.geometry("""
0 1
H
O 1 0.957
H 2 0.957 1 104.5
""")
# Correlate all orbitals (no frozen core).
psi4.set_options({'freeze_core': 'false'})
# Inline basis specification ("Sadlej-pVTZ" per the embedded label);
# "spherical" selects spherical harmonics and "****" separates elements.
# The string body is input data and must not be edited casually.
psi4.basis_helper("""
# Sadlej-pVTZ
spherical
****
H 0
S 4 1.00
33.8650140000 0.0060680000
5.0947880000 0.0453160000
1.1587860000 0.2028460000
0.3258400000 0.5037090000
S 1 1.00
0.1027410000 1.0000000000
S 1 1.00
0.0324000000 1.0000000000
P 2 1.00
1.1588000000 0.1884400000
0.3258000000 0.8824200000
P 2 1.00
0.1027000000 0.1178000000
0.0324000000 0.0042000000
****
C 0
S 5 1.00
5240.6353000000 0.0009370000
782.2048000000 0.0072280000
178.3508300000 0.0363440000
50.8159420000 0.1306000000
16.8235620000 0.3189310000
S 2 1.00
6.1757760000 0.4387420000
2.4180490000 0.2149740000
S 1 1.00
0.5119000000 1.0000000000
S 1 1.00
0.1565900000 1.0000000000
S 1 1.00
0.0479000000 1.0000000000
P 4 1.00
18.8418000000 0.0138870000
4.1592400000 0.0862790000
1.2067100000 0.2887440000
0.3855400000 0.4994110000
P 1 1.00
0.1219400000 1.0000000000
P 1 1.00
0.0385680000 1.0000000000
D 2 1.00
1.2067000000 0.2628500000
0.3855000000 0.8043000000
D 2 1.00
0.1219000000 0.6535000000
0.0386000000 0.8636000000
****
O 0
S 5 1.00
10662.2850000000 0.0007990000
1599.7097000000 0.0061530000
364.7252600000 0.0311570000
103.6517900000 0.1155960000
33.9058050000 0.3015520000
S 2 1.00
12.2874690000 0.4448700000
4.7568050000 0.2431720000
S 1 1.00
1.0042710000 1.0000000000
S 1 1.00
0.3006860000 1.0000000000
S 1 1.00
0.0900300000 1.0000000000
P 4 1.00
34.8564630000 0.0156480000
7.8431310000 0.0981970000
2.3062490000 0.3077680000
0.7231640000 0.4924700000
P 1 1.00
0.2148820000 1.0000000000
P 1 1.00
0.0638500000 1.0000000000
D 2 1.00
2.3062000000 0.2027000000
0.7232000000 0.5791000000
D 2 1.00
0.2149000000 0.7854500000
0.0639000000 0.5338700000
****
""")
# CCSD energy with the dipole requested as a property; keep the wavefunction
# so post-processing can reuse the CC density.
ccsd_e, wfn = psi4.properties('ccsd',properties=['dipole'],return_wfn=True)
# Print dipole and quadrupole moments computed from the CC density.
psi4.oeprop(wfn,"DIPOLE", "QUADRUPOLE", title="(OEPROP)CC")
psi4.core.print_variables()
| lgpl-3.0 |
josephsuh/extra-specs | nova/api/openstack/compute/views/servers.py | 1 | 7868 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010-2011 OpenStack LLC.
# Copyright 2011 Piston Cloud Computing, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import hashlib
from nova.api.openstack import common
from nova.api.openstack.compute.views import addresses as views_addresses
from nova.api.openstack.compute.views import flavors as views_flavors
from nova.api.openstack.compute.views import images as views_images
from nova import log as logging
from nova import utils
# Module-level logger (unused in the code shown here; kept for convention).
LOG = logging.getLogger(__name__)
class ViewBuilder(common.ViewBuilder):
    """Model a server API response as a python dictionary."""
    # URL collection segment used by the common.ViewBuilder link helpers.
    _collection_name = "servers"
    # vm statuses for which a "progress" percentage is added to the view.
    _progress_statuses = (
        "ACTIVE",
        "BUILD",
        "REBUILD",
        "RESIZE",
        "VERIFY_RESIZE",
    )
    # vm statuses for which fault information may be added to the view.
    _fault_statuses = (
        "ERROR",
    )
    def __init__(self):
        """Initialize view builder."""
        super(ViewBuilder, self).__init__()
        self._address_builder = views_addresses.ViewBuilder()
        self._flavor_builder = views_flavors.ViewBuilder()
        self._image_builder = views_images.ViewBuilder()
    # NOTE(review): plain function used as a decorator by methods below; it
    # is applied at class-definition time, so no `self` binding is involved.
    def _skip_precooked(func):
        def wrapped(self, request, instance):
            # "Precooked" instances are already fully-built server dicts;
            # return them verbatim instead of re-building the view.
            if instance.get("_is_precooked"):
                return dict(server=instance)
            else:
                return func(self, request, instance)
        return wrapped
    def create(self, request, instance):
        """View that should be returned when an instance is created."""
        return {
            "server": {
                "id": instance["uuid"],
                "links": self._get_links(request,
                                         instance["uuid"],
                                         self._collection_name),
            },
        }
    @_skip_precooked
    def basic(self, request, instance):
        """Generic, non-detailed view of an instance."""
        return {
            "server": {
                "id": instance["uuid"],
                "name": instance["display_name"],
                "links": self._get_links(request,
                                         instance["uuid"],
                                         self._collection_name),
            },
        }
    @_skip_precooked
    def show(self, request, instance):
        """Detailed view of a single instance."""
        server = {
            "server": {
                "id": instance["uuid"],
                "name": instance["display_name"],
                "status": self._get_vm_state(instance),
                "tenant_id": instance.get("project_id") or "",
                "user_id": instance.get("user_id") or "",
                "metadata": self._get_metadata(instance),
                "hostId": self._get_host_id(instance) or "",
                "image": self._get_image(request, instance),
                "flavor": self._get_flavor(request, instance),
                "created": utils.isotime(instance["created_at"]),
                "updated": utils.isotime(instance["updated_at"]),
                "addresses": self._get_addresses(request, instance),
                "accessIPv4": instance.get("access_ip_v4") or "",
                "accessIPv6": instance.get("access_ip_v6") or "",
                "key_name": instance.get("key_name") or "",
                "config_drive": instance.get("config_drive"),
                "links": self._get_links(request,
                                         instance["uuid"],
                                         self._collection_name),
            },
        }
        # Fault details are only exposed for error statuses, and only when a
        # fault record actually exists.
        _inst_fault = self._get_fault(request, instance)
        if server["server"]["status"] in self._fault_statuses and _inst_fault:
            server['server']['fault'] = _inst_fault
        # Progress is only meaningful for active/transitional statuses.
        if server["server"]["status"] in self._progress_statuses:
            server["server"]["progress"] = instance.get("progress", 0)
        return server
    def index(self, request, instances):
        """Show a list of servers without many details."""
        return self._list_view(self.basic, request, instances)
    def detail(self, request, instances):
        """Detailed view of a list of instance."""
        return self._list_view(self.show, request, instances)
    def _list_view(self, func, request, servers):
        """Provide a view for a list of servers, using `func` per server."""
        server_list = [func(request, server)["server"] for server in servers]
        servers_links = self._get_collection_links(request,
                                                   servers,
                                                   self._collection_name)
        servers_dict = dict(servers=server_list)
        # Pagination links are only attached when the base class produced any.
        if servers_links:
            servers_dict["servers_links"] = servers_links
        return servers_dict
    @staticmethod
    def _get_metadata(instance):
        """Flatten [{'key': k, 'value': v}, ...] metadata rows into a dict."""
        metadata = instance.get("metadata", [])
        return dict((item['key'], item['value']) for item in metadata)
    @staticmethod
    def _get_vm_state(instance):
        """Map (vm_state, task_state) to the user-facing status string."""
        return common.status_from_state(instance.get("vm_state"),
                                        instance.get("task_state"))
    @staticmethod
    def _get_host_id(instance):
        """Hash of (project_id + host), hiding real hostnames from users.

        Implicitly returns None when the instance has no host assigned
        (rendered as "" by the caller).
        NOTE(review): str concatenation into sha224 assumes python 2 byte
        strings; python 3 would need an explicit .encode().
        """
        host = instance.get("host")
        project = str(instance.get("project_id"))
        if host:
            sha_hash = hashlib.sha224(project + host) # pylint: disable=E1101
            return sha_hash.hexdigest()
    def _get_addresses(self, request, instance):
        """Delegate address formatting to the addresses view builder."""
        context = request.environ["nova.context"]
        networks = common.get_networks_for_instance(context, instance)
        return self._address_builder.index(networks)["addresses"]
    def _get_image(self, request, instance):
        """Image reference view: id plus a bookmark link."""
        image_ref = instance["image_ref"]
        image_id = str(common.get_id_from_href(image_ref))
        bookmark = self._image_builder._get_bookmark_link(request,
                                                          image_id,
                                                          "images")
        return {
            "id": image_id,
            "links": [{
                "rel": "bookmark",
                "href": bookmark,
            }],
        }
    def _get_flavor(self, request, instance):
        """Flavor reference view: id plus a bookmark link."""
        flavor_id = instance["instance_type"]["flavorid"]
        flavor_bookmark = self._flavor_builder._get_bookmark_link(request,
                                                                  flavor_id,
                                                                  "flavors")
        return {
            "id": str(flavor_id),
            "links": [{
                "rel": "bookmark",
                "href": flavor_bookmark,
            }],
        }
    def _get_fault(self, request, instance):
        """Fault view for an errored instance; None when no fault recorded."""
        fault = instance.get("fault", None)
        if not fault:
            return None
        fault_dict = {
            "code": fault["code"],
            "created": utils.isotime(fault["created_at"]),
            "message": fault["message"],
        }
        if fault.get('details', None):
            is_admin = False
            context = getattr(request, 'context', None)
            if context:
                is_admin = getattr(request.context, 'is_admin', False)
            # Details can contain sensitive tracebacks: only admins may see
            # details of 500-class faults.
            if is_admin or fault['code'] != 500:
                fault_dict['details'] = fault["details"]
        return fault_dict
| apache-2.0 |
UofS-CTLE/Projtrack3 | ctleweb/d2lstat/d2lstat.py | 1 | 19487 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
d2lstat.py
python d2lstat.py usage_data.csv full_time.csv part_time.csv semester_name number_of_courses_running
Given a correct data set and lists of full- and part-time teachers, this program will generate usage statistics on a
given template, which can be edited under the generate_document function.
Authors:
Dan Ricker <daniel.ricker@scranton.edu>
Sean Batzel <sean.batzel@scranton.edu>
This program is the property of the UofS-CTLE.
"""
from __future__ import division
import csv
import os
from django.conf import settings
# GLOBAL SETTINGS - DO NOT CHANGE UNLESS ABSOLUTELY NECESSARY.
DELIMITER = '|'  # Character the program will break data rows on.
ASSIGNMENTS = 13  # Column index of the assignments-usage count.
GRADE = 15  # Column index of the grade-item count.
GRADED = 16  # Column index of the graded grade-item count.
DISCUSSION = 18  # Column index of the discussion-post count.
USAGE_ROYAL = 3  # Column of the instructor's R number in the usage file.
FAC_ROYAL = 0  # Column of the R number in the full-/part-time CSV files.
def filter_for_semester(files_data, semester):
    """Keep only the usage rows belonging to the given semester.

    :param files_data: list of DELIMITER-separated rows from the usage file.
    :param semester: semester tag (e.g. ``2018_Fall``) matched against
        column 9, the course-identifier field.
    :return: the rows whose course identifier mentions the semester.
    """
    # Column 9 holds the course identifier, which embeds the semester tag,
    # so a substring test is sufficient here.
    return [row for row in files_data
            if semester in row.split(DELIMITER)[9]]
def get_rows_with_usage(files_data):
    """Keep only the courses that show actual D2L activity.

    :param files_data: semester-filtered, DELIMITER-separated rows.
    :return: the rows with relevant usage.
    """
    def _has_activity(fields):
        # Any assignments, graded items, or discussion posts counts as
        # activity; the grade-item threshold is > 2 (presumably because new
        # course shells start with default grade items -- TODO confirm).
        return (int(fields[ASSIGNMENTS]) > 0
                or int(fields[GRADE]) > 2
                or int(fields[GRADED]) > 0
                or int(fields[DISCUSSION]) > 0)
    return [row for row in files_data if _has_activity(row.split(DELIMITER))]
def remove_duplicate_crn(files_data):
    """Drop rows whose CRN was already seen, keeping first occurrences.

    :param files_data: filtered rows from :func:`get_rows_with_usage`.
    :return: the rows, unique by CRN.
    """
    unique_rows = []
    seen_crns = set()
    for row in files_data:
        # The CRN is the last 5 characters of the course-identifier column;
        # verify this is still where the CRN lives before running.
        crn = row.split(DELIMITER)[9][-5:]
        if crn not in seen_crns:
            seen_crns.add(crn)
            unique_rows.append(row)
    return unique_rows
def remove_duplicate_royal(files_data):
    """Drop rows whose instructor R number was already seen.

    :param files_data: filtered rows from :func:`get_rows_with_usage`.
    :return: the rows, unique by instructor R number (first occurrence kept).
    """
    unique_rows = []
    seen_rids = set()
    for row in files_data:
        rid = row.split(DELIMITER)[USAGE_ROYAL]
        if rid not in seen_rids:
            seen_rids.add(rid)
            unique_rows.append(row)
    return unique_rows
def _rewrite_as_pipe_delimited(path):
    """Rewrite the comma-separated file at `path` in place, DELIMITER-separated.

    The rest of the module splits rows on DELIMITER, so each input file is
    normalized exactly once before being read back. ('r' replaces the old
    'rU' mode, which was removed in Python 3.11; 'r' already uses universal
    newlines on Python 3.)
    """
    with open(path, mode='r') as infile:
        rows = list(csv.reader(infile, dialect='excel', delimiter=',', quotechar='"'))
    with open(path, mode='w') as outfile:
        csv.writer(outfile, delimiter=DELIMITER).writerows(rows)
def parse_files(usage, full_time, part_time, semester, total_courses):
    """Read the usage and faculty CSV files and bucket the data.

    NOTE: all three input files are rewritten in place (comma -> DELIMITER).

    :param usage: path of the D2L usage CSV file.
    :param full_time: path of the CSV listing full-time faculty members.
    :param part_time: path of the CSV listing part-time faculty members.
    :param semester: semester tag, e.g. ``2018_Fall``.
    :param total_courses: (provided externally) number of courses running.
    :return: dict with the deduplicated usage rows and the per-category
        (full-time / part-time / staff) instructor rows.
    """
    # Normalize all three inputs; previously this was three identical
    # copies of the read/rewrite code.
    for path in (usage, full_time, part_time):
        _rewrite_as_pipe_delimited(path)
    with open(usage, 'r') as fh:
        semester_rows = filter_for_semester(fh.readlines(), semester)
    rows_with_usage = get_rows_with_usage(semester_rows)
    usage_file = remove_duplicate_crn(rows_with_usage)
    no_dup_r = remove_duplicate_royal(rows_with_usage)
    with open(full_time, 'r') as fh:
        full_time_file = fh.readlines()
    with open(part_time, 'r') as fh:
        part_time_file = fh.readlines()
    # R numbers arrive wrapped in literal double quotes; strip them.
    full_r = [row.split(DELIMITER)[FAC_ROYAL].strip('"') for row in full_time_file]
    part_r = [row.split(DELIMITER)[FAC_ROYAL].strip('"') for row in part_time_file]
    full = []
    part = []
    staff = []
    # Bucket each instructor (already unique by R number) into full-time,
    # part-time, or "staff" (anyone appearing in neither faculty list).
    for row in no_dup_r:
        fields = row.split(DELIMITER)
        if fields[USAGE_ROYAL] in full_r:
            full.append(fields)
        elif fields[USAGE_ROYAL] in part_r:
            part.append(fields)
        else:
            staff.append(fields)
    return {'semester_no_dup_crn': usage_file,
            'semester_no_dup_r': no_dup_r,
            'semester': rows_with_usage,
            'full_time': full,
            'len_full': len(full_time_file),
            'part_time': part,
            'len_part': len(part_time_file),
            'staff': staff,
            'total_courses': total_courses}
def calculate_stats(file_data):
    """Derive the report numbers from the parsed file data.

    :param file_data: the dict produced by :func:`parse_files`.
    :return: dict of statistics consumed by :func:`generate_document`.
    """
    # Per-tool usage counts over the CRN-deduplicated course rows. The
    # grade threshold (> 2) mirrors get_rows_with_usage.
    fields_per_course = [row.split(DELIMITER)
                         for row in file_data['semester_no_dup_crn']]
    specifics = {
        'assignments': sum(1 for f in fields_per_course if int(f[ASSIGNMENTS]) > 0),
        'grade': sum(1 for f in fields_per_course if int(f[GRADE]) > 2),
        'graded': sum(1 for f in fields_per_course if int(f[GRADED]) > 0),
        'discussion': sum(1 for f in fields_per_course if int(f[DISCUSSION]) > 0),
    }
    return {'semester': file_data['semester'],
            'courses_with_usage': len(file_data['semester_no_dup_crn']),
            'faculty_with_usage': len(file_data['semester_no_dup_r']),
            'full_time': len(file_data['full_time']),
            'total_full_time': file_data['len_full'],
            'part_time': len(file_data['part_time']),
            'total_part_time': file_data['len_part'],
            'staff': len(file_data['staff']),
            'specifics': specifics,
            'total_courses': file_data['total_courses']}
def generate_document(stats, semester):
    """Render the semester usage report to templates/d2lstat/report.html.

    Fills the positional ``{}`` slots of the raw_html.html template with the
    computed statistics and writes the result next to the template.

    :param stats: the dict returned by :func:`calculate_stats`.
    :param semester: the semester value passed in as a command-line argument.
    """
    template_path = os.path.join(
        settings.BASE_DIR, 'd2lstat/templates/d2lstat/raw_html.html')
    with open(template_path, 'r') as f:
        string = f.read()
    # Percentages are rounded to one decimal place for display.
    string = string.format(semester,
                           stats['faculty_with_usage'],
                           stats['full_time'],
                           stats['total_full_time'],
                           round((stats['full_time'] / stats['total_full_time']) * 100, 1),
                           stats['part_time'],
                           stats['total_part_time'],
                           round((stats['part_time'] / stats['total_part_time']) * 100, 1),
                           stats['staff'],
                           stats['courses_with_usage'],
                           stats['total_courses'],
                           round((stats['courses_with_usage'] / int(stats['total_courses'])) * 100, 1),
                           stats['specifics']['assignments'],
                           stats['specifics']['grade'],
                           stats['specifics']['graded'],
                           stats['specifics']['discussion'])
    report_path = os.path.join(
        settings.BASE_DIR, 'd2lstat/templates/d2lstat/report.html')
    # The context manager closes the file; the old explicit close() inside
    # the with-block was redundant.
    with open(report_path, 'w') as f:
        f.write(string)
def process_file(usage, full_time, part_time, semester, total_courses):
    """Top-level driver: parse the CSVs, compute stats, render the report.

    :param usage: path of the D2L usage CSV export.
    :param full_time: path of the full-time faculty CSV.
    :param part_time: path of the part-time faculty CSV.
    :param semester: semester tag, e.g. ``2018_Fall``.
    :param total_courses: number of courses running this semester.
    """
    parsed = parse_files(usage, full_time, part_time, semester, total_courses)
    stats = calculate_stats(parsed)
    generate_document(stats, semester)
def calculateVirtualClassroomStats(usage, fullTime, partTime, VCDataFile):
    """Build a line-by-line report of Virtual Classroom (YouSeeU) adoption.

    :param usage: path of the instructor usage CSV (comma separated);
        column 3 is the instructor R number, columns 1/2 are name fields,
        column 10 is the course org-unit id.
    :param fullTime: path of the full-time faculty CSV (R number in col 0).
    :param partTime: path of the part-time faculty CSV (R number in col 0).
    :param VCDataFile: path of the D2L LTI-usage CSV; a row whose column 5
        mentions 'youseeu' marks a course (org unit, column 1) with at
        least one Virtual Classroom meeting.
    :return: list of report strings (headings, per-instructor lines, counts).

    Changes from the original: files are closed via context managers, the
    stray debug ``print`` before the final count line was removed, and the
    dead (and inverted) ``staffTeachingPartTimeRids`` bookkeeping is gone.
    """
    resultList = []
    # Org-unit ids of courses with at least one Virtual Classroom meeting.
    seenVirtualClassRoomOrgUnitIds = []
    with open(VCDataFile, 'r') as vc_file:
        for row in csv.reader(vc_file):
            if 'youseeu' in row[5] and row[1] not in seenVirtualClassRoomOrgUnitIds:
                seenVirtualClassRoomOrgUnitIds.append(row[1])
    # Instructors (deduplicated by R number) teaching any of those courses.
    seenRIds = []
    fullDataOnInstructors = []  # rows of [rid, name-field-1, name-field-2]
    resultList.append('Instructors That Have Created at Least 1 Virtual Classroom Meeting:')
    with open(usage, 'r') as usage_file:
        for row in csv.reader(usage_file):
            if row[10] in seenVirtualClassRoomOrgUnitIds and row[3] not in seenRIds:
                seenRIds.append(row[3])
                fullDataOnInstructors.append([row[3], row[1], row[2]])
                resultList.append(row[3] + ': ' + row[1] + ', ' + row[2])
    resultList.append('Number of Instructors That Have Created at Least 1 Virtual Classroom Meeting: ' + str(len(seenRIds)))
    def _vc_user_rids(path):
        # R numbers from column 0 of `path` that belong to a VC user above.
        with open(path, 'r') as fh:
            return [row[0] for row in csv.reader(fh) if row[0] in seenRIds]
    fullTimeRids = _vc_user_rids(fullTime)
    partTimeRids = _vc_user_rids(partTime)
    knownFacultyRids = fullTimeRids + partTimeRids
    # R numbers are unique in fullDataOnInstructors (seenRIds dedupe above),
    # so a dict lookup reproduces the original nested-loop matching.
    byRid = {row[0]: row for row in fullDataOnInstructors}
    fullTimeFacultyUsingVC = [byRid[rid] for rid in fullTimeRids]
    partTimeFacultyUsingVC = [byRid[rid] for rid in partTimeRids]
    # Anyone in neither faculty list is "staff teaching part time".
    staffUsingVC = [row for row in fullDataOnInstructors
                    if row[0] not in knownFacultyRids]
    resultList.append("Full Time Faculty Using Virtual Classroom:")
    for row in fullTimeFacultyUsingVC:
        resultList.append(row[0] + ': ' + row[2] + ', ' + row[1])
    resultList.append("The Number of Full Time Faculty Using Virtual Classroom: " + str(len(fullTimeFacultyUsingVC)))
    resultList.append("Part Time Faculty Using Virtual Classroom:")
    for row in partTimeFacultyUsingVC:
        resultList.append(row[0] + ': ' + row[2] + ', ' + row[1])
    resultList.append("The Number of Part Time Faculty Using Virtual Classroom: " + str(len(partTimeFacultyUsingVC)))
    resultList.append("Staff Teaching Part Time Using Virtual Classroom:")
    for row in staffUsingVC:
        resultList.append(row[0] + ': ' + row[2] + ', ' + row[1])
    resultList.append("The Number of Staff Teaching Part Time Using Virtual Classroom: " + str(len(staffUsingVC)))
    return resultList
def facultyNotUsingD2LCalculation(usage, fullTime, partTime, semester):
resultList = []
usageFile = open(usage, 'rU')
fullTimeFile = open(fullTime, 'rU')
partTimeFile = open(partTime, 'rU')
usageFileReader = csv.reader(usageFile)
fullTimeFileReader = csv.reader(fullTimeFile)
partTimeFileReader = csv.reader(partTimeFile)
usageDataRaw = []
usageData = []
fullTimeDataRaw = []
partTimeDataRaw = []
fullTimeNotUsing = []
partTimeNotUsing = []
staffTeachingPartTimeNotUsing = []
fullTimeRIds = []
partTimeRIds = []
ridsOfInstructorsUsing = []
seenRidsOfInstructorsUsing = []
ridsOfInstructorsUsingDuplicatesRemoved = []
# read in the usage data
for row in usageFileReader:
usageDataRaw.append(row)
# Get the rids of the instructors using D2L
for row in usageDataRaw:
# print(row[13] + ',' +row[15] + ',' +row[16] + ',' +row[18])
try:
if((int(row[13])>0 or int(row[15])>2 or int(row[16])>0 or int(row[18])>0)):
if(semester in row[9]):
ridsOfInstructorsUsing.append(row[3])
except Exception:
print(Exception)
seenRIds = []
# Filter the rids of instuctors using for duplicates
for row in ridsOfInstructorsUsing:
if(row not in seenRidsOfInstructorsUsing):
ridsOfInstructorsUsingDuplicatesRemoved.append(row)
seenRidsOfInstructorsUsing.append(row)
#get the Rids of instructors not using
for row in usageDataRaw:
if(row[3] not in ridsOfInstructorsUsingDuplicatesRemoved and semester in row[9]):
usageData.append(row)
for row in fullTimeFileReader:
fullTimeDataRaw.append(row)
for row in partTimeFileReader:
partTimeDataRaw.append(row)
for row in fullTimeDataRaw:
fullTimeRIds.append(row[0])
for row in partTimeDataRaw:
partTimeRIds.append(row[0])
for row in usageData:
if(row[3] in fullTimeRIds and row[3] not in seenRIds):
fullTimeDepartment = ''
for fullTimeRow in fullTimeDataRaw:
if(row[3]==fullTimeRow[0]):
fullTimeDepartment = fullTimeRow[5]
fullTimeNotUsing.append([row[3], row[1], row[2], fullTimeDepartment])
seenRIds.append(row[3])
elif(row[3] in partTimeRIds and row[3] not in seenRIds):
if(row[3] in partTimeRIds):
partTimeDepartment = ''
for partTimeRow in partTimeDataRaw:
if(row[3]==partTimeRow[0]):
partTimeDepartment = partTimeRow[5]
partTimeNotUsing.append([row[3], row[1], row[2], partTimeDepartment])
seenRIds.append(row[3])
else:
if(row[3] not in seenRIds):
staffTeachingPartTimeNotUsing.append([row[3], row[1], row[2]])
seenRIds.append(row[3])
resultList.append('The Total Number of Faculty Not Using Desire 2 Learn: ' + str(len(fullTimeNotUsing)+len(partTimeNotUsing) + len(staffTeachingPartTimeNotUsing)))
resultList.append('The Number of Full Time Faculty Not Using D2L: ' + str(len(fullTimeNotUsing)))
resultList.append('Full Time Faculty Not Using Desire 2 Learn:')
for row in fullTimeNotUsing:
resultList.append(row[0] + ', ' + row[1] + ', ' + row[2] + ', ' + row[3])
resultList.append('')
resultList.append('')
resultList.append('The Number of Part Time Faculty Not Using D2L: ' + str(len(partTimeNotUsing)))
resultList.append('Part Time Faculty Not Using Desire 2 Learn:')
for row in partTimeNotUsing:
resultList.append(row[0] + ', ' + row[1] + ', ' + row[2] + ', ' + row[3])
resultList.append('')
resultList.append('')
resultList.append(
'The Number of Staff Teaching Part Time Faculty Not Using D2L: ' + str(len(staffTeachingPartTimeNotUsing)))
resultList.append('Staff Teaching Part Time Not Using Desire 2 Learn:')
for row in staffTeachingPartTimeNotUsing:
resultList.append(row[0] + ', ' + row[1] + ', ' + row[2])
return resultList | gpl-3.0 |
bitjammer/swift | utils/split_file.py | 65 | 1410 | #!/usr/bin/env python
#
# This source file is part of the Swift.org open source project
#
# Copyright (c) 2014 - 2017 Apple Inc. and the Swift project authors
# Licensed under Apache License v2.0 with Runtime Library Exception
#
# See https://swift.org/LICENSE.txt for license information
# See https://swift.org/CONTRIBUTORS.txt for the list of Swift project authors
import argparse
import os
import re
import sys
# Command-line setup: one optional input file (default: stdin) and an output
# directory for the generated pieces.
parser = argparse.ArgumentParser(
    description="""
Take the file at <path> and write it to multiple files, switching to a new file
every time an annotation of the form "// BEGIN file1.swift" is encountered. If
<dir> is specified, place the files in <dir>; otherwise, put them in the
current directory.
""")
parser.add_argument(
    "-o", dest="out_dir", default=".", metavar="<dir>",
    help="directory path where the output files are placed in. "
         "(defaults to current directory)")
parser.add_argument(
    "input", type=argparse.FileType("r"), nargs="?", default=sys.stdin,
    metavar="<path>",
    help="input file. (defaults to stdin)")
args = parser.parse_args()

# File currently being written; stays None until the first BEGIN marker.
fp_out = None
for line in args.input:
    # A marker line of the form "// BEGIN <name>" starts a new output file.
    m = re.match(r'^//\s*BEGIN\s+([^\s]+)\s*$', line)
    if m:
        # Close the previous output file (if any) before opening the next one.
        if fp_out:
            fp_out.close()
        fp_out = open(os.path.join(args.out_dir, m.group(1)), 'w')
    elif fp_out:
        # Ordinary line: copy it into the current output file.  Lines seen
        # before the first BEGIN marker are intentionally dropped.
        fp_out.write(line)
args.input.close()
if fp_out:
    fp_out.close()
| apache-2.0 |
dhimmel/networkx | networkx/algorithms/coloring/greedy_coloring.py | 30 | 9683 | # -*- coding: utf-8 -*-
"""
Greedy graph coloring using various strategies.
"""
# Copyright (C) 2014 by
# Christian Olsson <chro@itu.dk>
# Jan Aagaard Meier <jmei@itu.dk>
# Henrik Haugbølle <hhau@itu.dk>
# All rights reserved.
# BSD license.
import networkx as nx
import random
import itertools
from . import greedy_coloring_with_interchange as _interchange
__author__ = "\n".join(["Christian Olsson <chro@itu.dk>",
"Jan Aagaard Meier <jmei@itu.dk>",
"Henrik Haugbølle <hhau@itu.dk>"])
__all__ = [
'greedy_color',
'strategy_largest_first',
'strategy_random_sequential',
'strategy_smallest_last',
'strategy_independent_set',
'strategy_connected_sequential',
'strategy_connected_sequential_dfs',
'strategy_connected_sequential_bfs',
'strategy_saturation_largest_first'
]
def min_degree_node(G):
    """Return the node of *G* with the smallest degree.

    Ties are broken by the graph's iteration order (the first minimal
    node wins); raises ``ValueError`` on an empty graph, like ``min``.
    """
    return min(G, key=lambda node: G.degree(node))
def max_degree_node(G):
    """Return the node of *G* with the largest degree.

    Ties are broken by the graph's iteration order (the first maximal
    node wins); raises ``ValueError`` on an empty graph, like ``max``.
    """
    return max(G, key=lambda node: G.degree(node))
def strategy_largest_first(G, colors):
    """
    Largest first (lf) ordering.  Return the nodes of ``G`` with the
    highest-degree node first; nodes of equal degree keep their original
    relative order (``sorted`` with ``reverse=True`` is stable).
    """
    return sorted(G.nodes(), key=G.degree, reverse=True)
def strategy_random_sequential(G, colors):
    """
    Random sequential (RS) ordering.  Return the nodes of ``G`` in a
    uniformly random order (a single in-place Fisher-Yates shuffle via
    ``random.shuffle``).
    """
    ordering = G.nodes()
    random.shuffle(ordering)
    return ordering
def strategy_smallest_last(G, colors):
    """
    Smallest last (sl) ordering.  Repeatedly remove a minimum-degree node
    from a working copy of the graph (degrees shrink as neighbours go);
    the ordering returned is the reverse of that removal order.
    """
    working = G.copy()
    removal_order = []
    while len(working):
        # First node of minimum degree in the remaining graph.
        node = min(working, key=lambda n: working.degree(n))
        working.remove_node(node)
        removal_order.append(node)
    removal_order.reverse()
    return removal_order
def strategy_independent_set(G, colors):
    """
    Greedy independent set ordering (GIS).  Unlike the ordering strategies,
    this assigns colors directly into *colors*: build a maximal independent
    set greedily (minimum-degree first), give every node in it the current
    color, remove those nodes from the graph, then repeat with the next
    color.  Returns ``None`` so the caller skips its own coloring loop.
    """
    remaining = len(G)
    uncolored = G.copy()
    color = 0
    while remaining > 0:  # some nodes still uncolored
        candidates = uncolored.copy()
        while len(candidates):
            node = min(candidates, key=lambda n: candidates.degree(n))
            colors[node] = color
            remaining -= 1
            uncolored.remove_node(node)
            # Drop the node and its whole neighbourhood from the candidate
            # pool so the set being built stays independent.
            candidates.remove_nodes_from(candidates.neighbors(node) + [node])
        color += 1
    return None
def strategy_connected_sequential_bfs(G, colors):
    """
    Connected sequential (CS) ordering via breadth-first traversal: every
    yielded node except the first of each component already has a
    neighbour earlier in the sequence.
    """
    return strategy_connected_sequential(G, colors, traversal='bfs')
def strategy_connected_sequential_dfs(G, colors):
    """
    Connected sequential (CS) ordering via depth-first traversal: every
    yielded node except the first of each component already has a
    neighbour earlier in the sequence.
    """
    return strategy_connected_sequential(G, colors, traversal='dfs')
def strategy_connected_sequential(G, colors, traversal='bfs'):
    """
    Connected sequential ordering (CS). Yield nodes in such an order, that
    each node, except the first one, has at least one neighbour in the
    preceeding sequence. The sequence can be generated using both BFS and
    DFS search (using the strategy_connected_sequential_bfs and
    strategy_connected_sequential_dfs method). The default is bfs.
    """
    # Handle each connected component on its own; within a component the
    # traversal tree guarantees every non-source node follows a neighbour.
    for component_graph in nx.connected_component_subgraphs(G):
        source = component_graph.nodes()[0]
        yield source  # Pick the first node as source
        if traversal == 'bfs':
            tree = nx.bfs_edges(component_graph, source)
        elif traversal == 'dfs':
            tree = nx.dfs_edges(component_graph, source)
        else:
            # NOTE(review): because this is a generator, a bad `traversal`
            # value only raises once the ordering is iterated (and after the
            # first source has already been yielded), not at call time.
            raise nx.NetworkXError(
                'Please specify bfs or dfs for connected sequential ordering')
        for (_, end) in tree:
            # Then yield nodes in the order traversed by either BFS or DFS
            yield end
def strategy_saturation_largest_first(G, colors):
    """
    Saturation largest first (SLF). Also known as degree saturation (DSATUR).

    Yields one uncolored node at a time, always picking a node whose
    neighbours currently use the most distinct colors (highest saturation),
    breaking ties by degree.  This generator cooperates with greedy_color:
    after each ``yield`` the caller writes the chosen color into *colors*,
    and the generator reads it back (``colors[node]``) on the next step.
    """
    len_g = len(G)
    no_colored = 0
    # node -> set of colors seen on its already-colored neighbours
    distinct_colors = {}
    for node in G.nodes_iter():
        distinct_colors[node] = set()
    while no_colored != len_g:
        if no_colored == 0:
            # When sat. for all nodes is 0, yield the node with highest degree
            no_colored += 1
            node = max_degree_node(G)
            yield node
            # The first yielded node is assigned color 0 by the caller's
            # greedy loop, so record 0 on its neighbours directly.
            for neighbour in G.neighbors_iter(node):
                distinct_colors[neighbour].add(0)
        else:
            # Find the uncolored node(s) with the most saturated neighbourhood.
            highest_saturation = -1
            highest_saturation_nodes = []
            for node, distinct in distinct_colors.items():
                if node not in colors:  # If the node is not already colored
                    saturation = len(distinct)
                    if saturation > highest_saturation:
                        highest_saturation = saturation
                        highest_saturation_nodes = [node]
                    elif saturation == highest_saturation:
                        highest_saturation_nodes.append(node)
            if len(highest_saturation_nodes) == 1:
                node = highest_saturation_nodes[0]
            else:
                # Return the node with highest degree
                max_degree = -1
                max_node = None
                for node in highest_saturation_nodes:
                    degree = G.degree(node)
                    if degree > max_degree:
                        max_node = node
                        max_degree = degree
                node = max_node
            no_colored += 1
            yield node
            # Caller has now colored `node`; propagate that color to the
            # saturation sets of its neighbours.
            color = colors[node]
            for neighbour in G.neighbors_iter(node):
                distinct_colors[neighbour].add(color)
def greedy_color(G, strategy=strategy_largest_first, interchange=False):
    """Color a graph using various strategies of greedy graph coloring.

    The strategies are described in [1]_.  Attempts to use as few colors
    as possible; no node ever shares a color with one of its neighbours.

    Parameters
    ----------
    G : NetworkX graph

    strategy : function(G, colors)
        Ordering function: returns (or yields) the nodes in the order in
        which they should be colored.  ``G`` is the graph and ``colors``
        is the dict of colors assigned so far, keyed by node.  You can
        pass your own ordering function, or use one of the built in:

        * strategy_largest_first
        * strategy_random_sequential
        * strategy_smallest_last
        * strategy_independent_set
        * strategy_connected_sequential_bfs
        * strategy_connected_sequential_dfs
        * strategy_connected_sequential
          (alias of strategy_connected_sequential_bfs)
        * strategy_saturation_largest_first (also known as DSATUR)

    interchange: bool
        Will use the color interchange algorithm described by [2]_ if set
        to true.

        Note that saturation largest first and independent set do not
        work with interchange. Furthermore, if you use interchange with
        your own strategy function, you cannot rely on the values in the
        colors argument.

    Returns
    -------
    A dictionary with keys representing nodes and values representing
    corresponding coloring.

    Examples
    --------
    >>> G = nx.cycle_graph(4)
    >>> d = nx.coloring.greedy_color(G, strategy=nx.coloring.strategy_largest_first)
    >>> d in [{0: 0, 1: 1, 2: 0, 3: 1}, {0: 1, 1: 0, 2: 1, 3: 0}]
    True

    References
    ----------
    .. [1] Adrian Kosowski, and Krzysztof Manuszewski,
       Classical Coloring of Graphs, Graph Colorings, 2-19, 2004.
       ISBN 0-8218-3458-4.
    .. [2] Maciej M. Syslo, Marsingh Deo, Janusz S. Kowalik,
       Discrete Optimization Algorithms with Pascal Programs, 415-424, 1983.
       ISBN 0-486-45353-7.
    """
    colors = {}  # node -> assigned color
    if not len(G):
        return colors
    if interchange and strategy in (strategy_independent_set,
                                    strategy_saturation_largest_first):
        raise nx.NetworkXPointlessConcept(
            'Interchange is not applicable for GIS and SLF')
    nodes = strategy(G, colors)
    # GIS writes into `colors` itself and returns None; nothing left to do.
    if not nodes:
        return colors
    if interchange:
        return _interchange.greedy_coloring_with_interchange(G, nodes)
    for node in nodes:
        # Colors already taken by this node's colored neighbours.
        taken = {colors[neighbour]
                 for neighbour in G.neighbors_iter(node)
                 if neighbour in colors}
        # Smallest non-negative integer not used by any neighbour.
        colors[node] = next(c for c in itertools.count() if c not in taken)
    return colors
| bsd-3-clause |
kiwicopple/MyMDb | venv/Lib/site-packages/sphinx/builders/texinfo.py | 11 | 7881 | # -*- coding: utf-8 -*-
"""
sphinx.builders.texinfo
~~~~~~~~~~~~~~~~~~~~~~~
Texinfo builder.
:copyright: Copyright 2007-2014 by the Sphinx team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
from os import path
from docutils import nodes
from docutils.io import FileOutput
from docutils.utils import new_document
from docutils.frontend import OptionParser
from sphinx import addnodes
from sphinx.locale import _
from sphinx.builders import Builder
from sphinx.environment import NoUri
from sphinx.util.nodes import inline_all_toctrees
from sphinx.util.osutil import SEP, copyfile
from sphinx.util.console import bold, darkgreen
from sphinx.writers.texinfo import TexinfoWriter
# Boilerplate Makefile written alongside the generated .texi files (see
# TexinfoBuilder.finish); drives makeinfo / texi2pdf / install-info.  Recipe
# lines need literal tab characters, hence the \t escapes.
TEXINFO_MAKEFILE = '''\
# Makefile for Sphinx Texinfo output
infodir ?= /usr/share/info
MAKEINFO = makeinfo --no-split
MAKEINFO_html = makeinfo --no-split --html
MAKEINFO_plaintext = makeinfo --no-split --plaintext
TEXI2PDF = texi2pdf --batch --expand
INSTALL_INFO = install-info
ALLDOCS = $(basename $(wildcard *.texi))
all: info
info: $(addsuffix .info,$(ALLDOCS))
plaintext: $(addsuffix .txt,$(ALLDOCS))
html: $(addsuffix .html,$(ALLDOCS))
pdf: $(addsuffix .pdf,$(ALLDOCS))
install-info: info
\tfor f in *.info; do \\
\t cp -t $(infodir) "$$f" && \\
\t $(INSTALL_INFO) --info-dir=$(infodir) "$$f" ; \\
\tdone
uninstall-info: info
\tfor f in *.info; do \\
\t rm -f "$(infodir)/$$f" ; \\
\t $(INSTALL_INFO) --delete --info-dir=$(infodir) "$$f" ; \\
\tdone
%.info: %.texi
\t$(MAKEINFO) -o '$@' '$<'
%.txt: %.texi
\t$(MAKEINFO_plaintext) -o '$@' '$<'
%.html: %.texi
\t$(MAKEINFO_html) -o '$@' '$<'
%.pdf: %.texi
\t-$(TEXI2PDF) '$<'
\t-$(TEXI2PDF) '$<'
\t-$(TEXI2PDF) '$<'
clean:
\trm -f *.info *.pdf *.txt *.html
\trm -f *.log *.ind *.aux *.toc *.syn *.idx *.out *.ilg *.pla *.ky *.pg
\trm -f *.vr *.tp *.fn *.fns *.def *.defs *.cp *.cps *.ge *.ges *.mo
.PHONY: all info plaintext html pdf install-info uninstall-info clean
'''
class TexinfoBuilder(Builder):
    """
    Builds Texinfo output to create Info documentation.

    One .texi file (plus a shared Makefile) is produced per entry in the
    ``texinfo_documents`` config value.  NOTE: this module uses Python 2
    only syntax (``except ..., err`` below, ``iteritems``).
    """
    name = 'texinfo'
    format = 'texinfo'
    supported_image_types = ['image/png', 'image/jpeg',
                             'image/gif',]

    def init(self):
        """Reset per-build state."""
        self.docnames = []
        self.document_data = []

    def get_outdated_docs(self):
        """Always rebuild everything; no dependency tracking yet."""
        return 'all documents'  # for now

    def get_target_uri(self, docname, typ=None):
        """Return the Texinfo node label for *docname*; NoUri if it is not
        part of any texinfo document."""
        if docname not in self.docnames:
            raise NoUri
        else:
            return '%' + docname

    def get_relative_uri(self, from_, to, typ=None):
        # ignore source path
        return self.get_target_uri(to, typ)

    def init_document_data(self):
        """Validate texinfo_documents entries and fill self.document_data
        and self.titles."""
        preliminary_document_data = map(list, self.config.texinfo_documents)
        # NOTE(review): under Python 3 `map` is a lazy iterator and this
        # truthiness check would always pass; acceptable here because the
        # file targets Python 2 (see the except clause in finish()).
        if not preliminary_document_data:
            self.warn('no "texinfo_documents" config value found; no documents '
                      'will be written')
            return
        # assign subdirs to titles
        self.titles = []
        for entry in preliminary_document_data:
            docname = entry[0]
            if docname not in self.env.all_docs:
                self.warn('"texinfo_documents" config value references unknown '
                          'document %s' % docname)
                continue
            self.document_data.append(entry)
            # Strip a trailing 'index' so the whole subdirectory maps to
            # this document's title.
            if docname.endswith(SEP+'index'):
                docname = docname[:-5]
            self.titles.append((docname, entry[2]))

    def write(self, *ignored):
        """Assemble and write one .texi file per texinfo_documents entry."""
        self.init_document_data()
        for entry in self.document_data:
            # Entry layout: (startdoc, targetname, title, author
            #                [, direntry, description, category[, toctree_only]])
            docname, targetname, title, author = entry[:4]
            targetname += '.texi'
            direntry = description = category = ''
            if len(entry) > 6:
                direntry, description, category = entry[4:7]
            toctree_only = False
            if len(entry) > 7:
                toctree_only = entry[7]
            destination = FileOutput(
                destination_path=path.join(self.outdir, targetname),
                encoding='utf-8')
            self.info("processing " + targetname + "... ", nonl=1)
            doctree = self.assemble_doctree(docname, toctree_only,
                appendices=(self.config.texinfo_appendices or []))
            self.info("writing... ", nonl=1)
            self.post_process_images(doctree)
            docwriter = TexinfoWriter(self)
            settings = OptionParser(
                defaults=self.env.settings,
                components=(docwriter,),
                read_config_files=True).get_default_values()
            settings.author = author
            settings.title = title
            settings.texinfo_filename = targetname[:-5] + '.info'
            settings.texinfo_elements = self.config.texinfo_elements
            settings.texinfo_dir_entry = direntry or ''
            settings.texinfo_dir_category = category or ''
            settings.texinfo_dir_description = description or ''
            settings.docname = docname
            doctree.settings = settings
            docwriter.write(doctree, destination)
            self.info("done")

    def assemble_doctree(self, indexfile, toctree_only, appendices):
        """Inline the whole toctree of *indexfile* (plus *appendices*) into
        one large doctree and resolve its references."""
        self.docnames = set([indexfile] + appendices)
        self.info(darkgreen(indexfile) + " ", nonl=1)
        tree = self.env.get_doctree(indexfile)
        tree['docname'] = indexfile
        if toctree_only:
            # extract toctree nodes from the tree and put them in a
            # fresh document
            new_tree = new_document('<texinfo output>')
            new_sect = nodes.section()
            new_sect += nodes.title(u'<Set title in conf.py>',
                                    u'<Set title in conf.py>')
            new_tree += new_sect
            for node in tree.traverse(addnodes.toctree):
                new_sect += node
            tree = new_tree
        largetree = inline_all_toctrees(self, self.docnames, indexfile, tree,
                                        darkgreen)
        largetree['docname'] = indexfile
        for docname in appendices:
            appendix = self.env.get_doctree(docname)
            appendix['docname'] = docname
            largetree.append(appendix)
        self.info()
        self.info("resolving references...")
        self.env.resolve_references(largetree, indexfile, self)
        # TODO: add support for external :ref:s
        for pendingnode in largetree.traverse(addnodes.pending_xref):
            docname = pendingnode['refdocname']
            sectname = pendingnode['refsectname']
            newnodes = [nodes.emphasis(sectname, sectname)]
            for subdir, title in self.titles:
                if docname.startswith(subdir):
                    newnodes.append(nodes.Text(_(' (in '), _(' (in ')))
                    newnodes.append(nodes.emphasis(title, title))
                    newnodes.append(nodes.Text(')', ')'))
                    break
            else:
                # No matching title: leave just the emphasized section name.
                pass
            pendingnode.replace_self(newnodes)
        return largetree

    def finish(self):
        """Copy images and write the support Makefile into the output dir."""
        # copy image files
        if self.images:
            self.info(bold('copying images...'), nonl=1)
            for src, dest in self.images.iteritems():
                self.info(' '+src, nonl=1)
                copyfile(path.join(self.srcdir, src),
                         path.join(self.outdir, dest))
            self.info()
        self.info(bold('copying Texinfo support files... '), nonl=True)
        # copy Makefile
        fn = path.join(self.outdir, 'Makefile')
        self.info(fn, nonl=1)
        try:
            mkfile = open(fn, 'w')
            try:
                mkfile.write(TEXINFO_MAKEFILE)
            finally:
                mkfile.close()
        except (IOError, OSError), err:
            # Python 2 only except syntax; a failed write is reported as a
            # warning rather than aborting the build.
            self.warn("error writing file %s: %s" % (fn, err))
        self.info(' done')
| mit |
wuyuewen/libcloud | libcloud/test/dns/test_vultr.py | 17 | 13075 | # Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import unittest
from libcloud.dns.drivers.vultr import VultrDNSDriver
from libcloud.dns.types import RecordType
from libcloud.utils.py3 import httplib
from libcloud.test import MockHttp
from libcloud.test.secrets import VULTR_PARAMS
from libcloud.test.file_fixtures import DNSFileFixtures
from libcloud.dns.types import ZoneDoesNotExistError, RecordDoesNotExistError
from libcloud.dns.types import ZoneAlreadyExistsError
from libcloud.dns.base import Zone, Record
class VultrTests(unittest.TestCase):
    """Unit tests for the Vultr DNS driver.

    All HTTP traffic is routed through VultrMockHttp; setting
    ``VultrMockHttp.type`` selects which fixture variant a test exercises.
    """

    def setUp(self):
        # Reset the fixture selector and wire the driver to the mock
        # connection class before each test.
        VultrMockHttp.type = None
        VultrDNSDriver.connectionCls.conn_classes = (
            None, VultrMockHttp)
        self.driver = VultrDNSDriver(*VULTR_PARAMS)
        # Pre-built zone/record objects used by the "does not exist" tests;
        # they deliberately reference entities absent from the fixtures.
        self.test_zone = Zone(id='test.com', type='master', ttl=None,
                              domain='test.com', extra={}, driver=self)
        self.test_record = Record(id='31', type=RecordType.A, name='test',
                                  zone=self.test_zone, data='127.0.0.1',
                                  driver=self, extra={})

    def test_list_zones_empty(self):
        VultrMockHttp.type = 'EMPTY_ZONES_LIST'
        zones = self.driver.list_zones()
        self.assertEqual(zones, [])

    def test_list_zones_success(self):
        zones = self.driver.list_zones()
        self.assertEqual(len(zones), 4)

        zone = zones[0]
        self.assertEqual(zone.id, 'example.com')
        self.assertEqual(zone.type, 'master')
        self.assertEqual(zone.domain, 'example.com')
        self.assertEqual(zone.ttl, None)

        zone = zones[1]
        self.assertEqual(zone.id, 'zupo.com')
        self.assertEqual(zone.type, 'master')
        self.assertEqual(zone.domain, 'zupo.com')
        self.assertEqual(zone.ttl, None)

        zone = zones[2]
        self.assertEqual(zone.id, 'oltjano.com')
        self.assertEqual(zone.type, 'master')
        self.assertEqual(zone.domain, 'oltjano.com')
        self.assertEqual(zone.ttl, None)

        zone = zones[3]
        self.assertEqual(zone.id, '13.com')
        self.assertEqual(zone.type, 'master')
        self.assertEqual(zone.domain, '13.com')
        self.assertEqual(zone.ttl, None)

    def test_get_zone_zone_does_not_exist(self):
        VultrMockHttp.type = 'GET_ZONE_ZONE_DOES_NOT_EXIST'
        try:
            self.driver.get_zone(zone_id='test.com')
        except ZoneDoesNotExistError:
            e = sys.exc_info()[1]
            self.assertEqual(e.zone_id, 'test.com')
        else:
            self.fail('Exception was not thrown')

    def test_get_zone_success(self):
        VultrMockHttp.type = 'GET_ZONE_SUCCESS'
        zone = self.driver.get_zone(zone_id='zupo.com')

        self.assertEqual(zone.id, 'zupo.com')
        self.assertEqual(zone.domain, 'zupo.com')
        self.assertEqual(zone.type, 'master')
        self.assertEqual(zone.ttl, None)

    def test_delete_zone_zone_does_not_exist(self):
        VultrMockHttp.type = 'DELETE_ZONE_ZONE_DOES_NOT_EXIST'
        try:
            self.driver.delete_zone(zone=self.test_zone)
        except ZoneDoesNotExistError:
            e = sys.exc_info()[1]
            self.assertEqual(e.zone_id, self.test_zone.id)
        else:
            self.fail('Exception was not thrown')

    def test_delete_zone_success(self):
        zone = self.driver.list_zones()[0]
        status = self.driver.delete_zone(zone=zone)
        self.assertTrue(status)

    def test_create_zone_success(self):
        zone = self.driver.create_zone(domain='test.com',
                                       extra={'serverip': '127.0.0.1'})
        self.assertEqual(zone.id, 'test.com')
        self.assertEqual(zone.domain, 'test.com')
        # NOTE(review): stray trailing comma below turns the expression into
        # a one-element tuple; harmless, but it should be removed.
        self.assertEqual(zone.type, 'master'),
        self.assertEqual(zone.ttl, None)

    def test_create_zone_zone_already_exists(self):
        VultrMockHttp.type = 'CREATE_ZONE_ZONE_ALREADY_EXISTS'
        try:
            self.driver.create_zone(domain='example.com',
                                    extra={'serverip': '127.0.0.1'})
        except ZoneAlreadyExistsError:
            e = sys.exc_info()[1]
            self.assertEqual(e.zone_id, 'example.com')
        else:
            self.fail('Exception was not thrown')

    def test_get_record_record_does_not_exist(self):
        VultrMockHttp.type = 'GET_RECORD_RECORD_DOES_NOT_EXIST'
        try:
            self.driver.get_record(zone_id='zupo.com', record_id='1300')
        except RecordDoesNotExistError:
            e = sys.exc_info()[1]
            self.assertEqual(e.record_id, '1300')
        else:
            self.fail('Exception was not thrown')

    def test_list_records_zone_does_not_exist(self):
        VultrMockHttp.type = 'LIST_RECORDS_ZONE_DOES_NOT_EXIST'
        try:
            self.driver.list_records(zone=self.test_zone)
        except ZoneDoesNotExistError:
            e = sys.exc_info()[1]
            self.assertEqual(e.zone_id, self.test_zone.id)
        else:
            self.fail('Exception was not thrown')

    def test_list_records_empty(self):
        VultrMockHttp.type = 'EMPTY_RECORDS_LIST'
        zone = self.driver.list_zones()[0]
        records = self.driver.list_records(zone=zone)
        self.assertEqual(records, [])

    def test_list_records_success(self):
        zone = self.driver.get_zone(zone_id='zupo.com')
        records = self.driver.list_records(zone=zone)
        self.assertEqual(len(records), 2)

        arecord = records[0]
        self.assertEqual(arecord.id, '13')
        self.assertEqual(arecord.name, 'arecord')
        self.assertEqual(arecord.type, RecordType.A)
        self.assertEqual(arecord.data, '127.0.0.1')

    def test_get_record_success(self):
        VultrMockHttp.type = 'GET_RECORD'
        record = self.driver.get_record(zone_id='zupo.com', record_id='1300')

        self.assertEqual(record.id, '1300')
        self.assertEqual(record.name, 'zupo')
        self.assertEqual(record.data, '127.0.0.1')
        self.assertEqual(record.type, RecordType.A)

    def test_delete_record_record_does_not_exist(self):
        VultrMockHttp.type = 'DELETE_RECORD_RECORD_DOES_NOT_EXIST'
        try:
            self.driver.delete_record(record=self.test_record)
        except RecordDoesNotExistError:
            e = sys.exc_info()[1]
            self.assertEqual(e.record_id, self.test_record.id)
        else:
            self.fail('Exception was not thrown')

    def test_delete_record_success(self):
        zone = self.driver.list_zones()[0]
        record = self.driver.list_records(zone=zone)[0]
        status = self.driver.delete_record(record=record)
        self.assertTrue(status)
class VultrMockHttp(MockHttp):
    """Mock HTTP layer serving canned Vultr API responses.

    Requests are dispatched to methods named after the URL path (slashes
    replaced by underscores) plus, when set, the active ``type`` suffix —
    e.g. ``/v1/dns/list`` with ``type = 'GET_ZONE_SUCCESS'`` hits
    ``_v1_dns_list_GET_ZONE_SUCCESS``.  All handlers return HTTP 200; the
    driver derives errors from the (possibly empty) JSON bodies.
    """
    fixtures = DNSFileFixtures('vultr')

    # --- default (untyped) routes ---

    def _v1_dns_list(self, method, url, body, headers):
        body = self.fixtures.load('list_domains.json')
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])

    def _v1_dns_records(self, method, url, body, headers):
        body = self.fixtures.load('list_records.json')
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])

    def _v1_dns_list_ZONE_DOES_NOT_EXIST(self, method, url, body, headers):
        body = self.fixtures.load('list_domains.json')
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])

    def _v1_dns_list_EMPTY_ZONES_LIST(self, method, url, body, headers):
        body = self.fixtures.load('empty_zones_list.json')
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])

    def _v1_dns_list_GET_ZONE_ZONE_DOES_NOT_EXIST(self, method, url, body,
                                                  headers):
        body = self.fixtures.load('list_domains.json')
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])

    def _v1_dns_list_GET_ZONE_SUCCESS(self, method, url, body, headers):
        body = self.fixtures.load('get_zone.json')
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])

    def _v1_dns_list_EMPTY_RECORDS_LIST(self, method, url, body, headers):
        body = self.fixtures.load('list_domains.json')
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])

    def _v1_dns_records_EMPTY_RECORDS_LIST(self, method, url, body, headers):
        body = self.fixtures.load('empty_records_list.json')
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])

    def _v1_dns_list_GET_RECORD(self, method, url, body, headers):
        body = self.fixtures.load('get_zone.json')
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])

    def _v1_dns_records_GET_RECORD(self, method, url, body, headers):
        body = self.fixtures.load('get_record.json')
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])

    def _v1_dns_list_GET_RECORD_RECORD_DOES_NOT_EXIST(self, method, url, body,
                                                      headers):
        body = self.fixtures.load('get_zone.json')
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])

    def _v1_dns_records_GET_RECORD_RECORD_DOES_NOT_EXIST(self, method, url,
                                                         body, headers):
        body = self.fixtures.load('list_records.json')
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])

    # --- mutation routes return empty bodies ---

    def _v1_dns_delete_domain(self, method, url, body, headers):
        body = ''
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])

    def _v1_dns_delete_record(self, method, url, body, headers):
        body = ''
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])

    def _v1_dns_create_domain(self, method, url, body, headers):
        body = ''
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])

    def _v1_dns_list_CREATE_ZONE_ZONE_ALREADY_EXISTS(self, method, url, body,
                                                     headers):
        body = self.fixtures.load('list_domains.json')
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])

    def _v1_dns_create_domain_CREATE_ZONE_ZONE_ALREADY_EXISTS(self, method,
                                                              url, body,
                                                              headers):
        body = ''
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])

    def _v1_dns_list_DELETE_ZONE_ZONE_DOES_NOT_EXIST(self, method, url, body,
                                                     headers):
        body = self.fixtures.load('list_domains.json')
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])

    def _v1_dns_delete_domain_DELETE_ZONE_ZONE_DOES_NOT_EXIST(self, method,
                                                              url, body,
                                                              headers):
        body = ''
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])

    def _v1_dns_records_DELETE_RECORD_RECORD_DOES_NOT_EXIST(self, method, url,
                                                            body, headers):
        body = self.fixtures.load('list_records.json')
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])

    def _v1_dns_delete_record_DELETE_RECORD_RECORD_DOES_NOT_EXIST(self, method,
                                                                  url, body,
                                                                  headers):
        # NOTE(review): unlike its siblings this handler never reassigns
        # `body`, so it echoes the request body back — presumably it should
        # set body = ''; confirm before changing.
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])

    def _v1_dns_list_DELETE_RECORD_RECORD_DOES_NOT_EXIST(self, method, url,
                                                         body, headers):
        body = self.fixtures.load('test_zone.json')
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])

    def _v1_dns_list_LIST_RECORDS_ZONE_DOES_NOT_EXIST(self, method, url, body,
                                                      headers):
        body = self.fixtures.load('list_domains.json')
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])

    def _v1_dns_records_LIST_RECORDS_ZONE_DOES_NOT_EXIST(self, method, url,
                                                         body, headers):
        body = ''
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])
# Allow running this test module directly; the exit status reflects the
# unittest result.
if __name__ == '__main__':
    sys.exit(unittest.main())
| apache-2.0 |
DTUWindEnergy/HAWC2Wrapper | docs/conf.py | 2 | 9412 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# HAWC2Wrapper documentation build configuration file, created by
# sphinx-quickstart on Wed Feb 17 14:20:52 2016.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
import glob
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
sys.path.insert(0, os.path.abspath('../hawc2_wrapper/')) # leo
extensions = ['sphinx.ext.autodoc',
'sphinx.ext.napoleon']
files = glob.glob('../hawc2_wrapper/*.py')
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'HAWC2Wrapper'
copyright = '2016, DTU Wind Energy'
author = 'DTU Wind Energy'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '1.0'
# The full version, including alpha/beta/rc tags.
release = '1.0'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'sphinx_rtd_theme'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr'
#html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# Now only 'ja' uses this config value
#html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'HAWC2Wrapperdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
# Latex figure (float) alignment
#'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'HAWC2Wrapper.tex', 'HAWC2Wrapper Documentation',
'DTU Wind Energy', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'hawc2wrapper', 'HAWC2Wrapper Documentation',
[author], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'HAWC2Wrapper', 'HAWC2Wrapper Documentation',
author, 'HAWC2Wrapper', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
| agpl-3.0 |
deadshield/deadshield | share/qt/extract_strings_qt.py | 2945 | 1844 | #!/usr/bin/python
'''
Extract _("...") strings for translation and convert to Qt4 stringdefs so that
they can be picked up by Qt linguist.
'''
from subprocess import Popen, PIPE
import glob
import operator
# Destination C++ file for the generated string table, and the token that
# represents an empty msgid (the PO header entry, which must be skipped).
OUT_CPP="src/qt/bitcoinstrings.cpp"
EMPTY=['""']
def parse_po(text):
    """
    Parse 'po' format produced by xgettext.
    Return a list of (msgid,msgstr) tuples.
    """
    results = []
    current_id = []
    current_str = []
    reading_id = False
    reading_str = False
    for raw_line in text.split('\n'):
        line = raw_line.rstrip('\r')
        if line.startswith('msgid '):
            # A new message begins; flush the previous one, if any.
            if reading_str:
                results.append((current_id, current_str))
                reading_str = False
            reading_id = True
            current_id = [line[6:]]
        elif line.startswith('msgstr '):
            reading_id = False
            reading_str = True
            current_str = [line[7:]]
        elif line.startswith('"'):
            # Continuation line: belongs to whichever part is being read.
            if reading_id:
                current_id.append(line)
            if reading_str:
                current_str.append(line)
    # Flush the final message, which has no following 'msgid' to trigger it.
    if reading_str:
        results.append((current_id, current_str))
    return results
# Collect every C++ source/header and run xgettext over them, reading the
# generated PO catalog from the child's stdout.
files = glob.glob('src/*.cpp') + glob.glob('src/*.h')
# xgettext -n --keyword=_ $FILES
child = Popen(['xgettext','--output=-','-n','--keyword=_'] + files, stdout=PIPE)
(out, err) = child.communicate()
messages = parse_po(out)
# Emit each msgid as a QT_TRANSLATE_NOOP entry so Qt Linguist picks it up.
f = open(OUT_CPP, 'w')
f.write("""#include <QtGlobal>
// Automatically generated by extract_strings.py
#ifdef __GNUC__
#define UNUSED __attribute__((unused))
#else
#define UNUSED
#endif
""")
f.write('static const char UNUSED *bitcoin_strings[] = {\n')
messages.sort(key=operator.itemgetter(0))
for (msgid, msgstr) in messages:
    # Skip the PO header entry (empty msgid).
    if msgid != EMPTY:
        f.write('QT_TRANSLATE_NOOP("bitcoin-core", %s),\n' % ('\n'.join(msgid)))
f.write('};')
f.close()
| mit |
CLVsol/clvsol_odoo_addons | clv_survey/wizard/survey_code_renew.py | 1 | 6551 | # -*- coding: utf-8 -*-
# Copyright (C) 2013-Today Carlos Eduardo Vercelino - CLVsol
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl).
import logging
from odoo import api, fields, models
_logger = logging.getLogger(__name__)
class SurveyCodeRenew(models.TransientModel):
    """Wizard that renumbers page/question/label codes of the selected surveys.

    Codes are rebuilt as <survey>_<pp>_<qq>[_<ll>] with zero-padded, step-10
    sequence numbers. During the rewrite every code carries a temporary 'x'
    prefix so intermediate values cannot collide with still-unrenamed
    records; the prefix is stripped in a final pass.
    """
    _description = 'Survey Code Renew'
    _name = 'clv.survey.code_renew'

    def _default_survey_ids(self):
        # Pre-select the surveys the wizard was launched on.
        return self._context.get('active_ids')

    survey_ids = fields.Many2many(
        comodel_name='survey.survey',
        relation='clv_survey_code_renew_rel',
        string='Surveys',
        default=_default_survey_ids
    )

    @staticmethod
    def _child_code(parent_code, sequence):
        """Return parent_code + '_NN', where NN is sequence/10 zero-padded
        to two digits for sequences below 100 (sequences step by 10)."""
        step = str(int(sequence / 10))
        if sequence < 100:
            return parent_code + '_0' + step
        return parent_code + '_' + step

    @staticmethod
    def _renew_labels(labels, question_code, sequence):
        """Renumber *labels*, continuing from *sequence*.

        Returns the last sequence assigned so matrix questions can chain
        both of their label sets over one running counter. Replaces the
        three copy-pasted loops the original method carried.
        """
        for label in labels:
            sequence += 10
            new_label_code = SurveyCodeRenew._child_code(question_code, sequence)
            _logger.info(
                u'%s %s: %s, %s: %s',
                '>>>>>>>>>>>>>>>>>>>>',
                label.code, label.sequence, new_label_code[1:], sequence
            )
            label.sequence = sequence
            label.code = new_label_code
        return sequence

    @api.multi
    def do_survey_code_renew(self):
        """Renumber codes for every selected survey; returns True."""
        self.ensure_one()
        for survey in self.survey_ids:
            _logger.info(u'%s %s', '>>>>>', survey.title)
            _logger.info(u'%s %s', '>>>>>', survey.code)
            # Temporary 'x' prefix keeps new codes distinct from old ones.
            new_survey_code = 'x' + survey.code
            new_page_sequence = 0
            for page in survey.page_ids:
                new_page_sequence += 10
                new_page_code = self._child_code(new_survey_code, new_page_sequence)
                _logger.info(
                    u'%s %s: %s, %s: %s',
                    '>>>>>>>>>>', page.code, page.sequence, new_page_code[1:], new_page_sequence
                )
                new_question_sequence = 0
                for question in page.question_ids:
                    new_question_sequence += 10
                    new_question_code = self._child_code(new_page_code, new_question_sequence)
                    _logger.info(
                        u'%s %s: %s, %s: %s',
                        '>>>>>>>>>>>>>>>',
                        question.code, question.sequence, new_question_code[1:], new_question_sequence
                    )
                    # free_text/textbox/datetime questions carry no labels.
                    if question.type in ('simple_choice', 'multiple_choice'):
                        self._renew_labels(question.labels_ids, new_question_code, 0)
                    elif question.type == 'matrix':
                        # Both label sets share one running sequence, exactly
                        # as the original numbering did.
                        last = self._renew_labels(
                            question.labels_ids_2, new_question_code, 0)
                        self._renew_labels(
                            question.labels_ids, new_question_code, last)
                    question.sequence = new_question_sequence
                    question.code = new_question_code
                page.sequence = new_page_sequence
                page.code = new_page_code
            # Second pass: strip the temporary 'x' prefix everywhere.
            for page in survey.page_ids:
                page.code = page.code[1:]
                for question in page.question_ids:
                    question.code = question.code[1:]
                    for label in question.labels_ids_2:
                        label.code = label.code[1:]
                    for label in question.labels_ids:
                        label.code = label.code[1:]
        return True
| agpl-3.0 |
40223220/2015cd_midterm | static/Brython3.1.1-20150328-091302/Lib/xml/dom/pulldom.py | 850 | 11761 | import xml.sax
import xml.sax.handler
# Event-type tokens yielded by DOMEventStream as (event, node) pairs.
START_ELEMENT = "START_ELEMENT"
END_ELEMENT = "END_ELEMENT"
COMMENT = "COMMENT"
START_DOCUMENT = "START_DOCUMENT"
END_DOCUMENT = "END_DOCUMENT"
PROCESSING_INSTRUCTION = "PROCESSING_INSTRUCTION"
IGNORABLE_WHITESPACE = "IGNORABLE_WHITESPACE"
CHARACTERS = "CHARACTERS"
class PullDOM(xml.sax.ContentHandler):
    """SAX ContentHandler that turns SAX callbacks into DOM nodes and queues
    (event_type, node) pairs for DOMEventStream to pull one at a time.

    The queue is a singly linked list of [(event, node), next_cell] cells:
    firstEvent is a sentinel head and lastEvent always points at the tail.
    """
    _locator = None
    document = None
    def __init__(self, documentFactory=None):
        from xml.dom import XML_NAMESPACE
        self.documentFactory = documentFactory
        # Sentinel head cell of the event queue; lastEvent tracks the tail.
        self.firstEvent = [None, None]
        self.lastEvent = self.firstEvent
        self.elementStack = []
        self.push = self.elementStack.append
        try:
            self.pop = self.elementStack.pop
        except AttributeError:
            # use class' pop instead
            pass
        self._ns_contexts = [{XML_NAMESPACE:'xml'}] # contains uri -> prefix dicts
        self._current_context = self._ns_contexts[-1]
        # Events seen before the Document node exists (prolog comments/PIs);
        # replayed into the queue by buildDocument().
        self.pending_events = []
    def pop(self):
        # Fallback used only if binding self.pop in __init__ failed.
        result = self.elementStack[-1]
        del self.elementStack[-1]
        return result
    def setDocumentLocator(self, locator):
        self._locator = locator
    def startPrefixMapping(self, prefix, uri):
        # Remember xmlns declarations so they can be re-attached as regular
        # attributes of the next element (SAX reports them separately).
        if not hasattr(self, '_xmlns_attrs'):
            self._xmlns_attrs = []
        self._xmlns_attrs.append((prefix or 'xmlns', uri))
        self._ns_contexts.append(self._current_context.copy())
        self._current_context[uri] = prefix or None
    def endPrefixMapping(self, prefix):
        self._current_context = self._ns_contexts.pop()
    def startElementNS(self, name, tagName , attrs):
        # Retrieve xml namespace declaration attributes.
        xmlns_uri = 'http://www.w3.org/2000/xmlns/'
        xmlns_attrs = getattr(self, '_xmlns_attrs', None)
        if xmlns_attrs is not None:
            for aname, value in xmlns_attrs:
                attrs._attrs[(xmlns_uri, aname)] = value
            self._xmlns_attrs = []
        uri, localname = name
        if uri:
            # When using namespaces, the reader may or may not
            # provide us with the original name. If not, create
            # *a* valid tagName from the current context.
            if tagName is None:
                prefix = self._current_context[uri]
                if prefix:
                    tagName = prefix + ":" + localname
                else:
                    tagName = localname
            if self.document:
                node = self.document.createElementNS(uri, tagName)
            else:
                # First element: create the Document node on the fly.
                node = self.buildDocument(uri, tagName)
        else:
            # When the tagname is not prefixed, it just appears as
            # localname
            if self.document:
                node = self.document.createElement(localname)
            else:
                node = self.buildDocument(None, localname)
        for aname,value in attrs.items():
            a_uri, a_localname = aname
            if a_uri == xmlns_uri:
                if a_localname == 'xmlns':
                    qname = a_localname
                else:
                    qname = 'xmlns:' + a_localname
                attr = self.document.createAttributeNS(a_uri, qname)
                node.setAttributeNodeNS(attr)
            elif a_uri:
                prefix = self._current_context[a_uri]
                if prefix:
                    qname = prefix + ":" + a_localname
                else:
                    qname = a_localname
                attr = self.document.createAttributeNS(a_uri, qname)
                node.setAttributeNodeNS(attr)
            else:
                attr = self.document.createAttribute(a_localname)
                node.setAttributeNode(attr)
            attr.value = value
        # Append the event cell and advance the tail pointer.
        self.lastEvent[1] = [(START_ELEMENT, node), None]
        self.lastEvent = self.lastEvent[1]
        self.push(node)
    def endElementNS(self, name, tagName):
        self.lastEvent[1] = [(END_ELEMENT, self.pop()), None]
        self.lastEvent = self.lastEvent[1]
    def startElement(self, name, attrs):
        # Non-namespace variant of startElementNS.
        if self.document:
            node = self.document.createElement(name)
        else:
            node = self.buildDocument(None, name)
        for aname,value in attrs.items():
            attr = self.document.createAttribute(aname)
            attr.value = value
            node.setAttributeNode(attr)
        self.lastEvent[1] = [(START_ELEMENT, node), None]
        self.lastEvent = self.lastEvent[1]
        self.push(node)
    def endElement(self, name):
        self.lastEvent[1] = [(END_ELEMENT, self.pop()), None]
        self.lastEvent = self.lastEvent[1]
    def comment(self, s):
        if self.document:
            node = self.document.createComment(s)
            self.lastEvent[1] = [(COMMENT, node), None]
            self.lastEvent = self.lastEvent[1]
        else:
            # No document yet (prolog); buffer until buildDocument().
            event = [(COMMENT, s), None]
            self.pending_events.append(event)
    def processingInstruction(self, target, data):
        if self.document:
            node = self.document.createProcessingInstruction(target, data)
            self.lastEvent[1] = [(PROCESSING_INSTRUCTION, node), None]
            self.lastEvent = self.lastEvent[1]
        else:
            # No document yet (prolog); buffer until buildDocument().
            event = [(PROCESSING_INSTRUCTION, target, data), None]
            self.pending_events.append(event)
    def ignorableWhitespace(self, chars):
        node = self.document.createTextNode(chars)
        self.lastEvent[1] = [(IGNORABLE_WHITESPACE, node), None]
        self.lastEvent = self.lastEvent[1]
    def characters(self, chars):
        node = self.document.createTextNode(chars)
        self.lastEvent[1] = [(CHARACTERS, node), None]
        self.lastEvent = self.lastEvent[1]
    def startDocument(self):
        if self.documentFactory is None:
            import xml.dom.minidom
            self.documentFactory = xml.dom.minidom.Document.implementation
    def buildDocument(self, uri, tagname):
        # Can't do that in startDocument, since we need the tagname
        # XXX: obtain DocumentType
        node = self.documentFactory.createDocument(uri, tagname, None)
        self.document = node
        self.lastEvent[1] = [(START_DOCUMENT, node), None]
        self.lastEvent = self.lastEvent[1]
        self.push(node)
        # Put everything we have seen so far into the document
        for e in self.pending_events:
            if e[0][0] == PROCESSING_INSTRUCTION:
                _,target,data = e[0]
                n = self.document.createProcessingInstruction(target, data)
                e[0] = (PROCESSING_INSTRUCTION, n)
            elif e[0][0] == COMMENT:
                n = self.document.createComment(e[0][1])
                e[0] = (COMMENT, n)
            else:
                raise AssertionError("Unknown pending event ",e[0][0])
            self.lastEvent[1] = e
            self.lastEvent = e
        self.pending_events = None
        # Return the root element node just created by createDocument().
        return node.firstChild
    def endDocument(self):
        self.lastEvent[1] = [(END_DOCUMENT, self.document), None]
        self.pop()
    def clear(self):
        "clear(): Explicitly release parsing structures"
        self.document = None
class ErrorHandler:
    """Minimal SAX error handler: warnings are printed, recoverable and
    fatal errors are re-raised to the caller."""
    def warning(self, exception):
        print(exception)
    def error(self, exception):
        raise exception
    def fatalError(self, exception):
        raise exception
class DOMEventStream:
    """Iterator over (event_type, node) pairs produced by PullDOM.

    When the SAX parser supports the IncrementalParser interface (has a
    feed() method), the stream is fed in bufsize chunks on demand;
    otherwise the whole input is parsed up front (_slurp) and events are
    replayed from memory (_emit).
    """
    def __init__(self, stream, parser, bufsize):
        self.stream = stream
        self.parser = parser
        self.bufsize = bufsize
        if not hasattr(self.parser, 'feed'):
            # Parser cannot be fed incrementally; parse everything at once.
            self.getEvent = self._slurp
        self.reset()
    def reset(self):
        self.pulldom = PullDOM()
        # This content handler relies on namespace support
        self.parser.setFeature(xml.sax.handler.feature_namespaces, 1)
        self.parser.setContentHandler(self.pulldom)
    def __getitem__(self, pos):
        # Legacy sequence protocol; 'pos' is ignored and events are simply
        # consumed in order.
        rc = self.getEvent()
        if rc:
            return rc
        raise IndexError
    def __next__(self):
        rc = self.getEvent()
        if rc:
            return rc
        raise StopIteration
    def __iter__(self):
        return self
    def expandNode(self, node):
        # Consume events until *node*'s END_ELEMENT arrives, attaching all
        # intervening nodes as (grand)children of *node*.
        event = self.getEvent()
        parents = [node]
        while event:
            token, cur_node = event
            if cur_node is node:
                return
            if token != END_ELEMENT:
                parents[-1].appendChild(cur_node)
            if token == START_ELEMENT:
                parents.append(cur_node)
            elif token == END_ELEMENT:
                del parents[-1]
            event = self.getEvent()
    def getEvent(self):
        # use IncrementalParser interface, so we get the desired
        # pull effect
        if not self.pulldom.firstEvent[1]:
            self.pulldom.lastEvent = self.pulldom.firstEvent
        # Feed chunks until at least one event is queued or input ends.
        while not self.pulldom.firstEvent[1]:
            buf = self.stream.read(self.bufsize)
            if not buf:
                self.parser.close()
                return None
            self.parser.feed(buf)
        # Pop the head event off the linked list.
        rc = self.pulldom.firstEvent[1][0]
        self.pulldom.firstEvent[1] = self.pulldom.firstEvent[1][1]
        return rc
    def _slurp(self):
        """ Fallback replacement for getEvent() using the
            standard SAX2 interface, which means we slurp the
            SAX events into memory (no performance gain, but
            we are compatible to all SAX parsers).
        """
        self.parser.parse(self.stream)
        self.getEvent = self._emit
        return self._emit()
    def _emit(self):
        """ Fallback replacement for getEvent() that emits
            the events that _slurp() read previously.
        """
        rc = self.pulldom.firstEvent[1][0]
        self.pulldom.firstEvent[1] = self.pulldom.firstEvent[1][1]
        return rc
    def clear(self):
        """clear(): Explicitly release parsing objects"""
        self.pulldom.clear()
        del self.pulldom
        self.parser = None
        self.stream = None
class SAX2DOM(PullDOM):
    """PullDOM variant that additionally wires each new node into its
    parent as events arrive, so a complete DOM tree is built."""
    def startElementNS(self, name, tagName , attrs):
        PullDOM.startElementNS(self, name, tagName, attrs)
        curNode = self.elementStack[-1]
        parentNode = self.elementStack[-2]
        parentNode.appendChild(curNode)
    def startElement(self, name, attrs):
        PullDOM.startElement(self, name, attrs)
        curNode = self.elementStack[-1]
        parentNode = self.elementStack[-2]
        parentNode.appendChild(curNode)
    def processingInstruction(self, target, data):
        PullDOM.processingInstruction(self, target, data)
        # The node just queued by the base class sits at the queue tail.
        node = self.lastEvent[0][1]
        parentNode = self.elementStack[-1]
        parentNode.appendChild(node)
    def ignorableWhitespace(self, chars):
        PullDOM.ignorableWhitespace(self, chars)
        node = self.lastEvent[0][1]
        parentNode = self.elementStack[-1]
        parentNode.appendChild(node)
    def characters(self, chars):
        PullDOM.characters(self, chars)
        node = self.lastEvent[0][1]
        parentNode = self.elementStack[-1]
        parentNode.appendChild(node)
default_bufsize = (2 ** 14) - 20

def parse(stream_or_string, parser=None, bufsize=None):
    """Return a DOMEventStream over *stream_or_string*.

    A str argument is treated as a filename and opened in binary mode;
    anything else is used directly as a stream. A default SAX parser is
    created when none is supplied.
    """
    effective_bufsize = default_bufsize if bufsize is None else bufsize
    if isinstance(stream_or_string, str):
        source = open(stream_or_string, 'rb')
    else:
        source = stream_or_string
    sax_parser = parser or xml.sax.make_parser()
    return DOMEventStream(source, sax_parser, effective_bufsize)
def parseString(string, parser=None):
    """Return a DOMEventStream over the XML document in *string*.

    The whole string fits in one buffer, so the buffer size is simply its
    length.
    """
    from io import StringIO
    source = StringIO(string)
    sax_parser = parser if parser else xml.sax.make_parser()
    return DOMEventStream(source, sax_parser, len(string))
| gpl-3.0 |
molly/Wikisource-to-LaTeX | core.py | 1 | 3449 | # -*- coding: utf-8 -*-
# Copyright (c) 2013–2015 Molly White
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import codecs, logging, os, util
from tokenizer import Tokenizer
from tokenparser import Parser
from api import Document
def setup_logging():
    """Configure and return the shared 'W2L' logger.

    The logger is set to DEBUG and given a console handler with a
    timestamped format. Note each call adds a fresh handler.
    """
    log = logging.getLogger("W2L")
    log.setLevel(logging.DEBUG)
    fmt = logging.Formatter("%(asctime)s - %(levelname)s: %(message)s",
                            datefmt="%I:%M:%S %p")
    handler = logging.StreamHandler()
    handler.setFormatter(fmt)
    log.addHandler(handler)
    return log
if __name__ == "__main__":
    logger = setup_logging()
    doc = Document()
    doc.organize()
    # Fetch raw wiki text only if it is not already cached on disk.
    if not os.path.exists(os.curdir + '/raw'):
        logger.debug("Getting raw text files.")
        doc.call()
    if not os.path.exists(os.curdir + '/text'):
        logger.debug("Parsing JSON to TXT.")
        doc.json_to_text()
    # Open and read files
    tokenizer = Tokenizer()
    progress = util.ProgressChecker()
    parser = Parser(progress)
    # (The original repeated this existence check/mkdir twice; once suffices.)
    if not os.path.exists(os.curdir + '/latex'):
        os.mkdir(os.curdir + '/latex')
    #folders = sorted(os.listdir(path=(os.curdir + '/text')), key=int)
    folders = ['0', '1', '2', '3']
    for folder in folders:
        # Sort numerically by the number before '.txt'; sorting by the first
        # character only (as before) would misplace 10.txt and beyond.
        files = sorted(os.listdir(path=(os.curdir + '/text/' + folder)),
                       key=lambda x: int(x.split('.')[0]))
        if folder == '3':
            files = ['0.txt', '1.txt']
        with codecs.open(os.curdir + '/latex/' + folder + '.tex', 'w+', 'utf-8') as outputfile:
            last_open = os.curdir + '/latex/' + folder + '.tex'
            for file in files:
                logger.debug("Parsing " + folder + "/" + file + " to " + folder + ".tex.")
                with codecs.open(os.curdir + '/text/' + folder + '/' + file, 'r', 'utf-8') as f:
                    data = f.read()
                    token_list = tokenizer.analyze(data)
                    parser.begin(outputfile)
                    parser.dispatch(token_list)
    print("Total number of pages included in main pages: " + str(doc.num_pages))
    progress.get_statistics()
    # with codecs.open(last_open, 'a', 'utf-8') as outputfile:
    #     contributors = doc.attribute()
    #     parser.end_matter(contributors, outputfile)
    logger.debug("Parsing complete.")
MalloyPower/parsing-python | front-end/testsuite-python-lib/Python-2.7.2/Lib/curses/textpad.py | 212 | 7338 | """Simple textbox editing widget with Emacs-like keybindings."""
import curses
import curses.ascii
def rectangle(win, uly, ulx, lry, lrx):
    """Draw a rectangle with corners at the provided upper-left
    and lower-right coordinates.
    """
    side_height = lry - uly - 1
    side_width = lrx - ulx - 1
    # Vertical sides.
    win.vline(uly+1, ulx, curses.ACS_VLINE, side_height)
    win.vline(uly+1, lrx, curses.ACS_VLINE, side_height)
    # Horizontal sides.
    win.hline(uly, ulx+1, curses.ACS_HLINE, side_width)
    win.hline(lry, ulx+1, curses.ACS_HLINE, side_width)
    # Corners, clockwise from upper-left.
    win.addch(uly, ulx, curses.ACS_ULCORNER)
    win.addch(uly, lrx, curses.ACS_URCORNER)
    win.addch(lry, lrx, curses.ACS_LRCORNER)
    win.addch(lry, ulx, curses.ACS_LLCORNER)
class Textbox:
    """Editing widget using the interior of a window object.
    Supports the following Emacs-like key bindings:
    Ctrl-A Go to left edge of window.
    Ctrl-B Cursor left, wrapping to previous line if appropriate.
    Ctrl-D Delete character under cursor.
    Ctrl-E Go to right edge (stripspaces off) or end of line (stripspaces on).
    Ctrl-F Cursor right, wrapping to next line when appropriate.
    Ctrl-G Terminate, returning the window contents.
    Ctrl-H Delete character backward.
    Ctrl-J Terminate if the window is 1 line, otherwise insert newline.
    Ctrl-K If line is blank, delete it, otherwise clear to end of line.
    Ctrl-L Refresh screen.
    Ctrl-N Cursor down; move down one line.
    Ctrl-O Insert a blank line at cursor location.
    Ctrl-P Cursor up; move up one line.
    Move operations do nothing if the cursor is at an edge where the movement
    is not possible. The following synonyms are supported where possible:
    KEY_LEFT = Ctrl-B, KEY_RIGHT = Ctrl-F, KEY_UP = Ctrl-P, KEY_DOWN = Ctrl-N
    KEY_BACKSPACE = Ctrl-h
    """
    def __init__(self, win, insert_mode=False):
        self.win = win
        self.insert_mode = insert_mode
        # maxy/maxx become the highest valid cursor coordinates (inclusive).
        (self.maxy, self.maxx) = win.getmaxyx()
        self.maxy = self.maxy - 1
        self.maxx = self.maxx - 1
        self.stripspaces = 1
        self.lastcmd = None
        win.keypad(1)
    def _end_of_line(self, y):
        """Go to the location of the first blank on the given line,
        returning the index of the last non-blank character."""
        # Scan backward from the right edge until a non-space is found.
        last = self.maxx
        while True:
            if curses.ascii.ascii(self.win.inch(y, last)) != curses.ascii.SP:
                last = min(self.maxx, last+1)
                break
            elif last == 0:
                break
            last = last - 1
        return last
    def _insert_printable_char(self, ch):
        # Insert (or overwrite) ch at the cursor; in insert mode the
        # displaced character ripples right via recursion.
        (y, x) = self.win.getyx()
        if y < self.maxy or x < self.maxx:
            if self.insert_mode:
                oldch = self.win.inch()
            # The try-catch ignores the error we trigger from some curses
            # versions by trying to write into the lowest-rightmost spot
            # in the window.
            try:
                self.win.addch(ch)
            except curses.error:
                pass
            if self.insert_mode:
                (backy, backx) = self.win.getyx()
                if curses.ascii.isprint(oldch):
                    self._insert_printable_char(oldch)
                    self.win.move(backy, backx)
    def do_command(self, ch):
        "Process a single editing command."
        (y, x) = self.win.getyx()
        self.lastcmd = ch
        if curses.ascii.isprint(ch):
            if y < self.maxy or x < self.maxx:
                self._insert_printable_char(ch)
        elif ch == curses.ascii.SOH:  # ^a
            self.win.move(y, 0)
        elif ch in (curses.ascii.STX,curses.KEY_LEFT, curses.ascii.BS,curses.KEY_BACKSPACE):
            if x > 0:
                self.win.move(y, x-1)
            elif y == 0:
                pass
            elif self.stripspaces:
                self.win.move(y-1, self._end_of_line(y-1))
            else:
                self.win.move(y-1, self.maxx)
            # Backspace additionally deletes the character moved onto.
            if ch in (curses.ascii.BS, curses.KEY_BACKSPACE):
                self.win.delch()
        elif ch == curses.ascii.EOT:  # ^d
            self.win.delch()
        elif ch == curses.ascii.ENQ:  # ^e
            if self.stripspaces:
                self.win.move(y, self._end_of_line(y))
            else:
                self.win.move(y, self.maxx)
        elif ch in (curses.ascii.ACK, curses.KEY_RIGHT):  # ^f
            if x < self.maxx:
                self.win.move(y, x+1)
            elif y == self.maxy:
                pass
            else:
                self.win.move(y+1, 0)
        elif ch == curses.ascii.BEL:  # ^g -- terminate editing
            return 0
        elif ch == curses.ascii.NL:  # ^j
            if self.maxy == 0:
                return 0
            elif y < self.maxy:
                self.win.move(y+1, 0)
        elif ch == curses.ascii.VT:  # ^k
            if x == 0 and self._end_of_line(y) == 0:
                self.win.deleteln()
            else:
                # first undo the effect of self._end_of_line
                self.win.move(y, x)
                self.win.clrtoeol()
        elif ch == curses.ascii.FF:  # ^l
            self.win.refresh()
        elif ch in (curses.ascii.SO, curses.KEY_DOWN):  # ^n
            if y < self.maxy:
                self.win.move(y+1, x)
                if x > self._end_of_line(y+1):
                    self.win.move(y+1, self._end_of_line(y+1))
        elif ch == curses.ascii.SI:  # ^o
            self.win.insertln()
        elif ch in (curses.ascii.DLE, curses.KEY_UP):  # ^p
            if y > 0:
                self.win.move(y-1, x)
                if x > self._end_of_line(y-1):
                    self.win.move(y-1, self._end_of_line(y-1))
        return 1
    def gather(self):
        "Collect and return the contents of the window."
        result = ""
        for y in range(self.maxy+1):
            self.win.move(y, 0)
            stop = self._end_of_line(y)
            # Skip entirely blank lines when stripspaces is on.
            if stop == 0 and self.stripspaces:
                continue
            for x in range(self.maxx+1):
                if self.stripspaces and x > stop:
                    break
                result = result + chr(curses.ascii.ascii(self.win.inch(y, x)))
            if self.maxy > 0:
                result = result + "\n"
        return result
    def edit(self, validate=None):
        "Edit in the widget window and collect the results."
        while 1:
            ch = self.win.getch()
            # The optional validate hook may translate or swallow keys.
            if validate:
                ch = validate(ch)
            if not ch:
                continue
            if not self.do_command(ch):
                break
            self.win.refresh()
        return self.gather()
if __name__ == '__main__':
    def test_editbox(stdscr):
        # Draw a bordered 4x9 edit box and let the user type until Ctrl-G.
        ncols, nlines = 9, 4
        uly, ulx = 15, 20
        stdscr.addstr(uly-2, ulx, "Use Ctrl-G to end editing.")
        win = curses.newwin(nlines, ncols, uly, ulx)
        rectangle(stdscr, uly-1, ulx-1, uly + nlines, ulx + ncols)
        stdscr.refresh()
        return Textbox(win).edit()
    # curses.wrapper handles initscr/endwin and terminal restoration.
    str = curses.wrapper(test_editbox)
    # NOTE: Python 2 print statement; this file targets Python 2.
    print 'Contents of text box:', repr(str)
| mit |
beernarrd/gramps | gramps/gui/editors/edittaglist.py | 5 | 4637 | #
# Gramps - a GTK+/GNOME based genealogy program
#
# Copyright (C) 2010 Nick Hall
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
"""
Tag editing module for Gramps.
"""
#-------------------------------------------------------------------------
#
# GNOME modules
#
#-------------------------------------------------------------------------
from gi.repository import Gtk
#-------------------------------------------------------------------------
#
# Gramps modules
#
#-------------------------------------------------------------------------
from gramps.gen.const import GRAMPS_LOCALE as glocale
_ = glocale.translation.sgettext
from ..managedwindow import ManagedWindow
from gramps.gen.const import URL_MANUAL_PAGE
from ..display import display_help
from ..listmodel import ListModel, TOGGLE
#-------------------------------------------------------------------------
#
# Constants
#
#-------------------------------------------------------------------------
# Wiki manual location used by the dialog's Help button (see display_help).
WIKI_HELP_PAGE = '%s_-_Filters' % URL_MANUAL_PAGE
WIKI_HELP_SEC = _('manual|Tag_selection_dialog')
#-------------------------------------------------------------------------
#
# EditTagList
#
#-------------------------------------------------------------------------
class EditTagList(ManagedWindow):
    """
    Dialog to allow the user to edit a list of tags.

    On OK, self.return_list holds the selected tags as (handle, name)
    pairs; it stays None when the dialog is cancelled or closed.
    """
    def __init__(self, tag_list, full_list, uistate, track):
        """
        Initiate and display the dialog.

        tag_list: tags currently applied (a subset of full_list).
        full_list: all available tags, as (handle, name) pairs.
        """
        ManagedWindow.__init__(self, uistate, track, self, modal=True)
        # the self.window.run() below makes Gtk make it modal, so any change
        # to the previous line's "modal" would require that line to be changed
        self.namemodel = None
        top = self._create_dialog()
        self.set_window(top, None, _('Tag selection'))
        self.setup_configs('interface.edittaglist', 360, 400)
        for tag in full_list:
            # Row layout: [handle, selected?, name] -- must match the
            # columns built in _create_dialog().
            self.namemodel.add([tag[0], tag in tag_list, tag[1]])
        self.namemodel.connect_model()
        # The dialog is modal. We don't want to have several open dialogs of
        # this type, since then the user will loose track of which is which.
        self.return_list = None
        self.show()
        while True:
            # the self.window.run() makes Gtk make it modal, so any change to
            # that line means the ManagedWindow.__init__ must be changed also
            response = self.window.run()
            if response == Gtk.ResponseType.HELP:
                display_help(webpage=WIKI_HELP_PAGE,
                             section=WIKI_HELP_SEC)
            elif response == Gtk.ResponseType.DELETE_EVENT:
                break
            else:
                if response == Gtk.ResponseType.OK:
                    # Collect only the checked rows, as (handle, name).
                    self.return_list = [(row[0], row[2])
                                        for row in self.namemodel.model
                                        if row[1]]
                self.close()
                break
    def _create_dialog(self):
        """
        Create a dialog box to select tags.
        """
        # pylint: disable-msg=E1101
        top = Gtk.Dialog(parent=self.uistate.window)
        top.vbox.set_spacing(5)
        columns = [('', -1, 300),
                   (' ', -1, 25, TOGGLE, True, None),
                   (_('Tag'), -1, 300)]
        view = Gtk.TreeView()
        self.namemodel = ListModel(view, columns)
        slist = Gtk.ScrolledWindow()
        slist.add(view)
        slist.set_policy(Gtk.PolicyType.AUTOMATIC, Gtk.PolicyType.AUTOMATIC)
        top.vbox.pack_start(slist, 1, 1, 5)
        top.add_button(_('_Help'), Gtk.ResponseType.HELP)
        top.add_button(_('_Cancel'), Gtk.ResponseType.CANCEL)
        top.add_button(_('_OK'), Gtk.ResponseType.OK)
        return top
    def build_menu_names(self, obj): # meaningless while it's modal
        """
        Define the menu entry for the ManagedWindows.
        """
        return (_("Tag selection"), None)
| gpl-2.0 |
danielmt/vshard | vendor/github.com/youtube/vitess/examples/kubernetes/guestbook/main.py | 6 | 2121 | """Main python file."""
import os
import time
import json
from flask import Flask
from vtdb import vtgate_client
# Register gRPC protocol.
from vtdb import grpc_vtgate_client # pylint: disable=unused-import
app = Flask(__name__)
# conn is the connection to vtgate.
conn = None
@app.route('/')
def index():
  """Serve the single-page app shell."""
  return app.send_static_file('index.html')
@app.route('/page/<int:page>')
def view(page):
  """Serve the app shell for a page URL; rendering happens client-side."""
  _ = page  # captured by the route but unused on the server
  return app.send_static_file('index.html')
@app.route('/lrange/guestbook/<int:page>')
def list_guestbook(page):
  """Read the list from a replica."""
  # Replica read: tolerates slight staleness in exchange for spreading load.
  cursor = conn.cursor(
      tablet_type='replica', keyspace='test_keyspace')
  cursor.execute(
      'SELECT message, time_created_ns FROM messages WHERE page=:page'
      ' ORDER BY time_created_ns',
      {'page': page})
  # Only the message column (row[0]) is returned to the client.
  entries = [row[0] for row in cursor.fetchall()]
  cursor.close()
  return json.dumps(entries)
@app.route('/rpush/guestbook/<int:page>/<value>')
def add_entry(page, value):
  """Insert a row on the master."""
  cursor = conn.cursor(
      tablet_type='master', keyspace='test_keyspace', writable=True)
  cursor.begin()
  # time_created_ns doubles as the per-page ordering key.
  cursor.execute(
      'INSERT INTO messages (page, time_created_ns, message)'
      ' VALUES (:page, :time_created_ns, :message)',
      {
          'page': page,
          'time_created_ns': int(time.time() * 1e9),
          'message': value,
      })
  cursor.commit()
  # Read the list back from master (critical read) because it's
  # important that the user sees their own addition immediately.
  cursor.execute(
      'SELECT message, time_created_ns FROM messages WHERE page=:page'
      ' ORDER BY time_created_ns',
      {'page': page})
  entries = [row[0] for row in cursor.fetchall()]
  cursor.close()
  # NOTE(review): if execute/commit raises, the cursor is never closed and
  # the transaction is left open -- consider try/finally with a rollback.
  return json.dumps(entries)
@app.route('/env')
def env():
    """Expose the container environment as JSON (debugging aid)."""
    environ_copy = dict(os.environ)
    return json.dumps(environ_copy)
if __name__ == '__main__':
    timeout = 10  # connect timeout in seconds
    # Get vtgate service address from Kubernetes DNS.
    addr = 'vtgate-test:15991'
    # Connect to vtgate; the module-level `conn` is used by all handlers.
    conn = vtgate_client.connect('grpc', addr, timeout)
    app.run(host='0.0.0.0', port=8080, debug=True)
| mit |
switchboardOp/ansible | lib/ansible/modules/network/cloudengine/ce_vrf_af.py | 47 | 30552 | #!/usr/bin/python
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
'metadata_version': '1.0'}
DOCUMENTATION = '''
---
module: ce_vrf_af
version_added: "2.4"
short_description: Manages VPN instance address family on HUAWEI CloudEngine switches.
description:
- Manages VPN instance address family of HUAWEI CloudEngine switches.
author: Yang yang (@CloudEngine-Ansible)
notes:
- If I(state=absent), the vrf will be removed, regardless of the
non-required parameters.
options:
vrf:
description:
- VPN instance.
required: true
default: null
vrf_aftype:
description:
- VPN instance address family.
required: false
choices: ['v4','v6']
default: v4
route_distinguisher:
description:
- VPN instance route distinguisher,the RD used to distinguish same route prefix from different vpn.
The RD must be setted before setting vpn_target_value.
required: false
vpn_target_state:
description:
- Manage the state of the vpn target.
required: false
choices: ['present','absent']
vpn_target_type:
description:
- VPN instance vpn target type.
required: false
choices: ['export_extcommunity', 'import_extcommunity']
default: null
vpn_target_value:
description:
- VPN instance target value. Such as X.X.X.X:number<0-65535> or number<0-65535>:number<0-4294967295>
or number<0-65535>.number<0-65535>:number<0-65535> or number<65536-4294967295>:number<0-65535>
but not support 0:0 and 0.0:0.
required: false
evpn:
description:
- Is extend vpn or normal vpn.
required: false
choices: ['true', 'false']
default: false
state:
description:
- Manage the state of the af.
required: false
choices: ['present','absent']
default: present
'''
EXAMPLES = '''
- name: vrf af module test
hosts: cloudengine
connection: local
gather_facts: no
vars:
cli:
host: "{{ inventory_hostname }}"
port: "{{ ansible_ssh_port }}"
username: "{{ username }}"
password: "{{ password }}"
transport: cli
tasks:
- name: Config vpna, set address family is ipv4
ce_vrf_af:
vrf: vpna
vrf_aftype: v4
state: present
provider: "{{ cli }}"
- name: Config vpna, delete address family is ipv4
ce_vrf_af:
vrf: vpna
vrf_aftype: v4
state: absent
provider: "{{ cli }}"
- name: Config vpna, set address family is ipv4,rd=1:1,set vpn_target_type=export_extcommunity,vpn_target_value=2:2
ce_vrf_af:
vrf: vpna
vrf_aftype: v4
route_distinguisher: 1:1
vpn_target_type: export_extcommunity
vpn_target_value: 2:2
vpn_target_state: present
state: present
provider: "{{ cli }}"
- name: Config vpna, set address family is ipv4,rd=1:1,delete vpn_target_type=export_extcommunity,vpn_target_value=2:2
ce_vrf_af:
vrf: vpna
vrf_aftype: v4
route_distinguisher: 1:1
vpn_target_type: export_extcommunity
vpn_target_value: 2:2
vpn_target_state: absent
state: present
provider: "{{ cli }}"
'''
RETURN = '''
proposed:
description: k/v pairs of parameters passed into module
returned: always
type: dict
sample: {"vrf": "vpna",
"vrf_aftype": "v4",
"state": "present",
"vpn_targe_state":"absent",
"evpn": "none",
"vpn_target_type": "none",
"vpn_target_value": "none"}
existing:
description: k/v pairs of existing switchport
returned: always
type: dict
sample: {
"route_distinguisher": [
"1:1",
"2:2"
],
"vpn_target_type": [],
"vpn_target_value": [],
"vrf": "vpna",
"vrf_aftype": [
"ipv4uni",
"ipv6uni"
]
}
end_state:
description: k/v pairs of switchport after module execution
returned: always
type: dict
sample: {
"route_distinguisher": [
"1:1",
"2:2"
],
"vpn_target_type": [
"import_extcommunity",
"3:3"
],
"vpn_target_value": [],
"vrf": "vpna",
"vrf_aftype": [
"ipv4uni",
"ipv6uni"
]
}
updates:
description: command list sent to the device
returned: always
type: list
sample: [
"ip vpn-instance vpna",
"vpn-target 3:3 import_extcommunity"
]
changed:
description: check to see if a change was made on the device
returned: always
type: boolean
sample: true
'''
import re
from xml.etree import ElementTree
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.ce import get_nc_config, set_nc_config, ce_argument_spec
CE_NC_GET_VRF = """
<filter type="subtree">
<l3vpn xmlns="http://www.huawei.com/netconf/vrp" content-version="1.0" format-version="1.0">
<l3vpncomm>
<l3vpnInstances>
<l3vpnInstance>
<vrfName></vrfName>
<vrfDescription></vrfDescription>
</l3vpnInstance>
</l3vpnInstances>
</l3vpncomm>
</l3vpn>
</filter>
"""
CE_NC_GET_VRF_AF = """
<filter type="subtree">
<l3vpn xmlns="http://www.huawei.com/netconf/vrp" content-version="1.0" format-version="1.0">
<l3vpncomm>
<l3vpnInstances>
<l3vpnInstance>
<vrfName>%s</vrfName>
<vpnInstAFs>
<vpnInstAF>
<afType></afType>
<vrfRD></vrfRD>%s
</vpnInstAF>
</vpnInstAFs>
</l3vpnInstance>
</l3vpnInstances>
</l3vpncomm>
</l3vpn>
</filter>
"""
CE_NC_DELETE_VRF_AF = """
<l3vpn xmlns="http://www.huawei.com/netconf/vrp" content-version="1.0" format-version="1.0">
<l3vpncomm>
<l3vpnInstances>
<l3vpnInstance>
<vrfName>%s</vrfName>
<vpnInstAFs>
<vpnInstAF operation="delete">
<afType>%s</afType>
</vpnInstAF>
</vpnInstAFs>
</l3vpnInstance>
</l3vpnInstances>
</l3vpncomm>
</l3vpn>
"""
CE_NC_CREATE_VRF_AF = """
<l3vpn xmlns="http://www.huawei.com/netconf/vrp" content-version="1.0" format-version="1.0">
<l3vpncomm>
<l3vpnInstances>
<l3vpnInstance>
<vrfName>%s</vrfName>
<vpnInstAFs>
<vpnInstAF operation="merge">
<afType>%s</afType>
<vrfRD>%s</vrfRD>%s
</vpnInstAF>
</vpnInstAFs>
</l3vpnInstance>
</l3vpnInstances>
</l3vpncomm></l3vpn>
"""
CE_NC_CREATE_VRF_TARGET = """
<vpnTargets>
<vpnTarget operation="merge">
<vrfRTType>%s</vrfRTType>
<vrfRTValue>%s</vrfRTValue>
</vpnTarget>
</vpnTargets>
"""
CE_NC_DELETE_VRF_TARGET = """
<vpnTargets>
<vpnTarget operation="delete">
<vrfRTType>%s</vrfRTType>
<vrfRTValue>%s</vrfRTValue>
</vpnTarget>
</vpnTargets>
"""
CE_NC_GET_VRF_TARGET = """
<vpnTargets>
<vpnTarget>
<vrfRTValue></vrfRTValue>
<vrfRTType></vrfRTType>
</vpnTarget>
</vpnTargets>
"""
CE_NC_CREATE_EXTEND_VRF_TARGET = """
<exVpnTargets>
<exVpnTarget operation="merge">
<vrfRTType>%s</vrfRTType>
<vrfRTValue>%s</vrfRTValue>
<extAddrFamily>evpn</extAddrFamily>
</exVpnTarget>
</exVpnTargets>
"""
CE_NC_DELETE_EXTEND_VRF_TARGET = """
<exVpnTargets>
<exVpnTarget operation="delete">
<vrfRTType>%s</vrfRTType>
<vrfRTValue>%s</vrfRTValue>
<extAddrFamily>evpn</extAddrFamily>
</exVpnTarget>
</exVpnTargets>
"""
CE_NC_GET_EXTEND_VRF_TARGET = """
<exVpnTargets>
<exVpnTarget>
<vrfRTType></vrfRTType>
<vrfRTValue></vrfRTValue>
<extAddrFamily></extAddrFamily>
</exVpnTarget>
</exVpnTargets>
"""
def build_config_xml(xmlstr):
    """Wrap a NETCONF edit payload in a <config> element."""
    return '<config> %s </config>' % xmlstr
def is_valid_value(vrf_targe_value):
    """Validate a VPN target / route distinguisher string.

    Accepted formats (total length 3..21 characters):
      * A.B.C.D:N          with N in 0..65535 and each octet in 0..255
      * A.B:N              with A, B and N each in 0..65535
      * N:M                with N in 0..65535 and M in 0..4294967295
      * N:M                with N in 65536..4294967295 and M in 0..65535
    The special values '0:0' and '0.0:0' are rejected.
    """
    if len(vrf_targe_value) > 21 or len(vrf_targe_value) < 3:
        return False
    if ':' not in vrf_targe_value:
        return False
    if vrf_targe_value in ('0:0', '0.0:0'):
        return False

    parts = vrf_targe_value.split(':')
    admin, assigned = parts[0], parts[1]

    if '.' in admin:
        # Dotted administrator field: the assigned number is capped at 16 bits.
        if not assigned.isdigit() or int(assigned) > 65535:
            return False
        octets = admin.split('.')
        if len(octets) == 4:
            limit = 255        # IPv4-address form
        elif len(octets) == 2:
            limit = 65535      # 4-byte AS in dotted notation
        else:
            return False
        return all(o.isdigit() and int(o) <= limit for o in octets)

    if not admin.isdigit() or not assigned.isdigit():
        return False
    admin_num = int(admin)
    assigned_num = int(assigned)
    if admin_num < 65536:
        # 2-byte administrator allows a 4-byte assigned number.
        return assigned_num < 4294967296
    if admin_num < 4294967296:
        # 4-byte administrator restricts the assigned number to 2 bytes.
        return assigned_num < 65536
    return False
class VrfAf(object):
    """Manage a VPN instance address family and its export/import targets.

    State is read from the device with the CE_NC_GET_* NETCONF filters and
    changes are pushed with the CE_NC_CREATE_*/CE_NC_DELETE_* templates
    defined above.  The object accumulates proposed/existing/end_state
    dicts plus a CLI-style `updates_cmd` list for the module result.
    """

    def __init__(self, argument_spec, ):
        self.spec = argument_spec
        self.module = None
        self.init_module()
        # vpn instance info
        self.vrf = self.module.params['vrf']
        self.vrf_aftype = self.module.params['vrf_aftype']
        # Map the module choices ('v4'/'v6') onto the NETCONF afType
        # enumeration.  The argument spec defaults vrf_aftype to 'v4'.
        if self.vrf_aftype == 'v4':
            self.vrf_aftype = 'ipv4uni'
        else:
            self.vrf_aftype = 'ipv6uni'
        self.route_distinguisher = self.module.params['route_distinguisher']
        self.evpn = self.module.params['evpn']
        self.vpn_target_type = self.module.params['vpn_target_type']
        self.vpn_target_value = self.module.params['vpn_target_value']
        self.vpn_target_state = self.module.params['vpn_target_state']
        self.state = self.module.params['state']
        # state
        self.changed = False
        self.updates_cmd = list()
        self.results = dict()
        self.proposed = dict()
        self.existing = dict()
        self.end_state = dict()
        self.vpn_target_changed = False
        self.vrf_af_type_changed = False
        self.vrf_rd_changed = False
        # Cache of the device's current AF configuration; filled by
        # get_vrf_af() under the "vpnInstAF" key.
        self.vrf_af_info = dict()

    def init_module(self):
        """Instantiate the AnsibleModule from the provided spec."""
        self.module = AnsibleModule(
            argument_spec=self.spec, supports_check_mode=True)

    def check_response(self, xml_str, xml_name):
        """Fail the module unless the NETCONF reply contains <ok/>."""
        if "<ok/>" not in xml_str:
            self.module.fail_json(msg='Error: %s failed.' % xml_name)

    def is_vrf_af_exist(self):
        """Return True if the requested address family is already configured."""
        if not self.vrf_af_info:
            return False
        for vrf_af_ele in self.vrf_af_info["vpnInstAF"]:
            if vrf_af_ele["afType"] == self.vrf_aftype:
                return True
            else:
                continue
        return False

    def get_exist_rd(self):
        """Return the configured RD of the requested AF, or None."""
        if not self.vrf_af_info:
            return None
        for vrf_af_ele in self.vrf_af_info["vpnInstAF"]:
            if vrf_af_ele["afType"] == self.vrf_aftype:
                if vrf_af_ele["vrfRD"] is None:
                    return None
                else:
                    return vrf_af_ele["vrfRD"]
            else:
                continue
        return None

    def is_vrf_rd_exist(self):
        """Return True if an RD (matching the requested one, if given) exists."""
        if not self.vrf_af_info:
            return False
        for vrf_af_ele in self.vrf_af_info["vpnInstAF"]:
            if vrf_af_ele["afType"] == self.vrf_aftype:
                if vrf_af_ele["vrfRD"] is None:
                    return False
                if self.route_distinguisher is not None:
                    return bool(vrf_af_ele["vrfRD"] == self.route_distinguisher)
                else:
                    return True
            else:
                continue
        return False

    def is_vrf_rt_exist(self):
        """Return True if the requested vpn-target (or evpn target) exists."""
        if not self.vrf_af_info:
            return False
        for vrf_af_ele in self.vrf_af_info["vpnInstAF"]:
            if vrf_af_ele["afType"] == self.vrf_aftype:
                # Normal and EVPN targets are stored under different keys.
                if self.evpn is False:
                    if not vrf_af_ele.get("vpnTargets"):
                        return False
                    for vpn_target in vrf_af_ele.get("vpnTargets"):
                        if vpn_target["vrfRTType"] == self.vpn_target_type \
                                and vpn_target["vrfRTValue"] == self.vpn_target_value:
                            return True
                        else:
                            continue
                else:
                    if not vrf_af_ele.get("evpnTargets"):
                        return False
                    for evpn_target in vrf_af_ele.get("evpnTargets"):
                        if evpn_target["vrfRTType"] == self.vpn_target_type \
                                and evpn_target["vrfRTValue"] == self.vpn_target_value:
                            return True
                        else:
                            continue
            else:
                continue
        return False

    def set_update_cmd(self):
        """Build the CLI-equivalent command list reported in `updates`."""
        if not self.changed:
            return
        if self.state == "present":
            self.updates_cmd.append('ip vpn-instance %s' % (self.vrf))
            if self.vrf_aftype == 'ipv4uni':
                self.updates_cmd.append('ipv4-family')
            elif self.vrf_aftype == 'ipv6uni':
                self.updates_cmd.append('ipv6-family')
            if self.route_distinguisher:
                if not self.is_vrf_rd_exist():
                    self.updates_cmd.append(
                        'route-distinguisher %s' % self.route_distinguisher)
            else:
                # No RD requested: an existing RD is removed.
                if self.get_exist_rd() is not None:
                    self.updates_cmd.append(
                        'undo route-distinguisher %s' % self.get_exist_rd())
            if self.vpn_target_state == "present":
                if not self.is_vrf_rt_exist():
                    if self.evpn is False:
                        self.updates_cmd.append(
                            'vpn-target %s %s' % (self.vpn_target_value, self.vpn_target_type))
                    else:
                        self.updates_cmd.append(
                            'vpn-target %s %s evpn' % (self.vpn_target_value, self.vpn_target_type))
            elif self.vpn_target_state == "absent":
                if self.is_vrf_rt_exist():
                    if self.evpn is False:
                        self.updates_cmd.append(
                            'undo vpn-target %s %s' % (self.vpn_target_value, self.vpn_target_type))
                    else:
                        self.updates_cmd.append(
                            'undo vpn-target %s %s evpn' % (self.vpn_target_value, self.vpn_target_type))
        else:
            self.updates_cmd.append('ip vpn-instance %s' % (self.vrf))
            if self.vrf_aftype == 'ipv4uni':
                self.updates_cmd.append('undo ipv4-family')
            elif self.vrf_aftype == 'ipv6uni':
                self.updates_cmd.append('undo ipv6-family')

    def get_vrf(self):
        """Return True if the VPN instance exists on the device."""
        getxmlstr = CE_NC_GET_VRF
        xmlstr_new_1 = (self.vrf.lower())
        xml_str = get_nc_config(self.module, getxmlstr)
        # Case-insensitive match against every <vrfName> in the reply.
        re_find_1 = re.findall(
            r'.*<vrfname>(.*)</vrfname>.*', xml_str.lower())
        # NOTE: re.findall() always returns a list, so this None check is
        # defensive only.
        if re_find_1 is None:
            return False
        return xmlstr_new_1 in re_find_1

    def get_vrf_af(self):
        """Populate self.vrf_af_info from the device's current AF config."""
        self.vrf_af_info["vpnInstAF"] = list()
        if self.evpn is True:
            getxmlstr = CE_NC_GET_VRF_AF % (
                self.vrf, CE_NC_GET_EXTEND_VRF_TARGET)
        else:
            getxmlstr = CE_NC_GET_VRF_AF % (self.vrf, CE_NC_GET_VRF_TARGET)
        xml_str = get_nc_config(self.module, getxmlstr)
        # An empty <data/> element means there is nothing configured yet.
        if 'data/' in xml_str:
            return self.state == 'present'
        xml_str = xml_str.replace('\r', '').replace('\n', '').\
            replace('xmlns="urn:ietf:params:xml:ns:netconf:base:1.0"', "").\
            replace('xmlns="http://www.huawei.com/netconf/vrp"', "")
        root = ElementTree.fromstring(xml_str)
        # get the vpn address family and RD text
        vrf_addr_types = root.findall(
            "data/l3vpn/l3vpncomm/l3vpnInstances/l3vpnInstance/vpnInstAFs/vpnInstAF")
        if vrf_addr_types:
            for vrf_addr_type in vrf_addr_types:
                vrf_af_info = dict()
                for vrf_addr_type_ele in vrf_addr_type:
                    if vrf_addr_type_ele.tag in ["vrfName", "afType", "vrfRD"]:
                        vrf_af_info[vrf_addr_type_ele.tag] = vrf_addr_type_ele.text
                    if vrf_addr_type_ele.tag == 'vpnTargets':
                        vrf_af_info["vpnTargets"] = list()
                        for rtargets in vrf_addr_type_ele:
                            rt_dict = dict()
                            for rtarget in rtargets:
                                if rtarget.tag in ["vrfRTValue", "vrfRTType"]:
                                    rt_dict[rtarget.tag] = rtarget.text
                            vrf_af_info["vpnTargets"].append(rt_dict)
                    if vrf_addr_type_ele.tag == 'exVpnTargets':
                        vrf_af_info["evpnTargets"] = list()
                        for rtargets in vrf_addr_type_ele:
                            rt_dict = dict()
                            for rtarget in rtargets:
                                if rtarget.tag in ["vrfRTValue", "vrfRTType"]:
                                    rt_dict[rtarget.tag] = rtarget.text
                            vrf_af_info["evpnTargets"].append(rt_dict)
                self.vrf_af_info["vpnInstAF"].append(vrf_af_info)

    def check_params(self):
        """Validate all input parameters, failing the module on error."""
        # vrf and description check
        if self.vrf == '_public_':
            self.module.fail_json(
                msg='Error: The vrf name _public_ is reserved.')
        if not self.get_vrf():
            self.module.fail_json(
                msg='Error: The vrf name do not exist.')
        if self.state == 'present':
            if self.route_distinguisher:
                if not is_valid_value(self.route_distinguisher):
                    self.module.fail_json(msg='Error:The vrf route distinguisher length must between 3 ~ 21,'
                                          'i.e. X.X.X.X:number<0-65535> or number<0-65535>:number<0-4294967295>'
                                          'or number<0-65535>.number<0-65535>:number<0-65535>'
                                          'or number<65536-4294967295>:number<0-65535>'
                                          ' but not be 0:0 or 0.0:0.')
            # vpn_target_type/value are only meaningful with a target state.
            if not self.vpn_target_state:
                if self.vpn_target_value or self.vpn_target_type:
                    self.module.fail_json(
                        msg='Error: The vpn target state should be exist.')
            if self.vpn_target_state:
                if not self.vpn_target_value or not self.vpn_target_type:
                    self.module.fail_json(
                        msg='Error: The vpn target value and type should be exist.')
            if self.vpn_target_value:
                if not is_valid_value(self.vpn_target_value):
                    self.module.fail_json(msg='Error:The vrf target value length must between 3 ~ 21,'
                                          'i.e. X.X.X.X:number<0-65535> or number<0-65535>:number<0-4294967295>'
                                          'or number<0-65535>.number<0-65535>:number<0-65535>'
                                          'or number<65536-4294967295>:number<0-65535>'
                                          ' but not be 0:0 or 0.0:0.')

    def operate_vrf_af(self):
        """Build and push the NETCONF edit-config for the requested change."""
        vrf_target_operate = ''
        if self.route_distinguisher is None:
            route_d = ''
        else:
            route_d = self.route_distinguisher
        if self.state == 'present':
            if self.vrf_aftype:
                if self.is_vrf_af_exist():
                    self.vrf_af_type_changed = False
                else:
                    self.vrf_af_type_changed = True
                    configxmlstr = CE_NC_CREATE_VRF_AF % (
                        self.vrf, self.vrf_aftype, route_d, vrf_target_operate)
            else:
                self.vrf_af_type_changed = bool(self.is_vrf_af_exist())
            if self.vpn_target_state == 'present':
                if self.evpn is False and not self.is_vrf_rt_exist():
                    vrf_target_operate = CE_NC_CREATE_VRF_TARGET % (
                        self.vpn_target_type, self.vpn_target_value)
                    configxmlstr = CE_NC_CREATE_VRF_AF % (
                        self.vrf, self.vrf_aftype, route_d, vrf_target_operate)
                    self.vpn_target_changed = True
                if self.evpn is True and not self.is_vrf_rt_exist():
                    vrf_target_operate = CE_NC_CREATE_EXTEND_VRF_TARGET % (
                        self.vpn_target_type, self.vpn_target_value)
                    configxmlstr = CE_NC_CREATE_VRF_AF % (
                        self.vrf, self.vrf_aftype, route_d, vrf_target_operate)
                    self.vpn_target_changed = True
            elif self.vpn_target_state == 'absent':
                if self.evpn is False and self.is_vrf_rt_exist():
                    vrf_target_operate = CE_NC_DELETE_VRF_TARGET % (
                        self.vpn_target_type, self.vpn_target_value)
                    configxmlstr = CE_NC_CREATE_VRF_AF % (
                        self.vrf, self.vrf_aftype, route_d, vrf_target_operate)
                    self.vpn_target_changed = True
                if self.evpn is True and self.is_vrf_rt_exist():
                    vrf_target_operate = CE_NC_DELETE_EXTEND_VRF_TARGET % (
                        self.vpn_target_type, self.vpn_target_value)
                    configxmlstr = CE_NC_CREATE_VRF_AF % (
                        self.vrf, self.vrf_aftype, route_d, vrf_target_operate)
                    self.vpn_target_changed = True
            else:
                # No target change requested: reconcile only the RD.
                if self.route_distinguisher:
                    if not self.is_vrf_rd_exist():
                        configxmlstr = CE_NC_CREATE_VRF_AF % (
                            self.vrf, self.vrf_aftype, route_d, vrf_target_operate)
                        self.vrf_rd_changed = True
                    else:
                        self.vrf_rd_changed = False
                else:
                    if self.is_vrf_rd_exist():
                        configxmlstr = CE_NC_CREATE_VRF_AF % (
                            self.vrf, self.vrf_aftype, route_d, vrf_target_operate)
                        self.vrf_rd_changed = True
                    else:
                        self.vrf_rd_changed = False
            if not self.vrf_rd_changed and not self.vrf_af_type_changed and not self.vpn_target_changed:
                self.changed = False
            else:
                self.changed = True
        else:
            if self.is_vrf_af_exist():
                configxmlstr = CE_NC_DELETE_VRF_AF % (
                    self.vrf, self.vrf_aftype)
                self.changed = True
            else:
                self.changed = False
        if not self.changed:
            return
        conf_str = build_config_xml(configxmlstr)
        recv_xml = set_nc_config(self.module, conf_str)
        self.check_response(recv_xml, "OPERATE_VRF_AF")

    def get_proposed(self):
        """Fill self.proposed with the requested configuration."""
        if self.state == 'present':
            self.proposed['vrf'] = self.vrf
            if self.vrf_aftype is None:
                self.proposed['vrf_aftype'] = 'ipv4uni'
            else:
                self.proposed['vrf_aftype'] = self.vrf_aftype
            if self.route_distinguisher is not None:
                self.proposed['route_distinguisher'] = self.route_distinguisher
            else:
                self.proposed['route_distinguisher'] = list()
            if self.vpn_target_state == 'present':
                self.proposed['evpn'] = self.evpn
                self.proposed['vpn_target_type'] = self.vpn_target_type
                self.proposed['vpn_target_value'] = self.vpn_target_value
            else:
                self.proposed['vpn_target_type'] = list()
                self.proposed['vpn_target_value'] = list()
        else:
            self.proposed = dict()
            self.proposed['state'] = self.state
            self.proposed['vrf'] = self.vrf
            self.proposed['vrf_aftype'] = list()
            self.proposed['route_distinguisher'] = list()
            self.proposed['vpn_target_value'] = list()
            self.proposed['vpn_target_type'] = list()

    def get_existing(self):
        """Fill self.existing with the device configuration before the change."""
        self.get_vrf_af()
        self.existing['vrf'] = self.vrf
        self.existing['vrf_aftype'] = list()
        self.existing['route_distinguisher'] = list()
        self.existing['vpn_target_value'] = list()
        self.existing['vpn_target_type'] = list()
        self.existing['evpn_target_value'] = list()
        self.existing['evpn_target_type'] = list()
        if self.vrf_af_info["vpnInstAF"] is None:
            return
        for vrf_af_ele in self.vrf_af_info["vpnInstAF"]:
            self.existing['vrf_aftype'].append(vrf_af_ele["afType"])
            self.existing['route_distinguisher'].append(
                vrf_af_ele["vrfRD"])
            if vrf_af_ele.get("vpnTargets"):
                for vpn_target in vrf_af_ele.get("vpnTargets"):
                    self.existing['vpn_target_type'].append(
                        vpn_target["vrfRTType"])
                    self.existing['vpn_target_value'].append(
                        vpn_target["vrfRTValue"])
            if vrf_af_ele.get("evpnTargets"):
                for evpn_target in vrf_af_ele.get("evpnTargets"):
                    self.existing['evpn_target_type'].append(
                        evpn_target["vrfRTType"])
                    self.existing['evpn_target_value'].append(
                        evpn_target["vrfRTValue"])

    def get_end_state(self):
        """Fill self.end_state with the device configuration after the change."""
        self.get_vrf_af()
        self.end_state['vrf'] = self.vrf
        self.end_state['vrf_aftype'] = list()
        self.end_state['route_distinguisher'] = list()
        self.end_state['vpn_target_value'] = list()
        self.end_state['vpn_target_type'] = list()
        self.end_state['evpn_target_value'] = list()
        self.end_state['evpn_target_type'] = list()
        if self.vrf_af_info["vpnInstAF"] is None:
            return
        for vrf_af_ele in self.vrf_af_info["vpnInstAF"]:
            self.end_state['vrf_aftype'].append(vrf_af_ele["afType"])
            self.end_state['route_distinguisher'].append(vrf_af_ele["vrfRD"])
            if vrf_af_ele.get("vpnTargets"):
                for vpn_target in vrf_af_ele.get("vpnTargets"):
                    self.end_state['vpn_target_type'].append(
                        vpn_target["vrfRTType"])
                    self.end_state['vpn_target_value'].append(
                        vpn_target["vrfRTValue"])
            if vrf_af_ele.get("evpnTargets"):
                for evpn_target in vrf_af_ele.get("evpnTargets"):
                    self.end_state['evpn_target_type'].append(
                        evpn_target["vrfRTType"])
                    self.end_state['evpn_target_value'].append(
                        evpn_target["vrfRTValue"])

    def work(self):
        """Main flow: validate, diff, apply, and exit with the results."""
        self.check_params()
        self.get_existing()
        self.get_proposed()
        self.operate_vrf_af()
        self.set_update_cmd()
        self.get_end_state()
        self.results['changed'] = self.changed
        self.results['proposed'] = self.proposed
        self.results['existing'] = self.existing
        self.results['end_state'] = self.end_state
        if self.changed:
            self.results['updates'] = self.updates_cmd
        else:
            self.results['updates'] = list()
        self.module.exit_json(**self.results)
def main():
    """Entry point: build the argument spec and run the VrfAf worker."""
    argument_spec = {
        'vrf': dict(required=True, type='str'),
        'vrf_aftype': dict(choices=['v4', 'v6'],
                           default='v4', required=False),
        'route_distinguisher': dict(required=False, type='str'),
        'evpn': dict(type='bool', default=False),
        'vpn_target_type': dict(
            choices=['export_extcommunity', 'import_extcommunity'],
            required=False),
        'vpn_target_value': dict(required=False, type='str'),
        'vpn_target_state': dict(choices=['absent', 'present'],
                                 required=False),
        'state': dict(choices=['absent', 'present'],
                      default='present', required=False),
    }
    # Add the common CloudEngine connection arguments.
    argument_spec.update(ce_argument_spec)
    vrf_af = VrfAf(argument_spec)
    vrf_af.work()
# Run the module only when executed directly (Ansible invokes it this way).
if __name__ == '__main__':
    main()
| gpl-3.0 |
llinmeng/PythonStudy | imooc/1rumen/3.py | 1 | 2396 | # -*- coding: utf-8 -*-
# Python 2 exercise script from the imooc introductory course.
print "=================================="
# 3-1: integer arithmetic, strings and boolean expressions
print 45678 + 0x12fd2
print 'Learn Python in imooc'
print 100 < 99
print 0xff == 255
print "=================================="
# 3-2: quoting styles and comma-separated print arguments
print 'hello, python'
print "hello, python"
print """hello, python"""
print 'hello,', 'python'
print 'hello', ',', 'python'
print "=================================="
# 3-3: same as 3-2 with the first two statements commented out
# print 'hello, python'
# print "hello, python"
print """hello, python"""
print 'hello,', 'python'
print 'hello', ',', 'python'
print "=================================="
# 3-4: rebinding names, and the arithmetic-series sum formula
a = 'ABC'
b = a
a = 'XYZ'
print b, a
x = 1
d = 2
n = 1  # try with 100, 10, 1
x1 = x + (n - 1) * d
# NOTE: integer division in Python 2 truncates.
s = (x + x1) * n / 2
print s
print "=================================="
# 3-5: escaped quotes and newlines
# NOTE: shadows the builtin `str`; the literal contains a typo ("Pyhton").
str = 'Pyhton was started in 1998 by \"Guido\". \nPython is free and easy to learn.'
print str
print "=================================="
# 3-6: triple-quoted and raw triple-quoted strings
print '''Python is created by "Guido".
It is free and easy to learn.
Let's start learn Python in imooc!'''
print r'''Python is created by "Guido".
It is free and easy to learn.
Let's start learn Python in imooc!'''
print r'''"To be, or not to be": that is the question.
Whether it's nobler in the mind to suffer.'''
print '''"To be, or not to be": that is the question.
Whether it's nobler in the mind to suffer.'''
print r'\"To be, or not to be\": that is the question.\nWhether it\'s nobler in the mind to suffer.'
print '\"To be, or not to be\": that is the question.\nWhether it\'s nobler in the mind to suffer.'
print "=================================="
# 3-8: mixed int/float arithmetic
print 2.5 + 10.0 / 4
print "=================================="
# 3-9: truthiness and short-circuit evaluation
a = True
print a and 'a = T' or 'a = F'  # Python treats 0, the empty string '' and None as False; other numbers and non-empty strings count as True.
print a
# Key rule for `and`/`or`: short-circuit evaluation.
# 1. For `a and b`: if a is False the whole result must be False, so a is
#    returned; if a is True the result depends entirely on b, so b is returned.
# 2. For `a or b`: if a is True the whole result must be True, so a is
#    returned; if a is False the result depends entirely on b, so b is returned.
print "=================================="
# 3-10: `or` as a default-value idiom
a = 'python'
print 'hello,', a or 'world'
b = ''
print 'hello,', b or 'world'
| mit |
joone/chromium-crosswalk | build/android/pylib/local/device/local_device_environment.py | 7 | 4202 | # Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import datetime
import logging
import os
import threading
from devil.android import device_blacklist
from devil.android import device_errors
from devil.android import device_utils
from devil.android import logcat_monitor
from devil.utils import parallelizer
from pylib import constants
from pylib.base import environment
def _DeviceCachePath(device):
  """Return the JSON cache file path for |device| inside the out directory."""
  serial = device.adb.GetDeviceSerial()
  return os.path.join(constants.GetOutDirectory(),
                      'device_cache_%s.json' % serial)
class LocalDeviceEnvironment(environment.Environment):
  """Test environment backed by locally attached Android devices."""

  def __init__(self, args, _error_func):
    super(LocalDeviceEnvironment, self).__init__()
    # Optional blacklist of devices that must not be used.
    self._blacklist = (device_blacklist.Blacklist(args.blacklist_file)
                       if args.blacklist_file
                       else None)
    self._device_serial = args.test_device
    # Protects self._devices against concurrent BlacklistDevice() calls.
    self._devices_lock = threading.Lock()
    self._devices = []
    self._max_tries = 1 + args.num_retries
    self._tool_name = args.tool
    self._enable_device_cache = args.enable_device_cache
    self._incremental_install = args.incremental_install
    self._concurrent_adb = args.enable_concurrent_adb
    self._logcat_output_dir = args.logcat_output_dir
    self._logcat_monitors = []

  #override
  def SetUp(self):
    """Discover healthy devices, load caches, and start logcat monitors."""
    available_devices = device_utils.DeviceUtils.HealthyDevices(
        self._blacklist, enable_device_files_cache=self._enable_device_cache)
    if not available_devices:
      raise device_errors.NoDevicesError
    if self._device_serial:
      # Restrict to the single requested device.
      self._devices = [d for d in available_devices
                       if d.adb.GetDeviceSerial() == self._device_serial]
      if not self._devices:
        raise device_errors.DeviceUnreachableError(
            'Could not find device %r' % self._device_serial)
    else:
      self._devices = available_devices

    if self._enable_device_cache:
      for d in self._devices:
        cache_path = _DeviceCachePath(d)
        if os.path.exists(cache_path):
          logging.info('Using device cache: %s', cache_path)
          with open(cache_path) as f:
            d.LoadCacheData(f.read())
          # Delete the cached file so that any exceptions cause it to be
          # regenerated rather than reused stale (it is rewritten in TearDown).
          os.unlink(cache_path)
    if self._logcat_output_dir:
      for d in self._devices:
        logcat_file = os.path.join(
            self._logcat_output_dir,
            '%s_%s' % (d.adb.GetDeviceSerial(),
                       datetime.datetime.utcnow().strftime('%Y%m%dT%H%M%S')))
        monitor = logcat_monitor.LogcatMonitor(
            d.adb, clear=True, output_file=logcat_file)
        self._logcat_monitors.append(monitor)
        monitor.Start()

  @property
  def devices(self):
    if not self._devices:
      raise device_errors.NoDevicesError()
    return self._devices

  @property
  def concurrent_adb(self):
    return self._concurrent_adb

  @property
  def incremental_install(self):
    return self._incremental_install

  @property
  def parallel_devices(self):
    # Wrapper that runs device operations across all devices in parallel.
    return parallelizer.SyncParallelizer(self.devices)

  @property
  def max_tries(self):
    return self._max_tries

  @property
  def tool(self):
    return self._tool_name

  #override
  def TearDown(self):
    """Persist device caches and stop logcat monitors."""
    # Write the cache even when not using it so that it will be ready the first
    # time that it is enabled. Writing it every time is also necessary so that
    # an invalid cache can be flushed just by disabling it for one run.
    for d in self._devices:
      cache_path = _DeviceCachePath(d)
      with open(cache_path, 'w') as f:
        f.write(d.DumpCacheData())
        logging.info('Wrote device cache: %s', cache_path)
    for m in self._logcat_monitors:
      m.Stop()
      m.Close()

  def BlacklistDevice(self, device, reason='local_device_failure'):
    """Add |device| to the blacklist and drop it from the active device list."""
    if not self._blacklist:
      logging.warning(
          'Attempted to blacklist %s, but no blacklist was provided.',
          str(device))
      return
    device_serial = device.adb.GetDeviceSerial()
    self._blacklist.Extend([device_serial], reason=reason)
    with self._devices_lock:
      self._devices = [d for d in self._devices if str(d) != device_serial]
| bsd-3-clause |
owatte/speedtest-reporter | setup.py | 1 | 1915 | #!/usr/bin/env python
'''
This file is part of speedtest-reporter.
Speedtest-reporter is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
Speedtest-reporter is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with speedtest-reporter. If not, see <http://www.gnu.org/licenses/>
'''
from setuptools import setup, find_packages
import speedtest_reporter
# Read the long description once, closing the file handle explicitly.
with open('README.rst') as readme_file:
    LONG_DESCRIPTION = readme_file.read()

setup(
    name='speedtest-reporter',
    version=speedtest_reporter.__version__,
    packages=find_packages(),
    author="Olivier Watte - GwadaLUG",
    author_email="olivier.watte@gmail.com",
    description="internet connection speedtest with results recorded and published",
    long_description=LONG_DESCRIPTION,
    install_requires=['pygal', 'speedtest-cli'],
    include_package_data=True,
    url='https://github.com/owatte/speedtest-reporter',
    classifiers=[
        "Programming Language :: Python",
        "Development Status :: 1 - Planning",
        "Environment :: Console",
        # BUG FIX: a missing trailing comma here used to silently concatenate
        # this classifier with the License classifier into one invalid string.
        "Intended Audience :: System Administrators",
        "License :: OSI Approved :: GNU General Public License v3 or later (GPLv3+)",
        "Natural Language :: English",
        "Operating System :: OS Independent",
        "Programming Language :: Python :: 2.7",
        "Topic :: System :: Networking :: Monitoring",
    ],
    entry_points={
        'console_scripts': [
            'speedtest-reporter = speedtest_reporter:main',
        ],
    },
)
| gpl-3.0 |
mirzawaqasahmed/avocado-vt | virttest/utils_spice.py | 2 | 13177 | """
Common spice test utility functions.
"""
import os
import logging
import time
import sys
from avocado.core import exceptions
from aexpect import ShellCmdError, ShellStatusError
from . import utils_net, utils_misc
class RVConnectError(Exception):
    """Raised when remote-viewer fails to establish a connection."""
def _is_pid_alive(session, pid):
    """Return True iff ``ps`` inside *session* still finds *pid*."""
    try:
        session.cmd("ps -p %s" % pid)
        return True
    except ShellCmdError:
        return False
def wait_timeout(timeout=10):
    """Sleep for *timeout* seconds, logging the wait beforehand.

    :param timeout: number of seconds to sleep (default 10)
    """
    logging.debug("Waiting (timeout=%ss)", timeout)
    time.sleep(timeout)
def kill_app(vm_name, app_name, params, env):
    """Kill *app_name* inside the VM referenced by ``params[vm_name]``.

    :param vm_name: key in *params* holding the VM's name
    :param app_name: path or name of the application to kill
    :param params: test parameters dict
    :param env: test environment providing get_vm()
    """
    vm = env.get_vm(params[vm_name])
    vm.verify_alive()
    login_timeout = int(params.get("login_timeout", 360))
    session = vm.wait_for_login(timeout=login_timeout)
    logging.info("Try to kill %s", app_name)
    os_type = vm.params.get("os_type")
    if os_type == "linux":
        # Kill by basename only.
        session.cmd("pkill %s" % app_name.split(os.path.sep)[-1])
    elif os_type == "windows":
        session.cmd_output("taskkill /F /IM %s" % app_name.split('\\')[-1])
    vm.verify_alive()
    session.close()
def verify_established(client_vm, host, port, rv_binary,
                       tls_port=None, secure_channels=None):
    """Check that remote-viewer holds established connections to host:port.

    Parses netstat output on the client and raises RVConnectError when
    fewer channels than expected are counted, or when a line matching
    the port is not in ESTABLISHED state.

    :param client_vm: client VM object (a login session is opened on it)
    :param host: host ip addr
    :param port: port for client to connect
    :param rv_binary: remote-viewer binary
    :param tls_port: TLS port, when SSL is in use
    :param secure_channels: comma-separated list of secure channel names
    :raise RVConnectError: when the expected connections are not established
    """
    rv_binary = rv_binary.split(os.path.sep)[-1]
    client_session = client_vm.wait_for_login(timeout=60)
    tls_count = 0
    # !!! -n means do not resolve port names
    if ".exe" in rv_binary:
        # Windows client: no grep available, parse the raw listing
        cmd = "netstat -n"
    else:
        cmd = ('(netstat -pn 2>&1| grep "^tcp.*:.*%s.*ESTABLISHED.*%s.*")' %
               (host, rv_binary))
    netstat_out = client_session.cmd_output(cmd)
    logging.info("netstat output: %s", netstat_out)

    if tls_port:
        tls_count = netstat_out.count(tls_port)
    else:
        tls_port = port

    # At least 4 port occurrences are required -- presumably the base
    # SPICE channels; NOTE(review): assumes ``port``/``tls_port`` are
    # strings, since str.count() is used -- confirm at the callers.
    if (netstat_out.count(port) + tls_count) < 4:
        logging.error("Not enough channels were open")
        raise RVConnectError()
    if secure_channels:
        if tls_count < len(secure_channels.split(',')):
            logging.error("Not enough secure channels open")
            raise RVConnectError()
    # Every netstat line mentioning either port must be ESTABLISHED.
    for line in netstat_out.split('\n'):
        if ((port in line and "ESTABLISHED" not in line) or
                (tls_port in line and "ESTABLISHED" not in line)):
            logging.error("Failed to get established connection from netstat")
            raise RVConnectError()
    if "ESTABLISHED" not in netstat_out:
        logging.error("Failed to get established connection from netstat")
        raise RVConnectError()
    logging.info("%s connection to %s:%s successful.",
                 rv_binary, host, port)
    client_session.close()
def start_vdagent(guest_session, test_timeout):
    """Start the spice-vdagentd service in the guest.

    :param guest_session: ssh session of the VM
    :param test_timeout: timeout time for the cmds
    :raise exceptions.TestFail: when the service could not be started
    """
    cmd = "service spice-vdagentd start"
    try:
        guest_session.cmd(cmd, print_func=logging.info,
                          timeout=test_timeout)
    except ShellStatusError:
        # The command ran but its exit status could not be read; treat
        # as success.  (Fixed a missing space in the original message:
        # "most likely" "due to ..." concatenated to "likelydue".)
        logging.debug("Status code of \"%s\" was not obtained, most likely "
                      "due to a problem with colored output", cmd)
    except Exception:
        # Narrowed from a bare ``except:`` so SystemExit and
        # KeyboardInterrupt are no longer swallowed into a test failure.
        raise exceptions.TestFail("Guest Vdagent Daemon Start failed")
    logging.debug("------------ End of guest checking for Spice Vdagent"
                  " Daemon ------------")
    wait_timeout(3)
def restart_vdagent(guest_session, test_timeout):
    """Restart the spice-vdagentd service in the guest.

    :param guest_session: ssh session of the VM
    :param test_timeout: timeout time for the cmds
    :raise exceptions.TestFail: when the restart fails
    """
    cmd = "service spice-vdagentd restart"
    try:
        guest_session.cmd(cmd, print_func=logging.info,
                          timeout=test_timeout)
    except ShellCmdError:
        raise exceptions.TestFail("Couldn't restart spice vdagent process")
    except Exception:
        # Narrowed from a bare ``except:`` so SystemExit and
        # KeyboardInterrupt are no longer swallowed into a test failure.
        raise exceptions.TestFail("Guest Vdagent Daemon Check failed")
    logging.debug("------------ End of Spice Vdagent"
                  " Daemon Restart ------------")
    wait_timeout(3)
def stop_vdagent(guest_session, test_timeout):
    """Stop the spice-vdagentd service in the guest.

    :param guest_session: ssh session of the VM
    :param test_timeout: timeout time for the cmds
    :raise exceptions.TestFail: when the service could not be stopped
    """
    cmd = "service spice-vdagentd stop"
    try:
        guest_session.cmd(cmd, print_func=logging.info,
                          timeout=test_timeout)
    except ShellStatusError:
        # Exit status unreadable; treat as success.  (Fixed the missing
        # space between the concatenated message fragments.)
        logging.debug("Status code of \"%s\" was not obtained, most likely "
                      "due to a problem with colored output", cmd)
    except ShellCmdError:
        raise exceptions.TestFail("Couldn't turn off spice vdagent process")
    except Exception:
        # Narrowed from a bare ``except:`` so SystemExit and
        # KeyboardInterrupt are no longer swallowed into a test failure.
        raise exceptions.TestFail("Guest Vdagent Daemon Check failed")
    logging.debug("------------ End of guest checking for Spice Vdagent"
                  " Daemon ------------")
    wait_timeout(3)
def verify_vdagent(guest_session, test_timeout):
    """Verify that the spice-vdagent package is installed in the guest.

    :param guest_session: ssh session of the VM
    :param test_timeout: timeout time for the query command
    """
    query = "rpm -qa | grep spice-vdagent"
    try:
        guest_session.cmd(query, print_func=logging.info,
                          timeout=test_timeout)
    finally:
        logging.debug("----------- End of guest check to see if vdagent "
                      "package is available ------------")
    wait_timeout(3)
def get_vdagent_status(vm_session, test_timeout):
    """Return the status output of the spice-vdagentd service.

    :param vm_session: ssh session of the VM
    :param test_timeout: timeout time for the cmd
    :return: the service status output, or the string "stopped" when the
             service is not running
    :raise exceptions.TestFail: on any other failure to query the status
    """
    cmd = "service spice-vdagentd status"
    wait_timeout(3)
    try:
        output = vm_session.cmd(
            cmd, print_func=logging.info, timeout=test_timeout)
    except ShellCmdError:
        # getting the status of vdagent stopped returns 3, which results
        # in a ShellCmdError; report that as a normal state
        return "stopped"
    except Exception:
        # Was a bare ``except:`` with a Python-2-only ``print``
        # statement; log the unexpected error instead and fail the test.
        logging.error("Unexpected error: %s", sys.exc_info()[0])
        raise exceptions.TestFail(
            "Failed attempting to get status of spice-vdagentd")
    wait_timeout(3)
    return output
def verify_virtio(guest_session, test_timeout):
    """Verify that the Virtio linux driver is properly loaded.

    :param guest_session: ssh session of the VM
    :param test_timeout: timeout time for the check command
    """
    check_cmd = "ls /dev/virtio-ports/"
    try:
        guest_session.cmd(check_cmd, print_func=logging.info,
                          timeout=test_timeout)
    finally:
        logging.debug("------------ End of guest check of the Virtio-Serial"
                      " Driver------------")
    wait_timeout(3)
def install_rv_win(client, host_path, client_path='C:\\virt-viewer.msi'):
    """Install remote-viewer on a windows client.

    :param client: VM object
    :param host_path: Location of installer on host
    :param client_path: Location of installer after copying
    """
    session = client.wait_for_login(
        timeout=int(client.params.get("login_timeout", 360)))
    client.copy_files_to(host_path, client_path)
    try:
        session.cmd_output('start /wait msiexec /i ' + client_path +
                           ' INSTALLDIR="C:\\virt-viewer"')
    except Exception:
        # Best-effort install kept from the original code; narrowed from
        # a bare ``except:`` so KeyboardInterrupt/SystemExit propagate.
        pass
def install_usbclerk_win(client, host_path, client_path="C:\\usbclerk.msi"):
    """Install USB clerk on a windows client.

    (The original docstring wrongly said "remote-viewer".)

    :param client: VM object
    :param host_path: Location of installer on host
    :param client_path: Location of installer after copying
    """
    session = client.wait_for_login(timeout=int(
        client.params.get("login_timeout", 360)))
    client.copy_files_to(host_path, client_path)
    try:
        session.cmd_output("start /wait msiexec /i " + client_path + " /qn")
    except Exception:
        # Best-effort install kept from the original code; narrowed from
        # a bare ``except:`` so KeyboardInterrupt/SystemExit propagate.
        pass
def clear_interface(vm, login_timeout=360, timeout=5):
    """Clear the user interface of a vm without reboot.

    :param vm: VM where cleaning is required
    :param login_timeout: timeout for logging into the VM
    :param timeout: how long to wait for the UI processes to go away
    """
    # kill remote-viewer window if it is open
    if vm.params.get("os_type") == "windows":
        session = vm.wait_for_login()
        try:
            session.cmd("taskkill /F /IM remote-viewer.exe")
        except Exception:
            # Narrowed from a bare ``except:``; a failure here simply
            # means the viewer was not running.
            logging.info("Remote-viewer not running")
    else:
        clear_interface_linux(vm, login_timeout, timeout)
def clear_interface_linux(vm, login_timeout, timeout):
    """Clear the user interface of a Linux vm without reboot.

    :param vm: VM where cleaning is required
    :param login_timeout: timeout for the root login
    :param timeout: how long to wait after killing the display process
    """
    logging.info("restarting X/gdm on: %s", vm.name)
    # NOTE(review): the root password is hard-coded here; it should
    # probably come from the test parameters instead.
    session = vm.wait_for_login(username="root", password="123456",
                                timeout=login_timeout)
    # RHEL 7 runs gdm; older releases are handled by killing Xorg.
    if "release 7" in session.cmd('cat /etc/redhat-release'):
        command = "gdm"
        pgrep_process = "'^gdm$'"
    else:
        command = "Xorg"
        pgrep_process = "Xorg"

    try:
        pid = session.cmd("pgrep %s" % pgrep_process)
        session.cmd("killall %s" % command)
        # NOTE(review): this waits for the *old* pid to be alive, which
        # looks inverted -- waiting for ``not _is_pid_alive(...)`` (i.e.
        # for the kill to take effect) seems to be the intent.  Confirm.
        utils_misc.wait_for(lambda: _is_pid_alive(session, pid), 10,
                            timeout, 0.2)
    except:
        pass

    try:
        # The display manager is expected to have respawned by now.
        session.cmd("ps -C %s" % command)
    except ShellCmdError:
        raise exceptions.TestFail("X/gdm not running")
def deploy_epel_repo(guest_session, params):
    """Deploy the EPEL repository to a RHEL 5/6/7 guest.

    Does nothing when /etc/yum.repos.d/epel.repo already exists.

    :param guest_session: ssh session to guest VM
    :param params: test parameters (used to log the guest VM name)
    :raise Exception: when the guest is not a supported RHEL release
    """
    # EPEL release RPM URL template per RHEL major release; the "%s" is
    # filled with the guest architecture.
    epel_urls = {
        "release 5": ("http://download.fedoraproject.org/"
                      "pub/epel/5/%s/epel-release-5-4.noarch.rpm"),
        "release 6": ("http://download.fedoraproject.org/"
                      "pub/epel/6/%s/epel-release-6-8.noarch.rpm"),
        "release 7": ("http://download.bos.redhat.com/"
                      "pub/epel/7/%s/e/epel-release-7-5.noarch.rpm"),
    }
    # Check existence of epel repository
    try:
        guest_session.cmd("test -a /etc/yum.repos.d/epel.repo")
    except ShellCmdError:
        arch = guest_session.cmd("arch")
        if "i686" in arch:
            arch = "i386"
        else:
            # strip the trailing newline from the command output
            arch = arch[:-1]
        # Query the release only once instead of once per branch, and
        # drop the three near-identical elif bodies.
        release = guest_session.cmd("cat /etc/redhat-release")
        for release_token, url in epel_urls.items():
            if release_token in release:
                cmd = "yum -y localinstall %s 2>&1" % (url % arch)
                logging.info("Installing epel repository to %s",
                             params.get("guest_vm"))
                guest_session.cmd(cmd, print_func=logging.info, timeout=90)
                break
        else:
            raise Exception("Unsupported RHEL guest")
def gen_rv_file(params, guest_vm, host_subj=None, cacert=None):
    """Generate a .vv connection file for remote-viewer.

    Writes ``rv_file.vv`` in the current working directory.

    :param params: all parameters of the test
    :param guest_vm: object of a guest VM
    :param host_subj: subject of the host
    :param cacert: location of certificate of host
    """
    full_screen = params.get("full_screen")
    proxy = params.get("spice_proxy")

    # ``with`` guarantees the file is flushed and closed even on error;
    # the original code never closed the handle.
    with open('rv_file.vv', 'w') as rv_file:
        rv_file.write("[virt-viewer]\n" +
                      "type=%s\n" % params.get("display") +
                      "host=%s\n" % utils_net.get_host_ip_address(params) +
                      "port=%s\n" % guest_vm.get_spice_var("spice_port"))

        # Effective password: an explicitly sent password overrides the
        # configured one, and a qemu-level password overrides both.
        ticket = params.get("spice_password", None)
        ticket_send = params.get("spice_password_send", None)
        qemu_ticket = params.get("qemu_password", None)
        if ticket_send:
            ticket = ticket_send
        if qemu_ticket:
            ticket = qemu_ticket
        if ticket:
            rv_file.write("password=%s\n" % ticket)

        if guest_vm.get_spice_var("spice_ssl") == "yes":
            rv_file.write("tls-port=%s\n" %
                          guest_vm.get_spice_var("spice_tls_port"))
            rv_file.write("tls-ciphers=DEFAULT\n")
        if host_subj:
            rv_file.write("host-subject=%s\n" % host_subj)
        if cacert:
            # Read and close the certificate file (it was leaked before),
            # then embed it on a single line as remote-viewer expects.
            with open(cacert) as cert:
                ca = cert.read()
            ca = ca.replace('\n', r'\n')
            rv_file.write("ca=%s\n" % ca)
        if full_screen == "yes":
            rv_file.write("fullscreen=1\n")
        if proxy:
            rv_file.write("proxy=%s\n" % proxy)
| gpl-2.0 |
noelbk/neutron-juniper | neutron/extensions/lbaas_agentscheduler.py | 10 | 4404 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (c) 2013 OpenStack Foundation.
# All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from abc import abstractmethod
from neutron.api import extensions
from neutron.api.v2 import base
from neutron.api.v2 import resource
from neutron.common import constants
from neutron.extensions import agent
from neutron import manager
from neutron.plugins.common import constants as plugin_const
from neutron import policy
from neutron import wsgi
# Resource names exposed by this extension.
LOADBALANCER_POOL = 'loadbalancer-pool'
LOADBALANCER_POOLS = LOADBALANCER_POOL + 's'
LOADBALANCER_AGENT = 'loadbalancer-agent'
class PoolSchedulerController(wsgi.Controller):
    """Lists the pools hosted by a given LBaaS agent."""

    def index(self, request, **kwargs):
        service_plugins = manager.NeutronManager.get_service_plugins()
        lbaas_plugin = service_plugins.get(plugin_const.LOADBALANCER)
        if not lbaas_plugin:
            return {'pools': []}
        policy.enforce(request.context,
                       "get_%s" % LOADBALANCER_POOLS,
                       {},
                       plugin=lbaas_plugin)
        return lbaas_plugin.list_pools_on_lbaas_agent(
            request.context, kwargs['agent_id'])
class LbaasAgentHostingPoolController(wsgi.Controller):
    """Shows the LBaaS agent hosting a given pool."""

    def index(self, request, **kwargs):
        service_plugins = manager.NeutronManager.get_service_plugins()
        lbaas_plugin = service_plugins.get(plugin_const.LOADBALANCER)
        if not lbaas_plugin:
            return
        policy.enforce(request.context,
                       "get_%s" % LOADBALANCER_AGENT,
                       {},
                       plugin=lbaas_plugin)
        return lbaas_plugin.get_lbaas_agent_hosting_pool(
            request.context, kwargs['pool_id'])
class Lbaas_agentscheduler(extensions.ExtensionDescriptor):
    """Extension class supporting the LBaaS agent scheduler."""

    @classmethod
    def get_name(cls):
        return "Loadbalancer Agent Scheduler"

    @classmethod
    def get_alias(cls):
        return constants.LBAAS_AGENT_SCHEDULER_EXT_ALIAS

    @classmethod
    def get_description(cls):
        return "Schedule pools among lbaas agents"

    @classmethod
    def get_namespace(cls):
        return "http://docs.openstack.org/ext/lbaas_agent_scheduler/api/v1.0"

    @classmethod
    def get_updated(cls):
        return "2013-02-07T10:00:00-00:00"

    @classmethod
    def get_resources(cls):
        """Returns Ext Resources."""
        # agents/<id>/loadbalancer-pools
        agent_parent = dict(member_name="agent", collection_name="agents")
        pools_controller = resource.Resource(PoolSchedulerController(),
                                             base.FAULT_MAP)
        # pools/<id>/loadbalancer-agent (under the LBaaS URL prefix)
        pool_parent = dict(member_name="pool", collection_name="pools")
        agent_controller = resource.Resource(LbaasAgentHostingPoolController(),
                                             base.FAULT_MAP)
        lb_prefix = plugin_const.COMMON_PREFIXES[plugin_const.LOADBALANCER]
        return [
            extensions.ResourceExtension(
                LOADBALANCER_POOLS, pools_controller, agent_parent),
            extensions.ResourceExtension(
                LOADBALANCER_AGENT, agent_controller, pool_parent,
                path_prefix=lb_prefix),
        ]

    def get_extended_resources(self, version):
        return {}
class NoEligibleLbaasAgent(agent.AgentNotFound):
    # Raised when no lbaas agent can be scheduled for the pool;
    # ``_`` is the gettext translation hook.
    message = _("No eligible loadbalancer agent found "
                "for pool %(pool_id)s.")
class NoActiveLbaasAgent(agent.AgentNotFound):
    # Raised when agents exist for the pool but none is active;
    # ``_`` is the gettext translation hook.
    message = _("No active loadbalancer agent found "
                "for pool %(pool_id)s.")
class LbaasAgentSchedulerPluginBase(object):
    """REST API to operate the lbaas agent scheduler.

    All methods must be called in an admin context.
    """
    # NOTE(review): @abstractmethod is advisory here -- without an
    # ABCMeta metaclass Python will not prevent instantiation of an
    # incomplete subclass.

    @abstractmethod
    def list_pools_on_lbaas_agent(self, context, id):
        pass

    @abstractmethod
    def get_lbaas_agent_hosting_pool(self, context, pool_id):
        pass
| apache-2.0 |
sti-lyneos/shop | tests/test_ubuntu_sso_api.py | 3 | 1359 | import os
import unittest
from tests.utils import (
setup_test_env,
)
setup_test_env()
from softwarecenter.backend.ubuntusso import (UbuntuSSOAPIFake,
UbuntuSSO,
get_ubuntu_sso_backend,
)
class TestSSOAPI(unittest.TestCase):
    """Tests the ubuntu sso backend stuff."""

    @staticmethod
    def _public_names(klass):
        """Return the set of public attribute names of *klass*."""
        return set(name for name in dir(klass)
                   if not name.startswith("_"))

    def test_fake_and_real_provide_similar_methods(self):
        """The real and fake SSO implementations expose the same API."""
        self.assertEqual(self._public_names(UbuntuSSO),
                         self._public_names(UbuntuSSOAPIFake))

    def test_get_ubuntu_backend(self):
        """The factory honours SOFTWARE_CENTER_FAKE_REVIEW_API."""
        # without the env var we get the real implementation
        self.assertEqual(type(get_ubuntu_sso_backend()), UbuntuSSO)
        # with the env var set we get the fake one
        os.environ["SOFTWARE_CENTER_FAKE_REVIEW_API"] = "1"
        self.assertEqual(type(get_ubuntu_sso_backend()), UbuntuSSOAPIFake)
        # clean the environment
        del os.environ["SOFTWARE_CENTER_FAKE_REVIEW_API"]
if __name__ == "__main__":
    # Allow running this test module directly.
    unittest.main()
| lgpl-3.0 |
Darkade/udlap | 9no/simulacion/examen 2010 09 13 - Reyes Ivan/distribuciones.py | 1 | 1670 | from random import random
from math import sqrt, exp, log, sin, cos,pi,factorial
class continuas:
    """Simulation of continuous random variables.

    Provides the normal distribution (polar and Box-Muller methods) and
    the exponential, gamma and beta distributions.
    """

    def polar(self):
        """Return two independent standard normal samples (polar method)."""
        s = 2
        while s > 1:
            # rejection step: keep (v1, v2) only when inside the unit disc
            v1 = 2 * random() - 1
            v2 = 2 * random() - 1
            s = v1 ** 2 + v2 ** 2
        factor = sqrt(-2 * log(s) / s)
        return [factor * v1, factor * v2]

    def muller(self):
        """Return two independent standard normal samples (Box-Muller)."""
        radius = sqrt(-2 * log(random()))
        angle = 2 * pi * random()
        return [radius * cos(angle), radius * sin(angle)]

    def exponencial(self, l):
        """Return an exponential sample with rate ``l`` (inverse transform)."""
        return -1.0 / l * log(random())

    def gamma(self, k, l):
        """Return a gamma(k, l) sample as the sum of k exponentials."""
        return sum([self.exponencial(l) for _ in range(0, k)])

    def beta(self, a, b):
        """Return a beta(a, b) sample built from two gamma samples."""
        x = self.gamma(a, 1)
        y = self.gamma(b, 1)
        return x / (x + y)
class discretas:
    """Simulation of discrete random variables.

    Provides: discrete uniform, Bernoulli, binomial, Poisson, geometric
    and negative binomial distributions.
    """

    def uniforme_d(self, n):
        """Return a uniform integer in [1, n]."""
        return int(n * random()) + 1

    def bernoulli(self, p):
        """Return 1 with probability ``p``, else 0.

        Fixed: the original compared ``random() > p``, which yields 1
        with probability 1 - p instead of p.
        """
        return int(random() < p)

    def binomial(self, p, n):
        """Return a binomial(n, p) sample via inverse-transform search."""
        u = random()
        q = 1 - p
        pr = q ** n          # P(X = 0)
        F = pr               # running CDF
        x = 0
        while u > F:
            x += 1
            # recurrence: P(X=x) = P(X=x-1) * (n-x+1)/x * p/q
            pr = pr * (n - x + 1) * p / (x * q)
            F += pr
        return x

    def hipergeometrica(self, M, K, n):
        """Hypergeometric sampling -- not implemented yet."""
        # Parenthesized so the placeholder message is valid Python 3 too.
        print("Aun no implementado")
        # f(x)=f(x-1)\frac{(K-x+1)(n-x+1)}{x(M-K-n+x)}

    def poisson(self, l):
        """Return a Poisson(l) sample via inverse-transform search."""
        u = random()
        pr = exp(-1 * l)     # P(X = 0)
        F = pr
        x = 0
        while u > F:
            # recurrence: P(X=x+1) = P(X=x) * l/(x+1)
            pr = pr * l / (x + 1)
            F += pr
            x += 1
        return x

    def geometrica(self, p):
        """Return a geometric(p) sample (trials until first success)."""
        return int(log(random()) / log(1 - p)) + 1

    def binomial_neg(self, p, r):
        """Return a negative binomial(r, p) sample via inverse transform."""
        u = random()
        q = 1 - p
        pr = p ** r          # P(X = r)
        F = pr
        x = r
        while u > F:
            pr = pr * x * q / (x + 1 - r)
            F += pr
            x += 1
        return x

    def binomial_neg2(self, p, r):
        """Return a negative binomial(r, p) sample as a sum of r geometrics.

        Fixed: the original always summed 10 geometric draws regardless
        of ``r``.
        """
        suma = 0
        for _ in range(r):
            suma += self.geometrica(p)
        return suma
| apache-2.0 |
eehlers/QuantLibXL | make_zip.py | 1 | 9636 |
import sys
import os
import shutil
import datetime
import glob
import zipfile
import argparse
import re
# Release identifiers used to name archives and locate built binaries.
QLXL = "QuantLibXL"
VERSION = "1.8.0"
VERSION_ = "1_8_0"        # underscore form embedded in XLL file names
#VC_VERSION = "vc120"
VC_VERSION = "vc90"       # Visual C++ toolset tag in the binary names
QLXL_VERSION = QLXL + "-" + VERSION
ROOT_DIR = QLXL_VERSION + "\\"
class ZipFile:
    """Wrapper around ``zipfile.ZipFile`` that prefixes every entry with
    a common root directory inside the archive."""

    root = None
    zipFile = None

    def __init__(self, path, root):
        self.root = root
        self.zipFile = zipfile.ZipFile(path, "w", zipfile.ZIP_DEFLATED)

    def __del__(self):
        # Best-effort close on garbage collection; closing twice is safe.
        self.zipFile.close()

    def zip(self, sourcePath, targetPath=None):
        """Add one file; by default it is stored under ``self.root``."""
        print(sourcePath)
        if targetPath is None:
            targetPath = self.root + sourcePath
        self.zipFile.write(sourcePath, targetPath)

    def zipGlob(self, path, excludeFiles=None):
        """Add every file matching glob ``path``, honouring exclusions.

        Fixed: the original ``continue`` only advanced the inner loop
        over exclusion patterns, so excluded files were still archived.
        """
        for fileName in glob.glob(path):
            if excludeFiles is not None and any(
                    r.match(fileName) for r in excludeFiles):
                continue
            print(fileName)
            self.zip(fileName)
class Selector:
    """Walk ``inputPath`` and zip every file passing the filters.

    ``incDirs``/``excDirs``/``incFiles``/``excFiles`` are sequences of
    compiled regular expressions.  Exclusion wins over inclusion, and a
    missing include list means "include everything".
    """
    zipFile = None
    inputPath = None
    incDirs = None
    excDirs = None
    incFiles = None
    excFiles = None

    def __init__(self, zipFile, inputPath, incDirs=None, excDirs=None, incFiles=None, excFiles=None):
        self.zipFile = zipFile
        self.inputPath = inputPath
        self.incDirs = incDirs
        self.excDirs = excDirs
        self.incFiles = incFiles
        self.excFiles = excFiles
        # The walk runs immediately: constructing a Selector performs
        # the zipping as a side effect.
        self.process()

    def process(self):
        """Walk the tree, pruning excluded directories in place."""
        for root, dirs, files in os.walk(self.inputPath):
            # Archive paths use Windows-style separators.
            root += "\\"
            # Removing from ``dirs`` prunes os.walk's descent; iterating
            # the reversed view keeps removal of the current entry safe.
            for d in reversed(dirs):
                if self.excludeDir(d):
                    dirs.remove(d)
                    continue
            for f in files:
                if self.includeFile(f):
                    self.zipFile.zip(root + f)

    def excludeDir(self, d):
        """Return True when directory ``d`` must not be descended into."""
        if self.excDirs is not None:
            for r in self.excDirs:
                if r.match(d):
                    return True
        if self.incDirs is None:
            return False
        else:
            # With an include list present, anything unmatched is excluded.
            for r in self.incDirs:
                if r.match(d):
                    return False
            return True

    def includeFile(self, f):
        """Return True when file ``f`` should be added to the archive."""
        if self.excFiles is not None:
            for r in self.excFiles:
                if r.match(f):
                    return False
        if self.incFiles is None:
            return True
        else:
            for r in self.incFiles:
                if r.match(f):
                    return True
            return False
def prompt_exit(msg='', status=0):
    """Print ``msg``, wait for a keypress on Windows, then exit.

    NOTE(review): uses Python-2-only ``print`` statement and
    ``raw_input``; this whole script targets Python 2.
    """
    if msg:
        print msg
    if sys.platform == 'win32':
        raw_input('press any key to exit')
    sys.exit(status)
# DELETEME: marked for removal by the original author, but still used
# by makeZipStatic()/makeZipStaticX64() below via os.path.walk.
def visit(params, dirname, names):
    """Callback for the (Python-2-only) ``os.path.walk``.

    :param params: tuple of (zipfile.ZipFile, entry name to skip or
                   None, leading path prefix to strip or None)
    :param dirname: directory currently being visited
    :param names: entries of ``dirname``
    """
    zfile = params[0]
    exclude = params[1]
    strip = params[2]
    if strip:
        # Drop the leading prefix so archive paths stay relative.
        rootDir = dirname[len(strip):]
    else:
        rootDir = dirname
    for name in names:
        if exclude == name: continue
        sourcePath = dirname + "/" + name
        targetPath = rootDir + "/" + name
        zfile.write(sourcePath, ROOT_DIR + targetPath)
def makeZipStatic():
    """Build the 32-bit statically-linked RateCurveFramework zip."""
    zipFilePath = "zip/%s-%s.zip" % (QLXL_VERSION, "RateCurveFramework")
    zfile = zipfile.ZipFile(zipFilePath, "w", zipfile.ZIP_DEFLATED)

    # Zip up some specific files from the QuantLibXL directory.
    zfile.write("Docs/QuantLibXL-docs-" + VERSION + ".chm", ROOT_DIR + "Docs/QuantLibXL-docs-" + VERSION + ".chm")
    zfile.write("xll/QuantLibXL-" + VC_VERSION + "-mt-s-" + VERSION_ + ".xll", ROOT_DIR + "xll/QuantLibXL-" + VC_VERSION + "-mt-s-" + VERSION_ + ".xll")
    zfile.write("zip/README.txt", ROOT_DIR + "README.txt")

    # Recursively zip some subdirectories of the QuantLibXL directory
    # (os.path.walk is Python-2-only).
    #os.path.walk("Data", visit, (zfile, ".gitignore", None))
    os.path.walk("Data2/XLS", visit, (zfile, ".gitignore", None))
    os.path.walk("framework", visit, (zfile, "ReadMe.txt", None))
    #os.path.walk("Workbooks", visit, (zfile, None, None))

    # Zip up some files from other projects in the repo.
    os.path.walk("../QuantLibAddin/gensrc/metadata", visit, (zfile, None, "../QuantLibAddin/gensrc/"))
    zfile.write("../XL-Launcher/bin/Addin/Launcher.xla", ROOT_DIR + "Launcher.xla")
    # Skip development and 64-bit session files in this 32-bit package.
    for fileName in glob.glob("../XL-Launcher/bin/Addin/session_file.*-s-*.xml"):
        baseName = os.path.basename(fileName)
        if -1 != baseName.find("-dev") or -1 != baseName.find("-x64"): continue
        zfile.write("../XL-Launcher/bin/Addin/" + baseName, ROOT_DIR + baseName)
    for fileName in glob.glob("../XL-Launcher/bin/Addin/session_file.*-s-*.bat"):
        baseName = os.path.basename(fileName)
        if -1 != baseName.find("-dev") or -1 != baseName.find("-x64"): continue
        zfile.write("../XL-Launcher/bin/Addin/" + baseName, ROOT_DIR + baseName)

    zfile.close()
def makeZipStaticX64():
    """Build the 64-bit statically-linked RateCurveFramework zip.

    NOTE(review): near-duplicate of makeZipStatic(); the two could be
    merged with an architecture parameter.
    """
    zipFilePath = "zip/%s-%s-%s.zip" % (QLXL_VERSION, "x64", "RateCurveFramework")
    zfile = zipfile.ZipFile(zipFilePath, "w", zipfile.ZIP_DEFLATED)

    # Zip up some specific files from the QuantLibXL directory.
    zfile.write("Docs/QuantLibXL-docs-" + VERSION + ".chm", ROOT_DIR + "Docs/QuantLibXL-docs-" + VERSION + ".chm")
    zfile.write("xll/QuantLibXL-" + VC_VERSION + "-x64-mt-s-" + VERSION_ + ".xll", ROOT_DIR + "xll/QuantLibXL-" + VC_VERSION + "-x64-mt-s-" + VERSION_ + ".xll")
    zfile.write("zip/README.txt", ROOT_DIR + "README.txt")

    # Recursively zip some subdirectories of the QuantLibXL directory
    # (os.path.walk is Python-2-only).
    #os.path.walk("Data", visit, (zfile, ".gitignore", None))
    os.path.walk("Data2/XLS", visit, (zfile, ".gitignore", None))
    os.path.walk("framework", visit, (zfile, "ReadMe.txt", None))
    #os.path.walk("Workbooks", visit, (zfile, None, None))

    # Zip up some files from other projects in the repo.
    os.path.walk("../QuantLibAddin/gensrc/metadata", visit, (zfile, None, "../QuantLibAddin/gensrc/"))
    zfile.write("../XL-Launcher/bin/Addin/Launcher.xla", ROOT_DIR + "Launcher.xla")
    zfile.write("../XL-Launcher/bin/Addin/README.txt", ROOT_DIR + "README-session_files.txt")
    # Only x64 session files; still skip development ones.
    for fileName in glob.glob("../XL-Launcher/bin/Addin/session_file.*x64-s-*.xml"):
        baseName = os.path.basename(fileName)
        if -1 != baseName.find("-dev"): continue
        zfile.write("../XL-Launcher/bin/Addin/" + baseName, ROOT_DIR + baseName)
    for fileName in glob.glob("../XL-Launcher/bin/Addin/session_file.*x64-s-*.bat"):
        baseName = os.path.basename(fileName)
        if -1 != baseName.find("-dev"): continue
        zfile.write("../XL-Launcher/bin/Addin/" + baseName, ROOT_DIR + baseName)

    zfile.close()
def zipBinaryFiles(zipFile):
    """Add the prebuilt XLLs and the end-user workbooks to ``zipFile``."""
    #zipFile.zip("zip\\README.txt", zipFile.root + "README.txt")
    zipFile.zip("xll\\QuantLibXL-" + VC_VERSION + "-mt-s-" + VERSION_ + ".xll")
    zipFile.zip("xll\\QuantLibXL-" + VC_VERSION + "-x64-mt-s-" + VERSION_ + ".xll")
    #zipFile.zip("Docs\\QuantLibXL-docs-" + VERSION + ".chm")
    # Ship only text, launch scripts and Office documents from Workbooks.
    Selector(
        inputPath = 'Workbooks',
        zipFile = zipFile,
        incFiles = (
            re.compile('^.*\.TXT$'),
            re.compile('^.*\.bat$'),
            re.compile('^.*\.xlsm$'),
            re.compile('^.*\.xlam$'),
            re.compile('^.*\.xlsx$'),),
    )
def zipFrameworkFiles(zipFile):
    """Add the launcher, session files and framework data to ``zipFile``.

    NOTE(review): session_file.public.live.xml is archived twice, under
    both "session_file.xml" and its own name -- confirm that is intended.
    """
    zipFile.zip("../XL-Launcher/bin/Addin/Launcher.xla", zipFile.root + "Launcher.xla")
    zipFile.zip("../XL-Launcher/bin/Addin/session_file.public.live.xml", zipFile.root + "session_file.xml")
    zipFile.zip("../XL-Launcher/bin/Addin/session_file.public.live.bat", zipFile.root + "session_file.public.live.bat")
    zipFile.zip("../XL-Launcher/bin/Addin/session_file.public.live.xml", zipFile.root + "session_file.public.live.xml")
    # Everything under Data2 and framework is included unfiltered.
    Selector(
        inputPath = 'Data2',
        zipFile = zipFile,
    )
    Selector(
        inputPath = 'framework',
        zipFile = zipFile,
    )
def zipSourceFiles(zipFile):
    """Add the solution files and the qlxl sources to ``zipFile``."""
    zipFile.zipGlob("*.sln")
    #zipFile.zip("Docs\\Makefile.vc")
    #zipFile.zip("Docs\\quantlibxl.doxy")
    #zipFile.zipGlob("Docs\\*.css")
    #zipFile.zipGlob("Docs\\*.html")
    #zipFile.zipGlob("Docs\\*.vcproj")
    #zipFile.zipGlob("Docs\\*.vcxproj")
    #zipFile.zipGlob("Docs\\images\\*.bmp")
    #zipFile.zipGlob("Docs\\images\\*.ico")
    #zipFile.zipGlob("Docs\\images\\*.jpg")
    #zipFile.zipGlob("Docs\\images\\*.png")
    #zipFile.zipGlob("Docs\\pages\\*.docs")
    # Skip build output directories and editor/VCS metadata files.
    Selector(
        inputPath = 'qlxl',
        zipFile = zipFile,
        excDirs = (
            re.compile('^build.*'),),
        excFiles = (
            re.compile('^.gitignore$'),
            re.compile('^Makefile.am$'),
            re.compile('^.*\.user$'),
            re.compile('^.*\.filters$')),
    )
def makeZipBinary():
    """Create the binary-only distribution zip."""
    zipFile = ZipFile("zip/" + QLXL_VERSION + "-bin.zip", ROOT_DIR)
    zipBinaryFiles(zipFile)
def makeZipFramework():
    """Create the framework distribution zip (binaries + framework files)."""
    zipFile = ZipFile("zip/" + QLXL_VERSION + "-framework.zip", ROOT_DIR)
    zipBinaryFiles(zipFile)
    zipFrameworkFiles(zipFile)
def makeZipSource():
    """Create the source distribution zip (rooted at "QuantLibXL\\")."""
    zipFile = ZipFile("zip/" + QLXL_VERSION + ".zip", QLXL + "\\")
    zipSourceFiles(zipFile)
# Command-line entry point: choose which archive flavour to build.
parser = argparse.ArgumentParser(description='zip up QuantLibXL')
parser.add_argument('-t','--target', help='target environment', required=True)
args = vars(parser.parse_args())

if 'binary' == args['target']:
    makeZipBinary()
elif 'framework' == args['target']:
    makeZipFramework()
elif 'source' == args['target']:
    makeZipSource()
elif 'static' == args['target']:
    makeZipStatic()
elif 'staticX64' == args['target']:
    makeZipStaticX64()
else:
    # NOTE(review): Python-2-only ``print``/``raw_input``, and the
    # script still exits with status 0 on an unsupported target.
    print "Error - unsupported target : " + args['target']
    raw_input('press any key to exit')
| bsd-3-clause |
jmhsi/justin_tinker | lib/python2.7/site-packages/pip/_vendor/cachecontrol/adapter.py | 327 | 4608 | import types
import functools
from pip._vendor.requests.adapters import HTTPAdapter
from .controller import CacheController
from .cache import DictCache
from .filewrapper import CallbackFileWrapper
class CacheControlAdapter(HTTPAdapter):
    """Transport adapter adding HTTP caching on top of requests'
    HTTPAdapter, driven by a CacheController."""

    # Successful responses to these methods invalidate the cached entry
    # for the request URL (see the end of build_response).
    invalidating_methods = set(['PUT', 'DELETE'])

    def __init__(self, cache=None,
                 cache_etags=True,
                 controller_class=None,
                 serializer=None,
                 heuristic=None,
                 *args, **kw):
        super(CacheControlAdapter, self).__init__(*args, **kw)
        # Default to a simple in-memory dict cache.
        self.cache = cache or DictCache()
        self.heuristic = heuristic

        controller_factory = controller_class or CacheController
        self.controller = controller_factory(
            self.cache,
            cache_etags=cache_etags,
            serializer=serializer,
        )

    def send(self, request, **kw):
        """
        Send a request. Use the request information to see if it
        exists in the cache and cache the response if we need to and can.
        """
        if request.method == 'GET':
            # A fresh cached response short-circuits the network entirely.
            cached_response = self.controller.cached_request(request)
            if cached_response:
                return self.build_response(request, cached_response,
                                           from_cache=True)

            # check for etags and add headers if appropriate
            request.headers.update(
                self.controller.conditional_headers(request)
            )

        resp = super(CacheControlAdapter, self).send(request, **kw)

        return resp

    def build_response(self, request, response, from_cache=False):
        """
        Build a response by making a request or using the cache.

        This will end up calling send and returning a potentially
        cached response
        """
        if not from_cache and request.method == 'GET':

            # Check for any heuristics that might update headers
            # before trying to cache.
            if self.heuristic:
                response = self.heuristic.apply(response)

            # apply any expiration heuristics
            if response.status == 304:
                # We must have sent an ETag request. This could mean
                # that we've been expired already or that we simply
                # have an etag. In either case, we want to try and
                # update the cache if that is the case.
                cached_response = self.controller.update_cached_response(
                    request, response
                )

                if cached_response is not response:
                    from_cache = True

                # We are done with the server response, read a
                # possible response body (compliant servers will
                # not return one, but we cannot be 100% sure) and
                # release the connection back to the pool.
                response.read(decode_content=False)
                response.release_conn()

                response = cached_response

            # We always cache the 301 responses
            elif response.status == 301:
                self.controller.cache_response(request, response)
            else:
                # Wrap the response file with a wrapper that will cache the
                # response when the stream has been consumed.
                response._fp = CallbackFileWrapper(
                    response._fp,
                    functools.partial(
                        self.controller.cache_response,
                        request,
                        response,
                    )
                )
                if response.chunked:
                    # For chunked bodies the wrapper never sees EOF, so
                    # hook the chunk-length bookkeeping and trigger the
                    # cache callback once the last chunk is reached.
                    super_update_chunk_length = response._update_chunk_length

                    def _update_chunk_length(self):
                        super_update_chunk_length()
                        if self.chunk_left == 0:
                            self._fp._close()
                    response._update_chunk_length = types.MethodType(_update_chunk_length, response)

        resp = super(CacheControlAdapter, self).build_response(
            request, response
        )

        # See if we should invalidate the cache.
        if request.method in self.invalidating_methods and resp.ok:
            cache_url = self.controller.cache_url(request.url)
            self.cache.delete(cache_url)

        # Give the request a from_cache attr to let people use it
        resp.from_cache = from_cache

        return resp

    def close(self):
        self.cache.close()
        super(CacheControlAdapter, self).close()
| apache-2.0 |
abhikumar22/MYBLOG | blg/Lib/site-packages/pip/_vendor/requests/packages/chardet/mbcharsetprober.py | 2924 | 3268 | ######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Universal charset detector code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 2001
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
# Shy Shalom - original C code
# Proofpoint, Inc.
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
import sys
from . import constants
from .charsetprober import CharSetProber
class MultiByteCharSetProber(CharSetProber):
    """Base class for multi-byte charset probers.

    Combines a coding state machine (which validates byte sequences for
    the target encoding) with a character distribution analyzer (which
    scores how typical the observed characters are).  Subclasses install
    both objects and implement get_charset_name().
    """

    def __init__(self):
        CharSetProber.__init__(self)
        # Both are set by the concrete subclass.
        self._mDistributionAnalyzer = None
        self._mCodingSM = None
        # Last two bytes seen, carried across feed() calls so characters
        # split between buffers are still analyzed.
        self._mLastChar = [0, 0]

    def reset(self):
        CharSetProber.reset(self)
        if self._mCodingSM:
            self._mCodingSM.reset()
        if self._mDistributionAnalyzer:
            self._mDistributionAnalyzer.reset()
        self._mLastChar = [0, 0]

    def get_charset_name(self):
        # Implemented by each concrete prober.
        pass

    def feed(self, aBuf):
        """Feed a buffer of bytes and return the detection state."""
        aLen = len(aBuf)
        for i in range(0, aLen):
            codingState = self._mCodingSM.next_state(aBuf[i])
            if codingState == constants.eError:
                # Illegal byte sequence: this encoding is ruled out.
                if constants._debug:
                    sys.stderr.write(self.get_charset_name()
                                     + ' prober hit error at byte ' + str(i)
                                     + '\n')
                self._mState = constants.eNotMe
                break
            elif codingState == constants.eItsMe:
                self._mState = constants.eFoundIt
                break
            elif codingState == constants.eStart:
                # A complete character was recognised; hand it to the
                # distribution analyzer (using the carried-over byte when
                # the character started in the previous buffer).
                charLen = self._mCodingSM.get_current_charlen()
                if i == 0:
                    self._mLastChar[1] = aBuf[0]
                    self._mDistributionAnalyzer.feed(self._mLastChar, charLen)
                else:
                    self._mDistributionAnalyzer.feed(aBuf[i - 1:i + 1],
                                                     charLen)

        self._mLastChar[0] = aBuf[aLen - 1]

        if self.get_state() == constants.eDetecting:
            # Shortcut: declare success once confidence is high enough.
            if (self._mDistributionAnalyzer.got_enough_data() and
                    (self.get_confidence() > constants.SHORTCUT_THRESHOLD)):
                self._mState = constants.eFoundIt

        return self.get_state()

    def get_confidence(self):
        return self._mDistributionAnalyzer.get_confidence()
| gpl-3.0 |
yfried/ansible | lib/ansible/modules/cloud/vmware/vmware_dvs_portgroup.py | 17 | 18284 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2015, Joseph Callen <jcallen () csc.com>
# Copyright: (c) 2017-2018, Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
# Standard Ansible module metadata, consumed by ansible-doc tooling.
ANSIBLE_METADATA = {
    'metadata_version': '1.1',
    'status': ['preview'],
    'supported_by': 'community'
}
DOCUMENTATION = '''
---
module: vmware_dvs_portgroup
short_description: Create or remove a Distributed vSwitch portgroup.
description:
- Create or remove a Distributed vSwitch portgroup.
version_added: 2.0
author:
- Joseph Callen (@jcpowermac)
- Philippe Dellaert (@pdellaert) <philippe@dellaert.org>
notes:
- Tested on vSphere 5.5
- Tested on vSphere 6.5
requirements:
- "python >= 2.6"
- PyVmomi
options:
portgroup_name:
description:
- The name of the portgroup that is to be created or deleted.
required: True
switch_name:
description:
- The name of the distributed vSwitch the port group should be created on.
required: True
vlan_id:
description:
- The VLAN ID that should be configured with the portgroup, use 0 for no VLAN.
- 'If C(vlan_trunk) is configured to be I(true), this can be a range, example: 1-4094.'
required: True
num_ports:
description:
- The number of ports the portgroup should contain.
required: True
portgroup_type:
description:
- See VMware KB 1022312 regarding portgroup types.
required: True
choices:
- 'earlyBinding'
- 'lateBinding'
- 'ephemeral'
state:
description:
- Determines if the portgroup should be present or not.
required: True
    type: str
choices:
- 'present'
- 'absent'
version_added: '2.5'
vlan_trunk:
description:
- Indicates whether this is a VLAN trunk or not.
required: False
default: False
type: bool
version_added: '2.5'
network_policy:
description:
- Dictionary which configures the different security values for portgroup.
- 'Valid attributes are:'
- '- C(promiscuous) (bool): indicates whether promiscuous mode is allowed. (default: false)'
- '- C(forged_transmits) (bool): indicates whether forged transmits are allowed. (default: false)'
- '- C(mac_changes) (bool): indicates whether mac changes are allowed. (default: false)'
required: False
version_added: '2.5'
default: {
promiscuous: False,
forged_transmits: False,
mac_changes: False,
}
teaming_policy:
description:
- Dictionary which configures the different teaming values for portgroup.
- 'Valid attributes are:'
- '- C(load_balance_policy) (string): Network adapter teaming policy. (default: loadbalance_srcid)'
- ' - choices: [ loadbalance_ip, loadbalance_srcmac, loadbalance_srcid, loadbalance_loadbased, failover_explicit]'
- ' - "loadbalance_loadbased" is available from version 2.6 and onwards'
- '- C(inbound_policy) (bool): Indicate whether or not the teaming policy is applied to inbound frames as well. (default: False)'
- '- C(notify_switches) (bool): Indicate whether or not to notify the physical switch if a link fails. (default: True)'
- '- C(rolling_order) (bool): Indicate whether or not to use a rolling policy when restoring links. (default: False)'
required: False
version_added: '2.5'
default: {
'notify_switches': True,
'load_balance_policy': 'loadbalance_srcid',
'inbound_policy': False,
'rolling_order': False
}
port_policy:
description:
- Dictionary which configures the advanced policy settings for the portgroup.
- 'Valid attributes are:'
- '- C(block_override) (bool): indicates if the block policy can be changed per port. (default: true)'
- '- C(ipfix_override) (bool): indicates if the ipfix policy can be changed per port. (default: false)'
- '- C(live_port_move) (bool): indicates if a live port can be moved in or out of the portgroup. (default: false)'
- '- C(network_rp_override) (bool): indicates if the network resource pool can be changed per port. (default: false)'
- '- C(port_config_reset_at_disconnect) (bool): indicates if the configuration of a port is reset automatically after disconnect. (default: true)'
- '- C(security_override) (bool): indicates if the security policy can be changed per port. (default: false)'
- '- C(shaping_override) (bool): indicates if the shaping policy can be changed per port. (default: false)'
- '- C(traffic_filter_override) (bool): indicates if the traffic filter can be changed per port. (default: false)'
- '- C(uplink_teaming_override) (bool): indicates if the uplink teaming policy can be changed per port. (default: false)'
- '- C(vendor_config_override) (bool): indicates if the vendor config can be changed per port. (default: false)'
- '- C(vlan_override) (bool): indicates if the vlan can be changed per port. (default: false)'
required: False
version_added: '2.5'
default: {
'traffic_filter_override': False,
'network_rp_override': False,
'live_port_move': False,
'security_override': False,
'vendor_config_override': False,
'port_config_reset_at_disconnect': True,
'uplink_teaming_override': False,
'block_override': True,
'shaping_override': False,
'vlan_override': False,
'ipfix_override': False
}
extends_documentation_fragment: vmware.documentation
'''
EXAMPLES = '''
- name: Create vlan portgroup
vmware_dvs_portgroup:
hostname: '{{ vcenter_hostname }}'
username: '{{ vcenter_username }}'
password: '{{ vcenter_password }}'
    portgroup_name: vlan-123-portgroup
switch_name: dvSwitch
vlan_id: 123
num_ports: 120
portgroup_type: earlyBinding
state: present
delegate_to: localhost
- name: Create vlan trunk portgroup
vmware_dvs_portgroup:
hostname: '{{ vcenter_hostname }}'
username: '{{ vcenter_username }}'
password: '{{ vcenter_password }}'
    portgroup_name: vlan-trunk-portgroup
switch_name: dvSwitch
vlan_id: 1-1000
vlan_trunk: True
num_ports: 120
portgroup_type: earlyBinding
state: present
delegate_to: localhost
- name: Create no-vlan portgroup
vmware_dvs_portgroup:
hostname: '{{ vcenter_hostname }}'
username: '{{ vcenter_username }}'
password: '{{ vcenter_password }}'
    portgroup_name: no-vlan-portgroup
switch_name: dvSwitch
vlan_id: 0
num_ports: 120
portgroup_type: earlyBinding
state: present
delegate_to: localhost
- name: Create vlan portgroup with all security and port policies
vmware_dvs_portgroup:
hostname: '{{ vcenter_hostname }}'
username: '{{ vcenter_username }}'
password: '{{ vcenter_password }}'
    portgroup_name: vlan-123-portgroup
switch_name: dvSwitch
vlan_id: 123
num_ports: 120
portgroup_type: earlyBinding
state: present
network_policy:
promiscuous: yes
forged_transmits: yes
mac_changes: yes
port_policy:
block_override: yes
ipfix_override: yes
live_port_move: yes
network_rp_override: yes
port_config_reset_at_disconnect: yes
security_override: yes
shaping_override: yes
traffic_filter_override: yes
uplink_teaming_override: yes
vendor_config_override: yes
vlan_override: yes
delegate_to: localhost
'''
try:
from pyVmomi import vim, vmodl
except ImportError as e:
pass
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.vmware import (PyVmomi, find_dvs_by_name, find_dvspg_by_name,
vmware_argument_spec, wait_for_task)
class VMwareDvsPortgroup(PyVmomi):
    """Creates or removes one portgroup on a distributed vSwitch.

    State handlers terminate the module via exit_json()/fail_json(), so
    they never return to the caller.
    """
    def __init__(self, module):
        super(VMwareDvsPortgroup, self).__init__(module)
        # Both are populated by check_dvspg_state() before any state
        # handler runs.
        self.dvs_portgroup = None
        self.dv_switch = None
    def process_state(self):
        """Dispatch on (requested state, current state) and run the handler."""
        dvspg_states = {
            'absent': {
                'present': self.state_destroy_dvspg,
                'absent': self.state_exit_unchanged,
            },
            'present': {
                'update': self.state_update_dvspg,
                'present': self.state_exit_unchanged,
                'absent': self.state_create_dvspg,
            }
        }
        try:
            # check_dvspg_state() returns 'present' or 'absent'; pick the
            # matching action for the requested state and execute it.
            dvspg_states[self.module.params['state']][self.check_dvspg_state()]()
        except vmodl.RuntimeFault as runtime_fault:
            self.module.fail_json(msg=runtime_fault.msg)
        except vmodl.MethodFault as method_fault:
            self.module.fail_json(msg=method_fault.msg)
        except Exception as e:
            # Any other failure (e.g. a malformed vlan_id range) is
            # reported as a module failure rather than a traceback.
            self.module.fail_json(msg=str(e))
    def create_port_group(self):
        """Build a DVPortgroup ConfigSpec from module params and create it.

        Returns the (changed, result) pair produced by wait_for_task().
        """
        config = vim.dvs.DistributedVirtualPortgroup.ConfigSpec()
        # Basic config
        config.name = self.module.params['portgroup_name']
        config.numPorts = self.module.params['num_ports']
        # Default port config
        config.defaultPortConfig = vim.dvs.VmwareDistributedVirtualSwitch.VmwarePortConfigPolicy()
        if self.module.params['vlan_trunk']:
            # Trunk mode: vlan_id is a "start-end" range string.
            config.defaultPortConfig.vlan = vim.dvs.VmwareDistributedVirtualSwitch.TrunkVlanSpec()
            vlan_id_start, vlan_id_end = self.module.params['vlan_id'].split('-')
            config.defaultPortConfig.vlan.vlanId = [vim.NumericRange(start=int(vlan_id_start.strip()), end=int(vlan_id_end.strip()))]
        else:
            # Plain mode: vlan_id is a single VLAN number (0 = untagged).
            config.defaultPortConfig.vlan = vim.dvs.VmwareDistributedVirtualSwitch.VlanIdSpec()
            config.defaultPortConfig.vlan.vlanId = int(self.module.params['vlan_id'])
        config.defaultPortConfig.vlan.inherited = False
        # Security policy (promiscuous / forged transmits / MAC changes)
        config.defaultPortConfig.securityPolicy = vim.dvs.VmwareDistributedVirtualSwitch.SecurityPolicy()
        config.defaultPortConfig.securityPolicy.allowPromiscuous = vim.BoolPolicy(value=self.module.params['network_policy']['promiscuous'])
        config.defaultPortConfig.securityPolicy.forgedTransmits = vim.BoolPolicy(value=self.module.params['network_policy']['forged_transmits'])
        config.defaultPortConfig.securityPolicy.macChanges = vim.BoolPolicy(value=self.module.params['network_policy']['mac_changes'])
        # Teaming Policy
        teamingPolicy = vim.dvs.VmwareDistributedVirtualSwitch.UplinkPortTeamingPolicy()
        teamingPolicy.policy = vim.StringPolicy(value=self.module.params['teaming_policy']['load_balance_policy'])
        teamingPolicy.reversePolicy = vim.BoolPolicy(value=self.module.params['teaming_policy']['inbound_policy'])
        teamingPolicy.notifySwitches = vim.BoolPolicy(value=self.module.params['teaming_policy']['notify_switches'])
        teamingPolicy.rollingOrder = vim.BoolPolicy(value=self.module.params['teaming_policy']['rolling_order'])
        config.defaultPortConfig.uplinkTeamingPolicy = teamingPolicy
        # PG policy (advanced_policy): which settings may be overridden
        # on a per-port basis.
        config.policy = vim.dvs.VmwareDistributedVirtualSwitch.VMwarePortgroupPolicy()
        config.policy.blockOverrideAllowed = self.module.params['port_policy']['block_override']
        config.policy.ipfixOverrideAllowed = self.module.params['port_policy']['ipfix_override']
        config.policy.livePortMovingAllowed = self.module.params['port_policy']['live_port_move']
        config.policy.networkResourcePoolOverrideAllowed = self.module.params['port_policy']['network_rp_override']
        config.policy.portConfigResetAtDisconnect = self.module.params['port_policy']['port_config_reset_at_disconnect']
        config.policy.securityPolicyOverrideAllowed = self.module.params['port_policy']['security_override']
        config.policy.shapingOverrideAllowed = self.module.params['port_policy']['shaping_override']
        config.policy.trafficFilterOverrideAllowed = self.module.params['port_policy']['traffic_filter_override']
        config.policy.uplinkTeamingOverrideAllowed = self.module.params['port_policy']['uplink_teaming_override']
        config.policy.vendorConfigOverrideAllowed = self.module.params['port_policy']['vendor_config_override']
        config.policy.vlanOverrideAllowed = self.module.params['port_policy']['vlan_override']
        # PG Type
        config.type = self.module.params['portgroup_type']
        task = self.dv_switch.AddDVPortgroup_Task([config])
        changed, result = wait_for_task(task)
        return changed, result
    def state_destroy_dvspg(self):
        """Delete the existing portgroup (skipped in check mode)."""
        changed = True
        result = None
        # NOTE: in check mode the task is skipped but changed is still
        # reported as True.
        if not self.module.check_mode:
            task = self.dvs_portgroup.Destroy_Task()
            changed, result = wait_for_task(task)
        self.module.exit_json(changed=changed, result=str(result))
    def state_exit_unchanged(self):
        """Desired state already matches; report no change."""
        self.module.exit_json(changed=False)
    def state_update_dvspg(self):
        # Placeholder: updating an existing portgroup is not supported.
        self.module.exit_json(changed=False, msg="Currently not implemented.")
    def state_create_dvspg(self):
        """Create the portgroup (skipped in check mode)."""
        changed = True
        result = None
        if not self.module.check_mode:
            changed, result = self.create_port_group()
        self.module.exit_json(changed=changed, result=str(result))
    def check_dvspg_state(self):
        """Locate the switch and portgroup; return 'present' or 'absent'.

        Fails the module when the distributed vSwitch itself is missing.
        """
        self.dv_switch = find_dvs_by_name(self.content, self.module.params['switch_name'])
        if self.dv_switch is None:
            self.module.fail_json(msg="A distributed virtual switch with name %s does not exist" % self.module.params['switch_name'])
        self.dvs_portgroup = find_dvspg_by_name(self.dv_switch, self.module.params['portgroup_name'])
        if self.dvs_portgroup is None:
            return 'absent'
        else:
            return 'present'
def main():
    """Module entry point: assemble the argument spec and run the manager."""
    # Sub-spec for the portgroup security settings.
    network_policy_spec = dict(
        type='dict',
        options=dict(
            promiscuous=dict(type='bool', default=False),
            forged_transmits=dict(type='bool', default=False),
            mac_changes=dict(type='bool', default=False),
        ),
        default=dict(
            promiscuous=False,
            forged_transmits=False,
            mac_changes=False,
        ),
    )
    # Sub-spec for NIC teaming behaviour.
    teaming_policy_spec = dict(
        type='dict',
        options=dict(
            inbound_policy=dict(type='bool', default=False),
            notify_switches=dict(type='bool', default=True),
            rolling_order=dict(type='bool', default=False),
            load_balance_policy=dict(
                type='str',
                default='loadbalance_srcid',
                choices=[
                    'loadbalance_ip',
                    'loadbalance_srcmac',
                    'loadbalance_srcid',
                    'loadbalance_loadbased',
                    'failover_explicit',
                ],
            ),
        ),
        default=dict(
            inbound_policy=False,
            notify_switches=True,
            rolling_order=False,
            load_balance_policy='loadbalance_srcid',
        ),
    )
    # Sub-spec for per-port override permissions.
    port_policy_spec = dict(
        type='dict',
        options=dict(
            block_override=dict(type='bool', default=True),
            ipfix_override=dict(type='bool', default=False),
            live_port_move=dict(type='bool', default=False),
            network_rp_override=dict(type='bool', default=False),
            port_config_reset_at_disconnect=dict(type='bool', default=True),
            security_override=dict(type='bool', default=False),
            shaping_override=dict(type='bool', default=False),
            traffic_filter_override=dict(type='bool', default=False),
            uplink_teaming_override=dict(type='bool', default=False),
            vendor_config_override=dict(type='bool', default=False),
            vlan_override=dict(type='bool', default=False),
        ),
        default=dict(
            block_override=True,
            ipfix_override=False,
            live_port_move=False,
            network_rp_override=False,
            port_config_reset_at_disconnect=True,
            security_override=False,
            shaping_override=False,
            traffic_filter_override=False,
            uplink_teaming_override=False,
            vendor_config_override=False,
            vlan_override=False,
        ),
    )
    argument_spec = vmware_argument_spec()
    argument_spec.update(
        portgroup_name=dict(required=True, type='str'),
        switch_name=dict(required=True, type='str'),
        vlan_id=dict(required=True, type='str'),
        num_ports=dict(required=True, type='int'),
        portgroup_type=dict(required=True, choices=['earlyBinding', 'lateBinding', 'ephemeral'], type='str'),
        state=dict(required=True, choices=['present', 'absent'], type='str'),
        vlan_trunk=dict(type='bool', default=False),
        network_policy=network_policy_spec,
        teaming_policy=teaming_policy_spec,
        port_policy=port_policy_spec,
    )
    module = AnsibleModule(argument_spec=argument_spec,
                           supports_check_mode=True)
    VMwareDvsPortgroup(module).process_state()
if __name__ == '__main__':
main()
| gpl-3.0 |
gkarlin/django-jenkins | build/Django/tests/modeltests/test_client/views.py | 54 | 8058 | try:
from urllib.parse import urlencode
except ImportError: # Python 2
from urllib import urlencode
from xml.dom.minidom import parseString
from django.contrib.auth.decorators import login_required, permission_required
from django.core import mail
from django.forms import fields
from django.forms.forms import Form
from django.http import HttpResponse, HttpResponseRedirect, HttpResponseNotFound
from django.shortcuts import render_to_response
from django.template import Context, Template
from django.utils.decorators import method_decorator
def get_view(request):
    """Render a trivial template for a GET request; ``var`` defaults to 42."""
    template = Template('This is a test. {{ var }} is the value.', name='GET Template')
    context = Context({'var': request.GET.get('var', 42)})
    return HttpResponse(template.render(context))
def post_view(request):
    """Render one of three templates depending on the request method and
    whether any POST data was submitted."""
    if request.method != 'POST':
        template = Template('Viewing GET page.', name='Empty GET Template')
        context = Context()
    elif request.POST:
        template = Template('Data received: {{ data }} is the value.', name='POST Template')
        context = Context({'data': request.POST['value']})
    else:
        template = Template('Viewing POST page.', name='Empty POST Template')
        context = Context()
    return HttpResponse(template.render(context))
def view_with_header(request):
    """Return an empty response carrying a custom X-DJANGO-TEST header."""
    resp = HttpResponse()
    resp['X-DJANGO-TEST'] = 'Slartibartfast'
    return resp
def raw_post_view(request):
    """On POST, parse the body as XML and render the first book's title
    and author; on any other method, render a stub page."""
    if request.method == 'POST':
        first_book = parseString(request.body).firstChild.firstChild
        title, author = [node.firstChild.nodeValue for node in first_book.childNodes]
        template = Template("{{ title }} - {{ author }}", name="Book template")
        context = Context({"title": title, "author": author})
    else:
        template = Template("GET request.", name="Book GET template")
        context = Context()
    return HttpResponse(template.render(context))
def redirect_view(request):
    """Redirect to the GET view, forwarding any query parameters."""
    query = '?' + urlencode(request.GET, True) if request.GET else ''
    return HttpResponseRedirect('/test_client/get_view/' + query)
def view_with_secure(request):
    """Expose request.is_secure() on the response for test inspection."""
    resp = HttpResponse()
    resp.test_was_secure_request = request.is_secure()
    return resp
def double_redirect_view(request):
    """First hop of a redirect chain: bounce to the permanent-redirect view."""
    target = '/test_client/permanent_redirect_view/'
    return HttpResponseRedirect(target)
def bad_view(request):
    """Return a 404 response whose body carries recognizable marker text."""
    body = 'Not found!. This page contains some MAGIC content'
    return HttpResponseNotFound(body)
# Shared choice list for the single/multi choice fields on TestForm.
TestChoices = (
    ('a', 'First Choice'),
    ('b', 'Second Choice'),
    ('c', 'Third Choice'),
    ('d', 'Fourth Choice'),
    ('e', 'Fifth Choice')
)
class TestForm(Form):
    """Simple form covering one field of each common kind.

    Declaration order determines rendering order, so do not reorder.
    """
    text = fields.CharField()
    email = fields.EmailField()
    value = fields.IntegerField()
    single = fields.ChoiceField(choices=TestChoices)
    multi = fields.MultipleChoiceField(choices=TestChoices)
def form_view(request):
    """Validate TestForm on POST; otherwise render the form bound to GET data."""
    if request.method == 'POST':
        form = TestForm(request.POST)
        if form.is_valid():
            template = Template('Valid POST data.', name='Valid POST Template')
            context = Context()
        else:
            template = Template('Invalid POST data. {{ form.errors }}', name='Invalid POST Template')
            context = Context({'form': form})
    else:
        form = TestForm(request.GET)
        template = Template('Viewing base form. {{ form }}.', name='Form GET Template')
        context = Context({'form': form})
    return HttpResponse(template.render(context))
def form_view_with_template(request):
    """Like form_view, but renders through an on-disk template."""
    if request.method != 'POST':
        form = TestForm()
        message = 'GET form page'
    else:
        form = TestForm(request.POST)
        message = 'POST data OK' if form.is_valid() else 'POST data has errors'
    return render_to_response('form_view.html', {'form': form, 'message': message})
@login_required
def login_protected_view(request):
    """Login-protected view reporting the authenticated username."""
    template = Template('This is a login protected test. Username is {{ user.username }}.', name='Login Template')
    return HttpResponse(template.render(Context({'user': request.user})))
@login_required(redirect_field_name="redirect_to")
def login_protected_view_changed_redirect(request):
    """Login-protected view using a non-default redirect query parameter."""
    template = Template('This is a login protected test. Username is {{ user.username }}.', name='Login Template')
    return HttpResponse(template.render(Context({'user': request.user})))
def _permission_protected_view(request):
    """Base view reporting the user's name and permission set."""
    template = Template(
        'This is a permission protected test. Username is {{ user.username }}. '
        'Permissions are {{ user.get_all_permissions }}.',
        name='Permissions Template')
    return HttpResponse(template.render(Context({'user': request.user})))
# Two flavours of the same view: the first redirects on failure, the
# second raises PermissionDenied.
permission_protected_view = permission_required('modeltests.test_perm')(_permission_protected_view)
permission_protected_view_exception = permission_required('modeltests.test_perm', raise_exception=True)(_permission_protected_view)
class _ViewManager(object):
    """Holds method-based views wrapped with method_decorator."""
    @method_decorator(login_required)
    def login_protected_view(self, request):
        template = Template(
            'This is a login protected test using a method. '
            'Username is {{ user.username }}.',
            name='Login Method Template')
        return HttpResponse(template.render(Context({'user': request.user})))
    @method_decorator(permission_required('modeltests.test_perm'))
    def permission_protected_view(self, request):
        template = Template(
            'This is a permission protected test using a method. '
            'Username is {{ user.username }}. '
            'Permissions are {{ user.get_all_permissions }}.',
            name='Permissions Template')
        return HttpResponse(template.render(Context({'user': request.user})))
# Module-level aliases so URLconfs can reference the bound methods.
_view_manager = _ViewManager()
login_protected_method_view = _view_manager.login_protected_view
permission_protected_method_view = _view_manager.permission_protected_view
def session_view(request):
    """Write a marker value into the session, then render a stub page."""
    request.session['tobacconist'] = 'hovercraft'
    template = Template('This is a view that modifies the session.',
                        name='Session Modifying View Template')
    return HttpResponse(template.render(Context()))
def broken_view(request):
    """Simulate a broken view by raising an unhandled KeyError."""
    message = "Oops! Looks like you wrote some bad code."
    raise KeyError(message)
def mail_sending_view(request):
    """Send one test email, then confirm in the response body."""
    message = mail.EmailMessage(
        "Test message",
        "This is a test email",
        "from@example.com",
        ['first@example.com', 'second@example.com'])
    message.send()
    return HttpResponse("Mail sent")
def mass_mail_sending_view(request):
    """Send two test emails over a single connection."""
    first = mail.EmailMessage(
        'First Test message',
        'This is the first test email',
        'from@example.com',
        ['first@example.com', 'second@example.com'])
    second = mail.EmailMessage(
        'Second Test message',
        'This is the second test email',
        'from@example.com',
        ['second@example.com', 'third@example.com'])
    connection = mail.get_connection()
    connection.send_messages([first, second])
    return HttpResponse("Mail sent")
| lgpl-3.0 |
nirvn/QGIS | tests/src/python/test_qgsmaplayer.py | 5 | 3434 | # -*- coding: utf-8 -*-
"""QGIS Unit tests for QgsMapLayer
.. note:: This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
"""
__author__ = 'Nyall Dawson'
__date__ = '1/02/2017'
__copyright__ = 'Copyright 2017, The QGIS Project'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
import qgis # NOQA
from qgis.core import (QgsReadWriteContext,
QgsVectorLayer,
QgsProject)
from qgis.testing import start_app, unittest
from qgis.PyQt.QtXml import QDomDocument
start_app()
class TestQgsMapLayer(unittest.TestCase):
    def testUniqueId(self):
        """
        Layers created quickly with the same name must still get unique IDs.
        """
        # spawn 1000 layers in a tight loop
        layers = [
            QgsVectorLayer(
                'Point?crs=epsg:4326&field=name:string(20)',
                'test',
                'memory')
            for _ in range(1000)
        ]
        # every id must be unseen when first encountered
        seen = set()
        for lyr in layers:
            self.assertFalse(lyr.id() in seen)
            seen.add(lyr.id())
    def copyLayerViaXmlReadWrite(self, source, dest):
        # serialise `source` into a DOM element, then load it into `dest`
        doc = QDomDocument("testdoc")
        elem = doc.createElement("maplayer")
        self.assertTrue(source.writeLayerXml(elem, doc, QgsReadWriteContext()))
        self.assertTrue(dest.readLayerXml(elem, QgsReadWriteContext()), QgsProject.instance())
    def testGettersSetters(self):
        # auto refresh getters/setters
        lyr = QgsVectorLayer("Point?field=fldtxt:string",
                             "layer", "memory")
        self.assertFalse(lyr.hasAutoRefreshEnabled())
        self.assertEqual(lyr.autoRefreshInterval(), 0)
        # setting an interval alone must not enable auto refresh
        lyr.setAutoRefreshInterval(5)
        self.assertFalse(lyr.hasAutoRefreshEnabled())
        self.assertEqual(lyr.autoRefreshInterval(), 5)
        lyr.setAutoRefreshEnabled(True)
        self.assertTrue(lyr.hasAutoRefreshEnabled())
        self.assertEqual(lyr.autoRefreshInterval(), 5)
        # a zero interval implicitly disables auto refresh
        lyr.setAutoRefreshInterval(0)
        self.assertFalse(lyr.hasAutoRefreshEnabled())
        self.assertEqual(lyr.autoRefreshInterval(), 0)
    def testSaveRestoreAutoRefresh(self):
        """ test saving/restoring auto refresh to xml """
        lyr = QgsVectorLayer("Point?field=fldtxt:string",
                             "layer", "memory")
        other = QgsVectorLayer("Point?field=fldtxt:string",
                               "layer", "memory")
        self.copyLayerViaXmlReadWrite(lyr, other)
        self.assertFalse(other.hasAutoRefreshEnabled())
        self.assertEqual(other.autoRefreshInterval(), 0)
        lyr.setAutoRefreshInterval(56)
        self.copyLayerViaXmlReadWrite(lyr, other)
        self.assertFalse(other.hasAutoRefreshEnabled())
        self.assertEqual(other.autoRefreshInterval(), 56)
        lyr.setAutoRefreshEnabled(True)
        self.copyLayerViaXmlReadWrite(lyr, other)
        self.assertTrue(other.hasAutoRefreshEnabled())
        self.assertEqual(other.autoRefreshInterval(), 56)
if __name__ == '__main__':
unittest.main()
| gpl-2.0 |
stricaud/dionaea | modules/python/scripts/log.py | 11 | 1650 | #********************************************************************************
#* Dionaea
#* - catches bugs -
#*
#*
#*
#* Copyright (C) 2009 Paul Baecher & Markus Koetter
#*
#* This program is free software; you can redistribute it and/or
#* modify it under the terms of the GNU General Public License
#* as published by the Free Software Foundation; either version 2
#* of the License, or (at your option) any later version.
#*
#* This program is distributed in the hope that it will be useful,
#* but WITHOUT ANY WARRANTY; without even the implied warranty of
#* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
#* GNU General Public License for more details.
#*
#* You should have received a copy of the GNU General Public License
#* along with this program; if not, write to the Free Software
#* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#*
#*
#* contact nepenthesdev@gmail.com
#*
#*******************************************************************************/
from dionaea.core import *
import logging
global handler
global logger
class DionaeaLogHandler(logging.Handler):
    """Bridges Python logging records into dionaea's native log sink."""
    def __init__(self):
        # Accept everything from DEBUG upwards.
        logging.Handler.__init__(self, logging.DEBUG)
    def emit(self, record):
        # Forward to the dlhfn callback provided by dionaea.core.
        dlhfn(record.name, record.levelno, record.pathname, record.lineno, record.msg)
def start():
    """Hook called by dionaea at startup; logging needs no extra work here."""
    pass
def new():
    """Attach a DionaeaLogHandler to the root logger at DEBUG level."""
    global logger, handler
    handler = DionaeaLogHandler()
    logger = logging.getLogger('')
    logger.setLevel(logging.DEBUG)
    logger.addHandler(handler)
def stop():
    """Detach the handler installed by new() from the root logger."""
    global logger, handler
    logger.removeHandler(handler)
| gpl-2.0 |
ticklemepierce/osf.io | scripts/tests/test_migrate_metadata.py | 35 | 5155 | from nose.tools import * # noqa
from tests.base import OsfTestCase
import json
from modularodm import Q
from framework.mongo.utils import to_mongo
from website.project.model import ensure_schemas, MetaSchema, Node
from tests import factories
from scripts.migration.migrate_registered_meta import (
main as do_migration,
prepare_nodes
)
# Version-1 metaschema names exercised by these migration tests.
SCHEMA_NAMES = [
    'Open-Ended Registration',
    'OSF-Standard Pre-Data Collection Registration',
    'Replication Recipe (Brandt et al., 2013): Pre-Registration',
    'Replication Recipe (Brandt et al., 2013): Post-Completion',
    'Confirmatory - General'
]
# Pre-migration metadata payloads keyed by schema name;
# _make_registration JSON-encodes these into registered_meta.
OLD_META = {
    'Open-Ended Registration': {
        'summary': 'some airy',
    },
    'OSF-Standard Pre-Data Collection Registration': {
        'comments': 'Standard',
        'datacompletion': 'Yes',
        'looked': 'Yes',
    },
    'Replication Recipe (Brandt et al., 2013): Pre-Registration': {
        'item1': 'Ver',
        'item10': 'yes',
        'item11': 'fas',
        'item12': 'afs',
        'item13': 'fsa',
        'item14': 'fsa',
        'item15': 'fsa',
        'item16': 'sf',
        'item17': 'Exact',
        'item18': 'Different',
        'item19': 'Different',
        'item2': 'vsf',
        'item20': 'Different',
        'item21': 'Close',
        'item22': 'Exact',
        'item23': 'Exact',
        'item24': 'fsasf',
        'item25': 'asfsfa',
        'item26': 'safasf',
        'item27': 'asf',
        'item28': 'fassf',
        'item3': 'fafa',
        'item4': 'fsafds',
        'item5': 'fafa',
        'item6': 'asdfsadf',
        'item7': 'sfsaf',
        'item8': 'sfdsdf',
        'item9': 'sfd',
    },
    'Replication Recipe (Brandt et al., 2013): Post-Completion': {
        'item29': 'adad',
        'item30': 'asd',
        'item31': 'asd',
        'item32': 'not significantly different from the original effect size',
        'item33': 'informative failure to replicate',
        'item34': 'asdasd',
        'item35': 'ds',
        'item36': 'ads',
        'item37': 'das',
    },
    'Confirmatory - General': {
        'comments': 'Standard',
        'datacompletion': 'Yes',
        'looked': 'Yes',
    }
}
class TestMigrateSchemas(OsfTestCase):
    """Covers scripts.migration.migrate_registered_meta for registrations
    carrying pre-migration (version 1) metaschema data."""
    def _make_registration(self, schemas):
        # Create a registration, then rewrite its raw mongo document so
        # it looks pre-migration: registered_meta holds JSON strings
        # keyed by the mongo-safe schema name, registered_schema is None.
        if not isinstance(schemas, list):
            schemas = [schemas]
        reg = factories.RegistrationFactory()
        reg.save()
        self.db['node'].update(
            {'_id': reg._id},
            {
                '$set': {
                    'registered_meta': {
                        to_mongo(schema.name): json.dumps(OLD_META[schema.name])
                        for schema in schemas
                    },
                    'registered_schema': None
                }
            }
        )
    def setUp(self):
        super(TestMigrateSchemas, self).setUp()
        # Start from a clean, known set of metaschemas.
        MetaSchema.remove()
        ensure_schemas()
        # A plain (non-registration) node that the migration must ignore.
        self.regular_old_node = factories.NodeFactory()
        self.open_ended_schema = MetaSchema.find_one(
            Q('name', 'eq', SCHEMA_NAMES[0]) &
            Q('schema_version', 'eq', 1)
        )
        # NOTE(review): _make_registration has no return statement, so
        # these attributes are always None; kept for parity.
        self.open_ended = self._make_registration(self.open_ended_schema)
        self.standard_schema = MetaSchema.find_one(
            Q('name', 'eq', SCHEMA_NAMES[1]) &
            Q('schema_version', 'eq', 1)
        )
        self.standard = self._make_registration(self.standard_schema)
        self.brandt_pre_schema = MetaSchema.find_one(
            Q('name', 'eq', SCHEMA_NAMES[2]) &
            Q('schema_version', 'eq', 1)
        )
        self.brandt_pre = self._make_registration(self.brandt_pre_schema)
        self.brandt_post_schema = MetaSchema.find_one(
            Q('name', 'eq', SCHEMA_NAMES[3]) &
            Q('schema_version', 'eq', 1)
        )
        self.brandt_post = self._make_registration(self.brandt_post_schema)
        # One registration carrying two schemas at once.
        self.multiple = self._make_registration([
            self.brandt_pre_schema,
            self.brandt_post_schema
        ])
        self.confirmatory_schema = MetaSchema.find_one(
            Q('name', 'eq', 'Confirmatory - General')
        )
        self.confirmatory = self._make_registration(self.confirmatory_schema)
        # Ensure every node document starts without registered_schema.
        self.db['node'].update({}, {'$set': {'registered_schema': None}}, multi=True)
    def tearDown(self):
        super(TestMigrateSchemas, self).tearDown()
        self.db['node'].remove()
    def test_prepare_nodes(self):
        # prepare_nodes must initialize registered_schema to an empty list.
        prepare_nodes(self.db)
        for node in self.db['node'].find():
            assert_equal(node['registered_schema'], [])
    def test_migrate_registration_schemas(self):
        # After migration each registration's registered_meta must hold
        # the old values wrapped as {'value': ...}, keyed by schema _id.
        target_nodes = self.db['node'].find({'is_registration': True})
        do_migration(_db=self.db)
        for node in target_nodes:
            for meta_schema_id in node['registered_schema']:
                meta_schema = MetaSchema.load(meta_schema_id)
                old_data = OLD_META[meta_schema.name]
                for key, value in old_data.iteritems():
                    assert_equal(
                        node['registered_meta'][meta_schema._id][key]['value'],
                        value
                    )
| apache-2.0 |
nonZero/OpenCommunity | src/communities/south_migrations/0001_initial.py | 3 | 7084 | # -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    def forwards(self, orm):
        """Create the communities_community table plus the M2M join table
        for upcoming_meeting_participants."""
        # Adding model 'Community'
        db.create_table(u'communities_community', (
            (u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('name', self.gf('django.db.models.fields.CharField')(max_length=200)),
            ('upcoming_meeting_started', self.gf('django.db.models.fields.BooleanField')(default=False)),
            ('upcoming_meeting_scheduled_at', self.gf('django.db.models.fields.DateTimeField')(null=True, blank=True)),
            ('upcoming_meeting_location', self.gf('django.db.models.fields.CharField')(max_length=300, null=True, blank=True)),
            ('upcoming_meeting_comments', self.gf('ocd.base_models.HTMLField')(null=True, blank=True)),
            ('upcoming_meeting_guests', self.gf('django.db.models.fields.TextField')(null=True, blank=True)),
            ('upcoming_meeting_version', self.gf('django.db.models.fields.IntegerField')(default=0)),
            ('upcoming_meeting_is_published', self.gf('django.db.models.fields.BooleanField')(default=False)),
            ('upcoming_meeting_published_at', self.gf('django.db.models.fields.DateTimeField')(null=True, blank=True)),
            ('upcoming_meeting_summary', self.gf('ocd.base_models.HTMLField')(null=True, blank=True)),
            ('board_name', self.gf('django.db.models.fields.CharField')(max_length=200, null=True, blank=True)),
        ))
        db.send_create_signal(u'communities', ['Community'])
        # Adding M2M table for field upcoming_meeting_participants on 'Community'
        db.create_table(u'communities_community_upcoming_meeting_participants', (
            ('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
            ('community', models.ForeignKey(orm[u'communities.community'], null=False)),
            ('ocuser', models.ForeignKey(orm[u'users.ocuser'], null=False))
        ))
        # Each (community, user) pair may appear only once.
        db.create_unique(u'communities_community_upcoming_meeting_participants', ['community_id', 'ocuser_id'])
def backwards(self, orm):
# Deleting model 'Community'
db.delete_table(u'communities_community')
# Removing M2M table for field upcoming_meeting_participants on 'Community'
db.delete_table('communities_community_upcoming_meeting_participants')
models = {
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'communities.community': {
'Meta': {'object_name': 'Community'},
'board_name': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'upcoming_meeting_comments': ('ocd.base_models.HTMLField', [], {'null': 'True', 'blank': 'True'}),
'upcoming_meeting_guests': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'upcoming_meeting_is_published': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'upcoming_meeting_location': ('django.db.models.fields.CharField', [], {'max_length': '300', 'null': 'True', 'blank': 'True'}),
'upcoming_meeting_participants': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'+'", 'blank': 'True', 'to': u"orm['users.OCUser']"}),
'upcoming_meeting_published_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'upcoming_meeting_scheduled_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'upcoming_meeting_started': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'upcoming_meeting_summary': ('ocd.base_models.HTMLField', [], {'null': 'True', 'blank': 'True'}),
'upcoming_meeting_version': ('django.db.models.fields.IntegerField', [], {'default': '0'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'users.ocuser': {
'Meta': {'object_name': 'OCUser'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'display_name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'email': ('django.db.models.fields.EmailField', [], {'unique': 'True', 'max_length': '255', 'db_index': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
}
}
complete_apps = ['communities'] | bsd-3-clause |