hexsha stringlengths 40 40 | size int64 4 1.02M | ext stringclasses 8
values | lang stringclasses 1
value | max_stars_repo_path stringlengths 4 209 | max_stars_repo_name stringlengths 5 121 | max_stars_repo_head_hexsha stringlengths 40 40 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 4 209 | max_issues_repo_name stringlengths 5 121 | max_issues_repo_head_hexsha stringlengths 40 40 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 4 209 | max_forks_repo_name stringlengths 5 121 | max_forks_repo_head_hexsha stringlengths 40 40 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 4 1.02M | avg_line_length float64 1.07 66.1k | max_line_length int64 4 266k | alphanum_fraction float64 0.01 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
0967e8321a3aebd0cdc22c04c4cca5451f40b6cc | 16,824 | py | Python | subsdownloader2/src/SourceCode/chardet/langhungarianmodel.py | builder08/enigma2-plugins_2 | f8f08b947e23c1c86b011492a7323125774c3482 | [
"OLDAP-2.3"
] | null | null | null | subsdownloader2/src/SourceCode/chardet/langhungarianmodel.py | builder08/enigma2-plugins_2 | f8f08b947e23c1c86b011492a7323125774c3482 | [
"OLDAP-2.3"
] | null | null | null | subsdownloader2/src/SourceCode/chardet/langhungarianmodel.py | builder08/enigma2-plugins_2 | f8f08b947e23c1c86b011492a7323125774c3482 | [
"OLDAP-2.3"
] | null | null | null | ######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Communicator client code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
import constants
# 255: Control characters that usually does not exist in any text
# 254: Carriage/Return
# 253: symbol (punctuation) that does not belong to word
# 252: 0 - 9
# Character Mapping Table:
# 256-entry lookup table: index = ISO-8859-2 (Latin-2) byte value, 16 values
# per row; the stored value is the character "order" id used to index the
# HungarianLangModel table below.  The legend above defines the reserved
# values: 255 control, 254 CR/LF, 253 symbol/punctuation, 252 digit.
# NOTE(review): lower orders presumably mark more frequent Hungarian letters
# (generated data) — confirm against the model generator before editing.
Latin2_HungarianCharToOrderMap = (
255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 254, 255, 255, 254, 255, 255, # 00
255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, # 10
253, 253, 253, 253, 253, 253, 253, 253, 253, 253, 253, 253, 253, 253, 253, 253, # 20
252, 252, 252, 252, 252, 252, 252, 252, 252, 252, 253, 253, 253, 253, 253, 253, # 30
# 0x40-0x7f: ASCII '@', 'A'-'Z', punctuation, 'a'-'z'
253, 28, 40, 54, 45, 32, 50, 49, 38, 39, 53, 36, 41, 34, 35, 47,
46, 71, 43, 33, 37, 57, 48, 64, 68, 55, 52, 253, 253, 253, 253, 253,
253, 2, 18, 26, 17, 1, 27, 12, 20, 9, 22, 7, 6, 13, 4, 8,
23, 67, 10, 5, 3, 21, 19, 65, 62, 16, 11, 253, 253, 253, 253, 253,
# 0x80-0xff: Latin-2 high bytes (accented letters and symbols)
159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174,
175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190,
191, 192, 193, 194, 195, 196, 197, 75, 198, 199, 200, 201, 202, 203, 204, 205,
79, 206, 207, 208, 209, 210, 211, 212, 213, 214, 215, 216, 217, 218, 219, 220,
221, 51, 81, 222, 78, 223, 224, 225, 226, 44, 227, 228, 229, 61, 230, 231,
232, 233, 234, 58, 235, 66, 59, 236, 237, 238, 60, 69, 63, 239, 240, 241,
82, 14, 74, 242, 70, 80, 243, 72, 244, 15, 83, 77, 84, 30, 76, 85,
245, 246, 247, 25, 73, 42, 24, 248, 249, 250, 31, 56, 29, 251, 252, 253,
)
# Same structure as Latin2_HungarianCharToOrderMap above, but for the
# windows-1250 encoding: 256 entries mapping each byte value to a character
# order id (255 control, 254 CR/LF, 253 symbol, 252 digit — see legend).
# The ASCII rows match the Latin-2 table except for a few order ids; the
# 0x80-0xff rows differ because the two encodings place accented letters
# at different code points.  Generated data — do not edit by hand.
win1250HungarianCharToOrderMap = (
255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 254, 255, 255, 254, 255, 255, # 00
255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, # 10
253, 253, 253, 253, 253, 253, 253, 253, 253, 253, 253, 253, 253, 253, 253, 253, # 20
252, 252, 252, 252, 252, 252, 252, 252, 252, 252, 253, 253, 253, 253, 253, 253, # 30
253, 28, 40, 54, 45, 32, 50, 49, 38, 39, 53, 36, 41, 34, 35, 47,
46, 72, 43, 33, 37, 57, 48, 64, 68, 55, 52, 253, 253, 253, 253, 253,
253, 2, 18, 26, 17, 1, 27, 12, 20, 9, 22, 7, 6, 13, 4, 8,
23, 67, 10, 5, 3, 21, 19, 65, 62, 16, 11, 253, 253, 253, 253, 253,
161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176,
177, 178, 179, 180, 78, 181, 69, 182, 183, 184, 185, 186, 187, 188, 189, 190,
191, 192, 193, 194, 195, 196, 197, 76, 198, 199, 200, 201, 202, 203, 204, 205,
81, 206, 207, 208, 209, 210, 211, 212, 213, 214, 215, 216, 217, 218, 219, 220,
221, 51, 83, 222, 80, 223, 224, 225, 226, 44, 227, 228, 229, 61, 230, 231,
232, 233, 234, 58, 235, 66, 59, 236, 237, 238, 60, 70, 63, 239, 240, 241,
84, 14, 75, 242, 71, 82, 243, 73, 244, 15, 85, 79, 86, 30, 77, 87,
245, 246, 247, 25, 74, 42, 24, 248, 249, 250, 31, 56, 29, 251, 252, 253,
)
# Model Table:
# total sequences: 100%
# first 512 sequences: 94.7368%
# first 1024 sequences:5.2623%
# rest sequences: 0.8894%
# negative sequences: 0.0009%
# Language model over the character orders defined by the two tables above,
# flattened into one tuple (32 values per row).  Each value 0-3 apparently
# grades how typical a two-character order sequence is (3 = most common,
# 0 = negative/never observed) — the percentages in the "Model Table"
# comment above summarise the training statistics.  Used as the
# 'precedenceMatrix' of the model dicts below.
# NOTE(review): generated data — do not edit by hand.
HungarianLangModel = (
0, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 1, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 2, 3, 3, 3, 3, 3, 3, 3, 3, 2, 2, 3, 3, 1, 1, 2, 2, 2, 2, 2, 1, 2,
3, 2, 2, 3, 3, 3, 3, 3, 2, 3, 3, 3, 3, 3, 3, 1, 2, 3, 3, 3, 3, 2, 3, 3, 1, 1, 3, 3, 0, 1, 1, 1,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 0,
3, 2, 1, 3, 3, 3, 3, 3, 2, 3, 3, 3, 3, 3, 1, 1, 2, 3, 3, 3, 3, 3, 3, 3, 1, 1, 3, 2, 0, 1, 1, 1,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0,
3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 1, 1, 2, 3, 3, 3, 1, 3, 3, 3, 3, 3, 1, 3, 3, 2, 2, 0, 3, 2, 3,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0,
3, 3, 3, 3, 3, 3, 2, 3, 3, 3, 2, 3, 3, 2, 3, 3, 3, 3, 3, 2, 3, 3, 2, 2, 3, 2, 3, 2, 0, 3, 2, 2,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0,
3, 3, 3, 3, 3, 3, 2, 3, 3, 3, 3, 3, 2, 3, 3, 3, 1, 2, 3, 2, 2, 3, 1, 2, 3, 3, 2, 2, 0, 3, 3, 3,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0,
3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 2, 2, 3, 3, 3, 3, 3, 3, 2, 3, 3, 3, 3, 2, 3, 3, 3, 3, 0, 2, 3, 2,
0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0,
3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 1, 1, 1, 3, 3, 2, 1, 3, 2, 2, 3, 2, 1, 3, 2, 2, 1, 0, 3, 3, 1,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0,
3, 2, 2, 3, 3, 3, 3, 3, 1, 2, 3, 3, 3, 3, 1, 2, 1, 3, 3, 3, 3, 2, 2, 3, 1, 1, 3, 2, 0, 1, 1, 1,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0,
3, 3, 3, 3, 3, 3, 3, 3, 2, 2, 3, 3, 3, 3, 3, 2, 1, 3, 3, 3, 3, 3, 2, 2, 1, 3, 3, 3, 0, 1, 1, 2,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0,
3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 2, 3, 3, 3, 2, 3, 3, 2, 3, 3, 3, 2, 0, 3, 2, 3,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 1, 0,
3, 3, 3, 3, 3, 3, 2, 3, 3, 3, 2, 3, 2, 3, 3, 3, 1, 3, 2, 2, 2, 3, 1, 1, 3, 3, 1, 1, 0, 3, 3, 2,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0,
3, 3, 3, 3, 3, 3, 3, 2, 3, 3, 3, 2, 3, 2, 3, 3, 3, 2, 3, 3, 3, 3, 3, 1, 2, 3, 2, 2, 0, 2, 2, 2,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0,
3, 3, 3, 2, 2, 2, 3, 1, 3, 3, 2, 2, 1, 3, 3, 3, 1, 1, 3, 1, 2, 3, 2, 3, 2, 2, 2, 1, 0, 2, 2, 2,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0,
3, 1, 1, 3, 3, 3, 3, 3, 1, 2, 3, 3, 3, 3, 1, 2, 1, 3, 3, 3, 2, 2, 3, 2, 1, 0, 3, 2, 0, 1, 1, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
3, 1, 1, 3, 3, 3, 3, 3, 1, 2, 3, 3, 3, 3, 1, 1, 0, 3, 3, 3, 3, 0, 2, 3, 0, 0, 2, 1, 0, 1, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
3, 3, 3, 3, 3, 3, 2, 2, 3, 3, 2, 2, 2, 2, 3, 3, 0, 1, 2, 3, 2, 3, 2, 2, 3, 2, 1, 2, 0, 2, 2, 2,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0,
3, 3, 3, 3, 3, 3, 1, 2, 3, 3, 3, 2, 1, 2, 3, 3, 2, 2, 2, 3, 2, 3, 3, 1, 3, 3, 1, 1, 0, 2, 3, 2,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0,
3, 3, 3, 1, 2, 2, 2, 2, 3, 3, 3, 1, 1, 1, 3, 3, 1, 1, 3, 1, 1, 3, 2, 1, 2, 3, 1, 1, 0, 2, 2, 2,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0,
3, 3, 3, 2, 1, 2, 1, 1, 3, 3, 1, 1, 1, 1, 3, 3, 1, 1, 2, 2, 1, 2, 1, 1, 2, 2, 1, 1, 0, 2, 2, 1,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0,
3, 3, 3, 1, 1, 2, 1, 1, 3, 3, 1, 0, 1, 1, 3, 3, 2, 0, 1, 1, 2, 3, 1, 0, 2, 2, 1, 0, 0, 1, 3, 2,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0,
3, 2, 1, 3, 3, 3, 3, 3, 1, 2, 3, 2, 3, 3, 2, 1, 1, 3, 2, 3, 2, 1, 2, 2, 0, 1, 2, 1, 0, 0, 1, 1,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0,
3, 3, 3, 3, 2, 2, 2, 2, 3, 1, 2, 2, 1, 1, 3, 3, 0, 3, 2, 1, 2, 3, 2, 1, 3, 3, 1, 1, 0, 2, 1, 3,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0,
3, 3, 3, 2, 2, 2, 3, 2, 3, 3, 3, 2, 1, 1, 3, 3, 1, 1, 1, 2, 2, 3, 2, 3, 2, 2, 2, 1, 0, 2, 2, 1,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0,
1, 0, 0, 3, 3, 3, 3, 3, 0, 0, 3, 3, 2, 3, 0, 0, 0, 2, 3, 3, 1, 0, 1, 2, 0, 0, 1, 1, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
3, 1, 2, 3, 3, 3, 3, 3, 1, 2, 3, 3, 2, 2, 1, 1, 0, 3, 3, 2, 2, 1, 2, 2, 1, 0, 2, 2, 0, 1, 1, 1,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
3, 3, 2, 2, 1, 3, 1, 2, 3, 3, 2, 2, 1, 1, 2, 2, 1, 1, 1, 1, 3, 2, 1, 1, 1, 1, 2, 1, 0, 1, 2, 1,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0,
2, 3, 3, 1, 1, 1, 1, 1, 3, 3, 3, 0, 1, 1, 3, 3, 1, 1, 1, 1, 1, 2, 2, 0, 3, 1, 1, 2, 0, 2, 1, 1,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0,
3, 1, 0, 1, 2, 1, 2, 2, 0, 1, 2, 3, 1, 2, 0, 0, 0, 2, 1, 1, 1, 1, 1, 2, 0, 0, 1, 1, 0, 0, 0, 0,
1, 2, 1, 2, 2, 2, 1, 2, 1, 2, 0, 2, 0, 2, 2, 1, 1, 2, 1, 1, 2, 1, 1, 1, 0, 1, 0, 0, 0, 1, 1, 0,
1, 1, 1, 2, 3, 2, 3, 3, 0, 1, 2, 2, 3, 1, 0, 1, 0, 2, 1, 2, 2, 0, 1, 1, 0, 0, 1, 1, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
1, 0, 0, 3, 3, 2, 2, 1, 0, 0, 3, 2, 3, 2, 0, 0, 0, 1, 1, 3, 0, 0, 1, 1, 0, 0, 2, 1, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
3, 1, 1, 2, 2, 3, 3, 1, 0, 1, 3, 2, 3, 1, 1, 1, 0, 1, 1, 1, 1, 1, 3, 1, 0, 0, 2, 2, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
3, 1, 1, 1, 2, 2, 2, 1, 0, 1, 2, 3, 3, 2, 0, 0, 0, 2, 1, 1, 1, 2, 1, 1, 1, 0, 1, 1, 1, 0, 0, 0,
1, 2, 2, 2, 2, 2, 1, 1, 1, 2, 0, 2, 1, 1, 1, 1, 1, 2, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 0, 0, 1, 1,
3, 2, 2, 1, 0, 0, 1, 1, 2, 2, 0, 3, 0, 1, 2, 1, 1, 0, 0, 1, 1, 1, 0, 1, 1, 1, 1, 0, 2, 1, 1, 1,
2, 2, 1, 1, 1, 2, 1, 2, 1, 1, 1, 1, 1, 1, 1, 2, 1, 1, 1, 2, 3, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1,
2, 3, 3, 0, 1, 0, 0, 0, 3, 3, 1, 0, 0, 1, 2, 2, 1, 0, 0, 0, 0, 2, 0, 0, 1, 1, 1, 0, 2, 1, 1, 1,
2, 1, 1, 1, 1, 1, 1, 2, 1, 1, 0, 1, 1, 0, 1, 1, 1, 0, 1, 2, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 0, 1,
2, 3, 3, 0, 1, 0, 0, 0, 2, 2, 0, 0, 0, 0, 1, 2, 2, 0, 0, 0, 0, 1, 0, 0, 1, 1, 0, 0, 2, 0, 1, 0,
2, 1, 1, 1, 1, 2, 1, 1, 1, 1, 1, 1, 1, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 0, 1, 1, 1, 1, 1, 0, 1,
3, 2, 2, 0, 1, 0, 1, 0, 2, 3, 2, 0, 0, 1, 2, 2, 1, 0, 0, 1, 1, 1, 0, 0, 2, 1, 0, 1, 2, 2, 1, 1,
2, 1, 1, 1, 1, 1, 1, 2, 1, 1, 1, 1, 1, 1, 0, 2, 1, 0, 1, 1, 0, 1, 1, 1, 0, 1, 1, 2, 1, 1, 0, 1,
2, 2, 2, 0, 0, 1, 0, 0, 2, 2, 1, 1, 0, 0, 2, 1, 1, 0, 0, 0, 1, 2, 0, 0, 2, 1, 0, 0, 2, 1, 1, 1,
2, 1, 1, 1, 1, 2, 1, 2, 1, 1, 1, 2, 2, 1, 1, 2, 1, 1, 1, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1,
1, 2, 3, 0, 0, 0, 1, 0, 3, 2, 1, 0, 0, 1, 2, 1, 1, 0, 0, 0, 0, 2, 1, 0, 1, 1, 0, 0, 2, 1, 2, 1,
1, 1, 0, 0, 0, 1, 0, 1, 1, 1, 1, 1, 2, 0, 0, 1, 0, 0, 0, 2, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1,
3, 0, 0, 2, 1, 2, 2, 1, 0, 0, 2, 1, 2, 2, 0, 0, 0, 2, 1, 1, 1, 0, 1, 1, 0, 0, 1, 1, 2, 0, 0, 0,
1, 2, 1, 2, 2, 1, 1, 2, 1, 2, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 1, 1, 0, 0, 1, 1, 1, 1, 0, 0, 1,
1, 3, 2, 0, 0, 0, 1, 0, 2, 2, 2, 0, 0, 0, 2, 2, 1, 0, 0, 0, 0, 3, 1, 1, 1, 1, 0, 0, 2, 1, 1, 1,
2, 1, 0, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 0, 2, 1, 0, 0, 1, 0, 1, 1, 0, 1, 1, 1, 1, 1, 1, 0, 1,
2, 3, 2, 0, 0, 0, 1, 0, 2, 2, 0, 0, 0, 0, 2, 1, 1, 0, 0, 0, 0, 2, 1, 0, 1, 1, 0, 0, 2, 1, 1, 0,
2, 1, 1, 1, 1, 2, 1, 2, 1, 2, 0, 1, 1, 1, 0, 2, 1, 1, 1, 2, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 0, 1,
3, 1, 1, 2, 2, 2, 3, 2, 1, 1, 2, 2, 1, 1, 0, 1, 0, 2, 2, 1, 1, 1, 1, 1, 0, 0, 1, 1, 0, 1, 1, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
2, 2, 2, 0, 0, 0, 0, 0, 2, 2, 0, 0, 0, 0, 2, 2, 1, 0, 0, 0, 1, 1, 0, 0, 1, 2, 0, 0, 2, 1, 1, 1,
2, 2, 1, 1, 1, 2, 1, 2, 1, 1, 0, 1, 1, 1, 1, 2, 1, 1, 1, 2, 1, 1, 1, 1, 0, 1, 2, 1, 1, 1, 0, 1,
1, 0, 0, 1, 2, 3, 2, 1, 0, 0, 2, 0, 1, 1, 0, 0, 0, 1, 1, 1, 1, 0, 1, 1, 0, 0, 1, 0, 0, 0, 0, 0,
1, 2, 1, 2, 1, 2, 1, 1, 1, 2, 0, 2, 1, 1, 1, 0, 1, 2, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0,
2, 3, 2, 0, 0, 0, 0, 0, 1, 1, 2, 1, 0, 0, 1, 1, 1, 0, 0, 0, 0, 2, 0, 0, 1, 1, 0, 0, 2, 1, 1, 1,
2, 1, 1, 1, 1, 1, 1, 2, 1, 0, 1, 1, 1, 1, 0, 2, 1, 1, 1, 1, 1, 1, 0, 1, 0, 1, 1, 1, 1, 1, 0, 1,
1, 2, 2, 0, 1, 1, 1, 0, 2, 2, 2, 0, 0, 0, 3, 2, 1, 0, 0, 0, 1, 1, 0, 0, 1, 1, 0, 1, 1, 1, 0, 0,
1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 2, 1, 1, 1, 1, 1, 1, 1, 2, 1, 1, 1, 0, 0, 1, 1, 1, 0, 1, 0, 1,
2, 1, 0, 2, 1, 1, 2, 2, 1, 1, 2, 1, 1, 1, 0, 0, 0, 1, 1, 0, 1, 1, 1, 1, 0, 0, 1, 1, 1, 0, 0, 0,
1, 2, 2, 2, 2, 2, 1, 1, 1, 2, 0, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 0, 0, 0, 1, 0,
1, 2, 3, 0, 0, 0, 1, 0, 2, 2, 0, 0, 0, 0, 2, 2, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 2, 0, 1, 0,
2, 1, 1, 1, 1, 1, 0, 2, 0, 0, 0, 1, 2, 1, 1, 1, 1, 0, 1, 2, 0, 1, 0, 1, 0, 1, 1, 1, 0, 1, 0, 1,
2, 2, 2, 0, 0, 0, 1, 0, 2, 1, 2, 0, 0, 0, 1, 1, 2, 0, 0, 0, 0, 1, 0, 0, 1, 1, 0, 0, 2, 1, 0, 1,
2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 0, 1, 1, 1, 1, 1, 0, 1,
1, 2, 2, 0, 0, 0, 1, 0, 2, 2, 2, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 1, 1, 0, 2, 0, 0, 1, 1, 1, 0, 1,
1, 0, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 0, 0, 1, 0, 0, 1, 1, 0, 1, 0, 1, 1, 1, 1, 1, 0, 0, 0, 1,
1, 0, 0, 1, 0, 1, 2, 1, 0, 0, 1, 1, 1, 2, 0, 0, 0, 1, 1, 0, 1, 0, 1, 1, 0, 0, 1, 0, 0, 0, 0, 0,
0, 2, 1, 2, 1, 1, 1, 1, 1, 2, 0, 2, 0, 1, 1, 0, 1, 2, 1, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0,
2, 1, 1, 0, 1, 2, 0, 0, 1, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 2, 1, 0, 1,
2, 2, 1, 1, 1, 1, 1, 2, 1, 1, 0, 1, 1, 1, 1, 2, 1, 1, 1, 2, 1, 1, 0, 1, 0, 1, 1, 1, 1, 1, 0, 1,
1, 2, 2, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 2, 1, 0, 0, 0, 0, 0, 2, 0, 0, 2, 2, 0, 0, 2, 0, 0, 1,
2, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 0, 0, 0, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 0, 0, 1,
1, 1, 2, 0, 0, 3, 1, 0, 2, 1, 1, 1, 0, 0, 1, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 1, 0, 0, 1, 0, 1, 0,
1, 2, 1, 0, 1, 1, 1, 2, 1, 1, 0, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 0, 1, 0, 0, 0, 1, 0, 0,
2, 1, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 2, 0, 0, 0,
2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 1, 1, 0, 0, 1, 1, 1, 1, 1, 0, 1,
2, 1, 1, 1, 2, 1, 1, 1, 0, 1, 1, 2, 1, 0, 0, 0, 0, 1, 1, 1, 1, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
1, 1, 0, 1, 1, 1, 1, 1, 0, 0, 1, 1, 2, 1, 0, 0, 0, 1, 1, 0, 0, 0, 1, 1, 0, 0, 1, 0, 1, 0, 0, 0,
1, 2, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0,
2, 0, 0, 0, 1, 1, 1, 1, 0, 0, 1, 1, 0, 0, 0, 0, 0, 1, 1, 1, 2, 0, 0, 1, 0, 0, 1, 0, 1, 0, 0, 0,
0, 1, 1, 1, 1, 1, 1, 1, 1, 2, 0, 1, 1, 1, 1, 0, 1, 1, 1, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0,
1, 0, 0, 1, 1, 1, 1, 1, 0, 0, 2, 1, 0, 1, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0,
0, 1, 1, 1, 1, 1, 1, 0, 1, 1, 0, 1, 0, 1, 1, 0, 1, 1, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0,
1, 0, 0, 1, 1, 1, 0, 0, 0, 0, 1, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 1, 1, 1, 1, 1, 0, 0, 1, 1, 0, 1, 0, 1, 0, 0, 1, 1, 1, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 1, 2, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 1, 1, 1, 0, 1, 0, 0, 1, 1, 0, 1, 0, 1, 1, 0, 1, 1, 1, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0,
2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 0, 0, 1, 0, 0, 1, 0, 1, 0, 1, 1, 1, 0, 0, 1, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
1, 0, 0, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 1, 1, 1, 1, 1, 1, 0, 1, 1, 0, 1, 0, 1, 0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
)
Latin2HungarianModel = {
'charToOrderMap': Latin2_HungarianCharToOrderMap,
'precedenceMatrix': HungarianLangModel,
'mTypicalPositiveRatio': 0.947368,
'keepEnglishLetter': constants.True,
'charsetName': "ISO-8859-2"
}
Win1250HungarianModel = {
'charToOrderMap': win1250HungarianCharToOrderMap,
'precedenceMatrix': HungarianLangModel,
'mTypicalPositiveRatio': 0.947368,
'keepEnglishLetter': constants.True,
'charsetName': "windows-1250"
}
| 74.442478 | 95 | 0.4153 |
a25abe3e613d649f188d0c2834ebb72b4ca819da | 1,150 | py | Python | app/portal/horizon/openstack_dashboard/dashboards/project/cgroups/tabs.py | haoshen61/f5-adcaas-openstack | 4bda29271930bf7c621f4184bda8d43b2fa96336 | [
"Apache-2.0"
] | 37 | 2018-10-30T02:47:24.000Z | 2021-12-04T10:29:40.000Z | app/portal/horizon/openstack_dashboard/dashboards/project/cgroups/tabs.py | haoshen61/f5-adcaas-openstack | 4bda29271930bf7c621f4184bda8d43b2fa96336 | [
"Apache-2.0"
] | 106 | 2019-01-18T03:06:55.000Z | 2019-11-29T05:06:18.000Z | app/portal/horizon/openstack_dashboard/dashboards/project/cgroups/tabs.py | haoshen61/f5-adcaas-openstack | 4bda29271930bf7c621f4184bda8d43b2fa96336 | [
"Apache-2.0"
] | 35 | 2018-11-26T03:36:31.000Z | 2021-12-04T10:29:41.000Z | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from django.core.urlresolvers import reverse
from django.utils.translation import ugettext_lazy as _
from horizon import tabs
class OverviewTab(tabs.Tab):
    """Overview tab rendered on the cgroup detail page."""
    name = _("Overview")
    slug = "overview"
    template_name = "project/cgroups/_detail_overview.html"

    def get_context_data(self, request):
        # The view stores the loaded cgroup in the tab group's kwargs.
        return {"cgroup": self.tab_group.kwargs['cgroup']}

    def get_redirect_url(self):
        return reverse('horizon:project:cgroups:index')
class CGroupsDetailTabs(tabs.TabGroup):
    """Tab group holding all tabs of the cgroup detail page."""
    slug = "cgroup_details"
    tabs = (OverviewTab,)
| 32.857143 | 78 | 0.71913 |
29a2c4d98e3b82b8e7fd5150aff506199d09ea0b | 3,445 | py | Python | stdplugins/PKarbonRGB_2.py | Deepumad77/CartoonBot | eff24f40043e41feadc70a63fa655353c1783ee0 | [
"Apache-2.0"
] | null | null | null | stdplugins/PKarbonRGB_2.py | Deepumad77/CartoonBot | eff24f40043e41feadc70a63fa655353c1783ee0 | [
"Apache-2.0"
] | 2 | 2020-05-22T14:59:05.000Z | 2020-05-28T12:04:02.000Z | stdplugins/PKarbonRGB_2.py | Deepumad77/CartoonBot | eff24f40043e41feadc70a63fa655353c1783ee0 | [
"Apache-2.0"
] | 3 | 2021-01-28T15:42:25.000Z | 2021-11-18T04:02:01.000Z | """Carbon Scraper Plugin for Userbot. //text in creative way.
usage: .rgbk2 //as a reply to any text message
Thanks to @r4v4n4 for vars,,, Random RGB feature by @PhycoNinja13b"""
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.support.ui import Select
from selenium.webdriver.chrome.options import Options
from selenium import webdriver
from telethon import events
from urllib.parse import quote_plus
from urllib.error import HTTPError
from time import sleep
import asyncio
import os
import random
from uniborg.util import admin_cmd
# Userbot command handler: `.rgbk2` renders the replied-to (or inline) text
# as an image on carbon.now.sh with a random RGBA background, drives a
# headless Chrome via Selenium to export the PNG, and uploads the result.
@borg.on(admin_cmd(pattern="rgbk2 ?(.*)", allow_sudo=True))
async def carbon_api(e):
# Random background colour components for the rgba(...) carbon background.
# NOTE(review): random.randint(0, 256) is inclusive at both ends, so 256 can
# be produced — one past the valid 0-255 channel range; likely should be
# randint(0, 255) (or random.randrange(256)). Confirm before changing.
RED = random.randint(0,256)
GREEN = random.randint(0,256)
BLUE = random.randint(0,256)
OPC = random.random()
# Only react when the message does not start with a letter or with the
# common command prefixes of other bots ("/", "#", "@", "!") — i.e. it was
# invoked with this userbot's own prefix (e.g. ".").
if not e.text[0].isalpha() and e.text[0] not in ("/", "#", "@", "!"):
""" A Wrapper for carbon.now.sh """
# Progress indicator message, edited in place as the export advances.
hmm = await e.reply("⬜⬜⬜⬜⬜")
# Carbon URL template; {R}/{G}/{B}/{O} fill the rgba background and
# {code} the urlencoded snippet.
CARBON = 'https://carbon.now.sh/?bg=rgba({R}%2C{G}%2C{B}%2C{O})&t=material&wt=none&l=auto&ds=false&dsyoff=20px&dsblur=68px&wc=true&wa=true&pv=56px&ph=56px&ln=false&fl=1&fm=Fira%20Code&fs=14px&lh=152%25&si=false&es=2x&wm=false&code={code}'
CARBONLANG = "en"
textx = await e.get_reply_message()
pcode = e.text
# Prefer inline text after the command; otherwise fall back to the
# replied-to message.
# NOTE(review): ".rgbk2 " is 7 characters, so pcode[8:] appears to drop the
# first character of any inline argument — looks like an off-by-one
# (pcode[7:]); confirm against the actual command prefix in use.
if pcode[8:]:
pcode = str(pcode[8:])
elif textx:
pcode = str(textx.message) # Importing message to module
code = quote_plus(pcode) # Converting to urlencoded
# NOTE(review): CARBON has no {lang} placeholder, so the lang kwarg below
# is silently ignored by str.format.
url = CARBON.format(code=code, R=RED, G=GREEN, B=BLUE, O=OPC, lang=CARBONLANG)
# Headless Chrome configured for containerised environments (paths come
# from the deployment's Config).
chrome_options = Options()
chrome_options.add_argument("--headless")
chrome_options.binary_location = Config.GOOGLE_CHROME_BIN
chrome_options.add_argument("--window-size=1920x1080")
chrome_options.add_argument("--disable-dev-shm-usage")
chrome_options.add_argument("--no-sandbox")
chrome_options.add_argument('--disable-gpu')
prefs = {'download.default_directory' : './'}
chrome_options.add_experimental_option('prefs', prefs)
await hmm.edit("⬛⬛⬜⬜⬜")
driver = webdriver.Chrome(executable_path=Config.CHROME_DRIVER, options=chrome_options)
driver.get(url)
download_path = './'
# Headless Chrome blocks downloads by default; allow them through the
# DevTools Page.setDownloadBehavior command.
driver.command_executor._commands["send_command"] = ("POST", '/session/$sessionId/chromium/send_command')
params = {'cmd': 'Page.setDownloadBehavior', 'params': {'behavior': 'allow', 'downloadPath': download_path}}
command_result = driver.execute("send_command", params)
# Walk the carbon.now.sh export UI: open the export menu, pick 4x size,
# then download as PNG. The xpaths are tied to carbon's page layout.
driver.find_element_by_xpath('//*[@id="__next"]/main/div[3]/div[2]/div[1]/div[1]/div/span[2]').click()
driver.find_element_by_id("export-menu").click()
#driver.find_element_by_xpath("//button[contains(text(),'Export')]").click()
sleep(5) # this might take a bit.
driver.find_element_by_xpath("//button[contains(text(),'4x')]").click()
sleep(5)
await hmm.edit("⬛⬛⬛⬜⬜")
driver.find_element_by_xpath("//button[contains(text(),'PNG')]").click()
sleep(5) #Waiting for downloading
await hmm.edit("⬛⬛⬛⬛⬛")
# Carbon always names the exported file "carbon.png".
file = './carbon.png'
await hmm.edit("✅RGB Karbon 2.0 Completed, Uploading Karbon✅")
await e.client.send_file(
e.chat_id,
file,
caption="RGB Karbon 2.0 by [@PhycoNinja13b](https://github.com/Phyco-Ninja/UniNinja) \n **RGBA Colour Code** = `({r}, {g}, {b}, {o})`".format(r=RED,g=GREEN,b=BLUE,o=OPC),
force_document=False,
reply_to=e.message.reply_to_msg_id,
)
os.remove('./carbon.png')
# Removing carbon.png after uploading
await hmm.delete() # Deleting msg
| 42.530864 | 241 | 0.693179 |
f26e971a94e5bec49f551a5850dceb21ecde0aef | 6,582 | py | Python | vk_api/audio.py | deker104/vk_api | dffe3156717c5bf70594a8f10138e01d3b5420f4 | [
"Apache-2.0"
] | null | null | null | vk_api/audio.py | deker104/vk_api | dffe3156717c5bf70594a8f10138e01d3b5420f4 | [
"Apache-2.0"
] | null | null | null | vk_api/audio.py | deker104/vk_api | dffe3156717c5bf70594a8f10138e01d3b5420f4 | [
"Apache-2.0"
] | 1 | 2019-07-20T11:10:35.000Z | 2019-07-20T11:10:35.000Z | # -*- coding: utf-8 -*-
"""
:authors: python273
:contact: https://vk.com/python273
:license: Apache License, Version 2.0, see LICENSE file
:copyright: (c) 2018 python273
"""
import re
from bs4 import BeautifulSoup
from .audio_url_decoder import decode_audio_url
from .exceptions import AccessDenied
# Extracts (owner_id, audio_id) from element ids such as "audio123_456";
# negative owner ids denote groups.
RE_AUDIO_ID = re.compile(r'audio(-?\d+)_(\d+)')
# Extracts (owner_id, playlist_id) from playlist links.
RE_ALBUM_ID = re.compile(r'act=audio_playlist(-?\d+)_(\d+)')
# Page sizes used when paginating the m.vk.com mobile pages.
TRACKS_PER_USER_PAGE = 50
TRACKS_PER_ALBUM_PAGE = 100
ALBUMS_PER_USER_PAGE = 100
class VkAudio(object):
    """Fetch audio tracks by scraping m.vk.com instead of the official API.

    :param vk: a :class:`VkApi` object with an authorised session
    """

    __slots__ = ('_vk', 'user_id')

    def __init__(self, vk):
        # The authorised user's id doubles as the default owner and as the
        # key for decoding obfuscated track URLs.
        self.user_id = vk.method('users.get')[0]['id']
        self._vk = vk

    def get_iter(self, owner_id=None, album_id=None):
        """Yield the owner's audio tracks, fetching them page by page.

        :param owner_id: owner id (negative values for groups)
        :param album_id: album id
        """
        if owner_id is None:
            owner_id = self.user_id

        if album_id is not None:
            url = 'https://m.vk.com/audio?act=audio_playlist{}_{}'.format(
                owner_id, album_id
            )
            page_size = TRACKS_PER_ALBUM_PAGE
        else:
            url = 'https://m.vk.com/audios{}'.format(owner_id)
            page_size = TRACKS_PER_USER_PAGE

        offset = 0
        while True:
            response = self._vk.http.get(
                url,
                params={'offset': offset},
                allow_redirects=False
            )
            # An empty body means the request was redirected away: no access.
            if not response.text:
                raise AccessDenied(
                    "You don't have permissions to browse user's audio"
                )

            page = scrap_data(response.text, self.user_id)
            if not page:
                break
            for track in page:
                yield track

            offset += page_size

    def get(self, owner_id=None, album_id=None):
        """Return the owner's audio tracks as a list.

        :param owner_id: owner id (negative values for groups)
        :param album_id: album id
        """
        return list(self.get_iter(owner_id, album_id))

    def get_albums_iter(self, owner_id=None):
        """Yield the owner's albums, fetching them page by page.

        :param owner_id: owner id (negative values for groups)
        """
        if owner_id is None:
            owner_id = self.user_id

        offset = 0
        while True:
            response = self._vk.http.get(
                'https://m.vk.com/audio?act=audio_playlists{}'.format(
                    owner_id
                ),
                params={'offset': offset},
                allow_redirects=False
            )
            if not response.text:
                raise AccessDenied(
                    "You don't have permissions to browse {}'s albums".format(
                        owner_id
                    )
                )

            page = scrap_albums(response.text)
            if not page:
                break
            for album in page:
                yield album

            offset += ALBUMS_PER_USER_PAGE

    def get_albums(self, owner_id=None):
        """Return the owner's albums as a list.

        :param owner_id: owner id (negative values for groups)
        """
        return list(self.get_albums_iter(owner_id))

    def search_user(self, owner_id=None, q=''):
        """Search within a single owner's audio tracks.

        :param owner_id: owner id (negative values for groups)
        :param q: search query
        """
        if owner_id is None:
            owner_id = self.user_id

        response = self._vk.http.get(
            'https://m.vk.com/audio',
            params={'id': owner_id, 'q': q},
            allow_redirects=False
        )
        if not response.text:
            raise AccessDenied(
                "You don't have permissions to browse {}'s audio".format(
                    owner_id
                )
            )

        # The page may mix in global matches; keep only the owner's tracks.
        return [
            track for track in scrap_data(response.text, self.user_id)
            if track['owner_id'] == owner_id
        ]

    def search(self, q='', offset=0):
        """Search audio tracks globally.

        :param q: search query
        :param offset: result offset
        """
        response = self._vk.http.get(
            'https://m.vk.com/audio',
            params={'act': 'search', 'q': q, 'offset': offset}
        )
        return scrap_data(response.text, self.user_id)
def scrap_data(html, user_id):
    """Extract the list of audio tracks from an m.vk.com HTML page."""
    soup = BeautifulSoup(html, 'html.parser')

    tracks = []
    for item in soup.find_all('div', {'class': 'audio_item'}):
        # Deleted/blocked tracks are still rendered, but carry an extra
        # "disabled" class; skip them.
        if 'audio_item_disabled' in item['class']:
            continue

        owner_id, audio_id = (
            int(group) for group in RE_AUDIO_ID.search(item['id']).groups()
        )

        link = item.select_one('.ai_body').input['value']
        if 'audio_api_unavailable' in link:
            # The raw value is obfuscated; decode it with the user's id.
            link = decode_audio_url(link, user_id)

        tracks.append({
            'id': audio_id,
            'owner_id': owner_id,
            'url': link,
            'artist': item.select_one('.ai_artist').text,
            'title': item.select_one('.ai_title').text,
            'duration': int(item.select_one('.ai_dur')['data-dur']),
        })

    return tracks
def scrap_albums(html):
    """Extract the list of albums (playlists) from an m.vk.com HTML page."""
    soup = BeautifulSoup(html, 'html.parser')

    albums = []
    for block in soup.find_all('div', {'class': 'audioPlaylistsPage__item'}):
        link = block.select_one('.audioPlaylistsPage__itemLink')['href']
        owner_id, album_id = (
            int(group) for group in RE_ALBUM_ID.search(link).groups()
        )

        # The stats line begins with the play count.
        stats_text = block.select_one('.audioPlaylistsPage__stats').text

        albums.append({
            'id': album_id,
            'owner_id': owner_id,
            'url': 'https://m.vk.com/audio?act=audio_playlist{}_{}'.format(
                owner_id, album_id
            ),
            'title': block.select_one('.audioPlaylistsPage__title').text,
            'plays': int(stats_text.split(maxsplit=1)[0])
        })

    return albums
| 26.433735 | 80 | 0.538438 |
81529226102191fb9696865cfdb664de15adaf8e | 2,032 | py | Python | patrace/python/patracetools/patracetools/change_msaa_samples.py | ylz-at/patrace | b42ba9189616dc6c1599e40a702a90bb974e23f7 | [
"MIT"
] | 1 | 2019-10-13T16:57:52.000Z | 2019-10-13T16:57:52.000Z | patrace/python/patracetools/patracetools/change_msaa_samples.py | xiangruipuzhao/patrace | db444184177277b23e1de93fd320493628aae850 | [
"MIT"
] | null | null | null | patrace/python/patracetools/patracetools/change_msaa_samples.py | xiangruipuzhao/patrace | db444184177277b23e1de93fd320493628aae850 | [
"MIT"
] | 1 | 2021-04-15T17:34:35.000Z | 2021-04-15T17:34:35.000Z | #!/usr/bin/env python2
import argparse
import json
try:
from patrace import InputFile, OutputFile
except ImportError:
print 'patrace (Python interface of PATrace SDK) is required.'
def _report(fbo, original_samples, samples):
    """Print a short summary of one modified multisampled allocation."""
    # Single-argument print() behaves identically under Python 2 and 3.
    print('fbo: ' + str(fbo))
    print('original samples: ' + str(original_samples))
    print('new samples: ' + str(samples))
    print('---')


def change_msaa_samples(input, output, samples):
    """Copy every call from *input* to *output*, overriding the MSAA sample
    count of multisampled renderbuffer/texture allocations.

    Tracks which framebuffer object is currently bound so the affected FBOs
    can be reported.

    :param input: opened source trace (provides ``jsonHeader`` and ``Calls()``)
    :param output: destination trace (``jsonHeader`` attribute, ``WriteCall()``)
    :param samples: new MSAA sample count to write into matching calls
    :return: set of FBO names whose attachments were modified
    """
    # Note: 'input'/'output' shadow builtins, but renaming them would break
    # callers that pass these parameters by keyword.
    header = json.loads(input.jsonHeader)
    output.jsonHeader = json.dumps(header)

    current_fbo = -1  # -1 until the trace binds its first framebuffer
    modified_fbos = set()

    for call in input.Calls():
        if call.name == 'glBindFramebuffer':
            current_fbo = call.args[1].asInt
        elif call.name == 'glRenderbufferStorageMultisampleEXT':
            # Sample count is the second argument.
            original_samples = call.args[1].asInt
            call.args[1].asInt = samples
            modified_fbos.add(current_fbo)
            _report(current_fbo, original_samples, samples)
        elif call.name == 'glFramebufferTexture2DMultisampleEXT':
            # Sample count is the sixth argument.
            original_samples = call.args[5].asInt
            call.args[5].asInt = samples
            modified_fbos.add(current_fbo)
            _report(current_fbo, original_samples, samples)

        # Every call, modified or not, is forwarded to the output trace.
        output.WriteCall(call)

    return modified_fbos
def main():
    """Command-line entry point: rewrite MSAA sample counts in a .pat trace."""
    # NOTE: the previous description/help text was copy-pasted from a
    # thread-id remapping tool; corrected to describe this script.
    parser = argparse.ArgumentParser(description='Change the MSAA sample count in a .pat trace')
    parser.add_argument('file', help='Path to the .pat trace file')
    parser.add_argument('--newfile', default='trace_out.pat', help="Specifies the path to the output trace file")
    parser.add_argument('--samples', type=int, default=0, help="New MSAA sample count to write into the trace", choices=[0, 2, 4, 8, 16])
    args = parser.parse_args()
    with InputFile(args.file) as input:
        with OutputFile(args.newfile) as output:
            modified_fbos = change_msaa_samples(input, output, args.samples)
    print('Modified FBOs: ' + str(modified_fbos))


if __name__ == '__main__':
    main()
| 34.440678 | 139 | 0.641732 |
4e78e1ed314e89bd90bf7a73014ac7b3a5027b98 | 12,680 | py | Python | dataset/my_dataset_nocs.py | pairlab/6pack | 47ea2dbfc48dbe239d8a52517c7e46345b7b57b8 | [
"MIT"
] | 4 | 2020-05-13T11:42:29.000Z | 2021-02-26T18:19:57.000Z | dataset/my_dataset_nocs.py | pairlab/6pack | 47ea2dbfc48dbe239d8a52517c7e46345b7b57b8 | [
"MIT"
] | null | null | null | dataset/my_dataset_nocs.py | pairlab/6pack | 47ea2dbfc48dbe239d8a52517c7e46345b7b57b8 | [
"MIT"
] | 1 | 2021-02-26T18:19:58.000Z | 2021-02-26T18:19:58.000Z | import os
import os.path
import torch
import numpy as np
import torchvision.transforms as transforms
from transformations import euler_matrix
from torch.autograd import Variable
import argparse
import time
import random
import numpy.ma as ma
import copy
import math
import scipy.misc
import scipy.io as scio
import cv2
from PIL import Image
class Dataset():
    """Paired-frame RGB-D dataset for object pose tracking.

    NOTE(review): looks like a 6-PACK-style tracking dataset on NOCS-format
    data -- frame pairs live in numbered subdirectories of `img_dir`, each
    holding color_fr/depth_fr, color_to/depth_to and a pose.txt; confirm
    against the data generator.
    """

    def __init__(self, num_pts, img_dir, img_num):
        # Number of 3-D points sampled from each frame's depth cloud.
        self.num_pts = num_pts
        # Number of frame-pair subdirectories available under img_dir.
        self.img_num = img_num
        # Pinhole intrinsics for a 640x480 image; presumably a NOCS real
        # camera calibration -- TODO confirm.
        self.cam_cx = 321.24099379
        self.cam_cy = 237.11014479
        self.cam_fx = 537.99040688
        self.cam_fy = 539.09122804
        self.cam_scale = 1.0
        # Per-pixel row (xmap) and column (ymap) index maps, used to
        # back-project depth pixels into 3-D.
        self.xmap = np.array([[j for i in range(640)] for j in range(480)])
        self.ymap = np.array([[i for i in range(640)] for j in range(480)])
        # RGB palette used when painting projected box points in projection().
        self.color = np.array([[255, 69, 0], [124, 252, 0], [0, 238, 238], [238, 238, 0], [155, 48, 255], [0, 0, 238], [255, 131, 250], [189, 183, 107], [165, 42, 42], [0, 234, 0]])
        # Standard ImageNet normalization applied to the RGB crop.
        self.norm = transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
        # 8-corner object bounding box (model frame); placeholder until
        # add_bbox() is called.
        self.bbox = [[0.0, 0.0, 0.0] for k in range(8)]
        self.choose_obj = ''
        # Running counter used to name files written by projection().
        self.index = 0
        self.video_id = ''
        self.img_dir = img_dir

    def add_bbox(self, bbox):
        """Set the object's 8-corner 3-D bounding box."""
        self.bbox = np.array(bbox)

    def divide_scale(self, scale, pts):
        """Divide each axis of `pts` by the matching entry of `scale`.

        NOTE: mutates `pts` in place and also returns it.
        """
        pts[:, 0] = pts[:, 0] / scale[0]
        pts[:, 1] = pts[:, 1] / scale[1]
        pts[:, 2] = pts[:, 2] / scale[2]
        return pts

    def get_anchor_box(self, ori_bbox):
        """Build a 5x5x5 grid of anchor points spanning the bbox extents.

        Returns (anchors, scale) where `scale` holds the per-axis half-extent
        magnitudes and the anchors are normalized by it.
        """
        bbox = ori_bbox
        limit = np.array(search_fit(bbox))
        num_per_axis = 5
        gap_max = num_per_axis - 1
        gap_x = (limit[1] - limit[0]) / float(gap_max)
        gap_y = (limit[3] - limit[2]) / float(gap_max)
        gap_z = (limit[5] - limit[4]) / float(gap_max)
        ans = []
        scale = [max(limit[1], -limit[0]), max(limit[3], -limit[2]), max(limit[5], -limit[4])]
        for i in range(0, num_per_axis):
            for j in range(0, num_per_axis):
                for k in range(0, num_per_axis):
                    ans.append([limit[0] + i * gap_x, limit[2] + j * gap_y, limit[4] + k * gap_z])
        ans = np.array(ans)
        scale = np.array(scale)
        ans = self.divide_scale(scale, ans)
        return ans, scale

    def change_to_scale(self, scale, cloud_fr):
        """Normalize a point cloud by the per-axis scale (mutates in place)."""
        cloud_fr = self.divide_scale(scale, cloud_fr)
        return cloud_fr

    def re_scale(self, target_fr, target_to):
        """Ratio between two bounding boxes.

        NOTE(review): only element [0][0] of the elementwise ratio is kept,
        i.e. the boxes are assumed proportional -- verify at call sites.
        """
        ans_scale = target_fr / target_to
        ans_target = target_fr
        ans_scale = ans_scale[0][0]
        return ans_target, ans_scale

    def enlarge_bbox(self, target):
        """Stretch each axis of the 8-corner bbox so every side equals 1.3x
        the longest side, giving a cubic search region around the object.

        NOTE: mutates `target` in place and also returns it.
        """
        limit = np.array(search_fit(target))
        longest = max(limit[1]-limit[0], limit[3]-limit[2], limit[5]-limit[4])
        longest = longest * 1.3
        scale1 = longest / (limit[1]-limit[0])
        scale2 = longest / (limit[3]-limit[2])
        scale3 = longest / (limit[5]-limit[4])
        target[:, 0] *= scale1
        target[:, 1] *= scale2
        target[:, 2] *= scale3
        return target

    def load_depth(self, depth_path):
        """Read a depth image as uint16.

        Supports the NOCS 3-channel encoding (16-bit depth packed into two
        8-bit channels) as well as a plain single-channel uint16 map.
        """
        depth = cv2.imread(depth_path, -1)
        if len(depth.shape) == 3:
            # NOCS packing: high byte in channel 1, low byte in channel 2.
            depth16 = np.uint16(depth[:, :, 1]*256) + np.uint16(depth[:, :, 2])
            depth16 = depth16.astype(np.uint16)
        elif len(depth.shape) == 2 and depth.dtype == 'uint16':
            depth16 = depth
        else:
            assert False, '[ Error ]: Unsupported depth type.'
        return depth16

    def getone(self, img_dir, depth_dir, current_r, current_t):
        """Load one frame and produce the network inputs.

        Returns torch tensors:
        (normalized RGB crop, chosen pixel indices, object-frame point cloud,
         rotation, translation, anchor grid, per-axis scale).
        """
        img = Image.open(img_dir)
        depth = self.load_depth(depth_dir)
        target_r = current_r
        target_t = current_t
        cam_cx = self.cam_cx
        cam_cy = self.cam_cy
        cam_fx = self.cam_fx
        cam_fy = self.cam_fy
        cam_scale = self.cam_scale
        # Enlarged copy of the object bbox defines the 2-D crop and the
        # 3-D point filter (deepcopy because enlarge_bbox mutates).
        target = self.bbox
        target = self.enlarge_bbox(copy.deepcopy(target))
        # Bbox corners in camera frame; x/y sign flip matches the -pt0/-pt1
        # convention used for the back-projected cloud below.
        target_tmp = np.dot(target, target_r.T) + target_t
        target_tmp[:, 0] *= -1.0
        target_tmp[:, 1] *= -1.0
        rmin, rmax, cmin, cmax = get_2dbbox(target_tmp, cam_cx, cam_cy, cam_fx, cam_fy, cam_scale)
        limit = search_fit(target)
        # Crop RGB to the 2-D box, channels-first, scaled to [0, 1].
        img = np.transpose(np.array(img)[:, :, :3], (2, 0, 1))[:, rmin:rmax, cmin:cmax]
        img = img / 255.0
        depth = depth[rmin:rmax, cmin:cmax]
        # First pass: back-project every pixel of the crop (the > -10000.0
        # test is always true) to decide which points fall inside the bbox.
        choose = (depth.flatten() > -10000.0).nonzero()[0]
        depth_masked = depth.flatten()[choose][:, np.newaxis].astype(np.float32)
        xmap_masked = self.xmap[rmin:rmax, cmin:cmax].flatten()[choose][:, np.newaxis].astype(np.float32)
        ymap_masked = self.ymap[rmin:rmax, cmin:cmax].flatten()[choose][:, np.newaxis].astype(np.float32)
        pt2 = depth_masked / cam_scale
        pt0 = (ymap_masked - cam_cx) * pt2 / cam_fx
        pt1 = (xmap_masked - cam_cy) * pt2 / cam_fy
        cloud = np.concatenate((-pt0, -pt1, pt2), axis=1)
        # Move the cloud into the object frame to test against bbox limits.
        cloud = np.dot(cloud - target_t, target_r)
        choose_temp = (cloud[:, 0] > limit[0]) * (cloud[:, 0] < limit[1]) * (cloud[:, 1] > limit[2]) * (cloud[:, 1] < limit[3]) * (cloud[:, 2] > limit[4]) * (cloud[:, 2] < limit[5])
        # Keep valid-depth pixels whose 3-D point lies inside the bbox.
        choose = ((depth.flatten() != 0.0) * choose_temp).nonzero()[0]
        if len(choose) == 0:
            choose = np.array([0])
        # Downsample (random mask) or wrap-pad to exactly num_pts indices.
        if len(choose) > self.num_pts:
            c_mask = np.zeros(len(choose), dtype=int)
            c_mask[:self.num_pts] = 1
            np.random.shuffle(c_mask)
            choose = choose[c_mask.nonzero()]
        else:
            choose = np.pad(choose, (0, self.num_pts - len(choose)), 'wrap')
        # Second pass: back-project only the selected pixels.
        depth_masked = depth.flatten()[choose][:, np.newaxis].astype(np.float32)
        xmap_masked = self.xmap[rmin:rmax, cmin:cmax].flatten()[choose][:, np.newaxis].astype(np.float32)
        ymap_masked = self.ymap[rmin:rmax, cmin:cmax].flatten()[choose][:, np.newaxis].astype(np.float32)
        pt2 = depth_masked / cam_scale
        pt0 = (ymap_masked - cam_cx) * pt2 / cam_fx
        pt1 = (xmap_masked - cam_cy) * pt2 / cam_fy
        cloud = np.concatenate((-pt0, -pt1, pt2), axis=1)
        choose = np.array([choose])
        cloud = np.dot(cloud - target_t, target_r)
        # Presumably millimetres -> metres -- TODO confirm depth units.
        cloud = cloud / 1000.0
        target = target / 1000.0
        anchor_box, scale = self.get_anchor_box(target)
        # NOTE(review): change_to_scale() mutates `cloud` in place, so the
        # `cloud` returned below is already scale-normalized.
        cloud_fr = self.change_to_scale(scale, cloud)
        return self.norm(torch.from_numpy(img.astype(np.float32))).unsqueeze(0), \
               torch.LongTensor(choose.astype(np.int32)).unsqueeze(0), \
               torch.from_numpy(cloud.astype(np.float32)).unsqueeze(0), \
               torch.from_numpy(current_r.astype(np.float32)), \
               torch.from_numpy(current_t.astype(np.float32)), \
               torch.from_numpy(anchor_box.astype(np.float32)).unsqueeze(0), \
               torch.from_numpy(scale.astype(np.float32)).unsqueeze(0)

    def get_pose(self, pose_fn):
        """Parse pose.txt: line 1 is the translation (3 floats) and lines
        2-4 are the rows of the 3x3 rotation matrix.

        Returns (trans, rot_mat) as numpy arrays.
        """
        rot_mat = []
        with open(pose_fn, 'r') as f:
            lines = f.readlines()
            trans = [float(i) for i in lines[0].split(' ') if i.strip()]
            for j in range(3):
                rot_mat.append([float(i) for i in lines[j+1].split(' ') if i.strip()])
        return np.asarray(trans), np.asarray(rot_mat)

    def __getitem__(self, index):
        """Return one frame pair.

        NOTE(review): `index` is ignored; a random subdirectory in
        [0, img_num] (inclusive upper bound) is chosen instead -- verify a
        directory numbered img_num actually exists.
        """
        dir_index = random.randint(0,self.img_num)
        item_dir = os.path.join(self.img_dir, str(dir_index))
        img_fr_fn = os.path.join(item_dir, "color_fr.png")
        depth_fr_fn = os.path.join(item_dir, "depth_fr.png")
        img_to_fn = os.path.join(item_dir, "color_to.png")
        depth_to_fn = os.path.join(item_dir, "depth_to.png")
        # The "fr" frame uses the identity pose; the "to" frame uses pose.txt.
        base_t = np.zeros(3)
        base_r = np.identity(3)
        pose_fn = os.path.join(item_dir, "pose.txt")
        current_r, current_t = self.get_pose(pose_fn)
        img_fr, choose_fr, cloud_fr, r_fr, t_fr, anchor, scale = self.getone(img_fr_fn, depth_fr_fn, base_r, base_t)
        img_to, choose_to, cloud_to, r_to, t_to, anchor, scale = self.getone(img_to_fn, depth_to_fn, current_r, current_t)
        return img_fr, choose_fr, cloud_fr, r_fr, t_fr, img_to, choose_to, cloud_to, r_to, t_to, anchor, scale

    def build_frame(self, min_x, max_x, min_y, max_y, min_z, max_z):
        """Sample points (spaced 1.0 apart) along the 12 edges of the
        axis-aligned box so it can be drawn as a wireframe."""
        bbox = []
        for i in np.arange(min_x, max_x, 1.0):
            bbox.append([i, min_y, min_z])
        for i in np.arange(min_x, max_x, 1.0):
            bbox.append([i, min_y, max_z])
        for i in np.arange(min_x, max_x, 1.0):
            bbox.append([i, max_y, min_z])
        for i in np.arange(min_x, max_x, 1.0):
            bbox.append([i, max_y, max_z])
        for i in np.arange(min_y, max_y, 1.0):
            bbox.append([min_x, i, min_z])
        for i in np.arange(min_y, max_y, 1.0):
            bbox.append([min_x, i, max_z])
        for i in np.arange(min_y, max_y, 1.0):
            bbox.append([max_x, i, min_z])
        for i in np.arange(min_y, max_y, 1.0):
            bbox.append([max_x, i, max_z])
        for i in np.arange(min_z, max_z, 1.0):
            bbox.append([min_x, min_y, i])
        for i in np.arange(min_z, max_z, 1.0):
            bbox.append([min_x, max_y, i])
        for i in np.arange(min_z, max_z, 1.0):
            bbox.append([max_x, min_y, i])
        for i in np.arange(min_z, max_z, 1.0):
            bbox.append([max_x, max_y, i])
        bbox = np.array(bbox)
        return bbox

    def projection(self, img_dir, current_r, current_t):
        """Draw the object's wireframe bbox under the given pose onto the
        image and save both the image and the pose under results/."""
        img = np.array(Image.open(img_dir))
        cam_cx = self.cam_cx
        cam_cy = self.cam_cy
        cam_fx = self.cam_fx
        cam_fy = self.cam_fy
        cam_scale = self.cam_scale
        target_r = current_r
        target_t = current_t
        target = self.bbox
        limit = search_fit(target)
        bbox = self.build_frame(limit[0], limit[1], limit[2], limit[3], limit[4], limit[5])
        # Wireframe points into camera frame; same x/y flip as getone().
        bbox = np.dot(bbox, target_r.T) + target_t
        bbox[:, 0] *= -1.0
        bbox[:, 1] *= -1.0
        # Dump the pose (3 rotation rows, then the translation).
        fw = open('results/{0}_pose.txt'.format(self.index), 'w')
        for it in target_r:
            fw.write('{0} {1} {2}\n'.format(it[0], it[1], it[2]))
        it = target_t
        fw.write('{0} {1} {2}\n'.format(it[0], it[1], it[2]))
        fw.close()
        for tg in bbox:
            # Project each wireframe point; skip ones near the image border,
            # then paint a 5x5 patch at the projected pixel.
            y = int(tg[0] * cam_fx / tg[2] + cam_cx)
            x = int(tg[1] * cam_fy / tg[2] + cam_cy)
            if x - 3 < 0 or x + 3 > 479 or y - 3 < 0 or y + 3 > 639:
                continue
            for xxx in range(x-2, x+3):
                for yyy in range(y-2, y+3):
                    img[xxx][yyy] = self.color[1]
        scipy.misc.imsave('results/{0}.png'.format(self.index), img)
        self.index += 1
# Allowed crop sizes: a 2-D bbox side is rounded up to the next entry here.
border_list = [-1, 40, 80, 120, 160, 200, 240, 280, 320, 360, 400, 440, 480, 520, 560, 600, 640, 680]
# Image dimensions (rows x columns).
img_width = 480
img_length = 640


def _snap_to_border(size):
    """Round `size` up to the next border_list entry it falls strictly between."""
    for low, high in zip(border_list, border_list[1:]):
        if low < size < high:
            return high
    return size


def get_2dbbox(cloud, cam_cx, cam_cy, cam_fx, cam_fy, cam_scale):
    """Project 3-D points into the image and return a snapped, re-centered
    2-D bounding box as (rmin, rmax, cmin, cmax)."""
    rmin = cmin = 10000
    rmax = cmax = -10000
    for pt in cloud:
        col = int(pt[0] * cam_fx / pt[2] + cam_cx)
        row = int(pt[1] * cam_fy / pt[2] + cam_cy)
        rmin = min(rmin, row)
        rmax = max(rmax, row)
        cmin = min(cmin, col)
        cmax = max(cmax, col)
    # Half-open upper bounds, clamped to the image.
    rmax += 1
    cmax += 1
    rmin = max(rmin, 0)
    cmin = max(cmin, 0)
    rmax = min(rmax, 479)
    cmax = min(cmax, 639)
    # Snap each side length up to the next allowed size, then rebuild the
    # box around its center.
    r_b = _snap_to_border(rmax - rmin)
    c_b = _snap_to_border(cmax - cmin)
    center_r = int((rmin + rmax) / 2)
    center_c = int((cmin + cmax) / 2)
    rmin = center_r - int(r_b / 2)
    rmax = center_r + int(r_b / 2)
    cmin = center_c - int(c_b / 2)
    cmax = center_c + int(c_b / 2)
    # Shift the box back inside the image if re-centering pushed it out.
    if rmin < 0:
        rmax -= rmin
        rmin = 0
    if cmin < 0:
        cmax -= cmin
        cmin = 0
    if rmax > img_width:
        rmin -= rmax - img_width
        rmax = img_width
    if cmax > img_length:
        cmin -= cmax - img_length
        cmax = img_length
    return rmin, rmax, cmin, cmax
def search_fit(points):
    """Axis-aligned extents of an (N, 3) point array as
    [min_x, max_x, min_y, max_y, min_z, max_z]."""
    limits = []
    for axis in range(3):
        column = points[:, axis]
        limits.append(min(column))
        limits.append(max(column))
    return limits
| 32.764858 | 181 | 0.555836 |
b5c84caf2073e9d8b0d4bd1b7961c722e47701df | 963 | py | Python | setup.py | datature/hub | d3abe283887a7e56df9270aa5508834a329e5b80 | [
"MIT"
] | 6 | 2021-04-22T09:25:40.000Z | 2021-08-02T13:49:06.000Z | setup.py | datature/hub | d3abe283887a7e56df9270aa5508834a329e5b80 | [
"MIT"
] | 1 | 2021-06-04T04:19:38.000Z | 2021-06-04T04:19:38.000Z | setup.py | datature/hub | d3abe283887a7e56df9270aa5508834a329e5b80 | [
"MIT"
] | null | null | null | import setuptools
with open("longdescription.md", "r") as fh:
long_description = fh.read()
setuptools.setup(
name="datature-hub",
version="0.2.1",
author="Ian Duncan",
author_email="ian@datature.io",
description="Loader for models trained on the Datature platform",
include_package_data=True,
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/datature/hub",
packages=setuptools.find_packages("src"),
package_dir={"": "src"},
python_requires=">=3.6, <3.9",
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
"Intended Audience :: Science/Research",
],
install_requires=[
"tensorflow==2.3.0",
"requests>=2.25.1",
"opencv-python==4.5.1.48",
"numpy>=1.16.0,<1.19.0",
"Pillow~=8.2",
],
)
| 29.181818 | 69 | 0.619938 |
4e1aef45e4a8ecef861e449e17b1874814408074 | 36,487 | py | Python | thermo/joback.py | tedhyu/thermo | 1966c7cba5a603984b49f22c97ff00a144d90812 | [
"MIT"
] | 1 | 2021-03-05T23:39:47.000Z | 2021-03-05T23:39:47.000Z | thermo/joback.py | tedhyu/thermo | 1966c7cba5a603984b49f22c97ff00a144d90812 | [
"MIT"
] | 1 | 2021-12-17T21:28:17.000Z | 2021-12-17T21:28:17.000Z | thermo/joback.py | tedhyu/thermo | 1966c7cba5a603984b49f22c97ff00a144d90812 | [
"MIT"
] | 1 | 2022-01-18T16:14:59.000Z | 2022-01-18T16:14:59.000Z | # -*- coding: utf-8 -*-
'''Chemical Engineering Design Library (ChEDL). Utilities for process modeling.
Copyright (C) 2017, Caleb Bell <Caleb.Andrew.Bell@gmail.com>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.'''
from __future__ import division
__all__ = ['smarts_fragment', 'Joback', 'J_BIGGS_JOBACK_SMARTS',
'J_BIGGS_JOBACK_SMARTS_id_dict']
from collections import namedtuple, Counter
from pprint import pprint
from thermo.utils import to_num, horner, exp
# RDKit is an optional dependency; fragmentation features require it.
try:
    from rdkit import Chem
    from rdkit.Chem import Descriptors
    from rdkit.Chem import AllChem
    from rdkit.Chem import rdMolDescriptors
    hasRDKit = True
except ImportError:  # pragma: no cover
    # Narrowed from a bare `except:` which would also swallow
    # KeyboardInterrupt/SystemExit.
    hasRDKit = False

# Error message raised by functions that need RDKit when it is absent.
rdkit_missing = 'RDKit is not installed; it is required to use this functionality'
# SMARTS patterns matching the 41 Joback groups, developed by Dr. Jason
# Biggs (posted on the rdkit-discuss mailing list). Each entry is
# [description, Joback group label, SMARTS pattern].
J_BIGGS_JOBACK_SMARTS = [["Methyl","-CH3", "[CX4H3]"],
["Secondary acyclic", "-CH2-", "[!R;CX4H2]"],
["Tertiary acyclic",">CH-", "[!R;CX4H]"],
["Quaternary acyclic", ">C<", "[!R;CX4H0]"],

["Primary alkene", "=CH2", "[CX3H2]"],
["Secondary alkene acyclic", "=CH-", "[!R;CX3H1;!$([CX3H1](=O))]"],
["Tertiary alkene acyclic", "=C<", "[$([!R;#6X3H0]);!$([!R;#6X3H0]=[#8])]"],
["Cumulative alkene", "=C=", "[$([CX2H0](=*)=*)]"],
["Terminal alkyne", u"≡CH","[$([CX2H1]#[!#7])]"],
["Internal alkyne",u"≡C-","[$([CX2H0]#[!#7])]"],

["Secondary cyclic", "-CH2- (ring)", "[R;CX4H2]"],
["Tertiary cyclic", ">CH- (ring)", "[R;CX4H]"],
["Quaternary cyclic", ">C< (ring)", "[R;CX4H0]"],

["Secondary alkene cyclic", "=CH- (ring)", "[R;CX3H1,cX3H1]"],
["Tertiary alkene cyclic", "=C< (ring)","[$([R;#6X3H0]);!$([R;#6X3H0]=[#8])]"],

["Fluoro", "-F", "[F]"],
["Chloro", "-Cl", "[Cl]"],
["Bromo", "-Br", "[Br]"],
["Iodo", "-I", "[I]"],

["Alcohol","-OH (alcohol)", "[OX2H;!$([OX2H]-[#6]=[O]);!$([OX2H]-a)]"],
["Phenol","-OH (phenol)", "[$([OX2H]-a)]"],
["Ether acyclic", "-O- (nonring)", "[OX2H0;!R;!$([OX2H0]-[#6]=[#8])]"],
["Ether cyclic", "-O- (ring)", "[#8X2H0;R;!$([#8X2H0]~[#6]=[#8])]"],
["Carbonyl acyclic", ">C=O (nonring)","[$([CX3H0](=[OX1]));!$([CX3](=[OX1])-[OX2]);!R]=O"],
["Carbonyl cyclic", ">C=O (ring)","[$([#6X3H0](=[OX1]));!$([#6X3](=[#8X1])~[#8X2]);R]=O"],
["Aldehyde","O=CH- (aldehyde)","[CX3H1](=O)"],
["Carboxylic acid", "-COOH (acid)", "[OX2H]-[C]=O"],
["Ester", "-COO- (ester)", "[#6X3H0;!$([#6X3H0](~O)(~O)(~O))](=[#8X1])[#8X2H0]"],
["Oxygen double bond other", "=O (other than above)","[OX1H0;!$([OX1H0]~[#6X3]);!$([OX1H0]~[#7X3]~[#8])]"],

["Primary amino","-NH2", "[NX3H2]"],
["Secondary amino acyclic",">NH (nonring)", "[NX3H1;!R]"],
["Secondary amino cyclic",">NH (ring)", "[#7X3H1;R]"],
["Tertiary amino", ">N- (nonring)","[#7X3H0;!$([#7](~O)~O)]"],
["Imine acyclic","-N= (nonring)","[#7X2H0;!R]"],
["Imine cyclic","-N= (ring)","[#7X2H0;R]"],
["Aldimine", "=NH", "[#7X2H1]"],
["Cyano", "-CN","[#6X2]#[#7X1H0]"],
["Nitro", "-NO2", "[$([#7X3,#7X3+][!#8])](=[O])~[O-]"],

["Thiol", "-SH", "[SX2H]"],
["Thioether acyclic", "-S- (nonring)", "[#16X2H0;!R]"],
["Thioether cyclic", "-S- (ring)", "[#16X2H0;R]"]]
# Lookup tables: 1-based group id -> SMARTS, and group label -> SMARTS.
J_BIGGS_JOBACK_SMARTS_id_dict = {i+1: j[2] for i, j in enumerate(J_BIGGS_JOBACK_SMARTS)}
J_BIGGS_JOBACK_SMARTS_str_dict = {i[1]: i[2] for i in J_BIGGS_JOBACK_SMARTS}
# Alternative SMARTS set from Shi Chenyang's JRGUI code; per that code these
# were abandoned in favor of the J. Biggs patterns above. Kept for reference.
SHI_CHENYANG_JOBACK_SMARTS = [
("-CH3", "[CH3;A;X4;!R]"),
("-CH2-", "[CH2;A;X4;!R]"),
(">CH-", "[CH1;A;X4;!R]"),
(">C<", "[CH0;A;X4;!R]"),
("=CH2", "[CH2;A;X3;!R]"),
("=CH-", "[CH1;A;X3;!R]"),
("=C<", "[CH0;A;X3;!R]"),
("=C=", "[$([CH0;A;X2;!R](=*)=*)]"),
("≡CH", "[$([CH1;A;X2;!R]#*)]"),
("≡C-", "[$([CH0;A;X2;!R]#*)]"),
("-CH2- (ring)", "[CH2;A;X4;R]"),
(">CH- (ring)", "[CH1;A;X4;R]"),
(">C< (ring)", "[CH0;A;X4;R]"),
("=CH- (ring)", "[CH1;X3;R]"),
("=C< (ring)", "[CH0;X3;R]"),
("-F", "[F]"),
("-Cl", "[Cl]"),
("-Br", "[Br]"),
("-I", "[I]"),
("-OH (alcohol)", "[O;H1;$(O-!@[C;!$(C=!@[O,N,S])])]"),
("-OH (phenol)", "[O;H1;$(O-!@c)]"),
("-O- (nonring)", "[OH0;X2;!R]"),
("-O- (ring)", "[OH0;X2;R]"),
(">C=O (nonring)", "[CH0;A;X3;!R]=O"),
(">C=O (ring)", "[CH0;A;X3;R]=O"),
("O=CH- (aldehyde)", "[CH;D2;$(C-!@C)](=O)"),
("-COOH (acid)", "[$(C-!@[A;!O])](=O)([O;H,-])"),
("-COO- (ester)", "C(=O)[OH0]"),
("=O (other than above)", "[OX1]"),
("-NH2", "[NH2;X3]"),
(">NH (nonring)", "[NH1;X3;!R]"),
(">NH (ring)", "[NH1;X3;R]"),
(">N- (nonring)", "[NH0;X3;!R]"),
("-N= (nonring)", "[NH0;X2;!R]"),
("-N= (ring)", "[NH0;X2;R]"),
("=NH", "[NH1;X2]"),
("-CN", "C#N"),
("-NO2", "N(=O)=O"),
("-SH", "[SH1]"),
("-S- (nonring)", "[SH0;!R]"),
("-S- (ring)", "[SH0;R]")]
# Lookup tables mirroring the J. Biggs ones above.
SHI_CHENYANG_JOBACK_SMARTS_id_dict = {i+1: j[1] for i, j in enumerate(SHI_CHENYANG_JOBACK_SMARTS)}
SHI_CHENYANG_JOBACK_SMARTS_str_dict = {i[0]: i[1] for i in SHI_CHENYANG_JOBACK_SMARTS}
# Joback group-contribution table: one row per group, tab-separated fields
# (the parser below splits on '\t'): group label, then Tc, Pc, Vc, Tb, Tm,
# Hform, Gform, Cpa-Cpd, Hfus, Hvap, mua, mub. 'n. a.' marks missing values.
joback_data_txt = u'''-CH3	0.0141	-0.0012	65	23.58	-5.10	-76.45	-43.96	1.95E+1	-8.08E-3	1.53E-4	-9.67E-8	0.908	2.373	548.29	-1.719
-CH2-	0.0189	0.0000	56	22.88	11.27	-20.64	8.42	-9.09E-1	9.50E-2	-5.44E-5	1.19E-8	2.590	2.226	94.16	-0.199
>CH-	0.0164	0.0020	41	21.74	12.64	29.89	58.36	-2.30E+1	2.04E-1	-2.65E-4	1.20E-7	0.749	1.691	-322.15	1.187
>C<	0.0067	0.0043	27	18.25	46.43	82.23	116.02	-6.62E+1	4.27E-1	-6.41E-4	3.01E-7	-1.460	0.636	-573.56	2.307
=CH2	0.0113	-0.0028	56	18.18	-4.32	-9.630	3.77	2.36E+1	-3.81E-2	1.72E-4	-1.03E-7	-0.473	1.724	495.01	-1.539
=CH-	0.0129	-0.0006	46	24.96	8.73	37.97	48.53	-8.00	1.05E-1	-9.63E-5	3.56E-8	2.691	2.205	82.28	-0.242
=C<	0.0117	0.0011	38	24.14	11.14	83.99	92.36	-2.81E+1	2.08E-1	-3.06E-4	1.46E-7	3.063	2.138	n. a.	n. a.
=C=	0.0026	0.0028	36	26.15	17.78	142.14	136.70	2.74E+1	-5.57E-2	1.01E-4	-5.02E-8	4.720	2.661	n. a.	n. a.
≡CH	0.0027	-0.0008	46	9.20	-11.18	79.30	77.71	2.45E+1	-2.71E-2	1.11E-4	-6.78E-8	2.322	1.155	n. a.	n. a.
≡C-	0.0020	0.0016	37	27.38	64.32	115.51	109.82	7.87	2.01E-2	-8.33E-6	1.39E-9	4.151	3.302	n. a.	n. a.
-CH2- (ring)	0.0100	0.0025	48	27.15	7.75	-26.80	-3.68	-6.03	8.54E-2	-8.00E-6	-1.80E-8	0.490	2.398	307.53	-0.798
>CH- (ring)	0.0122	0.0004	38	21.78	19.88	8.67	40.99	-2.05E+1	1.62E-1	-1.60E-4	6.24E-8	3.243	1.942	-394.29	1.251
>C< (ring)	0.0042	0.0061	27	21.32	60.15	79.72	87.88	-9.09E+1	5.57E-1	-9.00E-4	4.69E-7	-1.373	0.644	n. a.	n. a.
=CH- (ring)	0.0082	0.0011	41	26.73	8.13	2.09	11.30	-2.14	5.74E-2	-1.64E-6	-1.59E-8	1.101	2.544	259.65	-0.702
=C< (ring)	0.0143	0.0008	32	31.01	37.02	46.43	54.05	-8.25	1.01E-1	-1.42E-4	6.78E-8	2.394	3.059	-245.74	0.912
-F	0.0111	-0.0057	27	-0.03	-15.78	-251.92	-247.19	2.65E+1	-9.13E-2	1.91E-4	-1.03E-7	1.398	-0.670	n. a.	n. a.
-Cl	0.0105	-0.0049	58	38.13	13.55	-71.55	-64.31	3.33E+1	-9.63E-2	1.87E-4	-9.96E-8	2.515	4.532	625.45	-1.814
-Br	0.0133	0.0057	71	66.86	43.43	-29.48	-38.06	2.86E+1	-6.49E-2	1.36E-4	-7.45E-8	3.603	6.582	738.91	-2.038
-I	0.0068	-0.0034	97	93.84	41.69	21.06	5.74	3.21E+1	-6.41E-2	1.26E-4	-6.87E-8	2.724	9.520	809.55	-2.224
-OH (alcohol)	0.0741	0.0112	28	92.88	44.45	-208.04	-189.20	2.57E+1	-6.91E-2	1.77E-4	-9.88E-8	2.406	16.826	2173.72	-5.057
-OH (phenol)	0.0240	0.0184	-25	76.34	82.83	-221.65	-197.37	-2.81	1.11E-1	-1.16E-4	4.94E-8	4.490	12.499	3018.17	-7.314
-O- (nonring)	0.0168	0.0015	18	22.42	22.23	-132.22	-105.00	2.55E+1	-6.32E-2	1.11E-4	-5.48E-8	1.188	2.410	122.09	-0.386
-O- (ring)	0.0098	0.0048	13	31.22	23.05	-138.16	-98.22	1.22E+1	-1.26E-2	6.03E-5	-3.86E-8	5.879	4.682	440.24	-0.953
>C=O (nonring)	0.0380	0.0031	62	76.75	61.20	-133.22	-120.50	6.45	6.70E-2	-3.57E-5	2.86E-9	4.189	8.972	340.35	-0.350
>C=O (ring)	0.0284	0.0028	55	94.97	75.97	-164.50	-126.27	3.04E+1	-8.29E-2	2.36E-4	-1.31E-7	0.	6.645	n. a.	n. a.
O=CH- (aldehyde)	0.0379	0.0030	82	72.24	36.90	-162.03	-143.48	3.09E+1	-3.36E-2	1.60E-4	-9.88E-8	3.197	9.093	740.92	-1.713
-COOH (acid)	0.0791	0.0077	89	169.09	155.50	-426.72	-387.87	2.41E+1	4.27E-2	8.04E-5	-6.87E-8	11.051	19.537	1317.23	-2.578
-COO- (ester)	0.0481	0.0005	82	81.10	53.60	-337.92	-301.95	2.45E+1	4.02E-2	4.02E-5	-4.52E-8	6.959	9.633	483.88	-0.966
=O (other than above)	0.0143	0.0101	36	-10.50	2.08	-247.61	-250.83	6.82	1.96E-2	1.27E-5	-1.78E-8	3.624	5.909	675.24	-1.340
-NH2	0.0243	0.0109	38	73.23	66.89	-22.02	14.07	2.69E+1	-4.12E-2	1.64E-4	-9.76E-8	3.515	10.788	n. a.	n. a.
>NH (nonring)	0.0295	0.0077	35	50.17	52.66	53.47	89.39	-1.21	7.62E-2	-4.86E-5	1.05E-8	5.099	6.436	n. a.	n. a.
>NH (ring)	0.0130	0.0114	29	52.82	101.51	31.65	75.61	1.18E+1	-2.30E-2	1.07E-4	-6.28E-8	7.490	6.930	n. a.	n. a.
>N- (nonring)	0.0169	0.0074	9	11.74	48.84	123.34	163.16	-3.11E+1	2.27E-1	-3.20E-4	1.46E-7	4.703	1.896	n. a.	n. a.
-N= (nonring)	0.0255	-0.0099	n. a.	74.60	n. a.	23.61	n. a.	n. a.	n. a.	n. a.	n. a.	n. a.	3.335	n. a.	n. a.
-N= (ring)	0.0085	0.0076	34	57.55	68.40	55.52	79.93	8.83	-3.84E-3	4.35E-5	-2.60E-8	3.649	6.528	n. a.	n. a.
=NH	n. a.	n. a.	n. a.	83.08	68.91	93.70	119.66	5.69	-4.12E-3	1.28E-4	-8.88E-8	n. a.	12.169	n. a.	n. a.
-CN	0.0496	-0.0101	91	125.66	59.89	88.43	89.22	3.65E+1	-7.33E-2	1.84E-4	-1.03E-7	2.414	12.851	n. a.	n. a.
-NO2	0.0437	0.0064	91	152.54	127.24	-66.57	-16.83	2.59E+1	-3.74E-3	1.29E-4	-8.88E-8	9.679	16.738	n. a.	n. a.
-SH	0.0031	0.0084	63	63.56	20.09	-17.33	-22.99	3.53E+1	-7.58E-2	1.85E-4	-1.03E-7	2.360	6.884	n. a.	n. a.
-S- (nonring)	0.0119	0.0049	54	68.78	34.40	41.87	33.12	1.96E+1	-5.61E-3	4.02E-5	-2.76E-8	4.130	6.817	n. a.	n. a.
-S- (ring)	0.0019	0.0051	38	52.10	79.93	39.10	27.76	1.67E+1	4.81E-3	2.77E-5	-2.11E-8	1.557	5.984	n. a.	n. a.'''

# Parse the table into per-group records. `to_num` converts numeric fields
# (leaving the group-label string and 'n. a.' entries for it to handle).
joback_groups_str_dict = {}
joback_groups_id_dict = {}
JOBACK = namedtuple('JOBACK', 'i, name, Tc, Pc, Vc, Tb, Tm, Hform, Gform, Cpa, Cpb, Cpc, Cpd, Hfus, Hvap, mua, mub')
for i, line in enumerate(joback_data_txt.split('\n')):
    parsed = to_num(line.split('\t'))
    # Records are indexed both by group label and by 1-based group id.
    j = JOBACK(i+1, *parsed)
    joback_groups_str_dict[parsed[0]] = j
    joback_groups_id_dict[i+1] = j
def smarts_fragment(catalog, rdkitmol=None, smi=None):
    r'''Count the occurrences of each SMARTS pattern in `catalog` within a
    molecule, and report whether those matches cover every atom exactly once.

    The molecule may be supplied either as an rdkit molecule object or as a
    SMILES string, which rdkit will parse.

    Parameters
    ----------
    catalog : dict
        Mapping of group keys to SMARTS strings, [-]
    rdkitmol : mol, optional
        Molecule as an rdkit object, [-]
    smi : str, optional
        SMILES string representing the molecule, [-]

    Returns
    -------
    counts : dict
        Integer match count per catalog key; only matched keys appear, [-]
    success : bool
        True when the molecule was fully and uniquely fragmented, [-]
    status : str
        'OK' on success, otherwise an explanation of the failure, [-]

    Notes
    -----
    Raises an exception if rdkit is not installed, or if neither `smi` nor
    `rdkitmol` is provided.
    '''
    if not hasRDKit:  # pragma: no cover
        raise Exception(rdkit_missing)
    if rdkitmol is None and smi is None:
        raise Exception('Either an rdkit mol or a smiles string is required')
    if smi is not None:
        rdkitmol = Chem.MolFromSmiles(smi)
        if rdkitmol is None:
            # SMILES failed to parse.
            return {}, False, 'Failed to construct mol'

    n_atoms = len(rdkitmol.GetAtoms())
    counts = {}
    matches_by_smarts = {}
    for key, smart in catalog.items():
        found = rdkitmol.GetSubstructMatches(Chem.MolFromSmarts(smart))
        if found:
            matches_by_smarts[smart] = found
            counts[key] = len(found)

    # Atoms hit at least once, ignoring multiplicity.
    covered = set()
    for hit_list in matches_by_smarts.values():
        for hit in hit_list:
            covered.update(hit)
    if len(covered) != n_atoms:
        return counts, False, 'Did not match all atoms present'

    # Every atom is covered; now verify no atom was claimed more than once.
    hits_flat = []
    for hit_list in matches_by_smarts.values():
        for hit in hit_list:
            hits_flat.extend(hit)
    if len(hits_flat) < n_atoms:
        return counts, False, 'Matched %d of %d atoms only' % (len(hits_flat), n_atoms)
    elif len(hits_flat) > n_atoms:
        repeated = [i for i, c in Counter(hits_flat).items() if c > 1]
        return counts, False, 'Matched some atoms repeatedly: %s' % (repeated)
    return counts, True, 'OK'
class Joback(object):
r'''Class for performing chemical property estimations with the Joback
group contribution method as described in [1]_ and [2]_. This is a very
common method with low accuracy but wide applicability. This routine can be
used with either its own automatic fragmentation routine, or user specified
groups. It is applicable to organic compounds only, and has only 41 groups
with no interactions between them. Each method's documentation describes
its accuracy. The automatic fragmentation routine is possible only because
of the development of SMARTS expressions to match the Joback groups by
Dr. Jason Biggs. The list of SMARTS expressions
was posted publically on the
`RDKit mailing list <https://www.mail-archive.com/rdkit-discuss@lists.sourceforge.net/msg07446.html>`_.
Parameters
----------
mol : rdkitmol or smiles str
Input molecule for the analysis, [-]
atom_count : int, optional
The total number of atoms including hydrogen in the molecule; this will
be counted by rdkit if it not provided, [-]
MW : float, optional
Molecular weight of the molecule; this will be calculated by rdkit if
not provided, [g/mol]
Tb : float, optional
An experimentally known boiling temperature for the chemical; this
increases the accuracy of the calculated critical point if provided.
[K]
Notes
-----
Be sure to check the status of the automatic fragmentation; not all
chemicals with the Joback method are applicable.
Approximately 68% of chemcials in the thermo database seem to be able to
be estimated with the Joback method.
Examples
--------
Analysis of Acetone:
>>> J = Joback('CC(=O)C')
>>> J.Hfus(J.counts)
5125.0
>>> J.Cpig(350)
84.69109750000001
>>> J.status
'OK'
All properties can be obtained in one go with the `estimate` method:
>>> pprint(J.estimate()) # doctest: +ELLIPSIS
{'Cpig': <bound method Joback.Cpig of <thermo.joback.Joback object at 0x...>>,
'Cpig_coeffs': [7.520000000000003,
0.26084,
-0.0001207,
1.545999999999998e-08],
'Gf': -154540.00000000003,
'Hf': -217829.99999999997,
'Hfus': 5125.0,
'Hvap': 29018.0,
'Pc': 4802499.604994407,
'Tb': 322.11,
'Tc': 500.5590049525365,
'Tm': 173.5,
'Vc': 0.0002095,
'mul': <bound method Joback.mul of <thermo.joback.Joback object at 0x...>>,
'mul_coeffs': [839.1099999999998, -14.99]}
The results for propionic anhydride (if the status is not OK) should not be
used.
>>> J = Joback('CCC(=O)OC(=O)CC')
>>> J.status
'Matched some atoms repeatedly: [4]'
>>> J.Cpig(300)
175.85999999999999
None of the routines need to use the automatic routine; they can be used
manually too:
>>> Joback.Tb({1: 2, 24: 1})
322.11
References
----------
.. [1] Joback, Kevin G. "A Unified Approach to Physical Property Estimation
Using Multivariate Statistical Techniques." Thesis, Massachusetts
Institute of Technology, 1984.
.. [2] Joback, K.G., and R.C. Reid. "Estimation of Pure-Component
Properties from Group-Contributions." Chemical Engineering
Communications 57, no. 1-6 (July 1, 1987): 233-43.
doi:10.1080/00986448708960487.
'''
calculated_Cpig_coeffs = None
calculated_mul_coeffs = None
def __init__(self, mol, atom_count=None, MW=None, Tb=None):
if type(mol) == Chem.rdchem.Mol:
self.rdkitmol = mol
else:
self.rdkitmol = Chem.MolFromSmiles(mol)
if atom_count is None:
self.rdkitmol_Hs = Chem.AddHs(self.rdkitmol)
self.atom_count = len(self.rdkitmol_Hs.GetAtoms())
else:
self.atom_count = atom_count
if MW is None:
self.MW = rdMolDescriptors.CalcExactMolWt(self.rdkitmol_Hs)
else:
self.MW = MW
self.counts, self.success, self.status = smarts_fragment(J_BIGGS_JOBACK_SMARTS_id_dict, rdkitmol=self.rdkitmol)
if Tb is not None:
self.Tb_estimated = self.Tb(self.counts)
else:
self.Tb_estimated = Tb
def estimate(self):
'''Method to compute all available properties with the Joback method;
returns their results as a dict. For the tempearture dependent values
Cpig and mul, both the coefficients and objects to perform calculations
are returned.
'''
# Pre-generate the coefficients or they will not be returned
self.mul(300)
self.Cpig(300)
estimates = {'Tb': self.Tb(self.counts),
'Tm': self.Tm(self.counts),
'Tc': self.Tc(self.counts, self.Tb_estimated),
'Pc': self.Pc(self.counts, self.atom_count),
'Vc': self.Vc(self.counts),
'Hf': self.Hf(self.counts),
'Gf': self.Gf(self.counts),
'Hfus': self.Hfus(self.counts),
'Hvap': self.Hvap(self.counts),
'mul': self.mul,
'mul_coeffs': self.calculated_mul_coeffs,
'Cpig': self.Cpig,
'Cpig_coeffs': self.calculated_Cpig_coeffs}
return estimates
@staticmethod
def Tb(counts):
r'''Estimates the normal boiling temperature of an organic compound
using the Joback method as a function of chemical structure only.
.. math::
T_b = 198.2 + \sum_i {T_{b,i}}
For 438 compounds tested by Joback, the absolute average error was
12.91 K and standard deviation was 17.85 K; the average relative error
was 3.6%.
Parameters
----------
counts : dict
Dictionary of Joback groups present (numerically indexed) and their
counts, [-]
Returns
-------
Tb : float
Estimated normal boiling temperature, [K]
Examples
--------
>>> Joback.Tb({1: 2, 24: 1})
322.11
'''
tot = 0.0
for group, count in counts.items():
tot += joback_groups_id_dict[group].Tb*count
Tb = 198.2 + tot
return Tb
@staticmethod
def Tm(counts):
    r'''Estimate the melting temperature of an organic compound with
    the Joback group-contribution method, from chemical structure only.

    .. math::
        T_m = 122.5 + \sum_i {T_{m,i}}

    Joback reported an absolute average error of 22.6 K (standard
    deviation 24.68 K, average relative error 11.2%) over 388
    compounds.

    Parameters
    ----------
    counts : dict
        Joback group IDs and their occurrence counts, [-]

    Returns
    -------
    Tm : float
        Estimated melting temperature, [K]

    Examples
    --------
    >>> Joback.Tm({1: 2, 24: 1})
    173.5
    '''
    contribution = sum(joback_groups_id_dict[group].Tm*count
                       for group, count in counts.items())
    return 122.5 + contribution
@staticmethod
def Tc(counts, Tb=None):
    r'''Estimate the critical temperature of an organic compound with
    the Joback group-contribution method. Accuracy improves when an
    experimental normal boiling point is supplied; if `Tb` is omitted
    it is first estimated with :meth:`Joback.Tb`.

    .. math::
        T_c = T_b \left[0.584 + 0.965 \sum_i {T_{c,i}}
        - \left(\sum_i {T_{c,i}}\right)^2 \right]^{-1}

    Joback reported an absolute average error of 4.76 K (standard
    deviation 6.94 K, average relative error 0.81%) over 409 compounds.

    Parameters
    ----------
    counts : dict
        Joback group IDs and their occurrence counts, [-]
    Tb : float, optional
        Experimental normal boiling temperature, [K]

    Returns
    -------
    Tc : float
        Estimated critical temperature, [K]

    Examples
    --------
    >>> Joback.Tc({1: 2, 24: 1}, Tb=322.11)
    500.5590049525365
    '''
    if Tb is None:
        Tb = Joback.Tb(counts)
    group_sum = sum(joback_groups_id_dict[group].Tc*count
                    for group, count in counts.items())
    return Tb/(0.584 + 0.965*group_sum - group_sum*group_sum)
@staticmethod
def Pc(counts, atom_count):
    r'''Estimate the critical pressure of an organic compound with the
    Joback group-contribution method. The correlation also uses the
    total number of atoms in the molecule.

    .. math::
        P_c = \left [0.113 + 0.0032N_A - \sum_i {P_{c,i}}\right ]^{-2}

    The correlation yields bar; the result is converted to Pa here.
    Joback reported an absolute average error of 2.06 bar (standard
    deviation 3.2 bar, AARE 5.2%) over 392 compounds.

    Parameters
    ----------
    counts : dict
        Joback group IDs and their occurrence counts, [-]
    atom_count : int
        Total number of atoms (including hydrogens) in the molecule, [-]

    Returns
    -------
    Pc : float
        Estimated critical pressure, [Pa]

    Examples
    --------
    >>> Joback.Pc({1: 2, 24: 1}, 10)
    4802499.604994407
    '''
    group_sum = sum(joback_groups_id_dict[group].Pc*count
                    for group, count in counts.items())
    Pc_bar = (0.113 + 0.0032*atom_count - group_sum)**-2
    return Pc_bar*1E5  # bar to Pa
@staticmethod
def Vc(counts):
    r'''Estimate the critical volume of an organic compound with the
    Joback group-contribution method, from chemical structure only.

    .. math::
        V_c = 17.5 + \sum_i {V_{c,i}}

    The correlation yields cm^3/mol; the result is converted to
    m^3/mol here. Joback reported an absolute average error of 7.54
    cm^3/mol (standard deviation 13.16 cm^3/mol, AARE 2.27%) over 310
    compounds.

    Parameters
    ----------
    counts : dict
        Joback group IDs and their occurrence counts, [-]

    Returns
    -------
    Vc : float
        Estimated critical volume, [m^3/mol]

    Examples
    --------
    >>> Joback.Vc({1: 2, 24: 1})
    0.0002095
    '''
    group_sum = sum(joback_groups_id_dict[group].Vc*count
                    for group, count in counts.items())
    return (17.5 + group_sum)*1E-6  # cm^3/mol to m^3/mol
@staticmethod
def Hf(counts):
    r'''Estimate the ideal-gas enthalpy of formation at 298.15 K of an
    organic compound with the Joback group-contribution method.

    .. math::
        H_{formation} = 68.29 + \sum_i {H_{f,i}}

    The correlation yields kJ/mol; the result is converted to J/mol
    here. Joback reported an absolute average error of 2.2 kcal/mol
    (standard deviation 2.0 kcal/mol, AARE 15.2%) over 370 compounds.

    Parameters
    ----------
    counts : dict
        Joback group IDs and their occurrence counts, [-]

    Returns
    -------
    Hf : float
        Estimated ideal-gas enthalpy of formation at 298.15 K, [J/mol]

    Examples
    --------
    >>> Joback.Hf({1: 2, 24: 1})
    -217829.99999999997
    '''
    group_sum = sum(joback_groups_id_dict[group].Hform*count
                    for group, count in counts.items())
    return (68.29 + group_sum)*1000  # kJ/mol to J/mol
@staticmethod
def Gf(counts):
    r'''Estimate the ideal-gas Gibbs energy of formation at 298.15 K of
    an organic compound with the Joback group-contribution method.

    .. math::
        G_{formation} = 53.88 + \sum {G_{f,i}}

    The correlation yields kJ/mol; the result is converted to J/mol
    here. Joback reported an absolute average error of 2.0 kcal/mol
    (standard deviation 4.37 kcal/mol, AARE 15.7%) over 328 compounds.

    Parameters
    ----------
    counts : dict
        Joback group IDs and their occurrence counts, [-]

    Returns
    -------
    Gf : float
        Estimated ideal-gas Gibbs energy of formation at 298.15 K,
        [J/mol]

    Examples
    --------
    >>> Joback.Gf({1: 2, 24: 1})
    -154540.00000000003
    '''
    group_sum = sum(joback_groups_id_dict[group].Gform*count
                    for group, count in counts.items())
    return (53.88 + group_sum)*1000  # kJ/mol to J/mol
@staticmethod
def Hfus(counts):
    r'''Estimate the enthalpy of fusion of an organic compound at its
    melting point with the Joback group-contribution method.

    .. math::
        \Delta H_{fus} = -0.88 + \sum_i H_{fus,i}

    The correlation yields kJ/mol; the result is converted to J/mol
    here. Joback reported an absolute average error of 485.2 cal/mol
    (standard deviation 661.4 cal/mol, average relative error 38.7%)
    over 155 compounds.

    Parameters
    ----------
    counts : dict
        Joback group IDs and their occurrence counts, [-]

    Returns
    -------
    Hfus : float
        Estimated enthalpy of fusion of the compound at its melting
        point, [J/mol]

    Examples
    --------
    >>> Joback.Hfus({1: 2, 24: 1})
    5125.0
    '''
    group_sum = sum(joback_groups_id_dict[group].Hfus*count
                    for group, count in counts.items())
    return (-0.88 + group_sum)*1000  # kJ/mol to J/mol
@staticmethod
def Hvap(counts):
    r'''Estimate the enthalpy of vaporization of an organic compound at
    its normal boiling point with the Joback group-contribution method.

    .. math::
        \Delta H_{vap} = 15.30 + \sum_i H_{vap,i}

    The correlation yields kJ/mol; the result is converted to J/mol
    here. Joback reported an absolute average error of 303.5 cal/mol
    (standard deviation 429 cal/mol, average relative error 3.88%)
    over 368 compounds.

    Parameters
    ----------
    counts : dict
        Joback group IDs and their occurrence counts, [-]

    Returns
    -------
    Hvap : float
        Estimated enthalpy of vaporization of the compound at its
        normal boiling point, [J/mol]

    Examples
    --------
    >>> Joback.Hvap({1: 2, 24: 1})
    29018.0
    '''
    group_sum = sum(joback_groups_id_dict[group].Hvap*count
                    for group, count in counts.items())
    return (15.3 + group_sum)*1000  # kJ/mol to J/mol
@staticmethod
def Cpig_coeffs(counts):
    r'''Compute the cubic-polynomial ideal-gas heat capacity
    coefficients of an organic compound with the Joback
    group-contribution method, from chemical structure only.

    .. math::
        C_p^{ig} = \sum_i a_i - 37.93 + \left[ \sum_i b_i + 0.210 \right] T
        + \left[ \sum_i c_i - 3.91 \cdot 10^{-4} \right] T^2
        + \left[\sum_i d_i + 2.06 \cdot 10^{-7}\right] T^3

    288 compounds were used in this determination; no overall error
    was reported. The underlying heat-capacity data used 9 points
    between 298 K and 1000 K per compound.

    Parameters
    ----------
    counts : dict
        Joback group IDs and their occurrence counts, [-]

    Returns
    -------
    coefficients : list[float]
        Polynomial coefficients [a, b, c, d] producing a heat capacity
        in J/mol/K, [-]

    Examples
    --------
    >>> c = Joback.Cpig_coeffs({1: 2, 24: 1})
    >>> c
    [7.520000000000003, 0.26084, -0.0001207, 1.545999999999998e-08]
    >>> Cp = lambda T : c[0] + c[1]*T + c[2]*T**2 + c[3]*T**3
    >>> Cp(300)
    75.32642000000001
    '''
    # Resolve each group object once, then sum the four contribution
    # columns independently.
    groups = [(joback_groups_id_dict[g], n) for g, n in counts.items()]
    a = sum(grp.Cpa*n for grp, n in groups) - 37.93
    b = sum(grp.Cpb*n for grp, n in groups) + 0.210
    c = sum(grp.Cpc*n for grp, n in groups) - 3.91E-4
    d = sum(grp.Cpd*n for grp, n in groups) + 2.06E-7
    return [a, b, c, d]
@staticmethod
def mul_coeffs(counts):
    r'''Compute the two Joback liquid-viscosity coefficients of an
    organic compound from chemical structure only.

    .. math::
        \mu_{liq} = \text{MW} \exp\left( \frac{ \sum_i \mu_a - 597.82}{T}
        + \sum_i \mu_b - 11.202 \right)

    A small, unspecified number of compounds measured at "several
    temperatures for each compound" was used in this determination;
    no overall error was reported.

    Parameters
    ----------
    counts : dict
        Joback group IDs and their occurrence counts, [-]

    Returns
    -------
    coefficients : list[float]
        Coefficients [a, b] producing a liquid viscosity in Pa*s, [-]

    Examples
    --------
    >>> mu_ab = Joback.mul_coeffs({1: 2, 24: 1})
    >>> mu_ab
    [839.1099999999998, -14.99]
    >>> MW = 58.041864812
    >>> mul = lambda T : MW*exp(mu_ab[0]/T + mu_ab[1])
    >>> mul(300)
    0.0002940378347162687
    '''
    # Resolve each group object once, then sum the two contribution
    # columns independently.
    groups = [(joback_groups_id_dict[g], n) for g, n in counts.items()]
    a = sum(grp.mua*n for grp, n in groups) - 597.82
    b = sum(grp.mub*n for grp, n in groups) - 11.202
    return [a, b]
def Cpig(self, T):
    r'''Compute the ideal-gas heat capacity at temperature `T` with the
    Joback group-contribution method. The polynomial coefficients are
    computed on first use and cached on the instance.

    .. math::
        C_p^{ig} = \sum_i a_i - 37.93 + \left[ \sum_i b_i + 0.210 \right] T
        + \left[ \sum_i c_i - 3.91 \cdot 10^{-4} \right] T^2
        + \left[\sum_i d_i + 2.06 \cdot 10^{-7}\right] T^3

    Parameters
    ----------
    T : float
        Temperature, [K]

    Returns
    -------
    Cpig : float
        Ideal-gas heat capacity, [J/mol/K]

    Examples
    --------
    >>> J = Joback('CC(=O)C')
    >>> J.Cpig(300)
    75.32642000000001
    '''
    coeffs = self.calculated_Cpig_coeffs
    if coeffs is None:
        # Lazily fit and cache the polynomial coefficients.
        coeffs = Joback.Cpig_coeffs(self.counts)
        self.calculated_Cpig_coeffs = coeffs
    return horner(reversed(coeffs), T)
def mul(self, T):
    r'''Compute the liquid viscosity at temperature `T` with the Joback
    group-contribution method. The two correlation coefficients are
    computed on first use and cached on the instance.

    .. math::
        \mu_{liq} = \text{MW} \exp\left( \frac{ \sum_i \mu_a - 597.82}{T}
        + \sum_i \mu_b - 11.202 \right)

    Parameters
    ----------
    T : float
        Temperature, [K]

    Returns
    -------
    mul : float
        Liquid viscosity, [Pa*s]

    Examples
    --------
    >>> J = Joback('CC(=O)C')
    >>> J.mul(300)
    0.0002940378347162687
    '''
    coeffs = self.calculated_mul_coeffs
    if coeffs is None:
        # Lazily fit and cache the correlation coefficients.
        coeffs = Joback.mul_coeffs(self.counts)
        self.calculated_mul_coeffs = coeffs
    mu_a, mu_b = coeffs
    return self.MW*exp(mu_a/T + mu_b)
| 39.360302 | 145 | 0.555787 |
3b703ee1642dd63b71fa04a30b229b7ade7bfd07 | 108,751 | py | Python | boto/opsworks/layer1.py | kyleknap/boto | 38478c1e71b2c307cdfff13ff0191dfa55006139 | [
"MIT"
] | 1 | 2016-08-19T11:22:12.000Z | 2016-08-19T11:22:12.000Z | boto/opsworks/layer1.py | kyleknap/boto | 38478c1e71b2c307cdfff13ff0191dfa55006139 | [
"MIT"
] | null | null | null | boto/opsworks/layer1.py | kyleknap/boto | 38478c1e71b2c307cdfff13ff0191dfa55006139 | [
"MIT"
] | null | null | null | # Copyright (c) 2013 Amazon.com, Inc. or its affiliates. All Rights Reserved
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
import boto
from boto.connection import AWSQueryConnection
from boto.regioninfo import RegionInfo
from boto.exception import JSONResponseError
from boto.opsworks import exceptions
from boto.compat import json
class OpsWorksConnection(AWSQueryConnection):
"""
AWS OpsWorks
Welcome to the AWS OpsWorks API Reference . This guide provides
descriptions, syntax, and usage examples about AWS OpsWorks
actions and data types, including common parameters and error
codes.
AWS OpsWorks is an application management service that provides an
integrated experience for overseeing the complete application
lifecycle. For information about this product, go to the `AWS
OpsWorks`_ details page.
**SDKs and CLI**
The most common way to use the AWS OpsWorks API is by using the
AWS Command Line Interface (CLI) or by using one of the AWS SDKs
to implement applications in your preferred language. For more
information, see:
+ `AWS CLI`_
+ `AWS SDK for Java`_
+ `AWS SDK for .NET`_
+ `AWS SDK for PHP 2`_
+ `AWS SDK for Ruby`_
+ `AWS SDK for Node.js`_
+ `AWS SDK for Python(Boto)`_
**Endpoints**
AWS OpsWorks supports only one endpoint, opsworks.us-
east-1.amazonaws.com (HTTPS), so you must connect to that
endpoint. You can then use the API to direct AWS OpsWorks to
create stacks in any AWS Region.
**Chef Version**
When you call CreateStack, CloneStack, or UpdateStack we recommend
you use the `ConfigurationManager` parameter to specify the Chef
version, 0.9 or 11.4. The default value is currently 0.9. However,
we expect to change the default value to 11.4 in October 2013. For
more information, see `Using AWS OpsWorks with Chef 11`_.
"""
# Per-service connection constants: API version, default endpoint, and
# the request-target prefix for the OpsWorks JSON protocol. Presumably
# consumed by the AWSQueryConnection base class when building and
# signing requests — confirm against boto.connection.
APIVersion = "2013-02-18"
DefaultRegionName = "us-east-1"
DefaultRegionEndpoint = "opsworks.us-east-1.amazonaws.com"
ServiceName = "OpsWorks"
TargetPrefix = "OpsWorks_20130218"
# Exception class used for error responses from the service.
ResponseError = JSONResponseError
# Maps service fault names to the specific boto exception classes that
# should be raised for them.
_faults = {
"ResourceNotFoundException": exceptions.ResourceNotFoundException,
"ValidationException": exceptions.ValidationException,
}
def __init__(self, **kwargs):
    """Create a connection to the AWS OpsWorks endpoint.

    A ``region`` keyword argument may be supplied; when absent (or
    falsy), the service default region (us-east-1) is used. The
    region's endpoint becomes the connection host.
    """
    # Fall back to the default RegionInfo when no region was given.
    region = kwargs.pop('region', None) or RegionInfo(
        self, self.DefaultRegionName, self.DefaultRegionEndpoint)
    kwargs['host'] = region.endpoint
    super(OpsWorksConnection, self).__init__(**kwargs)
    self.region = region
def _required_auth_capability(self):
    # Requests to this service are signed with HMAC-v4 (AWS Signature
    # Version 4).
    return ['hmac-v4']
def assign_volume(self, volume_id, instance_id=None):
    """
    Assign one of the stack's registered Amazon EBS volumes to a
    specified instance. The volume must first be registered with the
    stack by calling RegisterVolume.

    **Required Permissions**: the IAM user needs a Manage permissions
    level for the stack, or an attached policy that explicitly grants
    permissions.

    :type volume_id: string
    :param volume_id: The volume ID.

    :type instance_id: string
    :param instance_id: The instance ID.
    """
    request_body = {'VolumeId': volume_id}
    if instance_id is not None:
        request_body['InstanceId'] = instance_id
    return self.make_request(action='AssignVolume',
                             body=json.dumps(request_body))
def associate_elastic_ip(self, elastic_ip, instance_id=None):
    """
    Associate one of the stack's registered Elastic IP addresses with
    a specified instance. The address must first be registered with
    the stack by calling RegisterElasticIp.

    **Required Permissions**: the IAM user needs a Manage permissions
    level for the stack, or an attached policy that explicitly grants
    permissions.

    :type elastic_ip: string
    :param elastic_ip: The Elastic IP address.

    :type instance_id: string
    :param instance_id: The instance ID.
    """
    request_body = {'ElasticIp': elastic_ip}
    if instance_id is not None:
        request_body['InstanceId'] = instance_id
    return self.make_request(action='AssociateElasticIp',
                             body=json.dumps(request_body))
def attach_elastic_load_balancer(self, elastic_load_balancer_name,
                                 layer_id):
    """
    Attach an Elastic Load Balancing load balancer to a specified
    layer. The Elastic Load Balancing instance must be created
    separately, using the Elastic Load Balancing console, API, or CLI.

    **Required Permissions**: the IAM user needs a Manage permissions
    level for the stack, or an attached policy that explicitly grants
    permissions.

    :type elastic_load_balancer_name: string
    :param elastic_load_balancer_name: The Elastic Load Balancing
        instance's name.

    :type layer_id: string
    :param layer_id: The ID of the layer that the Elastic Load Balancing
        instance is to be attached to.
    """
    request_body = {
        'ElasticLoadBalancerName': elastic_load_balancer_name,
        'LayerId': layer_id,
    }
    return self.make_request(action='AttachElasticLoadBalancer',
                             body=json.dumps(request_body))
def clone_stack(self, source_stack_id, service_role_arn, name=None,
                region=None, vpc_id=None, attributes=None,
                default_instance_profile_arn=None, default_os=None,
                hostname_theme=None, default_availability_zone=None,
                default_subnet_id=None, custom_json=None,
                configuration_manager=None, use_custom_cookbooks=None,
                custom_cookbooks_source=None, default_ssh_key_name=None,
                clone_permissions=None, clone_app_ids=None,
                default_root_device_type=None):
    """
    Create a clone of a specified stack.

    **Required Permissions**: the IAM user needs an attached policy
    that explicitly grants permissions.

    :type source_stack_id: string
    :param source_stack_id: The source stack ID.

    :type service_role_arn: string
    :param service_role_arn: The ARN of the IAM service role that lets
        AWS OpsWorks work with AWS resources on your behalf. There is
        no default; a valid ARN must be supplied or the action fails.

    :type name: string
    :param name: The cloned stack name.

    :type region: string
    :param region: The cloned stack's AWS region, such as "us-east-1".

    :type vpc_id: string
    :param vpc_id: The ID of the VPC the cloned stack is launched
        into; it must be in the specified region. See the service
        documentation for default-VPC and EC2 Classic behavior and for
        interaction with `DefaultAvailabilityZone`/`DefaultSubnetId`.

    :type attributes: map
    :param attributes: Stack attributes as key/value pairs to add to
        the cloned stack.

    :type default_instance_profile_arn: string
    :param default_instance_profile_arn: The ARN of the IAM instance
        profile used by default for the stack's EC2 instances.

    :type default_os: string
    :param default_os: The default operating system, `Amazon Linux`
        (the default) or `Ubuntu 12.04 LTS`.

    :type hostname_theme: string
    :param hostname_theme: The theme used to generate instance host
        names (spaces replaced by underscores); defaults to
        `Layer_Dependent`.

    :type default_availability_zone: string
    :param default_availability_zone: The default Availability Zone,
        which must be in the specified region.

    :type default_subnet_id: string
    :param default_subnet_id: The default subnet ID for new instances.

    :type custom_json: string
    :param custom_json: A user-defined custom JSON string that
        overrides the corresponding default stack configuration
        values, e.g. `"{\"key1\": \"value1\", \"key2\": \"value2\",...}"`.

    :type configuration_manager: dict
    :param configuration_manager: The configuration manager, used to
        specify the Chef version (0.9 or 11.4).

    :type use_custom_cookbooks: boolean
    :param use_custom_cookbooks: Whether to use custom cookbooks.

    :type custom_cookbooks_source: dict
    :param custom_cookbooks_source: Repository information for
        retrieving apps or custom cookbooks.

    :type default_ssh_key_name: string
    :param default_ssh_key_name: A default SSH key for the stack's
        instances; may be overridden per instance.

    :type clone_permissions: boolean
    :param clone_permissions: Whether to clone the source stack's
        permissions.

    :type clone_app_ids: list
    :param clone_app_ids: Source stack app IDs to include in the
        cloned stack.

    :type default_root_device_type: string
    :param default_root_device_type: The default root device type for
        the stack's instances.
    """
    params = {
        'SourceStackId': source_stack_id,
        'ServiceRoleArn': service_role_arn,
    }
    # Optional arguments are included only when explicitly provided;
    # the pairs are listed in the service's expected key order.
    optional = (
        ('Name', name),
        ('Region', region),
        ('VpcId', vpc_id),
        ('Attributes', attributes),
        ('DefaultInstanceProfileArn', default_instance_profile_arn),
        ('DefaultOs', default_os),
        ('HostnameTheme', hostname_theme),
        ('DefaultAvailabilityZone', default_availability_zone),
        ('DefaultSubnetId', default_subnet_id),
        ('CustomJson', custom_json),
        ('ConfigurationManager', configuration_manager),
        ('UseCustomCookbooks', use_custom_cookbooks),
        ('CustomCookbooksSource', custom_cookbooks_source),
        ('DefaultSshKeyName', default_ssh_key_name),
        ('ClonePermissions', clone_permissions),
        ('CloneAppIds', clone_app_ids),
        ('DefaultRootDeviceType', default_root_device_type),
    )
    for key, value in optional:
        if value is not None:
            params[key] = value
    return self.make_request(action='CloneStack',
                             body=json.dumps(params))
def create_app(self, stack_id, name, type, shortname=None,
               description=None, app_source=None, domains=None,
               enable_ssl=None, ssl_configuration=None, attributes=None):
    """
    Create an app for a specified stack.

    **Required Permissions**: the IAM user needs a Manage permissions
    level for the stack, or an attached policy that explicitly grants
    permissions.

    :type stack_id: string
    :param stack_id: The stack ID.

    :type shortname: string
    :param shortname: The app's short name.

    :type name: string
    :param name: The app name.

    :type description: string
    :param description: A description of the app.

    :type type: string
    :param type: The app type; each supported type is associated with
        a particular layer (e.g. PHP apps with a PHP layer).

    :type app_source: dict
    :param app_source: A `Source` object that specifies the app
        repository.

    :type domains: list
    :param domains: Virtual host settings, with multiple domains
        separated by commas, e.g. `'www.example.com, example.com'`.

    :type enable_ssl: boolean
    :param enable_ssl: Whether to enable SSL for the app.

    :type ssl_configuration: dict
    :param ssl_configuration: An `SslConfiguration` object with the
        SSL configuration.

    :type attributes: map
    :param attributes: User-defined key/value pairs to add to the
        stack attributes bag.
    """
    params = {'StackId': stack_id, 'Name': name, 'Type': type}
    # Optional arguments are included only when explicitly provided.
    optional = (
        ('Shortname', shortname),
        ('Description', description),
        ('AppSource', app_source),
        ('Domains', domains),
        ('EnableSsl', enable_ssl),
        ('SslConfiguration', ssl_configuration),
        ('Attributes', attributes),
    )
    for key, value in optional:
        if value is not None:
            params[key] = value
    return self.make_request(action='CreateApp',
                             body=json.dumps(params))
def create_deployment(self, stack_id, command, app_id=None,
                      instance_ids=None, comment=None, custom_json=None):
    """
    Deploy a stack or app.

    App deployment generates a `deploy` event, which runs the
    associated recipes with a JSON stack configuration object that
    includes information about the app; stack deployment runs the
    `deploy` recipes but does not raise an event.

    **Required Permissions**: the IAM user needs a Deploy or Manage
    permissions level for the stack, or an attached policy that
    explicitly grants permissions.

    :type stack_id: string
    :param stack_id: The stack ID.

    :type app_id: string
    :param app_id: The app ID; required for app deployments but not
        for other deployment commands.

    :type instance_ids: list
    :param instance_ids: The instance IDs for the deployment targets.

    :type command: dict
    :param command: A `DeploymentCommand` object specifying the
        deployment command and any associated arguments.

    :type comment: string
    :param comment: A user-defined comment.

    :type custom_json: string
    :param custom_json: A user-defined custom JSON string that
        overrides the corresponding default stack configuration
        values, e.g. `"{\"key1\": \"value1\", \"key2\": \"value2\",...}"`.
    """
    params = {'StackId': stack_id, 'Command': command}
    # Optional arguments are included only when explicitly provided.
    optional = (
        ('AppId', app_id),
        ('InstanceIds', instance_ids),
        ('Comment', comment),
        ('CustomJson', custom_json),
    )
    for key, value in optional:
        if value is not None:
            params[key] = value
    return self.make_request(action='CreateDeployment',
                             body=json.dumps(params))
def create_instance(self, stack_id, layer_ids, instance_type,
auto_scaling_type=None, hostname=None, os=None,
ami_id=None, ssh_key_name=None,
availability_zone=None, subnet_id=None,
architecture=None, root_device_type=None,
install_updates_on_boot=None):
"""
Creates an instance in a specified stack. For more
information, see `Adding an Instance to a Layer`_.
**Required Permissions**: To use this action, an IAM user must
have a Manage permissions level for the stack, or an attached
policy that explicitly grants permissions. For more
information on user permissions, see `Managing User
Permissions`_.
:type stack_id: string
:param stack_id: The stack ID.
:type layer_ids: list
:param layer_ids: An array that contains the instance layer IDs.
:type instance_type: string
:param instance_type: The instance type. AWS OpsWorks supports all
instance types except Cluster Compute, Cluster GPU, and High Memory
Cluster. For more information, see `Instance Families and Types`_.
The parameter values that you use to specify the various types are
in the API Name column of the Available Instance Types table.
:type auto_scaling_type: string
:param auto_scaling_type:
The instance auto scaling type, which has three possible values:
+ **AlwaysRunning**: A 24/7 instance, which is not affected by auto
scaling.
+ **TimeBasedAutoScaling**: A time-based auto scaling instance, which
is started and stopped based on a specified schedule. To specify
the schedule, call SetTimeBasedAutoScaling.
+ **LoadBasedAutoScaling**: A load-based auto scaling instance, which
is started and stopped based on load metrics. To use load-based
auto scaling, you must enable it for the instance layer and
configure the thresholds by calling SetLoadBasedAutoScaling.
:type hostname: string
:param hostname: The instance host name.
:type os: string
:param os: The instance operating system, which must be set to one of
the following.
+ Standard operating systems: `Amazon Linux` or `Ubuntu 12.04 LTS`
+ Custom AMIs: `Custom`
The default option is `Amazon Linux`. If you set this parameter to
`Custom`, you must use the CreateInstance action's AmiId parameter
to specify the custom AMI that you want to use. For more
information on the standard operating systems, see `Operating
Systems`_For more information on how to use custom AMIs with
OpsWorks, see `Using Custom AMIs`_.
:type ami_id: string
:param ami_id: A custom AMI ID to be used to create the instance. The
AMI should be based on one of the standard AWS OpsWorks APIs:
Amazon Linux or Ubuntu 12.04 LTS. For more information, see
`Instances`_
:type ssh_key_name: string
:param ssh_key_name: The instance SSH key name.
:type availability_zone: string
:param availability_zone: The instance Availability Zone. For more
information, see `Regions and Endpoints`_.
:type subnet_id: string
:param subnet_id: The ID of the instance's subnet. If the stack is
running in a VPC, you can use this parameter to override the
stack's default subnet ID value and direct AWS OpsWorks to launch
the instance in a different subnet.
:type architecture: string
:param architecture: The instance architecture. Instance types do not
necessarily support both architectures. For a list of the
architectures that are supported by the different instance types,
see `Instance Families and Types`_.
:type root_device_type: string
:param root_device_type: The instance root device type. For more
information, see `Storage for the Root Device`_.
:type install_updates_on_boot: boolean
:param install_updates_on_boot:
Whether to install operating system and package updates when the
instance boots. The default value is `True`. To control when
updates are installed, set this value to `False`. You must then
update your instances manually by using CreateDeployment to run the
`update_dependencies` stack command or manually running `yum`
(Amazon Linux) or `apt-get` (Ubuntu) on the instances.
We strongly recommend using the default value of `True`, to ensure that
your instances have the latest security updates.
"""
params = {
'StackId': stack_id,
'LayerIds': layer_ids,
'InstanceType': instance_type,
}
if auto_scaling_type is not None:
params['AutoScalingType'] = auto_scaling_type
if hostname is not None:
params['Hostname'] = hostname
if os is not None:
params['Os'] = os
if ami_id is not None:
params['AmiId'] = ami_id
if ssh_key_name is not None:
params['SshKeyName'] = ssh_key_name
if availability_zone is not None:
params['AvailabilityZone'] = availability_zone
if subnet_id is not None:
params['SubnetId'] = subnet_id
if architecture is not None:
params['Architecture'] = architecture
if root_device_type is not None:
params['RootDeviceType'] = root_device_type
if install_updates_on_boot is not None:
params['InstallUpdatesOnBoot'] = install_updates_on_boot
return self.make_request(action='CreateInstance',
body=json.dumps(params))
def create_layer(self, stack_id, type, name, shortname, attributes=None,
custom_instance_profile_arn=None,
custom_security_group_ids=None, packages=None,
volume_configurations=None, enable_auto_healing=None,
auto_assign_elastic_ips=None,
auto_assign_public_ips=None, custom_recipes=None,
install_updates_on_boot=None):
"""
Creates a layer. For more information, see `How to Create a
Layer`_.
You should use **CreateLayer** for noncustom layer types such
as PHP App Server only if the stack does not have an existing
layer of that type. A stack can have at most one instance of
each noncustom layer; if you attempt to create a second
instance, **CreateLayer** fails. A stack can have an arbitrary
number of custom layers, so you can call **CreateLayer** as
many times as you like for that layer type.
**Required Permissions**: To use this action, an IAM user must
have a Manage permissions level for the stack, or an attached
policy that explicitly grants permissions. For more
information on user permissions, see `Managing User
Permissions`_.
:type stack_id: string
:param stack_id: The layer stack ID.
:type type: string
:param type:
The layer type. A stack cannot have more than one layer of the same
type. This parameter must be set to one of the following:
+ lb: An HAProxy layer
+ web: A Static Web Server layer
+ rails-app: A Rails App Server layer
+ php-app: A PHP App Server layer
+ nodejs-app: A Node.js App Server layer
+ memcached: A Memcached layer
+ db-master: A MySQL layer
+ monitoring-master: A Ganglia layer
+ custom: A custom layer
:type name: string
:param name: The layer name, which is used by the console.
:type shortname: string
:param shortname: The layer short name, which is used internally by AWS
OpsWorks and by Chef recipes. The short name is also used as the
name for the directory where your app files are installed. It can
have a maximum of 200 characters, which are limited to the
alphanumeric characters, '-', '_', and '.'.
:type attributes: map
:param attributes: One or more user-defined key/value pairs to be added
to the stack attributes bag.
:type custom_instance_profile_arn: string
:param custom_instance_profile_arn: The ARN of an IAM profile that to
be used for the layer's EC2 instances. For more information about
IAM ARNs, see `Using Identifiers`_.
:type custom_security_group_ids: list
:param custom_security_group_ids: An array containing the layer custom
security group IDs.
:type packages: list
:param packages: An array of `Package` objects that describe the layer
packages.
:type volume_configurations: list
:param volume_configurations: A `VolumeConfigurations` object that
describes the layer Amazon EBS volumes.
:type enable_auto_healing: boolean
:param enable_auto_healing: Whether to disable auto healing for the
layer.
:type auto_assign_elastic_ips: boolean
:param auto_assign_elastic_ips: Whether to automatically assign an
`Elastic IP address`_ to the layer's instances. For more
information, see `How to Edit a Layer`_.
:type auto_assign_public_ips: boolean
:param auto_assign_public_ips: For stacks that are running in a VPC,
whether to automatically assign a public IP address to the layer's
instances. For more information, see `How to Edit a Layer`_.
:type custom_recipes: dict
:param custom_recipes: A `LayerCustomRecipes` object that specifies the
layer custom recipes.
:type install_updates_on_boot: boolean
:param install_updates_on_boot:
Whether to install operating system and package updates when the
instance boots. The default value is `True`. To control when
updates are installed, set this value to `False`. You must then
update your instances manually by using CreateDeployment to run the
`update_dependencies` stack command or manually running `yum`
(Amazon Linux) or `apt-get` (Ubuntu) on the instances.
We strongly recommend using the default value of `True`, to ensure that
your instances have the latest security updates.
"""
params = {
'StackId': stack_id,
'Type': type,
'Name': name,
'Shortname': shortname,
}
if attributes is not None:
params['Attributes'] = attributes
if custom_instance_profile_arn is not None:
params['CustomInstanceProfileArn'] = custom_instance_profile_arn
if custom_security_group_ids is not None:
params['CustomSecurityGroupIds'] = custom_security_group_ids
if packages is not None:
params['Packages'] = packages
if volume_configurations is not None:
params['VolumeConfigurations'] = volume_configurations
if enable_auto_healing is not None:
params['EnableAutoHealing'] = enable_auto_healing
if auto_assign_elastic_ips is not None:
params['AutoAssignElasticIps'] = auto_assign_elastic_ips
if auto_assign_public_ips is not None:
params['AutoAssignPublicIps'] = auto_assign_public_ips
if custom_recipes is not None:
params['CustomRecipes'] = custom_recipes
if install_updates_on_boot is not None:
params['InstallUpdatesOnBoot'] = install_updates_on_boot
return self.make_request(action='CreateLayer',
body=json.dumps(params))
def create_stack(self, name, region, service_role_arn,
default_instance_profile_arn, vpc_id=None,
attributes=None, default_os=None, hostname_theme=None,
default_availability_zone=None, default_subnet_id=None,
custom_json=None, configuration_manager=None,
use_custom_cookbooks=None, custom_cookbooks_source=None,
default_ssh_key_name=None,
default_root_device_type=None):
"""
Creates a new stack. For more information, see `Create a New
Stack`_.
**Required Permissions**: To use this action, an IAM user must
have an attached policy that explicitly grants permissions.
For more information on user permissions, see `Managing User
Permissions`_.
:type name: string
:param name: The stack name.
:type region: string
:param region: The stack AWS region, such as "us-east-1". For more
information about Amazon regions, see `Regions and Endpoints`_.
:type vpc_id: string
:param vpc_id: The ID of the VPC that the stack is to be launched into.
It must be in the specified region. All instances will be launched
into this VPC, and you cannot change the ID later.
+ If your account supports EC2 Classic, the default value is no VPC.
+ If your account does not support EC2 Classic, the default value is
the default VPC for the specified region.
If the VPC ID corresponds to a default VPC and you have specified
either the `DefaultAvailabilityZone` or the `DefaultSubnetId`
parameter only, AWS OpsWorks infers the value of the other
parameter. If you specify neither parameter, AWS OpsWorks sets
these parameters to the first valid Availability Zone for the
specified region and the corresponding default VPC subnet ID,
respectively.
If you specify a nondefault VPC ID, note the following:
+ It must belong to a VPC in your account that is in the specified
region.
+ You must specify a value for `DefaultSubnetId`.
For more information on how to use AWS OpsWorks with a VPC, see
`Running a Stack in a VPC`_. For more information on default VPC
and EC2 Classic, see `Supported Platforms`_.
:type attributes: map
:param attributes: One or more user-defined key/value pairs to be added
to the stack attributes bag.
:type service_role_arn: string
:param service_role_arn: The stack AWS Identity and Access Management
(IAM) role, which allows AWS OpsWorks to work with AWS resources on
your behalf. You must set this parameter to the Amazon Resource
Name (ARN) for an existing IAM role. For more information about IAM
ARNs, see `Using Identifiers`_.
:type default_instance_profile_arn: string
:param default_instance_profile_arn: The ARN of an IAM profile that is
the default profile for all of the stack's EC2 instances. For more
information about IAM ARNs, see `Using Identifiers`_.
:type default_os: string
:param default_os: The stack's default operating system, which must be
set to `Amazon Linux` or `Ubuntu 12.04 LTS`. The default option is
`Amazon Linux`.
:type hostname_theme: string
:param hostname_theme: The stack's host name theme, with spaces are
replaced by underscores. The theme is used to generate host names
for the stack's instances. By default, `HostnameTheme` is set to
`Layer_Dependent`, which creates host names by appending integers
to the layer's short name. The other themes are:
+ `Baked_Goods`
+ `Clouds`
+ `European_Cities`
+ `Fruits`
+ `Greek_Deities`
+ `Legendary_Creatures_from_Japan`
+ `Planets_and_Moons`
+ `Roman_Deities`
+ `Scottish_Islands`
+ `US_Cities`
+ `Wild_Cats`
To obtain a generated host name, call `GetHostNameSuggestion`, which
returns a host name based on the current theme.
:type default_availability_zone: string
:param default_availability_zone: The stack's default Availability
Zone, which must be in the specified region. For more information,
see `Regions and Endpoints`_. If you also specify a value for
`DefaultSubnetId`, the subnet must be in the same zone. For more
information, see the `VpcId` parameter description.
:type default_subnet_id: string
:param default_subnet_id: The stack's default subnet ID. All instances
will be launched into this subnet unless you specify otherwise when
you create the instance. If you also specify a value for
`DefaultAvailabilityZone`, the subnet must be in that zone. For
information on default values and when this parameter is required,
see the `VpcId` parameter description.
:type custom_json: string
:param custom_json: A string that contains user-defined, custom JSON.
It is used to override the corresponding default stack
configuration JSON values. The string should be in the following
format and must escape characters such as '"'.: `"{\"key1\":
\"value1\", \"key2\": \"value2\",...}"`
For more information on custom JSON, see `Use Custom JSON to Modify the
Stack Configuration JSON`_.
:type configuration_manager: dict
:param configuration_manager: The configuration manager. When you
create a stack we recommend that you use the configuration manager
to specify the Chef version, 0.9 or 11.4. The default value is
currently 0.9. However, we expect to change the default value to
11.4 in September 2013.
:type use_custom_cookbooks: boolean
:param use_custom_cookbooks: Whether the stack uses custom cookbooks.
:type custom_cookbooks_source: dict
:param custom_cookbooks_source: Contains the information required to
retrieve an app or cookbook from a repository. For more
information, see `Creating Apps`_ or `Custom Recipes and
Cookbooks`_.
:type default_ssh_key_name: string
:param default_ssh_key_name: A default SSH key for the stack instances.
You can override this value when you create or update an instance.
:type default_root_device_type: string
:param default_root_device_type: The default root device type. This
value is used by default for all instances in the cloned stack, but
you can override it when you create an instance. For more
information, see `Storage for the Root Device`_.
"""
params = {
'Name': name,
'Region': region,
'ServiceRoleArn': service_role_arn,
'DefaultInstanceProfileArn': default_instance_profile_arn,
}
if vpc_id is not None:
params['VpcId'] = vpc_id
if attributes is not None:
params['Attributes'] = attributes
if default_os is not None:
params['DefaultOs'] = default_os
if hostname_theme is not None:
params['HostnameTheme'] = hostname_theme
if default_availability_zone is not None:
params['DefaultAvailabilityZone'] = default_availability_zone
if default_subnet_id is not None:
params['DefaultSubnetId'] = default_subnet_id
if custom_json is not None:
params['CustomJson'] = custom_json
if configuration_manager is not None:
params['ConfigurationManager'] = configuration_manager
if use_custom_cookbooks is not None:
params['UseCustomCookbooks'] = use_custom_cookbooks
if custom_cookbooks_source is not None:
params['CustomCookbooksSource'] = custom_cookbooks_source
if default_ssh_key_name is not None:
params['DefaultSshKeyName'] = default_ssh_key_name
if default_root_device_type is not None:
params['DefaultRootDeviceType'] = default_root_device_type
return self.make_request(action='CreateStack',
body=json.dumps(params))
def create_user_profile(self, iam_user_arn, ssh_username=None,
ssh_public_key=None, allow_self_management=None):
"""
Creates a new user profile.
**Required Permissions**: To use this action, an IAM user must
have an attached policy that explicitly grants permissions.
For more information on user permissions, see `Managing User
Permissions`_.
:type iam_user_arn: string
:param iam_user_arn: The user's IAM ARN.
:type ssh_username: string
:param ssh_username: The user's SSH user name.
:type ssh_public_key: string
:param ssh_public_key: The user's public SSH key.
:type allow_self_management: boolean
:param allow_self_management: Whether users can specify their own SSH
public key through the My Settings page. For more information, see
``_.
"""
params = {'IamUserArn': iam_user_arn, }
if ssh_username is not None:
params['SshUsername'] = ssh_username
if ssh_public_key is not None:
params['SshPublicKey'] = ssh_public_key
if allow_self_management is not None:
params['AllowSelfManagement'] = allow_self_management
return self.make_request(action='CreateUserProfile',
body=json.dumps(params))
def delete_app(self, app_id):
"""
Deletes a specified app.
**Required Permissions**: To use this action, an IAM user must
have a Manage permissions level for the stack, or an attached
policy that explicitly grants permissions. For more
information on user permissions, see `Managing User
Permissions`_.
:type app_id: string
:param app_id: The app ID.
"""
params = {'AppId': app_id, }
return self.make_request(action='DeleteApp',
body=json.dumps(params))
def delete_instance(self, instance_id, delete_elastic_ip=None,
delete_volumes=None):
"""
Deletes a specified instance. You must stop an instance before
you can delete it. For more information, see `Deleting
Instances`_.
**Required Permissions**: To use this action, an IAM user must
have a Manage permissions level for the stack, or an attached
policy that explicitly grants permissions. For more
information on user permissions, see `Managing User
Permissions`_.
:type instance_id: string
:param instance_id: The instance ID.
:type delete_elastic_ip: boolean
:param delete_elastic_ip: Whether to delete the instance Elastic IP
address.
:type delete_volumes: boolean
:param delete_volumes: Whether to delete the instance Amazon EBS
volumes.
"""
params = {'InstanceId': instance_id, }
if delete_elastic_ip is not None:
params['DeleteElasticIp'] = delete_elastic_ip
if delete_volumes is not None:
params['DeleteVolumes'] = delete_volumes
return self.make_request(action='DeleteInstance',
body=json.dumps(params))
def delete_layer(self, layer_id):
"""
Deletes a specified layer. You must first stop and then delete
all associated instances. For more information, see `How to
Delete a Layer`_.
**Required Permissions**: To use this action, an IAM user must
have a Manage permissions level for the stack, or an attached
policy that explicitly grants permissions. For more
information on user permissions, see `Managing User
Permissions`_.
:type layer_id: string
:param layer_id: The layer ID.
"""
params = {'LayerId': layer_id, }
return self.make_request(action='DeleteLayer',
body=json.dumps(params))
def delete_stack(self, stack_id):
"""
Deletes a specified stack. You must first delete all
instances, layers, and apps. For more information, see `Shut
Down a Stack`_.
**Required Permissions**: To use this action, an IAM user must
have a Manage permissions level for the stack, or an attached
policy that explicitly grants permissions. For more
information on user permissions, see `Managing User
Permissions`_.
:type stack_id: string
:param stack_id: The stack ID.
"""
params = {'StackId': stack_id, }
return self.make_request(action='DeleteStack',
body=json.dumps(params))
def delete_user_profile(self, iam_user_arn):
"""
Deletes a user profile.
**Required Permissions**: To use this action, an IAM user must
have an attached policy that explicitly grants permissions.
For more information on user permissions, see `Managing User
Permissions`_.
:type iam_user_arn: string
:param iam_user_arn: The user's IAM ARN.
"""
params = {'IamUserArn': iam_user_arn, }
return self.make_request(action='DeleteUserProfile',
body=json.dumps(params))
def deregister_elastic_ip(self, elastic_ip):
"""
Deregisters a specified Elastic IP address. The address can
then be registered by another stack. For more information, see
`Resource Management`_.
**Required Permissions**: To use this action, an IAM user must
have a Manage permissions level for the stack, or an attached
policy that explicitly grants permissions. For more
information on user permissions, see `Managing User
Permissions`_.
:type elastic_ip: string
:param elastic_ip: The Elastic IP address.
"""
params = {'ElasticIp': elastic_ip, }
return self.make_request(action='DeregisterElasticIp',
body=json.dumps(params))
def deregister_volume(self, volume_id):
"""
Deregisters an Amazon EBS volume. The volume can then be
registered by another stack. For more information, see
`Resource Management`_.
**Required Permissions**: To use this action, an IAM user must
have a Manage permissions level for the stack, or an attached
policy that explicitly grants permissions. For more
information on user permissions, see `Managing User
Permissions`_.
:type volume_id: string
:param volume_id: The volume ID.
"""
params = {'VolumeId': volume_id, }
return self.make_request(action='DeregisterVolume',
body=json.dumps(params))
def describe_apps(self, stack_id=None, app_ids=None):
"""
Requests a description of a specified set of apps.
You must specify at least one of the parameters.
**Required Permissions**: To use this action, an IAM user must
have a Show, Deploy, or Manage permissions level for the
stack, or an attached policy that explicitly grants
permissions. For more information on user permissions, see
`Managing User Permissions`_.
:type stack_id: string
:param stack_id: The app stack ID. If you use this parameter,
`DescribeApps` returns a description of the apps in the specified
stack.
:type app_ids: list
:param app_ids: An array of app IDs for the apps to be described. If
you use this parameter, `DescribeApps` returns a description of the
specified apps. Otherwise, it returns a description of every app.
"""
params = {}
if stack_id is not None:
params['StackId'] = stack_id
if app_ids is not None:
params['AppIds'] = app_ids
return self.make_request(action='DescribeApps',
body=json.dumps(params))
def describe_commands(self, deployment_id=None, instance_id=None,
command_ids=None):
"""
Describes the results of specified commands.
You must specify at least one of the parameters.
**Required Permissions**: To use this action, an IAM user must
have a Show, Deploy, or Manage permissions level for the
stack, or an attached policy that explicitly grants
permissions. For more information on user permissions, see
`Managing User Permissions`_.
:type deployment_id: string
:param deployment_id: The deployment ID. If you include this parameter,
`DescribeCommands` returns a description of the commands associated
with the specified deployment.
:type instance_id: string
:param instance_id: The instance ID. If you include this parameter,
`DescribeCommands` returns a description of the commands associated
with the specified instance.
:type command_ids: list
:param command_ids: An array of command IDs. If you include this
parameter, `DescribeCommands` returns a description of the
specified commands. Otherwise, it returns a description of every
command.
"""
params = {}
if deployment_id is not None:
params['DeploymentId'] = deployment_id
if instance_id is not None:
params['InstanceId'] = instance_id
if command_ids is not None:
params['CommandIds'] = command_ids
return self.make_request(action='DescribeCommands',
body=json.dumps(params))
def describe_deployments(self, stack_id=None, app_id=None,
deployment_ids=None):
"""
Requests a description of a specified set of deployments.
You must specify at least one of the parameters.
**Required Permissions**: To use this action, an IAM user must
have a Show, Deploy, or Manage permissions level for the
stack, or an attached policy that explicitly grants
permissions. For more information on user permissions, see
`Managing User Permissions`_.
:type stack_id: string
:param stack_id: The stack ID. If you include this parameter,
`DescribeDeployments` returns a description of the commands
associated with the specified stack.
:type app_id: string
:param app_id: The app ID. If you include this parameter,
`DescribeDeployments` returns a description of the commands
associated with the specified app.
:type deployment_ids: list
:param deployment_ids: An array of deployment IDs to be described. If
you include this parameter, `DescribeDeployments` returns a
description of the specified deployments. Otherwise, it returns a
description of every deployment.
"""
params = {}
if stack_id is not None:
params['StackId'] = stack_id
if app_id is not None:
params['AppId'] = app_id
if deployment_ids is not None:
params['DeploymentIds'] = deployment_ids
return self.make_request(action='DescribeDeployments',
body=json.dumps(params))
def describe_elastic_ips(self, instance_id=None, stack_id=None, ips=None):
"""
Describes `Elastic IP addresses`_.
You must specify at least one of the parameters.
**Required Permissions**: To use this action, an IAM user must
have a Show, Deploy, or Manage permissions level for the
stack, or an attached policy that explicitly grants
permissions. For more information on user permissions, see
`Managing User Permissions`_.
:type instance_id: string
:param instance_id: The instance ID. If you include this parameter,
`DescribeElasticIps` returns a description of the Elastic IP
addresses associated with the specified instance.
:type stack_id: string
:param stack_id: A stack ID. If you include this parameter,
`DescribeElasticIps` returns a description of the Elastic IP
addresses that are registered with the specified stack.
:type ips: list
:param ips: An array of Elastic IP addresses to be described. If you
include this parameter, `DescribeElasticIps` returns a description
of the specified Elastic IP addresses. Otherwise, it returns a
description of every Elastic IP address.
"""
params = {}
if instance_id is not None:
params['InstanceId'] = instance_id
if stack_id is not None:
params['StackId'] = stack_id
if ips is not None:
params['Ips'] = ips
return self.make_request(action='DescribeElasticIps',
body=json.dumps(params))
def describe_elastic_load_balancers(self, stack_id=None, layer_ids=None):
"""
Describes a stack's Elastic Load Balancing instances.
You must specify at least one of the parameters.
**Required Permissions**: To use this action, an IAM user must
have a Show, Deploy, or Manage permissions level for the
stack, or an attached policy that explicitly grants
permissions. For more information on user permissions, see
`Managing User Permissions`_.
:type stack_id: string
:param stack_id: A stack ID. The action describes the stack's Elastic
Load Balancing instances.
:type layer_ids: list
:param layer_ids: A list of layer IDs. The action describes the Elastic
Load Balancing instances for the specified layers.
"""
params = {}
if stack_id is not None:
params['StackId'] = stack_id
if layer_ids is not None:
params['LayerIds'] = layer_ids
return self.make_request(action='DescribeElasticLoadBalancers',
body=json.dumps(params))
def describe_instances(self, stack_id=None, layer_id=None,
instance_ids=None):
"""
Requests a description of a set of instances.
You must specify at least one of the parameters.
**Required Permissions**: To use this action, an IAM user must
have a Show, Deploy, or Manage permissions level for the
stack, or an attached policy that explicitly grants
permissions. For more information on user permissions, see
`Managing User Permissions`_.
:type stack_id: string
:param stack_id: A stack ID. If you use this parameter,
`DescribeInstances` returns descriptions of the instances
associated with the specified stack.
:type layer_id: string
:param layer_id: A layer ID. If you use this parameter,
`DescribeInstances` returns descriptions of the instances
associated with the specified layer.
:type instance_ids: list
:param instance_ids: An array of instance IDs to be described. If you
use this parameter, `DescribeInstances` returns a description of
the specified instances. Otherwise, it returns a description of
every instance.
"""
params = {}
if stack_id is not None:
params['StackId'] = stack_id
if layer_id is not None:
params['LayerId'] = layer_id
if instance_ids is not None:
params['InstanceIds'] = instance_ids
return self.make_request(action='DescribeInstances',
body=json.dumps(params))
def describe_layers(self, stack_id=None, layer_ids=None):
"""
Requests a description of one or more layers in a specified
stack.
You must specify at least one of the parameters.
**Required Permissions**: To use this action, an IAM user must
have a Show, Deploy, or Manage permissions level for the
stack, or an attached policy that explicitly grants
permissions. For more information on user permissions, see
`Managing User Permissions`_.
:type stack_id: string
:param stack_id: The stack ID.
:type layer_ids: list
:param layer_ids: An array of layer IDs that specify the layers to be
described. If you omit this parameter, `DescribeLayers` returns a
description of every layer in the specified stack.
"""
params = {}
if stack_id is not None:
params['StackId'] = stack_id
if layer_ids is not None:
params['LayerIds'] = layer_ids
return self.make_request(action='DescribeLayers',
body=json.dumps(params))
def describe_load_based_auto_scaling(self, layer_ids):
"""
Describes load-based auto scaling configurations for specified
layers.
You must specify at least one of the parameters.
**Required Permissions**: To use this action, an IAM user must
have a Show, Deploy, or Manage permissions level for the
stack, or an attached policy that explicitly grants
permissions. For more information on user permissions, see
`Managing User Permissions`_.
:type layer_ids: list
:param layer_ids: An array of layer IDs.
"""
params = {'LayerIds': layer_ids, }
return self.make_request(action='DescribeLoadBasedAutoScaling',
body=json.dumps(params))
def describe_my_user_profile(self):
"""
Describes a user's SSH information.
**Required Permissions**: To use this action, an IAM user must
have self-management enabled or an attached policy that
explicitly grants permissions. For more information on user
permissions, see `Managing User Permissions`_.
"""
params = {}
return self.make_request(action='DescribeMyUserProfile',
body=json.dumps(params))
def describe_permissions(self, iam_user_arn=None, stack_id=None):
"""
Describes the permissions for a specified stack.
**Required Permissions**: To use this action, an IAM user must
have a Manage permissions level for the stack, or an attached
policy that explicitly grants permissions. For more
information on user permissions, see `Managing User
Permissions`_.
:type iam_user_arn: string
:param iam_user_arn: The user's IAM ARN. For more information about IAM
ARNs, see `Using Identifiers`_.
:type stack_id: string
:param stack_id: The stack ID.
"""
params = {}
if iam_user_arn is not None:
params['IamUserArn'] = iam_user_arn
if stack_id is not None:
params['StackId'] = stack_id
return self.make_request(action='DescribePermissions',
body=json.dumps(params))
def describe_raid_arrays(self, instance_id=None, raid_array_ids=None):
"""
Describe an instance's RAID arrays.
You must specify at least one of the parameters.
**Required Permissions**: To use this action, an IAM user must
have a Show, Deploy, or Manage permissions level for the
stack, or an attached policy that explicitly grants
permissions. For more information on user permissions, see
`Managing User Permissions`_.
:type instance_id: string
:param instance_id: The instance ID. If you use this parameter,
`DescribeRaidArrays` returns descriptions of the RAID arrays
associated with the specified instance.
:type raid_array_ids: list
:param raid_array_ids: An array of RAID array IDs. If you use this
parameter, `DescribeRaidArrays` returns descriptions of the
specified arrays. Otherwise, it returns a description of every
array.
"""
params = {}
if instance_id is not None:
params['InstanceId'] = instance_id
if raid_array_ids is not None:
params['RaidArrayIds'] = raid_array_ids
return self.make_request(action='DescribeRaidArrays',
body=json.dumps(params))
def describe_service_errors(self, stack_id=None, instance_id=None,
service_error_ids=None):
"""
Describes AWS OpsWorks service errors.
**Required Permissions**: To use this action, an IAM user must
have a Show, Deploy, or Manage permissions level for the
stack, or an attached policy that explicitly grants
permissions. For more information on user permissions, see
`Managing User Permissions`_.
:type stack_id: string
:param stack_id: The stack ID. If you use this parameter,
`DescribeServiceErrors` returns descriptions of the errors
associated with the specified stack.
:type instance_id: string
:param instance_id: The instance ID. If you use this parameter,
`DescribeServiceErrors` returns descriptions of the errors
associated with the specified instance.
:type service_error_ids: list
:param service_error_ids: An array of service error IDs. If you use
this parameter, `DescribeServiceErrors` returns descriptions of the
specified errors. Otherwise, it returns a description of every
error.
"""
params = {}
if stack_id is not None:
params['StackId'] = stack_id
if instance_id is not None:
params['InstanceId'] = instance_id
if service_error_ids is not None:
params['ServiceErrorIds'] = service_error_ids
return self.make_request(action='DescribeServiceErrors',
body=json.dumps(params))
def describe_stack_summary(self, stack_id):
"""
Describes the number of layers and apps in a specified stack,
and the number of instances in each state, such as
`running_setup` or `online`.
**Required Permissions**: To use this action, an IAM user must
have a Show, Deploy, or Manage permissions level for the
stack, or an attached policy that explicitly grants
permissions. For more information on user permissions, see
`Managing User Permissions`_.
:type stack_id: string
:param stack_id: The stack ID.
"""
params = {'StackId': stack_id, }
return self.make_request(action='DescribeStackSummary',
body=json.dumps(params))
def describe_stacks(self, stack_ids=None):
"""
Requests a description of one or more stacks.
**Required Permissions**: To use this action, an IAM user must
have a Show, Deploy, or Manage permissions level for the
stack, or an attached policy that explicitly grants
permissions. For more information on user permissions, see
`Managing User Permissions`_.
:type stack_ids: list
:param stack_ids: An array of stack IDs that specify the stacks to be
described. If you omit this parameter, `DescribeStacks` returns a
description of every stack.
"""
params = {}
if stack_ids is not None:
params['StackIds'] = stack_ids
return self.make_request(action='DescribeStacks',
body=json.dumps(params))
def describe_time_based_auto_scaling(self, instance_ids):
"""
Describes time-based auto scaling configurations for specified
instances.
You must specify at least one of the parameters.
**Required Permissions**: To use this action, an IAM user must
have a Show, Deploy, or Manage permissions level for the
stack, or an attached policy that explicitly grants
permissions. For more information on user permissions, see
`Managing User Permissions`_.
:type instance_ids: list
:param instance_ids: An array of instance IDs.
"""
params = {'InstanceIds': instance_ids, }
return self.make_request(action='DescribeTimeBasedAutoScaling',
body=json.dumps(params))
def describe_user_profiles(self, iam_user_arns=None):
"""
Describe specified users.
**Required Permissions**: To use this action, an IAM user must
have an attached policy that explicitly grants permissions.
For more information on user permissions, see `Managing User
Permissions`_.
:type iam_user_arns: list
:param iam_user_arns: An array of IAM user ARNs that identify the users
to be described.
"""
params = {}
if iam_user_arns is not None:
params['IamUserArns'] = iam_user_arns
return self.make_request(action='DescribeUserProfiles',
body=json.dumps(params))
def describe_volumes(self, instance_id=None, stack_id=None,
raid_array_id=None, volume_ids=None):
"""
Describes an instance's Amazon EBS volumes.
You must specify at least one of the parameters.
**Required Permissions**: To use this action, an IAM user must
have a Show, Deploy, or Manage permissions level for the
stack, or an attached policy that explicitly grants
permissions. For more information on user permissions, see
`Managing User Permissions`_.
:type instance_id: string
:param instance_id: The instance ID. If you use this parameter,
`DescribeVolumes` returns descriptions of the volumes associated
with the specified instance.
:type stack_id: string
:param stack_id: A stack ID. The action describes the stack's
registered Amazon EBS volumes.
:type raid_array_id: string
:param raid_array_id: The RAID array ID. If you use this parameter,
`DescribeVolumes` returns descriptions of the volumes associated
with the specified RAID array.
:type volume_ids: list
:param volume_ids: Am array of volume IDs. If you use this parameter,
`DescribeVolumes` returns descriptions of the specified volumes.
Otherwise, it returns a description of every volume.
"""
params = {}
if instance_id is not None:
params['InstanceId'] = instance_id
if stack_id is not None:
params['StackId'] = stack_id
if raid_array_id is not None:
params['RaidArrayId'] = raid_array_id
if volume_ids is not None:
params['VolumeIds'] = volume_ids
return self.make_request(action='DescribeVolumes',
body=json.dumps(params))
def detach_elastic_load_balancer(self, elastic_load_balancer_name,
layer_id):
"""
Detaches a specified Elastic Load Balancing instance from its
layer.
**Required Permissions**: To use this action, an IAM user must
have a Manage permissions level for the stack, or an attached
policy that explicitly grants permissions. For more
information on user permissions, see `Managing User
Permissions`_.
:type elastic_load_balancer_name: string
:param elastic_load_balancer_name: The Elastic Load Balancing
instance's name.
:type layer_id: string
:param layer_id: The ID of the layer that the Elastic Load Balancing
instance is attached to.
"""
params = {
'ElasticLoadBalancerName': elastic_load_balancer_name,
'LayerId': layer_id,
}
return self.make_request(action='DetachElasticLoadBalancer',
body=json.dumps(params))
def disassociate_elastic_ip(self, elastic_ip):
"""
Disassociates an Elastic IP address from its instance. The
address remains registered with the stack. For more
information, see `Resource Management`_.
**Required Permissions**: To use this action, an IAM user must
have a Manage permissions level for the stack, or an attached
policy that explicitly grants permissions. For more
information on user permissions, see `Managing User
Permissions`_.
:type elastic_ip: string
:param elastic_ip: The Elastic IP address.
"""
params = {'ElasticIp': elastic_ip, }
return self.make_request(action='DisassociateElasticIp',
body=json.dumps(params))
def get_hostname_suggestion(self, layer_id):
"""
Gets a generated host name for the specified layer, based on
the current host name theme.
**Required Permissions**: To use this action, an IAM user must
have a Manage permissions level for the stack, or an attached
policy that explicitly grants permissions. For more
information on user permissions, see `Managing User
Permissions`_.
:type layer_id: string
:param layer_id: The layer ID.
"""
params = {'LayerId': layer_id, }
return self.make_request(action='GetHostnameSuggestion',
body=json.dumps(params))
def reboot_instance(self, instance_id):
"""
Reboots a specified instance. For more information, see
`Starting, Stopping, and Rebooting Instances`_.
**Required Permissions**: To use this action, an IAM user must
have a Manage permissions level for the stack, or an attached
policy that explicitly grants permissions. For more
information on user permissions, see `Managing User
Permissions`_.
:type instance_id: string
:param instance_id: The instance ID.
"""
params = {'InstanceId': instance_id, }
return self.make_request(action='RebootInstance',
body=json.dumps(params))
def register_elastic_ip(self, elastic_ip, stack_id):
"""
Registers an Elastic IP address with a specified stack. An
address can be registered with only one stack at a time. If
the address is already registered, you must first deregister
it by calling DeregisterElasticIp. For more information, see
`Resource Management`_.
**Required Permissions**: To use this action, an IAM user must
have a Manage permissions level for the stack, or an attached
policy that explicitly grants permissions. For more
information on user permissions, see `Managing User
Permissions`_.
:type elastic_ip: string
:param elastic_ip: The Elastic IP address.
:type stack_id: string
:param stack_id: The stack ID.
"""
params = {'ElasticIp': elastic_ip, 'StackId': stack_id, }
return self.make_request(action='RegisterElasticIp',
body=json.dumps(params))
def register_volume(self, stack_id, ec_2_volume_id=None):
"""
Registers an Amazon EBS volume with a specified stack. A
volume can be registered with only one stack at a time. If the
volume is already registered, you must first deregister it by
calling DeregisterVolume. For more information, see `Resource
Management`_.
**Required Permissions**: To use this action, an IAM user must
have a Manage permissions level for the stack, or an attached
policy that explicitly grants permissions. For more
information on user permissions, see `Managing User
Permissions`_.
:type ec_2_volume_id: string
:param ec_2_volume_id: The Amazon EBS volume ID.
:type stack_id: string
:param stack_id: The stack ID.
"""
params = {'StackId': stack_id, }
if ec_2_volume_id is not None:
params['Ec2VolumeId'] = ec_2_volume_id
return self.make_request(action='RegisterVolume',
body=json.dumps(params))
def set_load_based_auto_scaling(self, layer_id, enable=None,
up_scaling=None, down_scaling=None):
"""
Specify the load-based auto scaling configuration for a
specified layer. For more information, see `Managing Load with
Time-based and Load-based Instances`_.
To use load-based auto scaling, you must create a set of load-
based auto scaling instances. Load-based auto scaling operates
only on the instances from that set, so you must ensure that
you have created enough instances to handle the maximum
anticipated load.
**Required Permissions**: To use this action, an IAM user must
have a Manage permissions level for the stack, or an attached
policy that explicitly grants permissions. For more
information on user permissions, see `Managing User
Permissions`_.
:type layer_id: string
:param layer_id: The layer ID.
:type enable: boolean
:param enable: Enables load-based auto scaling for the layer.
:type up_scaling: dict
:param up_scaling: An `AutoScalingThresholds` object with the upscaling
threshold configuration. If the load exceeds these thresholds for a
specified amount of time, AWS OpsWorks starts a specified number of
instances.
:type down_scaling: dict
:param down_scaling: An `AutoScalingThresholds` object with the
downscaling threshold configuration. If the load falls below these
thresholds for a specified amount of time, AWS OpsWorks stops a
specified number of instances.
"""
params = {'LayerId': layer_id, }
if enable is not None:
params['Enable'] = enable
if up_scaling is not None:
params['UpScaling'] = up_scaling
if down_scaling is not None:
params['DownScaling'] = down_scaling
return self.make_request(action='SetLoadBasedAutoScaling',
body=json.dumps(params))
def set_permission(self, stack_id, iam_user_arn, allow_ssh=None,
allow_sudo=None, level=None):
"""
Specifies a stack's permissions. For more information, see
`Security and Permissions`_.
**Required Permissions**: To use this action, an IAM user must
have a Manage permissions level for the stack, or an attached
policy that explicitly grants permissions. For more
information on user permissions, see `Managing User
Permissions`_.
:type stack_id: string
:param stack_id: The stack ID.
:type iam_user_arn: string
:param iam_user_arn: The user's IAM ARN.
:type allow_ssh: boolean
:param allow_ssh: The user is allowed to use SSH to communicate with
the instance.
:type allow_sudo: boolean
:param allow_sudo: The user is allowed to use **sudo** to elevate
privileges.
:type level: string
:param level: The user's permission level, which must be set to one of
the following strings. You cannot set your own permissions level.
+ `deny`
+ `show`
+ `deploy`
+ `manage`
+ `iam_only`
For more information on the permissions associated with these levels,
see `Managing User Permissions`_
"""
params = {'StackId': stack_id, 'IamUserArn': iam_user_arn, }
if allow_ssh is not None:
params['AllowSsh'] = allow_ssh
if allow_sudo is not None:
params['AllowSudo'] = allow_sudo
if level is not None:
params['Level'] = level
return self.make_request(action='SetPermission',
body=json.dumps(params))
def set_time_based_auto_scaling(self, instance_id,
auto_scaling_schedule=None):
"""
Specify the time-based auto scaling configuration for a
specified instance. For more information, see `Managing Load
with Time-based and Load-based Instances`_.
**Required Permissions**: To use this action, an IAM user must
have a Manage permissions level for the stack, or an attached
policy that explicitly grants permissions. For more
information on user permissions, see `Managing User
Permissions`_.
:type instance_id: string
:param instance_id: The instance ID.
:type auto_scaling_schedule: dict
:param auto_scaling_schedule: An `AutoScalingSchedule` with the
instance schedule.
"""
params = {'InstanceId': instance_id, }
if auto_scaling_schedule is not None:
params['AutoScalingSchedule'] = auto_scaling_schedule
return self.make_request(action='SetTimeBasedAutoScaling',
body=json.dumps(params))
def start_instance(self, instance_id):
"""
Starts a specified instance. For more information, see
`Starting, Stopping, and Rebooting Instances`_.
**Required Permissions**: To use this action, an IAM user must
have a Manage permissions level for the stack, or an attached
policy that explicitly grants permissions. For more
information on user permissions, see `Managing User
Permissions`_.
:type instance_id: string
:param instance_id: The instance ID.
"""
params = {'InstanceId': instance_id, }
return self.make_request(action='StartInstance',
body=json.dumps(params))
def start_stack(self, stack_id):
"""
Starts stack's instances.
**Required Permissions**: To use this action, an IAM user must
have a Manage permissions level for the stack, or an attached
policy that explicitly grants permissions. For more
information on user permissions, see `Managing User
Permissions`_.
:type stack_id: string
:param stack_id: The stack ID.
"""
params = {'StackId': stack_id, }
return self.make_request(action='StartStack',
body=json.dumps(params))
def stop_instance(self, instance_id):
"""
Stops a specified instance. When you stop a standard instance,
the data disappears and must be reinstalled when you restart
the instance. You can stop an Amazon EBS-backed instance
without losing data. For more information, see `Starting,
Stopping, and Rebooting Instances`_.
**Required Permissions**: To use this action, an IAM user must
have a Manage permissions level for the stack, or an attached
policy that explicitly grants permissions. For more
information on user permissions, see `Managing User
Permissions`_.
:type instance_id: string
:param instance_id: The instance ID.
"""
params = {'InstanceId': instance_id, }
return self.make_request(action='StopInstance',
body=json.dumps(params))
def stop_stack(self, stack_id):
"""
Stops a specified stack.
**Required Permissions**: To use this action, an IAM user must
have a Manage permissions level for the stack, or an attached
policy that explicitly grants permissions. For more
information on user permissions, see `Managing User
Permissions`_.
:type stack_id: string
:param stack_id: The stack ID.
"""
params = {'StackId': stack_id, }
return self.make_request(action='StopStack',
body=json.dumps(params))
def unassign_volume(self, volume_id):
"""
Unassigns an assigned Amazon EBS volume. The volume remains
registered with the stack. For more information, see `Resource
Management`_.
**Required Permissions**: To use this action, an IAM user must
have a Manage permissions level for the stack, or an attached
policy that explicitly grants permissions. For more
information on user permissions, see `Managing User
Permissions`_.
:type volume_id: string
:param volume_id: The volume ID.
"""
params = {'VolumeId': volume_id, }
return self.make_request(action='UnassignVolume',
body=json.dumps(params))
def update_app(self, app_id, name=None, description=None, type=None,
app_source=None, domains=None, enable_ssl=None,
ssl_configuration=None, attributes=None):
"""
Updates a specified app.
**Required Permissions**: To use this action, an IAM user must
have a Deploy or Manage permissions level for the stack, or an
attached policy that explicitly grants permissions. For more
information on user permissions, see `Managing User
Permissions`_.
:type app_id: string
:param app_id: The app ID.
:type name: string
:param name: The app name.
:type description: string
:param description: A description of the app.
:type type: string
:param type: The app type.
:type app_source: dict
:param app_source: A `Source` object that specifies the app repository.
:type domains: list
:param domains: The app's virtual host settings, with multiple domains
separated by commas. For example: `'www.example.com, example.com'`
:type enable_ssl: boolean
:param enable_ssl: Whether SSL is enabled for the app.
:type ssl_configuration: dict
:param ssl_configuration: An `SslConfiguration` object with the SSL
configuration.
:type attributes: map
:param attributes: One or more user-defined key/value pairs to be added
to the stack attributes bag.
"""
params = {'AppId': app_id, }
if name is not None:
params['Name'] = name
if description is not None:
params['Description'] = description
if type is not None:
params['Type'] = type
if app_source is not None:
params['AppSource'] = app_source
if domains is not None:
params['Domains'] = domains
if enable_ssl is not None:
params['EnableSsl'] = enable_ssl
if ssl_configuration is not None:
params['SslConfiguration'] = ssl_configuration
if attributes is not None:
params['Attributes'] = attributes
return self.make_request(action='UpdateApp',
body=json.dumps(params))
def update_elastic_ip(self, elastic_ip, name=None):
"""
Updates a registered Elastic IP address's name. For more
information, see `Resource Management`_.
**Required Permissions**: To use this action, an IAM user must
have a Manage permissions level for the stack, or an attached
policy that explicitly grants permissions. For more
information on user permissions, see `Managing User
Permissions`_.
:type elastic_ip: string
:param elastic_ip: The address.
:type name: string
:param name: The new name.
"""
params = {'ElasticIp': elastic_ip, }
if name is not None:
params['Name'] = name
return self.make_request(action='UpdateElasticIp',
body=json.dumps(params))
def update_instance(self, instance_id, layer_ids=None,
instance_type=None, auto_scaling_type=None,
hostname=None, os=None, ami_id=None,
ssh_key_name=None, architecture=None,
install_updates_on_boot=None):
"""
Updates a specified instance.
**Required Permissions**: To use this action, an IAM user must
have a Manage permissions level for the stack, or an attached
policy that explicitly grants permissions. For more
information on user permissions, see `Managing User
Permissions`_.
:type instance_id: string
:param instance_id: The instance ID.
:type layer_ids: list
:param layer_ids: The instance's layer IDs.
:type instance_type: string
:param instance_type: The instance type. AWS OpsWorks supports all
instance types except Cluster Compute, Cluster GPU, and High Memory
Cluster. For more information, see `Instance Families and Types`_.
The parameter values that you use to specify the various types are
in the API Name column of the Available Instance Types table.
:type auto_scaling_type: string
:param auto_scaling_type:
The instance's auto scaling type, which has three possible values:
+ **AlwaysRunning**: A 24/7 instance, which is not affected by auto
scaling.
+ **TimeBasedAutoScaling**: A time-based auto scaling instance, which
is started and stopped based on a specified schedule.
+ **LoadBasedAutoScaling**: A load-based auto scaling instance, which
is started and stopped based on load metrics.
:type hostname: string
:param hostname: The instance host name.
:type os: string
:param os: The instance operating system, which must be set to one of
the following.
+ Standard operating systems: `Amazon Linux` or `Ubuntu 12.04 LTS`
+ Custom AMIs: `Custom`
The default option is `Amazon Linux`. If you set this parameter to
`Custom`, you must use the CreateInstance action's AmiId parameter
to specify the custom AMI that you want to use. For more
information on the standard operating systems, see `Operating
Systems`_For more information on how to use custom AMIs with
OpsWorks, see `Using Custom AMIs`_.
:type ami_id: string
:param ami_id: A custom AMI ID to be used to create the instance. The
AMI should be based on one of the standard AWS OpsWorks APIs:
Amazon Linux or Ubuntu 12.04 LTS. For more information, see
`Instances`_
:type ssh_key_name: string
:param ssh_key_name: The instance SSH key name.
:type architecture: string
:param architecture: The instance architecture. Instance types do not
necessarily support both architectures. For a list of the
architectures that are supported by the different instance types,
see `Instance Families and Types`_.
:type install_updates_on_boot: boolean
:param install_updates_on_boot:
Whether to install operating system and package updates when the
instance boots. The default value is `True`. To control when
updates are installed, set this value to `False`. You must then
update your instances manually by using CreateDeployment to run the
`update_dependencies` stack command or manually running `yum`
(Amazon Linux) or `apt-get` (Ubuntu) on the instances.
We strongly recommend using the default value of `True`, to ensure that
your instances have the latest security updates.
"""
params = {'InstanceId': instance_id, }
if layer_ids is not None:
params['LayerIds'] = layer_ids
if instance_type is not None:
params['InstanceType'] = instance_type
if auto_scaling_type is not None:
params['AutoScalingType'] = auto_scaling_type
if hostname is not None:
params['Hostname'] = hostname
if os is not None:
params['Os'] = os
if ami_id is not None:
params['AmiId'] = ami_id
if ssh_key_name is not None:
params['SshKeyName'] = ssh_key_name
if architecture is not None:
params['Architecture'] = architecture
if install_updates_on_boot is not None:
params['InstallUpdatesOnBoot'] = install_updates_on_boot
return self.make_request(action='UpdateInstance',
body=json.dumps(params))
def update_layer(self, layer_id, name=None, shortname=None,
attributes=None, custom_instance_profile_arn=None,
custom_security_group_ids=None, packages=None,
volume_configurations=None, enable_auto_healing=None,
auto_assign_elastic_ips=None,
auto_assign_public_ips=None, custom_recipes=None,
install_updates_on_boot=None):
"""
Updates a specified layer.
**Required Permissions**: To use this action, an IAM user must
have a Manage permissions level for the stack, or an attached
policy that explicitly grants permissions. For more
information on user permissions, see `Managing User
Permissions`_.
:type layer_id: string
:param layer_id: The layer ID.
:type name: string
:param name: The layer name, which is used by the console.
:type shortname: string
:param shortname: The layer short name, which is used internally by AWS
OpsWorksand by Chef. The short name is also used as the name for
the directory where your app files are installed. It can have a
maximum of 200 characters and must be in the following format:
/\A[a-z0-9\-\_\.]+\Z/.
:type attributes: map
:param attributes: One or more user-defined key/value pairs to be added
to the stack attributes bag.
:type custom_instance_profile_arn: string
:param custom_instance_profile_arn: The ARN of an IAM profile to be
used for all of the layer's EC2 instances. For more information
about IAM ARNs, see `Using Identifiers`_.
:type custom_security_group_ids: list
:param custom_security_group_ids: An array containing the layer's
custom security group IDs.
:type packages: list
:param packages: An array of `Package` objects that describe the
layer's packages.
:type volume_configurations: list
:param volume_configurations: A `VolumeConfigurations` object that
describes the layer's Amazon EBS volumes.
:type enable_auto_healing: boolean
:param enable_auto_healing: Whether to disable auto healing for the
layer.
:type auto_assign_elastic_ips: boolean
:param auto_assign_elastic_ips: Whether to automatically assign an
`Elastic IP address`_ to the layer's instances. For more
information, see `How to Edit a Layer`_.
:type auto_assign_public_ips: boolean
:param auto_assign_public_ips: For stacks that are running in a VPC,
whether to automatically assign a public IP address to the layer's
instances. For more information, see `How to Edit a Layer`_.
:type custom_recipes: dict
:param custom_recipes: A `LayerCustomRecipes` object that specifies the
layer's custom recipes.
:type install_updates_on_boot: boolean
:param install_updates_on_boot:
Whether to install operating system and package updates when the
instance boots. The default value is `True`. To control when
updates are installed, set this value to `False`. You must then
update your instances manually by using CreateDeployment to run the
`update_dependencies` stack command or manually running `yum`
(Amazon Linux) or `apt-get` (Ubuntu) on the instances.
We strongly recommend using the default value of `True`, to ensure that
your instances have the latest security updates.
"""
params = {'LayerId': layer_id, }
if name is not None:
params['Name'] = name
if shortname is not None:
params['Shortname'] = shortname
if attributes is not None:
params['Attributes'] = attributes
if custom_instance_profile_arn is not None:
params['CustomInstanceProfileArn'] = custom_instance_profile_arn
if custom_security_group_ids is not None:
params['CustomSecurityGroupIds'] = custom_security_group_ids
if packages is not None:
params['Packages'] = packages
if volume_configurations is not None:
params['VolumeConfigurations'] = volume_configurations
if enable_auto_healing is not None:
params['EnableAutoHealing'] = enable_auto_healing
if auto_assign_elastic_ips is not None:
params['AutoAssignElasticIps'] = auto_assign_elastic_ips
if auto_assign_public_ips is not None:
params['AutoAssignPublicIps'] = auto_assign_public_ips
if custom_recipes is not None:
params['CustomRecipes'] = custom_recipes
if install_updates_on_boot is not None:
params['InstallUpdatesOnBoot'] = install_updates_on_boot
return self.make_request(action='UpdateLayer',
body=json.dumps(params))
def update_my_user_profile(self, ssh_public_key=None):
"""
Updates a user's SSH public key.
**Required Permissions**: To use this action, an IAM user must
have self-management enabled or an attached policy that
explicitly grants permissions. For more information on user
permissions, see `Managing User Permissions`_.
:type ssh_public_key: string
:param ssh_public_key: The user's SSH public key.
"""
params = {}
if ssh_public_key is not None:
params['SshPublicKey'] = ssh_public_key
return self.make_request(action='UpdateMyUserProfile',
body=json.dumps(params))
def update_stack(self, stack_id, name=None, attributes=None,
service_role_arn=None,
default_instance_profile_arn=None, default_os=None,
hostname_theme=None, default_availability_zone=None,
default_subnet_id=None, custom_json=None,
configuration_manager=None, use_custom_cookbooks=None,
custom_cookbooks_source=None, default_ssh_key_name=None,
default_root_device_type=None):
"""
Updates a specified stack.
**Required Permissions**: To use this action, an IAM user must
have a Manage permissions level for the stack, or an attached
policy that explicitly grants permissions. For more
information on user permissions, see `Managing User
Permissions`_.
:type stack_id: string
:param stack_id: The stack ID.
:type name: string
:param name: The stack's new name.
:type attributes: map
:param attributes: One or more user-defined key/value pairs to be added
to the stack attributes bag.
:type service_role_arn: string
:param service_role_arn:
The stack AWS Identity and Access Management (IAM) role, which allows
AWS OpsWorks to work with AWS resources on your behalf. You must
set this parameter to the Amazon Resource Name (ARN) for an
existing IAM role. For more information about IAM ARNs, see `Using
Identifiers`_.
You must set this parameter to a valid service role ARN or the action
will fail; there is no default value. You can specify the stack's
current service role ARN, if you prefer, but you must do so
explicitly.
:type default_instance_profile_arn: string
:param default_instance_profile_arn: The ARN of an IAM profile that is
the default profile for all of the stack's EC2 instances. For more
information about IAM ARNs, see `Using Identifiers`_.
:type default_os: string
:param default_os: The stack's default operating system, which must be
set to `Amazon Linux` or `Ubuntu 12.04 LTS`. The default option is
`Amazon Linux`.
:type hostname_theme: string
:param hostname_theme: The stack's new host name theme, with spaces are
replaced by underscores. The theme is used to generate host names
for the stack's instances. By default, `HostnameTheme` is set to
`Layer_Dependent`, which creates host names by appending integers
to the layer's short name. The other themes are:
+ `Baked_Goods`
+ `Clouds`
+ `European_Cities`
+ `Fruits`
+ `Greek_Deities`
+ `Legendary_Creatures_from_Japan`
+ `Planets_and_Moons`
+ `Roman_Deities`
+ `Scottish_Islands`
+ `US_Cities`
+ `Wild_Cats`
To obtain a generated host name, call `GetHostNameSuggestion`, which
returns a host name based on the current theme.
:type default_availability_zone: string
:param default_availability_zone: The stack's default Availability
Zone, which must be in the specified region. For more information,
see `Regions and Endpoints`_. If you also specify a value for
`DefaultSubnetId`, the subnet must be in the same zone. For more
information, see CreateStack.
:type default_subnet_id: string
:param default_subnet_id: The stack's default subnet ID. All instances
will be launched into this subnet unless you specify otherwise when
you create the instance. If you also specify a value for
`DefaultAvailabilityZone`, the subnet must be in that zone. For
more information, see CreateStack.
:type custom_json: string
:param custom_json: A string that contains user-defined, custom JSON.
It is used to override the corresponding default stack
configuration JSON values. The string should be in the following
format and must escape characters such as '"'.: `"{\"key1\":
\"value1\", \"key2\": \"value2\",...}"`
For more information on custom JSON, see `Use Custom JSON to Modify the
Stack Configuration JSON`_.
:type configuration_manager: dict
:param configuration_manager: The configuration manager. When you
update a stack you can optionally use the configuration manager to
specify the Chef version, 0.9 or 11.4. If you omit this parameter,
AWS OpsWorks does not change the Chef version.
:type use_custom_cookbooks: boolean
:param use_custom_cookbooks: Whether the stack uses custom cookbooks.
:type custom_cookbooks_source: dict
:param custom_cookbooks_source: Contains the information required to
retrieve an app or cookbook from a repository. For more
information, see `Creating Apps`_ or `Custom Recipes and
Cookbooks`_.
:type default_ssh_key_name: string
:param default_ssh_key_name: A default SSH key for the stack instances.
You can override this value when you create or update an instance.
:type default_root_device_type: string
:param default_root_device_type: The default root device type. This
value is used by default for all instances in the cloned stack, but
you can override it when you create an instance. For more
information, see `Storage for the Root Device`_.
"""
params = {'StackId': stack_id, }
if name is not None:
params['Name'] = name
if attributes is not None:
params['Attributes'] = attributes
if service_role_arn is not None:
params['ServiceRoleArn'] = service_role_arn
if default_instance_profile_arn is not None:
params['DefaultInstanceProfileArn'] = default_instance_profile_arn
if default_os is not None:
params['DefaultOs'] = default_os
if hostname_theme is not None:
params['HostnameTheme'] = hostname_theme
if default_availability_zone is not None:
params['DefaultAvailabilityZone'] = default_availability_zone
if default_subnet_id is not None:
params['DefaultSubnetId'] = default_subnet_id
if custom_json is not None:
params['CustomJson'] = custom_json
if configuration_manager is not None:
params['ConfigurationManager'] = configuration_manager
if use_custom_cookbooks is not None:
params['UseCustomCookbooks'] = use_custom_cookbooks
if custom_cookbooks_source is not None:
params['CustomCookbooksSource'] = custom_cookbooks_source
if default_ssh_key_name is not None:
params['DefaultSshKeyName'] = default_ssh_key_name
if default_root_device_type is not None:
params['DefaultRootDeviceType'] = default_root_device_type
return self.make_request(action='UpdateStack',
body=json.dumps(params))
def update_user_profile(self, iam_user_arn, ssh_username=None,
ssh_public_key=None, allow_self_management=None):
"""
Updates a specified user profile.
**Required Permissions**: To use this action, an IAM user must
have an attached policy that explicitly grants permissions.
For more information on user permissions, see `Managing User
Permissions`_.
:type iam_user_arn: string
:param iam_user_arn: The user IAM ARN.
:type ssh_username: string
:param ssh_username: The user's new SSH user name.
:type ssh_public_key: string
:param ssh_public_key: The user's new SSH public key.
:type allow_self_management: boolean
:param allow_self_management: Whether users can specify their own SSH
public key through the My Settings page. For more information, see
`Managing User Permissions`_.
"""
params = {'IamUserArn': iam_user_arn, }
if ssh_username is not None:
params['SshUsername'] = ssh_username
if ssh_public_key is not None:
params['SshPublicKey'] = ssh_public_key
if allow_self_management is not None:
params['AllowSelfManagement'] = allow_self_management
return self.make_request(action='UpdateUserProfile',
body=json.dumps(params))
def update_volume(self, volume_id, name=None, mount_point=None):
"""
Updates an Amazon EBS volume's name or mount point. For more
information, see `Resource Management`_.
**Required Permissions**: To use this action, an IAM user must
have a Manage permissions level for the stack, or an attached
policy that explicitly grants permissions. For more
information on user permissions, see `Managing User
Permissions`_.
:type volume_id: string
:param volume_id: The volume ID.
:type name: string
:param name: The new name.
:type mount_point: string
:param mount_point: The new mount point.
"""
params = {'VolumeId': volume_id, }
if name is not None:
params['Name'] = name
if mount_point is not None:
params['MountPoint'] = mount_point
return self.make_request(action='UpdateVolume',
body=json.dumps(params))
    def make_request(self, action, body):
        """POST *body* to the OpsWorks JSON (x-amz-json-1.1) API as
        operation *action*.

        Returns the decoded JSON response on HTTP 200 (implicitly
        ``None`` when the 200 response has an empty body); on any other
        status decodes the error body and raises the exception class
        mapped from its ``__type`` field.
        """
        headers = {
            'X-Amz-Target': '%s.%s' % (self.TargetPrefix, action),
            'Host': self.region.endpoint,
            'Content-Type': 'application/x-amz-json-1.1',
            'Content-Length': str(len(body)),
        }
        http_request = self.build_base_http_request(
            method='POST', path='/', auth_path='/', params={},
            headers=headers, data=body)
        # _mexe executes the request with retries; the high retry count
        # presumably absorbs service throttling -- TODO confirm.
        response = self._mexe(http_request, sender=None,
                              override_num_retries=10)
        response_body = response.read().decode('utf-8')
        boto.log.debug(response_body)
        if response.status == 200:
            if response_body:
                return json.loads(response_body)
            # NOTE: a 200 with an empty body falls through, returning None.
        else:
            # Service errors carry a JSON body whose '__type' selects a
            # specific exception class registered in self._faults.
            json_body = json.loads(response_body)
            fault_name = json_body.get('__type', None)
            exception_class = self._faults.get(fault_name, self.ResponseError)
            raise exception_class(response.status, response.reason,
                                  body=json_body)
| 41.972597 | 79 | 0.638578 |
6efd2fc08e3f82e3345e69597451da3b28f309e2 | 12,376 | py | Python | Validation/RecoTau/test/RunValidation_cfg.py | bisnupriyasahu/cmssw | 6cf37ca459246525be0e8a6f5172c6123637d259 | [
"Apache-2.0"
] | 3 | 2018-08-24T19:10:26.000Z | 2019-02-19T11:45:32.000Z | Validation/RecoTau/test/RunValidation_cfg.py | bisnupriyasahu/cmssw | 6cf37ca459246525be0e8a6f5172c6123637d259 | [
"Apache-2.0"
] | 3 | 2018-08-23T13:40:24.000Z | 2019-12-05T21:16:03.000Z | Validation/RecoTau/test/RunValidation_cfg.py | bisnupriyasahu/cmssw | 6cf37ca459246525be0e8a6f5172c6123637d259 | [
"Apache-2.0"
] | 5 | 2018-08-21T16:37:52.000Z | 2020-01-09T13:33:17.000Z | from __future__ import print_function
#!/usr/bin/env cmsRun
import shutil
import sys
from Validation.RecoTau.ValidationOptions_cff import *
import Validation.RecoTau.RecoTauValidation_cfi as validation
process = cms.Process("TEST")
# command options defined in Validation/RecoTau/python/ValidationOptions_cfi
options.parseArguments()
checkOptionsForBadInput()
if not calledBycmsRun() and not options.gridJob:
print("Run 'cmsRun RunTauValidation_cfg.py help' for options.")
# quit here so we dont' create a bunch of directories
# if the user only wants the help
#sys.exit()
# Make sure we dont' clobber another directory! Skip in batch mode (runs from an LSF machine)
if not CMSSWEnvironmentIsCurrent() and options.batchNumber == -1 and not options.gridJob:
print("CMSSW_BASE points to a different directory, please rerun cmsenv!")
sys.exit()
# DQM store, PDT sources etc
process.load("Configuration.StandardSequences.Services_cff")
######################################
# #
# Output Info Store #
# #
######################################
"""
Data is stored in
TauID/[EventType]_[DataSource]_[Conditions][label]
"""
#outputDirName = "Validation_%s" % ReleaseVersion
outputDirName = "TauID"
outputDir = os.path.join(os.getcwd(), outputDirName)
# This is the directory where we store the stuff about our current configuration
outputBaseDir = outputDir
subDirName = ""
subDirName += "%s_%s" % (options.eventType, options.dataSource)
if options.conditions != "whatever":
subDirName += "_%s" % options.conditions.replace('::', '_')
if (options.label != "none"):
subDirName += "_" + options.label
outputDir = os.path.join(outputDir, subDirName)
# Store configuration, showtags, etc in a sub directory
configDir = os.path.join(outputDir, "Config")
if os.path.exists(outputDir) and options.batchNumber < 0:# and not options.gridJob:
print("Output directory %s already exists! OK to overwrite?" % outputDir)
while True:
input = raw_input("Please enter [y/n] ")
if (input == 'y'):
break
elif (input == 'n'):
print(" ...exiting.")
sys.exit()
if not os.path.exists(outputDir):
os.makedirs(outputDir)
if not os.path.exists(configDir):
os.makedirs(configDir)
######################################
# #
# Data Source Setup #
# #
######################################
def LoadDataCffFile(theFile):
    """Load the event-source cff module *theFile* into the cms process and
    snapshot the resulting source configuration to Config/DataSource_cff.py.

    Fix: the snapshot file handle was previously opened and never closed;
    a context manager now guarantees it is flushed and closed.
    """
    outputFileName = os.path.join(configDir, "DataSource_cff.py")
    process.load(theFile)
    with open(outputFileName, 'w') as outputFile:
        outputFile.write('import FWCore.ParameterSet.Config as cms\n')
        outputFile.write('source = %s\n' % process.source)
process.schedule = cms.Schedule()
# Check if we are simulating events - if so we need to define our generator
if options.dataSource.find('sim') != -1:
    # Simulated events: pick the generator matching the event type.
    if options.eventType == "ZTT":
        process.load("Configuration.Generator.ZTT_Tauola_All_hadronic_cfi")
    elif options.eventType == "QCD":
        process.load("Configuration.Generator.QCDForPF_cfi")

# Run on a RECO (eg RelVal)
if options.dataSource.find('recoFiles') != -1:
    myFile = options.sourceFile
    if myFile == 'none':
        myFile = "Validation.RecoTau.sources.EventSource_%s_RECO_cff" % options.eventType
        #myFile = os.path.join(ReleaseBase, "Validation/RecoTau/test", "EventSource_%s_RECO_cff.py" % options.eventType)
    LoadDataCffFile(myFile)
    # No input files configured: try the local XML database first, then DBS.
    if len(process.source.fileNames) == 0 and not options.gridJob:
        import Validation.RecoTau.DBSApi_cff as mydbs
        if os.path.isfile('SourcesDatabase.xml'):
            print("Trying to retrieve the input files from SourcesDatabase.xml...")
            xml = open('SourcesDatabase.xml','r')
            mydbs.loadXML(xml,options.eventType,process.source)
        if len(process.source.fileNames) == 0:
            print("Accessing DBS to retrieve the input files...")
            mydbs.FillSource(options.eventType,process.source)
        if len(process.source.fileNames) == 0:
            sys.exit(0)
        print(process.source)
    # check if we want to rerun PFTau
    if options.dataSource.find('PFTau') != -1:
        process.load("Configuration.StandardSequences.GeometryRecoDB_cff")
        process.load("Configuration.StandardSequences.FrontierConditions_GlobalTag_cff")
        process.load("Configuration.StandardSequences.MagneticField_cff")
        process.load("RecoTauTag.Configuration.RecoPFTauTag_cff")
        process.runPFTau = cms.Path(process.PFTau)
        process.schedule.append(process.runPFTau)
    if options.dataSource.find('CaloTau') != -1:
        process.load("Configuration.StandardSequences.GeometryRecoDB_cff")
        process.load("Configuration.StandardSequences.FrontierConditions_GlobalTag_cff")
        process.load("Configuration.StandardSequences.MagneticField_cff")
        process.load("RecoTauTag.Configuration.RecoTauTag_cff")
        process.runCaloTau = cms.Path(process.tautagging)
        process.schedule.append(process.runCaloTau)
# Run on DIGI files and re-RECO
elif options.dataSource == 'digiFiles':
    myFile = options.sourceFile
    if myFile == 'none':
        myFile = "Validation.RecoTau.EventSource_%s_DIGI_cff" % options.eventType
        #myFile = os.path.join(ReleaseBase, "Validation/RecoTau/test", "EventSource_%s_DIGI_cff.py" % options.eventType)
    LoadDataCffFile(myFile)
    # get the sequences need to redo RECO
    process.load("Validation.RecoTau.ProduceTausFromDigis_cff")
    # BUG FIX: 'proces' was a typo for 'process' and raised a NameError
    # whenever the digiFiles data source was selected.
    process.makeTausFromDigiFiles = cms.Path(process.makeTausFromDigis)
    process.schedule.append(process.makeTausFromDigiFiles)
# Generate FASTSIM DATA
elif options.dataSource == 'fastsim':
    process.load("Validation.RecoTau.ProduceTausWithFastSim_cff")
    process.fastSimTaus = cms.Path(process.makeTausWithFastSim)
    process.schedule.append(process.fastSimTaus)
# Generate FULLSIM DATA
elif options.dataSource == 'fullsim':
    process.load("Validation.RecoTau.ProduceFullSimAndDigisForTaus_cff")
    process.load("Validation.RecoTau.ProduceTausFromDigis_cff")
    process.fullSimTaus = cms.Path(process.simAndDigitizeForTaus*process.makeTausFromDigis)
    process.schedule.append(process.fullSimTaus)
# Specify conditions if desired
if options.conditions != "whatever":
process.load("Configuration.StandardSequences.FrontierConditions_GlobalTag_cff")
process.GlobalTag.globaltag = options.conditions
# have to set max events here, since it may get written by the
# dataSource cffs
process.maxEvents = cms.untracked.PSet(
input = cms.untracked.int32(options.maxEvents)
)
# Skip events, if we are running in batch mode on files
if options.batchNumber >= 0 and options.dataSource.find('Files') != -1:
process.source.skipEvents = cms.untracked.uint32(options.batchNumber*options.maxEvents)
######################################
# #
# Validation Setup #
# #
######################################
# Store the tags and CVS diff to the tags, and the current release
# only do this once in a batch job. The additional tar file is a fail safe -
# the parameters shouldn't change in outputDir.
if (options.batchNumber <= 0 ):#and not options.gridJob):
os.system("cd $CMSSW_BASE/src; \
showtags -t -r > showtags.txt; \
cvs -q diff >& diffToTags.patch;\
cvs -q diff -r %s >& diffToVanillaRelease.patch; \
tar -cvzf TagsAndDiff.tar.gz showtags.txt *.patch; \
mv showtags.txt *.patch %s; \
mv TagsAndDiff.tar.gz %s" % (ReleaseVersion, configDir, configDir))
if options.batchNumber >= 0:
# store the batch produced root files in a sub directory
outputDir = os.path.join(outputDir, "BatchJobs")
if not os.path.exists(outputDir):
os.mkdir(outputDir)
#Validation output file
outputFileNameBase = "TauVal_%s" % ReleaseVersion
if options.label != "none":
outputFileNameBase += "_%s" % options.label
outputFileNameBase += "_"
outputFileNameBase += options.eventType
if options.batchNumber >= 0:
outputFileNameBase += "_%i" % options.batchNumber
options.writeEDMFile = options.writeEDMFile.replace(".root", "_%i.root" % options.batchNumber)
outputFileNameBase += ".root"
if validation.StandardMatchingParameters.recoCuts.value() != "" and validation.StandardMatchingParameters.genCuts.value() != "":
print('Matching: cut(s) set to: reco "%s", gen "%s".' % (validation.StandardMatchingParameters.recoCuts.value(), validation.StandardMatchingParameters.genCuts.value()))
else:
if validation.StandardMatchingParameters.recoCuts.value() != "":
print('Matching: reco cut(s) set to: "%s".' % validation.StandardMatchingParameters.recoCuts.value())
if validation.StandardMatchingParameters.genCuts.value() != "":
print('Matching: gen cut(s) set to: "%s".' % validation.StandardMatchingParameters.genCuts.value())
if options.gridJob:
outputFileName = 'TauVal_GridJob.root'
else:
outputFileName = os.path.join(outputDir, outputFileNameBase)
print('The output file will be: '+outputFileName)
if options.gridJob:
cfg=open('./crab.cfg', 'r')
cfgContent=cfg.read()
if cfgContent.find(outputFileName) == -1:
print("ERROR: CRAB output file not matching the grid one!\nexiting...")
sys.exit()
process.saveTauEff = cms.EDAnalyzer("TauDQMSimpleFileSaver",
outputFileName = cms.string(outputFileName)
)
process.load("Validation.RecoTau.dataTypes.ValidateTausOn%s_cff" % options.eventType)
process.validation = cms.Path( process.produceDenominator )#getattr(process,'produceDenominator'+options.eventType) )
if options.batchNumber >= 0 or options.gridJob:
process.validation *= process.runTauValidationBatchMode #in batch mode, the efficiencies are not computed - only the num/denom
else:
process.validation *= process.runTauValidation
process.validation *= process.saveTauEff #save the output
process.schedule.append(process.validation)
if options.batchNumber >= 0:
newSeed = process.RandomNumberGeneratorService.theSource.initialSeed.value() + options.batchNumber
process.RandomNumberGeneratorService.theSource.initialSeed = cms.untracked.uint32(newSeed)
process.RandomNumberGeneratorService.generator.initialSeed = cms.untracked.uint32(newSeed)
print("I'm setting the random seed to ", newSeed)
process.load("RecoTauTag.Configuration.RecoTauTag_EventContent_cff")
TauTagValOutputCommands = cms.PSet(
outputCommands = cms.untracked.vstring('drop *',
'keep recoPFCandidates_*_*_*',
'keep *_genParticles*_*_*',
'keep *_ak5GenJets_*_*',
'keep *_tauGenJets*_*_*',
'keep *_selectedGenTauDecays*_*_*'
)
)
TauTagValOutputCommands.outputCommands.extend(process.RecoTauTagRECO.outputCommands)
# talk to output module
if options.writeEDMFile != "":
# Find where the EDM file should be written. This is set by the
# to the working directory when running jobs on lxbatch
try:
edmOutputDir = os.environ['edmOutputDir']
options.writeEDMFile = os.path.join(edmOutputDir, options.writeEDMFile)
except KeyError:
pass
process.out = cms.OutputModule("PoolOutputModule",
TauTagValOutputCommands,
verbose = cms.untracked.bool(False),
fileName = cms.untracked.string (options.writeEDMFile)
)
myOutpath = cms.EndPath(process.out)
process.schedule.append(myOutpath)
if options.myModifications != ['none']:
for aModifier in options.myModifications:
process.load(aModifier.replace('.py',''))
######################################
# #
# CFG dump #
# #
######################################
#process.Timing = cms.Service("Timing",
# useJobReport = cms.untracked.bool(True)
# )
#process.SimpleMemoryCheck = cms.Service("SimpleMemoryCheck",
# useJobReport = cms.untracked.bool(True)
# )
#if grid job end here
if not options.gridJob:
dumpFileName = "cfgDump"
if options.batchNumber >= 0:
dumpFileName += "_"
dumpFileName += str(options.batchNumber)
dumpFileName += ".py"
processDumpFile = open('%s/%s' % (configDir, dumpFileName), 'w')
print(process.dumpPython(), file=processDumpFile)
| 38.197531 | 170 | 0.687217 |
67e0213e566250fa61142a70c41535f7491d394e | 1,069 | py | Python | pulsar/tests/test_integration.py | tcpatterson/integrations-core | 3692601de09f8db60f42612b0d623509415bbb53 | [
"BSD-3-Clause"
] | null | null | null | pulsar/tests/test_integration.py | tcpatterson/integrations-core | 3692601de09f8db60f42612b0d623509415bbb53 | [
"BSD-3-Clause"
] | null | null | null | pulsar/tests/test_integration.py | tcpatterson/integrations-core | 3692601de09f8db60f42612b0d623509415bbb53 | [
"BSD-3-Clause"
] | null | null | null | # (C) Datadog, Inc. 2022-present
# All rights reserved
# Licensed under a 3-clause BSD style license (see LICENSE)
import pytest
from datadog_checks.base.constants import ServiceCheck
from datadog_checks.dev.utils import get_metadata_metrics
from .common import EXPECTED_METRICS, METRICS_URL, OPTIONAL_METRICS
pytestmark = [pytest.mark.usefixtures('dd_environment'), pytest.mark.integration]
def test_check(aggregator, dd_run_check, pulsar_check, instance):
    """Run the Pulsar check once against the dd environment and verify
    the health service check, expected metrics and their tags."""
    check = pulsar_check(instance)
    dd_run_check(check)

    # The OpenMetrics endpoint must have been reachable.
    aggregator.assert_service_check('pulsar.openmetrics.health', ServiceCheck.OK)
    # Every expected metric must be present and carry the endpoint and
    # cluster tags.
    for metric in EXPECTED_METRICS:
        aggregator.assert_metric(metric)
        aggregator.assert_metric_has_tag(metric, f'endpoint:{METRICS_URL}')
        aggregator.assert_metric_has_tag(metric, 'pulsar_cluster:standalone')

    # Optional metrics may or may not be emitted; at_least=0 registers
    # them so assert_all_metrics_covered below does not fail.
    for metric in OPTIONAL_METRICS:
        aggregator.assert_metric(metric, at_least=0)

    aggregator.assert_all_metrics_covered()
    # Submitted metrics must agree with the integration's metadata,
    # including the submission type.
    aggregator.assert_metrics_using_metadata(get_metadata_metrics(), check_submission_type=True)
14878f31f8efdee12e633a2d2ceb6b32d52486d9 | 807 | py | Python | soma-matrizes.py | DariaMachado/Algoritmos_Python | 1ad8827f1bb0b7298309d457c995fbd5a2c066fd | [
"MIT"
] | 1 | 2021-07-21T01:53:58.000Z | 2021-07-21T01:53:58.000Z | soma-matrizes.py | DariaMachado/Algoritmos_Python | 1ad8827f1bb0b7298309d457c995fbd5a2c066fd | [
"MIT"
] | null | null | null | soma-matrizes.py | DariaMachado/Algoritmos_Python | 1ad8827f1bb0b7298309d457c995fbd5a2c066fd | [
"MIT"
] | null | null | null | m: int; n: int; j: int; i: int
m = int(input("Quantas linhas vai ter cada matriz? "))
n = int(input("Quantas colunas vai ter cada matriz? "))
A: [[int]] = [[0 for x in range(n)] for x in range(m)]
B: [[int]] = [[0 for x in range(n)] for x in range(m)]
C: [[int]] = [[0 for x in range(n)] for x in range(m)]
print("Digite os valores da matriz A:")
for i in range(0, m):
for j in range(0, n):
A[i][j] = int(input(f"Elemento [{i},{j}]: "))
print("Digite os valores da matriz B:")
for i in range(0, m):
for j in range(0, n):
B[i][j] = int(input(f"Elemento [{i},{j}]: "))
for i in range(0, m):
for j in range(0, n):
C[i][j] = A[i][j] + B[i][j]
print("MATRIZ SOMA:")
for i in range(0, m):
for j in range(0, n):
print(f"{C[i][j]} ", end="")
print()
| 22.416667 | 55 | 0.52912 |
482d7b7d50aa628d80a45fc900b480866541b5d4 | 319 | py | Python | 1798_MaximumNumberOfConsecutiveValuesYouCanMake.py | yingzhuo1994/LeetCode | 636eef90867d21e3439d258ec99fbb8e5ad5a742 | [
"MIT"
] | null | null | null | 1798_MaximumNumberOfConsecutiveValuesYouCanMake.py | yingzhuo1994/LeetCode | 636eef90867d21e3439d258ec99fbb8e5ad5a742 | [
"MIT"
] | null | null | null | 1798_MaximumNumberOfConsecutiveValuesYouCanMake.py | yingzhuo1994/LeetCode | 636eef90867d21e3439d258ec99fbb8e5ad5a742 | [
"MIT"
] | null | null | null | class Solution(object):
def getMaximumConsecutive(self, coins):
"""
:type coins: List[int]
:rtype: int
"""
coins.sort()
amount = 0
for coin in coins:
if coin > amount + 1:
break
amount += coin
return amount + 1
| 22.785714 | 43 | 0.460815 |
ad48860d113cf22cfab3960c0135ebcba4a27747 | 1,500 | py | Python | gaservices/utils/gaconstants.py | MicrohexHQ/garecovery | 4455a2fddcd4f15f098138cc1aa74d26fd427a45 | [
"MIT"
] | 1 | 2020-08-27T05:00:42.000Z | 2020-08-27T05:00:42.000Z | gaservices/utils/gaconstants.py | johnmahlon/garecovery | 62aeb66e3fa674843c19a81c45e2ca35a693df97 | [
"MIT"
] | null | null | null | gaservices/utils/gaconstants.py | johnmahlon/garecovery | 62aeb66e3fa674843c19a81c45e2ca35a693df97 | [
"MIT"
] | null | null | null | """ Constant values for recovery/BTC """
import decimal
import sys
PY3 = sys.version_info.major > 2  # True when running under Python 3
SATOSHI_PER_BTC = decimal.Decimal(1e8)  # satoshi per BTC (1e8 is float-exact)
MAX_BIP125_RBF_SEQUENCE = 0xfffffffd  # highest nSequence still signalling BIP125 opt-in RBF
# BIP32 hardened derivation flag
HARDENED = 0x80000000
SUPPORTED_NETWORKS = ['mainnet', 'testnet']  # networks accepted by the tool
# Base58 address version bytes for P2PKH/P2SH outputs.
P2PKH_MAINNET = 0x00
P2SH_MAINNET = 0x05
P2PKH_TESTNET = 0x6f
P2SH_TESTNET = 0xc4

ADDR_VERSIONS_MAINNET = [P2PKH_MAINNET, P2SH_MAINNET]
ADDR_VERSIONS_TESTNET = [P2PKH_TESTNET, P2SH_TESTNET]

# Address family prefixes ('bc'/'tb' -- presumably the bech32
# human-readable parts for segwit addresses).
ADDR_FAMILY_MAINNET = 'bc'
ADDR_FAMILY_TESTNET = 'tb'


def get_address_versions(network):
    """Return the base58 version bytes for *network* ('mainnet'/'testnet')."""
    versions = {
        'mainnet': ADDR_VERSIONS_MAINNET,
        'testnet': ADDR_VERSIONS_TESTNET,
    }
    return versions[network]


def get_address_family(network):
    """Return the address prefix for *network* ('mainnet'/'testnet')."""
    families = {
        'mainnet': ADDR_FAMILY_MAINNET,
        'testnet': ADDR_FAMILY_TESTNET,
    }
    return families[network]
# GreenAddress script type for standard p2sh multisig UTXOs
P2SH_FORTIFIED_OUT = 10
# GreenAddress script type for p2sh-p2wsh multisig segwit UTXOs
P2SH_P2WSH_FORTIFIED_OUT = 14
# GreenAddress xpubs for mainnet/testnet
GA_KEY_DATA_MAINNET = {
    'chaincode': 'e9a563d68686999af372a33157209c6860fe79197a4dafd9ec1dbaa49523351d',
    'pubkey': '0322c5f5c9c4b9d1c3e22ca995e200d724c2d7d8b6953f7b38fddf9296053c961f',
}
GA_KEY_DATA_TESTNET = {
    'chaincode': 'b60befcc619bb1c212732770fe181f2f1aa824ab89f8aab49f2e13e3a56f0f04',
    'pubkey': '036307e560072ed6ce0aa5465534fb5c258a2ccfbc257f369e8e7a181b16d897b3',
}


def get_ga_key_data(network):
    """Return the GreenAddress service key material (hex 'chaincode' and
    'pubkey') for *network* ('mainnet'/'testnet')."""
    key_data = {
        'mainnet': GA_KEY_DATA_MAINNET,
        'testnet': GA_KEY_DATA_TESTNET,
    }
    return key_data[network]
| 28.846154 | 88 | 0.806 |
1db728908137b0c1242498dfdb2e794e6ef8b6f9 | 4,278 | py | Python | third_party/WebKit/Source/build/scripts/make_style_builder.py | google-ar/chromium | 2441c86a5fd975f09a6c30cddb57dfb7fc239699 | [
"Apache-2.0",
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | 777 | 2017-08-29T15:15:32.000Z | 2022-03-21T05:29:41.000Z | third_party/WebKit/Source/build/scripts/make_style_builder.py | harrymarkovskiy/WebARonARCore | 2441c86a5fd975f09a6c30cddb57dfb7fc239699 | [
"Apache-2.0",
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | 66 | 2017-08-30T18:31:18.000Z | 2021-08-02T10:59:35.000Z | third_party/WebKit/Source/build/scripts/make_style_builder.py | harrymarkovskiy/WebARonARCore | 2441c86a5fd975f09a6c30cddb57dfb7fc239699 | [
"Apache-2.0",
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | 123 | 2017-08-30T01:19:34.000Z | 2022-03-17T22:55:31.000Z | #!/usr/bin/env python
# Copyright (C) 2013 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import sys
import css_properties
import in_generator
from name_utilities import lower_first
import template_expander
class StyleBuilderWriter(css_properties.CSSProperties):
    """Code generator that emits StyleBuilderFunctions.h/.cpp and
    StyleBuilder.cpp from the parsed CSS property definitions."""

    # Extra Jinja filters made available to the templates below.
    filters = {
        'lower_first': lower_first,
    }

    def __init__(self, in_file_path):
        super(StyleBuilderWriter, self).__init__(in_file_path)
        # Map of output file name -> generator method; consumed by the
        # in_generator framework when writing the outputs.
        self._outputs = {('StyleBuilderFunctions.h'): self.generate_style_builder_functions_h,
                         ('StyleBuilderFunctions.cpp'): self.generate_style_builder_functions_cpp,
                         ('StyleBuilder.cpp'): self.generate_style_builder,
                         }

        # Fill in derived defaults for each property entry that did not
        # specify them explicitly in the .in file.
        def set_if_none(property, key, value):
            if property[key] is None:
                property[key] = value

        for property in self._properties.values():
            upper_camel = property['upper_camel_name']
            # Method names drop the 'Webkit' prefix.
            set_if_none(property, 'name_for_methods', upper_camel.replace('Webkit', ''))
            name = property['name_for_methods']
            simple_type_name = str(property['type_name']).split('::')[-1]
            set_if_none(property, 'type_name', 'E' + name)
            # Use 'getX' when the plain getter name would equal the type
            # name -- presumably to avoid a name collision; TODO confirm.
            set_if_none(property, 'getter', lower_first(name) if simple_type_name != name else 'get' + name)
            set_if_none(property, 'setter', 'set' + name)
            set_if_none(property, 'inherited', False)
            set_if_none(property, 'initial', 'initial' + name)
            # 'custom_all' is shorthand for custom initial/inherit/value.
            if property['custom_all']:
                property['custom_initial'] = True
                property['custom_inherit'] = True
                property['custom_value'] = True
            if property['inherited']:
                property['is_inherited_setter'] = 'set' + name + 'IsInherited'
            # Apply functions are only declared for plain properties the
            # style builder handles directly (no handlers/longhands etc.).
            property['should_declare_functions'] = not property['use_handlers_for'] and not property['longhands'] \
                and not property['direction_aware'] and not property['builder_skip'] \
                and not property['descriptor_only']

    @template_expander.use_jinja('StyleBuilderFunctions.h.tmpl',
                                 filters=filters)
    def generate_style_builder_functions_h(self):
        # Template context for the .h output.
        return {
            'properties': self._properties,
        }

    @template_expander.use_jinja('StyleBuilderFunctions.cpp.tmpl',
                                 filters=filters)
    def generate_style_builder_functions_cpp(self):
        # Template context for the .cpp output.
        return {
            'properties': self._properties,
        }

    @template_expander.use_jinja('StyleBuilder.cpp.tmpl', filters=filters)
    def generate_style_builder(self):
        # Template context for StyleBuilder.cpp.
        return {
            'properties': self._properties,
        }
# Script entry point: drive the writer over the command-line arguments
# (argument handling is delegated to in_generator.Maker).
if __name__ == '__main__':
    in_generator.Maker(StyleBuilderWriter).main(sys.argv)
| 44.103093 | 115 | 0.679523 |
5128219ea540952a0c85019d777756117a9d97ef | 805 | py | Python | scraper/model/schedule/series.py | mathsdada/mathsdada | dfffcc83bbf2cb68d95978ba6bec000d3ff9d3e7 | [
"MIT"
] | null | null | null | scraper/model/schedule/series.py | mathsdada/mathsdada | dfffcc83bbf2cb68d95978ba6bec000d3ff9d3e7 | [
"MIT"
] | null | null | null | scraper/model/schedule/series.py | mathsdada/mathsdada | dfffcc83bbf2cb68d95978ba6bec000d3ff9d3e7 | [
"MIT"
] | null | null | null | from scraper.common_util import Common
class Series:
def __init__(self, title, link, category):
self.title = title
self.id = Common.get_id_from_link(link)
self.gender = "Men"
self.category = category
if "women" in title.lower():
self.gender = "Women"
self.format = []
self.link = link
self.matches_list = []
self.__extract_series_info()
def __extract_series_info(self):
soup = Common.get_soup_object(self.link)
self.format = soup.find('div', class_='cb-col-100 cb-col cb-nav-main cb-bg-white')\
.find('div').text.split(".")[0]
def add_match(self, match):
self.matches_list.append(match)
def get_matches_list(self):
return self.matches_list
| 28.75 | 91 | 0.598758 |
58f306e9763e48d4243788c1a6f8442a7ea8f559 | 14,677 | py | Python | contrib/migrate.py | TinlokLee/Django-Celery | 320b36ee2091aa876ecf92003af6b8e11b3041ed | [
"Apache-2.0"
] | null | null | null | contrib/migrate.py | TinlokLee/Django-Celery | 320b36ee2091aa876ecf92003af6b8e11b3041ed | [
"Apache-2.0"
] | null | null | null | contrib/migrate.py | TinlokLee/Django-Celery | 320b36ee2091aa876ecf92003af6b8e11b3041ed | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
"""Message migration tools (Broker <-> Broker)."""
from __future__ import absolute_import, print_function, unicode_literals
import socket
from functools import partial
from itertools import cycle, islice
from kombu import Queue, eventloop
from kombu.common import maybe_declare
from kombu.utils.encoding import ensure_bytes
from celery.app import app_or_default
from celery.five import python_2_unicode_compatible, string, string_t
from celery.utils.nodenames import worker_direct
from celery.utils.text import str_to_list
__all__ = (
'StopFiltering', 'State', 'republish', 'migrate_task',
'migrate_tasks', 'move', 'task_id_eq', 'task_id_in',
'start_filter', 'move_task_by_id', 'move_by_idmap',
'move_by_taskmap', 'move_direct', 'move_direct_by_id',
)
MOVING_PROGRESS_FMT = """\
Moving task {state.filtered}/{state.strtotal}: \
{body[task]}[{body[id]}]\
"""
class StopFiltering(Exception):
    """Semi-predicate used to signal filter stop.

    Raised from within a filter callback (see :func:`move`, which raises
    it once ``limit`` messages have been filtered) to stop consuming.
    """
@python_2_unicode_compatible
class State(object):
    """Migration progress state."""

    # Number of messages processed so far (maintained by the consumer
    # callbacks -- presumably incremented elsewhere; not in this view).
    count = 0
    # Number of messages matched/moved by the filter (see move()).
    filtered = 0
    # Approximate total number of messages; 0 means unknown.
    total_apx = 0

    @property
    def strtotal(self):
        # Human-readable total: '?' when no approximation is available.
        if not self.total_apx:
            return '?'
        return string(self.total_apx)

    def __repr__(self):
        # In filtering mode show only the filtered count ('^N');
        # otherwise show progress as 'count/total'.
        if self.filtered:
            return '^{0.filtered}'.format(self)
        return '{0.count}/{0.strtotal}'.format(self)
def republish(producer, message, exchange=None, routing_key=None,
              remove_props=('application_headers',
                            'content_type',
                            'content_encoding',
                            'headers')):
    """Republish message.

    Arguments:
        producer: kombu producer used to publish the message.
        message: the message to republish; its raw body is reused as-is.
        exchange (str): optional destination exchange; defaults to the
            exchange the message was originally delivered to.
        routing_key (str): optional destination routing key; defaults to
            the original routing key.
        remove_props (Sequence[str]): property names stripped before
            republishing (they are regenerated by the new publish).
            Fix: the default is now a tuple -- it was previously a
            mutable list, which is an unsafe default-argument idiom.
    """
    body = ensure_bytes(message.body)  # use raw message body.
    info, headers, props = (message.delivery_info,
                            message.headers, message.properties)
    exchange = info['exchange'] if exchange is None else exchange
    routing_key = info['routing_key'] if routing_key is None else routing_key
    ctype, enc = message.content_type, message.content_encoding
    # remove compression header, as this will be inserted again
    # when the message is recompressed.
    compression = headers.pop('compression', None)

    for key in remove_props:
        props.pop(key, None)

    producer.publish(ensure_bytes(body), exchange=exchange,
                     routing_key=routing_key, compression=compression,
                     headers=headers, content_type=ctype,
                     content_encoding=enc, **props)
def migrate_task(producer, body_, message, queues=None):
    """Republish a single task message, remapping its destination.

    *queues* maps old exchange/routing-key names to replacements;
    unmapped names resolve to ``None`` and :func:`republish` then falls
    back to the message's original delivery info.
    """
    queues = queues or {}
    delivery = message.delivery_info
    republish(producer, message,
              exchange=queues.get(delivery['exchange']),
              routing_key=queues.get(delivery['routing_key']))
def filter_callback(callback, tasks):
    """Wrap *callback* so it only runs for task names found in *tasks*.

    A falsy *tasks* (``None`` or empty) disables filtering entirely:
    every message is passed straight through to *callback*.
    """
    def filtered(body, message):
        if not tasks or body['task'] in tasks:
            return callback(body, message)
        return None
    return filtered
def migrate_tasks(source, dest, migrate=migrate_task, app=None,
                  queues=None, **kwargs):
    """Migrate tasks from one broker to another."""
    app = app_or_default(app)
    queues = prepare_queues(queues)
    # Producer bound to the *destination* broker; auto_declare is off so
    # destinations can be declared with remapped names (see below).
    producer = app.amqp.Producer(dest, auto_declare=False)
    migrate = partial(migrate, producer, queues=queues)

    def on_declare_queue(queue):
        # Declare the destination queue, remapping queue name, routing
        # key and exchange name according to the `queues` mapping; names
        # absent from the mapping are kept as-is.
        new_queue = queue(producer.channel)
        new_queue.name = queues.get(queue.name, queue.name)
        if new_queue.routing_key == queue.name:
            new_queue.routing_key = queues.get(queue.name,
                                               new_queue.routing_key)
        if new_queue.exchange.name == queue.name:
            new_queue.exchange.name = queues.get(queue.name, queue.name)
        new_queue.declare()

    return start_filter(app, source, migrate, queues=queues,
                        on_declare_queue=on_declare_queue, **kwargs)
def _maybe_queue(app, q):
    """Coerce *q* to a queue: string names are looked up in the app's
    configured queues; queue objects pass through unchanged."""
    return app.amqp.queues[q] if isinstance(q, string_t) else q
def move(predicate, connection=None, exchange=None, routing_key=None,
         source=None, app=None, callback=None, limit=None, transform=None,
         **kwargs):
    """Find tasks by filtering them and move the tasks to a new queue.

    Arguments:
        predicate (Callable): Filter function used to decide the messages
            to move.  Must accept the standard signature of ``(body, message)``
            used by Kombu consumer callbacks.  If the predicate wants the
            message to be moved it must return either:

            1) a tuple of ``(exchange, routing_key)``, or

            2) a :class:`~kombu.entity.Queue` instance, or

            3) any other true value means the specified
                ``exchange`` and ``routing_key`` arguments will be used.
        connection (kombu.Connection): Custom connection to use.
        source: List[Union[str, kombu.Queue]]: Optional list of source
            queues to use instead of the default (queues
            in :setting:`task_queues`).  This list can also contain
            :class:`~kombu.entity.Queue` instances.
        exchange (str, kombu.Exchange): Default destination exchange.
        routing_key (str): Default destination routing key.
        limit (int): Limit number of messages to filter.
        callback (Callable): Callback called after message moved,
            with signature ``(state, body, message)``.
        transform (Callable): Optional function to transform the return
            value (destination) of the filter function.

    Also supports the same keyword arguments as :func:`start_filter`.

    To demonstrate, the :func:`move_task_by_id` operation can be implemented
    like this:

    .. code-block:: python

        def is_wanted_task(body, message):
            if body['id'] == wanted_id:
                return Queue('foo', exchange=Exchange('foo'),
                             routing_key='foo')

        move(is_wanted_task)

    or with a transform:

    .. code-block:: python

        def transform(value):
            if isinstance(value, string_t):
                return Queue(value, Exchange(value), value)
            return value

        move(is_wanted_task, transform=transform)

    Note:
        The predicate may also return a tuple of ``(exchange, routing_key)``
        to specify the destination to where the task should be moved,
        or a :class:`~kombu.entitiy.Queue` instance.
        Any other true value means that the task will be moved to the
        default exchange/routing_key.
    """
    app = app_or_default(app)
    queues = [_maybe_queue(app, queue) for queue in source or []] or None
    with app.connection_or_acquire(connection, pool=False) as conn:
        producer = app.amqp.Producer(conn)
        state = State()

        def on_task(body, message):
            ret = predicate(body, message)
            if ret:
                if transform:
                    ret = transform(ret)
                if isinstance(ret, Queue):
                    # Destination given as a Queue: ensure it exists, then
                    # publish to its exchange/routing key.
                    maybe_declare(ret, conn.default_channel)
                    ex, rk = ret.exchange.name, ret.routing_key
                else:
                    # Tuple or plain truthy value: fall back to the default
                    # exchange/routing_key arguments (see expand_dest).
                    ex, rk = expand_dest(ret, exchange, routing_key)
                republish(producer, message,
                          exchange=ex, routing_key=rk)
                # Ack only after the republish so the message is not lost
                # if publishing raises.
                message.ack()

                state.filtered += 1
                if callback:
                    callback(state, body, message)
                if limit and state.filtered >= limit:
                    raise StopFiltering()

        return start_filter(app, conn, on_task, consume_from=queues, **kwargs)
def expand_dest(ret, exchange, routing_key):
    """Resolve a filter predicate's return value into ``(exchange, routing_key)``.

    If *ret* unpacks into exactly two items those are used as the
    destination; any other value falls back to the default
    *exchange*/*routing_key* pair.
    """
    try:
        dest_exchange, dest_routing_key = ret
    except (TypeError, ValueError):
        # Not a two-item destination (e.g. True/None): use the defaults.
        return exchange, routing_key
    return dest_exchange, dest_routing_key
def task_id_eq(task_id, body, message):
    """Predicate: does the message body's ``id`` equal *task_id*?"""
    wanted = body['id']
    return wanted == task_id
def task_id_in(ids, body, message):
    """Predicate: is the message body's ``id`` a member of *ids*?"""
    candidate = body['id']
    return candidate in ids
def prepare_queues(queues):
    """Normalize the *queues* argument into a ``{source: destination}`` dict.

    Accepts a comma-separated string of ``src:dst`` entries, a list of
    such entries, an already-built mapping, or ``None`` (no mapping).
    """
    if isinstance(queues, string_t):
        queues = queues.split(',')
    if isinstance(queues, list):
        # cycle + islice pads a bare "name" entry to ("name", "name"),
        # so a queue without an explicit destination maps onto itself.
        queues = dict(tuple(islice(cycle(q.split(':')), None, 2))
                      for q in queues)
    if queues is None:
        queues = {}
    return queues
class Filterer(object):
    """Consume messages from a set of queues, passing each through a filter.

    Progress is tracked in a :class:`State` instance.  Consumption stops
    when ``limit`` messages have been seen, on socket timeout (unless
    ``forever`` is set), or when :exc:`StopFiltering` is raised.
    """

    def __init__(self, app, conn, filter,
                 limit=None, timeout=1.0,
                 ack_messages=False, tasks=None, queues=None,
                 callback=None, forever=False, on_declare_queue=None,
                 consume_from=None, state=None, accept=None, **kwargs):
        self.app = app
        self.conn = conn
        self.filter = filter
        self.limit = limit
        self.timeout = timeout
        self.ack_messages = ack_messages
        # Restrict processing to these task names; empty set means all tasks.
        self.tasks = set(str_to_list(tasks) or [])
        self.queues = prepare_queues(queues)
        self.callback = callback
        self.forever = forever
        self.on_declare_queue = on_declare_queue
        # Consume either from the explicit list or from the queue-map keys.
        self.consume_from = [
            _maybe_queue(self.app, q)
            for q in consume_from or list(self.queues)
        ]
        self.state = state or State()
        self.accept = accept

    def start(self):
        """Run the event loop until done; return the final :class:`State`."""
        # start migrating messages.
        with self.prepare_consumer(self.create_consumer()):
            try:
                for _ in eventloop(self.conn,  # pragma: no cover
                                   timeout=self.timeout,
                                   ignore_timeouts=self.forever):
                    pass
            except socket.timeout:
                pass
            except StopFiltering:
                pass
        return self.state

    def update_state(self, body, message):
        """Count a processed message; stop filtering once the limit is hit."""
        self.state.count += 1
        if self.limit and self.state.count >= self.limit:
            raise StopFiltering()

    def ack_message(self, body, message):
        """Acknowledge *message* on the broker."""
        message.ack()

    def create_consumer(self):
        """Build the TaskConsumer bound to the configured source queues."""
        return self.app.amqp.TaskConsumer(
            self.conn,
            queues=self.consume_from,
            accept=self.accept,
        )

    def prepare_consumer(self, consumer):
        """Register filter/state/ack/user callbacks and declare queues."""
        filter = self.filter
        update_state = self.update_state
        ack_message = self.ack_message
        if self.tasks:
            # Wrap every callback so it only fires for the selected tasks.
            filter = filter_callback(filter, self.tasks)
            update_state = filter_callback(update_state, self.tasks)
            ack_message = filter_callback(ack_message, self.tasks)
        consumer.register_callback(filter)
        consumer.register_callback(update_state)
        if self.ack_messages:
            # Bugfix: register the (possibly task-filtered) wrapper instead
            # of the raw bound method, so that when a task-name filter is
            # active, messages for non-matching tasks are not acked.
            consumer.register_callback(ack_message)
        if self.callback is not None:
            callback = partial(self.callback, self.state)
            if self.tasks:
                callback = filter_callback(callback, self.tasks)
            consumer.register_callback(callback)
        self.declare_queues(consumer)
        return consumer

    def declare_queues(self, consumer):
        # declare all queues on the new broker.
        for queue in consumer.queues:
            if self.queues and queue.name not in self.queues:
                continue
            if self.on_declare_queue is not None:
                self.on_declare_queue(queue)
            try:
                # Passive declare: only inspect the queue to read its depth.
                _, mcount, _ = queue(
                    consumer.channel).queue_declare(passive=True)
                if mcount:
                    self.state.total_apx += mcount
            except self.conn.channel_errors:
                pass
def start_filter(app, conn, filter, limit=None, timeout=1.0,
                 ack_messages=False, tasks=None, queues=None,
                 callback=None, forever=False, on_declare_queue=None,
                 consume_from=None, state=None, accept=None, **kwargs):
    """Consume and filter task messages, returning the final state.

    Thin functional wrapper around :class:`Filterer`: constructs one with
    the given options and immediately starts it.
    """
    filterer = Filterer(
        app, conn, filter,
        limit=limit,
        timeout=timeout,
        ack_messages=ack_messages,
        tasks=tasks,
        queues=queues,
        callback=callback,
        forever=forever,
        on_declare_queue=on_declare_queue,
        consume_from=consume_from,
        state=state,
        accept=accept,
        **kwargs)
    return filterer.start()
def move_task_by_id(task_id, dest, **kwargs):
    """Find a single task by id and move it to another queue.

    Arguments:
        task_id (str): Id of the task to find and move.
        dest: (str, kombu.Queue): Destination queue.
        **kwargs (Any): Also supports the same keyword
            arguments as :func:`move`.
    """
    mapping = {task_id: dest}
    return move_by_idmap(mapping, **kwargs)
def move_by_idmap(map, **kwargs):
    """Move tasks by matching from a ``task_id: queue`` mapping.

    Where ``queue`` is the queue to move the matching task to.

    Example:
        >>> move_by_idmap({
        ...     '5bee6e82-f4ac-468e-bd3d-13e8600250bc': Queue('name'),
        ...     'ada8652d-aef3-466b-abd2-becdaf1b82b3': Queue('name'),
        ...     '3a2b140d-7db1-41ba-ac90-c36a0ef4ab1f': Queue('name')},
        ...   queues=['hipri'])
    """
    lookup = map.get

    def task_id_in_map(body, message):
        # Returns the destination queue (truthy) when the id matches.
        return lookup(body['id'])

    # Limiting to len(map) lets consumption stop as soon as every
    # requested task has been found.
    return move(task_id_in_map, limit=len(map), **kwargs)
def move_by_taskmap(map, **kwargs):
    """Move tasks by matching from a ``task_name: queue`` mapping.

    ``queue`` is the queue to move the task to.

    Example:
        >>> move_by_taskmap({
        ...     'tasks.add': Queue('name'),
        ...     'tasks.mul': Queue('name'),
        ... })
    """
    lookup = map.get

    def task_name_in_map(body, message):
        # Returns the destination queue (truthy) when the name matches.
        return lookup(body['task'])

    return move(task_name_in_map, **kwargs)
def filter_status(state, body, message, **kwargs):
    """Progress callback: print one formatted status line per moved message."""
    print(MOVING_PROGRESS_FMT.format(state=state, body=body, **kwargs))
# Convenience variants of the move helpers that route matched tasks to a
# worker's direct queue by applying the ``worker_direct`` transform.
move_direct = partial(move, transform=worker_direct)
move_direct_by_id = partial(move_task_by_id, transform=worker_direct)
move_direct_by_idmap = partial(move_by_idmap, transform=worker_direct)
move_direct_by_taskmap = partial(move_by_taskmap, transform=worker_direct)
| 35.366265 | 80 | 0.597057 |
b73a6465a185230781c222b9a44cc9e483c1ecd7 | 128 | py | Python | docs/test/test_docs.py | Tismas/bigflow | 6a4a14616d66beeaf45700ea340c97d797a1f9e5 | [
"Apache-2.0"
] | 63 | 2020-08-15T19:02:06.000Z | 2022-03-29T16:19:00.000Z | docs/test/test_docs.py | Tismas/bigflow | 6a4a14616d66beeaf45700ea340c97d797a1f9e5 | [
"Apache-2.0"
] | 133 | 2020-08-18T03:51:05.000Z | 2022-03-05T13:43:22.000Z | docs/test/test_docs.py | Tismas/bigflow | 6a4a14616d66beeaf45700ea340c97d797a1f9e5 | [
"Apache-2.0"
] | 10 | 2020-08-25T05:19:31.000Z | 2022-02-03T10:33:41.000Z | from unittest import TestCase
class PassingTestCase(TestCase):
def test_should_pass(self):
self.assertTrue(True)
| 16 | 32 | 0.742188 |
8cfb717129028d671892ac27985d2d989ae3eb81 | 11,372 | py | Python | deepmath/deephol/train/architectures.py | LaudateCorpus1/deepmath | b5b721f54de1d5d6a02d78f5da5995237f9995f9 | [
"Apache-2.0"
] | 830 | 2016-11-07T21:46:27.000Z | 2022-03-23T08:01:03.000Z | deepmath/deephol/train/architectures.py | LaudateCorpus1/deepmath | b5b721f54de1d5d6a02d78f5da5995237f9995f9 | [
"Apache-2.0"
] | 26 | 2016-11-07T22:06:31.000Z | 2022-02-16T00:18:29.000Z | deepmath/deephol/train/architectures.py | LaudateCorpus1/deepmath | b5b721f54de1d5d6a02d78f5da5995237f9995f9 | [
"Apache-2.0"
] | 168 | 2016-11-07T21:48:55.000Z | 2022-03-19T02:47:14.000Z | """Architecture functions for HOLparam models."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import tensorflow as tf
from deepmath.deephol.train import losses
from deepmath.deephol.train import utils
from deepmath.deephol.train import wavenet
# Module-level handle on the TensorFlow command-line flags object.
FLAGS = tf.flags.FLAGS

# Shorthand for the Estimator TRAIN mode key; used below to gate dropout.
TRAIN = tf.estimator.ModeKeys.TRAIN
def get_vocab_embedding(embedding_str, params):
    """Return the float32 vocabulary embedding variable named *embedding_str*.

    Shape is [params.vocab_size, params.word_embedding_size]; the variable
    is created or reused according to the surrounding variable scope.
    """
    embedding_shape = (params.vocab_size, params.word_embedding_size)
    return tf.get_variable(embedding_str, shape=embedding_shape, dtype=tf.float32)
def _pad_up_to(value, size, axis, name=None):
    """Pad a tensor with zeros on the right along axis to at least the given size.

    Args:
      value: Tensor to pad.
      size: Minimum size along axis.
      axis: A nonnegative integer.
      name: Optional name for this operation.

    Returns:
      Padded value.
    """
    with tf.name_scope(name, 'pad_up_to') as name:
        value = tf.convert_to_tensor(value, name='value')
        axis = tf.convert_to_tensor(axis, name='axis')
        # relu clamps to zero when the tensor is already long enough.
        need = tf.nn.relu(size - tf.shape(value)[axis])
        # Build a [rank, 2] paddings matrix that is zero everywhere except
        # entry [axis, 1] = need (i.e. pad only on the right of `axis`).
        ids = tf.stack([tf.stack([axis, 1])])
        paddings = tf.sparse_to_dense(ids, tf.stack([tf.rank(value), 2]), need)
        padded = tf.pad(value, paddings, name=name)
        # Fix shape inference: the padded axis becomes dynamic (None) while
        # all other static dimensions are preserved.
        axis = tf.contrib.util.constant_value(axis)
        shape = value.get_shape()
        if axis is not None and shape.ndims is not None:
            shape = shape.as_list()
            shape[axis] = None
            padded.set_shape(shape)
        return padded
def _pad_to_multiple(value, size, axis, name=None):
    """Pad a tensor with zeros on the right to a multiple of the given size.

    Args:
      value: Tensor to pad.
      size: The result will be a multiple of `size` along `axis`.
      axis: A nonnegative integer.
      name: Optional name for this operation.

    Returns:
      Padded value.
    """
    with tf.name_scope(name, 'pad_to_multiple') as name:
        length = tf.shape(value)[axis]
        # Floor-dividing by -size and negating again computes
        # ceil(length / size) * size without a separate ceil op.
        new_length = length // -size * -size  # Round up to multiple of size
        return _pad_up_to(value, size=new_length, axis=axis, name=name)
def wavenet_encoding(net, params, mode):
    """Embed a given input tensor using multiple wavenet blocks.

    Arguments:
      net: input tensor of shape [batch, text_length, word_embedding_size]
      params: Hyperparameters.
      mode: Estimator mode.

    Returns:
      output: output tensor of shape [batch, 1, text length, hidden_size]
    """
    # Project word embeddings up/down to the hidden size when they differ.
    if params.word_embedding_size != params.hidden_size:
        net = tf.layers.dense(net, params.hidden_size, activation=None)
    # Dilations double per layer, so pad the sequence length to a multiple
    # of 2**wavenet_layers.
    net = _pad_to_multiple(net, 2**params.wavenet_layers, axis=1)
    # Insert a dummy spatial dimension so 2-D conv machinery applies.
    net = tf.expand_dims(net, 2)
    if params.input_keep_prob < 1.0 and mode == TRAIN:
        # Timestep-level dropout: noise_shape broadcasts one mask value per
        # [batch, position], dropping whole embedding vectors at once.
        net = tf.nn.dropout(
            net,
            rate=(1.0 - params.input_keep_prob),
            noise_shape=(tf.shape(net)[0], tf.shape(net)[1], 1, 1))
    layer_keep_prob = params.layer_keep_prob
    if mode != TRAIN:
        # Disable intra-block dropout outside of training.
        layer_keep_prob = 1.0
    for _ in range(params.wavenet_blocks):
        net = wavenet.wavenet_block(
            net,
            num_layers=params.wavenet_layers,
            depth=params.wavenet_depth,
            comb_weight=params.layer_comb_weight,
            keep_prob=layer_keep_prob)
    return net
class EncodingSpec(
    collections.namedtuple(
        'EncodingSpec',
        ['enc', 'dist', 'pfstate_enc', 'thm_enc', 'att_key_sim'])):
    """Immutable record describing the outputs of an encoder.

    Fields:
      enc: Encoding of (pfstate, thm), possibly drawn from a learned
        distribution.
      dist: Conditional distribution, trained by a regularizer.
      pfstate_enc: Encoding of the proof state (goal only or goal with
        context).
      thm_enc: Encoding of the theorem.
      att_key_sim: Similarities of attention keys in the encoder.
    """

    __slots__ = ()

    def __new__(cls,
                enc=None,
                dist=None,
                pfstate_enc=None,
                thm_enc=None,
                att_key_sim=None):
        # Every field defaults to None so callers may fill any subset.
        values = (enc, dist, pfstate_enc, thm_enc, att_key_sim)
        return super(EncodingSpec, cls).__new__(cls, *values)
def dilated_cnn_goal_encoder(features, labels, mode, params, config):
    """Dilated convolution network encoding the goal tokens.

    Intermediate tensors are published via tf.add_to_collection so the
    predictor can retrieve them from the exported graph.

    Args:
      features: goal and theorem pair. goal_ids has shape [batch_size, length of
        longest goal]
      labels: labels are unused.
      mode: train or eval.
      params: hyperparameters
      config: configuration object

    Returns:
      Encoding for the goal. [batch_size * (1 + ratio_neg_examples), hidden_size]
    """
    del labels, config  # Unused by this encoder
    # goal_ids shape is [batch_size, length of goal]
    tf.add_to_collection('goal_ids', features['goal_ids'])
    goal_embedding = get_vocab_embedding('goal_embedding', params)
    # output shape is [batch_size, goal length, word_embedding_size]
    goal_net = tf.nn.embedding_lookup(goal_embedding, features['goal_ids'])
    tf.add_to_collection('goal_embedding', goal_net)
    with tf.variable_scope('goal', reuse=False):
        # output shape: [batch_size, 1, goal length, hidden_size]
        goal_net = wavenet_encoding(goal_net, params, mode)
    # Max-pool over the spatial/length dimensions.
    # output shape is [batch_size, hidden_size]
    goal_net = tf.reduce_max(goal_net, [1, 2])
    # The first goal_net in the collection matches the number of unique goals.
    # This will be used by the predictor to compute the embedding of the goals.
    tf.add_to_collection('goal_net', goal_net)
    # The second goal_net in the collection contains duplicates, aligning with the
    # number of positive and negative theorems. The predictor will feed this value
    # in to compute the score of goal/theorem pairs.
    # output shape: [goal_tiling_size * batch_size, hidden_size]
    goal_tiling_size = params.ratio_neg_examples + 1
    goal_net = tf.tile(goal_net, [goal_tiling_size, 1])
    tf.add_to_collection('goal_net', goal_net)
    return goal_net
def dilated_cnn_thm_encoder(features, labels, mode, params, config):
    """Dilated convolution network encoding the theorem tokens.

    Args:
      features: goal and theorem pair. thm_ids has shape [batch_size, length of
        longest theorem]
      labels: labels are unused.
      mode: train or eval.
      params: hyperparameters
      config: configuration object

    Returns:
      Encoding for the thm, shape [batch_size, hidden_size]
    """
    del labels, config  # Unused by this encoder
    tf.add_to_collection('thm_ids', features['thm_ids'])
    # thm_ids shape is [batch_size, length of thm]
    if params.thm_vocab is not None:
        # Theorems have their own vocabulary: use a dedicated embedding.
        thm_embedding = get_vocab_embedding('thm_embedding', params)
        # output shape is [batch_size, thm length, word_embedding_size]
        thm_net = tf.nn.embedding_lookup(thm_embedding, features['thm_ids'])
    else:
        # No separate theorem vocabulary: share the goal embedding table.
        goal_embedding = get_vocab_embedding('goal_embedding', params)
        # output shape is [batch_size, thm length, word_embedding_size]
        thm_net = tf.nn.embedding_lookup(goal_embedding, features['thm_ids'])
    tf.add_to_collection('thm_embedding', thm_net)
    with tf.variable_scope('thm', reuse=False):
        # output shape: [batch_size, 1, thm length, hidden_size]
        thm_net = wavenet_encoding(thm_net, params, mode)
    # Max-pool over the spatial/length dimensions.
    # output shape is [batch_size, hidden_size]
    thm_net = tf.reduce_max(thm_net, [1, 2])
    tf.add_to_collection('thm_net', thm_net)
    return thm_net
def _concat_net_tac_id(net, labels, params):
    """Concatenate net with one-hot vectors of tac_id.

    Returns a tensor of shape [batch_size, hidden + num_tactics].
    """
    if labels is not None:
        tac_id = labels['tac_id']
    else:
        # No labels available (inference): use -1 for every row, which
        # tf.one_hot maps to an all-zero vector.
        tac_id = tf.tile(tf.constant([-1]), [tf.shape(net)[0]])
    tf.add_to_collection('label_tac_id', tac_id)
    # shape: [batch_size, num_tactics]
    label_tac_one_hot = tf.one_hot(tac_id, params.num_tactics)
    tf.add_to_collection('label_tac_one_hot', label_tac_one_hot)
    # shape: [batch_size, hidden_size + num_tactics]
    net = tf.concat([net, tf.to_float(label_tac_one_hot)], axis=1)
    tf.add_to_collection('pfstate_and_tac', net)
    return net
def dilated_cnn_pairwise_encoder(features, labels, mode, params, config):
    """Dilated convolution network for goal_ids and thm_ids.

    Follows Estimator signature.

    Args:
      features: only the goal (represented as token ids) is used.
      labels: tactic id label is used for PARAMETERS_CONDITIONED_ON_TAC.
      mode: dropout only in mode train.
      params: hyperparameters
      config: configuration object

    Returns:
      encoding_distribution: A normal distribution that can be sampled from.
    """
    params = utils.Params(params)
    with tf.variable_scope('dilated_cnn_pairwise_encoder'):
        goal_net = dilated_cnn_goal_encoder(features, labels, mode, params, config)
        thm_net = dilated_cnn_thm_encoder(features, labels, mode, params, config)
        spec = EncodingSpec(pfstate_enc=goal_net, thm_enc=thm_net)
        # Concatenate theorem encoding, goal encoding, their dot product.
        # This attention-style concatenation performed well in language models.
        # Output shape: [batch_size, 3 * hidden_size]
        net = tf.concat([
            spec.pfstate_enc, spec.thm_enc,
            tf.multiply(spec.pfstate_enc, spec.thm_enc)
        ], -1)
        if params.parameters_conditioned_on_tac:
            # Concatenate one-hot encoding of tac_ids.
            net = _concat_net_tac_id(net, labels, params)
        # Three dense+relu layers, each preceded by dropout during training.
        if mode == TRAIN:
            net = tf.nn.dropout(net, rate=(1 - params.thm_keep_prob))
        net = tf.layers.dense(net, params.hidden_size, activation=tf.nn.relu)
        if mode == TRAIN:
            net = tf.nn.dropout(net, rate=(1 - params.thm_keep_prob))
        net = tf.layers.dense(net, params.hidden_size, activation=tf.nn.relu)
        if mode == TRAIN:
            net = tf.nn.dropout(net, rate=(1 - params.thm_keep_prob))
        net = tf.layers.dense(net, params.hidden_size, activation=tf.nn.relu)
        tf.add_to_collection('thm_goal_fc', net)
        return EncodingSpec(
            enc=net, dist=None, pfstate_enc=spec.pfstate_enc, thm_enc=spec.thm_enc)
def tactic_classifier(encoding_spec, labels, mode, params, config):
    """Given a proof state encoding, compute tactic logits.

    Returns:
      A (predictions, eval_metric_ops) pair as produced by the losses module.
    """
    del config  # Unused
    if encoding_spec.pfstate_enc is not None:
        # If negative examples were added, use only goal encodings.
        net = encoding_spec.pfstate_enc
    else:
        net = encoding_spec.enc
    # Shape: 2D [batch_size, hidden_size]
    tf.add_to_collection('tactic_net', net)
    # Three dense layers (relu, relu, linear logits), each preceded by
    # dropout during training.
    if mode == TRAIN:
        net = tf.nn.dropout(net, rate=(1 - params.tac_keep_prob))
    net = tf.layers.dense(net, params.hidden_size, activation=tf.nn.relu)
    if mode == TRAIN:
        net = tf.nn.dropout(net, rate=(1 - params.tac_keep_prob))
    net = tf.layers.dense(net, params.hidden_size, activation=tf.nn.relu)
    if mode == TRAIN:
        net = tf.nn.dropout(net, rate=(1 - params.tac_keep_prob))
    tactic_logits = tf.layers.dense(net, params.num_tactics, activation=None)
    # Shape: 2D [batch_size, num_tactics]
    tf.add_to_collection('tactic_logits', tactic_logits)
    predictions = losses.tactic_predictions(tactic_logits, labels, mode, params)
    eval_metric_ops = losses.add_tactic_losses(predictions, labels, params)
    return predictions, eval_metric_ops
def pairwise_scorer(encoding_spec, labels, mode, params):
    """Given a (pfstate, thm) encoding, computes thm parameter scores.

    Returns:
      A (predictions, eval_metric_ops) pair as produced by the losses module.
    """
    del mode  # Unused in this scorer.
    net = encoding_spec.enc
    # Shape: 2D [batch_size, hidden_size]
    # A single linear unit produces one relevance score per pair.
    logits = tf.layers.dense(net, 1, activation=None)
    # Shape: 2D [batch_size, 1]
    tf.add_to_collection('pairwise_score', logits)
    predictions = losses.pairwise_predictions(logits, labels, params)
    eval_metric_ops = losses.add_pairwise_losses(predictions, params)
    return predictions, eval_metric_ops
| 35.20743 | 80 | 0.715002 |
2b92fc166e949f2f39bd84ada1c9d6a71f39e921 | 6,298 | py | Python | fetcher/integration_tests/fetcher_dispatcher/blackbox/test_fetcher.py | gavinmbell/benchmark-ai-1 | a697e67d68b843fe9350e55871dad867bab5d51d | [
"Apache-2.0"
] | 6 | 2020-09-29T09:03:04.000Z | 2022-03-14T06:52:25.000Z | fetcher/integration_tests/fetcher_dispatcher/blackbox/test_fetcher.py | gavinmbell/benchmark-ai-1 | a697e67d68b843fe9350e55871dad867bab5d51d | [
"Apache-2.0"
] | null | null | null | fetcher/integration_tests/fetcher_dispatcher/blackbox/test_fetcher.py | gavinmbell/benchmark-ai-1 | a697e67d68b843fe9350e55871dad867bab5d51d | [
"Apache-2.0"
] | 4 | 2020-10-01T07:49:22.000Z | 2021-06-16T19:44:12.000Z | import dataclasses
import pytest
from kafka import KafkaProducer, KafkaConsumer
from time import time
from typing import Callable
from bai_kafka_utils.cmd_callback import KafkaCommandCallback
from bai_kafka_utils.events import (
BenchmarkEvent,
DownloadableContent,
BenchmarkDoc,
FetcherPayload,
FetchedType,
FetcherStatus,
Status,
)
from bai_kafka_utils.integration_tests.test_loop import (
CombinedFilter,
wait_for_response,
EventFilter,
get_is_status_filter,
get_is_command_return_filter,
get_cancel_event,
)
from bai_kafka_utils.kafka_service import KafkaServiceConfig
# Signature of predicates used to accept/reject a DownloadableContent entry.
DataSetFilter = Callable[[DownloadableContent], bool]

# Should be successful in any environment - has delay of 10s for consumer group to setup
EXISTING_CONTENT_WITH_DELAY = "http://files.grouplens.org/datasets/movielens/ml-1m.zip?delay"

# Should fail in any environment - has delay of 10s for consumer group to setup
FAILING_CONTENT_WITH_DELAY = "http://files.grouplens.org/datasets/movielens/fail.zip?delay"
def get_salted_src(src: str) -> str:
    """Append the current timestamp to *src* so each test run is unique."""
    return f"{src}?time={time()}"
def get_fetcher_benchmark_event(template_event: BenchmarkEvent, dataset_src: str, model_src: str):
    """Clone *template_event* with a FetcherPayload built from the given sources.

    Each non-empty source is salted so repeated runs never collide;
    a falsy source yields an empty content list for that slot.
    """
    doc = BenchmarkDoc({"var": "val"}, "var = val", "")
    datasets = []
    if dataset_src:
        datasets.append(DownloadableContent(src=get_salted_src(dataset_src), path="/mount/path"))
    models = []
    if model_src:
        models.append(DownloadableContent(src=get_salted_src(model_src), path="/mount/path"))
    payload = FetcherPayload(toml=doc, datasets=datasets, models=models)
    return dataclasses.replace(template_event, payload=payload)
def is_dataset_successful(content: DownloadableContent) -> bool:
return content.dst is not None and content.type == FetchedType.FILE and content.status == FetcherStatus.DONE
def get_is_fetch_response_filter(src_event: BenchmarkEvent, kafka_service_config: KafkaServiceConfig) -> EventFilter:
    """Build an event filter matching a successful fetch response for *src_event*.

    Only the first dataset/model of the source event is inspected (the
    request builder attaches at most one of each).
    """
    # The request carries either datasets or models; grab its salted URL.
    src_to_check = (src_event.payload.datasets or src_event.payload.models)[0].src

    def filter_fetcher_event(event: BenchmarkEvent) -> bool:
        # Accept only events on the fetcher output topic whose payload
        # reports our source URL as successfully fetched to a file.
        return (
            event.type == kafka_service_config.producer_topic
            and isinstance(event.payload, FetcherPayload)
            and any(
                data_set.src == src_to_check and is_dataset_successful(data_set)
                for data_set in event.payload.datasets + event.payload.models
            )
        )

    return filter_fetcher_event
@pytest.mark.parametrize(
    "dataset_src,model_src,expected_status",
    [
        # data set download only
        (EXISTING_CONTENT_WITH_DELAY, None, Status.SUCCEEDED),
        (FAILING_CONTENT_WITH_DELAY, None, Status.FAILED),
        # model download only
        (None, EXISTING_CONTENT_WITH_DELAY, Status.SUCCEEDED),
        (None, FAILING_CONTENT_WITH_DELAY, Status.FAILED),
        # both data set and model downloads
        (EXISTING_CONTENT_WITH_DELAY, EXISTING_CONTENT_WITH_DELAY, Status.SUCCEEDED),
        (FAILING_CONTENT_WITH_DELAY, EXISTING_CONTENT_WITH_DELAY, Status.FAILED),
        (EXISTING_CONTENT_WITH_DELAY, FAILING_CONTENT_WITH_DELAY, Status.FAILED),
    ],
    ids=[
        "dataset_only_successful",
        "dataset_only_failing",
        "model_only_successful",
        "model_only_failing",
        "dataset_and_model_successful",
        "dataset_failing_model_successful",
        "dataset_successful_model_failing",
    ],
)
def test_fetcher(
    benchmark_event_dummy_payload: BenchmarkEvent,
    kafka_producer_to_consume: KafkaProducer,
    kafka_consumer_of_produced: KafkaConsumer,
    kafka_service_config: KafkaServiceConfig,
    dataset_src: str,
    model_src: str,
    expected_status: Status,
):
    """Black-box test: submitting a fetch request yields *expected_status*.

    Successful runs must additionally produce a fetcher response event
    reporting the salted source as fetched to a file.
    """
    benchmark_event = send_salted_fetch_request(
        benchmark_event_dummy_payload,
        kafka_producer_to_consume,
        kafka_service_config.consumer_topic,
        dataset_src,
        model_src,
    )
    status_event_filter = get_is_status_filter(benchmark_event, expected_status, kafka_service_config)
    filters = [status_event_filter]
    if expected_status == Status.SUCCEEDED:
        # Failed fetches never publish a successful payload, so the extra
        # response filter only applies to the success cases.
        fetcher_event_filter = get_is_fetch_response_filter(benchmark_event, kafka_service_config)
        filters.append(fetcher_event_filter)
    combined_filter = CombinedFilter(filters)
    return wait_for_response(combined_filter, kafka_consumer_of_produced)
@pytest.mark.parametrize(
    "dataset_src,model_src",
    [
        # data set download only
        (EXISTING_CONTENT_WITH_DELAY, None),
        # models only
        (None, EXISTING_CONTENT_WITH_DELAY),
        # both data set and model downloads
        (EXISTING_CONTENT_WITH_DELAY, EXISTING_CONTENT_WITH_DELAY),
    ],
    ids=["dataset_only", "model_only", "dataset_and_model"],
)
def test_cancel(
    benchmark_event_dummy_payload: BenchmarkEvent,
    kafka_producer_to_consume: KafkaProducer,
    kafka_consumer_of_produced: KafkaConsumer,
    kafka_service_config: KafkaServiceConfig,
    dataset_src,
    model_src,
):
    """A cancel command aborts an in-flight fetch.

    Waits for both a CANCELED status for the original event and a
    successful command-return for the cancel command itself.
    """
    benchmark_event = send_salted_fetch_request(
        benchmark_event_dummy_payload,
        kafka_producer_to_consume,
        kafka_service_config.consumer_topic,
        dataset_src=dataset_src,
        model_src=model_src,
    )
    # Publish the cancel command referencing the in-flight event.
    cancel_event = get_cancel_event(benchmark_event, kafka_service_config.cmd_submit_topic)
    kafka_producer_to_consume.send(
        kafka_service_config.cmd_submit_topic, value=cancel_event, key=cancel_event.client_id
    )
    status_event_filter = get_is_status_filter(benchmark_event, Status.CANCELED, kafka_service_config)
    command_return_filter = get_is_command_return_filter(
        cancel_event, KafkaCommandCallback.CODE_SUCCESS, kafka_service_config
    )
    combined_filter = CombinedFilter([status_event_filter, command_return_filter])
    return wait_for_response(combined_filter, kafka_consumer_of_produced)
def send_salted_fetch_request(benchmark_event_dummy_payload, kafka_producer_to_consume, topic, dataset_src, model_src):
    """Build a salted fetch-request event, publish it to *topic*, return it."""
    event = get_fetcher_benchmark_event(benchmark_event_dummy_payload, dataset_src, model_src)
    print(f"Sending event {event}")
    kafka_producer_to_consume.send(topic, value=event, key=event.client_id)
    return event
2618abe83f197447a187e7458582eaab4de9c3e9 | 292,424 | py | Python | headers/header_operations.py | Ikaguia/LWBR-WarForge | 0099fe20188b2dbfff237e8690ae54c33671656f | [
"Unlicense"
] | null | null | null | headers/header_operations.py | Ikaguia/LWBR-WarForge | 0099fe20188b2dbfff237e8690ae54c33671656f | [
"Unlicense"
] | null | null | null | headers/header_operations.py | Ikaguia/LWBR-WarForge | 0099fe20188b2dbfff237e8690ae54c33671656f | [
"Unlicense"
] | null | null | null | ################################################################################
# header_operations expanded v.1.0.0 #
################################################################################
# TABLE OF CONTENTS
################################################################################
#
# [ Z00 ] Introduction and Credits.
# [ Z01 ] Operation Modifiers.
# [ Z02 ] Flow Control.
# [ Z03 ] Mathematical Operations.
# [ Z04 ] Script/Trigger Parameters and Results.
# [ Z05 ] Keyboard and Mouse Input.
# [ Z06 ] World Map.
# [ Z07 ] Game Settings.
# [ Z08 ] Factions.
# [ Z09 ] Parties and Party Templates.
# [ Z10 ] Troops.
# [ Z11 ] Quests.
# [ Z12 ] Items.
# [ Z13 ] Sounds and Music Tracks.
# [ Z14 ] Positions.
# [ Z15 ] Game Notes.
# [ Z16 ] Tableaus and Heraldics.
# [ Z17 ] String Operations.
# [ Z18 ] Output And Messages.
# [ Z19 ] Game Control: Screens, Menus, Dialogs and Encounters.
# [ Z20 ] Scenes and Missions.
# [ Z21 ] Scene Props and Prop Instances.
# [ Z22 ] Agents and Teams.
# [ Z23 ] Presentations.
# [ Z24 ] Multiplayer And Networking.
# [ Z25 ] Remaining Esoteric Stuff.
# [ Z26 ] Hardcoded Compiler-Related Code.
#
################################################################################
################################################################################
# [ Z00 ] INTRODUCTION AND CREDITS
################################################################################
# Everyone who has ever tried to mod Mount&Blade games knows perfectly well,
# that the documentation for its Module System is severely lacking. Warband
# Module System, while introducing many new and useful operations, did not
# improve considerably in the way of documentation. What's worse, a number of
# outright errors and inconsistencies appeared between what was documented in
# the comments to the header_operations.py file (which was the root source of
# all Warband scripting documentation, whether you like it or not), and what
# was actually implemented in the game engine.
# Sooner or later someone was bound to dedicate some time and effort to fix
# this problem by properly documenting the file. It just so happened that I
# was the first person crazy enough to accept the challenge.
# I have tried to make this file a self-sufficient source of information on
# every operation that the Warband scripting engine knows of. Naturally I
# failed - there are still many operations for which there is simply not
# enough information, or operations with effects that have not yet been
# thoroughly tested and confirmed. But as far as I know, there is currently
# no other reference more exhaustive than this. I tried to make the file
# useful to both seasoned scripters and complete newbies, and to a certain
# degree this file can even serve as a tutorial into Warband scripting -
# though it still won't replace the wealth of tutorials produced by the
# Warband modding community.
# I really hope you will find it useful as well.
# Alexander Lomski AKA Lav. Jan 18th, 2012.
# And the credits.
# First of all, I should credit Taleworlds for the creation of this game and
# it's Module System. Without them, I wouldn't be able to work on this file
# so even though I'm often sceptical about their programming style and quality
# of their code, they still did a damn good job delivering this game to all
# of us.
# And then I should credit many members from the Warband modding community
# who have shared their knowledge and helped me clear out many uncertainties
# and inconsistencies. Special credits (in no particular order) go to
# cmpxchg8b, Caba'drin, SonKidd, MadVader, dunde, Ikaguia, MadocComadrin,
# Cjkjvfnby, shokkueibu.
################################################################################
# [ Z01 ] OPERATION MODIFIERS
################################################################################
neg = 0x80000000 # (neg|<operation_name>, ...),
# Used in combination with conditional operations to invert their results.
# Stored in the high bit of the opcode and applied with bitwise OR; see the
# derived aliases below (neq = neg|eq, le = neg|gt, lt = neg|ge).

this_or_next = 0x40000000 # (this_or_next|<operation_name>, ...),
# Used in combination with conditional operations to group them into OR blocks.
# A run of this_or_next-flagged checks succeeds as a whole if any one of
# them succeeds.
################################################################################
# [ Z02 ] FLOW CONTROL
################################################################################
call_script = 1 # (call_script, <script_id>, [<script_param>...]),
# Calls specified script with or without parameters.
try_begin = 4 # (try_begin),
# Opens a conditional block.
else_try = 5 # (else_try),
# If conditional operations in the conditional block fail, this block of code will be executed.
else_try_begin = 5 # (else_try_begin),
# Deprecated form of (else_try).
try_end = 3 # (try_end),
# Concludes a conditional block or a cycle.
end_try = 3 # (end_try),
# Deprecated form of (try_end),
try_for_range = 6 # (try_for_range, <destination>, <lower_bound>, <upper_bound>),
# Runs a cycle, iterating the value in the <lower_bound>..<upper_bound>-1 range.
try_for_range_backwards = 7 # (try_for_range_backwards, <destination>, <lower_bound>, <upper_bound>),
# Same as above, but iterates the value in the opposite direction (from higher values to lower).
try_for_parties = 11 # (try_for_parties, <destination>),
# Runs a cycle, iterating all parties on the map.
try_for_agents = 12 # (try_for_agents, <destination>),
# Runs a cycle, iterating all agents on the scene.
try_for_prop_instances = 16 # (try_for_prop_instances, <destination>, [<scene_prop_id>]),
# Version 1.161+. Runs a cycle, iterating all scene prop instances on the scene, or all scene prop instances of specific type if optional parameter is provided.
try_for_players = 17 # (try_for_players, <destination>, [skip_server]),
# Version 1.165+. Iterates through all players in a multiplayer game. Set optional parameter to 1 to skip server player entry.
################################################################################
# [ Z03 ] MATHEMATICAL OPERATIONS
################################################################################
# Mathematical operations deal with numbers. Warband Module System can only
# deal with integers. Floating point numbers are emulated by the so-called
# "fixed point numbers". Wherever you encounter a fixed point parameter for
# some Module System operation, keep in mind that it is actually just a
# regular integer number, HOWEVER it is supposed to represent a floating
# point number equal to fixed_point_number / fixed_point_multiplier. As you
# might have guessed, to convert a floating point number to fixed point, you
# have to multiply it by fixed_point_multiplier. You can change the value of
# multiplier with the operation (set_fixed_point_multiplier), thus influencing
# the precision of all operations dealing with fixed point numbers.
# A notion very important for Warband modding is that you reference all
# Warband objects by their numeric values. In other words, you can do maths
# with your items, troops, agents, scenes, parties et cetera. This is used
# extensively in the code, so don't be surprised to see code looking like
# (store_add, ":value", "itm_pike", 4). This code is just calculating a
# reference to an item which is located 4 positions after "itm_pike" inside
# the module_items.py file.
# Conditional operations
# NOTE: neq/le/lt are not distinct opcodes — they OR the `neg` modifier
# (defined earlier in this file) onto the opposite check to invert it.
gt = 32 # (gt, <value1>, <value2>),
# Checks that value1 > value2
ge = 30 # (ge, <value1>, <value2>),
# Checks that value1 >= value2
eq = 31 # (eq, <value1>, <value2>),
# Checks that value1 == value2
neq = neg|eq # (neq, <value1>, <value2>),
# Checks that value1 != value2
le = neg|gt # (le, <value1>, <value2>),
# Checks that value1 <= value2
lt = neg|ge # (lt, <value1>, <value2>),
# Checks that value1 < value2
is_between = 33 # (is_between, <value>, <lower_bound>, <upper_bound>),
# Checks that lower_bound <= value < upper_bound
# Mathematical and assignment operations
assign = 2133 # (assign, <destination>, <value>),
# Directly assigns a value to a variable or register.
store_add = 2120 # (store_add, <destination>, <value>, <value>),
# Assigns <destination> := <value> + <value>
store_sub = 2121 # (store_sub, <destination>, <value>, <value>),
# Assigns <destination> := <value> - <value>
store_mul = 2122 # (store_mul, <destination>, <value>, <value>),
# Assigns <destination> := <value> * <value>
store_div = 2123 # (store_div, <destination>, <value>, <value>),
# Assigns <destination> := <value> / <value>
# NOTE(review): since the engine only deals with integers (see section header),
# division is presumably integer division — confirm against engine behavior.
store_mod = 2119 # (store_mod, <destination>, <value>, <value>),
# Assigns <destination> := <value> MOD <value>
val_add = 2105 # (val_add, <destination>, <value>),
# Assigns <destination> := <destination> + <value>
val_sub = 2106 # (val_sub, <destination>, <value>),
# Assigns <destination> := <destination> - <value>
val_mul = 2107 # (val_mul, <destination>, <value>),
# Assigns <destination> := <destination> * <value>
val_div = 2108 # (val_div, <destination>, <value>),
# Assigns <destination> := <destination> / <value>
val_mod = 2109 # (val_mod, <destination>, <value>),
# Assigns <destination> := <destination> MOD <value>
val_min = 2110 # (val_min, <destination>, <value>),
# Assigns <destination> := MIN (<destination>, <value>)
val_max = 2111 # (val_max, <destination>, <value>),
# Assigns <destination> := MAX (<destination>, <value>)
val_clamp = 2112 # (val_clamp, <destination>, <lower_bound>, <upper_bound>),
# Enforces <destination> value to be within <lower_bound>..<upper_bound>-1 range.
val_abs = 2113 # (val_abs, <destination>),
# Assigns <destination> := ABS (<destination>)
# Bitwise and trigonometric operations. Trig operations work on fixed point
# values (see section header above for the fixed point convention).
store_or = 2116 # (store_or, <destination>, <value>, <value>),
# Binary OR
store_and = 2117 # (store_and, <destination>, <value>, <value>),
# Binary AND
val_or = 2114 # (val_or, <destination>, <value>),
# Binary OR, overwriting first operand.
val_and = 2115 # (val_and, <destination>, <value>),
# Binary AND, overwriting first operand.
val_lshift = 2100 # (val_lshift, <destination>, <value>),
# Bitwise shift left (dest = dest * 2 ^ value)
val_rshift = 2101 # (val_rshift, <destination>, <value>),
# Bitwise shift right (dest = dest / 2 ^ value)
store_sqrt = 2125 # (store_sqrt, <destination_fixed_point>, <value_fixed_point>),
# Assigns dest := SQRT (value)
store_pow = 2126 # (store_pow, <destination_fixed_point>, <value_fixed_point>, <power_fixed_point),
# Assigns dest := value ^ power
store_sin = 2127 # (store_sin, <destination_fixed_point>, <value_fixed_point>),
# Assigns dest := SIN (value)
store_cos = 2128 # (store_cos, <destination_fixed_point>, <value_fixed_point>),
# Assigns dest := COS (value)
store_tan = 2129 # (store_tan, <destination_fixed_point>, <value_fixed_point>),
# Assigns dest := TAN (value)
store_asin = 2140 # (store_asin, <destination_fixed_point>, <value_fixed_point>),
# Assigns dest := ARCSIN (value)
store_acos = 2141 # (store_acos, <destination_fixed_point>, <value_fixed_point>),
# Assigns dest := ARCCOS (value)
store_atan = 2142 # (store_atan, <destination_fixed_point>, <value_fixed_point>),
# Assigns dest := ARCTAN (value)
store_atan2 = 2143 # (store_atan2, <destination_fixed_point>, <y_fixed_point>, <x_fixed_point>),
# Returns the angle between the x axis and a point with coordinates (X,Y) in degrees. Note the angle is calculated counter-clockwise, i.e. (1,1) will return 45, not -45.
# NOTE(review): store_atan2 documents degrees; the other trig operations
# presumably use degrees as well — confirm before relying on it.
# Random number generation
store_random = 2135 # (store_random, <destination>, <upper_range>),
# Stores a random value in the range of 0..<upper_range>-1. Deprecated, use (store_random_in_range) instead.
store_random_in_range = 2136 # (store_random_in_range, <destination>, <range_low>, <range_high>),
# Stores a random value in the range of <range_low>..<range_high>-1.
shuffle_range = 2134 # (shuffle_range, <reg_no>, <reg_no>),
# Randomly shuffles a range of registers, reordering the values contained in them. Commonly used for list randomization.
# Fixed point values handling
set_fixed_point_multiplier = 2124 # (set_fixed_point_multiplier, <value>),
# Affects all operations dealing with fixed point numbers. Default value is 1.
convert_to_fixed_point = 2130 # (convert_to_fixed_point, <destination_fixed_point>),
# Converts integer value to fixed point (multiplies by the fixed point multiplier).
convert_from_fixed_point = 2131 # (convert_from_fixed_point, <destination>),
# Converts fixed point value to integer (divides by the fixed point multiplier).
################################################################################
# [ Z04 ] SCRIPT/TRIGGER PARAMETERS AND RESULTS
################################################################################
# Many scripts can accept additional parameters, and many triggers have some
# parameters of their own (as details in header_triggers.py file). You can
# only pass numeric values as parameters. Since string constants are also
# Warband objects, you can pass them as well, and you can also pass string
# or position registers. However you cannot pass quick strings (string
# defined directly in the code).
# You can declare your scripts with as many parameters as you wish. Triggers,
# however, are always called with their predefined parameters. Also the game
# engine does not support more than 3 parameters per trigger. As the result,
# some triggers receive extra information which could not be fit into those
# three parameters in numeric, string or position registers.
# Some triggers and scripts called from the game engine (those have names
# starting with "game_") expect you to return some value to the game engine.
# That value may be either a number or a string and is set by special
# operations listed below. Scripts called from the Module System, however,
# typically use registers to store their return data.
# Note that if you call a script from a trigger, you can still use operations
# to retrieve trigger's calling parameters, and they will retrieve values that
# have been passed to the trigger, not values that have been passed to the
# script.
store_script_param_1 = 21 # (store_script_param_1, <destination>),
# Retrieve the value of the first script parameter.
store_script_param_2 = 22 # (store_script_param_2, <destination>),
# Retrieve the value of the second script parameter.
store_script_param = 23 # (store_script_param, <destination>, <script_param_index>),
# Retrieve the value of arbitrary script parameter (generally used when script accepts more than two). Parameters are enumerated starting from 1.
set_result_string = 60 # (set_result_string, <string>),
# Sets the return value of a game_* script, when a string value is expected by game engine.
store_trigger_param_1 = 2071 # (store_trigger_param_1, <destination>),
# Retrieve the value of the first trigger parameter. Will retrieve trigger's parameters even when called from inside a script, for as long as that script is running within trigger context.
store_trigger_param_2 = 2072 # (store_trigger_param_2, <destination>),
# Retrieve the value of the second trigger parameter. Will retrieve trigger's parameters even when called from inside a script, for as long as that script is running within trigger context.
store_trigger_param_3 = 2073 # (store_trigger_param_3, <destination>),
# Retrieve the value of the third trigger parameter. Will retrieve trigger's parameters even when called from inside a script, for as long as that script is running within trigger context.
store_trigger_param = 2070 # (store_trigger_param, <destination>, <trigger_param_no>),
# Version 1.153+. Retrieve the value of arbitrary trigger parameter. Parameters are enumerated starting from 1. Note that despite the introduction of this operation, there's not a single trigger with more than 3 parameters.
get_trigger_object_position = 702 # (get_trigger_object_position, <position>),
# Retrieve the position of an object which caused the trigger to fire (when appropriate).
set_trigger_result = 2075 # (set_trigger_result, <value>),
# Sets the return value of a trigger or game_* script, when an integer value is expected by game engine.
################################################################################
# [ Z05 ] KEYBOARD AND MOUSE INPUT
################################################################################
# The game provides modders with limited ability to control keyboard input and
# mouse movements. It is also possible to tamper with game keys (i.e. keys
# bound to specific game actions), including the ability to override game's
# reaction to those keys. Note that mouse buttons are keys too, and can be
# detected with the corresponding operations.
# Conditional operations
key_is_down = 70 # (key_is_down, <key_code>),
# Checks that the specified key is currently pressed. See header_triggers.py for key code reference.
key_clicked = 71 # (key_clicked, <key_code>),
# Checks that the specified key has just been pressed. See header_triggers.py for key code reference.
game_key_is_down = 72 # (game_key_is_down, <game_key_code>),
# Checks that the specified game key is currently pressed. See header_triggers.py for game key code reference.
game_key_clicked = 73 # (game_key_clicked, <game_key_code>),
# Checks that the specified key has just been pressed. See header_triggers.py for game key code reference.
# Generic operations
omit_key_once = 77 # (omit_key_once, <key_code>),
# Forces the game to ignore default bound action for the specified game key on current game frame.
clear_omitted_keys = 78 # (clear_omitted_keys),
# Commonly called when exiting from a presentation which made any calls to (omit_key_once). However the effects of those calls disappear by the next frame, so apparently usage of this operation is not necessary. It is still recommended to be on the safe side though.
mouse_get_position = 75 # (mouse_get_position, <position>),
# Stores mouse x and y coordinates in the specified position.
################################################################################
# [ Z06 ] WORLD MAP
################################################################################
# Generally, all operations which only make sense on the worldmap and have no
# specific category have been assembled here. These mostly deal with weather,
# time and resting.
# Conditional operations
is_currently_night = 2273 # (is_currently_night),
# Checks that it's currently night in the game.
map_free = 37 # (map_free),
# Checks that the player is currently on the global map and no game screens are open.
# Weather-handling operations
get_global_cloud_amount = 90 # (get_global_cloud_amount, <destination>),
# Returns current cloudiness (a value between 0..100).
set_global_cloud_amount = 91 # (set_global_cloud_amount, <value>),
# Sets current cloudiness (value is clamped to 0..100).
get_global_haze_amount = 92 # (get_global_haze_amount, <destination>),
# Returns current fogginess (value between 0..100).
set_global_haze_amount = 93 # (set_global_haze_amount, <value>),
# Sets current fogginess (value is clamped to 0..100).
# Time-related operations
store_current_hours = 2270 # (store_current_hours, <destination>),
# Stores number of hours that have passed since beginning of the game. Commonly used to track time when accuracy up to hours is required.
store_time_of_day = 2271 # (store_time_of_day, <destination>),
# Stores current day hour (value in 0..24 range).
store_current_day = 2272 # (store_current_day, <destination>),
# Stores number of days that have passed since beginning of the game. Commonly used to track time when high accuracy is not required.
rest_for_hours = 1030 # (rest_for_hours, <rest_time_in_hours>, [time_speed_multiplier], [remain_attackable]),
# Forces the player party to rest for specified number of hours. Time can be accelerated and player can be made immune or subject to attacks.
rest_for_hours_interactive = 1031 # (rest_for_hours_interactive, <rest_time_in_hours>, [time_speed_multiplier], [remain_attackable]),
# Forces the player party to rest for specified number of hours. Player can break the rest at any moment. Time can be accelerated and player can be made immune or subject to attacks.
################################################################################
# [ Z07 ] GAME SETTINGS AND STATISTICS
################################################################################
# This group of operations allows you to retrieve some of the game settings
# as configured by the player on Options page, and change them as necessary
# (possibly forcing a certain level of difficulty on the player). Operations
# dealing with achievements (an interesting, but underdeveloped feature of
# Warband) are also placed in this category.
# Conditional operations
is_trial_version = 250 # (is_trial_version),
# Checks if the game is in trial mode (has not been purchased). Player cannot get higher than level 6 in this mode.
is_edit_mode_enabled = 255 # (is_edit_mode_enabled),
# Version 1.153+. Checks that Edit Mode is currently enabled in the game.
# Generic operations
get_operation_set_version = 55 # (get_operation_set_version, <destination>),
# Version 1.165+. 4research. Apparently returns the current version of Module System operations set, allowing transparent support for multiple Warband engine versions.
set_player_troop = 47 # (set_player_troop, <troop_id>),
# Changes the troop player controls. Generally used in quick-battle scenarios to give player a predefined character.
show_object_details_overlay = 960 # (show_object_details_overlay, <value>),
# Turns various popup tooltips on (value = 1) and off (value = 0). This includes agent names and dropped item names during missions, item stats in inventory on mouse over, etc.
auto_save = 985 # (auto_save),
# Version 1.161+. Saves the game to the current save slot.
# Access to game options
options_get_damage_to_player = 260 # (options_get_damage_to_player, <destination>),
# 0 = 1/4, 1 = 1/2, 2 = 1/1
options_set_damage_to_player = 261 # (options_set_damage_to_player, <value>),
# 0 = 1/4, 1 = 1/2, 2 = 1/1
options_get_damage_to_friends = 262 # (options_get_damage_to_friends, <destination>),
# 0 = 1/2, 1 = 3/4, 2 = 1/1
options_set_damage_to_friends = 263 # (options_set_damage_to_friends, <value>),
# 0 = 1/2, 1 = 3/4, 2 = 1/1
options_get_combat_ai = 264 # (options_get_combat_ai, <destination>),
# 0 = good, 1 = average, 2 = poor
options_set_combat_ai = 265 # (options_set_combat_ai, <value>),
# 0 = good, 1 = average, 2 = poor
game_get_reduce_campaign_ai = 424 # (game_get_reduce_campaign_ai, <destination>),
# Deprecated operation. Use options_get_campaign_ai instead
options_get_campaign_ai = 266 # (options_get_campaign_ai, <destination>),
# 0 = good, 1 = average, 2 = poor
options_set_campaign_ai = 267 # (options_set_campaign_ai, <value>),
# 0 = good, 1 = average, 2 = poor
options_get_combat_speed = 268 # (options_get_combat_speed, <destination>),
# 0 = slowest, 1 = slower, 2 = normal, 3 = faster, 4 = fastest
options_set_combat_speed = 269 # (options_set_combat_speed, <value>),
# 0 = slowest, 1 = slower, 2 = normal, 3 = faster, 4 = fastest
options_get_battle_size = 270 # (options_get_battle_size, <destination>),
# Version 1.161+. Retrieves current battle size slider value (in the range of 0..1000). Note that this is the slider value, not the battle size itself.
options_set_battle_size = 271 # (options_set_battle_size, <value>),
# Version 1.161+. Sets battle size slider to provided value (in the range of 0..1000). Note that this is the slider value, not the battle size itself.
get_average_game_difficulty = 990 # (get_average_game_difficulty, <destination>),
# Returns calculated game difficulty rating (as displayed on the Options page). Commonly used for score calculation when ending the game.
# Achievements and kill stats
get_achievement_stat = 370 # (get_achievement_stat, <destination>, <achievement_id>, <stat_index>),
# Retrieves the numeric value associated with an achievement. Used to keep track of player's results before finally unlocking it.
set_achievement_stat = 371 # (set_achievement_stat, <achievement_id>, <stat_index>, <value>),
# Sets the new value associated with an achievement. Used to keep track of player's results before finally unlocking it.
unlock_achievement = 372 # (unlock_achievement, <achievement_id>),
# Unlocks player's achievement. Apparently doesn't have any game effects.
get_player_agent_kill_count = 1701 # (get_player_agent_kill_count, <destination>, [get_wounded]),
# Retrieves the total number of enemies killed by the player. Call with non-zero <get_wounded> parameter to retrieve the total number of knocked down enemies.
get_player_agent_own_troop_kill_count = 1705 # (get_player_agent_own_troop_kill_count, <destination>, [get_wounded]),
# Retrieves the total number of allies killed by the player. Call with non-zero <get_wounded> parameter to retrieve the total number of knocked down allies.
################################################################################
# [ Z08 ] FACTIONS
################################################################################
# Despite the importance of factions to the game, there aren't that many
# actions to deal with them. Essentially, you can control colors and name of
# existing game factions, set or retrieve relations between them, and work
# with faction slots. There's also a number of operations which assign or
# retrieve the factional allegiance of other game objects, like parties and
# troops, but these have been placed in the respective sections of the file.
# Slot operations for factions
# Slot operations follow the common pattern: *_set_slot stores a value in an
# object's slot, *_get_slot retrieves it, *_slot_eq / *_slot_ge are
# conditional checks against the stored value.
faction_set_slot = 502 # (faction_set_slot, <faction_id>, <slot_no>, <value>),
faction_get_slot = 522 # (faction_get_slot, <destination>, <faction_id>, <slot_no>),
faction_slot_eq = 542 # (faction_slot_eq, <faction_id>, <slot_no>, <value>),
faction_slot_ge = 562 # (faction_slot_ge, <faction_id>, <slot_no>, <value>),
# Generic operations
set_relation = 1270 # (set_relation, <faction_id_1>, <faction_id_2>, <value>),
# Sets relation between two factions. Relation is in -100..100 range.
store_relation = 2190 # (store_relation, <destination>, <faction_id_1>, <faction_id_2>),
# Retrieves relation between two factions. Relation is in -100..100 range.
faction_set_name = 1275 # (faction_set_name, <faction_id>, <string>),
# Sets the name of the faction. See also (str_store_faction_name) in String Operations.
faction_set_color = 1276 # (faction_set_color, <faction_id>, <color_code>),
# Sets the faction color. All parties and centers belonging to this faction will be displayed with this color on global map.
faction_get_color = 1277 # (faction_get_color, <destination>, <faction_id>),
# Gets the faction color value.
################################################################################
# [ Z09 ] PARTIES AND PARTY TEMPLATES
################################################################################
# Parties are extremely important element of single-player modding, because
# they are the only object which can be present on the world map. Each party
# is a semi-independent object with its own behavior. Note that you cannot
# control party's behavior directly, instead you can change various factors
# which affect party behavior (including party AI settings).
# There are two things of importance when dealing with parties. First, parties
# can be attached to each other, this allows you, for example, to stack a
# number of armies inside a single city. Second, parties may encounter each
# other. When two AI parties are in encounter, it usually means they are
# fighting. Player's encounter with an AI party is usually much more complex
# and may involve pretty much anything, which is why player's encounters are
# covered in a separate section of the file.
# Each party consists of troop stacks. Each troop stack is either a single
# hero (troop defined as tf_hero in module_troops.py file) or a number of
# regular troops (their number may vary from 1 and above). Each party has two
# sets of troop stacks: members (or companions) set of stacks, and prisoners
# set of stacks. Many operations will only affect members, others may only
# affect prisoners, and there are even operations to switch their roles.
# Another important concept is a party template. Its definition looks very
# similar to a party. Templates are used when there's a need to create a
# number of parties with similar set of members, parameters or flags. Also
# templates can be easily used to differentiate parties from each other,
# so they are akin to a "party_type" in the game.
# Note that parties are the only game object which is persistent (i.e. it
# will be saved to the savegame file and restored on load), has slots and
# can be created during runtime. This makes parties ideal candidates for
# dynamic information storage of unlimited volume, which the game otherwise
# lacks.
# Conditional operations
hero_can_join = 101 # (hero_can_join, [party_id]),
# Checks if party can accept one hero troop. Player's party is default value.
hero_can_join_as_prisoner = 102 # (hero_can_join_as_prisoner, [party_id]),
# Checks if party can accept one hero prisoner troop. Player's party is default value.
party_can_join = 103 # (party_can_join),
# During encounter dialog, checks if encountered party can join player's party.
party_can_join_as_prisoner = 104 # (party_can_join_as_prisoner),
# During encounter dialog, checks if encountered party can join player's party as prisoners.
troops_can_join = 105 # (troops_can_join, <value>),
# Checks if player party has enough space for provided number of troops.
troops_can_join_as_prisoner = 106 # (troops_can_join_as_prisoner, <value>),
# Checks if player party has enough space for provided number of prisoners.
party_can_join_party = 107 # (party_can_join_party, <joiner_party_id>, <host_party_id>, [flip_prisoners]),
# Checks if first party can join second party (enough space for both troops and prisoners). If flip_prisoners flag is 1, then members and prisoners in the joining party are flipped.
main_party_has_troop = 110 # (main_party_has_troop, <troop_id>),
# Checks if player party has specified troop.
party_is_in_town = 130 # (party_is_in_town, <party_id>, <town_party_id>),
# Checks that the party has successfully reached its destination (after being set to ai_bhvr_travel_to_party) and that its destination is actually the referenced town_party_id.
party_is_in_any_town = 131 # (party_is_in_any_town, <party_id>),
# Checks that the party has successfully reached its destination (after being set to ai_bhvr_travel_to_party).
party_is_active = 132 # (party_is_active, <party_id>),
# Checks that <party_id> is valid and not disabled.
# Slot operations for parties and party templates (see factions section for
# the set/get/eq/ge slot operation pattern).
party_template_set_slot = 504 # (party_template_set_slot, <party_template_id>, <slot_no>, <value>),
party_template_get_slot = 524 # (party_template_get_slot, <destination>, <party_template_id>, <slot_no>),
party_template_slot_eq = 544 # (party_template_slot_eq, <party_template_id>, <slot_no>, <value>),
party_template_slot_ge = 564 # (party_template_slot_ge, <party_template_id>, <slot_no>, <value>),
party_set_slot = 501 # (party_set_slot, <party_id>, <slot_no>, <value>),
party_get_slot = 521 # (party_get_slot, <destination>, <party_id>, <slot_no>),
party_slot_eq = 541 # (party_slot_eq, <party_id>, <slot_no>, <value>),
party_slot_ge = 561 # (party_slot_ge, <party_id>, <slot_no>, <value>),
# Generic operations
set_party_creation_random_limits = 1080 # (set_party_creation_random_limits, <min_value>, <max_value>),
# Affects party sizes spawned from templates. May be used to spawn larger parties when player is high level. Values should be in 0..100 range.
set_spawn_radius = 1103 # (set_spawn_radius, <value>),
# Sets radius for party spawning with subsequent <spawn_around_party> operations.
spawn_around_party = 1100 # (spawn_around_party, <party_id>, <party_template_id>),
# Creates a new party from a party template and puts its <party_id> into reg0.
disable_party = 1230 # (disable_party, <party_id>),
# Party disappears from the map. Note that (try_for_parties) will still iterate over disabled parties, so you need to make additional checks with (party_is_active).
enable_party = 1231 # (enable_party, <party_id>),
# Reactivates a previously disabled party.
remove_party = 1232 # (remove_party, <party_id>),
# Destroys a party completely. Should ONLY be used with dynamically spawned parties, as removing parties pre-defined in module_parties.py file will corrupt the savegame.
party_get_current_terrain = 1608 # (party_get_current_terrain, <destination>, <party_id>),
# Returns a value from header_terrain_types.py
party_relocate_near_party = 1623 # (party_relocate_near_party, <relocated_party_id>, <target_party_id>, <spawn_radius>),
# Teleports party into vicinity of another party.
party_get_position = 1625 # (party_get_position, <dest_position>, <party_id>),
# Stores current position of the party on world map.
party_set_position = 1626 # (party_set_position, <party_id>, <position>),
# Teleports party to a specified position on the world map.
set_camera_follow_party = 1021 # (set_camera_follow_party, <party_id>),
# Self-explanatory. Can be used on world map only. Commonly used to make camera follow a party which has captured player as prisoner.
party_attach_to_party = 1660 # (party_attach_to_party, <party_id>, <party_id_to_attach_to>),
# Attach a party to another one (like lord's army staying in a town/castle).
party_detach = 1661 # (party_detach, <party_id>),
# Remove a party from attachments and place it on the world map.
party_collect_attachments_to_party = 1662 # (party_collect_attachments_to_party, <source_party_id>, <collected_party_id>),
# Mostly used in various battle and AI calculations. Will create an aggregate party from all parties attached to the source party.
party_get_cur_town = 1665 # (party_get_cur_town, <destination>, <party_id>),
# When a party has reached its destination (using ai_bhvr_travel_to_party), this operation will retrieve the party_id of the destination party.
party_get_attached_to = 1694 # (party_get_attached_to, <destination>, <party_id>),
# Retrieves the party that the referenced party is attached to, if any.
party_get_num_attached_parties = 1695 # (party_get_num_attached_parties, <destination>, <party_id>),
# Retrieves total number of parties attached to referenced party.
party_get_attached_party_with_rank = 1696 # (party_get_attached_party_with_rank, <destination>, <party_id>, <attached_party_index>),
# Extract party_id of a specified party among attached.
party_set_name = 1669 # (party_set_name, <party_id>, <string>),
# Sets party name (will be displayed as label and/or in the party details popup).
party_set_extra_text = 1605 # (party_set_extra_text, <party_id>, <string>),
# Allows to put extra text in party details popup. Used in Native to set status for villages or towns (being raided, razed, under siege...).
party_get_icon = 1681 # (party_get_icon, <destination>, <party_id>),
# Retrieve map icon used for the party.
party_set_icon = 1676 # (party_set_icon, <party_id>, <map_icon_id>),
# Sets what map icon will be used for the party.
party_set_banner_icon = 1677 # (party_set_banner_icon, <party_id>, <map_icon_id>),
# Sets what map icon will be used as the party banner. Use 0 to remove banner from a party.
party_set_extra_icon = 1682 # (party_set_extra_icon, <party_id>, <map_icon_id>, <vertical_offset_fixed_point>, <up_down_frequency_fixed_point>, <rotate_frequency_fixed_point>, <fade_in_out_frequency_fixed_point>),
# Adds or removes an extra map icon to a party, possibly with some animations. Use -1 as map_icon_id to remove extra icon.
party_add_particle_system = 1678 # (party_add_particle_system, <party_id>, <particle_system_id>),
# Appends some special visual effects to the party on the map. Used in Native to add fire and smoke over villages.
party_clear_particle_systems = 1679 # (party_clear_particle_systems, <party_id>),
# Removes all special visual effects from the party on the map.
context_menu_add_item = 980 # (context_menu_add_item, <string_id>, <value>),
# Must be called inside script_game_context_menu_get_buttons. Adds context menu option for a party and its respective identifier (will be passed to script_game_event_context_menu_button_clicked).
party_get_template_id = 1609 # (party_get_template_id, <destination>, <party_id>),
# Retrieves what party template was used to create the party (if any). Commonly used to identify encountered party type.
party_set_faction = 1620 # (party_set_faction, <party_id>, <faction_id>),
# Sets party faction allegiance. Party color is changed appropriately.
store_faction_of_party = 2204 # (store_faction_of_party, <destination>, <party_id>),
# Retrieves current faction allegiance of the party.
store_random_party_in_range = 2254 # (store_random_party_in_range, <destination>, <lower_bound>, <upper_bound>),
# Retrieves one random party from the range. Generally used only for predefined parties (towns, villages etc).
store01_random_parties_in_range = 2255 # (store01_random_parties_in_range, <lower_bound>, <upper_bound>),
# Stores two random, different parties in a range to reg0 and reg1. Generally used only for predefined parties (towns, villages etc).
store_distance_to_party_from_party = 2281 # (store_distance_to_party_from_party, <destination>, <party_id>, <party_id>),
# Retrieves distance between two parties on the global map.
store_num_parties_of_template = 2310 # (store_num_parties_of_template, <destination>, <party_template_id>),
# Stores number of active parties which were created using specified party template.
store_random_party_of_template = 2311 # (store_random_party_of_template, <destination>, <party_template_id>),
# Retrieves one random party which was created using specified party template. Fails if no party exists with provided template.
store_num_parties_created = 2300 # (store_num_parties_created, <destination>, <party_template_id>),
# Stores the total number of created parties of specified type. Not used in Native.
store_num_parties_destroyed = 2301 # (store_num_parties_destroyed, <destination>, <party_template_id>),
# Stores the total number of destroyed parties of specified type.
store_num_parties_destroyed_by_player = 2302 # (store_num_parties_destroyed_by_player, <destination>, <party_template_id>),
# Stores the total number of parties of specified type which have been destroyed by player.
party_get_morale = 1671 # (party_get_morale, <destination>, <party_id>),
# Returns a value in the range of 0..100. Party morale does not affect party behavior on the map, but will be taken in account if the party is engaged in battle (except auto-calc).
party_set_morale = 1672 # (party_set_morale, <party_id>, <value>),
# Value should be in the range of 0..100. Party morale does not affect party behavior on the map, but will be taken in account if the party is engaged in battle (except auto-calc).
# Party members manipulation
party_join = 1201 # (party_join),
# During encounter, joins encountered party to player's party
party_join_as_prisoner = 1202 # (party_join_as_prisoner),
# During encounter, joins encountered party to player's party as prisoners
troop_join = 1203 # (troop_join, <troop_id>),
# Specified hero joins player's party
troop_join_as_prisoner = 1204 # (troop_join_as_prisoner, <troop_id>),
# Specified hero joins player's party as prisoner
add_companion_party = 1233 # (add_companion_party, <troop_id_hero>),
# Creates a new empty party with specified hero as party leader and the only member. Party is spawned at the position of player's party.
party_add_members = 1610 # (party_add_members, <party_id>, <troop_id>, <number>),
# Returns total number of added troops in reg0.
party_add_prisoners = 1611 # (party_add_prisoners, <party_id>, <troop_id>, <number>),
# Returns total number of added prisoners in reg0.
party_add_leader = 1612 # (party_add_leader, <party_id>, <troop_id>, [number]),
# Adds troop(s) to the party and makes it party leader.
party_force_add_members = 1613 # (party_force_add_members, <party_id>, <troop_id>, <number>),
# Adds troops to party ignoring party size limits. Mostly used to add hero troops.
party_force_add_prisoners = 1614 # (party_force_add_prisoners, <party_id>, <troop_id>, <number>),
# Adds prisoners to party ignoring party size limits. Mostly used to add hero prisoners.
party_add_template = 1675 # (party_add_template, <party_id>, <party_template_id>, [reverse_prisoner_status]),
# Reinforces the party using the specified party template. Optional flag switches troop/prisoner status for reinforcements.
distribute_party_among_party_group = 1698 # (distribute_party_among_party_group, <party_to_be_distributed>, <group_root_party>),
# Distributes troops from first party among all parties attached to the second party. Commonly used to divide prisoners and rescued troops among NPC parties.
remove_member_from_party = 1210 # (remove_member_from_party, <troop_id>, [party_id]),
# Removes hero member from party. Player party is default value. Will display a message about companion leaving the party. Should not be used with regular troops (it will successfully remove one of them, but will produce some meaningless spam).
remove_regular_prisoners = 1211 # (remove_regular_prisoners, <party_id>),
# Removes all non-hero prisoners from the party.
remove_troops_from_companions = 1215 # (remove_troops_from_companions, <troop_id>, <value>),
# Removes troops from player's party, duplicating functionality of (party_remove_members) but providing less flexibility.
remove_troops_from_prisoners = 1216 # (remove_troops_from_prisoners, <troop_id>, <value>),
# Removes prisoners from player's party.
party_remove_members = 1615 # (party_remove_members, <party_id>, <troop_id>, <number>),
# Removes specified number of troops from a party. Stores number of actually removed troops in reg0.
party_remove_prisoners = 1616 # (party_remove_prisoners, <party_id>, <troop_id>, <number>),
# Removes specified number of prisoners from a party. Stores number of actually removed prisoners in reg0.
party_clear = 1617 # (party_clear, <party_id>),
# Removes all members and prisoners from the party.
add_gold_to_party = 1070 # (add_gold_to_party, <value>, <party_id>),
# Marks the party as carrying the specified amount of gold, which can be pillaged by player if he destroys it. Operation must not be used to give gold to player's party.
# Calculating party and stack sizes
party_get_num_companions = 1601 # (party_get_num_companions, <destination>, <party_id>),
# Returns total number of party members, including leader.
party_get_num_prisoners = 1602 # (party_get_num_prisoners, <destination>, <party_id>),
# Returns total number of party prisoners.
party_count_members_of_type = 1630 # (party_count_members_of_type, <destination>, <party_id>, <troop_id>),
# Returns total number of party members of specific type.
party_count_companions_of_type = 1631 # (party_count_companions_of_type, <destination>, <party_id>, <troop_id>),
# Duplicates (party_count_members_of_type).
party_count_prisoners_of_type = 1632 # (party_count_prisoners_of_type, <destination>, <party_id>, <troop_id>),
# Returns total number of prisoners of specific type.
party_get_free_companions_capacity = 1633 # (party_get_free_companions_capacity, <destination>, <party_id>),
# Calculates how many members can be added to the party.
party_get_free_prisoners_capacity = 1634 # (party_get_free_prisoners_capacity, <destination>, <party_id>),
# Calculates how many prisoners can be added to the party.
party_get_num_companion_stacks = 1650 # (party_get_num_companion_stacks, <destination>, <party_id>),
# Returns total number of troop stacks in the party (including player and heroes).
party_get_num_prisoner_stacks = 1651 # (party_get_num_prisoner_stacks, <destination>, <party_id>),
# Returns total number of prisoner stacks in the party (including any heroes).
party_stack_get_troop_id = 1652 # (party_stack_get_troop_id, <destination>, <party_id>, <stack_no>),
# Extracts troop type of the specified troop stack.
party_stack_get_size = 1653 # (party_stack_get_size, <destination>, <party_id>, <stack_no>),
# Extracts number of troops in the specified troop stack.
party_stack_get_num_wounded = 1654 # (party_stack_get_num_wounded, <destination>, <party_id>, <stack_no>),
# Extracts number of wounded troops in the specified troop stack.
party_stack_get_troop_dna = 1655 # (party_stack_get_troop_dna, <destination>, <party_id>, <stack_no>),
# Extracts DNA from the specified troop stack. Used to properly generate appearance in conversations.
party_prisoner_stack_get_troop_id = 1656 # (party_prisoner_stack_get_troop_id, <destination>, <party_id>, <stack_no>),
# Extracts troop type of the specified prisoner stack.
party_prisoner_stack_get_size = 1657 # (party_prisoner_stack_get_size, <destination>, <party_id>, <stack_no>),
# Extracts number of troops in the specified prisoner stack.
party_prisoner_stack_get_troop_dna = 1658 # (party_prisoner_stack_get_troop_dna, <destination>, <party_id>, <stack_no>),
# Extracts DNA from the specified prisoner stack. Used to properly generate appearance in conversations.
store_num_free_stacks = 2154 # (store_num_free_stacks, <destination>, <party_id>),
# Deprecated, as Warband no longer has limits on number of stacks in the party. Always returns 10.
store_num_free_prisoner_stacks = 2155 # (store_num_free_prisoner_stacks, <destination>, <party_id>),
# Deprecated, as Warband no longer has limits on number of stacks in the party. Always returns 10.
store_party_size = 2156 # (store_party_size, <destination>,[party_id]),
# Stores total party size (all members and prisoners).
store_party_size_wo_prisoners = 2157 # (store_party_size_wo_prisoners, <destination>, [party_id]),
# Stores total number of members in the party (without prisoners), duplicating (party_get_num_companions).
store_troop_kind_count = 2158 # (store_troop_kind_count, <destination>, <troop_type_id>),
# Counts number of troops of specified type in player's party. Deprecated, use party_count_members_of_type instead.
store_num_regular_prisoners = 2159 # (store_num_regular_prisoners, <destination>, <party_id>),
# Deprecated and does not work. Do not use.
store_troop_count_companions = 2160 # (store_troop_count_companions, <destination>, <troop_id>, [party_id]),
# Apparently deprecated, duplicates (party_get_num_companions). Not used in Native.
store_troop_count_prisoners = 2161 # (store_troop_count_prisoners, <destination>, <troop_id>, [party_id]),
# Apparently deprecated, duplicates (party_get_num_prisoners). Not used in Native.
# Party experience and skills
party_add_xp_to_stack = 1670 # (party_add_xp_to_stack, <party_id>, <stack_no>, <xp_amount>),
# Awards specified number of xp points to a single troop stack in the party.
party_upgrade_with_xp = 1673 # (party_upgrade_with_xp, <party_id>, <xp_amount>, <upgrade_path>), #upgrade_path can be:
# Awards specified number of xp points to entire party (split between all stacks) and upgrades all eligible troops. Upgrade direction: (0 = random, 1 = first, 2 = second).
party_add_xp = 1674 # (party_add_xp, <party_id>, <xp_amount>),
# Awards specified number of xp points to entire party (split between all stacks).
party_get_skill_level = 1685 # (party_get_skill_level, <destination>, <party_id>, <skill_no>),
# Retrieves skill level for the specified party (usually max among the heroes). Makes a callback to (script_game_get_skill_modifier_for_troop).
# Combat related operations
heal_party = 1225 # (heal_party, <party_id>),
# Heals all wounded party members.
party_wound_members = 1618 # (party_wound_members, <party_id>, <troop_id>, <number>),
# Wounds a specified number of troops in the party.
party_remove_members_wounded_first = 1619 # (party_remove_members_wounded_first, <party_id>, <troop_id>, <number>),
# Removes a certain number of troops from the party, starting with wounded. Stores total number removed in reg0.
party_quick_attach_to_current_battle = 1663 # (party_quick_attach_to_current_battle, <party_id>, <side>),
# Adds any party into current encounter at specified side (0 = ally, 1 = enemy).
party_leave_cur_battle = 1666 # (party_leave_cur_battle, <party_id>),
# Forces the party to leave its current battle (if it's engaged).
party_set_next_battle_simulation_time = 1667 # (party_set_next_battle_simulation_time, <party_id>, <next_simulation_time_in_hours>),
# Defines the period of time (in hours) after which the battle must be simulated for the specified party for the next time. When a value <= 0 is passed, the combat simulation round is performed immediately.
party_get_battle_opponent = 1680 # (party_get_battle_opponent, <destination>, <party_id>)
# When a party is engaged in battle with another party, returns its opponent party. Otherwise returns -1.
inflict_casualties_to_party_group = 1697 # (inflict_casualties_to_party_group, <parent_party_id>, <damage_amount>, <party_id_to_add_causalties_to>),
# Delivers auto-calculated damage to the party (and all other parties attached to it). Killed troops are moved to another party to keep track of.
party_end_battle = 108 # (party_end_battle, <party_no>),
# Version 1.153+. UNTESTED. Supposedly ends the battle in which the party is currently participating.
# Party AI
party_set_marshall = 1604 # (party_set_marshall, <party_id>, <value>),
party_set_marshal = party_set_marshall # (party_set_marshal, <party_id>, <value>),
# Sets party as a marshall party or turns it back to normal party. Value is either 1 or 0. This affects party behavior, but exact effects are not known. Alternative operation name spelling added to enable compatibility with Viking Conquest DLC module system.
party_set_flags = 1603 # (party_set_flags, <party_id>, <flag>, <clear_or_set>),
# Sets (1) or clears (0) party flags in runtime. See header_parties.py for flags reference.
party_set_aggressiveness = 1606 # (party_set_aggressiveness, <party_id>, <number>),
# Sets aggressiveness value for the party (range 0..15).
party_set_courage = 1607 # (party_set_courage, <party_id>, <number>),
# Sets courage value for the party (range 4..15).
party_get_ai_initiative = 1638 # (party_get_ai_initiative, <destination>, <party_id>),
# Gets party current AI initiative value (range 0..100).
party_set_ai_initiative = 1639 # (party_set_ai_initiative, <party_id>, <value>),
# Sets AI initiative value for the party (range 0..100).
party_set_ai_behavior = 1640 # (party_set_ai_behavior, <party_id>, <ai_bhvr>),
# Sets AI behavior for the party. See header_parties.py for reference.
party_set_ai_object = 1641 # (party_set_ai_object, <party_id>, <object_party_id>),
# Sets another party as the object for current AI behavior (follow that party).
party_set_ai_target_position = 1642 # (party_set_ai_target_position, <party_id>, <position>),
# Sets a specific world map position as the object for current AI behavior (travel to that point).
party_set_ai_patrol_radius = 1643 # (party_set_ai_patrol_radius, <party_id>, <radius_in_km>),
# Sets a radius for AI patrolling behavior.
party_ignore_player = 1644 # (party_ignore_player, <party_id>, <duration_in_hours>),
# Makes AI party ignore player for the specified time.
party_set_bandit_attraction = 1645 # (party_set_bandit_attraction, <party_id>, <attraction>),
# Sets party attractiveness to parties with bandit behavior (range 0..100).
party_get_helpfulness = 1646 # (party_get_helpfulness, <destination>, <party_id>),
# Gets party current AI helpfulness value (range 0..100).
party_set_helpfulness = 1647 # (party_set_helpfulness, <party_id>, <number>),
# Sets AI helpfulness value for the party (range 0..10000, default 100).
get_party_ai_behavior = 2290 # (get_party_ai_behavior, <destination>, <party_id>),
# Retrieves current AI behavior pattern for the party.
get_party_ai_object = 2291 # (get_party_ai_object, <destination>, <party_id>),
# Retrieves what party is currently used as object for AI behavior.
party_get_ai_target_position = 2292 # (party_get_ai_target_position, <position>, <party_id>),
# Retrieves what position is currently used as object for AI behavior.
get_party_ai_current_behavior = 2293 # (get_party_ai_current_behavior, <destination>, <party_id>),
# Retrieves current AI behavior pattern when it was overridden by current situation (fleeing from enemy when en route to destination).
get_party_ai_current_object = 2294 # (get_party_ai_current_object, <destination>, <party_id>),
# Retrieves what party has caused temporary behavior switch.
party_set_ignore_with_player_party = 1648 # (party_set_ignore_with_player_party, <party_id>, <value>),
# Version 1.161+. Effects uncertain. 4research
party_get_ignore_with_player_party = 1649 # (party_get_ignore_with_player_party, <party_id>),
# Version 1.161+. Effects uncertain. Documented official syntax is suspicious and probably incorrect. 4research
################################################################################
# [ Z10 ] TROOPS
################################################################################
# What troops are.
# There are two major types of troops: heroes and regulars. They are treated
# very differently by the game, so it's important not to confuse them. At the
# same time, most Module System operations will not make any differentiation
# between hero and regular troops.
# First of all, hero troops do not stack. You cannot have a stack of heroes
# in a party, each hero will always occupy a separate troop slot. At the same
# time, you can put any number of regular troops into a single troop slot.
# Second, the way the game treats equipment of heroes and troops is also
# different. All heroes' items are treated in the same way as player's (no
# big surprise, since player is actually a hero troop himself). Meanwhile,
# items that the troop has are just suggestions for what this troop *might*
# take into battle. On the battlefield, each agent spawned from the regular
# troop, will only take a limited number of items from the inventory provided
# by the troop definition in module_troops.py. Choice is absolutely random and
# modder has only limited control over it through the use of guarantee flags.
# There is one additional caveat: while you can easily change the outfit
# of a hero troop and have your changes persist through the game, the same
# applies to regular troops as well: by changing equipment of some regular
# troop, you are changing all instances of that troop throughout the entire
# game. As a consequence, you cannot re-equip a stack of regulars in a single
# party - your changes will affect all parties in the world.
# Third, while all heroes have a single predefined face code, which is used
# consistently through the game, troops have entire range of face codes. This
# range is used to randomize each agent's face within those constraints, so a
# group of 12 pikemen will not look like a bunch of clones.
# Fourth, hero troops can't be killed in battle. Every time hero's hit points
# are reduced to zero, hero is always knocked down. For regular troops, chance
# to be knocked down depends on the number of factors, but their default fate
# when driven to zero health is death.
# Conditional operators
troop_has_item_equipped = 151 # (troop_has_item_equipped, <troop_id>, <item_id>),
# Checks that the troop has this item equipped (worn or wielded).
troop_is_mounted = 152 # (troop_is_mounted, <troop_id>),
# Checks the troop for tf_mounted flag (see header_troops.py). Does NOT check that the troop has a horse.
troop_is_guarantee_ranged = 153 # (troop_is_guarantee_ranged, <troop_id>),
# Checks the troop for tf_guarantee_ranged flag (see header_troops.py). Does not check that troop actually has some ranged weapon.
troop_is_guarantee_horse = 154 # (troop_is_guarantee_horse, <troop_id>),
# Checks the troop for tf_guarantee_horse flag (see header_troops.py). Does not check that troop actually has some horse.
troop_is_hero = 1507 # (troop_is_hero, <troop_id>),
# Checks the troop for tf_hero flag (see header_troops.py). Hero troops are actual characters and do not stack in party window.
troop_is_wounded = 1508 # (troop_is_wounded, <troop_id>),
# Checks that the troop is wounded. Only works for hero troops.
player_has_item = 150 # (player_has_item, <item_id>),
# Checks that player has the specified item.
# Slot operations for troops
troop_set_slot = 500 # (troop_set_slot, <troop_id>, <slot_no>, <value>),
troop_get_slot = 520 # (troop_get_slot, <destination>, <troop_id>, <slot_no>),
troop_slot_eq = 540 # (troop_slot_eq, <troop_id>, <slot_no>, <value>),
troop_slot_ge = 560 # (troop_slot_ge, <troop_id>, <slot_no>, <value>),
# Generic slot storage for troop records: set a slot value, retrieve it, and test it for equality / greater-or-equal.
# Troop attributes and skills
troop_set_type = 1505 # (troop_set_type, <troop_id>, <gender>),
# Changes the troop skin. There are two skins in Native: male and female, so in effect this operation sets troop gender. However mods may declare other skins.
troop_get_type = 1506 # (troop_get_type, <destination>, <troop_id>),
# Returns troop current skin (i.e. gender).
troop_set_class = 1517 # (troop_set_class, <troop_id>, <value>),
# Sets troop class (infantry, archers, cavalry or any of custom classes). Accepts values in range 0..8. See grc_* constants in header_mission_templates.py.
troop_get_class = 1516 # (troop_get_class, <destination>, <troop_id>),
# Retrieves troop class. Returns values in range 0..8.
class_set_name = 1837 # (class_set_name, <sub_class>, <string_id>),
# Sets a new name for troop class (aka "Infantry", "Cavalry", "Custom Group 3"...).
add_xp_to_troop = 1062 # (add_xp_to_troop, <value>, [troop_id]),
# Adds some xp points to troop. Only makes sense for player and hero troops. Default troop_id is player. Amount of xp can be negative.
add_xp_as_reward = 1064 # (add_xp_as_reward, <value>),
# Adds the specified amount of xp points to player. Typically used as a quest reward operation.
troop_get_xp = 1515 # (troop_get_xp, <destination>, <troop_id>),
# Retrieves total amount of xp specified troop has.
store_attribute_level = 2172 # (store_attribute_level, <destination>, <troop_id>, <attribute_id>),
# Stores current value of troop attribute. See ca_* constants in header_troops.py for reference.
troop_raise_attribute = 1520 # (troop_raise_attribute, <troop_id>, <attribute_id>, <value>),
# Increases troop attribute by the specified amount. See ca_* constants in header_troops.py for reference. Use negative values to reduce attributes. When used on non-hero troop, will affect all instances of that troop.
store_skill_level = 2170 # (store_skill_level, <destination>, <skill_id>, [troop_id]),
# Stores current value of troop skill. See header_skills.py for reference.
troop_raise_skill = 1521 # (troop_raise_skill, <troop_id>, <skill_id>, <value>),
# Increases troop skill by the specified value. Value can be negative. See header_skills.py for reference. When used on non-hero troop, will affect all instances of that troop.
store_proficiency_level = 2176 # (store_proficiency_level, <destination>, <troop_id>, <attribute_id>),
# Stores current value of troop weapon proficiency. See wpt_* constants in header_troops.py for reference.
troop_raise_proficiency = 1522 # (troop_raise_proficiency, <troop_id>, <proficiency_no>, <value>),
# Increases troop weapon proficiency by the specified value. Value can be negative. Increase is subject to limits defined by Weapon Master skill. When used on non-hero troop, will affect all instances of that troop.
troop_raise_proficiency_linear = 1523 # (troop_raise_proficiency_linear, <troop_id>, <proficiency_no>, <value>),
# Same as (troop_raise_proficiency), but does not take Weapon Master skill into account (i.e. can increase proficiencies indefinitely).
troop_add_proficiency_points = 1525 # (troop_add_proficiency_points, <troop_id>, <value>),
# Adds some proficiency points to a hero troop which can later be distributed by player.
store_troop_health = 2175 # (store_troop_health, <destination>, <troop_id>, [absolute]), # set absolute to 1 to get actual health; otherwise this will return percentage health in range (0-100)
# Retrieves current troop health. Use absolute = 1 to retrieve actual number of hp points left, use absolute = 0 to retrieve a value in 0..100 range (percentage).
troop_set_health = 1560 # (troop_set_health, <troop_id>, <relative health (0-100)>),
# Sets troop health. Accepts value in range 0..100 (percentage).
troop_get_upgrade_troop = 1561 # (troop_get_upgrade_troop, <destination>, <troop_id>, <upgrade_path>),
# Retrieves possible directions for non-hero troop upgrade. Use 0 to retrieve first upgrade path, and 1 to return second. Result of -1 means there's no such upgrade path for this troop.
store_character_level = 2171 # (store_character_level, <destination>, [troop_id]),
# Retrieves character level of the troop. Default troop is the player.
get_level_boundary = 991 # (get_level_boundary, <destination>, <level_no>),
# Returns the amount of experience points required to reach the specified level (will return 0 for 1st level). Maximum possible level in the game is 63.
add_gold_as_xp = 1063 # (add_gold_as_xp, <value>, [troop_id]), # Default troop is player
# Adds a certain amount of experience points, depending on the amount of gold specified. Conversion rate is unclear and apparently somewhat randomized (three runs with 1000 gold produced values 1091, 804 and 799).
# Troop equipment handling
troop_set_auto_equip = 1509 # (troop_set_auto_equip, <troop_id>, <value>),
# Sets (value = 1) or disables (value = 0) auto-equipping the troop with any items added to its inventory or purchased. Similar to tf_is_merchant flag.
troop_ensure_inventory_space = 1510 # (troop_ensure_inventory_space, <troop_id>, <value>),
# Removes items from troop inventory until troop has specified number of free inventory slots. Will free inventory slots starting from the end (items at the bottom of inventory will be removed first if there's not enough free space).
troop_sort_inventory = 1511 # (troop_sort_inventory, <troop_id>),
# Sorts items in troop inventory by their price (expensive first).
troop_add_item = 1530 # (troop_add_item, <troop_id>, <item_id>, [modifier]),
# Adds an item to the troop, optionally with a modifier (see imod_* constants in header_item_modifiers.py).
troop_remove_item = 1531 # (troop_remove_item, <troop_id>, <item_id>),
# Removes an item from the troop equipment or inventory. Operation will remove first matching item it finds.
troop_clear_inventory = 1532 # (troop_clear_inventory, <troop_id>),
# Clears entire troop inventory. Does not affect equipped items.
troop_equip_items = 1533 # (troop_equip_items, <troop_id>),
# Makes the troop reconsider its equipment. If troop has better stuff in its inventory, he will equip it. Note this operation sucks with weapons and may force the troop to equip himself with 4 two-handed swords.
troop_inventory_slot_set_item_amount = 1534 # (troop_inventory_slot_set_item_amount, <troop_id>, <inventory_slot_no>, <value>),
# Sets the stack size for a specified equipment or inventory slot. Only makes sense for items like ammo or food (which show stuff like "23/50" in inventory). Equipment slots are in range 0..9, see ek_* constants in header_items.py for reference.
troop_inventory_slot_get_item_amount = 1537 # (troop_inventory_slot_get_item_amount, <destination>, <troop_id>, <inventory_slot_no>),
# Retrieves the stack size for a specified equipment or inventory slot (if some Bread is 23/50, this operation will return 23).
troop_inventory_slot_get_item_max_amount = 1538 # (troop_inventory_slot_get_item_max_amount, <destination>, <troop_id>, <inventory_slot_no>),
# Retrieves the maximum possible stack size for a specified equipment or inventory slot (if some Bread is 23/50, this operation will return 50).
troop_add_items = 1535 # (troop_add_items, <troop_id>, <item_id>, <number>),
# Adds multiple items of specified type to the troop.
troop_remove_items = 1536 # (troop_remove_items, <troop_id>, <item_id>, <number>),
# Removes multiple items of specified type from the troop. Total price of actually removed items will be stored in reg0.
troop_loot_troop = 1539 # (troop_loot_troop, <target_troop>, <source_troop_id>, <probability>),
# Adds to target_troop's inventory some items from source_troop's equipment and inventory with some probability. Does not actually remove items from source_troop. Commonly used in Native to generate random loot after the battle.
troop_get_inventory_capacity = 1540 # (troop_get_inventory_capacity, <destination>, <troop_id>),
# Returns the total inventory capacity (number of inventory slots) for the specified troop. Note that this number will include equipment slots as well. Substract num_equipment_kinds (see header_items.py) to get the number of actual *inventory* slots.
troop_get_inventory_slot = 1541 # (troop_get_inventory_slot, <destination>, <troop_id>, <inventory_slot_no>),
# Retrieves the item_id of a specified equipment or inventory slot. Returns -1 when there's nothing there.
troop_get_inventory_slot_modifier = 1542 # (troop_get_inventory_slot_modifier, <destination>, <troop_id>, <inventory_slot_no>),
# Retrieves the modifier value (see imod_* constants in header_items.py) for an item in the specified equipment or inventory slot. Returns 0 when there's nothing there, or if item does not have any modifiers.
troop_set_inventory_slot = 1543 # (troop_set_inventory_slot, <troop_id>, <inventory_slot_no>, <item_id>),
# Puts the specified item into troop's equipment or inventory slot. Be careful with setting equipment slots this way.
troop_set_inventory_slot_modifier = 1544 # (troop_set_inventory_slot_modifier, <troop_id>, <inventory_slot_no>, <imod_value>),
# Sets the modifier for the item in the troop's equipment or inventory slot. See imod_* constants in header_items.py for reference.
store_item_kind_count = 2165 # (store_item_kind_count, <destination>, <item_id>, [troop_id]),
# Calculates total number of items of specified type that the troop has. Default troop is player.
store_free_inventory_capacity = 2167 # (store_free_inventory_capacity, <destination>, [troop_id]),
# Calculates total number of free inventory slots that the troop has. Default troop is player.
# Merchandise handling
reset_price_rates = 1170 # (reset_price_rates),
# Resets customized price rates for merchants.
set_price_rate_for_item = 1171 # (set_price_rate_for_item, <item_id>, <value_percentage>),
# Sets individual price rate for a single item type. Normal price rate is 100. Deprecated, as Warband uses (game_get_item_[buy/sell]_price_factor) scripts instead.
set_price_rate_for_item_type = 1172 # (set_price_rate_for_item_type, <item_type_id>, <value_percentage>),
# Sets individual price rate for entire item class (see header_items.py for itp_type_* constants). Normal price rate is 100. Deprecated, as Warband uses (game_get_item_[buy/sell]_price_factor) scripts instead.
set_merchandise_modifier_quality = 1490 # (set_merchandise_modifier_quality, <value>),
# Affects the probability of items with quality modifiers appearing in merchandise. Value is percentage, standard value is 100.
set_merchandise_max_value = 1491 # (set_merchandise_max_value, <value>),
# Not used in Native. Apparently prevents items with price higher than listed from being generated as merchandise.
reset_item_probabilities = 1492 # (reset_item_probabilities, <value>),
# Sets all items probability of being generated as merchandise to the provided value. Use zero with subsequent calls to (set_item_probability_in_merchandise) to only allow generation of certain items.
set_item_probability_in_merchandise = 1493 # (set_item_probability_in_merchandise, <item_id>, <value>),
# Sets item probability of being generated as merchandise to the provided value.
troop_add_merchandise = 1512 # (troop_add_merchandise, <troop_id>, <item_type_id>, <value>),
# Adds a specified number of random items of certain type (see itp_type_* constants in header_items.py) to troop inventory. Only adds items with itp_merchandise flag.
troop_add_merchandise_with_faction = 1513 # (troop_add_merchandise_with_faction, <troop_id>, <faction_id>, <item_type_id>, <value>), #faction_id is given to check if troop is eligible to produce that item
# Same as (troop_add_merchandise), but with additional filter: only adds items which belong to specified faction, or without any factions at all.
# Miscellaneous troop information
troop_set_name = 1501 # (troop_set_name, <troop_id>, <string_no>),
# Renames the troop, setting a new singular name for it.
troop_set_plural_name = 1502 # (troop_set_plural_name, <troop_id>, <string_no>),
# Renames the troop, setting a new plural name for it.
troop_set_face_key_from_current_profile = 1503 # (troop_set_face_key_from_current_profile, <troop_id>),
# Forces the troop to adopt the face from player's currently selected multiplayer profile.
troop_add_gold = 1528 # (troop_add_gold, <troop_id>, <value>),
# Adds gold to troop. Generally used with player or hero troops.
troop_remove_gold = 1529 # (troop_remove_gold, <troop_id>, <value>),
# Removes gold from troop. Generally used with player or hero troops.
store_troop_gold = 2149 # (store_troop_gold, <destination>, <troop_id>),
# Retrieves total number of gold that the troop has.
troop_set_faction = 1550 # (troop_set_faction, <troop_id>, <faction_id>),
# Sets a new faction for the troop (mostly used to switch lords allegiances in Native).
store_troop_faction = 2173 # (store_troop_faction, <destination>, <troop_id>),
# Retrieves current troop faction allegiance.
store_faction_of_troop = 2173 # (store_faction_of_troop, <destination>, <troop_id>),
# Alternative spelling of the above operation.
troop_set_age = 1555 # (troop_set_age, <troop_id>, <age_slider_pos>),
# Defines a new age for the troop (will be used by the game engine to generate appropriately aged face). Age is in range 0..100.
store_troop_value = 2231 # (store_troop_value, <destination>, <troop_id>),
# Stores some value which is apparently related to troop's overall fighting value. Swadian infantry line troops from Native produced values 24, 47, 80, 133, 188. Calling on player produced 0.
# Troop face code handling
str_store_player_face_keys = 2747 # (str_store_player_face_keys, <string_no>, <player_id>),
# Version 1.161+. Stores player's face keys into string register.
player_set_face_keys = 2748 # (player_set_face_keys, <player_id>, <string_no>),
# Version 1.161+. Sets player's face keys from string.
str_store_troop_face_keys = 2750 # (str_store_troop_face_keys, <string_no>, <troop_no>, [<alt>]),
# Version 1.161+. Stores specified troop's face keys into string register. Use optional <alt> parameter to determine what facekey set to retrieve: 0 for first and 1 for second.
troop_set_face_keys = 2751 # (troop_set_face_keys, <troop_no>, <string_no>, [<alt>]),
# Version 1.161+. Sets troop face keys from string. Use optional <alt> parameter to determine what face keys to update: 0 for first and 1 for second.
face_keys_get_hair = 2752 # (face_keys_get_hair, <destination>, <string_no>),
# Version 1.161+. Unpacks selected hair mesh from string containing troop/player face keys to <destination>.
face_keys_set_hair = 2753 # (face_keys_set_hair, <string_no>, <value>),
# Version 1.161+. Updates face keys string with a new hair value. Hair meshes associated with skin (as defined in module_skins) are numbered from 1. Use 0 for no hair.
face_keys_get_beard = 2754 # (face_keys_get_beard, <destination>, <string_no>),
# Version 1.161+. Unpacks selected beard mesh from string containing troop/player face keys to <destination>.
face_keys_set_beard = 2755 # (face_keys_set_beard, <string_no>, <value>),
# Version 1.161+. Updates face keys string with a new beard value. Beard meshes associated with skin (as defined in module_skins) are numbered from 1. Use 0 for no beard.
face_keys_get_face_texture = 2756 # (face_keys_get_face_texture, <destination>, <string_no>),
# Version 1.161+. Unpacks selected face texture from string containing troop/player face keys to <destination>.
face_keys_set_face_texture = 2757 # (face_keys_set_face_texture, <string_no>, <value>),
# Version 1.161+. Updates face keys string with a new face texture value. Face textures associated with skin (as defined in module_skins) are numbered from 0.
face_keys_get_hair_texture = 2758 # (face_keys_get_hair_texture, <destination>, <string_no>),
# Version 1.161+. Unpacks selected hair texture from string containing troop/player face keys to <destination>. Apparently hair textures have no effect. Needs further research.
face_keys_set_hair_texture = 2759 # (face_keys_set_hair_texture, <string_no>, <value>),
# Version 1.161+. Updates face keys string with a new hair texture value. Doesn't seem to have an effect. Needs further research.
face_keys_get_hair_color = 2760 # (face_keys_get_hair_color, <destination>, <string_no>),
# Version 1.161+. Unpacks hair color slider value from face keys string. Values are in the range of 0..63. Mapping to specific colors depends on the hair color range defined for currently selected skin / face_texture combination.
face_keys_set_hair_color = 2761 # (face_keys_set_hair_color, <string_no>, <value>),
# Version 1.161+. Updates face keys string with a new hair color slider value. Value should be in the 0..63 range.
face_keys_get_age = 2762 # (face_keys_get_age, <destination>, <string_no>),
# Version 1.161+. Unpacks age slider value from face keys string. Values are in the range of 0..63.
face_keys_set_age = 2763 # (face_keys_set_age, <string_no>, <value>),
# Version 1.161+. Updates face keys string with a new age slider value. Value should be in the 0..63 range.
face_keys_get_skin_color = 2764 # (face_keys_get_skin_color, <destination>, <string_no>),
# Version 1.161+. Apparently doesn't work. Should retrieve skin color value from face keys string into <destination>.
face_keys_set_skin_color = 2765 # (face_keys_set_skin_color, <string_no>, <value>),
# Version 1.161+. Apparently doesn't work. Should update face keys string with a new skin color value.
face_keys_get_morph_key = 2766 # (face_keys_get_morph_key, <destination>, <string_no>, <key_no>),
# Version 1.161+. Unpacks morph key value from face keys string. See morph key indices in module_skins.py file. Note that only 8 out of 27 morph keys are actually accessible (from 'chin_size' to 'cheeks'). Morph key values are in the 0..7 range.
face_keys_set_morph_key = 2767 # (face_keys_set_morph_key, <string_no>, <key_no>, <value>),
# Version 1.161+. Updates face keys string with a new morph key value. See morph key indices in module_skins.py file. Note that only 8 out of 27 morph keys are actually accessible (from 'chin_size' to 'cheeks'). Morph key values should be in the 0..7 range.
################################################################################
# [ Z11 ] QUESTS
################################################################################
# Quests are just that: some tasks that characters in the game world want the
# player to do. It's interesting to note that in Warband quests can have three
# possible outcomes: success, failure and conclusion. Generally the last
# option is used to indicate some "intermediate" quest result, which is
# neither a full success, nor a total failure.
# Conditional operations
check_quest_active = 200 # (check_quest_active, <quest_id>),
# Checks that the quest has been started but not yet cancelled or completed. Will not fail for concluded, failed or succeeded quests for as long as they have not yet been completed.
check_quest_finished = 201 # (check_quest_finished, <quest_id>),
# Checks that the quest has been completed (result does not matter) and not taken again yet.
check_quest_succeeded = 202 # (check_quest_succeeded, <quest_id>),
# Checks that the quest has succeeded and not taken again yet (check will be successful even after the quest is completed).
check_quest_failed = 203 # (check_quest_failed, <quest_id>),
# Checks that the quest has failed and not taken again yet (check will be successful even after the quest is completed).
check_quest_concluded = 204 # (check_quest_concluded, <quest_id>),
# Checks that the quest was concluded with any result and not taken again yet.
# Slot operations for quests
quest_set_slot = 506 # (quest_set_slot, <quest_id>, <slot_no>, <value>),
quest_get_slot = 526 # (quest_get_slot, <destination>, <quest_id>, <slot_no>),
quest_slot_eq = 546 # (quest_slot_eq, <quest_id>, <slot_no>, <value>),
quest_slot_ge = 566 # (quest_slot_ge, <quest_id>, <slot_no>, <value>),
# Quest management
start_quest = 1280 # (start_quest, <quest_id>, <giver_troop_id>),
# Starts the quest and marks giver_troop as the troop who gave it.
conclude_quest = 1286 # (conclude_quest, <quest_id>),
# Sets quest status as concluded but keeps it in the list. Frequently used to indicate "uncertain" quest status, when it's neither fully successful nor a total failure.
succeed_quest = 1282 # (succeed_quest, <quest_id>), #also concludes the quest
# Sets quest status as successful but keeps it in the list (player must visit quest giver to complete it before he can get another quest of the same type).
fail_quest = 1283 # (fail_quest, <quest_id>), #also concludes the quest
# Sets quest status as failed but keeps it in the list (player must visit quest giver to complete it before he can get another quest of the same type).
complete_quest = 1281 # (complete_quest, <quest_id>),
# Successfully completes specified quest, removing it from the list of active quests.
cancel_quest = 1284 # (cancel_quest, <quest_id>),
# Cancels specified quest without completing it, removing it from the list of active quests.
setup_quest_text = 1290 # (setup_quest_text, <quest_id>),
# Operation will refresh default quest description (as defined in module_quests.py). This is important when quest description contains references to variables and registers which need to be initialized with their current values.
store_partner_quest = 2240 # (store_partner_quest, <destination>),
# During conversation, if there's a quest given by conversation partner, the operation will return its id.
setup_quest_giver = 1291 # (setup_quest_giver, <quest_id>, <string_id>),
# Apparently deprecated, as quest giver troop is now defined as a parameter of (start_quest).
store_random_quest_in_range = 2250 # (store_random_quest_in_range, <destination>, <lower_bound>, <upper_bound>),
# Apparently deprecated as the logic for picking a new quest has been moved to module_scripts.
set_quest_progression = 1285 # (set_quest_progression, <quest_id>, <value>),
# Deprecated and useless, operation has no game effects and it's impossible to retrieve quest progression status anyway.
store_random_troop_to_raise = 2251 # (store_random_troop_to_raise, <destination>, <lower_bound>, <upper_bound>),
# Apparently deprecated.
store_random_troop_to_capture = 2252 # (store_random_troop_to_capture, <destination>, <lower_bound>, <upper_bound>),
# Apparently deprecated.
store_quest_number = 2261 # (store_quest_number, <destination>, <quest_id>),
# Apparently deprecated.
store_quest_item = 2262 # (store_quest_item, <destination>, <item_id>),
# Apparently deprecated. Native now uses quest slots to keep track of this information.
store_quest_troop = 2263 # (store_quest_troop, <destination>, <troop_id>),
# Apparently deprecated. Native now uses quest slots to keep track of this information.
################################################################################
# [ Z12 ] ITEMS
################################################################################
# The title is a bit deceitful here. Items, despite the name, are not actual
# game items. Rather these are the *definitions* for real game items, and you
# can frequently see them referenced as "item types". However you should not
# confuse this with so called itp_type_* constants which define the major item
# classes existing in the game.
# Consider this: a Smoked Fish (50/50) in your character's inventory is an
# item in the game world. Its item type is "itm_smoked_fish" and its basic
# class is itp_type_food. So take care: operations in this section are dealing
# with "itm_smoked_fish", not with actual fish in your inventory. The latter
# is actually just an inventory slot from the Module System's point of view,
# and operations to work with it are in the troops section of the file.
# Conditional operations
item_has_property = 2723 # (item_has_property, <item_kind_no>, <property>),
# Version 1.161+. Check that the item has specified property flag set. See the list of itp_* flags in header_items.py.
item_has_capability = 2724 # (item_has_capability, <item_kind_no>, <capability>),
# Version 1.161+. Checks that the item has specified capability flag set. See the list of itcf_* flags in header_items.py
item_has_modifier = 2725 # (item_has_modifier, <item_kind_no>, <item_modifier_no>),
# Version 1.161+. Checks that the specified modifiers is valid for the item. See the list of imod_* values in header_item_modifiers.py.
item_has_faction = 2726 # (item_has_faction, <item_kind_no>, <faction_no>),
# Version 1.161+. Checks that the item is available for specified faction. Note that an item with no factions set is available to all factions.
# Item slot operations
item_set_slot = 507 # (item_set_slot, <item_id>, <slot_no>, <value>),
item_get_slot = 527 # (item_get_slot, <destination>, <item_id>, <slot_no>),
item_slot_eq = 547 # (item_slot_eq, <item_id>, <slot_no>, <value>),
item_slot_ge = 567 # (item_slot_ge, <item_id>, <slot_no>, <value>),
# Generic item operations
item_get_type = 1570 # (item_get_type, <destination>, <item_id>),
# Returns item class (see header_items.py for itp_type_* constants).
store_item_value = 2230 # (store_item_value, <destination>, <item_id>),
# Stores item nominal price as listed in module_items.py. Does not take item modifier or quantity (for food items) into account.
store_random_horse = 2257 # (store_random_horse, <destination>),
# Deprecated since early M&B days.
store_random_equipment = 2258 # (store_random_equipment, <destination>),
# Deprecated since early M&B days.
store_random_armor = 2259 # (store_random_armor, <destination>),
# Deprecated since early M&B days.
cur_item_add_mesh = 1964 # (cur_item_add_mesh, <mesh_name_string>, [<lod_begin>], [<lod_end>]),
# Version 1.161+. Only call inside ti_on_init_item trigger. Adds another mesh to item, allowing the creation of combined items. Parameter <mesh_name_string> should contain mesh name itself, NOT a mesh reference. LOD values are optional. If <lod_end> is used, it will not be loaded.
cur_item_set_material = 1978 # (cur_item_set_material, <string_no>, <sub_mesh_no>, [<lod_begin>], [<lod_end>]),
# Version 1.161+. Only call inside ti_on_init_item trigger. Replaces material that will be used to render the item mesh. Use 0 for <sub_mesh_no> to replace material for base mesh. LOD values are optional. If <lod_end> is used, it will not be loaded.
item_get_weight = 2700 # (item_get_weight, <destination_fixed_point>, <item_kind_no>),
# Version 1.161+. Retrieves item weight as a fixed point value.
item_get_value = 2701 # (item_get_value, <destination>, <item_kind_no>),
# Version 1.161+. Retrieves item base price. Essentially a duplicate of (store_item_value).
item_get_difficulty = 2702 # (item_get_difficulty, <destination>, <item_kind_no>),
# Version 1.161+. Retrieves item difficulty value.
item_get_head_armor = 2703 # (item_get_head_armor, <destination>, <item_kind_no>),
# Version 1.161+. Retrieves item head armor value.
item_get_body_armor = 2704 # (item_get_body_armor, <destination>, <item_kind_no>),
# Version 1.161+. Retrieves item body armor value.
item_get_leg_armor = 2705 # (item_get_leg_armor, <destination>, <item_kind_no>),
# Version 1.161+. Retrieves item leg armor value.
item_get_hit_points = 2706 # (item_get_hit_points, <destination>, <item_kind_no>),
# Version 1.161+. Retrieves item hit points amount.
item_get_weapon_length = 2707 # (item_get_weapon_length, <destination>, <item_kind_no>),
# Version 1.161+. Retrieves item length (for weapons) or shield half-width (for shields). To get actual shield width, multiply this value by 2. Essentially, it is a distance from shield's "center" point to its left, right and top edges (and bottom edge as well if shield height is not defined).
item_get_speed_rating = 2708 # (item_get_speed_rating, <destination>, <item_kind_no>),
# Version 1.161+. Retrieves item speed rating.
item_get_missile_speed = 2709 # (item_get_missile_speed, <destination>, <item_kind_no>),
# Version 1.161+. Retrieves item missile speed rating.
item_get_max_ammo = 2710 # (item_get_max_ammo, <destination>, <item_kind_no>),
# Version 1.161+. Retrieves item max ammo amount.
item_get_accuracy = 2711 # (item_get_accuracy, <destination>, <item_kind_no>),
# Version 1.161+. Retrieves item accuracy value. Note that this operation will return 0 for an item with undefined accuracy, even though the item accuracy will actually default to 100.
item_get_shield_height = 2712 # (item_get_shield_height, <destination_fixed_point>, <item_kind_no>),
# Version 1.161+. Retrieves distance from shield "center" to its bottom edge as a fixed point number. Use (set_fixed_point_multiplier, 100), to retrieve the correct value with this operation. To get actual shield height, use shield_height + weapon_length if this operation returns a non-zero value, otherwise use 2 * weapon_length.
item_get_horse_scale = 2713 # (item_get_horse_scale, <destination_fixed_point>, <item_kind_no>),
# Version 1.161+. Retrieves horse scale value as fixed point number.
item_get_horse_speed = 2714 # (item_get_horse_speed, <destination>, <item_kind_no>),
# Version 1.161+. Retrieves horse speed value.
item_get_horse_maneuver = 2715 # (item_get_horse_maneuver, <destination>, <item_kind_no>),
# Version 1.161+. Retrieves horse maneuverability value.
item_get_food_quality = 2716 # (item_get_food_quality, <destination>, <item_kind_no>),
# Version 1.161+. Retrieves food quality coefficient (as of Warband 1.165, this coefficient is actually set for many food items, but never used in the code as there was no way to retrieve this coeff before 1.161 patch).
item_get_abundance = 2717 # (item_get_abundance, <destination>, <item_kind_no>),
# Version 1.161+. Retrieve item abundance value. Note that this operation will return 0 for an item with undefined abundance, even though the item abundance will actually default to 100.
item_get_thrust_damage = 2718 # (item_get_thrust_damage, <destination>, <item_kind_no>),
# Version 1.161+. Retrieves thrust base damage value for item.
item_get_thrust_damage_type = 2719 # (item_get_thrust_damage_type, <destination>, <item_kind_no>),
# Version 1.161+. Retrieves thrust damage type for item (see definitions for "cut", "pierce" and "blunt" in header_items.py).
item_get_swing_damage = 2720 # (item_get_swing_damage, <destination>, <item_kind_no>),
# Version 1.161+. Retrieves swing base damage value for item.
item_get_swing_damage_type = 2721 # (item_get_swing_damage_type, <destination>, <item_kind_no>),
# Version 1.161+. Retrieves swing damage type for item (see definitions for "cut", "pierce" and "blunt" in header_items.py).
item_get_horse_charge_damage = 2722 # (item_get_horse_charge_damage, <destination>, <item_kind_no>),
# Version 1.161+. Retrieves horse charge base damage.
################################################################################
# [ Z13 ] SOUNDS AND MUSIC TRACKS
################################################################################
# There are two types of sound in the game: sounds and tracks. Sounds are just
# short sound effects. They can be positional (i.e. emitted by some object on
# the scene or by player's opponent during the dialog). They can be generic
# sound effects, like playing some drums when player meets mountain bandits.
# Tracks are the background music. The game works as a kind of a music box,
# cycling the available melodies according to the situation. It is up to the
# Module System developer, however, to tell the game what the situation is.
# There are two factors which you can tell the game: situation and culture.
# So you can tell the game that the situation is "ambush" and the culture is
# "khergits", and the game will select the music tracks which fit this
# combination of situation and culture and will rotate them randomly. And of
# course, you can also tell the game to play one specific track if you want.
# Sound effect playback
play_sound_at_position = 599 # (play_sound_at_position, <sound_id>, <position>, [options]),
# Plays a sound in specified scene position. See sf_* flags in header_sounds.py for reference on possible options.
play_sound = 600 # (play_sound, <sound_id>, [options]),
# Plays a sound. If the operation is called from agent, scene_prop or item trigger, then the sound will be positional and 3D. See sf_* flags in header_sounds.py for reference on possible options.
# Music track playback and situational music selection
play_track = 601 # (play_track, <track_id>, [options]),
# Plays specified music track. Possible options: 0 = finish current then play this, 1 = fade out current and start this, 2 = stop current abruptly and start this
play_cue_track = 602 # (play_cue_track, <track_id>),
# Plays specified music track OVER any currently played music track (so you can get two music tracks playing simultaneously). Hardly useful.
music_set_situation = 603 # (music_set_situation, <situation_type>),
# Sets current situation(s) in the game (see mtf_* flags in header_music.py for reference) so the game engine can pick matching tracks from module_music.py. Use 0 to stop any currently playing music (it will resume when situation is later set to something).
music_set_culture = 604 # (music_set_culture, <culture_type>),
# Sets current culture(s) in the game (see mtf_* flags in header_music.py for reference) so the game engine can pick matching tracks from module_music.py. Use 0 to stop any currently playing music (it will resume when cultures are later set to something).
# Sound channel control
stop_all_sounds = 609 # (stop_all_sounds, [options]),
# Stops all playing sounds. Version 1.153 options: 0 = stop only looping sounds, 1 = stop all sounds. Version 1.143 options: 0 = let current track finish, 1 = fade it out, 2 = stop it abruptly.
store_last_sound_channel = 615 # (store_last_sound_channel, <destination>),
# Version 1.153+. UNTESTED. Stores the sound channel used for the last sound operation.
stop_sound_channel = 616 # (stop_sound_channel, <sound_channel_no>),
# Version 1.153+. UNTESTED. Stops sound playing on specified sound channel.
################################################################################
# [ Z14 ] POSITIONS
################################################################################
# Positions are the 3D math of the game. If you want to handle objects in
# space, you will inevitably have to deal with positions. Note that while most
# position-handling operations work both on global map and on the scenes,
# there are operations which will only work in one or another mode.
# Each position consists of three parts: coordinates, rotation and scale.
# Coordinates are three numbers - (X,Y,Z) - which define a certain point in
# space relative to the base of coordinates. Most of the time, the base of
# coordinates is either the center of the global map, or the center of the
# scene, but there may be exceptions. Note that all operations with
# coordinates nearly always use fixed point numbers.
# Position rotation determines just that - rotation around corresponding
# world axis. So rotation around Z axis means rotation around vertical axis,
# in other words - turning right and left. Rotation around X and Y axis will
# tilt the position forward/backwards and right/left respectively.
# It is common game convention that X world axis points to the East, Y world
# axis points to the North and Z world axis points straight up. However this
# is so-called global coordinates system, and more often than not you'll be
# dealing with local coordinates. Local coordinates are the coordinate system
# defined by the object's current position. For the object, his X axis is to
# the right, Y axis is forward, and Z axis is up. This is simple enough, but
# consider what happens if that object is turned upside down in world space?
# Object's Z axis will point upwards *from the object's point of view*, in
# other words, in global space it will be pointing *downwards*. And if the
# object is moving, then its local coordinate system is moving with it...
# you get the idea.
# Imagine the position as a small point with an arrow somewhere in space.
# Position's coordinates are the point's position. Arrow points horizontally
# to the North by default, and position's rotation determines how much was
# it turned in the each of three directions.
# Final element of position is scale. It is of no direct relevance to the
# position itself, and it does not participate in any calculations. However
# it is important when you retrieve or set positions of objects. In this
# case, position's scale is object's scale - so you can shrink that wall
# or quite the opposite, make it grow to the sky, depending on your whim.
# Generic position operations
init_position = 701 # (init_position, <position>),
# Sets position coordinates to [0,0,0], without any rotation and default scale.
copy_position = 700 # (copy_position, <position_target>, <position_source>),
# Makes a duplicate of position_source.
position_copy_origin = 719 # (position_copy_origin, <position_target>, <position_source>),
# Copies coordinates from source position to target position, without changing rotation or scale.
position_copy_rotation = 718 # (position_copy_rotation, <position_target>, <position_source>),
# Copies rotation from source position to target position, without changing coordinates or scale.
position_transform_position_to_parent = 716 # (position_transform_position_to_parent, <position_dest>, <position_anchor>, <position_relative_to_anchor>),
# Converts position from local coordinate space to parent coordinate space. In other words, if you have some position on the scene (anchor) and a position describing some place *relative* to anchor (for example [10,20,0] means "20 meters forward and 10 meters to the right"), after calling this operation you will get that position coordinates on the scene in <position_dest>. Rotation and scale is also taken care of, so you can use relative angles.
position_transform_position_to_local = 717 # (position_transform_position_to_local, <position_dest>, <position_anchor>, <position_source>),
# The opposite to (position_transform_position_to_parent), this operation allows you to get source's *relative* position to your anchor. Suppose you want to run some decision making for your bot agent depending on player's position. In order to know where player is located relative to your bot you call (position_transform_position_to_local, <position_dest>, <bot_position>, <player_position>). Then we check position_dest's Y coordinate - if it's negative, then the player is behind our bot's back.
# Position (X,Y,Z) coordinates
position_get_x = 726 # (position_get_x, <destination_fixed_point>, <position>),
# Return position X coordinate (to the east, or to the right). Base unit is meters. Use (set_fixed_point_multiplier) to set another measurement unit (100 will get you centimeters, 1000 will get you millimeters, etc).
position_get_y = 727 # (position_get_y, <destination_fixed_point>, <position>),
# Return position Y coordinate (to the north, or forward). Base unit is meters. Use (set_fixed_point_multiplier) to set another measurement unit (100 will get you centimeters, 1000 will get you millimeters, etc).
position_get_z = 728 # (position_get_z, <destination_fixed_point>, <position>),
# Return position Z coordinate (to the top). Base unit is meters. Use (set_fixed_point_multiplier) to set another measurement unit (100 will get you centimeters, 1000 will get you millimeters, etc).
position_set_x = 729 # (position_set_x, <position>, <value_fixed_point>),
# Set position X coordinate.
position_set_y = 730 # (position_set_y, <position>, <value_fixed_point>),
# Set position Y coordinate.
position_set_z = 731 # (position_set_z, <position>, <value_fixed_point>),
# Set position Z coordinate.
position_move_x = 720 # (position_move_x, <position>, <movement>, [value]),
# Moves position along X axis. Movement distance is in cms. Optional parameter determines whether the position is moved along the local (value=0) or global (value=1) X axis (i.e. whether the position will be moved to its right/left, or to the global east/west).
position_move_y = 721 # (position_move_y, <position>, <movement>,[value]),
# Moves position along Y axis. Movement distance is in cms. Optional parameter determines whether the position is moved along the local (value=0) or global (value=1) Y axis (i.e. whether the position will be moved forward/backwards, or to the global north/south).
position_move_z = 722 # (position_move_z, <position>, <movement>,[value]),
# Moves position along Z axis. Movement distance is in cms. Optional parameter determines whether the position is moved along the local (value=0) or global (value=1) Z axis (i.e. whether the position will be moved to its above/below, or to the global above/below - these directions will be different if the position is tilted).
position_set_z_to_ground_level = 791 # (position_set_z_to_ground_level, <position>),
# This will bring the position Z coordinate so it rests on the ground level (i.e. an agent could stand on that position). This takes scene props with their collision meshes into account. Only works during a mission, so you can't measure global map height using this.
position_get_distance_to_terrain = 792 # (position_get_distance_to_terrain, <destination>, <position>),
# This will measure the distance between position and terrain below, ignoring all scene props and their collision meshes. Operation only works on the scenes and cannot be used on the global map.
position_get_distance_to_ground_level = 793 # (position_get_distance_to_ground_level, <destination>, <position>),
# This will measure the distance between position and the ground level, taking scene props and their collision meshes into account. Operation only works on the scenes and cannot be used on the global map.
# Position rotation
position_get_rotation_around_x = 742 # (position_get_rotation_around_x, <destination>, <position>),
# Returns angle (in degrees) that the position is rotated around X axis (tilt forward/backwards).
position_get_rotation_around_y = 743 # (position_get_rotation_around_y, <destination>, <position>),
# Returns angle (in degrees) that the position is rotated around Y axis (tilt right/left).
position_get_rotation_around_z = 740 # (position_get_rotation_around_z, <destination>, <position>),
# Returns angle (in degrees) that the position is rotated around Z axis (turning right/left).
position_rotate_x = 723 # (position_rotate_x, <position>, <angle>),
# Rotates position around it's X axis (tilt forward/backwards).
position_rotate_y = 724 # (position_rotate_y, <position>, <angle>),
# Rotates position around Y axis (tilt right/left).
position_rotate_z = 725 # (position_rotate_z, <position>, <angle>, [use_global_z_axis]),
# Rotates position around Z axis (rotate right/left). Pass 1 for use_global_z_axis to rotate the position around global axis instead.
position_rotate_x_floating = 738 # (position_rotate_x_floating, <position>, <angle_fixed_point>),
# Same as (position_rotate_x), but takes fixed point value as parameter, allowing for more precise rotation.
position_rotate_y_floating = 739 # (position_rotate_y_floating, <position>, <angle_fixed_point>),
# Same as (position_rotate_y), but takes fixed point value as parameter, allowing for more precise rotation.
position_rotate_z_floating = 734 # (position_rotate_z_floating, <position_no>, <angle_fixed_point>),
# Version 1.161+. Same as (position_rotate_z), but takes fixed point value as parameter, allowing for more precise rotation.
# Position scale
position_get_scale_x = 735 # (position_get_scale_x, <destination_fixed_point>, <position>),
# Retrieves position scaling along X axis.
position_get_scale_y = 736 # (position_get_scale_y, <destination_fixed_point>, <position>),
# Retrieves position scaling along Y axis.
position_get_scale_z = 737 # (position_get_scale_z, <destination_fixed_point>, <position>),
# Retrieves position scaling along Z axis.
position_set_scale_x = 744 # (position_set_scale_x, <position>, <value_fixed_point>),
# Sets position scaling along X axis.
position_set_scale_y = 745 # (position_set_scale_y, <position>, <value_fixed_point>),
# Sets position scaling along Y axis.
position_set_scale_z = 746 # (position_set_scale_z, <position>, <value_fixed_point>),
# Sets position scaling along Z axis.
# Measurement of distances and angles
get_angle_between_positions = 705 # (get_angle_between_positions, <destination_fixed_point>, <position_no_1>, <position_no_2>),
# Calculates angle between positions, using positions as vectors. Only rotation around Z axis is used. In other words, the function returns the difference between Z rotations of both positions.
position_has_line_of_sight_to_position = 707 # (position_has_line_of_sight_to_position, <position_no_1>, <position_no_2>),
# Checks that you can see one position from another. This obviously implies that both positions must be in global space. Note this is computationally expensive, so try to keep number of these to a minimum.
get_distance_between_positions = 710 # (get_distance_between_positions, <destination>, <position_no_1>, <position_no_2>),
# Returns distance between positions in centimeters.
get_distance_between_positions_in_meters = 711 # (get_distance_between_positions_in_meters, <destination>, <position_no_1>, <position_no_2>),
# Returns distance between positions in meters.
get_sq_distance_between_positions = 712 # (get_sq_distance_between_positions, <destination>, <position_no_1>, <position_no_2>),
# Returns squared distance between two positions in centimeters.
get_sq_distance_between_positions_in_meters = 713 # (get_sq_distance_between_positions_in_meters, <destination>, <position_no_1>, <position_no_2>),
# Returns squared distance between two positions in meters.
position_is_behind_position = 714 # (position_is_behind_position, <position_base>, <position_to_check>),
# Checks if the second position is behind the first.
get_sq_distance_between_position_heights = 715 # (get_sq_distance_between_position_heights, <destination>, <position_no_1>, <position_no_2>),
# Returns squared distance between position *heights* in centimeters.
position_normalize_origin = 741 # (position_normalize_origin, <destination_fixed_point>, <position>),
# What this operation seems to do is calculate the distance between the zero point [0,0,0] and the point with position's coordinates. Can be used to quickly calculate distance to relative positions.
position_get_screen_projection = 750 # (position_get_screen_projection, <position_screen>, <position_world>),
# Calculates the screen coordinates of the position and stores it as position_screen's X and Y coordinates.
# Global map positions
map_get_random_position_around_position = 1627 # (map_get_random_position_around_position, <dest_position_no>, <source_position_no>, <radius>),
# Returns a random position on the global map in the vicinity of the source_position.
map_get_land_position_around_position = 1628 # (map_get_land_position_around_position, <dest_position_no>, <source_position_no>, <radius>),
# Returns a random position on the global map in the vicinity of the source_position. Will always return a land position (i.e. some place you can walk to).
map_get_water_position_around_position = 1629 # (map_get_water_position_around_position, <dest_position_no>, <source_position_no>, <radius>),
# Returns a random position on the global map in the vicinity of the source_position. Will always return a water position (i.e. sea, lake or river).
################################################################################
# [ Z15 ] GAME NOTES
################################################################################
# The game provides the player with the Notes screen, where there are several
# sections: Troops, Factions, Parties, Quests and Information. This is the
# player's "diary", where all information player knows is supposed to be
# stored. With the operations from this section, modder can control what
# objects the player will be able to see in their corresponding sections of
# the Notes screen, and what information will be displayed on each object.
# Note that there's a number of engine-called scripts which take priority to
# text notes created by these operations. Any information in these notes will
# only be visible to the player if those scripts "refuse" to generate the note
# page dynamically. The following scripts can override these notes:
# script_game_get_troop_note
# script_game_get_center_note
# script_game_get_faction_note
# script_game_get_quest_note
# script_game_get_info_page_note
troop_set_note_available = 1095 # (troop_set_note_available, <troop_id>, <value>),
# Enables (value = 1) or disables (value = 0) troop's page in the Notes / Characters section.
add_troop_note_tableau_mesh = 1108 # (add_troop_note_tableau_mesh, <troop_id>, <tableau_material_id>),
# Adds graphical elements to the troop's information page (usually banner and portrait).
add_troop_note_from_dialog = 1114 # (add_troop_note_from_dialog, <troop_id>, <note_slot_no>, <expires_with_time>),
# Adds current dialog text to troop notes. Each troop has 16 note slots. Last parameter is used to mark the note as time-dependent, if its value is 1, then the note will be marked ("Report is current") and will be updated appropriately as the game progresses ("Report is X days old").
add_troop_note_from_sreg = 1117 # (add_troop_note_from_sreg, <troop_id>, <note_slot_no>, <string_id>, <expires_with_time>),
# Adds any text stored in string register to troop notes. Each troop has 16 note slots. Last parameter is used to mark the note as time-dependent, if its value is 1, then the note will be marked ("Report is current") and will be updated appropriately as the game progresses ("Report is X days old").
faction_set_note_available = 1096 # (faction_set_note_available, <faction_id>, <value>), #1 = available, 0 = not available
# Enables (value = 1) or disables (value = 0) faction's page in the Notes / Factions section.
add_faction_note_tableau_mesh = 1109 # (add_faction_note_tableau_mesh, <faction_id>, <tableau_material_id>),
# Adds graphical elements to the faction's information page (usually graphical collage).
add_faction_note_from_dialog = 1115 # (add_faction_note_from_dialog, <faction_id>, <note_slot_no>, <expires_with_time>),
# Adds current dialog text to faction notes. Each faction has 16 note slots. Last parameter is used to mark the note as time-dependent, if its value is 1, then the note will be marked ("Report is current") and will be updated appropriately as the game progresses ("Report is X days old").
add_faction_note_from_sreg = 1118 # (add_faction_note_from_sreg, <faction_id>, <note_slot_no>, <string_id>, <expires_with_time>),
# Adds any text stored in string register to faction notes. Each faction has 16 note slots. Last parameter is used to mark the note as time-dependent, if its value is 1, then the note will be marked ("Report is current") and will be updated appropriately as the game progresses ("Report is X days old").
party_set_note_available = 1097 # (party_set_note_available, <party_id>, <value>), #1 = available, 0 = not available
# Enables (value = 1) or disables (value = 0) party's page in the Notes / Parties section.
add_party_note_tableau_mesh = 1110 # (add_party_note_tableau_mesh, <party_id>, <tableau_material_id>),
# Adds graphical elements to the party's information page (usually map icon).
add_party_note_from_dialog = 1116 # (add_party_note_from_dialog, <party_id>, <note_slot_no>, <expires_with_time>),
# Adds current dialog text to party notes. Each party has 16 note slots. Last parameter is used to mark the note as time-dependent, if its value is 1, then the note will be marked ("Report is current") and will be updated appropriately as the game progresses ("Report is X days old").
add_party_note_from_sreg = 1119 # (add_party_note_from_sreg, <party_id>, <note_slot_no>, <string_id>, <expires_with_time>),
# Adds any text stored in string register to party notes. Each party has 16 note slots. Last parameter is used to mark the note as time-dependent, if its value is 1, then the note will be marked ("Report is current") and will be updated appropriately as the game progresses ("Report is X days old").
quest_set_note_available = 1098 # (quest_set_note_available, <quest_id>, <value>), #1 = available, 0 = not available
# Enables (value = 1) or disables (value = 0) quest's page in the Notes / Quests section.
add_quest_note_tableau_mesh = 1111 # (add_quest_note_tableau_mesh, <quest_id>, <tableau_material_id>),
# Adds graphical elements to the quest's information page (not used in Native).
add_quest_note_from_dialog = 1112 # (add_quest_note_from_dialog, <quest_id>, <note_slot_no>, <expires_with_time>),
# Adds current dialog text to quest notes. Each quest has 16 note slots. Last parameter is used to mark the note as time-dependent, if its value is 1, then the note will be marked ("Report is current") and will be updated appropriately as the game progresses ("Report is X days old").
add_quest_note_from_sreg = 1113 # (add_quest_note_from_sreg, <quest_id>, <note_slot_no>, <string_id>, <expires_with_time>),
# Adds any text stored in string register to quest notes. Each quest has 16 note slots. Last parameter is used to mark the note as time-dependent, if its value is 1, then the note will be marked ("Report is current") and will be updated appropriately as the game progresses ("Report is X days old").
add_info_page_note_tableau_mesh = 1090 # (add_info_page_note_tableau_mesh, <info_page_id>, <tableau_material_id>),
# Adds graphical elements to the info page (not used in Native).
add_info_page_note_from_dialog = 1091 # (add_info_page_note_from_dialog, <info_page_id>, <note_slot_no>, <expires_with_time>),
# Adds current dialog text to info page notes. Each info page has 16 note slots. Last parameter is used to mark the note as time-dependent, if its value is 1, then the note will be marked ("Report is current") and will be updated appropriately as the game progresses ("Report is X days old").
add_info_page_note_from_sreg = 1092 # (add_info_page_note_from_sreg, <info_page_id>, <note_slot_no>, <string_id>, <expires_with_time>),
# Adds any text stored in string register to info page notes. Each info page has 16 note slots. Last parameter is used to mark the note as time-dependent, if its value is 1, then the note will be marked ("Report is current") and will be updated appropriately as the game progresses ("Report is X days old").
################################################################################
# [ Z16 ] TABLEAUS AND HERALDICS
################################################################################
# Tableaus are the tool that gives you limited access to the game graphical
# renderer. If you know 3D graphics, you know that all 3D objects consist of
# a mesh (which defines its form) and the material (which defines how this
# mesh is "painted"). With tableau functions you can do two things. First, you
# can replace or alter the materials used to render the game objects (with
# many restrictions). If this sounds esoteric to you, have a look at the game
# heraldry - it is implemented using tableaus. Second, you can render images
# of various game objects and place them on the game menus, presentations and
# so on. For example, if you open the game Inventory window, you can see your
# character in his current equipment. This character is rendered using tableau
# operations. Similarly, if you open the Notes screen and select some kingdom
# lord on the Troops section, you will see that lord's face and banner. Both
# face and banner are drawn using tableaus.
cur_item_set_tableau_material = 1981 # (cur_item_set_tableau_material, <tableau_material_id>, <instance_code>),
# Can only be used inside ti_on_init_item trigger in module_items.py. Assigns tableau to the item instance. Value of <instance_code> will be passed to tableau code. Commonly used for heraldic armors and shields.
cur_scene_prop_set_tableau_material = 1982 # (cur_scene_prop_set_tableau_material, <tableau_material_id>, <instance_code>),
# Can only be used inside ti_on_init_scene_prop trigger in module_scene_props.py. Assigns tableau to the scene prop instance. Value of <instance_code> will be passed to tableau code. Commonly used for static banners.
cur_map_icon_set_tableau_material = 1983 # (cur_map_icon_set_tableau_material, <tableau_material_id>, <instance_code>),
# Can only be used inside ti_on_init_map_icon trigger in module_map_icons.py. Assigns tableau to the icon prop instance. Value of <instance_code> will be passed to tableau code. Commonly used for player/lord party banners.
cur_agent_set_banner_tableau_material = 1986 # (cur_agent_set_banner_tableau_material, <tableau_material_id>)
# Can only be used inside ti_on_agent_spawn trigger in module_mission_templates.py. Assigns the tableau as the agent's banner heraldry.
# Operations used in module_tableau_materials.py module
cur_tableau_add_tableau_mesh = 1980 # (cur_tableau_add_tableau_mesh, <tableau_material_id>, <value>, <position_register_no>),
# Used in module_tableau_materials.py to add one tableau to another. Value parameter is passed to tableau_material as is.
cur_tableau_render_as_alpha_mask = 1984 # (cur_tableau_render_as_alpha_mask)
# Tells the engine to treat the tableau as an alpha (transparency) mask.
cur_tableau_set_background_color = 1985 # (cur_tableau_set_background_color, <value>),
# Defines solid background color for the current tableau.
cur_tableau_set_ambient_light = 1987 # (cur_tableau_set_ambient_light, <red_fixed_point>, <green_fixed_point>, <blue_fixed_point>),
# Not documented. Used for tableaus rendered from 3D objects to provide uniform tinted lighting.
cur_tableau_set_camera_position = 1988 # (cur_tableau_set_camera_position, <position>),
# Not documented. Used for tableaus rendered from 3D objects to position camera as necessary (usually with a perspective camera).
cur_tableau_set_camera_parameters = 1989 # (cur_tableau_set_camera_parameters, <is_perspective>, <camera_width_times_1000>, <camera_height_times_1000>, <camera_near_times_1000>, <camera_far_times_1000>),
# Not documented. Used to define camera parameters for tableau rendering. Perspective camera is generally used to render 3D objects for tableaus, while non-perspective camera is used to modify tableau texture meshes.
cur_tableau_add_point_light = 1990 # (cur_tableau_add_point_light, <position>, <red_fixed_point>, <green_fixed_point>, <blue_fixed_point>),
# Not documented. Typically used for tableaus rendered from 3D objects to add a point light source.
cur_tableau_add_sun_light = 1991 # (cur_tableau_add_sun_light, <position>, <red_fixed_point>, <green_fixed_point>, <blue_fixed_point>),
# Not documented. Typically used for tableaus rendered from 3D objects to add a directional light source. Note that position coordinates do not matter, only rotation (i.e. light rays direction) does.
cur_tableau_add_mesh = 1992 # (cur_tableau_add_mesh, <mesh_id>, <position>, <value_fixed_point>, <value_fixed_point>),
# Adds a static mesh to the tableau with specified offset, scale and alpha. First value fixed point is the scale factor, second value fixed point is alpha. use 0 for default values.
cur_tableau_add_mesh_with_vertex_color = 1993 # (cur_tableau_add_mesh_with_vertex_color, <mesh_id>, <position>, <value_fixed_point>, <value_fixed_point>, <value>),
# Adds a static mesh to the tableau with specified offset, scale, alpha and vertex color. First value fixed point is the scale factor, second value fixed point is alpha. Value is vertex color.
cur_tableau_add_mesh_with_scale_and_vertex_color = 2000 # (cur_tableau_add_mesh_with_scale_and_vertex_color, <mesh_id>, <position>, <scale_position>, <value_fixed_point>, <value>),
# Similar to (cur_tableau_add_mesh_with_vertex_color), but allows non-uniform scaling. Scale factors are stored as (x,y,z) position properties with fixed point values.
cur_tableau_add_map_icon = 1994 # (cur_tableau_add_map_icon, <map_icon_id>, <position>, <value_fixed_point>),
# Adds a rendered image of a map icon to current tableau. Last parameter is the scale factor for the model.
cur_tableau_add_troop = 1995 # (cur_tableau_add_troop, <troop_id>, <position>, <animation_id>, <instance_no>),
# Adds a rendered image of the troop in a specified animation to current tableau. If instance_no is 0 or less, then the face is not generated randomly (important for heroes).
cur_tableau_add_horse = 1996 # (cur_tableau_add_horse, <item_id>, <position>, <animation_id>),
# Adds a rendered image of a horse in a specified animation to current tableau.
cur_tableau_set_override_flags = 1997 # (cur_tableau_set_override_flags, <value>),
# When creating a troop image for current tableau, this operation allows to override troop's inventory partially or completely. See af_* flags in header_mission_templates.py for reference.
cur_tableau_clear_override_items = 1998 # (cur_tableau_clear_override_items),
# Removes any previously defined equipment overrides for the troop, allowing to start from scratch.
cur_tableau_add_override_item = 1999 # (cur_tableau_add_override_item, <item_kind_id>),
# When creating a troop image for current tableau, the operation will add a new item to troop's equipment.
################################################################################
# [ Z17 ] STRING OPERATIONS
################################################################################
# The game provides you only limited control over string information. Most
# operations will either retrieve some string (usually the name) from the game
# object, or set that object's name to a string.
# Two important functions are str_store_string and str_store_string_reg. They
# are different from all others because they not only assign the string to a
# string register, they *process* it. For example, if source string contains
# "{reg3}", then the resulting string will have the register name and its
# surrounding brackets replaced with the value currently stored in that
# register. Other strings can be substituted as well, and even some limited
# logic can be implemented using this mechanism. You can try to read through
# the module_strings.py file and try to deduce what each particular
# substitution does.
# Conditional operations
str_is_empty = 2318 # (str_is_empty, <string_register>),
# Checks that referenced string register is empty.
# Other string operations
str_clear = 2319 # (str_clear, <string_register>)
# Clears the contents of the referenced string register.
str_store_string = 2320 # (str_store_string, <string_register>, <string_id>),
# Stores a string value in the referenced string register. Only string constants and quick strings can be stored this way.
str_store_string_reg = 2321 # (str_store_string_reg, <string_register>, <string_no>),
# Copies the contents of one string register from another.
str_store_troop_name = 2322 # (str_store_troop_name, <string_register>, <troop_id>),
# Stores singular troop name in referenced string register.
str_store_troop_name_plural = 2323 # (str_store_troop_name_plural, <string_register>, <troop_id>),
# Stores plural troop name in referenced string register.
str_store_troop_name_by_count = 2324 # (str_store_troop_name_by_count, <string_register>, <troop_id>, <number>),
# Stores singular or plural troop name with number of troops ("29 Archers", "1 Bandit").
str_store_item_name = 2325 # (str_store_item_name, <string_register>, <item_id>),
# Stores singular item name in referenced string register.
str_store_item_name_plural = 2326 # (str_store_item_name_plural, <string_register>, <item_id>),
# Stores plural item name in referenced string register.
str_store_item_name_by_count = 2327 # (str_store_item_name_by_count, <string_register>, <item_id>),
# Stores singular or plural item name with number of items ("11 Swords", "1 Bottle of Wine"). NOTE(review): unlike str_store_troop_name_by_count, no <number> parameter is listed here -- verify against actual engine usage.
str_store_party_name = 2330 # (str_store_party_name, <string_register>, <party_id>),
# Stores party name in referenced string register.
str_store_agent_name = 2332 # (str_store_agent_name, <string_register>, <agent_id>),
# Stores agent name in referenced string register.
str_store_faction_name = 2335 # (str_store_faction_name, <string_register>, <faction_id>),
# Stores faction name in referenced string register.
str_store_quest_name = 2336 # (str_store_quest_name, <string_register>, <quest_id>),
# Stores quest name (as defined in module_quests.py) in referenced string register.
str_store_info_page_name = 2337 # (str_store_info_page_name, <string_register>, <info_page_id>),
# Stores info page title (as defined in module_info_pages.py) in referenced string register.
str_store_date = 2340 # (str_store_date, <string_register>, <number_of_hours_to_add_to_the_current_date>),
# Stores formatted date string, using the number of hours since start of the game (can be retrieved by a call to store_current_hours).
str_store_troop_name_link = 2341 # (str_store_troop_name_link, <string_register>, <troop_id>),
# Stores troop name as an internal game link. Resulting string can be used in game notes, will be highlighted, and clicking on it will redirect the player to the details page of the referenced troop.
str_store_party_name_link = 2342 # (str_store_party_name_link, <string_register>, <party_id>),
# Stores party name as an internal game link. Resulting string can be used in game notes, will be highlighted, and clicking on it will redirect the player to the details page of the referenced party.
str_store_faction_name_link = 2343 # (str_store_faction_name_link, <string_register>, <faction_id>),
# Stores faction name as an internal game link. Resulting string can be used in game notes, will be highlighted, and clicking on it will redirect the player to the details page of the referenced faction.
str_store_quest_name_link = 2344 # (str_store_quest_name_link, <string_register>, <quest_id>),
# Stores quest name as an internal game link. Resulting string can be used in game notes, will be highlighted, and clicking on it will redirect the player to the details page of the referenced quest.
str_store_info_page_name_link = 2345 # (str_store_info_page_name_link, <string_register>, <info_page_id>),
# Stores info page title as an internal game link. Resulting string can be used in game notes, will be highlighted, and clicking on it will redirect the player to the details page of the referenced info page.
str_store_class_name = 2346 # (str_store_class_name, <string_register>, <class_id>)
# Stores name of the selected troop class (Infantry, Archers, Cavalry or any of the custom class names) in referenced string register.
game_key_get_mapped_key_name = 65 # (game_key_get_mapped_key_name, <string_register>, <game_key>),
# Version 1.161+. Stores human-readable key name that's currently assigned to the provided game key. May store "unknown" and "No key assigned" strings (the latter is defined in languages/en/ui.csv, the former seems to be hardcoded).
# Network/multiplayer-related string operations
str_store_player_username = 2350 # (str_store_player_username, <string_register>, <player_id>),
# Stores player's multiplayer username in referenced string register. Can be used in multiplayer mode only.
str_store_server_password = 2351 # (str_store_server_password, <string_register>),
# Stores server's password in referenced string register.
str_store_server_name = 2352 # (str_store_server_name, <string_register>),
# Stores server's name (as displayed to clients in server's list window) in referenced string register.
str_store_welcome_message = 2353 # (str_store_welcome_message, <string_register>),
# Stores server's welcome message in referenced string register.
str_encode_url = 2355 # (str_encode_url, <string_register>),
# This operation will "sanitize" a string to be used as part of network URL, replacing any non-standard characters with their '%'-codes.
################################################################################
# [ Z18 ] OUTPUT AND MESSAGES
################################################################################
# These operations will provide some textual information to the player during
# the game. There are three operations which will generate a game message
# (displayed as a chat-like series of text strings in the bottom-left part of
# the screen), while most others will be displaying various types of dialog
# boxes. You can also ask a question to player using these operations.
display_debug_message = 1104 # (display_debug_message, <string_id>, [hex_colour_code]),
# Displays a string message, but only in debug mode, using provided color (hex-coded 0xRRGGBB). The message is additionally written to rgl_log.txt file in both release and debug modes when edit mode is enabled.
display_log_message = 1105 # (display_log_message, <string_id>, [hex_colour_code]),
# Display a string message using provided color (hex-coded 0xRRGGBB). The message will also be written to game log (accessible through Notes / Game Log), and will persist between sessions (i.e. it will be stored as part of the savegame).
display_message = 1106 # (display_message, <string_id>, [hex_colour_code]),
# Display a string message using provided color (hex-coded 0xRRGGBB).
set_show_messages = 1107 # (set_show_messages, <value>),
# Suppresses (value = 0) or enables (value = 1) game messages, including those generated by the game engine.
tutorial_box = 1120 # (tutorial_box, <string_id>, <string_id>),
# This operation is deprecated but is still used in Native. Note that it shares opcode 1120 with dialog_box below; prefer dialog_box in new code.
dialog_box = 1120 # (dialog_box, <text_string_id>, [title_string_id]),
# Displays a popup window with the text message and an optional caption.
question_box = 1121 # (question_box, <string_id>, [<yes_string_id>], [<no_string_id>]),
# Displays a popup window with the text of the question and two buttons (Yes and No by default, but can be overridden). When the player selects one of possible responses, a ti_on_question_answered trigger will be executed.
tutorial_message = 1122 # (tutorial_message, <string_id>, [color], [auto_close_time]),
# Displays a popup window with tutorial text stored in referenced string or string register. Use -1 to close any currently open tutorial box. Optional parameters allow you to define text color and time period after which the tutorial box will close automatically.
tutorial_message_set_position = 1123 # (tutorial_message_set_position, <position_x>, <position_y>),
# Defines screen position for the tutorial box. Assumes screen size is 1000*750.
tutorial_message_set_size = 1124 # (tutorial_message_set_size, <size_x>, <size_y>),
# Defines size of the tutorial box. Assumes screen size is 1000*750.
tutorial_message_set_center_justify = 1125 # (tutorial_message_set_center_justify, <val>),
# Sets tutorial box to be center justified (value = 1), or use positioning dictated by tutorial_message_set_position (value = 0).
tutorial_message_set_background = 1126 # (tutorial_message_set_background, <value>),
# Defines whether the tutorial box will have a background or not (1 or 0). Default is off.
################################################################################
# [ Z19 ] GAME CONTROL: SCREENS, MENUS, DIALOGS AND ENCOUNTERS
################################################################################
# An encounter is what happens when player's party meets another party on the
# world map. While most operations in the game can be performed outside of
# encounter, there's one thing you can only do when in encounter context -
# standard game battle. When you are initiating the battle from an encounter,
# the game engine will do most of the grunt work for you. You can order the
# engine to add some parties to the battle on this or that side, and the
# soldiers from those parties will spawn on the battlefield, in the numbers
# proportional to the party sizes, and the agents will maintain links with
# their parties. If agents earn experience, this will be reflected on the
# world map, and if some agents die, party sizes will be decreased. All this
# stuff can potentially be emulated by the Module System code, but it's tons
# of work and is still much less efficient than the tool the game engine
# already provides to you.
# An important notice: when player encounters an AI party on the map, the game
# calls "game_event_party_encounter" script in the module_scripts.py. So if
# you want to implement some non-standard processing of game encounters, this
# is the place you should start from. Also note that the game implements the
# Camp menu as an encounter with a hardcoded party "p_camp_bandits".
# Also you can find many operations in this section dealing with game screens,
# game menus and game dialogs. Keep in mind that some screens only make sense
# in certain contexts, and game menus are only available on the world map, you
# cannot use game menus during the mission.
# Conditional operations
entering_town = 36 # (entering_town, <town_id>),
# Apparently deprecated.
encountered_party_is_attacker = 39 # (encountered_party_is_attacker),
# Checks that the party encountered on the world map was following player (i.e. either player was trying to run away or at the very least this is a head-on clash).
conversation_screen_is_active = 42 # (conversation_screen_is_active),
# Checks that the player is currently in dialogue with some agent. Can only be used in triggers of module_mission_templates.py file.
in_meta_mission = 44 # (in_meta_mission),
# Deprecated, do not use.
# Game hardcoded windows and related operations
change_screen_return = 2040 # (change_screen_return),
# Closes any current screen and returns the player to worldmap (to scene?). 4research how it behaves in missions.
change_screen_loot = 2041 # (change_screen_loot, <troop_id>),
# Opens the Looting interface, using the provided troop as loot storage. Player has full access to troop inventory.
change_screen_trade = 2042 # (change_screen_trade, [troop_id]),
# Opens the Trade screen, using the provided troop as the trading partner. When called from module_dialogs, troop_id is optional and defaults to current dialogue partner.
change_screen_exchange_members = 2043 # (change_screen_exchange_members, [exchange_leader], [party_id]),
# Opens the Exchange Members With Party interface, using the specified party_id. If called during an encounter, party_id is optional and defaults to the encountered party. Second parameter determines whether the party leader is exchangeable (useful when managing the castle garrison).
change_screen_trade_prisoners = 2044 # (change_screen_trade_prisoners),
# Opens the Sell Prisoners interface. Script "script_game_get_prisoner_price" will be used to determine prisoner price.
change_screen_buy_mercenaries = 2045 # (change_screen_buy_mercenaries),
# Opens the Buy Mercenaries interface, where player can hire troops from the party specified with (set_mercenary_source_party) operation. Only works from the dialog.
change_screen_view_character = 2046 # (change_screen_view_character),
# Opens the character screen of another troop. Can only be used in dialogs.
change_screen_training = 2047 # (change_screen_training),
# Opens the character screen for the troop that player is currently talking to. Only works in dialogs. Deprecated, use (change_screen_view_character) instead.
change_screen_mission = 2048 # (change_screen_mission),
# Starts the mission, using previously defined scene and mission template.
change_screen_map_conversation = 2049 # (change_screen_map_conversation, <troop_id>),
# Starts the mission, same as (change_screen_mission). However once the mission starts, player will get into dialog with the specified troop, and once the dialog ends, the mission will automatically end.
change_screen_exchange_with_party = 2050 # (change_screen_exchange_with_party, <party_id>),
# Effectively duplicates (change_screen_exchange_members), but party_id parameter is obligatory and the operation doesn't have an option to prevent party leader from being exchanged.
change_screen_equip_other = 2051 # (change_screen_equip_other, [troop_id]),
# Opens the Equip Companion interface. When calling from a dialog, it is not necessary to specify troop_id.
change_screen_map = 2052 # (change_screen_map),
# Changes the screen to global map, closing any currently running game menu, dialog or mission.
change_screen_notes = 2053 # (change_screen_notes, <note_type>, <object_id>),
# Opens the Notes screen, in the selected category (note_type: 1=troops, 2=factions, 3=parties, 4=quests, 5=info_pages) and for the specified object in that category.
change_screen_quit = 2055 # (change_screen_quit),
# Quits the game to the main menu.
change_screen_give_members = 2056 # (change_screen_give_members, [party_id]),
# Opens the Give Troops to Another Party interface. Party_id parameter is optional during an encounter and will use encountered party as default value.
change_screen_controls = 2057 # (change_screen_controls),
# Opens the standard Configure Controls screen, pausing the game.
change_screen_options = 2058 # (change_screen_options),
# Opens the standard Game Options screen, pausing the game.
set_mercenary_source_party = 1320 # (set_mercenary_source_party, <party_id>),
# Defines the party from which the player will buy mercenaries with (change_screen_buy_mercenaries).
start_map_conversation = 1025 # (start_map_conversation, <troop_id>, [troop_dna]),
# Starts a conversation with the selected troop. Can be called directly from global map or game menus. Troop DNA parameter allows you to randomize non-hero troop appearances.
# Game menus
set_background_mesh = 2031 # (set_background_mesh, <mesh_id>),
# Sets the specified mesh as the background for the current menu. Possibly can be used for dialogs or presentations, but was not tested.
set_game_menu_tableau_mesh = 2032 # (set_game_menu_tableau_mesh, <tableau_material_id>, <value>, <position_register_no>),
# Adds a tableau to the current game menu screen. Position (X,Y) coordinates define mesh position, Z coordinate defines scaling. Parameter <value> will be passed as tableau_material script parameter.
jump_to_menu = 2060 # (jump_to_menu, <menu_id>),
# Opens the specified game menu. Note this only happens after the current block of code completes execution.
disable_menu_option = 2061 # (disable_menu_option),
# Never used in native. Apparently deprecated as menu options have prerequisite code blocks now.
# Game encounter handling operations
set_party_battle_mode = 1020 # (set_party_battle_mode),
# Used before or during the mission to start battle mode (and apparently make agents use appropriate AI).
finish_party_battle_mode = 1019 # (finish_party_battle_mode),
# Used during the mission to stop battle mode.
start_encounter = 1300 # (start_encounter, <party_id>),
# Forces the player party to initiate encounter with the specified party. Distance does not matter in this situation.
leave_encounter = 1301 # (leave_encounter),
# Leaves encounter mode.
encounter_attack = 1302 # (encounter_attack),
# Apparently starts the standard battle with the encountered party. 4research.
select_enemy = 1303 # (select_enemy, <value>),
# When joining a battle, this determines what side player will be helping. Defending party is always 0, and attacking party is always 1. Player can support either attackers (value = 0, i.e. defenders are the enemy) or defenders (value = 1).
set_passage_menu = 1304 # (set_passage_menu, <value>),
# When setting up a mission, this allows you to determine what game menu will be used for that mission passages instead of "mnu_town". Passage menu item number will determine what menu option (in sequential order, starting from 0) will be executed when the player activates that passage on the scene. Note that menu option condition code block will be ignored.
start_mission_conversation = 1920 # (start_mission_conversation, <troop_id>),
# During the mission, initiates the dialog with specified troop.
set_conversation_speaker_troop = 2197 # (set_conversation_speaker_troop, <troop_id>),
# Allows to dynamically switch speaking troops during the dialog when developer doesn't know in advance who will be doing the speaking. Should be placed in post-talk code section of dialog entry.
set_conversation_speaker_agent = 2198 # (set_conversation_speaker_agent, <agent_id>),
# Allows to dynamically switch speaking agents during the dialog when developer doesn't know in advance who will be doing the speaking. Should be placed in post-talk code section of dialog entry.
store_conversation_agent = 2199 # (store_conversation_agent, <destination>),
# Stores identifier of agent who is currently speaking.
store_conversation_troop = 2200 # (store_conversation_troop, <destination>),
# Stores identifier of troop who is currently speaking.
store_partner_faction = 2201 # (store_partner_faction, <destination>),
# Stores faction of the troop player is speaking to.
store_encountered_party = 2202 # (store_encountered_party, <destination>),
# Stores identifier of the encountered party.
store_encountered_party2 = 2203 # (store_encountered_party2, <destination>),
# Stores the identifier of the second encountered party (when first party is in battle, this one will return its battle opponent).
set_encountered_party = 2205 # (set_encountered_party, <party_no>),
# Sets the specified party as encountered by player, but does not run the entire encounter routine. Used in Native during chargen to set up the starting town and then immediately throw the player into street fight without showing him the town menu.
end_current_battle = 1307 # (end_current_battle),
# Apparently ends the battle between player's party and its opponent. Exact effects not clear. 4research.
# Operations specific to dialogs
store_repeat_object = 50 # (store_repeat_object, <destination>),
# Used in the dialogs code in combination with repeat_for_* dialog parameters, when creating dynamical player responses. Stores the value for the current iteration (i.e. a faction ID when repeat_for_factions is used, etc).
talk_info_show = 2020 # (talk_info_show, <hide_or_show>),
# Used in the dialogs code to display relations bar on opponent's portrait when mouse is hovering over it (value = 1) or disable this functionality (value = 0)
talk_info_set_relation_bar = 2021 # (talk_info_set_relation_bar, <value>),
# Sets the relations value for relationship bar in the dialog. Value should be in range -100..100.
talk_info_set_line = 2022 # (talk_info_set_line, <line_no>, <string_no>)
# Sets the additional text information (usually troop name) to be displayed together with the relations bar.
################################################################################
# [ Z20 ] SCENES AND MISSIONS
################################################################################
# To put the player into a 3D scene, you need two things. First is the scene
# itself. All scenes are defined in module_scenes.py file. The second element
# is no less important, and it's called mission template. Mission template
# will determine the context of the events on the scene - who will spawn
# where, who will be hostile or friendly to player or to each other, etc.
# Because of all this, when player is put on the 3D scene in the game, it is
# commonly said that player is "in a mission".
# Conditional operations
all_enemies_defeated = 1003 # (all_enemies_defeated, [team_id]),
# Checks if all agents from the specified team are defeated. When team_id is omitted default enemy team is checked.
race_completed_by_player = 1004 # (race_completed_by_player),
# Not documented. Not used in Native. Apparently deprecated.
num_active_teams_le = 1005 # (num_active_teams_le, <value>),
# Checks that the number of active teams (i.e. teams with at least one active agent) is less than or equal to given value.
main_hero_fallen = 1006 # (main_hero_fallen),
# Checks that the player has been knocked out.
scene_allows_mounted_units = 1834 # (scene_allows_mounted_units),
# Not documented. Used in multiplayer, but it's not clear where horses could be disallowed in the first place. 4research.
is_zoom_disabled = 2222 # (is_zoom_disabled),
# Version 1.153+. Checks that the zoom is currently disabled in the game.
# Scene slot operations
scene_set_slot = 503 # (scene_set_slot, <scene_id>, <slot_no>, <value>),
scene_get_slot = 523 # (scene_get_slot, <destination>, <scene_id>, <slot_no>),
scene_slot_eq = 543 # (scene_slot_eq, <scene_id>, <slot_no>, <value>),
scene_slot_ge = 563 # (scene_slot_ge, <scene_id>, <slot_no>, <value>),
# Scene visitors handling operations
add_troop_to_site = 1250 # (add_troop_to_site, <troop_id>, <scene_id>, <entry_no>),
# Set troop's position in the world to the specified scene and entry point. Entry point must have mtef_scene_source type. Agent will always appear at that entry when entering that scene. No longer used in Native.
remove_troop_from_site = 1251 # (remove_troop_from_site, <troop_id>, <scene_id>),
# Removes the troop from the specified scene. No longer used in Native.
modify_visitors_at_site = 1261 # (modify_visitors_at_site, <scene_id>),
# Declares the scene which visitors will be modified from that moment on.
reset_visitors = 1262 # (reset_visitors),
# Resets all visitors to the scene.
set_visitor = 1263 # (set_visitor, <entry_no>, <troop_id>, [<dna>]),
# Adds the specified troop as the visitor to the entry point of the scene defined with (modify_visitors_at_site). Entry point must have mtef_visitor_source type. Optional DNA parameter allows for randomization of agent looks (only applies to non-hero troops).
set_visitors = 1264 # (set_visitors, <entry_no>, <troop_id>, <number_of_troops>),
# Same as (set_visitor), but spawns an entire group of some troop type.
add_visitors_to_current_scene = 1265 # (add_visitors_to_current_scene, <entry_no>, <troop_id>, <number_of_troops>, <team_no>, <group_no>),
# Adds a number of troops to the specified entry point when the scene is already loaded. Team and group parameters are used in multiplayer mode only, singleplayer mode uses team settings for selected entry point as defined in module_mission_templates.py.
mission_tpl_entry_set_override_flags = 1940 # (mission_tpl_entry_set_override_flags, <mission_template_id>, <entry_no>, <value>),
# Allows modder to use a different set of equipment override flags (see af_* constants in header_mission_templates.py) for the selected entry point.
mission_tpl_entry_clear_override_items = 1941 # (mission_tpl_entry_clear_override_items, <mission_template_id>, <entry_no>),
# Clears the list of override equipment provided by the entry point definition in module_mission_templates.py.
mission_tpl_entry_add_override_item = 1942 # (mission_tpl_entry_add_override_item, <mission_template_id>, <entry_no>, <item_kind_id>),
# Specified item will be added to any agent spawning on specified entry point.
# Mission/scene general operations
set_mission_result = 1906 # (set_mission_result, <value>),
# Sets the result of the current mission (1 for victory, -1 for defeat).
finish_mission = 1907 # (finish_mission, <delay_in_seconds>),
# Exits the scene after the specified delay.
set_jump_mission = 1911 # (set_jump_mission, <mission_template_id>),
# Tells the game to use the specified mission template for the next mission. Apparently should precede the call to (jump_to_scene).
jump_to_scene = 1910 # (jump_to_scene, <scene_id>, [entry_no]),
# Tells the game to use the specified scene for the next mission. Usually followed by (change_screen_mission) call. Parameter entry_no does not seem to have any effect.
set_jump_entry = 1912 # (set_jump_entry, <entry_no>),
# Defines what entry point the player will appear at when the mission starts.
store_current_scene = 2211 # (store_current_scene, <destination>),
# Retrieves the identifier of the current scene. Note that the operation will return the scene id even after the mission is completed and the player is already on global map.
close_order_menu = 1789 # (close_order_menu),
# Version 1.161+. If orders menu is currently open, it will be closed.
entry_point_get_position = 1780 # (entry_point_get_position, <position>, <entry_no>),
# Retrieves the position of the entry point on the scene.
entry_point_set_position = 1781 # (entry_point_set_position, <entry_no>, <position>),
# Moves the entry point to the specified position on the scene.
entry_point_is_auto_generated = 1782 # (entry_point_is_auto_generated, <entry_no>),
# Checks that the entry point is auto-generated (in other words, there was no such entry point placed in the scene file).
# Scene parameters handling
scene_set_day_time = 1266 # (scene_set_day_time, <value>),
# Defines the time for the scene to force the engine to select a different skybox than the one dictated by current game time. Must be called within ti_before_mission_start trigger in module_mission_templates.py. Value should be in range 0..23.
set_rain = 1797 # (set_rain, <rain-type>, <strength>),
# Sets a new weather for the mission. Rain_type values: 0 = clear, 1 = rain, 2 = snow. Strength is in range 0..100.
set_fog_distance = 1798 # (set_fog_distance, <distance_in_meters>, [fog_color]),
# Sets the density (and optionally color) of the fog for the mission.
set_skybox = 2389 # (set_skybox, <non_hdr_skybox_index>, <hdr_skybox_index>),
# Version 1.153+. Forces the scene to be rendered with specified skybox. Index of -1 will disable.
set_startup_sun_light = 2390 # (set_startup_sun_light, <r>, <g>, <b>),
# Version 1.153+. Defines the sunlight color for the scene.
set_startup_ambient_light = 2391 # (set_startup_ambient_light, <r>, <g>, <b>),
# Version 1.153+. Defines the ambient light color for the scene.
set_startup_ground_ambient_light = 2392 # (set_startup_ground_ambient_light, <r>, <g>, <b>),
# Version 1.153+. Defines the ambient light color for the ground.
get_startup_sun_light = 2394 # (get_startup_sun_light, <position_no>),
# Version 1.165+. Returns startup sunlight color in (x, y, z) coordinates of position register.
get_startup_ambient_light = 2395 # (get_startup_ambient_light, <position_no>),
# Version 1.165+. Returns startup ambient light color in (x, y, z) coordinates of position register.
get_startup_ground_ambient_light = 2396 # (get_startup_ground_ambient_light, <position_no>),
# Version 1.165+. Returns startup ambient ground lighting color in (x, y, z) coordinates of position register.
get_battle_advantage = 1690 # (get_battle_advantage, <destination>),
# Retrieves the calculated battle advantage.
set_battle_advantage = 1691 # (set_battle_advantage, <value>),
# Sets a new value for battle advantage.
get_scene_boundaries = 1799 # (get_scene_boundaries, <position_min>, <position_max>),
# Retrieves the coordinates of the top-left and bottom-right corner of the scene to the provided position registers.
mission_enable_talk = 1935 # (mission_enable_talk),
# Allows dialogue with agents on the scene.
mission_disable_talk = 1936 # (mission_disable_talk),
# Disables dialogue with agents on the scene.
mission_get_time_speed = 2002 # (mission_get_time_speed, <destination_fixed_point>),
# Retrieves current time speed factor for the mission.
mission_set_time_speed = 2003 # (mission_set_time_speed, <value_fixed_point>),
# Instantly changes the speed of time during the mission. Speed of time cannot be set to zero or below. Operation only works when cheat mode is enabled.
mission_time_speed_move_to_value = 2004 # (mission_time_speed_move_to_value, <value_fixed_point>, <duration-in-1/1000-seconds>),
# Changes the speed of time during the mission gradually, within the specified duration period. Speed of time cannot be set to zero or below. Operation only works when cheat mode is enabled.
mission_set_duel_mode = 2006 # (mission_set_duel_mode, <value>),
# Sets duel mode for the multiplayer mission. Values: 0 = off, 1 = on.
store_zoom_amount = 2220 # (store_zoom_amount, <destination_fixed_point>),
# Version 1.153+. Stores current zoom rate.
set_zoom_amount = 2221 # (set_zoom_amount, <value_fixed_point>),
# Version 1.153+. Sets new zoom rate.
# Mission timers
reset_mission_timer_a = 2375 # (reset_mission_timer_a),
# Resets the value of first mission timer and starts it from zero.
reset_mission_timer_b = 2376 # (reset_mission_timer_b),
# Resets the value of second mission timer and starts it from zero.
reset_mission_timer_c = 2377 # (reset_mission_timer_c),
# Resets the value of third mission timer and starts it from zero.
store_mission_timer_a = 2370 # (store_mission_timer_a, <destination>),
# Retrieves current value of first mission timer, in seconds.
store_mission_timer_b = 2371 # (store_mission_timer_b, <destination>),
# Retrieves current value of second mission timer, in seconds.
store_mission_timer_c = 2372 # (store_mission_timer_c, <destination>),
# Retrieves current value of third mission timer, in seconds.
store_mission_timer_a_msec = 2365 # (store_mission_timer_a_msec, <destination>),
# Retrieves current value of first mission timer, in milliseconds.
store_mission_timer_b_msec = 2366 # (store_mission_timer_b_msec, <destination>),
# Retrieves current value of second mission timer, in milliseconds.
store_mission_timer_c_msec = 2367 # (store_mission_timer_c_msec, <destination>),
# Retrieves current value of third mission timer, in milliseconds.
# Camera and rendering operations
mission_cam_set_mode = 2001 # (mission_cam_set_mode, <mission_cam_mode>, <duration-in-1/1000-seconds>, <value>),
# Not documented. Changes main camera mode. Camera mode is 0 for automatic and 1 for manual (controlled by code). Duration parameter is used when switching from manual to auto, to determine how long will camera move to its new position. Third parameter is not documented.
mission_cam_set_screen_color = 2008 # (mission_cam_set_screen_color, <value>),
# Not documented. Paints the screen with solid color. Parameter <value> contains color code with alpha component. Can be used to block screen entirely, add tint etc.
mission_cam_animate_to_screen_color = 2009 # (mission_cam_animate_to_screen_color, <value>, <duration-in-1/1000-seconds>),
# Not documented. Same as above, but color change is gradual. Used in Native to fill the screen with white before the end of marriage scene.
mission_cam_get_position = 2010 # (mission_cam_get_position, <position_register_no>)
# Retrieves the current position of camera during the mission (i.e. the point from which the player is observing the game).
mission_cam_set_position = 2011 # (mission_cam_set_position, <position_register_no>)
# Moves the camera to the specified position during the mission.
mission_cam_animate_to_position = 2012 # (mission_cam_animate_to_position, <position_register_no>, <duration-in-1/1000-seconds>, <value>)
# Moves the camera to the specified position smoothly. Second parameter determines how long it will take camera to move to destination, third parameter determines whether camera velocity will be linear (value = 0) or non-linear (value = 1).
mission_cam_get_aperture = 2013 # (mission_cam_get_aperture, <destination>)
# Not documented. View angle?
mission_cam_set_aperture = 2014 # (mission_cam_set_aperture, <value>)
# Not documented.
mission_cam_animate_to_aperture = 2015 # (mission_cam_animate_to_aperture, <value>, <duration-in-1/1000-seconds>, <value>)
# Not documented. if value = 0, then camera velocity will be linear. else it will be non-linear
mission_cam_animate_to_position_and_aperture = 2016 # (mission_cam_animate_to_position_and_aperture, <position_register_no>, <value>, <duration-in-1/1000-seconds>, <value>)
# Not documented. if value = 0, then camera velocity will be linear. else it will be non-linear
mission_cam_set_target_agent = 2017 # (mission_cam_set_target_agent, <agent_id>, <value>)
# Not documented. if value = 0 then do not use agent's rotation, else use agent's rotation
mission_cam_clear_target_agent = 2018 # (mission_cam_clear_target_agent)
# Not documented.
mission_cam_set_animation = 2019 # (mission_cam_set_animation, <anim_id>),
# Not documented.
mouse_get_world_projection = 751 # (mouse_get_world_projection, <position_no_1>, <position_no_2>),
# Version 1.161+. Returns current camera coordinates (first position) and mouse projection to the back of the world (second position). Rotation data of resulting positions seems unreliable.
cast_ray = 1900 # (cast_ray, <destination>, <hit_position_register>, <ray_position_register>, [<ray_length_fixed_point>]),
# Version 1.161+. Casts a ray starting from <ray_position_register> and stores the closest hit position into <hit_position_register> (fails if no hits). If the body hit is a scene prop, its instance id will be stored into <destination>, otherwise it will be -1. Optional <ray_length> parameter seems to have no effect.
set_postfx = 2386 # (set_postfx, ???)
# This operation is not documented nor any examples of its use could be found. Parameters are unknown.
set_river_shader_to_mud = 2387 # (set_river_shader_to_mud, ???)
# Changes river material for muddy env. This operation is not documented nor any examples of its use could be found. Parameters are unknown.
rebuild_shadow_map = 2393 # (rebuild_shadow_map),
# Version 1.153+. UNTESTED. Effects unknown. Rebuilds shadow map for the current scene. Apparently useful after heavy manipulation with scene props.
set_shader_param_int = 2400 # (set_shader_param_int, <parameter_name>, <value>), #Sets the int shader parameter <parameter_name> to <value>
# Version 1.153+. UNTESTED. Allows direct manipulation of shader parameters. Operation scope is unknown, possibly global. Parameter is an int value.
set_shader_param_float = 2401 # (set_shader_param_float, <parameter_name>, <value_fixed_point>),
# Version 1.153+. Allows direct manipulation of shader parameters. Operation scope is unknown, possibly global. Parameter is a float value.
set_shader_param_float4 = 2402 # (set_shader_param_float4, <parameter_name>, <valuex>, <valuey>, <valuez>, <valuew>),
# Version 1.153+. Allows direct manipulation of shader parameters. Operation scope is unknown, possibly global. Parameter is a set of 4 float values.
set_shader_param_float4x4 = 2403 # (set_shader_param_float4x4, <parameter_name>, [0][0], [0][1], [0][2], [1][0], [1][1], [1][2], [2][0], [2][1], [2][2], [3][0], [3][1], [3][2]),
# Version 1.153+. Allows direct manipulation of shader parameters. Operation scope is unknown, possibly global. Parameter is a set of 4x4 float values.
################################################################################
# [ Z21 ] SCENE PROPS, SCENE ITEMS, LIGHT SOURCES AND PARTICLE SYSTEMS
################################################################################
# On each scene in the game you can find scene props and scene items.
# Scene props are the building bricks of the scene. Nearly every 3D object you
# will see on any scene in the game is a scene prop, with the exception of
# terrain and flora (on some scenes flora elements are actually scene props
# as well though).
# Just like with troops and agents, it is important to differentiate between
# scene props and scene prop instances. You can have a dozen archer agents on
# the scene, and each of them will be an instance of the archer troop. Scene
# props are the same - there can be many castle wall sections on the scene,
# and these are instances of the same castle wall scene prop.
# It is also possible to use game items as elements of the scene. These are
# the scene items, and they behave just like normal scene props. However all
# operations will affect either scene prop instances, or scene items, but
# not both.
# Finally, there are spawned items. These are the "dropped" items which the
# player can pick up during the mission.
# Conditional operations
prop_instance_is_valid = 1838 # (prop_instance_is_valid, <scene_prop_instance_id>),
# Checks that the reference to a scene prop instance is valid (i.e. it was not removed).
prop_instance_is_animating = 1862 # (prop_instance_is_animating, <destination>, <scene_prop_id>),
# Checks that the scene prop instance is currently animating.
prop_instance_intersects_with_prop_instance = 1880 # (prop_instance_intersects_with_prop_instance, <checked_scene_prop_id>, <scene_prop_id>),
# Checks if two scene props are intersecting (i.e. collided). Useful when animating scene props movement. Pass -1 for second parameter to check the prop against all other props on the scene.
scene_prop_has_agent_on_it = 1801 # (scene_prop_has_agent_on_it, <scene_prop_instance_id>, <agent_id>)
# Checks that the specified agent is standing on the scene prop instance.
# Scene prop instance slot operations
scene_prop_set_slot = 510 # (scene_prop_set_slot, <scene_prop_instance_id>, <slot_no>, <value>),
scene_prop_get_slot = 530 # (scene_prop_get_slot, <destination>, <scene_prop_instance_id>, <slot_no>),
scene_prop_slot_eq = 550 # (scene_prop_slot_eq, <scene_prop_instance_id>, <slot_no>, <value>),
scene_prop_slot_ge = 570 # (scene_prop_slot_ge, <scene_prop_instance_id>, <slot_no>, <value>),
# Scene prop general operations
prop_instance_get_scene_prop_kind = 1853 # (prop_instance_get_scene_prop_kind, <destination>, <scene_prop_id>)
# Retrieves the scene prop for the specified prop instance.
scene_prop_get_num_instances = 1810 # (scene_prop_get_num_instances, <destination>, <scene_prop_id>),
# Retrieves the total number of instances of a specified scene prop on the current scene.
scene_prop_get_instance = 1811 # (scene_prop_get_instance, <destination>, <scene_prop_id>, <instance_no>),
# Retrieves the reference to a scene prop instance by its number.
scene_prop_enable_after_time = 1800 # (scene_prop_enable_after_time, <scene_prop_id>, <time_period>),
# Prevents usable scene prop from being used for the specified time period in 1/100th of second. Commonly used to implement "cooldown" periods.
set_spawn_position = 1970 # (set_spawn_position, <position>),
# Defines the position which will later be used by (spawn_scene_prop), (spawn_scene_item), (spawn_agent) and (spawn_horse) operations.
spawn_scene_prop = 1974 # (spawn_scene_prop, <scene_prop_id>),
# Spawns a new scene prop instance of the specified type at the position defined by the last call to (set_spawn_position). Operation was supposed to store the prop_instance_id of the spawned position in reg0, but does not do this at the moment.
prop_instance_get_variation_id = 1840 # (prop_instance_get_variation_id, <destination>, <scene_prop_id>),
# Retrieves the first variation ID number for the specified scene prop instance.
prop_instance_get_variation_id_2 = 1841 # (prop_instance_get_variation_id_2, <destination>, <scene_prop_id>),
# Retrieves the second variation ID number for the specified scene prop instance.
replace_prop_instance = 1889 # (replace_prop_instance, <scene_prop_id>, <new_scene_prop_id>),
# Replaces a single scene prop instance with an instance of another scene prop (usually with the same dimensions, but not necessarily so). Can only be called in ti_before_mission_start trigger in module_mission_templates.py.
replace_scene_props = 1890 # (replace_scene_props, <old_scene_prop_id>, <new_scene_prop_id>),
# Replaces all instances of specified scene prop type with another scene prop type. Commonly used to replace damaged walls with their intact versions during normal visits to castle scenes. Can only be called in ti_before_mission_start trigger in module_mission_templates.py.
scene_prop_fade_out = 1822 # (scene_prop_fade_out, <scene_prop_id>, <fade_out_time>)
# Version 1.153+. Makes the scene prop instance disappear within specified time.
scene_prop_fade_in = 1823 # (scene_prop_fade_in, <scene_prop_id>, <fade_in_time>)
# Version 1.153+. Makes the scene prop instance reappear within specified time.
prop_instance_set_material = 2617 # (prop_instance_set_material, <prop_instance_no>, <sub_mesh_no>, <string_register>),
# Version 1.161+. 4research. give sub mesh as -1 to change all meshes' materials.
# Scene prop manipulation
scene_prop_get_visibility = 1812 # (scene_prop_get_visibility, <destination>, <scene_prop_id>),
# Retrieves the current visibility state of the scene prop instance (1 = visible, 0 = invisible).
scene_prop_set_visibility = 1813 # (scene_prop_set_visibility, <scene_prop_id>, <value>),
# Shows (value = 1) or hides (value = 0) the scene prop instance. What does it do with collision? 4research.
scene_prop_get_hit_points = 1815 # (scene_prop_get_hit_points, <destination>, <scene_prop_id>),
# Retrieves current number of hit points that the scene prop instance has.
scene_prop_get_max_hit_points = 1816 # (scene_prop_get_max_hit_points, <destination>, <scene_prop_id>),
# Retrieves the maximum number of hit points that the scene prop instance has (useful to calculate the percent of damage).
scene_prop_set_hit_points = 1814 # (scene_prop_set_hit_points, <scene_prop_id>, <value>),
# Sets the number of hit points that the scene prop has. Both current and max hit points are affected. Only makes sense for sokf_destructible scene props.
scene_prop_set_cur_hit_points = 1820 # (scene_prop_set_cur_hit_points, <scene_prop_id>, <value>),
# Version 1.153+. Sets current HP amount for scene prop.
prop_instance_receive_damage = 1877 # (prop_instance_receive_damage, <scene_prop_id>, <agent_id>, <damage_value>),
# Makes scene prop instance receive specified amount of damage from any arbitrary agent. Agent reference is apparently necessary to properly initialize ti_on_scene_prop_hit trigger parameters.
prop_instance_refill_hit_points = 1870 # (prop_instance_refill_hit_points, <scene_prop_id>),
# Restores hit points of a scene prop instance to their maximum value.
scene_prop_get_team = 1817 # (scene_prop_get_team, <value>, <scene_prop_id>),
# Retrieves the team controlling the scene prop instance.
scene_prop_set_team = 1818 # (scene_prop_set_team, <scene_prop_id>, <value>),
# Assigns the scene prop instance to a certain team.
scene_prop_set_prune_time = 1819 # (scene_prop_set_prune_time, <scene_prop_id>, <value>),
# Not documented. Not used in Native. Taleworlds comment: Prune time can only be set to objects that are already on the prune queue. Static objects are not affected by this operation.
prop_instance_get_position = 1850 # (prop_instance_get_position, <position>, <scene_prop_id>),
# Retrieves the prop instance current position on the scene.
prop_instance_get_starting_position = 1851 # (prop_instance_get_starting_position, <position>, <scene_prop_id>),
# Retrieves the prop instance starting position on the scene (i.e. the place where it was positioned when initialized).
prop_instance_set_position = 1855 # (prop_instance_set_position, <scene_prop_id>, <position>, [dont_send_to_clients]),
# Teleports prop instance to another position. Optional flag dont_send_to_clients can be used on the server to prevent position change from being replicated to client machines (useful when doing some calculations which require to move the prop temporarily to another place).
prop_instance_animate_to_position = 1860 # (prop_instance_animate_to_position, <scene_prop_id>, position, <duration-in-1/100-seconds>),
# Moves prop instance to another position during the specified time frame (i.e. animates). Time is specified in 1/100th of second.
prop_instance_get_animation_target_position = 1863 # (prop_instance_get_animation_target_position, <pos>, <scene_prop_id>)
# Retrieves the position that the prop instance is currently animating to.
prop_instance_stop_animating = 1861 # (prop_instance_stop_animating, <scene_prop_id>),
# Stops animating of the prop instance in the current position.
prop_instance_get_scale = 1852 # (prop_instance_get_scale, <position>, <scene_prop_id>),
# Retrieves the current scaling factors of the prop instance.
prop_instance_set_scale = 1854 # (prop_instance_set_scale, <scene_prop_id>, <value_x_fixed_point>, <value_y_fixed_point>, <value_z_fixed_point>),
# Sets new scaling factors for the scene prop.
prop_instance_enable_physics = 1864 # (prop_instance_enable_physics, <scene_prop_id>, <value>),
# Enables (value = 1) or disables (value = 0) physics calculation (gravity, collision checks) for the scene prop instance.
prop_instance_initialize_rotation_angles = 1866 # (prop_instance_initialize_rotation_angles, <scene_prop_id>),
# Should be called to initialize the scene prop instance prior to any calls to (prop_instance_rotate_to_position).
prop_instance_rotate_to_position = 1865 # (prop_instance_rotate_to_position, <scene_prop_id>, <position>, <duration-in-1/100-seconds>, <total_rotate_angle_fixed_point>),
# Specified prop instance will move to the target position within the specified duration of time, and within the same time it will rotate for the specified angle. Used in Native code to simulate behavior of belfry wheels and rotating winches.
prop_instance_clear_attached_missiles = 1885 # (prop_instance_clear_attached_missiles, <scene_prop_id>),
# Version 1.153+. Removes all missiles currently attached to the scene prop. Only works with dynamic scene props.
prop_instance_dynamics_set_properties = 1871 # (prop_instance_dynamics_set_properties, <scene_prop_id>, <position>),
# Initializes physical parameters of a scene prop. Position (X,Y) coordinates are used to store object's mass and friction coefficient. Coordinate Z is reserved (set it to zero just in case). Scene prop must be defined as sokf_moveable|sokf_dynamic_physics, and a call to (prop_instance_enable_physics) must be previously made.
prop_instance_dynamics_set_velocity = 1872 # (prop_instance_dynamics_set_velocity, <scene_prop_id>, <position>),
# Sets current movement speed for a scene prop. Position's coordinates define velocity along corresponding axis. Same comments as for (prop_instance_dynamics_set_properties).
prop_instance_dynamics_set_omega = 1873 # (prop_instance_dynamics_set_omega, <scene_prop_id>, <position>),
# Sets current rotation speed for a scene prop. Position's coordinates define rotational speed around corresponding axis. Same comments as for (prop_instance_dynamics_set_properties).
prop_instance_dynamics_apply_impulse = 1874 # (prop_instance_dynamics_apply_impulse, <scene_prop_id>, <position>),
# Applies an impulse of specified scale to the scene prop. Position's coordinates define instant change in movement speed along corresponding axis. Same comments as for (prop_instance_dynamics_set_properties).
prop_instance_deform_to_time = 2610 # (prop_instance_deform_to_time, <prop_instance_no>, <value>),
# Version 1.161+. Deforms a vertex-animated scene prop to specified vertex time. If you open the mesh in OpenBrf, right one of "Time of frame" boxes contains the relevant value.
prop_instance_deform_in_range = 2611 # (prop_instance_deform_in_range, <prop_instance_no>, <start_frame>, <end_frame>, <duration-in-1/1000-seconds>),
# Version 1.161+. Animate vertex-animated scene prop from start frame to end frame within the specified time period (in milliseconds). If you open the mesh in OpenBrf, right one of "Time of frame" boxes contains the relevant values for frame parameters.
prop_instance_deform_in_cycle_loop = 2612 # (prop_instance_deform_in_cycle_loop, <prop_instance_no>, <start_frame>, <end_frame>, <duration-in-1/1000-seconds>),
# Version 1.161+. Performs looping animation of vertex-animated scene prop within the specified vertex frame ranges and within specified time (in milliseconds). If you open the mesh in OpenBrf, right one of "Time of frame" boxes contains the relevant values for frame parameters.
prop_instance_get_current_deform_progress = 2615 # (prop_instance_get_current_deform_progress, <destination>, <prop_instance_no>),
# Version 1.161+. Returns a percentage value between 0 and 100 if animation is still in progress. Returns 100 otherwise.
prop_instance_get_current_deform_frame = 2616 # (prop_instance_get_current_deform_frame, <destination>, <prop_instance_no>),
# Version 1.161+. Returns current frame of a vertex-animated scene prop, rounded to nearest integer value.
prop_instance_play_sound = 1881 # (prop_instance_play_sound, <scene_prop_id>, <sound_id>, [flags]),
# Version 1.153+. Makes the scene prop play a specified sound. See sf_* flags in header_sounds.py for reference on possible options.
prop_instance_stop_sound = 1882 # (prop_instance_stop_sound, <scene_prop_id>),
# Version 1.153+. Stops any sound currently played by the scene prop instance.
# Scene items operations
scene_item_get_num_instances = 1830 # (scene_item_get_num_instances, <destination>, <item_id>),
# Gets the number of specified scene items present on the scene. Scene items behave exactly like scene props (i.e. cannot be picked).
scene_item_get_instance = 1831 # (scene_item_get_instance, <destination>, <item_id>, <instance_no>),
# Retrieves the reference to a single instance of a scene item by its sequential number.
scene_spawned_item_get_num_instances = 1832 # (scene_spawned_item_get_num_instances, <destination>, <item_id>),
# Retrieves the number of specified spawned items present on the scene. Spawned items are actual items, i.e. they can be picked by player.
scene_spawned_item_get_instance = 1833 # (scene_spawned_item_get_instance, <destination>, <item_id>, <instance_no>),
# Retrieves the reference to a single instance of a spawned item by its sequential number.
replace_scene_items_with_scene_props = 1891 # (replace_scene_items_with_scene_props, <old_item_id>, <new_scene_prop_id>),
# Replaces all instances of specified scene item with scene props. Can only be called in ti_before_mission_start trigger in module_mission_templates.py.
set_spawn_position = 1970 # (set_spawn_position, <position>), ## DUPLICATE ENTRY
# Defines the position which will later be used by (spawn_scene_prop), (spawn_scene_item), (spawn_agent) and (spawn_horse) operations.
spawn_item = 1971 # (spawn_item, <item_kind_id>, <item_modifier>, [seconds_before_pruning]),
# Spawns a new item, possibly with modifier, on the scene in the position specified by previous call to (set_spawn_position). Optional parameter determines time period (in second) after which the item will disappear. Using 0 will prevent the item from disappearing.
spawn_item_without_refill = 1976 # (spawn_item_without_refill, <item_kind_id>, <item_modifier>, [seconds_before_pruning]),
# Version 1.153+. UNTESTED. It is unclear how this is different from standard (spawn_item).
# Light sources and particle systems
set_current_color = 1950 # (set_current_color, <red_value>, <green_value>, <blue_value>),
# Sets color for subsequent calls to (add_point_light) etc. Color component ranges are 0..255.
set_position_delta = 1955 # (set_position_delta, <value>, <value>, <value>),
# Can only be called inside item or scene prop triggers. Sets (X,Y,Z) offsets from the item/prop current position for subsequent calls to (add_point_light) etc. Offsets are apparently in centimeters.
add_point_light = 1960 # (add_point_light, [flicker_magnitude], [flicker_interval]),
# Adds a point light source to an object with optional flickering magnitude (range 0..100) and flickering interval (in 1/100th of second). Uses position offset and color provided to previous calls to (set_position_delta) and (set_current_color). Can only be used in item triggers.
add_point_light_to_entity = 1961 # (add_point_light_to_entity, [flicker_magnitude], [flicker_interval]),
# Adds a point light source to an object with optional flickering magnitude (range 0..100) and flickering interval (in 1/100th of second). Uses position offset and color provided to previous calls to (set_position_delta) and (set_current_color). Can only be used in scene prop triggers.
particle_system_add_new = 1965 # (particle_system_add_new, <par_sys_id>,[position]),
# Adds a new particle system to an object. Uses position offset and color provided to previous calls to (set_position_delta) and (set_current_color). Can only be used in item/prop triggers.
particle_system_emit = 1968 # (particle_system_emit, <par_sys_id>, <value_num_particles>, <value_period>),
# Adds a particle system in some fancy way. Uses position offset and color provided to previous calls to (set_position_delta) and (set_current_color). Can only be used in item/prop triggers.
particle_system_burst = 1969 # (particle_system_burst, <par_sys_id>, <position>, [percentage_burst_strength]),
# Bursts a particle system in specified position.
particle_system_burst_no_sync = 1975 # (particle_system_burst_no_sync, <par_sys_id>, <position_no>, [percentage_burst_strength]),
# Version 1.153+. Same as above, but apparently does not synchronize this between server and client.
prop_instance_add_particle_system = 1886 # (prop_instance_add_particle_system, <scene_prop_id>, <par_sys_id>, <position_no>),
# Version 1.153+. Adds a new particle system to the scene prop. Note that <position_no> is local, i.e. in relation to scene prop's coordinates and rotation.
prop_instance_stop_all_particle_systems = 1887 # (prop_instance_stop_all_particle_systems, <scene_prop_id>),
# Version 1.153+. Removes all particle systems currently associated with scene prop instance.
################################################################################
# [ Z22 ] AGENTS AND TEAMS
################################################################################
# An agent is the representation of a single soldier on the 3D scene. Always keep this in
# mind when dealing with regular troops. A party may have 30 Swadian Knights.
# They will form a single troop stack in the party, and they will all be
# copies of the one and only Swadian Knight troop. However when the battle
# starts, this stack will spawn 30 distinct Agents.
# Agents do not persist - they only exist in the game for the duration of the
# mission. As soon as the player returns to the world map, all agents who were
# present on the scene immediately disappear. If this was a battle during a
# normal game encounter, then the game will keep track of the battle results,
# and depending on the number of agents killed from all sides the engine will
# kill or wound some troops in the troop stacks of the parties who were
# participating in the battle.
# During the mission, all agents are split into teams. By default player and
# his companions are placed into Team 0, but this may be changed in the
# mission template or by code. Player's enemies are usually team 1 (though
# again, this is not set in stone). Module System provides the modder with
# a great degree of control over teams composition, relation to each other
# (you can make hostile, allied or neutral teams, and you can have more than
# one team on the scene).
# Conditional operations
agent_is_in_special_mode = 1693 # (agent_is_in_special_mode, <agent_id>),
# Checks that the agent is currently in scripted mode.
agent_is_routed = 1699 # (agent_is_routed, <agent_id>),
# Checks that the agent has fled from the map (i.e. reached the edge of the map in fleeing mode and then faded).
agent_is_alive = 1702 # (agent_is_alive, <agent_id>),
# Checks that the agent is alive.
agent_is_wounded = 1703 # (agent_is_wounded, <agent_id>),
# Checks that the agent has been knocked unconscious.
agent_is_human = 1704 # (agent_is_human, <agent_id>),
# Checks that the agent is human (i.e. not horse).
agent_is_ally = 1706 # (agent_is_ally, <agent_id>),
# Checks that the agent is allied to the player (belongs to player's party or allied party in current encounter).
agent_is_non_player = 1707 # (agent_is_non_player, <agent_id>),
# Checks that the agent is not a player.
agent_is_defender = 1708 # (agent_is_defender, <agent_id>),
# Checks that the agent belongs to the defending side (see encounter operations for details).
agent_is_active = 1712 # (agent_is_active, <agent_id>),
# Checks that the agent reference is active. This will succeed for dead or routed agents, for as long as the agent reference itself is valid.
agent_has_item_equipped = 1729 # (agent_has_item_equipped, <agent_id>, <item_id>),
# Checks that the agent has a specific item equipped.
agent_is_in_parried_animation = 1769 # (agent_is_in_parried_animation, <agent_id>),
# Checks that the agent is currently in parrying animation (defending from some attack).
agent_is_alarmed = 1806 # (agent_is_alarmed, <agent_id>),
# Checks that the agent is alarmed (in combat mode with weapon drawn).
class_is_listening_order = 1775 # (class_is_listening_order, <team_no>, <sub_class>),
# Checks that the specified group of specified team is listening to player's orders.
teams_are_enemies = 1788 # (teams_are_enemies, <team_no>, <team_no_2>),
# Checks that the two teams are hostile to each other.
agent_is_in_line_of_sight = 1826 # (agent_is_in_line_of_sight, <agent_id>, <position_no>),
# Version 1.153+. Checks that the agent can be seen from specified position. Rotation of position register is not used (i.e. agent will be seen even if position is "looking" the other way).
# Team and agent slot operations
# These behave like the generic troop/party slot operations: *_set_slot stores a value,
# *_get_slot retrieves it, *_slot_eq / *_slot_ge are condition operations.
team_set_slot = 509 # (team_set_slot, <team_id>, <slot_no>, <value>),
team_get_slot = 529 # (team_get_slot, <destination>, <team_id>, <slot_no>),
team_slot_eq = 549 # (team_slot_eq, <team_id>, <slot_no>, <value>),
team_slot_ge = 569 # (team_slot_ge, <team_id>, <slot_no>, <value>),
agent_set_slot = 505 # (agent_set_slot, <agent_id>, <slot_no>, <value>),
agent_get_slot = 525 # (agent_get_slot, <destination>, <agent_id>, <slot_no>),
agent_slot_eq = 545 # (agent_slot_eq, <agent_id>, <slot_no>, <value>),
agent_slot_ge = 565 # (agent_slot_ge, <agent_id>, <slot_no>, <value>),
# Agent spawning, removal and general operations
add_reinforcements_to_entry = 1930 # (add_reinforcements_to_entry, <mission_template_entry_no>, <wave_size>),
# For battle missions, adds reinforcement wave to the specified entry point. Additional parameter determines relative wave size. Agents in reinforcement wave are taken from all parties of the side that the entry point belongs to due to mtef_team_* flags.
set_spawn_position = 1970 # (set_spawn_position, <position>), ## DUPLICATE ENTRY (also listed in the scene items section above; same opcode value, so the repeated assignment is harmless)
# Defines the position which will later be used by (spawn_scene_prop), (spawn_scene_item), (spawn_agent) and (spawn_horse) operations.
spawn_agent = 1972 # (spawn_agent, <troop_id>),
# Spawns a new troop in the specified position and saves the reference to the new agent in reg0.
spawn_horse = 1973 # (spawn_horse, <item_kind_id>, <item_modifier>),
# Spawns a new horse (with any modifier) in the specified position and saves the reference to the new agent in reg0.
remove_agent = 1755 # (remove_agent, <agent_id>),
# Immediately removes the agent from the scene.
agent_fade_out = 1749 # (agent_fade_out, <agent_id>),
# Fades out the agent from the scene (same effect as fleeing enemies when they get to the edge of map).
agent_play_sound = 1750 # (agent_play_sound, <agent_id>, <sound_id>),
# Makes the agent emit the specified sound.
agent_stop_sound = 1808 # (agent_stop_sound, <agent_id>),
# Stops whatever sound agent is currently performing.
agent_set_visibility = 2096 # (agent_set_visibility, <agent_id>, <value>),
# Version 1.153+. Sets agent visibility. 0 for invisible, 1 for visible.
get_player_agent_no = 1700 # (get_player_agent_no, <destination>),
# Retrieves the reference to the player-controlled agent. Singleplayer mode only.
agent_get_kill_count = 1723 # (agent_get_kill_count, <destination>, <agent_id>, [get_wounded]),
# Retrieves the total number of kills by the specified agent during this battle. Call with non-zero <get_wounded> parameter to retrieve the total number of enemies the agent has knocked down.
agent_get_position = 1710 # (agent_get_position, <position>, <agent_id>),
# Retrieves the position of the specified agent on the scene.
agent_set_position = 1711 # (agent_set_position, <agent_id>, <position>),
# Teleports the agent to specified position on the scene. Be careful with riders - you must teleport the horse, not the rider for the operation to work correctly!
agent_get_horse = 1714 # (agent_get_horse, <destination>, <agent_id>),
# Retrieves the reference to the horse agent that the specified agent is riding, or -1 if he's not riding a horse (or is a horse himself).
agent_get_rider = 1715 # (agent_get_rider, <destination>, <horse_agent_id>),
# Retrieves the reference to the rider agent who is riding the specified horse, or -1 if there's no rider or the specified agent is not a horse.
agent_get_party_id = 1716 # (agent_get_party_id, <destination>, <agent_id>),
# Retrieves the party that the specified agent belongs to (supposedly should only work in battle missions for agents spawned as starting/reinforcement waves).
agent_get_entry_no = 1717 # (agent_get_entry_no, <destination>, <agent_id>),
# Retrieves the entry point number where this agent has spawned. What does this return for agents spawned with (spawn_agent)? 4research.
agent_get_troop_id = 1718 # (agent_get_troop_id, <destination>, <agent_id>),
# Retrieves the troop type of the specified agent. Returns -1 for horses (because horses are items, not troops).
agent_get_item_id = 1719 # (agent_get_item_id, <destination>, <horse_agent_id>),
# Retrieves the item type of the specified horse agent. Returns -1 for humans.
# Agent combat parameters and stats
store_agent_hit_points = 1720 # (store_agent_hit_points, <destination>, <agent_id>, [absolute]),
# Retrieves current agent health. Optional last parameter determines whether actual health (absolute = 1) or relative percentile health (absolute = 0) is returned. Default is relative.
agent_set_hit_points = 1721 # (agent_set_hit_points, <agent_id>, <value>,[absolute]),
# Sets new value for agent health. Optional last parameter determines whether the value is interpreted as actual health (absolute = 1) or relative percentile health (absolute = 0). Default is relative.
agent_set_max_hit_points = 2090 # (agent_set_max_hit_points, <agent_id>, <value>, [absolute]),
# Version 1.153+. Changes agent's max hit points. Optional flag [absolute] determines if <value> is an absolute number of his points, or relative percentage (0..1000) of default value. Treated as percentage by default.
agent_deliver_damage_to_agent = 1722 # (agent_deliver_damage_to_agent, <agent_id_deliverer>, <agent_id>, [damage_amount], [weapon_item_id]),
# Makes one agent deal damage to another. Parameter damage_amount is optional, if it is skipped or <= 0, then damage will be calculated using attacker's weapon item and stats (like a normal weapon attack). Optional parameter weapon_item_id was added in 1.153 and will force the game the calculate the damage using this weapon.
agent_deliver_damage_to_agent_advanced = 1827 # (agent_deliver_damage_to_agent_advanced, <destination>, <attacker_agent_id>, <agent_id>, <value>, [weapon_item_id]),
# Version 1.153+. Same as (agent_deliver_damage_to_agent), but resulting damage is returned. Also operation takes relations between agents into account, which may result in no damage, or even damage to attacker due to friendly fire rules.
add_missile = 1829 # (add_missile, <agent_id>, <starting_position>, <starting_speed_fixed_point>, <weapon_item_id>, <weapon_item_modifier>, <missile_item_id>, <missile_item_modifier>),
# Version 1.153+. Creates a missile with specified parameters. Note that <starting_position> parameter also determines the direction in which missile flies.
agent_get_speed = 1689 # (agent_get_speed, <position>, <agent_id>),
# Retrieves agent speed to (X,Y) coordinates of the position register. What do these mean - speed by world axis?
agent_set_no_death_knock_down_only = 1733 # (agent_set_no_death_knock_down_only, <agent_id>, <value>),
# Sets the agent as unkillable (value = 1) or normal (value = 0). Unkillable agents will drop on the ground instead of dying and will stand up afterwards.
agent_set_horse_speed_factor = 1734 # (agent_set_horse_speed_factor, <agent_id>, <speed_multiplier-in-1/100>),
# Multiplies agent's horse speed (and maneuverability?) by the specified percentile value (a value of 100 leaves horse speed unchanged). Note that this is called on the rider, not on the horse! Supposedly will persist even if the agent changes horses. 4research.
agent_set_speed_limit = 1736 # (agent_set_speed_limit, <agent_id>, <speed_limit(kilometers/hour)>),
# Limits agent speed by the specified value in kph. Use 5 for average walking speed. Affects only AI agents.
agent_set_damage_modifier = 2091 # (agent_set_damage_modifier, <agent_id>, <value>),
# Version 1.153+. Changes the damage delivered by this agent. Value is in percentage, 100 is default, 1000 is max possible value.
agent_set_accuracy_modifier = 2092 # (agent_set_accuracy_modifier, <agent_id>, <value>),
# Version 1.153+. Changes agent's accuracy (with ranged weapons?). Value is in percentage, 100 is default, value can be between [0..1000]
agent_set_speed_modifier = 2093 # (agent_set_speed_modifier, <agent_id>, <value>),
# Version 1.153+. Changes agent's speed. Value is in percentage, 100 is default, value can be between [0..1000]
agent_set_reload_speed_modifier = 2094 # (agent_set_reload_speed_modifier, <agent_id>, <value>),
# Version 1.153+. Changes agent's reload speed. Value is in percentage, 100 is default, value can be between [0..1000]
agent_set_use_speed_modifier = 2095 # (agent_set_use_speed_modifier, <agent_id>, <value>),
# Version 1.153+. Changes agent's speed with using various scene props. Value is in percentage, 100 is default, value can be between [0..1000]
agent_set_ranged_damage_modifier = 2099 # (agent_set_ranged_damage_modifier, <agent_id>, <value>),
# Version 1.157+. Changes agent's damage with ranged weapons. Value is in percentage, 100 is default, value can be between [0..1000]
agent_get_time_elapsed_since_removed = 1760 # (agent_get_time_elapsed_since_removed, <destination>, <agent_id>),
# Retrieves the number of seconds that have passed since agent's death. Native uses this only for multiplayer to track player's respawns. Can it be used in singleplayer too? 4research.
# Agent equipment
agent_refill_wielded_shield_hit_points = 1692 # (agent_refill_wielded_shield_hit_points, <agent_id>),
# Restores all hit points for the shield the agent is currently wielding.
agent_set_invulnerable_shield = 1725 # (agent_set_invulnerable_shield, <agent_id>, <value>),
# Makes the agent invulnerable to any damage (value = 1) or makes him vulnerable again (value = 0).
agent_get_wielded_item = 1726 # (agent_get_wielded_item, <destination>, <agent_id>, <hand_no>),
# Retrieves the item reference that the agent is currently wielding in his right hand (hand_no = 0) or left hand (hand_no = 1). Note that weapons are always wielded in right hand, and shield in left hand. When wielding a two-handed weapon (including bows and crossbows), this operation will return -1 for left hand.
agent_get_ammo = 1727 # (agent_get_ammo, <destination>, <agent_id>, <value>),
# Retrieves the current ammo amount agent has for his wielded item (value = 1) or all his items (value = 0).
agent_get_item_cur_ammo = 1977 # (agent_get_item_cur_ammo, <destination>, <agent_id>, <slot_no>),
# Version 1.153+. Returns remaining ammo for specified agent's item.
agent_refill_ammo = 1728 # (agent_refill_ammo, <agent_id>),
# Refills all ammo and throwing weapon stacks that the agent has in his equipment.
agent_set_wielded_item = 1747 # (agent_set_wielded_item, <agent_id>, <item_id>),
# Forces the agent to wield the specified item. Agent must have that item in his equipment for this to work. Use item_id = -1 to unwield any currently wielded item.
agent_equip_item = 1779 # (agent_equip_item, <agent_id>, <item_id>, [weapon_slot_no]),
# Adds the specified item to agent and forces him to equip it. Optional weapon_slot_no parameter is only used with weapons and will put the newly added item to that slot (range 1..4). If it is omitted with a weapon item, then the agent must have an empty weapon slot for the operation to succeed.
agent_unequip_item = 1774 # (agent_unequip_item, <agent_id>, <item_id>, [weapon_slot_no]),
# Removes the specified item from the agent. Optional parameter weapon_slot_no is in range 1..4 and determines what weapon slot to remove (item_id must still be set correctly).
agent_set_ammo = 1776 # (agent_set_ammo, <agent_id>, <item_id>, <value>),
# Sets current agent ammo amount to the specified value between 0 and maximum ammo. Not clear what item_id means - weapon item or ammo item? 4research.
agent_get_item_slot = 1804 # (agent_get_item_slot, <destination>, <agent_id>, <value>),
# Retrieves item_id for specified agent's slot. Possible slot values range in 0..7, order is weapon1, weapon2, weapon3, weapon4, head_armor, body_armor, leg_armor, hand_armor.
agent_get_ammo_for_slot = 1825 # (agent_get_ammo_for_slot, <destination>, <agent_id>, <slot_no>),
# Retrieves the amount of ammo agent has in the referenced slot (range 0..3).
# Agent animations
agent_set_no_dynamics = 1762 # (agent_set_no_dynamics, <agent_id>, <value>),
# Makes the agent stand on the spot (value = 1) or move normally (value = 0). When frozen on the spot the agent can still turn around and fight if necessary. Used in Native for the wedding scene.
agent_get_animation = 1768 # (agent_get_animation, <destination>, <agent_id>, <body_part>),
# Retrieves current agent animation for specified body part (0 = lower, 1 = upper).
agent_set_animation = 1740 # (agent_set_animation, <agent_id>, <anim_id>, [channel_no]),
# Forces the agent to perform the specified animation. Optional channel_no parameter determines whether upper body (value = 1) or lower body (value = 0, default) is affected by animation.
agent_set_stand_animation = 1741 # (agent_set_stand_animation, <agent_id>, <anim_id>),
# Defines the animation that this agent will use when standing still. Does not force the agent into actually doing this animation.
agent_set_walk_forward_animation = 1742 # (agent_set_walk_forward_animation, <agent_id>, <anim_id>),
# Defines the animation that this agent will use when walking forward. Only works for NPC agents.
agent_set_animation_progress = 1743 # (agent_set_animation_progress, <agent_id>, <value_fixed_point>),
# Allows to skip the agent to a certain point in the animation cycle, as specified by the fixed point value (0..fixed_point_multiplier).
agent_ai_set_can_crouch = 2083 # (agent_ai_set_can_crouch, <agent_id>, <value>),
# Version 1.153+. Allows or forbids the agent to crouch. 0 to forbid, 1 to allow.
agent_get_crouch_mode = 2097 # (agent_get_crouch_mode, <destination>, <agent_id>),
# Version 1.153+. Retrieves agent's crouch status (1 = crouching, 0 = standing).
agent_set_crouch_mode = 2098 # (agent_set_crouch_mode, <agent_id>, <value>),
# Version 1.153+. Sets agent's crouch status (1 = crouch, 0 = stand up).
agent_get_attached_scene_prop = 1756 # (agent_get_attached_scene_prop, <destination>, <agent_id>)
# Retrieves the reference to scene prop instance which is attached to the agent, or -1 if there isn't any.
agent_set_attached_scene_prop = 1757 # (agent_set_attached_scene_prop, <agent_id>, <scene_prop_id>)
# Attaches the specified prop instance to the agent. Used in multiplayer CTF missions to attach flags to players.
agent_set_attached_scene_prop_x = 1758 # (agent_set_attached_scene_prop_x, <agent_id>, <value>)
# Offsets the position of the attached scene prop in relation to agent, in centimeters, along the X axis (left/right).
agent_set_attached_scene_prop_y = 1809 # (agent_set_attached_scene_prop_y, <agent_id>, <value>)
# Offsets the position of the attached scene prop in relation to agent, in centimeters, along the Y axis (backwards/forward).
agent_set_attached_scene_prop_z = 1759 # (agent_set_attached_scene_prop_z, <agent_id>, <value>)
# Offsets the position of the attached scene prop in relation to agent, in centimeters, along the Z axis (down/up).
agent_get_bone_position = 2076 # (agent_get_bone_position, <position_no>, <agent_no>, <bone_no>, [<local_or_global>]),
# Version 1.161+. Returns current position for agent's bone (examine skeleton in openBrf to learn bone numbers). Pass 1 as optional <local_or_global> parameter to retrieve global bone coordinates.
# Agent AI and scripted behavior
agent_ai_set_interact_with_player = 2077 # (agent_ai_set_interact_with_player, <agent_no>, <value>),
# Version 1.165+. Enables or disables agent AI interaction with player. Dialog? Combat? 4research.
agent_set_is_alarmed = 1807 # (agent_set_is_alarmed, <agent_id>, <value>),
# Sets agent's status as alarmed (value = 1) or peaceful (value = 0).
agent_clear_relations_with_agents = 1802 # (agent_clear_relations_with_agents, <agent_id>),
# Clears any agent-to-agent relations for specified agent.
agent_add_relation_with_agent = 1803 # (agent_add_relation_with_agent, <agent_id>, <agent_id>, <value>),
# Changes relations between two agents on the scene to enemy (value = -1), neutral (value = 0), ally (value = 1). Note that neutral agents are immune to friendly fire.
agent_get_number_of_enemies_following = 1761 # (agent_get_number_of_enemies_following, <destination>, <agent_id>),
# Retrieves the total number of enemies who are currently attacking the specified agents. May be used for AI decision-making.
agent_ai_get_num_cached_enemies = 2670 # (agent_ai_get_num_cached_enemies, <destination>, <agent_no>),
# Version 1.165+. Returns total number of nearby enemies as has been cached by agent AI. Enemies are numbered from nearest to farthest.
agent_ai_get_cached_enemy = 2671 # (agent_ai_get_cached_enemy, <destination>, <agent_no>, <cache_index>),
# Version 1.165+. Return agent reference from AI's list of cached enemies, from nearest to farthest. Returns -1 if the cached enemy is not active anymore.
agent_get_attack_action = 1763 # (agent_get_attack_action, <destination>, <agent_id>),
# Retrieves agent's current attack action. Possible values: free = 0, readying_attack = 1, releasing_attack = 2, completing_attack_after_hit = 3, attack_parried = 4, reloading = 5, after_release = 6, cancelling_attack = 7.
agent_get_defend_action = 1764 # (agent_get_defend_action, <destination>, <agent_id>),
# Retrieves agent's current defend action. Possible values: free = 0, parrying = 1, blocking = 2.
agent_get_action_dir = 1767 # (agent_get_action_dir, <destination>, <agent_id>),
# Retrieves the direction of current agent's action. Possible values: invalid = -1, down = 0, right = 1, left = 2, up = 3.
agent_set_attack_action = 1745 # (agent_set_attack_action, <agent_id>, <direction_value>, <action_value>),
# Forces the agent to perform an attack action. Direction value: -2 = cancel any action (1.153+), 0 = thrust, 1 = slashright, 2 = slashleft, 3 = overswing. Action value: 0 = ready and release, 1 = ready and hold.
agent_set_defend_action = 1746 # (agent_set_defend_action, <agent_id>, <value>, <duration-in-1/1000-seconds>),
# Forces the agent to perform a defend action. Possible values: -2 = cancel any action (1.153+), 0 = defend_down, 1 = defend_right, 2 = defend_left, 3 = defend_up. Does time value determine delay, speed or duration? 4research.
agent_set_scripted_destination = 1730 # (agent_set_scripted_destination, <agent_id>, <position>, [auto_set_z_to_ground_level], [no_rethink]),
# Forces the agent to travel to specified position and stay there until new behavior is set or scripted mode cleared. First optional parameter determines whether the position Z coordinate will be automatically set to ground level (value = 1) or not (value = 0). Second optional parameter added in 1.165 patch, set it to 1 to save resources.
agent_set_scripted_destination_no_attack = 1748 # (agent_set_scripted_destination_no_attack, <agent_id>, <position>, <auto_set_z_to_ground_level>),
# Same as above, but the agent will not attack his enemies.
agent_get_scripted_destination = 1731 # (agent_get_scripted_destination, <position>, <agent_id>),
# Retrieves the position which is defined as agent's scripted destination, if any.
agent_force_rethink = 1732 # (agent_force_rethink, <agent_id>),
# Forces the agent to recalculate his current actions after setting him a new scripted destination or changing other factors affecting his behavior.
agent_clear_scripted_mode = 1735 # (agent_clear_scripted_mode, <agent_id>),
# Clears scripting mode from the agent, making him behave as usual again.
agent_ai_set_always_attack_in_melee = 1737 # (agent_ai_set_always_attack_in_melee, <agent_id>, <value>),
# Forces the agent to continuously attack in melee combat, instead of defending. Used in Native to prevent stalling at the top of the siege ladder. Use value = 0 to clear this mode.
agent_get_simple_behavior = 1738 # (agent_get_simple_behavior, <destination>, <agent_id>),
# Retrieves agent's current simple behavior (see aisb_* constants in header_mission_templates.py for details).
agent_ai_get_behavior_target = 2082 # (agent_ai_get_behavior_target, <destination>, <agent_id>),
# Version 1.153+. UNTESTED. Supposedly returns agent_id which is the target of current agent's behavior.
agent_get_combat_state = 1739 # (agent_get_combat_state, <destination>, <agent_id>),
# Retrieves agent's current combat state:
# 0 = nothing special, this value is also always returned for player and for dead agents.
# 1 = target in sight (for ranged units)
# 2 = guarding (without a shield)
# 3 = preparing a melee attack or firing a ranged weapon
# 4 = releasing a melee attack or reloading a crossbow
# 7 = recovering after being hit in melee OR blocking with a shield. Contradictory information, 4research.
# 8 = target to the right (horse archers) OR no target in sight (ranged units). Contradictory information, 4research.
agent_ai_get_move_target = 2081 # (agent_ai_get_move_target, <destination>, <agent_id>),
# Version 1.153+. UNTESTED. Supposedly returns the enemy agent to whom the agent is currently moving to.
agent_get_look_position = 1709 # (agent_get_look_position, <position>, <agent_id>),
# Retrieves the position that the agent is currently looking at.
agent_set_look_target_position = 1744 # (agent_set_look_target_position, <agent_id>, <position>),
# Forces the agent to look at specified position (turn his head as necessary). Alarmed agents will ignore this.
agent_ai_get_look_target = 2080 # (agent_ai_get_look_target, <destination>, <agent_id>),
# Version 1.153+. UNTESTED. Supposedly returns agent_id that the agent is currently looking at.
agent_set_look_target_agent = 1713 # (agent_set_look_target_agent, <watcher_agent_id>, <observed_agent_id>),
# Forces the agent to look at specified agent (track his movements). Alarmed agents will ignore this.
agent_start_running_away = 1751 # (agent_start_running_away, <agent_id>, [<position_no>]),
# Makes the agent flee the battlefield, ignoring everything else and not attacking. If the agent reaches the edge of map in this mode, he will fade out. Optional position_no parameter added in 1.153 and will make the agent flee to specified position instead (pos0 is not allowed and will be ignored).
agent_stop_running_away = 1752 # (agent_stop_running_away, <agent_id>),
# Cancels fleeing behavior for the agent, turning him back to combat state.
agent_ai_set_aggressiveness = 1753 # (agent_ai_set_aggressiveness, <agent_id>, <value>),
# Sets the aggressiveness parameter for agent AI to use. Default value is 100. Higher values make agent more aggressive. Actual game effects are not obvious, apparently used to speed up mob aggravation when previously neutral.
agent_set_kick_allowed = 1754 # (agent_set_kick_allowed, <agent_id>, <value>),
# Enables (value = 1) or disables (value = 0) kicking for the specified agent. Only makes sense for player-controlled agents as bots don't know how to kick anyway.
set_cheer_at_no_enemy = 2379 # (set_cheer_at_no_enemy, <value>),
# Version 1.153+. Determines whether the agents will cheer when no enemy remain on the map. 0 = do not cheer, 1 = cheer.
agent_add_offer_with_timeout = 1777 # (agent_add_offer_with_timeout, <agent_id>, <offerer_agent_id>, <duration-in-1/1000-seconds>),
# Esoteric stuff. Used in multiplayer duels. Second agent_id is offerer, 0 value for duration is an infinite offer.
agent_check_offer_from_agent = 1778 # (agent_check_offer_from_agent, <agent_id>, <offerer_agent_id>), #second agent_id is offerer
# Esoteric stuff. Used in multiplayer duels. Second agent_id is offerer.
# Team operations
agent_get_group = 1765 # (agent_get_group, <destination>, <agent_id>),
# Retrieves reference to player who is currently the leader of specified bot agent. Only works in multiplayer.
agent_set_group = 1766 # (agent_set_group, <agent_id>, <player_leader_id>),
# Puts the bot agent under command of specified player. Only works in multiplayer.
agent_get_team = 1770 # (agent_get_team, <destination>, <agent_id>),
# Retrieves the team that the agent belongs to.
agent_set_team = 1771 # (agent_set_team, <agent_id>, <value>),
# Puts the agent to specified team number.
agent_get_class = 1772 # (agent_get_class, <destination>, <agent_id>),
# Retrieves the agent class (see grc_* constants in header_mission_templates.py for reference). Note this operation returns the troop class that the game divines from troop equipment and flags, ignoring any custom troop class settings.
agent_get_division = 1773 # (agent_get_division, <destination>, <agent_id>),
# Retrieves the agent division (custom troop class number in 0..8 range).
agent_set_division = 1783 # (agent_set_division, <agent_id>, <value>),
# Puts the agent into the specified division. This does not affect agent's troop class. Note that there's a bug in Warband: if an order is issued to agent's original division, the agent will immediately switch back to its original division number. Therefore, if you want to manipulate agent divisions dynamically during the battle, you need to implement some workarounds for this bug.
team_get_hold_fire_order = 1784 # (team_get_hold_fire_order, <destination>, <team_no>, <division>),
# Retrieves current status of hold fire order for specified team/division (see aordr_* constants in header_mission_templates.py for reference).
team_get_movement_order = 1785 # (team_get_movement_order, <destination>, <team_no>, <division>),
# Retrieves current movement orders for specified team/division (see mordr_* constants in header_mission_templates.py for reference).
team_get_riding_order = 1786 # (team_get_riding_order, <destination>, <team_no>, <division>),
# Retrieves current status of riding order for specified team/division (see rordr_* constants in header_mission_templates.py for reference).
team_get_weapon_usage_order = 1787 # (team_get_weapon_usage_order, <destination>, <team_no>, <division>),
# Retrieves current status of weapon usage order for specified team/division (see wordr_* constants in header_mission_templates.py for reference).
team_give_order = 1790 # (team_give_order, <team_no>, <division>, <order_id>),
# Issues an order to specified team/division.
team_set_order_position = 1791 # (team_set_order_position, <team_no>, <division>, <position>),
# Defines the position for specified team/division when currently issued order requires one.
team_get_leader = 1792 # (team_get_leader, <destination>, <team_no>),
# Retrieves the reference to the agent who is the leader of specified team.
team_set_leader = 1793 # (team_set_leader, <team_no>, <new_leader_agent_id>),
# Sets the agent as the new leader of specified team.
team_get_order_position = 1794 # (team_get_order_position, <position>, <team_no>, <division>),
# Retrieves position which is used for specified team/division current orders.
team_set_order_listener = 1795 # (team_set_order_listener, <team_no>, <division>, [add_to_listeners]),
# Set the specified division as the one which will be following orders issued by the player (assuming the player is on the same team). If optional parameter add_to_listeners is greater than 0, then the operation will instead *add* specified division to order listeners. If division number is -1, then list of order listeners is cleared. If division number is 9, then all divisions will listen to player's orders.
team_set_relation = 1796 # (team_set_relation, <team_no>, <team_no_2>, <value>),
# Sets relations between two teams. Possible values: enemy (-1), neutral (0) and friendly (1).
store_remaining_team_no = 2360 # (store_remaining_team_no, <destination>),
# Retrieves the number of the last remaining team. Currently not used in Native, possibly deprecated.
team_get_gap_distance = 1828 # (team_get_gap_distance, <destination>, <team_no>, <sub_class>),
# Version 1.153+. UNTESTED. Supposedly returns average gap between troops of a specified team/class (depends on how many Stand Closer/Spread Out orders were given).
# Combat statistics
store_enemy_count = 2380 # (store_enemy_count, <destination>),
# No longer used in Native. Apparently stores total number of active enemy agents. Possibly deprecated. 4research.
store_friend_count = 2381 # (store_friend_count, <destination>),
# No longer used in Native. Apparently stores total number of active friendly agents. Possibly deprecated. 4research.
store_ally_count = 2382 # (store_ally_count, <destination>),
# No longer used in Native. Apparently stores total number of active allied agents (how is it different from friends?). Possibly deprecated. 4research.
store_defender_count = 2383 # (store_defender_count, <destination>),
# No longer used in Native. Apparently stores total number of active agents on defender's side. Possibly deprecated. 4research.
store_attacker_count = 2384 # (store_attacker_count, <destination>),
# No longer used in Native. Apparently stores total number of active agents on attacker's side. Possibly deprecated. 4research.
store_normalized_team_count = 2385 # (store_normalized_team_count, <destination>, <team_no>),
# Stores the number of agents belonging to specified team, normalized according to battle_size and advantage. Commonly used to calculate advantage and possibly reinforcement wave sizes.
################################################################################
# [ Z23 ] PRESENTATIONS
################################################################################
# Presentations are a complex subject, because of their flexibility. Each
# presentation is nothing more but a number of screen control elements, called
# overlays. There are many types of overlays, each coming with its own
# behavior and looks. For as long as the presentation is running, you can
# monitor the status of those overlays and change their looks, contents and
# position on the screen.
# Presentation is nothing but a set of triggers. There are only five triggers
# that the presentation can have, but skillful control of them allows you to
# do nearly everything you can think of.
# ti_on_presentation_load fires only once when the presentation is started.
# This is the place where you will usually create all overlays that your
# presentation needs, initialize their looks and contents and put them to
# their positions on the screen.
# ti_on_presentation_event_state_change is probably the most important and
# easy one. It fires every time some overlay in your presentation changes
# state. For each type of overlay this means something. For a button overlay,
# this means that the user has clicked the button. In this case, you will want
# to run the code responsible for that button effects. So you can put a "Win"
# button on your presentation, and when it's clicked, you can run the code
# which will give all castles and towns in the game to you. :-)
# ti_on_presentation_mouse_press trigger fires every time user clicks a mouse
# button on one of presentation overlays, even if the overlay did not change
# its state as the result.
# ti_on_presentation_mouse_enter_leave trigger fires when the mouse pointer
# moves over one of presentation's overlays, or moves out of it. This might
# be useful if you want your presentation to react to user's mouse movements,
# not only clicks.
# ti_on_presentation_run trigger will fire every frame (in other words, with
# the frequency of your game FPS). You can put some code in this trigger if
# you want your presentation to constantly do something even if the user is
# passive.
# Note that while a running presentation will usually pause your game until
# you stop it, it is also possible to write presentations which will not stop
# the game, but will run as the time goes. To see an example, go into any
# battle in Warband and press Backspace. You will see the interface which
# displays the mini-map of the battle, positions of all troops, and elements
# that you can use to issue orders to your companions (if you have any). All
# this is a presentation as well, called "prsnt_battle". And if you have
# played multiplayer, then you might be interested to know that all menus,
# including equipment selection for your character, are presentations as well.
# Conditional operations
is_presentation_active = 903 # (is_presentation_active, <presentation_id>),
# Checks that the specified presentation is currently running.
# General presentation operations
start_presentation = 900 # (start_presentation, <presentation_id>),
# Starts the specified presentation.
start_background_presentation = 901 # (start_background_presentation, <presentation_id>),
# Apparently allows you to start a presentation in background but stay in the menu. 4research.
presentation_set_duration = 902 # (presentation_set_duration, <duration-in-1/100-seconds>),
# Sets presentation duration time, in 1/100th of second. Must be called when a presentation is active. If several presentations are active, duration will be set for all of them.
# Creating overlays
create_text_overlay = 910 # (create_text_overlay, <destination>, <string_id>),
# Creates a text label overlay and returns it's overlay_id.
create_mesh_overlay = 911 # (create_mesh_overlay, <destination>, <mesh_id>),
# Creates a mesh overlay and returns it's overlay_id.
create_mesh_overlay_with_item_id = 944 # (create_mesh_overlay_with_item_id, <destination>, <item_id>),
# Creates a mesh overlay, using the specified item mesh. Returns overlay_id.
create_mesh_overlay_with_tableau_material = 939 # (create_mesh_overlay_with_tableau_material, <destination>, <mesh_id>, <tableau_material_id>, <value>),
# Creates a mesh overlay, using the specified tableau_material. When mesh_id = -1, it is generated automatically. Value is passed as the parameter for tableau_material script. Returns overlay_id.
create_button_overlay = 912 # (create_button_overlay, <destination>, <string_id>),
# Creates a generic button overlay and returns it's overlay_id. The only difference between this and subsequent two operations is that they use different button meshes.
create_game_button_overlay = 940 # (create_game_button_overlay, <destination>, <string_id>),
# Creates a game button overlay and returns it's overlay_id.
create_in_game_button_overlay = 941 # (create_in_game_button_overlay, <destination>, <string_id>),
# Creates an in-game button overlay and returns it's overlay_id.
create_image_button_overlay = 913 # (create_image_button_overlay, <destination>, <mesh_id>, <mesh_id>),
# Creates an image button, using two meshes for normal (1st mesh) and pressed (2nd mesh) status. Button does not have a textual label. Returns button overlay_id.
create_image_button_overlay_with_tableau_material = 938 # (create_image_button_overlay_with_tableau_material, <destination>, <mesh_id>, <tableau_material_id>, <value>),
# Creates an image button from the specified mesh, using tableau_material as the image. When mesh = -1, it is generated automatically. Value is passed as the parameter to the tableau_material script. Returns overlay_id.
create_slider_overlay = 914 # (create_slider_overlay, <destination>, <min_value>, <max_value>),
# Creates horizontal slider overlay, with positions of the slider varying between min and max values. Current value of the slider can be changed with (overlay_set_val). Returns slider's overlay_id.
create_progress_overlay = 915 # (create_progress_overlay, <destination>, <min_value>, <max_value>),
# Creates progress bar overlay, with positions of the bar varying between min and max values. Current value of the progress bar can be changed with (overlay_set_val). Returns bar's overlay_id.
create_number_box_overlay = 942 # (create_number_box_overlay, <destination>, <min_value>, <max_value>),
# Creates a number box overlay (a small field for numeric value and small increase/decrease buttons to the right) with specified min and max values. Returns number box overlay_id.
create_text_box_overlay = 917 # (create_text_box_overlay, <destination>),
# Apparently deprecated. No longer used in Native.
create_simple_text_box_overlay = 919 # (create_simple_text_box_overlay, <destination>),
# Creates a text field overlay, where user can enter any text. Returns text field's overlay_id. Text contents of the field can be retrieved from s0 trigger in ti_on_presentation_event_state_change event for the text field.
create_check_box_overlay = 918 # (create_check_box_overlay, <destination>, <checkbox_off_mesh>, <checkbox_on_mesh>),
# Creates a checkbox overlay. Returns checkbox overlay_id.
create_listbox_overlay = 943 # (create_listbox_overlay, <destination>, <string>, <value>),
# Creates a listbox overlay. Individual items can be added with (overlay_add_item) and index of currently selected item can be set with (overlay_set_val). Returns listbox overlay_id. Importance of later two parameters unclear (default text&value?). 4research.
create_combo_label_overlay = 948 # (create_combo_label_overlay, <destination>),
# Creates a combo label overlay. Looks like plain text label. Individual items can be added with (overlay_add_item) and currently selected item can be set with (overlay_set_val). Returns combo block's overlay_id.
create_combo_button_overlay = 916 # (create_combo_button_overlay, <destination>),
# Creates a combo button overlay. For example see "Screen Resolution" dropdown in Settings menu. Individual items can be added with (overlay_add_item) and currently selected item can be set with (overlay_set_val). Returns combo block's overlay_id.
overlay_add_item = 931 # (overlay_add_item, <overlay_id>, <string_id>),
# Adds an item to the listbox or combobox. Items are indexed from 0. Note the order in which items appear in the dropdown is reverse to the order in which they're added.
# Overlays hierarchy manipulation
set_container_overlay = 945 # (set_container_overlay, <overlay_id>),
# Defines the specified overlay as the container. All subsequently created overlays will be placed inside the container, and their coordinates will be based on container's position. All containers with their contents will be displayed *above* any non-container overlays. Use -1 to stop placing overlays to current container and resume normal behavior.
overlay_set_container_overlay = 951 # (overlay_set_container_overlay, <overlay_id>, <container_overlay_id>),
# Allows you to put one overlay into a container, or remove it from container (if container_overlay_id = -1) without setting current overlay. May be unreliable.
# Overlay manipulation
overlay_get_position = 946 # (overlay_get_position, <position>, <overlay_id>)
# Retrieves overlay current position to specified position trigger, using position's X and Y coordinates. Note that the screen size in Warband is (1.00,0.75), further modified by fixed point multiplier.
overlay_set_val = 927 # (overlay_set_val, <overlay_id>, <value>),
# Sets the value of the overlays which have numeric values.
overlay_set_text = 920 # (overlay_set_text, <overlay_id>, <string_id>),
# Changes the overlay text (if it has any). Works for labels, text fields, buttons with text labels...
overlay_set_boundaries = 928 # (overlay_set_boundaries, <overlay_id>, <min_value>, <max_value>),
# Changes the value boundaries for the overlays that have them.
overlay_set_position = 926 # (overlay_set_position, <overlay_id>, <position>),
# Sets the overlay position on the screen, using position's X and Y coordinates. Note that the screen size in Warband is (1.00,0.75), further modified by fixed point multiplier.
overlay_set_size = 925 # (overlay_set_size, <overlay_id>, <position>),
# Sets the overlay size, using position's X and Y coordinates. Note that the screen size in Warband is (1.00,0.75), further modified by fixed point multiplier. Also see (overlay_set_area_size).
overlay_set_area_size = 929 # (overlay_set_area_size, <overlay_id>, <position>),
# Defines the actual area on the screen used to display the overlay. If it's size is greater than area size, it will create a scrollable area with appropriate scrollbars. Can be used to create scrollable areas for large text, or scrollable containers with many children elements (see Host Game screen for a typical example).
overlay_set_additional_render_height = 952 # (overlay_set_additional_render_height, <overlay_id>, <height_adder>),
# Version 1.153+. Effects uncertain. 4research.
overlay_animate_to_position = 937 # (overlay_animate_to_position, <overlay_id>, <duration-in-1/1000-seconds>, <position>),
# Moves overlay to specified position during a specified timeframe, specified in 1/1000th of second.
overlay_animate_to_size = 936 # (overlay_animate_to_size, <overlay_id>, <duration-in-1/1000-seconds>, <position>),
# Changes overlay size to specified value during a specified timeframe, specified in 1/1000th of second.
overlay_set_mesh_rotation = 930 # (overlay_set_mesh_rotation, <overlay_id>, <position>),
# Despite the name, works with any overlay, allowing you to put it on the screen in rotated position. To determine the angles, position's rotation values are used (not coordinates!). Usually you will want to only use rotation around Z axis (which results in clockwise or anti-clockwise rotation as seen by user). Note that rotating overlays which are placed inside a container may cause strange results, so some trial and error will be necessary in such situation.
overlay_set_material = 956 # (overlay_set_material, <overlay_id>, <string_no>),
# Version 1.161+. Replaces the material used for rendering specified overlay.
overlay_set_color = 921 # (overlay_set_color, <overlay_id>, <color>),
# Changes the overlay color (hexadecimal value 0xRRGGBB). May not work with some overlay types.
overlay_set_alpha = 922 # (overlay_set_alpha, <overlay_id>, <alpha>),
# Changes the overlay alpha (hexadecimal value in 0x00..0xFF range). May not work with some overlay types.
overlay_set_hilight_color = 923 # (overlay_set_hilight_color, <overlay_id>, <color>),
# Highlights the overlay with specified color. May not work with some overlay types.
overlay_set_hilight_alpha = 924 # (overlay_set_hilight_alpha, <overlay_id>, <alpha>),
# Highlights the overlay with specified alpha. May not work with some overlay types.
overlay_animate_to_color = 932 # (overlay_animate_to_color, <overlay_id>, <duration-in-1/1000-seconds>, <color>),
# Changes overlay's color during a specified timeframe, specified in 1/1000th of second.
overlay_animate_to_alpha = 933 # (overlay_animate_to_alpha, <overlay_id>, <duration-in-1/1000-seconds>, <alpha>),
# Changes overlay's alpha during a specified timeframe, specified in 1/1000th of second.
overlay_animate_to_highlight_color = 934 # (overlay_animate_to_highlight_color, <overlay_id>, <duration-in-1/1000-seconds>, <color>),
# Highlights overlay to specified color during a specified timeframe, specified in 1/1000th of second.
overlay_animate_to_highlight_alpha = 935 # (overlay_animate_to_highlight_alpha, <overlay_id>, <duration-in-1/1000-seconds>, <alpha>),
# Highlights overlay to specified alpha during a specified timeframe, specified in 1/1000th of second.
overlay_set_display = 947 # (overlay_set_display, <overlay_id>, <value>),
# Shows (value = 1) or hides (value = 0) the specified overlay.
overlay_obtain_focus = 949 # (overlay_obtain_focus, <overlay_id>),
# Makes the specified overlay obtain input focus. Only works for text fields.
overlay_set_tooltip = 950 # (overlay_set_tooltip, <overlay_id>, <string_id>),
# Defines a text which will be displayed as a tooltip when mouse pointer will hover over the specified overlay. Unreliable, always test how it works.
# Popups and some esoteric stuff
show_item_details = 970 # (show_item_details, <item_id>, <position>, <price_multiplier_percentile>),
# Shows a popup box at the specified position, containing standard game information for the specified item. Last parameter determines price percentile multiplier. Multiplier value of 100 will display item standard price, value of 0 will display "Default Item" instead of price (used in multiplayer equipment selection presentation).
show_item_details_with_modifier = 972 # (show_item_details_with_modifier, <item_id>, <item_modifier>, <position>, <price_multiplier_percentile>),
# Same as above, but displays stats and price information for an item with a modifier.
close_item_details = 971 # (close_item_details)
# Closes the item details popup box.
show_troop_details = 2388 # (show_troop_details, <troop_id>, <position>, <troop_price>)
# Version 1.153+. Supposedly displays a popup with troop information at specified place. 4research.
################################################################################
# [ Z24 ] MULTIPLAYER AND NETWORKING (LEFT FOR SOMEONE MORE FAMILIAR WITH THIS)
################################################################################
# This section is eagerly waiting for someone to write documentation comments.
# Conditional operations
player_is_active = 401 # (player_is_active, <player_id>),
# Checks that the specified player is active (i.e. connected to server).
multiplayer_is_server = 417 # (multiplayer_is_server),
# Checks that the code is running on multiplayer server. Operation will fail on client machines or in singleplayer mode.
multiplayer_is_dedicated_server = 418 # (multiplayer_is_dedicated_server),
# Checks that the code is running on dedicated multiplayer server machine.
game_in_multiplayer_mode = 419 # (game_in_multiplayer_mode),
# Checks that the game is running in multiplayer mode.
player_is_admin = 430 # (player_is_admin, <player_id>),
# Checks that the specified player has administrative rights.
player_is_busy_with_menus = 438 # (player_is_busy_with_menus, <player_id>),
# Undocumented. Educated guess is it's true when player is running a presentation without prsntf_read_only flag.
player_item_slot_is_picked_up = 461 # (player_item_slot_is_picked_up, <player_id>, <item_slot_no>),
# Checks that the specified player's equipment slot contains an item that the player has picked up from ground.
# Player slot operations
player_set_slot = 508 # (player_set_slot, <player_id>, <slot_no>, <value>),
player_get_slot = 528 # (player_get_slot, <destination>, <player_id>, <slot_no>),
player_slot_eq = 548 # (player_slot_eq, <player_id>, <slot_no>, <value>),
player_slot_ge = 568 # (player_slot_ge, <player_id>, <slot_no>, <value>),
# Network communication operations
send_message_to_url = 380 # (send_message_to_url, <string_id>, <encode_url>),
# Sends an HTTP request. Response from that URL will be returned to "script_game_receive_url_response". Parameter <encode_url> is optional and effects are unclear. Supposedly it's equivalent of calling (str_encode_url) on the first parameter which doesn't make sense for me.
multiplayer_send_message_to_server = 388 # (multiplayer_send_message_to_server, <message_type>),
# Multiplayer client operation. Send a simple message (only message code, no data) to game server.
multiplayer_send_int_to_server = 389 # (multiplayer_send_int_to_server, <message_type>, <value>),
# Multiplayer client operation. Send a message with a single extra integer value to game server.
multiplayer_send_2_int_to_server = 390 # (multiplayer_send_2_int_to_server, <message_type>, <value>, <value>),
# Same as (multiplayer_send_int_to_server), but two integer values are sent.
multiplayer_send_3_int_to_server = 391 # (multiplayer_send_3_int_to_server, <message_type>, <value>, <value>, <value>),
# Same as (multiplayer_send_int_to_server), but three integer values are sent.
multiplayer_send_4_int_to_server = 392 # (multiplayer_send_4_int_to_server, <message_type>, <value>, <value>, <value>, <value>),
# Same as (multiplayer_send_int_to_server), but four integer values are sent.
multiplayer_send_string_to_server = 393 # (multiplayer_send_string_to_server, <message_type>, <string_id>),
# Multiplayer client operation. Send a message with a string value to game server.
multiplayer_send_message_to_player = 394 # (multiplayer_send_message_to_player, <player_id>, <message_type>),
# Multiplayer server operation. Send a simple message (only message code, no data) to one of connected players.
multiplayer_send_int_to_player = 395 # (multiplayer_send_int_to_player, <player_id>, <message_type>, <value>),
# Multiplayer server operation. Send a message with a single extra integer value to one of connected players.
multiplayer_send_2_int_to_player = 396 # (multiplayer_send_2_int_to_player, <player_id>, <message_type>, <value>, <value>),
# Same as (multiplayer_send_int_to_player), but two integer values are sent.
multiplayer_send_3_int_to_player = 397 # (multiplayer_send_3_int_to_player, <player_id>, <message_type>, <value>, <value>, <value>),
# Same as (multiplayer_send_int_to_player), but three integer values are sent.
multiplayer_send_4_int_to_player = 398 # (multiplayer_send_4_int_to_player, <player_id>, <message_type>, <value>, <value>, <value>, <value>),
# Same as (multiplayer_send_int_to_player), but four integer values are sent.
multiplayer_send_string_to_player = 399 # (multiplayer_send_string_to_player, <player_id>, <message_type>, <string_id>),
# Multiplayer server operation. Send a message with a string value to one of connected players.
# Player handling operations
get_max_players = 400 # (get_max_players, <destination>),
# Returns maximum possible number of connected players. Apparently always returns a constant value, however it's return value can change as maximum increases with new patches.
player_get_team_no = 402 # (player_get_team_no, <destination>, <player_id>),
# Retrieves player's selected team.
player_set_team_no = 403 # (player_set_team_no, <player_id>, <team_id>),
# Assigns a player to the specified team.
player_get_troop_id = 404 # (player_get_troop_id, <destination>, <player_id>),
# Retrieves player's selected troop reference.
player_set_troop_id = 405 # (player_set_troop_id, <player_id>, <troop_id>),
# Assigns the selected troop reference to a player.
player_get_agent_id = 406 # (player_get_agent_id, <destination>, <player_id>),
# Retrieves player's current agent reference. Returns a negative value if player has no agent.
agent_get_player_id = 1724 # (agent_get_player_id, <destination>, <agent_id>),
# Retrieves player reference that is currently controlling the specified agent.
player_get_gold = 407 # (player_get_gold, <destination>, <player_id>),
# Retrieves player's current gold amount.
player_set_gold = 408 # (player_set_gold, <player_id>, <value>, <max_value>),
# Sets player's new gold amount and maximum allowed gold amount. Use 0 for <max_value> to remove gold limit.
player_spawn_new_agent = 409 # (player_spawn_new_agent, <player_id>, <entry_point>),
# Spawns a new agent for the specified player. Essentially a combination of (spawn_agent) and (player_control_agent) operations.
player_add_spawn_item = 410 # (player_add_spawn_item, <player_id>, <item_slot_no>, <item_id>),
#
multiplayer_get_my_team = 411 # (multiplayer_get_my_team, <destination>),
# Client operation. Retrieves player's currently selected team.
multiplayer_get_my_troop = 412 # (multiplayer_get_my_troop, <destination>),
# Client operation. Retrieves player's currently selected troop.
multiplayer_set_my_troop = 413 # (multiplayer_set_my_troop, <troop_id>),
# Client operation. Selects a new troop for the player.
multiplayer_get_my_gold = 414 # (multiplayer_get_my_gold, <destination>),
# Client operation. Retrieves current player's gold amount.
multiplayer_get_my_player = 415 # (multiplayer_get_my_player, <destination>),
# Client operation. Retrieves current player's player_id reference.
multiplayer_make_everyone_enemy = 420 # (multiplayer_make_everyone_enemy),
# Used in deathmatch mode to make everyone hostile to all other agents.
player_control_agent = 421 # (player_control_agent, <player_id>, <agent_id>),
# Server operation. Puts the agent under specified player's control. Operation will change agent's face code and banner to those of player.
player_get_item_id = 422 # (player_get_item_id, <destination>, <player_id>, <item_slot_no>),
# Server operation. Retrieves item that's currently equipped by specified player in <item_slot_no> equipment slot.
player_get_banner_id = 423 # (player_get_banner_id, <destination>, <player_id>),
# Server operation. Retrieves banner_id reference used by the specified player. Note that in MP banners are enumerated starting from 0 (unlike single-player where their enumeration depends on scene prop banners' reference range).
player_set_is_admin = 429 # (player_set_is_admin, <player_id>, <value>),
# Server operation. Set the current player as admin (value = 1) or not (value = 0).
player_get_score = 431 # (player_get_score, <destination>, <player_id>),
#
player_set_score = 432 # (player_set_score, <player_id>, <value>),
#
player_get_kill_count = 433 # (player_get_kill_count, <destination>, <player_id>),
#
player_set_kill_count = 434 # (player_set_kill_count, <player_id>, <value>),
#
player_get_death_count = 435 # (player_get_death_count, <destination>, <player_id>),
#
player_set_death_count = 436 # (player_set_death_count, <player_id>, <value>),
#
player_get_ping = 437 # (player_get_ping, <destination>, <player_id>),
#
player_get_is_muted = 439 # (player_get_is_muted, <destination>, <player_id>),
#
player_set_is_muted = 440 # (player_set_is_muted, <player_id>, <value>, [mute_for_everyone]), #mute_for_everyone optional parameter should be set to 1 if player is muted for everyone (this works only on server).
#
player_get_unique_id = 441 # (player_get_unique_id, <destination>, <player_id>), #can only be used on server side
# Server operation. Retrieves player's unique identifier which is determined by player's game license code. This number is supposed to be unique for each license, allowing reliable player identification across servers.
player_get_gender = 442 # (player_get_gender, <destination>, <player_id>),
#
player_save_picked_up_items_for_next_spawn = 459 # (player_save_picked_up_items_for_next_spawn, <player_id>),
#
player_get_value_of_original_items = 460 # (player_get_value_of_original_items, <player_id>),
# Undocumented. Official docs: this operation returns values of the items, but default troop items will be counted as zero (except horse)
profile_get_banner_id = 350 # (profile_get_banner_id, <destination>),
# Client operation. Retrieves banner_id reference used by the game for multiplayer. Note that in MP banners are enumerated starting from 0 (unlike single-player where their enumeration depends on scene prop banners' reference range).
profile_set_banner_id = 351 # (profile_set_banner_id, <value>),
# Client operation. Assigns a new banner_id to be used for multiplayer. Note that in MP banners are enumerated starting from 0 (unlike single-player where their enumeration depends on scene prop banners' reference range).
# Team handling operations
team_get_bot_kill_count = 450 # (team_get_bot_kill_count, <destination>, <team_id>),
#
team_set_bot_kill_count = 451 # (team_set_bot_kill_count, <team_id>, <value>),
#
team_get_bot_death_count = 452 # (team_get_bot_death_count, <destination>, <team_id>),
#
team_set_bot_death_count = 453 # (team_set_bot_death_count, <team_id>, <value>),
#
team_get_kill_count = 454 # (team_get_kill_count, <destination>, <team_id>),
#
team_get_score = 455 # (team_get_score, <destination>, <team_id>),
#
team_set_score = 456 # (team_set_score, <team_id>, <value>),
#
team_set_faction = 457 # (team_set_faction, <team_id>, <faction_id>),
#
team_get_faction = 458 # (team_get_faction, <destination>, <team_id>),
#
# General scene and mission handling operations
multiplayer_clear_scene = 416 # (multiplayer_clear_scene),
#
multiplayer_find_spawn_point = 425 # (multiplayer_find_spawn_point, <destination>, <team_no>, <examine_all_spawn_points>, <is_horseman>),
#
set_spawn_effector_scene_prop_kind = 426 # (set_spawn_effector_scene_prop_kind, <team_no>, <scene_prop_kind_no>),
# Specifies some scene prop kind as one of the teams' spawn effector, making players of that team more likely to spawn closer to the specified effector prop instances. Use -1 to disable spawn effector for a team.
set_spawn_effector_scene_prop_id = 427 # (set_spawn_effector_scene_prop_id, <team_no>, <scene_prop_id>),
# Specifies a single prop instance as a team's spawn effector. Different from (set_spawn_effector_scene_prop_kind) as other instances of the same scene prop will not affect player spawning.
start_multiplayer_mission = 470 # (start_multiplayer_mission, <mission_template_id>, <scene_id>, <started_manually>),
#
# Administrative operations and settings
kick_player = 465 # (kick_player, <player_id>),
#
ban_player = 466 # (ban_player, <player_id>, <value>, <player_id>),
# Official docs: set value = 1 for banning temporarily, assign 2nd player id as the administrator player id if banning is permanent
save_ban_info_of_player = 467 # (save_ban_info_of_player, <player_id>),
#
ban_player_using_saved_ban_info = 468 # (ban_player_using_saved_ban_info),
#
server_add_message_to_log = 473 # (server_add_message_to_log, <string_id>),
#
server_get_renaming_server_allowed = 475 # (server_get_renaming_server_allowed, <destination>),
# Official docs: 0-1
server_get_changing_game_type_allowed = 476 # (server_get_changing_game_type_allowed, <destination>),
# Official docs: 0-1
server_get_combat_speed = 478 # (server_get_combat_speed, <destination>),
# Official docs: 0-2
server_set_combat_speed = 479 # (server_set_combat_speed, <value>),
# Official docs: 0-2
server_get_friendly_fire = 480 # (server_get_friendly_fire, <destination>),
#
server_set_friendly_fire = 481 # (server_set_friendly_fire, <value>),
# Official docs: 0 = off, 1 = on
server_get_control_block_dir = 482 # (server_get_control_block_dir, <destination>),
#
server_set_control_block_dir = 483 # (server_set_control_block_dir, <value>),
# Official docs: 0 = automatic, 1 = by mouse movement
server_set_password = 484 # (server_set_password, <string_id>),
#
server_get_add_to_game_servers_list = 485 # (server_get_add_to_game_servers_list, <destination>),
#
server_set_add_to_game_servers_list = 486 # (server_set_add_to_game_servers_list, <value>),
#
server_get_ghost_mode = 487 # (server_get_ghost_mode, <destination>),
#
server_set_ghost_mode = 488 # (server_set_ghost_mode, <value>),
#
server_set_name = 489 # (server_set_name, <string_id>),
#
server_get_max_num_players = 490 # (server_get_max_num_players, <destination>),
#
server_set_max_num_players = 491 # (server_set_max_num_players, <value>),
#
server_set_welcome_message = 492 # (server_set_welcome_message, <string_id>),
#
server_get_melee_friendly_fire = 493 # (server_get_melee_friendly_fire, <destination>),
#
server_set_melee_friendly_fire = 494 # (server_set_melee_friendly_fire, <value>),
# Official docs: 0 = off, 1 = on
server_get_friendly_fire_damage_self_ratio = 495 # (server_get_friendly_fire_damage_self_ratio, <destination>),
#
server_set_friendly_fire_damage_self_ratio = 496 # (server_set_friendly_fire_damage_self_ratio, <value>),
# Official docs: 0-100
server_get_friendly_fire_damage_friend_ratio = 497 # (server_get_friendly_fire_damage_friend_ratio, <destination>),
#
server_set_friendly_fire_damage_friend_ratio = 498 # (server_set_friendly_fire_damage_friend_ratio, <value>),
# Official docs: 0-100
server_get_anti_cheat = 499 # (server_get_anti_cheat, <destination>),
#
server_set_anti_cheat = 477 # (server_set_anti_cheat, <value>),
# Official docs: 0 = off, 1 = on
################################################################################
# [ Z25 ] REMAINING ESOTERIC STUFF (NO IDEA WHAT IT DOES)
################################################################################
# Honestly, I have no idea what these functions could be used for. If you
# know, please let me know ASAP! :-)
set_tooltip_text = 1130 # (set_tooltip_text, <string_id>),
ai_mesh_face_group_show_hide = 1805 # (ai_mesh_face_group_show_hide, <group_no>, <value>), # 1 for enable, 0 for disable
auto_set_meta_mission_at_end_commited = 1305 # (auto_set_meta_mission_at_end_commited), Not documented. Not used in Native. Was (simulate_battle, <value>) before.
################################################################################
# [ Z26 ] HARDCODED COMPILER-RELATED CODE
################################################################################
# Do not touch this stuff unless necessary. Module System compiler needs this
# code to correctly compile your module into format that Warband understands.
lhs_operations = [
try_for_range, try_for_range_backwards, try_for_parties, try_for_agents, store_script_param_1, store_script_param_2, store_script_param, store_repeat_object,
get_global_cloud_amount, get_global_haze_amount, options_get_damage_to_player, options_get_damage_to_friends, options_get_combat_ai, options_get_campaign_ai, options_get_combat_speed,
profile_get_banner_id, get_achievement_stat, get_max_players, player_get_team_no, player_get_troop_id, player_get_agent_id, player_get_gold, multiplayer_get_my_team,
multiplayer_get_my_troop, multiplayer_get_my_gold, multiplayer_get_my_player, player_get_score, player_get_kill_count, player_get_death_count, player_get_ping, player_get_is_muted,
player_get_unique_id, player_get_gender, player_get_item_id, player_get_banner_id, game_get_reduce_campaign_ai, multiplayer_find_spawn_point, team_get_bot_kill_count,
team_get_bot_death_count, team_get_kill_count, team_get_score, team_get_faction, player_get_value_of_original_items, server_get_renaming_server_allowed,
server_get_changing_game_type_allowed, server_get_friendly_fire, server_get_control_block_dir, server_get_combat_speed, server_get_add_to_game_servers_list, server_get_ghost_mode,
server_get_max_num_players, server_get_melee_friendly_fire, server_get_friendly_fire_damage_self_ratio, server_get_friendly_fire_damage_friend_ratio, server_get_anti_cheat, troop_get_slot,
party_get_slot, faction_get_slot, scene_get_slot, party_template_get_slot, agent_get_slot, quest_get_slot, item_get_slot, player_get_slot, team_get_slot, scene_prop_get_slot,
store_last_sound_channel, get_angle_between_positions, get_distance_between_positions, get_distance_between_positions_in_meters, get_sq_distance_between_positions,
get_sq_distance_between_positions_in_meters, get_sq_distance_between_position_heights, position_get_x, position_get_y, position_get_z, position_get_scale_x,
position_get_scale_y, position_get_scale_z, position_get_rotation_around_z, position_normalize_origin, position_get_rotation_around_x, position_get_rotation_around_y,
position_get_distance_to_terrain, position_get_distance_to_ground_level, create_text_overlay, create_mesh_overlay, create_button_overlay, create_image_button_overlay, create_slider_overlay,
create_progress_overlay, create_combo_button_overlay, create_text_box_overlay, create_check_box_overlay, create_simple_text_box_overlay, create_image_button_overlay_with_tableau_material,
create_mesh_overlay_with_tableau_material, create_game_button_overlay, create_in_game_button_overlay, create_number_box_overlay, create_listbox_overlay, create_mesh_overlay_with_item_id,
overlay_get_position, create_combo_label_overlay, get_average_game_difficulty, get_level_boundary, faction_get_color, troop_get_type, troop_get_xp, troop_get_class,
troop_inventory_slot_get_item_amount, troop_inventory_slot_get_item_max_amount, troop_get_inventory_capacity, troop_get_inventory_slot, troop_get_inventory_slot_modifier,
troop_get_upgrade_troop, item_get_type, party_get_num_companions, party_get_num_prisoners, party_get_current_terrain, party_get_template_id, party_count_members_of_type,
party_count_companions_of_type, party_count_prisoners_of_type, party_get_free_companions_capacity, party_get_free_prisoners_capacity, party_get_helpfulness, party_get_ai_initiative,
party_get_num_companion_stacks, party_get_num_prisoner_stacks, party_stack_get_troop_id, party_stack_get_size, party_stack_get_num_wounded, party_stack_get_troop_dna,
party_prisoner_stack_get_troop_id, party_prisoner_stack_get_size, party_prisoner_stack_get_troop_dna, party_get_cur_town, party_get_morale, party_get_battle_opponent, party_get_icon,
party_get_skill_level, get_battle_advantage, party_get_attached_to, party_get_num_attached_parties, party_get_attached_party_with_rank, get_player_agent_no, get_player_agent_kill_count,
get_player_agent_own_troop_kill_count, agent_get_horse, agent_get_rider, agent_get_party_id, agent_get_entry_no, agent_get_troop_id, agent_get_item_id, store_agent_hit_points,
agent_get_kill_count, agent_get_player_id, agent_get_wielded_item, agent_get_ammo, agent_get_simple_behavior, agent_get_combat_state, agent_get_attached_scene_prop,
agent_get_time_elapsed_since_removed, agent_get_number_of_enemies_following, agent_get_attack_action, agent_get_defend_action, agent_get_group, agent_get_action_dir, agent_get_animation,
agent_get_team, agent_get_class, agent_get_division, team_get_hold_fire_order, team_get_movement_order, team_get_riding_order, team_get_weapon_usage_order, team_get_leader,
agent_get_item_slot, scene_prop_get_num_instances, scene_prop_get_instance, scene_prop_get_visibility, scene_prop_get_hit_points, scene_prop_get_max_hit_points, scene_prop_get_team,
agent_get_ammo_for_slot, agent_deliver_damage_to_agent_advanced, team_get_gap_distance, scene_item_get_num_instances, scene_item_get_instance, scene_spawned_item_get_num_instances,
scene_spawned_item_get_instance, prop_instance_get_variation_id, prop_instance_get_variation_id_2, prop_instance_get_position, prop_instance_get_starting_position, prop_instance_get_scale,
prop_instance_get_scene_prop_kind, prop_instance_is_animating, prop_instance_get_animation_target_position, agent_get_item_cur_ammo, mission_get_time_speed, mission_cam_get_aperture,
store_trigger_param, store_trigger_param_1, store_trigger_param_2, store_trigger_param_3, agent_ai_get_look_target, agent_ai_get_move_target, agent_ai_get_behavior_target,
agent_get_crouch_mode, store_or, store_and, store_mod, store_add, store_sub, store_mul, store_div, store_sqrt, store_pow, store_sin, store_cos, store_tan, assign, store_random,
store_random_in_range, store_asin, store_acos, store_atan, store_atan2, store_troop_gold, store_num_free_stacks, store_num_free_prisoner_stacks, store_party_size,
store_party_size_wo_prisoners, store_troop_kind_count, store_num_regular_prisoners, store_troop_count_companions, store_troop_count_prisoners, store_item_kind_count,
store_free_inventory_capacity, store_skill_level, store_character_level, store_attribute_level, store_troop_faction, store_troop_health, store_proficiency_level, store_relation,
store_conversation_agent, store_conversation_troop, store_partner_faction, store_encountered_party, store_encountered_party2, store_faction_of_party, store_current_scene, store_zoom_amount,
store_item_value, store_troop_value, store_partner_quest, store_random_quest_in_range, store_random_troop_to_raise, store_random_troop_to_capture, store_random_party_in_range,
store_random_horse, store_random_equipment, store_random_armor, store_quest_number, store_quest_item, store_quest_troop, store_current_hours, store_time_of_day, store_current_day,
store_distance_to_party_from_party, get_party_ai_behavior, get_party_ai_object, get_party_ai_current_behavior, get_party_ai_current_object, store_num_parties_created,
store_num_parties_destroyed, store_num_parties_destroyed_by_player, store_num_parties_of_template, store_random_party_of_template, store_remaining_team_no, store_mission_timer_a_msec,
store_mission_timer_b_msec, store_mission_timer_c_msec, store_mission_timer_a, store_mission_timer_b, store_mission_timer_c, store_enemy_count, store_friend_count, store_ally_count,
store_defender_count, store_attacker_count, store_normalized_team_count, item_get_weight, item_get_value, item_get_difficulty, item_get_head_armor, item_get_body_armor, item_get_leg_armor,
item_get_hit_points, item_get_weapon_length, item_get_speed_rating, item_get_missile_speed, item_get_max_ammo, item_get_accuracy, item_get_shield_height, item_get_horse_scale,
item_get_horse_speed, item_get_horse_maneuver, item_get_food_quality, item_get_abundance, item_get_thrust_damage, item_get_thrust_damage_type, item_get_swing_damage,
item_get_swing_damage_type, item_get_horse_charge_damage, try_for_prop_instances, options_get_battle_size, party_get_ignore_with_player_party, cast_ray,
prop_instance_get_current_deform_progress, prop_instance_get_current_deform_frame, face_keys_get_hair, face_keys_get_beard, face_keys_get_face_texture, face_keys_get_hair_texture,
face_keys_get_hair_color, face_keys_get_age, face_keys_get_skin_color, face_keys_get_morph_key, try_for_players, get_operation_set_version, get_startup_sun_light, get_startup_ambient_light,
get_startup_ground_ambient_light, agent_ai_get_num_cached_enemies, agent_ai_get_cached_enemy,
]
# In-place value operations: like lhs_operations above, the compiler treats
# the first argument of these as an assignment target rather than a value.
global_lhs_operations = [
  val_lshift, val_rshift, val_add, val_sub, val_mul, val_div, val_max, val_min, val_mod,
]
can_fail_operations = [
ge, eq, gt, is_between, entering_town, map_free, encountered_party_is_attacker, conversation_screen_is_active, in_meta_mission, troop_is_hero, troop_is_wounded,
key_is_down, key_clicked, game_key_is_down, game_key_clicked, hero_can_join, hero_can_join_as_prisoner, party_can_join, party_can_join_as_prisoner, troops_can_join,
troops_can_join_as_prisoner, party_can_join_party, main_party_has_troop, party_is_in_town, party_is_in_any_town, party_is_active, player_has_item, troop_has_item_equipped, troop_is_mounted,
troop_is_guarantee_ranged, troop_is_guarantee_horse, player_is_active, multiplayer_is_server, multiplayer_is_dedicated_server, game_in_multiplayer_mode, player_is_admin,
player_is_busy_with_menus, player_item_slot_is_picked_up, check_quest_active, check_quest_finished, check_quest_succeeded, check_quest_failed, check_quest_concluded, is_trial_version,
is_edit_mode_enabled, troop_slot_eq, party_slot_eq, faction_slot_eq, scene_slot_eq, party_template_slot_eq, agent_slot_eq, quest_slot_eq, item_slot_eq, player_slot_eq, team_slot_eq,
scene_prop_slot_eq, troop_slot_ge, party_slot_ge, faction_slot_ge, scene_slot_ge, party_template_slot_ge, agent_slot_ge, quest_slot_ge, item_slot_ge, player_slot_ge, team_slot_ge,
scene_prop_slot_ge, position_has_line_of_sight_to_position, position_is_behind_position, is_presentation_active, all_enemies_defeated, race_completed_by_player, num_active_teams_le,
main_hero_fallen, lt, neq, le, teams_are_enemies, agent_is_alive, agent_is_wounded, agent_is_human, agent_is_ally, agent_is_non_player, agent_is_defender, agent_is_active, agent_is_routed,
agent_is_in_special_mode, agent_is_in_parried_animation, class_is_listening_order, agent_check_offer_from_agent, entry_point_is_auto_generated, scene_prop_has_agent_on_it, agent_is_alarmed,
agent_is_in_line_of_sight, scene_prop_get_instance, scene_item_get_instance, scene_allows_mounted_units, prop_instance_is_valid, prop_instance_intersects_with_prop_instance,
agent_has_item_equipped, map_get_land_position_around_position, map_get_water_position_around_position, is_zoom_disabled, is_currently_night, store_random_party_of_template, str_is_empty,
item_has_property, item_has_capability, item_has_modifier, item_has_faction, cast_ray,
]
# Block-opening operations: the compiler tracks nesting depth for these
# (each opens a nested block of operations).
depth_operations = [
  try_begin, try_for_range, try_for_range_backwards, try_for_parties, try_for_agents, try_for_prop_instances, try_for_players,
]
| 94.912042 | 526 | 0.658537 |
182dd446982804ddccc9e2f96414958a00c9a917 | 7,210 | py | Python | ginga/misc/Settings.py | godber/ginga | acb32ed422aa604681c63c5a9494ffb0ad96cf2e | [
"BSD-3-Clause"
] | null | null | null | ginga/misc/Settings.py | godber/ginga | acb32ed422aa604681c63c5a9494ffb0ad96cf2e | [
"BSD-3-Clause"
] | null | null | null | ginga/misc/Settings.py | godber/ginga | acb32ed422aa604681c63c5a9494ffb0ad96cf2e | [
"BSD-3-Clause"
] | null | null | null | #
# Settings.py -- Simple class to manage stateful user preferences.
#
# Eric Jeschke (eric@naoj.org)
#
# Copyright (c) Eric R. Jeschke. All rights reserved.
# This is open-source software licensed under a BSD license.
# Please see the file LICENSE.txt for details.
#
import os
import pprint
import numpy
from . import Callback
from . import Bunch
unset_value = ("^^UNSET^^")
class SettingError(Exception):
    """Raised for improper use of a Setting or a SettingGroup
    (e.g. bad get() arguments, or a preferences file that cannot be read).
    """
    pass
class Setting(Callback.Callbacks):
    """A single named preference value with optional validation.

    Observers may register for the 'set' callback to be notified whenever
    the value is changed via set().
    """

    def __init__(self, value=unset_value, name=None, logger=None,
                 check_fn=None):
        Callback.Callbacks.__init__(self)
        self.value = value
        # NOTE: compares by equality against the module-level sentinel
        # string, so an explicit value equal to unset_value reads as unset.
        self._unset = (value == unset_value)
        self.name = name
        self.logger = logger
        if check_fn is None:
            check_fn = self._check_none
        self.check_fn = check_fn

        # For callbacks
        for name in ('set', ):
            self.enable_callback(name)

    def _check_none(self, value):
        # Default validator: accept any value unchanged.
        return value

    def set(self, value, callback=True):
        """Set the value (after passing it through check_fn), optionally
        firing the 'set' callback."""
        self.value = self.check_fn(value)
        if callback:
            self.make_callback('set', value)

    def get(self, *args):
        """Return the value.

        With no arguments, raises KeyError if the value was never set.
        With one argument, that argument is returned as the default when
        the value is unset.
        """
        if self._unset:
            if len(args) == 0:
                raise KeyError("setting['%s'] value is not set!" % (
                    self.name))
            else:
                # more than one positional argument is a caller error
                assert len(args) == 1, \
                    SettingError("Illegal parameter use to get(): %s" % (
                        str(args)))
                return args[0]
        return self.value

    def __repr__(self):
        return repr(self.value)

    def __str__(self):
        return str(self.value)
class SettingGroup(object):
    """A named collection of Setting objects, optionally backed by a
    preferences file (`preffile`) it can load() from and save() to.

    Individual settings can be read/written with dict-style indexing or
    via get()/set().
    """

    def __init__(self, name=None, logger=None, preffile=None):
        self.name = name
        self.logger = logger
        self.preffile = preffile
        self.group = Bunch.Bunch()

    def addSettings(self, **kwdargs):
        """Create one Setting per keyword argument (name=initial value)."""
        for key, value in kwdargs.items():
            self.group[key] = Setting(value=value, name=key,
                                      logger=self.logger)
        # TODO: add group change callback?

    def getSetting(self, key):
        """Return the Setting object itself (not just its value)."""
        return self.group[key]

    def shareSettings(self, other, keylist=None):
        """Share our Setting objects with group `other`; both groups then
        see the same underlying objects.  Shares all if keylist is None.
        """
        if keylist is None:
            keylist = self.group.keys()
        for key in keylist:
            other.group[key] = self.group[key]

    def copySettings(self, other, keylist=None):
        """Copy current values into group `other` (no object sharing).
        Copies all settings if keylist is None.
        """
        if keylist is None:
            keylist = self.group.keys()
        d = {}
        for key in keylist:
            d[key] = self.get(key)
        other.setDict(d)

    def setdefault(self, key, value):
        """Like dict.setdefault(): return the current value of `key`,
        creating the setting with `value` as its default if missing.
        """
        if key in self.group:
            return self.group[key].get(value)
        else:
            d = {key: value}
            self.addSettings(**d)
            return self.group[key].get(value)

    def addDefaults(self, **kwdargs):
        """Apply setdefault() for each keyword argument."""
        for key, value in kwdargs.items():
            self.setdefault(key, value)

    def setDefaults(self, **kwdargs):
        # Alias for addDefaults(), kept for API compatibility.
        return self.addDefaults(**kwdargs)

    def get(self, *args):
        """get(key) returns the value of `key` (KeyError if unset);
        get(key, default) returns `default` when unset (creating the
        setting as a side effect).
        """
        key = args[0]
        if len(args) == 1:
            return self.group[key].get()
        if len(args) == 2:
            return self.setdefault(key, args[1])

    def getDict(self):
        """Return a plain dict snapshot of all current values."""
        return dict([[name, self.group[name].value] for name in self.group.keys()])

    def setDict(self, d, callback=True):
        """Set many values from dict `d`, creating settings as needed."""
        for key, value in d.items():
            if key not in self.group:
                self.setdefault(key, value)
            else:
                self.group[key].set(value, callback=callback)

    def set(self, callback=True, **kwdargs):
        """Set values supplied as keyword arguments."""
        self.setDict(kwdargs, callback=callback)

    def __getitem__(self, key):
        return self.group[key].value

    def __setitem__(self, key, value):
        self.group[key].set(value)

    def has_key(self, key):
        return key in self.group

    def load(self, onError='raise'):
        """Load settings from the preferences file.

        onError is one of 'raise', 'warn' or 'silent' and controls how a
        failure to open/read the file is reported.
        """
        try:
            d = {}
            with open(self.preffile, 'r') as in_f:
                buf = in_f.read()
            for line in buf.split('\n'):
                line = line.strip()
                # skip comments and anything that doesn't look like an
                # assignment
                if line.startswith('#') or (not ('=' in line)):
                    continue
                try:
                    i = line.index('=')
                    key = line[:i].strip()
                    # SECURITY NOTE: eval() of file contents -- the
                    # preferences file must be trusted.  ast.literal_eval
                    # would be safer but could reject existing files that
                    # contain non-literal expressions.
                    val = eval(line[i+1:].strip())
                    d[key] = val
                except Exception:
                    # silently skip parse errors, for now
                    continue
            self.setDict(d)
        except Exception as e:
            errmsg = "Error opening settings file (%s): %s" % (
                self.preffile, str(e))
            if onError == 'silent':
                pass
            elif onError == 'warn':
                self.logger.warn(errmsg)
            else:
                raise SettingError(errmsg)

    def _check(self, d):
        """Recursively replace NaN/Inf values with 0.0 (they cannot be
        round-tripped through the simple `key = repr(value)` save format).
        """
        if isinstance(d, dict):
            for key, value in d.items():
                d[key] = self._check(value)
            return d
        try:
            if numpy.isnan(d):
                return 0.0
            elif numpy.isinf(d):
                return 0.0
        except Exception:
            # non-numeric values pass through unchanged
            pass
        return d

    def save(self):
        """Write all current values to the preferences file."""
        d = self.getDict()
        # sanitize data -- hard to parse NaN or Inf
        self._check(d)
        try:
            # sort keys for easy reading/editing
            keys = list(d.keys())
            keys.sort()
            with open(self.preffile, 'w') as out_f:
                for key in keys:
                    out_f.write("%s = %s\n" % (key, repr(d[key])))
        except Exception as e:
            errmsg = "Error opening settings file (%s): %s" % (
                self.preffile, str(e))
            self.logger.error(errmsg)
class Preferences(object):
    """Manage a collection of named SettingGroups (one per "category"),
    each backed by a <category>.cfg file under `basefolder`.
    """

    def __init__(self, basefolder="/tmp", logger=None):
        self.folder = basefolder
        self.logger = logger
        self.settings = Bunch.Bunch(caseless=True)

    def setDefaults(self, category, **kwdargs):
        """Apply defaults to an existing category's settings."""
        self.settings[category].addDefaults(**kwdargs)

    def getSettings(self, category):
        """Return the SettingGroup for `category`."""
        return self.settings[category]

    def getDict(self, category=None):
        """With a category, return that group's value dict; with no
        argument, return a dict mapping every category to its value dict.

        NOTE: the original code defined getDict() twice, the second
        (no-argument) definition silently shadowing the first; they are
        merged here, backward-compatibly.
        """
        if category is not None:
            return self.settings[category].getDict()
        return dict([[name, self.settings[name].getDict()] for name in
                     self.settings.keys()])

    def createCategory(self, category):
        """Return the SettingGroup for `category`, creating it (with its
        preferences file path) if it does not exist yet."""
        if category not in self.settings:
            suffix = '.cfg'
            path = os.path.join(self.folder, category + suffix)
            self.settings[category] = SettingGroup(logger=self.logger,
                                                   name=category,
                                                   preffile=path)
        return self.settings[category]

    def get_baseFolder(self):
        """Return the folder preference files are stored under."""
        return self.folder
#END
| 29.793388 | 83 | 0.512067 |
e3bbdb52db29269bd698afb55e2389c5536152d4 | 2,958 | py | Python | utils/roasts.py | pratheek78/Ezebot-Open-Source | 7888bee4d9867c32832bbcb051afb1bae0ca16d4 | [
"Apache-2.0"
] | 2 | 2021-12-15T15:59:46.000Z | 2021-12-18T16:15:16.000Z | utils/roasts.py | pratheek78/Ezebot-Open-Source | 7888bee4d9867c32832bbcb051afb1bae0ca16d4 | [
"Apache-2.0"
] | 1 | 2022-01-01T04:23:19.000Z | 2022-01-29T03:21:03.000Z | utils/roasts.py | pratheek78/Ezebot-Open-Source | 7888bee4d9867c32832bbcb051afb1bae0ca16d4 | [
"Apache-2.0"
] | 1 | 2022-02-21T15:10:56.000Z | 2022-02-21T15:10:56.000Z | roastsList = ["If your brain was dynamite, there wouldnt be enough to blow your hat off",
"You're so annoying, you make your Happy Meal cry",
"Light travels faster than sound, which is why you seemed bright until you spoke",
"I'm not insulting you, dude, im just describing you",
"You are like a cloud. When you disappear, it’s a beautiful day",
"Where’s your off button?",
"You’re so real. A real ass.",
"I’d smack you, but that would be animal abuse.",
"I keep thinking you can’t get any dumber and you keep proving me wrong.",
"I’d explain it to you but I left my English-to-Dumbass Dictionary at home.",
"If you’re going to be a smart ass, first you have to be smart, otherwise you’re just an ass.",
"o, no. I am listening. It just takes me a moment to process so much stupid information all at once.",
"You're as important as the 'ueue' in queue",
"mirrors can't talk. Lucky for you, they can't laugh either",
"Hmmm. You seem like you were born on the highway. Because that's were most accidents happen",
"If laughter is the best medicine, Your face must be curing the world wouldnt it?",
"Is your ass jealous of the amount of shit that just came out of your mouth?",
"When you were born the doctor through you out the windows, but the window through you back.",
"I dont think even bob the builder can fix your face",
"You're the reason god created the middle finger",
"dont feel disappointed in yourself its your parents job to do it"
"You're cute. Like my dog. He also always chases his tail for entertainment."
"I guess if you actually ever spoke your mind, you’d really be speechless."
"The last time I saw something like you… I flushed."
"I thought of you today. It reminded me to take out the trash."
"wait, i just took a shit how are yu still here? i sent you down with the flush"
"Stupidity isn’t a crime, so you’re free to go"
"Everyone’s entitled to act stupid once in a while, but you really abuse the privilege."
"If ignorance is bliss, you must be the happiest person on the planet."
"I was hoping for a battle of wits but you appear to be unarmed."
"I believed in human evolution until I met you."
"I envy people who have never met you and i wish i was one of them"
"when you fart people consider it a honk inside your boxers"
"You sound reasonable… I don't think my meds are working , wait a sec"
"When karma comes back to smack you in the face, I want to be there in case it needs help"]
| 77.842105 | 118 | 0.606491 |
02d3d6399e84c6adc255bdcf76e8a71456a894fe | 9,176 | py | Python | src/rez/utils/scope.py | maxnbk/rez | 762c5cfce17eabde67eb5582498406eb3544daf0 | [
"Apache-2.0"
] | null | null | null | src/rez/utils/scope.py | maxnbk/rez | 762c5cfce17eabde67eb5582498406eb3544daf0 | [
"Apache-2.0"
] | null | null | null | src/rez/utils/scope.py | maxnbk/rez | 762c5cfce17eabde67eb5582498406eb3544daf0 | [
"Apache-2.0"
] | 1 | 2020-09-24T08:33:43.000Z | 2020-09-24T08:33:43.000Z | # Copyright Contributors to the Rez project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
from rez.utils.formatting import StringFormatMixin, StringFormatType
from rez.vendor.six.six.moves import UserDict
import sys
class RecursiveAttribute(UserDict, StringFormatMixin):
    """An object that can have new attributes added recursively::
        >>> a = RecursiveAttribute()
        >>> a.foo.bah = 5
        >>> a.foo['eek'] = 'hey'
        >>> a.fee = 1
        >>> print(a.to_dict())
        {'foo': {'bah': 5, 'eek': 'hey'}, 'fee': 1}
    A recursive attribute can also be created from a dict, and made read-only::
        >>> d = {'fee': {'fi': {'fo': 'fum'}}, 'ho': 'hum'}
        >>> a = RecursiveAttribute(d, read_only=True)
        >>> print(str(a))
        {'fee': {'fi': {'fo': 'fum'}}, 'ho': 'hum'}
        >>> print(a.ho)
        hum
        >>> a.new = True
        AttributeError: 'RecursiveAttribute' object has no attribute 'new'
    """
    format_expand = StringFormatType.unchanged
    def __init__(self, data=None, read_only=False):
        # All real state lives directly in self.__dict__ (written via
        # update() to bypass our own __setattr__): "data" holds the
        # user-visible attributes, "read_only" guards against modification.
        self.__dict__.update(dict(data={}, read_only=read_only))
        self._update(data or {})
    def __getattr__(self, attr):
        def _noattrib():
            raise AttributeError("'%s' object has no attribute '%s'"
                                 % (self.__class__.__name__, attr))
        d = self.__dict__
        if attr.startswith('__') and attr.endswith('__'):
            # dunder names are only looked up in the instance dict proper,
            # never auto-created as child attributes
            try:
                return d[attr]
            except KeyError:
                _noattrib()
        if attr in d["data"]:
            return d["data"][attr]
        if d["read_only"]:
            _noattrib()
        # the new attrib isn't actually added to this instance until it's set
        # to something. This stops code like "print(instance.notexist)" from
        # adding empty attributes
        attr_ = self._create_child_attribute(attr)
        assert(isinstance(attr_, RecursiveAttribute))
        # remember where this speculative child should attach if/when it is
        # actually assigned to (see _reparent)
        attr_.__dict__["pending"] = (attr, self)
        return attr_
    def __setattr__(self, attr, value):
        d = self.__dict__
        if d["read_only"]:
            # distinguish "exists but read-only" from "does not exist"
            if attr in d["data"]:
                raise AttributeError("'%s' object attribute '%s' is read-only"
                                     % (self.__class__.__name__, attr))
            else:
                raise AttributeError("'%s' object has no attribute '%s'"
                                     % (self.__class__.__name__, attr))
        elif attr.startswith('__') and attr.endswith('__'):
            d[attr] = value
        else:
            d["data"][attr] = value
        # if this object was returned speculatively by a parent's
        # __getattr__, attach it (and any pending ancestors) now
        self._reparent()
    def __getitem__(self, attr):
        return getattr(self, attr)
    def __str__(self):
        return str(self.to_dict())
    def __repr__(self):
        return "%s(%r)" % (self.__class__.__name__, self.to_dict())
    def _create_child_attribute(self, attr):
        """Override this method to create new child attributes.
        Returns:
            `RecursiveAttribute` instance.
        """
        return self.__class__()
    def to_dict(self):
        """Get an equivalent dict representation."""
        d = {}
        for k, v in self.__dict__["data"].items():
            if isinstance(v, RecursiveAttribute):
                d[k] = v.to_dict()
            else:
                d[k] = v
        return d
    def copy(self):
        # shallow copy of the attribute data; NOTE: the copy does not
        # preserve the read_only flag (it defaults to False)
        return self.__class__(self.__dict__['data'].copy())
    def update(self, data):
        """Dict-like update operation."""
        if self.__dict__["read_only"]:
            raise AttributeError("read-only, cannot be updated")
        self._update(data)
    def _update(self, data):
        # nested plain dicts become nested RecursiveAttributes
        for k, v in data.items():
            if isinstance(v, dict):
                v = RecursiveAttribute(v)
            self.__dict__["data"][k] = v
    def _reparent(self):
        # walk up the chain of pending (not-yet-attached) parents created by
        # __getattr__, attaching each child into its parent's data dict
        d = self.__dict__
        if "pending" in d:
            attr_, parent = d["pending"]
            parent._reparent()
            parent.__dict__["data"][attr_] = self
            del d["pending"]
class _Scope(RecursiveAttribute):
    """A single named scope used by ScopeContext.

    As a context manager it snapshots the calling frame's local variables
    on entry; on exit it records any locals that were added or changed as
    attributes of this scope, then restores the caller's original locals.
    NOTE(review): relies on sys._getframe() and on mutating f_locals --
    CPython-specific behavior; confirm before porting to other interpreters.
    """
    def __init__(self, name=None, context=None):
        RecursiveAttribute.__init__(self)
        # bypass __setattr__ (RecursiveAttribute intercepts it)
        self.__dict__.update(dict(name=name,
                                  context=context,
                                  locals=None))
    def __enter__(self):
        # snapshot the caller's locals so we can diff them on exit
        locals_ = sys._getframe(1).f_locals
        self.__dict__["locals"] = locals_.copy()
        return self
    def __exit__(self, *args):
        # find what's changed
        updates = {}
        d = self.__dict__
        locals_ = sys._getframe(1).f_locals
        self_locals = d["locals"]
        # a local counts as "changed" if it is new or its value differs;
        # dunder names and nested _Scope objects are excluded
        for k, v in locals_.items():
            if not (k.startswith("__") and k.endswith("__")) \
                    and (k not in self_locals or v != self_locals[k]) \
                    and not isinstance(v, _Scope):
                updates[k] = v
        # merge updated local vars with attributes
        self.update(updates)
        # restore upper scope
        locals_.clear()
        locals_.update(self_locals)
        # notify the owning ScopeContext (if any) that this scope closed
        self_context = d["context"]
        if self_context:
            self_context._scope_exit(d["name"])
    def _create_child_attribute(self, attr):
        # children are plain RecursiveAttributes, not scopes
        return RecursiveAttribute()
class ScopeContext(object):
    """Context manager that accumulates nested dictionaries from scoped
    code::

        >>> scope = ScopeContext()
        >>> with scope("animal"):
        >>>     count = 2
        >>>     with scope("cat"):
        >>>         friendly = False

    Entering ``scope(name)`` opens (or re-opens) the named scope; local
    variables assigned inside the ``with`` body become keys of that scope's
    dictionary.  Scopes and attributes may be referenced multiple times and
    their contents are merged; re-assigning the same key overwrites it.
    Retrieve the accumulated data with `to_dict`.
    """
    def __init__(self):
        self.scopes = {}
        self.scope_stack = [_Scope()]

    def __call__(self, name):
        # a scope is identified by its dotted-name path from the (unnamed)
        # sentinel root scope, so re-entering the same path reuses the scope
        key = tuple(s.name for s in self.scope_stack[1:]) + (name,)
        try:
            scope = self.scopes[key]
        except KeyError:
            scope = _Scope(name, self)
            self.scopes[key] = scope
        self.scope_stack.append(scope)
        return scope

    def _scope_exit(self, name):
        # called by _Scope.__exit__: fold the finished scope's data into
        # the scope below it on the stack
        scope = self.scope_stack.pop()
        assert self.scope_stack
        assert name == scope.name
        self.scope_stack[-1].update({scope.name: scope.to_dict()})

    def to_dict(self):
        """Get an equivalent dict representation."""
        return self.scope_stack[-1].to_dict()

    def __str__(self):
        dotted = tuple(".".join(part for part in key)
                       for key in self.scopes.keys())
        return "%r" % (dotted,)
def scoped_formatter(**objects):
    """Format a string with respect to a set of objects' attributes.
    Use this rather than `scoped_format` when you need to reuse the formatter.

    Args:
        objects: Objects (or dicts) to expose for dot-notation formatting.

    Returns:
        A read-only `RecursiveAttribute`; call its ``format(txt, ...)``
        method (as `scoped_format` does) to perform the formatting.
    """
    return RecursiveAttribute(objects, read_only=True)
def scoped_format(txt, **objects):
    """Format a string with respect to a set of objects' attributes.
    Example:
        >>> class Foo(object):
        >>>     def __init__(self):
        >>>         self.name = "Dave"
        >>> print(scoped_format("hello {foo.name}", foo=Foo()))
        hello Dave
    Args:
        objects (dict): Dict of objects to format with. If a value is a dict,
            its values, and any further nested dicts, will also format with dot
            notation.
        pretty (bool): See `ObjectStringFormatter`.
        expand (bool): See `ObjectStringFormatter`.
    """
    # "pretty"/"expand" are reserved keywords, not format objects
    pretty = objects.pop("pretty", RecursiveAttribute.format_pretty)
    expand = objects.pop("expand", RecursiveAttribute.format_expand)
    formatter = scoped_formatter(**objects)
    return formatter.format(txt, pretty=pretty, expand=expand)
| 33.126354 | 79 | 0.559721 |
3f21f007554e17dbc8b4dc86327dd0aa1e171400 | 2,007 | py | Python | stubs/micropython-v1_17-esp8266/usocket.py | mattytrentini/micropython-stubs | 4d596273823b69e9e5bcf5fa67f249c374ee0bbc | [
"MIT"
] | null | null | null | stubs/micropython-v1_17-esp8266/usocket.py | mattytrentini/micropython-stubs | 4d596273823b69e9e5bcf5fa67f249c374ee0bbc | [
"MIT"
] | null | null | null | stubs/micropython-v1_17-esp8266/usocket.py | mattytrentini/micropython-stubs | 4d596273823b69e9e5bcf5fa67f249c374ee0bbc | [
"MIT"
] | null | null | null | """
Module: 'usocket' on micropython-v1.17-esp8266
"""
# MCU: {'ver': 'v1.17', 'port': 'esp8266', 'arch': 'xtensa', 'sysname': 'esp8266', 'release': '1.17', 'name': 'micropython', 'mpy': 9733, 'version': '1.17', 'machine': 'ESP module with ESP8266', 'build': '', 'nodename': 'esp8266', 'platform': 'esp8266', 'family': 'micropython'}
# Stubber: 1.5.4
from typing import Any
# Address-family constants (values as recorded on the esp8266 port;
# names mirror CPython's ``socket`` module).
AF_INET = 2 # type: int
AF_INET6 = 10 # type: int
# IP protocol / multicast option constants.
IPPROTO_IP = 0 # type: int
IP_ADD_MEMBERSHIP = 1024 # type: int
# Socket type constants.
SOCK_DGRAM = 2 # type: int
SOCK_RAW = 3 # type: int
SOCK_STREAM = 1 # type: int
# Socket-level option constants.
SOL_SOCKET = 1 # type: int
SO_REUSEADDR = 4 # type: int
# Module-level functions exposed by the esp8266 usocket module.  The
# stubber recorded no signatures, hence the catch-all ``*args, **kwargs``
# and placeholder ``...`` bodies.
def callback(*args, **kwargs) -> Any:
    ...

def getaddrinfo(*args, **kwargs) -> Any:
    ...

def print_pcbs(*args, **kwargs) -> Any:
    ...

def reset(*args, **kwargs) -> Any:
    ...
class socket:
    """Stub of the MicroPython ``usocket.socket`` class (v1.17, esp8266 port).

    All bodies are ``...`` placeholders emitted by the stubber; only the
    method names are meaningful.
    """

    def __init__(self, *argv, **kwargs) -> None:
        """Stub constructor; the real signature was not recorded."""
        ...

    # Stream-style read/write interface.
    def close(self, *args, **kwargs) -> Any:
        ...

    def read(self, *args, **kwargs) -> Any:
        ...

    def readinto(self, *args, **kwargs) -> Any:
        ...

    def readline(self, *args, **kwargs) -> Any:
        ...

    def send(self, *args, **kwargs) -> Any:
        ...

    def write(self, *args, **kwargs) -> Any:
        ...

    # BSD-socket style interface.
    def accept(self, *args, **kwargs) -> Any:
        ...

    def bind(self, *args, **kwargs) -> Any:
        ...

    def connect(self, *args, **kwargs) -> Any:
        ...

    def listen(self, *args, **kwargs) -> Any:
        ...

    def makefile(self, *args, **kwargs) -> Any:
        ...

    def recv(self, *args, **kwargs) -> Any:
        ...

    def recvfrom(self, *args, **kwargs) -> Any:
        ...

    def sendall(self, *args, **kwargs) -> Any:
        ...

    def sendto(self, *args, **kwargs) -> Any:
        ...

    def setblocking(self, *args, **kwargs) -> Any:
        ...

    def setsockopt(self, *args, **kwargs) -> Any:
        ...

    def settimeout(self, *args, **kwargs) -> Any:
        ...
| 21.126316 | 278 | 0.508221 |
a0a120a0bdaad4c795fe1050abb356d050fb9102 | 787 | py | Python | setup.py | youben11/simplesearch | f9d006f64caa441f21305427a71b29c6a9f10f2a | [
"Apache-2.0"
] | null | null | null | setup.py | youben11/simplesearch | f9d006f64caa441f21305427a71b29c6a9f10f2a | [
"Apache-2.0"
] | null | null | null | setup.py | youben11/simplesearch | f9d006f64caa441f21305427a71b29c6a9f10f2a | [
"Apache-2.0"
] | null | null | null | import setuptools
import os
def read(fname):
    """Return the text of *fname*, resolved relative to this file's directory.

    The file is opened with an explicit UTF-8 encoding so the README/
    requirements files decode correctly regardless of the platform's
    default locale encoding, and the handle is closed deterministically
    via the context manager (the original leaked the file object).
    """
    path = os.path.join(os.path.dirname(__file__), fname)
    with open(path, encoding="utf-8") as fh:
        return fh.read()
# One install requirement per whitespace-separated token in requirements.txt.
requirements = read("requirements.txt").split()

setuptools.setup(
    name="simplesearch",
    version="0.1a",
    author="Ayoub Benaissa",
    author_email="ayouben9@gmail.com",
    description="A library for doing search on different kind of files",
    license="Apache License 2.0",
    keywords="search nlp index",
    # README.md is rendered as the PyPI project page.
    long_description=read("README.md"),
    long_description_content_type="text/markdown",
    url="https://github.com/youben11/simplesearch",
    # Discover all packages under the repository root automatically.
    packages=setuptools.find_packages(),
    install_requires=requirements,
    classifiers=[
        "Programming Language :: Python :: 3",
        "Operating System :: OS Independent",
    ],
)
| 27.137931 | 72 | 0.684879 |
ff940bf50a1502929acc64f7de9db4f4457187d9 | 1,291 | py | Python | hackathon_ht3/code/geo_sort-key_bnb-python-test.py | encodingintuition/MachineLearningWorkbook | fb8e985475ad930c30092a0b1a27f4b8be3c7979 | [
"MIT"
] | null | null | null | hackathon_ht3/code/geo_sort-key_bnb-python-test.py | encodingintuition/MachineLearningWorkbook | fb8e985475ad930c30092a0b1a27f4b8be3c7979 | [
"MIT"
] | null | null | null | hackathon_ht3/code/geo_sort-key_bnb-python-test.py | encodingintuition/MachineLearningWorkbook | fb8e985475ad930c30092a0b1a27f4b8be3c7979 | [
"MIT"
] | null | null | null | # basic py libs
import numpy as np
import pandas as pd
import json
# GIS libs
import geopandas as gpd
from shapely.geometry import Point, Polygon
# Sample BnB key taken from the JSON list of gems.
# INGESTION: hard-coded BnB listing coordinates (lon/lat).
longitude = -73.9233
latitude = 43.7717
bnb_geo_address = [ longitude,latitude ]
# Hard-coded GEM records (id plus lat/lon columns).
data = {'gem_id': [1001, 1002,1003,1004,1005,1006],
        'latitude': [43.771663, 43.779007, 43.747417, 43.747417, 44.771755, 43.779534],
        'longitude': [-73.930204, -73.95124, -73.851914, -73.851914, -73.924258, -73.950862]
        }
df = pd.DataFrame(data = data)
# GIS manipulation
# Convert the BnB [lon, lat] pair to a shapely Point.
geo_point = Point( bnb_geo_address )
# Convert the GEMS DataFrame to a GeoDataFrame with one Point per row.
gdf = gpd.GeoDataFrame(df, geometry = gpd.points_from_xy(df.longitude, df.latitude))
# Buffer the BnB point by 0.1 degrees to include more surrounding area.
geo_pointB = geo_point.buffer(.1)
# Masking loop: collect every gem whose point lies inside the buffer.
c = 0   # NOTE(review): unused counter; safe to delete
close_gems = []
# Iterate the GeoDataFrame row by row.
for i, row in gdf.iterrows():
    # Check whether this GEM falls inside the buffered radius.
    if geo_pointB.contains(row['geometry']):
        print('yes')
        # Record the matching gem id.
        close_gems.append(row['gem_id'])
    else:
        print('no')
# Serialize the matched gem ids as a JSON array string.
json_gems = json.dumps(close_gems)
print(json_gems) | 28.065217 | 88 | 0.693261 |
3d71640624c8dca8e15f3148682b5a19097ee5ec | 23,306 | py | Python | train_model_Resnet50_keras_tuner.py | jamestd-td/dataset_shift | 5b1c66c29bffced3267ed94706d9a5706753fcf6 | [
"MIT"
] | null | null | null | train_model_Resnet50_keras_tuner.py | jamestd-td/dataset_shift | 5b1c66c29bffced3267ed94706d9a5706753fcf6 | [
"MIT"
] | null | null | null | train_model_Resnet50_keras_tuner.py | jamestd-td/dataset_shift | 5b1c66c29bffced3267ed94706d9a5706753fcf6 | [
"MIT"
] | null | null | null | import tensorflow as tf
import keras_tuner as kt
from sklearn.metrics import roc_curve, roc_auc_score
from sklearn.metrics import auc
import matplotlib.pyplot as plt
import numpy as np
import os
import seaborn as sns
# =============================================================================
# Tensorflow setup for using distributed / parallel computing
# =============================================================================
gpus = tf.config.experimental.list_physical_devices('GPU')
if gpus:
    try:
        # Currently, memory growth needs to be the same across GPUs
        for gpu in gpus:
            tf.config.experimental.set_memory_growth(gpu, True)
        logical_gpus = tf.config.experimental.list_logical_devices('GPU')
        print(len(gpus), 'Physical GPUs,', len(logical_gpus), 'Logical GPUs')
    except RuntimeError as e:
        # Memory growth must be set before GPUs have been initialized
        print(e)
# Mirror the model across all visible GPUs (synchronous data parallelism).
strategy=tf.distribute.MirroredStrategy()
print('Number of GPU devices: {}'.format(strategy.num_replicas_in_sync))
# =============================================================================
# Settings for batch size and image size and project name
# =============================================================================
BATCH_SIZE_PER_REPLICA = 16
# The global batch size scales with the number of replicas (one per GPU).
batch_size = BATCH_SIZE_PER_REPLICA * strategy.num_replicas_in_sync
img_height = 224   # ResNet50's canonical input resolution
img_width = 224
project_name='train_val_in' # replace "in" when training other datasets
# =============================================================================
# Resnet50 pre processing pipeline
# =============================================================================
# Apply ResNet50's own input normalisation to every image.
datagen = tf.keras.preprocessing.image.ImageDataGenerator(
    preprocessing_function = tf.keras.applications.resnet50.preprocess_input)
# =============================================================================
# directory setup and load files for training, intramural test and extramural test
# use appropriate dataset for training and intra and extramural test
# for example train on IN dataset and test on SH, MC & NIAID
# use only those directory and comment all other directory (make inactive)
# =============================================================================
source_dir = '/dataset'
kt_dir = '/dataset/kt_check_points'   # Keras-Tuner checkpoint/trial directory
# =============================================================================
# directory setup for IN dataset
# =============================================================================
ds_in_dir = os.path.join(source_dir, 'ds_in')
in_train_dir = os.path.join(ds_in_dir,'in_train')
in_val_dir = os.path.join(ds_in_dir,'in_val')
in_test_dir = os.path.join(ds_in_dir,'in_test')
# =============================================================================
# directory setup for SH dataset
# =============================================================================
ds_sh_dir = os.path.join(source_dir, 'ds_sh')
sh_train_dir = os.path.join(ds_sh_dir,'sh_train')
sh_val_dir = os.path.join(ds_sh_dir,'sh_val')
sh_test_dir = os.path.join(ds_sh_dir,'sh_test')
# =============================================================================
# directory setup for MC dataset
# =============================================================================
ds_mc_dir = os.path.join(source_dir, 'ds_mc')
mc_train_dir = os.path.join(ds_mc_dir,'mc_train')
mc_val_dir = os.path.join(ds_mc_dir,'mc_val')
mc_test_dir = os.path.join(ds_mc_dir,'mc_test')
# =============================================================================
# directory setup for NIAID dataset
# =============================================================================
ds_niaid_dir = os.path.join(source_dir, 'ds_niaid')
niaid_train_dir = os.path.join(ds_niaid_dir,'niaid_train')
niaid_val_dir = os.path.join(ds_niaid_dir,'niaid_val')
niaid_test_dir = os.path.join(ds_niaid_dir,'niaid_test')
# =============================================================================
# directory setup for extramural test dataset
# =============================================================================
ds_em_dir = os.path.join(source_dir, 'extramural_test')
ds_em_in = os.path.join(ds_em_dir,'ds_in')
ds_em_sh = os.path.join(ds_em_dir,'ds_sh')
ds_em_mc = os.path.join(ds_em_dir,'ds_mc')
ds_em_niaid = os.path.join(ds_em_dir,'ds_niaid')
# =============================================================================
# flow_from_directory Method
# This method is useful when the images are sorted and placed in their respective
# class/label folders. This method will identify classes automatically from the folder name.
# =============================================================================
train_ds=datagen.flow_from_directory(
    # This is the train directory, replace this when training for other dataset
    in_train_dir,
    # All images will be resized to 224x224
    target_size = (img_height, img_width),
    color_mode = 'rgb',
    classes = ['normal', 'tb'],
    batch_size = batch_size,
    # categorical class_mode yields one-hot labels for categorical_crossentropy
    class_mode = 'categorical',
    shuffle = True,
    seed = 42,
    interpolation = 'bicubic'
    )
val_ds=datagen.flow_from_directory(
    # This is the validation directory, replace this when training for other dataset
    in_val_dir,
    # All images will be resized to 224x224
    target_size = (img_height, img_width),
    color_mode = 'rgb',
    classes = ['normal', 'tb'],
    batch_size = batch_size,
    # categorical class_mode yields one-hot labels for categorical_crossentropy
    class_mode = 'categorical',
    shuffle = True,
    seed = 42,
    interpolation = 'bicubic'
    )
test_ds_ho=datagen.flow_from_directory(
    # This is the holdout test directory, replace this when training for other dataset
    in_test_dir,
    # All images will be resized to 224x224
    target_size = (img_height, img_width),
    color_mode = 'rgb',
    classes = ['normal', 'tb'],
    batch_size = batch_size,
    # categorical class_mode yields one-hot labels for categorical_crossentropy
    class_mode = 'categorical',
    shuffle = True,
    seed = 42,
    interpolation = 'bicubic'
    )
test_ds_ex1=datagen.flow_from_directory(
    # This is the extramural test set directory, replace this when testing for other dataset
    ds_em_sh,
    # All images will be resized to 224x224
    target_size = (img_height, img_width),
    color_mode = 'rgb',
    classes = ['normal', 'tb'],
    batch_size = batch_size,
    # categorical class_mode yields one-hot labels for categorical_crossentropy
    class_mode = 'categorical',
    shuffle = True,
    seed = 42,
    interpolation = 'bicubic'
    )
test_ds_ex2=datagen.flow_from_directory(
    # This is the extramural test set directory, replace this when testing for other dataset
    ds_em_mc,
    # All images will be resized to 224x224
    target_size = (img_height, img_width),
    color_mode = 'rgb',
    classes = ['normal', 'tb'],
    batch_size = batch_size,
    # categorical class_mode yields one-hot labels for categorical_crossentropy
    class_mode = 'categorical',
    shuffle = True,
    seed = 42,
    interpolation = 'bicubic'
    )
test_ds_ex3=datagen.flow_from_directory(
    # This is the extramural test set directory, replace this when testing for other dataset
    ds_em_niaid,
    # All images will be resized to 224x224
    target_size = (img_height, img_width),
    color_mode = 'rgb',
    classes = ['normal', 'tb'],
    batch_size = batch_size,
    # categorical class_mode yields one-hot labels for categorical_crossentropy
    class_mode = 'categorical',
    shuffle = True,
    seed = 42,
    interpolation = 'bicubic'
    )
# =============================================================================
# Build model for hyperparameter tuning
# The function receives a HyperParameters handle (``hp``) from the tuner and
# registers three tunable choices: dense-head width, dropout rate and the
# Adam learning rate.
# =============================================================================
def build_model(hp):
    """Return a compiled ResNet50 transfer-learning classifier.

    The ImageNet-pretrained backbone is frozen except for its
    BatchNormalization layers; a tunable dense head with dropout and a
    2-way softmax output is trained on top.
    """
    backbone = tf.keras.applications.ResNet50(weights = 'imagenet', include_top = False)
    backbone.trainable = False
    # Keep the BatchNormalization layers trainable inside the frozen backbone.
    for layer in backbone.layers:
        if 'BatchNormalization' in layer.__class__.__name__:
            layer.trainable = True

    # Global average pooling over the spatial dimensions of the feature map.
    pooled = tf.reduce_mean(backbone.output, axis = [1, 2])
    hidden = tf.keras.layers.Dense(
        hp.Int('hidden_size', 32, 512, step = 32, default = 128),
        activation = 'relu')(pooled)
    hidden = tf.keras.layers.Dropout(
        hp.Float('dropout', 0.2, 0.8, step = 0.05, default = 0.4))(hidden)
    probabilities = tf.keras.layers.Dense(2, activation = 'softmax')(hidden)

    model = tf.keras.Model(backbone.input, probabilities)
    model.compile(
        optimizer = tf.keras.optimizers.Adam(
            hp.Float('learning_rate', 1e-6, 1e-2, sampling='log', default=1e-5)),
        loss = tf.keras.losses.CategoricalCrossentropy(),
        metrics = ['accuracy'])
    return model
# =============================================================================
# Initializing Keras tuner under Multi GPU
# =============================================================================
# Hyperband search over build_model's space, maximising validation accuracy;
# trials/checkpoints are persisted under ``kt_dir/project_name``.
with strategy.scope():
    tuner = kt.Hyperband(
        hypermodel = build_model,
        objective = 'val_accuracy',
        max_epochs = 10,
        factor = 3,
        hyperband_iterations = 5,
        seed = 42,
        directory = kt_dir,
        project_name = project_name)
# =============================================================================
# Train models for hyperparameters
# =============================================================================
tuner.search(
    train_ds,
    validation_data = val_ds,
    epochs = 10,
    # Stop a trial early once validation loss stops improving.
    callbacks=[tf.keras.callbacks.EarlyStopping(monitor = 'val_loss',patience = 1)],)
# =============================================================================
# Selection of best model
# =============================================================================
best_model = tuner.get_best_models(1)[0]
best_hyperparameters = tuner.get_best_hyperparameters(1)[0]
print(best_hyperparameters.values)
best_model.summary()
tuner.results_summary()
# =============================================================================
# evaluate model on holdout test set
# =============================================================================
evalu = best_model.evaluate(test_ds_ho)
print('test loss, test acc:', evalu)
# =============================================================================
# Test model on hold out holdout test set
# =============================================================================
y_pred = best_model.predict(test_ds_ho)
y_pred_proba = y_pred[:,1] # probability of the positive ("tb") class, for the ROC curve
y_true = test_ds_ho.labels
y_pred = np.argmax(y_pred, axis = 1)   # hard class predictions (0/1)
ground_trouth = test_ds_ho.class_indices
# =============================================================================
# Delivery report and model performance for holdout test set
#
# TP = True positive: Sick people correctly identified as sick
# FP = False positive: Healthy people incorrectly identified as sick
# TN = True negative: Healthy people correctly identified as healthy
# FN = False negative: Sick people incorrectly identified as healthy
#
# =============================================================================
f1s = [0,0,0]   # NOTE(review): unused leftover; safe to delete
y_true = tf.cast(y_true, tf.float64)
y_pred = tf.cast(y_pred, tf.float64)
# With 0/1 labels, products of (optionally shifted) label vectors isolate one
# confusion-matrix cell each; count_nonzero tallies it.
TP = tf.math.count_nonzero(y_pred * y_true)
TN = tf.math.count_nonzero((y_pred -1) * (y_true -1) )
FP = tf.math.count_nonzero(y_pred * (y_true - 1))
FN = tf.math.count_nonzero((y_pred - 1) * y_true)
accuracy = (TP + TN) / (TP + TN + FP + FN)
precision = TP / (TP + FP)
recall = TP / (TP + FN)
specificity = TN / (TN + FP)
f1 = 2 * precision * recall / (precision + recall )
auc_roc_score_test_set_ho = roc_auc_score(y_true, y_pred_proba)
print('-'*90)
print('Derived Report & Model Performance')
print('-'*90)
print('%s%.2f%s' % ('Accuracy : ', accuracy * 100, '%'))
print('%s%.2f%s' % ('Precision : ', precision * 100, '%'))
print('%s%.2f%s' % ('Sensitivity : ', recall * 100, '%'))
print('%s%.2f%s' % ('Specificity : ', specificity * 100, '%'))
print('%s%.2f%s' % ('F1-Score : ', f1 * 100, '%'))
print('%s%.2f%s' % ('AUC ROC : ', auc_roc_score_test_set_ho, ''))
print("-"*90)
print("\n\n")
# =============================================================================
# Confusion Matrix for holdout test set
# =============================================================================
cm = tf.math.confusion_matrix(y_true, y_pred)
cm = cm/cm.numpy().sum(axis=1)[:, tf.newaxis]   # normalise each true-label row
sns.heatmap(
    cm, annot=True,
    xticklabels = ground_trouth,
    yticklabels = ground_trouth,
    cbar=False,
    cmap='Blues')
plt.xlabel("Model Predicted")
plt.ylabel("Ground Truth")
plt.savefig('/model_evaluation_results/R50_conf_matrix_ho.jpg',dpi=300)
# =============================================================================
# Test model on extramural test set1
# NOTE(review): this section duplicates the holdout-evaluation logic above;
# consider factoring the four evaluation sections into a helper function.
# =============================================================================
y_pred_ex1 = best_model.predict(test_ds_ex1)
y_pred_proba_ex1 = y_pred_ex1[:,1] # positive-class probability, for the ROC curve
y_true_ex1 = test_ds_ex1.labels
y_pred_ex1 = np.argmax(y_pred_ex1, axis = 1)   # hard class predictions (0/1)
ground_trouth_ex1 = test_ds_ex1.class_indices
# =============================================================================
# Delivery report and model performance for extramural test set1
# =============================================================================
f1s = [0,0,0]   # NOTE(review): unused leftover; safe to delete
y_true_ex1 = tf.cast(y_true_ex1, tf.float64)
y_pred_ex1 = tf.cast(y_pred_ex1, tf.float64)
# Products of (optionally shifted) 0/1 label vectors isolate the
# confusion-matrix cells; count_nonzero tallies them.
TP_ex1 = tf.math.count_nonzero(y_pred_ex1 * y_true_ex1)
TN_ex1 = tf.math.count_nonzero((y_pred_ex1 -1) * (y_true_ex1 -1) )
FP_ex1 = tf.math.count_nonzero(y_pred_ex1 * (y_true_ex1 - 1))
FN_ex1 = tf.math.count_nonzero((y_pred_ex1 - 1) * y_true_ex1)
accuracy_ex1 = (TP_ex1 + TN_ex1) / (TP_ex1 + TN_ex1 + FP_ex1 + FN_ex1)
precision_ex1 = TP_ex1 / (TP_ex1 + FP_ex1)
recall_ex1 = TP_ex1 / (TP_ex1 + FN_ex1)
specificity_ex1 = TN_ex1 / (TN_ex1 + FP_ex1)
f1_ex1 = 2 * precision_ex1 * recall_ex1 / (precision_ex1 + recall_ex1 )
auc_roc_score_ex1 = roc_auc_score(y_true_ex1, y_pred_proba_ex1)
print('-'*90)
print('Derived Report & Model Performance')
print('-'*90)
print('%s%.2f%s' % ('Accuracy : ', accuracy_ex1 * 100, '%'))
print('%s%.2f%s' % ('Precision : ', precision_ex1 * 100, '%'))
print('%s%.2f%s' % ('Sensitivity : ', recall_ex1 * 100, '%'))
print('%s%.2f%s' % ('Specificity : ', specificity_ex1 * 100, '%'))
print('%s%.2f%s' % ('F1-Score : ', f1_ex1 * 100, '%'))
print('%s%.2f%s' % ('AUC ROC : ', auc_roc_score_ex1, ''))
print("-"*90)
print("\n\n")
# =============================================================================
# Confusion Matrix for extramural test set1
# =============================================================================
cm_ex1 = tf.math.confusion_matrix(y_true_ex1, y_pred_ex1)
cm_ex1 = cm_ex1/cm_ex1.numpy().sum(axis=1)[:, tf.newaxis]   # per-row normalisation
sns.heatmap(
    cm_ex1, annot=True,
    xticklabels = ground_trouth_ex1,
    yticklabels = ground_trouth_ex1,
    cbar=False,
    cmap='Blues')
plt.xlabel("Model Predicted")
plt.ylabel("Ground Truth")
plt.savefig('/model_evaluation_results/R50_conf_matrix_ex1.jpg',dpi=300)
# =============================================================================
# Test model on extramural test set2
# NOTE(review): duplicated evaluation logic; candidate for a shared helper.
# =============================================================================
y_pred_ex2 = best_model.predict(test_ds_ex2)
y_pred_proba_ex2 = y_pred_ex2[:,1] # positive-class probability, for the ROC curve
y_true_ex2 = test_ds_ex2.labels
y_pred_ex2 = np.argmax(y_pred_ex2, axis = 1)   # hard class predictions (0/1)
ground_trouth_ex2 = test_ds_ex2.class_indices
# =============================================================================
# Delivery report and model performance for extramural test set2
# =============================================================================
f1s = [0,0,0]   # NOTE(review): unused leftover; safe to delete
y_true_ex2 = tf.cast(y_true_ex2, tf.float64)
y_pred_ex2 = tf.cast(y_pred_ex2, tf.float64)
TP_ex2 = tf.math.count_nonzero(y_pred_ex2 * y_true_ex2)
TN_ex2 = tf.math.count_nonzero((y_pred_ex2 -1) * (y_true_ex2 -1) )
FP_ex2 = tf.math.count_nonzero(y_pred_ex2 * (y_true_ex2 - 1))
FN_ex2 = tf.math.count_nonzero((y_pred_ex2 - 1) * y_true_ex2)
accuracy_ex2 = (TP_ex2 + TN_ex2) / (TP_ex2 + TN_ex2 + FP_ex2 + FN_ex2)
precision_ex2 = TP_ex2 / (TP_ex2 + FP_ex2)
recall_ex2 = TP_ex2 / (TP_ex2 + FN_ex2)
specificity_ex2 = TN_ex2 / (TN_ex2 + FP_ex2)
f1_ex2 = 2 * precision_ex2 * recall_ex2 / (precision_ex2 + recall_ex2 )
auc_roc_score_ex2 = roc_auc_score(y_true_ex2, y_pred_proba_ex2)
print('-'*90)
print('Derived Report & Model Performance')
print('-'*90)
print('%s%.2f%s' % ('Accuracy : ', accuracy_ex2 * 100, '%'))
print('%s%.2f%s' % ('Precision : ', precision_ex2 * 100, '%'))
print('%s%.2f%s' % ('Sensitivity : ', recall_ex2 * 100, '%'))
print('%s%.2f%s' % ('Specificity : ', specificity_ex2 * 100, '%'))
print('%s%.2f%s' % ('F1-Score : ', f1_ex2 * 100, '%'))
print('%s%.2f%s' % ('AUC ROC : ', auc_roc_score_ex2, ''))
print("-"*90)
print("\n\n")
# =============================================================================
# Confusion Matrix for extramural test set2
# =============================================================================
cm_ex2 = tf.math.confusion_matrix(y_true_ex2, y_pred_ex2)
cm_ex2 = cm_ex2/cm_ex2.numpy().sum(axis=1)[:, tf.newaxis]   # per-row normalisation
sns.heatmap(
    cm_ex2, annot=True,
    xticklabels = ground_trouth_ex2,
    yticklabels = ground_trouth_ex2,
    cbar=False,
    cmap='Blues')
plt.xlabel("Model Predicted")
plt.ylabel("Ground Truth")
plt.savefig('/model_evaluation_results/R50_conf_matrix_ex2.jpg',dpi=300)
# =============================================================================
# Test model on extramural test set3
# NOTE(review): duplicated evaluation logic; candidate for a shared helper.
# =============================================================================
y_pred_ex3 = best_model.predict(test_ds_ex3)
y_pred_proba_ex3 = y_pred_ex3[:,1] # positive-class probability, for the ROC curve
y_true_ex3 = test_ds_ex3.labels
y_pred_ex3 = np.argmax(y_pred_ex3, axis = 1)   # hard class predictions (0/1)
ground_trouth_ex3 = test_ds_ex3.class_indices
# =============================================================================
# Delivery report and model performance for extramural test set3
# =============================================================================
f1s = [0,0,0]   # NOTE(review): unused leftover; safe to delete
y_true_ex3 = tf.cast(y_true_ex3, tf.float64)
y_pred_ex3 = tf.cast(y_pred_ex3, tf.float64)
TP_ex3 = tf.math.count_nonzero(y_pred_ex3 * y_true_ex3)
TN_ex3 = tf.math.count_nonzero((y_pred_ex3 -1) * (y_true_ex3 -1) )
FP_ex3 = tf.math.count_nonzero(y_pred_ex3 * (y_true_ex3 - 1))
FN_ex3 = tf.math.count_nonzero((y_pred_ex3 - 1) * y_true_ex3)
accuracy_ex3 = (TP_ex3 + TN_ex3) / (TP_ex3 + TN_ex3 + FP_ex3 + FN_ex3)
precision_ex3 = TP_ex3 / (TP_ex3 + FP_ex3)
recall_ex3 = TP_ex3 / (TP_ex3 + FN_ex3)
specificity_ex3 = TN_ex3 / (TN_ex3 + FP_ex3)
f1_ex3 = 2 * precision_ex3 * recall_ex3 / (precision_ex3 + recall_ex3 )
auc_roc_score_ex3 = roc_auc_score(y_true_ex3, y_pred_proba_ex3)
print('-'*90)
print('Derived Report & Model Performance')
print('-'*90)
print('%s%.2f%s' % ('Accuracy : ', accuracy_ex3 * 100, '%'))
print('%s%.2f%s' % ('Precision : ', precision_ex3 * 100, '%'))
print('%s%.2f%s' % ('Sensitivity : ', recall_ex3 * 100, '%'))
print('%s%.2f%s' % ('Specificity : ', specificity_ex3 * 100, '%'))
print('%s%.2f%s' % ('F1-Score : ', f1_ex3 * 100, '%'))
print('%s%.2f%s' % ('AUC ROC : ', auc_roc_score_ex3, ''))
print("-"*90)
print("\n\n")
# =============================================================================
# Confusion Matrix for extramural test set3
# =============================================================================
cm_ex3 = tf.math.confusion_matrix(y_true_ex3, y_pred_ex3)
cm_ex3 = cm_ex3/cm_ex3.numpy().sum(axis=1)[:, tf.newaxis]   # per-row normalisation
sns.heatmap(
    cm_ex3, annot=True,
    xticklabels = ground_trouth_ex3,
    yticklabels = ground_trouth_ex3,
    cbar=False,
    cmap='Blues')
plt.xlabel("Model Predicted")
plt.ylabel("Ground Truth")
plt.savefig('/model_evaluation_results/R50_conf_matrix_ex3.jpg',dpi=300)
# =============================================================================
# ROC curve where positive label is tb and plot holdout and extramural test together
# =============================================================================
fpr_ho = dict()
tpr_ho = dict()
roc_auc_score_ho = dict()
fpr_ex1 = dict()
tpr_ex1 = dict()
roc_auc_score_ex1 = dict()
fpr_ex2 = dict()
tpr_ex2 = dict()
roc_auc_score_ex2 = dict()
fpr_ex3 = dict()
tpr_ex3 = dict()
roc_auc_score_ex3 = dict()
num_classes=2
# NOTE(review): these loops compute the same binary ROC curve for each i,
# so indices 0 and 1 hold identical arrays; only index 1 is plotted below.
for i in range(num_classes):
    fpr_ho[i], tpr_ho[i], _ = roc_curve(y_true, y_pred_proba)
    roc_auc_score_ho[i] = auc(fpr_ho[i], tpr_ho[i])
for i in range(num_classes):
    fpr_ex1[i], tpr_ex1[i], _ = roc_curve(y_true_ex1, y_pred_proba_ex1)
    roc_auc_score_ex1[i] = auc(fpr_ex1[i], tpr_ex1[i])
for i in range(num_classes):
    fpr_ex2[i], tpr_ex2[i], _ = roc_curve(y_true_ex2, y_pred_proba_ex2)
    roc_auc_score_ex2[i] = auc(fpr_ex2[i], tpr_ex2[i])
for i in range(num_classes):
    fpr_ex3[i], tpr_ex3[i], _ = roc_curve(y_true_ex3, y_pred_proba_ex3)
    roc_auc_score_ex3[i] = auc(fpr_ex3[i], tpr_ex3[i])
fig=plt.figure(figsize=(15,10), dpi=300)
ax = fig.add_subplot(1, 1, 1)
# Major ticks every 0.10, minor ticks every 0.05
ax.tick_params(axis='both', which='major', labelsize=20)
ax.tick_params(axis='both', which='minor', labelsize=20)
major_ticks = np.arange(0.0, 1.10, 0.10)
minor_ticks = np.arange(0.0, 1.10, 0.05)
ax.set_xticks(major_ticks)
ax.set_xticks(minor_ticks, minor=True)
ax.set_yticks(major_ticks)
ax.set_yticks(minor_ticks, minor=True)
ax.grid(which='both')
lw = 2
# =============================================================================
# change the label accordingly when analyzing other datasets
# * indicate holdout and # indicate extramural test set
# =============================================================================
plt.plot(fpr_ho[1], tpr_ho[1], '*-', color='xkcd:indigo',
    lw=lw, label='IN* (area = %0.4f)' % roc_auc_score_ho[1])
plt.plot(fpr_ex1[1], tpr_ex1[1], '*-', color='xkcd:plum',
    lw=lw, label='SH# (area = %0.4f)' % roc_auc_score_ex1[1])
plt.plot(fpr_ex2[1], tpr_ex2[1], '*-', color='xkcd:magenta',
    lw=lw, label='MC# (area = %0.4f)' % roc_auc_score_ex2[1])
plt.plot(fpr_ex3[1], tpr_ex3[1],'*-', color='xkcd:tomato',
    lw=lw, label='NIAID# (area = %0.4f)' % roc_auc_score_ex3[1])
plt.plot([0, 1], [0, 1], ':', color='xkcd:red', lw=lw) # reference ROC 50% AUC
plt.xlabel('False Positive Rate',fontsize=20)
plt.ylabel('True Positive Rate',fontsize=20)
plt.legend(loc="lower right",fontsize=20)
plt.savefig('/model_evaluation_results/R50_roc_train_in.jpg',dpi=300)
| 39.23569 | 111 | 0.546254 |
be71a6b5241721d70160591082153a4bc1a9f7d2 | 3,048 | py | Python | GetStarted/02_adding_data_to.py | ganeshghimire1986/google_earth_engine | 21436dd80318b1d02ac3d2ef880ad01d626b84cb | [
"MIT"
] | null | null | null | GetStarted/02_adding_data_to.py | ganeshghimire1986/google_earth_engine | 21436dd80318b1d02ac3d2ef880ad01d626b84cb | [
"MIT"
] | null | null | null | GetStarted/02_adding_data_to.py | ganeshghimire1986/google_earth_engine | 21436dd80318b1d02ac3d2ef880ad01d626b84cb | [
"MIT"
] | null | null | null | # %%
"""
<table class="ee-notebook-buttons" align="left">
<td><a target="_blank" href="https://github.com/giswqs/earthengine-py-notebooks/tree/master/GetStarted/02_adding_data_to.ipynb"><img width=32px src="https://www.tensorflow.org/images/GitHub-Mark-32px.png" /> View source on GitHub</a></td>
<td><a target="_blank" href="https://nbviewer.jupyter.org/github/giswqs/earthengine-py-notebooks/blob/master/GetStarted/02_adding_data_to.ipynb"><img width=26px src="https://upload.wikimedia.org/wikipedia/commons/thumb/3/38/Jupyter_logo.svg/883px-Jupyter_logo.svg.png" />Notebook Viewer</a></td>
<td><a target="_blank" href="https://colab.research.google.com/github/giswqs/earthengine-py-notebooks/blob/master/GetStarted/02_adding_data_to.ipynb"><img src="https://www.tensorflow.org/images/colab_logo_32px.png" /> Run in Google Colab</a></td>
</table>
"""
# %%
"""
## Install Earth Engine API and geemap
Install the [Earth Engine Python API](https://developers.google.com/earth-engine/python_install) and [geemap](https://geemap.org). The **geemap** Python package is built upon the [ipyleaflet](https://github.com/jupyter-widgets/ipyleaflet) and [folium](https://github.com/python-visualization/folium) packages and implements several methods for interacting with Earth Engine data layers, such as `Map.addLayer()`, `Map.setCenter()`, and `Map.centerObject()`.
The following script checks if the geemap package has been installed. If not, it will install geemap, which automatically installs its [dependencies](https://github.com/giswqs/geemap#dependencies), including earthengine-api, folium, and ipyleaflet.
"""
# %%
# Installs geemap package on first use (e.g. in a fresh Colab runtime).
import subprocess

try:
    import geemap
except ImportError:
    print('Installing geemap ...')
    # NOTE(review): ``sys.executable`` would target the running interpreter
    # more reliably than the literal "python" on multi-Python systems.
    subprocess.check_call(["python", '-m', 'pip', 'install', 'geemap'])
# %%
import ee
import geemap
# %%
"""
## Create an interactive map
The default basemap is `Google Maps`. [Additional basemaps](https://github.com/giswqs/geemap/blob/master/geemap/basemaps.py) can be added using the `Map.add_basemap()` function.
"""
# %%
# Build the interactive map centred on the continental US (lat 40, lon -100).
Map = geemap.Map(center=[40,-100], zoom=4)
Map  # evaluating the Map object renders the widget in a notebook cell
# %%
"""
## Add Earth Engine Python script
"""
# %%
# Add Earth Engine dataset
# Load a single Landsat 8 scene by asset id.
image = ee.Image('LANDSAT/LC08/C01/T1/LC08_044034_20140318')
# Center the map on the image.
Map.centerObject(image, 9)
# Display the image with default (empty) visualization parameters.
Map.addLayer(image, {}, 'Landsat 8 original image')
# Define visualization parameters in an object literal (false-color bands).
vizParams = {'bands': ['B5', 'B4', 'B3'],
             'min': 5000, 'max': 15000, 'gamma': 1.3}
# NOTE(review): centerObject was already called above; this repeat is redundant.
Map.centerObject(image, 9)
Map.addLayer(image, vizParams, 'Landsat 8 False color')
# Use Map.addLayer() to add features and feature collections to the map. For example,
counties = ee.FeatureCollection('TIGER/2016/Counties')
Map.addLayer(ee.Image().paint(counties, 0, 2), {}, 'counties')
# %%
"""
## Display Earth Engine data layers
"""
# %%
# Attach a layer-control widget to the map.
Map.addLayerControl() # This line is not needed for ipyleaflet-based Map.
Map | 39.584416 | 457 | 0.724738 |
ced8292a725dbe6e8109b7a020805b93e8048053 | 158 | py | Python | examples/bioinformatics/research/exploratory_data_analysis/nmf_comparison.py | LiorZ/labnote | d3732e8ce6414796c1631ac009147e7488012066 | [
"BSD-2-Clause-FreeBSD"
] | 19 | 2017-02-23T04:02:54.000Z | 2021-02-04T12:34:57.000Z | examples/bioinformatics/research/exploratory_data_analysis/nmf_comparison.py | LiorZ/labnote | d3732e8ce6414796c1631ac009147e7488012066 | [
"BSD-2-Clause-FreeBSD"
] | 1 | 2019-03-06T23:27:46.000Z | 2019-03-06T23:27:46.000Z | examples/bioinformatics/research/exploratory_data_analysis/nmf_comparison.py | LiorZ/labnote | d3732e8ce6414796c1631ac009147e7488012066 | [
"BSD-2-Clause-FreeBSD"
] | 4 | 2017-06-16T03:47:44.000Z | 2022-02-18T03:40:49.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Nonnegative matrix factorization comparison
"""
def main():
    """Placeholder entry point; the NMF comparison is not yet implemented."""
    pass


if __name__ == "__main__":
    main()
| 14.363636 | 43 | 0.620253 |
a328c9e3c8b8e2bb05d44110bd624b1c5ce7a289 | 224 | py | Python | sample/insert_after.py | iogf/ehp | 14d2cae449f411863fea201c3a99802fa37f1696 | [
"MIT"
] | 47 | 2015-09-11T20:44:05.000Z | 2022-03-03T01:16:51.000Z | sample/insert_after.py | iogf/ehp | 14d2cae449f411863fea201c3a99802fa37f1696 | [
"MIT"
] | 6 | 2016-06-30T18:55:08.000Z | 2020-08-15T20:19:11.000Z | sample/insert_after.py | iogf/ehp | 14d2cae449f411863fea201c3a99802fa37f1696 | [
"MIT"
] | 15 | 2016-05-08T01:17:10.000Z | 2021-06-29T07:43:08.000Z | from ehp import *
# Minimal document with a single <em> element to anchor the insertion.
data = '''<html><body><em> alpha </em></body></html>'''
dom = Html().feed(data)
# Node to insert: an <em> tag whose only child is the text "beta".
x = Tag('em')
x.append(Data('beta'))
# For every <em> match, insert the new tag right after it inside its parent.
# find_with_root appears to yield (parent, node) pairs — TODO confirm against
# the ehp docs.  NOTE(review): the same ``x`` object is reused for every
# match; fine here since the sample document contains exactly one <em>.
for root, ind in dom.find_with_root('em'):
    root.insert_after(ind, x)
print(dom)
| 16 | 55 | 0.611607 |
16c83f71f80919c36c1fb4e9708f80619158a40d | 1,673 | py | Python | creme/optim/rms_prop.py | Raul9595/creme | 39cec7ac27ccd40ff0a7bdd6bceaf7ce25c1a8da | [
"BSD-3-Clause"
] | 1 | 2020-07-27T03:06:46.000Z | 2020-07-27T03:06:46.000Z | creme/optim/rms_prop.py | 2torus/creme | bcc5e2a0155663a1f0ba779c68f23456695bcb54 | [
"BSD-3-Clause"
] | null | null | null | creme/optim/rms_prop.py | 2torus/creme | bcc5e2a0155663a1f0ba779c68f23456695bcb54 | [
"BSD-3-Clause"
] | 2 | 2021-06-20T09:29:38.000Z | 2021-06-23T07:47:21.000Z | import collections
from . import base
__all__ = ['RMSProp']
class RMSProp(base.Optimizer):
    """RMSProp optimizer.

    Maintains an exponentially decaying average of squared gradients and
    scales each weight update by the inverse square root of that average.

    Example:

        ::

            >>> from creme import compose
            >>> from creme import linear_model
            >>> from creme import metrics
            >>> from creme import model_selection
            >>> from creme import optim
            >>> from creme import preprocessing
            >>> from creme import stream
            >>> from sklearn import datasets

            >>> X_y = stream.iter_sklearn_dataset(
            ...     dataset=datasets.load_breast_cancer(),
            ...     shuffle=True,
            ...     random_state=42
            ... )
            >>> optimiser = optim.RMSProp()
            >>> model = compose.Pipeline([
            ...     ('scale', preprocessing.StandardScaler()),
            ...     ('learn', linear_model.LogisticRegression(optimiser))
            ... ])
            >>> metric = metrics.F1()

            >>> model_selection.online_score(X_y, model, metric)
            F1: 0.971989

    References:
        1. `Divide the gradient by a running average of itsrecent magnitude <https://www.cs.toronto.edu/~tijmen/csc321/slides/lecture_slides_lec6.pdf>`_

    """

    def __init__(self, lr=0.1, rho=0.9, eps=1e-8):
        super().__init__(lr)
        self.eps = eps
        self.rho = rho
        # Running average of squared gradients, one entry per weight index.
        self.g2 = collections.defaultdict(float)

    def _update_after_pred(self, w, g):
        decay = self.rho
        for idx, grad in g.items():
            # Update the exponential moving average of the squared gradient.
            sq_avg = decay * self.g2[idx] + (1 - decay) * grad ** 2
            self.g2[idx] = sq_avg
            # Scale the step by the RMS of recent gradient magnitudes.
            w[idx] -= self.learning_rate / (sq_avg + self.eps) ** 0.5 * grad
        return w
| 28.844828 | 152 | 0.53676 |
8b3521a184d8613d684d1718bb3d73340c541af3 | 174 | py | Python | pyroot/check_model.py | mj-will/ml4np | 7dca09d430e588b04ced8a12e46caa5d2a6f2c09 | [
"MIT"
] | null | null | null | pyroot/check_model.py | mj-will/ml4np | 7dca09d430e588b04ced8a12e46caa5d2a6f2c09 | [
"MIT"
] | null | null | null | pyroot/check_model.py | mj-will/ml4np | 7dca09d430e588b04ced8a12e46caa5d2a6f2c09 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
import sys
from keras.models import load_model
# Model file path comes from the first command-line argument.
model_path = sys.argv[1]
print(model_path)

# Load the saved Keras model and print its architecture summary.
model = load_model(model_path)
model.summary()
| 14.5 | 35 | 0.747126 |
d5ead432df7de39a5e5e5248351fa79063b25e8a | 485 | py | Python | src/configs.py | mariolovric/solubility | 55f8002b27fc03a05a37d88607b69c7e6146ba06 | [
"MIT"
] | 2 | 2021-12-14T13:09:02.000Z | 2022-02-13T16:17:42.000Z | src/configs.py | mariolovric/solubility | 55f8002b27fc03a05a37d88607b69c7e6146ba06 | [
"MIT"
] | null | null | null | src/configs.py | mariolovric/solubility | 55f8002b27fc03a05a37d88607b69c7e6146ba06 | [
"MIT"
# Hyper-parameter search spaces, keyed by model-name prefix.
# The prefixes appear to correspond to random forest ('rf_'), LightGBM
# ('lg_'), partial least squares ('pls_') and Lasso ('lasso_') models --
# inferred from the parameter names; confirm against the model factory.
# Each entry maps a hyper-parameter to its (lower, upper) search bounds.
param_space = {
    'rf_':
        {
            'max_depth': (8, 30),
            'n_estimators': (50, 300),
            'max_samples': (.25, .6),
            'min_samples_split': (3, 10)
        },
    'lg_':
        {
            'num_leaves': (20, 250),
            'max_depth': (2, 50),
            'lambda_l2': (0.0, .1),
            'lambda_l1': (0.0, .1),
            'min_data_in_leaf': (2, 10)
        },
    'pls_':
        {
            'n_components': (2, 10),
        },
    'lasso_':
        {
            'alpha': (0.01, 1000)
        },
    }
| 15.645161 | 36 | 0.385567 |
48c6c942c553c1df572b05b72d8910f79f7139c0 | 2,170 | py | Python | pinakes/main/analytics/tasks.py | mkanoor/pinakes | cfcc6e8e12e9c68d7930f41075b5e4e0dfee51c3 | [
"Apache-2.0"
] | null | null | null | pinakes/main/analytics/tasks.py | mkanoor/pinakes | cfcc6e8e12e9c68d7930f41075b5e4e0dfee51c3 | [
"Apache-2.0"
] | null | null | null | pinakes/main/analytics/tasks.py | mkanoor/pinakes | cfcc6e8e12e9c68d7930f41075b5e4e0dfee51c3 | [
"Apache-2.0"
] | null | null | null | """ Tasks for metrics collection """
import logging
import django_rq
from rq import Queue
from rq.job import Job
from rq import get_current_job
from rq import exceptions
from django.utils.timezone import make_aware
from pinakes.main.analytics.collector import AnalyticsCollector
from pinakes.main.analytics import analytics_collectors
logger = logging.getLogger("analytics")
def gather_analytics():
    """Run a scheduled analytics collection, resuming from the last gather time."""
    collector = AnalyticsCollector(
        collector_module=analytics_collectors,
        collection_type="scheduled",
        logger=logger,
    )

    last_gather = get_last_gather()
    # get_last_gather() returns a job's ended_at timestamp; make it
    # timezone-aware before storing or passing downstream.
    saved_last_gather = make_aware(last_gather) if last_gather else None

    # Save the last-gather timestamp in the current rq job's meta so future
    # inspection of this job shows where collection resumed from.
    job = get_current_job()
    job.meta["last_gather"] = saved_last_gather
    job.save_meta()

    collector.gather(since=saved_last_gather)
def get_last_gather():
    """Return the ``ended_at`` time of the most recent gather job, or None.

    Looks at both the finished and the canceled job registries of the
    default queue and logs whichever candidates were found.
    """
    connection = django_rq.get_connection()
    queue = Queue(connection=connection)

    last_finished_job = get_last_successful_gather_job(
        queue.finished_job_registry, connection
    )
    last_canceled_job = get_last_successful_gather_job(
        queue.canceled_job_registry, connection
    )
    if last_canceled_job:
        logger.info(
            "last canceled job: %s, %s",
            last_canceled_job.id,
            last_canceled_job.ended_at,
        )
    if last_finished_job:
        logger.info(
            "last finished job: %s, %s",
            last_finished_job.id,
            last_finished_job.ended_at,
        )

    # NOTE(review): a finished job takes precedence over a canceled one even
    # if the canceled job ended more recently -- confirm this is intended
    # (vs. returning the max of the two ended_at timestamps).
    return (
        last_finished_job.ended_at
        if last_finished_job
        else last_canceled_job.ended_at
        if last_canceled_job
        else None
    )
def get_last_successful_gather_job(job_registry, connection):
    """Return the newest job in ``job_registry`` that ran ``gather_analytics``
    and actually finished (has a non-empty ``ended_at``), or None.

    :param job_registry: an rq job registry (e.g. finished or canceled).
    :param connection: redis connection used to fetch job payloads.
    """
    # Walk the registry's job IDs from the end of the list, so later
    # (more recently registered) entries are checked first.
    job_ids = job_registry.get_job_ids()
    while job_ids:
        job_id = job_ids.pop()
        try:
            job = Job.fetch(job_id, connection)
        except exceptions.NoSuchJobError:
            # Job payload expired or was deleted; skip it.
            # Lazy %-style args avoid formatting when the level is disabled.
            logger.warning("No job with id: %s", job_id)
            continue

        if job.ended_at and job.func_name.endswith("gather_analytics"):
            return job
    return None
| 26.144578 | 72 | 0.682028 |
529f9c855773196fd5759108688b1a147af1c8dd | 85,453 | py | Python | src/aequitas/plotting.py | LiFaytheGoblin/aequitas | e5690baa955c94ea6459af5064cf1c741a345646 | [
"MIT"
] | 469 | 2018-04-24T23:11:45.000Z | 2022-03-29T07:54:07.000Z | src/aequitas/plotting.py | LiFaytheGoblin/aequitas | e5690baa955c94ea6459af5064cf1c741a345646 | [
"MIT"
] | 62 | 2018-04-16T00:14:56.000Z | 2021-11-12T10:35:01.000Z | src/aequitas/plotting.py | LiFaytheGoblin/aequitas | e5690baa955c94ea6459af5064cf1c741a345646 | [
"MIT"
] | 94 | 2018-05-21T16:13:57.000Z | 2022-03-25T20:07:25.000Z | import logging
import math
import numpy as np
import collections
import seaborn as sns
import matplotlib.pyplot as plt
import matplotlib.colors
import matplotlib.cm
from aequitas import squarify_flipped as sf
logging.getLogger(__name__)
__author__ = "Pedro Saleiro <saleiro@uchicago.edu>, Loren Hinkson"
__copyright__ = "Copyright \xa9 2018. The University of Chicago. All Rights Reserved."
# module-level function
def assemble_ref_groups(disparities_table, ref_group_flag='_ref_group_value',
                        specific_measures=None, label_score_ref=None):
    """
    Creates a dictionary of reference groups for each metric in a data_table.

    :param disparities_table: a disparity table. Output of bias.get_disparity or
        fairness.get_fairness functions
    :param ref_group_flag: string indicating column indicates reference group
        flag value. Default is '_ref_group_value'.
    :param specific_measures: Limits reference dictionary to only specified
        metrics in a data table. Default is None.
    :param label_score_ref: Defines a metric, ex: 'fpr' (false positive rate)
        from which to mimic reference group for label_value and score. Used for
        statistical significance calculations in Bias() class. Default is None.
    :return: A dictionary mapping attribute name -> {metric -> reference group}.
    """
    ref_groups = {}
    ref_group_cols = set(
        disparities_table.columns[disparities_table.columns.str.contains(ref_group_flag)])

    # Note: specific measures is a set
    if specific_measures:
        if len(specific_measures) < 1:
            raise ValueError("At least one metric must be passed for which to "
                             "find refrence group.")
        # Bug fix: only fold label_score_ref into the measure set when it was
        # actually provided. The original unconditionally added it, so passing
        # specific_measures without label_score_ref put None in the set and
        # crashed below on `None + ref_group_flag`.
        if label_score_ref is not None:
            specific_measures = specific_measures.union({label_score_ref})

        ref_group_cols = {measure + ref_group_flag for measure in specific_measures if
                          measure + ref_group_flag in ref_group_cols}

    attributes = list(disparities_table.attribute_name.unique())
    for attribute in attributes:
        attr_table = \
            disparities_table.loc[disparities_table['attribute_name'] == attribute]
        attr_refs = {}
        for col in ref_group_cols:
            # label/score reference columns are handled separately below
            if col in ('label' + ref_group_flag, 'score' + ref_group_flag):
                continue
            metric_key = col.replace(ref_group_flag, '')
            # attr_table is already restricted to this attribute, so the
            # second attribute_name filter the original applied is redundant.
            attr_refs[metric_key] = attr_table[col].min()

        if label_score_ref:
            # PEP 8: use a def, not a lambda bound to a name.
            def is_valid_label_ref(label):
                return label + ref_group_flag in disparities_table.columns

            if not is_valid_label_ref(label_score_ref):
                try:
                    label_score_ref = next(measure for measure in specific_measures
                                           if is_valid_label_ref(measure))
                    logging.warning("The specified reference measure for label "
                                    "value and score is not included in the "
                                    f"data frame. Using '{label_score_ref}' "
                                    "reference group as label value and score "
                                    "reference instead.")
                except StopIteration:
                    raise ValueError("None of metrics passed in 'specific_measures' are in dataframe.")

            attr_refs['label_value'] = attr_refs[label_score_ref]
            attr_refs['score'] = attr_refs[label_score_ref]

        ref_groups[attribute] = attr_refs
    return ref_groups
# Plot() class
class Plot(object):
"""
Plotting object allows for visualization of absolute group bias metrics and
relative disparities calculated by Aequitas Group(), Bias(), and Fairness()
class instances.
"""
default_absolute_metrics = ('pprev', 'ppr', 'fdr', 'for', 'fpr', 'fnr')
default_disparities = ('pprev_disparity', 'ppr_disparity',
'fdr_disparity', 'for_disparity',
'fpr_disparity', 'fnr_disparity')
# Define mapping for conditional coloring based on fairness
# determinations
_metric_parity_mapping = {
'ppr_disparity': 'Statistical Parity',
'pprev_disparity': 'Impact Parity',
'precision_disparity': 'Precision Parity',
'fdr_disparity': 'FDR Parity',
'for_disparity': 'FOR Parity',
'fpr_disparity': 'FPR Parity',
'fnr_disparity': 'FNR Parity',
'tpr_disparity': 'TPR Parity',
'tnr_disparity': 'TNR Parity',
'npv_disparity': 'NPV Parity',
'ppr': 'Statistical Parity',
'pprev': 'Impact Parity',
'precision': 'Precision Parity',
'fdr': 'FDR Parity',
'for': 'FOR Parity',
'fpr': 'FPR Parity',
'fnr': 'FNR Parity',
'tpr': 'TPR Parity',
'tnr': 'TNR Parity',
'npv': 'NPV Parity'
}
_significance_disparity_mapping = {
'ppr_disparity': 'ppr_significance',
'pprev_disparity': 'pprev_significance',
'precision_disparity': 'precision_significance',
'fdr_disparity': 'fdr_significance',
'for_disparity': 'fnr_significance',
'fpr_disparity': 'fpr_significance',
'fnr_disparity': 'fnr_significance',
'tpr_disparity': 'tpr_significance',
'tnr_disparity': 'tnr_significance',
'npv_disparity': 'npv_significance'
}
    def __init__(self, key_metrics=default_absolute_metrics,
                 key_disparities=default_disparities):
        """
        :param key_metrics: Set default absolute group metrics for all subplots
        :param key_disparities: Set default disparity metrics for all subplots
        """
        # Defaults are immutable tuples, so sharing them across instances is safe.
        self.key_metrics = key_metrics
        self.key_disparities = key_disparities
    @staticmethod
    def _nearest_quartile(x):
        '''
        Return the smallest quartile (multiple of 0.25) strictly greater
        than x. Despite the name, this never returns x itself: a value
        already on a quartile boundary maps to the next quartile up.
        '''
        # round(x * 4) / 4 gives the closest multiple of 0.25 (within 0.125).
        rounded = round(x * 4) / 4
        if rounded > x:
            return rounded
        else:
            # rounded was at or below x; step up one quartile.
            return rounded + 1 / 4
@staticmethod
def _check_brightness(rgb_tuple):
'''
Determine the brightness of background color in a plot.
Adapted from https://trendct.org/2016/01/22/how-to-choose-a-label-color-to-contrast-with-background/
'''
r, g, b = rgb_tuple
return (r * 299 + g * 587 + b * 114) / 1000
@classmethod
def _brightness_threshold(cls, rgb_tuple, min_brightness, light_color,
dark_color='black'):
'''
Determine ideal plot label color (light or dark) based on brightness of
background color based on a given brightness threshold.
Adapted from https://trendct.org/2016/01/22/how-to-choose-a-label-color-to-contrast-with-background/
'''
if cls._check_brightness(rgb_tuple) > min_brightness:
return dark_color
return light_color
@staticmethod
def _truncate_colormap(orig_cmap, min_value=0.0, max_value=1.0, num_colors=100):
'''
Use only part of a colormap (min_value to max_value) across a given number
of partitions.
:param orig_cmap: an existing Matplotlib colormap.
:param min_value: desired minimum value (0.0 to 1.0) for truncated
colormap. Default is 0.0.
:param max_value: desired maximum value (0.0 to 1.0) for truncated
colormap. Default is 1.0.
:param num_colors: number of colors to spread colormap gradient across
before truncating. Default is 100.
:return: Truncated color map
Attribution: Adapted from: https://stackoverflow.com/questions/
18926031/how-to-extract-a-subset-of-a-colormap-as-a-new-colormap-in-matplotlib
'''
cmap = plt.get_cmap(orig_cmap)
new_cmap = matplotlib.colors.LinearSegmentedColormap.from_list(
'trunc({n},{a:.2f},{b: .2f})'.format(n=cmap.name, a=min_value, b=max_value),
cmap(np.linspace(min_value, max_value, num_colors)))
return new_cmap
    @classmethod
    def _locate_ref_group_indices(cls, disparities_table, attribute_name, group_metric,
                                  ref_group_flag='_ref_group_value'):
        """
        Finds relative index (row) of reference group value for a given metric.

        :param disparities_table: a disparity table. Output of bias.get_disparity or
            fairness.get_fairness functions.
        :param attribute_name: the attribute to plot metric against. Must be a column
            in the disparities_table.
        :param group_metric: the metric to plot. Must be a column in the
            disparities_table.
        :param ref_group_flag: string indicating column indicates reference group
            flag value. Default is '_ref_group_value'.

        :return: tuple (relative_ind, ref_group_name): the positional index of
            the reference-group row in disparities_table, and the reference
            group's attribute value.
        """
        df_models = disparities_table.model_id.unique()
        if len(df_models) == 1:
            model_id = df_models[0]
        else:
            raise ValueError('This method requires one and only one model_id in the disparities table. '
                             'Tip: check disparities_table.model_id.unique() should be just one element list.')

        # get absolute metric name from passed group metric (vs. a disparity name)
        abs_metric = group_metric.replace('_disparity', '')

        # Map each attribute to its per-metric reference groups, then look up
        # the one for this attribute/metric pair.
        all_ref_groups = assemble_ref_groups(disparities_table, ref_group_flag)
        ref_group_name = all_ref_groups[attribute_name][abs_metric]

        # get index label for the row associated with the reference group for
        # that model (attribute_name + attribute_value + model_id should be unique)
        ind = list(disparities_table.loc[(disparities_table['attribute_name'] == attribute_name) &
                                         (disparities_table['attribute_value'] == ref_group_name) &
                                         (disparities_table['model_id'] == model_id)].index)

        # there should only ever be one item in list, but JIC, select first
        if len(ind) == 1:
            idx = ind[0]
        else:
            raise ValueError(f"failed to find only one index for the reference "
                             f"group for attribute_name = {attribute_name} and "
                             f"attribute_value of reference = {ref_group_name} "
                             f"and model_id={model_id}")

        # Convert the index label into a positional (integer) row number.
        relative_ind = disparities_table.index.get_loc(idx)
        return relative_ind, ref_group_name
    @staticmethod
    def iterate_subplots(axs, ncols, rows, ax_col, ax_row):
        """
        Advance to the next position in a subplot grid and return that Axes.

        :param axs: Axes array returned by plt.subplots (1-D or 2-D).
        :param ncols: number of columns in the grid.
        :param rows: number of rows in the grid.
        :param ax_col: current column index (incremented before use).
        :param ax_row: current row index.
        :return: tuple (current_subplot, ax_row, ax_col) with updated indices.
        """
        ax_col += 1
        # Wrap to the start of the next row once the column index passes the
        # last column. (For ncols == 1 this branch never fires, since
        # (ax_col + 1) % 1 is always 0; the elif below advances the row.)
        if (ax_col >= ncols) and ((ax_col + 1) % ncols) == 1:
            ax_row += 1
            ax_col = 0
        if rows == 1:
            # Single-row grid: axs is 1-D, indexed by column.
            current_subplot = axs[ax_col]
        elif ncols == 1:
            # Single-column grid: axs is 1-D, indexed by row.
            current_subplot = axs[ax_row]
            ax_row += 1
        else:
            current_subplot = axs[ax_row, ax_col]
        return current_subplot, ax_row, ax_col
@staticmethod
def generate_axes(ncols, num_metrics, total_plot_width, sharey, hspace=0.25, indiv_height=6):
rows = math.ceil(num_metrics / ncols)
if ncols == 1 or (num_metrics % ncols == 0):
axes_to_remove = 0
else:
axes_to_remove = ncols - (num_metrics % ncols)
if not (0 < rows <= num_metrics):
raise ValueError(
"Plot must have at least one row. Please update number of columns"
" ('ncols') or check that at least one metric is specified in "
"'metrics'.")
if not (0 < ncols <= num_metrics):
raise ValueError(
"Plot must have at least one column, and no more columns than "
"subplots. Please update number of columns ('ncols') or check "
"that at least one metric is specified in 'metrics'.")
total_plot_width = total_plot_width
fig, axs = plt.subplots(nrows=rows, ncols=ncols,
figsize=(total_plot_width, indiv_height * rows),
sharey=sharey,
gridspec_kw={'wspace': 0.075, 'hspace': hspace})
return fig, axs, rows, axes_to_remove
    def multimodel_plot_group_metric(self, group_table, group_metric,
                                     ncols=3, title=True, label_dict=None,
                                     show_figure=True, selected_models=None,
                                     min_group_size = None):
        """
        Plot a single group metric across all attribute groups for multiple models.

        :param group_table: group table. Output of of group.get_crosstabs() or
            bias.get_disparity functions.
        :param group_metric: the metric to plot. Must be a column in the group_table.
        :param ncols: The number of subplots to plot per row visualization figure.
            Default is 3.
        :param title: whether to include a title in visualizations. Default is True.
        :param label_dict: optional, dictionary of replacement labels for data.
            Default is None.
        :param show_figure: Whether to show figure (plt.show()). Default is True.
        :param selected_models: which models to visualize. Default is all models
            in group_table.
        :param min_group_size: minimum size for groups to include in visualization
            (as a proportion of total sample).

        :return: A Matplotlib figure
        """
        # requirement: at least two model_id values
        df_models = self._check_multiple_models(group_table, method_table_name='group_table')
        if not selected_models:
            selected_models = df_models

        plot_table = group_table.loc[group_table['model_id'].isin(selected_models)]

        # One subplot per model found in the table.
        num_metrics = len(df_models)
        total_plot_width = 25

        fig, axs, rows, axes_to_remove = self.generate_axes(ncols=ncols, num_metrics=num_metrics,
                                                            total_plot_width=total_plot_width, sharey=True)

        # set a different distribution to be plotted in each subplot
        ax_col = -1
        ax_row = 0
        col_num = 0

        # NOTE(review): iteration is over df_models (all models), not
        # selected_models; models excluded via selected_models still consume a
        # subplot even though their rows were filtered out of plot_table --
        # confirm this is intended.
        for model in df_models:
            if plot_table.loc[plot_table['model_id'] == model, group_metric].isnull().all():
                # Nothing plottable for this model; skip it and free its axis.
                logging.warning(f"Cannot plot metric '{group_metric}', only NaN values.")
                axes_to_remove += 1
                continue

            elif plot_table.loc[plot_table['model_id'] == model, group_metric].isnull().any():
                # determine which group(s) have missing values
                missing = ", ".join(plot_table.loc[(plot_table['model_id'] == model) &
                                                   (plot_table[
                                                       group_metric].isnull()), 'attribute_value'].values.tolist())
                attr = ", ".join(plot_table.loc[(plot_table['model_id'] == model) &
                                                (plot_table[
                                                    group_metric].isnull()), 'attribute_name'].values.tolist())
                logging.warning(f"Model {model} '{attr}' group '{missing}' value for metric "
                                f"'{group_metric}' is NA, group not included in visualization.")

                # Drop NA rows for this metric, then subset to the model.
                plot_table = plot_table.dropna(axis=0, subset=[group_metric])
                model_table = plot_table.loc[plot_table['model_id'] == model]
            else:
                model_table = plot_table.loc[plot_table['model_id'] == model]

            current_subplot, ax_row, ax_col = self.iterate_subplots(axs, ncols, rows, ax_col, ax_row)

            self.plot_group_metric(group_table=model_table, group_metric=group_metric,
                                   ax=current_subplot, ax_lim=None, title=title, label_dict=label_dict,
                                   min_group_size = min_group_size)
            if title:
                current_subplot.set_title(f"{group_metric.upper()} (Model {model})", fontsize=20)

            col_num += 1

        # disable axes not being used
        if axes_to_remove > 0:
            for i in np.arange(axes_to_remove):
                axs[-1, -(i + 1)].axis('off')

        if show_figure:
            plt.show()
        return fig
def multimodel_plot_fairness_group(self, fairness_table, group_metric,
ncols=3, title=True, label_dict=None, show_figure=True,
selected_models=None, min_group_size = None):
"""
Plot a single group metric colored by parity determination across all
attribute groups for multiple models.
:param fairness_table: a fairness table. Output of a Fairness.get_fairness
function.
:param group_metric: the metric to plot. Must be a column in the group_table.
:param ncols: The number of subplots to plot per row visualization
figure.
Default is 3.
:param title: whether to include a title in visualizations. Default is True.
:param label_dict: optional, dictionary of replacement labels for data.
Default is None.
:param show_figure: Whether to show figure (plt.show()). Default is
True.
:param selected_models: which models to visualize. Default is all models in fairness_table.
:param min_group_size: minimum size for groups to include in visualization
(as a proportion of total sample)
:return: A Matplotlib axis
"""
parity_list = list(fairness_table.columns[fairness_table.columns.str.contains(' Parity')])
if len(parity_list) < 1:
raise ValueError("multimodel_plot_fairness_disparity: No parity determinations found in fairness_table.")
# requires at least 2 models
df_models = self._check_multiple_models(fairness_table, method_table_name='fairness_table')
if not selected_models:
selected_models = df_models
plot_table = fairness_table.loc[fairness_table['model_id'].isin(selected_models)]
num_metrics = len(df_models)
total_plot_width = 25
fig, axs, rows, axes_to_remove = self.generate_axes(ncols=ncols, num_metrics=num_metrics,
total_plot_width=total_plot_width, sharey=True)
# set a different distribution to be plotted in each subplot
ax_col = -1
ax_row = 0
col_num = 0
viz_title = \
f"MODEL COMPARISON: {group_metric.replace('_', ' ').upper()} PARITY"
for model in df_models:
if plot_table.loc[plot_table['model_id'] == model, group_metric].isnull().all():
logging.warning(f"Cannot plot metric '{group_metric}', only NaN values.")
axes_to_remove += 1
continue
elif plot_table.loc[plot_table['model_id'] == model, group_metric].isnull().any():
# determine which group(s) have missing values
missing = ", ".join(plot_table.loc[(plot_table['model_id'] == model) &
(plot_table[
group_metric].isnull()), 'attribute_value'].values.tolist())
attr = ", ".join(plot_table.loc[(plot_table['model_id'] == model) &
(plot_table[
group_metric].isnull()), 'attribute_name'].values.tolist())
logging.warning(f"Model {model} '{attr}' group '{missing}' value for metric "
f"'{group_metric}' is NA, group not included in visualization.")
plot_table = plot_table.dropna(axis=0, subset=[group_metric])
model_table = plot_table.loc[plot_table['model_id'] == model]
else:
model_table = plot_table.loc[plot_table['model_id'] == model]
current_subplot, ax_row, ax_col = self.iterate_subplots(axs, ncols, rows, ax_col, ax_row)
self.plot_fairness_group(fairness_table=model_table, group_metric=group_metric,
ax=current_subplot, ax_lim=None, title=title, label_dict=label_dict,
min_group_size = min_group_size)
if title:
current_subplot.set_title(f"{group_metric.upper()} (Model {model})", fontsize=20)
col_num += 1
# disable axes not being used
if axes_to_remove > 0:
for i in np.arange(axes_to_remove):
axs[-1, -(i + 1)].axis('off')
if title:
plt.suptitle(f"{viz_title}", fontsize=25, fontweight="bold")
if show_figure:
plt.show()
return fig
    def multimodel_plot_disparity(self, disparity_table, group_metric,
                                  attribute_name, color_mapping=None,
                                  label_dict=None, title=True, show_figure=True,
                                  highlight_fairness=True, selected_models=None,
                                  min_group_size=None, significance_alpha=0.05):
        """
        Create treemaps to compare multiple model values for a single bias
        disparity metric across attribute groups.

        Adapted from https://plot.ly/python/treemaps/,
        https://gist.github.com/gVallverdu/0b446d0061a785c808dbe79262a37eea,
        and https://fcpython.com/visualisation/python-treemaps-squarify-matplotlib

        :param disparity_table: a disparity table. Output of bias.get_disparity or
            fairness.get_fairness function.
        :param group_metric: the metric to plot. Must be a column in the
            disparity_table.
        :param attribute_name: which attribute to plot group_metric across.
        :param color_mapping: matplotlib colormapping for treemap value boxes.
        :param label_dict: optional, dictionary of replacement labels for data.
            Default is None.
        :param title: whether to include a title in visualizations. Default is True.
        :param highlight_fairness: whether to highlight treemaps by disparity
            magnitude, or by related fairness determination.
        :param show_figure: Whether to show figure (plt.show()). Default is True.
        :param selected_models: which models to visualize. Default is all models
            in disparity_table.
        :param min_group_size: minimum proportion of total group size (all data)
            a population group must meet in order to be included in bias metric
            visualization.
        :param significance_alpha: statistical significance level. Used to
            determine visual representation of significance (number of
            asterisks on treemap).

        :return: A Matplotlib figure
        """
        # requires at least 2 models
        df_models = self._check_multiple_models(disparity_table, method_table_name='disparity_table')
        if not selected_models:
            selected_models = df_models

        plot_table = disparity_table.loc[disparity_table['model_id'].isin(selected_models)]

        # Accept either a bare metric name ('fpr') or an explicit disparity
        # column name ('fpr_disparity').
        if group_metric + '_disparity' not in plot_table.columns:
            related_disparity = group_metric
        else:
            related_disparity = group_metric + '_disparity'

        viz_title = \
            f"MODEL COMPARISON: {related_disparity.replace('_', ' ').upper()}"

        num_metrics = len(df_models)
        # Treemap comparison always uses a fixed 3-column layout.
        ncols = 3
        total_plot_width = 25

        fig, axs, rows, axes_to_remove = self.generate_axes(
            ncols=ncols, num_metrics=num_metrics, total_plot_width=total_plot_width,
            sharey=True, hspace=0.5, indiv_height=8)

        # set a different distribution to be plotted in each subplot
        ax_col = -1
        ax_row = 0
        col_num = 0

        # NOTE(review): iteration is over df_models (all models), not
        # selected_models -- confirm models filtered out of plot_table should
        # still consume a subplot.
        for model in df_models:
            model_table = plot_table.loc[plot_table['model_id'] == model]

            current_subplot, ax_row, ax_col = self.iterate_subplots(axs, ncols, rows, ax_col, ax_row)

            self.plot_disparity(model_table, group_metric=group_metric,
                                attribute_name=attribute_name, color_mapping=color_mapping,
                                ax=current_subplot, fig=fig, label_dict=label_dict,
                                title=title, highlight_fairness=highlight_fairness,
                                min_group_size=min_group_size,
                                significance_alpha=significance_alpha)
            if title:
                current_subplot.set_title(f"{related_disparity.replace('_', ' ').upper()}: {attribute_name.upper()} (Model {model})",
                                          fontsize=23)
            col_num += 1

        # disable axes not being used
        if axes_to_remove > 0:
            for i in np.arange(axes_to_remove):
                axs[-1, -(i + 1)].axis('off')

        if title:
            plt.suptitle(f"{viz_title}", fontsize=25, fontweight="bold")

        if show_figure:
            plt.show()
        return fig
    def multimodel_plot_fairness_disparity(self, fairness_table, group_metric,
                                           attribute_name, label_dict=None,
                                           title=True, show_figure=True, selected_models=None,
                                           min_group_size=None, significance_alpha=0.05):
        """
        Create treemaps to compare multiple model fairness determinations for a
        single bias disparity metric across attribute groups.

        Thin wrapper around multimodel_plot_disparity with
        highlight_fairness=True, so treemaps are colored by fairness
        determination instead of disparity magnitude.

        :param fairness_table: a fairness table. Output of a Fairness.get_fairness
            function.
        :param group_metric: the metric to plot. Must be a column in the
            disparity_table.
        :param attribute_name: which attribute to plot group_metric across.
        :param label_dict: optional, dictionary of replacement labels for data.
            Default is None.
        :param title: whether to include a title in visualizations. Default is True.
        :param show_figure: Whether to show figure (plt.show()). Default is True.
        :param selected_models: which models to visualize. Default is all models
            in fairness_table.
        :param min_group_size: minimum proportion of total group size (all data)
            a population group must meet in order to be included in bias metric
            visualization.
        :param significance_alpha: statistical significance level. Used to
            determine visual representation of significance (number of
            asterisks on treemap).

        :return: A Matplotlib figure
        """
        return self.multimodel_plot_disparity(
            disparity_table=fairness_table, group_metric=group_metric,
            attribute_name=attribute_name, label_dict=label_dict, title=title,
            show_figure=show_figure, selected_models=selected_models,
            highlight_fairness=True, min_group_size=min_group_size,
            significance_alpha=significance_alpha)
@classmethod
def _check_model_id(cls, df, method_table_name):
"""
Ensure single model in df, return model_id if so
"""
if 'model_id' in df.columns:
df_models = df.model_id.unique()
if len(df_models) != 1:
raise ValueError('This method requires one and only one model_id in the dataframe. '
f'Tip: Check that {method_table_name}.model_id.unique() returns a one-element array. ')
else:
return df_models[0]
else:
return 0
@classmethod
def _check_multiple_models(cls, df, method_table_name):
"""
Ensure multiple models in df, return model_ids if so
"""
if 'model_id' in df.columns:
df_models = df.model_id.unique()
if len(df_models) < 2:
raise ValueError("This method requires at least two distinct 'model_id' values "
f"in the dataframe. Tip: Check that "
f"{method_table_name}.model_id.unique() returns more than one element.")
else:
return df_models
else:
raise ValueError("This method requires at least two distinct 'model_id' values "
f"in the dataframe. Tip: Check that a 'model_id column exists in "
f"'{method_table_name}'.")
    def plot_group_metric(self, group_table, group_metric, ax=None, ax_lim=None,
                          title=True, label_dict=None, min_group_size = None):
        """
        Plot a single group metric across all attribute groups.

        :param group_table: group table. Output of of group.get_crosstabs() or
            bias.get_disparity functions.
        :param group_metric: the metric to plot. Must be a column in the group_table.
        :param ax: a matplotlib Axis. If not passed, a new figure will be created.
        :param ax_lim: upper x-axis limit; defaults to 1 (absolute metrics are
            proportions).
        :param title: whether to include a title in visualizations. Default is True.
        :param label_dict: optional, dictionary of replacement labels for data.
            Default is None.
        :param min_group_size: minimum size for groups to include in visualization
            (as a proportion of total sample)

        :return: A Matplotlib axis
        """
        model_id = self._check_model_id(df=group_table, method_table_name='group_table')

        if group_metric not in group_table.columns:
            raise ValueError(f"Specified disparity metric '{group_metric}' not "
                             f"in 'group_table'.")

        if group_table[group_metric].isnull().all():
            raise ValueError(f"Cannot plot {group_metric}, has NaN values.")

        if ax is None:
            (_fig, ax) = plt.subplots(figsize=(10, 5))

        height_of_bar = 1
        attribute_names = group_table.attribute_name.unique()
        tick_indices = []
        next_bar_height = 0

        if min_group_size:
            if min_group_size > (group_table.group_size.max() / group_table.group_size.sum()):
                raise ValueError(f"'min_group_size' proportion specified: '{min_group_size}' "
                                 f"is larger than all groups in sample.")

            # Convert the proportion to an absolute count and filter.
            min_size = min_group_size * group_table.group_size.sum()
            group_table = group_table.loc[group_table['group_size'] >= min_size]

        # Metric values consumed one-by-one below to position text labels.
        label_position_values = collections.deque(group_table[group_metric].values)

        # Color bars by group size using a truncated (lighter) copper colormap.
        lighter_coppers = self._truncate_colormap('copper_r', min_value=0,
                                                  max_value=0.65)

        norm = matplotlib.colors.Normalize(vmin=group_table['group_size'].min(),
                                           vmax=group_table['group_size'].max())
        mapping = matplotlib.cm.ScalarMappable(norm=norm, cmap=lighter_coppers)

        # Lock absolute value metric plot x-axis to (0, 1)
        if not ax_lim:
            ax_lim = 1
        ax.set_xlim(0, ax_lim)

        for attribute_name in attribute_names:
            attribute_data = group_table.loc[
                (group_table['attribute_name'] == attribute_name)]
            values = attribute_data[group_metric].values
            grp_sizes = attribute_data['group_size'].values

            # One horizontal bar per attribute value, stacked vertically.
            attribute_indices = np.arange(next_bar_height,
                                          next_bar_height + attribute_data.shape[0],
                                          step=height_of_bar)
            # Center the attribute's y-axis tick within its band of bars.
            attribute_tick_location = float((min(attribute_indices) + max(attribute_indices) + height_of_bar)) / 2

            h_attribute = ax.barh(attribute_indices,
                                  width=values,
                                  # label=list(attribute_data['attribute_value'].values),
                                  align='edge', edgecolor='grey')

            label_colors = []
            min_brightness = 0.55

            # Color each bar by group size, and pick a contrasting label color.
            for bar, g_size in zip(h_attribute, grp_sizes):
                my_col = mapping.to_rgba(g_size)
                bar.set_color(my_col)
                label_colors.append(self._brightness_threshold(
                    rgb_tuple=my_col[:3], min_brightness=min_brightness,
                    light_color=(1, 1, 1, 1)))

            if label_dict:
                labels = [label_dict.get(label, label) for label in
                          attribute_data['attribute_value'].values]
            else:
                labels = attribute_data['attribute_value'].values

            for y, label, value, text_color, g_size in zip(attribute_indices, labels,
                                                           values, label_colors,
                                                           grp_sizes):
                next_position = label_position_values.popleft()
                group_label = f"{label} (Num: {g_size:,})"

                # Rough estimate of rendered character width in axis units,
                # used to decide where labels fit.
                if ax_lim < 3:
                    CHAR_PLACEHOLDER = 0.03
                else:
                    CHAR_PLACEHOLDER = 0.05

                label_length = len(group_label) * CHAR_PLACEHOLDER
                max_val_length = 7 * CHAR_PLACEHOLDER
                indent_length = ax_lim * 0.025

                # bar long enough for label, enough space after bar for value
                if ((indent_length + label_length) < (next_position - indent_length)) and (
                        (next_position + indent_length + max_val_length) < (
                        ax_lim - indent_length)):
                    ax.text(next_position + indent_length, y + float(height_of_bar) / 2,
                            f"{value:.2f}", fontsize=12, verticalalignment='top')
                    ax.text(indent_length, y + float(height_of_bar) / 2,
                            group_label, fontsize=11, verticalalignment='top',
                            color=text_color)

                # case when bar too long for labels after bar, print all text in bar
                elif (next_position + indent_length + max_val_length) > (
                        ax_lim - indent_length):
                    ax.text(indent_length, y + float(height_of_bar) / 2,
                            f"{group_label}, {value:.2f}", fontsize=11,
                            verticalalignment='top', color=text_color)

                # case when bar too small for labels inside bar, print after bar
                else:
                    ax.text(next_position + indent_length, y + float(
                        height_of_bar) / 2,
                            f"{group_label}, {value:.2f}", fontsize=12,
                            verticalalignment='top')

            tick_indices.append((attribute_name, attribute_tick_location))
            # Leave one bar-height of blank space between attribute bands.
            next_bar_height = max(attribute_indices) + 2 * height_of_bar

        ax.yaxis.set_ticks(list(map(lambda x: x[1], tick_indices)))
        ax.yaxis.set_ticklabels(list(map(lambda x: x[0], tick_indices)), fontsize=14)

        ax.set_axisbelow(True)
        ax.xaxis.grid(color='lightgray', which='major', linestyle='dashed')
        ax.set_xlabel("Absolute Metric Magnitude")

        if title:
            ax.set_title(f"{group_metric.upper()} (Model {model_id})", fontsize=20)

        return ax
def plot_disparity(self, disparity_table, group_metric, attribute_name,
color_mapping=None, ax=None, fig=None,
label_dict=None, title=True,
highlight_fairness=False, min_group_size=None,
significance_alpha=0.05):
"""
Create treemap based on a single bias disparity metric across attribute
groups.
Adapted from https://plot.ly/python/treemaps/,
https://gist.github.com/gVallverdu/0b446d0061a785c808dbe79262a37eea,
and https://fcpython.com/visualisation/python-treemaps-squarify-matplotlib
:param disparity_table: a disparity table. Output of bias.get_disparity or
fairness.get_fairness function.
:param group_metric: the metric to plot. Must be a column in the
disparity_table.
:param attribute_name: which attribute to plot group_metric across.
:param color_mapping: matplotlib colormapping for treemap value boxes.
:param ax: a matplotlib Axis. If not passed, a new figure will be created.
:param fig: a matplotlib Figure. If not passed, a new figure will be created.
:param label_dict: optional, dictionary of replacement labels for data.
Default is None.
:param title: whether to include a title in visualizations. Default is True.
:param highlight_fairness: whether to highlight treemaps by disparity
magnitude, or by related fairness determination.
:param min_group_size: minimum proportion of total group size (all data)
a population group must meet in order to be included in bias metric
visualization
:param significance_alpha: statistical significance level. Used to
determine visual representation of significance (number of
asterisks on treemap).
:return: A Matplotlib axis
"""
# Use matplotlib to truncate colormap, scale metric values
# between the min and max, then assign colors to individual values
model_id = self._check_model_id(df=disparity_table, method_table_name='disparities_table')
table_columns = set(disparity_table.columns)
if group_metric not in table_columns:
raise ValueError(f"Specified disparity metric {group_metric} not in 'disparity_table'.")
attribute_table = \
disparity_table.loc[disparity_table['attribute_name'] == attribute_name]
# sort by group size, as box size is indicative of group size
sorted_df = attribute_table.sort_values('group_size', ascending=False)
x = 0.
y = 0.
width = 100.
height = 100.
ref_group_rel_idx, ref_group_name = \
self._locate_ref_group_indices(disparities_table=sorted_df,
attribute_name=attribute_name,
group_metric=group_metric)
if min_group_size:
if min_group_size > (disparity_table.group_size.max() /
disparity_table.group_size.sum()):
raise ValueError(f"'min_group_size' proportion specified: '{min_group_size}' "
f"is larger than all groups in sample.")
min_size = min_group_size * disparity_table.group_size.sum()
# raise warning if minimum group size specified would exclude
# reference group
if any(sorted_df.loc[(sorted_df['attribute_value']==ref_group_name),
['group_size']].values < min_size):
logging.warning(
f"Reference group size is smaller than 'min_group_size' proportion "
f"specified: '{min_group_size}'. Reference group '{ref_group_name}' "
f"was not excluded.")
sorted_df = \
sorted_df.loc[(sorted_df['group_size'] >= min_size) |
(sorted_df['attribute_value'] == ref_group_name)]
# select group size as values for size of boxes
values = sorted_df.loc[:, 'group_size']
# get new index for ref group
ref_group_rel_idx, _ = \
self._locate_ref_group_indices(disparities_table=sorted_df,
attribute_name=attribute_name,
group_metric=group_metric)
# labels for squares in tree map:
# label should always be disparity value (but boxes visualized should be
# always be the metric absolute value capped between 0.1x ref group and
# 10x ref group)
if group_metric + '_disparity' not in attribute_table.columns:
related_disparity = group_metric
else:
related_disparity = group_metric + '_disparity'
if highlight_fairness:
if not len(table_columns.intersection(self._metric_parity_mapping.values())) > 1:
raise ValueError("Data table must include at least one fairness "
"determination to visualize metric parity.")
# apply red for "False" fairness determinations and green for "True"
# determinations
cb_green = '#1b7837'
cb_red = '#a50026'
parity = self._metric_parity_mapping[group_metric]
if (parity not in table_columns):
raise ValueError(
f"Related fairness determination for {group_metric} must be "
f"included in data table to color visualization based on "
f"metric fairness.")
clrs = [cb_green if val else cb_red for val in sorted_df[parity]]
else:
aq_palette = sns.diverging_palette(225, 35, sep=10, as_cmap=True)
if not color_mapping:
norm = matplotlib.colors.Normalize(vmin=0, vmax=2)
color_mapping = matplotlib.cm.ScalarMappable(norm=norm, cmap=aq_palette)
clrs = \
[color_mapping.to_rgba(val) for val in sorted_df[related_disparity]]
# color reference group grey
clrs[ref_group_rel_idx] = '#D3D3D3'
compare_value = values.iloc[ref_group_rel_idx]
scaled_values = [(0.1 * compare_value) if val < (0.1 * compare_value) else
(10 * compare_value) if val >= (10 * compare_value) else
val for val in values]
label_values = \
["(Ref)" if attr_val == ref_group_name else
f"{disp:.2f}" for attr_val, disp in
zip(sorted_df['attribute_value'], sorted_df[related_disparity]) ]
if label_dict:
labels = \
[label_dict.get(label, label) for label in sorted_df['attribute_value']]
else:
labels = sorted_df['attribute_value'].values
# if df includes significance columns, add stars to indicate significance
if self._significance_disparity_mapping[related_disparity] in sorted_df.columns:
# truncated_signif_mapping = {k: v for k,v in self._significance_disparity_mapping.items() if v in sorted_df.columns}
if sorted_df.columns[
sorted_df.columns.str.contains('_significance')].value_counts().sum() > 0:
# unmasked significance
# find indices where related significance have smaller value than significance_alpha
if np.issubdtype(
sorted_df[
self._significance_disparity_mapping[related_disparity]].dtype,
# truncated_signif_mapping[related_disparity]].dtype,
np.number):
to_star = sorted_df.loc[
sorted_df[
self._significance_disparity_mapping[related_disparity]] < significance_alpha].index.tolist()
# truncated_signif_mapping[related_disparity]] < significance_alpha].index.tolist()
# masked significance
# find indices where attr values have True value for each of those two columns,
else:
to_star = sorted_df.loc[
sorted_df[
self._significance_disparity_mapping[related_disparity]] > 0].index.tolist()
# truncated_signif_mapping[related_disparity]] > 0].index.tolist()
# add stars to label value where significant
for idx in to_star:
# convert idx location to relative index in sorted df and label_values list
idx_adj = sorted_df.index.get_loc(idx)
# star significant disparities in visualizations based on significance level
if 0.10 >= significance_alpha > 0.05:
significance_stars = '*'
elif 0.05 >= significance_alpha > 0.01:
significance_stars = '**'
elif significance_alpha <= 0.01:
significance_stars = '***'
else:
significance_stars = ''
label_values[idx_adj] = label_values[idx_adj] + significance_stars
normed = sf.normalize_sizes(scaled_values, width, height)
padded_rects = sf.padded_squarify(normed, x, y, width, height)
# make plot
if not (ax and fig):
fig, ax = plt.subplots(figsize=(5, 4))
ax = sf.squarify_plot_rects(padded_rects, color=clrs, labels=labels,
values=label_values, ax=ax, alpha=0.8,
acronyms=False)
if title:
ax.set_title(f"{related_disparity.replace('_', ' ').upper()}: {attribute_name.upper()}",
fontsize=23)
if not highlight_fairness:
# create dummy invisible image with a color map to leverage for color bar
img = plt.imshow([[0, 2]], cmap=aq_palette, alpha=0.8)
img.set_visible(False)
fig.colorbar(img, orientation="vertical", shrink=.96, ax=ax)
# Remove axes and display the plot
ax.axis('off')
    def plot_fairness_group(self, fairness_table, group_metric, ax=None,
                            ax_lim=None, title=False, label_dict=None,
                            min_group_size=None):
        '''
        This function plots absolute group metrics as indicated by the config file,
        colored based on calculated parity: green bars passed the parity test,
        red bars failed it.

        :param fairness_table: fairness table. Output of a Fairness.get_fairness
            function.
        :param group_metric: the fairness metric to plot. Must be a column in the fairness_table.
        :param ax: a matplotlib Axis. If not passed a new figure will be created.
        :param ax_lim: maximum value on x-axis, used to match axes across subplots
            when plotting multiple metrics. Default is None.
        :param title: whether to include a title in visualizations. Default is True.
        :param label_dict: optional dictionary of replacement values for data.
            Default is None.
        :param min_group_size: minimum proportion of total group size (all data)
            a population group must meet in order to be included in fairness
            visualization
        :raises ValueError: if group_metric is missing from the table, contains
            NaN values, or if min_group_size exceeds every group's sample share.
        :return: A Matplotlib axis
        '''
        model_id = self._check_model_id(df=fairness_table, method_table_name='fairness_table')
        if group_metric not in fairness_table.columns:
            raise ValueError(f"Specified disparity metric {group_metric} not "
                             f"in 'fairness_table'.")
        if fairness_table[group_metric].isnull().any():
            raise ValueError(f"Cannot plot {group_metric}, has NaN values.")

        if ax is None:
            fig, ax = plt.subplots(figsize=(10, 5))

        height_of_bar = 1
        attributes = fairness_table.attribute_name.unique()
        tick_indices = []
        next_bar_height = 0

        if min_group_size:
            if min_group_size > (fairness_table.group_size.max() / fairness_table.group_size.sum()):
                raise ValueError(f"'min_group_size' proportion specified: '{min_group_size}' "
                                 f"is larger than all groups in sample.")

            min_size = min_group_size * fairness_table.group_size.sum()
            fairness_table = fairness_table.loc[fairness_table['group_size'] >= min_size]

        # queue of metric values in table order; popped once per bar so label
        # positions stay aligned with rows across attribute sub-loops
        label_position_values = collections.deque(fairness_table[group_metric].values)

        # Lock absolute value metric plot x-axis to (0, 1)
        if not ax_lim:
            ax_lim = 1
        ax.set_xlim(0, ax_lim)

        # one horizontal bar group per attribute (e.g. race, sex, age bucket)
        for attribute in attributes:
            attribute_data = fairness_table.loc[
                fairness_table['attribute_name'] == attribute]
            values = attribute_data[group_metric].values
            grp_sizes = attribute_data['group_size'].values

            # apply red for "False" fairness determinations and green for "True"
            # determinations
            cb_green = '#1b7837'
            cb_red = '#a50026'

            parity = self._metric_parity_mapping[group_metric]
            parity_colors = [cb_green if val else
                             cb_red for val in attribute_data[parity]]

            # Set white text for red bars and black text for green bars
            label_colors = [(0, 0, 0, 1) if val == True else
                            (1, 1, 1, 1) for val in attribute_data[parity]]

            # vertical positions of this attribute's bars
            attribute_indices = \
                np.arange(next_bar_height, next_bar_height + attribute_data.shape[0],
                          step=height_of_bar)

            # y-axis tick is centered on the attribute's block of bars
            attribute_tick_location = \
                float((min(attribute_indices) + max(attribute_indices) +
                       height_of_bar)) / 2

            h_attribute = ax.barh(attribute_indices,
                                  width=values,
                                  color=parity_colors,
                                  align='edge', edgecolor='grey', alpha=0.8)

            if label_dict:
                labels = [label_dict.get(label, label) for label in
                          attribute_data['attribute_value'].values]
            else:
                labels = attribute_data['attribute_value'].values

            for y, label, value, text_color, g_size in zip(
                    attribute_indices, labels, values, label_colors,
                    grp_sizes):
                next_position = label_position_values.popleft()
                group_label = f"{label} (Num: {g_size:,})"

                # rough per-character width (in axis units) used to estimate
                # whether text fits inside/after the bar
                if ax_lim < 3:
                    CHAR_PLACEHOLDER = 0.03
                else:
                    # NOTE(review): sibling plot_group_metric uses 0.05 here —
                    # confirm whether 0.25 is intentional for this plot.
                    CHAR_PLACEHOLDER = 0.25

                label_length = len(group_label) * CHAR_PLACEHOLDER
                max_val_length = 7 * CHAR_PLACEHOLDER
                indent_length = ax_lim * 0.025

                # bar long enough for label, enough space after bar for value
                if ((indent_length + label_length) < (next_position - indent_length)) and (
                        (next_position + indent_length + max_val_length) < (
                        ax_lim - indent_length)):
                    ax.text(next_position + indent_length, y + float(height_of_bar) / 2,
                            f"{value:.2f}", fontsize=12, verticalalignment='top')
                    ax.text(indent_length, y + float(height_of_bar) / 2,
                            group_label, fontsize=11, verticalalignment='top',
                            color=text_color)

                # case when bar too long for labels after bar, print all text in bar
                elif (next_position + indent_length + max_val_length) > (
                        ax_lim - indent_length):
                    ax.text(indent_length, y + float(height_of_bar) / 2,
                            f"{group_label}, {value:.2f}", fontsize=11,
                            verticalalignment='top', color=text_color)

                # case when bar too small for labels inside bar, print all text
                # after bar
                else:
                    ax.text(next_position + indent_length,
                            y + float(height_of_bar) / 2,
                            f"{group_label}, {value:.2f}", fontsize=12,
                            verticalalignment='top')

            tick_indices.append((attribute, attribute_tick_location))
            # leave one bar-height of padding between attribute blocks
            next_bar_height = max(attribute_indices) + 2 * height_of_bar

        ax.yaxis.set_ticks(list(map(lambda x: x[1], tick_indices)))
        ax.yaxis.set_ticklabels(list(map(lambda x: x[0], tick_indices)), fontsize=14)
        ax.set_axisbelow(True)

        ax.xaxis.grid(color='lightgray', which='major', linestyle='dashed')
        ax.set_xlabel('Absolute Metric Magnitude')

        if title:
            ax.set_title(f"{group_metric.upper()}", fontsize=20)

        return ax
def plot_fairness_disparity(self, fairness_table, group_metric,
attribute_name, ax=None, fig=None,
title=True, min_group_size=None,
significance_alpha=0.05):
"""
Plot disparity metrics colored based on calculated disparity.
:param group_metric: the metric to plot. Must be a column in the disparity_table.
:param attribute_name: which attribute to plot group_metric across.
:param ax: a matplotlib Axis. If not passed, a new figure will be created.
:param fig: a matplotlib Figure. If not passed, a new figure will be created.
:param title: whether to include a title in visualizations. Default is True.
:param min_group_size: minimum proportion of total group size (all data)
a population group must meet in order to be included in bias metric
visualization
:param significance_alpha: statistical significance level. Used to
determine visual representation of significance (number of
asterisks on treemap).
:return: A Matplotlib axis
"""
return self.plot_disparity(disparity_table=fairness_table,
group_metric=group_metric,
attribute_name=attribute_name,
color_mapping=None,
ax=ax, fig=fig, highlight_fairness=True,
min_group_size=min_group_size, title=title,
significance_alpha=significance_alpha)
def _plot_multiple(self, data_table, plot_fcn, metrics=None, fillzeros=True,
title=True, ncols=3, label_dict=None, show_figure=True,
min_group_size=None):
"""
This function plots bar charts of absolute metrics indicated by config
file
:param data_table: output of group.get_crosstabs, bias.get_disparity, or
fairness.get_fairness functions
:param plot_fcn: the single-metric plotting function to use for subplots
:param metrics: which metric(s) to plot, or 'all.' If this value is
null, will plot the following absolute metrics (or related disparity
measures):
- Predicted Prevalence (pprev),
- Predicted Positive Rate (ppr),
- False Discovery Rate (fdr),
- False Omission Rate (for),
- False Positive Rate (fpr),
- False Negative Rate (fnr)
:param fillzeros: Should null values be filled with zeros. Default is
True.
:param title: Whether to display a title on each plot. Default is True.
:param ncols: The number of subplots to plot per row visualization
figure.
Default is 3.
:param label_dict: Optional dictionary of label replacements. Default is
None.
:param show_figure: Whether to show figure (plt.show()). Default is
True.
:param min_group_size: Minimum proportion of total group size (all data)
a population group must meet in order to be included in visualization
:return: Returns a figure
"""
model_id = self._check_model_id(df=data_table, method_table_name='data_table')
if fillzeros:
data_table = data_table.fillna(0)
if plot_fcn in [self.plot_fairness_group, self.plot_group_metric]:
if not metrics:
metrics = \
[met for met in self.key_metrics if met in data_table.columns]
elif metrics == 'all':
all_abs_metrics = ('pprev', 'ppr', 'fdr', 'for', 'fpr', 'fnr',
'tpr', 'tnr', 'npv', 'precision')
metrics = \
[met for met in all_abs_metrics if met in data_table.columns]
ax_lim = 1
# elif plot_fcn in [self.plot_fairness_disparity, self.plot_disparity]:
else:
if not metrics:
metrics = \
[disp for disp in self.key_disparities if disp in data_table.columns]
elif metrics == 'all':
metrics = \
list(data_table.columns[data_table.columns.str.contains('_disparity')])
ax_lim = min(10, self._nearest_quartile(max(data_table[metrics].max())) + 0.1)
num_metrics = len(metrics)
rows = math.ceil(num_metrics / ncols)
if ncols == 1 or (num_metrics % ncols == 0):
axes_to_remove = 0
else:
axes_to_remove = ncols - (num_metrics % ncols)
if not (0 < rows <= num_metrics):
raise ValueError (
"Plot must have at least one row. Please update number of columns"
" ('ncols') or check that at least one metric is specified in "
"'metrics'.")
if not (0 < ncols <= num_metrics):
raise ValueError(
"Plot must have at least one column, and no more columns than "
"subplots. Please update number of columns ('ncols') or check "
"that at least one metric is specified in 'metrics'.")
total_plot_width = 25
fig, axs = plt.subplots(nrows=rows, ncols=ncols,
figsize=(total_plot_width, 6 * rows),
sharey=True,
gridspec_kw={'wspace': 0.075, 'hspace': 0.25})
# set a different metric to be plotted in each subplot
ax_col = 0
ax_row = 0
viz_title = \
f"{(', ').join(list(map(lambda x: x.upper(), metrics)))}"
for group_metric in metrics:
if (ax_col >= ncols) and ((ax_col + 1) % ncols) == 1:
ax_row += 1
ax_col = 0
if rows == 1:
current_subplot = axs[ax_col]
elif ncols == 1:
current_subplot = axs[ax_row]
ax_row += 1
else:
current_subplot = axs[ax_row, ax_col]
plot_fcn(data_table, group_metric=group_metric, ax=current_subplot,
ax_lim=ax_lim, title=title, label_dict=label_dict,
min_group_size=min_group_size)
ax_col += 1
# disable axes not being used
if axes_to_remove > 0:
for i in np.arange(axes_to_remove):
axs[-1, -(i + 1)].axis('off')
if show_figure:
plt.show()
if title:
plt.suptitle(f"{viz_title}", fontsize=25, fontweight="bold")
return fig
    def _plot_multiple_treemaps(self, data_table, plot_fcn, attributes=None,
                                metrics=None, fillzeros=True, title=True,
                                label_dict=None, highlight_fairness=False,
                                show_figure=True, min_group_size=None,
                                significance_alpha=0.05):
        """
        This function plots treemaps of disparities indicated by config file,
        one subplot per (metric, attribute) pair.

        :param data_table: output of bias.get_disparity, or fairness.get_fairness
            functions
        :param plot_fcn: Plotting function to use to plot individual disparity
            or fairness treemaps in grid
        :param attributes: which attributes to plot against. Must be specified
            if no metrics specified.
        :param metrics: which metric(s) to plot, or 'all.' MUST be specified if
            no attributes specified. If this value is null, the following
            absolute metrics/ related disparity measures will be plotted against
            specified attributes:
                - Predicted Prevalence (pprev),
                - Predicted Positive Rate (ppr),
                - False Discovery Rate (fdr),
                - False Omission Rate (for),
                - False Positive Rate (fpr),
                - False Negative Rate (fnr)
        :param fillzeros: Whether null values should be filled with zeros. Default
            is True.
        :param title: Whether to display a title on each plot. Default is True.
        :param label_dict: Optional dictionary of label replacements. Default is
            None.
        :param highlight_fairness: Whether to highlight treemaps by disparity
            magnitude, or by related fairness determination.
        :param show_figure: Whether to show figure (plt.show()). Default is True.
        :param min_group_size: Minimum proportion of total group size (all data)
            a population group must meet in order to be included in visualization
        :param significance_alpha: statistical significance level. Used to
            determine visual representation of significance (number of
            asterisks on treemap).
        :raises ValueError: if neither 'attributes' nor 'metrics' is given, or
            if the grid would have zero rows or more columns than plots.
        :return: Returns a figure
        """
        model_id = self._check_model_id(df=data_table, method_table_name='data_table')
        if fillzeros:
            data_table = data_table.fillna(0)

        # at least one of the two selectors must be supplied
        if all(v is None for v in [attributes, metrics]):
            raise ValueError("One of the following parameters must be specified: " \
                             "'attribute', 'metrics'.")

        if attributes:
            if not metrics:
                # default to the configured key absolute metrics present in table
                metrics = [abs_m for abs_m in self.key_metrics if
                           abs_m in data_table.columns]
                # metrics = list(set(self.input_group_metrics) &
                #                set(data_table.columns))
            elif metrics == 'all':
                all_abs_metrics = ['tpr_disparity', 'tnr_disparity', 'for_disparity',
                                   'fdr_disparity', 'fpr_disparity', 'fnr_disparity',
                                   'npv_disparity', 'precision_disparity',
                                   'ppr_disparity', 'pprev_disparity']
                metrics = \
                    [abs_m for abs_m in all_abs_metrics if abs_m in data_table.columns]

            viz_title = \
                f"DISPARITY METRICS BY {(', ').join(list(map(lambda x:x.upper(), attributes)))}"

        elif not attributes:
            # no attributes given: plot the chosen metrics across every attribute
            attributes = list(data_table.attribute_name.unique())
            if metrics == 'all':
                all_disparities = ['tpr_disparity', 'tnr_disparity', 'for_disparity',
                                   'fdr_disparity', 'fpr_disparity', 'fnr_disparity',
                                   'npv_disparity', 'precision_disparity',
                                   'ppr_disparity', 'pprev_disparity']
                metrics = [disparity for disparity in all_disparities if
                           disparity in data_table.columns]
            viz_title = f"{(', ').join(map(lambda x:x.upper(), metrics))} " \
                        f"ACROSS ATTRIBUTES"

        # one subplot per (metric, attribute) combination
        num_metrics = len(attributes) * len(metrics)
        if num_metrics > 2:
            ncols = 3
        else:
            ncols = num_metrics
        rows = math.ceil(num_metrics / ncols)
        # subplots beyond num_metrics on the last row are switched off below
        if ncols == 1 or (num_metrics % ncols == 0):
            axes_to_remove = 0
        else:
            axes_to_remove = ncols - (num_metrics % ncols)

        if not (0 < rows <= num_metrics):
            raise ValueError (
                "Plot must have at least one row. Please update number of columns"
                " ('ncols'), the list of metrics to be plotted ('metrics'), or "
                "the list of attributes to plot disparity metrics across.")
        if not (0 < ncols <= num_metrics):
            raise ValueError(
                "Plot must have at least one column, and no more columns than "
                "plots. Please update number of columns ('ncols'), the list of "
                "metrics to be plotted ('metrics'), or the list of attributes to "
                "plot disparity metrics across.")

        total_plot_width = 25

        fig, axs = plt.subplots(nrows=rows, ncols=ncols,
                                figsize=(total_plot_width, 8 * rows),
                                gridspec_kw={'wspace': 0.025, 'hspace': 0.5},
                                subplot_kw={'aspect': 'equal'})

        # fairness coloring uses per-group parity flags, so no shared colormap;
        # otherwise share one diverging colormap across all treemaps
        if highlight_fairness:
            mapping = None
        else:
            aq_palette = sns.diverging_palette(225, 35, sep=10, as_cmap=True)
            norm = matplotlib.colors.Normalize(vmin=0, vmax=2)
            mapping = matplotlib.cm.ScalarMappable(norm=norm, cmap=aq_palette)

        # set a different metric to be plotted in each subplot
        ax_col = 0
        ax_row = 0

        for group_metric in metrics:
            for attr in attributes:
                # wrap to the next grid row once the current one is full
                if (ax_col >= ncols) and ((ax_col + 1) % ncols) == 1:
                    ax_row += 1
                    ax_col = 0

                # plt.subplots returns a scalar / 1-D array / 2-D array
                # depending on the grid shape
                if num_metrics == 1:
                    current_subplot = axs

                elif (num_metrics > 1) and (rows == 1):
                    current_subplot = axs[ax_col]

                elif (num_metrics > 1) and (ncols == 1):
                    current_subplot = axs[ax_row]
                    ax_row += 1
                else:
                    current_subplot = axs[ax_row, ax_col]

                plot_fcn(data_table, group_metric=group_metric,
                         attribute_name=attr, color_mapping=mapping,
                         ax=current_subplot, fig=fig, title=title,
                         label_dict=label_dict,
                         highlight_fairness=highlight_fairness,
                         min_group_size=min_group_size, significance_alpha=significance_alpha)
                ax_col += 1

        # disable axes not being used
        if axes_to_remove > 0:
            for i in np.arange(axes_to_remove):
                axs[-1, -(i + 1)].axis('off')

        plt.suptitle(f"{viz_title}", fontsize=25, fontweight="bold")

        # fig.tight_layout()
        # leave more headroom for the suptitle on short figures
        if rows > 2:
            fig.subplots_adjust(top=0.95)
        else:
            fig.subplots_adjust(top=0.90)

        if show_figure:
            plt.show()
        return fig
def plot_group_metric_all(self, data_table, metrics=None, fillzeros=True,
ncols=3, title=True, label_dict=None,
show_figure=True, min_group_size=None):
"""
Plot multiple metrics at once from a fairness object table.
:param data_table: output of group.get_crosstabs, bias.get_disparity, or
fairness.get_fairness functions.
:param metrics: which metric(s) to plot, or 'all.'
If this value is null, will plot:
- Predicted Prevalence (pprev),
- Predicted Positive Rate (ppr),
- False Discovery Rate (fdr),
- False Omission Rate (for),
- False Positive Rate (fpr),
- False Negative Rate (fnr)
:param fillzeros: whether to fill null values with zeros. Default is
True.
:param ncols: number of subplots per row in figure. Default is 3.
:param title: whether to display a title on each plot. Default is True.
:param label_dict: optional dictionary of label replacements. Default is None.
:param show_figure: whether to show figure (plt.show()). Default is True.
:param min_group_size: minimum proportion of total group size (all data)
a population group must meet in order to be included in group metric
visualization.
:return: A Matplotlib figure
"""
return self._plot_multiple(
data_table, plot_fcn=self.plot_group_metric, metrics=metrics,
fillzeros=fillzeros, title=title, ncols=ncols, label_dict=label_dict,
show_figure=show_figure, min_group_size=min_group_size)
def plot_disparity_all(self, data_table, attributes=None, metrics=None,
fillzeros=True, title=True, label_dict=None,
show_figure=True, min_group_size=None,
significance_alpha=0.05):
"""
Plot multiple metrics at once from a fairness object table.
:param data_table: output of group.get_crosstabs, bias.get_disparity, or
fairness.get_fairness functions.
:param attributes: which attribute(s) to plot metrics for. If this
value is null, will plot metrics against all attributes.
:param metrics: which metric(s) to plot, or 'all.'
If this value is null, will plot:
- Predicted Prevalence Disparity (pprev_disparity),
- Predicted Positive Rate Disparity (ppr_disparity),
- False Discovery Rate Disparity (fdr_disparity),
- False Omission Rate Disparity (for_disparity),
- False Positive Rate Disparity (fpr_disparity),
- False Negative Rate Disparity (fnr_disparity)
:param fillzeros: whether to fill null values with zeros. Default is True.
:param title: whether to display a title on each plot. Default is True.
:param label_dict: optional dictionary of label replacements. Default is
None.
:param show_figure: whether to show figure (plt.show()). Default is True.
:param min_group_size: minimum proportion of total group size (all data)
a population group must meet in order to be included in metric
visualization.
:param significance_alpha: statistical significance level. Used to
determine visual representation of significance (number of
asterisks on treemap).
:return: A Matplotlib figure
"""
return self._plot_multiple_treemaps(
data_table, plot_fcn=self.plot_disparity, attributes=attributes,
metrics=metrics, fillzeros=fillzeros, label_dict=label_dict,
highlight_fairness=False, show_figure=show_figure, title=title,
min_group_size=min_group_size, significance_alpha=significance_alpha)
def plot_fairness_group_all(self, fairness_table, metrics=None, fillzeros=True,
ncols=3, title=True, label_dict=None,
show_figure=True, min_group_size=None):
"""
Plot multiple metrics at once from a fairness object table.
:param fairness_table: fairness table. Output of a Fairness.get_fairness_
function.
:param metrics: which metric(s) to plot, or 'all.'
If this value is null, will plot:
- Predicted Prevalence (pprev),
- Predicted Positive Rate (ppr),
- False Discovery Rate (fdr),
- False Omission Rate (for),
- False Positive Rate (fpr),
- False Negative Rate (fnr)
:param fillzeros: whether to fill null values with zeros. Default is True.
:param ncols: number of subplots per row in figure. Default is 3.
:param title: whether to display a title on each plot. Default is True.
:param label_dict: optional dictionary of label replacements. Default is
None.
:param show_figure: whether to show figure (plt.show()). Default is True.
:param min_group_size: minimum proportion of total group size (all data).
a population group must meet in order to be included in fairness
visualization
:return: A Matplotlib figure
"""
return self._plot_multiple(
fairness_table, plot_fcn=self.plot_fairness_group, metrics=metrics,
fillzeros=fillzeros, title=title, ncols=ncols, label_dict=label_dict,
show_figure=show_figure, min_group_size=min_group_size)
def plot_fairness_disparity_all(self, fairness_table, attributes=None,
metrics=None, fillzeros=True, title=True,
label_dict=None, show_figure=True,
min_group_size=None, significance_alpha=0.05):
"""
Plot multiple metrics at once from a fairness object table.
:param fairness_table: a fairness table. Output of a Fairness.get_fairness
function.
:param attributes: which attribute(s) to plot metrics for. If this value is null, will plot metrics against all attributes.
:param metrics: which metric(s) to plot, or 'all.'
If this value is null, will plot:
- Predicted Prevalence Disparity (pprev_disparity),
- Predicted Positive Rate Disparity (ppr_disparity),
- False Discovery Rate Disparity (fdr_disparity),
- False Omission Rate Disparity (for_disparity),
- False Positive Rate Disparity (fpr_disparity),
- False Negative Rate Disparity (fnr_disparity)
:param fillzeros: whether to fill null values with zeros. Default is True.
:param title: whether to display a title on each plot. Default is True.
:param label_dict: optional dictionary of label replacements. Default is
None.
:param show_figure: whether to show figure (plt.show()). Default is True.
:param min_group_size: minimum proportion of total group size (all data)
a population group must meet in order to be included in fairness
visualization
:param significance_alpha: statistical significance level. Used to
determine visual representation of significance (number of
asterisks on treemap)
:return: A Matplotlib figure
"""
return self._plot_multiple_treemaps(
fairness_table, plot_fcn=self.plot_disparity, attributes=attributes,
metrics=metrics, fillzeros=fillzeros, label_dict=label_dict,
title=title, highlight_fairness=True, show_figure=show_figure,
min_group_size=min_group_size, significance_alpha=significance_alpha)
def multimodel_attribute_comparison(self, disparity_table, attribute, x_metric, y_metric='precision',
x_jitter=None, y_jitter=None, selected_models=None, ncols=3,
scatter_kws={'legend': 'full'}, title=True, sharey=True,
show_figure=True):
"""
:param disparity_table: disparity table. output of bias.get_disparity, or
fairness.get_fairness function.
:param attribute: attributes: which attribute values (sample groups) to plot x and y metrics for.
:param x_metric: the metric to plot on the X axis. Must be a column in the disparity_table.
:param y_metric: the metric to plot on the Y axis. Must be a column in the disparity_table.
:param x_jitter: jitter for x values. Default is None.
:param y_jitter: jitter for y values. Default is None.
:param selected_models: which models to visualize. Default is all models in disparity_table.
:param ncols: The number of subplots to plot per row visualization
figure.
Default is 3.
:param scatter_kws: keyword arguments for scatterplot
:param title: whether to include a title in visualizations. Default is True.
:param sharey: whether comparison subplots should share Y axis. Default is True
:param show_figure: whether to show figure (plt.show()). Default is True.
:return: A Matplotlib figure
"""
df_models = self._check_multiple_models(disparity_table, method_table_name='disparity_table')
if not selected_models:
selected_models = df_models
attribute_table = disparity_table.loc[(disparity_table['attribute_name']==attribute) & (disparity_table['model_id'].isin(selected_models))]
groups = attribute_table.attribute_value.unique()
num_metrics = len(groups)
total_plot_width = 25
fig, axs, rows, axes_to_remove = self.generate_axes(ncols=ncols, num_metrics=num_metrics,
total_plot_width=total_plot_width,
sharey=sharey, hspace=0.5,
indiv_height=6)
# set a different distribution to be plotted in each subplot
ax_col = -1
ax_row = 0
col_num = 0
viz_title = \
f"MODEL COMPARISON: {x_metric.replace('_', ' ').upper()} BY {y_metric.replace('_', ' ').upper()} " \
f"ACROSS {attribute.replace('_', ' ').upper()}"
aq_palette = sns.diverging_palette(225, 35, sep=10, n=20, as_cmap=True, center="dark")
for group in groups:
# subset df to get only that attribute, no need to aggregate
group_table = attribute_table.loc[attribute_table['attribute_value'] == group]
if group_table.loc[group_table['attribute_value'] == group, x_metric].isnull().all():
logging.warning(f"Cannot plot metric '{x_metric}' for group '{group}', only NaN values."
f" Continuing with remaining groups.")
axes_to_remove += 1
continue
elif group_table.loc[group_table['attribute_value'] == group, y_metric].isnull().all():
logging.warning(f"Cannot plot metric '{y_metric}' for group '{group}', only NaN values. "
f"Continuing with remaining groups.")
axes_to_remove += 1
continue
current_subplot, ax_row, ax_col = self.iterate_subplots(axs, ncols, rows, ax_col, ax_row)
with sns.axes_style("whitegrid"):
# scatterplot of each model for that atttibute value group
sns.scatterplot(x=x_metric, y=y_metric, data=group_table, hue='model_id', palette=aq_palette,
x_jitter=x_jitter, y_jitter=y_jitter, ax=current_subplot, **scatter_kws)
current_subplot.xaxis.grid(color='lightgray', which='major')
current_subplot.yaxis.grid(color='lightgray', which='major')
labels = [item.get_text().replace('_', ' ').upper() for item in current_subplot.get_xticklabels()]
if '' not in labels:
current_subplot.set_xticklabels(labels, rotation=30, ha='center')
else:
plt.xticks(rotation=30, horizontalalignment='center')
x_clean = x_metric.replace('_', ' ').upper()
y_clean = y_metric.replace('_', ' ').upper()
current_subplot.set_xlabel(x_clean, fontsize=12)
current_subplot.set_ylabel(y_clean, fontsize=12)
handles, labels = current_subplot.get_legend_handles_labels()
current_subplot.legend(handles=handles[1:], labels=[f"Model {model}" for model in labels[1:]], title="Model ID")
plot_title = f"MODEL COMPARISON:\n{y_clean} BY {x_clean} ({attribute.replace('_',' ').upper()}: {group.replace('_',' ').upper()})"
current_subplot.set_title(plot_title, fontsize=20)
# current_subplot.legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.)
col_num += 1
# disable axes not being used
if axes_to_remove > 0:
for i in np.arange(axes_to_remove):
axs[-1, -(i + 1)].axis('off')
if title:
plt.suptitle(f"{viz_title}", fontsize=25, fontweight="bold")
if show_figure:
plt.show()
return fig
def multimodel_comparison(self, disparity_table, x_metric, y_metric='precision',
                          x_agg_method='mean', y_agg_method='mean', title=True,
                          x_jitter=None, y_jitter=None, selected_models=None,
                          ax=None, scatter_kws=None, show_figure=True):
    """
    Compare two absolute bias metrics or bias metric disparities across models.

    :param disparity_table: disparity table. output of bias.get_disparity, or
        fairness.get_fairness function.
    :param x_metric: the metric to plot on the X axis. Must be a column in the disparity_table.
    :param y_metric: the metric to plot on the Y axis. Must be a column in the disparity_table.
    :param x_agg_method: Method to aggregate metric values for X axis. Options: 'mean', 'median', 'max', 'min'. Default is 'mean'. For absolute metrics, 'mean' aggregation is a weighted average by group size.
    :param y_agg_method: Method to aggregate metric values for Y axis. Options: 'mean', 'median', 'max', 'min'. Default is 'mean'. For absolute metrics, 'mean' aggregation is a weighted average by group size.
    :param title: whether to include a title in visualizations. Default is True.
    :param x_jitter: jitter for x values. Default is None.
    :param y_jitter: jitter for y values. Default is None.
    :param selected_models: which models to visualize. Default is all models in disparity_table.
    :param ax: a matplotlib Axis. If not passed, a new figure will be created.
    :param scatter_kws: keyword arguments for scatterplot. Defaults to
        {'legend': 'full'} when not given.
    :param show_figure: whether to show figure (plt.show()). Default is True.
    :return: A Matplotlib axis
    """
    # BUG FIX: a mutable default argument ({'legend': 'full'}) is shared
    # between calls; use a None sentinel and apply the default here.
    if scatter_kws is None:
        scatter_kws = {'legend': 'full'}
    df_models = self._check_multiple_models(disparity_table, method_table_name='disparities_table')
    if not selected_models:
        selected_models = df_models
    plot_table = disparity_table.loc[disparity_table['model_id'].isin(selected_models)]
    # requirement: at least two model_id values
    if len(selected_models) < 2:
        raise ValueError("This method requires at least two distinct 'model_id' values "
                         "in the disparities table. Tip: check that "
                         "disparities_table.model_id.unique() returns more than one element.")
    # both metrics must exist as columns of the table
    if x_metric not in plot_table.columns:
        raise ValueError(
            f"Absolute metric '{x_metric}' is not included in disparities_table.")
    if y_metric not in plot_table.columns:
        raise ValueError(
            f"Disparity metric '{y_metric}' is not included in disparities_table.")
    # must be valid aggregation method
    if (x_agg_method not in ('mean', 'median', 'max', 'min')) or (y_agg_method not in ('mean', 'median', 'max', 'min')):
        raise ValueError(
            "Aggregation methods 'x_agg_method' and 'y_agg_method' must "
            "take one of the following values: 'mean', 'median', 'max', 'min'.")
    # should never really have NaNs for one model but not another, but handling JIC
    get_indices = lambda x: ~np.isnan(x)
    get_weights = lambda x: plot_table.loc[x.index, "group_size"]
    wtd_mean = lambda x: (np.average(x[get_indices(x)], axis=0, weights=get_weights(x)[get_indices(x)]))
    # absolute (non-disparity) metrics are mean-aggregated with a
    # group-size-weighted average instead of a plain mean
    if x_agg_method == "mean":
        if "_disparity" not in x_metric:
            x_agg_method = wtd_mean
    if y_agg_method == "mean":
        if "_disparity" not in y_metric:
            y_agg_method = wtd_mean
    collected_df = plot_table.groupby('model_id', as_index=False).agg({x_metric: x_agg_method, y_metric: y_agg_method})
    if ax is None:
        fig, ax = plt.subplots(figsize=(8, 5))
    aq_palette = sns.diverging_palette(225, 35, sep=10, n=40, as_cmap=True, center="dark")
    with sns.axes_style("whitegrid"):
        ax = sns.scatterplot(x=x_metric, y=y_metric, data=collected_df, hue='model_id',
                             x_jitter=x_jitter, y_jitter=y_jitter, palette=aq_palette,
                             alpha=0.75, **scatter_kws)
    ax.xaxis.grid(color='lightgray', which='major')
    ax.yaxis.grid(color='lightgray', which='major')
    labels = [item.get_text().replace('_', ' ').upper() for item in ax.get_xticklabels()]
    # tick labels can be empty strings before the figure is drawn; only
    # relabel when real text is available
    if '' not in labels:
        ax.set_xticklabels(labels, rotation=30, ha='center')
    else:
        plt.xticks(rotation=30, horizontalalignment='center')
    x_clean = x_metric.replace('_', ' ').upper()
    y_clean = y_metric.replace('_', ' ').upper()
    ax.set_xlabel(x_clean, fontsize=12)
    ax.set_ylabel(y_clean, fontsize=12)
    # drop the automatic legend title entry and prefix each model id
    handles, labels = ax.get_legend_handles_labels()
    ax.legend(handles=handles[1:], labels=[f"Model {model}" for model in labels[1:]], title="Model ID")
    if title:
        plot_title = f"MODEL COMPARISON: {y_clean} BY {x_clean}"
        ax.set_title(plot_title, fontsize=20)
    if show_figure:
        plt.show()
    # BUG FIX: the axis was previously returned only when show_figure was
    # False; always return it to honor the documented contract.
    return ax
| 45.237163 | 212 | 0.593519 |
382b9578d66961ed4369fb6bf9a30df328d21003 | 3,115 | py | Python | qa/rpc-tests/invalidateblock.py | mirzaei-ce/core-shahbit | 57ad738667b3d458c92d94aee713c184d911c537 | [
"MIT"
] | null | null | null | qa/rpc-tests/invalidateblock.py | mirzaei-ce/core-shahbit | 57ad738667b3d458c92d94aee713c184d911c537 | [
"MIT"
] | null | null | null | qa/rpc-tests/invalidateblock.py | mirzaei-ce/core-shahbit | 57ad738667b3d458c92d94aee713c184d911c537 | [
"MIT"
] | null | null | null | #!/usr/bin/env python2
# Copyright (c) 2014-2015 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
# Test InvalidateBlock code
#
from test_framework.test_framework import ShahbitTestFramework
from test_framework.util import *
class InvalidateTest(ShahbitTestFramework):
    """Functional test for the invalidateblock RPC.

    Checks that (1) invalidating a block repopulates the chain-tip
    candidate set so a node reorgs back to a previously-seen shorter
    chain, and (2) nodes never reorg onto a lower-work chain.
    """

    def setup_chain(self):
        # Start from clean data directories for 3 nodes (no cached chain).
        print("Initializing test directory "+self.options.tmpdir)
        initialize_chain_clean(self.options.tmpdir, 3)

    def setup_network(self):
        # Nodes start disconnected; the test connects them on demand.
        self.nodes = []
        self.is_network_split = False
        self.nodes.append(start_node(0, self.options.tmpdir, ["-debug"]))
        self.nodes.append(start_node(1, self.options.tmpdir, ["-debug"]))
        self.nodes.append(start_node(2, self.options.tmpdir, ["-debug"]))

    def run_test(self):
        print "Make sure we repopulate setBlockIndexCandidates after InvalidateBlock:"
        print "Mine 4 blocks on Node 0"
        self.nodes[0].generate(4)
        assert(self.nodes[0].getblockcount() == 4)
        # Remember node 0's original tip so we can verify the reorg back.
        besthash = self.nodes[0].getbestblockhash()
        print "Mine competing 6 blocks on Node 1"
        self.nodes[1].generate(6)
        assert(self.nodes[1].getblockcount() == 6)
        print "Connect nodes to force a reorg"
        connect_nodes_bi(self.nodes,0,1)
        sync_blocks(self.nodes[0:2])
        # Node 0 must have reorged onto node 1's longer chain.
        assert(self.nodes[0].getblockcount() == 6)
        badhash = self.nodes[1].getblockhash(2)
        print "Invalidate block 2 on node 0 and verify we reorg to node 0's original chain"
        self.nodes[0].invalidateblock(badhash)
        newheight = self.nodes[0].getblockcount()
        newhash = self.nodes[0].getbestblockhash()
        if (newheight != 4 or newhash != besthash):
            raise AssertionError("Wrong tip for node0, hash %s, height %d"%(newhash,newheight))
        print "\nMake sure we won't reorg to a lower work chain:"
        connect_nodes_bi(self.nodes,1,2)
        print "Sync node 2 to node 1 so both have 6 blocks"
        sync_blocks(self.nodes[1:3])
        assert(self.nodes[2].getblockcount() == 6)
        print "Invalidate block 5 on node 1 so its tip is now at 4"
        self.nodes[1].invalidateblock(self.nodes[1].getblockhash(5))
        assert(self.nodes[1].getblockcount() == 4)
        print "Invalidate block 3 on node 2, so its tip is now 2"
        self.nodes[2].invalidateblock(self.nodes[2].getblockhash(3))
        assert(self.nodes[2].getblockcount() == 2)
        print "..and then mine a block"
        self.nodes[2].generate(1)
        print "Verify all nodes are at the right height"
        # Allow some time for block propagation before checking heights.
        time.sleep(5)
        for i in xrange(3):
            print i,self.nodes[i].getblockcount()
        # Node 2 extended its (invalidated-to-2) chain to height 3; node 1
        # must NOT have reorged down to it (3 < its height 4).
        assert(self.nodes[2].getblockcount() == 3)
        assert(self.nodes[0].getblockcount() == 4)
        node1height = self.nodes[1].getblockcount()
        if node1height < 4:
            raise AssertionError("Node 1 reorged to a lower height: %d"%node1height)
if __name__ == '__main__':
    # Run the test directly; the framework's main() handles CLI options.
    InvalidateTest().main()
| 40.986842 | 95 | 0.646549 |
5eb84b139256fbc9fc91bfe1abd024934c0b1897 | 387 | py | Python | plgnd/wsgi.py | Griperis/django-heroku-deployment | 1a47b83d62e0ae11eb7bc76d05bea7efe28e7636 | [
"MIT"
] | null | null | null | plgnd/wsgi.py | Griperis/django-heroku-deployment | 1a47b83d62e0ae11eb7bc76d05bea7efe28e7636 | [
"MIT"
] | null | null | null | plgnd/wsgi.py | Griperis/django-heroku-deployment | 1a47b83d62e0ae11eb7bc76d05bea7efe28e7636 | [
"MIT"
] | null | null | null | """
WSGI config for plgnd project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/howto/deployment/wsgi/
"""
import os

from django.core.wsgi import get_wsgi_application

# Fall back to the project's settings module unless the environment
# already specifies one.
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'plgnd.settings')

# Module-level WSGI callable imported by WSGI servers.
application = get_wsgi_application()
| 22.764706 | 78 | 0.782946 |
11a92d3ef8eb74714bdbe7cd0e22766b9c76469e | 7,844 | py | Python | docs/source/conf.py | stengaard/trousseau | 9f9c920f7eb4feb958946b9f7a89a28b9c622669 | [
"MIT"
] | 1 | 2017-08-13T00:16:35.000Z | 2017-08-13T00:16:35.000Z | docs/source/conf.py | stengaard/trousseau | 9f9c920f7eb4feb958946b9f7a89a28b9c622669 | [
"MIT"
] | null | null | null | docs/source/conf.py | stengaard/trousseau | 9f9c920f7eb4feb958946b9f7a89a28b9c622669 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
#
# trousseau documentation build configuration file, created by
# sphinx-quickstart on Tue Feb 18 12:47:01 2014.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os

# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
# Makes modules in the docs source directory importable by Sphinx.
sys.path.insert(0, os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
# No Sphinx extensions are enabled for this project.
extensions = []

# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']

# The suffix of source filenames.
source_suffix = '.rst'

# The encoding of source files.
#source_encoding = 'utf-8-sig'

# The master toctree document.
master_doc = 'index'

# General information about the project.
project = u'trousseau'
copyright = u'2014, Oleiade'

# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.3'
# The full version, including alpha/beta/rc tags.
release = '0.3.0'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = []
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
# html_theme = 'flask'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = ['_themes']
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
# html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'trousseaudoc'
# -- Options for LaTeX output --------------------------------------------------
# All LaTeX output options are left at their Sphinx defaults.
latex_elements = {
    # The paper size ('letterpaper' or 'a4paper').
    #'papersize': 'letterpaper',

    # The font size ('10pt', '11pt' or '12pt').
    #'pointsize': '10pt',

    # Additional stuff for the LaTeX preamble.
    #'preamble': '',
}

# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
    ('index', 'trousseau.tex', u'trousseau Documentation',
     u'Oleiade', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
# A single man page is produced: `man 1 trousseau`.
man_pages = [
    ('index', 'trousseau', u'trousseau Documentation',
     [u'Oleiade'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
# Tuple fields: (source start file, target name, title, author,
# dir menu entry, description, category).
texinfo_documents = [
    ('index', 'trousseau', u'trousseau Documentation',
     u'Oleiade', 'trousseau', 'Networked and encrypted key-value database.',
     'Database'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# Flask theme
# The theme ships in the local _themes directory, so make that directory
# visible to Sphinx before selecting the theme.
sys.path.append(os.path.abspath('_themes'))
html_theme_path = ['_themes']
html_theme = 'flask'
| 31.502008 | 80 | 0.714049 |
0d77b5eace18bd5911cc05ed35f899fa62a9e209 | 48,878 | py | Python | haros/metamodel.py | ipa-nhg/haros | ed657653b07418d9f93915d66fb201e7a1762602 | [
"MIT"
] | null | null | null | haros/metamodel.py | ipa-nhg/haros | ed657653b07418d9f93915d66fb201e7a1762602 | [
"MIT"
] | null | null | null | haros/metamodel.py | ipa-nhg/haros | ed657653b07418d9f93915d66fb201e7a1762602 | [
"MIT"
] | null | null | null |
#Copyright (c) 2017 Andre Santos
#
#Permission is hereby granted, free of charge, to any person obtaining a copy
#of this software and associated documentation files (the "Software"), to deal
#in the Software without restriction, including without limitation the rights
#to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
#copies of the Software, and to permit persons to whom the Software is
#furnished to do so, subject to the following conditions:
#The above copyright notice and this permission notice shall be included in
#all copies or substantial portions of the Software.
#THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
#IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
#FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
#AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
#LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
#OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
#THE SOFTWARE.
###############################################################################
# Imports
###############################################################################
from collections import Counter
import os
import magic as file_cmd
###############################################################################
# Notes
###############################################################################
# It seems that it is better to store string names for source objects, instead
# of RosName. If necessary, store namespace also as a string. Source objects
# will be used as generators for runtime objects, so we must have access to
# the values provided originally.
# Source objects define various types of dependencies (files, packages, etc.).
# The objects exist, but only work properly if the dependencies are met.
# Runtime objects do not really have dependencies (only the node instances
# depend on the node executable). Instead, they have conditions, boolean
# expressions that determine whether the objects will be instantiated.
###############################################################################
# Base Metamodel Object
###############################################################################
class MetamodelObject(object):
    """Abstract marker base class for every entity in the metamodel."""
    pass
###############################################################################
# Analysis Properties
###############################################################################
class DependencySet(object):
    """Groups the external requirements of a source object.

    Holds four independent sets: required files, ROS packages,
    command-line arguments and environment variables.
    """

    def __init__(self):
        # Every category starts out empty; analysis passes fill them in.
        self.files = set()
        self.packages = set()
        self.arguments = set()
        self.environment = set()

    def __str__(self):
        # Quick debug dump of all categories.
        return str(self.__dict__)

    def __eq__(self, other):
        # Mirrors the original check: unrelated types compare unequal.
        if not isinstance(self, other.__class__):
            return False
        attrs = ("files", "packages", "arguments", "environment")
        return all(getattr(self, a) == getattr(other, a) for a in attrs)

    def __ne__(self, other):
        return not self.__eq__(other)
class SourceCondition(object):
    """A boolean condition in the source code guarding a primitive call."""

    def __init__(self, condition, location = None):
        self.condition = condition
        self.location = location

    @property
    def language(self):
        # The language of the file containing the condition, if known.
        loc = self.location
        if loc and loc.file:
            return loc.file.language
        return "unknown"

    def to_JSON_object(self):
        loc_json = self.location.to_JSON_object() if self.location else None
        return {"condition": str(self.condition), "location": loc_json}

    def __str__(self):
        return str(self.condition)

    def __repr__(self):
        return str(self.condition)
class RosPrimitiveCall(MetamodelObject):
""""Base class for calls to ROS primitives."""
def __init__(self, name, namespace, msg_type, control_depth = None,
repeats = False, conditions = None, location = None):
self.name = name
self.namespace = namespace
self.type = msg_type
self.conditions = conditions if not conditions is None else []
self.control_depth = control_depth or len(self.conditions)
self.repeats = repeats and self.control_depth >= 1
self.location = location
def to_JSON_object(self):
return {
"name": self.name,
"namespace": self.namespace,
"type": self.type,
"depth": self.control_depth,
"repeats": self.repeats,
"conditions": [c.to_JSON_object() for c in self.conditions],
"location": (self.location.to_JSON_object()
if self.location else None)
}
def __str__(self):
return "RosPrimitiveCall({}, {}, {}) {} (depth {})".format(
self.name, self.namespace, self.type,
self.location, self.control_depth
)
def __repr__(self):
return self.__str__()
class Publication(RosPrimitiveCall):
    """An advertise call: publishing messages of `msg_type` on a topic."""

    def __init__(self, name, namespace, msg_type, queue_size,
                 control_depth = None, repeats = False, conditions = None,
                 location = None):
        super(Publication, self).__init__(
            name, namespace, msg_type, control_depth = control_depth,
            repeats = repeats, conditions = conditions, location = location)
        self.queue_size = queue_size

    def to_JSON_object(self):
        json_dict = super(Publication, self).to_JSON_object()
        json_dict["queue"] = self.queue_size
        return json_dict
class Subscription(RosPrimitiveCall):
    """A subscribe call: receiving messages of `msg_type` from a topic."""

    def __init__(self, name, namespace, msg_type, queue_size,
                 control_depth = None, repeats = False, conditions = None,
                 location = None):
        super(Subscription, self).__init__(
            name, namespace, msg_type, control_depth = control_depth,
            repeats = repeats, conditions = conditions, location = location)
        self.queue_size = queue_size

    def to_JSON_object(self):
        json_dict = super(Subscription, self).to_JSON_object()
        json_dict["queue"] = self.queue_size
        return json_dict
class ServiceServerCall(RosPrimitiveCall):
    # A service advertisement: this node provides the service.
    pass

class ServiceClientCall(RosPrimitiveCall):
    # A service call: this node is a client of the service.
    pass

class ReadParameterCall(RosPrimitiveCall):
    # A read access to the ROS parameter server.
    pass

class WriteParameterCall(RosPrimitiveCall):
    # A write access to the ROS parameter server.
    pass
###############################################################################
# Source Code Structures
###############################################################################
class Person(object):
    """An author or maintainer, uniquely identified by e-mail address."""

    def __init__(self, name, email = "email@example.com"):
        # The e-mail doubles as the unique identifier.
        self.id = email
        self.email = email
        self.name = name if name else "Unknown"

    def __eq__(self, other):
        if not isinstance(self, other.__class__):
            return False
        return self.id == other.id

    def __ne__(self, other):
        return not self.__eq__(other)

    def __hash__(self):
        return hash(self.id)
class Location(object):
    """A reportable location: package, optionally file/line/function/class."""

    def __init__(self, pkg, file = None, line = None, fun = None, cls = None):
        self.package = pkg
        self.file = file
        self.line = line
        self.function = fun
        self.class_ = cls

    @property
    def largest_scope(self):
        return self.package

    @property
    def smallest_scope(self):
        # The file, when known, is more specific than the package.
        return self.file if self.file else self.package

    def to_JSON_object(self):
        file_name = self.file.full_name if self.file else None
        return {
            "package": self.package.name,
            "file": file_name,
            "line": self.line,
            "function": self.function,
            "class": self.class_
        }

    def __str__(self):
        parts = ["in " + self.package.name]
        if not self.file:
            return parts[0]
        parts.append("/" + self.file.full_name)
        if self.line is not None:
            parts.append(":" + str(self.line))
        if self.function:
            parts.append(", in function " + self.function)
        if self.class_:
            parts.append(", in class " + self.class_)
        return "".join(parts)
class SourceObject(MetamodelObject):
    """Base class for objects subject to analysis.

    Each object has a unique string `id`, a `name`, a DependencySet and a
    `scope` (supplied by subclasses).  The rich comparison operators order
    objects by scope breadth according to SCOPES
    (file < node < package < repository < project), NOT by identity;
    equality, however, compares ids.
    """

    # Ordered from narrowest to broadest scope.
    SCOPES = ("file", "node", "package", "repository", "project")

    def __init__(self, id, name):
        self.id = id
        self.name = name
        self.dependencies = DependencySet()
        # Internal flag: whether this object should still be analysed.
        self._analyse = True

    @property
    def location(self):
        # Subclasses override this with a meaningful Location.
        return None

    def __lt__(self, scope):
        # Accepts either a scope name (string) or another SourceObject.
        # NOTE(review): `basestring` makes this module Python 2 only.
        if isinstance(scope, basestring):
            return self.SCOPES.index(self.scope) < self.SCOPES.index(scope)
        return self.SCOPES.index(self.scope) < self.SCOPES.index(scope.scope)

    def __le__(self, scope):
        if isinstance(scope, basestring):
            return self.SCOPES.index(self.scope) <= self.SCOPES.index(scope)
        return self.SCOPES.index(self.scope) <= self.SCOPES.index(scope.scope)

    def __gt__(self, scope):
        if isinstance(scope, basestring):
            return self.SCOPES.index(self.scope) > self.SCOPES.index(scope)
        return self.SCOPES.index(self.scope) > self.SCOPES.index(scope.scope)

    def __ge__(self, scope):
        if isinstance(scope, basestring):
            return self.SCOPES.index(self.scope) >= self.SCOPES.index(scope)
        return self.SCOPES.index(self.scope) >= self.SCOPES.index(scope.scope)

    def __eq__(self, other):
        # Equality is by id, not by scope ordering.
        if not isinstance(self, other.__class__):
            return False
        return self.id == other.id

    def __ne__(self, other):
        return not self.__eq__(other)

    def __hash__(self):
        return self.id.__hash__()

    def bound_to(self, scope):
        # Subclasses refine this to express containment/dependency.
        return self == scope

    def accepts_scope(self, scope):
        return self.scope == scope

    def __str__(self):
        return self.id
class SourceFile(SourceObject):
    """Represents a source code file."""

    # Markers used by _get_language() to classify files, either by the
    # `file` command's type description or by file name/extension.
    CPP = ('c source', 'c++ source')
    PYTHON = 'python script'
    PKG_XML = 'package.xml'
    LAUNCH = ('.launch', '.launch.xml')
    MSG = '.msg'
    SRV = '.srv'
    ACTION = '.action'
    YAML = ('.yaml', '.yml')
    CMAKELISTS = 'CMakeLists.txt'

    def __init__(self, name, directory, pkg):
        # `directory` is relative to the package root; normalize the id
        # to forward slashes regardless of the host OS.
        id = ("file:" + pkg.name + "/" + directory.replace(os.path.sep, "/")
              + "/" + name)
        SourceObject.__init__(self, id, name)
        self.directory = directory
        self.full_name = os.path.join(directory, name)
        self.dir_path = os.path.join(pkg.path, directory)
        self.path = os.path.join(pkg.path, directory, name)
        self.package = pkg
        self.language = self._get_language()
        # Parsed AST (filled in by later analysis), and file statistics.
        self.tree = None
        self.size = 0
        self.lines = 0
        self.sloc = 0
        self.timestamp = 0

    @property
    def scope(self):
        return "file"

    @property
    def location(self):
        return Location(self.package, file = self)

    def bound_to(self, other):
        # A file is bound to the node it belongs to, its package, and any
        # repository/project containing that package.
        if other.scope == "node":
            return self in other.source_files
        if other.scope == "package":
            return self.package == other
        if other.scope == "repository" or other.scope == "project":
            return self.package in other.packages
        return other.scope == "file" and self == other

    def accepts_scope(self, scope):
        # Files accept any scope at least as broad as themselves.
        return self >= scope

    def set_file_stats(self):
        """Read the file, computing size/lines/SLOC and ignore markers.

        Returns a dict mapping rule tag -> list of line numbers to ignore;
        here only the wildcard tag "*" is produced, fed by language-specific
        ignore-comment parsers.
        """
        self.size = os.path.getsize(self.path)
        self.timestamp = os.path.getmtime(self.path)
        self.lines = 0
        self.sloc = 0
        ignore_all = []
        to_ignore = {"*": ignore_all}
        # ilp: "ignore this line"; inlp: "ignore the next line".
        ilp, inlp = self._ignore_parsers()
        with open(self.path, "r") as handle:
            for line in handle:
                self.lines += 1
                sline = line.strip()
                if sline:
                    self.sloc += 1
                if ilp(sline):
                    ignore_all.append(self.lines)
                elif inlp(sline):
                    ignore_all.append(self.lines + 1)
        return to_ignore

    def to_JSON_object(self):
        return {
            "name": self.name,
            "directory": self.directory,
            "package": self.package.name,
            "language": self.language,
            "size": self.size,
            "timestamp": self.timestamp,
            "lines": self.lines,
            "sloc": self.sloc
        }

    def _get_language(self):
        # Use libmagic's description first, then fall back to file names.
        file_type = file_cmd.from_file(self.path).lower()
        if file_type.startswith(self.CPP):
            return 'cpp'
        if self.PYTHON in file_type:
            return 'python'
        if self.name.endswith(self.LAUNCH):
            return 'launch'
        if self.name == self.PKG_XML:
            return 'package'
        if self.name.endswith(self.MSG):
            return 'msg'
        if self.name.endswith(self.SRV):
            return 'srv'
        if self.name.endswith(self.ACTION):
            return 'action'
        if self.name.endswith(self.YAML):
            return 'yaml'
        if self.name == self.CMAKELISTS:
            return 'cmake'
        return 'unknown'

    def __str__(self):
        return self.__repr__()

    def __repr__(self):
        return self.id

    def _ignore_parsers(self):
        # Pick the line parsers that recognize ignore comments for this
        # language; other languages get no-op parsers.
        if self.language == "cpp":
            return (_cpp_ignore_line, _cpp_ignore_next_line)
        elif self.language == "python":
            return (_py_ignore_line, _py_ignore_next_line)
        return (_no_parser, _no_parser)
class Package(SourceObject):
    """Represents a ROS package."""

    def __init__(self, name, repo = None, proj = None):
        SourceObject.__init__(self, "package:" + name, name)
        # public:
        self.project = proj
        self.repository = repo
        self.authors = set()
        self.maintainers = set()
        self.is_metapackage = False
        self.description = ""
        self.version = "0.0.0"
        self.licenses = set()
        self.website = None
        self.vcs_url = None
        self.bug_url = None
        self.path = None
        self.source_files = []
        self.nodes = []
        self.size = 0   # sum of file sizes
        self.lines = 0  # sum of physical file lines
        self.sloc = 0   # sum of file source lines of code
        self.topological_tier = 0

    @property
    def scope(self):
        return "package"

    @property
    def location(self):
        return Location(self)

    @property
    def file_count(self):
        return len(self.source_files)

    def bound_to(self, other):
        """Whether this package contains, belongs to or depends on `other`."""
        if other.scope == "file" or other.scope == "node":
            return other.package == self
        if other.scope == "repository":
            return self.repository == other
        if other.scope == "project":
            return self.project == other
        if other.scope == "package":
            if self == other:
                return True
            # Check declared dependencies on the other package.
            for dep in self.dependencies.packages:
                if dep.type == "package" and dep.value == other.name:
                    return True
        # FIX: previously an unknown scope fell off the end and returned
        # None implicitly; always return an explicit boolean.
        return False

    def to_JSON_object(self):
        """Return a JSON-serializable summary of this package."""
        return {
            "name": self.name,
            "metapackage": self.is_metapackage,
            "description": self.description,
            "version": self.version,
            "wiki": self.website,
            "repository": self.vcs_url,
            "bugTracker": self.bug_url,
            "authors": [person.name for person in self.authors],
            "maintainers": [person.name for person in self.maintainers],
            # NOTE(review): dependencies.packages appears to hold dependency
            # objects (see bound_to); they are emitted as-is here — confirm
            # the intended serialized representation.
            "dependencies": [pkg for pkg in self.dependencies.packages],
            # BUG FIX: was `self.files`, an attribute never defined by this
            # class (the constructor creates `source_files`), which raised
            # AttributeError.
            "files": [f.full_name for f in self.source_files],
            "nodes": [n.node_name for n in self.nodes]
        }

    def __str__(self):
        return self.__repr__()

    def __repr__(self):
        return self.id
class Repository(SourceObject):
    """Represents a source code repository (a VCS checkout)."""

    def __init__(self, name, vcs = None, url = None, version = None,
                 status = None, path = None, proj = None):
        SourceObject.__init__(self, "repository:" + name, name)
        # BUG FIX: `proj` was accepted but silently discarded
        # (self.project was always set to None); store it like Package does.
        self.project = proj
        self.vcs = vcs
        self.url = url
        self.version = version
        self.status = status
        self.path = path
        self.packages = []
        self.declared_packages = []
        self.commits = 1
        self.contributors = 1

    @property
    def scope(self):
        return "repository"

    def bound_to(self, other):
        """Whether this repository contains or corresponds to `other`."""
        if other.scope == "package":
            return other.repository == self
        if other.scope == "file" or other.scope == "node":
            return other.package in self.packages
        if other.scope == "project":
            # Bound to a project when owned by it, or when every package in
            # this repository belongs to it.
            if self.project == other:
                return True
            return all(pkg in other.packages for pkg in self.packages)
        return other.scope == "repository" and self == other

    def __str__(self):
        return self.__repr__()

    def __repr__(self):
        return self.id
class Project(SourceObject):
    """A project is a custom grouping of packages, not necessarily
    corresponding to a repository, and not even requiring the
    existence of one.
    """

    def __init__(self, name):
        # "all" is reserved by the framework and cannot name a project.
        if name == "all":
            raise ValueError("Forbidden project name: all")
        SourceObject.__init__(self, "project:" + name, name)
        self.packages = []
        self.repositories = []
        self.configurations = []

    @property
    def scope(self):
        return "project"

    def bound_to(self, other):
        scope = other.scope
        if scope == "package":
            return other.project == self
        if scope == "file" or scope == "node":
            return other.package in self.packages
        if scope == "repository":
            # Bound when the repository is owned by this project or every
            # one of its packages belongs here.
            if other.project == self:
                return True
            return all(pkg in self.packages for pkg in other.packages)
        return scope == "project" and self == other

    def to_JSON_object(self):
        return {
            "id": self.name,
            "packages": [pkg.name for pkg in self.packages],
            "repositories": [repo.name for repo in self.repositories]
        }

    def __str__(self):
        return self.id

    def __repr__(self):
        return self.id
class Node(SourceObject):
    """An executable ROS node (or nodelet) provided by a package."""

    def __init__(self, name, pkg, rosname = None, nodelet = None):
        # Nodelets are identified by their exported class name.
        node_id = "node:" + pkg.name + "/" + (nodelet or name)
        SourceObject.__init__(self, node_id, name)
        self.package = pkg
        self.rosname = rosname
        self.nodelet_class = nodelet
        self.source_files = []
        self.source_tree = None
        self.instances = []
        # ROS primitive calls extracted from the node's source code.
        self.advertise = []
        self.subscribe = []
        self.service = []
        self.client = []
        self.read_param = []
        self.write_param = []

    @property
    def scope(self):
        return "node"

    @property
    def location(self):
        return Location(self.package)

    @property
    def is_nodelet(self):
        return self.nodelet_class is not None

    @property
    def language(self):
        # Language of the first source file, or None when there is none.
        if self.source_files:
            return self.source_files[0].language
        return None

    @property
    def node_name(self):
        return self.package.name + "/" + (self.nodelet_class or self.name)

    @property
    def timestamp(self):
        # Most recent modification time among the source files (0 if none).
        stamps = [f.timestamp for f in self.source_files]
        return max(stamps) if stamps else 0

    def to_JSON_object(self):
        return {
            "id": self.node_name,
            "name": self.name,
            "package": self.package.name,
            "rosname": self.rosname,
            "nodelet": self.nodelet_class,
            "files": [f.full_name for f in self.source_files],
            "advertise": [p.to_JSON_object() for p in self.advertise],
            "subscribe": [p.to_JSON_object() for p in self.subscribe],
            "service": [p.to_JSON_object() for p in self.service],
            "client": [p.to_JSON_object() for p in self.client],
            "readParam": [p.to_JSON_object() for p in self.read_param],
            "writeParam": [p.to_JSON_object() for p in self.write_param],
            "timestamp": self.timestamp
        }

    def bound_to(self, other):
        scope = other.scope
        if scope == "package":
            return other == self.package
        if scope == "file":
            return other in self.source_files
        if scope == "repository" or scope == "project":
            return any(pkg == self.package for pkg in other.packages)
        return scope == "node" and self == other

    def __str__(self):
        return self.id

    def __repr__(self):
        return self.id
###############################################################################
# ROS Computation Graph
###############################################################################
class RosName(object):
    """A ROS graph resource name, resolved against namespaces and remaps.

    The character "?" inside a name marks a part whose value could not
    be determined statically (an unresolved name).
    """
    def __init__(self, name, ns = "/", private_ns = "", remaps = None):
        # Keep the name exactly as written in the source/launch file.
        self._given = name
        self._name = RosName.transform(name, ns = ns, private_ns = private_ns,
                                       remaps = remaps)
        parts = self._name.rsplit("/", 1)
        self._own = parts[-1]
        # An empty prefix means the name sits directly under the root "/".
        self._ns = parts[0] or "/"
    @property
    def full(self):
        # Fully resolved (and remapped) name, e.g. "/ns/name".
        return self._name
    @property
    def own(self):
        # Last segment of the name, without its namespace.
        return self._own
    @property
    def namespace(self):
        return self._ns
    @property
    def given(self):
        # Name as originally written, before resolution and remapping.
        return self._given
    @property
    def is_global(self):
        return self._given.startswith("/")
    @property
    def is_private(self):
        return self._given.startswith("~")
    @property
    def is_unresolved(self):
        return "?" in self._name
    @property
    def pattern(self):
        """Build a regular expression matching all names this (possibly
        unresolved) name could resolve to.

        Each "?" is replaced by a wildcard whose form depends on its
        position (whole name, whole namespace part, or fragment of a
        part). Assumes "?" never appears twice in a row (asserted).
        """
        parts = []
        prev = ""
        n = len(self._name)
        i = 0
        if self._name == "?":
            return ".+$"
        if self._name[0] == "?":
            parts.append("(.*?)")
            i = 1
            prev = "?"
            assert self._name[1] != "?"
        for j in xrange(i, n):
            if self._name[j] == "?":
                assert prev != "?"
                if prev == "/":
                    if j == n - 1: # self._name.endswith("/?")
                        # end, whole part for sure
                        parts.append(self._name[i:j])
                        parts.append("(.+?)")
                    elif self._name[j+1] == "/": # "/?/"
                        # start and middle, whole parts
                        parts.append(self._name[i:j-1])
                        parts.append("(/.+?)?")
                    else: # "/?a", optional part
                        parts.append(self._name[i:j])
                        parts.append("(.*?)")
                else: # "a?/", "a?a", "/a?", optional part
                    parts.append(self._name[i:j])
                    parts.append("(.*?)")
                i = j + 1
            prev = self._name[j]
        if i < n:
            parts.append(self._name[i:])
        parts.append("$")
        return "".join(parts)
    @staticmethod
    def resolve(name, ns = "/", private_ns = ""):
        """Resolve `name` following ROS naming rules: "~name" resolves
        against the private namespace, "/name" is already global, and
        anything else is relative to `ns`."""
        if name[0] == "~":
            return private_ns + "/" + name[1:]
        elif name[0] == "/":
            return name
        elif ns == "" or ns[-1] != "/":
            return ns + "/" + name
        else:
            return ns + name
    @staticmethod
    def transform(name, ns = "/", private_ns = "", remaps = None):
        """Resolve `name`, then apply remappings (exact-match lookup)."""
        name = RosName.resolve(name, ns = ns, private_ns = private_ns)
        if remaps:
            return remaps.get(name, name)
        return name
    def __eq__(self, other):
        # Compares equal both to other RosName objects (by full name)
        # and to plain strings.
        if isinstance(self, other.__class__):
            return self._name == other._name
        return self._name == other
    def __ne__(self, other):
        return not self.__eq__(other)
    def __hash__(self):
        # Hash of the full name string, consistent with __eq__ against
        # strings, so RosName keys interoperate with str keys in dicts.
        return self._name.__hash__()
class RuntimeLocation(object):
    """Location of a runtime (computation graph) entity.

    Runtime entities exist only within a configuration, so both scope
    bounds collapse to the configuration itself.
    """
    def __init__(self, configuration):
        self.configuration = configuration

    @property
    def largest_scope(self):
        return self.configuration

    @property
    def smallest_scope(self):
        return self.configuration

    def to_JSON_object(self):
        """Serialize the location into a JSON-compatible dictionary."""
        return {"configuration": self.configuration.name}

    def __str__(self):
        return "in configuration " + self.configuration.name
class Resource(MetamodelObject):
    """Base class for all runtime objects belonging to the
    ROS Computation Graph."""
    def __init__(self, config, rosname, conditions = None):
        # :param config: owning Configuration
        # :param rosname: RosName under which the resource exists
        # :param conditions: unresolved launch conditions (empty = always on)
        self.configuration = config
        self.rosname = rosname
        self.conditions = [] if conditions is None else conditions

    @property
    def id(self):
        return self.rosname.full

    @property
    def name(self):
        return self.rosname.own

    @property
    def namespace(self):
        return self.rosname.namespace

    @property
    def enabled(self):
        # Always present: no pending launch conditions.
        return not self.conditions

    @property
    def disabled(self):
        # Known to be absent: some condition evaluated to False.
        return False in self.conditions

    @property
    def conditional(self):
        # Presence depends on conditions that could not be resolved.
        return not self.enabled and not self.disabled

    @property
    def unresolved(self):
        return self.rosname.is_unresolved

    @property
    def location(self):
        return RuntimeLocation(self.configuration)

    @property
    def resource_type(self):
        raise NotImplementedError("subclasses must implement this property")

    def traceability(self):
        raise NotImplementedError("subclasses must implement this method")

    def remap(self, rosname):
        raise NotImplementedError("subclasses must implement this method")

    def __eq__(self, other):
        if not isinstance(self, other.__class__):
            return False
        return (self.configuration == other.configuration
                and self.rosname.full == other.rosname.full)

    def __ne__(self, other):
        return not self.__eq__(other)

    def __hash__(self):
        return hash(self.rosname)
class NodeInstance(Resource):
    """A runtime instance of a Node, created by a launch file."""
    def __init__(self, config, rosname, node, launch = None, argv = None,
                 remaps = None, conditions = None):
        # :param node: the static Node this instance runs
        # :param launch: launch file that spawns this instance
        # :param argv: command-line arguments
        # :param remaps: name remappings ({source: target})
        Resource.__init__(self, config, rosname, conditions = conditions)
        self.node = node
        self.launch = launch
        self.argv = argv if not argv is None else []
        self.remaps = remaps if not remaps is None else {}
        # Links to graph resources, filled while building the graph:
        self.publishers = []
        self.subscribers = []
        self.servers = []
        self.clients = []
        self.reads = []
        self.writes = []
    @property
    def resource_type(self):
        return "node"
    @property
    def rt_outlinks(self):
        """All nodes reachable from this one via topic publications and
        service calls (breadth-first traversal). Includes this node."""
        visited = set()
        queue = [self]
        nodes = []
        while queue:
            current = queue.pop(0)
            if current.id in visited:
                continue
            nodes.append(current)
            visited.add(current.id)
            # BUG FIX: follow the links of `current`, not of `self`;
            # the old code only ever expanded the root node's links, so
            # the traversal stopped at direct neighbours.
            for pub in current.publishers:
                for sub in pub.topic.subscribers:
                    queue.append(sub.node)
            for cli in current.clients:
                if cli.service.server:
                    queue.append(cli.service.server.node)
        return nodes
    def traceability(self):
        """Source locations justifying this instance (its launch file)."""
        return [self.launch.location]
    def remap(self, rosname):
        """Return a shallow copy of this instance under a new ROS name."""
        new = NodeInstance(self.configuration, rosname, self.node,
                           launch = self.launch, argv = list(self.argv),
                           remaps = dict(self.remaps),
                           conditions = list(self.conditions))
        new.publishers = list(self.publishers)
        new.subscribers = list(self.subscribers)
        new.servers = list(self.servers)
        new.clients = list(self.clients)
        new.reads = list(self.reads)
        new.writes = list(self.writes)
        return new
    def to_JSON_object(self):
        """Serialize this node instance into a JSON-compatible dict."""
        return {
            "uid": str(id(self)),
            "name": self.id,
            "type": self.node.node_name,
            "args": self.argv,
            "conditions": [c.to_JSON_object() for c in self.conditions],
            "publishers": [p.topic.rosname.full for p in self.publishers],
            "subscribers": [p.topic.rosname.full for p in self.subscribers],
            "servers": [p.service.rosname.full for p in self.servers],
            "clients": [p.service.rosname.full for p in self.clients],
            "reads": [p.parameter.rosname.full for p in self.reads],
            "writes": [p.parameter.rosname.full for p in self.writes],
            "traceability": [l.to_JSON_object() for l in self.traceability()]
        }
    def __repr__(self):
        return self.__str__()
    def __str__(self):
        return ("NodeInstance " + self.configuration.name
                + ":" + self.rosname.full)
class Topic(Resource):
    """A ROS topic in the computation graph."""
    def __init__(self, config, rosname, message_type = None, conditions = None):
        Resource.__init__(self, config, rosname, conditions = conditions)
        self.type = message_type
        self.publishers = []
        self.subscribers = []

    @property
    def is_disconnected(self):
        # Has links on exactly one of its two ends.
        pubs = len(self.publishers)
        subs = len(self.subscribers)
        return pubs + subs > 0 and (pubs == 0 or subs == 0)

    @property
    def resource_type(self):
        return "topic"

    def traceability(self):
        """Source locations of all links to this topic."""
        locations = []
        for link in self.publishers + self.subscribers:
            if link.source_location is not None:
                locations.append(link.source_location)
        return locations

    def remap(self, rosname):
        """Return a shallow copy of this topic under a new ROS name."""
        clone = Topic(self.configuration, rosname, message_type = self.type,
                      conditions = list(self.conditions))
        clone.publishers = list(self.publishers)
        clone.subscribers = list(self.subscribers)
        return clone

    def to_JSON_object(self):
        """Serialize this topic into a JSON-compatible dictionary."""
        return {
            "uid": str(id(self)),
            "name": self.id,
            "type": self.type,
            "conditions": [c.to_JSON_object() for c in self.conditions],
            "publishers": [p.node.rosname.full for p in self.publishers],
            "subscribers": [p.node.rosname.full for p in self.subscribers],
            "traceability": [l.to_JSON_object() for l in self.traceability()]
        }

    def _get_conditions(self):
        # NOTE(review): after the loop this reads only the *last* link's
        # node conditions, and raises NameError when there are no links
        # at all; original behaviour preserved as-is.
        all_conditional = True
        conditions = []
        for link in self.publishers + self.subscribers:
            if not link.node.conditions:
                all_conditional = False
            if not link.conditions:
                return []
            conditions.extend(link.conditions)
        if all_conditional:
            conditions.extend(link.node.conditions)
        return conditions
class Service(Resource):
    """A ROS service in the computation graph."""
    def __init__(self, config, rosname, message_type = None, conditions = None):
        Resource.__init__(self, config, rosname, conditions = conditions)
        self.type = message_type
        # A service has at most one server link and any number of clients.
        self.server = None
        self.clients = []
    @property
    def is_disconnected(self):
        # Has a server without clients, or clients without a server.
        s = 1 if not self.server is None else 0
        c = len(self.clients)
        return s + c > 0 and (s == 0 or c == 0)
    @property
    def servers(self):
        # Tuple view over the (optional) single server link, for
        # interface symmetry with Topic.publishers/subscribers.
        if self.server:
            return (self.server,)
        return ()
    @property
    def resource_type(self):
        return "service"
    def traceability(self):
        """Source locations of the server and client links."""
        sl = []
        if not self.server is None:
            if not self.server.source_location is None:
                sl.append(self.server.source_location)
        for p in self.clients:
            if not p.source_location is None:
                sl.append(p.source_location)
        return sl
    def remap(self, rosname):
        """Return a shallow copy of this service under a new ROS name."""
        new = Service(self.configuration, rosname, message_type = self.type,
                      conditions = list(self.conditions))
        # BUG FIX: previously this assigned `self.servers` (the tuple
        # view), so copies had a tuple in `server`, breaking the
        # `server is None` checks and `.node` access elsewhere.
        new.server = self.server
        new.clients = list(self.clients)
        return new
    def to_JSON_object(self):
        """Serialize this service into a JSON-compatible dictionary."""
        return {
            "uid": str(id(self)),
            "name": self.id,
            "type": self.type,
            "conditions": [c.to_JSON_object() for c in self.conditions],
            "servers": ([self.server.node.rosname.full]
                        if not self.server is None else []),
            "clients": [p.node.rosname.full for p in self.clients],
            "traceability": [l.to_JSON_object() for l in self.traceability()]
        }
    def _get_conditions(self):
        # NOTE(review): after the loop this reads only the *last* link's
        # node conditions; behaviour kept identical to Topic's version.
        conditional = True
        conditions = []
        for links in (self.servers, self.clients):
            for link in links:
                if not link.node.conditions:
                    conditional = False
                if not link.conditions:
                    return []
                conditions.extend(link.conditions)
        if conditional:
            conditions.extend(link.node.conditions)
        return conditions
class Parameter(Resource):
    """A ROS parameter in the computation graph."""
    def __init__(self, config, rosname, ptype, value,
                 node_scope = False, launch = None, conditions = None):
        # :param ptype: declared parameter type; inferred from `value`
        #     when falsy
        # :param node_scope: whether the parameter is private to a node
        # :param launch: launch file that sets this parameter, if any
        Resource.__init__(self, config, rosname, conditions = conditions)
        self.type = ptype or Parameter.type_of(value)
        self.value = value
        self.node_scope = node_scope
        self.reads = []
        self.writes = []
        self.launch = launch
    @staticmethod
    def type_of(value):
        """Map a Python value to its ROS parameter type name."""
        if value is None:
            return None
        # BUG FIX: bool must be tested before int — in Python, bool is
        # a subclass of int, so the old order reported booleans as "int".
        if isinstance(value, bool):
            return "boolean"
        if isinstance(value, int):
            return "int"
        if isinstance(value, float):
            return "double"
        if isinstance(value, basestring):
            return "string"
        return "yaml"
    @property
    def resource_type(self):
        return "param"
    def traceability(self):
        """Source locations: the launch file plus all read/write links."""
        sl = []
        if not self.launch is None:
            sl.append(self.launch.location)
        for p in self.reads:
            if not p.source_location is None:
                sl.append(p.source_location)
        for p in self.writes:
            if not p.source_location is None:
                sl.append(p.source_location)
        return sl
    def remap(self, rosname):
        """Return a shallow copy of this parameter under a new ROS name.

        NOTE(review): the copy does not carry `launch` over (it becomes
        None) — confirm this is intentional before changing.
        """
        new = Parameter(self.configuration, rosname, self.type,
                        self.value, node_scope = self.node_scope,
                        conditions = list(self.conditions))
        new.reads = list(self.reads)
        new.writes = list(self.writes)
        return new
    def to_JSON_object(self):
        """Serialize this parameter into a JSON-compatible dictionary."""
        return {
            "uid": str(id(self)),
            "name": self.id,
            "type": self.type,
            "value": self.value,
            "conditions": [c.to_JSON_object() for c in self.conditions],
            "reads": [p.node.rosname.full for p in self.reads],
            "writes": [p.node.rosname.full for p in self.writes],
            "traceability": [l.to_JSON_object() for l in self.traceability()]
        }
class ResourceCollection(object):
    """Indexed collection of Resource objects.

    Keeps every added resource (including duplicates by name) and
    maintains auxiliary views: enabled, conditional and unresolved
    resources, plus a per-name counter used to detect collisions.
    """
    def __init__(self, iterable):
        self.all = []
        self.enabled = []
        self.unresolved = []
        self.conditional = []
        self.counter = Counter()
        for resource in (iterable or ()):
            self.add(resource)

    def __len__(self):
        return len(self.all)

    def __getitem__(self, item):
        return self.all[item]

    def __contains__(self, key):
        return key in self.counter

    def __iter__(self):
        return iter(self.all)

    def get(self, name, conditional = True):
        """Return the most recently added resource with id `name`.

        When `conditional` is False, resources that only exist under
        unresolved launch conditions are skipped. Returns None when
        nothing matches.
        """
        for resource in reversed(self.all):
            if resource.id == name and (conditional
                                        or not resource.conditions):
                return resource
        return None

    def get_all(self, name, conditional = True):
        """Return every resource with id `name`, in insertion order."""
        return [r for r in self.all
                if r.id == name and (conditional or not r.conditions)]

    def get_collisions(self):
        # Number of resources whose name repeats an earlier one.
        return len(self.all) - len(self.counter)

    def add(self, resource):
        """Register `resource`; return how many with the same id existed."""
        self.all.append(resource)
        bucket = self.conditional if resource.conditions else self.enabled
        bucket.append(resource)
        if "?" in resource.id:
            self.unresolved.append(resource)
        previous = self.counter[resource.id]
        self.counter[resource.id] += 1
        return previous
class Configuration(MetamodelObject):
    """A configuration is more or less equivalent to an application.
    It is the result of a set of launch files,
    plus environment, parameters, etc.
    """
    def __init__(self, name, env = None, nodes = None,
                 topics = None, services = None, parameters = None):
        # `nodes`, `topics`, `services` and `parameters` are optional
        # iterables of the respective Resource subclasses.
        self.id = "configuration:" + name
        self.name = name
        # Launch files composing this configuration.
        self.roslaunch = []
        self.environment = env if not env is None else {}
        self.nodes = ResourceCollection(nodes)
        self.topics = ResourceCollection(topics)
        self.services = ResourceCollection(services)
        self.parameters = ResourceCollection(parameters)
        self.dependencies = DependencySet()
    @property
    def location(self):
        return RuntimeLocation(self)
    def get_collisions(self):
        # Total duplicate names across all resource kinds combined
        # (the counters are merged before counting repeats).
        counter = Counter()
        counter += self.nodes.counter
        counter += self.topics.counter
        counter += self.services.counter
        counter += self.parameters.counter
        return sum(counter.values()) - len(counter)
    def get_remaps(self):
        # Number of distinct (source, target) remap pairs over all nodes.
        # NOTE: dict.viewitems is Python 2 only.
        unique = set()
        for node in self.nodes:
            unique.update(node.remaps.viewitems())
        return len(unique)
    def get_unresolved(self):
        # Total resources whose name contains an unresolved "?" part.
        return (len(self.nodes.unresolved) + len(self.topics.unresolved)
                + len(self.services.unresolved)
                + len(self.parameters.unresolved))
    def get_conditional(self):
        # FIXME len(self.topics.conditional) does not always work
        n = 0
        for c in (self.nodes, self.topics, self.services, self.parameters):
            for r in c:
                if r.conditions:
                    n += 1
        return n
    def to_JSON_object(self):
        """Serialize the whole configuration, flattening all node links
        into a single "links" section."""
        publishers = []
        subscribers = []
        servers = []
        clients = []
        reads = []
        writes = []
        for node in self.nodes:
            publishers.extend(p.to_JSON_object() for p in node.publishers)
            subscribers.extend(p.to_JSON_object() for p in node.subscribers)
            servers.extend(p.to_JSON_object() for p in node.servers)
            clients.extend(p.to_JSON_object() for p in node.clients)
            reads.extend(p.to_JSON_object() for p in node.reads)
            writes.extend(p.to_JSON_object() for p in node.writes)
        return {
            "id": self.name,
            "launch": [f.to_JSON_object() for f in self.roslaunch],
            "collisions": self.get_collisions(),
            "remaps": self.get_remaps(),
            "dependencies": list(self.dependencies.packages),
            # NOTE(review): serializes dependencies.environment, not
            # self.environment — confirm this is intentional.
            "environment": list(self.dependencies.environment),
            "nodes": [n.to_JSON_object() for n in self.nodes],
            "topics": [t.to_JSON_object() for t in self.topics],
            "services": [s.to_JSON_object() for s in self.services],
            "parameters": [p.to_JSON_object() for p in self.parameters],
            "links": {
                "publishers": publishers,
                "subscribers": subscribers,
                "servers": servers,
                "clients": clients,
                "reads": reads,
                "writes": writes
            }
        }
    def __repr__(self):
        return self.__str__()
    def __str__(self):
        return "Configuration " + self.name
###############################################################################
# ROS Runtime Analysis Properties
###############################################################################
class RosPrimitive(MetamodelObject):
    """Base class for links between a node instance and a graph resource.

    Records the name as written in the code (before remappings), the
    launch conditions under which the link exists, and the source
    location it was extracted from.
    """
    def __init__(self, node, rosname, conditions = None, location = None):
        self.node = node
        self.rosname = rosname  # before remappings
        self.conditions = [] if conditions is None else conditions
        self.source_location = location

    @property
    def location(self):
        return self.node.location

    @property
    def configuration(self):
        return self.node.configuration

    def to_JSON_object(self):
        """Serialize the common link fields into a dictionary."""
        loc = self.source_location
        return {
            "node": self.node.rosname.full,
            "node_uid": str(id(self.node)),
            "name": self.rosname.full,
            "location": loc.to_JSON_object() if loc else None,
            "conditions": [c.to_JSON_object() for c in self.conditions]
        }
class TopicPrimitive(RosPrimitive):
    """Link between a node instance and a topic."""
    def __init__(self, node, topic, message_type, rosname, queue_size,
                 conditions = None, location = None):
        RosPrimitive.__init__(self, node, rosname, conditions = conditions,
                              location = location)
        self.topic = topic
        self.type = message_type
        self.queue_size = queue_size

    @property
    def topic_name(self):
        return self.topic.rosname.full

    def to_JSON_object(self):
        data = RosPrimitive.to_JSON_object(self)
        data.update(topic = self.topic_name,
                    topic_uid = str(id(self.topic)),
                    type = self.type, queue = self.queue_size)
        return data

    def __repr__(self):
        return str(self)

    def __str__(self):
        return "Link of node '{}' to topic '{}' of type '{}'".format(
            self.node.id, self.topic.id, self.type)
class PublishLink(TopicPrimitive):
    """Advertise link: a node instance publishing on a topic."""
    @classmethod
    def link(cls, node, topic, message_type, rosname, queue_size,
             conditions = None, location = None):
        """Create the link and register it on both endpoints."""
        instance = cls(node, topic, message_type, rosname, queue_size,
                       conditions = conditions, location = location)
        node.publishers.append(instance)
        topic.publishers.append(instance)
        return instance

    def __str__(self):
        return "Publication of node '{}' to topic '{}' of type '{}'".format(
            self.node.id, self.topic.id, self.type)
class SubscribeLink(TopicPrimitive):
    """Subscribe link: a node instance subscribing to a topic."""
    @classmethod
    def link(cls, node, topic, message_type, rosname, queue_size,
             conditions = None, location = None):
        """Create the link and register it on both endpoints."""
        instance = cls(node, topic, message_type, rosname, queue_size,
                       conditions = conditions, location = location)
        node.subscribers.append(instance)
        topic.subscribers.append(instance)
        return instance

    def __str__(self):
        return "Subscription of node '{}' to topic '{}' of type '{}'".format(
            self.node.id, self.topic.id, self.type)
class ServicePrimitive(RosPrimitive):
    """Link between a node instance and a service."""
    def __init__(self, node, service, message_type, rosname,
                 conditions = None, location = None):
        RosPrimitive.__init__(self, node, rosname, conditions = conditions,
                              location = location)
        self.service = service
        self.type = message_type

    @property
    def topic_name(self):
        # NOTE(review): named `topic_name` for symmetry with
        # TopicPrimitive, although it returns the service name.
        return self.service.rosname.full

    def to_JSON_object(self):
        data = RosPrimitive.to_JSON_object(self)
        data.update(service = self.topic_name,
                    service_uid = str(id(self.service)),
                    type = self.type)
        return data

    def __repr__(self):
        return str(self)

    def __str__(self):
        return "SrvCli({}, {}, {})".format(self.node.id, self.service.id,
                                           self.type)
class ServiceLink(ServicePrimitive):
    """Server link: a node instance providing a ROS service."""
    @classmethod
    def link(cls, node, service, message_type, rosname, conditions = None,
             location = None):
        """Create the link and register it on both endpoints.

        A service has a single server, so this overwrites any previous
        server link on `service`.
        """
        instance = cls(node, service, message_type, rosname,
                       conditions = conditions, location = location)
        node.servers.append(instance)
        service.server = instance
        return instance

    def __str__(self):
        return "Service({}, {}, {})".format(self.node.id, self.service.id,
                                            self.type)
class ClientLink(ServicePrimitive):
    """Client link: a node instance calling a ROS service."""
    @classmethod
    def link(cls, node, service, message_type, rosname, conditions = None,
             location = None):
        """Create the link and register it on both endpoints."""
        instance = cls(node, service, message_type, rosname,
                       conditions = conditions, location = location)
        node.clients.append(instance)
        service.clients.append(instance)
        return instance

    def __str__(self):
        return "Client({}, {}, {})".format(self.node.id, self.service.id,
                                           self.type)
class ParameterPrimitive(RosPrimitive):
    """Link between a node instance and a parameter."""
    def __init__(self, node, param, param_type, rosname, conditions = None,
                 location = None):
        RosPrimitive.__init__(self, node, rosname, conditions = conditions,
                              location = location)
        self.parameter = param
        self.type = param_type

    @property
    def param_name(self):
        return self.parameter.rosname.full

    def to_JSON_object(self):
        data = RosPrimitive.to_JSON_object(self)
        data.update(param = self.param_name,
                    param_uid = str(id(self.parameter)),
                    type = self.type)
        return data

    def __repr__(self):
        return str(self)

    def __str__(self):
        return "Param({}, {}, {})".format(self.node.id, self.parameter.id,
                                          self.type)
class ReadLink(ParameterPrimitive):
    """Read link: a node instance reading a parameter."""
    @classmethod
    def link(cls, node, param, param_type, rosname, conditions = None,
             location = None):
        """Create the link and register it on both endpoints."""
        instance = cls(node, param, param_type, rosname,
                       conditions = conditions, location = location)
        node.reads.append(instance)
        param.reads.append(instance)
        return instance

    def __str__(self):
        return "Read({}, {}, {})".format(self.node.id, self.parameter.id,
                                         self.type)
class WriteLink(ParameterPrimitive):
    """Write link: a node instance writing a parameter."""
    @classmethod
    def link(cls, node, param, param_type, rosname, conditions = None,
             location = None):
        """Create the link and register it on both endpoints."""
        instance = cls(node, param, param_type, rosname,
                       conditions = conditions, location = location)
        node.writes.append(instance)
        param.writes.append(instance)
        return instance

    def __str__(self):
        return "Write({}, {}, {})".format(self.node.id, self.parameter.id,
                                          self.type)
###############################################################################
# Helper Functions
###############################################################################
def _cpp_ignore_line(line):
return "// haros:ignore-line" in line
def _cpp_ignore_next_line(line):
return "// haros:ignore-next-line" in line
def _py_ignore_line(line):
return "# haros:ignore-line" in line
def _py_ignore_next_line(line):
return "# haros:ignore-next-line" in line
def _no_parser(line):
    """Fallback checker that never flags a line to be ignored."""
    return False
###############################################################################
# Test Functions
###############################################################################
def test_rosname():
    """Smoke tests for RosName resolution, equality and hashing."""
    n1 = RosName("a")
    n2 = RosName("a", "ns")
    assert n1 != n2
    # RosName compares equal to plain strings, in both directions.
    assert n1 == "/a"
    assert "/a" == n1
    assert n2 == "ns/a"
    assert "ns/a" == n2
    n1 = RosName("a", "ns")
    assert n1 == n2
    n1 = RosName("a")
    n2 = RosName("a")
    assert n1 == n2
    # Private names ("~") resolve against the private namespace.
    n1 = RosName("~a", "ns", "priv")
    n2 = RosName("a", "ns")
    assert n1 != n2
    assert n1 == "priv/a"
    n = Node("base", Package("pkg"), rosname = RosName("base"))
    assert n.rosname == "/base"
if __name__ == "__main__":
    # Run the module's smoke tests when executed directly.
    test_rosname()
| 32.433975 | 80 | 0.554687 |
a963424cd3da033f00bffe2b364c607cc2e2a1b8 | 265 | py | Python | tests/test_array.py | IamGianluca/hackerrank | 6f58451ebf8726d88b19c6aab0368d9b704666cd | [
"MIT"
] | null | null | null | tests/test_array.py | IamGianluca/hackerrank | 6f58451ebf8726d88b19c6aab0368d9b704666cd | [
"MIT"
] | null | null | null | tests/test_array.py | IamGianluca/hackerrank | 6f58451ebf8726d88b19c6aab0368d9b704666cd | [
"MIT"
] | null | null | null | import pytest
from hrank.data_structures.arrays.arrays import reverse_array
@pytest.mark.parametrize('arr,expected', [
    ([1, 2, 3, 10], [10, 3, 2, 1]),
    ([0, 0, -1, 5], [5, -1, 0, 0]),
])
def test_reverse_array(arr, expected):
    """reverse_array must return the elements in reverse order."""
    assert reverse_array(arr) == expected
| 22.083333 | 61 | 0.675472 |
c2440a44857869e872bd4dced23e6a32335bf637 | 1,429 | py | Python | Algorithms/Python/OS/priority-premtive.py | ANUBHAVNATANI/Data-Strucures | fd57031b7b4c82b3ef734d26709d2066b6630f81 | [
"MIT"
] | 4 | 2018-03-15T20:54:44.000Z | 2018-04-15T10:09:33.000Z | Algorithms/Python/OS/priority-premtive.py | ANUBHAVNATANI/Data-Structures-And-Algorithms | fd57031b7b4c82b3ef734d26709d2066b6630f81 | [
"MIT"
] | null | null | null | Algorithms/Python/OS/priority-premtive.py | ANUBHAVNATANI/Data-Structures-And-Algorithms | fd57031b7b4c82b3ef734d26709d2066b6630f81 | [
"MIT"
] | 5 | 2018-07-30T07:37:46.000Z | 2020-10-03T09:57:23.000Z | # program for premtive priority
# Simulation of preemptive priority scheduling
# (lower priority value = higher priority; one time unit per tick).
n_p = 5  # number of processes
brust_time = [7, 3, 2, 10, 8]  # burst time per process ("brust" [sic])
priority = [2, 3, 4, 1, 5]  # priority per process
arr_time = [0, 2, 5, 6, 7]  # arrival time per process
turn_around_time = [0, 0, 0, 0, 0]
waiting_time = [0, 0, 0, 0, 0]
c = [7, 3, 2, 10, 8]  # remaining burst time per process
time_stamp = 0  # simulated clock
curr = 6  # current priority threshold
ij = -1  # NOTE(review): never used afterwards — dead variable
def ifless(a, curr, t):
    # Return the index of the best-priority runnable process at time t,
    # or -1 when none is runnable. `a` is the priority list.
    # NOTE(review): mutates the loop index `i` and `curr` locally and
    # reads the module globals arr_time/c; logic left as-is.
    s = []
    i = 0
    # print("time"+str(t))
    while(i < len(a)):
        # print(c)
        if(a[i] <= curr and arr_time[i] <= t):
            if(c[i] <= 0):
                # Candidate already finished: rescan all processes,
                # widening the threshold when none is runnable.
                for i in range(0, len(a)):
                    ini = 0
                    if(a[i] <= curr and arr_time[i] <= t and c[i] > 0):
                        s.append(a[i])
                        ini = ini+1
                    if(ini == 0):
                        curr = curr+1
                        i = -1
            else:
                s.append(a[i])
        i = i+1
    if(len(s) >= 1):
        # Lowest priority value wins.
        return a.index(min(s))
    else:
        return -1
def checkWait(time_stamp, i):
    # Whether process i has arrived and still has remaining burst time.
    if(arr_time[i] < time_stamp and c[i] > 0):
        return True
    return False
while(sum(c) > 0):
    # Run the best-priority runnable process for one tick; preemption is
    # re-evaluated on every iteration.
    ii = ifless(priority, curr, time_stamp)
    # print(ii)
    curr = priority[ii]
    c[ii] = c[ii]-1
    time_stamp = time_stamp+1
    # Every other arrived, unfinished process accumulates waiting time.
    for i in range(0, n_p):
        if(i != ii):
            if(checkWait(time_stamp, i)):
                waiting_time[i] = waiting_time[i]+1
# turnaround = burst + waiting, per process.
for i in range(0, n_p):
    turn_around_time[i] = brust_time[i]+waiting_time[i]
print(waiting_time)
print(turn_around_time)
5a9a1ea43f50307f32f9250ba8fcc1225a93b89d | 324 | py | Python | contrib/capitalone_dataprofiler_expectations/setup.py | arunnthevapalan/great_expectations | 97f1481bcd1c3f4d8878c6f383f4e6f008b20cd1 | [
"Apache-2.0"
] | 1 | 2022-03-16T22:09:49.000Z | 2022-03-16T22:09:49.000Z | contrib/capitalone_dataprofiler_expectations/setup.py | draev/great_expectations | 317e15ee7e50f6e0d537b62154177440f33b795d | [
"Apache-2.0"
] | null | null | null | contrib/capitalone_dataprofiler_expectations/setup.py | draev/great_expectations | 317e15ee7e50f6e0d537b62154177440f33b795d | [
"Apache-2.0"
] | 1 | 2022-03-03T16:47:32.000Z | 2022-03-03T16:47:32.000Z | from typing import List
import setuptools
def get_requirements() -> List[str]:
with open("requirements.txt") as f:
requirements = f.read().splitlines()
return requirements
setuptools.setup(
name="capitalone_dataprofiler_expectations",
version="0.1.0",
install_requires=get_requirements(),
)
| 19.058824 | 48 | 0.709877 |
fff5732baad18e290e56b65403f3e8525b4ef105 | 12,738 | py | Python | tests/unit/modules/test_cmdmod.py | amaclean199/salt | 8aaac011b4616e3c9e74a1daafb4a2146a5a430f | [
"Apache-2.0"
] | null | null | null | tests/unit/modules/test_cmdmod.py | amaclean199/salt | 8aaac011b4616e3c9e74a1daafb4a2146a5a430f | [
"Apache-2.0"
] | null | null | null | tests/unit/modules/test_cmdmod.py | amaclean199/salt | 8aaac011b4616e3c9e74a1daafb4a2146a5a430f | [
"Apache-2.0"
] | 1 | 2019-06-10T17:42:31.000Z | 2019-06-10T17:42:31.000Z | # -*- coding: utf-8 -*-
'''
:codeauthor: :email:`Nicole Thomas <nicole@saltstack.com>`
'''
# Import python libs
from __future__ import absolute_import, print_function, unicode_literals
import os
import sys
import tempfile
# Import Salt Libs
import salt.utils.platform
import salt.modules.cmdmod as cmdmod
from salt.exceptions import CommandExecutionError
from salt.log import LOG_LEVELS
# Import Salt Testing Libs
from tests.support.mixins import LoaderModuleMockMixin
from tests.support.unit import TestCase, skipIf
from tests.support.mock import (
mock_open,
Mock,
MagicMock,
NO_MOCK,
NO_MOCK_REASON,
patch
)
# Placeholder shell path used by the mocked cmdmod tests below.
DEFAULT_SHELL = 'foo/bar'
# Mimics the contents of /etc/shells for tests that read it.
MOCK_SHELL_FILE = '# List of acceptable shells\n' \
                  '\n'\
                  '/bin/bash\n'
@skipIf(NO_MOCK, NO_MOCK_REASON)
class CMDMODTestCase(TestCase, LoaderModuleMockMixin):
'''
Unit tests for the salt.modules.cmdmod module
'''
def setup_loader_modules(self):
return {cmdmod: {}}
@classmethod
def setUpClass(cls):
cls.mock_loglevels = {'info': 'foo', 'all': 'bar', 'critical': 'bar',
'trace': 'bar', 'garbage': 'bar', 'error': 'bar',
'debug': 'bar', 'warning': 'bar', 'quiet': 'bar'}
@classmethod
def tearDownClass(cls):
del cls.mock_loglevels
def test_render_cmd_no_template(self):
'''
Tests return when template=None
'''
self.assertEqual(cmdmod._render_cmd('foo', 'bar', None),
('foo', 'bar'))
def test_render_cmd_unavailable_engine(self):
'''
Tests CommandExecutionError raised when template isn't in the
template registry
'''
self.assertRaises(CommandExecutionError,
cmdmod._render_cmd,
'boo', 'bar', 'baz')
def test_check_loglevel_bad_level(self):
'''
Tests return of providing an invalid loglevel option
'''
with patch.dict(LOG_LEVELS, self.mock_loglevels):
self.assertEqual(cmdmod._check_loglevel(level='bad_loglevel'), 'foo')
def test_check_loglevel_bad_level_not_str(self):
'''
Tests the return of providing an invalid loglevel option that is not a string
'''
with patch.dict(LOG_LEVELS, self.mock_loglevels):
self.assertEqual(cmdmod._check_loglevel(level=1000), 'foo')
def test_check_loglevel_quiet(self):
'''
Tests the return of providing a loglevel of 'quiet'
'''
with patch.dict(LOG_LEVELS, self.mock_loglevels):
self.assertEqual(cmdmod._check_loglevel(level='quiet'), None)
def test_parse_env_not_env(self):
'''
Tests the return of an env that is not an env
'''
self.assertEqual(cmdmod._parse_env(None), {})
def test_parse_env_list(self):
'''
Tests the return of an env that is a list
'''
ret = {'foo': None, 'bar': None}
self.assertEqual(ret, cmdmod._parse_env(['foo', 'bar']))
def test_parse_env_dict(self):
'''
Test the return of an env that is not a dict
'''
self.assertEqual(cmdmod._parse_env('test'), {})
def test_run_shell_is_not_file(self):
'''
Tests error raised when shell is not available after _is_valid_shell error msg
and os.path.isfile returns False
'''
with patch('salt.modules.cmdmod._is_valid_shell', MagicMock(return_value=True)):
with patch('salt.utils.platform.is_windows', MagicMock(return_value=False)):
with patch('os.path.isfile', MagicMock(return_value=False)):
self.assertRaises(CommandExecutionError, cmdmod._run, 'foo', 'bar')
def test_run_shell_file_no_access(self):
'''
Tests error raised when shell is not available after _is_valid_shell error msg,
os.path.isfile returns True, but os.access returns False
'''
with patch('salt.modules.cmdmod._is_valid_shell', MagicMock(return_value=True)):
with patch('salt.utils.platform.is_windows', MagicMock(return_value=False)):
with patch('os.path.isfile', MagicMock(return_value=True)):
with patch('os.access', MagicMock(return_value=False)):
self.assertRaises(CommandExecutionError, cmdmod._run, 'foo', 'bar')
def test_run_runas_with_windows(self):
'''
Tests error raised when runas is passed on windows
'''
with patch('salt.modules.cmdmod._is_valid_shell', MagicMock(return_value=True)):
with patch('salt.utils.platform.is_windows', MagicMock(return_value=True)):
with patch.dict(cmdmod.__grains__, {'os': 'fake_os'}):
self.assertRaises(CommandExecutionError,
cmdmod._run,
'foo', 'bar', runas='baz')
def test_run_user_not_available(self):
'''
Tests return when runas user is not available
'''
with patch('salt.modules.cmdmod._is_valid_shell', MagicMock(return_value=True)):
with patch('os.path.isfile', MagicMock(return_value=True)):
with patch('os.access', MagicMock(return_value=True)):
self.assertRaises(CommandExecutionError, cmdmod._run, 'foo', 'bar', runas='baz')
def test_run_zero_umask(self):
'''
Tests error raised when umask is set to zero
'''
with patch('salt.modules.cmdmod._is_valid_shell', MagicMock(return_value=True)):
with patch('salt.utils.platform.is_windows', MagicMock(return_value=False)):
with patch('os.path.isfile', MagicMock(return_value=True)):
with patch('os.access', MagicMock(return_value=True)):
self.assertRaises(CommandExecutionError, cmdmod._run, 'foo', 'bar', umask=0)
def test_run_invalid_umask(self):
'''
Tests error raised when an invalid umask is given
'''
with patch('salt.modules.cmdmod._is_valid_shell', MagicMock(return_value=True)):
with patch('salt.utils.platform.is_windows', MagicMock(return_value=False)):
with patch('os.path.isfile', MagicMock(return_value=True)):
with patch('os.access', MagicMock(return_value=True)):
self.assertRaises(CommandExecutionError, cmdmod._run, 'foo', 'bar', umask='baz')
    def test_run_invalid_cwd_not_abs_path(self):
        '''
        Tests error raised when cwd is not an absolute path
        '''
        # NOTE(review): no ``cwd`` argument is actually passed to cmdmod._run
        # below, so the CommandExecutionError is presumably raised for a
        # different reason than the docstring states -- confirm whether this
        # call should pass e.g. ``cwd='invalid'``.
        with patch('salt.modules.cmdmod._is_valid_shell', MagicMock(return_value=True)):
            with patch('salt.utils.platform.is_windows', MagicMock(return_value=False)):
                with patch('os.path.isfile', MagicMock(return_value=True)):
                    with patch('os.access', MagicMock(return_value=True)):
                        self.assertRaises(CommandExecutionError, cmdmod._run, 'foo', 'bar')
    def test_run_invalid_cwd_not_dir(self):
        '''
        Tests error raised when cwd is not a dir
        '''
        # NOTE(review): os.path.isabs is patched to True, but no ``cwd``
        # argument is passed to cmdmod._run, so the "cwd is not a dir" branch
        # may never be exercised -- confirm whether ``cwd=...`` is missing.
        with patch('salt.modules.cmdmod._is_valid_shell', MagicMock(return_value=True)):
            with patch('salt.utils.platform.is_windows', MagicMock(return_value=False)):
                with patch('os.path.isfile', MagicMock(return_value=True)):
                    with patch('os.access', MagicMock(return_value=True)):
                        with patch('os.path.isabs', MagicMock(return_value=True)):
                            self.assertRaises(CommandExecutionError, cmdmod._run, 'foo', 'bar')
def test_run_no_vt_os_error(self):
'''
Tests error raised when not useing vt and OSError is provided
'''
with patch('salt.modules.cmdmod._is_valid_shell', MagicMock(return_value=True)):
with patch('salt.utils.platform.is_windows', MagicMock(return_value=False)):
with patch('os.path.isfile', MagicMock(return_value=True)):
with patch('os.access', MagicMock(return_value=True)):
with patch('salt.utils.timed_subprocess.TimedProc', MagicMock(side_effect=OSError)):
self.assertRaises(CommandExecutionError, cmdmod._run, 'foo')
def test_run_no_vt_io_error(self):
'''
Tests error raised when not useing vt and IOError is provided
'''
with patch('salt.modules.cmdmod._is_valid_shell', MagicMock(return_value=True)):
with patch('salt.utils.platform.is_windows', MagicMock(return_value=False)):
with patch('os.path.isfile', MagicMock(return_value=True)):
with patch('os.access', MagicMock(return_value=True)):
with patch('salt.utils.timed_subprocess.TimedProc', MagicMock(side_effect=IOError)):
self.assertRaises(CommandExecutionError, cmdmod._run, 'foo')
@skipIf(salt.utils.platform.is_windows(), 'Do not run on Windows')
def test_run(self):
'''
Tests end result when a command is not found
'''
with patch('salt.modules.cmdmod._is_valid_shell', MagicMock(return_value=True)):
with patch('salt.utils.platform.is_windows', MagicMock(return_value=False)):
with patch('os.path.isfile', MagicMock(return_value=True)):
with patch('os.access', MagicMock(return_value=True)):
ret = cmdmod._run('foo', cwd=os.getcwd(), use_vt=True).get('stderr')
self.assertIn('foo', ret)
def test_is_valid_shell_windows(self):
'''
Tests return if running on windows
'''
with patch('salt.utils.platform.is_windows', MagicMock(return_value=True)):
self.assertTrue(cmdmod._is_valid_shell('foo'))
@skipIf(salt.utils.platform.is_windows(), 'Do not run on Windows')
def test_is_valid_shell_none(self):
'''
Tests return of when os.path.exists(/etc/shells) isn't available
'''
with patch('os.path.exists', MagicMock(return_value=False)):
self.assertIsNone(cmdmod._is_valid_shell('foo'))
def test_is_valid_shell_available(self):
'''
Tests return when provided shell is available
'''
with patch('os.path.exists', MagicMock(return_value=True)):
with patch('salt.utils.files.fopen', mock_open(read_data=MOCK_SHELL_FILE)):
self.assertTrue(cmdmod._is_valid_shell('/bin/bash'))
@skipIf(salt.utils.platform.is_windows(), 'Do not run on Windows')
def test_is_valid_shell_unavailable(self):
'''
Tests return when provided shell is not available
'''
with patch('os.path.exists', MagicMock(return_value=True)):
with patch('salt.utils.files.fopen', mock_open(read_data=MOCK_SHELL_FILE)):
self.assertFalse(cmdmod._is_valid_shell('foo'))
    @skipIf(salt.utils.platform.is_windows(), 'Do not run on Windows')
    def test_os_environment_remains_intact(self):
        '''
        Make sure the OS environment is not tainted after running a command
        that specifies runas.
        '''
        with patch('pwd.getpwnam') as getpwnam_mock:
            with patch('subprocess.Popen') as popen_mock:
                # Snapshot the environment BEFORE the run so we can compare
                # against a second snapshot afterwards.
                environment = os.environ.copy()
                popen_mock.return_value = Mock(
                    communicate=lambda *args, **kwags: ['{}', None],
                    pid=lambda: 1,
                    retcode=0
                )
                with patch.dict(cmdmod.__grains__, {'os': 'Darwin', 'os_family': 'Solaris'}):
                    # Pick a shell known to exist on the respective platform.
                    if sys.platform.startswith(('freebsd', 'openbsd')):
                        shell = '/bin/sh'
                    else:
                        shell = '/bin/bash'
                    cmdmod._run('ls',
                                cwd=tempfile.gettempdir(),
                                runas='foobar',
                                shell=shell)
                    # The run must not have mutated os.environ.
                    environment2 = os.environ.copy()
                    self.assertEqual(environment, environment2)
                # The runas user lookup must have happened via pwd.getpwnam.
                getpwnam_mock.assert_called_with('foobar')
def test_run_cwd_doesnt_exist_issue_7154(self):
'''
cmd.run should fail and raise
salt.exceptions.CommandExecutionError if the cwd dir does not
exist
'''
cmd = 'echo OHAI'
cwd = '/path/to/nowhere'
try:
cmdmod.run_all(cmd, cwd=cwd)
except CommandExecutionError:
pass
else:
raise RuntimeError
| 41.627451 | 108 | 0.605825 |
39740a59fb05d2a5c9bac9cbc0e37075774eeba0 | 1,174 | py | Python | ios_behave/steps/calc.py | jcnav/toolium-examples | 6e5429a6fe532407856103f655435e964fa89da9 | [
"Apache-2.0"
] | 9 | 2016-01-21T13:57:39.000Z | 2020-12-21T07:28:17.000Z | ios_behave/steps/calc.py | jcnav/toolium-examples | 6e5429a6fe532407856103f655435e964fa89da9 | [
"Apache-2.0"
] | 5 | 2017-09-12T09:00:38.000Z | 2022-01-19T13:08:33.000Z | ios_behave/steps/calc.py | jcnav/toolium-examples | 6e5429a6fe532407856103f655435e964fa89da9 | [
"Apache-2.0"
] | 13 | 2016-05-24T07:40:13.000Z | 2021-12-06T12:35:07.000Z | # -*- coding: utf-8 -*-
u"""
Copyright 2015 Telefónica Investigación y Desarrollo, S.A.U.
This file is part of Toolium.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from behave import given, when, then
from ios_behave.pageobjects.calc import CalcPageObject
@given('the calculator is open')
def step_impl(context):
    """Open the calculator page object and store it on the behave context."""
    calc_page = CalcPageObject()
    context.current_page = calc_page
@when('the user adds {first_number} and {second_number}')
def step_impl(context, first_number, second_number):
    """Add the two captured operands using the calculator page object."""
    page = context.current_page
    page.sum(first_number, second_number)
@then('the result is {expected_result}')
def step_impl(context, expected_result):
    """Check the calculator's sum result against the expected value."""
    actual = context.current_page.get_sum_result()
    assert int(expected_result) == actual
| 31.72973 | 72 | 0.774276 |
2d2714e1cabfde76e088953d533dc15a7957831b | 15,383 | py | Python | tests/test_notifier.py | HumanCellAtlas/data-store | 6b27d0f7e0110c62b3079151708689ab5145f15b | [
"MIT"
] | 46 | 2017-03-24T15:56:09.000Z | 2021-03-15T19:49:07.000Z | tests/test_notifier.py | HumanCellAtlas/DCC | 6b27d0f7e0110c62b3079151708689ab5145f15b | [
"MIT"
] | 1,799 | 2017-04-04T17:54:28.000Z | 2020-11-19T12:30:13.000Z | tests/test_notifier.py | HumanCellAtlas/DCC | 6b27d0f7e0110c62b3079151708689ab5145f15b | [
"MIT"
] | 13 | 2017-03-27T23:49:35.000Z | 2021-01-18T07:39:49.000Z | #!/usr/bin/env python
# coding: utf-8
from collections import Counter
from http.server import HTTPServer, BaseHTTPRequestHandler
from itertools import count
import json
import logging
from math import sqrt
from socketserver import ThreadingMixIn
from typing import List, Tuple, Optional
from unittest import mock
import random
import threading
import uuid
import os
import sys
import unittest
import time
import requests
from requests_http_signature import HTTPSignatureAuth
pkg_root = os.path.abspath(os.path.join(os.path.dirname(__file__), '..')) # noqa
sys.path.insert(0, pkg_root) # noqa
import dss
from dss import Config
from dss.logging import configure_test_logging
from dss.notify.notification import Notification, attempt_header_name
from dss.notify.notifier import Notifier
from dss.util import networking
from dss.util.time import SpecificRemainingTime
from dss.util.types import JSON
from infra import testmode
logger = logging.getLogger(__name__)
def setUpModule():
    """Configure test logging with thread names and timing information.

    The tests intentionally log numerous exceptions at WARNING level, so the
    notify logger is raised to ERROR (DEBUG when DSS_DEBUG=1 for verbose logs).
    """
    log_format = "%(asctime)s %(levelname)s %(name)s %(threadName)s: %(message)s"
    configure_test_logging(format=log_format,
                           log_levels={dss.notify.__name__: (logging.ERROR, logging.DEBUG)})
class ThreadedHttpServerTestCase(unittest.TestCase):
    """Base test case that runs a threaded HTTP server for the whole class.

    The server is started on a free loopback port in setUpClass and torn down
    in tearDownClass; any exception from the serving thread is re-raised there.
    """
    server = None            # the ThreadedHTTPServer instance
    port = None              # dynamically allocated TCP port
    address = "127.0.0.1"
    server_thread = None     # thread running serve_forever()
    server_exception = None  # exception captured from the serving thread
    class ThreadedHTTPServer(ThreadingMixIn, HTTPServer):
        pass
    @classmethod
    def setUpClass(cls):
        super().setUpClass()
        # Bind to an unused port so parallel test runs don't collide.
        cls.port = networking.unused_tcp_port()
        cls.server = cls.ThreadedHTTPServer((cls.address, cls.port), PostTestHandler)
        cls.server_thread = threading.Thread(target=cls._serve)
        cls.server_thread.start()
    @classmethod
    def _serve(cls):
        # Runs in the server thread; stash any failure for tearDownClass.
        try:
            cls.server.serve_forever()
        except BaseException as e:
            cls.server_exception = e
    @classmethod
    def tearDownClass(cls):
        cls.server.shutdown()
        cls.server_thread.join()
        super().tearDownClass()
        # Surface a failure that occurred in the serving thread.
        if cls.server_exception:
            raise cls.server_exception
@testmode.standalone
class _TestNotifier(ThreadedHttpServerTestCase):
    """Abstract integration test for the Notifier against a local HTTP server.

    Subclasses set ``workers_per_queue``. Each enqueued notification instructs
    PostTestHandler (via its JSON body) how to respond per attempt, and the
    test asserts exactly the expected set of notifications is delivered.
    """
    repeats = 1            # how many times the scenario batch is enqueued
    timeout = 1.0          # per-request timeout handed to the Notifier
    overhead = 30.0        # per-attempt overhead allowance
    delays = [0.0, 1.0, 2.0]  # retry queue delays; one queue per entry
    # Must be overridden by concrete subclasses (asserted in setUp).
    workers_per_queue: Optional[int] = None
    @property
    def num_queues(self):
        return len(self.delays)
    @property
    def num_workers(self):
        return self.workers_per_queue * self.num_queues
    def setUp(self):
        # Log the PRNG state so a failing run can be reproduced exactly.
        logger.critical("Random state for test reproduction: %r", random.getstate())
        self.assertFalse(self.workers_per_queue is None, "Concrete subclasses must define workers_per_queue")
        self.notification_id = count()
        self.subscription_ids = [f"{self.__class__.__name__}-{self._testMethodName}-{time.time()}-{i}"
                                 for i in range(2)]
        PostTestHandler.reset()
        self.notifier = Notifier(deployment_stage=f'test-{uuid.uuid4()}',
                                 delays=self.delays,
                                 num_workers=self.num_workers,
                                 sqs_polling_timeout=5,
                                 timeout=self.timeout,
                                 overhead=self.overhead)
        self.notifier.deploy()
    def _estimate_running_time(self, total_attempts) -> float:
        """Rough upper bound (seconds) for delivering ``total_attempts`` attempts."""
        latency = sum(delay for delay in self.delays)
        average_overhead = self.overhead / 5  # FIXME: this is a guess
        average_timeout = self.timeout / 2  # FIXME: this is a guess
        parallelism = self.num_queues + (self.num_workers - self.num_queues) / 5  # FIXME: this is a guess
        total_time = latency + (average_timeout + average_overhead) * total_attempts / parallelism
        return total_time
    def tearDown(self):
        self.notifier.destroy()
    def test(self):
        """Enqueue a batch of scripted notifications and verify deliveries."""
        expected_receptions = set()
        expected_misses = set()
        total_attempts = 0
        def notify(expect: bool,  # whether the message should make it
                   max_attempts: Optional[int] = None,  # how many attempts to allow
                   responses: List[Tuple[float, int]] = None,  # a list of (delay, http_status) tuples, one per attempt
                   attempts=None):  # expected number of attempts, currently only used to estimate the running time
            if responses is None:
                responses = [(0.0, 200)]
            # Randomly exercise the HMAC request-signing path on ~half the messages.
            verify = random.random() > .5
            notification_id = str(next(self.notification_id))
            body: JSON = dict(notification_id=notification_id,
                              responses=responses,
                              verify=verify)
            notification = Notification.create(notification_id=notification_id,
                                               subscription_id=str(random.choice(self.subscription_ids)),
                                               url=f"http://{self.address}:{self.port}/{notification_id}",
                                               method='POST',
                                               encoding='application/json',
                                               body=body,
                                               attempts=max_attempts,
                                               hmac_key=PostTestHandler.hmac_secret_key if verify else None,
                                               hmac_key_id='1234' if verify else None)
            nonlocal total_attempts
            total_attempts += min(notification.attempts, self.num_queues) if attempts is None else attempts
            (expected_receptions if expect else expected_misses).add(notification_id)
            self.notifier.enqueue(notification)
        for repeat in range(self.repeats):
            # A notification with …
            # … zero permitted attempts will be missed.
            notify(responses=[], max_attempts=0, expect=False)
            for n in range(1, self.num_queues + 1):
                # … n attempts makes it if it succeeds the n-th time.
                notify(responses=[(0, 500)] * (n - 1) + [(0, 200)], max_attempts=n, expect=True)
                # … n attempts is missed even if it would otherwise succeed the n+1-th time.
                notify(responses=[(0, 500)] * n + [(0, 200)], max_attempts=n, expect=False)
            # A notification whose endpoint …
            # … consistently takes too long to respond, will be missed.
            notify(responses=[(self.timeout * 2, 200)], expect=False)
            # … takes too long to deliver only once will make it the second time
            notify(responses=[(self.timeout * 2, 200), (0, 200)], expect=True, attempts=2)
            # … takes a little longer to deliver will make it the first time
            notify(responses=[(self.timeout / 2, 200)], expect=True, attempts=1)
            # … takes a little longer to deliver, but then fails, will be missed
            notify(responses=[(self.timeout / 2, 500)], expect=False)
        remaining_time = SpecificRemainingTime(self._estimate_running_time(total_attempts))
        self.notifier.run(remaining_time)
        # Exactly the expected notifications arrived, none of the expected
        # misses arrived, and no notification was delivered more than once.
        actual_receptions = set(PostTestHandler.actual_receptions)
        self.assertEqual(expected_receptions, actual_receptions)
        self.assertEqual(set(), actual_receptions.intersection(expected_misses))
        self.assertEqual(len(actual_receptions), len(PostTestHandler.actual_receptions))
class TestNotifierOne(_TestNotifier):
    """Run the notifier integration test with one worker per queue."""
    workers_per_queue = 1
class TestNotifierTwo(_TestNotifier):
    """Run the notifier integration test with two workers per queue."""
    workers_per_queue = 2
del _TestNotifier  # hide the abstract base class so the test loader doesn't collect and run it directly
@testmode.standalone
class TestNotifierConfig(unittest.TestCase):
    """Tests deriving notifier configuration from DSS_NOTIFY_* env variables."""
    def test_notifier_from_config(self):
        # With all variables empty, async notification is disabled.
        with mock.patch.dict(os.environ,
                             DSS_NOTIFY_DELAYS="",
                             DSS_NOTIFY_WORKERS="",
                             DSS_NOTIFY_ATTEMPTS=""):
            self.assertFalse(Config.notification_is_async())
            self.assertEqual(Config.notification_attempts(), 0)
            self.assertEqual(Config.notification_delays(), [])
        # A single delay implies one attempt and enables async delivery.
        with mock.patch.dict(os.environ, DSS_NOTIFY_DELAYS="0"):
            self.assertTrue(Config.notification_is_async())
            self.assertEqual(Config.notification_attempts(), 1)
            self.assertEqual(Config.notification_delays(), [0])
            # An explicit attempts override of 0 wins over the delay list.
            with mock.patch.dict(os.environ, DSS_NOTIFY_ATTEMPTS="0"):
                self.assertEqual(Config.notification_attempts(), 0)
                self.assertFalse(Config.notification_is_async())
        # Delays and worker count are parsed into the Notifier instance.
        with mock.patch.dict(os.environ,
                             DSS_NOTIFY_DELAYS="3 2 1 .5",
                             DSS_NOTIFY_WORKERS="7"):
            notifier = Notifier.from_config()
            self.assertEqual([3.0, 2.0, 1.0, .5], notifier._delays)
            self.assertEqual(7, notifier._num_workers)
            self.assertTrue(Config.notification_is_async())
            self.assertEqual(Config.notification_attempts(), 4)
@testmode.standalone
class TestWorkerQueueAssignment(unittest.TestCase):
    """Statistical tests for how notifier workers are assigned to queues."""
    def _test(self, num_workers, queue_lengths):
        """Return the queue index selected by each of ``num_workers`` workers."""
        notifier = Notifier(deployment_stage='foo', delays=[0] * len(queue_lengths), num_workers=num_workers)
        return list(notifier._work_queue_indices(queue_lengths))
    def test_edge_cases(self):
        # assertEquals is a deprecated alias removed in Python 3.12.
        self.assertEqual(self._test(num_workers=1, queue_lengths=[0]), [0])
        self.assertEqual(self._test(num_workers=2, queue_lengths=[0]), [0, 0])
        self.assertIn(self._test(num_workers=1, queue_lengths=[0, 0]), ([0], [1]))
    def test_worker_surplus(self):
        """More workers than queues: surplus workers favor the longer queue."""
        num_workers = 10
        repeats = 1000
        imbalance = .5
        queue_coverage = Counter()
        for i in range(repeats):
            # N workers for two queues, one queue shorter than the other
            queue_indices = self._test(num_workers=num_workers,
                                       queue_lengths=[100, imbalance * 100])
            # Every queue should be served by at least one worker (mandatory services)
            self.assertEqual(set(queue_indices), {0, 1})
            # Count how many times each queue was served
            queue_coverage.update(queue_indices)
        # Every worker in every iteration serves exactly one queue
        self.assertEqual(sum(queue_coverage.values()), repeats * num_workers)
        # Compute the probability of the longer queue being served by a surplus worker
        service_ratio = (queue_coverage[1] - repeats) / (queue_coverage[0] - repeats)
        # The probability should be equal to the imbalance, within an epsilon
        self.assertAlmostEqual(service_ratio, imbalance, delta=.1)
    def test_worker_shortage(self):
        """Fewer workers than queues: longer queues get proportionally more coverage."""
        num_queues = 10
        num_workers = 2
        repeats = 1000
        imbalance = .5
        queue_coverage = Counter()
        for i in range(repeats):
            queue_indices = self._test(num_workers=num_workers,
                                       queue_lengths=[100] + [round(imbalance * 100)] * (num_queues - 1))
            queue_coverage.update(queue_indices)
        # Every worker in every iteration serves exactly one queue
        self.assertEqual(sum(queue_coverage.values()), repeats * num_workers)
        first_queue_coverage = queue_coverage.pop(0)
        avg = sum(c for c in queue_coverage.values()) / (num_queues - 1)
        # Since the first queue was longer by 1/imbalance compared to the other queues, it should get proportionally
        # more coverage than the average short queue (within 10% of a margin)
        self.assertAlmostEqual(avg / first_queue_coverage, imbalance, delta=.15)
        # Compute standard deviation
        sigma = sqrt(sum((c - avg) ** 2 for c in queue_coverage.values()) / (num_queues - 2))
        # The short queues' coverage should be within one standard deviation.
        # BUG FIX: the original passed a generator expression to assertTrue --
        # `assertTrue(all(...) for c in ...)` -- which is always truthy, so the
        # assertion could never fail. The `for` clause belongs inside all().
        self.assertTrue(all(abs(c - avg) <= sigma for c in queue_coverage.values()))
class PostTestHandler(BaseHTTPRequestHandler):
    """HTTP handler that simulates a notification endpoint.

    Each POST body is JSON with a per-attempt (delay, status) script; delivered
    notification IDs are recorded in the class-level ``actual_receptions``.
    """
    actual_receptions: List[str] = []
    hmac_secret_key = str(uuid.uuid4()).encode()
    @classmethod
    def reset(cls):
        # Clear recorded deliveries between tests.
        cls.actual_receptions = []
    # noinspection PyAttributeOutsideInit
    def setup(self):
        super().setup()
        # Since we're messing with the connection timing, we make sure connections aren't reused on the client-side.
        self.close_connection = True
    # Generate a response large enough to detect a hung-up connection. On macOS 1 MiB was sufficient, not so in Docker
    # containers. In case you're wondering why we're repeating a small amount of random data instead of just generating
    # the equivalent amount: the former is two orders of magnitude faster.
    #
    response_body = b''.join([os.urandom(1024)] * 1024 * 10)
    def do_POST(self):
        """Respond per the scripted (delay, status) for the current attempt."""
        length = int(self.headers['content-length'])
        attempt = int(self.headers[attempt_header_name])
        payload = json.loads(self.rfile.read(length).decode())
        verify = payload['verify']
        if verify:
            # The request must verify against the correct key ...
            HTTPSignatureAuth.verify(requests.Request("POST", self.path, self.headers),
                                     key_resolver=lambda key_id, algorithm: self.hmac_secret_key)
            # ... and must NOT verify against a wrong (reversed) key.
            try:
                HTTPSignatureAuth.verify(requests.Request("POST", self.path, self.headers),
                                         key_resolver=lambda key_id, algorithm: self.hmac_secret_key[::-1])
            except AssertionError:
                pass
            else:
                raise AssertionError("Expected AssertionError")
        responses = payload['responses']
        # Past the end of the script, keep replaying the last scripted response.
        delay, status = responses[attempt if attempt < len(responses) else -1]
        self.send_response(status)
        if delay:
            self.send_header("Content-length", len(self.response_body))
            self.end_headers()
            time.sleep(delay)
            # Write a lot of data to force the detection of a client disconnect. The connection is to the loopback
            # interface so this shouldn't matter much performance-wise. When the disconnect is detected,
            # the exceptions raised range from the expected EPIPE to the exotic 'OSError: [Errno 41] Protocol wrong
            # type for socket'. We don't care which exception is raised as long as it prevents the request being
            # recorded as a success.
            try:
                self.wfile.write(self.response_body)
                self.wfile.flush()
            except OSError:
                logger.info("An expected exception occurred while sending response to client:", exc_info=True)
                return
        else:
            self.send_header("Content-length", 0)
            self.end_headers()
        if status == 200:
            notification_id = payload['notification_id']
            logger.info("Received notification_id %s", notification_id)
            self.actual_receptions.append(notification_id)
    def log_message(self, fmt, *args):
        # Route BaseHTTPRequestHandler's stderr logging through our logger.
        logger.info("%s - - [%s] %s\n", self.address_string(), self.log_date_time_string(), fmt % args)
if __name__ == "__main__":
unittest.main()
| 44.588406 | 119 | 0.631216 |
7f892f1bd98c82a7aa9893c46869bc7401e92d1e | 5,951 | py | Python | real_trade/Algorithm_PriceDeciderByContinuousPositiveLine.py | taka-mochi/cryptocurrency-autotrading | 16677018c793d7bd3fffdcd3575aecb3535dbd04 | [
"BSD-3-Clause"
] | 3 | 2018-05-22T22:45:23.000Z | 2020-02-13T16:45:03.000Z | real_trade/Algorithm_PriceDeciderByContinuousPositiveLine.py | taka-mochi/cryptocurrency-autotrading | 16677018c793d7bd3fffdcd3575aecb3535dbd04 | [
"BSD-3-Clause"
] | null | null | null | real_trade/Algorithm_PriceDeciderByContinuousPositiveLine.py | taka-mochi/cryptocurrency-autotrading | 16677018c793d7bd3fffdcd3575aecb3535dbd04 | [
"BSD-3-Clause"
] | null | null | null | # coding: utf-8
import time
import math
from ChartBars import Chart
from Util import TimeUtil
from Algorithm_PriceDecider_CloseFixedRateAndTime_base import PriceDecider_CloseFixedRateAndTime_base
class PriceDeciderByContinuousPositiveLine(PriceDecider_CloseFixedRateAndTime_base):
    """Decide long entries after a run of consecutive positive candles.

    A buy order is placed above the latest closed bar when the last
    ``cont_positive_line_count`` bars are all positive and an optional
    moving-average slope filter passes. Closing logic (fixed rate/time)
    comes from the base class, with an optional extra loss-cut check.
    """
    def __init__(self,
                 cont_positive_line_count,
                 buy_order_up_rate,
                 close_div_rate_from_buy_value,
                 stop_loss_rate,
                 close_bar_count_to_hold,
                 do_filter_by_ma_slope,
                 filter_ma_count=0,
                 filter_ma_bar_span=0,
                 losscut_rate=None,
                 make_order_only_first_time_bar=True):
        # Base class handles the close-side parameters (rate and hold time).
        super(PriceDeciderByContinuousPositiveLine, self).__init__(close_div_rate_from_buy_value, close_bar_count_to_hold)
        self.buy_order_up_rate = buy_order_up_rate
        self.cont_positive_line_count = cont_positive_line_count
        self.stop_loss_rate = stop_loss_rate
        self.do_filter_by_ma_slope = do_filter_by_ma_slope
        self.filter_ma_count = filter_ma_count
        self.filter_ma_bar_span = filter_ma_bar_span
        # Key under which each bar stores its moving-average value, e.g. "25ma_end".
        self.filter_ma_keyname = str(filter_ma_count) + "ma_end"
        self.losscut_rate = losscut_rate
        self.make_order_only_first_time_bar = make_order_only_first_time_bar
        # Timestamp of the last bar examined; used to act only once per bar.
        self.last_checked_bar_timestamp = None
        print("parameter of %s" % str(self))
        print("cont_positive_line_count", self.cont_positive_line_count)
        print("buy_order_up_rate", self.buy_order_up_rate)
        print("stop_loss_rate", self.stop_loss_rate)
        print("do_filter_by_ma_slope", self.do_filter_by_ma_slope)
        print("self.filter_ma_count", self.filter_ma_count)
        print("filter_ma_bar_span", self.filter_ma_bar_span)
        print("losscut_rate", self.losscut_rate)
    def decide_make_position_order(self, chart):
        """Return (type, price[, stoploss]) for a new order, or (None, ...) to skip.

        Note: the order amount is NOT decided by this method.
        """
        assert ("ChartBars.Chart" in str(type(chart)))
        #assert (isinstance(chart, Chart))
        # amount is not decided by this method!!
        position_type = None
        position_price = None
        stoploss_rate = None
        last_timestamp, last_bar = chart.get_bar_from_last(with_timestamp=True)
        prev_timestamp, prev_bar = chart.get_bar_from_last(1, with_timestamp=True)
        if last_bar is None or prev_bar is None:
            # no bar
            return (None, position_price)
        # Work from the most recent CLOSED bar: the last bar if frozen,
        # otherwise the previous one.
        start_index = 0 if last_bar.is_freezed() else 1
        use_bar = last_bar if last_bar.is_freezed() else prev_bar
        use_timestamp = last_timestamp if last_bar.is_freezed() else prev_timestamp
        # if bar is not changed from last time, this alg will not make any order
        if self.make_order_only_first_time_bar and self.last_checked_bar_timestamp == use_timestamp:
            print("rejected because this bar is not first time bar")
            return (None, None)
        self.last_checked_bar_timestamp = use_timestamp
        # filter by ma direction
        if self.do_filter_by_ma_slope:
            if not self.check_filter_by_ma_dir(chart=chart,
                                               cur_bar=use_bar,
                                               cur_bar_index_from_last=start_index):
                print("rejected by ma slope filter")
                return (None, None)
        # filter by positive line continuous
        for i in range(self.cont_positive_line_count):
            bar = chart.get_bar_from_last(start_index+i)
            if bar is None:
                print("no previous bar", i)
                return (None, None)
            is_positive_line = bar.end - bar.begin > 0
            if not is_positive_line:
                print(str(i) + " prev bar is not positive bar")
                return (None, None)
        # All checks passed: place a long order slightly above the close.
        position_type = "long"
        target_value = use_bar.end * (1+self.buy_order_up_rate)
        if self.stop_loss_rate is not None:
            stoploss_rate = use_bar.end * self.stop_loss_rate
            return (position_type, target_value, stoploss_rate)
        else:
            return (position_type, target_value)
    def check_filter_by_ma_dir(self, chart, cur_bar, cur_bar_index_from_last):
        """Return True when the moving average is not sloping downwards.

        Compares the MA on ``cur_bar`` against the MA ``filter_ma_bar_span``
        bars earlier; False when either value is missing.
        """
        # get check bar
        check_ma_bar = chart.get_bar_from_last(cur_bar_index_from_last + self.filter_ma_bar_span)
        if check_ma_bar is None:
            # cannot check
            print("check ma filter: no previous bar to check ma filter")
            return False
        # get check ma value
        if self.filter_ma_keyname not in check_ma_bar.technical_values or \
           self.filter_ma_keyname not in cur_bar.technical_values:
            print("check ma filter: no techinical value of " + self.filter_ma_keyname)
            return False
        # check filter
        cur_ma = cur_bar.technical_values[self.filter_ma_keyname]
        pre_ma = check_ma_bar.technical_values[self.filter_ma_keyname]
        if cur_ma is None or pre_ma is None: return False
        if cur_ma < pre_ma:
            print("check ma filter: cur_ma < pre_ma = %f < %f" % (cur_ma, pre_ma,))
            return False
        return True
    def market_sell_decide_algorithm(self, chart, open_rate, created_time, current_time):
        """Return True when the position should be closed at market.

        Delegates to the base (fixed rate/time) check first, then applies the
        optional loss-cut threshold against the latest bar's close.
        """
        if super(PriceDeciderByContinuousPositiveLine, self).market_sell_decide_algorithm(chart, open_rate, created_time, current_time) is True:
            return True
        # check losscut rate
        if self.losscut_rate is None: return False
        losscut_price = open_rate * self.losscut_rate
        last_bar = chart.get_bar_from_last()
        if last_bar is None: return False
        # dummy losscut
        if last_bar.end < losscut_price:
            print("losscut !!! end_v/losscut_v = %f/%f" % (float(last_bar.end),float(losscut_price),))
            return True
        return False
69a5880b609c89914bfe3947f5843eae0c0ce431 | 844 | py | Python | demo/test_basic_examples.py | AnandJyrm/allure-pytest | 495a90d64690d200ca402a5dbffb96f6335d428b | [
"Apache-2.0"
] | 112 | 2017-01-24T21:37:49.000Z | 2022-03-25T22:32:12.000Z | demo/test_basic_examples.py | AnandJyrm/allure-pytest | 495a90d64690d200ca402a5dbffb96f6335d428b | [
"Apache-2.0"
] | 56 | 2017-01-21T20:01:41.000Z | 2019-01-14T13:35:53.000Z | demo/test_basic_examples.py | AnandJyrm/allure-pytest | 495a90d64690d200ca402a5dbffb96f6335d428b | [
"Apache-2.0"
] | 52 | 2017-01-23T13:40:40.000Z | 2022-03-30T00:02:31.000Z | '''
This module contains basic examples of tests
'''
import pytest
def test_success():
    """A passing test: its assertion always holds."""
    assert 1 + 1 == 2
def test_failure():
'this test fails'
assert False
def test_skip():
    """A skipped test: pytest.skip aborts it with the given reason."""
    reason = 'for a reason!'
    pytest.skip(reason)
@pytest.mark.xfail()
def test_xfail():
    """An expected failure: marked xfail and indeed failing."""
    assert 2 + 2 == 5
@pytest.mark.xfail()
def test_xpass():
    """An unexpected pass: marked xfail yet its assertion holds."""
    assert 1 + 1 == 2
def test_broken_fixture(NOT_A_FIXTURE):
'this test fails due to a non-satisfiable fixture request'
assert True
def a_func(x):
    """Forward *x* to b_func, adding a frame to the traceback."""
    result = b_func(x)
    return result
def b_func(x):
    """Always raise RuntimeError carrying *x* as its argument."""
    error = RuntimeError(x)
    raise error
def test_long_stacktrace():
    """Fail via a_func/b_func so the traceback spans several frames."""
    a_func('I am a failure reason')
def test_pytest_expansion():
a = {1: 2, 3: 4}
b = {1: 3, 3: 4}
assert a == b
| 15.071429 | 70 | 0.654028 |
45896dd1f59326b46c5723bbc8f2232796687193 | 4,873 | py | Python | setup.py | drbenschmidt/raspberrypi-hunter-remote | b8b21af4ebb555a71913180b2bf64a37c7d975a5 | [
"MIT"
] | null | null | null | setup.py | drbenschmidt/raspberrypi-hunter-remote | b8b21af4ebb555a71913180b2bf64a37c7d975a5 | [
"MIT"
] | null | null | null | setup.py | drbenschmidt/raspberrypi-hunter-remote | b8b21af4ebb555a71913180b2bf64a37c7d975a5 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
import os
import sys
from shutil import rmtree
from setuptools import find_packages, setup, Command
from setuptools.command.test import test as TestCommand
def read(fname):
    """Return the text of *fname*, resolved relative to this file.

    The encoding is pinned to UTF-8 so the build does not depend on the
    platform locale (e.g. cp1252 on Windows would fail on a UTF-8 README).
    """
    with open(os.path.join(os.path.dirname(__file__), fname), encoding="utf-8") as f:
        return f.read()
# Collect runtime and optional dependencies from the requirements files.
install_requires = read('requirements.txt').splitlines()
extras_require = {}
# Dev dependencies
try:
    extras_require['dev'] = read('requirements-dev.txt').splitlines()
except IOError:
    # doesn't exist
    pass
# Documentation dependencies
try:
    extras_require['docs'] = read('requirements-docs.txt').splitlines()
except IOError:
    # doesn't exist
    pass
# If there are any extras, add a catch-all case that includes everything.
# This assumes that entries in extras_require are lists (not single strings).
if extras_require:
    extras_require['all'] = sorted(
        {x for v in extras_require.values() for x in v}
    )
# Import meta data from __meta__.py
#
# We use exec for this because __meta__.py runs its __init__.py first,
# __init__.py may assume the requirements are already present, but this code
# is being run during the `python setup.py install` step, before requirements
# are installed.
# https://packaging.python.org/guides/single-sourcing-package-version/
meta = {}
exec(read('raspberrypi_hunter_remote/__meta__.py'), meta)
# Import the README and use it as the long-description.
# If your readme path is different, add it here.
possible_readme_names = ['README.rst', 'README.md', 'README.txt', 'README']
# Handle turning a README file into long_description
long_description = meta['description']
readme_fname = ''
# Use the first README variant that exists; fall back to the short description.
for fname in possible_readme_names:
    try:
        long_description = read(fname)
    except IOError:
        # doesn't exist
        continue
    else:
        # exists
        readme_fname = fname
        break
# Infer the content type of the README file from its extension.
# If the contents of your README do not match its extension, manually assign
# long_description_content_type to the appropriate value.
readme_ext = os.path.splitext(readme_fname)[1]
if readme_ext.lower() == '.rst':
    long_description_content_type = 'text/x-rst'
elif readme_ext.lower() == '.md':
    long_description_content_type = 'text/markdown'
else:
    long_description_content_type = 'text/plain'
class PyTest(TestCommand):
    """``setup.py test`` command that delegates test execution to pytest."""
    def finalize_options(self):
        TestCommand.finalize_options(self)
        self.test_args = []
        self.test_suite = True
    def run_tests(self):
        # Imported here because pytest may not be installed at setup time.
        import pytest
        pytest.main(self.test_args)
class UploadCommand(Command):
    """Support setup.py upload.

    Removes stale builds, builds sdist/wheel, uploads via Twine and pushes
    a version git tag.
    """
    description = 'Build and publish the package.'
    user_options = []
    @staticmethod
    def status(s):
        """Prints things in bold."""
        print('\033[1m{0}\033[0m'.format(s))
    def initialize_options(self):
        pass
    def finalize_options(self):
        pass
    def run(self):
        try:
            self.status('Removing previous builds...')
            here = os.path.abspath(os.path.dirname(__file__))
            # BUG FIX: rmtree(here, 'dist') passed 'dist' as the ignore_errors
            # argument and would have deleted the entire project directory.
            # The intent is to remove only the dist/ subdirectory.
            rmtree(os.path.join(here, 'dist'))
        except OSError:
            pass
        self.status('Building Source and Wheel (universal) distribution...')
        os.system(
            '{0} setup.py sdist bdist_wheel --universal'
            .format(sys.executable)
        )
        self.status('Uploading the package to PyPI via Twine...')
        os.system('twine upload dist/*')
        self.status('Pushing git tags...')
        os.system('git tag v{0}'.format(meta['__version__']))
        os.system('git push --tags')
        sys.exit()
# Declare the distribution; all values come from the meta/README logic above.
setup(
    # Essential details on the package and its dependencies
    name=meta['name'],
    version=meta['version'],
    packages=find_packages(
        exclude=["tests", "*.tests", "*.tests.*", "tests.*"]
    ),
    package_dir={meta['name']: os.path.join(".", meta['path'])},
    # If any package contains *.txt or *.rst files, include them:
    # package_data={'': ['*.txt', '*.rst'],}
    install_requires=install_requires,
    extras_require=extras_require,
    # Metadata to display on PyPI
    author=meta['author'],
    author_email=meta['author_email'],
    description=meta['description'],
    long_description=long_description,
    long_description_content_type=long_description_content_type,
    license=meta['license'],
    url=meta['url'],
    classifiers=[
        # Trove classifiers
        # Full list: https://pypi.python.org/pypi?%3Aaction=list_classifiers
        'License :: OSI Approved :: MIT License',
        "Natural Language :: English",
        'Programming Language :: Python',
        'Programming Language :: Python :: 3',
    ],
    # Could also include keywords, download_url, project_urls, etc.
    # Custom commands
    cmdclass={
        'test': PyTest,
        'upload': UploadCommand,
    },
)
| 27.845714 | 77 | 0.662836 |
0fa5354f5b578f43b1f6f5a556273bb58722475a | 5,546 | py | Python | testing/cross_language/prf_set_test.py | fax001/tink | 9f30c97cb84b10bbba6978bc9c12c86478024050 | [
"Apache-2.0"
] | 1 | 2022-03-15T03:21:44.000Z | 2022-03-15T03:21:44.000Z | testing/cross_language/prf_set_test.py | fax001/tink | 9f30c97cb84b10bbba6978bc9c12c86478024050 | [
"Apache-2.0"
] | 1 | 2022-03-02T13:25:38.000Z | 2022-03-02T13:25:38.000Z | testing/cross_language/prf_set_test.py | fax001/tink | 9f30c97cb84b10bbba6978bc9c12c86478024050 | [
"Apache-2.0"
] | null | null | null | # Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Cross-language tests for the PrfSet primitive."""
from typing import Iterable
from absl.testing import absltest
from absl.testing import parameterized
import tink
from tink import prf
from tink.testing import keyset_builder
from util import supported_key_types
from util import testing_servers
SUPPORTED_LANGUAGES = testing_servers.SUPPORTED_LANGUAGES_BY_PRIMITIVE['prf']
OUTPUT_LENGTHS = [
1, 2, 5, 10, 16, 17, 20, 32, 33, 48, 64, 65, 100, 256, 512, 1024
]
def all_prf_key_template_names() -> Iterable[str]:
  """Yields the name of every key template for a supported PRF key type."""
  for prf_key_type in supported_key_types.PRF_KEY_TYPES:
    yield from supported_key_types.KEY_TEMPLATE_NAMES[prf_key_type]
def all_prf_key_template_names_with_some_output_length():
  """Yields (prf_key_template_name, output_length) tuples.

  Every PRF template name is paired with each entry of OUTPUT_LENGTHS.
  """
  for template_name in all_prf_key_template_names():
    for output_length in OUTPUT_LENGTHS:
      yield (template_name, output_length)
def gen_keyset(key_template_name: str) -> bytes:
  """Builds a serialized keyset holding one key of the given template.

  The single generated key is marked as the primary key of the keyset.
  """
  builder = keyset_builder.new_keyset_builder()
  new_key_id = builder.add_new_key(
      supported_key_types.KEY_TEMPLATE[key_template_name])
  builder.set_primary_key(new_key_id)
  return builder.keyset()
def gen_keyset_with_2_prfs() -> bytes:
  """Builds a serialized keyset with an HMAC-SHA256 PRF and an HKDF-SHA256
  PRF, where the HKDF key is the primary key."""
  builder = keyset_builder.new_keyset_builder()
  builder.add_new_key(prf.prf_key_templates.HMAC_SHA256)
  hkdf_key_id = builder.add_new_key(prf.prf_key_templates.HKDF_SHA256)
  builder.set_primary_key(hkdf_key_id)
  return builder.keyset()
def setUpModule():
  # Register the PRF key managers and start the cross-language testing
  # servers once for the whole module.
  prf.register()
  testing_servers.start('prf_set')
def tearDownModule():
  # Shut down the testing servers started in setUpModule.
  testing_servers.stop()
class PrfSetPythonTest(parameterized.TestCase):
  """Cross-language consistency tests for the PrfSet primitive."""

  @parameterized.parameters(all_prf_key_template_names())
  def test_unsupported(self, key_template_name):
    # Languages that do not support a template must raise a TinkError
    # instead of silently computing a PRF.
    supported_langs = supported_key_types.SUPPORTED_LANGUAGES_BY_TEMPLATE_NAME[
        key_template_name]
    self.assertNotEmpty(supported_langs)
    keyset = gen_keyset(key_template_name)
    unsupported_languages = [
        lang for lang in SUPPORTED_LANGUAGES if lang not in supported_langs
    ]
    for lang in unsupported_languages:
      p = testing_servers.prf_set(lang, keyset)
      with self.assertRaises(
          tink.TinkError,
          msg='Language %s supports PRF compute with %s unexpectedly' %
          (p.lang, key_template_name)):
        p.primary().compute(b'input_data', output_length=16)

  @parameterized.parameters(all_prf_key_template_names())
  def test_supported(self, key_template_name):
    # Every language that supports the template must produce the same
    # 16-byte output for the same key and input.
    supported_langs = supported_key_types.SUPPORTED_LANGUAGES_BY_TEMPLATE_NAME[
        key_template_name]
    self.assertNotEmpty(supported_langs)
    keyset = gen_keyset(key_template_name)
    input_data = b'This is some input data.'
    outputs = []
    for lang in supported_langs:
      p = testing_servers.prf_set(lang, keyset)
      outputs.append(p.primary().compute(input_data, 16))
    self.assertLen(outputs, len(supported_langs))
    self.assertLen(outputs[0], 16)
    # A single distinct value means all languages agree.
    self.assertLen(set(outputs), 1)

  @parameterized.parameters(
      all_prf_key_template_names_with_some_output_length())
  def test_compute_consistent_for_output_length(self, key_template_name,
                                                output_length):
    supported_langs = supported_key_types.SUPPORTED_LANGUAGES_BY_TEMPLATE_NAME[
        key_template_name]
    # This test checks that for a given output_length, either all
    # implementations fail or all produce the same value.
    self.assertNotEmpty(supported_langs)
    keyset = gen_keyset(key_template_name)
    input_data = b'This is some input data.'
    errors = {}
    outputs = {}
    for lang in supported_langs:
      try:
        p = testing_servers.prf_set(lang, keyset)
        outputs[lang] = p.primary().compute(input_data, output_length)
      except tink.TinkError as e:
        errors[lang] = e
    # Mixed success/failure across languages, or differing outputs, both
    # count as inconsistency.
    inconsistent_errors = bool(errors) and bool(outputs)
    inconsistent_output_values = len(set(outputs.values())) > 1
    if inconsistent_errors or inconsistent_output_values:
      self.fail('The PRF for template %s and output_length=%d is inconsistent: '
                'outputs = %s, errors = %s.' %
                (key_template_name, output_length, outputs, errors))

  @parameterized.parameters(SUPPORTED_LANGUAGES)
  def test_multiple_prfs(self, lang):
    # With two PRFs in the keyset, all() must expose both, and the entry
    # keyed by primary_id() must match primary().
    keyset = gen_keyset_with_2_prfs()
    input_data = b'This is some input data.'
    output_length = 15
    p = testing_servers.prf_set(lang, keyset)
    primary_output = p.primary().compute(input_data, output_length)
    primary_id = p.primary_id()
    all_outputs = {
        key_id: f.compute(input_data, output_length)
        for key_id, f in p.all().items()
    }
    self.assertLen(all_outputs, 2)
    self.assertEqual(all_outputs[primary_id], primary_output)
# Allow running this test file directly.
if __name__ == '__main__':
  absltest.main()
| 36.248366 | 80 | 0.740534 |
580a560f4d4a760a26e5cd4f85569ee5bc541aa0 | 1,202 | py | Python | tests/repository/elements.py | proteanhq/protean | 2006832265435cad8d4f9b86d1a789d8828d2707 | [
"BSD-3-Clause"
] | 6 | 2018-09-26T04:54:09.000Z | 2022-03-30T01:01:45.000Z | tests/repository/elements.py | proteanhq/protean | 2006832265435cad8d4f9b86d1a789d8828d2707 | [
"BSD-3-Clause"
] | 261 | 2018-09-20T09:53:33.000Z | 2022-03-08T17:43:04.000Z | tests/repository/elements.py | proteanhq/protean | 2006832265435cad8d4f9b86d1a789d8828d2707 | [
"BSD-3-Clause"
] | 6 | 2018-07-22T07:09:15.000Z | 2021-02-02T05:17:23.000Z | import re
from collections import defaultdict
from typing import List
from protean import BaseAggregate, BaseRepository, BaseValueObject
from protean.fields import Integer, String, ValueObject
from protean.globals import current_domain
class Person(BaseAggregate):
    # Aggregate root for a person; queried by PersonRepository below.
    first_name = String(max_length=50, required=True)
    last_name = String(max_length=50, required=True)
    age = Integer(default=21)  # default matches the find_adults cutoff
class PersonRepository(BaseRepository):
    def find_adults(self, minimum_age: int = 21) -> List[Person]:
        # Filter persons at or above the cutoff age via the aggregate's DAO.
        return current_domain.repository_for(Person)._dao.filter(age__gte=minimum_age)

    class Meta:
        # Binds this repository to the Person aggregate.
        aggregate_cls = Person
class Email(BaseValueObject):
    REGEXP = r"\"?([-a-zA-Z0-9.`?{}]+@\w+\.\w+)\"?"

    # This is the external facing data attribute
    address = String(max_length=254, required=True)

    def clean(self):
        """Validate the address against the email pattern; return an
        errors mapping (field name -> list of messages)."""
        errors = defaultdict(list)
        if re.match(Email.REGEXP, self.address) is None:
            errors["address"].append("is invalid")
        return errors
class User(BaseAggregate):
    # Aggregate root for a user account with a validated email value object.
    email = ValueObject(Email, required=True)
    password = String(required=True, max_length=255)
| 27.318182 | 86 | 0.705491 |
f78adb9e923820de4494f299b90384f25c8e39e0 | 533 | py | Python | trac/versioncontrol/__init__.py | mikiec84/trac | d51a7119b9fcb9061d7fe135c7d648fa671555dd | [
"BSD-3-Clause"
] | null | null | null | trac/versioncontrol/__init__.py | mikiec84/trac | d51a7119b9fcb9061d7fe135c7d648fa671555dd | [
"BSD-3-Clause"
] | null | null | null | trac/versioncontrol/__init__.py | mikiec84/trac | d51a7119b9fcb9061d7fe135c7d648fa671555dd | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
#
# Copyright (C) 2005-2020 Edgewall Software
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution. The terms
# are also available at https://trac.edgewall.org/wiki/TracLicense.
#
# This software consists of voluntary contributions made by many
# individuals. For the exact contribution history, see the revision
# history and logs, available at https://trac.edgewall.org/.
from trac.versioncontrol.api import *
| 35.533333 | 67 | 0.761726 |
ccc4c9577fda3912d40be7b9cc8a1e414aede96a | 2,291 | py | Python | ircrobots/matching/responses.py | examknow/ircrobots | ac4c144d58aaf7b70ad7248a385922fe6528dbcb | [
"MIT"
] | 18 | 2020-04-13T23:32:55.000Z | 2022-02-04T16:11:34.000Z | ircrobots/matching/responses.py | examknow/ircrobots | ac4c144d58aaf7b70ad7248a385922fe6528dbcb | [
"MIT"
] | 6 | 2020-10-04T00:34:04.000Z | 2021-03-26T12:14:21.000Z | ircrobots/matching/responses.py | examknow/ircrobots | ac4c144d58aaf7b70ad7248a385922fe6528dbcb | [
"MIT"
] | 7 | 2020-04-20T01:11:09.000Z | 2022-03-28T22:36:55.000Z | from typing import List, Optional, Sequence, Union
from irctokens import Line
from ..interface import (IServer, IMatchResponse, IMatchResponseParam,
IMatchResponseHostmask)
from .params import *
TYPE_PARAM = Union[str, IMatchResponseParam]
class Responses(IMatchResponse):
    """Matches an IRC line whose command is one of ``commands``, whose
    leading parameters satisfy ``params``, and, when ``source`` is given,
    whose hostmask satisfies it."""

    def __init__(self,
            commands: Sequence[str],
            params: Sequence[TYPE_PARAM]=[],
            source: Optional[IMatchResponseHostmask]=None):
        self._commands = commands
        self._source = source
        # Normalise plain strings into Literal matchers up front; anything
        # that is neither str nor IMatchResponseParam is ignored.
        normalised: List[IMatchResponseParam] = []
        for param in params:
            if isinstance(param, str):
                normalised.append(Literal(param))
            elif isinstance(param, IMatchResponseParam):
                normalised.append(param)
        self._params: Sequence[IMatchResponseParam] = normalised

    def __repr__(self) -> str:
        return f"Responses({self._commands!r}: {self._params!r})"

    def match(self, server: IServer, line: Line) -> bool:
        # Guard clauses: command, then source, then positional params.
        if line.command not in self._commands:
            return False
        if self._source is not None:
            if line.hostmask is None or \
                    not self._source.match(server, line.hostmask):
                return False
        for index, param in enumerate(self._params):
            if index >= len(line.params):
                return False
            if not param.match(server, line.params[index]):
                return False
        return True
class Response(Responses):
    """A Responses matcher restricted to exactly one command."""

    def __init__(self,
            command: str,
            params: Sequence[TYPE_PARAM]=[],
            source: Optional[IMatchResponseHostmask]=None):
        # Delegate to Responses with a single-element command list.
        super().__init__([command], params, source=source)

    def __repr__(self) -> str:
        return f"Response({self._commands[0]}: {self._params!r})"
class ResponseOr(IMatchResponse):
    """Matches when any one of the wrapped response matchers matches."""

    def __init__(self, *responses: IMatchResponse):
        self._responses = responses

    def __repr__(self) -> str:
        return f"ResponseOr({self._responses!r})"

    def match(self, server: IServer, line: Line) -> bool:
        # Short-circuits on the first matching response.
        return any(
            response.match(server, line) for response in self._responses)
| 35.796875 | 70 | 0.577041 |
f6ad6c574b8c08cbfb3f31cfaad8166ab3cce2ce | 1,342 | py | Python | tools_cmd/cmd_blog.py | Landers1037/tools | 864a5889af746e4114a4f6708858e7321e675a3c | [
"MIT"
] | null | null | null | tools_cmd/cmd_blog.py | Landers1037/tools | 864a5889af746e4114a4f6708858e7321e675a3c | [
"MIT"
] | null | null | null | tools_cmd/cmd_blog.py | Landers1037/tools | 864a5889af746e4114a4f6708858e7321e675a3c | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# Time: 2020-08-15 15:32
# Author: Landers1037
# Mail: liaorenj@gmail.com
# File: blog.py
import click,os
from utils.db_trans import *
from utils.to_json import newjson
@click.command()
@click.option("-r","--remove",is_flag=True,default=False,help="删除数据库")
@click.option("-n","--new",is_flag=True,default=False,help="新建数据库")
@click.option("-j","--json",default=None,help="json文件")
@click.option("-t","--test",is_flag=True,default=False,help="文章测试")
def blog(remove, new, json, test):
    """
    Create or delete the blog database file.
    """
    if remove:
        # The database lives in a fixed working directory on the desktop.
        opt = os.path.join(os.path.expanduser("~"), 'Desktop', 'blog_opt')
        flag = os.path.exists(os.path.join(opt, "app.db"))
        if flag:
            click.secho("db file exists.It will be removed!", fg="yellow")
            os.remove(os.path.join(opt, "app.db"))
        else:
            click.secho("db file not exists!", fg="yellow")
        click.secho("done!", fg="green")
    elif new:
        newDB()
        click.secho("done!", fg="green")
    elif json:
        try:
            newjson(json)
            click.secho("done! blog.json generated", fg="yellow")
        except Exception:
            # Was a bare `except:`, which also swallowed SystemExit and
            # KeyboardInterrupt; catch only real errors from generation.
            click.secho("error!", fg="red")
    elif test:
        blog_test()
    else:
        click.secho("please type args!", fg="green")
| 29.173913 | 75 | 0.564083 |
134774fce1bd135409487fa79f95796ef93b64ba | 10,034 | py | Python | docs/src/conf.py | buruzaemon/natto-py | 8f3b8863ca48eb10d9bec74af81408bcaebab7ce | [
"BSD-2-Clause"
] | 83 | 2015-02-24T03:55:14.000Z | 2022-03-22T11:01:03.000Z | docs/src/conf.py | buruzaemon/natto-py | 8f3b8863ca48eb10d9bec74af81408bcaebab7ce | [
"BSD-2-Clause"
] | 60 | 2015-02-10T13:50:34.000Z | 2021-10-04T01:09:35.000Z | docs/src/conf.py | buruzaemon/natto-py | 8f3b8863ca48eb10d9bec74af81408bcaebab7ce | [
"BSD-2-Clause"
] | 16 | 2015-02-24T03:55:25.000Z | 2021-05-12T16:56:12.000Z | # -*- coding: utf-8 -*-
#
# natto-py documentation build configuration file, created by
# sphinx-quickstart on Sat Oct 1 08:25:03 2016.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
# Steps to generate documentation
#
# 0. mkdir -p docs/src
# cd docs/src
#
# 1. sphinx-quickstart
# ... follow wizard set project name, copyright, author, autodoc
#
# 2. set sys.path in generated conf.py
# edit the generated index.rst
#
# 3. sphinx-autobuild . _build_html
# ... kick up the autobuild process
#
# 4. in another window, generate the API documentiona
# sphinx-apidoc -o source ../../natto
#
import os
import sys
sys.path.insert(0, os.path.abspath('../../'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The encoding of source files.
#
# source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'natto-py'
copyright = u'2021, Brooke M. Fujita'
author = u'Brooke M. Fujita'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = u'1.0.0'
# The full version, including alpha/beta/rc tags.
release = u'1.0.0'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#
# today = ''
#
# Else, today_fmt is used as the format for a strftime call.
#
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
# keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'alabaster'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = []
# The name for this set of Sphinx documents.
# "<project> v<release> documentation" by default.
#
# html_title = u'natto-py v1.0.0'
# A shorter title for the navigation bar. Default is the same as html_title.
#
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#
# html_logo = None
# The name of an image file (relative to this directory) to use as a favicon of
# the docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#
# html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#
# html_extra_path = []
# If not None, a 'Last updated on:' timestamp is inserted at every page
# bottom, using the given strftime format.
# The empty string is equivalent to '%b %d, %Y'.
#
# html_last_updated_fmt = None
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#
# html_additional_pages = {}
# If false, no module index is generated.
#
# html_domain_indices = True
# If false, no index is generated.
#
# html_use_index = True
# If true, the index is split into individual pages for each letter.
#
# html_split_index = False
# If true, links to the reST sources are added to the pages.
#
# html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#
# html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#
# html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr', 'zh'
#
# html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# 'ja' uses this config value.
# 'zh' user can custom change `jieba` dictionary path.
#
# html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#
# html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'natto-pydoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'natto-py.tex', u'natto-py Documentation',
u'Brooke M. Fujita', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#
# latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#
# latex_use_parts = False
# If true, show page references after internal links.
#
# latex_show_pagerefs = False
# If true, show URL addresses after external links.
#
# latex_show_urls = False
# Documents to append as an appendix to all manuals.
#
# latex_appendices = []
# If false, no module index is generated.
#
# latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'natto-py', u'natto-py Documentation',
[author], 1)
]
# If true, show URL addresses after external links.
#
# man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'natto-py', u'natto-py Documentation',
author, 'natto-py', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#
# texinfo_appendices = []
# If false, no module index is generated.
#
# texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#
# texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#
# texinfo_no_detailmenu = False
| 28.505682 | 80 | 0.70012 |
45c4fb5dfc45dd84a006d5497df177361ac1aa69 | 4,458 | py | Python | tests/inference/qhbm_utils_test.py | ecsbeats/qhbm-library | 9d938a915d54fdbb4839ce346110884e0c53feb8 | [
"Apache-2.0"
] | 19 | 2022-02-17T19:32:36.000Z | 2022-03-31T01:07:19.000Z | tests/inference/qhbm_utils_test.py | ecsbeats/qhbm-library | 9d938a915d54fdbb4839ce346110884e0c53feb8 | [
"Apache-2.0"
] | 37 | 2022-02-17T17:26:01.000Z | 2022-03-31T21:32:16.000Z | tests/inference/qhbm_utils_test.py | ecsbeats/qhbm-library | 9d938a915d54fdbb4839ce346110884e0c53feb8 | [
"Apache-2.0"
] | 8 | 2022-02-17T17:26:32.000Z | 2022-03-18T12:11:33.000Z | # Copyright 2021 The QHBM Library Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for qhbmlib.inference.qhbm_utils"""
import cirq
import tensorflow as tf
from qhbmlib import inference
from qhbmlib import models
from tests import test_util
class DensityMatrixTest(tf.test.TestCase):
  """Tests the density_matrix function."""

  @test_util.eager_mode_toggle
  def test_density_matrix(self):
    """Confirms the density matrix represented by the QHBM is correct."""
    # Check density matrix of Bell state.
    num_bits = 2
    actual_energy = models.BernoulliEnergy(list(range(num_bits)))
    actual_energy.build([None, num_bits])
    actual_energy.set_weights([tf.constant([-10.0, -10.0])])  # pin at |00>
    qubits = cirq.GridQubit.rect(1, num_bits)
    # H then CNOT on |00> prepares the Bell state (|00> + |11>)/sqrt(2).
    test_u = cirq.Circuit([cirq.H(qubits[0]), cirq.CNOT(qubits[0], qubits[1])])
    actual_circuit = models.DirectQuantumCircuit(test_u)
    actual_circuit.build([])
    model = models.Hamiltonian(actual_energy, actual_circuit)
    # Density matrix of the Bell state in the computational basis.
    expected_dm = tf.constant(
        [[0.5, 0, 0, 0.5], [0, 0, 0, 0], [0, 0, 0, 0], [0.5, 0, 0, 0.5]],
        tf.complex64,
    )
    density_matrix_wrapper = tf.function(inference.density_matrix)
    actual_dm = density_matrix_wrapper(model)
    self.assertAllClose(actual_dm, expected_dm)
class FidelityTest(tf.test.TestCase):
  """Tests the fidelity function."""

  def setUp(self):
    """Initializes test objects."""
    super().setUp()
    # Relative tolerance used by all fidelity comparisons in this class.
    self.close_rtol = 1e-4

  @test_util.eager_mode_toggle
  def test_fidelity_self(self):
    """Confirms the fidelity of a model with itself is 1."""
    num_bits = 4
    num_samples = 1  # required but not used
    qubits = cirq.GridQubit.rect(1, num_bits)
    num_layers = 3
    model, _ = test_util.get_random_hamiltonian_and_inference(
        qubits, num_layers, "test_fidelity", num_samples)
    density_matrix_wrapper = tf.function(inference.density_matrix)
    model_dm = density_matrix_wrapper(model)
    fidelity_wrapper = tf.function(inference.fidelity)
    actual_fidelity = fidelity_wrapper(model, model_dm)
    expected_fidelity = 1.0
    self.assertAllClose(
        actual_fidelity, expected_fidelity, rtol=self.close_rtol)

  def test_fidelity_random(self):
    """Confirms correct fidelity against slower direct formula."""

    def direct_fidelity(rho, sigma):
      """Direct matrix to matrix fidelity function."""
      # F(rho, sigma) = (Tr sqrt(sqrt(rho) sigma sqrt(rho)))^2
      sqrt_rho = tf.linalg.sqrtm(rho)
      intermediate = tf.linalg.sqrtm(sqrt_rho @ sigma @ sqrt_rho)
      return tf.linalg.trace(intermediate)**2

    num_rerolls = 5
    for _ in range(num_rerolls):
      num_qubits = 4
      sigma, _ = test_util.random_mixed_density_matrix(num_qubits,
                                                       2**num_qubits)
      sigma_complex64 = tf.cast(sigma, tf.complex64)
      sigma_complex128 = tf.cast(sigma, tf.complex128)
      qubits = cirq.GridQubit.rect(num_qubits, 1)
      num_layers = 3
      identifier = "fidelity_test"
      num_samples = 1  # required but unused
      h, _ = test_util.get_random_hamiltonian_and_inference(
          qubits, num_layers, identifier, num_samples)
      h_dm = inference.density_matrix(h)
      expected_fidelity = direct_fidelity(h_dm, sigma_complex64)
      fidelity_wrapper = tf.function(inference.fidelity)
      # Uses sigma of dtype complex128 to test typecasting of fidelity function
      actual_fidelity = fidelity_wrapper(h, sigma_complex128)
      self.assertAllClose(
          actual_fidelity, expected_fidelity, rtol=self.close_rtol)
      # Uses sigma of dtype complex64 to test default type of fidelity function
      actual_fidelity = fidelity_wrapper(h, sigma_complex64)
      self.assertAllClose(
          actual_fidelity, expected_fidelity, rtol=self.close_rtol)
# Allow running this test file directly.
if __name__ == "__main__":
  print("Running qhbm_utils_test.py ...")
  tf.test.main()
| 36.540984 | 80 | 0.695379 |
6be3313a2ea31f912e49f763ab5d83b41786112c | 177 | py | Python | django_restframework_2fa/apps.py | jeetpatel9/django-restframework-2fa | 8544a2b553f6e61040541ccaa77fc15076c08345 | [
"MIT"
] | null | null | null | django_restframework_2fa/apps.py | jeetpatel9/django-restframework-2fa | 8544a2b553f6e61040541ccaa77fc15076c08345 | [
"MIT"
] | 3 | 2021-06-09T14:41:08.000Z | 2021-06-09T14:42:18.000Z | django_restframework_2fa/apps.py | jeetpatel9/django-restframework-2fa | 8544a2b553f6e61040541ccaa77fc15076c08345 | [
"MIT"
] | null | null | null | from django.apps import AppConfig
class DjangoRestframework2FaConfig(AppConfig):
    # App configuration for the django_restframework_2fa package.
    # Use 64-bit auto-incrementing primary keys for this app's models.
    default_auto_field = 'django.db.models.BigAutoField'
    name = 'django_restframework_2fa'
f59312fbaa0bc928f91cffc720570605161d86be | 2,835 | py | Python | demo/tasks.py | honorabel/python-fretboard | fb6a37e29bf6a19bee11940e9b42d58220c1306c | [
"MIT"
] | 1 | 2020-11-02T16:30:38.000Z | 2020-11-02T16:30:38.000Z | demo/tasks.py | honorabel/python-fretboard | fb6a37e29bf6a19bee11940e9b42d58220c1306c | [
"MIT"
] | null | null | null | demo/tasks.py | honorabel/python-fretboard | fb6a37e29bf6a19bee11940e9b42d58220c1306c | [
"MIT"
] | null | null | null | import os
import sys
import invoke
import livereload
sys.path.append(os.path.abspath('..'))
import fretboard
server = livereload.Server()
@invoke.task
def clean(ctx):
    """Delete all previously generated SVG files from ./svg.

    Uses glob + os.remove instead of shelling out to ``rm -rf``, which is
    non-portable (fails on Windows) and spawns a shell per invocation.
    """
    for svg_path in glob.glob('./svg/*.svg'):
        os.remove(svg_path)
@invoke.task
def build(ctx):
    """Render every demo chord and fretboard diagram into the svg/ directory."""
    # Chord (D)
    chord = fretboard.Chord(positions='xx0232', fingers='---132')
    chord.save('svg/D.svg')
    # Barre chord (F#)
    chord = fretboard.Chord(positions='133211', fingers='134211')
    chord.save('svg/F-sharp.svg')
    # C shape, higher up the neck
    chord = fretboard.Chord(positions='x-15-14-11-12-11', fingers='-43121')
    chord.save('svg/C-shape.svg')
    # Ukulele chord (G)
    chord = fretboard.UkuleleChord(positions='x232', fingers='-132')
    chord.save('svg/ukulele-G.svg')
    # Bass chord (E)
    chord = fretboard.BassChord(positions='x221', fingers='-321')
    chord.save('svg/bass-E.svg')
    # Fretboard w/ Rocksmith-style string colors (F#)
    fb = fretboard.Fretboard(style={
        'drawing': {'background_color': 'black'},
        'fret': {'color': 'darkslategray'},
        'nut': {'color': 'darkslategray'},
        'marker': {'color': 'darkslategray', 'border_color': 'slategray'},
        'string': {'color': 'darkslategray'},
    })
    fb.add_marker(string=(0, 5), fret=1, label='1')
    fb.add_marker(string=1, fret=3, label='3')
    fb.add_marker(string=2, fret=3, label='4')
    fb.add_marker(string=3, fret=2, label='2')
    fb.strings[0].color = 'red'
    fb.strings[1].color = 'gold'
    fb.strings[2].color = 'deepskyblue'
    fb.strings[3].color = 'orange'
    fb.strings[4].color = 'limegreen'
    fb.strings[5].color = 'magenta'
    fb.save('svg/F-sharp-rocksmith.svg')
    # Pentatonic scale shape w/ highlighted root notes
    fb = fretboard.Fretboard(frets=(5, 8), style={'marker': {'color': 'cornflowerblue'}})
    fb.add_marker(string=0, fret=5, label='A', color='salmon')
    fb.add_marker(string=1, fret=5, label='D')
    fb.add_marker(string=2, fret=5, label='G')
    fb.add_marker(string=3, fret=5, label='C')
    fb.add_marker(string=4, fret=5, label='E')
    fb.add_marker(string=5, fret=5, label='A', color='salmon')
    fb.add_marker(string=0, fret=8, label='C')
    fb.add_marker(string=1, fret=7, label='E')
    fb.add_marker(string=2, fret=7, label='A', color='salmon')
    fb.add_marker(string=3, fret=7, label='D')
    fb.add_marker(string=4, fret=8, label='G')
    fb.add_marker(string=5, fret=8, label='C')
    fb.save('svg/pentatonic-shape.svg')
@invoke.task(pre=[clean, build])
def serve(ctx):
    """Serve the demo page with livereload, rebuilding on any change to
    this file, index.html, or the fretboard package source."""
    server.watch(__file__, lambda: os.system('invoke build'))
    server.watch('index.html', lambda: os.system('invoke build'))
    server.watch('../fretboard/', lambda: os.system('invoke build'))
    server.serve(
        root='.',
        host='localhost',
        liveport=35729,
        port=8080
    )
| 30.483871 | 89 | 0.628571 |
0971055e828797d96000daa19b4c28cfbb2f6b83 | 14,640 | py | Python | data_utils.py | SauravMaheshkar/IGMC | b6f7ec543c3f713373b27f9cf486aa1456384fd8 | [
"MIT"
] | null | null | null | data_utils.py | SauravMaheshkar/IGMC | b6f7ec543c3f713373b27f9cf486aa1456384fd8 | [
"MIT"
] | 4 | 2022-01-06T19:49:47.000Z | 2022-01-07T10:21:41.000Z | data_utils.py | SauravMaheshkar/IGMC | b6f7ec543c3f713373b27f9cf486aa1456384fd8 | [
"MIT"
] | null | null | null | from __future__ import division, print_function
import os.path
import random
# For automatic dataset downloading
from urllib.request import urlopen
from zipfile import ZipFile
import numpy as np
import pandas as pd
import scipy.sparse as sp
from tqdm import tqdm
try:
from BytesIO import BytesIO
except ImportError:
from io import BytesIO
def data_iterator(data, batch_size):
    """Yield random batches drawn in lockstep from a list of numpy tensors.

    All tensors in ``data`` must share the same first-dimension size.  One
    random permutation is applied to every tensor (so corresponding rows
    stay aligned), then consecutive slices of ``batch_size`` rows are
    emitted.  A trailing remainder smaller than ``batch_size`` is dropped.

    :param data: list of numpy arrays to batch along their first axis.
    :param batch_size: int, number of rows per yielded batch.
    :return: iterator over lists of numpy arrays (one list per batch).
    """
    num_rows = len(data[0])
    permutation = np.arange(num_rows)
    np.random.shuffle(permutation)
    shuffled = [tensor[permutation] for tensor in data]
    for batch_idx in range(num_rows // batch_size):
        start = batch_idx * batch_size
        stop = start + batch_size
        yield [tensor[start:stop] for tensor in shuffled]
def map_data(data):
    """Remap values to contiguous indices in case they are not already in
    a continuous [0, N) range.

    Parameters
    ----------
    data : np.int32 array of raw ids.

    Returns
    -------
    mapped_data : np.int32 array with each id replaced by its rank among
        the sorted distinct values.
    id_dict : dict mapping old id -> new contiguous index.
    n : number of distinct values.
    """
    distinct_values = sorted(set(data))
    id_dict = dict(zip(distinct_values, range(len(distinct_values))))
    mapped_data = np.array([id_dict[value] for value in data])
    return mapped_data, id_dict, len(distinct_values)
def download_dataset(dataset, files, data_dir):
    """Download and unpack a MovieLens dataset if its files are missing.

    Parameters
    ----------
    dataset : str
        One of ``'ml_100k'``, ``'ml_1m'`` or ``'ml_10m'`` (underscore naming;
        translated to the dash-separated archive name on grouplens.org).
    files : list of str
        File names (each starting with ``'/'``) expected under ``data_dir``.
        If all of them already exist, nothing is downloaded.
    data_dir : str
        Final directory name; produced by renaming the extracted folder.

    Raises
    ------
    ValueError
        If ``dataset`` is not one of the supported options.
    """
    # Nothing to do when every expected file is already on disk.
    if np.all([os.path.isfile(data_dir + f) for f in files]):
        return

    # Validate the dataset name *before* spending bandwidth on the download
    # (the original code downloaded first and raised afterwards).
    if dataset in ["ml_100k", "ml_1m"]:
        target_dir = "raw_data/" + dataset.replace("_", "-")
    elif dataset == "ml_10m":
        # The 10M archive extracts to a differently-named folder.
        target_dir = "raw_data/" + "ml-10M100K"
    else:
        raise ValueError("Invalid dataset option %s" % dataset)

    url = (
        "http://files.grouplens.org/datasets/movielens/"
        + dataset.replace("_", "-")
        + ".zip"
    )
    print("Downloading %s dataset" % dataset)
    # Use context managers so the HTTP response socket and the zip handle
    # are closed promptly instead of being leaked.
    with urlopen(url) as request:
        with ZipFile(BytesIO(request.read())) as zip_ref:
            zip_ref.extractall("raw_data/")

    os.rename(target_dir, data_dir)
    # shutil.rmtree(target_dir)
def load_data(fname, seed=1234, verbose=True):
    """Loads dataset and creates adjacency matrix
    and feature matrix
    Parameters
    ----------
    fname : str, dataset name; one of 'ml_100k', 'ml_1m', 'ml_10m', 'ml_25m'
    seed: int, dataset shuffling seed
    verbose: bool, whether to print dataset statistics
    Returns
    -------
    num_users : int
        Number of users and items respectively
    num_items : int
    u_nodes : np.int32 arrays
        User indices
    v_nodes : np.int32 array
        item (movie) indices
    ratings : np.float32 array
        User/item ratings s.t. ratings[k] is the rating given by user u_nodes[k] to
        item v_nodes[k]. Note that the pairs u_nodes[k]/v_nodes[k] are unique, but
        not necessarily all u_nodes[k] or all v_nodes[k] separately.
    u_features: np.float32 array, or None
        If present in dataset, contains the features of the users.
    v_features: np.float32 array, or None
        If present in dataset, contains the features of the items (movies).
    seed: int,
        For datashuffling seed with pythons own random.shuffle, as in CF-NADE.
    """
    # Side-information matrices stay None unless the branch below builds them
    # (only ml_100k and ml_1m ship user/item features).
    u_features = None
    v_features = None
    print("Loading dataset", fname)
    data_dir = "raw_data/" + fname
    # --- MovieLens 100K: tab-separated u.data plus u.item / u.user side info ---
    if fname == "ml_100k":
        # Check if files exist and download otherwise
        files = ["/u.data", "/u.item", "/u.user"]
        download_dataset(fname, files, data_dir)
        sep = "\t"
        filename = data_dir + files[0]
        dtypes = {
            "u_nodes": np.int32,
            "v_nodes": np.int32,
            "ratings": np.float32,
            "timestamp": np.float64,
        }
        data = pd.read_csv(
            filename,
            sep=sep,
            header=None,
            names=["u_nodes", "v_nodes", "ratings", "timestamp"],
            dtype=dtypes,
        )
        # shuffle here like cf-nade paper with python's own random class
        # make sure to convert to list, otherwise random.shuffle acts weird on it without a warning
        data_array = data.values.tolist()
        random.seed(seed)
        random.shuffle(data_array)
        data_array = np.array(data_array)
        u_nodes_ratings = data_array[:, 0].astype(dtypes["u_nodes"])
        v_nodes_ratings = data_array[:, 1].astype(dtypes["v_nodes"])
        ratings = data_array[:, 2].astype(dtypes["ratings"])
        # Remap raw MovieLens ids onto contiguous [0, N) ranges; the dicts are
        # reused below to align the side-information rows.
        u_nodes_ratings, u_dict, num_users = map_data(u_nodes_ratings)
        v_nodes_ratings, v_dict, num_items = map_data(v_nodes_ratings)
        # NOTE(review): user ids become int64 while item ids stay int32 here
        # (unlike the other branches, which use int64 for both) — presumably
        # tolerated downstream; confirm before changing.
        u_nodes_ratings, v_nodes_ratings = u_nodes_ratings.astype(
            np.int64
        ), v_nodes_ratings.astype(np.int32)
        ratings = ratings.astype(np.float64)
        # Movie features (genres)
        sep = r"|"
        movie_file = data_dir + files[1]
        movie_headers = [
            "movie id",
            "movie title",
            "release date",
            "video release date",
            "IMDb URL",
            "unknown",
            "Action",
            "Adventure",
            "Animation",
            "Childrens",
            "Comedy",
            "Crime",
            "Documentary",
            "Drama",
            "Fantasy",
            "Film-Noir",
            "Horror",
            "Musical",
            "Mystery",
            "Romance",
            "Sci-Fi",
            "Thriller",
            "War",
            "Western",
        ]
        # engine="python" because u.item is latin-1-ish legacy data with a
        # non-default separator.
        movie_df = pd.read_csv(
            movie_file, sep=sep, header=None, names=movie_headers, engine="python"
        )
        # Columns 6+ are the 0/1 genre indicator columns.
        genre_headers = movie_df.columns.values[6:]
        num_genres = genre_headers.shape[0]
        v_features = np.zeros((num_items, num_genres), dtype=np.float32)
        for movie_id, g_vec in zip(
            movie_df["movie id"].values.tolist(),
            movie_df[genre_headers].values.tolist(),
        ):
            # Check if movie_id was listed in ratings file and therefore in mapping dictionary
            if movie_id in v_dict.keys():
                v_features[v_dict[movie_id], :] = g_vec
        # User features
        sep = r"|"
        users_file = data_dir + files[2]
        users_headers = ["user id", "age", "gender", "occupation", "zip code"]
        users_df = pd.read_csv(
            users_file, sep=sep, header=None, names=users_headers, engine="python"
        )
        occupation = set(users_df["occupation"].values.tolist())
        gender_dict = {"M": 0.0, "F": 1.0}
        # Feature layout: column 0 = age, column 1 = gender, columns 2+ are a
        # one-hot encoding of the occupation.
        occupation_dict = {f: i for i, f in enumerate(occupation, start=2)}
        num_feats = 2 + len(occupation_dict)
        u_features = np.zeros((num_users, num_feats), dtype=np.float32)
        for _, row in users_df.iterrows():
            u_id = row["user id"]
            if u_id in u_dict.keys():
                # age
                u_features[u_dict[u_id], 0] = row["age"]
                # gender
                u_features[u_dict[u_id], 1] = gender_dict[row["gender"]]
                # occupation
                u_features[u_dict[u_id], occupation_dict[row["occupation"]]] = 1.0
        u_features = sp.csr_matrix(u_features)
        v_features = sp.csr_matrix(v_features)
    # --- MovieLens 1M: '::'-separated .dat files with genre/user side info ---
    elif fname == "ml_1m":
        # Check if files exist and download otherwise
        files = ["/ratings.dat", "/movies.dat", "/users.dat"]
        download_dataset(fname, files, data_dir)
        sep = r"\:\:"
        filename = data_dir + files[0]
        dtypes = {
            "u_nodes": np.int64,
            "v_nodes": np.int64,
            "ratings": np.float32,
            "timestamp": np.float64,
        }
        # use engine='python' to ignore warning about switching to python backend when using regexp for sep
        data = pd.read_csv(
            filename,
            sep=sep,
            header=None,
            names=["u_nodes", "v_nodes", "ratings", "timestamp"],
            converters=dtypes,
            engine="python",
        )
        # shuffle here like cf-nade paper with python's own random class
        # make sure to convert to list, otherwise random.shuffle acts weird on it without a warning
        data_array = data.values.tolist()
        random.seed(seed)
        random.shuffle(data_array)
        data_array = np.array(data_array)
        u_nodes_ratings = data_array[:, 0].astype(dtypes["u_nodes"])
        v_nodes_ratings = data_array[:, 1].astype(dtypes["v_nodes"])
        ratings = data_array[:, 2].astype(dtypes["ratings"])
        u_nodes_ratings, u_dict, num_users = map_data(u_nodes_ratings)
        v_nodes_ratings, v_dict, num_items = map_data(v_nodes_ratings)
        u_nodes_ratings, v_nodes_ratings = u_nodes_ratings.astype(
            np.int64
        ), v_nodes_ratings.astype(np.int64)
        ratings = ratings.astype(np.float32)
        # Load movie features
        movies_file = data_dir + files[1]
        movies_headers = ["movie_id", "title", "genre"]
        movies_df = pd.read_csv(
            movies_file, sep=sep, header=None, names=movies_headers, engine="python"
        )
        # Extracting all genres
        genres = []
        for s in movies_df["genre"].values:
            genres.extend(s.split("|"))
        genres = list(set(genres))
        num_genres = len(genres)
        genres_dict = {g: idx for idx, g in enumerate(genres)}
        # Creating 0 or 1 valued features for all genres
        v_features = np.zeros((num_items, num_genres), dtype=np.float32)
        for movie_id, s in zip(
            movies_df["movie_id"].values.tolist(), movies_df["genre"].values.tolist()
        ):
            # Check if movie_id was listed in ratings file and therefore in mapping dictionary
            if movie_id in v_dict.keys():
                gen = s.split("|")
                for g in gen:
                    v_features[v_dict[movie_id], genres_dict[g]] = 1.0
        # Load user features
        users_file = data_dir + files[2]
        users_headers = ["user_id", "gender", "age", "occupation", "zip-code"]
        users_df = pd.read_csv(
            users_file, sep=sep, header=None, names=users_headers, engine="python"
        )
        # Extracting all features
        # One-hot encode every non-id column; `cntr` offsets each column's
        # category block so the dicts index disjoint feature ranges.
        cols = users_df.columns.values[1:]
        cntr = 0
        feat_dicts = []
        for header in cols:
            d = dict()
            feats = np.unique(users_df[header].values).tolist()
            d.update({f: i for i, f in enumerate(feats, start=cntr)})
            feat_dicts.append(d)
            cntr += len(d)
        num_feats = sum(len(d) for d in feat_dicts)
        u_features = np.zeros((num_users, num_feats), dtype=np.float32)
        for _, row in users_df.iterrows():
            u_id = row["user_id"]
            if u_id in u_dict.keys():
                for k, header in enumerate(cols):
                    u_features[u_dict[u_id], feat_dicts[k][row[header]]] = 1.0
        u_features = sp.csr_matrix(u_features)
        v_features = sp.csr_matrix(v_features)
    # --- MovieLens 10M: ratings only, no side information ---
    elif fname == "ml_10m":
        # Check if files exist and download otherwise
        files = ["/ratings.dat"]
        download_dataset(fname, files, data_dir)
        sep = r"\:\:"
        filename = data_dir + files[0]
        dtypes = {
            "u_nodes": np.int64,
            "v_nodes": np.int64,
            "ratings": np.float32,
            "timestamp": np.float64,
        }
        # use engine='python' to ignore warning about switching to python backend when using regexp for sep
        data = pd.read_csv(
            filename,
            sep=sep,
            header=None,
            names=["u_nodes", "v_nodes", "ratings", "timestamp"],
            converters=dtypes,
            engine="python",
        )
        # shuffle here like cf-nade paper with python's own random class
        # make sure to convert to list, otherwise random.shuffle acts weird on it without a warning
        data_array = data.values.tolist()
        random.seed(seed)
        random.shuffle(data_array)
        data_array = np.array(data_array)
        u_nodes_ratings = data_array[:, 0].astype(dtypes["u_nodes"])
        v_nodes_ratings = data_array[:, 1].astype(dtypes["v_nodes"])
        ratings = data_array[:, 2].astype(dtypes["ratings"])
        u_nodes_ratings, u_dict, num_users = map_data(u_nodes_ratings)
        v_nodes_ratings, v_dict, num_items = map_data(v_nodes_ratings)
        u_nodes_ratings, v_nodes_ratings = u_nodes_ratings.astype(
            np.int64
        ), v_nodes_ratings.astype(np.int64)
        ratings = ratings.astype(np.float32)
    # --- MovieLens 25M: pre-processed CSV read in chunks (no shuffling) ---
    elif fname == "ml_25m":
        # Please download the processed movielens25M.csv to raw_data/ml_25m/
        # Each row is uid,iid,cid,time,rating, sorted by time
        files = ["/movielens25M.csv"]
        # Hard-coded row count only drives the tqdm progress-bar total.
        row_count = 24999850
        filename = data_dir + files[0]
        chunksize = 10000
        data = pd.DataFrame()
        pbar = tqdm(
            pd.read_csv(
                filename,
                header=0,
                usecols=["uid", "iid", "rating"],
                chunksize=chunksize,
            ),
            total=row_count // chunksize,
        )
        for chunk in pbar:
            data = pd.concat([data, chunk], ignore_index=True)
        data_array = data.values
        u_nodes_ratings = data_array[:, 0]
        v_nodes_ratings = data_array[:, 1]
        ratings = data_array[:, 2]
        u_nodes_ratings, u_dict, num_users = map_data(u_nodes_ratings)
        v_nodes_ratings, v_dict, num_items = map_data(v_nodes_ratings)
        u_nodes_ratings, v_nodes_ratings = u_nodes_ratings.astype(
            np.int64
        ), v_nodes_ratings.astype(np.int64)
        ratings = ratings.astype(np.float32)
    else:
        raise ValueError("Dataset name not recognized: " + fname)
    if verbose:
        print("Number of users = %d" % num_users)
        print("Number of items = %d" % num_items)
        print("Number of links = %d" % ratings.shape[0])
        print(
            "Fraction of positive links = %.4f"
            % (float(ratings.shape[0]) / (num_users * num_items),)
        )
    return (
        num_users,
        num_items,
        u_nodes_ratings,
        v_nodes_ratings,
        ratings,
        u_features,
        v_features,
    )
| 31.483871 | 114 | 0.582309 |
c4d3910bbcfac2e0d5a0bad43d8f250a9115aa84 | 11,719 | py | Python | QUANTAXIS/__init__.py | yehonghao/QUANTAXIS | d977810233607e2bea016450bef6bb080582b498 | [
"MIT"
] | 1 | 2021-02-19T07:49:31.000Z | 2021-02-19T07:49:31.000Z | QUANTAXIS/__init__.py | yehonghao/QUANTAXIS | d977810233607e2bea016450bef6bb080582b498 | [
"MIT"
] | null | null | null | QUANTAXIS/__init__.py | yehonghao/QUANTAXIS | d977810233607e2bea016450bef6bb080582b498 | [
"MIT"
] | 1 | 2021-02-19T07:49:38.000Z | 2021-02-19T07:49:38.000Z | #coding :utf-8
#
# The MIT License (MIT)
#
# Copyright (c) 2016-2019 yutiansut/QUANTAXIS
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
"""
QUANTAXIS
Quantitative Financial Strategy Framework
by yutiansut
2017/4/8
"""
# Package version string, reported by the CLI and setup tooling.
__version__ = '1.5.16'
# Primary author of the QUANTAXIS framework.
__author__ = 'yutiansut'
import argparse
# check
import sys
# CMD and Cli
import QUANTAXIS.QACmd
from QUANTAXIS.QAAnalysis import *
from QUANTAXIS.QAApplication.QAAnalysis import QA_backtest_analysis_backtest
# Backtest
from QUANTAXIS.QAApplication.QABacktest import QA_Backtest
from QUANTAXIS.QAApplication.QAResult import backtest_result_analyzer
from QUANTAXIS.QAARP.QAAccount import QA_Account
from QUANTAXIS.QAARP.QAPortfolio import QA_Portfolio, QA_PortfolioView
from QUANTAXIS.QAARP.QARisk import QA_Performance, QA_Risk
from QUANTAXIS.QAARP.QAStrategy import QA_Strategy
from QUANTAXIS.QAARP.QAUser import QA_User
from QUANTAXIS.QACmd import QA_cmd
# Data
from QUANTAXIS.QAData import (
QA_data_calc_marketvalue, QA_data_ctptick_resample, QA_data_day_resample,
QA_data_futuremin_resample, QA_data_futuremin_resample_series,
QA_data_futuremin_resample_tb_kq, QA_data_futuremin_resample_tb_kq2,
QA_data_marketvalue, QA_data_min_resample, QA_data_stock_to_fq,
QA_data_tick_resample, QA_data_tick_resample_1min, QA_DataStruct_Day,
QA_DataStruct_Financial, QA_DataStruct_Future_day,
QA_DataStruct_Future_min, QA_DataStruct_Index_day, QA_DataStruct_Index_min,
QA_DataStruct_Indicators, QA_DataStruct_Min, QA_DataStruct_Series,
QA_DataStruct_Stock_block, QA_DataStruct_Stock_day,
QA_DataStruct_Stock_min, QA_DataStruct_Stock_realtime,
QA_DataStruct_Stock_transaction, QDS_IndexDayWarpper, QDS_IndexMinWarpper,
QDS_StockDayWarpper, QDS_StockMinWarpper, from_tushare)
from QUANTAXIS.QAData.dsmethods import *
# ENGINE
from QUANTAXIS.QAEngine import (
QA_AsyncExec, QA_AsyncQueue, QA_AsyncScheduler, QA_AsyncTask,
QA_AsyncThread, QA_Engine, QA_Event, QA_Task, QA_Thread, QA_Worker)
from QUANTAXIS.QAFetch import (
QA_fetch_get_bond_list, QA_fetch_get_chibor, QA_fetch_get_exchangerate_day,
QA_fetch_get_exchangerate_list, QA_fetch_get_exchangerate_min,
QA_fetch_get_future_day, QA_fetch_get_future_list, QA_fetch_get_future_min,
QA_fetch_get_future_realtime, QA_fetch_get_future_transaction,
QA_fetch_get_future_transaction_realtime, QA_fetch_get_globalfuture_day,
QA_fetch_get_globalfuture_list, QA_fetch_get_globalfuture_min,
QA_fetch_get_globalindex_day, QA_fetch_get_globalindex_list,
QA_fetch_get_globalindex_min, QA_fetch_get_hkfund_day,
QA_fetch_get_hkfund_list, QA_fetch_get_hkfund_min,
QA_fetch_get_hkindex_day, QA_fetch_get_hkindex_list,
QA_fetch_get_hkindex_min, QA_fetch_get_hkstock_day,
QA_fetch_get_hkstock_list, QA_fetch_get_hkstock_min,
QA_fetch_get_index_day, QA_fetch_get_index_list, QA_fetch_get_index_min,
QA_fetch_get_macroindex_day, QA_fetch_get_macroindex_list,
QA_fetch_get_macroindex_min, QA_fetch_get_option_day,
QA_fetch_get_option_list, QA_fetch_get_option_min,
QA_fetch_get_security_bars, QA_fetch_get_stock_block,
QA_fetch_get_stock_day, QA_fetch_get_stock_indicator,
QA_fetch_get_stock_info, QA_fetch_get_stock_list, QA_fetch_get_stock_min,
QA_fetch_get_stock_realtime, QA_fetch_get_stock_transaction,
QA_fetch_get_stock_transaction_realtime, QA_fetch_get_stock_xdxr,
QA_fetch_get_trade_date, QA_fetch_get_usstock_day,
QA_fetch_get_usstock_list, QA_fetch_get_usstock_min, get_stock_market)
# fetch methods
from QUANTAXIS.QAFetch.Fetcher import QA_quotation
from QUANTAXIS.QAFetch.QACrawler import (QA_fetch_get_sh_margin,
QA_fetch_get_sz_margin)
from QUANTAXIS.QAFetch.QAQuery import (
QA_fetch_account, QA_fetch_backtest_history, QA_fetch_backtest_info,
QA_fetch_ctp_tick, QA_fetch_etf_list, QA_fetch_financial_report,
QA_fetch_future_day, QA_fetch_future_list, QA_fetch_future_min,
QA_fetch_future_tick, QA_fetch_index_day, QA_fetch_index_list,
QA_fetch_index_min, QA_fetch_quotation, QA_fetch_quotations,
QA_fetch_stock_block, QA_fetch_stock_day, QA_fetch_stock_full,
QA_fetch_stock_info, QA_fetch_stock_list, QA_fetch_stock_min,
QA_fetch_stock_name, QA_fetch_stock_xdxr, QA_fetch_trade_date)
from QUANTAXIS.QAFetch.QAQuery_Advance import *
from QUANTAXIS.QAIndicator import *
# market
from QUANTAXIS.QAMarket import (QA_BacktestBroker, QA_Broker, QA_Dealer,
QA_Market, QA_Order, QA_OrderHandler,
QA_OrderQueue, QA_Position, QA_RandomBroker,
QA_RealBroker, QA_SimulatedBroker,
QA_TTSBroker)
from QUANTAXIS.QASetting.QALocalize import (cache_path, download_path,
log_path, qa_path, setting_path)
# save
from QUANTAXIS.QASU.main import (QA_SU_save_etf_day, QA_SU_save_etf_min,
QA_SU_save_financialfiles,
QA_SU_save_future_list, QA_SU_save_index_day,
QA_SU_save_index_list, QA_SU_save_index_min,
QA_SU_save_stock_block, QA_SU_save_stock_day,
QA_SU_save_stock_info,
QA_SU_save_stock_info_tushare,
QA_SU_save_stock_list, QA_SU_save_stock_min,
QA_SU_save_stock_min_5, QA_SU_save_stock_xdxr)
from QUANTAXIS.QASU.save_strategy import QA_SU_save_strategy
from QUANTAXIS.QASU.user import QA_user_sign_in, QA_user_sign_up
from QUANTAXIS.QAUtil import ( # QAPARAMETER
AMOUNT_MODEL, BROKER_EVENT, BROKER_TYPE, DATABASE, DATASOURCE,
ENGINE_EVENT, EXCHANGE_ID, FREQUENCE, MARKET_ERROR, MARKET_EVENT,
MARKET_TYPE, ORDER_DIRECTION, ORDER_EVENT, ORDER_MODEL, ORDER_STATUS,
OUTPUT_FORMAT, RUNNING_ENVIRONMENT, RUNNING_STATUS, TRADE_STATUS,
QA_Setting, QA_util_calc_time, QA_util_cfg_initial, QA_util_code_tolist,
QA_util_code_tostr, QA_util_date_gap, QA_util_date_int2str,
QA_util_date_stamp, QA_util_date_str2int, QA_util_date_today,
QA_util_date_valid, QA_util_dict_remove_key, QA_util_diff_list,
QA_util_file_md5, QA_util_format_date2str, QA_util_get_cfg,
QA_util_get_date_index, QA_util_get_index_date, QA_util_get_last_datetime,
QA_util_get_last_day, QA_util_get_next_datetime, QA_util_get_next_day,
QA_util_get_next_trade_date, QA_util_get_order_datetime,
QA_util_get_pre_trade_date, QA_util_get_real_date,
QA_util_get_real_datelist, QA_util_get_trade_datetime,
QA_util_get_trade_gap, QA_util_get_trade_range, QA_util_id2date,
QA_util_if_trade, QA_util_if_tradetime, QA_util_is_trade,
QA_util_log_debug, QA_util_log_expection, QA_util_log_info,
QA_util_make_hour_index, QA_util_make_min_index, QA_util_mongo_infos,
QA_util_mongo_initial, QA_util_mongo_status, QA_util_ms_stamp,
QA_util_multi_demension_list, QA_util_random_with_topic, QA_util_realtime,
QA_util_save_csv, QA_util_select_hours, QA_util_select_min,
QA_util_send_mail, QA_util_sql_async_mongo_setting,
QA_util_sql_mongo_setting, QA_util_sql_mongo_sort_ASCENDING,
QA_util_sql_mongo_sort_DESCENDING, QA_util_tdxtimestamp,
QA_util_time_delay, QA_util_time_gap, QA_util_time_now, QA_util_time_stamp,
QA_util_to_datetime, QA_util_to_json_from_pandas,
QA_util_to_list_from_numpy, QA_util_to_list_from_pandas,
QA_util_to_pandas_from_json, QA_util_to_pandas_from_list, QA_util_web_ping,
QATZInfo_CN, future_ip_list, info_ip_list, stock_ip_list, trade_date_sse)
# from QUANTAXIS.QASU.save_backtest import (
# QA_SU_save_account_message, QA_SU_save_backtest_message, QA_SU_save_account_to_csv)
# event driver
# Account,Risk,Portfolio,User,Strategy
# Setting
# Util
#from QUANTAXIS.QAFetch.QATdx_adv import bat
# Hard guard: QUANTAXIS 1.5.x only supports CPython 3.4-3.8. Abort at import
# time on any other interpreter instead of failing later with obscure
# syntax/dependency errors. Note sys.exit() terminates the importing process.
if sys.version_info.major != 3 or sys.version_info.minor not in [4, 5, 6, 7, 8]:
    print('wrong version, should be 3.4/3.5/3.6/3.7/3.8 version')
    sys.exit()
#QA_util_log_info('Welcome to QUANTAXIS, the Version is {}'.format(__version__))
def __repr__():
    """Return the multi-line QUANTAXIS ASCII-art banner.

    Module-level hook: the giant single-quoted literal below uses
    backslash-newline continuations, so each continued line's leading
    characters are part of the string itself.
    """
    return ' \n \
```````````````````````````````````````````````````````````````````````````````````````````````````````````````````````` \n \
``########`````##````````##``````````##`````````####````````##```##########````````#``````##``````###```##`````######`` \n \
`##``````## ```##````````##`````````####````````##`##```````##```````##```````````###``````##````##`````##```##`````##` \n \
##````````##```##````````##````````##`##````````##``##``````##```````##``````````####```````#```##``````##```##``````## \n \
##````````##```##````````##```````##```##```````##```##`````##```````##`````````##`##```````##`##```````##````##``````` \n \
##````````##```##````````##``````##`````##``````##````##````##```````##````````##``###```````###````````##`````##`````` \n \
##````````##```##````````##``````##``````##`````##`````##```##```````##```````##````##```````###````````##``````###```` \n \
##````````##```##````````##`````##````````##````##``````##``##```````##``````##``````##`````##`##```````##````````##``` \n \
##````````##```##````````##````#############````##```````##`##```````##`````###########`````##``##``````##`````````##`` \n \
###```````##```##````````##```##```````````##```##```````##`##```````##````##`````````##```##```##``````##```##`````##` \n \
`##``````###````##``````###``##`````````````##``##````````####```````##```##``````````##``###````##`````##````##`````## \n \
``#########``````########```##``````````````###`##``````````##```````##``##````````````##`##``````##````##`````###``### \n \
````````#####`````````````````````````````````````````````````````````````````````````````````````````````````````##`` \n \
``````````````````````````````````````````````````````````````````````````````````````````````````````````````````````` \n \
``````````````````````````Copyright``yutiansut``2018``````QUANTITATIVE FINANCIAL FRAMEWORK````````````````````````````` \n \
``````````````````````````````````````````````````````````````````````````````````````````````````````````````````````` \n \
```````````````````````````````````````````````````````````````````````````````````````````````````````````````````````` \n \
```````````````````````````````````````````````````````````````````````````````````````````````````````````````````````` \n '
# str() of the module mirrors repr().
__str__ = __repr__
# QA_util_log_info(Logo)
| 56.613527 | 137 | 0.632989 |
46c535414ccdd8943ae848255aaf38bfe69effe9 | 10,757 | py | Python | gbpclient/tests/unit/test_cli20_l3policy.py | noironetworks/python-group-based-policy-client | 150c9b0d752bfea79651253d9fa3382842df1a69 | [
"Apache-2.0"
] | null | null | null | gbpclient/tests/unit/test_cli20_l3policy.py | noironetworks/python-group-based-policy-client | 150c9b0d752bfea79651253d9fa3382842df1a69 | [
"Apache-2.0"
] | null | null | null | gbpclient/tests/unit/test_cli20_l3policy.py | noironetworks/python-group-based-policy-client | 150c9b0d752bfea79651253d9fa3382842df1a69 | [
"Apache-2.0"
] | 1 | 2015-09-29T21:36:04.000Z | 2015-09-29T21:36:04.000Z | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
import logging
import sys
from gbpclient.gbp.v2_0 import groupbasedpolicy as gbp
from gbpclient.tests.unit import test_cli20
class CLITestV20L3PolicyJSON(test_cli20.CLITestV20Base):
    """CLI round-trip tests for the GBP ``l3-policy-*`` commands.

    Each test builds the argv for a create/list/show/update/delete call plus
    the request body the CLI is expected to send, and delegates the
    HTTP-level verification to the ``_test_*_resource`` helpers on the base
    class.
    """

    LOG = logging.getLogger(__name__)

    def setUp(self):
        super(CLITestV20L3PolicyJSON, self).setUp()

    def test_create_l3_policy_with_mandatory_params(self):
        """l3-policy-create with only the mandatory arguments."""
        resource = 'l3_policy'
        cmd = gbp.CreateL3Policy(test_cli20.MyApp(sys.stdout), None)
        name = 'my-name'
        tenant_id = 'my-tenant'
        my_id = 'my-id'
        args = ['--tenant-id', tenant_id,
                name]
        position_names = ['name', ]
        position_values = [name, ]
        self._test_create_resource(resource, cmd, name, my_id, args,
                                   position_names, position_values,
                                   tenant_id=tenant_id)

    def test_create_l3_policy_with_all_params(self):
        """l3-policy-create with all params."""
        resource = 'l3_policy'
        cmd = gbp.CreateL3Policy(test_cli20.MyApp(sys.stdout), None)
        name = 'myname'
        tenant_id = 'mytenant'
        description = 'My L3 Policy'
        my_id = 'someid'
        ip_version = '4'
        ip_pool = '172.16.0.0/12'
        subnet_prefix_length = '24'
        address_scope_v4_id = 'ascpid'
        subnetpools_v4 = 'sp1,sp2'
        # 'segment=ip1:ip2' parses into {segment: [ip1, ip2]}.
        external_segment = 'seg_uuid1=1.1.1.0:2.2.2.0'
        expected_external_segments = {'seg_uuid1': ['1.1.1.0', '2.2.2.0']}
        routers = 'uuid1,uuid2'
        shared = 'true'
        args = ['--tenant-id', tenant_id,
                '--description', description,
                '--ip-version', ip_version,
                '--ip-pool', ip_pool,
                '--subnet-prefix-length', subnet_prefix_length,
                '--address-scope-v4-id', address_scope_v4_id,
                '--subnetpools-v4', subnetpools_v4,
                '--external-segment', external_segment,
                '--routers', routers,
                '--shared', shared,
                name]
        position_names = ['name', ]
        position_values = [name, ]
        self._test_create_resource(resource, cmd, name, my_id, args,
                                   position_names, position_values,
                                   tenant_id=tenant_id,
                                   description=description,
                                   ip_version=4,
                                   ip_pool=ip_pool,
                                   subnet_prefix_length=24,
                                   address_scope_v4_id=address_scope_v4_id,
                                   subnetpools_v4=['sp1', 'sp2'],
                                   routers=['uuid1', 'uuid2'],
                                   external_segments=
                                   expected_external_segments, shared=shared)

    def test_create_l3_policy_with_ipv6(self):
        """l3-policy-create with ipv6 params."""
        resource = 'l3_policy'
        cmd = gbp.CreateL3Policy(test_cli20.MyApp(sys.stdout), None)
        name = 'myname'
        tenant_id = 'mytenant'
        description = 'My L3 Policy'
        my_id = 'someid'
        ip_version = '6'
        address_scope_v6_id = 'ascpid'
        subnetpools_v6 = 'sp1,sp2'
        args = ['--tenant-id', tenant_id,
                '--description', description,
                '--ip-version', ip_version,
                '--address-scope-v6-id', address_scope_v6_id,
                '--subnetpools-v6', subnetpools_v6,
                name]
        position_names = ['name', ]
        position_values = [name, ]
        self._test_create_resource(resource, cmd, name, my_id, args,
                                   position_names, position_values,
                                   tenant_id=tenant_id,
                                   description=description,
                                   ip_version=6,
                                   address_scope_v6_id=address_scope_v6_id,
                                   subnetpools_v6=['sp1', 'sp2'])

    def test_create_l3_policy_with_external_segment(self):
        """l3-policy-create with a bare external segment (no fixed IPs)."""
        resource = 'l3_policy'
        cmd = gbp.CreateL3Policy(test_cli20.MyApp(sys.stdout), None)
        name = 'name'
        tenant_id = 'mytenant'
        my_id = 'someid'
        # A segment without '=...' maps to an empty IP list.
        external_segment = 'seg_uuid1'
        expected_external_segments = {'seg_uuid1': []}
        args = ['--tenant-id', tenant_id,
                '--external-segment', external_segment,
                name]
        position_names = ['name', ]
        position_values = [name, ]
        self._test_create_resource(resource, cmd, name, my_id, args,
                                   position_names, position_values,
                                   tenant_id=tenant_id,
                                   external_segments=
                                   expected_external_segments)

    def test_create_l3_policy_with_allowed_vm_names(self):
        """l3-policy-create with a comma-separated allowed-vm-names list."""
        resource = 'l3_policy'
        cmd = gbp.CreateL3Policy(test_cli20.MyApp(sys.stdout), None)
        name = 'name'
        tenant_id = 'mytenant'
        my_id = 'someid'
        allowed_vm_names = "^safe_vm*,good_vm*"
        args = ['--tenant-id', tenant_id,
                '--allowed-vm-names', allowed_vm_names,
                name]
        position_names = ['name', ]
        position_values = [name, ]
        self._test_create_resource(resource, cmd, name, my_id, args,
                                   position_names, position_values,
                                   tenant_id=tenant_id,
                                   allowed_vm_names=['^safe_vm*', 'good_vm*'])

    def test_list_l3_policies(self):
        """l3-policy-list."""
        resource = 'l3_policies'
        cmd = gbp.ListL3Policy(test_cli20.MyApp(sys.stdout), None)
        self._test_list_resources(resource, cmd, True)

    def test_show_l3_policy(self):
        """l3-policy-show with a field selection."""
        resource = 'l3_policy'
        cmd = gbp.ShowL3Policy(test_cli20.MyApp(sys.stdout), None)
        args = ['--fields', 'id', self.test_id]
        self._test_show_resource(resource, cmd, self.test_id, args, ['id'])

    def test_update_l3_policy(self):
        """l3-policy-update of name and tags."""
        resource = 'l3_policy'
        cmd = gbp.UpdateL3Policy(test_cli20.MyApp(sys.stdout), None)
        self._test_update_resource(resource, cmd, 'myid',
                                   ['myid', '--name', 'myname',
                                    '--tags', 'a', 'b'],
                                   {'name': 'myname', 'tags': ['a', 'b'], })

    def test_update_l3_policy_with_all_params(self):
        """l3-policy-update with all updatable params."""
        resource = 'l3_policy'
        cmd = gbp.UpdateL3Policy(test_cli20.MyApp(sys.stdout), None)
        name = 'myname'
        description = 'My L3 Policy'
        my_id = 'someid'
        subnet_prefix_length = '24'
        external_segment = 'seg_uuid1=1.1.1.0:2.2.2.0'
        expected_external_segments = {'seg_uuid1': ['1.1.1.0', '2.2.2.0']}
        shared = 'true'
        subnetpools_v4 = 'sp1,sp2'
        routers = 'uuid1,uuid2'
        args = ['--name', name,
                '--description', description,
                '--subnet-prefix-length', subnet_prefix_length,
                '--subnetpools-v4', subnetpools_v4,
                '--external-segment', external_segment,
                '--routers', routers,
                '--shared', shared,
                my_id]
        # BUG FIX: 'routers' used to appear twice in this dict; the first
        # (raw string) entry was silently overwritten by the parsed list.
        # Only the parsed list is what the CLI actually sends.
        params = {
            'name': name,
            'description': description,
            'subnet_prefix_length': 24,
            'subnetpools_v4': ['sp1', 'sp2'],
            'external_segments': expected_external_segments,
            'routers': ['uuid1', 'uuid2'],
            'shared': shared
        }
        self._test_update_resource(resource, cmd, my_id, args, params)

    def test_update_l3_policy_ipv6_subnetpools(self):
        """l3-policy-update of the IPv6 subnetpool list."""
        resource = 'l3_policy'
        cmd = gbp.UpdateL3Policy(test_cli20.MyApp(sys.stdout), None)
        my_id = 'someid'
        subnetpools_v6 = 'sp1,sp2'
        args = ['--subnetpools-v6', subnetpools_v6,
                my_id]
        params = {
            'subnetpools_v6': ['sp1', 'sp2'],
        }
        self._test_update_resource(resource, cmd, my_id, args, params)

    def test_update_l3_policy_unset_external_segment(self):
        """l3-policy-update clearing the external segments with ''."""
        resource = 'l3_policy'
        cmd = gbp.UpdateL3Policy(test_cli20.MyApp(sys.stdout), None)
        name = 'myname'
        description = 'My L3 Policy'
        my_id = 'someid'
        subnet_prefix_length = '24'
        # An empty string unsets the mapping entirely.
        external_segment = ''
        expected_external_segments = {}
        args = ['--name', name,
                '--description', description,
                '--subnet-prefix-length', subnet_prefix_length,
                '--external-segment', external_segment,
                my_id]
        params = {
            'name': name,
            'description': description,
            'subnet_prefix_length': 24,
            'external_segments': expected_external_segments,
        }
        self._test_update_resource(resource, cmd, my_id, args, params)

    def test_update_l3_policy_unset_routers(self):
        """l3-policy-update clearing the router list with ''."""
        resource = 'l3_policy'
        cmd = gbp.UpdateL3Policy(test_cli20.MyApp(sys.stdout), None)
        my_id = 'someid'
        routers = ''
        args = ['--routers', routers,
                my_id]
        params = {
            'routers': [],
        }
        self._test_update_resource(resource, cmd, my_id, args, params)

    def test_update_l3_policy_with_allowed_vm_names(self):
        """l3-policy-update of the allowed-vm-names list."""
        resource = 'l3_policy'
        cmd = gbp.UpdateL3Policy(test_cli20.MyApp(sys.stdout), None)
        my_id = 'someid'
        allowed_vm_names = "bad_vm*,^worse_vm*"
        args = ['--allowed-vm-names', allowed_vm_names,
                my_id]
        params = {
            'allowed_vm_names': ['bad_vm*', '^worse_vm*'],
        }
        self._test_update_resource(resource, cmd, my_id, args, params)

    def test_delete_l3_policy_name(self):
        """l3-policy-delete by id."""
        resource = 'l3_policy'
        cmd = gbp.DeleteL3Policy(test_cli20.MyApp(sys.stdout), None)
        my_id = 'my-id'
        args = [my_id]
        self._test_delete_resource(resource, cmd, my_id, args)
| 40.746212 | 78 | 0.549596 |
28873e06fec5d13855310a4e94a1b01786466372 | 3,541 | py | Python | fmriprep/workflows/bold/t2s.py | franklin-feingold/fmriprep | f46467101fccb257a23b1ba81ed5cdc66f278932 | [
"BSD-3-Clause"
] | null | null | null | fmriprep/workflows/bold/t2s.py | franklin-feingold/fmriprep | f46467101fccb257a23b1ba81ed5cdc66f278932 | [
"BSD-3-Clause"
] | null | null | null | fmriprep/workflows/bold/t2s.py | franklin-feingold/fmriprep | f46467101fccb257a23b1ba81ed5cdc66f278932 | [
"BSD-3-Clause"
] | 1 | 2022-02-21T08:19:49.000Z | 2022-02-21T08:19:49.000Z | # -*- coding: utf-8 -*-
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
"""
Generate T2* map from multi-echo BOLD images
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
.. autofunction:: init_bold_t2s_wf
"""
from nipype import logging
from nipype.pipeline import engine as pe
from nipype.interfaces import utility as niu
from niworkflows.engine.workflows import LiterateWorkflow as Workflow
from ...interfaces import T2SMap
from .util import init_skullstrip_bold_wf
LOGGER = logging.getLogger('nipype.workflow')
# pylint: disable=R0914
def init_bold_t2s_wf(echo_times, mem_gb, omp_nthreads,
                     t2s_coreg=False, name='bold_t2s_wf'):
    """
    Build a workflow that fits a T2* map and optimally combines multi-echo
    BOLD, wrapping the `tedana`_ `T2* workflow`_. The optimally combined
    series may optionally serve as a coregistration target.

    **Parameters**

        echo_times
            list of TEs (echo times) associated with each echo, passed
            through to :class:`~...interfaces.T2SMap`
        mem_gb : float
            Size of BOLD file in GB
        omp_nthreads : int
            Maximum number of threads an individual process may use
        t2s_coreg : bool
            Use the calculated T2*-map for T2*-driven coregistration
            (only affects the boilerplate description here)
        name : str
            Name of workflow (default: ``bold_t2s_wf``)

    **Inputs**

        bold_file
            list of individual echo files

    **Outputs**

        bold
            the optimally combined time series for all supplied echos
        bold_mask
            the binarized, skull-stripped adaptive T2* map
        bold_ref_brain
            the adaptive T2* map

    .. _tedana: https://github.com/me-ica/tedana
    .. _`T2* workflow`: https://tedana.readthedocs.io/en/latest/generated/tedana.workflows.t2smap_workflow.html#tedana.workflows.t2smap_workflow  # noqa
    """
    # NOTE(review): mem_gb and omp_nthreads are accepted but never used in
    # this body — presumably reserved for resource hints; confirm callers.
    workflow = Workflow(name=name)
    # Citation boilerplate emitted into the generated methods section.
    workflow.__desc__ = """\
A T2* map was estimated from the preprocessed BOLD by fitting to a monoexponential signal
decay model with log-linear regression.
For each voxel, the maximal number of echoes with reliable signal in that voxel were
used to fit the model.
The calculated T2* map was then used to optimally combine preprocessed BOLD across
echoes following the method described in [@posse_t2s].
The optimally combined time series was carried forward as the *preprocessed BOLD*{}.
""".format('' if not t2s_coreg else ', and the T2* map was also retained as the BOLD reference')
    inputnode = pe.Node(niu.IdentityInterface(fields=['bold_file']), name='inputnode')
    outputnode = pe.Node(niu.IdentityInterface(fields=['bold', 'bold_mask', 'bold_ref_brain']),
                         name='outputnode')
    LOGGER.log(25, 'Generating T2* map and optimally combined ME-EPI time series.')
    # Fit the T2* map and produce the optimal combination across echoes.
    t2smap_node = pe.Node(T2SMap(echo_times=echo_times), name='t2smap_node')
    # Skull-strip the T2* map to yield the mask and brain-only reference.
    skullstrip_t2smap_wf = init_skullstrip_bold_wf(name='skullstrip_t2smap_wf')
    workflow.connect([
        (inputnode, t2smap_node, [('bold_file', 'in_files')]),
        (t2smap_node, outputnode, [('optimal_comb', 'bold')]),
        (t2smap_node, skullstrip_t2smap_wf, [('t2star_map', 'inputnode.in_file')]),
        (skullstrip_t2smap_wf, outputnode, [
            ('outputnode.mask_file', 'bold_mask'),
            ('outputnode.skull_stripped_file', 'bold_ref_brain')]),
    ])
    return workflow
| 35.767677 | 152 | 0.675798 |
be1ed61ff7fccadd9c383600f5b1b919765c5a8f | 22,682 | py | Python | neutron/services/qos/qos_plugin.py | knodir/neutron | ac4e28478ac8a8a0c9f5c5785f6a6bcf532c66b8 | [
"Apache-2.0"
] | null | null | null | neutron/services/qos/qos_plugin.py | knodir/neutron | ac4e28478ac8a8a0c9f5c5785f6a6bcf532c66b8 | [
"Apache-2.0"
] | null | null | null | neutron/services/qos/qos_plugin.py | knodir/neutron | ac4e28478ac8a8a0c9f5c5785f6a6bcf532c66b8 | [
"Apache-2.0"
] | null | null | null | # Copyright (c) 2015 Red Hat Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from neutron_lib.api.definitions import port as port_def
from neutron_lib.api.definitions import port_resource_request
from neutron_lib.api.definitions import portbindings
from neutron_lib.api.definitions import qos as qos_apidef
from neutron_lib.api.definitions import qos_bw_minimum_ingress
from neutron_lib.callbacks import events as callbacks_events
from neutron_lib.callbacks import registry as callbacks_registry
from neutron_lib.callbacks import resources as callbacks_resources
from neutron_lib import constants as nl_constants
from neutron_lib import context
from neutron_lib.db import api as db_api
from neutron_lib.db import resource_extend
from neutron_lib import exceptions as lib_exc
from neutron_lib.exceptions import qos as qos_exc
from neutron_lib.placement import constants as pl_constants
from neutron_lib.placement import utils as pl_utils
from neutron_lib.services.qos import constants as qos_consts
from neutron._i18n import _
from neutron.db import db_base_plugin_common
from neutron.extensions import qos
from neutron.objects import base as base_obj
from neutron.objects import network as network_object
from neutron.objects import ports as ports_object
from neutron.objects.qos import policy as policy_object
from neutron.objects.qos import qos_policy_validator as checker
from neutron.objects.qos import rule_type as rule_type_object
from neutron.services.qos.drivers import manager
@resource_extend.has_resource_extenders
class QoSPlugin(qos.QoSPluginBase):
    """Implementation of the Neutron QoS Service Plugin.
    This class implements a Quality of Service plugin that provides quality of
    service parameters over ports and networks.
    """
    # API extension aliases this plugin advertises to the Neutron API layer.
    supported_extension_aliases = [qos_apidef.ALIAS,
                                   'qos-bw-limit-direction',
                                   'qos-default',
                                   'qos-rule-type-details',
                                   port_resource_request.ALIAS,
                                   qos_bw_minimum_ingress.ALIAS]
    # Capability flags consumed by the API layer (name-mangled on purpose).
    __native_pagination_support = True
    __native_sorting_support = True
    __filter_validation_support = True
    def __init__(self):
        super(QoSPlugin, self).__init__()
        self.driver_manager = manager.QosServiceDriverManager()
        # PRECOMMIT hooks: validating inside the port/network DB transaction
        # lets an unsupported rule/port combination abort the operation.
        callbacks_registry.subscribe(
            self._validate_create_port_callback,
            callbacks_resources.PORT,
            callbacks_events.PRECOMMIT_CREATE)
        callbacks_registry.subscribe(
            self._validate_update_port_callback,
            callbacks_resources.PORT,
            callbacks_events.PRECOMMIT_UPDATE)
        callbacks_registry.subscribe(
            self._validate_update_network_callback,
            callbacks_resources.NETWORK,
            callbacks_events.PRECOMMIT_UPDATE)
    @staticmethod
    @resource_extend.extends([port_def.COLLECTION_NAME])
    def _extend_port_resource_request(port_res, port_db):
        """Add resource request to a port.

        Translates any minimum-bandwidth rules of the port's effective QoS
        policy into a Placement ``resource_request`` dict; leaves it None
        when no policy or no min-bw rules apply.
        """
        port_res['resource_request'] = None
        qos_policy = policy_object.QosPolicy.get_port_policy(
            context.get_admin_context(), port_res['id'])
        # Note(lajoskatona): QosPolicyPortBinding is not ready for some
        # reasons, so let's try and fetch the QoS policy directly if there is a
        # qos_policy_id in port_res.
        if (not qos_policy and 'qos_policy_id' in port_res and
                port_res['qos_policy_id']):
            qos_policy = policy_object.QosPolicy.get_policy_obj(
                context.get_admin_context(), port_res['qos_policy_id']
            )
        # Note(lajoskatona): handle the case when the port inherits qos-policy
        # from the network.
        if not qos_policy:
            net = network_object.Network.get_object(
                context.get_admin_context(), id=port_res['network_id'])
            if net and net.qos_policy_id:
                qos_policy = policy_object.QosPolicy.get_network_policy(
                    context.get_admin_context(), net.id)
        if not qos_policy:
            return port_res
        resources = {}
        rule_direction_class = {
            nl_constants.INGRESS_DIRECTION:
                pl_constants.CLASS_NET_BW_INGRESS_KBPS,
            nl_constants.EGRESS_DIRECTION:
                pl_constants.CLASS_NET_BW_EGRESS_KBPS
        }
        # Only minimum-bandwidth rules map to Placement resource classes.
        for rule in qos_policy.rules:
            if rule.rule_type == qos_consts.RULE_TYPE_MINIMUM_BANDWIDTH:
                resources[rule_direction_class[rule.direction]] = rule.min_kbps
        if not resources:
            return port_res
        vnic_trait = pl_utils.vnic_type_trait(
            port_res[portbindings.VNIC_TYPE])
        # TODO(lajoskatona): Change to handle all segments when any traits
        # support will be available. See Placement spec:
        # https://review.openstack.org/565730
        first_segment = network_object.NetworkSegment.get_objects(
            context.get_admin_context(),
            network_id=port_res['network_id'])[0]
        if not first_segment or not first_segment.physical_network:
            return port_res
        physnet_trait = pl_utils.physnet_trait(
            first_segment.physical_network)
        resource_request = {
            'required': [physnet_trait, vnic_trait],
            'resources': resources
        }
        port_res['resource_request'] = resource_request
        return port_res
    def _get_ports_with_policy(self, context, policy):
        """Return all ports governed by ``policy``, either bound directly
        or inheriting it from their network (deduplicated).
        """
        networks_ids = policy.get_bound_networks()
        ports_with_net_policy = ports_object.Port.get_objects(
            context, network_id=networks_ids)
        # Keep only ports that do not override the network policy
        ports_with_net_policy = [
            port for port in ports_with_net_policy if
            port.qos_policy_id is None
        ]
        ports_ids = policy.get_bound_ports()
        ports_with_policy = ports_object.Port.get_objects(
            context, id=ports_ids)
        return list(set(ports_with_policy + ports_with_net_policy))
    def _validate_create_port_callback(self, resource, event, trigger,
                                       **kwargs):
        """PRECOMMIT_CREATE handler: reject the port if its effective QoS
        policy contains a rule no loaded driver supports for it.
        """
        context = kwargs['context']
        port_id = kwargs['port']['id']
        port = ports_object.Port.get_object(context, id=port_id)
        network = network_object.Network.get_object(context,
                                                    id=port.network_id)
        # The port-level policy takes precedence over the network's.
        policy_id = port.qos_policy_id or network.qos_policy_id
        if policy_id is None:
            return
        policy = policy_object.QosPolicy.get_object(
            context.elevated(), id=policy_id)
        self.validate_policy_for_port(policy, port)
    def _validate_update_port_callback(self, resource, event, trigger,
                                       payload=None):
        """PRECOMMIT_UPDATE handler: validate only when the port's QoS
        policy is actually being changed to a new, non-None policy.
        """
        context = payload.context
        original_policy_id = payload.states[0].get(
            qos_consts.QOS_POLICY_ID)
        policy_id = payload.desired_state.get(qos_consts.QOS_POLICY_ID)
        if policy_id is None or policy_id == original_policy_id:
            return
        updated_port = ports_object.Port.get_object(
            context, id=payload.desired_state['id'])
        policy = policy_object.QosPolicy.get_object(
            context.elevated(), id=policy_id)
        self.validate_policy_for_port(policy, updated_port)
    def _validate_update_network_callback(self, resource, event, trigger,
                                          payload=None):
        """PRECOMMIT_UPDATE handler for networks: validate the new policy
        against every port that inherits it (no port-level override).
        """
        context = payload.context
        original_network = payload.states[0]
        updated_network = payload.desired_state
        original_policy_id = original_network.get(qos_consts.QOS_POLICY_ID)
        policy_id = updated_network.get(qos_consts.QOS_POLICY_ID)
        if policy_id is None or policy_id == original_policy_id:
            return
        policy = policy_object.QosPolicy.get_object(
            context.elevated(), id=policy_id)
        ports = ports_object.Port.get_objects(
            context, network_id=updated_network['id'])
        # Keep only ports that do not override the network policy
        ports = [
            port for port in ports if port.qos_policy_id is None
        ]
        self.validate_policy_for_ports(policy, ports)
    def validate_policy(self, context, policy):
        """Validate ``policy`` against every port it currently governs."""
        ports = self._get_ports_with_policy(context, policy)
        self.validate_policy_for_ports(policy, ports)
    def validate_policy_for_ports(self, policy, ports):
        """Validate ``policy`` against each port in ``ports``."""
        for port in ports:
            self.validate_policy_for_port(policy, port)
    def validate_policy_for_port(self, policy, port):
        """Raise QosRuleNotSupported on the first rule no driver accepts."""
        for rule in policy.rules:
            if not self.driver_manager.validate_rule_for_port(rule, port):
                raise qos_exc.QosRuleNotSupported(rule_type=rule.rule_type,
                                                  port_id=port['id'])
    def reject_min_bw_rule_updates(self, context, policy):
        """Refuse min-bandwidth changes for policies bound to compute ports,
        since their Placement allocations cannot be adjusted here.
        """
        ports = self._get_ports_with_policy(context, policy)
        for port in ports:
            # NOTE(bence romsics): In some cases the presence of
            # 'binding:profile.allocation' is a more precise marker than
            # 'device_owner' about when we have to reject min-bw related
            # policy/rule updates. However 'binding:profile.allocation' cannot
            # be used in a generic way here. Consider the case when the first
            # min-bw rule is added to a policy having ports in-use. Those ports
            # will not have 'binding:profile.allocation', but this policy
            # update must be rejected.
            if (port.device_owner is not None and
                    port.device_owner.startswith(
                        nl_constants.DEVICE_OWNER_COMPUTE_PREFIX)):
                raise NotImplementedError(_(
                    'Cannot update QoS policies/rules backed by resources '
                    'tracked in Placement'))
    @db_base_plugin_common.convert_result_to_dict
    def create_policy(self, context, policy):
        """Create a QoS policy.
        :param context: neutron api request context
        :type context: neutron_lib.context.Context
        :param policy: policy data to be applied
        :type policy: dict
        :returns: a QosPolicy object
        """
        # NOTE(dasm): body 'policy' contains both tenant_id and project_id
        # but only latter needs to be used to create QosPolicy object.
        # We need to remove redundant keyword.
        # This cannot be done in other place of stacktrace, because neutron
        # needs to be backward compatible.
        tenant_id = policy['policy'].pop('tenant_id', None)
        if not policy['policy'].get('project_id'):
            policy['policy']['project_id'] = tenant_id
        policy_obj = policy_object.QosPolicy(context, **policy['policy'])
        with db_api.CONTEXT_WRITER.using(context):
            policy_obj.create()
            # Drivers get a precommit call inside the transaction...
            self.driver_manager.call(qos_consts.CREATE_POLICY_PRECOMMIT,
                                     context, policy_obj)
        # ...and the postcommit call after it succeeds.
        self.driver_manager.call(qos_consts.CREATE_POLICY, context, policy_obj)
        return policy_obj
    @db_base_plugin_common.convert_result_to_dict
    def update_policy(self, context, policy_id, policy):
        """Update a QoS policy.
        :param context: neutron api request context
        :type context: neutron.context.Context
        :param policy_id: the id of the QosPolicy to update
        :type policy_id: str uuid
        :param policy: new policy data to be applied
        :type policy: dict
        :returns: a QosPolicy object
        """
        policy_data = policy['policy']
        with db_api.CONTEXT_WRITER.using(context):
            policy_obj = policy_object.QosPolicy.get_policy_obj(
                context, policy_id)
            policy_obj.update_fields(policy_data, reset_changes=True)
            policy_obj.update()
            self.driver_manager.call(qos_consts.UPDATE_POLICY_PRECOMMIT,
                                     context, policy_obj)
        self.driver_manager.call(qos_consts.UPDATE_POLICY,
                                 context, policy_obj)
        return policy_obj
    def delete_policy(self, context, policy_id):
        """Delete a QoS policy.
        :param context: neutron api request context
        :type context: neutron.context.Context
        :param policy_id: the id of the QosPolicy to delete
        :type policy_id: str uuid
        :returns: None
        """
        with db_api.CONTEXT_WRITER.using(context):
            policy = policy_object.QosPolicy(context)
            policy.id = policy_id
            policy.delete()
            self.driver_manager.call(qos_consts.DELETE_POLICY_PRECOMMIT,
                                     context, policy)
        self.driver_manager.call(qos_consts.DELETE_POLICY,
                                 context, policy)
    @db_base_plugin_common.filter_fields
    @db_base_plugin_common.convert_result_to_dict
    def get_policy(self, context, policy_id, fields=None):
        """Get a QoS policy.
        :param context: neutron api request context
        :type context: neutron.context.Context
        :param policy_id: the id of the QosPolicy to retrieve
        :type policy_id: str uuid
        :returns: a QosPolicy object
        """
        return policy_object.QosPolicy.get_policy_obj(context, policy_id)
    @db_base_plugin_common.filter_fields
    @db_base_plugin_common.convert_result_to_dict
    def get_policies(self, context, filters=None, fields=None, sorts=None,
                     limit=None, marker=None, page_reverse=False):
        """Get QoS policies.
        :param context: neutron api request context
        :type context: neutron.context.Context
        :param filters: search criteria
        :type filters: dict
        :returns: QosPolicy objects meeting the search criteria
        """
        filters = filters or dict()
        pager = base_obj.Pager(sorts, limit, page_reverse, marker)
        return policy_object.QosPolicy.get_objects(context, _pager=pager,
                                                   **filters)
    @db_base_plugin_common.filter_fields
    @db_base_plugin_common.convert_result_to_dict
    def get_rule_type(self, context, rule_type_name, fields=None):
        """Return details for one rule type; admin-only."""
        if not context.is_admin:
            raise lib_exc.NotAuthorized()
        return rule_type_object.QosRuleType.get_object(rule_type_name)
    @db_base_plugin_common.filter_fields
    @db_base_plugin_common.convert_result_to_dict
    def get_rule_types(self, context, filters=None, fields=None,
                       sorts=None, limit=None,
                       marker=None, page_reverse=False):
        """List the rule types supported by the loaded drivers."""
        if not filters:
            filters = {}
        return rule_type_object.QosRuleType.get_objects(**filters)
    def supported_rule_type_details(self, rule_type_name):
        # Delegates to the driver manager's per-driver capability report.
        return self.driver_manager.supported_rule_type_details(rule_type_name)
    @property
    def supported_rule_types(self):
        # Rule types every loaded driver supports.
        return self.driver_manager.supported_rule_types
    @db_base_plugin_common.convert_result_to_dict
    def create_policy_rule(self, context, rule_cls, policy_id, rule_data):
        """Create a QoS policy rule.
        :param context: neutron api request context
        :type context: neutron.context.Context
        :param rule_cls: the rule object class
        :type rule_cls: a class from the rule_object (qos.objects.rule) module
        :param policy_id: the id of the QosPolicy for which to create the rule
        :type policy_id: str uuid
        :param rule_data: the rule data to be applied
        :type rule_data: dict
        :returns: a QoS policy rule object
        """
        rule_type = rule_cls.rule_type
        # The API wraps the rule body under '<rule_type>_rule'.
        rule_data = rule_data[rule_type + '_rule']
        with db_api.autonested_transaction(context.session):
            # Ensure that we have access to the policy.
            policy = policy_object.QosPolicy.get_policy_obj(context, policy_id)
            checker.check_bandwidth_rule_conflict(policy, rule_data)
            rule = rule_cls(context, qos_policy_id=policy_id, **rule_data)
            checker.check_rules_conflict(policy, rule)
            rule.create()
            # Refresh the policy's rule list so validation sees the new rule.
            policy.obj_load_attr('rules')
            self.validate_policy(context, policy)
            if rule.rule_type == qos_consts.RULE_TYPE_MINIMUM_BANDWIDTH:
                self.reject_min_bw_rule_updates(context, policy)
            self.driver_manager.call(qos_consts.UPDATE_POLICY_PRECOMMIT,
                                     context, policy)
        self.driver_manager.call(qos_consts.UPDATE_POLICY, context, policy)
        return rule
    @db_base_plugin_common.convert_result_to_dict
    def update_policy_rule(self, context, rule_cls, rule_id, policy_id,
                           rule_data):
        """Update a QoS policy rule.
        :param context: neutron api request context
        :type context: neutron.context.Context
        :param rule_cls: the rule object class
        :type rule_cls: a class from the rule_object (qos.objects.rule) module
        :param rule_id: the id of the QoS policy rule to update
        :type rule_id: str uuid
        :param policy_id: the id of the rule's policy
        :type policy_id: str uuid
        :param rule_data: the new rule data to update
        :type rule_data: dict
        :returns: a QoS policy rule object
        """
        rule_type = rule_cls.rule_type
        rule_data = rule_data[rule_type + '_rule']
        with db_api.autonested_transaction(context.session):
            # Ensure we have access to the policy.
            policy = policy_object.QosPolicy.get_policy_obj(context, policy_id)
            # Ensure the rule belongs to the policy.
            checker.check_bandwidth_rule_conflict(policy, rule_data)
            rule = policy.get_rule_by_id(rule_id)
            rule.update_fields(rule_data, reset_changes=True)
            checker.check_rules_conflict(policy, rule)
            rule.update()
            policy.obj_load_attr('rules')
            self.validate_policy(context, policy)
            if rule.rule_type == qos_consts.RULE_TYPE_MINIMUM_BANDWIDTH:
                self.reject_min_bw_rule_updates(context, policy)
            self.driver_manager.call(qos_consts.UPDATE_POLICY_PRECOMMIT,
                                     context, policy)
        self.driver_manager.call(qos_consts.UPDATE_POLICY, context, policy)
        return rule
    def delete_policy_rule(self, context, rule_cls, rule_id, policy_id):
        """Delete a QoS policy rule.
        :param context: neutron api request context
        :type context: neutron.context.Context
        :param rule_cls: the rule object class
        :type rule_cls: a class from the rule_object (qos.objects.rule) module
        :param rule_id: the id of the QosPolicy Rule to delete
        :type rule_id: str uuid
        :param policy_id: the id of the rule's policy
        :type policy_id: str uuid
        :returns: None
        """
        with db_api.autonested_transaction(context.session):
            # Ensure we have access to the policy.
            policy = policy_object.QosPolicy.get_policy_obj(context, policy_id)
            rule = policy.get_rule_by_id(rule_id)
            rule.delete()
            policy.obj_load_attr('rules')
            if rule.rule_type == qos_consts.RULE_TYPE_MINIMUM_BANDWIDTH:
                self.reject_min_bw_rule_updates(context, policy)
            self.driver_manager.call(qos_consts.UPDATE_POLICY_PRECOMMIT,
                                     context, policy)
        self.driver_manager.call(qos_consts.UPDATE_POLICY, context, policy)
    @db_base_plugin_common.filter_fields
    @db_base_plugin_common.convert_result_to_dict
    def get_policy_rule(self, context, rule_cls, rule_id, policy_id,
                        fields=None):
        """Get a QoS policy rule.
        :param context: neutron api request context
        :type context: neutron.context.Context
        :param rule_cls: the rule object class
        :type rule_cls: a class from the rule_object (qos.objects.rule) module
        :param rule_id: the id of the QoS policy rule to get
        :type rule_id: str uuid
        :param policy_id: the id of the rule's policy
        :type policy_id: str uuid
        :returns: a QoS policy rule object
        :raises: qos_exc.QosRuleNotFound
        """
        with db_api.autonested_transaction(context.session):
            # Ensure we have access to the policy.
            policy_object.QosPolicy.get_policy_obj(context, policy_id)
            rule = rule_cls.get_object(context, id=rule_id)
        if not rule:
            raise qos_exc.QosRuleNotFound(policy_id=policy_id, rule_id=rule_id)
        return rule
    # TODO(QoS): enforce rule types when accessing rule objects
    @db_base_plugin_common.filter_fields
    @db_base_plugin_common.convert_result_to_dict
    def get_policy_rules(self, context, rule_cls, policy_id, filters=None,
                         fields=None, sorts=None, limit=None, marker=None,
                         page_reverse=False):
        """Get QoS policy rules.
        :param context: neutron api request context
        :type context: neutron.context.Context
        :param rule_cls: the rule object class
        :type rule_cls: a class from the rule_object (qos.objects.rule) module
        :param policy_id: the id of the QosPolicy for which to get rules
        :type policy_id: str uuid
        :returns: QoS policy rule objects meeting the search criteria
        """
        with db_api.autonested_transaction(context.session):
            # Ensure we have access to the policy.
            policy_object.QosPolicy.get_policy_obj(context, policy_id)
            filters = filters or dict()
            filters[qos_consts.QOS_POLICY_ID] = policy_id
            pager = base_obj.Pager(sorts, limit, page_reverse, marker)
            return rule_cls.get_objects(context, _pager=pager, **filters)
| 42.555347 | 79 | 0.660083 |
a630191266da5c993c2097f065eb69e34d5565c6 | 2,260 | py | Python | source/conf.py | xxzhai123/nfn | 5896c30cbe3fe6bb4d5d35af97c3a26cb9f6b59b | [
"MIT"
] | null | null | null | source/conf.py | xxzhai123/nfn | 5896c30cbe3fe6bb4d5d35af97c3a26cb9f6b59b | [
"MIT"
] | null | null | null | source/conf.py | xxzhai123/nfn | 5896c30cbe3fe6bb4d5d35af97c3a26cb9f6b59b | [
"MIT"
] | null | null | null | # Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
# import os
# import sys
# sys.path.insert(0, os.path.abspath('.'))
# -- Project information -----------------------------------------------------
project = 'nln'
copyright = '2022, seeyou'
author = 'seeyou'
# The full version, including alpha/beta/rc tags
release = '0.1.0'
# -- General configuration ---------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'recommonmark',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = 'zh_CN'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = []
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
import sphinx_rtd_theme
html_theme = "sphinx_rtd_theme"
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
| 33.731343 | 79 | 0.682301 |
d04753fe91c8146937314e57fe53406b12fce300 | 1,477 | py | Python | mask.py | sahin88/face_mask_detection | be3bb506a62be5298b05ff93e9c7bb6d856650fd | [
"Unlicense"
] | null | null | null | mask.py | sahin88/face_mask_detection | be3bb506a62be5298b05ff93e9c7bb6d856650fd | [
"Unlicense"
] | null | null | null | mask.py | sahin88/face_mask_detection | be3bb506a62be5298b05ff93e9c7bb6d856650fd | [
"Unlicense"
] | null | null | null | import numpy as np
import tensorflow as tf
import matplotlib.pyplot as plt
import cv2
cascade_path='/home/alex/Downloads/haarcascade_frontalface_default.xml'
model_paramaters='/home/alex/Downloads/face_mask1.h5'
name="face_mask_detection"
new_model=tf.keras.models.load_model(model_paramaters)
cv2.namedWindow(name)
cap=cv2.VideoCapture(0)
while True:
image_size=224
_,frame= cap.read()
faceCascade=cv2.CascadeClassifier(cascade_path)
gray=cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
faces=faceCascade.detectMultiScale(gray,1.3,5)
for x,y,w, h in faces:
#burada sadece insan yüzünün oldugu kisimin matrix alinir
roi_face=frame[y:y+h,y:x+w]
final_array=cv2.resize(roi_face,(image_size,image_size))
final_array=np.expand_dims(final_array,axis=0)
final_array=final_array/255
predictions=new_model.predict(final_array)
font=cv2.FONT_HERSHEY_PLAIN
font_scale=1.5
print("predictions",predictions)
#Maskeli durum
if predictions<0.5:
status='Mask {} %'.format(round(100-predictions[0][0]*100,2))
cv2.rectangle(frame,(x,y),(x+w,y+h),(0,255,0),3)
cv2.putText(frame,status,(x,y-45), font,font_scale,color=(0,255,0),thickness=3)
else:
status='No Mask {} %'.format(round(predictions[0][0]*100,2))
cv2.rectangle(frame,(x,y),(x+w,y+h),(0,0,255),3)
cv2.putText(frame,status,(x,y-45), font,font_scale,color=(0,0,255),thickness=3)
cv2.imshow('face_mask_detection',frame)
if cv2.waitKey(1)==27:
break
cap.release()
cv2.destroyWindow()
| 29.54 | 82 | 0.748138 |
88bb8215fe26dd6d125b7dfc701496e608d772c5 | 44,722 | py | Python | Part-03-Understanding-Software-Crafting-Your-Own-Tools/models/edx-platform/common/lib/xmodule/xmodule/modulestore/mixed.py | osoco/better-ways-of-thinking-about-software | 83e70d23c873509e22362a09a10d3510e10f6992 | [
"MIT"
] | 3 | 2021-12-15T04:58:18.000Z | 2022-02-06T12:15:37.000Z | Part-03-Understanding-Software-Crafting-Your-Own-Tools/models/edx-platform/common/lib/xmodule/xmodule/modulestore/mixed.py | osoco/better-ways-of-thinking-about-software | 83e70d23c873509e22362a09a10d3510e10f6992 | [
"MIT"
] | null | null | null | Part-03-Understanding-Software-Crafting-Your-Own-Tools/models/edx-platform/common/lib/xmodule/xmodule/modulestore/mixed.py | osoco/better-ways-of-thinking-about-software | 83e70d23c873509e22362a09a10d3510e10f6992 | [
"MIT"
] | 1 | 2019-01-02T14:38:50.000Z | 2019-01-02T14:38:50.000Z | """
MixedModuleStore allows for aggregation between multiple modulestores.
In this way, courses can be served up via either SplitMongoModuleStore or MongoModuleStore.
"""
import functools
import itertools
import logging
from contextlib import contextmanager
from opaque_keys import InvalidKeyError
from opaque_keys.edx.keys import CourseKey
from opaque_keys.edx.locator import LibraryLocator
from xmodule.assetstore import AssetMetadata
from . import XMODULE_FIELDS_WITH_USAGE_KEYS, ModuleStoreEnum, ModuleStoreWriteBase
from .draft_and_published import ModuleStoreDraftAndPublished
from .exceptions import DuplicateCourseError, ItemNotFoundError
from .split_migrator import SplitMigrator
log = logging.getLogger(__name__)
def strip_key(func):
    """
    Decorator that removes version and branch information from return values
    that are (or contain) UsageKeys or CourseKeys.

    The wrapped function additionally receives a ``field_decorator`` keyword
    argument it can apply to strip any location(-containing) fields it does
    not return directly.

    Callers may pass ``remove_version`` / ``remove_branch`` booleans (both
    default True) to control what is stripped.
    """
    @functools.wraps(func)
    def inner(*args, **kwargs):
        """
        Call ``func`` with a ``field_decorator`` honoring the (popped)
        ``remove_version`` / ``remove_branch`` kwargs, then strip the
        return value the same way.
        """
        remove_version = kwargs.pop('remove_version', True)
        remove_branch = kwargs.pop('remove_branch', True)

        def _strip_one(value):
            """Strip a single value, recursing into usage-key fields."""
            stripped = value
            if remove_version and hasattr(stripped, 'version_agnostic'):
                stripped = stripped.version_agnostic()
            if remove_branch and hasattr(stripped, 'for_branch'):
                stripped = stripped.for_branch(None)
            for field_name in XMODULE_FIELDS_WITH_USAGE_KEYS:
                if hasattr(stripped, field_name):
                    setattr(stripped, field_name,
                            _strip_one(getattr(stripped, field_name)))
            return stripped

        def _strip_many(field_value):
            """Strip each element of a list/dict, or a lone value."""
            if not (remove_version or remove_branch):
                return field_value
            if isinstance(field_value, list):
                return [_strip_one(item) for item in field_value]
            if isinstance(field_value, dict):
                for key, val in field_value.items():
                    field_value[key] = _strip_one(val)
                return field_value
            return _strip_one(field_value)

        return _strip_many(func(*args, field_decorator=_strip_many, **kwargs))
    return inner
def prepare_asides(func):
    """
    Decorator handling the optional ``asides`` keyword argument.

    When the wrapped modulestore method is called with ``asides`` (a list of
    aside XBlocks), they are converted to plain storable dicts before the
    call is delegated.
    """
    _missing = object()  # sentinel: distinguishes "absent" from asides=None

    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        """Convert ``asides`` (if supplied) before delegating to ``func``."""
        raw_asides = kwargs.pop('asides', _missing)
        if raw_asides is not _missing:
            kwargs['asides'] = prepare_asides_to_store(raw_asides)
        return func(*args, **kwargs)
    return wrapper
def prepare_asides_to_store(asides_source):
    """
    Serialize aside XBlocks into a list of plain dicts storable in MongoDB.

    Each entry carries the aside's block type and a mapping of its field
    names to their current values. Returns None when ``asides_source`` is
    None or empty.
    """
    if not asides_source:
        return None
    serialized = []
    for aside in asides_source:
        field_values = {
            field_name: field.read_from(aside)
            for field_name, field in aside.fields.items()
        }
        serialized.append({
            'aside_type': aside.scope_ids.block_type,
            'fields': field_values,
        })
    return serialized
class MixedModuleStore(ModuleStoreDraftAndPublished, ModuleStoreWriteBase):
"""
ModuleStore knows how to route requests to the right persistence ms
"""
def __init__(
self,
contentstore,
mappings,
stores,
i18n_service=None,
fs_service=None,
user_service=None,
create_modulestore_instance=None,
signal_handler=None,
**kwargs
):
"""
Initialize a MixedModuleStore. Here we look into our passed in kwargs which should be a
collection of other modulestore configuration information
"""
super().__init__(contentstore, **kwargs)
if create_modulestore_instance is None:
raise ValueError('MixedModuleStore constructor must be passed a create_modulestore_instance function')
self.modulestores = []
self.mappings = {}
for course_id, store_name in mappings.items():
try:
self.mappings[CourseKey.from_string(course_id)] = store_name
except InvalidKeyError:
log.exception("Invalid MixedModuleStore configuration. Unable to parse course_id %r", course_id)
continue
for store_settings in stores:
key = store_settings['NAME']
store = create_modulestore_instance(
store_settings['ENGINE'],
self.contentstore,
store_settings.get('DOC_STORE_CONFIG', {}),
store_settings.get('OPTIONS', {}),
i18n_service=i18n_service,
fs_service=fs_service,
user_service=user_service,
signal_handler=signal_handler,
)
# replace all named pointers to the store into actual pointers
for course_key, store_name in self.mappings.items():
if store_name == key:
self.mappings[course_key] = store
self.modulestores.append(store)
def _clean_locator_for_mapping(self, locator):
"""
In order for mapping to work, the locator must be minimal--no version, no branch--
as we never store one version or one branch in one ms and another in another ms.
:param locator: the CourseKey
"""
if hasattr(locator, 'version_agnostic'):
locator = locator.version_agnostic()
if hasattr(locator, 'branch'):
locator = locator.replace(branch=None)
return locator
def _get_modulestore_for_courselike(self, locator=None):
"""
For a given locator, look in the mapping table and see if it has been pinned
to a particular modulestore
If locator is None, returns the first (ordered) store as the default
"""
if locator is not None:
locator = self._clean_locator_for_mapping(locator)
mapping = self.mappings.get(locator, None)
if mapping is not None:
return mapping
else:
if isinstance(locator, LibraryLocator):
has_locator = lambda store: hasattr(store, 'has_library') and store.has_library(locator)
else:
has_locator = lambda store: store.has_course(locator)
for store in self.modulestores:
if has_locator(store):
self.mappings[locator] = store
return store
# return the default store
return self.default_modulestore
def _get_modulestore_by_type(self, modulestore_type):
"""
This method should only really be used by tests and migration scripts when necessary.
Returns the module store as requested by type. The type can be a value from ModuleStoreEnum.Type.
"""
for store in self.modulestores:
if store.get_modulestore_type() == modulestore_type:
return store
return None
def fill_in_run(self, course_key):
"""
Some course_keys are used without runs. This function calls the corresponding
fill_in_run function on the appropriate modulestore.
"""
store = self._get_modulestore_for_courselike(course_key)
if not hasattr(store, 'fill_in_run'):
return course_key
return store.fill_in_run(course_key)
def has_item(self, usage_key, **kwargs):
"""
Does the course include the xblock who's id is reference?
"""
store = self._get_modulestore_for_courselike(usage_key.course_key)
return store.has_item(usage_key, **kwargs)
@strip_key
def get_item(self, usage_key, depth=0, **kwargs): # lint-amnesty, pylint: disable=arguments-differ
"""
see parent doc
"""
store = self._get_modulestore_for_courselike(usage_key.course_key)
return store.get_item(usage_key, depth, **kwargs)
@strip_key
def get_items(self, course_key, **kwargs): # lint-amnesty, pylint: disable=arguments-differ
"""
Returns:
list of XModuleDescriptor instances for the matching items within the course with
the given course_key
NOTE: don't use this to look for courses
as the course_key is required. Use get_courses.
Args:
course_key (CourseKey): the course identifier
kwargs:
settings (dict): fields to look for which have settings scope. Follows same syntax
and rules as kwargs below
content (dict): fields to look for which have content scope. Follows same syntax and
rules as kwargs below.
qualifiers (dict): what to look for within the course.
Common qualifiers are ``category`` or any field name. if the target field is a list,
then it searches for the given value in the list not list equivalence.
Substring matching pass a regex object.
For some modulestores, ``name`` is another commonly provided key (Location based stores)
For some modulestores,
you can search by ``edited_by``, ``edited_on`` providing either a datetime for == (probably
useless) or a function accepting one arg to do inequality
"""
if not isinstance(course_key, CourseKey):
raise Exception("Must pass in a course_key when calling get_items()")
store = self._get_modulestore_for_courselike(course_key)
return store.get_items(course_key, **kwargs)
@strip_key
def get_course_summaries(self, **kwargs):
"""
Returns a list containing the course information in CourseSummary objects.
Information contains `location`, `display_name`, `locator` of the courses in this modulestore.
"""
course_summaries = {}
for store in self.modulestores:
for course_summary in store.get_course_summaries(**kwargs):
course_id = self._clean_locator_for_mapping(locator=course_summary.id)
# Check if course is indeed unique. Save it in result if unique
if course_id in course_summaries:
log.warning(
"Modulestore %s have duplicate courses %s; skipping from result.", store, course_id
)
else:
course_summaries[course_id] = course_summary
return list(course_summaries.values())
@strip_key
def get_courses(self, **kwargs):
'''
Returns a list containing the top level XModuleDescriptors of the courses in this modulestore.
'''
courses = {}
for store in self.modulestores:
# filter out ones which were fetched from earlier stores but locations may not be ==
for course in store.get_courses(**kwargs):
course_id = self._clean_locator_for_mapping(course.id)
if course_id not in courses:
# course is indeed unique. save it in result
courses[course_id] = course
return list(courses.values())
def get_library_keys(self):
"""
Returns a list of all unique content library keys in the mixed
modulestore.
Returns: list[LibraryLocator]
"""
all_library_keys = set()
for store in self.modulestores:
if not hasattr(store, 'get_library_keys'):
continue
all_library_keys |= {
self._clean_locator_for_mapping(library_key)
for library_key in store.get_library_keys()
}
return list(all_library_keys)
@strip_key
def get_library_summaries(self, **kwargs):
"""
Returns a list of LibrarySummary objects.
Information contains `location`, `display_name`, `locator` of the libraries in this modulestore.
"""
library_summaries = {}
for store in self.modulestores:
if not hasattr(store, 'get_libraries'):
continue
# fetch library summaries and filter out any duplicated entry across/within stores
for library_summary in store.get_library_summaries(**kwargs):
library_id = self._clean_locator_for_mapping(library_summary.location)
if library_id not in library_summaries:
library_summaries[library_id] = library_summary
return list(library_summaries.values())
@strip_key
def get_libraries(self, **kwargs):
"""
Returns a list containing the top level XBlock of the libraries (LibraryRoot) in this modulestore.
"""
libraries = {}
for store in self.modulestores:
if not hasattr(store, 'get_libraries'):
continue
# filter out ones which were fetched from earlier stores but locations may not be ==
for library in store.get_libraries(**kwargs):
library_id = self._clean_locator_for_mapping(library.location)
if library_id not in libraries:
# library is indeed unique. save it in result
libraries[library_id] = library
return list(libraries.values())
    def make_course_key(self, org, course, run):
        """
        Return a valid :class:`~opaque_keys.edx.keys.CourseKey` for this modulestore
        that matches the supplied `org`, `course`, and `run`.
        This key may represent a course that doesn't exist in this modulestore.
        """
        # If there is a mapping that matches this org/course/run, use that
        # store's key format.
        # NOTE(review): mapping values whose configured store name matched no
        # store in __init__ remain plain strings; a string here would raise
        # AttributeError on .make_course_key. Presumably such stale entries
        # never occur in practice -- confirm.
        for course_id, store in self.mappings.items():
            candidate_key = store.make_course_key(org, course, run)
            if candidate_key == course_id:
                return candidate_key
        # Otherwise, return the key created by the default store
        return self.default_modulestore.make_course_key(org, course, run)
def make_course_usage_key(self, course_key):
"""
Return a valid :class:`~opaque_keys.edx.keys.UsageKey` for the modulestore
that matches the supplied course_key.
"""
assert isinstance(course_key, CourseKey)
store = self._get_modulestore_for_courselike(course_key)
return store.make_course_usage_key(course_key)
@strip_key
def get_course(self, course_key, depth=0, **kwargs): # lint-amnesty, pylint: disable=arguments-differ
"""
returns the course module associated with the course_id. If no such course exists,
it returns None
:param course_key: must be a CourseKey
"""
assert isinstance(course_key, CourseKey)
store = self._get_modulestore_for_courselike(course_key)
try:
return store.get_course(course_key, depth=depth, **kwargs)
except ItemNotFoundError:
return None
@strip_key
def get_library(self, library_key, depth=0, **kwargs):
"""
returns the library block associated with the given key. If no such library exists,
it returns None
:param library_key: must be a LibraryLocator
"""
try:
store = self._verify_modulestore_support(library_key, 'get_library')
return store.get_library(library_key, depth=depth, **kwargs)
except NotImplementedError:
log.exception("Modulestore configured for %s does not have get_library method", library_key)
return None
except ItemNotFoundError:
return None
@strip_key
def has_course(self, course_id, ignore_case=False, **kwargs):
"""
returns the course_id of the course if it was found, else None
Note: we return the course_id instead of a boolean here since the found course may have
a different id than the given course_id when ignore_case is True.
Args:
* course_id (CourseKey)
* ignore_case (bool): If True, do a case insensitive search. If
False, do a case sensitive search
"""
assert isinstance(course_id, CourseKey)
store = self._get_modulestore_for_courselike(course_id)
return store.has_course(course_id, ignore_case, **kwargs)
def delete_course(self, course_key, user_id): # lint-amnesty, pylint: disable=arguments-differ
"""
See xmodule.modulestore.__init__.ModuleStoreWrite.delete_course
"""
assert isinstance(course_key, CourseKey)
store = self._get_modulestore_for_courselike(course_key)
return store.delete_course(course_key, user_id)
def save_asset_metadata(self, asset_metadata, user_id, import_only=False):
"""
Saves the asset metadata for a particular course's asset.
Args:
asset_metadata (AssetMetadata): data about the course asset data
user_id (int|long): user ID saving the asset metadata
import_only (bool): True if importing without editing, False if editing
Returns:
True if info save was successful, else False
"""
store = self._get_modulestore_for_courselike(asset_metadata.asset_id.course_key)
return store.save_asset_metadata(asset_metadata, user_id, import_only)
def save_asset_metadata_list(self, asset_metadata_list, user_id, import_only=False):
"""
Saves the asset metadata for each asset in a list of asset metadata.
Optimizes the saving of many assets.
Args:
asset_metadata_list (list(AssetMetadata)): list of data about several course assets
user_id (int|long): user ID saving the asset metadata
import_only (bool): True if importing without editing, False if editing
Returns:
True if info save was successful, else False
"""
if len(asset_metadata_list) == 0:
return True
store = self._get_modulestore_for_courselike(asset_metadata_list[0].asset_id.course_key)
return store.save_asset_metadata_list(asset_metadata_list, user_id, import_only)
@strip_key
def find_asset_metadata(self, asset_key, **kwargs):
"""
Find the metadata for a particular course asset.
Args:
asset_key (AssetKey): locator containing original asset filename
Returns:
asset metadata (AssetMetadata) -or- None if not found
"""
store = self._get_modulestore_for_courselike(asset_key.course_key)
return store.find_asset_metadata(asset_key, **kwargs)
@strip_key
def get_all_asset_metadata(self, course_key, asset_type, start=0, maxresults=-1, sort=None, **kwargs):
"""
Returns a list of static assets for a course.
By default all assets are returned, but start and maxresults can be provided to limit the query.
Args:
course_key (CourseKey): course identifier
asset_type (str): type of asset, such as 'asset', 'video', etc. If None, return assets of all types.
start (int): optional - start at this asset number
maxresults (int): optional - return at most this many, -1 means no limit
sort (array): optional - None means no sort
(sort_by (str), sort_order (str))
sort_by - one of 'uploadDate' or 'displayname'
sort_order - one of 'ascending' or 'descending'
Returns:
List of AssetMetadata objects.
"""
store = self._get_modulestore_for_courselike(course_key)
return store.get_all_asset_metadata(course_key, asset_type, start, maxresults, sort, **kwargs)
def delete_asset_metadata(self, asset_key, user_id):
"""
Deletes a single asset's metadata.
Arguments:
asset_id (AssetKey): locator containing original asset filename
user_id (int_long): user deleting the metadata
Returns:
Number of asset metadata entries deleted (0 or 1)
"""
store = self._get_modulestore_for_courselike(asset_key.course_key)
return store.delete_asset_metadata(asset_key, user_id)
def copy_all_asset_metadata(self, source_course_key, dest_course_key, user_id):
"""
Copy all the course assets from source_course_key to dest_course_key.
Arguments:
source_course_key (CourseKey): identifier of course to copy from
dest_course_key (CourseKey): identifier of course to copy to
user_id (int|long): user copying the asset metadata
"""
source_store = self._get_modulestore_for_courselike(source_course_key)
dest_store = self._get_modulestore_for_courselike(dest_course_key)
if source_store != dest_store:
with self.bulk_operations(dest_course_key):
# Get all the asset metadata in the source course.
all_assets = source_store.get_all_asset_metadata(source_course_key, 'asset')
# Store it all in the dest course.
for asset in all_assets:
new_asset_key = dest_course_key.make_asset_key('asset', asset.asset_id.path)
copied_asset = AssetMetadata(new_asset_key)
copied_asset.from_storable(asset.to_storable())
dest_store.save_asset_metadata(copied_asset, user_id)
else:
# Courses in the same modulestore can be handled by the modulestore itself.
source_store.copy_all_asset_metadata(source_course_key, dest_course_key, user_id)
    def set_asset_metadata_attr(self, asset_key, attr, value, user_id):
        """
        Add/set the given attr on the asset at the given location. Value can be any type which pymongo accepts.
        Arguments:
            asset_key (AssetKey): asset identifier
            attr (str): which attribute to set
            value: the value to set it to (any type pymongo accepts such as datetime, number, string)
            user_id: (int|long): user setting the attribute
        Raises:
            NotFoundError if no such item exists
            AttributeError if attr is one of the built-in attrs.
        """
        # Single-attribute convenience wrapper: delegates to the bulk setter
        # on the store that owns the asset's course.
        store = self._get_modulestore_for_courselike(asset_key.course_key)
        return store.set_asset_metadata_attrs(asset_key, {attr: value}, user_id)
    def set_asset_metadata_attrs(self, asset_key, attr_dict, user_id):  # lint-amnesty, pylint: disable=arguments-differ
        """
        Add/set the given dict of attrs on the asset at the given location. Value can be any type which pymongo accepts.
        Arguments:
            asset_key (AssetKey): asset identifier
            attr_dict (dict): attribute/value pairs to set
            user_id: (int|long): user setting the attributes
        Raises:
            NotFoundError if no such item exists
            AttributeError if attr is one of the built-in attrs.
        """
        # Route to the store that owns the asset's course and delegate.
        store = self._get_modulestore_for_courselike(asset_key.course_key)
        return store.set_asset_metadata_attrs(asset_key, attr_dict, user_id)
@strip_key
def get_parent_location(self, location, **kwargs): # lint-amnesty, pylint: disable=arguments-differ
"""
returns the parent locations for a given location
"""
store = self._get_modulestore_for_courselike(location.course_key)
return store.get_parent_location(location, **kwargs)
def get_block_original_usage(self, usage_key):
"""
If a block was inherited into another structure using copy_from_template,
this will return the original block usage locator from which the
copy was inherited.
"""
try:
store = self._verify_modulestore_support(usage_key.course_key, 'get_block_original_usage')
return store.get_block_original_usage(usage_key)
except NotImplementedError:
return None, None
def get_modulestore_type(self, course_id):
"""
Returns a type which identifies which modulestore is servicing the given course_id.
The return can be one of:
"xml" (for XML based courses),
"mongo" for old-style MongoDB backed courses,
"split" for new-style split MongoDB backed courses.
"""
return self._get_modulestore_for_courselike(course_id).get_modulestore_type()
@strip_key
def get_orphans(self, course_key, **kwargs):
"""
Get all of the xblocks in the given course which have no parents and are not of types which are
usually orphaned. NOTE: may include xblocks which still have references via xblocks which don't
use children to point to their dependents.
"""
store = self._get_modulestore_for_courselike(course_key)
return store.get_orphans(course_key, **kwargs)
def get_errored_courses(self):
"""
Return a dictionary of course_dir -> [(msg, exception_str)], for each
course_dir where course loading failed.
"""
errs = {}
for store in self.modulestores:
errs.update(store.get_errored_courses())
return errs
@strip_key
def create_course(self, org, course, run, user_id, **kwargs): # lint-amnesty, pylint: disable=arguments-differ
"""
Creates and returns the course.
Args:
org (str): the organization that owns the course
course (str): the name of the course
run (str): the name of the run
user_id: id of the user creating the course
fields (dict): Fields to set on the course at initialization
kwargs: Any optional arguments understood by a subset of modulestores to customize instantiation
Returns: a CourseBlock
"""
# first make sure an existing course doesn't already exist in the mapping
course_key = self.make_course_key(org, course, run)
log.info('Creating course run %s...', course_key)
if course_key in self.mappings and self.mappings[course_key].has_course(course_key):
log.error('Cannot create course run %s. It already exists!', course_key)
raise DuplicateCourseError(course_key, course_key)
# create the course
store = self._verify_modulestore_support(None, 'create_course')
course = store.create_course(org, course, run, user_id, **kwargs)
log.info('Course run %s created successfully!', course_key)
# add new course to the mapping
self.mappings[course_key] = store
return course
@strip_key
def create_library(self, org, library, user_id, fields, **kwargs):
"""
Creates and returns a new library.
Args:
org (str): the organization that owns the course
library (str): the code/number/name of the library
user_id: id of the user creating the course
fields (dict): Fields to set on the course at initialization - e.g. display_name
kwargs: Any optional arguments understood by a subset of modulestores to customize instantiation
Returns: a LibraryRoot
"""
# first make sure an existing course/lib doesn't already exist in the mapping
lib_key = LibraryLocator(org=org, library=library)
if lib_key in self.mappings:
raise DuplicateCourseError(lib_key, lib_key)
# create the library
store = self._verify_modulestore_support(None, 'create_library')
library = store.create_library(org, library, user_id, fields, **kwargs)
# add new library to the mapping
self.mappings[lib_key] = store
return library
@strip_key
def clone_course(self, source_course_id, dest_course_id, user_id, fields=None, **kwargs):
"""
See the superclass for the general documentation.
If cloning w/in a store, delegates to that store's clone_course which, in order to be self-
sufficient, should handle the asset copying (call the same method as this one does)
If cloning between stores,
* copy the assets
* migrate the courseware
"""
source_modulestore = self._get_modulestore_for_courselike(source_course_id)
# for a temporary period of time, we may want to hardcode dest_modulestore as split if there's a split
# to have only course re-runs go to split. This code, however, uses the config'd priority
dest_modulestore = self._get_modulestore_for_courselike(dest_course_id)
if source_modulestore == dest_modulestore:
return source_modulestore.clone_course(source_course_id, dest_course_id, user_id, fields, **kwargs)
if dest_modulestore.get_modulestore_type() == ModuleStoreEnum.Type.split:
split_migrator = SplitMigrator(dest_modulestore, source_modulestore)
split_migrator.migrate_mongo_course(source_course_id, user_id, dest_course_id.org,
dest_course_id.course, dest_course_id.run, fields, **kwargs)
# the super handles assets and any other necessities
super().clone_course(source_course_id, dest_course_id, user_id, fields, **kwargs)
else:
raise NotImplementedError("No code for cloning from {} to {}".format(
source_modulestore, dest_modulestore
))
    @strip_key
    @prepare_asides
    def create_item(self, user_id, course_key, block_type, block_id=None, fields=None, **kwargs):
        """
        Creates and saves a new item in a course.
        Returns the newly created item.
        Args:
            user_id: ID of the user creating and saving the xmodule
            course_key: A :class:`~opaque_keys.edx.CourseKey` identifying which course to create
                this item in
            block_type: The type of block to create
            block_id: a unique identifier for the new item. If not supplied,
                a new identifier will be generated
            fields (dict): A dictionary specifying initial values for some or all fields
                in the newly created block
        """
        # Pick a store that actually implements create_item; raises
        # NotImplementedError when the owning store does not support writes.
        modulestore = self._verify_modulestore_support(course_key, 'create_item')
        return modulestore.create_item(user_id, course_key, block_type, block_id=block_id, fields=fields, **kwargs)
@strip_key
@prepare_asides
def create_child(self, user_id, parent_usage_key, block_type, block_id=None, fields=None, **kwargs):
"""
Creates and saves a new xblock that is a child of the specified block
Returns the newly created item.
Args:
user_id: ID of the user creating and saving the xmodule
parent_usage_key: a :class:`~opaque_key.edx.UsageKey` identifying the
block that this item should be parented under
block_type: The typo of block to create
block_id: a unique identifier for the new item. If not supplied,
a new identifier will be generated
fields (dict): A dictionary specifying initial values for some or all fields
in the newly created block
"""
modulestore = self._verify_modulestore_support(parent_usage_key.course_key, 'create_child')
return modulestore.create_child(user_id, parent_usage_key, block_type, block_id=block_id, fields=fields, **kwargs) # lint-amnesty, pylint: disable=line-too-long
@strip_key
@prepare_asides
def import_xblock(self, user_id, course_key, block_type, block_id, fields=None, runtime=None, **kwargs):
"""
See :py:meth `ModuleStoreDraftAndPublished.import_xblock`
Defer to the course's modulestore if it supports this method
"""
store = self._verify_modulestore_support(course_key, 'import_xblock')
return store.import_xblock(user_id, course_key, block_type, block_id, fields, runtime, **kwargs)
@strip_key
def copy_from_template(self, source_keys, dest_key, user_id, **kwargs): # lint-amnesty, pylint: disable=unused-argument
"""
See :py:meth `SplitMongoModuleStore.copy_from_template`
"""
store = self._verify_modulestore_support(dest_key.course_key, 'copy_from_template')
return store.copy_from_template(source_keys, dest_key, user_id)
@strip_key
@prepare_asides
def update_item(self, xblock, user_id, allow_not_found=False, **kwargs): # lint-amnesty, pylint: disable=arguments-differ
"""
Update the xblock persisted to be the same as the given for all types of fields
(content, children, and metadata) attribute the change to the given user.
"""
store = self._verify_modulestore_support(xblock.location.course_key, 'update_item')
return store.update_item(xblock, user_id, allow_not_found, **kwargs)
@strip_key
def delete_item(self, location, user_id, **kwargs): # lint-amnesty, pylint: disable=arguments-differ
"""
Delete the given item from persistence. kwargs allow modulestore specific parameters.
"""
store = self._verify_modulestore_support(location.course_key, 'delete_item')
return store.delete_item(location, user_id=user_id, **kwargs)
def revert_to_published(self, location, user_id):
"""
Reverts an item to its last published version (recursively traversing all of its descendants).
If no published version exists, an InvalidVersionError is thrown.
If a published version exists but there is no draft version of this item or any of its descendants, this
method is a no-op.
:raises InvalidVersionError: if no published version exists for the location specified
"""
store = self._verify_modulestore_support(location.course_key, 'revert_to_published')
return store.revert_to_published(location, user_id)
def reset_course_to_version(self, course_key, version_guid, user_id):
"""
Resets the content of a course at `course_key` to a version specified by `version_guid`.
:raises NotImplementedError: if not supported by store.
"""
store = self._verify_modulestore_support(course_key, 'reset_course_to_version')
return store.reset_course_to_version(
course_key=course_key,
version_guid=version_guid,
user_id=user_id,
)
def close_all_connections(self):
"""
Close all db connections
"""
for modulestore in self.modulestores:
modulestore.close_connections()
def _drop_database(self, database=True, collections=True, connections=True):
"""
A destructive operation to drop the underlying database and close all connections.
Intended to be used by test code for cleanup.
If database is True, then this should drop the entire database.
Otherwise, if collections is True, then this should drop all of the collections used
by this modulestore.
Otherwise, the modulestore should remove all data from the collections.
If connections is True, then close the connection to the database as well.
"""
for modulestore in self.modulestores:
# drop database if the store supports it (read-only stores do not)
if hasattr(modulestore, '_drop_database'):
modulestore._drop_database(database, collections, connections) # pylint: disable=protected-access
@strip_key
def create_xblock(self, runtime, course_key, block_type, block_id=None, fields=None, **kwargs):
"""
Create the new xmodule but don't save it. Returns the new module.
Args:
runtime: :py:class `xblock.runtime` from another xblock in the same course. Providing this
significantly speeds up processing (inheritance and subsequent persistence)
course_key: :py:class `opaque_keys.CourseKey`
block_type: :py:class `string`: the string identifying the xblock type
block_id: the string uniquely identifying the block within the given course
fields: :py:class `dict` field_name, value pairs for initializing the xblock fields. Values
should be the pythonic types not the json serialized ones.
"""
store = self._verify_modulestore_support(course_key, 'create_xblock')
return store.create_xblock(runtime, course_key, block_type, block_id, fields or {}, **kwargs)
@strip_key
def get_courses_for_wiki(self, wiki_slug, **kwargs):
"""
Return the list of courses which use this wiki_slug
:param wiki_slug: the course wiki root slug
:return: list of course keys
"""
courses = []
for modulestore in self.modulestores:
courses.extend(modulestore.get_courses_for_wiki(wiki_slug, **kwargs))
return courses
def heartbeat(self):
"""
Delegate to each modulestore and package the results for the caller.
"""
# could be done in parallel threads if needed
return dict(
itertools.chain.from_iterable(
store.heartbeat().items()
for store in self.modulestores
)
)
def has_published_version(self, xblock):
"""
Returns whether this xblock is draft, public, or private.
Returns:
PublishState.draft - content is in the process of being edited, but still has a previous
version deployed to LMS
PublishState.public - content is locked and deployed to LMS
PublishState.private - content is editable and not deployed to LMS
"""
course_id = xblock.scope_ids.usage_id.course_key
store = self._get_modulestore_for_courselike(course_id)
return store.has_published_version(xblock)
@strip_key
def publish(self, location, user_id, **kwargs):
"""
Save a current draft to the underlying modulestore
Returns the newly published item.
"""
store = self._verify_modulestore_support(location.course_key, 'publish')
return store.publish(location, user_id, **kwargs)
@strip_key
def unpublish(self, location, user_id, **kwargs):
"""
Save a current draft to the underlying modulestore
Returns the newly unpublished item.
"""
store = self._verify_modulestore_support(location.course_key, 'unpublish')
return store.unpublish(location, user_id, **kwargs)
def convert_to_draft(self, location, user_id):
"""
Create a copy of the source and mark its revision as draft.
Note: This method is to support the Mongo Modulestore and may be deprecated.
:param location: the location of the source (its revision must be None)
"""
store = self._verify_modulestore_support(location.course_key, 'convert_to_draft')
return store.convert_to_draft(location, user_id)
def has_changes(self, xblock):
"""
Checks if the given block has unpublished changes
:param xblock: the block to check
:return: True if the draft and published versions differ
"""
store = self._verify_modulestore_support(xblock.location.course_key, 'has_changes')
return store.has_changes(xblock)
def check_supports(self, course_key, method):
"""
Verifies that the modulestore for a particular course supports a feature.
Returns True/false based on this.
"""
try:
self._verify_modulestore_support(course_key, method)
return True
except NotImplementedError:
return False
def _verify_modulestore_support(self, course_key, method):
"""
Finds and returns the store that contains the course for the given location, and verifying
that the store supports the given method.
Raises NotImplementedError if the found store does not support the given method.
"""
store = self._get_modulestore_for_courselike(course_key)
if hasattr(store, method):
return store
else:
raise NotImplementedError(f"Cannot call {method} on store {store}")
@property
def default_modulestore(self):
"""
Return the default modulestore
"""
thread_local_default_store = getattr(self.thread_cache, 'default_store', None)
if thread_local_default_store:
# return the thread-local cache, if found
return thread_local_default_store
else:
# else return the default store
return self.modulestores[0]
@contextmanager
def default_store(self, store_type):
"""
A context manager for temporarily changing the default store in the Mixed modulestore to the given store type
"""
# find the store corresponding to the given type
store = next((store for store in self.modulestores if store.get_modulestore_type() == store_type), None)
if not store:
raise Exception(f"Cannot find store of type {store_type}")
prev_thread_local_store = getattr(self.thread_cache, 'default_store', None)
try:
self.thread_cache.default_store = store
yield
finally:
self.thread_cache.default_store = prev_thread_local_store
@contextmanager
def branch_setting(self, branch_setting, course_id=None):
"""
A context manager for temporarily setting the branch value for the given course' store
to the given branch_setting. If course_id is None, the default store is used.
"""
store = self._verify_modulestore_support(course_id, 'branch_setting')
previous_thread_branch_setting = getattr(self.thread_cache, 'branch_setting', None)
try:
self.thread_cache.branch_setting = branch_setting
with store.branch_setting(branch_setting, course_id):
yield
finally:
self.thread_cache.branch_setting = previous_thread_branch_setting
@contextmanager
def bulk_operations(self, course_id, emit_signals=True, ignore_case=False):
"""
A context manager for notifying the store of bulk operations.
If course_id is None, the default store is used.
"""
store = self._get_modulestore_for_courselike(course_id)
with store.bulk_operations(course_id, emit_signals, ignore_case):
yield
def ensure_indexes(self):
"""
Ensure that all appropriate indexes are created that are needed by this modulestore, or raise
an exception if unable to.
This method is intended for use by tests and administrative commands, and not
to be run during server startup.
"""
for store in self.modulestores:
store.ensure_indexes()
| 42.919386 | 169 | 0.651514 |
f4e1138cdb9f300fce2661a1d3e1c03e56b5431b | 134 | py | Python | pypi_librarian/_version.py | jayvdb/pypi_librarian | e1b98bd035c7d9bbab7bdd1511d03e58fb927236 | [
"MIT"
] | 3 | 2019-06-07T14:45:03.000Z | 2019-12-26T19:48:29.000Z | pypi_librarian/_version.py | jayvdb/pypi_librarian | e1b98bd035c7d9bbab7bdd1511d03e58fb927236 | [
"MIT"
] | 2 | 2019-12-26T15:13:18.000Z | 2020-03-30T06:35:22.000Z | pypi_librarian/_version.py | jayvdb/pypi_librarian | e1b98bd035c7d9bbab7bdd1511d03e58fb927236 | [
"MIT"
] | 2 | 2019-06-07T14:45:07.000Z | 2019-12-26T14:37:16.000Z | # coding=utf-8
"""
Just version so that if this is run, there is no side effects.
"""
__version__ = "0.1.6" # Jiggle Version Was Here
| 22.333333 | 62 | 0.686567 |
d1c16281614bfaf120352121c36fd7406b7c16f4 | 3,972 | py | Python | riak/transports/http/connection.py | lixen/riak-python-client | 43c0e2d43b185180fe8dd02ca759fbfb54bfec4b | [
"Apache-2.0"
] | 89 | 2015-01-06T01:54:57.000Z | 2020-11-25T04:47:09.000Z | riak/transports/http/connection.py | lixen/riak-python-client | 43c0e2d43b185180fe8dd02ca759fbfb54bfec4b | [
"Apache-2.0"
] | 125 | 2015-01-05T09:32:37.000Z | 2021-06-27T21:28:51.000Z | riak/transports/http/connection.py | lixen/riak-python-client | 43c0e2d43b185180fe8dd02ca759fbfb54bfec4b | [
"Apache-2.0"
] | 73 | 2015-01-19T18:04:35.000Z | 2022-03-25T17:10:51.000Z | # Copyright 2010-present Basho Technologies, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import base64
from six import PY2
from riak.util import str_to_bytes
if PY2:
from httplib import NotConnected, HTTPConnection
else:
from http.client import NotConnected, HTTPConnection
class HttpConnection(object):
    """
    Connection and low-level request methods for HttpTransport.

    Mixed into a transport that provides ``self._client``,
    ``self._node`` and ``self._options``; ``_connection_class`` and
    ``_node`` are overwritten by the HttpTransport initializer.
    """
    def _request(self, method, uri, headers=None, body='', stream=False):
        """
        Given a Method, URL, Headers, and Body, perform an HTTP
        request, and return a 3-tuple containing the response status,
        response headers (as httplib.HTTPMessage), and response body.

        :param method: HTTP method name, e.g. ``'GET'``
        :param uri: request target path
        :param headers: optional dict of extra request headers; copied,
            never mutated
        :param body: request body to send
        :param stream: if True, the raw response object is returned as
            the body and the caller must read and close it
        """
        response = None
        # Copy into a fresh dict rather than using a mutable default
        # argument: the original ``headers={}`` was mutated below
        # (Accept / Authorization), leaking entries across calls and
        # into caller-supplied dicts.
        headers = dict(headers) if headers is not None else {}
        headers.setdefault('Accept',
                           'multipart/mixed, application/json, */*;q=0.5')
        if self._client._credentials:
            self._security_auth_headers(self._client._credentials.username,
                                        self._client._credentials.password,
                                        headers)
        try:
            self._connection.request(method, uri, body, headers)
            try:
                # Python 2 supports a ``buffering`` argument;
                # Python 3's getresponse() does not.
                response = self._connection.getresponse(buffering=True)
            except TypeError:
                response = self._connection.getresponse()

            if stream:
                # The caller is responsible for fully reading the
                # response and closing it when streaming.
                response_body = response
            else:
                response_body = response.read()
        finally:
            if response and not stream:
                response.close()

        return response.status, response.msg, response_body

    def _connect(self):
        """
        Use the appropriate connection class; optionally with security.
        """
        timeout = None
        if self._options is not None and 'timeout' in self._options:
            timeout = self._options['timeout']

        if self._client._credentials:
            self._connection = self._connection_class(
                host=self._node.host,
                port=self._node.http_port,
                credentials=self._client._credentials,
                timeout=timeout)
        else:
            self._connection = self._connection_class(
                host=self._node.host,
                port=self._node.http_port,
                timeout=timeout)

        # Forces the population of stats and resources before any
        # other requests are made.
        self.server_version

    def close(self):
        """
        Closes the underlying HTTP connection.
        """
        try:
            self._connection.close()
        except NotConnected:
            # Already closed / never connected; nothing to do.
            pass

    # These are set by the HttpTransport initializer
    _connection_class = HTTPConnection
    _node = None

    def _security_auth_headers(self, username, password, headers):
        """
        Add in the requisite HTTP Authentication Headers

        :param username: Riak Security Username
        :type str
        :param password: Riak Security Password
        :type str
        :param headers: Dictionary of headers
        :type dict
        """
        userColonPassword = username + ":" + password
        b64UserColonPassword = base64. \
            b64encode(str_to_bytes(userColonPassword)).decode("ascii")
        headers['Authorization'] = 'Basic %s' % b64UserColonPassword
2b750d2cea6f0e163ee49617941e82f8a4e81fe1 | 6,868 | py | Python | spacq/gui/config/measurement.py | bleutooth65/SpanishAcquisition3 | 50d1445c57f7ecf3bbf03a2cb28befedba1bd57a | [
"BSD-2-Clause"
] | 1 | 2020-09-30T15:52:48.000Z | 2020-09-30T15:52:48.000Z | spacq/gui/config/measurement.py | bleutooth65/SpanishAcquisition3 | 50d1445c57f7ecf3bbf03a2cb28befedba1bd57a | [
"BSD-2-Clause"
] | null | null | null | spacq/gui/config/measurement.py | bleutooth65/SpanishAcquisition3 | 50d1445c57f7ecf3bbf03a2cb28befedba1bd57a | [
"BSD-2-Clause"
] | 3 | 2019-06-13T20:59:13.000Z | 2021-02-07T03:23:11.000Z | from pubsub import pub
import wx
from spacq.iteration.variables import InputVariable
from ..tool.box import OK_BACKGROUND_COLOR, MessageDialog
from .scaling import ScalingSettings, ScalingSettingsDialog
class MeasurementConfigPanel(wx.Panel):
	"""
	Measurement configuration panel.

	Owns a single InputVariable registered in the global store and keeps
	its name, resource name and enabled flag in sync with the widgets.
	When ``scaling`` is True, the measured resource is wrapped so values
	pass through the user-configured ScalingSettings transform.
	"""

	def __init__(self, parent, global_store, scaling=True, *args, **kwargs):
		# parent: window whose Title and live_view_panel we drive.
		# global_store: shared registry of variables and resources.
		wx.Panel.__init__(self, parent, *args, **kwargs)

		self.parent = parent
		self.global_store = global_store
		self.scaling = scaling

		if self.scaling:
			self.scaling_settings = ScalingSettings()

		# Ensure that we get a unique name: keep incrementing the suffix
		# until insertion into the variable store succeeds.
		with self.global_store.variables.lock:
			num = 1
			done = False
			while not done:
				name = 'New measurement {0}'.format(num)
				self.var = InputVariable(name=name, enabled=True)

				try:
					self.global_store.variables[name] = self.var
				except KeyError:
					num += 1
				else:
					done = True

		# Keep track of the scaling wrapper and resource.
		if self.scaling:
			self.scaling_wrap_token = '{0}.{1}'.format(self.__class__.__name__, self.wrap_with_scaling.__name__)
			self.resource = None
			self.unwrapping = False

		# Panel.
		panel_box = wx.BoxSizer(wx.VERTICAL)

		## Configuration.
		configuration_box = wx.BoxSizer(wx.HORIZONTAL)
		panel_box.Add(configuration_box, flag=wx.EXPAND|wx.ALL, border=5)

		self.enabled_checkbox = wx.CheckBox(self, label='Capture')
		self.enabled_checkbox.Value = self.var.enabled
		configuration_box.Add(self.enabled_checkbox, flag=wx.CENTER|wx.RIGHT, border=15)
		self.Bind(wx.EVT_CHECKBOX, self.OnCaptureChecked, self.enabled_checkbox)

		### Names.
		names_box = wx.FlexGridSizer(rows=2, cols=2, hgap=5)
		names_box.AddGrowableCol(1, 1)
		configuration_box.Add(names_box, flag=wx.EXPAND, proportion=1)

		names_box.Add(wx.StaticText(self, label='Resource name:'),
				flag=wx.ALIGN_CENTER_VERTICAL|wx.ALIGN_RIGHT)
		self.resource_name_input = wx.TextCtrl(self, value=self.var.resource_name, style=wx.TE_PROCESS_ENTER)
		self.resource_name_input.default_background_color = self.resource_name_input.BackgroundColour
		self.resource_name_input.BackgroundColour = OK_BACKGROUND_COLOR
		names_box.Add(self.resource_name_input, flag=wx.EXPAND)
		self.Bind(wx.EVT_TEXT, self.OnResourceNameChange, self.resource_name_input)
		self.Bind(wx.EVT_TEXT_ENTER, self.OnResourceNameInput, self.resource_name_input)

		names_box.Add(wx.StaticText(self, label='Measurement name:'),
				flag=wx.ALIGN_CENTER_VERTICAL|wx.ALIGN_RIGHT)
		self.measurement_name_input = wx.TextCtrl(self, value=self.var.name, style=wx.TE_PROCESS_ENTER)
		self.measurement_name_input.default_background_color = self.measurement_name_input.BackgroundColour
		self.measurement_name_input.BackgroundColour = OK_BACKGROUND_COLOR
		names_box.Add(self.measurement_name_input, flag=wx.EXPAND)
		self.Bind(wx.EVT_TEXT, self.OnMeasurementNameChange, self.measurement_name_input)
		self.Bind(wx.EVT_TEXT_ENTER, self.OnMeasurementNameInput, self.measurement_name_input)

		### Scaling.
		if self.scaling:
			scaling_button = wx.Button(self, label='Scaling...', style=wx.BU_EXACTFIT)
			self.Bind(wx.EVT_BUTTON, self.OnScaling, scaling_button)
			configuration_box.Add(scaling_button, flag=wx.EXPAND|wx.LEFT, border=10)

		self.SetSizerAndFit(panel_box)

		self.set_title()

		# Subscriptions: track resources appearing/disappearing so the
		# scaling wrapper follows the current resource.
		if self.scaling:
			pub.subscribe(self.msg_resource, 'resource.added')
			pub.subscribe(self.msg_resource, 'resource.removed')

	@property
	def live_view_panel(self):
		# Delegated to the parent window.
		return self.parent.live_view_panel

	def wrap_with_scaling(self, name, resource):
		"""Replace the stored resource with a scaling-transformed wrapper."""
		if not self.scaling:
			return

		# Don't double-wrap.
		if resource.is_wrapped_by(self.scaling_wrap_token):
			return

		# Modify the resource value by the scaling.
		def transform(x):
			# Close over self, so that updating scaling settings automatically takes effect.
			return self.scaling_settings.transform(x)

		wrapped_resource = resource.wrapped(self.scaling_wrap_token, transform)

		with self.global_store.lock:
			del self.global_store.resources[name]
			self.global_store.resources[name] = wrapped_resource

	def unwrap_with_scaling(self):
		"""Undo wrap_with_scaling, restoring the bare resource in the store."""
		if not self.scaling:
			return

		if self.resource is None:
			return

		# Don't allow immediate re-wrapping: msg_resource will fire for
		# the re-added resource while this flag is set.
		self.unwrapping = True

		name = self.live_view_panel.measurement_resource_name
		unwrapped_resource = self.resource.unwrapped(self.scaling_wrap_token)

		with self.global_store.lock:
			del self.global_store.resources[name]
			self.global_store.resources[name] = unwrapped_resource

		self.resource = None
		self.unwrapping = False

	def set_title(self):
		# Reflect name, resource and enabled state in the window title.
		self.parent.Title = '{0} ({1}){2}'.format(self.var.name, self.var.resource_name,
				'' if self.var.enabled else ' [Disabled]')

	def close(self):
		"""Release the scaling wrapper and deregister the variable."""
		self.unwrap_with_scaling()

		del self.global_store.variables[self.var.name]

	def OnCaptureChecked(self, evt=None):
		# Mirror the checkbox into both the variable and the live view.
		self.var.enabled = self.live_view_panel.enabled = self.enabled_checkbox.Value

		self.set_title()

	def OnResourceNameChange(self, evt=None):
		# Edited but not committed yet: drop the "OK" highlight.
		self.resource_name_input.BackgroundColour = self.resource_name_input.default_background_color

	def OnResourceNameInput(self, evt=None):
		"""Commit a new resource name (Enter pressed)."""
		if self.var.resource_name != self.resource_name_input.Value:
			# Ensure that the resource is unwrapped before releasing it.
			self.unwrap_with_scaling()

			self.var.resource_name = name = self.resource_name_input.Value

			# Inform the panel.
			self.live_view_panel.measurement_resource_name = name

			# Grab the new resource if it already exists.
			try:
				self.resource = self.global_store.resources[name]
			except KeyError:
				pass
			else:
				self.wrap_with_scaling(name, self.resource)

		self.resource_name_input.BackgroundColour = OK_BACKGROUND_COLOR

		self.set_title()

	def OnMeasurementNameChange(self, evt=None):
		# Edited but not committed yet: drop the "OK" highlight.
		self.measurement_name_input.BackgroundColour = self.measurement_name_input.default_background_color

	def OnMeasurementNameInput(self, evt=None):
		"""Commit a new measurement name (Enter pressed)."""
		if self.var.name != self.measurement_name_input.Value:
			# Attempt to add a new entry first.
			var_new_name = self.measurement_name_input.Value

			try:
				self.global_store.variables[var_new_name] = self.var
			except KeyError:
				MessageDialog(self, var_new_name, 'Variable name conflicts').Show()
			else:
				# Remove the old entry.
				del self.global_store.variables[self.var.name]

				self.var.name = var_new_name

		self.measurement_name_input.BackgroundColour = OK_BACKGROUND_COLOR

		self.set_title()

	def OnScaling(self, evt=None):
		"""Open the scaling-settings dialog; save on OK."""
		def ok_callback(dlg):
			self.scaling_settings = dlg.GetValue()

		dlg = ScalingSettingsDialog(self, ok_callback)
		dlg.SetValue(self.scaling_settings)
		dlg.Show()

	def msg_resource(self, name, value=None):
		"""Pub-sub handler: a resource was added (value set) or removed (None)."""
		resource_name = self.var.resource_name

		if name == resource_name:
			self.resource = value

			if value is not None and not self.unwrapping:
				self.wrap_with_scaling(resource_name, value)
| 32.093458 | 103 | 0.765871 |
4b8d13ce30355419383d93b71018f02a3a048e49 | 182,160 | py | Python | pandas/core/frame.py | ledmonster/pandas | bae392d2ee27273b11cd7c4d1c2a20a999bed357 | [
"PSF-2.0",
"Apache-2.0",
"BSD-2-Clause",
"BSD-3-Clause"
] | null | null | null | pandas/core/frame.py | ledmonster/pandas | bae392d2ee27273b11cd7c4d1c2a20a999bed357 | [
"PSF-2.0",
"Apache-2.0",
"BSD-2-Clause",
"BSD-3-Clause"
] | null | null | null | pandas/core/frame.py | ledmonster/pandas | bae392d2ee27273b11cd7c4d1c2a20a999bed357 | [
"PSF-2.0",
"Apache-2.0",
"BSD-2-Clause",
"BSD-3-Clause"
] | null | null | null | """
DataFrame
---------
An efficient 2D container for potentially mixed-type time series or other
labeled data series.
Similar to its R counterpart, data.frame, except providing automatic data
alignment and a host of useful data manipulation methods having to do with the
labeling information
"""
from __future__ import division
# pylint: disable=E1101,E1103
# pylint: disable=W0212,W0231,W0703,W0622
import functools
import collections
import itertools
import sys
import types
import warnings
from numpy import nan as NA
import numpy as np
import numpy.ma as ma
from pandas.core.common import (isnull, notnull, PandasError, _try_sort,
_default_index, _maybe_upcast, _is_sequence,
_infer_dtype_from_scalar, _values_from_object,
is_list_like, _get_dtype)
from pandas.core.generic import NDFrame, _shared_docs
from pandas.core.index import Index, MultiIndex, _ensure_index
from pandas.core.indexing import (_maybe_droplevels,
_convert_to_index_sliceable,
_check_bool_indexer)
from pandas.core.internals import (BlockManager,
create_block_manager_from_arrays,
create_block_manager_from_blocks)
from pandas.core.series import Series
from pandas.core.categorical import Categorical
import pandas.computation.expressions as expressions
from pandas.computation.eval import eval as _eval
from numpy import percentile as _quantile
from pandas.compat import(range, zip, lrange, lmap, lzip, StringIO, u,
OrderedDict, raise_with_traceback)
from pandas import compat
from pandas.util.decorators import deprecate, Appender, Substitution, \
deprecate_kwarg
from pandas.tseries.period import PeriodIndex
from pandas.tseries.index import DatetimeIndex
import pandas.core.algorithms as algos
import pandas.core.common as com
import pandas.core.format as fmt
import pandas.core.nanops as nanops
import pandas.core.ops as ops
import pandas.lib as lib
import pandas.algos as _algos
from pandas.core.config import get_option
#----------------------------------------------------------------------
# Docstring templates
_shared_doc_kwargs = dict(axes='index, columns', klass='DataFrame',
axes_single_arg="{0,1,'index','columns'}")
_numeric_only_doc = """numeric_only : boolean, default None
Include only float, int, boolean data. If None, will attempt to use
everything, then use only numeric data
"""
_merge_doc = """
Merge DataFrame objects by performing a database-style join operation by
columns or indexes.
If joining columns on columns, the DataFrame indexes *will be
ignored*. Otherwise if joining indexes on indexes or indexes on a column or
columns, the index will be passed on.
Parameters
----------%s
right : DataFrame
how : {'left', 'right', 'outer', 'inner'}, default 'inner'
* left: use only keys from left frame (SQL: left outer join)
* right: use only keys from right frame (SQL: right outer join)
* outer: use union of keys from both frames (SQL: full outer join)
* inner: use intersection of keys from both frames (SQL: inner join)
on : label or list
Field names to join on. Must be found in both DataFrames. If on is
None and not merging on indexes, then it merges on the intersection of
the columns by default.
left_on : label or list, or array-like
Field names to join on in left DataFrame. Can be a vector or list of
vectors of the length of the DataFrame to use a particular vector as
the join key instead of columns
right_on : label or list, or array-like
Field names to join on in right DataFrame or vector/list of vectors per
left_on docs
left_index : boolean, default False
Use the index from the left DataFrame as the join key(s). If it is a
MultiIndex, the number of keys in the other DataFrame (either the index
or a number of columns) must match the number of levels
right_index : boolean, default False
Use the index from the right DataFrame as the join key. Same caveats as
left_index
sort : boolean, default False
Sort the join keys lexicographically in the result DataFrame
suffixes : 2-length sequence (tuple, list, ...)
Suffix to apply to overlapping column names in the left and right
side, respectively
copy : boolean, default True
If False, do not copy data unnecessarily
Examples
--------
>>> A >>> B
lkey value rkey value
0 foo 1 0 foo 5
1 bar 2 1 bar 6
2 baz 3 2 qux 7
3 foo 4 3 bar 8
>>> merge(A, B, left_on='lkey', right_on='rkey', how='outer')
lkey value_x rkey value_y
0 foo 1 foo 5
1 foo 4 foo 5
2 bar 2 bar 6
3 bar 2 bar 8
4 baz 3 NaN NaN
5 NaN NaN qux 7
Returns
-------
merged : DataFrame
"""
#----------------------------------------------------------------------
# DataFrame class
class DataFrame(NDFrame):
""" Two-dimensional size-mutable, potentially heterogeneous tabular data
structure with labeled axes (rows and columns). Arithmetic operations
align on both row and column labels. Can be thought of as a dict-like
container for Series objects. The primary pandas data structure
Parameters
----------
data : numpy ndarray (structured or homogeneous), dict, or DataFrame
Dict can contain Series, arrays, constants, or list-like objects
index : Index or array-like
Index to use for resulting frame. Will default to np.arange(n) if
no indexing information part of input data and no index provided
columns : Index or array-like
Column labels to use for resulting frame. Will default to
np.arange(n) if no column labels are provided
dtype : dtype, default None
Data type to force, otherwise infer
copy : boolean, default False
Copy data from inputs. Only affects DataFrame / 2d ndarray input
Examples
--------
>>> d = {'col1': ts1, 'col2': ts2}
>>> df = DataFrame(data=d, index=index)
>>> df2 = DataFrame(np.random.randn(10, 5))
>>> df3 = DataFrame(np.random.randn(10, 5),
... columns=['a', 'b', 'c', 'd', 'e'])
See also
--------
DataFrame.from_records : constructor from tuples, also record arrays
DataFrame.from_dict : from dicts of Series, arrays, or dicts
DataFrame.from_csv : from CSV files
DataFrame.from_items : from sequence of (key, value) pairs
pandas.read_csv, pandas.read_table, pandas.read_clipboard
"""
_auto_consolidate = True
@property
def _constructor(self):
return DataFrame
_constructor_sliced = Series
    def __init__(self, data=None, index=None, columns=None, dtype=None,
                 copy=False):
        # Dispatch on the type of ``data`` to build a BlockManager,
        # then hand it to the NDFrame base constructor.
        if data is None:
            data = {}
        if dtype is not None:
            dtype = self._validate_dtype(dtype)

        if isinstance(data, DataFrame):
            # Reuse the existing frame's internal block manager.
            data = data._data

        if isinstance(data, BlockManager):
            mgr = self._init_mgr(data, axes=dict(index=index, columns=columns),
                                 dtype=dtype, copy=copy)
        elif isinstance(data, dict):
            mgr = self._init_dict(data, index, columns, dtype=dtype)
        elif isinstance(data, ma.MaskedArray):
            # Imported lazily; only needed for masked-array input.
            import numpy.ma.mrecords as mrecords

            # masked recarray
            if isinstance(data, mrecords.MaskedRecords):
                mgr = _masked_rec_array_to_mgr(data, index, columns, dtype,
                                               copy)

            # a masked array: fill masked entries with NaN (upcasting the
            # dtype if necessary) before treating it as a plain ndarray.
            else:
                mask = ma.getmaskarray(data)
                if mask.any():
                    data, fill_value = _maybe_upcast(data, copy=True)
                    data[mask] = fill_value
                else:
                    data = data.copy()
                mgr = self._init_ndarray(data, index, columns, dtype=dtype,
                                         copy=copy)

        elif isinstance(data, (np.ndarray, Series)):
            if data.dtype.names:
                # Structured ndarray: treat each field as a column.
                data_columns = list(data.dtype.names)
                data = dict((k, data[k]) for k in data_columns)
                if columns is None:
                    columns = data_columns
                mgr = self._init_dict(data, index, columns, dtype=dtype)
            elif getattr(data, 'name', None):
                # Named Series becomes a single-column frame.
                mgr = self._init_dict({data.name: data}, index, columns,
                                      dtype=dtype)
            else:
                mgr = self._init_ndarray(data, index, columns, dtype=dtype,
                                         copy=copy)
        elif isinstance(data, (list, types.GeneratorType)):
            if isinstance(data, types.GeneratorType):
                data = list(data)
            if len(data) > 0:
                if index is None and isinstance(data[0], Series):
                    index = _get_names_from_index(data)

                if is_list_like(data[0]) and getattr(data[0], 'ndim', 1) == 1:
                    # List of rows (1-dim records): convert column-wise.
                    arrays, columns = _to_arrays(data, columns, dtype=dtype)
                    columns = _ensure_index(columns)

                    if index is None:
                        index = _default_index(len(data))
                    mgr = _arrays_to_mgr(arrays, columns, index, columns,
                                         dtype=dtype)
                else:
                    mgr = self._init_ndarray(data, index, columns, dtype=dtype,
                                             copy=copy)
            else:
                mgr = self._init_ndarray(data, index, columns, dtype=dtype,
                                         copy=copy)
        elif isinstance(data, collections.Iterator):
            raise TypeError("data argument can't be an iterator")
        else:
            # Scalar or unknown sequence: let numpy try to coerce it.
            try:
                arr = np.array(data, dtype=dtype, copy=copy)
            except (ValueError, TypeError) as e:
                exc = TypeError('DataFrame constructor called with '
                                'incompatible data and dtype: %s' % e)
                raise_with_traceback(exc)

            if arr.ndim == 0 and index is not None and columns is not None:
                # Scalar + both axes given: broadcast to the full shape.
                if isinstance(data, compat.string_types) and dtype is None:
                    dtype = np.object_
                if dtype is None:
                    dtype, data = _infer_dtype_from_scalar(data)

                values = np.empty((len(index), len(columns)), dtype=dtype)
                values.fill(data)
                mgr = self._init_ndarray(values, index, columns, dtype=dtype,
                                         copy=False)
            else:
                raise PandasError('DataFrame constructor not properly called!')

        NDFrame.__init__(self, mgr, fastpath=True)
    def _init_dict(self, data, index, columns, dtype=None):
        """
        Segregate Series based on type and coerce into matrices.
        Needs to handle a lot of exceptional cases.

        Returns a BlockManager built from the dict's values; when
        ``columns`` is given it controls both selection and order.
        """
        if columns is not None:
            columns = _ensure_index(columns)

            # prefilter if columns passed
            data = dict((k, v) for k, v in compat.iteritems(data)
                        if k in columns)

            if index is None:
                index = extract_index(list(data.values()))
            else:
                index = _ensure_index(index)

            arrays = []
            data_names = []
            for k in columns:
                if k not in data:
                    # no obvious "empty" int column
                    if dtype is not None and issubclass(dtype.type,
                                                        np.integer):
                        continue

                    if dtype is None:
                        # GH #1783: missing columns default to object NaN
                        v = np.empty(len(index), dtype=object)
                    else:
                        v = np.empty(len(index), dtype=dtype)

                    v.fill(NA)
                else:
                    v = data[k]
                data_names.append(k)
                arrays.append(v)

        else:
            # No explicit columns: sort keys unless insertion order
            # was explicitly requested via an OrderedDict.
            keys = list(data.keys())
            if not isinstance(data, OrderedDict):
                keys = _try_sort(keys)
            columns = data_names = Index(keys)
            arrays = [data[k] for k in keys]

        return _arrays_to_mgr(arrays, data_names, index, columns,
                              dtype=dtype)
    def _init_ndarray(self, values, index, columns, dtype=None,
                      copy=False):
        # Build a BlockManager from 2-d (or promotable) ndarray input.
        if isinstance(values, Series):
            if columns is None:
                if values.name is not None:
                    columns = [values.name]
            if index is None:
                index = values.index
            else:
                values = values.reindex(index)

            # zero len case (GH #2234)
            if not len(values) and columns is not None and len(columns):
                values = np.empty((0, 1), dtype=object)

        # Coerce to a well-formed 2-d ndarray.
        values = _prep_ndarray(values, copy=copy)

        if dtype is not None:
            if values.dtype != dtype:
                try:
                    values = values.astype(dtype)
                except Exception as orig:
                    # Re-raise with context while keeping the traceback.
                    e = ValueError("failed to cast to '%s' (Exception was: %s)"
                                   % (dtype, orig))
                    raise_with_traceback(e)

        N, K = values.shape

        if index is None:
            index = _default_index(N)
        else:
            index = _ensure_index(index)

        if columns is None:
            columns = _default_index(K)
        else:
            columns = _ensure_index(columns)

        # Blocks store data transposed: one (columns x index) block.
        return create_block_manager_from_blocks([values.T], [columns, index])
    @property
    def axes(self):
        """Return the frame's axis labels as [index, columns] (axes 0, 1)."""
        return [self.index, self.columns]
    @property
    def shape(self):
        """Return (number of rows, number of columns), like ndarray.shape."""
        return (len(self.index), len(self.columns))
def _repr_fits_vertical_(self):
"""
Check length against max_rows.
"""
max_rows = get_option("display.max_rows")
return len(self) <= max_rows
    def _repr_fits_horizontal_(self, ignore_width=False):
        """
        Check if full repr fits in horizontal boundaries imposed by the display
        options width and max_columns. In case of a non-interactive session, no
        boundaries apply.

        ignore_width is here so ipnb+HTML output can behave the way
        users expect. display.max_columns remains in effect.
        GH3541, GH3573
        """
        width, height = fmt.get_console_size()
        max_columns = get_option("display.max_columns")
        nb_columns = len(self.columns)

        # exceed max columns, or clearly too many columns for the
        # terminal width (each column needs at least ~2 characters)
        if ((max_columns and nb_columns > max_columns) or
                ((not ignore_width) and width and nb_columns > (width // 2))):
            return False

        if (ignore_width  # used by repr_html under IPython notebook
                # scripts ignore terminal dims
                or not com.in_interactive_session()):
            return True

        if (get_option('display.width') is not None or
                com.in_ipython_frontend()):
            # check at least the column row for excessive width
            max_rows = 1
        else:
            max_rows = get_option("display.max_rows")

        # when auto-detecting, so width=None and not in ipython front end
        # check whether repr fits horizontal by actually checking
        # the width of the rendered repr
        buf = StringIO()

        # only care about the stuff we'll actually print out
        # and to_string on entire frame may be expensive
        d = self

        if not (max_rows is None):  # unlimited rows
            # min of two, where one may be None
            d = d.iloc[:min(max_rows, len(d))]
        else:
            return True

        d.to_string(buf=buf)
        value = buf.getvalue()
        # Widest rendered line decides whether the repr fits.
        repr_width = max([len(l) for l in value.split('\n')])

        return repr_width < width
def _info_repr(self):
"""True if the repr should show the info view."""
info_repr_option = (get_option("display.large_repr") == "info")
return info_repr_option and not (
self._repr_fits_horizontal_() and self._repr_fits_vertical_()
)
    def __unicode__(self):
        """
        Return a string representation for a particular DataFrame

        Invoked by unicode(df) in py2 only. Yields a Unicode String in both
        py2/py3.
        """
        buf = StringIO(u(""))

        # Too large to render fully: show the summary info view instead.
        if self._info_repr():
            self.info(buf=buf)
            return buf.getvalue()

        max_rows = get_option("display.max_rows")
        max_cols = get_option("display.max_columns")
        show_dimensions = get_option("display.show_dimensions")
        if get_option("display.expand_frame_repr"):
            # Wrap to the detected console width.
            width, _ = fmt.get_console_size()
        else:
            width = None
        self.to_string(buf=buf, max_rows=max_rows, max_cols=max_cols,
                       line_width=width, show_dimensions=show_dimensions)

        return buf.getvalue()
def _repr_html_(self):
"""
Return a html representation for a particular DataFrame.
Mainly for IPython notebook.
"""
# qtconsole doesn't report it's line width, and also
# behaves badly when outputting an HTML table
# that doesn't fit the window, so disable it.
# XXX: In IPython 3.x and above, the Qt console will not attempt to
# display HTML, so this check can be removed when support for IPython 2.x
# is no longer needed.
if com.in_qtconsole():
# 'HTML output is disabled in QtConsole'
return None
if self._info_repr():
buf = StringIO(u(""))
self.info(buf=buf)
# need to escape the <class>, should be the first line.
val = buf.getvalue().replace('<', r'<', 1).replace('>',
r'>', 1)
return '<pre>' + val + '</pre>'
if get_option("display.notebook_repr_html"):
max_rows = get_option("display.max_rows")
max_cols = get_option("display.max_columns")
show_dimensions = get_option("display.show_dimensions")
return ('<div style="max-height:1000px;'
'max-width:1500px;overflow:auto;">\n' +
self.to_html(max_rows=max_rows, max_cols=max_cols,
show_dimensions=show_dimensions) + '\n</div>')
else:
return None
def iteritems(self):
"""Iterator over (column, series) pairs"""
if self.columns.is_unique and hasattr(self, '_item_cache'):
for k in self.columns:
yield k, self._get_item_cache(k)
else:
for i, k in enumerate(self.columns):
yield k, self.icol(i)
def iterrows(self):
"""
Iterate over rows of DataFrame as (index, Series) pairs.
Notes
-----
* ``iterrows`` does **not** preserve dtypes across the rows (dtypes
are preserved across columns for DataFrames). For example,
>>> df = DataFrame([[1, 1.0]], columns=['x', 'y'])
>>> row = next(df.iterrows())[1]
>>> print(row['x'].dtype)
float64
>>> print(df['x'].dtype)
int64
Returns
-------
it : generator
A generator that iterates over the rows of the frame.
"""
columns = self.columns
for k, v in zip(self.index, self.values):
s = Series(v, index=columns, name=k)
yield k, s
def itertuples(self, index=True):
"""
Iterate over rows of DataFrame as tuples, with index value
as first element of the tuple
"""
arrays = []
if index:
arrays.append(self.index)
# use integer indexing because of possible duplicate column names
arrays.extend(self.iloc[:, k] for k in range(len(self.columns)))
return zip(*arrays)
    # On Python 3 dict-style ``iteritems`` is conventionally exposed
    # as ``items``; alias it so both spellings work.
    if compat.PY3:  # pragma: no cover
        items = iteritems
    def __len__(self):
        """Returns length of info axis, but here we use the index """
        # len(df) counts rows, i.e. the length of the row-label index.
        return len(self.index)
    def dot(self, other):
        """
        Matrix multiplication with DataFrame or Series objects

        Parameters
        ----------
        other : DataFrame or Series

        Returns
        -------
        dot_product : DataFrame or Series

        Raises
        ------
        ValueError
            If the two operands cannot be aligned / shape-matched.
        """
        if isinstance(other, (Series, DataFrame)):
            # Align self's columns with other's index before multiplying;
            # any label missing on either side is an error.
            common = self.columns.union(other.index)
            if (len(common) > len(self.columns) or
                    len(common) > len(other.index)):
                raise ValueError('matrices are not aligned')

            left = self.reindex(columns=common, copy=False)
            right = other.reindex(index=common, copy=False)
            lvals = left.values
            rvals = right.values
        else:
            # Raw array-like: no label alignment, only a shape check.
            left = self
            lvals = self.values
            rvals = np.asarray(other)
            if lvals.shape[1] != rvals.shape[0]:
                raise ValueError('Dot product shape mismatch, %s vs %s' %
                                 (lvals.shape, rvals.shape))

        if isinstance(other, DataFrame):
            return self._constructor(np.dot(lvals, rvals),
                                     index=left.index,
                                     columns=other.columns)
        elif isinstance(other, Series):
            return Series(np.dot(lvals, rvals), index=left.index)
        elif isinstance(rvals, np.ndarray):
            # ndarray result: 2-d -> DataFrame, 1-d -> Series.
            result = np.dot(lvals, rvals)
            if result.ndim == 2:
                return self._constructor(result, index=left.index)
            else:
                return Series(result, index=left.index)
        else:  # pragma: no cover
            raise TypeError('unsupported type: %s' % type(other))
#----------------------------------------------------------------------
# IO methods (to / from other formats)
@classmethod
def from_dict(cls, data, orient='columns', dtype=None):
"""
Construct DataFrame from dict of array-like or dicts
Parameters
----------
data : dict
{field : array-like} or {field : dict}
orient : {'columns', 'index'}, default 'columns'
The "orientation" of the data. If the keys of the passed dict
should be the columns of the resulting DataFrame, pass 'columns'
(default). Otherwise if the keys should be rows, pass 'index'.
Returns
-------
DataFrame
"""
index, columns = None, None
orient = orient.lower()
if orient == 'index':
if len(data) > 0:
# TODO speed up Series case
if isinstance(list(data.values())[0], (Series, dict)):
data = _from_nested_dict(data)
else:
data, index = list(data.values()), list(data.keys())
elif orient != 'columns': # pragma: no cover
raise ValueError('only recognize index or columns for orient')
return cls(data, index=index, columns=columns, dtype=dtype)
def to_dict(self, outtype='dict'):
"""
Convert DataFrame to dictionary.
Parameters
----------
outtype : str {'dict', 'list', 'series', 'records'}
Determines the type of the values of the dictionary. The
default `dict` is a nested dictionary {column -> {index -> value}}.
`list` returns {column -> list(values)}. `series` returns
{column -> Series(values)}. `records` returns [{columns -> value}].
Abbreviations are allowed.
Returns
-------
result : dict like {column -> {index -> value}}
"""
if not self.columns.is_unique:
warnings.warn("DataFrame columns are not unique, some "
"columns will be omitted.", UserWarning)
if outtype.lower().startswith('d'):
return dict((k, v.to_dict()) for k, v in compat.iteritems(self))
elif outtype.lower().startswith('l'):
return dict((k, v.tolist()) for k, v in compat.iteritems(self))
elif outtype.lower().startswith('s'):
return dict((k, v) for k, v in compat.iteritems(self))
elif outtype.lower().startswith('r'):
return [dict((k, v) for k, v in zip(self.columns, row))
for row in self.values]
else: # pragma: no cover
raise ValueError("outtype %s not understood" % outtype)
    def to_gbq(self, destination_table, project_id=None, chunksize=10000,
               verbose=True, reauth=False):
        """Write a DataFrame to a Google BigQuery table.

        THIS IS AN EXPERIMENTAL LIBRARY

        If the table exists, the dataframe will be written to the table using
        the defined table schema and column types. For simplicity, this method
        uses the Google BigQuery streaming API. The to_gbq method chunks data
        into a default chunk size of 10,000. Failures return the complete error
        response which can be quite long depending on the size of the insert.
        There are several important limitations of the Google streaming API
        which are detailed at:
        https://developers.google.com/bigquery/streaming-data-into-bigquery.

        Parameters
        ----------
        dataframe : DataFrame
            DataFrame to be written
        destination_table : string
            Name of table to be written, in the form 'dataset.tablename'
        project_id : str
            Google BigQuery Account project ID.
        chunksize : int (default 10000)
            Number of rows to be inserted in each chunk from the dataframe.
        verbose : boolean (default True)
            Show percentage complete
        reauth : boolean (default False)
            Force Google BigQuery to reauthenticate the user. This is useful
            if multiple accounts are used.

        """
        # Imported lazily so pandas does not require the BigQuery
        # dependencies unless this method is actually used; this is a
        # thin delegate to pandas.io.gbq.to_gbq.
        from pandas.io import gbq
        return gbq.to_gbq(self, destination_table, project_id=project_id,
                          chunksize=chunksize, verbose=verbose,
                          reauth=reauth)
@classmethod
def from_records(cls, data, index=None, exclude=None, columns=None,
coerce_float=False, nrows=None):
"""
Convert structured or record ndarray to DataFrame
Parameters
----------
data : ndarray (structured dtype), list of tuples, dict, or DataFrame
index : string, list of fields, array-like
Field of array to use as the index, alternately a specific set of
input labels to use
exclude : sequence, default None
Columns or fields to exclude
columns : sequence, default None
Column names to use. If the passed data do not have names
associated with them, this argument provides names for the
columns. Otherwise this argument indicates the order of the columns
in the result (any names not found in the data will become all-NA
columns)
coerce_float : boolean, default False
Attempt to convert values to non-string, non-numeric objects (like
decimal.Decimal) to floating point, useful for SQL result sets
Returns
-------
df : DataFrame
"""
# Make a copy of the input columns so we can modify it
if columns is not None:
columns = _ensure_index(columns)
if com.is_iterator(data):
if nrows == 0:
return cls()
try:
if compat.PY3:
first_row = next(data)
else:
first_row = next(data)
except StopIteration:
return cls(index=index, columns=columns)
dtype = None
if hasattr(first_row, 'dtype') and first_row.dtype.names:
dtype = first_row.dtype
values = [first_row]
if nrows is None:
values += data
else:
values.extend(itertools.islice(data, nrows - 1))
if dtype is not None:
data = np.array(values, dtype=dtype)
else:
data = values
if isinstance(data, dict):
if columns is None:
columns = arr_columns = _ensure_index(sorted(data))
arrays = [data[k] for k in columns]
else:
arrays = []
arr_columns = []
for k, v in compat.iteritems(data):
if k in columns:
arr_columns.append(k)
arrays.append(v)
arrays, arr_columns = _reorder_arrays(arrays, arr_columns,
columns)
elif isinstance(data, (np.ndarray, DataFrame)):
arrays, columns = _to_arrays(data, columns)
if columns is not None:
columns = _ensure_index(columns)
arr_columns = columns
else:
arrays, arr_columns = _to_arrays(data, columns,
coerce_float=coerce_float)
arr_columns = _ensure_index(arr_columns)
if columns is not None:
columns = _ensure_index(columns)
else:
columns = arr_columns
if exclude is None:
exclude = set()
else:
exclude = set(exclude)
result_index = None
if index is not None:
if (isinstance(index, compat.string_types) or
not hasattr(index, "__iter__")):
i = columns.get_loc(index)
exclude.add(index)
if len(arrays) > 0:
result_index = Index(arrays[i], name=index)
else:
result_index = Index([], name=index)
else:
try:
to_remove = [arr_columns.get_loc(field) for field in index]
result_index = MultiIndex.from_arrays(
[arrays[i] for i in to_remove], names=index)
exclude.update(index)
except Exception:
result_index = index
if any(exclude):
arr_exclude = [x for x in exclude if x in arr_columns]
to_remove = [arr_columns.get_loc(col) for col in arr_exclude]
arrays = [v for i, v in enumerate(arrays) if i not in to_remove]
arr_columns = arr_columns.drop(arr_exclude)
columns = columns.drop(exclude)
mgr = _arrays_to_mgr(arrays, arr_columns, result_index,
columns)
return cls(mgr)
    def to_records(self, index=True, convert_datetime64=True):
        """
        Convert DataFrame to record array. Index will be put in the
        'index' field of the record array if requested

        Parameters
        ----------
        index : boolean, default True
            Include index in resulting record array, stored in 'index' field
        convert_datetime64 : boolean, default True
            Whether to convert the index to datetime.datetime if it is a
            DatetimeIndex

        Returns
        -------
        y : recarray
        """
        if index:
            if com.is_datetime64_dtype(self.index) and convert_datetime64:
                ix_vals = [self.index.to_pydatetime()]
            else:
                if isinstance(self.index, MultiIndex):
                    # array of tuples to numpy cols. copy copy copy
                    ix_vals = lmap(np.array, zip(*self.index.values))
                else:
                    ix_vals = [self.index.values]

            arrays = ix_vals + [self[c].values for c in self.columns]

            count = 0
            index_names = list(self.index.names)
            if isinstance(self.index, MultiIndex):
                # Unnamed MultiIndex levels get positional 'level_%d' names.
                for i, n in enumerate(index_names):
                    if n is None:
                        index_names[i] = 'level_%d' % count
                        count += 1
            elif index_names[0] is None:
                index_names = ['index']
            names = index_names + lmap(str, self.columns)
        else:
            arrays = [self[c].values for c in self.columns]
            names = lmap(str, self.columns)

        # Build a structured dtype pairing each output field name with the
        # dtype of the corresponding array.
        dtype = np.dtype([(x, v.dtype) for x, v in zip(names, arrays)])
        return np.rec.fromarrays(arrays, dtype=dtype, names=names)
    @classmethod
    def from_items(cls, items, columns=None, orient='columns'):
        """
        Convert (key, value) pairs to DataFrame. The keys will be the axis
        index (usually the columns, but depends on the specified
        orientation). The values should be arrays or Series.

        Parameters
        ----------
        items : sequence of (key, value) pairs
            Values should be arrays or Series.
        columns : sequence of column labels, optional
            Must be passed if orient='index'.
        orient : {'columns', 'index'}, default 'columns'
            The "orientation" of the data. If the keys of the
            input correspond to column labels, pass 'columns'
            (default). Otherwise if the keys correspond to the index,
            pass 'index'.

        Returns
        -------
        frame : DataFrame
        """
        keys, values = lzip(*items)

        if orient == 'columns':
            if columns is not None:
                columns = _ensure_index(columns)

                idict = dict(items)
                if len(idict) < len(items):
                    # Duplicate keys collapsed in the dict: the raw values
                    # must be used and the passed columns must match exactly.
                    if not columns.equals(_ensure_index(keys)):
                        raise ValueError('With non-unique item names, passed '
                                         'columns must be identical')
                    arrays = values
                else:
                    arrays = [idict[k] for k in columns if k in idict]
            else:
                columns = _ensure_index(keys)
                arrays = values

            return cls._from_arrays(arrays, columns, None)
        elif orient == 'index':
            if columns is None:
                raise TypeError("Must pass columns with orient='index'")

            keys = _ensure_index(keys)

            # Each value is a row: transpose to columns, then soft-convert
            # the resulting object arrays to better dtypes where possible.
            arr = np.array(values, dtype=object).T
            data = [lib.maybe_convert_objects(v) for v in arr]
            return cls._from_arrays(data, columns, keys)
        else:  # pragma: no cover
            raise ValueError("'orient' must be either 'columns' or 'index'")
@classmethod
def _from_arrays(cls, arrays, columns, index, dtype=None):
mgr = _arrays_to_mgr(arrays, columns, index, columns, dtype=dtype)
return cls(mgr)
@classmethod
def from_csv(cls, path, header=0, sep=',', index_col=0,
parse_dates=True, encoding=None, tupleize_cols=False,
infer_datetime_format=False):
"""
Read delimited file into DataFrame
Parameters
----------
path : string file path or file handle / StringIO
header : int, default 0
Row to use at header (skip prior rows)
sep : string, default ','
Field delimiter
index_col : int or sequence, default 0
Column to use for index. If a sequence is given, a MultiIndex
is used. Different default from read_table
parse_dates : boolean, default True
Parse dates. Different default from read_table
tupleize_cols : boolean, default False
write multi_index columns as a list of tuples (if True)
or new (expanded format) if False)
infer_datetime_format: boolean, default False
If True and `parse_dates` is True for a column, try to infer the
datetime format based on the first datetime string. If the format
can be inferred, there often will be a large parsing speed-up.
Notes
-----
Preferable to use read_table for most general purposes but from_csv
makes for an easy roundtrip to and from file, especially with a
DataFrame of time series data
Returns
-------
y : DataFrame
"""
from pandas.io.parsers import read_table
return read_table(path, header=header, sep=sep,
parse_dates=parse_dates, index_col=index_col,
encoding=encoding, tupleize_cols=tupleize_cols,
infer_datetime_format=infer_datetime_format)
def to_sparse(self, fill_value=None, kind='block'):
"""
Convert to SparseDataFrame
Parameters
----------
fill_value : float, default NaN
kind : {'block', 'integer'}
Returns
-------
y : SparseDataFrame
"""
from pandas.core.sparse import SparseDataFrame
return SparseDataFrame(self._series, index=self.index,
default_kind=kind,
default_fill_value=fill_value)
    def to_panel(self):
        """
        Transform long (stacked) format (DataFrame) into wide (3D, Panel)
        format.

        Currently the index of the DataFrame must be a 2-level MultiIndex. This
        may be generalized later

        Returns
        -------
        panel : Panel
        """
        from pandas.core.panel import Panel
        from pandas.core.reshape import block2d_to_blocknd

        # only support this kind for now
        if (not isinstance(self.index, MultiIndex) or  # pragma: no cover
                len(self.index.levels) != 2):
            raise NotImplementedError('Only 2-level MultiIndex are supported.')

        if not self.index.is_unique:
            raise ValueError("Can't convert non-uniquely indexed "
                             "DataFrame to Panel")

        self._consolidate_inplace()

        # minor axis must be sorted
        if self.index.lexsort_depth < 2:
            selfsorted = self.sortlevel(0)
        else:
            selfsorted = self

        major_axis, minor_axis = selfsorted.index.levels
        major_labels, minor_labels = selfsorted.index.labels
        shape = len(major_axis), len(minor_axis)

        # Reshape each 2D block into a 3D block positioned by the
        # (major, minor) label pairs.
        new_blocks = []
        for block in selfsorted._data.blocks:
            newb = block2d_to_blocknd(
                values=block.values.T,
                placement=block.mgr_locs, shape=shape,
                labels=[major_labels, minor_labels],
                ref_items=selfsorted.columns)
            new_blocks.append(newb)

        # preserve names, if any
        major_axis = major_axis.copy()
        major_axis.name = self.index.names[0]

        minor_axis = minor_axis.copy()
        minor_axis.name = self.index.names[1]

        new_axes = [selfsorted.columns, major_axis, minor_axis]
        new_mgr = create_block_manager_from_blocks(new_blocks, new_axes)

        return Panel(new_mgr)
to_wide = deprecate('to_wide', to_panel)
    @deprecate_kwarg(old_arg_name='cols', new_arg_name='columns')
    def to_csv(self, path_or_buf=None, sep=",", na_rep='', float_format=None,
               columns=None, header=True, index=True, index_label=None,
               mode='w', encoding=None, quoting=None,
               quotechar='"', line_terminator='\n', chunksize=None,
               tupleize_cols=False, date_format=None, doublequote=True,
               escapechar=None, **kwds):
        r"""Write DataFrame to a comma-separated values (csv) file

        Parameters
        ----------
        path_or_buf : string or file handle, default None
            File path or object, if None is provided the result is returned as
            a string.
        sep : character, default ","
            Field delimiter for the output file.
        na_rep : string, default ''
            Missing data representation
        float_format : string, default None
            Format string for floating point numbers
        columns : sequence, optional
            Columns to write
        header : boolean or list of string, default True
            Write out column names. If a list of string is given it is assumed
            to be aliases for the column names
        index : boolean, default True
            Write row names (index)
        index_label : string or sequence, or False, default None
            Column label for index column(s) if desired. If None is given, and
            `header` and `index` are True, then the index names are used. A
            sequence should be given if the DataFrame uses MultiIndex. If
            False do not print fields for index names. Use index_label=False
            for easier importing in R
        mode : str
            Python write mode, default 'w'
        encoding : string, optional
            a string representing the encoding to use if the contents are
            non-ascii, for python versions prior to 3
        line_terminator : string, default '\\n'
            The newline character or character sequence to use in the output
            file
        quoting : optional constant from csv module
            defaults to csv.QUOTE_MINIMAL
        quotechar : string (length 1), default '"'
            character used to quote fields
        doublequote : boolean, default True
            Control quoting of `quotechar` inside a field
        escapechar : string (length 1), default None
            character used to escape `sep` and `quotechar` when appropriate
        chunksize : int or None
            rows to write at a time
        tupleize_cols : boolean, default False
            write multi_index columns as a list of tuples (if True)
            or new (expanded format) if False)
        date_format : string, default None
            Format string for datetime objects
        cols : kwarg only alias of columns [deprecated]
        """
        # All of the actual CSV rendering lives in the CSVFormatter; this
        # method only forwards the options and handles the "return a string
        # when no target was given" convenience.
        formatter = fmt.CSVFormatter(self, path_or_buf,
                                     line_terminator=line_terminator,
                                     sep=sep, encoding=encoding,
                                     quoting=quoting, na_rep=na_rep,
                                     float_format=float_format, cols=columns,
                                     header=header, index=index,
                                     index_label=index_label, mode=mode,
                                     chunksize=chunksize, quotechar=quotechar,
                                     engine=kwds.get("engine"),
                                     tupleize_cols=tupleize_cols,
                                     date_format=date_format,
                                     doublequote=doublequote,
                                     escapechar=escapechar)
        formatter.save()

        if path_or_buf is None:
            # No destination: the formatter wrote into an in-memory buffer.
            return formatter.path_or_buf.getvalue()
@deprecate_kwarg(old_arg_name='cols', new_arg_name='columns')
def to_excel(self, excel_writer, sheet_name='Sheet1', na_rep='',
float_format=None, columns=None, header=True, index=True,
index_label=None, startrow=0, startcol=0, engine=None,
merge_cells=True, encoding=None, inf_rep='inf'):
"""
Write DataFrame to a excel sheet
Parameters
----------
excel_writer : string or ExcelWriter object
File path or existing ExcelWriter
sheet_name : string, default 'Sheet1'
Name of sheet which will contain DataFrame
na_rep : string, default ''
Missing data representation
float_format : string, default None
Format string for floating point numbers
columns : sequence, optional
Columns to write
header : boolean or list of string, default True
Write out column names. If a list of string is given it is
assumed to be aliases for the column names
index : boolean, default True
Write row names (index)
index_label : string or sequence, default None
Column label for index column(s) if desired. If None is given, and
`header` and `index` are True, then the index names are used. A
sequence should be given if the DataFrame uses MultiIndex.
startrow :
upper left cell row to dump data frame
startcol :
upper left cell column to dump data frame
engine : string, default None
write engine to use - you can also set this via the options
``io.excel.xlsx.writer``, ``io.excel.xls.writer``, and
``io.excel.xlsm.writer``.
merge_cells : boolean, default True
Write MultiIndex and Hierarchical Rows as merged cells.
encoding: string, default None
encoding of the resulting excel file. Only necessary for xlwt,
other writers support unicode natively.
cols : kwarg only alias of columns [deprecated]
inf_rep : string, default 'inf'
Representation for infinity (there is no native representation for
infinity in Excel)
Notes
-----
If passing an existing ExcelWriter object, then the sheet will be added
to the existing workbook. This can be used to save different
DataFrames to one workbook:
>>> writer = ExcelWriter('output.xlsx')
>>> df1.to_excel(writer,'Sheet1')
>>> df2.to_excel(writer,'Sheet2')
>>> writer.save()
"""
from pandas.io.excel import ExcelWriter
need_save = False
if encoding == None:
encoding = 'ascii'
if isinstance(excel_writer, compat.string_types):
excel_writer = ExcelWriter(excel_writer, engine=engine)
need_save = True
formatter = fmt.ExcelFormatter(self,
na_rep=na_rep,
cols=columns,
header=header,
float_format=float_format,
index=index,
index_label=index_label,
merge_cells=merge_cells,
inf_rep=inf_rep)
formatted_cells = formatter.get_formatted_cells()
excel_writer.write_cells(formatted_cells, sheet_name,
startrow=startrow, startcol=startcol)
if need_save:
excel_writer.save()
    def to_stata(
        self, fname, convert_dates=None, write_index=True, encoding="latin-1",
        byteorder=None, time_stamp=None, data_label=None):
        """
        A class for writing Stata binary dta files from array-like objects

        Parameters
        ----------
        fname : file path or buffer
            Where to save the dta file.
        convert_dates : dict
            Dictionary mapping column of datetime types to the stata internal
            format that you want to use for the dates. Options are
            'tc', 'td', 'tm', 'tw', 'th', 'tq', 'ty'. Column can be either a
            number or a name.
        write_index : boolean
            Write the index to the Stata file.
        encoding : str
            Default is latin-1. Note that Stata does not support unicode.
        byteorder : str
            Can be ">", "<", "little", or "big". The default is None which uses
            `sys.byteorder`
        time_stamp : datetime, optional
            Timestamp recorded in the file header.
        data_label : str, optional
            Dataset label recorded in the file header.

        Examples
        --------
        >>> writer = StataWriter('./data_file.dta', data)
        >>> writer.write_file()

        Or with dates

        >>> writer = StataWriter('./date_data_file.dta', data, {2 : 'tw'})
        >>> writer.write_file()
        """
        # Delegates entirely to StataWriter; imported lazily to keep the
        # Stata machinery out of the default import path.
        from pandas.io.stata import StataWriter
        writer = StataWriter(fname, self, convert_dates=convert_dates,
                             encoding=encoding, byteorder=byteorder,
                             time_stamp=time_stamp, data_label=data_label,
                             write_index=write_index)
        writer.write_file()
    @Appender(fmt.docstring_to_string, indents=1)
    def to_string(self, buf=None, columns=None, col_space=None, colSpace=None,
                  header=True, index=True, na_rep='NaN', formatters=None,
                  float_format=None, sparsify=None, index_names=True,
                  justify=None, line_width=None, max_rows=None, max_cols=None,
                  show_dimensions=False):
        """
        Render a DataFrame to a console-friendly tabular output.
        """
        # ``colSpace`` is the deprecated spelling of ``col_space``.
        if colSpace is not None:  # pragma: no cover
            warnings.warn("colSpace is deprecated, use col_space",
                          FutureWarning)
            col_space = colSpace

        formatter = fmt.DataFrameFormatter(self, buf=buf, columns=columns,
                                           col_space=col_space, na_rep=na_rep,
                                           formatters=formatters,
                                           float_format=float_format,
                                           sparsify=sparsify,
                                           justify=justify,
                                           index_names=index_names,
                                           header=header, index=index,
                                           line_width=line_width,
                                           max_rows=max_rows,
                                           max_cols=max_cols,
                                           show_dimensions=show_dimensions)
        formatter.to_string()

        if buf is None:
            # No buffer supplied: return the rendered text instead.
            result = formatter.buf.getvalue()
            return result
    @Appender(fmt.docstring_to_string, indents=1)
    def to_html(self, buf=None, columns=None, col_space=None, colSpace=None,
                header=True, index=True, na_rep='NaN', formatters=None,
                float_format=None, sparsify=None, index_names=True,
                justify=None, bold_rows=True, classes=None, escape=True,
                max_rows=None, max_cols=None, show_dimensions=False):
        """
        Render a DataFrame as an HTML table.

        `to_html`-specific options:

        bold_rows : boolean, default True
            Make the row labels bold in the output
        classes : str or list or tuple, default None
            CSS class(es) to apply to the resulting html table
        escape : boolean, default True
            Convert the characters <, >, and & to HTML-safe sequences.
        max_rows : int, optional
            Maximum number of rows to show before truncating. If None, show
            all.
        max_cols : int, optional
            Maximum number of columns to show before truncating. If None, show
            all.
        """
        # ``colSpace`` is the deprecated spelling of ``col_space``.
        if colSpace is not None:  # pragma: no cover
            warnings.warn("colSpace is deprecated, use col_space",
                          FutureWarning)
            col_space = colSpace

        formatter = fmt.DataFrameFormatter(self, buf=buf, columns=columns,
                                           col_space=col_space, na_rep=na_rep,
                                           formatters=formatters,
                                           float_format=float_format,
                                           sparsify=sparsify,
                                           justify=justify,
                                           index_names=index_names,
                                           header=header, index=index,
                                           bold_rows=bold_rows,
                                           escape=escape,
                                           max_rows=max_rows,
                                           max_cols=max_cols,
                                           show_dimensions=show_dimensions)
        formatter.to_html(classes=classes)

        if buf is None:
            # No buffer supplied: return the rendered HTML instead.
            return formatter.buf.getvalue()
    @Appender(fmt.docstring_to_string, indents=1)
    def to_latex(self, buf=None, columns=None, col_space=None, colSpace=None,
                 header=True, index=True, na_rep='NaN', formatters=None,
                 float_format=None, sparsify=None, index_names=True,
                 bold_rows=True, longtable=False, escape=True):
        """
        Render a DataFrame to a tabular environment table. You can splice
        this into a LaTeX document. Requires \\usepackage{booktabs}.

        `to_latex`-specific options:

        bold_rows : boolean, default True
            Make the row labels bold in the output
        longtable : boolean, default False
            Use a longtable environment instead of tabular. Requires adding
            a \\usepackage{longtable} to your LaTeX preamble.
        escape : boolean, default True
            When set to False prevents from escaping latex special
            characters in column names.
        """
        # ``colSpace`` is the deprecated spelling of ``col_space``.
        if colSpace is not None:  # pragma: no cover
            warnings.warn("colSpace is deprecated, use col_space",
                          FutureWarning)
            col_space = colSpace

        formatter = fmt.DataFrameFormatter(self, buf=buf, columns=columns,
                                           col_space=col_space, na_rep=na_rep,
                                           header=header, index=index,
                                           formatters=formatters,
                                           float_format=float_format,
                                           bold_rows=bold_rows,
                                           sparsify=sparsify,
                                           index_names=index_names,
                                           escape=escape)
        formatter.to_latex(longtable=longtable)

        if buf is None:
            # No buffer supplied: return the rendered LaTeX instead.
            return formatter.buf.getvalue()
    def info(self, verbose=None, buf=None, max_cols=None):
        """
        Concise summary of a DataFrame.

        Parameters
        ----------
        verbose : {None, True, False}, optional
            Whether to print the full summary.
            None follows the `display.max_info_columns` setting.
            True or False overrides the `display.max_info_columns` setting.
        buf : writable buffer, defaults to sys.stdout
        max_cols : int, default None
            Determines whether full summary or short summary is printed.
            None follows the `display.max_info_columns` setting.
        """
        from pandas.core.format import _put_lines

        if buf is None:  # pragma: no cover
            buf = sys.stdout

        lines = []

        lines.append(str(type(self)))
        lines.append(self.index.summary())

        if len(self.columns) == 0:
            lines.append('Empty %s' % type(self).__name__)
            _put_lines(buf, lines)
            return

        cols = self.columns

        # hack
        if max_cols is None:
            max_cols = get_option(
                'display.max_info_columns', len(self.columns) + 1)

        max_rows = get_option('display.max_info_rows', len(self) + 1)

        # Per-column non-null counts are only computed for small-enough
        # frames, since ``count()`` is O(rows * cols).
        show_counts = ((len(self.columns) <= max_cols) and
                       (len(self) < max_rows))
        exceeds_info_cols = len(self.columns) > max_cols

        def _verbose_repr():
            # One line per column: name padded to a common width, then
            # optionally the non-null count, then the dtype.
            lines.append('Data columns (total %d columns):' %
                         len(self.columns))
            space = max([len(com.pprint_thing(k)) for k in self.columns]) + 4
            counts = None

            tmpl = "%s%s"
            if show_counts:
                counts = self.count()
                if len(cols) != len(counts):  # pragma: no cover
                    raise AssertionError('Columns must equal counts (%d != %d)' %
                                         (len(cols), len(counts)))
                tmpl = "%s non-null %s"

            dtypes = self.dtypes
            for i, col in enumerate(self.columns):
                dtype = dtypes[col]
                col = com.pprint_thing(col)

                count = ""
                if show_counts:
                    count = counts.iloc[i]

                lines.append(_put_str(col, space) +
                             tmpl % (count, dtype))

        def _non_verbose_repr():
            # Compact alternative: just a summary of the column index.
            lines.append(self.columns.summary(name='Columns'))

        if verbose:
            _verbose_repr()
        elif verbose is False:  # specifically set to False, not nesc None
            _non_verbose_repr()
        else:
            # verbose is None: let the display option decide.
            if exceeds_info_cols:
                _non_verbose_repr()
            else:
                _verbose_repr()

        counts = self.get_dtype_counts()
        dtypes = ['%s(%d)' % k for k in sorted(compat.iteritems(counts))]
        lines.append('dtypes: %s' % ', '.join(dtypes))
        _put_lines(buf, lines)
    def transpose(self):
        """Transpose index and columns.

        Delegates to the generic NDFrame transpose with the two axes
        swapped; rows become columns and columns become rows.
        """
        return super(DataFrame, self).transpose(1, 0)

    # ``df.T`` is the conventional shorthand for ``df.transpose()``.
    T = property(transpose)
#----------------------------------------------------------------------
# Picklability
# legacy pickle formats
    def _unpickle_frame_compat(self, state):  # pragma: no cover
        """Restore self from a legacy (pre-BlockManager) pickle ``state``."""
        from pandas.core.common import _unpickle_array
        if len(state) == 2:  # pragma: no cover
            # Oldest format: (series dict, index); columns come from the keys.
            series, idx = state
            columns = sorted(series)
        else:
            series, cols, idx = state
            columns = _unpickle_array(cols)

        index = _unpickle_array(idx)
        self._data = self._init_dict(series, index, columns, None)
    def _unpickle_matrix_compat(self, state):  # pragma: no cover
        """Restore self from a legacy DataMatrix-style pickle ``state``.

        The old format kept numeric values and object-dtype values in two
        separate (values, index, columns) triples that are joined here.
        """
        from pandas.core.common import _unpickle_array
        # old unpickling
        (vals, idx, cols), object_state = state

        index = _unpickle_array(idx)
        dm = DataFrame(vals, index=index, columns=_unpickle_array(cols),
                       copy=False)

        if object_state is not None:
            ovals, _, ocols = object_state
            objects = DataFrame(ovals, index=index,
                                columns=_unpickle_array(ocols),
                                copy=False)

            dm = dm.join(objects)

        self._data = dm._data
#----------------------------------------------------------------------
#----------------------------------------------------------------------
# Getting and setting elements
    def get_value(self, index, col, takeable=False):
        """
        Quickly retrieve single value at passed column and index

        Parameters
        ----------
        index : row label
        col : column label
        takeable : interpret the index/col as indexers, default False

        Returns
        -------
        value : scalar value
        """
        if takeable:
            # Positional fast path: integer row lookup into the column array.
            series = self._iget_item_cache(col)
            return series.values[index]

        # Label-based path: let the index engine resolve the row label.
        series = self._get_item_cache(col)
        engine = self.index._engine
        return engine.get_value(series.get_values(), index)
    def set_value(self, index, col, value, takeable=False):
        """
        Put single value at passed column and index

        Parameters
        ----------
        index : row label
        col : column label
        value : scalar value
        takeable : interpret the index/col as indexers, default False

        Returns
        -------
        frame : DataFrame
            If label pair is contained, will be reference to calling DataFrame,
            otherwise a new object
        """
        try:
            if takeable is True:
                # Positional fast path.
                series = self._iget_item_cache(col)
                return series.set_value(index, value, takeable=True)

            # Label-based in-place write via the index engine.
            series = self._get_item_cache(col)
            engine = self.index._engine
            engine.set_value(series.values, index, value)
            return self
        except (KeyError, TypeError):
            # Label not present (or not settable in place): fall back to
            # .loc-based enlargement, then drop the stale cached column.

            # set using a non-recursive method & reset the cache
            self.loc[index, col] = value
            self._item_cache.pop(col, None)

            return self
    def irow(self, i, copy=False):
        """Return row(s) at integer position ``i`` (legacy positional API).

        Note: the ``copy`` argument is accepted for backwards compatibility
        but is ignored; ``_ixs`` decides copy/view semantics itself.
        """
        return self._ixs(i, axis=0)
    def icol(self, i):
        """Return column(s) at integer position ``i`` (legacy positional API)."""
        return self._ixs(i, axis=1)
    def _ixs(self, i, axis=0):
        """
        Positional indexer shared by ``irow``/``icol``.

        i : int, slice, or sequence of integers
        axis : int
            0 selects rows, anything else selects columns.
        """
        # irow
        if axis == 0:

            """
            Notes
            -----
            If slice passed, the resulting data will be a view
            """

            if isinstance(i, slice):
                return self[i]
            else:
                label = self.index[i]
                if isinstance(label, Index):
                    # a location index by definition
                    result = self.take(i, axis=axis)
                    copy=True
                else:
                    # Single row: cross-section through the block manager.
                    new_values = self._data.fast_xs(i)

                    # if we are a copy, mark as such
                    copy = isinstance(new_values,np.ndarray) and new_values.base is None
                    result = Series(new_values, index=self.columns,
                                    name=self.index[i], dtype=new_values.dtype)
                    result._set_is_copy(self, copy=copy)
                return result

        # icol
        else:

            """
            Notes
            -----
            If slice passed, the resulting data will be a view
            """

            label = self.columns[i]
            if isinstance(i, slice):
                # need to return view
                lab_slice = slice(label[0], label[-1])
                return self.ix[:, lab_slice]
            else:
                label = self.columns[i]
                if isinstance(label, Index):
                    return self.take(i, axis=1, convert=True)

                # if the values returned are not the same length
                # as the index (iow a not found value), iget returns
                # a 0-len ndarray. This is effectively catching
                # a numpy error (as numpy should really raise)
                values = self._data.iget(i)
                if not len(values):
                    values = np.array([np.nan] * len(self.index), dtype=object)
                result = self._constructor_sliced.from_array(
                    values, index=self.index,
                    name=label, fastpath=True)

                # this is a cached value, mark it so
                result._set_as_cached(label, self)

                return result
    def iget_value(self, i, j):
        """Return the scalar at integer position (row ``i``, column ``j``)."""
        return self.iat[i, j]
def __getitem__(self, key):
# shortcut if we are an actual column
is_mi_columns = isinstance(self.columns, MultiIndex)
try:
if key in self.columns and not is_mi_columns:
return self._getitem_column(key)
except:
pass
# see if we can slice the rows
indexer = _convert_to_index_sliceable(self, key)
if indexer is not None:
return self._getitem_slice(indexer)
if isinstance(key, (Series, np.ndarray, list)):
# either boolean or fancy integer index
return self._getitem_array(key)
elif isinstance(key, DataFrame):
return self._getitem_frame(key)
elif is_mi_columns:
return self._getitem_multilevel(key)
else:
return self._getitem_column(key)
    def _getitem_column(self, key):
        """ return the actual column """

        # get column
        if self.columns.is_unique:
            return self._get_item_cache(key)

        # duplicate columns & possible reduce dimensionaility
        # (selecting a duplicated label returns a DataFrame; re-select to
        # collapse it back to a Series when the result is unique)
        result = self._constructor(self._data.get(key))
        if result.columns.is_unique:
            result = result[key]

        return result
    def _getitem_slice(self, key):
        """Row-wise slice; returns a view where possible."""
        return self._slice(key, axis=0)
    def _getitem_array(self, key):
        """Select with a boolean mask (rows) or fancy indexer (columns)."""
        # also raises Exception if object array with NA values
        if com._is_bool_indexer(key):
            # warning here just in case -- previously __setitem__ was
            # reindexing but __getitem__ was not; it seems more reasonable to
            # go with the __setitem__ behavior since that is more consistent
            # with all other indexing behavior
            if isinstance(key, Series) and not key.index.equals(self.index):
                warnings.warn("Boolean Series key will be reindexed to match "
                              "DataFrame index.", UserWarning)
            elif len(key) != len(self.index):
                raise ValueError('Item wrong length %d instead of %d.' %
                                 (len(key), len(self.index)))
            # _check_bool_indexer will throw exception if Series key cannot
            # be reindexed to match DataFrame rows
            key = _check_bool_indexer(self.index, key)
            indexer = key.nonzero()[0]
            return self.take(indexer, axis=0, convert=False)
        else:
            # Non-boolean array: treat as a column selector.
            indexer = self.ix._convert_to_indexer(key, axis=1)
            return self.take(indexer, axis=1, convert=True)
    def _getitem_multilevel(self, key):
        """Select from MultiIndex columns; a partial key returns a frame."""
        loc = self.columns.get_loc(key)
        if isinstance(loc, (slice, Series, np.ndarray)):
            # Partial key matched several columns: drop the consumed levels.
            new_columns = self.columns[loc]
            result_columns = _maybe_droplevels(new_columns, key)
            if self._is_mixed_type:
                result = self.reindex(columns=new_columns)
                result.columns = result_columns
            else:
                new_values = self.values[:, loc]
                result = DataFrame(new_values, index=self.index,
                                   columns=result_columns).__finalize__(self)
            if len(result.columns) == 1:
                top = result.columns[0]
                # NOTE(review): exact-type checks (``type(top) == str``) look
                # deliberate here -- unicode/str behave differently on py2,
                # so do not replace with isinstance without verifying.
                if ((type(top) == str and top == '') or
                        (type(top) == tuple and top[0] == '')):
                    result = result['']
                    if isinstance(result, Series):
                        result = Series(result, index=self.index, name=key)

            result._set_is_copy(self)
            return result
        else:
            return self._get_item_cache(key)
    def _getitem_frame(self, key):
        """Mask self with a boolean DataFrame; non-True cells become NaN."""
        if key.values.dtype != np.bool_:
            raise ValueError('Must pass DataFrame with boolean values only')
        return self.where(key)
    def query(self, expr, **kwargs):
        """Query the columns of a frame with a boolean expression.

        .. versionadded:: 0.13

        Parameters
        ----------
        expr : string
            The query string to evaluate.  You can refer to variables
            in the environment by prefixing them with an '@' character like
            ``@a + b``.
        kwargs : dict
            See the documentation for :func:`pandas.eval` for complete details
            on the keyword arguments accepted by :meth:`DataFrame.query`.

        Returns
        -------
        q : DataFrame

        Notes
        -----
        The result of the evaluation of this expression is first passed to
        :attr:`DataFrame.loc` and if that fails because of a
        multidimensional key (e.g., a DataFrame) then the result will be passed
        to :meth:`DataFrame.__getitem__`.

        This method uses the top-level :func:`pandas.eval` function to
        evaluate the passed query.

        The :meth:`~pandas.DataFrame.query` method uses a slightly
        modified Python syntax by default. For example, the ``&`` and ``|``
        (bitwise) operators have the precedence of their boolean cousins,
        :keyword:`and` and :keyword:`or`. This *is* syntactically valid Python,
        however the semantics are different.

        You can change the semantics of the expression by passing the keyword
        argument ``parser='python'``. This enforces the same semantics as
        evaluation in Python space. Likewise, you can pass ``engine='python'``
        to evaluate an expression using Python itself as a backend. This is not
        recommended as it is inefficient compared to using ``numexpr`` as the
        engine.

        The :attr:`DataFrame.index` and
        :attr:`DataFrame.columns` attributes of the
        :class:`~pandas.DataFrame` instance are placed in the query namespace
        by default, which allows you to treat both the index and columns of the
        frame as a column in the frame.
        The identifier ``index`` is used for the frame index; you can also
        use the name of the index to identify it in a query.

        For further details and examples see the ``query`` documentation in
        :ref:`indexing <indexing.query>`.

        See Also
        --------
        pandas.eval
        DataFrame.eval

        Examples
        --------
        >>> from numpy.random import randn
        >>> from pandas import DataFrame
        >>> df = DataFrame(randn(10, 2), columns=list('ab'))
        >>> df.query('a > b')
        >>> df[df.a > df.b]  # same result as the previous expression
        """
        # Bump the stack level so '@variable' references resolve in the
        # caller's frame, not in this method.
        kwargs['level'] = kwargs.pop('level', 0) + 1
        res = self.eval(expr, **kwargs)

        try:
            return self.loc[res]
        except ValueError:
            # when res is multi-dimensional loc raises, but this is sometimes a
            # valid query
            return self[res]
    def eval(self, expr, **kwargs):
        """Evaluate an expression in the context of the calling DataFrame
        instance.

        Parameters
        ----------
        expr : string
            The expression string to evaluate.
        kwargs : dict
            See the documentation for :func:`~pandas.eval` for complete details
            on the keyword arguments accepted by
            :meth:`~pandas.DataFrame.query`.

        Returns
        -------
        ret : ndarray, scalar, or pandas object

        See Also
        --------
        pandas.DataFrame.query
        pandas.eval

        Notes
        -----
        For more details see the API documentation for :func:`~pandas.eval`.
        For detailed examples see :ref:`enhancing performance with eval
        <enhancingperf.eval>`.

        Examples
        --------
        >>> from numpy.random import randn
        >>> from pandas import DataFrame
        >>> df = DataFrame(randn(10, 2), columns=list('ab'))
        >>> df.eval('a + b')
        >>> df.eval('c = a + b')
        """
        resolvers = kwargs.pop('resolvers', None)
        # Bump the stack level so '@variable' references resolve in the
        # caller's frame, not in this method.
        kwargs['level'] = kwargs.pop('level', 0) + 1
        if resolvers is None:
            # Default namespace: the frame's columns plus index resolvers.
            index_resolvers = self._get_index_resolvers()
            resolvers = dict(self.iteritems()), index_resolvers
        kwargs['target'] = self
        kwargs['resolvers'] = kwargs.get('resolvers', ()) + resolvers
        return _eval(expr, **kwargs)
    def select_dtypes(self, include=None, exclude=None):
        """Return a subset of a DataFrame including/excluding columns based on
        their ``dtype``.
        Parameters
        ----------
        include, exclude : list-like
            A list of dtypes or strings to be included/excluded. You must pass
            in a non-empty sequence for at least one of these.
        Raises
        ------
        ValueError
            * If both of ``include`` and ``exclude`` are empty
            * If ``include`` and ``exclude`` have overlapping elements
            * If any kind of string dtype is passed in.
        TypeError
            * If either of ``include`` or ``exclude`` is not a sequence
        Returns
        -------
        subset : DataFrame
            The subset of the frame including the dtypes in ``include`` and
            excluding the dtypes in ``exclude``.
        Notes
        -----
        * To select all *numeric* types use the numpy dtype ``numpy.number``
        * To select strings you must use the ``object`` dtype, but note that
          this will return *all* object dtype columns
        * See the `numpy dtype hierarchy
          <http://docs.scipy.org/doc/numpy/reference/arrays.scalars.html>`__
        Examples
        --------
        >>> df = pd.DataFrame({'a': np.random.randn(6).astype('f4'),
        ...                    'b': [True, False] * 3,
        ...                    'c': [1.0, 2.0] * 3})
        >>> df
                a      b  c
        0  0.3962   True  1
        1  0.1459  False  2
        2  0.2623   True  1
        3  0.0764  False  2
        4 -0.9703   True  1
        5 -1.2094  False  2
        >>> df.select_dtypes(include=['float64'])
           c
        0  1
        1  2
        2  1
        3  2
        4  1
        5  2
        >>> df.select_dtypes(exclude=['floating'])
               b
        0   True
        1  False
        2   True
        3  False
        4   True
        5  False
        """
        # normalize None -> empty tuple so both arguments are sequences
        include, exclude = include or (), exclude or ()
        if not (com.is_list_like(include) and com.is_list_like(exclude)):
            raise TypeError('include and exclude must both be non-string'
                            ' sequences')
        selection = tuple(map(frozenset, (include, exclude)))
        if not any(selection):
            raise ValueError('at least one of include or exclude must be '
                             'nonempty')
        # convert the myriad valid dtypes object to a single representation
        include, exclude = map(lambda x:
                               frozenset(map(com._get_dtype_from_object, x)),
                               selection)
        for dtypes in (include, exclude):
            com._invalidate_string_dtypes(dtypes)
        # can't both include AND exclude!
        if not include.isdisjoint(exclude):
            raise ValueError('include and exclude overlap on %s'
                             % (include & exclude))
        # empty include/exclude -> defaults to True
        # three cases (we've already raised if both are empty)
        # case 1: empty include, nonempty exclude
        # we have True, True, ... True for include, same for exclude
        # in the loop below we get the excluded
        # and when we call '&' below we get only the excluded
        # case 2: nonempty include, empty exclude
        # same as case 1, but with include
        # case 3: both nonempty
        # the "union" of the logic of case 1 and case 2:
        # we get the included and excluded, and return their logical and
        include_these = Series(not bool(include), index=self.columns)
        exclude_these = Series(not bool(exclude), index=self.columns)
        def is_dtype_instance_mapper(column, dtype):
            # pair each column with a predicate testing dtype subclassing
            return column, functools.partial(issubclass, dtype.type)
        for column, f in itertools.starmap(is_dtype_instance_mapper,
                                           self.dtypes.iteritems()):
            if include:  # checks for the case of empty include or exclude
                include_these[column] = any(map(f, include))
            if exclude:
                exclude_these[column] = not any(map(f, exclude))
        # a column survives only if it is included AND not excluded
        dtype_indexer = include_these & exclude_these
        return self.loc[com._get_info_slice(self, dtype_indexer)]
def _box_item_values(self, key, values):
items = self.columns[self.columns.get_loc(key)]
if values.ndim == 2:
return self._constructor(values.T, columns=items, index=self.index)
else:
return self._box_col_values(values, items)
def _box_col_values(self, values, items):
""" provide boxed values for a column """
return self._constructor_sliced.from_array(values, index=self.index,
name=items, fastpath=True)
def __setitem__(self, key, value):
# see if we can slice the rows
indexer = _convert_to_index_sliceable(self, key)
if indexer is not None:
return self._setitem_slice(indexer, value)
if isinstance(key, (Series, np.ndarray, list)):
self._setitem_array(key, value)
elif isinstance(key, DataFrame):
self._setitem_frame(key, value)
else:
# set column
self._set_item(key, value)
    def _setitem_slice(self, key, value):
        # row-wise assignment through a slice; warn/raise on chained
        # assignment first, then delegate to the indexer machinery
        self._check_setitem_copy()
        self.ix._setitem_with_indexer(key, value)
    def _setitem_array(self, key, value):
        # boolean-array key: assign to the selected rows;
        # otherwise treat key as a collection of column labels
        # also raises Exception if object array with NA values
        if com._is_bool_indexer(key):
            if len(key) != len(self.index):
                raise ValueError('Item wrong length %d instead of %d!' %
                                 (len(key), len(self.index)))
            key = _check_bool_indexer(self.index, key)
            # positions of the True entries
            indexer = key.nonzero()[0]
            self._check_setitem_copy()
            self.ix._setitem_with_indexer(indexer, value)
        else:
            if isinstance(value, DataFrame):
                # match the value's columns 1:1 against the key labels
                if len(value.columns) != len(key):
                    raise ValueError('Columns must be same length as key')
                for k1, k2 in zip(key, value.columns):
                    self[k1] = value[k2]
            else:
                # broadcast/assign across the selected columns
                indexer = self.ix._convert_to_indexer(key, axis=1)
                self._check_setitem_copy()
                self.ix._setitem_with_indexer((slice(None), indexer), value)
def _setitem_frame(self, key, value):
# support boolean setting with DataFrame input, e.g.
# df[df > df2] = 0
if key.values.dtype != np.bool_:
raise TypeError('Must pass DataFrame with boolean values only')
self._check_inplace_setting(value)
self._check_setitem_copy()
self.where(-key, value, inplace=True)
    def _ensure_valid_index(self, value):
        """
        ensure that if we don't have an index, that we can create one from the
        passed value
        """
        if not len(self.index):
            # GH5632, make sure that we are a Series convertible
            if is_list_like(value):
                try:
                    value = Series(value)
                except:
                    pass
                if not isinstance(value, Series):
                    raise ValueError('Cannot set a frame with no defined index '
                                     'and a value that cannot be converted to a '
                                     'Series')
                # adopt the value's index as the frame's index; reindex the
                # (empty) columns so the block manager stays consistent
                self._data = self._data.reindex_axis(value.index.copy(), axis=1,
                                                     fill_value=np.nan)
        # we are a scalar
        # noop
        else:
            pass
    def _set_item(self, key, value):
        """
        Add series to DataFrame in specified column.
        If series is a numpy-array (not a Series/TimeSeries), it must be the
        same length as the DataFrames index or an error will be thrown.
        Series/TimeSeries will be conformed to the DataFrames index to
        ensure homogeneity.
        """
        self._ensure_valid_index(value)
        # coerce/align the value into an ndarray suitable for storage
        value = self._sanitize_column(key, value)
        NDFrame._set_item(self, key, value)
        # check if we are modifying a copy
        # try to set first as we want an invalid
        # value exeption to occur first
        if len(self):
            self._check_setitem_copy()
    def insert(self, loc, column, value, allow_duplicates=False):
        """
        Insert column into DataFrame at specified location.
        If `allow_duplicates` is False, raises Exception if column
        is already contained in the DataFrame.
        Parameters
        ----------
        loc : int
            Must have 0 <= loc <= len(columns)
        column : object
            Label of the inserted column.
        value : int, Series, or array-like
        allow_duplicates : bool, default False
            Permit inserting a column label that already exists.
        """
        self._ensure_valid_index(value)
        # coerce/align the value exactly like __setitem__ does
        value = self._sanitize_column(column, value)
        self._data.insert(
            loc, column, value, allow_duplicates=allow_duplicates)
    def _sanitize_column(self, key, value):
        """Coerce ``value`` into an ndarray (or Categorical) suitable for
        storage as column ``key``, aligning on the index and always copying."""
        # Need to make sure new columns (which go into the BlockManager as new
        # blocks) are always copied
        def reindexer(value):
            # reindex if necessary
            if value.index.equals(self.index) or not len(self.index):
                value = value.values.copy()
            else:
                # GH 4107
                try:
                    value = value.reindex(self.index).values
                except Exception as e:
                    # duplicate axis
                    if not value.index.is_unique:
                        raise e
                    # other
                    raise TypeError('incompatible index of inserted column '
                                    'with frame index')
            return value
        if isinstance(value, Series):
            value = reindexer(value)
        elif isinstance(value, DataFrame):
            # align on the index, then store with columns along axis 0
            value = reindexer(value).T
        elif isinstance(value, Categorical):
            value = value.copy()
        elif (isinstance(value, Index) or _is_sequence(value)):
            from pandas.core.series import _sanitize_index
            value = _sanitize_index(value, self.index, copy=False)
            if not isinstance(value, (np.ndarray, Index)):
                if isinstance(value, list) and len(value) > 0:
                    value = com._possibly_convert_platform(value)
                else:
                    value = com._asarray_tuplesafe(value)
            elif value.ndim == 2:
                value = value.copy().T
            else:
                value = value.copy()
        else:
            # upcast the scalar
            dtype, value = _infer_dtype_from_scalar(value)
            value = np.repeat(value, len(self.index)).astype(dtype)
            value = com._possibly_cast_to_datetime(value, dtype)
        # return categoricals directly
        if isinstance(value, Categorical):
            return value
        # broadcast across multiple columns if necessary
        if key in self.columns and value.ndim == 1:
            if not self.columns.is_unique or isinstance(self.columns,
                                                        MultiIndex):
                existing_piece = self[key]
                if isinstance(existing_piece, DataFrame):
                    # key selects several columns: tile the 1-D value so each
                    # selected column receives a copy
                    value = np.tile(value, (len(existing_piece.columns), 1))
        return np.atleast_2d(np.asarray(value))
@property
def _series(self):
result = {}
for idx, item in enumerate(self.columns):
result[item] = Series(self._data.iget(idx), index=self.index,
name=item)
return result
    def lookup(self, row_labels, col_labels):
        """Label-based "fancy indexing" function for DataFrame.
        Given equal-length arrays of row and column labels, return an
        array of the values corresponding to each (row, col) pair.
        Parameters
        ----------
        row_labels : sequence
            The row labels to use for lookup
        col_labels : sequence
            The column labels to use for lookup
        Returns
        -------
        values : ndarray
            The found values
        Raises
        ------
        ValueError
            If the two label sequences differ in length.
        KeyError
            If any row or column label is not found.
        Notes
        -----
        Akin to::
            result = []
            for row, col in zip(row_labels, col_labels):
                result.append(df.get_value(row, col))
        """
        n = len(row_labels)
        if n != len(col_labels):
            raise ValueError('Row labels must have same size as column labels')
        thresh = 1000
        # fast vectorized path: flat-index into the 2-D values array.
        # only for homogeneous frames (or large lookups, where the per-cell
        # path would be too slow even at the cost of a dtype upcast)
        if not self._is_mixed_type or n > thresh:
            values = self.values
            ridx = self.index.get_indexer(row_labels)
            cidx = self.columns.get_indexer(col_labels)
            if (ridx == -1).any():
                raise KeyError('One or more row labels was not found')
            if (cidx == -1).any():
                raise KeyError('One or more column labels was not found')
            flat_index = ridx * len(self.columns) + cidx
            result = values.flat[flat_index]
        else:
            # small mixed-type frame: fetch cell by cell to preserve dtypes
            result = np.empty(n, dtype='O')
            for i, (r, c) in enumerate(zip(row_labels, col_labels)):
                result[i] = self.get_value(r, c)
        if result.dtype == 'O':
            result = lib.maybe_convert_objects(result)
        return result
#----------------------------------------------------------------------
# Reindexing and alignment
def _reindex_axes(self, axes, level, limit, method, fill_value, copy):
frame = self
columns = axes['columns']
if columns is not None:
frame = frame._reindex_columns(columns, copy, level, fill_value,
limit)
index = axes['index']
if index is not None:
frame = frame._reindex_index(index, method, copy, level,
fill_value, limit)
return frame
    def _reindex_index(self, new_index, method, copy, level, fill_value=NA,
                       limit=None):
        # compute the target index plus a positional indexer, then apply it
        # through the shared NDFrame machinery
        new_index, indexer = self.index.reindex(new_index, method, level,
                                                limit=limit,
                                                copy_if_needed=True)
        return self._reindex_with_indexers({0: [new_index, indexer]},
                                           copy=copy, fill_value=fill_value,
                                           allow_dups=False)
    def _reindex_columns(self, new_columns, copy, level, fill_value=NA,
                         limit=None):
        # same as _reindex_index, but along axis 1 (no fill method supported)
        new_columns, indexer = self.columns.reindex(new_columns, level=level,
                                                    limit=limit,
                                                    copy_if_needed=True)
        return self._reindex_with_indexers({1: [new_columns, indexer]},
                                           copy=copy, fill_value=fill_value,
                                           allow_dups=False)
    def _reindex_multi(self, axes, copy, fill_value):
        """ we are guaranteed non-Nones in the axes! """
        new_index, row_indexer = self.index.reindex(axes['index'])
        new_columns, col_indexer = self.columns.reindex(axes['columns'])
        if row_indexer is not None and col_indexer is not None:
            # both axes actually change: take rows and columns in a single
            # 2-D operation
            indexer = row_indexer, col_indexer
            new_values = com.take_2d_multi(self.values, indexer,
                                           fill_value=fill_value)
            return self._constructor(new_values, index=new_index,
                                     columns=new_columns)
        else:
            # at least one axis is already conformed; fall back to the
            # per-axis path
            return self._reindex_with_indexers({0: [new_index, row_indexer],
                                                1: [new_columns, col_indexer]},
                                               copy=copy,
                                               fill_value=fill_value)
    @Appender(_shared_docs['reindex'] % _shared_doc_kwargs)
    def reindex(self, index=None, columns=None, **kwargs):
        # thin wrapper over NDFrame.reindex; the docstring is attached from
        # the shared template by @Appender
        return super(DataFrame, self).reindex(index=index, columns=columns,
                                              **kwargs)
    @Appender(_shared_docs['reindex_axis'] % _shared_doc_kwargs)
    def reindex_axis(self, labels, axis=0, method=None, level=None, copy=True,
                     limit=None, fill_value=np.nan):
        # thin wrapper over NDFrame.reindex_axis; docstring via @Appender
        return super(DataFrame, self).reindex_axis(labels=labels, axis=axis,
                                                   method=method, level=level,
                                                   copy=copy, limit=limit,
                                                   fill_value=fill_value)
    @Appender(_shared_docs['rename'] % _shared_doc_kwargs)
    def rename(self, index=None, columns=None, **kwargs):
        # thin wrapper over NDFrame.rename; docstring via @Appender
        return super(DataFrame, self).rename(index=index, columns=columns,
                                             **kwargs)
    def set_index(self, keys, drop=True, append=False, inplace=False,
                  verify_integrity=False):
        """
        Set the DataFrame index (row labels) using one or more existing
        columns. By default yields a new object.
        Parameters
        ----------
        keys : column label or list of column labels / arrays
        drop : boolean, default True
            Delete columns to be used as the new index
        append : boolean, default False
            Whether to append columns to existing index
        inplace : boolean, default False
            Modify the DataFrame in place (do not create a new object)
        verify_integrity : boolean, default False
            Check the new index for duplicates. Otherwise defer the check until
            necessary. Setting to False will improve the performance of this
            method
        Examples
        --------
        >>> indexed_df = df.set_index(['A', 'B'])
        >>> indexed_df2 = df.set_index(['A', [0, 1, 2, 0, 1, 2]])
        >>> indexed_df3 = df.set_index([[0, 1, 2, 0, 1, 2]])
        Returns
        -------
        dataframe : DataFrame
        """
        if not isinstance(keys, list):
            keys = [keys]
        if inplace:
            frame = self
        else:
            frame = self.copy()
        # collect one array (and one name) per level of the new index
        arrays = []
        names = []
        if append:
            # start from the existing index levels
            names = [x for x in self.index.names]
            if isinstance(self.index, MultiIndex):
                for i in range(self.index.nlevels):
                    arrays.append(self.index.get_level_values(i))
            else:
                arrays.append(self.index)
        to_remove = []
        for col in keys:
            if isinstance(col, MultiIndex):
                # append all but the last column so we don't have to modify
                # the end of this loop
                for n in range(col.nlevels - 1):
                    arrays.append(col.get_level_values(n))
                level = col.get_level_values(col.nlevels - 1)
                names.extend(col.names)
            elif isinstance(col, Series):
                level = col.values
                names.append(col.name)
            elif isinstance(col, Index):
                level = col
                names.append(col.name)
            elif isinstance(col, (list, np.ndarray)):
                level = col
                names.append(None)
            else:
                # plain column label: take the column's values
                level = frame[col].values
                names.append(col)
            if drop:
                # NOTE(review): array-like/Series keys also land in
                # to_remove here; 'del frame[c]' below presumably only
                # works for label keys -- confirm intended behavior
                to_remove.append(col)
            arrays.append(level)
        index = MultiIndex.from_arrays(arrays, names=names)
        if verify_integrity and not index.is_unique:
            duplicates = index.get_duplicates()
            raise ValueError('Index has duplicate keys: %s' % duplicates)
        for c in to_remove:
            del frame[c]
        # clear up memory usage
        index._cleanup()
        frame.index = index
        if not inplace:
            return frame
    def reset_index(self, level=None, drop=False, inplace=False, col_level=0,
                    col_fill=''):
        """
        For DataFrame with multi-level index, return new DataFrame with
        labeling information in the columns under the index names, defaulting
        to 'level_0', 'level_1', etc. if any are None. For a standard index,
        the index name will be used (if set), otherwise a default 'index' or
        'level_0' (if 'index' is already taken) will be used.
        Parameters
        ----------
        level : int, str, tuple, or list, default None
            Only remove the given levels from the index. Removes all levels by
            default
        drop : boolean, default False
            Do not try to insert index into dataframe columns. This resets
            the index to the default integer index.
        inplace : boolean, default False
            Modify the DataFrame in place (do not create a new object)
        col_level : int or str, default 0
            If the columns have multiple levels, determines which level the
            labels are inserted into. By default it is inserted into the first
            level.
        col_fill : object, default ''
            If the columns have multiple levels, determines how the other
            levels are named. If None then the index name is repeated.
        Returns
        -------
        resetted : DataFrame
        """
        if inplace:
            new_obj = self
        else:
            new_obj = self.copy()
        def _maybe_casted_values(index, labels=None):
            # extract the level's values, preserving dtype where possible;
            # Period/tz-aware indexes are boxed as objects
            if isinstance(index, PeriodIndex):
                values = index.asobject
            elif (isinstance(index, DatetimeIndex) and
                  index.tz is not None):
                values = index.asobject
            else:
                values = index.values
                if values.dtype == np.object_:
                    values = lib.maybe_convert_objects(values)
            # if we have the labels, extract the values with a mask
            if labels is not None:
                mask = labels == -1
                values = values.take(labels)
                if mask.any():
                    # -1 label means missing: upcast and write NaN there
                    values, changed = com._maybe_upcast_putmask(values,
                                                                mask, np.nan)
            return values
        # default replacement index: plain integer range
        new_index = np.arange(len(new_obj),dtype='int64')
        if isinstance(self.index, MultiIndex):
            if level is not None:
                if not isinstance(level, (tuple, list)):
                    level = [level]
                level = [self.index._get_level_number(lev) for lev in level]
                if len(level) < len(self.index.levels):
                    # only some levels removed: keep the rest as the index
                    new_index = self.index.droplevel(level)
            if not drop:
                names = self.index.names
                zipped = lzip(self.index.levels, self.index.labels)
                multi_col = isinstance(self.columns, MultiIndex)
                # insert at position 0 in reverse so final column order
                # matches the level order
                for i, (lev, lab) in reversed(list(enumerate(zipped))):
                    col_name = names[i]
                    if col_name is None:
                        col_name = 'level_%d' % i
                    if multi_col:
                        if col_fill is None:
                            col_name = tuple([col_name] *
                                             self.columns.nlevels)
                        else:
                            name_lst = [col_fill] * self.columns.nlevels
                            lev_num = self.columns._get_level_number(col_level)
                            name_lst[lev_num] = col_name
                            col_name = tuple(name_lst)
                    # to ndarray and maybe infer different dtype
                    level_values = _maybe_casted_values(lev, lab)
                    if level is None or i in level:
                        new_obj.insert(0, col_name, level_values)
        elif not drop:
            name = self.index.name
            if name is None or name == 'index':
                name = 'index' if 'index' not in self else 'level_0'
            if isinstance(self.columns, MultiIndex):
                if col_fill is None:
                    name = tuple([name] * self.columns.nlevels)
                else:
                    name_lst = [col_fill] * self.columns.nlevels
                    lev_num = self.columns._get_level_number(col_level)
                    name_lst[lev_num] = name
                    name = tuple(name_lst)
            values = _maybe_casted_values(self.index)
            new_obj.insert(0, name, values)
        new_obj.index = new_index
        if not inplace:
            return new_obj
delevel = deprecate('delevel', reset_index)
#----------------------------------------------------------------------
# Reindex-based selection methods
    def dropna(self, axis=0, how='any', thresh=None, subset=None,
               inplace=False):
        """
        Return object with labels on given axis omitted where alternately any
        or all of the data are missing
        Parameters
        ----------
        axis : {0, 1}, or tuple/list thereof
            Pass tuple or list to drop on multiple axes
        how : {'any', 'all'}
            * any : if any NA values are present, drop that label
            * all : if all values are NA, drop that label
        thresh : int, default None
            int value : require that many non-NA values
        subset : array-like
            Labels along other axis to consider, e.g. if you are dropping rows
            these would be a list of columns to include
        inplace : boolean, defalt False
            If True, do operation inplace and return None.
        Returns
        -------
        dropped : DataFrame
        """
        if isinstance(axis, (tuple, list)):
            # apply sequentially to each requested axis
            result = self
            for ax in axis:
                result = result.dropna(how=how, thresh=thresh,
                                       subset=subset, axis=ax)
        else:
            axis = self._get_axis_number(axis)
            # the axis we count non-NA values along (the *other* axis)
            agg_axis = 1 - axis
            agg_obj = self
            if subset is not None:
                # restrict the non-NA count to the requested labels
                ax = self._get_axis(agg_axis)
                agg_obj = self.take(ax.get_indexer_for(subset),axis=agg_axis)
            count = agg_obj.count(axis=agg_axis)
            if thresh is not None:
                mask = count >= thresh
            elif how == 'any':
                # keep only fully populated labels
                mask = count == len(agg_obj._get_axis(agg_axis))
            elif how == 'all':
                # keep labels with at least one non-NA value
                mask = count > 0
            else:
                if how is not None:
                    raise ValueError('invalid how option: %s' % how)
                else:
                    raise TypeError('must specify how or thresh')
            result = self.take(mask.nonzero()[0], axis=axis, convert=False)
        if inplace:
            self._update_inplace(result)
        else:
            return result
@deprecate_kwarg(old_arg_name='cols', new_arg_name='subset')
def drop_duplicates(self, subset=None, take_last=False, inplace=False):
"""
Return DataFrame with duplicate rows removed, optionally only
considering certain columns
Parameters
----------
subset : column label or sequence of labels, optional
Only consider certain columns for identifying duplicates, by
default use all of the columns
take_last : boolean, default False
Take the last observed row in a row. Defaults to the first row
inplace : boolean, default False
Whether to drop duplicates in place or to return a copy
cols : kwargs only argument of subset [deprecated]
Returns
-------
deduplicated : DataFrame
"""
duplicated = self.duplicated(subset, take_last=take_last)
if inplace:
inds, = (-duplicated).nonzero()
new_data = self._data.take(inds)
self._update_inplace(new_data)
else:
return self[-duplicated]
    @deprecate_kwarg(old_arg_name='cols', new_arg_name='subset')
    def duplicated(self, subset=None, take_last=False):
        """
        Return boolean Series denoting duplicate rows, optionally only
        considering certain columns
        Parameters
        ----------
        subset : column label or sequence of labels, optional
            Only consider certain columns for identifying duplicates, by
            default use all of the columns
        take_last : boolean, default False
            Take the last observed row in a row. Defaults to the first row
        cols : kwargs only argument of subset [deprecated]
        Returns
        -------
        duplicated : Series
        """
        # kludge for #1833
        def _m8_to_i8(x):
            # view datetime64 data as int64 so it can be hashed/zipped
            if issubclass(x.dtype.type, np.datetime64):
                return x.view(np.int64)
            return x
        if subset is None:
            # consider every column; one array per column
            values = list(_m8_to_i8(self.values.T))
        else:
            if np.iterable(subset) and not isinstance(subset, compat.string_types):
                if isinstance(subset, tuple):
                    # a tuple may itself be a (MultiIndex) column label
                    if subset in self.columns:
                        values = [self[subset].values]
                    else:
                        values = [_m8_to_i8(self[x].values) for x in subset]
                else:
                    values = [_m8_to_i8(self[x].values) for x in subset]
            else:
                # single column label
                values = [self[subset].values]
        # zip the per-column arrays into row keys, then flag duplicates
        keys = lib.fast_zip_fillna(values)
        duplicated = lib.duplicated(keys, take_last=take_last)
        return Series(duplicated, index=self.index)
#----------------------------------------------------------------------
# Sorting
    def sort(self, columns=None, axis=0, ascending=True,
             inplace=False, kind='quicksort', na_position='last'):
        """
        Sort DataFrame either by labels (along either axis) or by the values in
        column(s)
        Parameters
        ----------
        columns : object
            Column name(s) in frame. Accepts a column name or a list
            for a nested sort. A tuple will be interpreted as the
            levels of a multi-index.
        ascending : boolean or list, default True
            Sort ascending vs. descending. Specify list for multiple sort
            orders
        axis : {0, 1}
            Sort index/rows versus columns
        inplace : boolean, default False
            Sort the DataFrame without creating a new instance
        kind : {'quicksort', 'mergesort', 'heapsort'}, optional
            This option is only applied when sorting on a single column or label.
        na_position : {'first', 'last'} (optional, default='last')
            'first' puts NaNs at the beginning
            'last' puts NaNs at the end
        Examples
        --------
        >>> result = df.sort(['A', 'B'], ascending=[1, 0])
        Returns
        -------
        sorted : DataFrame
        """
        # thin alias: all the work happens in sort_index(by=columns, ...)
        return self.sort_index(by=columns, axis=axis, ascending=ascending,
                               inplace=inplace, kind=kind, na_position=na_position)
    def sort_index(self, axis=0, by=None, ascending=True, inplace=False,
                   kind='quicksort', na_position='last'):
        """
        Sort DataFrame either by labels (along either axis) or by the values in
        a column
        Parameters
        ----------
        axis : {0, 1}
            Sort index/rows versus columns
        by : object
            Column name(s) in frame. Accepts a column name or a list
            for a nested sort. A tuple will be interpreted as the
            levels of a multi-index.
        ascending : boolean or list, default True
            Sort ascending vs. descending. Specify list for multiple sort
            orders
        inplace : boolean, default False
            Sort the DataFrame without creating a new instance
        na_position : {'first', 'last'} (optional, default='last')
            'first' puts NaNs at the beginning
            'last' puts NaNs at the end
        kind : {'quicksort', 'mergesort', 'heapsort'}, optional
            This option is only applied when sorting on a single column or label.
        Examples
        --------
        >>> result = df.sort_index(by=['A', 'B'], ascending=[True, False])
        Returns
        -------
        sorted : DataFrame
        """
        from pandas.core.groupby import _lexsort_indexer, _nargsort
        axis = self._get_axis_number(axis)
        if axis not in [0, 1]:  # pragma: no cover
            raise AssertionError('Axis must be 0 or 1, got %s' % str(axis))
        labels = self._get_axis(axis)
        if by is not None:
            # value sort: only meaningful along the rows
            if axis != 0:
                raise ValueError('When sorting by column, axis must be 0 '
                                 '(rows)')
            if not isinstance(by, list):
                by = [by]
            if com._is_sequence(ascending) and len(by) != len(ascending):
                raise ValueError('Length of ascending (%d) != length of by'
                                 ' (%d)' % (len(ascending), len(by)))
            if len(by) > 1:
                def trans(v):
                    # datetimelike columns sort via their int64 view
                    if com.needs_i8_conversion(v):
                        return v.view('i8')
                    return v
                keys = []
                for x in by:
                    k = self[x].values
                    if k.ndim == 2:
                        raise ValueError('Cannot sort by duplicate column %s' % str(x))
                    keys.append(trans(k))
                # stable multi-key sort
                indexer = _lexsort_indexer(keys, orders=ascending,
                                           na_position=na_position)
                indexer = com._ensure_platform_int(indexer)
            else:
                by = by[0]
                k = self[by].values
                if k.ndim == 2:
                    # try to be helpful
                    if isinstance(self.columns, MultiIndex):
                        raise ValueError('Cannot sort by column %s in a multi-index'
                                         ' you need to explicity provide all the levels'
                                         % str(by))
                    raise ValueError('Cannot sort by duplicate column %s'
                                     % str(by))
                if isinstance(ascending, (tuple, list)):
                    ascending = ascending[0]
                indexer = _nargsort(k, kind=kind, ascending=ascending,
                                    na_position=na_position)
        elif isinstance(labels, MultiIndex):
            # label sort over a hierarchical axis: lexsort all levels
            indexer = _lexsort_indexer(labels.labels, orders=ascending,
                                       na_position=na_position)
            indexer = com._ensure_platform_int(indexer)
        else:
            # plain label sort
            indexer = _nargsort(labels, kind=kind, ascending=ascending,
                                na_position=na_position)
        bm_axis = self._get_block_manager_axis(axis)
        new_data = self._data.take(indexer, axis=bm_axis,
                                   convert=False, verify=False)
        if inplace:
            return self._update_inplace(new_data)
        else:
            return self._constructor(new_data).__finalize__(self)
    def sortlevel(self, level=0, axis=0, ascending=True,
                  inplace=False, sort_remaining=True):
        """
        Sort multilevel index by chosen axis and primary level. Data will be
        lexicographically sorted by the chosen level followed by the other
        levels (in order)
        Parameters
        ----------
        level : int
        axis : {0, 1}
        ascending : boolean, default True
        inplace : boolean, default False
            Sort the DataFrame without creating a new instance
        sort_remaining : boolean, default True
            Sort by the other levels too.
        Returns
        -------
        sorted : DataFrame
        """
        axis = self._get_axis_number(axis)
        the_axis = self._get_axis(axis)
        if not isinstance(the_axis, MultiIndex):
            raise TypeError('can only sort by level with a hierarchical index')
        new_axis, indexer = the_axis.sortlevel(level, ascending=ascending,
                                               sort_remaining=sort_remaining)
        if self._is_mixed_type and not inplace:
            # mixed-dtype frames: go through reindex/take so each block keeps
            # its dtype
            ax = 'index' if axis == 0 else 'columns'
            if new_axis.is_unique:
                return self.reindex(**{ax: new_axis})
            else:
                return self.take(indexer, axis=axis, convert=False)
        bm_axis = self._get_block_manager_axis(axis)
        new_data = self._data.take(indexer, axis=bm_axis,
                                   convert=False, verify=False)
        if inplace:
            return self._update_inplace(new_data)
        else:
            return self._constructor(new_data).__finalize__(self)
def swaplevel(self, i, j, axis=0):
"""
Swap levels i and j in a MultiIndex on a particular axis
Parameters
----------
i, j : int, string (can be mixed)
Level of index to be swapped. Can pass level name as string.
Returns
-------
swapped : type of caller (new object)
"""
result = self.copy()
axis = self._get_axis_number(axis)
if axis == 0:
result.index = result.index.swaplevel(i, j)
else:
result.columns = result.columns.swaplevel(i, j)
return result
def reorder_levels(self, order, axis=0):
"""
Rearrange index levels using input order.
May not drop or duplicate levels
Parameters
----------
order : list of int or list of str
List representing new level order. Reference level by number
(position) or by key (label).
axis : int
Where to reorder levels.
Returns
-------
type of caller (new object)
"""
axis = self._get_axis_number(axis)
if not isinstance(self._get_axis(axis),
MultiIndex): # pragma: no cover
raise TypeError('Can only reorder levels on a hierarchical axis.')
result = self.copy()
if axis == 0:
result.index = result.index.reorder_levels(order)
else:
result.columns = result.columns.reorder_levels(order)
return result
#----------------------------------------------------------------------
# Arithmetic / combination related
    def _combine_frame(self, other, func, fill_value=None, level=None):
        """Align two frames on both axes and apply the binary ``func``,
        optionally filling one-sided missing values with ``fill_value``."""
        this, other = self.align(other, join='outer', level=level, copy=False)
        new_index, new_columns = this.index, this.columns
        def _arith_op(left, right):
            if fill_value is not None:
                left_mask = isnull(left)
                right_mask = isnull(right)
                left = left.copy()
                right = right.copy()
                # one but not both
                mask = left_mask ^ right_mask
                left[left_mask & mask] = fill_value
                right[right_mask & mask] = fill_value
            return func(left, right)
        if this._is_mixed_type or other._is_mixed_type:
            # mixed dtypes: operate column-by-column to avoid a common upcast
            # unique
            if this.columns.is_unique:
                def f(col):
                    r = _arith_op(this[col].values, other[col].values)
                    return self._constructor_sliced(r, index=new_index,
                                                    dtype=r.dtype)
                result = dict([(col, f(col)) for col in this])
            # non-unique
            else:
                # fall back to positional access for duplicate labels
                def f(i):
                    r = _arith_op(this.iloc[:, i].values,
                                  other.iloc[:, i].values)
                    return self._constructor_sliced(r, index=new_index,
                                                    dtype=r.dtype)
                result = dict([
                    (i, f(i)) for i, col in enumerate(this.columns)
                ])
            result = self._constructor(result, index=new_index, copy=False)
            result.columns = new_columns
            return result
        else:
            # homogeneous dtypes: operate on the whole 2-D block at once
            result = _arith_op(this.values, other.values)
            return self._constructor(result, index=new_index,
                                     columns=new_columns, copy=False)
def _combine_series(self, other, func, fill_value=None, axis=None,
level=None):
if axis is not None:
axis = self._get_axis_name(axis)
if axis == 'index':
return self._combine_match_index(other, func, level=level, fill_value=fill_value)
else:
return self._combine_match_columns(other, func, level=level, fill_value=fill_value)
return self._combine_series_infer(other, func, level=level, fill_value=fill_value)
    def _combine_series_infer(self, other, func, level=None, fill_value=None):
        """Legacy axis inference for frame-vs-Series ops: broadcast along the
        index when both are datetimelike, otherwise along the columns."""
        if len(other) == 0:
            # empty Series: result is all-NA with our shape
            return self * NA
        if len(self) == 0:
            # Ambiguous case, use _series so works with DataFrame
            return self._constructor(data=self._series, index=self.index,
                                     columns=self.columns)
        # teeny hack because one does DataFrame + TimeSeries all the time
        if self.index.is_all_dates and other.index.is_all_dates:
            warnings.warn(("TimeSeries broadcasting along DataFrame index "
                           "by default is deprecated. Please use "
                           "DataFrame.<op> to explicitly broadcast arithmetic "
                           "operations along the index"),
                          FutureWarning)
            return self._combine_match_index(other, func, level=level, fill_value=fill_value)
        else:
            return self._combine_match_columns(other, func, level=level, fill_value=fill_value)
    def _combine_match_index(self, other, func, level=None, fill_value=None):
        # align the Series against our index, then broadcast down the rows
        left, right = self.align(other, join='outer', axis=0, level=level, copy=False)
        if fill_value is not None:
            raise NotImplementedError("fill_value %r not supported." %
                                      fill_value)
        # operate on the transpose so the row-aligned Series broadcasts over
        # the columns, then transpose the result back
        return self._constructor(func(left.values.T, right.values).T,
                                 index=left.index,
                                 columns=self.columns, copy=False)
    def _combine_match_columns(self, other, func, level=None, fill_value=None):
        # align the Series against our columns, then let the block manager
        # apply ``func`` block-wise
        left, right = self.align(other, join='outer', axis=1, level=level, copy=False)
        if fill_value is not None:
            raise NotImplementedError("fill_value %r not supported" %
                                      fill_value)
        new_data = left._data.eval(
            func=func, other=right, axes=[left.columns, self.index])
        return self._constructor(new_data)
def _combine_const(self, other, func, raise_on_error=True):
if self.empty:
return self
new_data = self._data.eval(func=func, other=other, raise_on_error=raise_on_error)
return self._constructor(new_data)
    def _compare_frame_evaluate(self, other, func, str_rep):
        """Apply the comparison ``func`` column-by-column against an
        already-aligned frame, preserving the original column labels."""
        # unique
        if self.columns.is_unique:
            def _compare(a, b):
                return dict([(col, func(a[col], b[col])) for col in a.columns])
            new_data = expressions.evaluate(_compare, str_rep, self, other)
            return self._constructor(data=new_data, index=self.index,
                                     columns=self.columns, copy=False)
        # non-unique
        else:
            # duplicate labels: compare positionally, then restore the labels
            def _compare(a, b):
                return dict([(i, func(a.iloc[:, i], b.iloc[:, i]))
                             for i, col in enumerate(a.columns)])
            new_data = expressions.evaluate(_compare, str_rep, self, other)
            result = self._constructor(data=new_data, index=self.index,
                                       copy=False)
            result.columns = self.columns
            return result
def _compare_frame(self, other, func, str_rep):
if not self._indexed_same(other):
raise ValueError('Can only compare identically-labeled '
'DataFrame objects')
return self._compare_frame_evaluate(other, func, str_rep)
    def _flex_compare_frame(self, other, func, str_rep, level):
        # flexible comparison: outer-align the two frames first instead of
        # requiring identical labels
        if not self._indexed_same(other):
            self, other = self.align(other, 'outer', level=level)
        return self._compare_frame_evaluate(other, func, str_rep)
def combine(self, other, func, fill_value=None, overwrite=True):
    """
    Add two DataFrame objects and do not propagate NaN values, so if for a
    (column, time) one frame is missing a value, it will default to the
    other frame's value (which might be NaN as well)

    Parameters
    ----------
    other : DataFrame
    func : function
        Combining function taking the two aligned column Series (plus, for
        datetime-like columns, a third positional flag) and returning the
        combined values.
    fill_value : scalar value
        Substituted for NA on either side before ``func`` is applied.
    overwrite : boolean, default True
        If True then overwrite values for common keys in the calling frame

    Returns
    -------
    result : DataFrame
    """
    other_idxlen = len(other.index)  # save for compare

    this, other = self.align(other, copy=False)
    new_index = this.index

    # Short-circuit: if one side contributed nothing to the alignment,
    # return a copy of the other side unchanged.
    if other.empty and len(new_index) == len(self.index):
        return self.copy()

    if self.empty and len(other) == other_idxlen:
        return other.copy()

    # sorts if possible
    new_columns = this.columns.union(other.columns)
    do_fill = fill_value is not None

    result = {}
    for col in new_columns:
        series = this[col]
        otherSeries = other[col]

        this_dtype = series.dtype
        other_dtype = otherSeries.dtype

        this_mask = isnull(series)
        other_mask = isnull(otherSeries)

        # don't overwrite columns unecessarily
        # DO propogate if this column is not in the intersection
        if not overwrite and other_mask.all():
            result[col] = this[col].copy()
            continue

        if do_fill:
            # work on copies so the fill does not mutate the aligned frames
            series = series.copy()
            otherSeries = otherSeries.copy()
            series[this_mask] = fill_value
            otherSeries[other_mask] = fill_value

        # if we have different dtypes, possibily promote
        new_dtype = this_dtype
        if this_dtype != other_dtype:
            new_dtype = com._lcd_dtypes(this_dtype, other_dtype)
            series = series.astype(new_dtype)
            otherSeries = otherSeries.astype(new_dtype)

        # see if we need to be represented as i8 (datetimelike)
        # try to keep us at this dtype
        needs_i8_conversion = com.needs_i8_conversion(new_dtype)
        if needs_i8_conversion:
            this_dtype = new_dtype
            # the extra flag tells ``func`` to treat the data as i8
            arr = func(series, otherSeries, True)
        else:
            arr = func(series, otherSeries)

        if do_fill:
            # positions that were missing on BOTH sides stay NA even
            # after the fill above
            arr = com.ensure_float(arr)
            arr[this_mask & other_mask] = NA

        # try to downcast back to the original dtype
        if needs_i8_conversion:
            arr = com._possibly_cast_to_datetime(arr, this_dtype)
        else:
            arr = com._possibly_downcast_to_dtype(arr, this_dtype)

        result[col] = arr

    # convert_objects just in case
    return self._constructor(result,
                             index=new_index,
                             columns=new_columns).convert_objects(
        convert_dates=True,
        copy=False)
def combine_first(self, other):
    """
    Combine two DataFrame objects and default to non-null values in frame
    calling the method. Result index columns will be the union of the
    respective indexes and columns

    Parameters
    ----------
    other : DataFrame

    Examples
    --------
    a's values prioritized, use values from b to fill holes:

    >>> a.combine_first(b)

    Returns
    -------
    combined : DataFrame
    """
    def combiner(x, y, needs_i8_conversion=False):
        # x wins wherever it is non-null; y fills the holes
        x_values = x.values if hasattr(x, 'values') else x
        y_values = y.values if hasattr(y, 'values') else y
        if needs_i8_conversion:
            # view datetimelike data as int64 so ``where`` can operate on
            # it; the null mask must be computed on the original values
            mask = isnull(x)
            x_values = x_values.view('i8')
            y_values = y_values.view('i8')
        else:
            mask = isnull(x_values)

        return expressions.where(mask, y_values, x_values,
                                 raise_on_error=True)

    # overwrite=False keeps existing non-null values of ``self`` intact
    return self.combine(other, combiner, overwrite=False)
def update(self, other, join='left', overwrite=True, filter_func=None,
           raise_conflict=False):
    """
    Modify DataFrame in place using non-NA values from passed
    DataFrame. Aligns on indices

    Parameters
    ----------
    other : DataFrame, or object coercible into a DataFrame
    join : {'left', 'right', 'outer', 'inner'}, default 'left'
        Only 'left' is currently implemented.
    overwrite : boolean, default True
        If True then overwrite values for common keys in the calling frame
    filter_func : callable(1d-array) -> 1d-array<boolean>, default None
        Can choose to replace values other than NA. Return True for values
        that should be updated
    raise_conflict : boolean
        If True, will raise an error if the DataFrame and other both
        contain data in the same place.
    """
    # TODO: Support other joins
    if join != 'left':  # pragma: no cover
        raise NotImplementedError("Only left join is supported")

    if not isinstance(other, DataFrame):
        other = DataFrame(other)

    # reindex ``other`` to this frame's labels so columns line up 1:1
    other = other.reindex_like(self)

    for col in self.columns:
        this = self[col].values
        that = other[col].values
        if filter_func is not None:
            # keep positions rejected by the filter; never pull in NA
            mask = ~filter_func(this) | isnull(that)
        else:
            if raise_conflict:
                mask_this = notnull(that)
                mask_that = notnull(this)
                if any(mask_this & mask_that):
                    raise ValueError("Data overlaps.")

            if overwrite:
                mask = isnull(that)

                # don't overwrite columns unecessarily
                if mask.all():
                    continue
            else:
                mask = notnull(this)

        # True in ``mask`` keeps the existing value, False takes ``that``
        self[col] = expressions.where(
            mask, this, that, raise_on_error=True)
#----------------------------------------------------------------------
# Misc methods
def first_valid_index(self):
    """Return the label of the first row that holds any non-NA value."""
    has_data = self.count(1) > 0
    return self.index[has_data][0]
def last_valid_index(self):
    """Return the label of the last row that holds any non-NA value."""
    has_data = self.count(1) > 0
    return self.index[has_data][-1]
#----------------------------------------------------------------------
# Data reshaping
def pivot(self, index=None, columns=None, values=None):
    """
    Reshape data (produce a "pivot" table) based on column values. Uses
    unique values from index / columns to form axes and return either
    DataFrame or Panel, depending on whether you request a single value
    column (DataFrame) or all columns (Panel)

    Parameters
    ----------
    index : string or object
        Column name to use to make new frame's index
    columns : string or object
        Column name to use to make new frame's columns
    values : string or object, optional
        Column name to use for populating new frame's values

    Notes
    -----
    For finer-tuned control, see hierarchical indexing documentation along
    with the related stack/unstack methods

    Examples
    --------
    >>> df
        foo   bar  baz
    0   one   A    1.
    1   one   B    2.
    2   one   C    3.
    3   two   A    4.
    4   two   B    5.
    5   two   C    6.

    >>> df.pivot('foo', 'bar', 'baz')
         A   B   C
    one  1   2   3
    two  4   5   6

    Returns
    -------
    pivoted : DataFrame
        If no values column specified, will have hierarchically indexed
        columns
    """
    # the actual reshaping lives in pandas.core.reshape
    from pandas.core.reshape import pivot as _pivot
    return _pivot(self, index=index, columns=columns, values=values)
def stack(self, level=-1, dropna=True):
    """
    Pivot a level of the (possibly hierarchical) column labels, returning a
    DataFrame (or Series in the case of an object with a single level of
    column labels) having a hierarchical index with a new inner-most level
    of row labels.

    Parameters
    ----------
    level : int, string, or list of these, default last level
        Level(s) to stack, can pass level name
    dropna : boolean, default True
        Whether to drop rows in the resulting Frame/Series with no valid
        values

    Examples
    --------
    >>> s
         a   b
    one  1.  2.
    two  3.  4.

    >>> s.stack()
    one a    1
        b    2
    two a    3
        b    4

    Returns
    -------
    stacked : DataFrame or Series
    """
    from pandas.core.reshape import stack as _stack, stack_multiple

    # a list/tuple of levels goes through the multi-level implementation
    if isinstance(level, (list, tuple)):
        return stack_multiple(self, level, dropna=dropna)
    return _stack(self, level, dropna=dropna)
def unstack(self, level=-1):
    """
    Pivot a level of the (necessarily hierarchical) index labels, returning
    a DataFrame having a new level of column labels whose inner-most level
    consists of the pivoted index labels. If the index is not a MultiIndex,
    the output will be a Series (the analogue of stack when the columns are
    not a MultiIndex)

    Parameters
    ----------
    level : int, string, or list of these, default -1 (last level)
        Level(s) of index to unstack, can pass level name

    See also
    --------
    DataFrame.pivot : Pivot a table based on column values.
    DataFrame.stack : Pivot a level of the column labels (inverse operation
        from `unstack`).

    Examples
    --------
    >>> index = pd.MultiIndex.from_tuples([('one', 'a'), ('one', 'b'),
    ...                                    ('two', 'a'), ('two', 'b')])
    >>> s = pd.Series(np.arange(1.0, 5.0), index=index)
    >>> s.unstack(level=-1)
         a   b
    one  1   2
    two  3   4

    >>> s.unstack(level=0)
       one  two
    a  1    3
    b  2    4

    Returns
    -------
    unstacked : DataFrame or Series
    """
    # the actual reshaping lives in pandas.core.reshape
    from pandas.core.reshape import unstack as _unstack
    return _unstack(self, level)
#----------------------------------------------------------------------
# Time series-related
def diff(self, periods=1):
    """
    1st discrete difference of object

    Parameters
    ----------
    periods : int, default 1
        Periods to shift for forming difference

    Returns
    -------
    diffed : DataFrame
    """
    # the BlockManager differences each block; wrap the result back up
    return self._constructor(self._data.diff(n=periods))
#----------------------------------------------------------------------
# Function application
def apply(self, func, axis=0, broadcast=False, raw=False, reduce=None,
          args=(), **kwds):
    """
    Applies function along input axis of DataFrame.

    Objects passed to functions are Series objects having index
    either the DataFrame's index (axis=0) or the columns (axis=1).
    Return type depends on whether passed function aggregates, or the
    reduce argument if the DataFrame is empty.

    Parameters
    ----------
    func : function
        Function to apply to each column/row
    axis : {0, 1}
        * 0 : apply function to each column
        * 1 : apply function to each row
    broadcast : boolean, default False
        For aggregation functions, return object of same size with values
        propagated
    reduce : boolean or None, default None
        Try to apply reduction procedures. If the DataFrame is empty,
        apply will use reduce to determine whether the result should be a
        Series or a DataFrame. If reduce is None (the default), apply's
        return value will be guessed by calling func an empty Series (note:
        while guessing, exceptions raised by func will be ignored). If
        reduce is True a Series will always be returned, and if False a
        DataFrame will always be returned.
    raw : boolean, default False
        If False, convert each row or column into a Series. If raw=True the
        passed function will receive ndarray objects instead. If you are
        just applying a NumPy reduction function this will achieve much
        better performance
    args : tuple
        Positional arguments to pass to function in addition to the
        array/series
    Additional keyword arguments will be passed as keywords to the function

    Notes
    -----
    In the current implementation apply calls func twice on the
    first column/row to decide whether it can take a fast or slow
    code path. This can lead to unexpected behavior if func has
    side-effects, as they will take effect twice for the first
    column/row.

    Examples
    --------
    >>> df.apply(numpy.sqrt) # returns DataFrame
    >>> df.apply(numpy.sum, axis=0) # equiv to df.sum(0)
    >>> df.apply(numpy.sum, axis=1) # equiv to df.sum(1)

    See also
    --------
    DataFrame.applymap: For elementwise operations

    Returns
    -------
    applied : Series or DataFrame
    """
    axis = self._get_axis_number(axis)
    # Bind extra args/kwds into a closure; ufuncs are left alone so the
    # fast ndarray path below can recognise them.
    # NOTE(review): operator precedence makes this parse as
    # ``kwds or (args and not isinstance(...))`` — kwds alone always wraps,
    # even for ufuncs.  Possibly intentional; confirm before changing.
    if kwds or args and not isinstance(func, np.ufunc):
        f = lambda x: func(x, *args, **kwds)
    else:
        f = func

    if len(self.columns) == 0 and len(self.index) == 0:
        # completely empty frame: delegate shape decision
        return self._apply_empty_result(func, axis, reduce, *args, **kwds)

    if isinstance(f, np.ufunc):
        # elementwise ufunc: apply directly to the underlying ndarray
        results = f(self.values)
        return self._constructor(data=results, index=self.index,
                                 columns=self.columns, copy=False)
    else:
        if not broadcast:
            if not all(self.shape):
                # at least one axis is empty
                return self._apply_empty_result(func, axis, reduce, *args,
                                                **kwds)

            if raw and not self._is_mixed_type:
                # raw ndarray path (no Series boxing)
                return self._apply_raw(f, axis)
            else:
                if reduce is None:
                    reduce = True
                return self._apply_standard(f, axis, reduce=reduce)
        else:
            return self._apply_broadcast(f, axis)
def _apply_empty_result(self, func, axis, reduce, *args, **kwds):
    # Decide what applying ``func`` to an empty frame should return.
    # If the caller did not force ``reduce``, probe ``func`` on an empty
    # Series: a non-Series result means the function reduces.
    if reduce is None:
        reduce = False
        try:
            reduce = not isinstance(func(_EMPTY_SERIES, *args, **kwds),
                                    Series)
        except Exception:
            # probing failures are ignored; keep the DataFrame default
            pass

    if reduce:
        return Series(NA, index=self._get_agg_axis(axis))
    else:
        return self.copy()
def _apply_raw(self, func, axis):
    # Fast path: hand raw ndarrays to ``func`` with no Series wrapping.
    try:
        result = lib.reduce(self.values, func, axis=axis)
    except Exception:
        # the cython reducer refused (e.g. func does not reduce to a
        # scalar); fall back to numpy's generic implementation
        result = np.apply_along_axis(func, axis, self.values)

    # TODO: mixed type case
    if result.ndim == 2:
        return DataFrame(result, index=self.index,
                         columns=self.columns)
    else:
        return Series(result, index=self._get_agg_axis(axis))
def _apply_standard(self, func, axis, ignore_failures=False, reduce=True):
    # General apply path: build one Series per column/row, call ``func``
    # on each, then assemble the results into a Series or DataFrame.

    # skip if we are mixed datelike and trying reduce across axes
    # GH6125
    if reduce and axis == 1 and self._is_mixed_type and self._is_datelike_mixed_type:
        reduce = False

    # try to reduce first (by default)
    # this only matters if the reduction in values is of different dtype
    # e.g. if we want to apply to a SparseFrame, then can't directly reduce
    if reduce:
        try:
            # this is the fast-path
            values = self.values
            dummy = Series(NA, index=self._get_axis(axis),
                           dtype=values.dtype)

            labels = self._get_agg_axis(axis)
            result = lib.reduce(values, func, axis=axis, dummy=dummy,
                                labels=labels)
            return Series(result, index=labels)
        except Exception:
            # fall through to the pure-python implementation below
            pass

    # mixed-dtype frames lose per-column dtypes through ``.values``;
    # force object dtype to avoid spurious upcasting of the row Series
    dtype = object if self._is_mixed_type else None

    if axis == 0:
        series_gen = (self.icol(i) for i in range(len(self.columns)))
        res_index = self.columns
        res_columns = self.index
    elif axis == 1:
        res_index = self.index
        res_columns = self.columns
        values = self.values
        series_gen = (Series.from_array(arr, index=res_columns, name=name, dtype=dtype)
                      for i, (arr, name) in
                      enumerate(zip(values, res_index)))
    else:  # pragma : no cover
        raise AssertionError('Axis must be 0 or 1, got %s' % str(axis))

    i = None
    keys = []
    results = {}
    if ignore_failures:
        # collect only the labels whose func call succeeded
        successes = []
        for i, v in enumerate(series_gen):
            try:
                results[i] = func(v)
                keys.append(v.name)
                successes.append(i)
            except Exception:
                pass
        # so will work with MultiIndex
        if len(successes) < len(res_index):
            res_index = res_index.take(successes)
    else:
        try:
            for i, v in enumerate(series_gen):
                results[i] = func(v)
                keys.append(v.name)
        except Exception as e:
            # annotate the exception with the failing label for debugging
            if hasattr(e, 'args'):
                # make sure i is defined
                if i is not None:
                    k = res_index[i]
                    e.args = e.args + ('occurred at index %s' %
                                       com.pprint_thing(k),)
            raise

    if len(results) > 0 and _is_sequence(results[0]):
        # func returned sequences: assemble a DataFrame
        if not isinstance(results[0], Series):
            index = res_columns
        else:
            index = None

        result = self._constructor(data=results, index=index)
        result.columns = res_index

        if axis == 1:
            result = result.T
        result = result.convert_objects(copy=False)

    else:
        # func reduced each column/row to a scalar: a Series suffices
        result = Series(results)
        result.index = res_index

    return result
def _apply_broadcast(self, func, axis):
    # Apply ``func`` column-wise and broadcast the results back to the
    # original shape (transposing first when operating on rows).
    if axis == 0:
        target = self
    elif axis == 1:
        target = self.T
    else:  # pragma: no cover
        raise AssertionError('Axis must be 0 or 1, got %s' % axis)

    result_values = np.empty_like(target.values)
    columns = target.columns
    for i, col in enumerate(columns):
        result_values[:, i] = func(target[col])

    result = self._constructor(result_values, index=target.index,
                               columns=target.columns)

    if axis == 1:
        # undo the transpose applied above
        result = result.T

    return result
def applymap(self, func):
    """Apply ``func`` element-wise across the frame.

    Equivalent to doing ``map(func, series)`` for every column.

    Parameters
    ----------
    func : function
        Python function, returns a single value from a single value

    Returns
    -------
    applied : DataFrame

    See also
    --------
    DataFrame.apply : For operations on rows/columns
    """
    # datetime64 columns are boxed to Timestamps so ``func`` sees scalars
    def _elementwise(col):
        values = _values_from_object(col)
        if com.is_datetime64_dtype(col):
            values = lib.map_infer(values, lib.Timestamp)
        return lib.map_infer(values, func)

    return self.apply(_elementwise)
#----------------------------------------------------------------------
# Merging / joining methods
def append(self, other, ignore_index=False, verify_integrity=False):
    """
    Append columns of other to end of this frame's columns and index,
    returning a new object. Columns not in this frame are added as new
    columns.

    Parameters
    ----------
    other : DataFrame or list of Series/dict-like objects
    ignore_index : boolean, default False
        If True do not use the index labels. Useful for gluing together
        record arrays
    verify_integrity : boolean, default False
        If True, raise ValueError on creating index with duplicates

    Notes
    -----
    If a list of dict is passed and the keys are all contained in the
    DataFrame's index, the order of the columns in the resulting DataFrame
    will be unchanged

    Returns
    -------
    appended : DataFrame
    """
    if isinstance(other, (Series, dict)):
        # normalise a single row (Series/dict) into a one-row DataFrame
        if isinstance(other, dict):
            other = Series(other)
        if other.name is None and not ignore_index:
            raise TypeError('Can only append a Series if '
                            'ignore_index=True')

        index = None if other.name is None else [other.name]
        # union of columns, keeping this frame's column order first
        combined_columns = self.columns.tolist() + ((self.columns | other.index) - self.columns).tolist()
        other = other.reindex(combined_columns, copy=False)
        other = DataFrame(other.values.reshape((1, len(other))),
                          index=index, columns=combined_columns).convert_objects()
        if not self.columns.equals(combined_columns):
            self = self.reindex(columns=combined_columns)
    elif isinstance(other, list) and not isinstance(other[0], DataFrame):
        other = DataFrame(other)
        if (self.columns.get_indexer(other.columns) >= 0).all():
            # reorder the new rows' columns to match this frame
            other = other.ix[:, self.columns]

    from pandas.tools.merge import concat
    if isinstance(other, (list, tuple)):
        to_concat = [self] + other
    else:
        to_concat = [self, other]
    return concat(to_concat, ignore_index=ignore_index,
                  verify_integrity=verify_integrity)
def join(self, other, on=None, how='left', lsuffix='', rsuffix='',
         sort=False):
    """
    Join columns with other DataFrame either on index or on a key
    column. Efficiently Join multiple DataFrame objects by index at once by
    passing a list.

    Parameters
    ----------
    other : DataFrame, Series with name field set, or list of DataFrame
        Index should be similar to one of the columns in this one. If a
        Series is passed, its name attribute must be set, and that will be
        used as the column name in the resulting joined DataFrame
    on : column name, tuple/list of column names, or array-like
        Column(s) to use for joining, otherwise join on index. If multiples
        columns given, the passed DataFrame must have a MultiIndex. Can
        pass an array as the join key if not already contained in the
        calling DataFrame. Like an Excel VLOOKUP operation
    how : {'left', 'right', 'outer', 'inner'}
        How to handle indexes of the two objects. Default: 'left'
        for joining on index, None otherwise

        * left: use calling frame's index
        * right: use input frame's index
        * outer: form union of indexes
        * inner: use intersection of indexes
    lsuffix : string
        Suffix to use from left frame's overlapping columns
    rsuffix : string
        Suffix to use from right frame's overlapping columns
    sort : boolean, default False
        Order result DataFrame lexicographically by the join key. If False,
        preserves the index order of the calling (left) DataFrame

    Notes
    -----
    on, lsuffix, and rsuffix options are not supported when passing a list
    of DataFrame objects

    Returns
    -------
    joined : DataFrame
    """
    # For SparseDataFrame's benefit
    return self._join_compat(other, on=on, how=how, lsuffix=lsuffix,
                             rsuffix=rsuffix, sort=sort)
def _join_compat(self, other, on=None, how='left', lsuffix='', rsuffix='',
                 sort=False):
    # Implementation behind ``join``; kept separate so subclasses (e.g.
    # SparseDataFrame) can reuse or override it.
    from pandas.tools.merge import merge, concat

    if isinstance(other, Series):
        # a named Series joins as a single-column frame
        if other.name is None:
            raise ValueError('Other Series must have a name')
        other = DataFrame({other.name: other})

    if isinstance(other, DataFrame):
        return merge(self, other, left_on=on, how=how,
                     left_index=on is None, right_index=True,
                     suffixes=(lsuffix, rsuffix), sort=sort)
    else:
        # list of frames: only index joins are supported
        if on is not None:
            raise ValueError('Joining multiple DataFrames only supported'
                             ' for joining on index')

        # join indexes only using concat
        if how == 'left':
            # emulate 'left' by outer-concatenating onto this frame's index
            how = 'outer'
            join_axes = [self.index]
        else:
            join_axes = None

        frames = [self] + list(other)

        can_concat = all(df.index.is_unique for df in frames)

        if can_concat:
            return concat(frames, axis=1, join=how, join_axes=join_axes,
                          verify_integrity=True)

        # duplicate index labels present: fall back to pairwise merges
        joined = frames[0]

        for frame in frames[1:]:
            joined = merge(joined, frame, how=how,
                           left_index=True, right_index=True)

        return joined
@Substitution('')
@Appender(_merge_doc, indents=2)
def merge(self, right, how='inner', on=None, left_on=None, right_on=None,
          left_index=False, right_index=False, sort=False,
          suffixes=('_x', '_y'), copy=True):
    # Thin wrapper around the top-level merge; the docstring is injected
    # from ``_merge_doc`` by the decorators above.
    from pandas.tools.merge import merge
    return merge(self, right, how=how, on=on,
                 left_on=left_on, right_on=right_on,
                 left_index=left_index, right_index=right_index, sort=sort,
                 suffixes=suffixes, copy=copy)
#----------------------------------------------------------------------
# Statistical methods, etc.
def corr(self, method='pearson', min_periods=1):
    """
    Compute pairwise correlation of columns, excluding NA/null values

    Parameters
    ----------
    method : {'pearson', 'kendall', 'spearman'}
        * pearson : standard correlation coefficient
        * kendall : Kendall Tau correlation coefficient
        * spearman : Spearman rank correlation
    min_periods : int, optional
        Minimum number of observations required per pair of columns
        to have a valid result. Currently only available for pearson
        and spearman correlation

    Returns
    -------
    y : DataFrame
    """
    numeric_df = self._get_numeric_data()
    cols = numeric_df.columns
    mat = numeric_df.values

    # pearson/spearman have fast cython implementations
    if method == 'pearson':
        correl = _algos.nancorr(com._ensure_float64(mat),
                                minp=min_periods)
    elif method == 'spearman':
        correl = _algos.nancorr_spearman(com._ensure_float64(mat),
                                         minp=min_periods)
    else:
        # generic python fallback (e.g. kendall): pairwise loop with
        # NA masking per column pair
        if min_periods is None:
            min_periods = 1
        mat = mat.T
        corrf = nanops.get_corr_func(method)
        K = len(cols)
        correl = np.empty((K, K), dtype=float)
        mask = np.isfinite(mat)
        for i, ac in enumerate(mat):
            for j, bc in enumerate(mat):
                valid = mask[i] & mask[j]
                if valid.sum() < min_periods:
                    c = NA
                elif not valid.all():
                    c = corrf(ac[valid], bc[valid])
                else:
                    c = corrf(ac, bc)
                # the matrix is symmetric; fill both halves
                correl[i, j] = c
                correl[j, i] = c

    return self._constructor(correl, index=cols, columns=cols)
def cov(self, min_periods=None):
    """
    Compute pairwise covariance of columns, excluding NA/null values

    Parameters
    ----------
    min_periods : int, optional
        Minimum number of observations required per pair of columns
        to have a valid result.

    Returns
    -------
    y : DataFrame

    Notes
    -----
    `y` contains the covariance matrix of the DataFrame's time series.
    The covariance is normalized by N-1 (unbiased estimator).
    """
    numeric_df = self._get_numeric_data()
    cols = numeric_df.columns
    mat = numeric_df.values

    if notnull(mat).all():
        # no missing data: numpy's cov is sufficient
        if min_periods is not None and min_periods > len(mat):
            # fewer rows than required observations: all-NaN result
            baseCov = np.empty((mat.shape[1], mat.shape[1]))
            baseCov.fill(np.nan)
        else:
            baseCov = np.cov(mat.T)
        # np.cov on a single column returns a 0-d result; force 2-d
        baseCov = baseCov.reshape((len(cols), len(cols)))
    else:
        # NA present: use the NA-aware cython routine
        baseCov = _algos.nancorr(com._ensure_float64(mat), cov=True,
                                 minp=min_periods)

    return self._constructor(baseCov, index=cols, columns=cols)
def corrwith(self, other, axis=0, drop=False):
    """
    Compute pairwise correlation between rows or columns of two DataFrame
    objects.

    Parameters
    ----------
    other : DataFrame
    axis : {0, 1}
        0 to compute column-wise, 1 for row-wise
    drop : boolean, default False
        Drop missing indices from result, default returns union of all

    Returns
    -------
    correls : Series
    """
    axis = self._get_axis_number(axis)
    if isinstance(other, Series):
        return self.apply(other.corr, axis=axis)

    this = self._get_numeric_data()
    other = other._get_numeric_data()

    left, right = this.align(other, join='inner', copy=False)

    # mask missing values
    # adding 0 * the other side forces NaN wherever either frame is
    # missing, so the moments below use only the common observations
    left = left + right * 0
    right = right + left * 0

    if axis == 1:
        left = left.T
        right = right.T

    # demeaned data
    ldem = left - left.mean()
    rdem = right - right.mean()

    num = (ldem * rdem).sum()
    dom = (left.count() - 1) * left.std() * right.std()

    correl = num / dom

    if not drop:
        # reindex against the union so unmatched labels appear as NaN
        raxis = 1 if axis == 0 else 0
        result_index = this._get_axis(raxis).union(other._get_axis(raxis))
        correl = correl.reindex(result_index)

    return correl
#----------------------------------------------------------------------
# ndarray-like stats methods
def count(self, axis=0, level=None, numeric_only=False):
    """
    Return Series with number of non-NA/null observations over requested
    axis. Works with non-floating point data as well (detects NaN and None)

    Parameters
    ----------
    axis : {0, 1}
        0 for row-wise, 1 for column-wise
    level : int or level name, default None
        If the axis is a MultiIndex (hierarchical), count along a
        particular level, collapsing into a DataFrame
    numeric_only : boolean, default False
        Include only float, int, boolean data

    Returns
    -------
    count : Series (or DataFrame if level specified)
    """
    axis = self._get_axis_number(axis)
    if level is not None:
        return self._count_level(level, axis=axis,
                                 numeric_only=numeric_only)

    if numeric_only:
        frame = self._get_numeric_data()
    else:
        frame = self

    # GH #423
    if len(frame._get_axis(axis)) == 0:
        # counting along an empty axis: all zeros
        result = Series(0, index=frame._get_agg_axis(axis))
    else:
        if axis == 1:
            counts = notnull(frame.values).sum(1)
            result = Series(counts, index=frame._get_agg_axis(axis))
        else:
            result = notnull(frame).sum(axis=axis)

    return result.astype('int64')
def _count_level(self, level, axis=0, numeric_only=False):
    # count() specialised for one level of a MultiIndex: counts are
    # grouped by that level's labels via a cython routine.
    if numeric_only:
        frame = self._get_numeric_data()
    else:
        frame = self

    if axis == 1:
        # normalise to counting down the rows
        frame = frame.T

    if not isinstance(frame.index, MultiIndex):
        raise TypeError("Can only count levels on hierarchical %s." %
                        self._get_axis_name(axis))

    # view the notnull mask as uint8 so the cython counter can consume it
    mask = notnull(frame.values).view(np.uint8)

    if isinstance(level, compat.string_types):
        level = self.index._get_level_number(level)

    level_index = frame.index.levels[level]
    labels = com._ensure_int64(frame.index.labels[level])
    counts = lib.count_level_2d(mask, labels, len(level_index))

    result = DataFrame(counts, index=level_index,
                       columns=frame.columns)

    if axis == 1:
        # undo the transpose applied above
        return result.T
    else:
        return result
def any(self, axis=None, bool_only=None, skipna=True, level=None,
        **kwargs):
    """
    Return whether any element is True over requested axis.
    %(na_action)s

    Parameters
    ----------
    axis : {0, 1}
        0 for row-wise, 1 for column-wise
    skipna : boolean, default True
        Exclude NA/null values. If an entire row/column is NA, the result
        will be NA
    level : int or level name, default None
        If the axis is a MultiIndex (hierarchical), count along a
        particular level, collapsing into a DataFrame
    bool_only : boolean, default None
        Only include boolean data.

    Returns
    -------
    any : Series (or DataFrame if level specified)
    """
    if axis is None:
        axis = self._stat_axis_number
    if level is not None:
        # hierarchical: group-wise reduction per level
        return self._agg_by_level('any', axis=axis, level=level,
                                  skipna=skipna)
    return self._reduce(nanops.nanany, axis=axis, skipna=skipna,
                        numeric_only=bool_only, filter_type='bool')
def all(self, axis=None, bool_only=None, skipna=True, level=None,
        **kwargs):
    """
    Return whether all elements are True over requested axis.
    %(na_action)s

    Parameters
    ----------
    axis : {0, 1}
        0 for row-wise, 1 for column-wise
    skipna : boolean, default True
        Exclude NA/null values. If an entire row/column is NA, the result
        will be NA
    level : int or level name, default None
        If the axis is a MultiIndex (hierarchical), count along a
        particular level, collapsing into a DataFrame
    bool_only : boolean, default None
        Only include boolean data.

    Returns
    -------
    all : Series (or DataFrame if level specified)
    """
    if axis is None:
        axis = self._stat_axis_number
    if level is not None:
        # hierarchical: group-wise reduction per level
        return self._agg_by_level('all', axis=axis, level=level,
                                  skipna=skipna)
    return self._reduce(nanops.nanall, axis=axis, skipna=skipna,
                        numeric_only=bool_only, filter_type='bool')
def _reduce(self, op, axis=0, skipna=True, numeric_only=None,
            filter_type=None, name=None, **kwds):
    # Common engine behind sum/mean/any/all/... reductions.
    # ``filter_type`` selects which columns survive a numeric_only
    # request: 'bool' for any/all, None/'numeric' otherwise.
    axis = self._get_axis_number(axis)
    f = lambda x: op(x, axis=axis, skipna=skipna, **kwds)
    labels = self._get_agg_axis(axis)

    # exclude timedelta/datetime unless we are uniform types
    if axis == 1 and self._is_mixed_type and self._is_datelike_mixed_type:
        numeric_only = True

    if numeric_only is None:
        # optimistic: try reducing the whole ndarray at once
        try:
            values = self.values
            result = f(values)
        except Exception as e:

            # try by-column first
            if filter_type is None and axis == 0:
                try:

                    # this can end up with a non-reduction
                    # but not always. if the types are mixed
                    # with datelike then need to make sure a series
                    result = self.apply(f, reduce=False)
                    if result.ndim == self.ndim:
                        result = result.iloc[0]
                    return result
                except:
                    pass

            # last resort: drop the columns that cannot participate
            if filter_type is None or filter_type == 'numeric':
                data = self._get_numeric_data()
            elif filter_type == 'bool':
                data = self._get_bool_data()
            else:  # pragma: no cover
                e = NotImplementedError("Handling exception with filter_"
                                        "type %s not implemented."
                                        % filter_type)
                raise_with_traceback(e)
            result = f(data.values)
            labels = data._get_agg_axis(axis)
    else:
        if numeric_only:
            if filter_type is None or filter_type == 'numeric':
                data = self._get_numeric_data()
            elif filter_type == 'bool':
                data = self._get_bool_data()
            else:  # pragma: no cover
                msg = ("Generating numeric_only data with filter_type %s"
                       "not supported." % filter_type)
                raise NotImplementedError(msg)
            values = data.values
            labels = data._get_agg_axis(axis)
        else:
            values = self.values
        result = f(values)

    if result.dtype == np.object_:
        # an object result from a numeric/bool reduction can usually be
        # coerced back to a concrete dtype
        try:
            if filter_type is None or filter_type == 'numeric':
                result = result.astype(np.float64)
            elif filter_type == 'bool' and notnull(result).all():
                result = result.astype(np.bool_)
        except (ValueError, TypeError):

            # try to coerce to the original dtypes item by item if we can
            if axis == 0:
                result = com._coerce_to_dtypes(result, self.dtypes)

    return Series(result, index=labels)
def idxmin(self, axis=0, skipna=True):
    """
    Return index of first occurrence of minimum over requested axis.
    NA/null values are excluded.

    Parameters
    ----------
    axis : {0, 1}
        0 for row-wise, 1 for column-wise
    skipna : boolean, default True
        Exclude NA/null values. If an entire row/column is NA, the result
        will be NA

    Returns
    -------
    idxmin : Series

    Notes
    -----
    This method is the DataFrame version of ``ndarray.argmin``.

    See Also
    --------
    Series.idxmin
    """
    axis = self._get_axis_number(axis)
    indices = nanops.nanargmin(self.values, axis=axis, skipna=skipna)
    index = self._get_axis(axis)
    # nanargmin returns -1 for all-NA slices; map those to NA
    result = [index[i] if i >= 0 else NA for i in indices]
    return Series(result, index=self._get_agg_axis(axis))
def idxmax(self, axis=0, skipna=True):
    """
    Return index of first occurrence of maximum over requested axis.
    NA/null values are excluded.

    Parameters
    ----------
    axis : {0, 1}
        0 for row-wise, 1 for column-wise
    skipna : boolean, default True
        Exclude NA/null values. If an entire row/column is NA, the result
        will be NA

    Returns
    -------
    idxmax : Series

    Notes
    -----
    This method is the DataFrame version of ``ndarray.argmax``.

    See Also
    --------
    Series.idxmax
    """
    axis = self._get_axis_number(axis)
    indices = nanops.nanargmax(self.values, axis=axis, skipna=skipna)
    index = self._get_axis(axis)
    # nanargmax returns -1 for all-NA slices; map those to NA
    result = [index[i] if i >= 0 else NA for i in indices]
    return Series(result, index=self._get_agg_axis(axis))
def _get_agg_axis(self, axis_num):
""" let's be explict about this """
if axis_num == 0:
return self.columns
elif axis_num == 1:
return self.index
else:
raise ValueError('Axis must be 0 or 1 (got %r)' % axis_num)
def mode(self, axis=0, numeric_only=False):
    """
    Gets the mode of each element along the axis selected. Empty if nothing
    has 2+ occurrences. Adds a row for each mode per label, fills in gaps
    with nan.

    Parameters
    ----------
    axis : {0, 1, 'index', 'columns'} (default 0)
        * 0/'index' : get mode of each column
        * 1/'columns' : get mode of each row
    numeric_only : boolean, default False
        if True, only apply to numeric columns

    Returns
    -------
    modes : DataFrame (sorted)
    """
    if numeric_only:
        data = self._get_numeric_data()
    else:
        data = self
    # delegate the per-Series mode computation to apply
    return data.apply(lambda s: s.mode(), axis=axis)
def quantile(self, q=0.5, axis=0, numeric_only=True):
    """
    Return values at the given quantile over requested axis, a la
    numpy.percentile.

    Parameters
    ----------
    q : float or array-like, default 0.5 (50% quantile)
        0 <= q <= 1, the quantile(s) to compute
    axis : {0, 1}
        0 for row-wise, 1 for column-wise
    numeric_only : boolean, default True
        If True, restrict computation to numeric columns.

    Returns
    -------
    quantiles : Series or DataFrame
        If ``q`` is an array, a DataFrame will be returned where the
        index is ``q``, the columns are the columns of self, and the
        values are the quantiles.
        If ``q`` is a float, a Series will be returned where the
        index is the columns of self and the values are the quantiles.

    Examples
    --------

    >>> df = DataFrame(np.array([[1, 1], [2, 10], [3, 100], [4, 100]]),
                       columns=['a', 'b'])
    >>> df.quantile(.1)
    a    1.3
    b    3.7
    dtype: float64
    >>> df.quantile([.1, .5])
           a     b
    0.1  1.3   3.7
    0.5  2.5  55.0
    """
    # convert quantiles to percentiles for the underlying routine
    per = np.asarray(q) * 100

    if not com.is_list_like(per):
        # scalar q: compute as a length-1 list, squeeze at the end
        per = [per]
        q = [q]
        squeeze = True
    else:
        squeeze = False

    def f(arr, per):
        # per-column quantile; datetimelike data is viewed as int64 so
        # the numeric quantile routine can handle it
        if arr._is_datelike_mixed_type:
            values = _values_from_object(arr).view('i8')
        else:
            values = arr.astype(float)
        values = values[notnull(values)]
        if len(values) == 0:
            return NA
        else:
            return _quantile(values, per)

    data = self._get_numeric_data() if numeric_only else self

    if axis == 1:
        data = data.T

    # need to know which cols are timestamp going in so that we can
    # map timestamp over them after getting the quantile.
    is_dt_col = data.dtypes.map(com.is_datetime64_dtype)
    is_dt_col = is_dt_col[is_dt_col].index

    quantiles = [[f(vals, x) for x in per]
                 for (_, vals) in data.iteritems()]

    result = DataFrame(quantiles, index=data._info_axis, columns=q).T
    if len(is_dt_col) > 0:
        # box the i8 quantiles of datetime columns back to Timestamps
        result[is_dt_col] = result[is_dt_col].applymap(lib.Timestamp)

    if squeeze:
        if result.shape == (1, 1):
            result = result.T.iloc[:, 0]  # don't want scalar
        else:
            result = result.T.squeeze()
        result.name = None  # For groupby, so it can set an index name
    return result
    def rank(self, axis=0, numeric_only=None, method='average',
             na_option='keep', ascending=True, pct=False):
        """
        Compute numerical data ranks (1 through n) along axis. Equal values are
        assigned a rank that is the average of the ranks of those values
        Parameters
        ----------
        axis : {0, 1}, default 0
            Ranks over columns (0) or rows (1)
        numeric_only : boolean, default None
            Include only float, int, boolean data
        method : {'average', 'min', 'max', 'first', 'dense'}
            * average: average rank of group
            * min: lowest rank in group
            * max: highest rank in group
            * first: ranks assigned in order they appear in the array
            * dense: like 'min', but rank always increases by 1 between groups
        na_option : {'keep', 'top', 'bottom'}
            * keep: leave NA values where they are
            * top: smallest rank if ascending
            * bottom: smallest rank if descending
        ascending : boolean, default True
            False for ranks by high (1) to low (N)
        pct : boolean, default False
            Computes percentage rank of data
        Returns
        -------
        ranks : DataFrame
        """
        axis = self._get_axis_number(axis)
        if numeric_only is None:
            # Try ranking everything first; if the data contains unrankable
            # (e.g. object) values, fall back to numeric columns only.
            try:
                ranks = algos.rank(self.values, axis=axis, method=method,
                                   ascending=ascending, na_option=na_option,
                                   pct=pct)
                return self._constructor(ranks, index=self.index,
                                         columns=self.columns)
            except TypeError:
                numeric_only = True
        if numeric_only:
            data = self._get_numeric_data()
        else:
            data = self
        ranks = algos.rank(data.values, axis=axis, method=method,
                           ascending=ascending, na_option=na_option, pct=pct)
        return self._constructor(ranks, index=data.index, columns=data.columns)
    def to_timestamp(self, freq=None, how='start', axis=0, copy=True):
        """
        Cast to DatetimeIndex of timestamps, at *beginning* of period
        Parameters
        ----------
        freq : string, default frequency of PeriodIndex
            Desired frequency
        how : {'s', 'e', 'start', 'end'}
            Convention for converting period to timestamp; start of period
            vs. end
        axis : {0, 1} default 0
            The axis to convert (the index by default)
        copy : boolean, default True
            If false then underlying input data is not copied
        Returns
        -------
        df : DataFrame with DatetimeIndex
        """
        new_data = self._data
        if copy:
            new_data = new_data.copy()
        axis = self._get_axis_number(axis)
        # BlockManager axes are reversed relative to the frame: manager
        # axis 1 is the frame's index, manager axis 0 the frame's columns.
        if axis == 0:
            new_data.set_axis(1, self.index.to_timestamp(freq=freq, how=how))
        elif axis == 1:
            new_data.set_axis(0, self.columns.to_timestamp(freq=freq, how=how))
        else:  # pragma: no cover
            raise AssertionError('Axis must be 0 or 1. Got %s' % str(axis))
        return self._constructor(new_data)
    def to_period(self, freq=None, axis=0, copy=True):
        """
        Convert DataFrame from DatetimeIndex to PeriodIndex with desired
        frequency (inferred from index if not passed)
        Parameters
        ----------
        freq : string, default
        axis : {0, 1}, default 0
            The axis to convert (the index by default)
        copy : boolean, default True
            If False then underlying input data is not copied
        Returns
        -------
        ts : TimeSeries with PeriodIndex
        """
        new_data = self._data
        if copy:
            new_data = new_data.copy()
        axis = self._get_axis_number(axis)
        # BlockManager axes are reversed relative to the frame: manager
        # axis 1 is the frame's index, manager axis 0 the frame's columns.
        if axis == 0:
            new_data.set_axis(1, self.index.to_period(freq=freq))
        elif axis == 1:
            new_data.set_axis(0, self.columns.to_period(freq=freq))
        else:  # pragma: no cover
            raise AssertionError('Axis must be 0 or 1. Got %s' % str(axis))
        return self._constructor(new_data)
    def isin(self, values):
        """
        Return boolean DataFrame showing whether each element in the
        DataFrame is contained in values.
        Parameters
        ----------
        values : iterable, Series, DataFrame or dictionary
            The result will only be true at a location if all the
            labels match. If `values` is a Series, that's the index. If
            `values` is a dictionary, the keys must be the column names,
            which must match. If `values` is a DataFrame,
            then both the index and column labels must match.
        Returns
        -------
        DataFrame of booleans
        Examples
        --------
        When ``values`` is a list:
        >>> df = DataFrame({'A': [1, 2, 3], 'B': ['a', 'b', 'f']})
        >>> df.isin([1, 3, 12, 'a'])
        A B
        0 True True
        1 False False
        2 True False
        When ``values`` is a dict:
        >>> df = DataFrame({'A': [1, 2, 3], 'B': [1, 4, 7]})
        >>> df.isin({'A': [1, 3], 'B': [4, 7, 12]})
        A B
        0 True False # Note that B didn't match the 1 here.
        1 False True
        2 True True
        When ``values`` is a Series or DataFrame:
        >>> df = DataFrame({'A': [1, 2, 3], 'B': ['a', 'b', 'f']})
        >>> other = DataFrame({'A': [1, 3, 3, 2], 'B': ['e', 'f', 'f', 'e']})
        >>> df.isin(other)
        A B
        0 True False
        1 False False # Column A in `other` has a 3, but not at index 1.
        2 True True
        """
        if isinstance(values, dict):
            from collections import defaultdict
            from pandas.tools.merge import concat
            # columns missing from the dict default to an empty list,
            # which produces an all-False column
            values = defaultdict(list, values)
            return concat((self.iloc[:, [i]].isin(values[col])
                           for i, col in enumerate(self.columns)), axis=1)
        elif isinstance(values, Series):
            if not values.index.is_unique:
                raise ValueError("ValueError: cannot compute isin with"
                                 " a duplicate axis.")
            # align on the index, then compare element-wise
            return self.eq(values.reindex_like(self), axis='index')
        elif isinstance(values, DataFrame):
            if not (values.columns.is_unique and values.index.is_unique):
                raise ValueError("ValueError: cannot compute isin with"
                                 " a duplicate axis.")
            # align on both axes, then compare element-wise
            return self.eq(values.reindex_like(self))
        else:
            if not is_list_like(values):
                raise TypeError("only list-like or dict-like objects are"
                                " allowed to be passed to DataFrame.isin(), "
                                "you passed a "
                                "{0!r}".format(type(values).__name__))
            # flat membership test over the raveled values
            return DataFrame(lib.ismember(self.values.ravel(),
                                          set(values)).reshape(self.shape),
                             self.index,
                             self.columns)
#----------------------------------------------------------------------
# Deprecated stuff
def combineAdd(self, other):
"""
Add two DataFrame objects and do not propagate
NaN values, so if for a (column, time) one frame is missing a
value, it will default to the other frame's value (which might
be NaN as well)
Parameters
----------
other : DataFrame
Returns
-------
DataFrame
"""
return self.add(other, fill_value=0.)
def combineMult(self, other):
"""
Multiply two DataFrame objects and do not propagate NaN values, so if
for a (column, time) one frame is missing a value, it will default to
the other frame's value (which might be NaN as well)
Parameters
----------
other : DataFrame
Returns
-------
DataFrame
"""
return self.mul(other, fill_value=1.)
DataFrame._setup_axes(['index', 'columns'], info_axis=1, stat_axis=0,
axes_are_reversed=True, aliases={'rows': 0})
DataFrame._add_numeric_operations()
_EMPTY_SERIES = Series([])
def _arrays_to_mgr(arrays, arr_names, index, columns, dtype=None):
    """
    Segregate Series based on type and coerce into matrices.
    Needs to handle a lot of exceptional cases.
    """
    # figure out the index, if necessary
    if index is None:
        index = extract_index(arrays)
    else:
        index = _ensure_index(index)
    # don't force copy because getting jammed in an ndarray anyway
    arrays = _homogenize(arrays, index, dtype)
    # from BlockManager perspective: axes are [columns, index] because the
    # manager stores data transposed relative to the frame
    axes = [_ensure_index(columns), _ensure_index(index)]
    return create_block_manager_from_arrays(arrays, arr_names, axes)
def extract_index(data):
    """Derive the result index from a collection of column inputs.

    *data* may mix Series (their indexes are unioned), dicts (their keys
    are unioned) and 1-dim raw array-likes (their common length is used).

    Raises
    ------
    ValueError
        If only scalars are passed, if raw arrays have differing lengths,
        if dicts are mixed with raw arrays, or if raw-array length
        disagrees with the Series-implied index length.
    """
    from pandas.core.index import _union_indexes
    index = None
    if len(data) == 0:
        index = Index([])
    elif len(data) > 0:
        raw_lengths = []
        indexes = []
        have_raw_arrays = False
        have_series = False
        have_dicts = False
        # classify each input and collect candidate indexes / lengths
        for v in data:
            if isinstance(v, Series):
                have_series = True
                indexes.append(v.index)
            elif isinstance(v, dict):
                have_dicts = True
                indexes.append(list(v.keys()))
            elif is_list_like(v) and getattr(v, 'ndim', 1) == 1:
                have_raw_arrays = True
                raw_lengths.append(len(v))
        if not indexes and not raw_lengths:
            # fixed doubled word in the original message ("must must pass")
            raise ValueError('If using all scalar values, you must pass'
                             ' an index')
        if have_series or have_dicts:
            index = _union_indexes(indexes)
        if have_raw_arrays:
            lengths = list(set(raw_lengths))
            if len(lengths) > 1:
                raise ValueError('arrays must all be same length')
            if have_dicts:
                raise ValueError('Mixing dicts with non-Series may lead to '
                                 'ambiguous ordering.')
            if have_series:
                # raw arrays must agree with the index implied by the Series
                if lengths[0] != len(index):
                    msg = ('array length %d does not match index length %d'
                           % (lengths[0], len(index)))
                    raise ValueError(msg)
            else:
                index = Index(np.arange(lengths[0]))
    return _ensure_index(index)
def _prep_ndarray(values, copy=True):
if not isinstance(values, (np.ndarray, Series)):
if len(values) == 0:
return np.empty((0, 0), dtype=object)
def convert(v):
return com._possibly_convert_platform(v)
# we could have a 1-dim or 2-dim list here
# this is equiv of np.asarray, but does object conversion
# and platform dtype preservation
try:
if com.is_list_like(values[0]) or hasattr(values[0], 'len'):
values = np.array([convert(v) for v in values])
else:
values = convert(values)
except:
values = convert(values)
else:
# drop subclass info, do not copy data
values = np.asarray(values)
if copy:
values = values.copy()
if values.ndim == 1:
values = values.reshape((values.shape[0], 1))
elif values.ndim != 2:
raise ValueError('Must pass 2-d input')
return values
def _to_arrays(data, columns, coerce_float=False, dtype=None):
"""
Return list of arrays, columns
"""
if isinstance(data, DataFrame):
if columns is not None:
arrays = [data.icol(i).values for i, col in enumerate(data.columns)
if col in columns]
else:
columns = data.columns
arrays = [data.icol(i).values for i in range(len(columns))]
return arrays, columns
if not len(data):
if isinstance(data, np.ndarray):
columns = data.dtype.names
if columns is not None:
return [[]] * len(columns), columns
return [], [] # columns if columns is not None else []
if isinstance(data[0], (list, tuple)):
return _list_to_arrays(data, columns, coerce_float=coerce_float,
dtype=dtype)
elif isinstance(data[0], collections.Mapping):
return _list_of_dict_to_arrays(data, columns,
coerce_float=coerce_float,
dtype=dtype)
elif isinstance(data[0], Series):
return _list_of_series_to_arrays(data, columns,
coerce_float=coerce_float,
dtype=dtype)
elif (isinstance(data, (np.ndarray, Series))
and data.dtype.names is not None):
columns = list(data.dtype.names)
arrays = [data[k] for k in columns]
return arrays, columns
else:
# last ditch effort
data = lmap(tuple, data)
return _list_to_arrays(data, columns,
coerce_float=coerce_float,
dtype=dtype)
def _masked_rec_array_to_mgr(data, index, columns, dtype, copy):
    """ extract from a masked rec array and create the manager """
    # essentially process a record array then fill it
    fill_value = data.fill_value
    fdata = ma.getdata(data)
    if index is None:
        # try row names first, then fall back to a default RangeIndex-like
        index = _get_names_from_index(fdata)
        if index is None:
            index = _default_index(len(data))
    index = _ensure_index(index)
    if columns is not None:
        columns = _ensure_index(columns)
    arrays, arr_columns = _to_arrays(fdata, columns)
    # fill if needed: replace masked entries with each field's fill value,
    # upcasting the array dtype when necessary to hold it
    new_arrays = []
    for fv, arr, col in zip(fill_value, arrays, arr_columns):
        mask = ma.getmaskarray(data[col])
        if mask.any():
            arr, fv = _maybe_upcast(arr, fill_value=fv, copy=True)
            arr[mask] = fv
        new_arrays.append(arr)
    # create the manager
    arrays, arr_columns = _reorder_arrays(new_arrays, arr_columns, columns)
    if columns is None:
        columns = arr_columns
    mgr = _arrays_to_mgr(arrays, arr_columns, index, columns)
    if copy:
        mgr = mgr.copy()
    return mgr
def _reorder_arrays(arrays, arr_columns, columns):
# reorder according to the columns
if (columns is not None and len(columns) and arr_columns is not None and
len(arr_columns)):
indexer = _ensure_index(
arr_columns).get_indexer(columns)
arr_columns = _ensure_index(
[arr_columns[i] for i in indexer])
arrays = [arrays[i] for i in indexer]
return arrays, arr_columns
def _list_to_arrays(data, columns, coerce_float=False, dtype=None):
    """Convert a list of row tuples/lists into per-column object arrays."""
    if len(data) > 0 and isinstance(data[0], tuple):
        rows = lib.to_object_array_tuples(data)
    else:
        # list of lists
        rows = lib.to_object_array(data)
    content = list(rows.T)
    return _convert_object_array(content, columns, dtype=dtype,
                                 coerce_float=coerce_float)
def _list_of_series_to_arrays(data, columns, coerce_float=False, dtype=None):
    """Align a list of Series on a common column index and stack them."""
    from pandas.core.index import _get_combined_index
    if columns is None:
        # union of all Series indexes becomes the column labels
        columns = _get_combined_index([
            s.index for s in data if getattr(s, 'index', None) is not None
        ])
    # cache indexers keyed by id(index) so Series sharing the same index
    # object only compute the reindexing once
    indexer_cache = {}
    aligned_values = []
    for s in data:
        index = getattr(s, 'index', None)
        if index is None:
            index = _default_index(len(s))
        if id(index) in indexer_cache:
            indexer = indexer_cache[id(index)]
        else:
            indexer = indexer_cache[id(index)] = index.get_indexer(columns)
        values = _values_from_object(s)
        aligned_values.append(com.take_1d(values, indexer))
    values = np.vstack(aligned_values)
    if values.dtype == np.object_:
        # object rows still need soft dtype conversion per column
        content = list(values.T)
        return _convert_object_array(content, columns, dtype=dtype,
                                     coerce_float=coerce_float)
    else:
        return values.T, columns
def _list_of_dict_to_arrays(data, columns, coerce_float=False, dtype=None):
    """Convert a list of dicts into per-column object arrays.

    When *columns* is not supplied, the labels are the union of all keys
    in first-seen order.
    """
    if columns is None:
        gen = (list(x.keys()) for x in data)
        columns = lib.fast_unique_multiple_list_gen(gen)
    # assure that they are of the base dict class and not of derived
    # classes.  (The original used the buggy-prone ``and/or`` idiom, which
    # needlessly copied falsy plain dicts; a conditional expression is exact.)
    data = [d if type(d) is dict else dict(d) for d in data]
    content = list(lib.dicts_to_array(data, list(columns)).T)
    return _convert_object_array(content, columns, dtype=dtype,
                                 coerce_float=coerce_float)
def _convert_object_array(content, columns, coerce_float=False, dtype=None):
    """Soft-convert a list of object arrays and pair with column labels.

    Each array is passed through type inference (and datetime casting)
    unless an explicit ``object`` dtype was requested.
    """
    if columns is None:
        columns = _default_index(len(content))
    else:
        if len(columns) != len(content):  # pragma: no cover
            # caller's responsibility to check for this...
            raise AssertionError('%d columns passed, passed data had %s '
                                 'columns' % (len(columns), len(content)))
    # provide soft conversion of object dtypes
    def convert(arr):
        # ``np.object`` was merely an alias of the builtin ``object`` (and
        # was removed in numpy 1.24), so a single comparison suffices
        if dtype != object:
            arr = lib.maybe_convert_objects(arr, try_float=coerce_float)
            arr = com._possibly_cast_to_datetime(arr, dtype)
        return arr
    arrays = [convert(arr) for arr in content]
    return arrays, columns
def _get_names_from_index(data):
    """Build a row index from the ``.name`` attributes of *data*.

    Returns plain positional labels when no element is named; unnamed
    elements among named ones become ``'Unnamed %d'`` placeholders.
    """
    index = lrange(len(data))
    if not any(getattr(s, 'name', None) is not None for s in data):
        return index
    count = 0
    for i, s in enumerate(data):
        name = getattr(s, 'name', None)
        if name is None:
            index[i] = 'Unnamed %d' % count
            count += 1
        else:
            index[i] = name
    return index
def _homogenize(data, index, dtype=None):
    """Align every column input against *index* and coerce to *dtype*."""
    from pandas.core.series import _sanitize_array
    # object-dtype view of the index, built lazily only if a dict shows up
    oindex = None
    homogenized = []
    for v in data:
        if isinstance(v, Series):
            if dtype is not None:
                v = v.astype(dtype)
            if v.index is not index:
                # Forces alignment. No need to copy data since we
                # are putting it into an ndarray later
                v = v.reindex(index, copy=False)
        else:
            if isinstance(v, dict):
                if oindex is None:
                    oindex = index.astype('O')
                if type(v) == dict:
                    # fast cython method
                    v = lib.fast_multiget(v, oindex, default=NA)
                else:
                    # dict subclass: respect its (possibly overridden) .get
                    v = lib.map_infer(oindex, v.get)
            v = _sanitize_array(v, index, dtype=dtype, copy=False,
                                raise_cast_failure=False)
        homogenized.append(v)
    return homogenized
def _from_nested_dict(data):
    """Transpose a dict-of-dicts: {index: {col: v}} -> {col: {index: v}}."""
    # TODO: this should be seriously cythonized
    transposed = OrderedDict()
    for row_key, row in compat.iteritems(data):
        for col_key, value in compat.iteritems(row):
            transposed.setdefault(col_key, OrderedDict())[row_key] = value
    return transposed
def _put_str(s, space):
return ('%s' % s)[:space].ljust(space)
#----------------------------------------------------------------------
# Add plotting methods to DataFrame
import pandas.tools.plotting as gfx
DataFrame.plot = gfx.plot_frame
DataFrame.hist = gfx.hist_frame
@Appender(_shared_docs['boxplot'] % _shared_doc_kwargs)
def boxplot(self, column=None, by=None, ax=None, fontsize=None,
            rot=0, grid=True, figsize=None, layout=None, return_type=None,
            **kwds):
    # Thin wrapper around pandas.tools.plotting.boxplot, attached below as
    # DataFrame.boxplot; its docstring is injected by @Appender above.
    import pandas.tools.plotting as plots
    import matplotlib.pyplot as plt
    ax = plots.boxplot(self, column=column, by=by, ax=ax,
                       fontsize=fontsize, grid=grid, rot=rot,
                       figsize=figsize, layout=layout, return_type=return_type,
                       **kwds)
    plt.draw_if_interactive()
    return ax
DataFrame.boxplot = boxplot
ops.add_flex_arithmetic_methods(DataFrame, **ops.frame_flex_funcs)
ops.add_special_arithmetic_methods(DataFrame, **ops.frame_special_funcs)
if __name__ == '__main__':
import nose
nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure'],
exit=False)
| 37.001828 | 109 | 0.544038 |
f0110d5ea1d9f0bfc4ad92240c8d101dcbfdd22c | 193 | py | Python | tests/quick/se/40.m5threads-test-atomic/test.py | mandaltj/gem5_chips | b9c0c602241ffda7851c1afb32fa01f295bb98fd | [
"BSD-3-Clause"
] | 135 | 2016-10-21T03:31:49.000Z | 2022-03-25T01:22:20.000Z | tests/quick/se/40.m5threads-test-atomic/test.py | mandaltj/gem5_chips | b9c0c602241ffda7851c1afb32fa01f295bb98fd | [
"BSD-3-Clause"
] | 35 | 2017-03-10T17:57:46.000Z | 2022-02-18T17:34:16.000Z | tests/quick/se/40.m5threads-test-atomic/test.py | mandaltj/gem5_chips | b9c0c602241ffda7851c1afb32fa01f295bb98fd | [
"BSD-3-Clause"
] | 48 | 2016-12-08T12:03:13.000Z | 2022-02-16T09:16:13.000Z | process = Process(executable = binpath('m5threads', 'test_atomic'),
cmd = ['test_atomic', str(nb_cores)])
for i in range(nb_cores):
root.system.cpu[i].workload = process
| 32.166667 | 67 | 0.642487 |
ffc7dc1837223ad3e751d7e597e39301ab0bf2f9 | 3,368 | py | Python | dataloaders/datasets/combine_dbs.py | dfsbbl/pytorch-deeplabv3 | 66877e98bcfa1caf8e3074418221ed83df921bb2 | [
"MIT"
] | null | null | null | dataloaders/datasets/combine_dbs.py | dfsbbl/pytorch-deeplabv3 | 66877e98bcfa1caf8e3074418221ed83df921bb2 | [
"MIT"
] | null | null | null | dataloaders/datasets/combine_dbs.py | dfsbbl/pytorch-deeplabv3 | 66877e98bcfa1caf8e3074418221ed83df921bb2 | [
"MIT"
] | null | null | null | import torch.utils.data as data
class CombineDBs(data.Dataset):
    """Union of several segmentation datasets, minus an exclusion set.

    Image ids present in any of *dataloaders* are kept (deduplicated);
    ids present in any dataset of *excluded* are removed.  Each item is
    served by the first dataset that contains its id.
    """
    NUM_CLASSES = 21

    def __init__(self, dataloaders, excluded=None):
        self.dataloaders = dataloaders
        self.excluded = excluded
        self.im_ids = []
        # Combine object lists
        for dl in dataloaders:
            for elem in dl.im_ids:
                if elem not in self.im_ids:
                    self.im_ids.append(elem)
        # Exclude
        if excluded:
            for dl in excluded:
                for elem in dl.im_ids:
                    if elem in self.im_ids:
                        self.im_ids.remove(elem)
        # Get object pointers: for every kept id, remember which dataset
        # (db_ii) and which position within it (cat_ii) serves the sample.
        self.cat_list = []
        self.im_list = []
        new_im_ids = []
        num_images = 0
        for ii, dl in enumerate(dataloaders):
            for jj, curr_im_id in enumerate(dl.im_ids):
                if (curr_im_id in self.im_ids) and (
                        curr_im_id not in new_im_ids):
                    num_images += 1
                    new_im_ids.append(curr_im_id)
                    self.cat_list.append({'db_ii': ii, 'cat_ii': jj})
        self.im_ids = new_im_ids
        print('Combined number of images: {:d}'.format(num_images))

    def __getitem__(self, index):
        _db_ii = self.cat_list[index]["db_ii"]
        _cat_ii = self.cat_list[index]['cat_ii']
        sample = self.dataloaders[_db_ii].__getitem__(_cat_ii)
        if 'meta' in sample.keys():
            sample['meta']['db'] = str(self.dataloaders[_db_ii])
        return sample

    def __len__(self):
        return len(self.cat_list)

    def __str__(self):
        include_db = [str(db) for db in self.dataloaders]
        # BUG FIX: ``excluded`` defaults to None, which is not iterable;
        # guard so str() works on a dataset built without exclusions.
        exclude_db = [str(db) for db in (self.excluded or [])]
        return 'Included datasets:' + \
               str(include_db) + '\n' + 'Excluded datasets:' + str(exclude_db)
if __name__ == "__main__":
import matplotlib.pyplot as plt
from dataloaders.datasets import pascal, sbd
from dataloaders import sbd
import torch
import numpy as np
from dataloaders.utils import decode_segmap
import argparse
parser = argparse.ArgumentParser()
args = parser.parse_args()
args.base_size = 513
args.crop_size = 513
pascal_voc_val = pascal.VOCSegmentation(args, split='val')
sbd = sbd.SBDSegmentation(args, split=['train', 'val'])
pascal_voc_train = pascal.VOCSegmentation(args, split='train')
dataset = CombineDBs([pascal_voc_train, sbd], excluded=[pascal_voc_val])
dataloader = torch.utils.data.DataLoader(
dataset, batch_size=2, shuffle=True, num_workers=0)
for ii, sample in enumerate(dataloader):
for jj in range(sample["image"].size()[0]):
img = sample['image'].numpy()
gt = sample['label'].numpy()
tmp = np.array(gt[jj]).astype(np.uint8)
segmap = decode_segmap(tmp, dataset='pascal')
img_tmp = np.transpose(img[jj], axes=[1, 2, 0])
img_tmp *= (0.229, 0.224, 0.225)
img_tmp += (0.485, 0.456, 0.406)
img_tmp *= 255.0
img_tmp = img_tmp.astype(np.uint8)
plt.figure()
plt.title('display')
plt.subplot(211)
plt.imshow(img_tmp)
plt.subplot(212)
plt.imshow(segmap)
if ii == 1:
break
plt.show(block=True)
| 32.07619 | 76 | 0.574228 |
8d3c33334ccb416d48f66a5362837377f9559d0a | 83 | py | Python | openpifpaf_posetrack/decoder/pose_distance/__init__.py | vita-epfl/openpifpaf_posetrack | 282ba063450d523728637167420d9ade4d9c1e65 | [
"MIT"
] | 9 | 2021-03-04T04:47:27.000Z | 2021-03-30T01:48:55.000Z | openpifpaf_posetrack/decoder/pose_distance/__init__.py | vita-epfl/openpifpaf_posetrack | 282ba063450d523728637167420d9ade4d9c1e65 | [
"MIT"
] | 4 | 2021-03-16T03:24:27.000Z | 2021-04-01T19:22:02.000Z | openpifpaf_posetrack/decoder/pose_distance/__init__.py | vita-epfl/openpifpaf_posetrack | 282ba063450d523728637167420d9ade4d9c1e65 | [
"MIT"
] | null | null | null | from .crafted import Crafted
from .euclidean import Euclidean
from .oks import Oks
| 20.75 | 32 | 0.819277 |
1320629aeb4bd1a7459449a19f3d9756442f482b | 6,154 | py | Python | homeassistant/components/image_processing/__init__.py | paranoidmonoid/core | c4f98a3084f2648749d3e4eeeade9696630d9abd | [
"Apache-2.0"
] | 1 | 2021-03-23T07:20:03.000Z | 2021-03-23T07:20:03.000Z | homeassistant/components/image_processing/__init__.py | paranoidmonoid/core | c4f98a3084f2648749d3e4eeeade9696630d9abd | [
"Apache-2.0"
] | 51 | 2020-08-03T07:30:44.000Z | 2022-03-22T06:02:42.000Z | homeassistant/components/image_processing/__init__.py | mkrzywie/core | 0503b14fbe5a50bc725a22bcaf40167445689dc8 | [
"Apache-2.0"
] | null | null | null | """Provides functionality to interact with image processing services."""
import asyncio
from datetime import timedelta
import logging
from typing import final
import voluptuous as vol
from homeassistant.const import (
ATTR_ENTITY_ID,
ATTR_NAME,
CONF_ENTITY_ID,
CONF_NAME,
CONF_SOURCE,
)
from homeassistant.core import callback
from homeassistant.exceptions import HomeAssistantError
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.config_validation import make_entity_service_schema
from homeassistant.helpers.entity import Entity
from homeassistant.helpers.entity_component import EntityComponent
from homeassistant.util.async_ import run_callback_threadsafe
# mypy: allow-untyped-defs, no-check-untyped-defs
_LOGGER = logging.getLogger(__name__)
DOMAIN = "image_processing"
SCAN_INTERVAL = timedelta(seconds=10)
DEVICE_CLASSES = [
"alpr", # Automatic license plate recognition
"face", # Face
"ocr", # OCR
]
SERVICE_SCAN = "scan"
EVENT_DETECT_FACE = "image_processing.detect_face"
ATTR_AGE = "age"
ATTR_CONFIDENCE = "confidence"
ATTR_FACES = "faces"
ATTR_GENDER = "gender"
ATTR_GLASSES = "glasses"
ATTR_MOTION = "motion"
ATTR_TOTAL_FACES = "total_faces"
CONF_CONFIDENCE = "confidence"
DEFAULT_TIMEOUT = 10
DEFAULT_CONFIDENCE = 80
SOURCE_SCHEMA = vol.Schema(
{
vol.Required(CONF_ENTITY_ID): cv.entity_domain("camera"),
vol.Optional(CONF_NAME): cv.string,
}
)
PLATFORM_SCHEMA = cv.PLATFORM_SCHEMA.extend(
{
vol.Optional(CONF_SOURCE): vol.All(cv.ensure_list, [SOURCE_SCHEMA]),
vol.Optional(CONF_CONFIDENCE, default=DEFAULT_CONFIDENCE): vol.All(
vol.Coerce(float), vol.Range(min=0, max=100)
),
}
)
PLATFORM_SCHEMA_BASE = cv.PLATFORM_SCHEMA_BASE.extend(PLATFORM_SCHEMA.schema)
async def async_setup(hass, config):
    """Set up the image processing."""
    component = EntityComponent(_LOGGER, DOMAIN, hass, SCAN_INTERVAL)
    await component.async_setup(config)
    async def async_scan_service(service):
        """Service handler for scan."""
        image_entities = await component.async_extract_from_service(service)
        update_tasks = []
        for entity in image_entities:
            # propagate the service-call context so resulting state changes
            # are attributed to the caller
            entity.async_set_context(service.context)
            update_tasks.append(asyncio.create_task(entity.async_update_ha_state(True)))
        if update_tasks:
            # run all entity updates concurrently
            await asyncio.wait(update_tasks)
    hass.services.async_register(
        DOMAIN, SERVICE_SCAN, async_scan_service, schema=make_entity_service_schema({})
    )
    return True
class ImageProcessingEntity(Entity):
    """Base entity class for image processing."""
    # Seconds to wait for the camera to deliver a frame.
    timeout = DEFAULT_TIMEOUT
    @property
    def camera_entity(self):
        """Return camera entity id from process pictures."""
        return None
    @property
    def confidence(self):
        """Return minimum confidence for do some things."""
        return None
    def process_image(self, image):
        """Process image.

        Synchronous hook; subclasses override it and receive raw image bytes.
        """
        raise NotImplementedError()
    async def async_process_image(self, image):
        """Process image (runs the synchronous hook in the executor)."""
        return await self.hass.async_add_executor_job(self.process_image, image)
    async def async_update(self):
        """Update image and process it.

        This method is a coroutine.
        """
        camera = self.hass.components.camera
        image = None
        try:
            image = await camera.async_get_image(
                self.camera_entity, timeout=self.timeout
            )
        except HomeAssistantError as err:
            # Camera failures are logged and swallowed; the entity simply
            # keeps its previous state until the next scan.
            _LOGGER.error("Error on receive image from entity: %s", err)
            return
        # process image data
        await self.async_process_image(image.content)
class ImageProcessingFaceEntity(ImageProcessingEntity):
    """Base entity class for face image processing."""
    def __init__(self):
        """Initialize base face identify/verify entity."""
        # List of face dicts (see async_process_faces) plus total face count.
        self.faces = []
        self.total_faces = 0
    @property
    def state(self):
        """Return the state of the entity."""
        confidence = 0
        state = None
        # No confidence support
        if not self.confidence:
            return self.total_faces
        # Search high confidence: report the name/motion attribute of the
        # face with the highest confidence value.
        for face in self.faces:
            if ATTR_CONFIDENCE not in face:
                continue
            f_co = face[ATTR_CONFIDENCE]
            if f_co > confidence:
                confidence = f_co
                for attr in [ATTR_NAME, ATTR_MOTION]:
                    if attr in face:
                        state = face[attr]
                        break
        return state
    @property
    def device_class(self):
        """Return the class of this device, from component DEVICE_CLASSES."""
        return "face"
    @final
    @property
    def state_attributes(self):
        """Return device specific state attributes."""
        return {ATTR_FACES: self.faces, ATTR_TOTAL_FACES: self.total_faces}
    def process_faces(self, faces, total):
        """Send event with detected faces and store data."""
        # Synchronous wrapper: marshal the call onto the event loop thread
        # and block until it has run.
        run_callback_threadsafe(
            self.hass.loop, self.async_process_faces, faces, total
        ).result()
    @callback
    def async_process_faces(self, faces, total):
        """Send event with detected faces and store data.
        known are a dict in follow format:
        [
            {
                ATTR_CONFIDENCE: 80,
                ATTR_NAME: 'Name',
                ATTR_AGE: 12.0,
                ATTR_GENDER: 'man',
                ATTR_MOTION: 'smile',
                ATTR_GLASSES: 'sunglasses'
            },
        ]
        This method must be run in the event loop.
        """
        # Send events: one event per face above the confidence threshold.
        for face in faces:
            if ATTR_CONFIDENCE in face and self.confidence:
                if face[ATTR_CONFIDENCE] < self.confidence:
                    continue
            face.update({ATTR_ENTITY_ID: self.entity_id})
            self.hass.async_add_job(self.hass.bus.async_fire, EVENT_DETECT_FACE, face)
        # Update entity store
        self.faces = faces
        self.total_faces = total
| 27.846154 | 88 | 0.647709 |
06a19487fd862d143a552cb3bfb57a1fdeeef3c0 | 997 | py | Python | workalendar/europe/hungary.py | chopanpma/workalendar | 619687b9d788d0e8294d2831687d58a3444854a1 | [
"MIT"
] | null | null | null | workalendar/europe/hungary.py | chopanpma/workalendar | 619687b9d788d0e8294d2831687d58a3444854a1 | [
"MIT"
] | null | null | null | workalendar/europe/hungary.py | chopanpma/workalendar | 619687b9d788d0e8294d2831687d58a3444854a1 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from workalendar.core import WesternCalendar, ChristianMixin
from workalendar.registry import iso_register
@iso_register('HU')
class Hungary(WesternCalendar, ChristianMixin):
    'Hungary'
    # Movable (Easter-relative) feasts observed in Hungary.
    include_easter_sunday = True
    include_easter_monday = True
    include_whit_sunday = True
    whit_sunday_label = "Pentecost Sunday"
    include_whit_monday = True
    whit_monday_label = "Pentecost Monday"
    include_boxing_day = True
    boxing_day_label = "Second Day of Christmas"
    include_all_saints = True
    # Fixed-date national holidays as (month, day, label) tuples.
    FIXED_HOLIDAYS = WesternCalendar.FIXED_HOLIDAYS + (
        (3, 15, "National Day"),
        (5, 1, "Labour Day"),
        (8, 20, "St Stephen's Day"),
        (10, 23, "National Day"),
    )
    def get_variable_days(self, year):
        # As of 2017, Good Friday became a holiday
        self.include_good_friday = (year >= 2017)
        days = super(Hungary, self).get_variable_days(year)
        return days
| 30.212121 | 60 | 0.686058 |
e46178af54f54d5a05ed20eabf36a5136ebbf066 | 27,615 | py | Python | tests/test_base.py | jack1142/uvloop | cdd2218fa3bb89eeec097bf0b7828897ef185aed | [
"Apache-2.0",
"MIT"
] | null | null | null | tests/test_base.py | jack1142/uvloop | cdd2218fa3bb89eeec097bf0b7828897ef185aed | [
"Apache-2.0",
"MIT"
] | null | null | null | tests/test_base.py | jack1142/uvloop | cdd2218fa3bb89eeec097bf0b7828897ef185aed | [
"Apache-2.0",
"MIT"
] | null | null | null | import asyncio
import fcntl
import logging
import os
import sys
import threading
import time
import uvloop
import unittest
import weakref
from unittest import mock
from uvloop._testbase import UVTestCase, AIOTestCase
class _TestBase:
    def test_close(self):
        # close() must flip both the private flag and the public predicate.
        self.assertFalse(self.loop._closed)
        self.assertFalse(self.loop.is_closed())
        self.loop.close()
        self.assertTrue(self.loop._closed)
        self.assertTrue(self.loop.is_closed())
        # it should be possible to call close() more than once
        self.loop.close()
        self.loop.close()
        # operation blocked when the loop is closed
        f = asyncio.Future()
        self.assertRaises(RuntimeError, self.loop.run_forever)
        self.assertRaises(RuntimeError, self.loop.run_until_complete, f)
    def test_handle_weakref(self):
        # Timer/callback handles must support weak references.
        wd = weakref.WeakValueDictionary()
        h = self.loop.call_soon(lambda: None)
        wd['h'] = h  # Would fail without __weakref__ slot.
    def test_call_soon_1(self):
        calls = []
        def cb(inc):
            calls.append(inc)
            self.loop.stop()
        self.loop.call_soon(cb, 10)
        # a cancelled handle must not run and must advertise its state in repr()
        h = self.loop.call_soon(cb, 100)
        self.assertIn('.cb', repr(h))
        h.cancel()
        self.assertIn('cancelled', repr(h))
        self.loop.call_soon(cb, 1)
        self.loop.run_forever()
        # cb(10) ran, cb(100) was cancelled, cb(1) ran after the restarted loop
        self.assertEqual(calls, [10, 1])
def test_call_soon_2(self):
waiter = self.loop.create_future()
waiter_r = weakref.ref(waiter)
self.loop.call_soon(lambda f: f.set_result(None), waiter)
self.loop.run_until_complete(waiter)
del waiter
self.assertIsNone(waiter_r())
def test_call_soon_3(self):
waiter = self.loop.create_future()
waiter_r = weakref.ref(waiter)
self.loop.call_soon(lambda f=waiter: f.set_result(None))
self.loop.run_until_complete(waiter)
del waiter
self.assertIsNone(waiter_r())
def test_call_soon_base_exc(self):
def cb():
raise KeyboardInterrupt()
self.loop.call_soon(cb)
with self.assertRaises(KeyboardInterrupt):
self.loop.run_forever()
self.assertFalse(self.loop.is_closed())
def test_calls_debug_reporting(self):
def run_test(debug, meth, stack_adj):
context = None
def handler(loop, ctx):
nonlocal context
context = ctx
self.loop.set_debug(debug)
self.loop.set_exception_handler(handler)
def cb():
1 / 0
meth(cb)
self.assertIsNone(context)
self.loop.run_until_complete(asyncio.sleep(0.05))
self.assertIs(type(context['exception']), ZeroDivisionError)
self.assertTrue(context['message'].startswith(
'Exception in callback'))
if debug:
tb = context['source_traceback']
self.assertEqual(tb[-1 + stack_adj].name, 'run_test')
else:
self.assertFalse('source_traceback' in context)
del context
for debug in (True, False):
for meth_name, meth, stack_adj in (
('call_soon',
self.loop.call_soon, 0),
('call_later', # `-1` accounts for lambda
lambda *args: self.loop.call_later(0.01, *args), -1)
):
with self.subTest(debug=debug, meth_name=meth_name):
run_test(debug, meth, stack_adj)
def test_now_update(self):
async def run():
st = self.loop.time()
time.sleep(0.05)
return self.loop.time() - st
delta = self.loop.run_until_complete(run())
self.assertTrue(delta > 0.049 and delta < 0.6)
    def test_call_later_1(self):
        # Basic call_later semantics: scheduled callbacks fire in order,
        # cancelled handles never fire, far-future timers are not run.
        calls = []

        def cb(inc=10, stop=False):
            calls.append(inc)
            self.assertTrue(self.loop.is_running())
            if stop:
                self.loop.call_soon(self.loop.stop)

        self.loop.call_later(0.05, cb)

        # canceled right away
        h = self.loop.call_later(0.05, cb, 100, True)
        self.assertIn('.cb', repr(h))
        h.cancel()
        self.assertIn('cancelled', repr(h))

        self.loop.call_later(0.05, cb, 1, True)
        self.loop.call_later(1000, cb, 1000)  # shouldn't be called

        started = time.monotonic()
        self.loop.run_forever()
        finished = time.monotonic()

        self.assertEqual(calls, [10, 1])
        self.assertFalse(self.loop.is_running())
        self.assertLess(finished - started, 0.3)
        self.assertGreater(finished - started, 0.04)

    def test_call_later_2(self):
        # Test that loop.call_later triggers an update of
        # libuv cached time.
        async def main():
            await asyncio.sleep(0.001)
            time.sleep(0.01)
            await asyncio.sleep(0.01)

        started = time.monotonic()
        self.loop.run_until_complete(main())
        delta = time.monotonic() - started
        self.assertGreater(delta, 0.019)

    def test_call_later_3(self):
        # a memory leak regression test
        waiter = self.loop.create_future()
        waiter_r = weakref.ref(waiter)
        self.loop.call_later(0.01, lambda f: f.set_result(None), waiter)
        self.loop.run_until_complete(waiter)
        del waiter
        self.assertIsNone(waiter_r())

    def test_call_later_4(self):
        # a memory leak regression test
        waiter = self.loop.create_future()
        waiter_r = weakref.ref(waiter)
        self.loop.call_later(0.01, lambda f=waiter: f.set_result(None))
        self.loop.run_until_complete(waiter)
        del waiter
        self.assertIsNone(waiter_r())

    def test_call_later_negative(self):
        # A negative delay must behave like "run as soon as possible".
        calls = []

        def cb(arg):
            calls.append(arg)
            self.loop.stop()

        self.loop.call_later(-1, cb, 'a')
        self.loop.run_forever()
        self.assertEqual(calls, ['a'])
    def test_call_later_rounding(self):
        # Refs #233, call_later() and call_at() shouldn't call cb early
        def cb():
            self.loop.stop()

        for i in range(8):
            self.loop.call_later(0.06 + 0.01, cb)  # 0.06999999999999999
            started = int(round(self.loop.time() * 1000))
            self.loop.run_forever()
            finished = int(round(self.loop.time() * 1000))
            self.assertGreaterEqual(finished - started, 69)

    def test_call_at(self):
        if (os.environ.get('TRAVIS_OS_NAME')
                or os.environ.get('GITHUB_WORKFLOW')):
            # Time seems to be really unpredictable on Travis.
            raise unittest.SkipTest('time is not monotonic on CI')

        i = 0

        def cb(inc):
            nonlocal i
            i += inc
            self.loop.stop()

        at = self.loop.time() + 0.05

        self.loop.call_at(at, cb, 100).cancel()  # cancelled handle must not fire
        self.loop.call_at(at, cb, 10)

        started = time.monotonic()
        self.loop.run_forever()
        finished = time.monotonic()

        self.assertEqual(i, 10)
        self.assertLess(finished - started, 0.07)
        self.assertGreater(finished - started, 0.045)
    def test_check_thread(self):
        # In debug mode, call_soon/call_later/call_at must raise when
        # invoked from a thread other than the loop's own; with debug off
        # the check is disabled.
        def check_thread(loop, debug):
            def cb():
                pass

            loop.set_debug(debug)
            if debug:
                msg = ("Non-thread-safe operation invoked on an "
                       "event loop other than the current one")
                with self.assertRaisesRegex(RuntimeError, msg):
                    loop.call_soon(cb)
                with self.assertRaisesRegex(RuntimeError, msg):
                    loop.call_later(60, cb)
                with self.assertRaisesRegex(RuntimeError, msg):
                    loop.call_at(loop.time() + 60, cb)
            else:
                loop.call_soon(cb)
                loop.call_later(60, cb)
                loop.call_at(loop.time() + 60, cb)

        def check_in_thread(loop, event, debug, create_loop, fut):
            # wait until the event loop is running
            event.wait()

            try:
                if create_loop:
                    loop2 = self.new_loop()
                    try:
                        asyncio.set_event_loop(loop2)
                        check_thread(loop, debug)
                    finally:
                        asyncio.set_event_loop(None)
                        loop2.close()
                else:
                    check_thread(loop, debug)
            except Exception as exc:
                loop.call_soon_threadsafe(fut.set_exception, exc)
            else:
                loop.call_soon_threadsafe(fut.set_result, None)

        def test_thread(loop, debug, create_loop=False):
            event = threading.Event()
            fut = asyncio.Future(loop=loop)
            loop.call_soon(event.set)
            args = (loop, event, debug, create_loop, fut)
            thread = threading.Thread(target=check_in_thread, args=args)
            thread.start()
            loop.run_until_complete(fut)
            thread.join()

        # raise RuntimeError if the thread has no event loop
        test_thread(self.loop, True)

        # check disabled if debug mode is disabled
        test_thread(self.loop, False)

        # raise RuntimeError if the event loop of the thread is not the called
        # event loop
        test_thread(self.loop, True, create_loop=True)

        # check disabled if debug mode is disabled
        test_thread(self.loop, False, create_loop=True)
    def test_run_once_in_executor_plain(self):
        # run_in_executor with the default executor runs the callable
        # exactly once with the given argument.
        called = []

        def cb(arg):
            called.append(arg)

        async def runner():
            await self.loop.run_in_executor(None, cb, 'a')

        self.loop.run_until_complete(runner())
        self.assertEqual(called, ['a'])

    def test_set_debug(self):
        # set_debug/get_debug round-trip.
        self.loop.set_debug(True)
        self.assertTrue(self.loop.get_debug())
        self.loop.set_debug(False)
        self.assertFalse(self.loop.get_debug())

    def test_run_until_complete_type_error(self):
        # Non-awaitable argument must be rejected.
        self.assertRaises(
            TypeError, self.loop.run_until_complete, 'blah')

    def test_run_until_complete_loop(self):
        # A future bound to one loop cannot be run on another.
        task = asyncio.Future()
        other_loop = self.new_loop()
        self.addCleanup(other_loop.close)
        self.assertRaises(
            ValueError, other_loop.run_until_complete, task)

    def test_run_until_complete_error(self):
        # Coroutine exceptions propagate out of run_until_complete.
        async def foo():
            raise ValueError('aaa')
        with self.assertRaisesRegex(ValueError, 'aaa'):
            self.loop.run_until_complete(foo())

    def test_run_until_complete_loop_orphan_future_close_loop(self):
        async def foo(delay):
            await asyncio.sleep(delay)

        def throw():
            raise KeyboardInterrupt

        self.loop.call_soon(throw)
        try:
            self.loop.run_until_complete(foo(0.1))
        except KeyboardInterrupt:
            pass

        # This call fails if run_until_complete does not clean up
        # done-callback for the previous future.
        self.loop.run_until_complete(foo(0.2))
    def test_debug_slow_callbacks(self):
        # In debug mode a callback exceeding slow_callback_duration must be
        # reported via logger.warning, naming the offending Handle.
        logger = logging.getLogger('asyncio')
        self.loop.set_debug(True)
        self.loop.slow_callback_duration = 0.2
        self.loop.call_soon(lambda: time.sleep(0.3))

        with mock.patch.object(logger, 'warning') as log:
            self.loop.run_until_complete(asyncio.sleep(0))

        self.assertEqual(log.call_count, 1)

        # format message
        msg = log.call_args[0][0] % log.call_args[0][1:]

        self.assertIn('Executing <Handle', msg)
        self.assertIn('test_debug_slow_callbacks', msg)

    def test_debug_slow_timer_callbacks(self):
        # Same as above but for timer callbacks (TimerHandle).
        logger = logging.getLogger('asyncio')
        self.loop.set_debug(True)
        self.loop.slow_callback_duration = 0.2
        self.loop.call_later(0.01, lambda: time.sleep(0.3))

        with mock.patch.object(logger, 'warning') as log:
            self.loop.run_until_complete(asyncio.sleep(0.02))

        self.assertEqual(log.call_count, 1)

        # format message
        msg = log.call_args[0][0] % log.call_args[0][1:]

        self.assertIn('Executing <TimerHandle', msg)
        self.assertIn('test_debug_slow_timer_callbacks', msg)

    def test_debug_slow_task_callbacks(self):
        # Same as above but for a slow Task step.
        logger = logging.getLogger('asyncio')
        self.loop.set_debug(True)
        self.loop.slow_callback_duration = 0.2

        async def foo():
            time.sleep(0.3)

        with mock.patch.object(logger, 'warning') as log:
            self.loop.run_until_complete(foo())

        self.assertEqual(log.call_count, 1)

        # format message
        msg = log.call_args[0][0] % log.call_args[0][1:]

        self.assertIn('Executing <Task finished', msg)
        self.assertIn('test_debug_slow_task_callbacks', msg)
    def test_default_exc_handler_callback(self):
        # With no custom handler installed, callback exceptions are logged
        # via the 'asyncio' logger for both Handle and TimerHandle.
        self.loop.set_exception_handler(None)
        self.loop._process_events = mock.Mock()

        def zero_error(fut):
            fut.set_result(True)
            1 / 0

        logger = logging.getLogger('asyncio')

        # Test call_soon (events.Handle)
        with mock.patch.object(logger, 'error') as log:
            fut = asyncio.Future()
            self.loop.call_soon(zero_error, fut)
            fut.add_done_callback(lambda fut: self.loop.stop())
            self.loop.run_forever()
            log.assert_called_with(
                self.mock_pattern('Exception in callback.*zero'),
                exc_info=mock.ANY)

        # Test call_later (events.TimerHandle)
        with mock.patch.object(logger, 'error') as log:
            fut = asyncio.Future()
            self.loop.call_later(0.01, zero_error, fut)
            fut.add_done_callback(lambda fut: self.loop.stop())
            self.loop.run_forever()
            log.assert_called_with(
                self.mock_pattern('Exception in callback.*zero'),
                exc_info=mock.ANY)

    def test_set_exc_handler_custom(self):
        # A custom exception handler receives the context; resetting it to
        # None restores default logging behavior.
        self.loop.set_exception_handler(None)
        logger = logging.getLogger('asyncio')

        def run_loop():
            def zero_error():
                self.loop.stop()
                1 / 0
            self.loop.call_soon(zero_error)
            self.loop.run_forever()

        errors = []

        def handler(loop, exc):
            errors.append(exc)

        self.loop.set_debug(True)

        self.assertIsNone(self.loop.get_exception_handler())
        self.loop.set_exception_handler(handler)
        if hasattr(self.loop, 'get_exception_handler'):
            self.assertIs(self.loop.get_exception_handler(), handler)
        run_loop()
        self.assertEqual(len(errors), 1)
        self.assertRegex(errors[-1]['message'],
                         'Exception in callback.*zero_error')

        self.loop.set_exception_handler(None)
        with mock.patch.object(logger, 'error') as log:
            run_loop()
            log.assert_called_with(
                self.mock_pattern('Exception in callback.*zero'),
                exc_info=mock.ANY)

        self.assertEqual(len(errors), 1)

    def test_set_exc_handler_broken(self):
        # If the custom handler itself raises, the loop must log
        # "Unhandled error in exception handler" instead of crashing.
        logger = logging.getLogger('asyncio')

        def run_loop():
            def zero_error():
                self.loop.stop()
                1 / 0
            self.loop.call_soon(zero_error)
            self.loop.run_forever()

        def handler(loop, context):
            raise AttributeError('spam')

        self.loop._process_events = mock.Mock()

        self.loop.set_exception_handler(handler)

        with mock.patch.object(logger, 'error') as log:
            run_loop()
            log.assert_called_with(
                self.mock_pattern('Unhandled error in exception handler'),
                exc_info=mock.ANY)
    def test_set_task_factory_invalid(self):
        # Non-callable factories are rejected and the factory stays unset.
        with self.assertRaisesRegex(
                TypeError,
                'task factory must be a callable or None'):
            self.loop.set_task_factory(1)

        self.assertIsNone(self.loop.get_task_factory())

    def test_set_task_factory(self):
        # create_task must honor an installed factory and revert to plain
        # asyncio.Task once the factory is cleared.
        self.loop._process_events = mock.Mock()

        class MyTask(asyncio.Task):
            pass

        async def coro():
            pass

        factory = lambda loop, coro: MyTask(coro, loop=loop)

        self.assertIsNone(self.loop.get_task_factory())
        self.loop.set_task_factory(factory)
        self.assertIs(self.loop.get_task_factory(), factory)

        task = self.loop.create_task(coro())
        self.assertTrue(isinstance(task, MyTask))
        self.loop.run_until_complete(task)

        self.loop.set_task_factory(None)
        self.assertIsNone(self.loop.get_task_factory())

        task = self.loop.create_task(coro())
        self.assertTrue(isinstance(task, asyncio.Task))
        self.assertFalse(isinstance(task, MyTask))
        self.loop.run_until_complete(task)

    def test_set_task_name(self):
        # create_task(name=...) must reach Task.set_name(), both for stock
        # tasks (3.8+) and for tasks produced by a custom factory.
        if self.implementation == 'asyncio' and sys.version_info < (3, 8, 0):
            raise unittest.SkipTest('unsupported task name')

        self.loop._process_events = mock.Mock()

        result = None

        class MyTask(asyncio.Task):
            def set_name(self, name):
                nonlocal result
                result = name + "!"

            def get_name(self):
                return result

        async def coro():
            pass

        factory = lambda loop, coro: MyTask(coro, loop=loop)

        self.assertIsNone(self.loop.get_task_factory())
        task = self.loop.create_task(coro(), name="mytask")
        self.assertFalse(isinstance(task, MyTask))
        if sys.version_info >= (3, 8, 0):
            self.assertEqual(task.get_name(), "mytask")
        self.loop.run_until_complete(task)

        self.loop.set_task_factory(factory)
        self.assertIs(self.loop.get_task_factory(), factory)

        task = self.loop.create_task(coro(), name="mytask")
        self.assertTrue(isinstance(task, MyTask))
        self.assertEqual(result, "mytask!")
        self.assertEqual(task.get_name(), "mytask!")
        self.loop.run_until_complete(task)

        self.loop.set_task_factory(None)
        self.assertIsNone(self.loop.get_task_factory())
    def test_shutdown_asyncgens_01(self):
        # shutdown_asyncgens() must run the `finally` clauses of all
        # still-open async generators.
        finalized = list()

        if not hasattr(self.loop, 'shutdown_asyncgens'):
            raise unittest.SkipTest()

        async def waiter(timeout, finalized):
            try:
                await asyncio.sleep(timeout)
                yield 1
            finally:
                await asyncio.sleep(0)
                finalized.append(1)

        async def wait():
            async for _ in waiter(1, finalized):
                pass

        t1 = self.loop.create_task(wait())
        t2 = self.loop.create_task(wait())

        self.loop.run_until_complete(asyncio.sleep(0.1))

        t1.cancel()
        t2.cancel()

        self.loop.run_until_complete(self.loop.shutdown_asyncgens())
        self.assertEqual(finalized, [1, 1])

        for t in {t1, t2}:
            try:
                self.loop.run_until_complete(t)
            except asyncio.CancelledError:
                pass

    def test_shutdown_asyncgens_02(self):
        # An exception raised while closing an async generator is routed
        # to the loop's exception handler with an 'asyncgen' context key.
        if not hasattr(self.loop, 'shutdown_asyncgens'):
            raise unittest.SkipTest()

        logged = 0

        def logger(loop, context):
            nonlocal logged
            expected = 'an error occurred during closing of asynchronous'
            if expected in context['message']:
                self.assertIn('asyncgen', context)
                logged += 1

        async def waiter(timeout):
            try:
                await asyncio.sleep(timeout)
                yield 1
            finally:
                1 / 0

        async def wait():
            async for _ in waiter(1):
                pass

        t = self.loop.create_task(wait())
        self.loop.run_until_complete(asyncio.sleep(0.1))

        self.loop.set_exception_handler(logger)
        self.loop.run_until_complete(self.loop.shutdown_asyncgens())

        self.assertEqual(logged, 1)

        # Silence warnings
        t.cancel()
        self.loop.run_until_complete(asyncio.sleep(0.1))

    def test_shutdown_asyncgens_03(self):
        if not hasattr(self.loop, 'shutdown_asyncgens'):
            raise unittest.SkipTest()

        async def waiter():
            yield 1
            yield 2

        async def foo():
            # We specifically want to hit _asyncgen_finalizer_hook
            # method.
            await waiter().asend(None)

        self.loop.run_until_complete(foo())
        self.loop.run_until_complete(asyncio.sleep(0.01))

    def test_inf_wait_for(self):
        # wait_for with an infinite timeout must simply await the awaitable.
        async def foo():
            await asyncio.sleep(0.1)
            return 123

        res = self.loop.run_until_complete(
            asyncio.wait_for(foo(), timeout=float('inf')))
        self.assertEqual(res, 123)

    def test_shutdown_default_executor(self):
        # shutdown_default_executor() (3.9+) must complete after work has
        # been submitted to the default executor.
        if not hasattr(self.loop, "shutdown_default_executor"):
            raise unittest.SkipTest()

        async def foo():
            await self.loop.run_in_executor(None, time.sleep, .1)

        self.loop.run_until_complete(foo())
        self.loop.run_until_complete(
            self.loop.shutdown_default_executor())
class TestBaseUV(_TestBase, UVTestCase):
    # uvloop-specific additions on top of the shared _TestBase suite.

    def test_loop_create_future(self):
        # create_future() returns an asyncio.Future bound to this loop.
        fut = self.loop.create_future()
        self.assertTrue(isinstance(fut, asyncio.Future))
        self.assertIs(fut._loop, self.loop)
        fut.cancel()

    def test_loop_call_soon_handle_cancelled(self):
        # A handle reports cancelled() after cancel(), and a handle that
        # actually ran still reports not-cancelled.
        cb = lambda: False  # NoQA
        handle = self.loop.call_soon(cb)
        self.assertFalse(handle.cancelled())
        handle.cancel()
        self.assertTrue(handle.cancelled())

        handle = self.loop.call_soon(cb)
        self.assertFalse(handle.cancelled())
        self.run_loop_briefly()
        self.assertFalse(handle.cancelled())

    def test_loop_call_later_handle_cancelled(self):
        # Same cancelled() semantics for timer handles.
        cb = lambda: False  # NoQA
        handle = self.loop.call_later(0.01, cb)
        self.assertFalse(handle.cancelled())
        handle.cancel()
        self.assertTrue(handle.cancelled())

        handle = self.loop.call_later(0.01, cb)
        self.assertFalse(handle.cancelled())
        self.run_loop_briefly(delay=0.05)
        self.assertFalse(handle.cancelled())

    def test_loop_std_files_cloexec(self):
        # See https://github.com/MagicStack/uvloop/issues/40 for details.
        for fd in {0, 1, 2}:
            flags = fcntl.fcntl(fd, fcntl.F_GETFD)
            self.assertFalse(flags & fcntl.FD_CLOEXEC)

    def test_default_exc_handler_broken(self):
        # If default_exception_handler itself raises, the loop logs
        # 'Exception in default exception handler'; with a custom handler
        # that also raises, the original context must still reach the
        # (broken) default handler.
        logger = logging.getLogger('asyncio')
        _context = None

        class Loop(uvloop.Loop):

            _selector = mock.Mock()
            _process_events = mock.Mock()

            def default_exception_handler(self, context):
                nonlocal _context
                _context = context
                # Simulates custom buggy "default_exception_handler"
                raise ValueError('spam')

        loop = Loop()
        self.addCleanup(loop.close)
        self.addCleanup(lambda: asyncio.set_event_loop(None))

        asyncio.set_event_loop(loop)

        def run_loop():
            def zero_error():
                loop.stop()
                1 / 0
            loop.call_soon(zero_error)
            loop.run_forever()

        with mock.patch.object(logger, 'error') as log:
            run_loop()
            log.assert_called_with(
                'Exception in default exception handler',
                exc_info=True)

        def custom_handler(loop, context):
            raise ValueError('ham')

        _context = None
        loop.set_exception_handler(custom_handler)
        with mock.patch.object(logger, 'error') as log:
            run_loop()
            log.assert_called_with(
                self.mock_pattern('Exception in default exception.*'
                                  'while handling.*in custom'),
                exc_info=True)

            # Check that original context was passed to default
            # exception handler.
            self.assertIn('context', _context)
            self.assertIs(type(_context['context']['exception']),
                          ZeroDivisionError)

    def test_big_call_later_timeout(self):
        # Huge/overflowing delays (inf, sys.maxsize, 2**55, ...) must not
        # crash the loop; the sleeping tasks stay cancellable.
        OK, NOT_OK = 0, 0

        async def sleep(delay_name, delay):
            nonlocal OK, NOT_OK
            try:
                await asyncio.sleep(delay)
            except asyncio.CancelledError:
                OK += 1
            except Exception:
                NOT_OK += 1

        async def main():
            tests = [
                sleep("infinity", float("inf")),
                sleep("sys.maxsize", float(sys.maxsize)),
                sleep("sys.maxsize", sys.maxsize),
                sleep("2**55", 2**55),
                sleep("2**54", 2**54),
            ]
            tasks = [self.loop.create_task(test) for test in tests]
            await asyncio.sleep(0.1)
            for task in tasks:
                task.cancel()
                await task

        self.loop.run_until_complete(main())
        self.assertEqual(OK, 5)
        self.assertEqual(NOT_OK, 0)
class TestBaseAIO(_TestBase, AIOTestCase):
    # Runs the shared _TestBase suite against the stock asyncio loop.
    pass
class TestPolicy(unittest.TestCase):
    # Tests for uvloop.EventLoopPolicy integration with asyncio's global
    # policy machinery; each test restores the previous policy on exit.

    def test_uvloop_policy(self):
        try:
            asyncio.set_event_loop_policy(uvloop.EventLoopPolicy())
            loop = asyncio.new_event_loop()
            try:
                self.assertIsInstance(loop, uvloop.Loop)
            finally:
                loop.close()
        finally:
            asyncio.set_event_loop_policy(None)

    @unittest.skipUnless(hasattr(asyncio, '_get_running_loop'),
                         'No asyncio._get_running_loop')
    def test_running_loop_within_a_loop(self):
        # Starting a second loop from inside a running loop must fail.
        async def runner(loop):
            loop.run_forever()

        try:
            asyncio.set_event_loop_policy(uvloop.EventLoopPolicy())
            loop = asyncio.new_event_loop()
            outer_loop = asyncio.new_event_loop()
            try:
                with self.assertRaisesRegex(RuntimeError,
                                            'while another loop is running'):
                    outer_loop.run_until_complete(runner(loop))
            finally:
                loop.close()
                outer_loop.close()
        finally:
            asyncio.set_event_loop_policy(None)

    @unittest.skipUnless(hasattr(asyncio, '_get_running_loop'),
                         'No asyncio._get_running_loop')
    def test_get_event_loop_returns_running_loop(self):
        # Inside a running uvloop, asyncio.get_event_loop() must return the
        # running loop without consulting the (deliberately broken) policy.
        class Policy(asyncio.DefaultEventLoopPolicy):
            def get_event_loop(self):
                raise NotImplementedError

        loop = None

        old_policy = asyncio.get_event_loop_policy()
        try:
            asyncio.set_event_loop_policy(Policy())
            loop = uvloop.new_event_loop()
            self.assertIs(asyncio._get_running_loop(), None)

            async def func():
                self.assertIs(asyncio.get_event_loop(), loop)
                self.assertIs(asyncio._get_running_loop(), loop)

            loop.run_until_complete(func())
        finally:
            asyncio.set_event_loop_policy(old_policy)
            if loop is not None:
                loop.close()

        self.assertIs(asyncio._get_running_loop(), None)
| 31.59611 | 78 | 0.583958 |
03ef0d1a9dd9e99743c9351b4ce38b20705cfcb1 | 956 | py | Python | apps/fhirproxy/utils.py | thebureaugroup/sharemyhealth | de93793d7065d50af53f260e3347b8ed30d8c864 | [
"Apache-2.0"
] | null | null | null | apps/fhirproxy/utils.py | thebureaugroup/sharemyhealth | de93793d7065d50af53f260e3347b8ed30d8c864 | [
"Apache-2.0"
] | 4 | 2021-06-05T00:09:17.000Z | 2021-12-13T20:52:41.000Z | apps/fhirproxy/utils.py | thebureaugroup/sharemyhealth | de93793d7065d50af53f260e3347b8ed30d8c864 | [
"Apache-2.0"
] | null | null | null | import requests
import urllib
from django.conf import settings
def fhir_get_access_token_with_client_credentials():
    """Fetch an OAuth2 access token for the backend FHIR resource.

    Uses the ``client_credentials`` grant with the credentials configured
    in Django settings.

    Returns:
        str: the bearer access token.

    Raises:
        requests.HTTPError: if the token endpoint returns an error status.
        requests.Timeout: if the endpoint does not respond in time.
    """
    data = urllib.parse.urlencode({"client_id": settings.BACKEND_FHIR_CLIENT_ID,
                                   "client_secret": settings.BACKEND_FHIR_CLIENT_SECRET,
                                   "resource": settings.BACKEND_FHIR_RESOURCE,
                                   "grant_type": "client_credentials"})
    response = requests.post(
        url=settings.BACKEND_FHIR_TOKEN_ENDPOINT, data=data, timeout=30)
    # Fail loudly on 4xx/5xx instead of raising a confusing KeyError below.
    response.raise_for_status()
    return response.json()['access_token']
def fhir_secured_request(fhir_endpoint, access_token, params=None):
    """GET a FHIR endpoint using a Bearer access token.

    Args:
        fhir_endpoint: full URL of the FHIR resource to fetch.
        access_token: OAuth2 bearer token (see
            ``fhir_get_access_token_with_client_credentials``).
        params: optional dict of query-string parameters.

    Returns:
        requests.Response: the raw response (caller inspects status/json).
    """
    # `params=None` replaces the mutable-default `params={}` anti-pattern;
    # requests treats None the same as an empty mapping.
    header = {"Authorization": "Bearer " + access_token}
    return requests.get(fhir_endpoint, params=params, headers=header,
                        timeout=30)
| 38.24 | 88 | 0.669456 |
e086b5a2bf9f4b39a3781791f9fc70ddd8fe75d7 | 4,622 | py | Python | pong.py | jmsstudio/practice-python | 509c23040a6e78dd2dad62f47b80bc603d98607c | [
"MIT"
] | null | null | null | pong.py | jmsstudio/practice-python | 509c23040a6e78dd2dad62f47b80bc603d98607c | [
"MIT"
] | null | null | null | pong.py | jmsstudio/practice-python | 509c23040a6e78dd2dad62f47b80bc603d98607c | [
"MIT"
] | null | null | null | # Implementation of classic arcade game Pong
import simplegui
import random
# initialize globals - pos and vel encode vertical info for paddles
# Table geometry (pixels).
WIDTH = 600
HEIGHT = 400
BALL_RADIUS = 20
PAD_WIDTH = 8
PAD_HEIGHT = 80
HALF_PAD_WIDTH = PAD_WIDTH / 2
HALF_PAD_HEIGHT = PAD_HEIGHT / 2
# Serve directions; spawn_ball() compares direction.upper() against these.
# (The earlier boolean LEFT/RIGHT definitions were dead code: they were
# immediately overwritten by these strings.)
LEFT = 'LEFT'
RIGHT = 'RIGHT'
# initialize ball_pos and ball_vel for a new ball in the middle of the table
# if direction is RIGHT, the ball's velocity is upper right, else upper left
def spawn_ball(direction):
    """Re-center the ball and serve it upward toward `direction`.

    direction -- 'LEFT' or 'RIGHT' (case-insensitive); a leftward serve
    negates the horizontal speed.
    """
    global ball_pos, ball_vel  # vectors stored as [x, y] lists
    speed_x = random.randrange(2, 5)
    speed_y = random.randrange(1, 4)
    ball_pos = [WIDTH / 2, HEIGHT / 2]
    if direction.upper() == LEFT:
        ball_vel = [-speed_x, -speed_y]
    else:
        ball_vel = [speed_x, -speed_y]
# define event handlers
def new_game():
    """Reset paddles to mid-table, zero the scores, serve to the left."""
    global paddle1_pos, paddle2_pos, paddle1_vel, paddle2_vel  # these are numbers
    global score1, score2  # these are ints
    centered = (HEIGHT / 2) - (PAD_HEIGHT / 2)
    paddle1_pos = centered
    paddle2_pos = centered
    paddle1_vel = paddle2_vel = 0
    score1 = score2 = 0
    spawn_ball(LEFT)
def draw(canvas):
    """Frame handler: advance the game one tick and render the table.

    Fix: the original checked the gutters for *scoring* before checking the
    paddle collision, and scoring respawned the ball at the center — so the
    paddle-deflection branch could never trigger and every gutter touch
    scored.  Paddle collision is now tested first; only a miss scores.
    """
    global score1, score2, paddle1_pos, paddle2_pos, ball_pos, ball_vel

    # draw mid line and gutters
    canvas.draw_line([WIDTH / 2, 0],[WIDTH / 2, HEIGHT], 1, "White")
    canvas.draw_line([PAD_WIDTH, 0],[PAD_WIDTH, HEIGHT], 1, "White")
    canvas.draw_line([WIDTH - PAD_WIDTH, 0],[WIDTH - PAD_WIDTH, HEIGHT], 1, "White")

    # gutter handling: deflect off a covering paddle (speeding the ball up),
    # otherwise award the point and respawn toward the scorer
    if touch_left():
        if paddle1_pos <= ball_pos[1] <= paddle1_pos + PAD_HEIGHT:
            ball_vel[0] = -ball_vel[0]
            if ball_vel[0] > 0:
                ball_vel[0] += 1
            else:
                ball_vel[0] -= 1
        else:
            score2 += 1
            spawn_ball(RIGHT)
    elif touch_right():
        if paddle2_pos <= ball_pos[1] <= paddle2_pos + PAD_HEIGHT:
            ball_vel[0] = -ball_vel[0]
            if ball_vel[0] > 0:
                ball_vel[0] += 1
            else:
                ball_vel[0] -= 1
        else:
            score1 += 1
            spawn_ball(LEFT)

    # bounce off the top and bottom walls
    if touch_top() or touch_bottom():
        ball_vel[1] = -ball_vel[1]

    # advance the ball
    ball_pos[0] += ball_vel[0]
    ball_pos[1] += ball_vel[1]

    # draw ball
    canvas.draw_circle(ball_pos, BALL_RADIUS, 2, 'white', 'white')

    # update paddle's vertical position, keep paddle on the screen
    if (paddle1_pos + paddle1_vel >= 0 and paddle1_pos + PAD_HEIGHT + paddle1_vel <= HEIGHT):
        paddle1_pos += paddle1_vel
    if (paddle2_pos + paddle2_vel >= 0 and paddle2_pos + PAD_HEIGHT + paddle2_vel <= HEIGHT):
        paddle2_pos += paddle2_vel

    # draw paddles
    canvas.draw_line([PAD_WIDTH / 2, paddle1_pos], [PAD_WIDTH / 2, paddle1_pos + PAD_HEIGHT], PAD_WIDTH, "white")
    canvas.draw_line([WIDTH - (PAD_WIDTH / 2), paddle2_pos], [WIDTH - (PAD_WIDTH / 2), paddle2_pos + PAD_HEIGHT], PAD_WIDTH, "white")

    # draw scores
    canvas.draw_text(str(score1), (WIDTH/4, HEIGHT /4), 60, 'white')
    canvas.draw_text(str(score2), (WIDTH - WIDTH/4, HEIGHT /4), 60, 'white')
def keydown(key):
    """Key-press handler: w/s drive the left paddle, arrows the right."""
    global paddle1_vel, paddle2_vel
    if key == simplegui.KEY_MAP['w']:
        paddle1_vel -= 2
    elif key == simplegui.KEY_MAP['s']:
        paddle1_vel += 2
    if key == simplegui.KEY_MAP['up']:
        paddle2_vel -= 2
    elif key == simplegui.KEY_MAP['down']:
        paddle2_vel += 2
def keyup(key):
    """Key-release handler: stop the paddle the released key was driving."""
    global paddle1_vel, paddle2_vel
    if key in (simplegui.KEY_MAP['w'], simplegui.KEY_MAP['s']):
        paddle1_vel = 0
    if key in (simplegui.KEY_MAP['up'], simplegui.KEY_MAP['down']):
        paddle2_vel = 0
def touch_right():
    """True when the ball's next step reaches the right gutter."""
    next_x = ball_pos[0] + ball_vel[0]
    return next_x + BALL_RADIUS >= WIDTH - PAD_WIDTH


def touch_left():
    """True when the ball's next step reaches the left gutter."""
    next_x = ball_pos[0] + ball_vel[0]
    return next_x - BALL_RADIUS <= PAD_WIDTH


def touch_bottom():
    """True when the ball's next step reaches the bottom wall."""
    next_y = ball_pos[1] + ball_vel[1]
    return next_y + BALL_RADIUS >= HEIGHT


def touch_top():
    """True when the ball's next step reaches the top wall."""
    next_y = ball_pos[1] + ball_vel[1]
    return next_y - BALL_RADIUS <= 0
# create frame and wire the event handlers (simplegui/CodeSkulptor API)
frame = simplegui.create_frame("Pong", WIDTH, HEIGHT)
frame.set_draw_handler(draw)
frame.set_keydown_handler(keydown)
frame.set_keyup_handler(keyup)

# start frame: reset game state, then enter the simplegui event loop
new_game()
frame.start()
| 30.20915 | 134 | 0.608178 |
eb4619310fc703c8253ae278c4afed6384c66753 | 3,320 | py | Python | looking_for_group/adminutils/views.py | andrlik/looking-for-group | 0b1cecb37ef0f6d75692fd188130e2c60d09b7d2 | [
"BSD-3-Clause"
] | null | null | null | looking_for_group/adminutils/views.py | andrlik/looking-for-group | 0b1cecb37ef0f6d75692fd188130e2c60d09b7d2 | [
"BSD-3-Clause"
] | null | null | null | looking_for_group/adminutils/views.py | andrlik/looking-for-group | 0b1cecb37ef0f6d75692fd188130e2c60d09b7d2 | [
"BSD-3-Clause"
] | null | null | null | import logging
from django.contrib import messages
from django.contrib.auth.mixins import LoginRequiredMixin
from django.contrib.sites.shortcuts import get_current_site
from django.db.models.query_utils import Q
from django.http import HttpResponseRedirect
from django.urls import reverse_lazy
from django.utils.translation import ugettext_lazy as _
from django.views import generic
from django_q.tasks import async_task
from notifications.models import Notification
from rules.contrib.views import PermissionRequiredMixin
from . import forms, tasks
from ..user_preferences.models import Preferences
from ..users.models import User
# NOTE(review): logger is named "gamer_profiles" rather than __name__ —
# presumably copied from another app; confirm this is intentional.
logger = logging.getLogger("gamer_profiles")
def get_filtered_user_queryset(filters_selected, filter_mode):
    """Return the Users whose Preferences match the selected filters.

    filters_selected -- iterable of Preferences boolean field names.
    filter_mode -- 'any' (OR the filters), 'none' (all filters False),
    anything else (AND the filters as True).
    """
    pref_qs = Preferences.objects.select_related('gamer', 'gamer__user').all()
    if filters_selected:
        # 'none' means the preference flags must be False; otherwise True.
        wanted = filter_mode != "none"
        q_parts = [Q(**{name: wanted}) for name in filters_selected]
        combined = q_parts.pop()
        for part in q_parts:
            if filter_mode == "any":
                combined |= part
            else:
                combined &= part
        pref_qs = pref_qs.filter(combined)
    return User.objects.filter(id__in=[p.gamer.user.pk for p in pref_qs])
# Create your views here.
class CreateMassNotification(LoginRequiredMixin, PermissionRequiredMixin, generic.FormView):
    """Staff view that broadcasts a notification to a filtered set of users."""

    form_class = forms.NotificationForm
    template_name = 'adminutils/send_notification.html'
    permission_required = 'adminutils.send_notification'

    def form_valid(self, form):
        """Queue the notification for async delivery and redirect back."""
        filter_selections = form.cleaned_data['filter_options']
        filter_mode = form.cleaned_data['filter_mode']
        message = "Announcement: {}".format(form.cleaned_data['message'])
        recipients = get_filtered_user_queryset(filter_selections, filter_mode)
        # Translate the template *before* interpolating: the original
        # `_("...".format(n))` looked up the already-formatted string, which
        # can never match the translation catalog.
        messages.success(
            self.request,
            _("Sending your notification to {} users.").format(recipients.count()))
        async_task(tasks.send_mass_notifcation, get_current_site(self.request), message, recipients)
        logger.debug("Sent notification async task")
        return HttpResponseRedirect(reverse_lazy('adminutils:notification'))
class SendEmailToUsers(LoginRequiredMixin, PermissionRequiredMixin, generic.FormView):
    """Staff view that sends an email to a filtered set of users."""

    form_class = forms.EmailForm
    template_name = 'adminutils/send_email.html'
    permission_required = "adminutils.send_email"

    def form_valid(self, form):
        """Queue the email for async delivery and redirect back."""
        filter_selections = form.cleaned_data['filter_options']
        filter_mode = form.cleaned_data['filter_mode']
        subject = form.cleaned_data['subject']
        body_plain = form.cleaned_data['body']
        recipients = get_filtered_user_queryset(filter_selections, filter_mode)
        # Translate first, then interpolate — `_(msg.format(n))` bypasses
        # the translation catalog (same fix as CreateMassNotification).
        messages.success(
            self.request,
            _('Sending your email to {} users.').format(recipients.count()))
        async_task(tasks.send_mass_email, subject, body_plain, recipients)
        logger.debug("Sent email async task.")
        return HttpResponseRedirect(reverse_lazy('adminutils:email'))
68b5d44e89b7e6c990a0b61b5d41be69d2a832c7 | 474 | py | Python | 1. python-course-udemy/poo_avancado/evolucao_v2.py | karlscode/python-basics | 90f215de323f907cb692369b87c34659ba49f1d2 | [
"MIT"
] | null | null | null | 1. python-course-udemy/poo_avancado/evolucao_v2.py | karlscode/python-basics | 90f215de323f907cb692369b87c34659ba49f1d2 | [
"MIT"
] | null | null | null | 1. python-course-udemy/poo_avancado/evolucao_v2.py | karlscode/python-basics | 90f215de323f907cb692369b87c34659ba49f1d2 | [
"MIT"
] | null | null | null | #!/usr/bin/python3
class Humano:
    """A human whose species is shared class state until evolved per-instance."""

    # class attribute: shared default species for every instance
    especie = 'Homo Sapiens'

    def __init__(self, nome):
        """Store the person's name."""
        self.nome = nome

    def das_cavernas(self):
        """Shadow the class species with a caveman one; return self for chaining."""
        self.especie = 'Homo Neanderthalensis'
        return self
def _demo():
    """Show that das_cavernas() shadows only the instance's species."""
    jose = Humano('José')
    grokn = Humano('Grokn').das_cavernas()
    print(f'Humano.espcie: {Humano.especie}')
    print(f'jose.especie: {jose.especie}')
    print(f'grokn.especie: {grokn.especie}')


if __name__ == '__main__':
    _demo()
| 20.608696 | 46 | 0.628692 |
936fa3533cc2a831ac92dea63a9d84a1bb266b83 | 4,889 | py | Python | updated/vgg.py | dtocci1/Keras-FRCNN-TF2 | 6f19509fd9097510ccd6a815530554118e189d5c | [
"Apache-2.0"
] | null | null | null | updated/vgg.py | dtocci1/Keras-FRCNN-TF2 | 6f19509fd9097510ccd6a815530554118e189d5c | [
"Apache-2.0"
] | null | null | null | updated/vgg.py | dtocci1/Keras-FRCNN-TF2 | 6f19509fd9097510ccd6a815530554118e189d5c | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
"""VGG16 model for Keras.
# Reference
- [Very Deep Convolutional Networks for Large-Scale Image Recognition](https://arxiv.org/abs/1409.1556)
"""
from __future__ import print_function
from __future__ import absolute_import
from __future__ import division
import warnings
from keras.models import Model
from keras.layers import Flatten, Dense, Input, Conv2D, MaxPooling2D, Dropout
from keras.layers import GlobalAveragePooling2D, GlobalMaxPooling2D, TimeDistributed
from tensorflow.keras.utils import get_source_inputs
from keras.utils import layer_utils
from keras.utils.data_utils import get_file
from keras import backend as K
from RoiPoolingConv import RoiPoolingConv
def get_weight_path():
    """Return the pretrained VGG16 weights filename for the TF ordering.

    Returns None (after printing a notice) on a channels-first backend,
    where no pretrained weights are available.
    """
    if K.image_data_format() != 'channels_first':
        return 'vgg16_weights_tf_dim_ordering_tf_kernels.h5'
    print('pretrained weights not available for VGG with theano backend')
    return
def get_img_output_length(width, height):
    """Map input image dimensions to VGG16 feature-map dimensions.

    The backbone downsamples by a total stride of 16, so each dimension
    is floor-divided by 16.
    """
    return width // 16, height // 16
def nn_base(input_tensor=None, trainable=False):
    """Build the VGG16 convolutional backbone (blocks 1-5, no final pool).

    input_tensor -- optional existing tensor/Input to build on.
    trainable -- accepted for interface parity; not used in this body.
    Returns the block5_conv3 output tensor (stride-16 feature map).
    """
    # Determine proper input shape
    if K.image_data_format() == 'channels_first':
        input_shape = (3, None, None)
    else:
        input_shape = (None, None, 3)

    if input_tensor is None:
        img_input = Input(shape=input_shape)
    else:
        if not K.is_keras_tensor(input_tensor):
            # wrap a plain tensor so Keras can track it
            img_input = Input(tensor=input_tensor, shape=input_shape)
        else:
            img_input = input_tensor

    # bn_axis is computed but unused here — presumably kept for parity with
    # other backbones (e.g. ResNet); confirm before removing.
    if K.image_data_format() == 'channels_last':
        bn_axis = 3
    else:
        bn_axis = 1

    # Block 1
    x = Conv2D(64, (3, 3), activation='relu', padding='same', name='block1_conv1')(img_input)
    x = Conv2D(64, (3, 3), activation='relu', padding='same', name='block1_conv2')(x)
    x = MaxPooling2D((2, 2), strides=(2, 2), name='block1_pool')(x)

    # Block 2
    x = Conv2D(128, (3, 3), activation='relu', padding='same', name='block2_conv1')(x)
    x = Conv2D(128, (3, 3), activation='relu', padding='same', name='block2_conv2')(x)
    x = MaxPooling2D((2, 2), strides=(2, 2), name='block2_pool')(x)

    # Block 3
    x = Conv2D(256, (3, 3), activation='relu', padding='same', name='block3_conv1')(x)
    x = Conv2D(256, (3, 3), activation='relu', padding='same', name='block3_conv2')(x)
    x = Conv2D(256, (3, 3), activation='relu', padding='same', name='block3_conv3')(x)
    x = MaxPooling2D((2, 2), strides=(2, 2), name='block3_pool')(x)

    # Block 4
    x = Conv2D(512, (3, 3), activation='relu', padding='same', name='block4_conv1')(x)
    x = Conv2D(512, (3, 3), activation='relu', padding='same', name='block4_conv2')(x)
    x = Conv2D(512, (3, 3), activation='relu', padding='same', name='block4_conv3')(x)
    x = MaxPooling2D((2, 2), strides=(2, 2), name='block4_pool')(x)

    # Block 5 — note: the final pool is intentionally omitted so the RPN
    # sees a stride-16 feature map.
    x = Conv2D(512, (3, 3), activation='relu', padding='same', name='block5_conv1')(x)
    x = Conv2D(512, (3, 3), activation='relu', padding='same', name='block5_conv2')(x)
    x = Conv2D(512, (3, 3), activation='relu', padding='same', name='block5_conv3')(x)
    # x = MaxPooling2D((2, 2), strides=(2, 2), name='block5_pool')(x)

    return x
def rpn(base_layers, num_anchors):
    """Region Proposal Network head on top of the backbone feature map.

    Returns [objectness, box deltas, base_layers]: a per-anchor sigmoid
    classification map and a 4-value linear regression map per anchor.
    """
    shared = Conv2D(512, (3, 3), padding='same', activation='relu',
                    kernel_initializer='normal', name='rpn_conv1')(base_layers)
    objectness = Conv2D(num_anchors, (1, 1), activation='sigmoid',
                        kernel_initializer='uniform', name='rpn_out_class')(shared)
    box_deltas = Conv2D(num_anchors * 4, (1, 1), activation='linear',
                        kernel_initializer='zero', name='rpn_out_regress')(shared)
    return [objectness, box_deltas, base_layers]
def classifier(base_layers, input_rois, num_rois, nb_classes = 21, trainable=False):
    """Fast R-CNN classifier head on top of the shared backbone.

    Pools each ROI to a fixed 7x7 grid, flattens, runs two FC-4096 layers
    (with dropout) and emits per-ROI class scores plus per-class box
    regressions (no regression targets for the background class).

    Returns [out_class, out_regr].

    NOTE: `trainable` is accepted for interface compatibility but is not
    used by this implementation.
    """
    # compile times on theano tend to be very high, so we use smaller ROI
    # pooling regions to workaround -- both supported backends ended up
    # with the same 7x7 pooling size anyway.  (The original code also
    # computed a backend-specific `input_shape` that was never used, and
    # raised NameError on any backend other than tensorflow/theano; both
    # issues are fixed by assigning unconditionally.)
    pooling_regions = 7
    out_roi_pool = RoiPoolingConv(pooling_regions, num_rois)([base_layers, input_rois])
    # Two fully-connected layers applied independently to every ROI.
    out = TimeDistributed(Flatten(name='flatten'))(out_roi_pool)
    out = TimeDistributed(Dense(4096, activation='relu', name='fc1'))(out)
    out = TimeDistributed(Dropout(0.5))(out)
    out = TimeDistributed(Dense(4096, activation='relu', name='fc2'))(out)
    out = TimeDistributed(Dropout(0.5))(out)
    out_class = TimeDistributed(Dense(nb_classes, activation='softmax', kernel_initializer='zero'), name='dense_class_{}'.format(nb_classes))(out)
    # note: no regression target for bg class
    out_regr = TimeDistributed(Dense(4 * (nb_classes-1), activation='linear', kernel_initializer='zero'), name='dense_regress_{}'.format(nb_classes))(out)
    return [out_class, out_regr]
| 39.747967 | 154 | 0.67928 |
aaf378b489a274e27cdc1f8760400d9a181c64a5 | 159 | py | Python | tests/model_control/detailed/transf_Quantization/model_control_one_enabled_Quantization_ConstantTrend_BestCycle_ARX.py | shaido987/pyaf | b9afd089557bed6b90b246d3712c481ae26a1957 | [
"BSD-3-Clause"
] | 377 | 2016-10-13T20:52:44.000Z | 2022-03-29T18:04:14.000Z | tests/model_control/detailed/transf_Quantization/model_control_one_enabled_Quantization_ConstantTrend_BestCycle_ARX.py | ysdede/pyaf | b5541b8249d5a1cfdc01f27fdfd99b6580ed680b | [
"BSD-3-Clause"
] | 160 | 2016-10-13T16:11:53.000Z | 2022-03-28T04:21:34.000Z | tests/model_control/detailed/transf_Quantization/model_control_one_enabled_Quantization_ConstantTrend_BestCycle_ARX.py | ysdede/pyaf | b5541b8249d5a1cfdc01f27fdfd99b6580ed680b | [
"BSD-3-Clause"
] | 63 | 2017-03-09T14:51:18.000Z | 2022-03-27T20:52:57.000Z | import tests.model_control.test_ozone_custom_models_enabled as testmod
testmod.build_model( ['Quantization'] , ['ConstantTrend'] , ['BestCycle'] , ['ARX'] ); | 39.75 | 86 | 0.754717 |
7b76c692ba8a45ed0a877390ae0261a848e2cd9a | 2,819 | py | Python | test/lint/check-doc.py | Darrenshome40/shitecoin | a2535c8fc5a43ee21ec818d5367439f6302cd084 | [
"MIT"
] | null | null | null | test/lint/check-doc.py | Darrenshome40/shitecoin | a2535c8fc5a43ee21ec818d5367439f6302cd084 | [
"MIT"
] | null | null | null | test/lint/check-doc.py | Darrenshome40/shitecoin | a2535c8fc5a43ee21ec818d5367439f6302cd084 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# Copyright (c) 2015-2020 The shitecoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
'''
This checks if all command line args are documented.
Return value is 0 to indicate no error.
Author: @MarcoFalke
'''
from subprocess import check_output
import re
FOLDER_GREP = 'src'        # tree that is searched for argument usage
FOLDER_TEST = 'src/test/'  # excluded from the usage search
# Matches code that *reads* an argument (GetArg/IsArgSet/...), capturing "-name".
REGEX_ARG = r'(?:ForceSet|SoftSet|Get|Is)(?:Bool)?Args?(?:Set)?\("(-[^"]+)"'
# Matches code that *documents* an argument via AddArg("...").
REGEX_DOC = r'AddArg\("(-[^"=]+?)(?:=|")'
CMD_ROOT_DIR = '$(git rev-parse --show-toplevel)/{}'.format(FOLDER_GREP)
CMD_GREP_ARGS = r"git grep --perl-regexp '{}' -- {} ':(exclude){}'".format(REGEX_ARG, CMD_ROOT_DIR, FOLDER_TEST)
CMD_GREP_WALLET_ARGS = r"git grep --function-context 'void WalletInit::AddWalletOptions' -- {} | grep AddArg".format(CMD_ROOT_DIR)
CMD_GREP_WALLET_HIDDEN_ARGS = r"git grep --function-context 'void DummyWalletInit::AddWalletOptions' -- {}".format(CMD_ROOT_DIR)
CMD_GREP_DOCS = r"git grep --perl-regexp '{}' {}".format(REGEX_DOC, CMD_ROOT_DIR)
# list unsupported, deprecated and duplicate args as they need no documentation
SET_DOC_OPTIONAL = set(['-h', '-help', '-dbcrashratio', '-forcecompactdb', '-zapwallettxes'])
def lint_missing_argument_documentation():
    """Cross-check every CLI argument the code reads against the documented set.

    Greps the source for argument reads and AddArg() declarations, prints a
    summary, and asserts that no read argument is left undocumented.
    """
    grepped_usage = check_output(CMD_GREP_ARGS, shell=True).decode('utf8').strip()
    grepped_docs = check_output(CMD_GREP_DOCS, shell=True).decode('utf8').strip()

    args_used = set(re.findall(re.compile(REGEX_ARG), grepped_usage))
    args_docd = set(re.findall(re.compile(REGEX_DOC), grepped_docs)) | SET_DOC_OPTIONAL

    args_need_doc = args_used - args_docd   # read but never documented
    args_unknown = args_docd - args_used    # documented but apparently unused

    print("Args used : {}".format(len(args_used)))
    print("Args documented : {}".format(len(args_docd)))
    print("Args undocumented: {}".format(len(args_need_doc)))
    print(args_need_doc)
    print("Args unknown : {}".format(len(args_unknown)))
    print(args_unknown)

    assert 0 == len(args_need_doc), "Please document the following arguments: {}".format(args_need_doc)
def lint_missing_hidden_wallet_args():
    """Verify every wallet option has a hidden counterpart in the dummy wallet.

    Compares AddArg() declarations in WalletInit::AddWalletOptions against the
    names listed in DummyWalletInit::AddWalletOptions and asserts none are missing.
    """
    raw_declared = check_output(CMD_GREP_WALLET_ARGS, shell=True).decode('utf8').strip()
    raw_hidden = check_output(CMD_GREP_WALLET_HIDDEN_ARGS, shell=True).decode('utf8').strip()

    declared = set(re.findall(re.compile(REGEX_DOC), raw_declared))
    hidden = set(re.findall(re.compile(r' "([^"=]+)'), raw_hidden))

    hidden_missing = declared - hidden
    if hidden_missing:
        assert 0, "Please add {} to the hidden args in DummyWalletInit::AddWalletOptions".format(hidden_missing)
def main():
    """Run both lint checks; each raises AssertionError on failure."""
    lint_missing_argument_documentation()
    lint_missing_hidden_wallet_args()
if __name__ == "__main__":
    main()
| 42.074627 | 130 | 0.723306 |
262b1d25096021716fcc6feff64b17c6e8b78f67 | 316 | py | Python | impression_client/admin.py | gregschmit/django-impression-client | a2f4328024a67865eccaeff79567320842ab5d5c | [
"MIT"
] | 1 | 2020-01-11T02:06:59.000Z | 2020-01-11T02:06:59.000Z | impression_client/admin.py | gregschmit/django-impression-client | a2f4328024a67865eccaeff79567320842ab5d5c | [
"MIT"
] | null | null | null | impression_client/admin.py | gregschmit/django-impression-client | a2f4328024a67865eccaeff79567320842ab5d5c | [
"MIT"
] | null | null | null | from django.contrib import admin
from . import models
@admin.register(models.RemoteImpressionServer)
class RemoteImpressionServerAdmin(admin.ModelAdmin):
    """Django admin configuration for RemoteImpressionServer entries."""
    # No sidebar filters are configured.
    list_filter = []
    # Fields matched by the admin search box.
    search_fields = ["name", "target", "authentication_token"]
    # Columns shown in the change-list view.
    list_display = ["name", "is_active", "target", "authentication_token"]
| 28.727273 | 74 | 0.75 |
2cbab1b2219f6dbfeed81cdd2a558a7e986beeea | 3,293 | py | Python | psico/geometry.py | speleo3/pymol-psico | 4e5402b4dca9a509b34a03691f12dc49e93c4973 | [
"BSD-2-Clause"
] | 31 | 2015-10-22T11:11:08.000Z | 2022-02-25T10:26:07.000Z | psico/geometry.py | speleo3/pymol-psico | 4e5402b4dca9a509b34a03691f12dc49e93c4973 | [
"BSD-2-Clause"
] | 11 | 2016-02-29T13:55:47.000Z | 2022-03-26T07:44:24.000Z | psico/geometry.py | speleo3/pymol-psico | 4e5402b4dca9a509b34a03691f12dc49e93c4973 | [
"BSD-2-Clause"
] | 21 | 2015-08-04T16:58:19.000Z | 2022-03-25T21:07:01.000Z | '''
(c) 2011 Thomas Holder, MPI for Develpomental Biology
License: BSD-2-Clause
'''
from __future__ import print_function
from pymol import cmd
def qdelaunay(X, n=0, m=0, options='Qt', qdelaunay_exe='qdelaunay'):
    '''
    Triangulation using qdelaunay. (http://www.qhull.org)

    @param X: iterable object of points (not numpy.matrix). If n or m are 0, then
    X must be sequence.
    @param n: point dimension (0 = take len(X[0]))
    @param m: number of points (0 = take len(X))
    @param options: additional qdelaunay command line options
    @param qdelaunay_exe: path to the qdelaunay executable
    @returns: iterator of regions, each a list of point indices
    '''
    import subprocess
    if not n: n = len(X[0])
    if not m: m = len(X)
    process = subprocess.Popen([qdelaunay_exe, 'i'] + options.split(),
            universal_newlines=True,
            stdin=subprocess.PIPE, stdout=subprocess.PIPE)

    # input: dimension, count, then one point per line.
    # NOTE: all input is written before any output is read; qdelaunay
    # consumes its whole input before producing output, so this does not
    # deadlock in practice.
    print(n, file=process.stdin)
    print(m, file=process.stdin)
    for coord in X:
        # format all n coordinates (the original hard-coded 3-D output;
        # for 3 coordinates this produces byte-identical lines)
        print(' '.join('%f' % c for c in coord), file=process.stdin)
    process.stdin.close()

    # output: first line is the number of regions, then one region per line
    out_it = iter(process.stdout)
    n_regions = next(out_it)
    for line in out_it:
        a = line.split()
        assert len(a) == n + 1, 'Wrong number of vertices in output'
        yield list(map(int, a))

    # reap the child so it does not linger as a zombie process
    process.stdout.close()
    process.wait()
def delaunay(selection='enabled', name=None, cutoff=10.0, as_cgo=0,
        qdelaunay_exe='qdelaunay', state=-1, quiet=1):
    '''
DESCRIPTION

    Full-atom Delaunay Tessalator

    Creates either a molecular object with delaunay edges as bonds
    (as_cgo=0), or a CGO object with cylinder edges colored blue-to-red
    by edge length (as_cgo=1).  Edges longer than `cutoff` are dropped
    (cutoff <= 0 disables the filter).  Returns a list of
    (atom_a, atom_b, distance) tuples for the kept edges.

USAGE

    delaunay [ selection [, name [, cutoff=10.0 [, as_cgo=0 ]]]]

SEE ALSO

    PyDeT plugin: http://pymolwiki.org/index.php/PyDet
    '''
    from chempy import cpv, Bond
    if name is None:
        name = cmd.get_unused_name('delaunay')
    cutoff = float(cutoff)
    as_cgo = int(as_cgo)
    state, quiet = int(state), int(quiet)
    if state < 1:
        state = cmd.get_state()
    model = cmd.get_model(selection, state)
    # Tessellate the atom coordinates; qdelaunay yields 4-vertex regions
    # (tetrahedra) for 3-D input.
    regions_iter = qdelaunay((a.coord for a in model.atom), 3, len(model.atom),
            qdelaunay_exe=qdelaunay_exe)
    # Collect unique undirected edges from consecutive vertex pairs of each
    # region (i-1 wraps to the last vertex for i == 0).
    edges = set(tuple(sorted([region[i-1], region[i]]))
            for region in regions_iter for i in range(len(region)))
    edgelist=[]
    r = []
    # Track min/max edge length for the CGO color ramp.
    minco = 9999
    maxco = 0
    for edge in edges:
        ii, jj = edge
        a = model.atom[ii]
        b = model.atom[jj]
        co = cpv.distance(a.coord, b.coord)
        if cutoff > 0.0 and co > cutoff:
            continue
        if as_cgo:
            minco=min(co,minco)
            maxco=max(co,maxco)
            # store both endpoints plus the length (index 6) for later coloring
            edgelist.append(a.coord + b.coord + [co])
        else:
            # molecular mode: add the edge as a bond on the model copy
            bnd = Bond()
            bnd.index = [ii, jj]
            model.add_bond(bnd)
        r.append((a,b,co))
    if not as_cgo:
        cmd.load_model(model, name, 1)
        return r
    from pymol.cgo import CYLINDER
    difco = maxco-minco
    obj = []
    # clamp helper: keep color components inside [0, 1]
    mm = lambda x: max(min(x, 1.0), 0.0)
    for e in edgelist:
        # normalized length -> blue (short) to red (long) ramp
        # NOTE(review): difco is 0 (ZeroDivisionError) when all kept edges
        # have identical length -- confirm whether that case can occur.
        co = ((e[6]-minco)/difco)**(0.75)
        color = [mm(1-2*co), mm(1-abs(2*co-1)), mm(2*co-1)]
        obj.extend([CYLINDER] + e[0:6] + [0.05] + color + color)
    cmd.load_cgo(obj, name)
    return r
# tab-completion of arguments
cmd.auto_arg[0]['delaunay'] = cmd.auto_arg[0]['zoom']
# vi:sw=4:expandtab:smarttab
| 28.145299 | 80 | 0.577589 |
e43c43e33a78d2263051f385707cf2989b62e4d4 | 8,978 | py | Python | tests/test_argument_parsing.py | techchad-lights/pymatrix | b18c59d288a50ebe8e4c5788a7f2d7a0670646aa | [
"MIT"
] | null | null | null | tests/test_argument_parsing.py | techchad-lights/pymatrix | b18c59d288a50ebe8e4c5788a7f2d7a0670646aa | [
"MIT"
] | null | null | null | tests/test_argument_parsing.py | techchad-lights/pymatrix | b18c59d288a50ebe8e4c5788a7f2d7a0670646aa | [
"MIT"
] | null | null | null |
import pytest
from pymatrix import pymatrix
# --- CLI flag tests for pymatrix.argument_parsing() ------------------------
# Each test feeds an argv-style list to argument_parsing() and checks one
# attribute of the resulting namespace; [] exercises the default value.
# -d: scroll delay, a single digit 0-9 (default 4).
@pytest.mark.parametrize("test_values, expected_results", [
    ([], 4), (["-d0"], 0), (["-d1"], 1), (["-d", "2"], 2), (["-d3"], 3),
    (["-d4"], 4), (["-d 5"], 5), (["-d6"], 6), (["-d7"], 7),
    (["-d8"], 8), (["-d9"], 9)
])
def test_argument_parsing_delay(test_values, expected_results):
    result = pymatrix.argument_parsing(test_values)
    assert result.delay == expected_results
# --test_mode: boolean flag, off by default.
@pytest.mark.parametrize("test_values, expected_results", [
    ([], False), (["--test_mode"], True)
])
def test_argument_parsing_test_mode(test_values, expected_results):
    result = pymatrix.argument_parsing(test_values)
    assert result.test_mode == expected_results
# --test_mode_ext: boolean flag, off by default.
@pytest.mark.parametrize("test_values, expected_results", [
    ([], False), (["--test_mode_ext"], True)
])
def test_argument_parsing_test_mode_ext(test_values, expected_results):
    result = pymatrix.argument_parsing(test_values)
    assert result.test_mode_ext == expected_results
# -b: bold some characters.
@pytest.mark.parametrize("test_values, expected_results", [
    ([], False), (["-b"], True)
])
def test_argument_parsing_bold_on(test_values, expected_results):
    result = pymatrix.argument_parsing(test_values)
    assert result.bold_on == expected_results
# -B: bold all characters.
@pytest.mark.parametrize("test_values, expected_results", [
    ([], False), (["-B"], True)
])
def test_argument_parsing_bold_all(test_values, expected_results):
    result = pymatrix.argument_parsing(test_values)
    assert result.bold_all == expected_results
# -s: screen-saver mode (exit on any key press).
@pytest.mark.parametrize("test_values, expected_results", [
    ([], False), (["-s"], True)
])
def test_argument_parsing_screen_save_mode(test_values, expected_results):
    result = pymatrix.argument_parsing(test_values)
    assert result.screen_saver == expected_results
# -C: rain color; any casing accepted, normalized to lowercase (default green).
@pytest.mark.parametrize("test_values, expected_results", [
    (["-Cred"], "red"), (["-C", "Green"], "green"), (["-C", "BLUE"], "blue"),
    (["-CyeLLOW"], "yellow"), (["-C", "magenta"], "magenta"),
    (["-CCyan"], "cyan"), (["-Cwhite"], "white"), ([], "green")
])
def test_argument_parsing_color(test_values, expected_results):
    result = pymatrix.argument_parsing(test_values)
    assert result.color == expected_results
# -L: lead character color (default white).
@pytest.mark.parametrize("test_values, expected_results", [
    (["-Lred"], "red"), (["-L", "Green"], "green"), (["-L", "BLUE"], "blue"),
    (["-LyeLLOW"], "yellow"), (["-L", "magenta"], "magenta"),
    (["-LCyan"], "cyan"), (["-Lwhite"], "white"), ([], "white")
])
def test_argument_parsing_lead_color(test_values, expected_results):
    result = pymatrix.argument_parsing(test_values)
    assert result.lead_color == expected_results
# -S: seconds to wait before starting (default 0).
@pytest.mark.parametrize("test_values, expected_results", [
    ([], 0), (["-S1"], 1), (["-S5"], 5), (["-S", "20"], 20)
])
def test_argument_parsing_start_timer(test_values, expected_results):
    result = pymatrix.argument_parsing(test_values)
    assert result.start_timer == expected_results
# -R: seconds to run before exiting (default 0 = forever).
@pytest.mark.parametrize("test_values, expected_results", [
    ([], 0), (["-R1"], 1), (["-R5"], 5), (["-R", "20"], 20)
])
def test_argument_parsing_run_timer(test_values, expected_results):
    result = pymatrix.argument_parsing(test_values)
    assert result.run_timer == expected_results
# --list_colors: print available colors and exit.
@pytest.mark.parametrize("test_values, expected_results", [
    ([], False), (["--list_colors"], True)
])
def test_argument_parsing_list_colors(test_values, expected_results):
    result = pymatrix.argument_parsing(test_values)
    assert result.list_colors == expected_results
# --list_commands: print runtime key commands and exit.
@pytest.mark.parametrize("test_values, expected_results", [
    ([], False), (["--list_commands"], True)
])
def test_argument_parsing_list_commands(test_values, expected_results):
    result = pymatrix.argument_parsing(test_values)
    assert result.list_commands == expected_results
# -a: asynchronous (per-column) scrolling.
@pytest.mark.parametrize("test_values, expected_results", [
    ([], False), (["-a"], True)
])
def test_argument_parsing_async_scroll(test_values, expected_results):
    result = pymatrix.argument_parsing(test_values)
    assert result.async_scroll == expected_results
# -m: multiple-color mode.
@pytest.mark.parametrize("test_values, expected_results", [
    ([], False), (["-m"], True)
])
def test_argument_parsing_multiple_color_mode(test_values, expected_results):
    result = pymatrix.argument_parsing(test_values)
    assert result.multiple_mode == expected_results
# -M: random multiple-color mode.
@pytest.mark.parametrize("test_values, expected_results", [
    ([], False), (["-M"], True)
])
def test_argument_parsing_multiple_random_color_mode(test_values, expected_results):
    result = pymatrix.argument_parsing(test_values)
    assert result.random_mode == expected_results
# -c: cycle through the colors over time.
@pytest.mark.parametrize("test_values, expected_results", [
    ([], False), (["-c"], True)
])
def test_argument_parsing_cycle_through_colors(test_values, expected_results):
    result = pymatrix.argument_parsing(test_values)
    assert result.cycle == expected_results
# -e: mix extended characters into the rain.
@pytest.mark.parametrize("test_value, expected_result", [
    ([], False), (["-e"], True)
])
def test_argument_parsing_extended_char(test_value, expected_result):
    result = pymatrix.argument_parsing(test_value)
    assert result.ext == expected_result
# -E: use extended characters only.  This test was previously *also* named
# test_argument_parsing_extended_char, so it shadowed the -e test above and
# pytest never ran that one; renamed so both tests execute.
@pytest.mark.parametrize("test_value, expected_result", [
    ([], False), (["-E"], True)
])
def test_argument_parsing_extended_char_only(test_value, expected_result):
    result = pymatrix.argument_parsing(test_value)
    assert result.ext_only == expected_result
# -l: double-space the rain lines.
@pytest.mark.parametrize("test_value, expected_result", [
    ([], False), (["-l"], True),
])
def test_argument_parsing_double_space_lines(test_value, expected_result):
    result = pymatrix.argument_parsing(test_value)
    assert result.double_space == expected_result
# --wakeup: wake-up easter-egg mode.
@pytest.mark.parametrize("test_value, expected_result", [
    ([], False), (["--wakeup"], True)
])
def test_argument_parsing_wakeup(test_value, expected_result):
    result = pymatrix.argument_parsing(test_value)
    assert result.wakeup == expected_result
# -z: rain made of zeros and ones.
@pytest.mark.parametrize("test_value, expected_result", [
    ([], False), (["-z"], True),
])
def test_argument_parsing_zero_one(test_value, expected_result):
    result = pymatrix.argument_parsing(test_value)
    assert result.zero_one == expected_result
# --disable_keys: ignore runtime key commands.
@pytest.mark.parametrize("test_value, expected_result", [
    ([], False), (["--disable_keys"], True)
])
def test_argument_parsing_disable_keys(test_value, expected_result):
    result = pymatrix.argument_parsing(test_value)
    assert result.disable_keys == expected_result
# --background: background color (default black).
@pytest.mark.parametrize("test_value, expected_result", [
    ([], "black",), (["--background", "blue"], "blue")
])
def test_argument_parsing_background_color(test_value, expected_result):
    result = pymatrix.argument_parsing(test_value)
    assert result.background == expected_result
# -v / --reverse: rain scrolls upward.
@pytest.mark.parametrize("test_value, expected_result", [
    ([], False), (["-v"], True), (["--reverse"], True)
])
def test_argument_parsing_reverse(test_value, expected_result):
    result = pymatrix.argument_parsing(test_value)
    assert result.reverse == expected_result
# testing helper functions
# positive_int_zero_to_nine(): accepts single-digit strings "0".."9".
@pytest.mark.parametrize("test_values, expected_results", [
    ("0", 0), ("1", 1), ("2", 2), ("3", 3), ("4", 4),
    ("5", 5), ("6", 6), ("7", 7), ("8", 8), ("9", 9)
])
def test_positive_int_zero_to_nine_normal(test_values, expected_results):
    """ Tests that the delay time conversion formula is working. """
    result = pymatrix.positive_int_zero_to_nine(test_values)
    assert result == expected_results
# Out-of-range, non-numeric or empty input must raise ArgumentTypeError.
@pytest.mark.parametrize("test_values", [
    "-5", "10", "100", "2.5", " ", "Test", "test&*#", "",
])
def test_positive_int_zero_to_nine_error(test_values):
    """ Testing delay_positive_int will raise an error. """
    with pytest.raises(pymatrix.argparse.ArgumentTypeError):
        pymatrix.positive_int_zero_to_nine(test_values)
# color_type() should normalize any casing of a known color to lowercase
# (the -C/-L parsing tests above rely on exactly this behavior).
@pytest.mark.parametrize("test_values, expected_results", [
    ("red", "red"), ("Green", "green"), ("BLUE", "blue"),
    ("yeLLOW", "yellow"), ("magenta", "magenta"),
    ("Cyan", "cyan"), ("whiTe", "white")
])
def test_color_type_normal(test_values, expected_results):
    result = pymatrix.color_type(test_values)
    # Previously this was just `assert result`, which ignored
    # expected_results and passed for any truthy return value.
    assert result == expected_results
# Unknown color names (or junk input) must raise ArgumentTypeError.
@pytest.mark.parametrize("test_values", [
    "orange", "12", "who", "<>", "", " ", "ter8934", "834DFD"
])
def test_color_type_error(test_values):
    with pytest.raises(pymatrix.argparse.ArgumentTypeError):
        pymatrix.color_type(test_values)
# positive_int(): any strictly positive integer string converts.
@pytest.mark.parametrize("test_values, expected_results", [
    ("1", 1), ("2", 2), ("6", 6), ("20", 20), ("500", 500)
])
def test_positive_int_normal(test_values, expected_results):
    result = pymatrix.positive_int(test_values)
    assert result == expected_results
# Zero, negatives, floats and non-numeric input must raise ArgumentTypeError.
@pytest.mark.parametrize("test_values", [
    "0", "-3", "1.3", "0.4", "10.4", "a", "b", "", " ", "$", "time32"
])
def test_positive_int_error(test_values):
    with pytest.raises(pymatrix.argparse.ArgumentTypeError):
        pymatrix.positive_int(test_values)
| 34.933852 | 84 | 0.700267 |
eb99e77ff75de8987dbb9758d5f357c3694a05b7 | 2,613 | py | Python | personal/Tommaso/Ideas/lele.py | edervishaj/spotify-recsys-challenge | 4077201ac7e4ed9da433bd10a92c183614182437 | [
"Apache-2.0"
] | 3 | 2018-10-12T20:19:57.000Z | 2019-12-11T01:11:38.000Z | personal/Tommaso/Ideas/lele.py | kiminh/spotify-recsys-challenge | 5e7844a77ce3c26658400f161d2d74d682f30e69 | [
"Apache-2.0"
] | null | null | null | personal/Tommaso/Ideas/lele.py | kiminh/spotify-recsys-challenge | 5e7844a77ce3c26658400f161d2d74d682f30e69 | [
"Apache-2.0"
] | 4 | 2018-10-27T20:30:18.000Z | 2020-10-14T07:43:27.000Z | import pandas as pd
import numpy as np
import nltk
from nltk import stem
from nltk.tokenize import RegexpTokenizer
from utils.datareader import Datareader
from tqdm import tqdm
from scipy import sparse
from difflib import SequenceMatcher
from difflib import get_close_matches
from utils.pre_processing import *
from utils.evaluator import Evaluator
from utils.post_processing import *
from utils.definitions import *
from utils.submitter import Submitter
# Module-level dataset accessor used by func() below ('online' challenge
# split; only_load=True presumably reuses previously built data -- confirm).
datareader = Datareader(mode='online', only_load=True)
# ev = Evaluator(dr)
def levenshtein(s1, s2):
    """Return the Levenshtein (edit) distance between two strings.

    Classic dynamic-programming recurrence keeping only a single row of
    the DP table, i.e. O(min(len(s1), len(s2))) extra space.
    """
    # Keep the shorter string second so the DP row stays as small as possible.
    if len(s2) > len(s1):
        s1, s2 = s2, s1
    if not s2:
        return len(s1)
    prev = list(range(len(s2) + 1))
    for row_idx, ch_a in enumerate(s1, start=1):
        curr = [row_idx]
        for col_idx, ch_b in enumerate(s2, start=1):
            # prev holds distances for the previous character of s1;
            # curr is one entry longer than the processed part of s2.
            cost_insert = prev[col_idx] + 1
            cost_delete = curr[col_idx - 1] + 1
            cost_subst = prev[col_idx - 1] + (ch_a != ch_b)
            curr.append(min(cost_insert, cost_delete, cost_subst))
        prev = curr
    return prev[-1]
def func():
    """Print test-set playlist titles that exactly match an artist name.

    Uses the module-level `datareader`; all comparisons are done on
    lower-cased strings.  Only matching titles longer than 3 characters
    that contain a space are printed, followed by the match count.
    """
    # Artists
    artists = list(datareader.get_df_artists()['artist_name'].as_matrix())
    artists = [str(x).lower() for x in artists]
    # Albums
    # NOTE(review): `albums` is built but never used below.
    albums = list(datareader.get_df_test_albums()['album_name'].as_matrix())
    albums = [str(x).lower() for x in albums]
    # Playlist titles
    train_playlists_df = datareader.get_df_train_playlists()
    test_playlists_df = datareader.get_df_test_playlists()
    concat_df = pd.concat([train_playlists_df, test_playlists_df])
    if datareader.offline():
        concat_df = concat_df.sort_values(['pid'], ascending=True)
    # NOTE(review): `playlists` is also unused.
    playlists = concat_df['pid'].as_matrix()
    playlist_titles = concat_df['name'].as_matrix()
    playlist_titles = [str(x).lower() for x in playlist_titles]
    playlist_titles = np.array(playlist_titles)
    # Indices of category-1 test playlists; the +1000000 offset presumably
    # maps test indices into the concatenated train+test ordering -- confirm.
    cat1 = np.array(datareader.get_test_pids_indices()).astype(np.int) + 1000000
    i = 0
    for title in playlist_titles[cat1]:
        for artist in artists:
            # if len(title) > 4:
            #     if title[0] in artist[0:2] or title[1] in artist[0:2]:
            #         d = levenshtein(title, artist)
            #         if d <= 1:
            if title == artist and len(title) > 3 and ' ' in title:
                i += 1
                print(title)
                #print(artist)
                #print('----------------')
    print(i)
func()
| 31.107143 | 119 | 0.634902 |
67e2f0d00bd6d423e14d0afd8e363dea244cfd35 | 1,426 | py | Python | Easy/970.PowerfulIntegers.py | YuriSpiridonov/LeetCode | 2dfcc9c71466ffa2ebc1c89e461ddfca92e2e781 | [
"MIT"
] | 39 | 2020-07-04T11:15:13.000Z | 2022-02-04T22:33:42.000Z | Easy/970.PowerfulIntegers.py | YuriSpiridonov/LeetCode | 2dfcc9c71466ffa2ebc1c89e461ddfca92e2e781 | [
"MIT"
] | 1 | 2020-07-15T11:53:37.000Z | 2020-07-15T11:53:37.000Z | Easy/970.PowerfulIntegers.py | YuriSpiridonov/LeetCode | 2dfcc9c71466ffa2ebc1c89e461ddfca92e2e781 | [
"MIT"
] | 20 | 2020-07-14T19:12:53.000Z | 2022-03-02T06:28:17.000Z | """
Given two positive integers x and y, an integer is powerful if it is equal
to x^i + y^j for some integers i >= 0 and j >= 0.
Return a list of all powerful integers that have value less than or equal
to bound.
You may return the answer in any order. In your answer, each value should
occur at most once.
Example:
Input: x = 2, y = 3, bound = 10
Output: [2,3,4,5,7,9,10]
Explanation:
2 = 2^0 + 3^0
3 = 2^1 + 3^0
4 = 2^0 + 3^1
5 = 2^1 + 3^1
7 = 2^2 + 3^1
9 = 2^3 + 3^0
10 = 2^0 + 3^2
Example:
Input: x = 3, y = 5, bound = 15
Output: [2,4,6,8,10,14]
Note:
- 1 <= x <= 100
- 1 <= y <= 100
- 0 <= bound <= 10^6
"""
#Difficulty: Easy
#93 / 93 test cases passed.
#Runtime: 32 ms
#Memory Usage: 14 MB
#Runtime: 32 ms, faster than 61.64% of Python3 online submissions for Powerful Integers.
#Memory Usage: 14 MB, less than 63.29% of Python3 online submissions for Powerful Integers.
class Solution:
    def powerfulIntegers(self, x: int, y: int, bound: int) -> List[int]:
        """Return all distinct values x**i + y**j (i, j >= 0) not exceeding bound.

        Iterates the powers directly instead of a fixed range(20) grid:
        each inner/outer loop stops as soon as the sum must exceed the
        bound, and the x == 1 / y == 1 cases (whose powers never grow)
        terminate after one step.  Returns a list, matching the declared
        List[int] signature (the previous version returned a set).
        """
        result = set()
        x_pow = 1                               # current x**i
        while x_pow < bound:                    # need x_pow + 1 <= bound at minimum
            y_pow = 1                           # current y**j
            while x_pow + y_pow <= bound:
                result.add(x_pow + y_pow)
                if y == 1:                      # powers of 1 never grow
                    break
                y_pow *= y
            if x == 1:                          # likewise for the outer base
                break
            x_pow *= x
        return list(result)
| 27.960784 | 91 | 0.502104 |
056247f26dd9cc6f550d534d821f98994094229f | 4,059 | py | Python | src/radical/pilot/agent/resource_manager/slurm.py | karahbit/radical.pilot | c611e1df781749deef899dcf5815728e1d8a962e | [
"MIT"
] | null | null | null | src/radical/pilot/agent/resource_manager/slurm.py | karahbit/radical.pilot | c611e1df781749deef899dcf5815728e1d8a962e | [
"MIT"
] | null | null | null | src/radical/pilot/agent/resource_manager/slurm.py | karahbit/radical.pilot | c611e1df781749deef899dcf5815728e1d8a962e | [
"MIT"
] | null | null | null |
__copyright__ = "Copyright 2016, http://radical.rutgers.edu"
__license__ = "MIT"
import os
import hostlist
import radical.utils as ru
from .base import ResourceManager
# ------------------------------------------------------------------------------
#
class Slurm(ResourceManager):
    '''ResourceManager for SLURM: derives the node list and per-node
    resources from the SLURM_* environment of the current allocation.'''

    # --------------------------------------------------------------------------
    #
    def __init__(self, cfg, session):

        ResourceManager.__init__(self, cfg, session)


    # --------------------------------------------------------------------------
    #
    def _int_from_env(self, name):
        '''Return the required environment variable `name` as an int,
        logging and raising RuntimeError when it is not set.'''

        value = os.environ.get(name)
        if value is None:
            msg = "$%s not set!" % name
            self._log.error(msg)
            raise RuntimeError(msg)
        return int(value)


    # --------------------------------------------------------------------------
    #
    def _configure(self):
        '''Inspect $SLURM_NODELIST, $SLURM_NPROCS, $SLURM_NNODES and
        $SLURM_CPUS_ON_NODE and populate node_list, cores/gpus/mem/lfs
        per node and lm_info.'''

        slurm_nodelist = os.environ.get('SLURM_NODELIST')
        if slurm_nodelist is None:
            msg = "$SLURM_NODELIST not set!"
            self._log.error(msg)
            raise RuntimeError(msg)

        # Parse SLURM nodefile environment variable
        slurm_nodes = hostlist.expand_hostlist(slurm_nodelist)
        self._log.info("Found SLURM_NODELIST %s. Expanded to: %s",
                       slurm_nodelist, slurm_nodes)

        # $SLURM_NPROCS = Total number of cores allocated for the current job
        slurm_nprocs = self._int_from_env('SLURM_NPROCS')

        # $SLURM_NNODES = Total number of (partial) nodes in the job's resource allocation
        slurm_nnodes = self._int_from_env('SLURM_NNODES')

        # $SLURM_CPUS_ON_NODE = Number of cores per node (physically)
        slurm_cpus_on_node = self._int_from_env('SLURM_CPUS_ON_NODE')

        # Sanity check: the allocation should not report more processes than
        # nodes * cores-per-node can provide.  Warn only -- partial node
        # allocations can legitimately trip this.  (The previous message
        # printed the expected invariant with '<='; it now states the
        # actual anomaly.)
        if not slurm_nprocs <= slurm_nnodes * slurm_cpus_on_node:
            self._log.warning(
                "$SLURM_NPROCS(%d) > $SLURM_NNODES(%d) * $SLURM_CPUS_ON_NODE(%d)",
                slurm_nprocs, slurm_nnodes, slurm_cpus_on_node)

        # Verify that $SLURM_NNODES == len($SLURM_NODELIST)
        if slurm_nnodes != len(slurm_nodes):
            self._log.error("$SLURM_NNODES(%d) != len($SLURM_NODELIST)(%d)",
                            slurm_nnodes, len(slurm_nodes))

        # Report the physical number of cores or the total number of cores
        # in case of a single partial node allocation.
        self.cores_per_node = self._cfg.get('cores_per_node', 0)
        self.gpus_per_node  = self._cfg.get('gpus_per_node', 0)  # FIXME GPU
        self.mem_per_node   = self._cfg.get('mem_per_node', 0)

        self._log.debug('lfs path %s', ru.expand_env(
                                           self._cfg.get('lfs_path_per_node')))

        self.lfs_per_node = {'path': ru.expand_env(
                                         self._cfg.get('lfs_path_per_node')),
                             'size': self._cfg.get('lfs_size_per_node', 0)}

        if not self.cores_per_node:
            self.cores_per_node = min(slurm_cpus_on_node, slurm_nprocs)

        # node names are unique, so can serve as node uids
        self.node_list = [[node, node] for node in slurm_nodes]

        self.lm_info['cores_per_node'] = self.cores_per_node

        # NOTE(review): an earlier (commented-out) revision removed all
        # SLURM_* / SBATCH_* environment variables here so that nested
        # `srun` executions are not confused; that behavior remains disabled.
# ------------------------------------------------------------------------------
| 37.583333 | 97 | 0.557034 |
77669895f04167dcaebf87799b8a5bfd28ef9c65 | 7,778 | py | Python | pngFS.py | kugo12/pngFS | 1b4a031cea1418b2063ccecb24924cc23c8b0dfa | [
"MIT"
] | null | null | null | pngFS.py | kugo12/pngFS | 1b4a031cea1418b2063ccecb24924cc23c8b0dfa | [
"MIT"
] | null | null | null | pngFS.py | kugo12/pngFS | 1b4a031cea1418b2063ccecb24924cc23c8b0dfa | [
"MIT"
] | 1 | 2020-09-22T19:41:22.000Z | 2020-09-22T19:41:22.000Z | import pyfuse3
import pyfuse3_asyncio
import asyncio
import os
import errno
import stat
import pngdata
import pickle
from time import time_ns
from argparse import ArgumentParser
pyfuse3_asyncio.enable()
class SingleShotTimer:
    """Debounced one-shot timer.

    start() schedules `callback` to run once after `delay` seconds on the
    running asyncio loop; calling start() again before it fires cancels
    the pending shot and restarts the countdown.
    """
    __slots__ = ['_callback', '_delay', '_task']

    def __init__(self, callback, delay):
        self._callback = callback
        self._delay = delay
        self._task = None  # pending asyncio.Task, or None when idle

    async def _run(self):
        # Wait out the delay, fire once, then mark the timer idle again.
        await asyncio.sleep(self._delay)
        self._callback()
        self._task = None

    def start(self):
        """Arm (or re-arm) the timer; any pending shot is cancelled first."""
        pending = self._task
        if pending is not None:
            pending.cancel()
        self._task = asyncio.create_task(self._run())
class File:
    """A single filesystem node: either a regular file holding a bytes
    payload, or a directory holding a name -> child-inode mapping."""
    __slots__ = ['name', 'inode', 'size', 'content', 'mode', 'is_dir', 'parent']

    def __init__(self, name, parent_inode, content, is_dir):
        self.name = name
        self.inode = None          # assigned later by FS.add_file()
        self.parent = parent_inode
        self.is_dir = is_dir
        if not is_dir:
            # Regular file: a falsy payload (None, b'') becomes empty bytes.
            self.mode = (stat.S_IFREG | 0o644)
            self.content = content if content else b''
            self.size = len(self.content)
        else:
            # Directory: children live in a name -> inode dict; size is 0.
            self.mode = (stat.S_IFDIR | 0o755)
            self.content = {}
            self.size = 0
class FS:
    """In-memory filesystem state: an inode -> File table plus one shared
    pyfuse3.EntryAttributes scratch struct that getattr() patches in place.

    NOTE(review): the single shared `stat` object is returned from every
    getattr call, so its field values are only valid until the next lookup.
    """
    __slots__ = ['stat', 'files', 'inodes_created']
    def __init__(self):
        # One reusable attribute struct; per-file fields (mode/size/ino)
        # are filled in on every getattr call.  Timestamps and ownership
        # are fixed at creation time for all files.
        self.stat = pyfuse3.EntryAttributes()
        t = time_ns()
        self.stat.st_atime_ns = t
        self.stat.st_ctime_ns = t
        self.stat.st_mtime_ns = t
        self.stat.st_gid = os.getgid()
        self.stat.st_uid = os.getuid()
        self.files = {}
        # Next inode number to hand out; starts at the FUSE root inode.
        self.inodes_created = pyfuse3.ROOT_INODE
        root = File(b'..', 0, None, True)
        self.add_file(root)
    def add_file(self, file):
        """Assign the next free inode to `file`, register it, and link it
        into its parent directory (parent 0 means no parent, i.e. root)."""
        file.inode = self.inodes_created
        self.files[file.inode] = file
        if file.parent:
            self.files[file.parent].content[file.name] = file.inode
        self.inodes_created += 1
    def getattr(self, inode):
        """Return the shared stat struct populated for `inode`."""
        file = self.get_file(inode)
        self.stat.st_mode = file.mode
        self.stat.st_size = file.size
        self.stat.st_ino = file.inode
        return self.stat
    def getattr_from_file(self, file):
        """Same as getattr(), but for an already-resolved File object."""
        self.stat.st_mode = file.mode
        self.stat.st_size = file.size
        self.stat.st_ino = file.inode
        return self.stat
    def get_file(self, inode):
        """Look up a File by inode; raise ENOENT for unknown inodes."""
        try:
            return self.files[inode]
        except KeyError:
            raise pyfuse3.FUSEError(errno.ENOENT)
    def get_inode(self, parent_inode, name):
        """Resolve `name` inside directory `parent_inode` to an inode;
        raises ENOENT when either the parent or the entry is missing."""
        try:
            return self.files[parent_inode].content[name]
        except KeyError:
            raise pyfuse3.FUSEError(errno.ENOENT)
    def clean_files_without_parent(self):
        # Drop orphaned entries whose parent directory no longer exists;
        # iterate over a snapshot since entries are deleted while walking.
        for inode, file in list(self.files.items()):
            if inode != pyfuse3.ROOT_INODE:
                if file.parent not in self.files:
                    del self.files[inode]
class pngFS(pyfuse3.Operations):
__slots__ = ['written', 'files', 'png_file']
def __init__(self, args):
super().__init__()
try:
self.files = pickle.loads(pngdata.decode(args.png_file, False))
self.files.clean_files_without_parent()
except (FileNotFoundError, AttributeError, pickle.UnpicklingError):
self.files = FS()
self.png_file = args.png_file
self.write_timer = SingleShotTimer(self.write_to_png, args.delay)
if args.save_at_exit:
self.write_timer.start = lambda: ''
def write_to_png(self):
pngdata.encode(pickle.dumps(self.files, 4), self.png_file)
async def getattr(self, inode, ctx=None):
return self.files.getattr(inode)
async def lookup(self, parent_inode, name, ctx=None):
inode = self.files.get_inode(parent_inode, name)
return await self.getattr(inode)
async def opendir(self, inode, ctx):
file = self.files.get_file(inode)
if file.is_dir:
return inode
else:
raise pyfuse3.FUSEError(errno.ENOTDIR)
async def readdir(self, inode, off, token):
childs = list(self.files.get_file(inode).content.values())
for i in filter(lambda i: i > off, childs):
child = self.files.get_file(i)
r = pyfuse3.readdir_reply(
token, child.name,
self.files.getattr_from_file(child), i
)
if not r:
return
async def mkdir(self, inode_parent, name, mode, ctx):
directory = File(name, inode_parent, None, True)
self.files.add_file(directory)
self.write_timer.start()
return self.files.getattr_from_file(directory)
async def create(self, inode_parent, name, mode, flags, ctx):
file = File(name, inode_parent, None, False)
self.files.add_file(file)
self.write_timer.start()
return (
pyfuse3.FileInfo(fh=file.inode),
self.files.getattr_from_file(file)
)
async def open(self, inode, flags, ctx):
return pyfuse3.FileInfo(fh=inode)
async def read(self, inode, offset, length):
data = self.files.get_file(inode).content
return data[offset:offset+length]
async def write(self, inode, offset, buf):
file = self.files.get_file(inode)
data = memoryview(file.content)
file.content = bytes(data[:offset]) + buf + bytes(data[offset+len(buf):])
file.size = len(file.content)
self.write_timer.start()
return len(buf)
async def rmdir(self, parent_inode, name, ctx):
parent = self.files.get_file(parent_inode)
inode = parent.content[name]
directory = self.files.get_file(inode)
if directory.content:
raise pyfuse3.FUSEError(errno.ENOTEMPTY)
del parent.content[name]
del self.files.files[inode]
self.write_timer.start()
    async def unlink(self, parent_inode, name, ctx):
        """Delete the regular file *name* from *parent_inode* and schedule a save."""
        parent = self.files.get_file(parent_inode)
        inode = parent.content[name]
        del self.files.files[inode]
        del parent.content[name]
        self.write_timer.start()
    async def rename(self, parent_inode_old, name_old, parent_inode_new, name_new, flags, ctx):
        """Move/rename an entry between directories, then schedule a save.

        NOTE(review): if *name_new* already exists in the target directory
        its dict entry is silently overwritten, leaving the displaced inode
        orphaned in self.files.files — confirm whether that leak is intended.
        """
        old_parent = self.files.get_file(parent_inode_old)
        new_parent = self.files.get_file(parent_inode_new)
        inode = old_parent.content[name_old]
        del old_parent.content[name_old]
        file = self.files.get_file(inode)
        file.name = name_new
        new_parent.content[name_new] = inode
        self.write_timer.start()
def parse_args():
    """Build and parse the pngFS command-line interface.

    Positional arguments: the PNG store path and the mount point.
    Optional flags control save-at-exit, the debounce delay, and FUSE
    debug logging.
    """
    parser = ArgumentParser()
    parser.add_argument('png_file', type=str)
    parser.add_argument('mountpoint', type=str)
    parser.add_argument('-e', '--save-at-exit', dest='save_at_exit',
                        action='store_true', default=False,
                        help='save to png file at exit')
    parser.add_argument('-d', '--delay', dest='delay', type=int, default=2,
                        help='delay between last file modification and saving to png')
    parser.add_argument('--debug-fuse', dest='fuse_debug',
                        action='store_true', default=False,
                        help='show fuse debug log')
    return parser.parse_args()
def main():
    """Mount the pngFS filesystem and run the FUSE loop until interrupted.

    On shutdown (including Ctrl-C) the tree is pruned of orphaned entries
    and written back to the PNG before the FUSE session is closed.
    """
    args = parse_args()
    pngfs = pngFS(args)
    fuse_options = set(pyfuse3.default_options)
    fuse_options.add('fsname=pngFS')
    if args.fuse_debug:
        fuse_options.add('debug')
    pyfuse3.init(pngfs, args.mountpoint, fuse_options)
    try:
        asyncio.run(pyfuse3.main())
    except KeyboardInterrupt:
        pass
    # Final flush: prune dangling files and persist, then tear down FUSE.
    pngfs.files.clean_files_without_parent()
    pngfs.write_to_png()
    pyfuse3.close()
# Run the filesystem only when executed as a script (not on import).
if __name__ == '__main__':
    main()
| 28.386861 | 95 | 0.614811 |
59814edd8cf491e3c361c930fd050c394d12350a | 1,007 | py | Python | Leetcode/247.strobogrammatic-number-ii.py | EdwaRen/Competitve-Programming | e8bffeb457936d28c75ecfefb5a1f316c15a9b6c | [
"MIT"
] | 1 | 2021-05-03T21:48:25.000Z | 2021-05-03T21:48:25.000Z | Leetcode/247.strobogrammatic-number-ii.py | EdwaRen/Competitve_Programming | e8bffeb457936d28c75ecfefb5a1f316c15a9b6c | [
"MIT"
] | null | null | null | Leetcode/247.strobogrammatic-number-ii.py | EdwaRen/Competitve_Programming | e8bffeb457936d28c75ecfefb5a1f316c15a9b6c | [
"MIT"
] | null | null | null | class Solution(object):
def findStrobogrammatic(self, n):
"""
:type n: int
:rtype: List[str]
"""
def recurse(n, orig):
# Handle base causes
if n == 0:
return ['']
elif n == 1:
return ['1', '8', '0']
prev = recurse(n-2, orig)
res = []
# Once we have recursed the n-2th solution, we append all possible combos
for i in prev:
# Do not append 0 to the edge of the string
if n != orig:
res.append('0' + i + '0')
res.append('1' + i +'1')
res.append('8' + i +'8')
res.append('6' + i +'9')
res.append('9' + i +'6')
return res
# Handle Null case
if not n:
return None
return recurse(n, n)
# Ad-hoc smoke test: print all strobogrammatic numbers of length 1.
z = Solution()
n = 1
print(z.findStrobogrammatic(n))
| 23.97619 | 85 | 0.397219 |
c2de33ac60e5505487b2a818cd89fdcbeb42aab1 | 2,977 | py | Python | eval/main.py | MaxyLee/Style-AttnGAN | d33d0df061c94b75ad4af5c750b8d6f37ee1a35a | [
"MIT"
] | 36 | 2020-05-31T14:38:32.000Z | 2022-03-26T12:36:00.000Z | eval/main.py | MaxyLee/Style-AttnGAN | d33d0df061c94b75ad4af5c750b8d6f37ee1a35a | [
"MIT"
] | 2 | 2021-10-30T01:13:05.000Z | 2022-03-15T01:41:45.000Z | eval/main.py | MaxyLee/Style-AttnGAN | d33d0df061c94b75ad4af5c750b8d6f37ee1a35a | [
"MIT"
] | 10 | 2021-07-21T14:58:34.000Z | 2022-02-24T20:26:24.000Z | # Copyright (c) 2018 Tao Xu
import os
import time
import random
from eval import *
from flask import Flask, jsonify, request, abort
from applicationinsights import TelemetryClient
from applicationinsights.requests import WSGIApplication
from applicationinsights.exceptions import enable
from miscc.config import cfg
#from werkzeug.contrib.profiler import ProfilerMiddleware
# Enable Application Insights exception telemetry and wrap the WSGI app so
# every incoming request is tracked; TELEMETRY holds the instrumentation key.
enable(os.environ["TELEMETRY"])
app = Flask(__name__)
app.wsgi_app = WSGIApplication(os.environ["TELEMETRY"], app.wsgi_app)
@app.route('/api/v1.0/bird', methods=['POST'])
def create_bird():
    """Generate one bird image set from the posted caption.

    Expects JSON {"caption": ...}; returns blob URLs for the generated
    image sizes and attention maps plus the generation time, with 201.
    """
    payload = request.json
    if not payload or 'caption' not in payload:
        abort(400)
    caption = payload['caption']

    start = time.time()
    urls = generate(caption, wordtoix, ixtoword, text_encoder, netG, blob_service)
    elapsed = time.time() - start

    bird = {
        'small': urls[0],
        'medium': urls[1],
        'large': urls[2],
        'map1': urls[3],
        'map2': urls[4],
        'caption': caption,
        'elapsed': elapsed
    }
    return jsonify({'bird': bird}), 201
@app.route('/api/v1.0/birds', methods=['POST'])
def create_birds():
    """Generate six bird image sets from the posted caption.

    Expects JSON {"caption": ...}. generate() is asked for 6 copies and
    returns 18 URLs (small/medium/large per bird), which are regrouped
    per bird in the 201 response.
    """
    if not request.json or not 'caption' in request.json:
        abort(400)

    caption = request.json['caption']
    t0 = time.time()
    urls = generate(caption, wordtoix, ixtoword, text_encoder, netG, blob_service, copies=6)
    t1 = time.time()

    response = {
        'bird1' : { 'small': urls[0], 'medium': urls[1], 'large': urls[2] },
        'bird2' : { 'small': urls[3], 'medium': urls[4], 'large': urls[5] },
        'bird3' : { 'small': urls[6], 'medium': urls[7], 'large': urls[8] },
        'bird4' : { 'small': urls[9], 'medium': urls[10], 'large': urls[11] },
        'bird5' : { 'small': urls[12], 'medium': urls[13], 'large': urls[14] },
        'bird6' : { 'small': urls[15], 'medium': urls[16], 'large': urls[17] },
        'caption': caption,
        'elapsed': t1 - t0
    }

    return jsonify({'bird': response}), 201
@app.route('/', methods=['GET'])
def get_bird():
    # Root endpoint doubling as a health/version check.
    return 'Version 1'
if __name__ == '__main__':
    t0 = time.time()
    tc = TelemetryClient(os.environ["TELEMETRY"])

    # GPU toggle: cfg.CUDA follows the GPU env var ("true"/"false").
    cfg.CUDA = os.environ["GPU"].lower() == 'true'
    tc.track_event('container initializing', {"CUDA": str(cfg.CUDA)})

    # Load the caption word <-> index dictionaries.
    wordtoix, ixtoword = word_index()

    # Load models (text encoder + generator) sized to the vocabulary.
    text_encoder, netG = models(len(wordtoix))

    # Blob storage client used to upload the generated images.
    blob_service = BlockBlobService(account_name='attgan', account_key=os.environ["BLOB_KEY"])

    # Fixed seeds for reproducible generation across restarts.
    seed = 100
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    if cfg.CUDA:
        torch.cuda.manual_seed_all(seed)

    #app.config['PROFILE'] = True
    #app.wsgi_app = ProfilerMiddleware(app.wsgi_app, restrictions=[30])
    #app.run(host='0.0.0.0', port=8080, debug = True)

    t1 = time.time()
    tc.track_event('container start', {"starttime": str(t1-t0)})
    app.run(host='0.0.0.0', port=8080)
| 31.010417 | 94 | 0.624454 |
e362de5950e9c651cf1ed0a5b7aabd599f461f79 | 4,287 | py | Python | scripts/slave/recipes/infra/try_recipe.py | bopopescu/build | 4e95fd33456e552bfaf7d94f7d04b19273d1c534 | [
"BSD-3-Clause"
] | null | null | null | scripts/slave/recipes/infra/try_recipe.py | bopopescu/build | 4e95fd33456e552bfaf7d94f7d04b19273d1c534 | [
"BSD-3-Clause"
] | null | null | null | scripts/slave/recipes/infra/try_recipe.py | bopopescu/build | 4e95fd33456e552bfaf7d94f7d04b19273d1c534 | [
"BSD-3-Clause"
] | 1 | 2020-07-23T10:57:32.000Z | 2020-07-23T10:57:32.000Z | # Copyright 2016 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""A recipe for actually running recipe on build/ repo patches as tryjobs.
For usage - see
https://chromium.googlesource.com/chromium/tools/build/+/master/scripts/slave/recipes/infra/try_recipe.md
"""
import base64
import json
import zlib
DEPS = [
'depot_tools/bot_update',
'file',
'depot_tools/gclient',
'recipe_engine/json',
'recipe_engine/path',
'recipe_engine/platform',
'recipe_engine/properties',
'recipe_engine/python',
'recipe_engine/raw_io',
'recipe_engine/step',
'depot_tools/tryserver'
]
def decode(x):
    """Inverse of encode(): base64-decode, zlib-decompress, JSON-parse."""
    compressed = base64.b64decode(x)
    return json.loads(zlib.decompress(compressed))
def encode(x):
    """JSON-serialize *x*, zlib-compress at max level, base64-encode.

    Fix: json.dumps() returns text, and zlib.compress() requires bytes on
    Python 3, so the JSON is UTF-8-encoded first (harmless on Python 2).
    The base64 output is decoded to str so the result can be embedded in
    property payloads either way.
    """
    compressed = zlib.compress(json.dumps(x).encode('utf-8'), 9)
    return base64.b64encode(compressed).decode('ascii')
def RunSteps(api):
  """Check out itself, maybe apply patch, and then execute_inner real itself."""
  api.gclient.set_config('build')
  api.bot_update.ensure_checkout(force=True, patch_root='build')
  try:
    # Windows machine often fails to fetch deps because of some weird git.bat
    # errors. So, try it here.
    api.python(
        'fetch recipe engine deps',
        api.path['checkout'].join('scripts', 'slave', 'recipes.py'),
        ['fetch'])
  except api.step.StepFailure:
    # Delete the whole .deps just to be certain.
    recipe_deps = api.path['checkout'].join('scripts', 'slave', '.recipe_deps')
    # api.file.rmtree('Remove recipe deps.', recipe_deps)
    api.python.inline('remove repo workaround for http://crbug.com/589201',
        """
        import shutil, sys, os
        shutil.rmtree(sys.argv[1], ignore_errors=True)
        """, args=[str(recipe_deps)])
    # Retry
    api.python(
        'fetch recipe engine deps from scratch.',
        api.path['checkout'].join('scripts', 'slave', 'recipes.py'),
        ['fetch'])

  recipe = str(api.properties['try_recipe'])
  level = int(api.properties.get('try_level', '0'))
  # Escaping multiple layers of json is hell, so wrap them with base64.
  raw_properties = api.properties.get('try_props', encode({}))
  properties = decode(raw_properties)
  properties = dict((str(k), str(v)) for k, v in properties.iteritems())
  # Each nesting level increments try_level so output indentation nests too.
  properties.setdefault('try_level', level + 1)
  # Propagate the standard build identity into the inner recipe run.
  for attr in ['buildername', 'mastername', 'buildnumber', 'slavename']:
    properties.setdefault(attr, api.properties.get(attr))

  # Surface the resolved inner-recipe properties in the build step log.
  step = api.step('properties (%d)' % level, cmd=None)
  step.presentation.logs['properties'] = (
      json.dumps(properties, sort_keys=True, indent=2)).splitlines()

  # Run the target recipe, indenting its annotations one level deeper.
  return api.python(
      name='%s run' % (recipe.replace('/', '.')),
      script=api.path['checkout'].join('scripts', 'tools',
                                       'annotee_indenter.py'),
      args=[
          '--base-level', str(level + 1),
          '--use-python-executable',
          '--',
          api.path['checkout'].join('scripts', 'slave', 'recipes.py'),
          'run',
          '--properties-file',
          api.json.input(properties),
          recipe,
      ],
      allow_subannotations=True,
  )
def GenTests(api):
  """Simulation tests: a plain tryjob, a nested (recursive) one, and the
  Windows dep-fetch-failure retry path."""
  yield (
      api.test('default') +
      api.properties.tryserver(
          mastername='tryserver.infra',
          buildername='recipe_try',
          try_recipe='infra/build_repo_real_try',
          try_props=encode({
              'prop1': 'value1',
              'prop2': 'value2',
          })
      )
  )
  yield (
      api.test('recursion') +
      api.properties.tryserver(
          mastername='tryserver.infra',
          buildername='recipe_try',
          try_recipe='infra/try_other_recipe',
          try_level='1',
          try_props=encode({
              'try_recipe': 'infra/build_repo_real_try',
              'try_props': encode({
                  'prop1': 'value1',
                  'prop2': 'value2',
              }),
          }),
      )
  )
  yield (
      api.test('broken_win') +
      api.platform('win', 64) +
      api.properties.tryserver(
          mastername='tryserver.infra',
          buildername='recipe_try',
          try_recipe='infra/build_repo_real_try',
          try_props=encode({
              'prop1': 'value1',
              'prop2': 'value2',
          })
      ) +
      # Force the first dep fetch to fail to exercise the retry branch.
      api.override_step_data('fetch recipe engine deps', retcode=1)
  )
| 30.404255 | 105 | 0.614416 |
9b872fdc2e8ab1a2260de0d66608182a7f72185d | 1,234 | py | Python | apps/users/models.py | ansonsry/Freshshop | 79ab8beb1aa993f6365182c8d3bb478ee4e028f8 | [
"MIT"
] | null | null | null | apps/users/models.py | ansonsry/Freshshop | 79ab8beb1aa993f6365182c8d3bb478ee4e028f8 | [
"MIT"
] | 1 | 2021-02-08T20:31:28.000Z | 2021-02-08T20:31:28.000Z | apps/users/models.py | ansonsry/Freshshop | 79ab8beb1aa993f6365182c8d3bb478ee4e028f8 | [
"MIT"
] | null | null | null | from datetime import datetime
from django.db import models
from django.contrib.auth.models import AbstractUser
# Create your models here.
class UserProfile(AbstractUser):
    """
    Custom user model (original docstring: "user").

    Extends Django's AbstractUser with optional profile fields; all extra
    fields allow null/blank so existing auth flows are unaffected.
    """
    # Real name ("姓名").
    name = models.CharField(max_length=30, null=True, blank=True, verbose_name="姓名")
    # Date of birth ("出生年月").
    birthday = models.DateField(null=True, blank=True, verbose_name="出生年月")
    # Gender ("性别"); stored values are "male"/"female", defaulting to female.
    gender = models.CharField(max_length=6, choices=(("male", u"男"), ("female", "女")), default="female", \
                              verbose_name="性别")
    # Phone number ("电话").
    mobile = models.CharField(null=True, blank=True, max_length=11, verbose_name="电话")
    # Email ("邮箱"); overrides AbstractUser.email to allow null.
    email = models.EmailField(max_length=100, null=True, blank=True, verbose_name="邮箱")

    class Meta:
        verbose_name = "用户"
        verbose_name_plural = verbose_name

    def __str__(self):
        return self.username
class VerifyCode(models.Model):
    """
    SMS verification code (original docstring: "短信验证码").

    Stores the code sent to a phone number together with its creation time.
    """
    # The verification code ("验证码").
    code = models.CharField(max_length=10, verbose_name="验证码")
    # Target phone number ("电话").
    mobile = models.CharField(max_length=11, verbose_name="电话")
    # Creation time ("添加时间"); default is evaluated at save time.
    add_time = models.DateTimeField(default=datetime.now, verbose_name="添加时间")

    class Meta:
        verbose_name = "短信验证码"
        verbose_name_plural = verbose_name

    def __str__(self):
        return self.code
64513e5deecadb5820c5cc640ab69abf4df4015a | 9,979 | py | Python | core/controllers/question_editor.py | TheoLipeles/oppia | cd0bb873e08fa716014f3d1480fbbfee95b89121 | [
"Apache-2.0"
] | 2 | 2021-03-07T18:39:15.000Z | 2021-03-29T20:09:11.000Z | core/controllers/question_editor.py | TheoLipeles/oppia | cd0bb873e08fa716014f3d1480fbbfee95b89121 | [
"Apache-2.0"
] | null | null | null | core/controllers/question_editor.py | TheoLipeles/oppia | cd0bb873e08fa716014f3d1480fbbfee95b89121 | [
"Apache-2.0"
] | null | null | null | # Copyright 2018 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Controllers for the questions editor, from where questions are edited
and are created.
"""
from __future__ import absolute_import # pylint: disable=import-only-modules
from __future__ import unicode_literals # pylint: disable=import-only-modules
import logging
from constants import constants
from core.controllers import acl_decorators
from core.controllers import base
from core.domain import fs_services
from core.domain import html_cleaner
from core.domain import image_validation_services
from core.domain import question_domain
from core.domain import question_services
from core.domain import skill_domain
from core.domain import skill_fetchers
import feconf
import utils
class QuestionCreationHandler(base.BaseHandler):
    """A handler that creates the question model given a question dict."""

    @acl_decorators.can_manage_question_skill_status
    def post(self):
        """Handles POST requests.

        Validates the posted skill IDs, the question dict, and the skill
        difficulties; creates the question, links it to the skills, and
        stores all images referenced from the question's HTML.
        """
        skill_ids = self.payload.get('skill_ids')

        if not skill_ids:
            raise self.InvalidInputException(
                'skill_ids parameter isn\'t present in the payload')

        if len(skill_ids) > constants.MAX_SKILLS_PER_QUESTION:
            raise self.InvalidInputException(
                'More than %d QuestionSkillLinks for one question '
                'is not supported.' % constants.MAX_SKILLS_PER_QUESTION)
        try:
            for skill_id in skill_ids:
                skill_domain.Skill.require_valid_skill_id(skill_id)
        except Exception as e:
            raise self.InvalidInputException('Skill ID(s) aren\'t valid: ', e)

        # All referenced skills must actually exist (404 otherwise).
        try:
            skill_fetchers.get_multi_skills(skill_ids)
        except Exception as e:
            raise self.PageNotFoundException(e)

        question_dict = self.payload.get('question_dict')
        # A new question must come without an id and at version 0; the
        # server assigns the id and current schema version below.
        if (
                (question_dict['id'] is not None) or
                ('question_state_data' not in question_dict) or
                ('language_code' not in question_dict) or
                (question_dict['version'] != 0)):
            raise self.InvalidInputException(
                'Question Data should contain id, state data, language code, ' +
                'and its version should be set as 0')

        question_dict['question_state_data_schema_version'] = (
            feconf.CURRENT_STATE_SCHEMA_VERSION)
        question_dict['id'] = question_services.get_new_question_id()
        question_dict['linked_skill_ids'] = skill_ids

        try:
            question = question_domain.Question.from_dict(question_dict)
        except Exception as e:
            raise self.InvalidInputException(
                'Question structure is invalid:', e)

        skill_difficulties = self.payload.get('skill_difficulties')

        if not skill_difficulties:
            raise self.InvalidInputException(
                'skill_difficulties not present in the payload')
        if len(skill_ids) != len(skill_difficulties):
            raise self.InvalidInputException(
                'Skill difficulties don\'t match up with skill IDs')

        try:
            skill_difficulties = [
                float(difficulty) for difficulty in skill_difficulties]
        except (ValueError, TypeError):
            raise self.InvalidInputException(
                'Skill difficulties must be a float value')
        if any((
                difficulty < 0 or difficulty > 1)
               for difficulty in skill_difficulties):
            raise self.InvalidInputException(
                'Skill difficulties must be between 0 and 1')

        question_services.add_question(self.user_id, question)
        question_services.link_multiple_skills_for_question(
            self.user_id,
            question.id,
            skill_ids,
            skill_difficulties)

        # Every image referenced from the question HTML must be included
        # in the request; validate it and store original + compressed copies.
        html_list = question.question_state_data.get_all_html_content_strings()
        filenames = (
            html_cleaner.get_image_filenames_from_html_strings(html_list))
        image_validation_error_message_suffix = (
            'Please go to the question editor for question with id %s and edit '
            'the image.' % question.id)
        for filename in filenames:
            image = self.request.get(filename)
            if not image:
                logging.error(
                    'Image not provided for file with name %s when the question'
                    ' with id %s was created.' % (filename, question.id))
                raise self.InvalidInputException(
                    'No image data provided for file with name %s. %s'
                    % (filename, image_validation_error_message_suffix))
            try:
                file_format = (
                    image_validation_services.validate_image_and_filename(
                        image, filename))
            except utils.ValidationError as e:
                e = '%s %s' % (e, image_validation_error_message_suffix)
                raise self.InvalidInputException(e)
            image_is_compressible = (
                file_format in feconf.COMPRESSIBLE_IMAGE_FORMATS)
            fs_services.save_original_and_compressed_versions_of_image(
                filename, feconf.ENTITY_TYPE_QUESTION, question.id, image,
                'image', image_is_compressible)

        self.values.update({
            'question_id': question.id
        })
        self.render_json(self.values)
class QuestionSkillLinkHandler(base.BaseHandler):
    """A handler for linking and unlinking questions to or from a skill."""

    @acl_decorators.can_manage_question_skill_status
    def put(self, question_id):
        """Updates the QuestionSkillLink models with respect to the given
        question.

        Args:
            question_id: str. The ID of the question whose skill links are
                being edited.

        Raises:
            InvalidInputException. The payload lacks a task list, a task
                is missing a skill ID, or a task name is unrecognized.
        """
        skill_ids_task_list = self.payload.get('skill_ids_task_list')
        if skill_ids_task_list is None:
            # Fix: error message previously read "...'in payload" (missing
            # space after the quoted field name).
            raise self.InvalidInputException(
                'Missing fields \'skill_ids_task_list\' in payload')
        for task_dict in skill_ids_task_list:
            # Every task must identify the skill it operates on.
            if 'id' not in task_dict:
                raise self.InvalidInputException(
                    'Missing skill ID.')
            if task_dict['task'] == 'remove':
                question_services.delete_question_skill_link(
                    self.user_id, question_id, task_dict['id'])
            elif task_dict['task'] == 'add':
                question_services.create_new_question_skill_link(
                    self.user_id, question_id, task_dict['id'],
                    task_dict['difficulty'])
            elif task_dict['task'] == 'update_difficulty':
                question_services.update_question_skill_link_difficulty(
                    question_id, task_dict['id'],
                    float(task_dict['difficulty']))
            else:
                raise self.InvalidInputException('Invalid task.')
        self.render_json(self.values)
class EditableQuestionDataHandler(base.BaseHandler):
    """A data handler for questions which supports writing."""

    GET_HANDLER_ERROR_RETURN_TYPE = feconf.HANDLER_TYPE_JSON

    @acl_decorators.can_view_question_editor
    def get(self, question_id):
        """Gets the data for the question overview page.

        Returns the question dict along with the dicts of all skills
        currently linked to the question.
        """
        question = question_services.get_question_by_id(
            question_id, strict=False)

        associated_skill_dicts = [
            skill.to_dict() for skill in skill_fetchers.get_multi_skills(
                question.linked_skill_ids)]

        self.values.update({
            'question_dict': question.to_dict(),
            'associated_skill_dicts': associated_skill_dicts
        })
        self.render_json(self.values)

    @acl_decorators.can_edit_question
    def put(self, question_id):
        """Updates properties of the given question.

        Requires a non-empty commit message and a non-empty change list;
        creation commands are rejected (questions are created elsewhere).
        """
        commit_message = self.payload.get('commit_message')

        if not commit_message:
            raise self.PageNotFoundException
        if (commit_message is not None and
                len(commit_message) > constants.MAX_COMMIT_MESSAGE_LENGTH):
            raise self.InvalidInputException(
                'Commit messages must be at most %s characters long.'
                % constants.MAX_COMMIT_MESSAGE_LENGTH)

        if not self.payload.get('change_list'):
            raise self.PageNotFoundException
        change_list = [
            question_domain.QuestionChange(change)
            for change in self.payload.get('change_list')
        ]

        for change in change_list:
            if (
                    change.cmd ==
                    question_domain.CMD_CREATE_NEW_FULLY_SPECIFIED_QUESTION):
                raise self.InvalidInputException

        question_services.update_question(
            self.user_id, question_id, change_list,
            commit_message)

        # Echo back the freshly updated question.
        question_dict = question_services.get_question_by_id(
            question_id).to_dict()
        self.render_json({
            'question_dict': question_dict
        })

    @acl_decorators.can_delete_question
    def delete(self, question_id):
        """Handles Delete requests."""
        question = question_services.get_question_by_id(
            question_id, strict=False)
        if question is None:
            raise self.PageNotFoundException(
                'The question with the given id doesn\'t exist.')
        question_services.delete_question(self.user_id, question_id)
        self.render_json(self.values)
| 39.916 | 80 | 0.644253 |
f5185e23fa4b3034d20ece2b2ff69e427f396ff9 | 11,649 | py | Python | tests/unit_tests/test_capi.py | johnnyliu27/openmc | d7359f151cc9eece99fb155e80f73a1b3393f7f7 | [
"MIT"
] | null | null | null | tests/unit_tests/test_capi.py | johnnyliu27/openmc | d7359f151cc9eece99fb155e80f73a1b3393f7f7 | [
"MIT"
] | null | null | null | tests/unit_tests/test_capi.py | johnnyliu27/openmc | d7359f151cc9eece99fb155e80f73a1b3393f7f7 | [
"MIT"
] | null | null | null | from collections.abc import Mapping
import os
import numpy as np
import pytest
import openmc
import openmc.exceptions as exc
import openmc.capi
from tests import cdtemp
@pytest.fixture(scope='module')
def pincell_model():
    """Set up a model to test with and delete files when done"""
    openmc.reset_auto_ids()
    pincell = openmc.examples.pwr_pin_cell()
    pincell.settings.verbosity = 1

    # Add a tally
    filter1 = openmc.MaterialFilter(pincell.materials)
    filter2 = openmc.EnergyFilter([0.0, 1.0, 1.0e3, 20.0e6])
    mat_tally = openmc.Tally()
    mat_tally.filters = [filter1, filter2]
    mat_tally.nuclides = ['U235', 'U238']
    mat_tally.scores = ['total', 'elastic', '(n,gamma)']
    pincell.tallies.append(mat_tally)

    # Add an expansion tally
    zernike_tally = openmc.Tally()
    filter3 = openmc.ZernikeFilter(5, r=.63)
    cells = pincell.geometry.root_universe.cells
    filter4 = openmc.CellFilter(list(cells.values()))
    zernike_tally.filters = [filter3, filter4]
    zernike_tally.scores = ['fission']
    pincell.tallies.append(zernike_tally)

    # Write XML files in tmpdir; cdtemp() cleans the directory up afterwards.
    with cdtemp():
        pincell.export_to_xml()
        yield
@pytest.fixture(scope='module')
def capi_init(pincell_model):
    """Initialize the OpenMC C API for the module and finalize on teardown."""
    openmc.capi.init()
    yield
    openmc.capi.finalize()
@pytest.fixture(scope='module')
def capi_simulation_init(capi_init):
    """Start a simulation on top of an initialized C API session."""
    openmc.capi.simulation_init()
    yield
@pytest.fixture(scope='module')
def capi_run(capi_simulation_init):
    """Run the full simulation once so result-inspection tests have data."""
    openmc.capi.run()
def test_cell_mapping(capi_init):
    """The cells mapping exposes all 3 cells keyed by their own IDs."""
    cells = openmc.capi.cells
    assert isinstance(cells, Mapping)
    assert len(cells) == 3
    for cell_id, cell in cells.items():
        assert isinstance(cell, openmc.capi.Cell)
        assert cell_id == cell.id
def test_cell(capi_init):
    """A cell's fill is a Material, is assignable, and repr is stable."""
    cell = openmc.capi.cells[1]
    assert isinstance(cell.fill, openmc.capi.Material)
    cell.fill = openmc.capi.materials[1]
    assert str(cell) == 'Cell[1]'
def test_new_cell(capi_init):
    """Creating cells: duplicate IDs fail; auto/explicit IDs grow the mapping."""
    with pytest.raises(exc.AllocationError):
        openmc.capi.Cell(1)
    # The constructors register cells as a side effect; the locals are unused.
    new_cell = openmc.capi.Cell()
    new_cell_with_id = openmc.capi.Cell(10)
    assert len(openmc.capi.cells) == 5
def test_material_mapping(capi_init):
    """The materials mapping exposes all 3 materials keyed by their IDs."""
    mats = openmc.capi.materials
    assert isinstance(mats, Mapping)
    assert len(mats) == 3
    for mat_id, mat in mats.items():
        assert isinstance(mat, openmc.capi.Material)
        assert mat_id == mat.id
def test_material(capi_init):
    """Material nuclides/densities/volume round-trip through the C API."""
    m = openmc.capi.materials[3]
    assert m.nuclides == ['H1', 'O16', 'B10', 'B11']

    # old_dens is kept only to exercise the densities getter before mutation.
    old_dens = m.densities
    test_dens = [1.0e-1, 2.0e-1, 2.5e-1, 1.0e-3]
    m.set_densities(m.nuclides, test_dens)
    assert m.densities == pytest.approx(test_dens)

    assert m.volume is None
    m.volume = 10.0
    assert m.volume == 10.0

    # Invalid units string must be rejected.
    with pytest.raises(exc.InvalidArgumentError):
        m.set_density(1.0, 'goblins')
    rho = 2.25e-2
    m.set_density(rho)
    assert sum(m.densities) == pytest.approx(rho)
def test_new_material(capi_init):
    """Creating materials: duplicate IDs fail; new ones grow the mapping."""
    with pytest.raises(exc.AllocationError):
        openmc.capi.Material(1)
    # Constructors register materials as a side effect; locals are unused.
    new_mat = openmc.capi.Material()
    new_mat_with_id = openmc.capi.Material(10)
    assert len(openmc.capi.materials) == 5
def test_nuclide_mapping(capi_init):
    """The nuclides mapping exposes 12 nuclides keyed by name."""
    nucs = openmc.capi.nuclides
    assert isinstance(nucs, Mapping)
    assert len(nucs) == 12
    for name, nuc in nucs.items():
        assert isinstance(nuc, openmc.capi.Nuclide)
        assert name == nuc.name
def test_load_nuclide(capi_init):
    """Loading a valid nuclide succeeds; a bogus name raises DataError."""
    openmc.capi.load_nuclide('Pu239')
    with pytest.raises(exc.DataError):
        openmc.capi.load_nuclide('Pu3')
def test_settings(capi_init):
    """Settings reflect the pincell XML and accept round-trip assignment."""
    settings = openmc.capi.settings
    assert settings.batches == 10
    settings.batches = 10
    assert settings.inactive == 5
    assert settings.generations_per_batch == 1
    assert settings.particles == 100
    assert settings.seed == 1
    settings.seed = 11

    assert settings.run_mode == 'eigenvalue'
    settings.run_mode = 'volume'
    settings.run_mode = 'eigenvalue'
def test_tally_mapping(capi_init):
    """The tallies mapping exposes both tallies keyed by their IDs."""
    tallies = openmc.capi.tallies
    assert isinstance(tallies, Mapping)
    assert len(tallies) == 2
    for tally_id, tally in tallies.items():
        assert isinstance(tally, openmc.capi.Tally)
        assert tally_id == tally.id
def test_tally(capi_init):
    """Tally filters, nuclides, and scores can be read and reassigned."""
    t = openmc.capi.tallies[1]
    assert len(t.filters) == 2
    assert isinstance(t.filters[0], openmc.capi.MaterialFilter)
    assert isinstance(t.filters[1], openmc.capi.EnergyFilter)

    # Create new filter and replace existing
    with pytest.raises(exc.AllocationError):
        openmc.capi.MaterialFilter(uid=1)
    mats = openmc.capi.materials
    f = openmc.capi.MaterialFilter([mats[2], mats[1]])
    assert f.bins[0] == mats[2]
    assert f.bins[1] == mats[1]
    t.filters = [f]
    assert t.filters == [f]

    assert t.nuclides == ['U235', 'U238']
    with pytest.raises(exc.DataError):
        t.nuclides = ['Zr2']
    t.nuclides = ['U234', 'Zr90']
    assert t.nuclides == ['U234', 'Zr90']

    assert t.scores == ['total', '(n,elastic)', '(n,gamma)']
    new_scores = ['scatter', 'fission', 'nu-fission', '(n,2n)']
    t.scores = new_scores
    assert t.scores == new_scores

    # The Zernike expansion tally keeps its filters and order intact.
    t2 = openmc.capi.tallies[2]
    assert len(t2.filters) == 2
    assert isinstance(t2.filters[0], openmc.capi.ZernikeFilter)
    assert isinstance(t2.filters[1], openmc.capi.CellFilter)
    assert len(t2.filters[1].bins) == 3
    assert t2.filters[0].order == 5
def test_new_tally(capi_init):
    """Creating tallies: duplicate IDs fail; new ones grow the mapping."""
    with pytest.raises(exc.AllocationError):
        openmc.capi.Material(1)
    new_tally = openmc.capi.Tally()
    new_tally.scores = ['flux']
    new_tally_with_id = openmc.capi.Tally(10)
    new_tally_with_id.scores = ['flux']
    assert len(openmc.capi.tallies) == 4
def test_tally_activate(capi_simulation_init):
    """A tally's active flag is writable; later tests rely on it being set."""
    t = openmc.capi.tallies[1]
    assert not t.active
    t.active = True
    assert t.active
def test_tally_results(capi_run):
    """After a run, tally means/std-devs are non-negative and sized correctly."""
    t = openmc.capi.tallies[1]
    assert t.num_realizations == 10  # t was made active in test_tally
    assert np.all(t.mean >= 0)
    nonzero = (t.mean > 0.0)
    assert np.all(t.std_dev[nonzero] >= 0)
    assert np.all(t.ci_width()[nonzero] >= 1.95*t.std_dev[nonzero])

    t2 = openmc.capi.tallies[2]
    n = 5
    assert t2.mean.size == (n + 1) * (n + 2) // 2 * 3  # Number of Zernike coeffs * 3 cells
def test_global_tallies(capi_run):
    """Global tallies cover the 5 active batches with non-negative means."""
    assert openmc.capi.num_realizations() == 5
    gt = openmc.capi.global_tallies()
    for mean, std_dev in gt:
        assert mean >= 0
def test_statepoint(capi_run):
    """Writing a statepoint produces the requested HDF5 file."""
    openmc.capi.statepoint_write('test_sp.h5')
    assert os.path.exists('test_sp.h5')
def test_source_bank(capi_run):
    """Source-bank particles carry positive energies and unit weights."""
    source = openmc.capi.source_bank()
    assert np.all(source['E'] > 0.0)
    assert np.all(source['wgt'] == 1.0)
def test_by_batch(capi_run):
    """Batches can be stepped manually and k-effective queried in between."""
    openmc.capi.hard_reset()

    # Running next batch before simulation is initialized should raise an
    # exception
    with pytest.raises(exc.AllocationError):
        openmc.capi.next_batch()

    openmc.capi.simulation_init()
    try:
        for _ in openmc.capi.iter_batches():
            # Make sure we can get k-effective during inactive/active batches
            mean, std_dev = openmc.capi.keff()
            assert 0.0 < mean < 2.5
            assert std_dev > 0.0
        assert openmc.capi.num_realizations() == 5

        # Extra manual batches keep accumulating realizations.
        for i in range(3):
            openmc.capi.next_batch()
        assert openmc.capi.num_realizations() == 8
    finally:
        openmc.capi.simulation_finalize()
def test_reset(capi_run):
    """reset() clears accumulated tally statistics between batch runs."""
    # Init and run 10 batches.
    openmc.capi.hard_reset()
    openmc.capi.simulation_init()
    try:
        for i in range(10):
            openmc.capi.next_batch()

        # Make sure there are 5 realizations for the 5 active batches.
        assert openmc.capi.num_realizations() == 5
        assert openmc.capi.tallies[2].num_realizations == 5
        _, keff_sd1 = openmc.capi.keff()
        tally_sd1 = openmc.capi.tallies[2].std_dev[0]

        # Reset and run 3 more batches.  Check the number of realizations.
        openmc.capi.reset()
        for i in range(3):
            openmc.capi.next_batch()
        assert openmc.capi.num_realizations() == 3
        assert openmc.capi.tallies[2].num_realizations == 3

        # Check the tally std devs to make sure results were cleared.
        # Fewer realizations after the reset means larger uncertainties.
        _, keff_sd2 = openmc.capi.keff()
        tally_sd2 = openmc.capi.tallies[2].std_dev[0]
        assert keff_sd2 > keff_sd1
        assert tally_sd2 > tally_sd1
    finally:
        openmc.capi.simulation_finalize()
def test_reproduce_keff(capi_init):
    """Two runs from the same seed reproduce the same k-effective."""
    # Get k-effective after run
    openmc.capi.hard_reset()
    openmc.capi.run()
    keff0 = openmc.capi.keff()

    # Reset, run again, and get k-effective again. they should match
    openmc.capi.hard_reset()
    openmc.capi.run()
    keff1 = openmc.capi.keff()
    assert keff0 == pytest.approx(keff1)
def test_find_cell(capi_init):
    """Point lookups map to the right cells; outside points raise."""
    cell, instance = openmc.capi.find_cell((0., 0., 0.))
    assert cell is openmc.capi.cells[1]
    cell, instance = openmc.capi.find_cell((0.4, 0., 0.))
    assert cell is openmc.capi.cells[2]
    with pytest.raises(exc.GeometryError):
        openmc.capi.find_cell((100., 100., 100.))
def test_find_material(capi_init):
    """Point lookups map to the materials filling the containing cells."""
    mat = openmc.capi.find_material((0., 0., 0.))
    assert mat is openmc.capi.materials[1]
    mat = openmc.capi.find_material((0.4, 0., 0.))
    assert mat is openmc.capi.materials[2]
def test_mesh(capi_init):
    """Mesh parameters round-trip in every pairwise combination, and
    mesh-based filters reference their mesh."""
    mesh = openmc.capi.Mesh()
    mesh.dimension = (2, 3, 4)
    assert mesh.dimension == (2, 3, 4)
    with pytest.raises(exc.AllocationError):
        mesh2 = openmc.capi.Mesh(mesh.id)

    # Make sure each combination of parameters works
    ll = (0., 0., 0.)
    ur = (10., 10., 10.)
    width = (1., 1., 1.)
    mesh.set_parameters(lower_left=ll, upper_right=ur)
    assert mesh.lower_left == pytest.approx(ll)
    assert mesh.upper_right == pytest.approx(ur)
    mesh.set_parameters(lower_left=ll, width=width)
    assert mesh.lower_left == pytest.approx(ll)
    assert mesh.width == pytest.approx(width)
    mesh.set_parameters(upper_right=ur, width=width)
    assert mesh.upper_right == pytest.approx(ur)
    assert mesh.width == pytest.approx(width)

    meshes = openmc.capi.meshes
    assert isinstance(meshes, Mapping)
    assert len(meshes) == 1
    for mesh_id, mesh in meshes.items():
        assert isinstance(mesh, openmc.capi.Mesh)
        assert mesh_id == mesh.id

    mf = openmc.capi.MeshFilter(mesh)
    assert mf.mesh == mesh

    msf = openmc.capi.MeshSurfaceFilter(mesh)
    assert msf.mesh == mesh
def test_restart(capi_init):
    """Restarting from a statepoint reproduces the k-effective of an
    uninterrupted run."""
    # Finalize and re-init to make internal state consistent with XML.
    openmc.capi.hard_reset()
    openmc.capi.finalize()
    openmc.capi.init()
    openmc.capi.simulation_init()

    # Run for 7 batches then write a statepoint.
    for i in range(7):
        openmc.capi.next_batch()
    openmc.capi.statepoint_write('restart_test.h5', True)

    # Run 3 more batches and copy the keff.
    for i in range(3):
        openmc.capi.next_batch()
    keff0 = openmc.capi.keff()

    # Restart the simulation from the statepoint and the 3 remaining active batches.
    openmc.capi.simulation_finalize()
    openmc.capi.hard_reset()
    openmc.capi.finalize()
    openmc.capi.init(args=('-r', 'restart_test.h5'))
    openmc.capi.simulation_init()
    for i in range(3):
        openmc.capi.next_batch()
    keff1 = openmc.capi.keff()
    openmc.capi.simulation_finalize()

    # Compare the keff values.
    assert keff0 == pytest.approx(keff1)
| 29.416667 | 90 | 0.665808 |
9dfc87296331cff2428c9fcf88824a31c02df1ec | 23,111 | py | Python | improver_tests/calibration/reliability_calibration/test_ConstructReliabilityCalibrationTables.py | nivnac/improver | c16c794f62598017cebc6ae4f99af8f317219a77 | [
"BSD-3-Clause"
] | null | null | null | improver_tests/calibration/reliability_calibration/test_ConstructReliabilityCalibrationTables.py | nivnac/improver | c16c794f62598017cebc6ae4f99af8f317219a77 | [
"BSD-3-Clause"
] | null | null | null | improver_tests/calibration/reliability_calibration/test_ConstructReliabilityCalibrationTables.py | nivnac/improver | c16c794f62598017cebc6ae4f99af8f317219a77 | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
# -----------------------------------------------------------------------------
# (C) British Crown Copyright 2017-2020 Met Office.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""Unit tests for the ConstructReliabilityCalibrationTables plugin."""
import unittest
from datetime import datetime
import iris
import numpy as np
from numpy.testing import assert_allclose, assert_array_equal
from improver.calibration.reliability_calibration import (
ConstructReliabilityCalibrationTables as Plugin,
)
from improver.synthetic_data.set_up_test_cubes import set_up_probability_cube
from improver.utilities.cube_manipulation import MergeCubes
class Test_Setup(unittest.TestCase):
    """Test class for the Test_ConstructReliabilityCalibrationTables tests,
    setting up cubes to use as inputs."""
    def setUp(self):
        """Create forecast and truth cubes for use in testing the reliability
        calibration plugin. Two forecast and two truth cubes are created, each
        pair containing the same data but given different forecast reference
        times and validity times. These times maintain the same forecast period
        for each forecast cube.
        The truth data for reliability calibration is thresholded data, giving
        fields of zeroes and ones.
        Each forecast cube in conjunction with the contemporaneous truth cube
        will be used to produce a reliability calibration table. When testing
        the process method here we expect the final reliability calibration
        table for a given threshold (we are only using 283K in the value
        comparisons) to be the sum of two of these identical tables."""
        thresholds = [283, 288]
        # Forecast probabilities span 0/8 ... 8/8 across the 3x3 grid, stacked
        # once per threshold.
        forecast_data = np.arange(9, dtype=np.float32).reshape(3, 3) / 8.0
        forecast_data = np.stack([forecast_data, forecast_data])
        truth_data = np.linspace(281, 285, 9, dtype=np.float32).reshape(3, 3)
        # Threshold the truths, giving fields of zeroes and ones.
        truth_data_a = (truth_data > thresholds[0]).astype(int)
        truth_data_b = (truth_data > thresholds[1]).astype(int)
        truth_data = np.stack([truth_data_a, truth_data_b])
        self.forecast_1 = set_up_probability_cube(forecast_data, thresholds)
        self.forecast_2 = set_up_probability_cube(
            forecast_data,
            thresholds,
            time=datetime(2017, 11, 11, 4, 0),
            frt=datetime(2017, 11, 11, 0, 0),
        )
        self.forecasts = MergeCubes()([self.forecast_1, self.forecast_2])
        self.truth_1 = set_up_probability_cube(
            truth_data, thresholds, frt=datetime(2017, 11, 10, 4, 0)
        )
        self.truth_2 = set_up_probability_cube(
            truth_data,
            thresholds,
            time=datetime(2017, 11, 11, 4, 0),
            frt=datetime(2017, 11, 11, 4, 0),
        )
        self.truths = MergeCubes()([self.truth_1, self.truth_2])
        # Masked truths with masks that differ between the two times; the
        # masks overlap at only one grid point (top-left corner).
        masked_array = np.zeros(truth_data.shape, dtype=bool)
        masked_array[:, 0, :2] = True
        masked_truth_data_1 = np.ma.array(truth_data, mask=masked_array)
        masked_array = np.zeros(truth_data.shape, dtype=bool)
        masked_array[:, :2, 0] = True
        masked_truth_data_2 = np.ma.array(truth_data, mask=masked_array)
        self.masked_truth_1 = set_up_probability_cube(
            masked_truth_data_1, thresholds, frt=datetime(2017, 11, 10, 4, 0)
        )
        self.masked_truth_2 = set_up_probability_cube(
            masked_truth_data_2,
            thresholds,
            time=datetime(2017, 11, 11, 4, 0),
            frt=datetime(2017, 11, 11, 4, 0),
        )
        self.masked_truths = MergeCubes()([self.masked_truth_1, self.masked_truth_2])
        self.expected_threshold_coord = self.forecasts.coord(var_name="threshold")
        # Expected table shape: (table_row, probability_bin, y, x).
        self.expected_table_shape = (3, 5, 3, 3)
        self.expected_attributes = {
            "title": "Reliability calibration data table",
            "source": "IMPROVER",
            "institution": "unknown",
        }
        # Note the structure of the expected_table is non-trivial to interpret
        # due to the dimension ordering.
        # The leading dimension rows are (observation_count,
        # sum_of_forecast_probabilities, forecast_count), matching the names
        # checked in Test__create_reliability_table_coords.
        self.expected_table = np.array(
            [
                [
                    [[0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0]],
                    [[0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0]],
                    [[0.0, 0.0, 0.0], [0.0, 0.0, 1.0], [0.0, 0.0, 0.0]],
                    [[0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [1.0, 1.0, 0.0]],
                    [[0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 1.0]],
                ],
                [
                    [[0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0]],
                    [[0.0, 0.125, 0.25], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0]],
                    [[0.0, 0.0, 0.0], [0.375, 0.5, 0.625], [0.0, 0.0, 0.0]],
                    [[0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.75, 0.875, 0.0]],
                    [[0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 1.0]],
                ],
                [
                    [[1.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0]],
                    [[0.0, 1.0, 1.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0]],
                    [[0.0, 0.0, 0.0], [1.0, 1.0, 1.0], [0.0, 0.0, 0.0]],
                    [[0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [1.0, 1.0, 0.0]],
                    [[0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 1.0]],
                ],
            ],
            dtype=np.float32,
        )
        # As above, but with counts/sums zeroed at the grid points masked in
        # masked_truth_1 ([:, 0, :2]).
        self.expected_table_for_mask = np.array(
            [
                [
                    [[0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0]],
                    [[0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0]],
                    [[0.0, 0.0, 0.0], [0.0, 0.0, 1.0], [0.0, 0.0, 0.0]],
                    [[0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [1.0, 1.0, 0.0]],
                    [[0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 1.0]],
                ],
                [
                    [[0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0]],
                    [[0.0, 0.0, 0.25], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0]],
                    [[0.0, 0.0, 0.0], [0.375, 0.5, 0.625], [0.0, 0.0, 0.0]],
                    [[0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.75, 0.875, 0.0]],
                    [[0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 1.0]],
                ],
                [
                    [[0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0]],
                    [[0.0, 0.0, 1.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0]],
                    [[0.0, 0.0, 0.0], [1.0, 1.0, 1.0], [0.0, 0.0, 0.0]],
                    [[0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [1.0, 1.0, 0.0]],
                    [[0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 1.0]],
                ],
            ],
            dtype=np.float32,
        )
class Test__init__(unittest.TestCase):
    """Tests for the plugin constructor."""

    def test_using_defaults(self):
        """A default-constructed plugin uses 5 probability bins, giving a
        (3, 5) expected table shape."""
        default_plugin = Plugin()
        self.assertEqual(len(default_plugin.probability_bins), 5)
        self.assertEqual(default_plugin.expected_table_shape, (3, 5))

    def test_with_arguments(self):
        """Explicit constructor arguments are reflected in the number of
        probability bins and the expected table shape."""
        configured_plugin = Plugin(
            n_probability_bins=4,
            single_value_lower_limit=False,
            single_value_upper_limit=False,
        )
        self.assertEqual(len(configured_plugin.probability_bins), 4)
        self.assertEqual(configured_plugin.expected_table_shape, (3, 4))
class Test__repr__(unittest.TestCase):
    """Tests for the plugin string representation."""

    def test_basic(self):
        """The repr lists the class name followed by the probability bins."""
        plugin = Plugin(
            n_probability_bins=2,
            single_value_lower_limit=False,
            single_value_upper_limit=False,
        )
        expected_repr = (
            "<ConstructReliabilityCalibrationTables: probability_bins: "
            "[0.00 --> 0.50], [0.50 --> 1.00]>"
        )
        self.assertEqual(str(plugin), expected_repr)
class Test__define_probability_bins(unittest.TestCase):
    """Test the _define_probability_bins method."""
    @staticmethod
    def test_without_single_value_limits():
        """Test the generation of probability bins without single value end
        bins. The range 0 to 1 will be divided into 4 equally sized bins."""
        # Upper bounds sit fractionally below the next bin's lower bound,
        # reflecting float32 rounding of the bin edges.
        expected = np.array(
            [[0.0, 0.24999999], [0.25, 0.49999997], [0.5, 0.74999994], [0.75, 1.0]]
        )
        result = Plugin()._define_probability_bins(
            n_probability_bins=4,
            single_value_lower_limit=False,
            single_value_upper_limit=False,
        )
        assert_allclose(result, expected)
    @staticmethod
    def test_with_both_single_value_limits():
        """Test the generation of probability bins with both upper and lower
        single value end bins. The range 0 to 1 will be divided into 2 equally
        sized bins, with 2 end bins holding values approximately equal to 0 and 1."""
        expected = np.array(
            [
                [0.0000000e00, 1.0000000e-06],
                [1.0000001e-06, 4.9999997e-01],
                [5.0000000e-01, 9.9999893e-01],
                [9.9999899e-01, 1.0000000e00],
            ]
        )
        result = Plugin()._define_probability_bins(
            n_probability_bins=4,
            single_value_lower_limit=True,
            single_value_upper_limit=True,
        )
        assert_allclose(result, expected)
    @staticmethod
    def test_with_lower_single_value_limit():
        """Test the generation of probability bins with only the lower single value
        limit bin. The range 0 to 1 will be divided into 4 equally sized bins,
        with 1 lower bin holding values approximately equal to 0."""
        expected = np.array(
            [
                [0.0000000e00, 1.0000000e-06],
                [1.0000001e-06, 3.3333331e-01],
                [3.3333334e-01, 6.6666663e-01],
                [6.6666669e-01, 1.0000000e00],
            ],
            dtype=np.float32,
        )
        result = Plugin()._define_probability_bins(
            n_probability_bins=4,
            single_value_lower_limit=True,
            single_value_upper_limit=False,
        )
        assert_allclose(result, expected)
    @staticmethod
    def test_with_upper_single_value_limit():
        """Test the generation of probability bins with only the upper single value
        limit bin. The range 0 to 1 will be divided into 4 equally sized bins,
        with 1 upper bin holding values approximately equal to 1."""
        expected = np.array(
            [
                [0.0, 0.3333333],
                [0.33333334, 0.6666666],
                [0.6666667, 0.9999989],
                [0.999999, 1.0],
            ],
            dtype=np.float32,
        )
        result = Plugin()._define_probability_bins(
            n_probability_bins=4,
            single_value_lower_limit=False,
            single_value_upper_limit=True,
        )
        assert_allclose(result, expected)
    def test_with_both_single_value_limits_too_few_bins(self):
        """In this test both lower and uppper single_value_limits are requested
        whilst also trying to use 2 bins. This would leave no bins to cover the
        range 0 to 1, so an error is raised."""
        msg = (
            "Cannot use both single_value_lower_limit and "
            "single_value_upper_limit with 2 or fewer probability bins."
        )
        with self.assertRaisesRegex(ValueError, msg):
            Plugin()._define_probability_bins(
                n_probability_bins=2,
                single_value_lower_limit=True,
                single_value_upper_limit=True,
            )
class Test__create_probability_bins_coord(unittest.TestCase):
    """Test the _create_probability_bins_coord method."""
    def test_coordinate_no_single_value_bins(self):
        """Test the probability_bins coordinate has the expected values and
        type with no single value lower and upper bins."""
        expected_bounds = np.array([[0, 0.5], [0.5, 1]])
        # Coordinate points are the midpoints of the bin bounds.
        expected_points = np.mean(expected_bounds, axis=1)
        plugin = Plugin(n_probability_bins=2,)
        result = plugin._create_probability_bins_coord()
        self.assertIsInstance(result, iris.coords.DimCoord)
        assert_allclose(result.points, expected_points)
        assert_allclose(result.bounds, expected_bounds)
    def test_coordinate_single_value_bins(self):
        """Test the probability_bins coordinate has the expected values and
        type when using the single value lower and upper bins."""
        expected_bounds = np.array(
            [
                [0.0000000e00, 1.0000000e-06],
                [1.0000001e-06, 4.9999997e-01],
                [5.0000000e-01, 9.9999893e-01],
                [9.9999899e-01, 1.0000000e00],
            ]
        )
        expected_points = np.mean(expected_bounds, axis=1)
        plugin = Plugin(
            n_probability_bins=4,
            single_value_lower_limit=True,
            single_value_upper_limit=True,
        )
        result = plugin._create_probability_bins_coord()
        self.assertIsInstance(result, iris.coords.DimCoord)
        assert_allclose(result.points, expected_points)
        assert_allclose(result.bounds, expected_bounds)
class Test__create_reliability_table_coords(unittest.TestCase):
    """Test the _create_reliability_table_coords method."""

    def test_coordinates(self):
        """The index coordinate is a DimCoord of int32 indices and the name
        coordinate is an AuxCoord listing the three table rows."""
        want_indices = np.array([0, 1, 2], dtype=np.int32)
        want_names = np.array(
            ["observation_count", "sum_of_forecast_probabilities", "forecast_count"]
        )
        index_coord, name_coord = Plugin()._create_reliability_table_coords()
        self.assertIsInstance(index_coord, iris.coords.DimCoord)
        self.assertIsInstance(name_coord, iris.coords.AuxCoord)
        assert_array_equal(index_coord.points, want_indices)
        assert_array_equal(name_coord.points, want_names)
class Test__define_metadata(Test_Setup):
    """Test the _define_metadata method."""

    def test_metadata_with_complete_inputs(self):
        """An institution attribute on the forecast cube is copied into the
        returned metadata dictionary."""
        self.forecast_1.attributes["institution"] = "Kitten Inc"
        self.expected_attributes["institution"] = "Kitten Inc"
        metadata = Plugin._define_metadata(self.forecast_1)
        self.assertIsInstance(metadata, dict)
        self.assertEqual(metadata, self.expected_attributes)

    def test_metadata_with_incomplete_inputs(self):
        """When the forecast cube lacks the optional attributes, the returned
        metadata is still complete, using the default values."""
        metadata = Plugin._define_metadata(self.forecast_1)
        self.assertIsInstance(metadata, dict)
        self.assertEqual(metadata, self.expected_attributes)
class Test__create_reliability_table_cube(Test_Setup):
    """Test the _create_reliability_table_cube method."""

    def test_valid_inputs(self):
        """The returned cube has the expected shape, name and attributes."""
        forecast_slice = next(self.forecast_1.slices_over("air_temperature"))
        threshold_coord = forecast_slice.coord(var_name="threshold")
        table_cube = Plugin()._create_reliability_table_cube(
            forecast_slice, threshold_coord
        )
        self.assertIsInstance(table_cube, iris.cube.Cube)
        self.assertSequenceEqual(table_cube.shape, self.expected_table_shape)
        self.assertEqual(table_cube.name(), "reliability_calibration_table")
        self.assertEqual(table_cube.attributes, self.expected_attributes)
class Test__populate_reliability_bins(Test_Setup):
    """Test the _populate_reliability_bins method."""

    def test_table_values(self):
        """The table built from the first forecast/truth pair matches the
        expected shape and values."""
        forecast_slice = next(self.forecast_1.slices_over("air_temperature"))
        truth_slice = next(self.truth_1.slices_over("air_temperature"))
        plugin = Plugin(
            single_value_lower_limit=True, single_value_upper_limit=True
        )
        table = plugin._populate_reliability_bins(
            forecast_slice.data, truth_slice.data
        )
        self.assertSequenceEqual(table.shape, self.expected_table_shape)
        assert_array_equal(table, self.expected_table)
class Test__populate_masked_reliability_bins(Test_Setup):
    """Test the _populate_masked_reliability_bins method."""
    def test_table_values_masked_truth(self):
        """Test the reliability table returned has the expected values when a
        masked truth is input."""
        forecast_slice = next(self.forecast_1.slices_over("air_temperature"))
        truth_slice = next(self.masked_truth_1.slices_over("air_temperature"))
        result = Plugin(
            single_value_lower_limit=True, single_value_upper_limit=True
        )._populate_masked_reliability_bins(forecast_slice.data, truth_slice.data)
        self.assertSequenceEqual(result.shape, self.expected_table_shape)
        self.assertTrue(np.ma.is_masked(result))
        assert_array_equal(result.data, self.expected_table_for_mask)
        # The masked spatial points match those masked in masked_truth_1
        # ([:, 0, :2]), repeated over the table's two leading dimensions.
        expected_mask = np.zeros(self.expected_table_for_mask.shape, dtype=bool)
        expected_mask[:, :, 0, :2] = True
        assert_array_equal(result.mask, expected_mask)
class Test_process(Test_Setup):
    """Test the process method."""
    def test_return_type(self):
        """Test the process method returns a reliability table cube."""
        result = Plugin().process(self.forecasts, self.truths)
        self.assertIsInstance(result, iris.cube.Cube)
        self.assertEqual(result.name(), "reliability_calibration_table")
        self.assertEqual(result.coord("air_temperature"), self.expected_threshold_coord)
        self.assertEqual(result.coord_dims("air_temperature")[0], 0)
    def test_table_values(self):
        """Test that cube values are as expected when process has sliced the
        inputs up for processing and then summed the contributions from the
        two dates. Note that the values tested here are for only one of the
        two processed thresholds (283K). The results contain contributions
        from two forecast/truth pairs."""
        # Both forecast/truth pairs hold identical data, so the result is
        # twice the single-pair table.
        expected = np.sum([self.expected_table, self.expected_table], axis=0)
        result = Plugin(
            single_value_lower_limit=True, single_value_upper_limit=True
        ).process(self.forecasts, self.truths)
        assert_array_equal(result[0].data, expected)
    def test_table_values_masked_truth(self):
        """Test, similar to test_table_values, using masked arrays. The
        mask is different for different timesteps, reflecting the potential
        for masked areas in e.g. a radar truth to differ between timesteps.
        At timestep 1, two grid points are masked. At timestep 2, two
        grid points are also masked with one masked grid point in common
        between timesteps. As a result, only one grid point is masked (
        within the upper left corner) within the resulting reliability table."""
        # Expected single-pair table with masked_truth_2's mask ([:, :2, 0])
        # applied; cf. self.expected_table_for_mask for masked_truth_1.
        expected_table_for_second_mask = np.array(
            [
                [
                    [[0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0]],
                    [[0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0]],
                    [[0.0, 0.0, 0.0], [0.0, 0.0, 1.0], [0.0, 0.0, 0.0]],
                    [[0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [1.0, 1.0, 0.0]],
                    [[0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 1.0]],
                ],
                [
                    [[0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0]],
                    [[0.0, 0.125, 0.25], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0]],
                    [[0.0, 0.0, 0.0], [0.0, 0.5, 0.625], [0.0, 0.0, 0.0]],
                    [[0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.75, 0.875, 0.0]],
                    [[0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 1.0]],
                ],
                [
                    [[0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0]],
                    [[0.0, 1.0, 1.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0]],
                    [[0.0, 0.0, 0.0], [0.0, 1.0, 1.0], [0.0, 0.0, 0.0]],
                    [[0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [1.0, 1.0, 0.0]],
                    [[0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 1.0]],
                ],
            ],
            dtype=np.float32,
        )
        expected = np.sum(
            [self.expected_table_for_mask, expected_table_for_second_mask], axis=0
        )
        # Only the grid point masked at both timesteps remains masked.
        expected_mask = np.zeros(expected.shape, dtype=bool)
        expected_mask[:, :, 0, 0] = True
        result = Plugin(
            single_value_lower_limit=True, single_value_upper_limit=True
        ).process(self.forecasts, self.masked_truths)
        self.assertIsInstance(result.data, np.ma.MaskedArray)
        assert_array_equal(result[0].data.data, expected)
        assert_array_equal(result[0].data.mask, expected_mask)
        # Different thresholds must have the same mask.
        assert_array_equal(result[0].data.mask, result[1].data.mask)
    def test_mismatching_threshold_coordinates(self):
        """Test that an exception is raised if the forecast and truth cubes
        have differing threshold coordinates."""
        # Dropping one threshold from the truths creates the mismatch.
        self.truths = self.truths[:, 0, ...]
        msg = "Threshold coordinates differ between forecasts and truths."
        with self.assertRaisesRegex(ValueError, msg):
            Plugin().process(self.forecasts, self.truths)
# Allow the tests to be run directly as a script.
if __name__ == "__main__":
    unittest.main()
| 42.327839 | 88 | 0.599022 |
6e032bf896ba8def0c3627f6a35dc54484cb4ccf | 3,646 | py | Python | tern/formats/spdx/spdxjson/consumer.py | ReconPangolin/tern | ed8b2b721397358f5ff8c4253aa4f0aa70a55afe | [
"BSD-2-Clause"
] | null | null | null | tern/formats/spdx/spdxjson/consumer.py | ReconPangolin/tern | ed8b2b721397358f5ff8c4253aa4f0aa70a55afe | [
"BSD-2-Clause"
] | null | null | null | tern/formats/spdx/spdxjson/consumer.py | ReconPangolin/tern | ed8b2b721397358f5ff8c4253aa4f0aa70a55afe | [
"BSD-2-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
#
# Copyright (c) 2021 VMware, Inc. All Rights Reserved.
# SPDX-License-Identifier: BSD-2-Clause
"""
SPDXJSON document consumer
"""
import json
import logging
import os
from tern.classes.image_layer import ImageLayer
from tern.classes.package import Package
from tern.formats import consumer
from tern.utils import constants
# global logger
logger = logging.getLogger(constants.logger_name)
class ConsumerError(Exception):
    """Exception raised if a critical error has occurred while consuming a
    report (unreadable file, invalid JSON, or empty content)."""
def get_package_from_dict(pkg_dict):
    """Build a Package object from one SPDX JSON package dictionary.

    The SPDX JSON format carries a list of package dictionaries; this
    converts a single one, mapping the SPDX placeholder values
    ('NOASSERTION' for the version, 'NONE' for the download location and
    copyright text) to empty strings on the returned Package.
    """
    package = Package(pkg_dict['name'])
    version = pkg_dict['versionInfo']
    package.version = version if version != 'NOASSERTION' else ""
    download = pkg_dict['downloadLocation']
    package.proj_url = download if download != 'NONE' else ""
    copyright_text = pkg_dict['copyrightText']
    package.copyright = copyright_text if copyright_text != 'NONE' else ""
    return package
def get_license_refs_dict(license_refs_list):
    """Map SPDX license-ref identifiers to their extracted license text.

    In SPDX, license strings extracted from package metadata that are not
    valid license expressions are listed separately. Given that list of
    dictionaries, return a {licenseId: extractedText} mapping; a falsy
    input yields an empty dictionary.
    """
    if not license_refs_list:
        return {}
    return {
        ref_dict['licenseId']: ref_dict['extractedText']
        for ref_dict in license_refs_list
    }
def create_image_layer(report):
    """Given a report file, create an ImageLayer object with the metadata.

    Args:
        report: path to an SPDX JSON report file created by the spdxjson
            generator.
    Returns:
        An ImageLayer populated with the report's packages, or None if the
        report lacks the required package data.
    Raises:
        ConsumerError: if the file cannot be read, is not valid JSON, or
            contains no content.
    """
    # expect a json input, raise an error if it is not
    content = {}
    try:
        # use a context manager so the file handle is always closed
        # (the previous version opened the file and never closed it)
        with open(os.path.abspath(report)) as f:
            content = json.load(f)
    except OSError as err:
        logger.critical("Cannot access file %s: %s", report, err)
        raise ConsumerError(f"Error with given report file: {report}") from err
    except json.JSONDecodeError as err:
        logger.critical("Cannot parse JSON in file %s: %s", report, err)
        raise ConsumerError(f"Error with given report file: {report}") from err
    # we should have some content but it may be empty
    if not content:
        raise ConsumerError("No content consumed from given report file")
    # instantiate a layer and fill it
    layer = ImageLayer("")
    # if there are license refs, make a dictionary with license refs to
    # extracted content
    refs_license = get_license_refs_dict(
        content.get('hasExtractedLicensingInfos', []))
    try:
        # we ignore the document level information and go straight
        # to the packages
        for pkg in content['packages']:
            pkg_obj = get_package_from_dict(pkg)
            pkg_obj.pkg_license = refs_license.get(pkg['licenseDeclared'])
            layer.add_package(pkg_obj)
        return layer
    except (ValueError, KeyError) as err:
        # A missing 'packages' list or missing per-package fields raises
        # KeyError; the previous `except ValueError` never caught it even
        # though this log message was clearly meant to cover that case.
        logger.critical("Cannot find required data in report: %s", err)
        return None
class SpdxJSON(consumer.Consume):
    def consume_layer(self, reports):
        """Given a list of report files in the SPDX JSON format, created by
        the spdxjson generator, build the full list of image layer objects.

        The layers are assumed to be ordered as the report files are, and
        are given 1-based layer_index values in that order.
        """
        layer_list = []
        for index, report in enumerate(reports, start=1):
            layer = create_image_layer(report)
            layer.layer_index = index
            layer_list.append(layer)
        return layer_list
| 36.46 | 79 | 0.675535 |
2698faa01b1c2caf81d86e99eaf9f446b5c88955 | 6,167 | py | Python | tests/contrib/timeseries/test_gp.py | patrickeganfoley/pyro | 3bd5e099e85f3686c66fc3b53476c3b009a77a02 | [
"Apache-2.0"
] | 2 | 2020-06-05T20:40:50.000Z | 2020-09-05T15:39:48.000Z | tests/contrib/timeseries/test_gp.py | patrickeganfoley/pyro | 3bd5e099e85f3686c66fc3b53476c3b009a77a02 | [
"Apache-2.0"
] | 1 | 2020-05-12T16:26:21.000Z | 2020-05-12T17:23:13.000Z | tests/contrib/timeseries/test_gp.py | patrickeganfoley/pyro | 3bd5e099e85f3686c66fc3b53476c3b009a77a02 | [
"Apache-2.0"
] | 1 | 2020-06-04T18:25:38.000Z | 2020-06-04T18:25:38.000Z | # Copyright (c) 2017-2019 Uber Technologies, Inc.
# SPDX-License-Identifier: Apache-2.0
import math
import torch
from tests.common import assert_equal
import pyro
from pyro.contrib.timeseries import (IndependentMaternGP, LinearlyCoupledMaternGP, GenericLGSSM,
GenericLGSSMWithGPNoiseModel, DependentMaternGP)
from pyro.ops.tensor_utils import block_diag_embed
import pytest
@pytest.mark.parametrize('model,obs_dim,nu_statedim', [('ssmgp', 3, 1.5), ('ssmgp', 2, 2.5),
                                                       ('lcmgp', 3, 1.5), ('lcmgp', 2, 2.5),
                                                       ('imgp', 1, 0.5), ('imgp', 2, 0.5),
                                                       ('imgp', 1, 1.5), ('imgp', 3, 1.5),
                                                       ('imgp', 1, 2.5), ('imgp', 3, 2.5),
                                                       ('dmgp', 1, 1.5), ('dmgp', 2, 1.5),
                                                       ('dmgp', 3, 1.5),
                                                       ('glgssm', 1, 3), ('glgssm', 3, 1)])
@pytest.mark.parametrize('T', [11, 37])
def test_timeseries_models(model, nu_statedim, obs_dim, T):
    """Smoke-test log_prob and forecast for each timeseries model, and
    cross-check IndependentMaternGP log probabilities against the
    equivalent exact GP multivariate normal."""
    # Double precision for the numerical comparisons below.
    torch.set_default_tensor_type('torch.DoubleTensor')
    # Random positive time step between observations.
    dt = 0.1 + torch.rand(1).item()
    if model == 'lcmgp':
        num_gps = 2
        gp = LinearlyCoupledMaternGP(nu=nu_statedim, obs_dim=obs_dim, dt=dt, num_gps=num_gps,
                                     length_scale_init=0.5 + torch.rand(num_gps),
                                     kernel_scale_init=0.5 + torch.rand(num_gps),
                                     obs_noise_scale_init=0.5 + torch.rand(obs_dim))
    elif model == 'imgp':
        gp = IndependentMaternGP(nu=nu_statedim, obs_dim=obs_dim, dt=dt,
                                 length_scale_init=0.5 + torch.rand(obs_dim),
                                 kernel_scale_init=0.5 + torch.rand(obs_dim),
                                 obs_noise_scale_init=0.5 + torch.rand(obs_dim))
    elif model == 'glgssm':
        gp = GenericLGSSM(state_dim=nu_statedim, obs_dim=obs_dim,
                          obs_noise_scale_init=0.5 + torch.rand(obs_dim))
    elif model == 'ssmgp':
        # For 'ssmgp' the parametrized value is nu; the state dim is derived.
        state_dim = {0.5: 4, 1.5: 3, 2.5: 2}[nu_statedim]
        gp = GenericLGSSMWithGPNoiseModel(nu=nu_statedim, state_dim=state_dim, obs_dim=obs_dim,
                                          obs_noise_scale_init=0.5 + torch.rand(obs_dim))
    elif model == 'dmgp':
        linearly_coupled = bool(torch.rand(1).item() > 0.5)
        gp = DependentMaternGP(nu=nu_statedim, obs_dim=obs_dim, dt=dt, linearly_coupled=linearly_coupled,
                               length_scale_init=0.5 + torch.rand(obs_dim))

    targets = torch.randn(T, obs_dim)
    gp_log_prob = gp.log_prob(targets)
    # IndependentMaternGP returns one log prob per observed dimension;
    # the other models return a scalar.
    if model == 'imgp':
        assert gp_log_prob.shape == (obs_dim,)
    else:
        assert gp_log_prob.dim() == 0

    # compare matern log probs to vanilla GP result via multivariate normal
    if model == 'imgp':
        times = dt * torch.arange(T).double()
        for dim in range(obs_dim):
            lengthscale = gp.kernel.length_scale[dim]
            variance = gp.kernel.kernel_scale.pow(2)[dim]
            obs_noise = gp.obs_noise_scale.pow(2)[dim]
            kernel = {0.5: pyro.contrib.gp.kernels.Exponential,
                      1.5: pyro.contrib.gp.kernels.Matern32,
                      2.5: pyro.contrib.gp.kernels.Matern52}[nu_statedim]
            kernel = kernel(input_dim=1, lengthscale=lengthscale, variance=variance)
            # XXX kernel(times) loads old parameters from param store
            kernel = kernel.forward(times) + obs_noise * torch.eye(T)
            mvn = torch.distributions.MultivariateNormal(torch.zeros(T), kernel)
            mvn_log_prob = mvn.log_prob(targets[:, dim])
            assert_equal(mvn_log_prob, gp_log_prob[dim], prec=1e-4)

    for S in [1, 5]:
        if model in ['imgp', 'lcmgp', 'dmgp', 'lcdgp']:
            dts = torch.rand(S).cumsum(dim=-1)
            predictive = gp.forecast(targets, dts)
        else:
            predictive = gp.forecast(targets, S)
        assert predictive.loc.shape == (S, obs_dim)
        if model == 'imgp':
            assert predictive.scale.shape == (S, obs_dim)
            # assert monotonic increase of predictive noise
            if S > 1:
                delta = predictive.scale[1:S, :] - predictive.scale[0:S-1, :]
                assert (delta > 0.0).sum() == (S - 1) * obs_dim
        else:
            assert predictive.covariance_matrix.shape == (S, obs_dim, obs_dim)
            # assert monotonic increase of predictive noise
            if S > 1:
                dets = predictive.covariance_matrix.det()
                delta = dets[1:S] - dets[0:S-1]
                assert (delta > 0.0).sum() == (S - 1)

    if model in ['imgp', 'lcmgp', 'dmgp', 'lcdgp']:
        # the distant future
        dts = torch.tensor([500.0])
        predictive = gp.forecast(targets, dts)
        # assert mean reverting behavior for GP models
        assert_equal(predictive.loc, torch.zeros(1, obs_dim))
@pytest.mark.parametrize('obs_dim', [1, 3])
def test_dependent_matern_gp(obs_dim):
    """Check that the stationary covariance P of the Matern-3/2
    DependentMaternGP satisfies the continuous-time Lyapunov (matrix
    Riccati) equation F P + P F^T + Q = 0."""
    dt = 0.5 + torch.rand(1).item()
    gp = DependentMaternGP(nu=1.5, obs_dim=obs_dim, dt=dt,
                           length_scale_init=0.5 + torch.rand(obs_dim))
    # make sure stationary covariance matrix satisfies the relevant
    # matrix riccati equation
    lengthscale = gp.kernel.length_scale.unsqueeze(-1).unsqueeze(-1)
    # Build the block-diagonal transition matrix F of the nu=1.5
    # state-space representation, one 2x2 block per observed dimension.
    F = torch.tensor([[0.0, 1.0], [0.0, 0.0]])
    mask1 = torch.tensor([[0.0, 0.0], [-3.0, 0.0]])
    mask2 = torch.tensor([[0.0, 0.0], [0.0, -math.sqrt(12.0)]])
    F = block_diag_embed(F + mask1 / lengthscale.pow(2.0) + mask2 / lengthscale)

    stat_cov = gp._stationary_covariance()
    wiener_cov = gp._get_wiener_cov()
    # Zero out all but the bottom-right entry of each 2x2 block: the process
    # noise only drives the velocity component.
    wiener_cov *= torch.tensor([[0.0, 0.0], [0.0, 1.0]]).repeat(obs_dim, obs_dim)

    expected_zero = torch.matmul(F, stat_cov) + torch.matmul(stat_cov, F.transpose(-1, -2)) + wiener_cov
    assert_equal(expected_zero, torch.zeros(gp.full_state_dim, gp.full_state_dim))
| 48.944444 | 105 | 0.558618 |
4fa35d9086ca14e73c3c6178edd156ac63a5de83 | 1,187 | py | Python | tests/bugs/core_1334_test.py | reevespaul/firebird-qa | 98f16f425aa9ab8ee63b86172f959d63a2d76f21 | [
"MIT"
] | null | null | null | tests/bugs/core_1334_test.py | reevespaul/firebird-qa | 98f16f425aa9ab8ee63b86172f959d63a2d76f21 | [
"MIT"
] | null | null | null | tests/bugs/core_1334_test.py | reevespaul/firebird-qa | 98f16f425aa9ab8ee63b86172f959d63a2d76f21 | [
"MIT"
] | null | null | null | #coding:utf-8
#
# id: bugs.core_1334
# title: Joins with NULL RDB$DB_KEY crash the server
# decription:
# tracker_id: CORE-1334
# min_versions: []
# versions: 2.1
# qmid: bugs.core_1334
import pytest
from firebird.qa import db_factory, isql_act, Action
# version: 2.1
# resources: None
# No output substitutions are needed for this test.
substitutions_1 = []
# DDL/DML executed once to initialise the test database.
init_script_1 = """create table t1 (id integer primary key);
create table t2 (id integer references t1);
COMMIT;
insert into t1 values (1);
insert into t1 values (2);
insert into t2 values (2);
COMMIT;"""
db_1 = db_factory(sql_dialect=3, init=init_script_1)
# The query under test: a self-join on RDB$DB_KEY, where NULL db-keys
# previously crashed the server (CORE-1334).
test_script_1 = """select *
from t1
left join t2
on (t2.id = t1.id)
left join t2 t3
on (t3.rdb$db_key = t2.rdb$db_key);
"""
act_1 = isql_act('db_1', test_script_1, substitutions=substitutions_1)
# Expected isql output for the query above.
expected_stdout_1 = """
ID ID ID
============ ============ ============
1 <null> <null>
2 2 2
"""
@pytest.mark.version('>=2.1')
def test_1(act_1: Action):
    """Run the db-key self-join query and compare the cleaned actual
    output against the cleaned expected output."""
    act_1.expected_stdout = expected_stdout_1
    act_1.execute()
    assert act_1.clean_stdout == act_1.clean_expected_stdout
| 22.396226 | 70 | 0.613311 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.