hexsha
string
size
int64
ext
string
lang
string
max_stars_repo_path
string
max_stars_repo_name
string
max_stars_repo_head_hexsha
string
max_stars_repo_licenses
list
max_stars_count
int64
max_stars_repo_stars_event_min_datetime
string
max_stars_repo_stars_event_max_datetime
string
max_issues_repo_path
string
max_issues_repo_name
string
max_issues_repo_head_hexsha
string
max_issues_repo_licenses
list
max_issues_count
int64
max_issues_repo_issues_event_min_datetime
string
max_issues_repo_issues_event_max_datetime
string
max_forks_repo_path
string
max_forks_repo_name
string
max_forks_repo_head_hexsha
string
max_forks_repo_licenses
list
max_forks_count
int64
max_forks_repo_forks_event_min_datetime
string
max_forks_repo_forks_event_max_datetime
string
content
string
avg_line_length
float64
max_line_length
int64
alphanum_fraction
float64
qsc_code_num_words_quality_signal
int64
qsc_code_num_chars_quality_signal
float64
qsc_code_mean_word_length_quality_signal
float64
qsc_code_frac_words_unique_quality_signal
float64
qsc_code_frac_chars_top_2grams_quality_signal
float64
qsc_code_frac_chars_top_3grams_quality_signal
float64
qsc_code_frac_chars_top_4grams_quality_signal
float64
qsc_code_frac_chars_dupe_5grams_quality_signal
float64
qsc_code_frac_chars_dupe_6grams_quality_signal
float64
qsc_code_frac_chars_dupe_7grams_quality_signal
float64
qsc_code_frac_chars_dupe_8grams_quality_signal
float64
qsc_code_frac_chars_dupe_9grams_quality_signal
float64
qsc_code_frac_chars_dupe_10grams_quality_signal
float64
qsc_code_frac_chars_replacement_symbols_quality_signal
float64
qsc_code_frac_chars_digital_quality_signal
float64
qsc_code_frac_chars_whitespace_quality_signal
float64
qsc_code_size_file_byte_quality_signal
float64
qsc_code_num_lines_quality_signal
float64
qsc_code_num_chars_line_max_quality_signal
float64
qsc_code_num_chars_line_mean_quality_signal
float64
qsc_code_frac_chars_alphabet_quality_signal
float64
qsc_code_frac_chars_comments_quality_signal
float64
qsc_code_cate_xml_start_quality_signal
float64
qsc_code_frac_lines_dupe_lines_quality_signal
float64
qsc_code_cate_autogen_quality_signal
float64
qsc_code_frac_lines_long_string_quality_signal
float64
qsc_code_frac_chars_string_length_quality_signal
float64
qsc_code_frac_chars_long_word_length_quality_signal
float64
qsc_code_frac_lines_string_concat_quality_signal
float64
qsc_code_cate_encoded_data_quality_signal
float64
qsc_code_frac_chars_hex_words_quality_signal
float64
qsc_code_frac_lines_prompt_comments_quality_signal
float64
qsc_code_frac_lines_assert_quality_signal
float64
qsc_codepython_cate_ast_quality_signal
float64
qsc_codepython_frac_lines_func_ratio_quality_signal
float64
qsc_codepython_cate_var_zero_quality_signal
bool
qsc_codepython_frac_lines_pass_quality_signal
float64
qsc_codepython_frac_lines_import_quality_signal
float64
qsc_codepython_frac_lines_simplefunc_quality_signal
float64
qsc_codepython_score_lines_no_logic_quality_signal
float64
qsc_codepython_frac_lines_print_quality_signal
float64
qsc_code_num_words
int64
qsc_code_num_chars
int64
qsc_code_mean_word_length
int64
qsc_code_frac_words_unique
null
qsc_code_frac_chars_top_2grams
int64
qsc_code_frac_chars_top_3grams
int64
qsc_code_frac_chars_top_4grams
int64
qsc_code_frac_chars_dupe_5grams
int64
qsc_code_frac_chars_dupe_6grams
int64
qsc_code_frac_chars_dupe_7grams
int64
qsc_code_frac_chars_dupe_8grams
int64
qsc_code_frac_chars_dupe_9grams
int64
qsc_code_frac_chars_dupe_10grams
int64
qsc_code_frac_chars_replacement_symbols
int64
qsc_code_frac_chars_digital
int64
qsc_code_frac_chars_whitespace
int64
qsc_code_size_file_byte
int64
qsc_code_num_lines
int64
qsc_code_num_chars_line_max
int64
qsc_code_num_chars_line_mean
int64
qsc_code_frac_chars_alphabet
int64
qsc_code_frac_chars_comments
int64
qsc_code_cate_xml_start
int64
qsc_code_frac_lines_dupe_lines
int64
qsc_code_cate_autogen
int64
qsc_code_frac_lines_long_string
int64
qsc_code_frac_chars_string_length
int64
qsc_code_frac_chars_long_word_length
int64
qsc_code_frac_lines_string_concat
null
qsc_code_cate_encoded_data
int64
qsc_code_frac_chars_hex_words
int64
qsc_code_frac_lines_prompt_comments
int64
qsc_code_frac_lines_assert
int64
qsc_codepython_cate_ast
int64
qsc_codepython_frac_lines_func_ratio
int64
qsc_codepython_cate_var_zero
int64
qsc_codepython_frac_lines_pass
int64
qsc_codepython_frac_lines_import
int64
qsc_codepython_frac_lines_simplefunc
int64
qsc_codepython_score_lines_no_logic
int64
qsc_codepython_frac_lines_print
int64
effective
string
hits
int64
08bf881f4842232cb8798ea3f0bf2d197bb47152
68
py
Python
utils/__init__.py
demid5111/VoTT2COCO
1ad18ac283923928d0c5f566181d566889305c21
[ "Apache-2.0" ]
6
2021-08-30T08:40:33.000Z
2022-03-17T08:58:40.000Z
utils/__init__.py
demid5111/VoTT2COCO
1ad18ac283923928d0c5f566181d566889305c21
[ "Apache-2.0" ]
1
2021-11-09T01:34:49.000Z
2021-11-09T01:34:49.000Z
utils/__init__.py
demid5111/VoTT2COCO
1ad18ac283923928d0c5f566181d566889305c21
[ "Apache-2.0" ]
3
2021-05-07T09:27:49.000Z
2021-12-15T05:38:49.000Z
from .vott_utils import VOTTReader from .coco_utils import COCOSaver
34
34
0.867647
10
68
5.7
0.7
0.385965
0
0
0
0
0
0
0
0
0
0
0.102941
68
2
35
34
0.934426
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
0
null
1
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
0
0
0
5
08d8abca63134262b8b0b1f70b07d44547ba91a5
147
py
Python
cpgames/modules/core/greedysnake/modules/__init__.py
Wasabii88/Games
33262ca1958207a24e57e3532feded7e275b1dd1
[ "MIT" ]
1
2022-02-27T10:33:41.000Z
2022-02-27T10:33:41.000Z
cpgames/modules/core/greedysnake/modules/__init__.py
beiwei365/Games
f6499f378802d3212a08aeca761191b58714b7f0
[ "MIT" ]
null
null
null
cpgames/modules/core/greedysnake/modules/__init__.py
beiwei365/Games
f6499f378802d3212a08aeca761191b58714b7f0
[ "MIT" ]
null
null
null
'''initialize''' from .food import Apple from .snake import Snake from .endinterface import EndInterface from .utils import drawGameGrid, showScore
29.4
42
0.809524
18
147
6.611111
0.555556
0
0
0
0
0
0
0
0
0
0
0
0.115646
147
5
42
29.4
0.915385
0.068027
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
5
08e5bb6e6593cf591ccc2ee45f14bbf833442789
55
py
Python
secure_config_manager/__init__.py
RomanSviastyn/security_config_manager
9dbb5c02299238f66424c628ad39956710a3334c
[ "MIT" ]
null
null
null
secure_config_manager/__init__.py
RomanSviastyn/security_config_manager
9dbb5c02299238f66424c628ad39956710a3334c
[ "MIT" ]
null
null
null
secure_config_manager/__init__.py
RomanSviastyn/security_config_manager
9dbb5c02299238f66424c628ad39956710a3334c
[ "MIT" ]
null
null
null
from .secure_config_manager import SecureConfigManager
27.5
54
0.909091
6
55
8
1
0
0
0
0
0
0
0
0
0
0
0
0.072727
55
1
55
55
0.941176
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
5
3e9b531687ee933c1a1438ef62bbc17751305597
165
py
Python
tolteca/web/templates/dataprod/__init__.py
dennis-l/tolteca
1dffaffb585eb7027e26b34ae01e8632bef134cb
[ "BSD-3-Clause" ]
2
2021-09-28T18:51:37.000Z
2021-12-28T00:25:51.000Z
tolteca/web/templates/dataprod/__init__.py
dennis-l/tolteca
1dffaffb585eb7027e26b34ae01e8632bef134cb
[ "BSD-3-Clause" ]
2
2021-11-04T22:32:03.000Z
2022-01-11T21:40:34.000Z
tolteca/web/templates/dataprod/__init__.py
dennis-l/tolteca
1dffaffb585eb7027e26b34ae01e8632bef134cb
[ "BSD-3-Clause" ]
2
2021-07-23T14:00:51.000Z
2021-07-27T15:34:48.000Z
#! /usr/bin/env python import dash_html_components as html from dasha.web.templates import ComponentTemplate from .fts import FTS from .efficiency import Efficiency
27.5
49
0.830303
24
165
5.625
0.666667
0
0
0
0
0
0
0
0
0
0
0
0.115152
165
5
50
33
0.924658
0.127273
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
5
3ea9bfe3655b9cd935a50bd18e522d1b9e523a8d
158
py
Python
file_handling/__init__.py
netotz/p-dispersion-problem
123a6110dbf64d19a221da545c0590f7efc500dc
[ "MIT" ]
1
2021-09-23T06:31:47.000Z
2021-09-23T06:31:47.000Z
file_handling/__init__.py
binary-hideout/p-dispersion-problem
123a6110dbf64d19a221da545c0590f7efc500dc
[ "MIT" ]
1
2021-08-31T15:15:08.000Z
2021-08-31T15:15:08.000Z
file_handling/__init__.py
netotz/p-dispersion-problem
123a6110dbf64d19a221da545c0590f7efc500dc
[ "MIT" ]
1
2020-05-19T04:46:47.000Z
2020-05-19T04:46:47.000Z
''' Package for handling files and directories (folders). ''' from .file_io import write_instance, read_instance, write_results from .path import list_files
22.571429
65
0.791139
22
158
5.454545
0.772727
0
0
0
0
0
0
0
0
0
0
0
0.126582
158
6
66
26.333333
0.869565
0.335443
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
5
3ef2bbdd07e193899354dff7eebf6543c9014a2c
139
py
Python
dorado/__init__.py
wrightky/dorado
96bd9b84e3a3fb4ba43833caf770ec010eb38e40
[ "MIT" ]
19
2020-08-04T07:22:54.000Z
2022-03-21T15:09:56.000Z
dorado/__init__.py
wrightky/dorado
96bd9b84e3a3fb4ba43833caf770ec010eb38e40
[ "MIT" ]
22
2020-08-11T18:56:33.000Z
2022-03-07T15:58:07.000Z
dorado/__init__.py
wrightky/dorado
96bd9b84e3a3fb4ba43833caf770ec010eb38e40
[ "MIT" ]
4
2020-07-30T12:54:12.000Z
2020-10-26T09:24:36.000Z
__version__ = "2.5.0" from . import lagrangian_walker from . import parallel_routing from . import particle_track from . import routines
17.375
31
0.784173
19
139
5.368421
0.684211
0.392157
0
0
0
0
0
0
0
0
0
0.025424
0.151079
139
7
32
19.857143
0.838983
0
0
0
0
0
0.035971
0
0
0
0
0
0
1
0
false
0
0.8
0
0.8
0
1
0
0
null
1
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
1
0
0
5
4107f84ea29b4eae7c0c3354dc3ec9c9fee316e9
129
py
Python
EduSim/Envs/KSS/__init__.py
bigdata-ustc/EduSim
849eed229c24615e5f2c3045036311e83c22ea68
[ "MIT" ]
18
2019-11-11T03:45:35.000Z
2022-02-09T15:31:51.000Z
EduSim/Envs/KSS/__init__.py
ghzhao78506/EduSim
cb10e952eb212d8a9344143f889207b5cd48ba9d
[ "MIT" ]
3
2020-10-23T01:05:57.000Z
2021-03-16T12:12:24.000Z
EduSim/Envs/KSS/__init__.py
bigdata-ustc/EduSim
849eed229c24615e5f2c3045036311e83c22ea68
[ "MIT" ]
6
2020-06-09T21:32:00.000Z
2022-03-12T00:25:18.000Z
# coding: utf-8 # 2020/4/29 @ tongshiwei from .Env import KSSEnv from .Agent import KSSAgent from .kss_os import kss_train_eval
18.428571
34
0.767442
22
129
4.363636
0.772727
0
0
0
0
0
0
0
0
0
0
0.073395
0.155039
129
6
35
21.5
0.807339
0.27907
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
5
410e7d35e6d9ce1f7d73558f3d863dfde08ebb28
158
py
Python
server/src/groups/admin.py
MLH-Fellowship/Fellowship-Companion
5df542c3b228692040fa57f0927c6e727d990661
[ "MIT" ]
2
2020-07-17T10:52:31.000Z
2020-07-17T15:43:01.000Z
server/src/groups/admin.py
MLH-Fellowship/Fellowship-Companion
5df542c3b228692040fa57f0927c6e727d990661
[ "MIT" ]
46
2020-07-16T05:46:27.000Z
2022-02-27T08:14:25.000Z
server/src/groups/admin.py
LakshyaKhatri/Fellowship-Companion
5df542c3b228692040fa57f0927c6e727d990661
[ "MIT" ]
3
2020-07-17T12:48:17.000Z
2021-09-09T15:00:59.000Z
from django.contrib import admin from .models import GithubUser, Team # Register your models here. admin.site.register(GithubUser) admin.site.register(Team)
22.571429
36
0.810127
22
158
5.818182
0.545455
0.140625
0.265625
0
0
0
0
0
0
0
0
0
0.107595
158
6
37
26.333333
0.907801
0.164557
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
0.5
0
0.5
0
1
0
0
null
0
1
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
0
0
0
5
4122c52cf96b86b2173d583518f73d5bfdbfd71b
80
py
Python
python/evalrescallers/tasks/version.py
martinghunt/tb-amr-benchmarking
276f4f7f30639dacc62b3e8e395b2d2ce8675089
[ "MIT" ]
6
2018-11-09T14:43:19.000Z
2020-04-12T02:13:18.000Z
python/evalrescallers/tasks/version.py
martinghunt/tb-amr-benchmarking
276f4f7f30639dacc62b3e8e395b2d2ce8675089
[ "MIT" ]
null
null
null
python/evalrescallers/tasks/version.py
martinghunt/tb-amr-benchmarking
276f4f7f30639dacc62b3e8e395b2d2ce8675089
[ "MIT" ]
1
2020-06-25T05:59:39.000Z
2020-06-25T05:59:39.000Z
import evalrescallers def run(options): print(evalrescallers.__version__)
13.333333
37
0.7875
8
80
7.375
0.875
0
0
0
0
0
0
0
0
0
0
0
0.1375
80
5
38
16
0.855072
0
0
0
0
0
0
0
0
0
0
0
0
1
0.333333
false
0
0.333333
0
0.666667
0.333333
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
1
0
1
0
0
5
41257120db446692932281ca43b78b0e1253f7f2
37
py
Python
app/modules/employees/__init__.py
gurgy11/caffeinated
278d09a88162d12409f0af445797b9790a319528
[ "MIT" ]
1
2022-02-14T01:02:15.000Z
2022-02-14T01:02:15.000Z
app/modules/employees/__init__.py
gurgy11/caffeinated
278d09a88162d12409f0af445797b9790a319528
[ "MIT" ]
null
null
null
app/modules/employees/__init__.py
gurgy11/caffeinated
278d09a88162d12409f0af445797b9790a319528
[ "MIT" ]
null
null
null
from .employees_model import Employee
37
37
0.891892
5
37
6.4
1
0
0
0
0
0
0
0
0
0
0
0
0.081081
37
1
37
37
0.941176
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
0
0
0
5
f5c503eceec02f71e876f85755017266dd295a3d
77
py
Python
thunder/registration/__init__.py
pearsonlab/thunder
b15ba0a38642312d597a98643cf3514e2d46b69d
[ "Apache-2.0" ]
1
2017-02-02T19:14:42.000Z
2017-02-02T19:14:42.000Z
thunder/registration/__init__.py
pearsonlab/thunder
b15ba0a38642312d597a98643cf3514e2d46b69d
[ "Apache-2.0" ]
null
null
null
thunder/registration/__init__.py
pearsonlab/thunder
b15ba0a38642312d597a98643cf3514e2d46b69d
[ "Apache-2.0" ]
null
null
null
from thunder.registration.methods.crosscorr import CrossCorr, PlanarCrossCorr
77
77
0.896104
8
77
8.625
0.875
0
0
0
0
0
0
0
0
0
0
0
0.051948
77
1
77
77
0.945205
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
5
f5e97ca575b2092372aba8d8daf5e2eb90d76427
54
py
Python
tests/io/__init__.py
ziatdinovmax/pyNSID
a7cbf2b62ad657b14d2342e694ca428b4e7c9c2f
[ "MIT" ]
1
2020-05-25T17:14:40.000Z
2020-05-25T17:14:40.000Z
tests/io/__init__.py
ziatdinovmax/pyNSID
a7cbf2b62ad657b14d2342e694ca428b4e7c9c2f
[ "MIT" ]
34
2020-06-05T20:19:02.000Z
2021-10-15T21:31:12.000Z
tests/io/__init__.py
ziatdinovmax/pyNSID
a7cbf2b62ad657b14d2342e694ca428b4e7c9c2f
[ "MIT" ]
3
2020-05-22T20:35:24.000Z
2020-09-11T19:22:41.000Z
from . import test_hdf_io __all__ = ['test_hdf_io.py']
27
28
0.759259
10
54
3.3
0.7
0.424242
0.545455
0
0
0
0
0
0
0
0
0
0.111111
54
2
28
27
0.6875
0
0
0
0
0
0.254545
0
0
0
0
0
0
1
0
false
0
0.5
0
0.5
0
1
0
0
null
1
1
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
0
0
0
5
f5ff0f6a6fb260b2aa9f9b37c103f4529e09e90b
168
py
Python
core/test.py
chenwangwww/ppython
13a2f1193714133701743bfdf1a8add61a29dd4c
[ "Apache-2.0" ]
null
null
null
core/test.py
chenwangwww/ppython
13a2f1193714133701743bfdf1a8add61a29dd4c
[ "Apache-2.0" ]
null
null
null
core/test.py
chenwangwww/ppython
13a2f1193714133701743bfdf1a8add61a29dd4c
[ "Apache-2.0" ]
null
null
null
#!/usr/bin/env python3 # -*- coding: UTF-8 -*- def go() : return {"id":10, "sequence":"chen"} def go2(a,b,c): return str({"id":b, "sequence":"chen"}) print('chen')
16.8
40
0.565476
27
168
3.518519
0.740741
0.252632
0
0
0
0
0
0
0
0
0
0.034483
0.136905
168
10
41
16.8
0.62069
0.255952
0
0
0
0
0.258065
0
0
0
0
0
0
1
0.4
false
0
0
0.4
0.8
0.2
1
0
0
null
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
0
1
1
0
0
5
f5ffe9ffa1665d578ea60df79eae8cf6f81fa82c
108
py
Python
sklearn/tutorials/test.py
Li-Michael/learn
571e1a2d45105ff370720fe64f1d1cca4ff63358
[ "MIT" ]
null
null
null
sklearn/tutorials/test.py
Li-Michael/learn
571e1a2d45105ff370720fe64f1d1cca4ff63358
[ "MIT" ]
null
null
null
sklearn/tutorials/test.py
Li-Michael/learn
571e1a2d45105ff370720fe64f1d1cca4ff63358
[ "MIT" ]
null
null
null
#message = "Hello how are you?" #for word in message.split(): # print(word) import sys print(sys.argv)
13.5
31
0.666667
17
108
4.235294
0.764706
0
0
0
0
0
0
0
0
0
0
0
0.185185
108
7
32
15.428571
0.818182
0.675926
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
0.5
0
0.5
0.5
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
0
1
0
5
eb0317ebf0d35d981ed818b7249ded3868f9e92a
266
py
Python
app/platforms/python/__init__.py
toptal/license-cop
84f3dbf7b3632d761e423b182ce0d9927b885f41
[ "MIT" ]
24
2017-11-21T18:30:19.000Z
2021-11-08T10:52:48.000Z
app/platforms/python/__init__.py
toptal/license-cop
84f3dbf7b3632d761e423b182ce0d9927b885f41
[ "MIT" ]
27
2017-11-22T15:50:56.000Z
2021-09-30T09:03:21.000Z
app/platforms/python/__init__.py
toptal/license-cop
84f3dbf7b3632d761e423b182ce0d9927b885f41
[ "MIT" ]
5
2017-11-21T14:08:21.000Z
2021-04-07T19:30:09.000Z
from app.platforms.python.repository_matcher import PythonRepositoryMatcher from app.platforms.python.package_registry import PythonPackageRegistry from app.platform import Platform INSTANCE = Platform('Python', PythonRepositoryMatcher(), PythonPackageRegistry())
38
81
0.860902
26
266
8.730769
0.5
0.092511
0.140969
0.193833
0
0
0
0
0
0
0
0
0.071429
266
6
82
44.333333
0.919028
0
0
0
0
0
0.022556
0
0
0
0
0
0
1
0
false
0
0.75
0
0.75
0
1
0
0
null
0
0
1
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
1
0
0
5
eb3c950e69ca126f1113d35e3fd8fa8f00312366
43
py
Python
domain/exception.py
pdaw/spaced-repetition
44cc9f95745173baa469fba495fef568ef9dfd4e
[ "Apache-2.0" ]
2
2019-08-19T06:57:46.000Z
2021-06-02T06:10:24.000Z
domain/exception.py
pdaw/spaced-repetition
44cc9f95745173baa469fba495fef568ef9dfd4e
[ "Apache-2.0" ]
1
2019-09-26T11:20:50.000Z
2019-09-26T11:20:50.000Z
domain/exception.py
pdaw/spaced-repetition
44cc9f95745173baa469fba495fef568ef9dfd4e
[ "Apache-2.0" ]
1
2019-09-24T07:42:21.000Z
2019-09-24T07:42:21.000Z
class DomainException(Exception): pass
14.333333
33
0.767442
4
43
8.25
1
0
0
0
0
0
0
0
0
0
0
0
0.162791
43
2
34
21.5
0.916667
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0.5
0
0
0.5
0
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
1
0
0
0
0
0
5
de449a2d48674eff73d8b2b418a015f3e2460c6a
277
py
Python
django_query_profiler/django/contrib/gis/db/backends/postgis/base.py
sonej/django-query-profiler
4afe3694ded26d7ba0b435f5666e990b668d85b5
[ "BSD-3-Clause" ]
97
2020-03-03T01:20:35.000Z
2022-03-23T14:06:09.000Z
django_query_profiler/django/contrib/gis/db/backends/postgis/base.py
sonej/django-query-profiler
4afe3694ded26d7ba0b435f5666e990b668d85b5
[ "BSD-3-Clause" ]
24
2020-03-06T17:35:08.000Z
2022-02-09T20:06:05.000Z
django_query_profiler/django/contrib/gis/db/backends/postgis/base.py
sonej/django-query-profiler
4afe3694ded26d7ba0b435f5666e990b668d85b5
[ "BSD-3-Clause" ]
9
2020-03-22T18:17:09.000Z
2022-01-31T18:59:11.000Z
import django.contrib.gis.db.backends.postgis.base as postgis_base from django_query_profiler.django.db.backends.database_wrapper_mixin import QueryProfilerDatabaseWrapperMixin class DatabaseWrapper(postgis_base.DatabaseWrapper, QueryProfilerDatabaseWrapperMixin): pass
34.625
109
0.877256
30
277
7.9
0.633333
0.139241
0
0
0
0
0
0
0
0
0
0
0.068592
277
7
110
39.571429
0.918605
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0.25
0.5
0
0.75
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
1
1
0
1
0
0
5
de4bd14f4c08374a6841e5200a6f615569901822
29
py
Python
python/uw/pulsar/__init__.py
coclar/pointlike
7088724b5a40cf787371aff69e64c9bec701f578
[ "BSD-3-Clause" ]
1
2019-03-19T14:45:28.000Z
2019-03-19T14:45:28.000Z
python/uw/pulsar/__init__.py
coclar/pointlike
7088724b5a40cf787371aff69e64c9bec701f578
[ "BSD-3-Clause" ]
1
2019-03-05T17:30:52.000Z
2019-03-05T18:12:15.000Z
python/uw/pulsar/__init__.py
coclar/pointlike
7088724b5a40cf787371aff69e64c9bec701f578
[ "BSD-3-Clause" ]
3
2018-03-14T15:34:07.000Z
2021-11-05T15:29:32.000Z
# init file for pulsar module
29
29
0.793103
5
29
4.6
1
0
0
0
0
0
0
0
0
0
0
0
0.172414
29
1
29
29
0.958333
0.931034
0
null
0
null
0
0
null
0
0
0
null
1
null
true
0
0
null
null
null
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
1
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
0
0
0
0
0
5
de736791a2c769db3c303f56b8163722fe2c42b6
151
py
Python
packages/dcos-history/extra/history/server.py
nkhanal0/dcos
fe0571b6519c86b6c33db4af42c63ab3e9087dcf
[ "Apache-2.0" ]
3
2017-02-05T06:58:28.000Z
2017-05-12T07:28:53.000Z
packages/dcos-history/extra/history/server.py
nkhanal0/dcos
fe0571b6519c86b6c33db4af42c63ab3e9087dcf
[ "Apache-2.0" ]
720
2017-02-08T04:04:19.000Z
2021-09-14T14:04:56.000Z
packages/dcos-history/extra/history/server.py
nkhanal0/dcos
fe0571b6519c86b6c33db4af42c63ab3e9087dcf
[ "Apache-2.0" ]
14
2017-02-08T03:57:24.000Z
2019-10-28T12:14:49.000Z
import os import history.server_util app = history.server_util.create_app() def start(): os.system("gunicorn --bind 0.0.0.0:15055 server:app")
15.1
57
0.721854
25
151
4.24
0.56
0.056604
0.320755
0
0
0
0
0
0
0
0
0.069231
0.139073
151
9
58
16.777778
0.746154
0
0
0
0
0
0.264901
0
0
0
0
0
0
1
0.2
false
0
0.4
0
0.6
0
1
0
0
null
0
1
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
1
0
0
5
de8280b9ef27ae41bf73cc1eb75530ecd6d2eb5a
91
py
Python
enthought/mayavi/modules/surface.py
enthought/etsproxy
4aafd628611ebf7fe8311c9d1a0abcf7f7bb5347
[ "BSD-3-Clause" ]
3
2016-12-09T06:05:18.000Z
2018-03-01T13:00:29.000Z
enthought/mayavi/modules/surface.py
enthought/etsproxy
4aafd628611ebf7fe8311c9d1a0abcf7f7bb5347
[ "BSD-3-Clause" ]
1
2020-12-02T00:51:32.000Z
2020-12-02T08:48:55.000Z
enthought/mayavi/modules/surface.py
enthought/etsproxy
4aafd628611ebf7fe8311c9d1a0abcf7f7bb5347
[ "BSD-3-Clause" ]
null
null
null
# proxy module from __future__ import absolute_import from mayavi.modules.surface import *
22.75
38
0.835165
12
91
5.916667
0.75
0
0
0
0
0
0
0
0
0
0
0
0.120879
91
3
39
30.333333
0.8875
0.131868
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
5
deafd5afe49753eff9241379d671bd36f4e8c79f
7,943
py
Python
util/parse_result.py
Abel-Huang/simple-image-classifier
89d2822c2b06cdec728f734d43d9638f4b601348
[ "MIT" ]
4
2017-05-17T08:01:38.000Z
2018-07-22T11:13:55.000Z
util/parse_result.py
Abel-Huang/ImageClassifier
89d2822c2b06cdec728f734d43d9638f4b601348
[ "MIT" ]
null
null
null
util/parse_result.py
Abel-Huang/ImageClassifier
89d2822c2b06cdec728f734d43d9638f4b601348
[ "MIT" ]
null
null
null
import datetime # 这个文件用来处理数据库返回的数据<class 'list'> list的元素是dict test_list =[{'mlmethod': 'svc', 'total': 52, 'feamethod': 'sift', 'created': datetime.datetime(2017, 4, 25, 23, 12, 53), 'unitag': '1493133167337', 'id': 1, 'correct': 49, 'classify': 'glass'}, {'mlmethod': 'svc', 'total': 119, 'feamethod': 'sift', 'created': datetime.datetime(2017, 4, 25, 23, 13, 3), 'unitag': '1493133167337', 'id': 2, 'correct': 105, 'classify': 'car'}, {'mlmethod': 'svc', 'total': 44, 'feamethod': 'sift', 'created': datetime.datetime(2017, 4, 25, 23, 13, 7), 'unitag': '1493133167337', 'id': 3, 'correct': 39, 'classify': 'gun'}, {'mlmethod': 'svc', 'total': 63, 'feamethod': 'sift', 'created': datetime.datetime(2017, 4, 25, 23, 13, 13), 'unitag': '1493133167337', 'id': 4, 'correct': 56, 'classify': 'flowers'}, {'mlmethod': 'svc', 'total': 131, 'feamethod': 'sift', 'created': datetime.datetime(2017, 4, 25, 23, 13, 25), 'unitag': '1493133167337', 'id': 5, 'correct': 123, 'classify': 'worldcup'}, {'mlmethod': 'svc', 'total': 78, 'feamethod': 'sift', 'created': datetime.datetime(2017, 4, 25, 23, 13, 32), 'unitag': '1493133167337', 'id': 6, 'correct': 68, 'classify': 'fruits'}, {'mlmethod': 'svc', 'total': 59, 'feamethod': 'sift', 'created': datetime.datetime(2017, 4, 25, 23, 13, 37), 'unitag': '1493133167337', 'id': 7, 'correct': 57, 'classify': 'city'}, {'mlmethod': 'svc', 'total': 49, 'feamethod': 'sift', 'created': datetime.datetime(2017, 4, 25, 23, 13, 41), 'unitag': '1493133167337', 'id': 8, 'correct': 48, 'classify': 'dog'}, {'mlmethod': 'svc', 'total': 54, 'feamethod': 'sift', 'created': datetime.datetime(2017, 4, 25, 23, 13, 46), 'unitag': '1493133167337', 'id': 9, 'correct': 46, 'classify': 'fireworks'}, {'mlmethod': 'svc', 'total': 24, 'feamethod': 'sift', 'created': datetime.datetime(2017, 4, 25, 23, 13, 48), 'unitag': '1493133167337', 'id': 10, 'correct': 24, 'classify': 'earth'}, {'mlmethod': 'svc', 'total': 78, 'feamethod': 'sift', 'created': datetime.datetime(2017, 4, 25, 23, 13, 54), 'unitag': '1493133167337', 'id': 11, 'correct': 73, 'classify': 'sky'}, {'mlmethod': 'svc', 'total': 44, 'feamethod': 'sift', 'created': datetime.datetime(2017, 4, 25, 23, 13, 59), 'unitag': '1493133167337', 'id': 12, 'correct': 40, 'classify': 'gold'}, {'mlmethod': 'svc', 'total': 102, 'feamethod': 'sift', 'created': datetime.datetime(2017, 4, 25, 23, 14, 6), 'unitag': '1493133167337', 'id': 13, 'correct': 74, 'classify': 'plane'}, {'mlmethod': 'svc', 'total': 897, 'feamethod': 'sift', 'created': datetime.datetime(2017, 4, 25, 23, 14, 6), 'unitag': '1493133167337', 'id': 14, 'correct': 802, 'classify': 'total'}] # 这个函数用于解析采用不同核函数的数据 def parse_ml_result(result_list): svc_list=[] rbf_list=[] poly_list=[] liner_list=[] name = ('car', 'city', 'dog', 'fireworks', 'flowers', 'fruits', 'glass', 'gold', 'gun', 'plane', 'sky', 'worldcup') llabel = ('svc', 'rbf_svc', 'poly_svc', 'lin_svc') # 这里的每一个元素都是一条数据 for r_dict in result_list: if r_dict['mlmethod']==llabel[0]: for item in name: if item==r_dict['classify']: svc_list.insert(name.index(item),r_dict['correct']) break else: continue elif r_dict['mlmethod']==llabel[1]: for item in name: if item==r_dict['classify']: rbf_list.insert(name.index(item),r_dict['correct']) break else: continue elif r_dict['mlmethod'] == llabel[2]: for item in name: if item == r_dict['classify']: poly_list.insert(name.index(item), r_dict['correct']) else: continue elif r_dict['mlmethod'] == llabel[3]: for item in name: if item == r_dict['classify']: liner_list.insert(name.index(item), r_dict['correct']) else: continue return svc_list, rbf_list, poly_list, liner_list, name, llabel svc_list, rbf_list, poly_list, liner_list, _name, _llabel=parse_ml_result(test_list) print(svc_list) print(rbf_list) print(poly_list) print(liner_list) # 这个函数用于解析采用不同特征提取的数据 def parse_fea_result(result_list): sift_list = [] surf_list = [] orb_list = [] brisk_list = [] name = ('car', 'city', 'dog', 'fireworks', 'flowers', 'fruits', 'glass', 'gold', 'gun', 'plane', 'sky', 'worldcup') llabel = ('sift', 'surf', 'orb', 'brisk') # 这里的每一个元素都是一条数据 for r_dict in result_list: if r_dict['mlmethod'] == llabel[0]: for item in name: if item == r_dict['classify']: svc_list.insert(name.index(item), r_dict['correct']) elif r_dict['mlmethod'] == llabel[1]: for item in name: if item == r_dict['classify']: rbf_list.insert(name.index(item), r_dict['correct']) elif r_dict['mlmethod'] == llabel[2]: for item in name: if item == r_dict['classify']: poly_list.insert(name.index(item), r_dict['correct']) elif r_dict['mlmethod'] == llabel[3]: for item in name: if item == r_dict['classify']: liner_list.insert(name.index(item), r_dict['correct']) return svc_list, rbf_list, poly_list, liner_list, name, llabel # 用于解析summary表 def parse_summary(summary_list): sift_list=[] surf_list=[] orb_list=[] brisk_list=[] name = ('sift', 'surf', 'orb', 'brisk') llabel = ('svc', 'rbf', 'poly', 'lin') # 这里的每一个元素都是一条数据 for r_dict in summary_list: if r_dict['mlmethod']==llabel[0] and r_dict['classify']=='total': if r_dict['feamethod']==name[0]: sift_list.insert(0,int(r_dict['correct']/586*100)) elif r_dict['feamethod']==name[1]: surf_list.insert(0,int(r_dict['correct']/586*100)) elif r_dict['feamethod'] == name[2]: orb_list.insert(0, int(r_dict['correct']/586*100)) elif r_dict['feamethod']==name[3]: brisk_list.insert(0,int(r_dict['correct']/586*100)) elif r_dict['mlmethod']==llabel[1] and r_dict['classify']=='total': if r_dict['feamethod']==name[0]: sift_list.insert(1,int(r_dict['correct']/586*100)) elif r_dict['feamethod']==name[1]: surf_list.insert(1,int(r_dict['correct']/586*100)) elif r_dict['feamethod'] == name[2]: orb_list.insert(1, int(r_dict['correct']/586*100)) elif r_dict['feamethod']==name[3]: brisk_list.insert(1,int(r_dict['correct']/586*100)) elif r_dict['mlmethod'] == llabel[2] and r_dict['classify']=='total': if r_dict['feamethod'] == name[0]: sift_list.insert(2, int(r_dict['correct']/586*100)) elif r_dict['feamethod'] == name[1]: surf_list.insert(2, int(r_dict['correct']/586*100)) elif r_dict['feamethod'] == name[2]: orb_list.insert(2, int(r_dict['correct']/586*100)) elif r_dict['feamethod'] == name[3]: brisk_list.insert(2, int(r_dict['correct']/586*100)) elif r_dict['mlmethod'] == llabel[3] and r_dict['classify']=='total': if r_dict['feamethod'] == name[0]: sift_list.insert(3, int(r_dict['correct']/586*100)) elif r_dict['feamethod'] == name[1]: surf_list.insert(3, int(r_dict['correct']/586*100)) elif r_dict['feamethod'] == name[2]: orb_list.insert(3, int(r_dict['correct']/586*100)) elif r_dict['feamethod'] == name[3]: brisk_list.insert(3, int(r_dict['correct']/586*100)) return sift_list, surf_list, orb_list, brisk_list, name, llabel
54.40411
193
0.565026
1,006
7,943
4.319085
0.11332
0.0771
0.066283
0.066283
0.739241
0.733717
0.733026
0.727043
0.727043
0.697123
0
0.096294
0.239078
7,943
146
194
54.40411
0.622601
0.017626
0
0.582677
0
0
0.229861
0
0
0
0
0
0
1
0.023622
false
0
0.007874
0
0.055118
0.031496
0
0
0
null
0
0
0
0
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
5
deb0df44011592e9e3ce309d636b77d96dd90b38
112
py
Python
hnerror.py
sandeepbhat/shna
556f6948b12fee933f51583e3b82daf9882937fb
[ "MIT" ]
null
null
null
hnerror.py
sandeepbhat/shna
556f6948b12fee933f51583e3b82daf9882937fb
[ "MIT" ]
null
null
null
hnerror.py
sandeepbhat/shna
556f6948b12fee933f51583e3b82daf9882937fb
[ "MIT" ]
null
null
null
"""Custom shna errors.""" class ShnaError(Exception): """Custom shna exception class.""" pass
14
39
0.598214
11
112
6.090909
0.636364
0.298507
0
0
0
0
0
0
0
0
0
0
0.25
112
7
40
16
0.797619
0.428571
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0.5
0
0
0.5
0
1
0
0
null
1
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
1
0
0
0
0
0
5
deb4e3cb77d4313270763d4d779a67f7641f89dc
148
py
Python
docs/source/Text Editor/Sublime Text/config_python_interpreter_example.py
MacHu-GWU/Dev-Exp-Share
4215d3872e5b2b26c3a37301d0dbe39c2bfecaea
[ "MIT" ]
2
2021-07-23T03:03:43.000Z
2021-10-04T12:03:54.000Z
docs/source/Text Editor/Sublime Text/config_python_interpreter_example.py
MacHu-GWU/Dev-Exp-Share
4215d3872e5b2b26c3a37301d0dbe39c2bfecaea
[ "MIT" ]
3
2021-09-23T23:32:14.000Z
2022-03-30T16:35:27.000Z
docs/source/Text Editor/Sublime Text/config_python_interpreter_example.py
MacHu-GWU/Dev-Exp-Share
4215d3872e5b2b26c3a37301d0dbe39c2bfecaea
[ "MIT" ]
null
null
null
#!/usr/bin/env python # -*- coding: utf-8 -*- from __future__ import print_function import sys if __name__ == "__main__": print(sys.version_info)
18.5
37
0.716216
21
148
4.380952
0.857143
0
0
0
0
0
0
0
0
0
0
0.007813
0.135135
148
8
38
18.5
0.710938
0.283784
0
0
0
0
0.07619
0
0
0
0
0
0
1
0
true
0
0.5
0
0.5
0.5
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
0
1
0
5
defefce6c0d794422d0c5ff60e3c2392006ace4a
645
py
Python
service_objects/errors.py
jackton1/django-service-objects
cdcaedb64154b949ab6c5e5de60b4f9835f1cc98
[ "MIT" ]
328
2017-08-13T19:09:31.000Z
2022-03-30T09:02:35.000Z
service_objects/errors.py
jackton1/django-service-objects
cdcaedb64154b949ab6c5e5de60b4f9835f1cc98
[ "MIT" ]
50
2017-08-17T02:31:49.000Z
2022-02-23T22:45:13.000Z
service_objects/errors.py
jackton1/django-service-objects
cdcaedb64154b949ab6c5e5de60b4f9835f1cc98
[ "MIT" ]
32
2017-08-15T03:29:53.000Z
2022-01-24T22:18:05.000Z
class InvalidInputsError(Exception): """ Raised during :class:`Service`'s :meth:`service_clean` method. Encapsulates both field_errors and non_field_errors into a single entity. :param dictionary errors: :class:`Services`'s ``errors`` dictionary :param dictionary non_field_errors: :class:`Service`'s ``non_field_errors`` dictionary """ def __init__(self, errors, non_field_errors): self.errors = errors self.non_field_errors = non_field_errors def __repr__(self): return '{}({}, {})'.format( type(self).__name__, repr(self.errors), repr(self.non_field_errors))
33.947368
80
0.674419
77
645
5.285714
0.402597
0.216216
0.240786
0.09828
0
0
0
0
0
0
0
0
0.203101
645
18
81
35.833333
0.791829
0.460465
0
0
0
0
0.032051
0
0
0
0
0
0
1
0.285714
false
0
0
0.142857
0.571429
0
0
0
0
null
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
0
1
1
0
0
5
720c991b81a10dde92bc1b5040ad766040248aa6
36,836
py
Python
model.py
thanard/dorp
3e635699018365696fb7d623cf1c519121fafa69
[ "MIT" ]
null
null
null
model.py
thanard/dorp
3e635699018365696fb7d623cf1c519121fafa69
[ "MIT" ]
null
null
null
model.py
thanard/dorp
3e635699018365696fb7d623cf1c519121fafa69
[ "MIT" ]
null
null
null
import torch import torch.nn as nn import torch.nn.functional as F from utils.gen_utils import * from utils.dataset import * class CPC(nn.Module): def __init__(self, encoder, z_dim, batch_size, in_channels=3, temp=1, mode='single_encoder', input_dim=0, W_form=0, num_filters=32, num_onehots=2, separate_W=0, num_layers=3, normalization=None, n_key_onehots=1, # only used for key encoders n_agent_onehots=1, # only used for key encoders alpha=1, # tried 10, 100 ): super(CPC, self).__init__() self.num_onehots = num_onehots if encoder == 'cnn': self.encoder = FactoredEncoder(input_dim, in_channels=in_channels, out_onehots=num_onehots, z_dim=z_dim, num_filters=num_filters, mode=mode, temp=temp) elif encoder == 'cswm': self.encoder = CSWM(input_dim, in_channels=in_channels, num_filters=num_filters, z_dim=z_dim, out_onehots=num_onehots, mode=mode, temp=temp, num_layers=num_layers, normalization=normalization) # circular_padding=True, # downsampling_by=4, elif encoder == 'cswm-scaled-down': conv_seq = ['same']*num_layers conv_seq.append('half') conv_seq.append('half') conv_seq = '-'.join(conv_seq) self.encoder = CSWMCircular(input_dim, in_channels=in_channels, num_filters=num_filters, z_dim=z_dim, out_onehots=num_onehots, mode=mode, temp=temp, num_layers=num_layers, normalization=normalization, conv_seq=conv_seq) elif encoder == 'cswm-scaled-down-first': conv_seq = ['same']*num_layers conv_seq.insert(0, 'half') conv_seq.insert(0, 'half') conv_seq = '-'.join(conv_seq) self.encoder = CSWMCircular(input_dim, in_channels=in_channels, num_filters=num_filters, z_dim=z_dim, out_onehots=num_onehots, mode=mode, temp=temp, num_layers=num_layers, normalization=normalization, conv_seq=conv_seq) elif encoder == 'cswm-gt': self.encoder = CSWM(input_dim, in_channels=in_channels, num_filters=num_filters, z_dim=z_dim, out_onehots=num_onehots, mode=mode, temp=temp, num_layers=num_layers, normalization=normalization, gt_extractor=True) elif encoder == 'cswm-key': self.num_onehots = n_key_onehots + n_agent_onehots self.encoder = CSWMKey(input_dim, in_channels=in_channels, num_filters=num_filters, z_dim=z_dim, out_key_onehots=n_key_onehots, out_agent_onehots=n_agent_onehots, mode=mode, temp=temp, num_layers=num_layers, normalization=normalization) elif encoder == 'cswm-key-v2': self.num_onehots = n_key_onehots + n_agent_onehots self.encoder = CSWMKeyV2(input_dim, in_channels=in_channels, num_filters=num_filters, z_dim=z_dim, out_key_onehots=n_key_onehots, out_agent_onehots=n_agent_onehots, mode=mode, temp=temp, num_layers=num_layers, normalization=normalization) else: raise NotImplementedError("Encoder not recognized: {}".format(encoder)) self.encoder_type = encoder self.num_layers = num_layers self.alpha = alpha self.encoder_form = encoder self.batch_size = batch_size self.mode = mode self.z_dim = z_dim self.W_form = W_form self.separate_W = separate_W self.k = nn.Parameter(torch.tensor(1.)) self.W = nn.Parameter(torch.rand(z_dim * self.num_onehots, z_dim * self.num_onehots)) self.I = torch.eye(z_dim * self.num_onehots).cuda() def encode(self, x, vis=False, continuous=False): if vis: res = self.encoder.vis(x) return res else: x = self.encoder(x, continuous) return x def get_W(self): if self.W_form == 0: # random W matrix W = self.W elif self.W_form == 1: # identity matrix W = self.I elif self.W_form == 3: if self.separate_W: base_submatrix = 2 * torch.eye(self.z_dim) - torch.ones(self.z_dim, self.z_dim) base = torch.zeros(self.z_dim*self.num_onehots, self.z_dim*self.num_onehots) for i in range(self.num_onehots): base[self.z_dim*i:self.z_dim*i+self.z_dim, self.z_dim*i:self.z_dim*i+self.z_dim] = base_submatrix else: base = 2 * torch.eye(self.z_dim * self.num_onehots) - torch.ones(self.z_dim * self.num_onehots, self.z_dim * self.num_onehots) W = torch.exp(self.k) * ((torch.sigmoid(self.W) + self.alpha*self.I) * base.cuda()) else: raise NotImplementedError("W form %d not used" % self.W_form) return W def log_density(self, x_next, z): assert x_next.size(0) == z.size(0) # batch sizes must match if self.mode == 'double_encoder': z_next = self.encode(x_next, continuous=True) elif self.mode == 'single_encoder' or self.mode == 'continuous': z_next = self.encode(x_next) else: raise NotImplementedError("Mode not recognized: {}".format(self.mode)) z_next = z_next.view(z_next.size(0), -1) z = z.view(z.size(0), -1) z = z.unsqueeze(2) # bs x z_dim x 1 z_next = z_next.unsqueeze(2) W = self.get_W() w = W.repeat(z.size(0), 1, 1) f_out = torch.bmm(torch.bmm(z_next.permute(0, 2, 1), w), z) f_out = f_out.squeeze() return f_out def compute_logits(self, z_a, z_pos, ce_temp=1.): """ Uses logits trick from CURL: - compute (B,B) matrix z_a (W z_pos.T) - positives are all diagonal elements - negatives are all other elements - to compute loss use multiclass cross entropy with identity matrix for labels """ assert z_a.size(0) == z_pos.size(0) z_pos = z_pos.reshape(z_pos.size(0), -1) z_a = z_a.reshape(z_a.size(0), -1) W = self.get_W() Wz = torch.matmul(W, z_pos.T) # (z_dim,B) logits = torch.matmul(z_a, Wz) # (B,B) logits = logits - torch.max(logits, 1)[0][:, None] return logits/ce_temp def energy(self, state, next_state, sigma=.5): """Energy function based on normalized squared L2 norm.""" norm = 0.5 / (sigma ** 2) diff = state - next_state return norm * diff.pow(2).sum(2).mean(1) def forward(self, *input): return self.log_density(*input) class FactoredEncoder(nn.Module): def __init__(self, input_dim, out_onehots=2, in_channels=2, z_dim=8, num_filters=32, mode='single_encoder', temp=1): super(FactoredEncoder, self).__init__() self.z_dim = z_dim self.num_filters = num_filters self.temp = temp self.mode = mode self.input_dim = input_dim self.out_onehots = out_onehots self._conv_1 = nn.Conv2d(in_channels=in_channels, out_channels=num_filters, kernel_size=3, stride=1) self._conv_2 = nn.Conv2d(in_channels=num_filters, out_channels=num_filters, kernel_size=1, stride=1) self._conv_3 = nn.Conv2d(in_channels=num_filters, out_channels=num_filters, kernel_size=1, stride=1) self.h_dim = input_dim - 2 self.fc = nn.Linear(num_filters * self.h_dim * self.h_dim, z_dim * out_onehots) self.ln = nn.LayerNorm(z_dim) def vis(self, inputs): x = inputs x = F.relu(self._conv_1(x)) x = F.relu(self._conv_2(x)) x = F.relu(self._conv_3(x)) x = x.reshape(-1, self.num_filters * self.h_dim * self.h_dim) x = self.fc(x) x = x.view(-1, self.out_onehots, self.z_dim) x = self.ln(x) x = torch.argmax(x, dim=2) return x def forward(self, inputs, continuous=False): x = inputs x = F.relu(self._conv_1(x)) x = F.relu(self._conv_2(x)) x = F.relu(self._conv_3(x)) x = x.reshape(-1, self.num_filters * self.h_dim * self.h_dim) x = self.fc(x) x = x.view(-1, self.out_onehots, self.z_dim) if self.mode == 'continuous': return x elif self.mode == 'single_encoder': x = self.ln(x) x = F.gumbel_softmax(x, tau=self.temp, hard=True) return x elif self.mode == 'double_encoder': if continuous: x = F.softmax(x, dim=2) return x else: x = self.ln(x) x = F.gumbel_softmax(x, tau=self.temp, hard=True) return x return x class CircularConv2d(nn.Module): def __init__(self, size, in_channels, out_channels, circular_padding=False): super(CircularConv2d, self).__init__() self.circular_padding = circular_padding if size == 'same': self.layer = nn.Conv2d(in_channels=in_channels, out_channels=out_channels, kernel_size=3, padding=0) if circular_padding \ else nn.Conv2d(in_channels=in_channels, out_channels=out_channels, kernel_size=3, padding=1) elif size == 'half': self.layer = nn.Conv2d(in_channels=in_channels, out_channels=out_channels, kernel_size=4, stride=2, padding=0) if circular_padding \ else nn.Conv2d(in_channels=in_channels, out_channels=out_channels, kernel_size=4, stride=2, padding=1) def pad_circular(self, x, pad): """ :param x: shape [H, W] :param pad: int >= 0 :return: """ x = torch.cat([x, x[:, :, 0:pad]], dim=2) x = torch.cat([x, x[:, :, :, 0:pad]], dim=3) x = torch.cat([x[:, :, -2 * pad:-pad], x], dim=2) x = torch.cat([x[:, :, :, -2 * pad:-pad], x], dim=3) return x def forward(self, x): if self.circular_padding: return self.layer(self.pad_circular(x, 1)) return self.layer(x) class CSWM(nn.Module): def __init__(self, input_dim, out_onehots, in_channels, z_dim, num_filters, mode='single_encoder', temp=1, num_layers=3, normalization="none", gt_extractor=False): super(CSWM, self).__init__() self.z_dim = z_dim self.num_filters = num_filters self.temp = temp self.mode = mode self.input_dim = input_dim self.hdim = input_dim self.out_onehots = out_onehots self.in_channels = in_channels self.num_layers = num_layers self.normalization = normalization self.gt_extractor = gt_extractor self.ln = nn.LayerNorm(self.z_dim) # normalize before gumbel softmax # Object Extractor CNN self.object_extractor = nn.ModuleList() self.object_extractor.append(nn.Conv2d(in_channels=self.in_channels, out_channels=num_filters, kernel_size=3, padding=1)) self.object_extractor.append(self.get_norm_layer(normalization)) self.object_extractor.append(nn.ReLU()) for i in range(self.num_layers): self.object_extractor.append(nn.Conv2d(in_channels=num_filters, out_channels=num_filters, kernel_size=3, padding=1)) self.object_extractor.append(self.get_norm_layer(normalization)) self.object_extractor.append(nn.ReLU()) self.object_extractor.append(nn.Conv2d(in_channels=num_filters, out_channels=self.out_onehots, kernel_size=3, padding=1)) self.object_extractor.append(nn.Sigmoid()) # Object Encoder MLP self.num_hiddens = num_filters * 16 self.object_encoder = nn.ModuleList([ nn.Linear(self.hdim * self.hdim, self.num_hiddens), nn.ReLU(), nn.Linear(self.num_hiddens, self.num_hiddens), nn.LayerNorm(self.num_hiddens), nn.ReLU(), nn.Linear(self.num_hiddens, self.z_dim), ]) def get_norm_layer(self, normalization): if normalization == 'batchnorm': return nn.BatchNorm2d(self.num_filters) elif normalization == 'layernorm': return nn.LayerNorm([self.num_filters, self.hdim, self.hdim]) elif normalization == "none": return nn.Identity() else: raise NotImplementedError("normalization type not recognized: %s" % normalization) def expand_input(self, input): return input.repeat_interleave(4, dim=3).repeat_interleave(4, dim=2) def conv_forward(self, inputs): batch_size = inputs.size(0) x = inputs*10 # if inputs.size(2) == 16: # x = self.expand_input(x) if self.gt_extractor: attn_maps = x else: for layer in self.object_extractor: x = layer(x) attn_maps = x x = x.view(batch_size, self.out_onehots, -1) for layer in self.object_encoder: x = layer(x) return x, attn_maps def vis(self, inputs): x, attn_maps = self.conv_forward(inputs) x = torch.argmax(x, dim=2) return x def get_attn_map_reg(self, attn_maps): reg = 0 for m_k in attn_maps: reg += torch.mean(torch.min(m_k**2, (1-m_k)**2)) return -reg def forward(self, inputs, continuous=False): x, attn_maps = self.conv_forward(inputs) reg = self.get_attn_map_reg(attn_maps) if self.mode == 'continuous': return x, reg, attn_maps elif self.mode == 'single_encoder': x = self.ln(x) x = F.gumbel_softmax(x, dim=2, tau=self.temp, hard=True) return x, reg, attn_maps elif self.mode == 'double_encoder': if continuous: x = F.softmax(x, dim=2) return x, reg, attn_maps else: x = self.ln(x) x = F.gumbel_softmax(x, dim=2, tau=self.temp, hard=True) return x, reg, attn_maps else: raise NotImplementedError class CSWMCircular(nn.Module): # more expressive attention module directly on input def __init__(self, input_dim, out_onehots, in_channels, z_dim, num_filters, mode='single_encoder', temp=1, num_layers=2, normalization="none", gt_extractor=False, conv_seq='same', circular_padding=False, downsampling_by=1): super(CSWM, self).__init__() self.z_dim = z_dim self.num_filters = num_filters self.temp = temp self.mode = mode self.input_dim = input_dim self.out_onehots = out_onehots self.in_channels = 3 self.num_layers = num_layers self.normalization = normalization self.gt_extractor = gt_extractor self.conv_seq = conv_seq.split('-') if len(self.conv_seq) == 1: self.conv_seq = self.conv_seq*(2+num_layers) else: assert len(self.conv_seq) == 2+num_layers self.ln = nn.LayerNorm(self.z_dim) # normalize before gumbel softmax self.downsampling_by = downsampling_by # Object Extractor CNN self.object_extractor = nn.ModuleList() if downsampling_by > 1: self.object_extractor.append(nn.AvgPool2d( downsampling_by, downsampling_by )) self.object_extractor.append(CircularConv2d(self.conv_seq[0], self.in_channels, num_filters, circular_padding)) self.object_extractor.append(self.get_norm_layer(normalization)) self.object_extractor.append(nn.ReLU()) for i in range(self.num_layers): self.object_extractor.append(CircularConv2d(self.conv_seq[i+1], num_filters, num_filters, circular_padding)) self.object_extractor.append(self.get_norm_layer(normalization)) self.object_extractor.append(nn.ReLU()) self.object_extractor.append(CircularConv2d(self.conv_seq[-1], num_filters, self.out_onehots, circular_padding)) self.object_extractor.append(nn.Sigmoid()) # Get h_dim self.hdim = self.get_h_dim() # Object Encoder MLP self.num_hiddens = num_filters * 16 self.object_encoder = nn.ModuleList([ nn.Linear(self.hdim * self.hdim, self.num_hiddens), nn.ReLU(), nn.Linear(self.num_hiddens, self.num_hiddens), nn.LayerNorm(self.num_hiddens), nn.ReLU(), nn.Linear(self.num_hiddens, self.z_dim), ]) def get_h_dim(self): size = self.input_dim / self.downsampling_by for conv_size in self.conv_seq: if conv_size == 'half': size /= 2 return int(size) # return self.object_extractor(torch.zeros((1, self.in_channels, self.input_dim, self.input_dim))).shape[-1] def get_norm_layer(self, normalization): if normalization == 'batchnorm': return nn.BatchNorm2d(self.num_filters) elif normalization == 'layernorm': return nn.LayerNorm([self.num_filters, self.hdim, self.hdim]) elif normalization == "none": return nn.Identity() else: raise NotImplementedError("normalization type not recognized: %s" % normalization) return def expand_input(self, input): return input.repeat_interleave(4, dim=3).repeat_interleave(4, dim=2) def conv_forward(self, inputs): batch_size = inputs.size(0) x = inputs * 10 # if inputs.size(2) == 16: # x = self.expand_input(x) if self.gt_extractor: attn_maps = x else: for layer in self.object_extractor: x = layer(x) attn_maps = x x = x.view(batch_size, self.out_onehots, -1) for layer in self.object_encoder: x = layer(x) return x, attn_maps def vis(self, inputs): x, attn_maps = self.conv_forward(inputs) x = torch.argmax(x, dim=2) return x def get_attn_map_reg(self, attn_maps): reg = 0 for m_k in attn_maps: reg += torch.mean(torch.min(m_k**2, (1-m_k)**2)) return -reg def forward(self, inputs, continuous=False): x, attn_maps = self.conv_forward(inputs) reg = self.get_attn_map_reg(attn_maps) if self.mode == 'continuous': return x, reg, attn_maps elif self.mode == 'single_encoder': x = self.ln(x) x = F.gumbel_softmax(x, dim=2, tau=self.temp, hard=True) return x, reg, attn_maps elif self.mode == 'double_encoder': if continuous: x = F.softmax(x, dim=2) return x, reg, attn_maps else: x = self.ln(x) x = F.gumbel_softmax(x, dim=2, tau=self.temp, hard=True) return x, reg, attn_maps else: raise NotImplementedError class CSWMKey(nn.Module): def __init__(self, input_dim, in_channels, z_dim, num_filters=32, out_agent_onehots=1, out_key_onehots=1, mode='single_encoder', temp=1, num_layers=3, normalization="none"): super(CSWMKey, self).__init__() self.z_dim = z_dim self.num_filters = num_filters self.temp = temp self.mode = mode self.input_dim = input_dim self.hdim = input_dim self.out_key_onehots = out_key_onehots self.out_agent_onehots = out_agent_onehots self.in_channels = in_channels self.num_layers = num_layers self.normalization = normalization self.out_onehots = self.out_agent_onehots + self.out_key_onehots self.ln = nn.LayerNorm(self.z_dim) # normalize before gumbel softmax self.ln_k = nn.LayerNorm(2) # for key output # Object Extractor CNN self.object_extractor = nn.ModuleList() self.object_extractor.append(nn.Conv2d(in_channels=self.in_channels, out_channels=num_filters, kernel_size=3, padding=1)) self.object_extractor.append(self.get_norm_layer(normalization)) self.object_extractor.append(nn.ReLU()) for i in range(self.num_layers): self.object_extractor.append(nn.Conv2d(in_channels=num_filters, out_channels=num_filters, kernel_size=3, padding=1)) self.object_extractor.append(self.get_norm_layer(normalization)) self.object_extractor.append(nn.ReLU()) self.object_extractor.append(nn.Conv2d(in_channels=num_filters, out_channels=self.out_agent_onehots+self.out_key_onehots, kernel_size=3, padding=1)) self.object_extractor.append(nn.Sigmoid()) # Object Encoder MLP self.num_hiddens = num_filters * 16 self.object_encoder_agent = nn.ModuleList([ nn.Linear(self.hdim * self.hdim, self.num_hiddens), nn.ReLU(), nn.Linear(self.num_hiddens, self.num_hiddens), nn.LayerNorm(self.num_hiddens), nn.ReLU(), nn.Linear(self.num_hiddens, self.z_dim), ]) self.object_encoder_key = nn.ModuleList([ nn.Linear(self.hdim * self.hdim, self.num_hiddens), nn.ReLU(), nn.Linear(self.num_hiddens, self.num_hiddens), nn.LayerNorm(self.num_hiddens), nn.ReLU(), nn.Linear(self.num_hiddens, 2),]) # binary for key def get_norm_layer(self, normalization): if normalization == 'batchnorm': return nn.BatchNorm2d(self.num_filters) elif normalization == 'layernorm': return nn.LayerNorm([self.num_filters, self.hdim, self.hdim]) elif normalization == "none": return nn.Identity() else: raise NotImplementedError("normalization type not recognized: %s" % normalization) def expand_input(self, input): return input.repeat_interleave(4, dim=3).repeat_interleave(4, dim=2) def conv_forward(self, inputs): batch_size = inputs.size(0) x = inputs*10 for layer in self.object_extractor: x = layer(x) attn_maps = x x = x.reshape(batch_size, self.out_agent_onehots+self.out_key_onehots, -1).contiguous() x_a = x[:, :self.out_agent_onehots, :] x_k = x[:, self.out_agent_onehots:, :] for layer in self.object_encoder_agent: x_a = layer(x_a) for layer in self.object_encoder_key: x_k = layer(x_k) return x_a, x_k, attn_maps def vis(self, inputs): x_a, x_k, attn_maps = self.conv_forward(inputs) x_a = torch.argmax(x_a, dim=2) x_k = torch.argmax(x_k, dim=2) x = torch.cat((x_a, x_k), dim=1) # [batch_size, out_onehots, z_dim] return x def forward(self, inputs, continuous=False): batch_size = inputs.size(0) x_a, x_k, attn_maps = self.conv_forward(inputs) _ = None if self.mode == 'continuous': key_padded = torch.zeros((batch_size, self.out_key_onehots, self.z_dim)).cuda() key_padded[:, :, :2] = x_k x = torch.cat((x_a, key_padded), dim=1) # [batch_size, out_onehots, z_dim] return x, _, attn_maps elif self.mode == 'single_encoder': x_a = self.ln(x_a) x_a = F.gumbel_softmax(x_a, dim=2, tau=self.temp, hard=True) x_k = F.softmax(x_k, dim=2) key_padded = torch.zeros_like(x_a) key_padded[:, :, :2] = x_k x = torch.cat((x_a, key_padded), dim=1) # [batch_size, out_onehots, z_dim] return x_a, x_k, attn_maps elif self.mode == 'double_encoder': if continuous: x_a = F.softmax(x_a, dim=2) x_k = F.softmax(x_k, dim=2) key_padded = torch.zeros((batch_size, self.out_key_onehots, self.z_dim)).cuda() key_padded[:, :, :2] = x_k x = torch.cat((x_a, key_padded), dim=1) # [batch_size, out_onehots, z_dim] return x, _, attn_maps else: x_a = self.ln(x_a) x_a = F.gumbel_softmax(x_a, dim=2, tau=self.temp, hard=True) # x_k = self.ln_k(x_k) # x_k = F.gumbel_softmax(x_k, dim=2, tau=self.temp, hard=True) x_k = F.softmax(x_k, dim=2) key_padded = torch.zeros((batch_size, self.out_key_onehots, self.z_dim)).cuda() key_padded[:, :, :2] = x_k x = torch.cat((x_a, key_padded), dim=1) # [batch_size, out_onehots, z_dim] return x, _, attn_maps else: raise NotImplementedError class CSWMKeyV2(nn.Module): ''' Same as CSWMKey but switches the order of channel input to the object encoder ''' def __init__(self, input_dim, in_channels, z_dim, num_filters=32, out_agent_onehots=1, out_key_onehots=1, mode='single_encoder', temp=1, num_layers=3, normalization="none", scope=0): super(CSWMKeyV2, self).__init__() self.z_dim = z_dim self.num_filters = num_filters self.temp = temp self.mode = mode self.input_dim = input_dim self.hdim = input_dim self.out_key_onehots = out_key_onehots self.out_agent_onehots = out_agent_onehots self.scope = scope self.in_channels = in_channels self.num_layers = num_layers self.normalization = normalization self.out_onehots = self.out_agent_onehots + self.out_key_onehots self.ln = nn.LayerNorm(self.z_dim) # normalize before gumbel softmax self.ln_k = nn.LayerNorm(2) # for key output # Object Extractor CNN self.object_extractor = nn.ModuleList() self.object_extractor.append(nn.Conv2d(in_channels=self.in_channels, out_channels=num_filters, kernel_size=3, padding=1)) self.object_extractor.append(self.get_norm_layer(normalization)) self.object_extractor.append(nn.ReLU()) for i in range(self.num_layers): self.object_extractor.append(nn.Conv2d(in_channels=num_filters, out_channels=num_filters, kernel_size=3, padding=1)) self.object_extractor.append(self.get_norm_layer(normalization)) self.object_extractor.append(nn.ReLU()) self.object_extractor.append(nn.Conv2d(in_channels=num_filters, out_channels=self.out_agent_onehots+self.out_key_onehots, kernel_size=3, padding=1)) self.object_extractor.append(nn.Sigmoid()) # Object Encoder MLP self.num_hiddens = num_filters * 16 self.object_encoder_agent = nn.ModuleList([ nn.Linear(self.hdim * self.hdim, self.num_hiddens), nn.ReLU(), nn.Linear(self.num_hiddens, self.num_hiddens), nn.LayerNorm(self.num_hiddens), nn.ReLU(), nn.Linear(self.num_hiddens, self.z_dim), ]) self.object_encoder_key = nn.ModuleList([ nn.Linear(self.hdim * self.hdim, self.num_hiddens), nn.ReLU(), nn.Linear(self.num_hiddens, self.num_hiddens), nn.LayerNorm(self.num_hiddens), nn.ReLU(), nn.Linear(self.num_hiddens, 2),]) # binary for key def get_norm_layer(self, normalization): if normalization == 'batchnorm': return nn.BatchNorm2d(self.num_filters) elif normalization == 'layernorm': return nn.LayerNorm([self.num_filters, self.hdim, self.hdim]) elif normalization == "none": return nn.Identity() else: raise NotImplementedError("normalization type not recognized: %s" % normalization) def expand_input(self, input): return input.repeat_interleave(4, dim=3).repeat_interleave(4, dim=2) def conv_forward(self, inputs): batch_size = inputs.size(0) x = inputs*10 for layer in self.object_extractor: x = layer(x) attn_maps = x x = x.reshape(batch_size, self.out_agent_onehots+self.out_key_onehots, -1).contiguous() x_k = x[:, :self.out_key_onehots, :] x_a = x[:, self.out_key_onehots:, :] for layer in self.object_encoder_agent: x_a = layer(x_a) for layer in self.object_encoder_key: x_k = layer(x_k) return x_a, x_k, attn_maps def vis(self, inputs): x_a, x_k, attn_maps = self.conv_forward(inputs) x_a = torch.argmax(x_a, dim=2) x_k = torch.argmax(x_k, dim=2) x = torch.cat((x_a, x_k), dim=1) # [batch_size, out_onehots, z_dim] return x def forward(self, inputs, continuous=False): batch_size = inputs.size(0) x_a, x_k, attn_maps = self.conv_forward(inputs) _ = None if self.mode == 'continuous': key_padded = torch.zeros((batch_size, self.out_key_onehots, self.z_dim)).cuda() key_padded[:, :, :2] = x_k x = torch.cat((x_a, key_padded), dim=1) # [batch_size, out_onehots, z_dim] return x, _, attn_maps elif self.mode == 'single_encoder': x_a = self.ln(x_a) x_a = F.gumbel_softmax(x_a, dim=2, tau=self.temp, hard=True) # x_k = self.ln_k(x_k) # x_k = F.gumbel_softmax(x_k, dim=2, tau=self.temp, hard=True) x_k = F.softmax(x_k, dim=2) # key_padded = torch.zeros_like(x_a) # key_padded[:, :, :2] = x_k # x = torch.cat((x_a, key_padded), dim=1) # [batch_size, out_onehots, z_dim] return x_a, x_k, attn_maps elif self.mode == 'double_encoder': if continuous: x_a = F.softmax(x_a, dim=2) x_k = F.softmax(x_k, dim=2) key_padded = torch.zeros((batch_size, self.out_key_onehots, self.z_dim)).cuda() key_padded[:, :, :2] = x_k x = torch.cat((x_a, key_padded), dim=1) # [batch_size, out_onehots, z_dim] return x, _, attn_maps else: x_a = self.ln(x_a) x_a = F.gumbel_softmax(x_a, dim=2, tau=self.temp, hard=True) # x_k = self.ln_k(x_k) # x_k = F.gumbel_softmax(x_k, dim=2, tau=self.temp, hard=True) x_k = F.softmax(x_k, dim=2) key_padded = torch.zeros((batch_size, self.out_key_onehots, self.z_dim)).cuda() key_padded[:, :, :2] = x_k x = torch.cat((x_a, key_padded), dim=1) # [batch_size, out_onehots, z_dim] return x, _, attn_maps else: raise NotImplementedError def get_hinge_loss(model, state, next_state, hinge=1): batch_size = state.size(0) perm = np.random.permutation(batch_size) neg_state = state[perm] pos_loss = model.energy(state, next_state) zeros = torch.zeros_like(pos_loss) pos_loss = pos_loss.mean() neg_loss = torch.max( zeros, hinge - model.energy( state, neg_state)).mean() loss = pos_loss + neg_loss return loss def get_loss(loss_form, model, z_a, z_pos, ce_temp=1.): if loss_form == 'ce': CE = nn.CrossEntropyLoss() logits = model.compute_logits(z_a, z_pos, ce_temp) labels = torch.arange(logits.shape[0]).long().cuda() return CE(logits, labels) elif loss_form == 'hinge': return get_hinge_loss(model, z_a, z_pos) else: raise NotImplementedError("Loss form not recognized: " + loss_form) class init_weights_func(object): def __init__(self, scale_factor=1.): self.scale_factor=scale_factor def __call__(self, m): if type(m) == nn.Linear or type(m) == nn.Conv2d: torch.nn.init.xavier_uniform_(m.weight, self.scale_factor) def get_discrete_representation(model, sample_ims, single=False): ''' Computes and returns forward pass of CPC model for a batch of processed images :param model: CPC model :param sample_ims: batch of input images (any length) :return: np array of z outputs [sample_size, model.z_dim] ''' if single: return model.encode(np_to_var(sample_ims).unsqueeze(0).permute(0, 3, 1, 2), vis=True).squeeze( 0).cpu().numpy() max_batch_size = 64 idx = 0 z_labels = [] while idx < len(sample_ims): zs = model.encode(np_to_var(sample_ims[idx:idx + max_batch_size]).permute(0, 3, 1, 2), vis=True).cpu().numpy() z_labels.append(zs) idx += max_batch_size return np.concatenate(z_labels) def get_hamming_dists_samples(model, buffer): distances = [] for idx in range(len(buffer)): traj = buffer[idx] zs = get_discrete_representation(model, traj) for i in range(len(buffer[idx]) - 1): d = np.sum((zs[i] != zs[i+1])) distances.append(d) distances = np.array(distances) return distances
41.111607
141
0.541725
4,586
36,836
4.105321
0.061273
0.016997
0.042386
0.04382
0.771605
0.749721
0.727413
0.713443
0.69958
0.691294
0
0.01304
0.356716
36,836
896
142
41.111607
0.781482
0.057525
0
0.72332
0
0
0.022535
0.000637
0
0
0
0
0.003953
1
0.060606
false
0
0.006588
0.006588
0.16469
0
0
0
0
null
0
0
0
0
1
1
1
0
1
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
5
722bf2cdd22337500bae88f1f96a8f10af2d79d1
26
py
Python
beir/reranking/__init__.py
ArthurCamara/beir
2739990b719f2d4814d88473cf9965d92d4f4c18
[ "Apache-2.0" ]
24
2022-03-20T18:48:52.000Z
2022-03-31T08:28:42.000Z
beir/reranking/__init__.py
ArthurCamara/beir
2739990b719f2d4814d88473cf9965d92d4f4c18
[ "Apache-2.0" ]
9
2022-03-19T14:50:30.000Z
2022-03-30T17:31:18.000Z
beir/reranking/__init__.py
ArthurCamara/beir
2739990b719f2d4814d88473cf9965d92d4f4c18
[ "Apache-2.0" ]
3
2022-03-25T15:45:14.000Z
2022-03-25T17:51:23.000Z
from .rerank import Rerank
26
26
0.846154
4
26
5.5
0.75
0
0
0
0
0
0
0
0
0
0
0
0.115385
26
1
26
26
0.956522
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
0
0
0
5
9d0a50622613f5fdeaac5bd96116e6bbef656368
94
py
Python
testsuite/verilator_sim/gearbox/gb_66_40/data/convert.py
hanw/sonic-firmware
9761c29e294346d2c57128b0b8371b2d7e345f32
[ "Apache-2.0" ]
1
2019-06-12T20:48:56.000Z
2019-06-12T20:48:56.000Z
testsuite/verilator_sim/gearbox/gb_66_40/data/convert.py
hanw/sonic-firmware
9761c29e294346d2c57128b0b8371b2d7e345f32
[ "Apache-2.0" ]
null
null
null
testsuite/verilator_sim/gearbox/gb_66_40/data/convert.py
hanw/sonic-firmware
9761c29e294346d2c57128b0b8371b2d7e345f32
[ "Apache-2.0" ]
null
null
null
from bitstream import * conv_64_to_66 ('../../../scripts/test_vector.dat', 'test_vector.hex')
31.333333
69
0.712766
14
94
4.428571
0.857143
0.322581
0
0
0
0
0
0
0
0
0
0.045977
0.074468
94
2
70
47
0.666667
0
0
0
0
0
0.5
0.340426
0
0
0
0
0
1
0
true
0
0.5
0
0.5
0
1
0
0
null
1
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
0
0
0
5
9d28f8f0984b2e17c434b15a55bb6ceee5903696
123
py
Python
app/linear_equations/iterative/__init__.py
sgg10/arsp_solver_api
ad1d2f52eea58338d4f26128d5130eb326d529fb
[ "MIT" ]
null
null
null
app/linear_equations/iterative/__init__.py
sgg10/arsp_solver_api
ad1d2f52eea58338d4f26128d5130eb326d529fb
[ "MIT" ]
null
null
null
app/linear_equations/iterative/__init__.py
sgg10/arsp_solver_api
ad1d2f52eea58338d4f26128d5130eb326d529fb
[ "MIT" ]
null
null
null
from .jacobi import Jacobi from .gauss_seidel import GaussSeidel from .sor import SOR from .vandermonde import Vandermonde
24.6
37
0.837398
17
123
6
0.470588
0
0
0
0
0
0
0
0
0
0
0
0.130081
123
4
38
30.75
0.953271
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
5
19b47b47546fa47b0be773c0d3fb1bb8118e4dca
57
py
Python
esl/economics/markets/order_book/__init__.py
vishalbelsare/ESL
cea6feda1e588d5f441742dbb1e4c5479b47d357
[ "Apache-2.0" ]
37
2019-10-13T12:23:32.000Z
2022-03-19T10:40:29.000Z
esl/economics/markets/order_book/__init__.py
vishalbelsare/ESL
cea6feda1e588d5f441742dbb1e4c5479b47d357
[ "Apache-2.0" ]
3
2020-03-20T04:44:06.000Z
2021-01-12T06:18:33.000Z
esl/economics/markets/order_book/__init__.py
vishalbelsare/ESL
cea6feda1e588d5f441742dbb1e4c5479b47d357
[ "Apache-2.0" ]
10
2019-11-06T15:59:06.000Z
2021-08-09T17:28:24.000Z
from esl._esl._economics._markets._order_book import *
14.25
54
0.807018
8
57
5.125
0.875
0
0
0
0
0
0
0
0
0
0
0
0.105263
57
3
55
19
0.803922
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
5
19db7784a7ea8c425195f73119871a7438375092
189
py
Python
src/brands/admin.py
Bakhtiyar-Habib/CSE327-Project
4126b40eb398e4cf13b49136e552775c5f3b0635
[ "bzip2-1.0.6" ]
null
null
null
src/brands/admin.py
Bakhtiyar-Habib/CSE327-Project
4126b40eb398e4cf13b49136e552775c5f3b0635
[ "bzip2-1.0.6" ]
null
null
null
src/brands/admin.py
Bakhtiyar-Habib/CSE327-Project
4126b40eb398e4cf13b49136e552775c5f3b0635
[ "bzip2-1.0.6" ]
null
null
null
from django.contrib import admin # Register your models here. from .models import Brands admin.site.register(Brands) from .models import Brands_detail admin.site.register(Brands_detail)
18.9
34
0.814815
27
189
5.62963
0.444444
0.131579
0.210526
0.289474
0
0
0
0
0
0
0
0
0.116402
189
10
34
18.9
0.91018
0.137566
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
0.6
0
0.6
0
1
0
0
null
0
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
0
0
0
5
19e65112554ba2eb02bf578110a545436717cf47
60
py
Python
factum/__init__.py
propername/blink
fd9a9fe5a461d2ee74850aed53e7a7e8f6672b5e
[ "CC0-1.0" ]
null
null
null
factum/__init__.py
propername/blink
fd9a9fe5a461d2ee74850aed53e7a7e8f6672b5e
[ "CC0-1.0" ]
4
2020-10-03T22:55:13.000Z
2020-10-04T22:35:05.000Z
factum/__init__.py
propername/factum
fd9a9fe5a461d2ee74850aed53e7a7e8f6672b5e
[ "CC0-1.0" ]
null
null
null
from factum.lib.objects import DataFact, MindlessFact, Fact
30
59
0.833333
8
60
6.25
1
0
0
0
0
0
0
0
0
0
0
0
0.1
60
1
60
60
0.925926
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
5
19fd2790c308ff3107f71aba554fab5dde2be39b
138
py
Python
app/logic/logger/admin.py
imvu/bluesteel
ab52133249a693b3cd2d8593c5d47408a3b0fce6
[ "MIT" ]
10
2017-01-13T06:28:04.000Z
2020-11-18T13:00:26.000Z
app/logic/logger/admin.py
imvu/bluesteel
ab52133249a693b3cd2d8593c5d47408a3b0fce6
[ "MIT" ]
null
null
null
app/logic/logger/admin.py
imvu/bluesteel
ab52133249a693b3cd2d8593c5d47408a3b0fce6
[ "MIT" ]
2
2018-03-29T14:10:53.000Z
2019-11-20T08:21:57.000Z
""" Admin file """ from django.contrib import admin from app.logic.logger.models.LogModel import LogEntry admin.site.register(LogEntry)
19.714286
53
0.782609
19
138
5.684211
0.736842
0
0
0
0
0
0
0
0
0
0
0
0.108696
138
6
54
23
0.878049
0.072464
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
0.666667
0
0.666667
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
5
c221eb4db0d383cf52accc786ceef53b9de40731
176
py
Python
Global.py
TechLabCommunity/SaintPeterTalent
eb80237de4d73f3a99e82e02edb714f5057bd559
[ "MIT" ]
1
2019-01-03T12:59:19.000Z
2019-01-03T12:59:19.000Z
Global.py
TechLabCommunity/SaintPeterTalent
eb80237de4d73f3a99e82e02edb714f5057bd559
[ "MIT" ]
null
null
null
Global.py
TechLabCommunity/SaintPeterTalent
eb80237de4d73f3a99e82e02edb714f5057bd559
[ "MIT" ]
null
null
null
import xml.etree.ElementTree as ET PATH_CONFIG = './config.xml' def get_value_config(fromroot, key): return ET.parse(PATH_CONFIG).getroot().find(fromroot).find(key).text
25.142857
72
0.755682
27
176
4.777778
0.666667
0.155039
0
0
0
0
0
0
0
0
0
0
0.102273
176
6
73
29.333333
0.816456
0
0
0
0
0
0.068182
0
0
0
0
0
0
1
0.25
false
0
0.25
0.25
0.75
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
0
1
1
0
0
5
5f0eac62a3fc8da3fe4e9bb5995a3872c9839ab4
114
py
Python
02/x/5 - Sorting Scramble.py
Surferlul/csc-python-solutions
bea99e5e1e344d17fb2cb29d8bcbc6b108e24cee
[ "MIT" ]
null
null
null
02/x/5 - Sorting Scramble.py
Surferlul/csc-python-solutions
bea99e5e1e344d17fb2cb29d8bcbc6b108e24cee
[ "MIT" ]
null
null
null
02/x/5 - Sorting Scramble.py
Surferlul/csc-python-solutions
bea99e5e1e344d17fb2cb29d8bcbc6b108e24cee
[ "MIT" ]
null
null
null
tmp = max(x, y) x = min(x, y) y = tmp tmp = max(y, z) y = min(y, z) z = tmp tmp = max(x, y) x = min(x, y) y = tmp
11.4
15
0.473684
30
114
1.8
0.2
0.148148
0.259259
0.296296
0.666667
0.666667
0.666667
0.666667
0.666667
0.666667
0
0
0.289474
114
9
16
12.666667
0.666667
0
0
0.666667
0
0
0
0
0
0
0
0
0
1
0
false
0
0
0
0
0
0
0
1
null
0
1
1
0
0
0
0
0
1
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
5
5f2ed8e292901199121bf1605f4a2d0ec2fc9f3d
1,690
py
Python
core/auth/viewsets/__init__.py
Nathan-E-White/DjangoBackend-ReactFrontend
64528a5d42c6b25347114bf5519d311ef65a5547
[ "Apache-2.0" ]
null
null
null
core/auth/viewsets/__init__.py
Nathan-E-White/DjangoBackend-ReactFrontend
64528a5d42c6b25347114bf5519d311ef65a5547
[ "Apache-2.0" ]
1
2021-10-13T07:55:16.000Z
2021-10-13T07:55:16.000Z
core/auth/viewsets/__init__.py
Nathan-E-White/DjangoBackend-ReactFrontend
64528a5d42c6b25347114bf5519d311ef65a5547
[ "Apache-2.0" ]
null
null
null
#! /usr/bin/env python """ ------------------------------------------------------------------------------------------------------------------------ ____ __ __ __ __ __ / __ \__ __/ /_/ /_ ____ ____ / / / /__ ____ _____/ /__ _____ ____________ / /_/ / / / / __/ __ \/ __ \/ __ \ / /_/ / _ \/ __ `/ __ / _ \/ ___/ ____________ /_____/_____/ / ____/ /_/ / /_/ / / / /_/ / / / / / __ / __/ /_/ / /_/ / __/ / /_____/_____/ /_/ \__, /\__/_/ /_/\____/_/ /_/ /_/ /_/\___/\__,_/\__,_/\___/_/ /____/ ------------------------------------------------------------------------------------------------------------------------ :FILE: DjangoBackend-ReactFrontend/core/auth/viewsets/__init__.py :AUTHOR: Nathan E White, PhD :ABOUT: Initializer for the core.auth app viewsets package ------------------------------------------------------------------------------------------------------------------------ :NOTES: For more information on this file, see: https://stackoverflow.com/questions/448271/what-is-init-py-for ------------------------------------------------------------------------------------------------------------------------ """ # <BOF> # Imports --- User Package Imports: Pulls in the viewsets defined into the package into a single place from .register import RegistrationViewSet from .login import LoginViewSet from .refresh import RefreshViewSet # ---------------------------------------------------------------------------------------------------------------------- # <EOF>
58.275862
121
0.334911
74
1,690
5.459459
0.743243
0.039604
0
0
0
0
0
0
0
0
0
0.004542
0.218343
1,690
28
122
60.357143
0.301287
0.905917
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
1
1
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
5
a04e36796969dd9543a027a40c18bbf799621c08
98
py
Python
test.py
Iemane291/fluffy-dollop
6ad9de4b5a75795ea25c3e23720352abbd7912ae
[ "MIT" ]
null
null
null
test.py
Iemane291/fluffy-dollop
6ad9de4b5a75795ea25c3e23720352abbd7912ae
[ "MIT" ]
null
null
null
test.py
Iemane291/fluffy-dollop
6ad9de4b5a75795ea25c3e23720352abbd7912ae
[ "MIT" ]
null
null
null
class console: def log(*args, **kwargs): print(*args, **kwargs) console.log("Hello World")
16.333333
27
0.642857
13
98
4.846154
0.692308
0.31746
0
0
0
0
0
0
0
0
0
0
0.163265
98
5
28
19.6
0.768293
0
0
0
0
0
0.112245
0
0
0
0
0
0
1
0.25
true
0
0
0
0.5
0.25
1
0
0
null
1
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
1
0
0
0
0
0
0
5
a0bc17736911a9e0fd9daaea7021df71d5e92d65
44
py
Python
8.15.2.py
Rycarddo/livro_curso_intensivo_de_python
b90884d05018581e0a575a4c0ccdab9cdf8311b8
[ "MIT" ]
null
null
null
8.15.2.py
Rycarddo/livro_curso_intensivo_de_python
b90884d05018581e0a575a4c0ccdab9cdf8311b8
[ "MIT" ]
null
null
null
8.15.2.py
Rycarddo/livro_curso_intensivo_de_python
b90884d05018581e0a575a4c0ccdab9cdf8311b8
[ "MIT" ]
null
null
null
from modulo import * dizer_ola('Rycarddo')
11
21
0.75
6
44
5.333333
1
0
0
0
0
0
0
0
0
0
0
0
0.136364
44
3
22
14.666667
0.842105
0
0
0
0
0
0.181818
0
0
0
0
0
0
1
0
true
0
0.5
0
0.5
0
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
0
0
0
5
2645ecd5fe04ec7e8ad4d5ed868f6cb058d5afbb
64
py
Python
text/_position/_bounding/_bound.py
jedhsu/text
8525b602d304ac571a629104c48703443244545c
[ "Apache-2.0" ]
null
null
null
text/_position/_bounding/_bound.py
jedhsu/text
8525b602d304ac571a629104c48703443244545c
[ "Apache-2.0" ]
null
null
null
text/_position/_bounding/_bound.py
jedhsu/text
8525b602d304ac571a629104c48703443244545c
[ "Apache-2.0" ]
null
null
null
class ElementPositionBound( ElementPositioning, ): pass
12.8
27
0.734375
4
64
11.75
1
0
0
0
0
0
0
0
0
0
0
0
0.203125
64
4
28
16
0.921569
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0.25
0
0
0.25
0
1
0
1
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
1
0
0
0
0
0
5
265270ef526f80903423fccc2973169fcab7462f
15
py
Python
twisted/lore/scripts/__init__.py
ioggstream/twisted
34f9b1e3f097685839000c656332c66ee85be5d8
[ "Unlicense", "MIT" ]
267
2015-03-22T15:23:48.000Z
2022-03-05T21:57:34.000Z
twisted/lore/scripts/__init__.py
ioggstream/twisted
34f9b1e3f097685839000c656332c66ee85be5d8
[ "Unlicense", "MIT" ]
133
2015-03-21T15:13:43.000Z
2021-12-11T23:37:58.000Z
twisted/lore/scripts/__init__.py
ioggstream/twisted
34f9b1e3f097685839000c656332c66ee85be5d8
[ "Unlicense", "MIT" ]
119
2015-04-28T16:07:10.000Z
2022-03-18T03:49:48.000Z
"lore scripts"
7.5
14
0.733333
2
15
5.5
1
0
0
0
0
0
0
0
0
0
0
0
0.133333
15
1
15
15
0.846154
0.8
0
0
0
0
0.8
0
0
0
0
0
0
1
0
true
0
0
0
0
0
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
1
0
null
0
0
0
0
0
0
1
0
0
0
0
0
0
5
2659eb37a6da6a2d6118af0d3998bfa4aeee05a6
126
py
Python
scripts/plot_forces_How/How_2d.py
jhwnkim/nanopores
98b3dbb5d36464fbdc03f59d224d38e4255324ce
[ "MIT" ]
8
2016-09-07T01:59:31.000Z
2021-03-06T12:14:31.000Z
scripts/plot_forces_How/How_2d.py
jhwnkim/nanopores
98b3dbb5d36464fbdc03f59d224d38e4255324ce
[ "MIT" ]
null
null
null
scripts/plot_forces_How/How_2d.py
jhwnkim/nanopores
98b3dbb5d36464fbdc03f59d224d38e4255324ce
[ "MIT" ]
4
2017-12-06T17:43:01.000Z
2020-05-01T05:41:14.000Z
import numpy as np X_How_2d = np.array([[1.,4.5],[2.5,4.5],[2.5,1.1],[10.,1.1],[10.,-1.1],[2.5,-1.1],[2.5,-4.5],[1.,-4.5]])
25.2
104
0.47619
37
126
1.567568
0.351351
0.137931
0.103448
0.137931
0.172414
0
0
0
0
0
0
0.264957
0.071429
126
4
105
31.5
0.230769
0
0
0
0
0
0
0
0
0
0
0
0
1
0
false
0
0.5
0
0.5
0
0
0
1
null
0
0
0
0
0
0
0
0
0
0
1
0
0
1
0
0
1
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
0
0
0
5
2684c82c4a118f3fdeb6074da143562bb9c2e22a
58
py
Python
nameko_tracer/__init__.py
joeeeeey/nameko-tracer
0f9e3215134df8e39fed30aebd79e033178df680
[ "Apache-2.0" ]
12
2019-03-15T03:40:27.000Z
2022-02-11T17:21:41.000Z
nameko_tracer/__init__.py
joeeeeey/nameko-tracer
0f9e3215134df8e39fed30aebd79e033178df680
[ "Apache-2.0" ]
9
2017-08-21T08:37:43.000Z
2018-09-10T17:06:59.000Z
nameko_tracer/__init__.py
joeeeeey/nameko-tracer
0f9e3215134df8e39fed30aebd79e033178df680
[ "Apache-2.0" ]
5
2017-08-25T18:02:57.000Z
2022-01-24T04:11:10.000Z
from nameko_tracer.dependency import Tracer # noqa: F401
29
57
0.810345
8
58
5.75
0.875
0
0
0
0
0
0
0
0
0
0
0.06
0.137931
58
1
58
58
0.86
0.172414
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
5
cd196be36feb403d5cff8dcff5272c144fbb6d95
114
py
Python
petadopt/petapp/admin.py
Jenaleigh/172final
923b0b29c50c9e108018c9fab7d24c669e18e208
[ "Apache-2.0" ]
null
null
null
petadopt/petapp/admin.py
Jenaleigh/172final
923b0b29c50c9e108018c9fab7d24c669e18e208
[ "Apache-2.0" ]
null
null
null
petadopt/petapp/admin.py
Jenaleigh/172final
923b0b29c50c9e108018c9fab7d24c669e18e208
[ "Apache-2.0" ]
null
null
null
from django.contrib import admin # Register your models here. from .models import Pet admin.site.register(Pet)
14.25
32
0.780702
17
114
5.235294
0.647059
0
0
0
0
0
0
0
0
0
0
0
0.149123
114
7
33
16.285714
0.917526
0.22807
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
0.666667
0
0.666667
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
5
cd1b9e1fe6c17ed18128804e9dc0c45d6d6771f5
194
wsgi
Python
dataviva.wsgi
dogobox/datavivamaster
c89596778e2d8d01a2193b02ca5960bd17f4468d
[ "MIT" ]
null
null
null
dataviva.wsgi
dogobox/datavivamaster
c89596778e2d8d01a2193b02ca5960bd17f4468d
[ "MIT" ]
null
null
null
dataviva.wsgi
dogobox/datavivamaster
c89596778e2d8d01a2193b02ca5960bd17f4468d
[ "MIT" ]
null
null
null
import sys sys.path.insert(0, '/web/dataviva.info') from dataviva import app as application from werkzeug.debug import DebuggedApplication application = DebuggedApplication(application, True)
24.25
52
0.819588
24
194
6.625
0.666667
0.377358
0
0
0
0
0
0
0
0
0
0.005747
0.103093
194
7
53
27.714286
0.908046
0
0
0
0
0
0.092784
0
0
0
0
0
0
1
0
false
0
0.6
0
0.6
0
1
0
0
null
1
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
1
0
0
5
cd6bf065937417373a4c4c7587b1b1979613959c
39
py
Python
eg.py
paigelegustadoggo/codecraftlab-python
428c3f1ff614242c5c1179ae13179e8c7b2c585e
[ "bzip2-1.0.6" ]
null
null
null
eg.py
paigelegustadoggo/codecraftlab-python
428c3f1ff614242c5c1179ae13179e8c7b2c585e
[ "bzip2-1.0.6" ]
null
null
null
eg.py
paigelegustadoggo/codecraftlab-python
428c3f1ff614242c5c1179ae13179e8c7b2c585e
[ "bzip2-1.0.6" ]
null
null
null
def eg(): print ("eggo") eg()
7.8
19
0.410256
5
39
3.2
0.8
0
0
0
0
0
0
0
0
0
0
0
0.358974
39
4
20
9.75
0.64
0
0
0
0
0
0.117647
0
0
0
0
0
0
1
0.333333
true
0
0
0
0.333333
0.333333
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
1
0
0
0
0
0
0
5
cd74d7f6f51e6afe50085f79dc5c7494ed6f974f
63
py
Python
bnls/__init__.py
dkuwahara/bncs.py
ca88ae244df4d39316d81d1e894c657e1a980a6e
[ "MIT" ]
1
2020-02-09T11:02:28.000Z
2020-02-09T11:02:28.000Z
bnls/__init__.py
dkuwahara/bncs.py
ca88ae244df4d39316d81d1e894c657e1a980a6e
[ "MIT" ]
null
null
null
bnls/__init__.py
dkuwahara/bncs.py
ca88ae244df4d39316d81d1e894c657e1a980a6e
[ "MIT" ]
null
null
null
from bnls.packets import * from bnls.client import BnlsClient
15.75
34
0.809524
9
63
5.666667
0.666667
0.313725
0
0
0
0
0
0
0
0
0
0
0.142857
63
3
35
21
0.944444
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
0
null
1
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
0
0
0
5
26d1ca12fed816bf5d9f35ef45392fac17e36689
120
py
Python
Chapter 8/08/PaxHeader/recipe66.py
robert0714/Python-Testing-Cookbook-Second-Edition
c7c5d59e42e9ca2874faf12a6dd201736a45ca83
[ "MIT" ]
null
null
null
Chapter 8/08/PaxHeader/recipe66.py
robert0714/Python-Testing-Cookbook-Second-Edition
c7c5d59e42e9ca2874faf12a6dd201736a45ca83
[ "MIT" ]
null
null
null
Chapter 8/08/PaxHeader/recipe66.py
robert0714/Python-Testing-Cookbook-Second-Edition
c7c5d59e42e9ca2874faf12a6dd201736a45ca83
[ "MIT" ]
null
null
null
15 uid=2057284 20 ctime=1296431511 20 atime=1296485951 24 SCHILY.dev=234881026 23 SCHILY.ino=30638049 18 SCHILY.nlink=1
17.142857
23
0.825
21
120
4.714286
0.857143
0
0
0
0
0
0
0
0
0
0
0.527778
0.1
120
6
24
20
0.388889
0
0
0
0
0
0
0
0
0
0
0
0
0
null
null
0
0
null
null
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
1
0
0
1
0
0
1
0
0
0
0
0
0
0
null
0
0
0
0
1
0
0
0
0
0
0
0
0
5
26e15d21b653afa7b82a0764e2d1cb27f1994954
117
py
Python
canvas/canvas_point.py
TriumGroup/3d-cubes
6e91dbac9b9fcaca53acdb58d033210b21532b27
[ "MIT" ]
null
null
null
canvas/canvas_point.py
TriumGroup/3d-cubes
6e91dbac9b9fcaca53acdb58d033210b21532b27
[ "MIT" ]
null
null
null
canvas/canvas_point.py
TriumGroup/3d-cubes
6e91dbac9b9fcaca53acdb58d033210b21532b27
[ "MIT" ]
null
null
null
class CanvasPoint: def __init__(self, z_index, color): self.z_index = z_index self.color = color
23.4
39
0.641026
16
117
4.25
0.5
0.264706
0.294118
0
0
0
0
0
0
0
0
0
0.273504
117
4
40
29.25
0.8
0
0
0
0
0
0
0
0
0
0
0
0
1
0.25
false
0
0
0
0.5
0
1
0
0
null
1
1
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
0
0
0
0
0
5
26e1ee28b8ab32825ca01241906a2f621e1f350e
96
py
Python
venv/lib/python3.8/site-packages/cryptography/hazmat/primitives/kdf/concatkdf.py
Retraces/UkraineBot
3d5d7f8aaa58fa0cb8b98733b8808e5dfbdb8b71
[ "MIT" ]
1
2021-11-07T22:40:27.000Z
2021-11-07T22:40:27.000Z
venv/lib/python3.8/site-packages/cryptography/hazmat/primitives/kdf/concatkdf.py
Retraces/UkraineBot
3d5d7f8aaa58fa0cb8b98733b8808e5dfbdb8b71
[ "MIT" ]
19
2021-11-20T04:09:18.000Z
2022-03-23T15:05:55.000Z
venv/lib/python3.8/site-packages/cryptography/hazmat/primitives/kdf/concatkdf.py
Retraces/UkraineBot
3d5d7f8aaa58fa0cb8b98733b8808e5dfbdb8b71
[ "MIT" ]
null
null
null
/home/runner/.cache/pip/pool/1e/b3/d6/69d4e1220ca4b581b0af3e8c28b8a75970d94f3d5cfa4c683a69a9adc3
96
96
0.895833
9
96
9.555556
1
0
0
0
0
0
0
0
0
0
0
0.385417
0
96
1
96
96
0.510417
0
0
0
0
0
0
0
0
1
0
0
0
0
null
null
0
0
null
null
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
1
0
0
1
0
0
0
0
0
0
0
0
0
0
null
1
0
0
0
1
0
0
0
0
0
0
0
0
5
f86b00df3e580ea49dc39ec00e5040a8b7812044
224
py
Python
pycbrf/__init__.py
suhanoves/pycbrf
5a040b67567d98b264b10c75b49f6f3c2ea88731
[ "BSD-3-Clause" ]
51
2016-07-04T15:16:38.000Z
2022-03-05T10:08:51.000Z
pycbrf/__init__.py
suhanoves/pycbrf
5a040b67567d98b264b10c75b49f6f3c2ea88731
[ "BSD-3-Clause" ]
8
2018-07-01T08:12:31.000Z
2022-02-28T10:39:48.000Z
pycbrf/__init__.py
suhanoves/pycbrf
5a040b67567d98b264b10c75b49f6f3c2ea88731
[ "BSD-3-Clause" ]
10
2018-06-01T09:58:13.000Z
2022-03-22T18:50:39.000Z
from .banks import Banks from .rates import * from .rates import ExchangeRates VERSION = (1, 1, 0) """Application version number tuple.""" VERSION_STR = '.'.join(map(str, VERSION)) """Application version number string."""
22.4
41
0.714286
29
224
5.482759
0.517241
0.113208
0.188679
0
0
0
0
0
0
0
0
0.015544
0.138393
224
9
42
24.888889
0.80829
0
0
0
0
0
0.006897
0
0
0
0
0
0
1
0
false
0
0.6
0
0.6
0
1
0
0
null
0
1
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
1
0
0
5
f873cb2421ca2f2b5aa9537e2bf5caff42366e98
32
py
Python
python/testData/inspections/PyPep8NamingInspection/classNameWithTwoUnderscores.py
truthiswill/intellij-community
fff88cfb0dc168eea18ecb745d3e5b93f57b0b95
[ "Apache-2.0" ]
2
2019-04-28T07:48:50.000Z
2020-12-11T14:18:08.000Z
python/testData/inspections/PyPep8NamingInspection/classNameWithTwoUnderscores.py
truthiswill/intellij-community
fff88cfb0dc168eea18ecb745d3e5b93f57b0b95
[ "Apache-2.0" ]
173
2018-07-05T13:59:39.000Z
2018-08-09T01:12:03.000Z
python/testData/inspections/PyPep8NamingInspection/classNameWithTwoUnderscores.py
truthiswill/intellij-community
fff88cfb0dc168eea18ecb745d3e5b93f57b0b95
[ "Apache-2.0" ]
2
2020-03-15T08:57:37.000Z
2020-04-07T04:48:14.000Z
class __MyPrivateClass: pass
16
23
0.78125
3
32
7.666667
1
0
0
0
0
0
0
0
0
0
0
0
0.1875
32
2
24
16
0.884615
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0.5
0
0
0.5
0
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
1
0
0
0
0
0
5
f88fed92d01fbc945a62055c0aa934f38e434aa9
129
py
Python
digsby/src/msn/p8/__init__.py
ifwe/digsby
f5fe00244744aa131e07f09348d10563f3d8fa99
[ "Python-2.0" ]
35
2015-08-15T14:32:38.000Z
2021-12-09T16:21:26.000Z
digsby/src/msn/p8/__init__.py
niterain/digsby
16a62c7df1018a49eaa8151c0f8b881c7e252949
[ "Python-2.0" ]
4
2015-09-12T10:42:57.000Z
2017-02-27T04:05:51.000Z
digsby/src/msn/p8/__init__.py
niterain/digsby
16a62c7df1018a49eaa8151c0f8b881c7e252949
[ "Python-2.0" ]
15
2015-07-10T23:58:07.000Z
2022-01-23T22:16:33.000Z
from MSNP8Switchboard import MSNP8Switchboard as Switchboard from MSNP8Notification import MSNP8Notification as Notification
43
64
0.875969
12
129
9.416667
0.583333
0
0
0
0
0
0
0
0
0
0
0.035398
0.124031
129
2
65
64.5
0.964602
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
5
6ef85667870657bc45b0007446a7e7a26da6dfca
272
py
Python
source/base/helper.py
raldenprog/electronic_vote
9e55507bfd2fa51fee6cf3b11fbffe64f705cf01
[ "MIT" ]
null
null
null
source/base/helper.py
raldenprog/electronic_vote
9e55507bfd2fa51fee6cf3b11fbffe64f705cf01
[ "MIT" ]
null
null
null
source/base/helper.py
raldenprog/electronic_vote
9e55507bfd2fa51fee6cf3b11fbffe64f705cf01
[ "MIT" ]
null
null
null
def header_option(): return {'Access-Control-Allow-Origin': '*', 'Access-Control-Allow-Headers': '*', 'Access-Control-Allow-Methods': '*'} def check_session(header): session = header.get('session') or header.get('Session') if session: return session
30.222222
121
0.665441
32
272
5.59375
0.46875
0.217877
0.301676
0
0
0
0
0
0
0
0
0
0.154412
272
8
122
34
0.778261
0
0
0
0
0
0.367647
0.305147
0
0
0
0
0
1
0.333333
false
0
0
0.166667
0.666667
0
0
0
0
null
1
1
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
0
1
0
0
0
5
3e3f5838ccf7f8ee4bba4983994b5414f1f6f1f1
328
py
Python
cd_perf_promotion/engines/__init__.py
CDKGlobal/cd-performance-plugin
58176139ef744535b156b8ef5f187f38b683b2a5
[ "MIT" ]
null
null
null
cd_perf_promotion/engines/__init__.py
CDKGlobal/cd-performance-plugin
58176139ef744535b156b8ef5f187f38b683b2a5
[ "MIT" ]
null
null
null
cd_perf_promotion/engines/__init__.py
CDKGlobal/cd-performance-plugin
58176139ef744535b156b8ef5f187f38b683b2a5
[ "MIT" ]
null
null
null
from cd_perf_promotion.engines.argumentengine import ArgumentEngine from cd_perf_promotion.engines.configengine import ConfigEngine from cd_perf_promotion.engines.dataengine import DataEngine from cd_perf_promotion.engines.comparisonengine import ComparisonEngine from cd_perf_promotion.engines.outputengine import OutputEngine
54.666667
71
0.908537
40
328
7.2
0.275
0.104167
0.173611
0.329861
0.451389
0
0
0
0
0
0
0
0.060976
328
5
72
65.6
0.935065
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
0
0
0
null
0
0
1
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
5
3e4354b9d0f66e9bfa50701c59d2ed0ecfb53576
114
py
Python
plugins/pcr/plugins/pcrjjc/plugins/__init__.py
liangzimiao/miyubot
c2788712255e39348c8980c8ace2f6f75fb6621c
[ "Apache-2.0" ]
null
null
null
plugins/pcr/plugins/pcrjjc/plugins/__init__.py
liangzimiao/miyubot
c2788712255e39348c8980c8ace2f6f75fb6621c
[ "Apache-2.0" ]
null
null
null
plugins/pcr/plugins/pcrjjc/plugins/__init__.py
liangzimiao/miyubot
c2788712255e39348c8980c8ace2f6f75fb6621c
[ "Apache-2.0" ]
null
null
null
# -*- coding: utf-8 -*- """ @Time : 2021/12/20 14:23 @Author : 物述有栖 @File : __init__.py.py @DES : """
14.25
27
0.482456
16
114
3.1875
0.9375
0
0
0
0
0
0
0
0
0
0
0.156627
0.27193
114
7
28
16.285714
0.457831
0.912281
0
null
0
null
0
0
null
0
0
0
null
1
null
true
0
0
null
null
null
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
1
1
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
0
0
0
0
0
5
e47e32dd086651ab4575447c5f295ce924979bd5
130
py
Python
src/emails/admin.py
StefanGriffin/MVP1
94117b08a80ba7d430d96950bfeecf99710d041b
[ "MIT" ]
null
null
null
src/emails/admin.py
StefanGriffin/MVP1
94117b08a80ba7d430d96950bfeecf99710d041b
[ "MIT" ]
null
null
null
src/emails/admin.py
StefanGriffin/MVP1
94117b08a80ba7d430d96950bfeecf99710d041b
[ "MIT" ]
null
null
null
from django.contrib import admin # Register your models here. from .models import EmailEntry admin.site.register(EmailEntry)
13
32
0.792308
17
130
6.058824
0.647059
0
0
0
0
0
0
0
0
0
0
0
0.146154
130
9
33
14.444444
0.927928
0.2
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
0.666667
0
0.666667
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
5
e47e9b94c8be3a07608360a622d08bef80984836
210
py
Python
app/model/accountmanager.py
Stanford-PERTS/neptune
20b945adf7b62e67db60be3cc451ffb16113fe33
[ "CC0-1.0" ]
null
null
null
app/model/accountmanager.py
Stanford-PERTS/neptune
20b945adf7b62e67db60be3cc451ffb16113fe33
[ "CC0-1.0" ]
null
null
null
app/model/accountmanager.py
Stanford-PERTS/neptune
20b945adf7b62e67db60be3cc451ffb16113fe33
[ "CC0-1.0" ]
null
null
null
"""AccountManager: A convenience object for describing for which Projects a user is the account manager in their UI. """ from gae_models import DatastoreModel class AccountManager(DatastoreModel): pass
19.090909
75
0.785714
27
210
6.074074
0.851852
0
0
0
0
0
0
0
0
0
0
0
0.161905
210
10
76
21
0.931818
0.538095
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0.333333
0.333333
0
0.666667
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
1
1
0
1
0
0
5
e4bd21e18398dea240fb700b89cd1c29b9c67407
195
py
Python
src/sage/combinat/ncsym/all.py
switzel/sage
7eb8510dacf61b691664cd8f1d2e75e5d473e5a0
[ "BSL-1.0" ]
null
null
null
src/sage/combinat/ncsym/all.py
switzel/sage
7eb8510dacf61b691664cd8f1d2e75e5d473e5a0
[ "BSL-1.0" ]
null
null
null
src/sage/combinat/ncsym/all.py
switzel/sage
7eb8510dacf61b691664cd8f1d2e75e5d473e5a0
[ "BSL-1.0" ]
1
2020-07-24T12:20:37.000Z
2020-07-24T12:20:37.000Z
""" Features that are imported by default in the interpreter namespace """ from ncsym import SymmetricFunctionsNonCommutingVariables from dual import SymmetricFunctionsNonCommutingVariablesDual
27.857143
66
0.861538
18
195
9.333333
0.888889
0
0
0
0
0
0
0
0
0
0
0
0.107692
195
6
67
32.5
0.965517
0.338462
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
5
902db6b70731fcec25708520c51ece67f79ac198
122
py
Python
tiktok/utils/__init__.py
hackertogether/tiktok-crawler
eba5bb2b0ecf9e9d82084609d04ab53fc1747121
[ "MIT" ]
37
2019-05-07T05:02:09.000Z
2022-01-12T06:14:57.000Z
tiktok/utils/__init__.py
hackertogether/tiktok-crawler
eba5bb2b0ecf9e9d82084609d04ab53fc1747121
[ "MIT" ]
4
2019-05-23T05:27:25.000Z
2020-04-23T18:39:38.000Z
tiktok/utils/__init__.py
hackertogether/tiktok-crawler
eba5bb2b0ecf9e9d82084609d04ab53fc1747121
[ "MIT" ]
17
2019-05-06T09:15:18.000Z
2022-03-14T15:58:04.000Z
import urllib3 urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning) from tiktok.utils.fetch import fetch
20.333333
67
0.868852
14
122
7.5
0.714286
0
0
0
0
0
0
0
0
0
0
0.026549
0.07377
122
5
68
24.4
0.902655
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
0.666667
0
0.666667
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
5
5f4212c355bdca2c912e3cb620bd746549cd53c3
37
py
Python
dev/Tools/Python/2.7.13/mac/Python.framework/Versions/2.7/lib/python2.7/site-packages/pyxb/bundles/dc/dc.py
jeikabu/lumberyard
07228c605ce16cbf5aaa209a94a3cb9d6c1a4115
[ "AML" ]
123
2015-01-12T06:43:22.000Z
2022-03-20T18:06:46.000Z
dev/Tools/Python/2.7.13/mac/Python.framework/Versions/2.7/lib/python2.7/site-packages/pyxb/bundles/dc/dc.py
jeikabu/lumberyard
07228c605ce16cbf5aaa209a94a3cb9d6c1a4115
[ "AML" ]
103
2015-01-08T18:35:57.000Z
2022-01-18T01:44:14.000Z
dev/Tools/Python/2.7.13/mac/Python.framework/Versions/2.7/lib/python2.7/site-packages/pyxb/bundles/dc/dc.py
jeikabu/lumberyard
07228c605ce16cbf5aaa209a94a3cb9d6c1a4115
[ "AML" ]
54
2015-02-15T17:12:00.000Z
2022-03-07T23:02:32.000Z
from pyxb.bundles.dc.raw.dc import *
18.5
36
0.756757
7
37
4
0.857143
0
0
0
0
0
0
0
0
0
0
0
0.108108
37
1
37
37
0.848485
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
0
0
0
5
5f61d1cb422d8351e8c2ae1f3c0ff705c8d4ae96
305
py
Python
tests/test_bilding.py
memowe/miniciti
2a90cccad5672be26aa1fb7c848a19bda611fcd0
[ "MIT" ]
null
null
null
tests/test_bilding.py
memowe/miniciti
2a90cccad5672be26aa1fb7c848a19bda611fcd0
[ "MIT" ]
null
null
null
tests/test_bilding.py
memowe/miniciti
2a90cccad5672be26aa1fb7c848a19bda611fcd0
[ "MIT" ]
null
null
null
import pytest from miniciti.bilding import Bilding from anglr import Angle def testBildingHeight(): assert Bilding.floor_height == 3 assert Bilding(stories=17).height() == 17 * 3 def testAngle(): assert Bilding().is_upright() assert not Bilding(angle=Angle(42, "degrees")).is_upright()
23.461538
63
0.72459
40
305
5.45
0.525
0.178899
0
0
0
0
0
0
0
0
0
0.03125
0.160656
305
12
64
25.416667
0.820313
0
0
0
0
0
0.022951
0
0
0
0
0
0.444444
1
0.222222
true
0
0.333333
0
0.555556
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
1
0
1
1
0
1
0
1
0
0
5
5f75d722092974fe80af08478ca057fb2cf345a8
217
py
Python
processing/structures/__init__.py
CN-TU/ntarc-spec
c92bc98d7affa46ce9cc66b4e2aab220bb584bf8
[ "MIT" ]
null
null
null
processing/structures/__init__.py
CN-TU/ntarc-spec
c92bc98d7affa46ce9cc66b4e2aab220bb584bf8
[ "MIT" ]
15
2018-02-15T21:18:33.000Z
2018-11-28T13:13:52.000Z
processing/structures/__init__.py
CN-TU/ntarc-spec
c92bc98d7affa46ce9cc66b4e2aab220bb584bf8
[ "MIT" ]
1
2022-01-07T16:23:50.000Z
2022-01-07T16:23:50.000Z
try: from conf import PROJECT_PATH, API_KEY, MAPS_API_KEY except ImportError: PROJECT_PATH = '' API_KEY = '' MAPS_API_KEY = '' from .features import * from .high_level import * from .metadata import *
21.7
56
0.700461
30
217
4.766667
0.5
0.167832
0.195804
0.237762
0.377622
0.377622
0.377622
0
0
0
0
0
0.211982
217
9
57
24.111111
0.836257
0
0
0
0
0
0
0
0
0
0
0
0
1
0
false
0
0.555556
0
0.555556
0
0
0
0
null
0
1
1
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
1
0
0
5
5f928e07c99135b48fa0c614fbbd26f1f242c46d
27
py
Python
spikeinterface/widgets.py
Shawn-Guo-CN/spikeinterface
38fdf393b5e953fca30f5f33115d5e0e64c2137b
[ "MIT" ]
null
null
null
spikeinterface/widgets.py
Shawn-Guo-CN/spikeinterface
38fdf393b5e953fca30f5f33115d5e0e64c2137b
[ "MIT" ]
null
null
null
spikeinterface/widgets.py
Shawn-Guo-CN/spikeinterface
38fdf393b5e953fca30f5f33115d5e0e64c2137b
[ "MIT" ]
null
null
null
from spikewidgets import *
13.5
26
0.814815
3
27
7.333333
1
0
0
0
0
0
0
0
0
0
0
0
0.148148
27
1
27
27
0.956522
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
0
0
0
5
398851048ff7ffb094a9bf2d1cd20481551b6a5c
50
py
Python
book/models/__init__.py
shaun-emburse/django-book
42bdbe02c551bb43597507602024e159ca35c5ae
[ "CC-BY-2.0" ]
null
null
null
book/models/__init__.py
shaun-emburse/django-book
42bdbe02c551bb43597507602024e159ca35c5ae
[ "CC-BY-2.0" ]
null
null
null
book/models/__init__.py
shaun-emburse/django-book
42bdbe02c551bb43597507602024e159ca35c5ae
[ "CC-BY-2.0" ]
null
null
null
from .author import Author from .book import Book
16.666667
26
0.8
8
50
5
0.5
0
0
0
0
0
0
0
0
0
0
0
0.16
50
2
27
25
0.952381
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
5
8466de83d5c90c57806f385c137523be67df3aef
206
py
Python
openapi_core/validation/response/exceptions.py
sthagen/p1c2u-openapi-core
16278893f1be570b7e643a088c81d6c9bd7d76b2
[ "BSD-3-Clause" ]
1
2021-11-05T19:02:04.000Z
2021-11-05T19:02:04.000Z
openapi_core/validation/response/exceptions.py
sthagen/openapi-core
16278893f1be570b7e643a088c81d6c9bd7d76b2
[ "BSD-3-Clause" ]
null
null
null
openapi_core/validation/response/exceptions.py
sthagen/openapi-core
16278893f1be570b7e643a088c81d6c9bd7d76b2
[ "BSD-3-Clause" ]
null
null
null
from dataclasses import dataclass from typing import Any from typing import Dict from typing import List @dataclass class HeadersError(Exception): headers: Dict[str, Any] context: List[Exception]
18.727273
33
0.781553
27
206
5.962963
0.518519
0.186335
0.298137
0
0
0
0
0
0
0
0
0
0.165049
206
10
34
20.6
0.936047
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
0.5
0
0.875
0
1
0
0
null
0
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
5
ffdb261b085841a0e2f815a135a1fb29b69182bd
146
py
Python
Capitulo 1/15 - startswith.py
mmmacedo/python
2e7d99021342a5c7c31fe644ff194b6a8fa88a88
[ "MIT" ]
null
null
null
Capitulo 1/15 - startswith.py
mmmacedo/python
2e7d99021342a5c7c31fe644ff194b6a8fa88a88
[ "MIT" ]
null
null
null
Capitulo 1/15 - startswith.py
mmmacedo/python
2e7d99021342a5c7c31fe644ff194b6a8fa88a88
[ "MIT" ]
null
null
null
nome = "Daniel Moreno" print ("O nome inicia-se com Dan?", nome.startswith("Dan")) print ("O nome inicia-se com Mor?", nome.startswith("Mor"))
36.5
60
0.671233
23
146
4.26087
0.478261
0.122449
0.204082
0.326531
0.428571
0.428571
0
0
0
0
0
0
0.143836
146
4
61
36.5
0.784
0
0
0
0
0
0.479167
0
0
0
0
0
0
1
0
false
0
0
0
0
0.666667
1
0
0
null
0
1
1
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
1
0
5
080debddf702365c13a2b20efddae544e4d6e8de
68
py
Python
python_basics/Module & packages/my_pack/subpackage/subscript.py
alok8765/basic_python_practicse
9bd61f0b03fc1e703a75df39862a24692bb3fdb7
[ "MIT" ]
null
null
null
python_basics/Module & packages/my_pack/subpackage/subscript.py
alok8765/basic_python_practicse
9bd61f0b03fc1e703a75df39862a24692bb3fdb7
[ "MIT" ]
null
null
null
python_basics/Module & packages/my_pack/subpackage/subscript.py
alok8765/basic_python_practicse
9bd61f0b03fc1e703a75df39862a24692bb3fdb7
[ "MIT" ]
null
null
null
def sub_report(): print('hey i am function inside my subscript')
34
50
0.720588
11
68
4.363636
1
0
0
0
0
0
0
0
0
0
0
0
0.176471
68
2
50
34
0.857143
0
0
0
0
0
0.536232
0
0
0
0
0
0
1
0.5
true
0
0
0
0.5
0.5
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
1
0
0
0
0
1
0
5
081f8fd5ae6d1628fe2618d3d7b9231647531670
374
py
Python
python/testData/debug/stepping/test_smart_step_into_unary_operator.py
Sajaki/intellij-community
6748af2c40567839d11fd652ec77ba263c074aad
[ "Apache-2.0" ]
2
2019-04-28T07:48:50.000Z
2020-12-11T14:18:08.000Z
python/testData/debug/stepping/test_smart_step_into_unary_operator.py
Cyril-lamirand/intellij-community
60ab6c61b82fc761dd68363eca7d9d69663cfa39
[ "Apache-2.0" ]
2
2022-02-19T09:45:05.000Z
2022-02-27T20:32:55.000Z
python/testData/debug/stepping/test_smart_step_into_unary_operator.py
Cyril-lamirand/intellij-community
60ab6c61b82fc761dd68363eca7d9d69663cfa39
[ "Apache-2.0" ]
2
2020-03-15T08:57:37.000Z
2020-04-07T04:48:14.000Z
class A(object): def __init__(self, x): self.x = x def __neg__(self): return A(-self.x) def __pos__(self): return A(abs(self.x)) def __invert__(self): return A(~self.x) def __add__(self, other): return A(self.x + other.x) a1 = A(1) a2 = A(2) a3 = A(3) a5 = a1 + a2 + (-a3) + (+A(-4)) + (~a1) # breakpoint
15.583333
53
0.505348
59
374
2.864407
0.389831
0.177515
0.195266
0.213018
0.224852
0.224852
0
0
0
0
0
0.046875
0.315508
374
23
54
16.26087
0.613281
0.026738
0
0
0
0
0
0
0
0
0
0
0
1
0.333333
false
0
0
0.266667
0.666667
0
0
0
0
null
0
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
0
1
1
0
0
5
082cf12558069dde713795d4023b875ddffc3215
19
py
Python
GUI.py
jgutierrezh/demo-2018-1
bb4684a69e0b067f712500296a4e2a9f76ea2326
[ "MIT" ]
null
null
null
GUI.py
jgutierrezh/demo-2018-1
bb4684a69e0b067f712500296a4e2a9f76ea2326
[ "MIT" ]
null
null
null
GUI.py
jgutierrezh/demo-2018-1
bb4684a69e0b067f712500296a4e2a9f76ea2326
[ "MIT" ]
null
null
null
print("GUI first")
9.5
18
0.684211
3
19
4.333333
1
0
0
0
0
0
0
0
0
0
0
0
0.105263
19
1
19
19
0.764706
0
0
0
0
0
0.473684
0
0
0
0
0
0
1
0
true
0
0
0
0
1
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
0
0
0
1
0
5
08558fac0867659c2304186d5dded8f83832ad7a
95
py
Python
mpdiag/tests/test_dummy.py
mheikenfeld/wrfmpdiag
56622f6816fcb995ffd5aade5fbf2b789d7d6fa1
[ "BSD-3-Clause" ]
1
2019-01-07T23:19:49.000Z
2019-01-07T23:19:49.000Z
mpdiag/tests/test_dummy.py
mheikenfeld/wrfmpdiag
56622f6816fcb995ffd5aade5fbf2b789d7d6fa1
[ "BSD-3-Clause" ]
null
null
null
mpdiag/tests/test_dummy.py
mheikenfeld/wrfmpdiag
56622f6816fcb995ffd5aade5fbf2b789d7d6fa1
[ "BSD-3-Clause" ]
null
null
null
import os import pytest import iris import mpdiag def test_dummy_function(): assert 1==1
10.555556
26
0.757895
15
95
4.666667
0.733333
0
0
0
0
0
0
0
0
0
0
0.025974
0.189474
95
8
27
11.875
0.883117
0
0
0
0
0
0
0
0
0
0
0
0.166667
1
0.166667
true
0
0.666667
0
0.833333
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
5
f26c8dff99fc11ee7940a723a092cd0cd9ae7913
203
py
Python
tests/test_app.py
nabetama/wercker-test
539017f7d20b7613263bbb5a3898857fa04406e3
[ "MIT" ]
null
null
null
tests/test_app.py
nabetama/wercker-test
539017f7d20b7613263bbb5a3898857fa04406e3
[ "MIT" ]
null
null
null
tests/test_app.py
nabetama/wercker-test
539017f7d20b7613263bbb5a3898857fa04406e3
[ "MIT" ]
null
null
null
# coding: utf-8 class TestApp(object): def setup(self): pass def teardown(self): pass def test_app(self): assert True def test_app2(self): assert True
13.533333
24
0.561576
26
203
4.307692
0.615385
0.142857
0.196429
0
0
0
0
0
0
0
0
0.015152
0.349754
203
14
25
14.5
0.833333
0.064039
0
0.444444
0
0
0
0
0
0
0
0
0.222222
1
0.444444
false
0.222222
0
0
0.555556
0
1
0
0
null
0
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
1
0
0
1
0
0
5
f2adf51d9de2cce1b4390177499e07e62158f9a3
65
py
Python
scripts/__init__.py
samnewhook/python_scripts
5c39ee5906f14ff400ddaafcb4345d46fcead2d0
[ "MIT" ]
null
null
null
scripts/__init__.py
samnewhook/python_scripts
5c39ee5906f14ff400ddaafcb4345d46fcead2d0
[ "MIT" ]
null
null
null
scripts/__init__.py
samnewhook/python_scripts
5c39ee5906f14ff400ddaafcb4345d46fcead2d0
[ "MIT" ]
null
null
null
from updatetodo import get_latest_date, date_time_from_filename
32.5
64
0.892308
10
65
5.3
0.8
0
0
0
0
0
0
0
0
0
0
0
0.092308
65
1
65
65
0.898305
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
5
f2b17d14eadd26addfd9d4f2756f211bdd68240b
98
py
Python
data analysis/pyecharts/pyecharts/exceptions.py
mrxgavin/Coursework
6a1c87767d61f0865345dcdd4e963498856f05f3
[ "MIT" ]
3
2019-06-29T11:40:29.000Z
2019-09-07T02:15:09.000Z
data analysis/pyecharts/pyecharts/exceptions.py
mrxgavin/Coursework
6a1c87767d61f0865345dcdd4e963498856f05f3
[ "MIT" ]
null
null
null
data analysis/pyecharts/pyecharts/exceptions.py
mrxgavin/Coursework
6a1c87767d61f0865345dcdd4e963498856f05f3
[ "MIT" ]
null
null
null
class InvalidConfiguration(Exception): pass class RegionNotFound(Exception): pass
14
39
0.714286
8
98
8.75
0.625
0.371429
0
0
0
0
0
0
0
0
0
0
0.22449
98
6
40
16.333333
0.921053
0
0
0.5
0
0
0
0
0
0
0
0
0
1
0
true
0.5
0
0
0.5
0
1
0
0
null
1
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
1
0
0
0
0
0
5
4b2de4b13acdb5f8daf6b8c597cbadea31cff0ef
343
py
Python
{{cookiecutter.project_slug}}/{{cookiecutter.project_slug}}/{{ cookiecutter.app_name }}/admin.py
pythdasch/cookiecutter-django
c998afe16cc7632af329e623d29e7fb7e6b3795a
[ "BSD-3-Clause" ]
null
null
null
{{cookiecutter.project_slug}}/{{cookiecutter.project_slug}}/{{ cookiecutter.app_name }}/admin.py
pythdasch/cookiecutter-django
c998afe16cc7632af329e623d29e7fb7e6b3795a
[ "BSD-3-Clause" ]
null
null
null
{{cookiecutter.project_slug}}/{{cookiecutter.project_slug}}/{{ cookiecutter.app_name }}/admin.py
pythdasch/cookiecutter-django
c998afe16cc7632af329e623d29e7fb7e6b3795a
[ "BSD-3-Clause" ]
null
null
null
from django.contrib import admin from django.contrib.auth import admin as auth_admin from django.contrib.auth import get_user_model from {{ cookiecutter.project_slug }}.users.forms import UserChangeForm, UserCreationForm User = get_user_model() @admin.register(User) class {{ cookiecutter.name_of_model }}Admin(admin.ModelAdmin): pass
26.384615
88
0.804665
47
343
5.702128
0.489362
0.11194
0.190299
0.164179
0.238806
0.238806
0
0
0
0
0
0
0.110787
343
12
89
28.583333
0.878689
0
0
0
0
0
0
0
0
0
0
0
0
0
null
null
0.125
0.5
null
null
0
0
0
0
null
0
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
1
0
0
1
1
0
0
0
0
5
4b38e35bd2db2a0a7ed4383861707790a1168f25
106
py
Python
movers/sitemovers.py
virthead/COMPASS-multijob-pilot
beac49ec432d24382d4d23aacfe6c9674a59e118
[ "Apache-2.0" ]
null
null
null
movers/sitemovers.py
virthead/COMPASS-multijob-pilot
beac49ec432d24382d4d23aacfe6c9674a59e118
[ "Apache-2.0" ]
null
null
null
movers/sitemovers.py
virthead/COMPASS-multijob-pilot
beac49ec432d24382d4d23aacfe6c9674a59e118
[ "Apache-2.0" ]
null
null
null
""" This file contains the list of ENABLED site movers """ from .xrdcp_sitemover import xrdcpSiteMover
17.666667
52
0.764151
14
106
5.714286
1
0
0
0
0
0
0
0
0
0
0
0
0.169811
106
5
53
21.2
0.909091
0.471698
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
5
4b3a86efe234b527220441532ca71b5a78fd9b63
123
py
Python
dash-snapshot-report/passenger_wsgi.py
nocdoggo/Regional-Snapshot
c57032a455fdcbfd03ca2acc9d993ce55f86a9f7
[ "MIT" ]
null
null
null
dash-snapshot-report/passenger_wsgi.py
nocdoggo/Regional-Snapshot
c57032a455fdcbfd03ca2acc9d993ce55f86a9f7
[ "MIT" ]
null
null
null
dash-snapshot-report/passenger_wsgi.py
nocdoggo/Regional-Snapshot
c57032a455fdcbfd03ca2acc9d993ce55f86a9f7
[ "MIT" ]
null
null
null
import imp import os import sys sys.path.insert(0, os.path.dirname(__file__)) from appTop import server as application
12.3
45
0.780488
20
123
4.6
0.7
0
0
0
0
0
0
0
0
0
0
0.009524
0.146341
123
9
46
13.666667
0.866667
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
0.8
0
0.8
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
5
4b3dfa5a65857ea37983908d536bbea586edd72c
18
py
Python
nets/enet/__init__.py
SpatialPerceptionNeuralNetwork/SOA_DORN_TF
33814467e9135036abf28f2da19c5984c8744089
[ "Unlicense" ]
17
2019-02-17T07:39:39.000Z
2021-08-17T05:20:19.000Z
nets/enet/__init__.py
SpatialPerceptionNeuralNetwork/SOA_DORN_TF
33814467e9135036abf28f2da19c5984c8744089
[ "Unlicense" ]
6
2019-03-04T14:17:22.000Z
2019-11-07T15:06:55.000Z
nets/enet/__init__.py
SpatialPerceptionNeuralNetwork/SOA_DORN_TF
33814467e9135036abf28f2da19c5984c8744089
[ "Unlicense" ]
4
2019-02-17T07:39:47.000Z
2019-08-13T17:13:23.000Z
from . import enet
18
18
0.777778
3
18
4.666667
1
0
0
0
0
0
0
0
0
0
0
0
0.166667
18
1
18
18
0.933333
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
0
0
0
5
4b3fa494b2c9441e1bddee54e86e9e37d88abb01
194
py
Python
setup.py
thisistrivial/intellijournal
2f25d72c29159ddfc9bf2ebe7de27d887d001f70
[ "MIT" ]
null
null
null
setup.py
thisistrivial/intellijournal
2f25d72c29159ddfc9bf2ebe7de27d887d001f70
[ "MIT" ]
null
null
null
setup.py
thisistrivial/intellijournal
2f25d72c29159ddfc9bf2ebe7de27d887d001f70
[ "MIT" ]
null
null
null
import src.sentiment_analysis import src.keyword_extraction if __name__ == '__main__': src.sentiment_analysis.load_en_sentiment_classifier() src.keyword_extraction.load_keyword_extractor()
27.714286
55
0.845361
24
194
6.125
0.541667
0.122449
0.272109
0
0
0
0
0
0
0
0
0
0.07732
194
6
56
32.333333
0.821229
0
0
0
0
0
0.041237
0
0
0
0
0
0
1
0
true
0
0.4
0
0.4
0
1
0
0
null
0
1
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
0
0
0
5
4b591436b96d8b23659d67456a9cf06ddaf91238
102
py
Python
prla/assignments/a0/boom.py
AegirAexx/python-sandbox
fa1f584f615c6ed04f80b9dd92d2b241248c9ebe
[ "Unlicense" ]
null
null
null
prla/assignments/a0/boom.py
AegirAexx/python-sandbox
fa1f584f615c6ed04f80b9dd92d2b241248c9ebe
[ "Unlicense" ]
null
null
null
prla/assignments/a0/boom.py
AegirAexx/python-sandbox
fa1f584f615c6ed04f80b9dd92d2b241248c9ebe
[ "Unlicense" ]
null
null
null
def boom(i): return ['boom!' if x % 7 == 0 or '7' in str(x) else str(x) for x in range(1, i + 1)]
34
88
0.529412
24
102
2.25
0.625
0.148148
0
0
0
0
0
0
0
0
0
0.066667
0.264706
102
2
89
51
0.653333
0
0
0
0
0
0.058824
0
0
0
0
0
0
1
0.5
false
0
0
0.5
1
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
0
1
1
0
0
5
4b6209ce99528bf24d0e5ff0d50c7f691d003686
11,458
py
Python
plenum/test/checkpoints/test_backup_replica_resumes_ordering_on_lag_in_checkpoints.py
jandayanan/indy-plenum
2815e994404c77ad87eddcfd09062d5fe6efc1c5
[ "Apache-2.0" ]
148
2017-07-11T19:05:25.000Z
2022-03-16T21:31:20.000Z
plenum/test/checkpoints/test_backup_replica_resumes_ordering_on_lag_in_checkpoints.py
jandayanan/indy-plenum
2815e994404c77ad87eddcfd09062d5fe6efc1c5
[ "Apache-2.0" ]
561
2017-06-29T17:59:56.000Z
2022-03-09T15:47:14.000Z
plenum/test/checkpoints/test_backup_replica_resumes_ordering_on_lag_in_checkpoints.py
jandayanan/indy-plenum
2815e994404c77ad87eddcfd09062d5fe6efc1c5
[ "Apache-2.0" ]
378
2017-06-29T17:45:27.000Z
2022-03-26T07:27:59.000Z
import sys import pytest from plenum.common.constants import DOMAIN_LEDGER_ID, COMMIT from plenum.server.replica import Replica from plenum.test import waits from plenum.test.checkpoints.helper import check_num_quorumed_received_checkpoints, check_num_unstable_checkpoints from plenum.test.delayers import cDelay, chk_delay, msg_rep_delay from plenum.test.helper import sdk_send_random_requests, assertExp, sdk_send_random_and_check, assert_eq, get_pp_seq_no, \ check_last_ordered_3pc_backup from stp_core.loop.eventually import eventually nodeCount = 4 CHK_FREQ = 6 LOG_SIZE = 3 * CHK_FREQ first_run = True @pytest.fixture(scope="module") def tconf(tconf): old = tconf.Max3PCBatchesInFlight # This test requires lots of batches in flight (actually 8) in order to function properly, # so we allow any number to simplify things tconf.Max3PCBatchesInFlight = None yield tconf tconf.Max3PCBatchesInFlight = old def test_backup_replica_resumes_ordering_on_lag_in_checkpoints( looper, chkFreqPatched, reqs_for_checkpoint, one_replica_and_others_in_backup_instance, sdk_pool_handle, sdk_wallet_client, view_change_done, txnPoolNodeSet): """ Verifies resumption of ordering 3PC-batches on a backup replica on detection of a lag in checkpoints """ slow_replica, other_replicas = one_replica_and_others_in_backup_instance view_no = slow_replica.viewNo batches_count = slow_replica.last_ordered_3pc[1] # Send a request and ensure that the replica orders the batch for it sdk_send_random_requests(looper, sdk_pool_handle, sdk_wallet_client, 1) batches_count += 1 low_watermark = slow_replica.h looper.run( eventually(lambda: assert_eq(slow_replica.last_ordered_3pc, (view_no, batches_count)), retryWait=1, timeout=waits.expectedTransactionExecutionTime(nodeCount))) # Don't receive Commits from two replicas slow_replica.node.nodeIbStasher.delay( cDelay(instId=1, sender_filter=other_replicas[0].node.name)) slow_replica.node.nodeIbStasher.delay( cDelay(instId=1, sender_filter=other_replicas[1].node.name)) # Send a request for which the replica will not be able to order the batch # due to an insufficient count of Commits sdk_send_random_requests(looper, sdk_pool_handle, sdk_wallet_client, 1) looper.runFor(waits.expectedTransactionExecutionTime(nodeCount)) # Recover reception of Commits slow_replica.node.nodeIbStasher.drop_delayeds() slow_replica.node.nodeIbStasher.resetDelays() # Send requests but in a quantity insufficient # for catch-up number of checkpoints reqs_until_checkpoints = reqs_for_checkpoint - other_replicas[0].last_ordered_3pc[1] sdk_send_random_requests(looper, sdk_pool_handle, sdk_wallet_client, Replica.STASHED_CHECKPOINTS_BEFORE_CATCHUP * reqs_until_checkpoints) looper.runFor(waits.expectedTransactionExecutionTime(nodeCount)) # Ensure that the replica has not ordered any batches # after the very first one assert slow_replica.last_ordered_3pc == (view_no, batches_count) # Ensure that the watermarks have not been shifted since the view start assert slow_replica.h == low_watermark assert slow_replica.H == low_watermark + LOG_SIZE # Ensure that the collections related to requests, batches and # own checkpoints are not empty. # (Note that a primary replica removes requests from requestQueues # when creating a batch with them.) if slow_replica.isPrimary: assert slow_replica._ordering_service.sent_preprepares else: assert slow_replica._ordering_service.requestQueues[DOMAIN_LEDGER_ID] assert slow_replica._ordering_service.prePrepares assert slow_replica._ordering_service.prepares assert slow_replica._ordering_service.commits assert slow_replica._ordering_service.batches check_num_unstable_checkpoints(slow_replica, 0) check_num_quorumed_received_checkpoints(slow_replica, 1) # Send more requests to reach catch-up number of checkpoints sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle, sdk_wallet_client, reqs_for_checkpoint) batches_count += 1 batches_count += reqs_until_checkpoints batches_count += reqs_for_checkpoint # Ensure that the replica has adjusted last_ordered_3pc to the end # of the last checkpoint looper.run( eventually(lambda *args: assertExp(slow_replica.last_ordered_3pc == \ (view_no, batches_count)), slow_replica, retryWait=1, timeout=waits.expectedTransactionExecutionTime(nodeCount))) # Ensure that the watermarks have been shifted so that the lower watermark # has the same value as last_ordered_3pc assert slow_replica.h == low_watermark + (Replica.STASHED_CHECKPOINTS_BEFORE_CATCHUP + 1) * CHK_FREQ assert slow_replica.H == low_watermark + (Replica.STASHED_CHECKPOINTS_BEFORE_CATCHUP + 1) * CHK_FREQ + LOG_SIZE # Ensure that the collections related to requests, batches and # own checkpoints have been cleared assert not slow_replica._ordering_service.requestQueues[DOMAIN_LEDGER_ID] assert not slow_replica._ordering_service.sent_preprepares assert not slow_replica._ordering_service.prePrepares assert not slow_replica._ordering_service.prepares assert not slow_replica._ordering_service.commits assert not slow_replica._ordering_service.batches check_num_unstable_checkpoints(slow_replica, 0) check_num_quorumed_received_checkpoints(slow_replica, 0) # Send a request and ensure that the replica orders the batch for it sdk_send_random_requests(looper, sdk_pool_handle, sdk_wallet_client, 1) batches_count += 1 looper.run( eventually(lambda *args: assertExp(slow_replica.last_ordered_3pc == (view_no, batches_count)), slow_replica, retryWait=1, timeout=waits.expectedTransactionExecutionTime(nodeCount))) slow_replica._checkpointer._received_checkpoints.clear() batches_count = get_pp_seq_no(txnPoolNodeSet) def test_backup_replica_resumes_ordering_on_lag_if_checkpoints_belate( looper, chkFreqPatched, reqs_for_checkpoint, one_replica_and_others_in_backup_instance, sdk_pool_handle, sdk_wallet_client, view_change_done, txnPoolNodeSet): """ Verifies resumption of ordering 3PC-batches on a backup replica on detection of a lag in checkpoints in case it is detected after some batch in the next checkpoint has already been committed but cannot be ordered out of turn """ def check_last_ordered(replica, lo): assert replica.last_ordered_3pc == lo slow_replica, other_replicas = one_replica_and_others_in_backup_instance view_no = slow_replica.viewNo check_last_ordered_3pc_backup(slow_replica.node, other_replicas[0].node) batches_count = slow_replica.last_ordered_3pc[1] low_watermark = slow_replica.h # Send a request and ensure that the replica orders the batch for it sdk_send_random_requests(looper, sdk_pool_handle, sdk_wallet_client, 1) batches_count += 1 looper.run( eventually(lambda *args: assertExp(slow_replica.last_ordered_3pc == (view_no, batches_count)), slow_replica, retryWait=1, timeout=waits.expectedTransactionExecutionTime(nodeCount))) # Don't receive Commits from two replicas slow_replica.node.nodeIbStasher.delay( cDelay(instId=1, sender_filter=other_replicas[0].node.name)) slow_replica.node.nodeIbStasher.delay( cDelay(instId=1, sender_filter=other_replicas[1].node.name)) slow_replica.node.nodeIbStasher.delay( msg_rep_delay(types_to_delay=[COMMIT]) ) # Send a request for which the replica will not be able to order the batch # due to an insufficient count of Commits sdk_send_random_requests(looper, sdk_pool_handle, sdk_wallet_client, 1) looper.runFor(waits.expectedTransactionExecutionTime(nodeCount)) # Receive further Commits from now on slow_replica.node.nodeIbStasher.drop_delayeds() slow_replica.node.nodeIbStasher.resetDelays() looper.run( eventually(lambda *args: assertExp(slow_replica.last_ordered_3pc == (view_no, batches_count)), slow_replica, timeout=waits.expectedTransactionExecutionTime(nodeCount))) # Send requests but in a quantity insufficient # for catch-up number of checkpoints reqs_until_checkpoints = reqs_for_checkpoint - other_replicas[0].last_ordered_3pc[1] sdk_send_random_requests(looper, sdk_pool_handle, sdk_wallet_client, Replica.STASHED_CHECKPOINTS_BEFORE_CATCHUP * reqs_until_checkpoints) looper.runFor(waits.expectedTransactionExecutionTime(nodeCount)) # Don't receive Checkpoints slow_replica.node.nodeIbStasher.delay(chk_delay(instId=1)) # Send more requests to reach catch-up number of checkpoints sdk_send_random_requests(looper, sdk_pool_handle, sdk_wallet_client, reqs_for_checkpoint) # Send a request that starts a new checkpoint sdk_send_random_requests(looper, sdk_pool_handle, sdk_wallet_client, 1) looper.runFor(waits.expectedTransactionExecutionTime(nodeCount)) # Ensure that the replica has not ordered any batches # after the very first one assert slow_replica.last_ordered_3pc == (view_no, batches_count) # Ensure that the watermarks have not been shifted since the view start assert slow_replica.h == low_watermark assert slow_replica.H == low_watermark + LOG_SIZE # Ensure that there are some quorumed stashed checkpoints check_num_quorumed_received_checkpoints(slow_replica, 1) # Receive belated Checkpoints slow_replica.node.nodeIbStasher.reset_delays_and_process_delayeds() batches_count += 1 batches_count += reqs_until_checkpoints batches_count += reqs_for_checkpoint batches_count += 1 # Ensure that the replica has ordered the batch for the last sent request looper.run( eventually(check_last_ordered, slow_replica, (view_no, batches_count), timeout=waits.expectedTransactionExecutionTime(nodeCount))) # Ensure that the watermarks have been shifted so that the lower watermark # now equals to the end of the last stable checkpoint in the instance assert slow_replica.h == low_watermark + (Replica.STASHED_CHECKPOINTS_BEFORE_CATCHUP + 1) * CHK_FREQ assert slow_replica.H == low_watermark + (Replica.STASHED_CHECKPOINTS_BEFORE_CATCHUP + 1) * CHK_FREQ + LOG_SIZE # Ensure that now there are no quorumed stashed checkpoints check_num_quorumed_received_checkpoints(slow_replica, 0) # Send a request and ensure that the replica orders the batch for it sdk_send_random_requests(looper, sdk_pool_handle, sdk_wallet_client, 1) batches_count += 1 looper.run( eventually(lambda: assertExp(slow_replica.last_ordered_3pc == (view_no, batches_count)), retryWait=1, timeout=waits.expectedTransactionExecutionTime(nodeCount)))
44.933333
122
0.742014
1,469
11,458
5.486045
0.156569
0.083261
0.029532
0.02581
0.801712
0.770071
0.724159
0.708649
0.689167
0.666832
0
0.007744
0.19986
11,458
254
123
45.110236
0.871291
0.225083
0
0.660256
0
0
0.000683
0
0
0
0
0
0.192308
1
0.025641
false
0
0.057692
0
0.083333
0
0
0
0
null
0
0
0
1
1
1
1
0
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
5
4b745b35d224d33ce6b94d86e325280471e1dfa8
278
py
Python
miprometheus/problems/seq_to_seq/algorithmic/dual_ignore/__init__.py
vincentalbouy/mi-prometheus
99a0c94b0d0f3476fa021213b3246fda0db8b2db
[ "Apache-2.0" ]
null
null
null
miprometheus/problems/seq_to_seq/algorithmic/dual_ignore/__init__.py
vincentalbouy/mi-prometheus
99a0c94b0d0f3476fa021213b3246fda0db8b2db
[ "Apache-2.0" ]
null
null
null
miprometheus/problems/seq_to_seq/algorithmic/dual_ignore/__init__.py
vincentalbouy/mi-prometheus
99a0c94b0d0f3476fa021213b3246fda0db8b2db
[ "Apache-2.0" ]
null
null
null
from .interruption_not import InterruptionNot from .interruption_reverse_recall import InterruptionReverseRecall from .interruption_swap_recall import InterruptionSwapRecall __all__ = [ 'InterruptionNot', 'InterruptionReverseRecall', 'InterruptionSwapRecall' ]
27.8
66
0.820144
21
278
10.428571
0.52381
0.219178
0
0
0
0
0
0
0
0
0
0
0.129496
278
9
67
30.888889
0.904959
0
0
0
0
0
0.223022
0.169065
0
0
0
0
0
1
0
false
0
0.375
0
0.375
0
1
0
1
null
1
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
0
0
0
5
4ba5988acd0bdb48cc4e7999994c8ba38608a7f7
55
py
Python
nlptk/ratings/__init__.py
GarryGaller/nlp_toolkit
df98ee25f8a1f4379e751fdd4fd9f5389ffbfd1b
[ "MIT" ]
null
null
null
nlptk/ratings/__init__.py
GarryGaller/nlp_toolkit
df98ee25f8a1f4379e751fdd4fd9f5389ffbfd1b
[ "MIT" ]
null
null
null
nlptk/ratings/__init__.py
GarryGaller/nlp_toolkit
df98ee25f8a1f4379e751fdd4fd9f5389ffbfd1b
[ "MIT" ]
null
null
null
from .rake import rake from .textrank import textrank
13.75
30
0.8
8
55
5.5
0.5
0
0
0
0
0
0
0
0
0
0
0
0.163636
55
3
31
18.333333
0.956522
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
5
4bb40c0cc1f1e50d5b1c3da87f032967a906839e
805
py
Python
Z3/BPpy/execution/listeners/b_program_runner_listener.py
bThink-BGU/Papers-2019-MDETools
ca425d6934ad04bc2c1ac2d524974cc92d7946b3
[ "MIT" ]
null
null
null
Z3/BPpy/execution/listeners/b_program_runner_listener.py
bThink-BGU/Papers-2019-MDETools
ca425d6934ad04bc2c1ac2d524974cc92d7946b3
[ "MIT" ]
1
2022-02-15T13:57:42.000Z
2022-02-15T13:57:42.000Z
Z3/BPpy/execution/listeners/b_program_runner_listener.py
bThink-BGU/Papers-2019-MDETools
ca425d6934ad04bc2c1ac2d524974cc92d7946b3
[ "MIT" ]
2
2020-03-22T15:49:03.000Z
2020-07-27T12:42:58.000Z
from abc import ABC, abstractmethod class BProgramRunnerListener(ABC): @abstractmethod def starting(self, b_program): pass @abstractmethod def started(self, b_program): pass @abstractmethod def super_step_done(self, b_program): pass @abstractmethod def ended(self, b_program): pass @abstractmethod def assertion_failed(self, b_program): pass @abstractmethod def b_thread_added(self, b_program): pass @abstractmethod def b_thread_removed(self, b_program): pass @abstractmethod def b_thread_done(self, b_program): pass @abstractmethod def event_selected(self, b_program, event): pass @abstractmethod def halted(self, b_program): pass
17.5
47
0.64472
89
805
5.606742
0.280899
0.340681
0.240481
0.288577
0.587174
0.587174
0.388778
0.240481
0
0
0
0
0.286957
805
45
48
17.888889
0.869338
0
0
0.625
0
0
0
0
0
0
0
0
0.03125
1
0.3125
false
0.3125
0.03125
0
0.375
0
0
0
0
null
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
1
0
0
0
0
0
5
29a7c6603bfb385124f30bbb5b54cd07f7b33157
78
py
Python
hydroDL/model/__init__.py
csiro-hydroinformatics/hydroDL
df3fa31ffbe30d17c228b7fdd13dd719f11827f4
[ "Unlicense" ]
109
2019-06-07T03:46:33.000Z
2022-03-28T11:03:23.000Z
hydroDL/model/__init__.py
csiro-hydroinformatics/hydroDL
df3fa31ffbe30d17c228b7fdd13dd719f11827f4
[ "Unlicense" ]
5
2019-07-12T14:01:48.000Z
2022-01-27T22:34:38.000Z
hydroDL/model/__init__.py
csiro-hydroinformatics/hydroDL
df3fa31ffbe30d17c228b7fdd13dd719f11827f4
[ "Unlicense" ]
80
2019-04-24T16:18:38.000Z
2022-03-27T23:00:02.000Z
from .train import trainModel, testModel from . import rnn from . import crit
19.5
40
0.782051
11
78
5.545455
0.636364
0.327869
0
0
0
0
0
0
0
0
0
0
0.166667
78
3
41
26
0.938462
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
0
null
1
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
0
0
0
5
4b2b1d6f8cee0641a45d22722027f13a8c378daa
17
py
Python
NoBrokerScrapingTest/99acresSingle.py
HousingHeat/property-heatmap
912cc532e0567769cb36417dae2a6296a7cd58ed
[ "MIT" ]
null
null
null
NoBrokerScrapingTest/99acresSingle.py
HousingHeat/property-heatmap
912cc532e0567769cb36417dae2a6296a7cd58ed
[ "MIT" ]
null
null
null
NoBrokerScrapingTest/99acresSingle.py
HousingHeat/property-heatmap
912cc532e0567769cb36417dae2a6296a7cd58ed
[ "MIT" ]
1
2020-12-08T05:37:37.000Z
2020-12-08T05:37:37.000Z
# JSON API used.
8.5
16
0.647059
3
17
3.666667
1
0
0
0
0
0
0
0
0
0
0
0
0.235294
17
1
17
17
0.846154
0.823529
0
null
0
null
0
0
null
0
0
0
null
1
null
true
0
0
null
null
null
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
1
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
0
0
0
0
0
5
d99ddbe1c555212a0add3a4cab64b8ee46bdd07d
2,635
py
Python
sdk/python/pulumi_azure_native/certificateregistration/__init__.py
polivbr/pulumi-azure-native
09571f3bf6bdc4f3621aabefd1ba6c0d4ecfb0e7
[ "Apache-2.0" ]
null
null
null
sdk/python/pulumi_azure_native/certificateregistration/__init__.py
polivbr/pulumi-azure-native
09571f3bf6bdc4f3621aabefd1ba6c0d4ecfb0e7
[ "Apache-2.0" ]
null
null
null
sdk/python/pulumi_azure_native/certificateregistration/__init__.py
polivbr/pulumi-azure-native
09571f3bf6bdc4f3621aabefd1ba6c0d4ecfb0e7
[ "Apache-2.0" ]
null
null
null
# coding=utf-8 # *** WARNING: this file was generated by the Pulumi SDK Generator. *** # *** Do not edit by hand unless you're certain you know what you are doing! *** from .. import _utilities import typing # Export this package's modules as members: from ._enums import * from .app_service_certificate_order import * from .app_service_certificate_order_certificate import * from .get_app_service_certificate_order import * from .get_app_service_certificate_order_certificate import * from ._inputs import * from . import outputs # Make subpackages available: if typing.TYPE_CHECKING: import pulumi_azure_native.certificateregistration.v20150801 as __v20150801 v20150801 = __v20150801 import pulumi_azure_native.certificateregistration.v20180201 as __v20180201 v20180201 = __v20180201 import pulumi_azure_native.certificateregistration.v20190801 as __v20190801 v20190801 = __v20190801 import pulumi_azure_native.certificateregistration.v20200601 as __v20200601 v20200601 = __v20200601 import pulumi_azure_native.certificateregistration.v20200901 as __v20200901 v20200901 = __v20200901 import pulumi_azure_native.certificateregistration.v20201001 as __v20201001 v20201001 = __v20201001 import pulumi_azure_native.certificateregistration.v20201201 as __v20201201 v20201201 = __v20201201 import pulumi_azure_native.certificateregistration.v20210101 as __v20210101 v20210101 = __v20210101 import pulumi_azure_native.certificateregistration.v20210115 as __v20210115 v20210115 = __v20210115 import pulumi_azure_native.certificateregistration.v20210201 as __v20210201 v20210201 = __v20210201 else: v20150801 = _utilities.lazy_import('pulumi_azure_native.certificateregistration.v20150801') v20180201 = _utilities.lazy_import('pulumi_azure_native.certificateregistration.v20180201') v20190801 = _utilities.lazy_import('pulumi_azure_native.certificateregistration.v20190801') v20200601 = _utilities.lazy_import('pulumi_azure_native.certificateregistration.v20200601') v20200901 = _utilities.lazy_import('pulumi_azure_native.certificateregistration.v20200901') v20201001 = _utilities.lazy_import('pulumi_azure_native.certificateregistration.v20201001') v20201201 = _utilities.lazy_import('pulumi_azure_native.certificateregistration.v20201201') v20210101 = _utilities.lazy_import('pulumi_azure_native.certificateregistration.v20210101') v20210115 = _utilities.lazy_import('pulumi_azure_native.certificateregistration.v20210115') v20210201 = _utilities.lazy_import('pulumi_azure_native.certificateregistration.v20210201')
52.7
95
0.826565
275
2,635
7.483636
0.232727
0.116618
0.165209
0.223518
0.686103
0.686103
0.35277
0
0
0
0
0.206261
0.114991
2,635
49
96
53.77551
0.676244
0.087666
0
0
1
0
0.22111
0.22111
0
0
0
0
0
1
0
false
0
0.707317
0
0.707317
0
0
0
0
null
0
0
1
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
1
0
0
0
null
0
0
0
0
0
0
0
0
1
0
1
0
0
5
d9a21298c581a1c0166a53c4f28cb7676649dcc4
6,835
py
Python
elements/dialog.py
YannThorimbert/ThorPy
1950874a35118dfdbd797b914de3d80f141be245
[ "MIT" ]
27
2018-04-05T13:06:22.000Z
2022-01-24T08:14:47.000Z
venv/Lib/site-packages/thorpy/elements/dialog.py
tsnowh/GeneticPathFinder
f2b8db467f11185ca21e626770ab5d9b50d52e9e
[ "Apache-2.0" ]
6
2019-11-23T07:02:53.000Z
2021-04-12T19:08:35.000Z
venv/Lib/site-packages/thorpy/elements/dialog.py
tsnowh/GeneticPathFinder
f2b8db467f11185ca21e626770ab5d9b50d52e9e
[ "Apache-2.0" ]
7
2018-11-20T01:01:41.000Z
2022-01-24T08:14:50.000Z
"""This module provides (non)blocking alert and choices similar to the default ones in ThorPy, but the launched element is richer (title, hline...)""" import thorpy, pygame def make_textbox(title, text, font_size=None, font_color=None, ok_text="Ok", hline=0, elements=None): from thorpy.miscgui.launchers.launcher import make_ok_box els = [] if title: els += [thorpy.make_text(title, thorpy.style.TITLE_FONT_SIZE, (255,0,0))] if hline < 0: els += [thorpy.Line.make(e_title.get_size()[0],"h")] elif hline > 0: els += [thorpy.Line.make(hline,"h")] if text: els += [thorpy.make_text(text, font_size, font_color)] if elements is None: elements = [] els += elements box = make_ok_box(els, ok_text=ok_text) return box ##def make_choice(title, text, font_size=None, font_color=None, ok_text="Ok", ## cancel_text="Cancel"): ## from thorpy.miscgui.launchers.launcher import make_ok_cancel_box ## e_title = thorpy.make_text(title, thorpy.style.TITLE_FONT_SIZE, (255,0,0)) ## e_text = thorpy.make_text(text, font_size, font_color) ## box = make_ok_cancel_box([e_title,e_text], ok_text=ok_text, ## cancel_text=cancel_text) ## return box def launch_blocking_alert(title, text, parent=None, font_size=None, font_color=None, ok_text="Ok", transp=False, alpha_dialog=200, func=None, outside_click_quit=False): if font_size is None: font_size = thorpy.style.FONT_SIZE if font_color is None: font_color = thorpy.style.FONT_COLOR box_alert = make_textbox(title, text, font_size, font_color, ok_text) box_alert.center() if transp: color_transp = tuple(list(thorpy.style.DEF_COLOR)[:3]+[alpha_dialog]) box_alert.set_main_color(color_transp) def click_quit(e): if not box_alert.get_fus_rect().collidepoint(e.pos): thorpy.functions.quit_menu_func() if outside_click_quit: box_alert.add_reaction(thorpy.Reaction(pygame.MOUSEBUTTONDOWN, click_quit)) from thorpy.menus.tickedmenu import TickedMenu m = TickedMenu(box_alert) box_alert.get_elements_by_text(ok_text)[0].user_func = thorpy.functions.quit_menu_func box_alert.get_elements_by_text(ok_text)[0].user_params = {} m.play() box_alert.unblit() if parent: parent.partial_blit(None, box_alert.get_fus_rect()) box_alert.update() if func: func() def launch_blocking_choices(text, choices, parent=None, title_fontsize=None, title_fontcolor=None, func=None): """choices are tuple (text,func)""" if title_fontsize is None: title_fontsize = thorpy.style.FONT_SIZE if title_fontcolor is None: title_fontcolor = thorpy.style.FONT_COLOR elements = [thorpy.make_button(t,f) for t,f in choices] ghost = thorpy.make_group(elements) e_text = thorpy.make_text(text, title_fontsize, title_fontcolor) box = thorpy.Box.make([e_text, ghost]) box.center() from thorpy.miscgui.reaction import ConstantReaction for e in elements: reac = ConstantReaction(thorpy.constants.THORPY_EVENT, thorpy.functions.quit_menu_func, {"id":thorpy.constants.EVENT_UNPRESS, "el":e}) box.add_reaction(reac) from thorpy.menus.tickedmenu import TickedMenu m = TickedMenu(box) m.play() box.unblit() if parent: parent.partial_blit(None, box.get_fus_rect()) box.update() if func: func() def launch_blocking_choices_str(text, choices, parent=None, title_fontsize=None, title_fontcolor=None, func=None, store="v"): """choices are tuple (text,func)""" if title_fontsize is None: title_fontsize = thorpy.style.FONT_SIZE if title_fontcolor is None: title_fontcolor = thorpy.style.FONT_COLOR class Choice: value = None def choice_func(value): Choice.value = value elements = [] for name in choices: e = thorpy.make_button(name, choice_func, {"value":name}) elements.append(e) ghost = thorpy.make_group(elements, mode=store) e_text = thorpy.make_text(text, title_fontsize, title_fontcolor) box = thorpy.Box.make([e_text, thorpy.Line(100,"h"), ghost]) box.center() from thorpy.miscgui.reaction import ConstantReaction for e in elements: reac = ConstantReaction(thorpy.constants.THORPY_EVENT, thorpy.functions.quit_menu_func, {"id":thorpy.constants.EVENT_UNPRESS, "el":e}) box.add_reaction(reac) def click_outside(e): if not box.get_fus_rect().collidepoint(e.pos): thorpy.functions.quit_menu_func() reac = thorpy.Reaction(pygame.MOUSEBUTTONDOWN, click_outside) box.add_reaction(reac) from thorpy.menus.tickedmenu import TickedMenu m = TickedMenu(box) m.play() box.unblit() if parent: parent.partial_blit(None, box.get_fus_rect()) box.update() if func: func() return Choice.value def launch_nonblocking_alert(title, text, parent=None, font_size=None, font_color=None, ok_text="Ok", transp=False, alpha_dialog=200, func=None): if font_size is None: font_size = thorpy.style.FONT_SIZE if font_color is None: font_color = thorpy.style.FONT_COLOR box_alert = make_textbox(title, text, font_size, font_color, ok_text) box_alert.center() if transp: color_transp = tuple(list(thorpy.style.DEF_COLOR)[:3]+[alpha_dialog]) box_alert.set_main_color(color_transp) thorpy.launch_nonblocking(box_alert) def launch_nonblocking_choices(text, choices, parent=None, title_fontsize=None, title_fontcolor=None, func=None): """choices are tuple (text,func)""" from thorpy.miscgui.launchers.launcher import post_done if title_fontsize is None: title_fontsize = thorpy.style.FONT_SIZE if title_fontcolor is None: title_fontcolor = thorpy.style.FONT_COLOR elements = [thorpy.make_button(t,f) for t,f in choices] ghost = thorpy.make_group(elements) e_text = thorpy.make_text(text, title_fontsize, title_fontcolor) box = thorpy.Box.make([e_text, ghost]) box.center() from thorpy.miscgui.reaction import ConstantReaction for e in elements: reac = ConstantReaction(thorpy.constants.THORPY_EVENT, post_done, {"id":thorpy.constants.EVENT_UNPRESS, "el":e}, {"el":box}) box.add_reaction(reac) thorpy.launch_nonblocking(box)
42.987421
90
0.653109
908
6,835
4.682819
0.129956
0.035748
0.035278
0.021167
0.802446
0.765287
0.732832
0.725306
0.664864
0.651929
0
0.00521
0.241843
6,835
158
91
43.259494
0.815322
0.100219
0
0.577778
0
0
0.004743
0
0
0
0
0
0
1
0.066667
false
0
0.066667
0
0.162963
0
0
0
0
null
0
0
0
1
1
1
1
0
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
5
d9acb0623bdbf0decb18b129db90a1b6520ad6b8
83
py
Python
lib/pyexcel_io/database/__init__.py
logice/QQ-Groups-Spider
a161282c6832ed40183905e96205edb5a57e8a05
[ "MIT" ]
null
null
null
lib/pyexcel_io/database/__init__.py
logice/QQ-Groups-Spider
a161282c6832ed40183905e96205edb5a57e8a05
[ "MIT" ]
null
null
null
lib/pyexcel_io/database/__init__.py
logice/QQ-Groups-Spider
a161282c6832ed40183905e96205edb5a57e8a05
[ "MIT" ]
1
2021-04-12T07:48:42.000Z
2021-04-12T07:48:42.000Z
from . import django from . import sql exports = django.exports + sql.exports
16.6
39
0.710843
11
83
5.363636
0.454545
0.338983
0
0
0
0
0
0
0
0
0
0
0.216867
83
4
40
20.75
0.907692
0
0
0
0
0
0
0
0
0
0
0
0
1
0
false
0
0.666667
0
0.666667
0
1
0
0
null
1
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
1
0
0
5
d9b3cdfe590eb1694da93083fabbf0a9d11c1d87
294
py
Python
config/user_config.py
amirasaad/codejam-commandline
f49eccee781358a0af61ad85862147cf065ded63
[ "Apache-2.0" ]
4
2016-04-11T08:53:54.000Z
2017-04-08T21:22:02.000Z
config/user_config.py
amirasaad/codejam-commandline
f49eccee781358a0af61ad85862147cf065ded63
[ "Apache-2.0" ]
1
2018-01-12T09:51:07.000Z
2018-01-14T17:13:02.000Z
config/user_config.py
amirasaad/codejam-commandline
f49eccee781358a0af61ad85862147cf065ded63
[ "Apache-2.0" ]
13
2017-01-12T11:13:40.000Z
2019-04-19T10:02:34.000Z
# -*- coding: utf-8 -*- { 'host' : 'code.google.com', 'user' : 'your-name-here@gmail.com', 'data_directory' : './source', 'input_name_format' : '{problem}-{input}-{id}.in', 'output_name_format' : '{problem}-{input}-{id}.out', 'source_names_format' : [], }
29.4
53
0.537415
33
294
4.575758
0.69697
0.13245
0.225166
0.291391
0.317881
0
0
0
0
0
0
0.004329
0.214286
294
9
54
32.666667
0.649351
0.071429
0
0
0
0
0.642066
0.276753
0
0
0
0
0
1
0
true
0
0
0
0
0
0
0
0
null
0
1
1
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
1
0
null
0
0
0
0
0
0
1
0
0
0
0
0
0
5
8a0697e0d37a0d5842d5387e2e4bb544cc07023d
79
py
Python
cride/users/models.py
mdark1001/crideApiRest
228efec90d7f1ad8a6766b5a8085dd6bbf49fc8a
[ "MIT" ]
null
null
null
cride/users/models.py
mdark1001/crideApiRest
228efec90d7f1ad8a6766b5a8085dd6bbf49fc8a
[ "MIT" ]
null
null
null
cride/users/models.py
mdark1001/crideApiRest
228efec90d7f1ad8a6766b5a8085dd6bbf49fc8a
[ "MIT" ]
null
null
null
from django.db import models # Create your models here. from . import models
13.166667
28
0.759494
12
79
5
0.666667
0.4
0
0
0
0
0
0
0
0
0
0
0.189873
79
5
29
15.8
0.9375
0.303797
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
0
null
1
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
0
0
0
5
8a0b8422cc53566b04b06abd49a9886f0c6999a5
195
py
Python
Leetcode/0883. Projection Area of 3D Shapes/0883.py
Next-Gen-UI/Code-Dynamics
a9b9d5e3f27e870b3e030c75a1060d88292de01c
[ "MIT" ]
null
null
null
Leetcode/0883. Projection Area of 3D Shapes/0883.py
Next-Gen-UI/Code-Dynamics
a9b9d5e3f27e870b3e030c75a1060d88292de01c
[ "MIT" ]
null
null
null
Leetcode/0883. Projection Area of 3D Shapes/0883.py
Next-Gen-UI/Code-Dynamics
a9b9d5e3f27e870b3e030c75a1060d88292de01c
[ "MIT" ]
null
null
null
class Solution: def projectionArea(self, grid: List[List[int]]) -> int: return sum(a > 0 for row in grid for a in row) + sum(max(row) for row in grid) + sum(max(col) for col in zip(*grid))
48.75
120
0.661538
37
195
3.486486
0.486486
0.093023
0.124031
0.186047
0
0
0
0
0
0
0
0.006369
0.194872
195
3
121
65
0.815287
0
0
0
0
0
0
0
0
0
0
0
0
1
0.333333
false
0
0
0.333333
1
0
0
0
0
null
0
0
1
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
0
1
1
0
0
5
8a53a9fd4909cd44d51c25de872866e9b758ffa1
665
py
Python
sdk/python/pulumi_azure/streamanalytics/__init__.py
suresh198526/pulumi-azure
bf27206a38d7a5c58b3c2c57ec8769fe3d0fc5d7
[ "ECL-2.0", "Apache-2.0" ]
null
null
null
sdk/python/pulumi_azure/streamanalytics/__init__.py
suresh198526/pulumi-azure
bf27206a38d7a5c58b3c2c57ec8769fe3d0fc5d7
[ "ECL-2.0", "Apache-2.0" ]
null
null
null
sdk/python/pulumi_azure/streamanalytics/__init__.py
suresh198526/pulumi-azure
bf27206a38d7a5c58b3c2c57ec8769fe3d0fc5d7
[ "ECL-2.0", "Apache-2.0" ]
null
null
null
# coding=utf-8 # *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. *** # *** Do not edit by hand unless you're certain you know what you are doing! *** # Export this package's modules as members: from .function_java_script_udf import * from .get_job import * from .job import * from .output_blob import * from .output_event_hub import * from .output_mssql import * from .output_service_bus_queue import * from .output_servicebus_topic import * from .reference_input_blob import * from .stream_input_blob import * from .stream_input_event_hub import * from .stream_input_iot_hub import * from ._inputs import * from . import outputs
33.25
87
0.772932
102
665
4.803922
0.558824
0.265306
0.163265
0.128571
0.112245
0.112245
0
0
0
0
0
0.001764
0.147368
665
19
88
35
0.862434
0.329323
0
0
1
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
0
0
0
null
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
5
8a5ab3f42fd85be79a1618cd6587e413df9ae72a
177
py
Python
backend/__init__.py
AzoeDesarrollos/SchematicStarSystemViewer
2356dcfe12c88d0992be919348f8c06a7e7257e4
[ "MIT" ]
null
null
null
backend/__init__.py
AzoeDesarrollos/SchematicStarSystemViewer
2356dcfe12c88d0992be919348f8c06a7e7257e4
[ "MIT" ]
10
2021-11-22T05:24:17.000Z
2021-12-08T00:04:47.000Z
backend/__init__.py
AzoeDesarrollos/SchematicStarSystemViewer
2356dcfe12c88d0992be919348f8c06a7e7257e4
[ "MIT" ]
null
null
null
from .widget_handler import WidgetHandler from .eventhandler import EventHandler from .contants import WIDTH, HEIGHT from .renderer import Renderer from .util import salir
29.5
42
0.819209
22
177
6.545455
0.545455
0
0
0
0
0
0
0
0
0
0
0
0.146893
177
5
43
35.4
0.953642
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
5
8a5e85431e06aa07754f9f0631459c9d433d426c
88
py
Python
examples/batch_example.py
knowsuchagency/composer
b422ed4048b4d421e5100ea1770cbed37c4fb158
[ "MIT" ]
37
2021-05-24T22:34:59.000Z
2022-02-22T04:47:06.000Z
examples/batch_example.py
knowsuchagency/composer
b422ed4048b4d421e5100ea1770cbed37c4fb158
[ "MIT" ]
21
2021-05-26T09:14:05.000Z
2021-06-15T08:08:55.000Z
examples/batch_example.py
knowsuchagency/composer
b422ed4048b4d421e5100ea1770cbed37c4fb158
[ "MIT" ]
2
2021-06-22T09:51:39.000Z
2022-01-28T20:00:30.000Z
from orkestra import compose @compose def banana(event, context): return "banana"
12.571429
28
0.738636
11
88
5.909091
0.818182
0
0
0
0
0
0
0
0
0
0
0
0.181818
88
6
29
14.666667
0.902778
0
0
0
0
0
0.068182
0
0
0
0
0
0
1
0.25
false
0
0.25
0.25
0.75
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
0
1
1
0
0
5
8a7a10ac8a9b71585a2925790586909fbe2edcb1
176
py
Python
twocode/parser/__init__.py
MrCoft/twocode
741620a2abab003edbabb60533c54a2d9bc55b7e
[ "MIT" ]
null
null
null
twocode/parser/__init__.py
MrCoft/twocode
741620a2abab003edbabb60533c54a2d9bc55b7e
[ "MIT" ]
20
2020-05-25T18:38:47.000Z
2020-06-12T23:14:32.000Z
twocode/parser/__init__.py
MrCoft/twocode
741620a2abab003edbabb60533c54a2d9bc55b7e
[ "MIT" ]
null
null
null
from .lexer import LexLanguage, Token from .grammar import Grammar from .parser import Parser, IncrementalParser from .context import Context from .console import Console
29.333333
46
0.806818
22
176
6.454545
0.454545
0
0
0
0
0
0
0
0
0
0
0
0.153409
176
5
47
35.2
0.95302
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
5
8a9f5f825c7ab423dc26d67bb62aaba6691b621e
31,405
py
Python
tensorflow/contrib/slim/python/slim/learning_test.py
toptaldev92/tensorflow
1fd1f65d1b0896149e44a1f105267c27994010d9
[ "Apache-2.0" ]
null
null
null
tensorflow/contrib/slim/python/slim/learning_test.py
toptaldev92/tensorflow
1fd1f65d1b0896149e44a1f105267c27994010d9
[ "Apache-2.0" ]
null
null
null
tensorflow/contrib/slim/python/slim/learning_test.py
toptaldev92/tensorflow
1fd1f65d1b0896149e44a1f105267c27994010d9
[ "Apache-2.0" ]
1
2021-04-22T09:17:52.000Z
2021-04-22T09:17:52.000Z
# Copyright 2016 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for slim.learning.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import os import numpy as np from numpy import testing as np_testing import tensorflow as tf slim = tf.contrib.slim class ClipGradientNormsTest(tf.test.TestCase): def clip_values(self, arr): norm = np.sqrt(np.sum(arr**2)) if norm > self._max_norm: return self._max_norm * arr / np.sqrt(np.sum(arr**2)) return arr def setUp(self): np.random.seed(0) self._max_norm = 1.0 self._grad_vec = np.array([1., 2., 3.]) self._clipped_grad_vec = self.clip_values(self._grad_vec) self._zero_vec = np.zeros(self._grad_vec.size) def testOrdinaryGradIsClippedCorrectly(self): gradient = tf.constant(self._grad_vec, dtype=tf.float32) variable = tf.Variable(self._zero_vec, dtype=tf.float32) gradients_to_variables = (gradient, variable) [gradients_to_variables] = slim.learning.clip_gradient_norms( [gradients_to_variables], self._max_norm) # Ensure the variable passed through. self.assertEqual(gradients_to_variables[1], variable) with self.test_session() as sess: actual_gradient = sess.run(gradients_to_variables[0]) np_testing.assert_almost_equal(actual_gradient, self._clipped_grad_vec) def testNoneGradPassesThroughCorrectly(self): gradient = None variable = tf.Variable(self._zero_vec, dtype=tf.float32) gradients_to_variables = (gradient, variable) [gradients_to_variables] = slim.learning.clip_gradient_norms( [gradients_to_variables], self._max_norm) self.assertEqual(gradients_to_variables[0], None) self.assertEqual(gradients_to_variables[1], variable) def testIndexedSlicesGradIsClippedCorrectly(self): sparse_grad_indices = np.array([0, 1, 4]) sparse_grad_dense_shape = [self._grad_vec.size] values = tf.constant(self._grad_vec, dtype=tf.float32) indices = tf.constant(sparse_grad_indices, dtype=tf.int32) dense_shape = tf.constant(sparse_grad_dense_shape, dtype=tf.int32) gradient = tf.IndexedSlices(values, indices, dense_shape) variable = tf.Variable(self._zero_vec, dtype=tf.float32) gradients_to_variables = (gradient, variable) gradients_to_variables = slim.learning.clip_gradient_norms( [gradients_to_variables], self._max_norm)[0] # Ensure the built IndexedSlice has the right form. self.assertEqual(gradients_to_variables[1], variable) self.assertEqual(gradients_to_variables[0].indices, indices) self.assertEqual(gradients_to_variables[0].dense_shape, dense_shape) with tf.Session() as sess: actual_gradient = sess.run(gradients_to_variables[0].values) np_testing.assert_almost_equal(actual_gradient, self._clipped_grad_vec) class MultiplyGradientsTest(tf.test.TestCase): def setUp(self): np.random.seed(0) self._multiplier = 3.7 self._grad_vec = np.array([1., 2., 3.]) self._multiplied_grad_vec = np.multiply(self._grad_vec, self._multiplier) def testNonListGradsRaisesError(self): gradient = tf.constant(self._grad_vec, dtype=tf.float32) variable = tf.Variable(tf.zeros_like(gradient)) grad_to_var = (gradient, variable) gradient_multipliers = {variable: self._multiplier} with self.assertRaises(ValueError): slim.learning.multiply_gradients(grad_to_var, gradient_multipliers) def testEmptyMultiplesRaisesError(self): gradient = tf.constant(self._grad_vec, dtype=tf.float32) variable = tf.Variable(tf.zeros_like(gradient)) grad_to_var = (gradient, variable) with self.assertRaises(ValueError): slim.learning.multiply_gradients([grad_to_var], {}) def testNonDictMultiplierRaisesError(self): gradient = tf.constant(self._grad_vec, dtype=tf.float32) variable = tf.Variable(tf.zeros_like(gradient)) grad_to_var = (gradient, variable) with self.assertRaises(ValueError): slim.learning.multiply_gradients([grad_to_var], 3) def testMultipleOfNoneGradRaisesError(self): gradient = tf.constant(self._grad_vec, dtype=tf.float32) variable = tf.Variable(tf.zeros_like(gradient)) grad_to_var = (None, variable) gradient_multipliers = {variable: self._multiplier} with self.assertRaises(ValueError): slim.learning.multiply_gradients(grad_to_var, gradient_multipliers) def testMultipleGradientsWithVariables(self): gradient = tf.constant(self._grad_vec, dtype=tf.float32) variable = tf.Variable(tf.zeros_like(gradient)) grad_to_var = (gradient, variable) gradient_multipliers = {variable: self._multiplier} [grad_to_var] = slim.learning.multiply_gradients( [grad_to_var], gradient_multipliers) # Ensure the variable passed through. self.assertEqual(grad_to_var[1], variable) with self.test_session() as sess: actual_gradient = sess.run(grad_to_var[0]) np_testing.assert_almost_equal(actual_gradient, self._multiplied_grad_vec, 5) def testIndexedSlicesGradIsMultiplied(self): values = tf.constant(self._grad_vec, dtype=tf.float32) indices = tf.constant([0, 1, 2], dtype=tf.int32) dense_shape = tf.constant([self._grad_vec.size], dtype=tf.int32) gradient = tf.IndexedSlices(values, indices, dense_shape) variable = tf.Variable(tf.zeros((1, 3))) grad_to_var = (gradient, variable) gradient_multipliers = {variable: self._multiplier} [grad_to_var] = slim.learning.multiply_gradients( [grad_to_var], gradient_multipliers) # Ensure the built IndexedSlice has the right form. self.assertEqual(grad_to_var[1], variable) self.assertEqual(grad_to_var[0].indices, indices) self.assertEqual(grad_to_var[0].dense_shape, dense_shape) with self.test_session() as sess: actual_gradient = sess.run(grad_to_var[0].values) np_testing.assert_almost_equal(actual_gradient, self._multiplied_grad_vec, 5) def LogisticClassifier(inputs): return slim.fully_connected( inputs, 1, activation_fn=tf.sigmoid) def BatchNormClassifier(inputs): inputs = slim.batch_norm(inputs, decay=0.1) return slim.fully_connected(inputs, 1, activation_fn=tf.sigmoid) class TrainBNClassifierTest(tf.test.TestCase): def setUp(self): # Create an easy training set: np.random.seed(0) self._inputs = np.zeros((16, 4)) self._labels = np.random.randint(0, 2, size=(16, 1)).astype(np.float32) self._logdir = os.path.join(self.get_temp_dir(), 'tmp_bnlogs/') for i in range(16): j = int(2 * self._labels[i] + np.random.randint(0, 2)) self._inputs[i, j] = 1 def testTrainWithNoInitAssignCanAchieveZeroLoss(self): g = tf.Graph() with g.as_default(): tf.set_random_seed(0) tf_inputs = tf.constant(self._inputs, dtype=tf.float32) tf_labels = tf.constant(self._labels, dtype=tf.float32) tf_predictions = BatchNormClassifier(tf_inputs) slim.losses.log_loss(tf_predictions, tf_labels) total_loss = slim.losses.get_total_loss() optimizer = tf.train.GradientDescentOptimizer(learning_rate=1.0) train_op = slim.learning.create_train_op( total_loss, optimizer) loss = slim.learning.train( train_op, self._logdir, number_of_steps=300, log_every_n_steps=10) self.assertLess(loss, .1) class CreateTrainOpTest(tf.test.TestCase): def setUp(self): # Create an easy training set: np.random.seed(0) self._inputs = np.random.rand(16, 4).astype(np.float32) self._labels = np.random.randint(0, 2, size=(16, 1)).astype(np.float32) def testUseUpdateOps(self): g = tf.Graph() with g.as_default(): tf.set_random_seed(0) tf_inputs = tf.constant(self._inputs, dtype=tf.float32) tf_labels = tf.constant(self._labels, dtype=tf.float32) expected_mean = np.mean(self._inputs, axis=(0)) expected_var = np.var(self._inputs, axis=(0)) tf_predictions = BatchNormClassifier(tf_inputs) slim.losses.log_loss(tf_predictions, tf_labels) total_loss = slim.losses.get_total_loss() optimizer = tf.train.GradientDescentOptimizer(learning_rate=1.0) train_op = slim.learning.create_train_op(total_loss, optimizer) moving_mean = tf.contrib.framework.get_variables_by_name('moving_mean')[0] moving_variance = tf.contrib.framework.get_variables_by_name( 'moving_variance')[0] with tf.Session() as sess: # Initialize all variables sess.run(tf.initialize_all_variables()) mean, variance = sess.run([moving_mean, moving_variance]) # After initialization moving_mean == 0 and moving_variance == 1. self.assertAllClose(mean, [0] * 4) self.assertAllClose(variance, [1] * 4) for _ in range(10): sess.run([train_op]) mean = moving_mean.eval() variance = moving_variance.eval() # After 10 updates with decay 0.1 moving_mean == expected_mean and # moving_variance == expected_var. self.assertAllClose(mean, expected_mean) self.assertAllClose(variance, expected_var) def testEmptyUpdateOps(self): g = tf.Graph() with g.as_default(): tf.set_random_seed(0) tf_inputs = tf.constant(self._inputs, dtype=tf.float32) tf_labels = tf.constant(self._labels, dtype=tf.float32) tf_predictions = BatchNormClassifier(tf_inputs) slim.losses.log_loss(tf_predictions, tf_labels) total_loss = slim.losses.get_total_loss() optimizer = tf.train.GradientDescentOptimizer(learning_rate=1.0) train_op = slim.learning.create_train_op(total_loss, optimizer, update_ops=[]) moving_mean = tf.contrib.framework.get_variables_by_name('moving_mean')[0] moving_variance = tf.contrib.framework.get_variables_by_name( 'moving_variance')[0] with tf.Session() as sess: # Initialize all variables sess.run(tf.initialize_all_variables()) mean, variance = sess.run([moving_mean, moving_variance]) # After initialization moving_mean == 0 and moving_variance == 1. self.assertAllClose(mean, [0] * 4) self.assertAllClose(variance, [1] * 4) for _ in range(10): sess.run([train_op]) mean = moving_mean.eval() variance = moving_variance.eval() # Since we skip update_ops the moving_vars are not updated. self.assertAllClose(mean, [0] * 4) self.assertAllClose(variance, [1] * 4) def testRecordTrainOpInCollection(self): with tf.Graph().as_default(): tf.set_random_seed(0) tf_inputs = tf.constant(self._inputs, dtype=tf.float32) tf_labels = tf.constant(self._labels, dtype=tf.float32) tf_predictions = LogisticClassifier(tf_inputs) slim.losses.log_loss(tf_predictions, tf_labels) total_loss = slim.losses.get_total_loss() optimizer = tf.train.GradientDescentOptimizer(learning_rate=1.0) train_op = slim.learning.create_train_op(total_loss, optimizer) # Make sure the training op was recorded in the proper collection self.assertTrue(train_op in tf.get_collection(tf.GraphKeys.TRAIN_OP)) class TrainTest(tf.test.TestCase): def setUp(self): # Create an easy training set: np.random.seed(0) self._inputs = np.zeros((16, 4)) self._labels = np.random.randint(0, 2, size=(16, 1)).astype(np.float32) self._logdir = os.path.join(self.get_temp_dir(), 'tmp_logs/') # To make sure one test doesnt interfere with another: if tf.gfile.Exists(self._logdir): tf.gfile.DeleteRecursively(self._logdir) for i in range(16): j = int(2 * self._labels[i] + np.random.randint(0, 2)) self._inputs[i, j] = 1 def testTrainWithNonDefaultGraph(self): self._logdir = os.path.join(self.get_temp_dir(), 'tmp_logs8/') g = tf.Graph() with g.as_default(): tf.set_random_seed(0) tf_inputs = tf.constant(self._inputs, dtype=tf.float32) tf_labels = tf.constant(self._labels, dtype=tf.float32) tf_predictions = LogisticClassifier(tf_inputs) slim.losses.log_loss(tf_predictions, tf_labels) total_loss = slim.losses.get_total_loss() optimizer = tf.train.GradientDescentOptimizer(learning_rate=1.0) train_op = slim.learning.create_train_op(total_loss, optimizer) loss = slim.learning.train( train_op, self._logdir, number_of_steps=300, log_every_n_steps=10, graph=g) self.assertIsNotNone(loss) self.assertLess(loss, .015) def testTrainWithNoneAsLogdir(self): with tf.Graph().as_default(): tf.set_random_seed(0) tf_inputs = tf.constant(self._inputs, dtype=tf.float32) tf_labels = tf.constant(self._labels, dtype=tf.float32) tf_predictions = LogisticClassifier(tf_inputs) slim.losses.log_loss(tf_predictions, tf_labels) total_loss = slim.losses.get_total_loss() optimizer = tf.train.GradientDescentOptimizer(learning_rate=1.0) train_op = slim.learning.create_train_op(total_loss, optimizer) loss = slim.learning.train( train_op, None, number_of_steps=300, log_every_n_steps=10) self.assertIsNotNone(loss) self.assertLess(loss, .015) def testTrainWithSessionConfig(self): with tf.Graph().as_default(): tf.set_random_seed(0) tf_inputs = tf.constant(self._inputs, dtype=tf.float32) tf_labels = tf.constant(self._labels, dtype=tf.float32) tf_predictions = LogisticClassifier(tf_inputs) slim.losses.log_loss(tf_predictions, tf_labels) total_loss = slim.losses.get_total_loss() optimizer = tf.train.GradientDescentOptimizer(learning_rate=1.0) train_op = slim.learning.create_train_op(total_loss, optimizer) session_config = tf.ConfigProto(allow_soft_placement=True) loss = slim.learning.train( train_op, None, number_of_steps=300, log_every_n_steps=10, session_config=session_config) self.assertIsNotNone(loss) self.assertLess(loss, .015) def testTrainWithTrace(self): with tf.Graph().as_default(): tf.set_random_seed(0) tf_inputs = tf.constant(self._inputs, dtype=tf.float32) tf_labels = tf.constant(self._labels, dtype=tf.float32) tf_predictions = LogisticClassifier(tf_inputs) slim.losses.log_loss(tf_predictions, tf_labels) total_loss = slim.losses.get_total_loss() tf.scalar_summary('total_loss', total_loss) optimizer = tf.train.GradientDescentOptimizer(learning_rate=1.0) train_op = slim.learning.create_train_op(total_loss, optimizer) loss = slim.learning.train( train_op, self._logdir, number_of_steps=300, log_every_n_steps=10, trace_every_n_steps=100) self.assertIsNotNone(loss) for trace_step in [1, 101, 201]: trace_filename = 'tf_trace-%d.json' % trace_step self.assertTrue( os.path.isfile(os.path.join(self._logdir, trace_filename))) def testTrainWithNoneAsLogdirWhenUsingSummariesRaisesError(self): with tf.Graph().as_default(): tf.set_random_seed(0) tf_inputs = tf.constant(self._inputs, dtype=tf.float32) tf_labels = tf.constant(self._labels, dtype=tf.float32) tf_predictions = LogisticClassifier(tf_inputs) slim.losses.log_loss(tf_predictions, tf_labels) total_loss = slim.losses.get_total_loss() tf.scalar_summary('total_loss', total_loss) optimizer = tf.train.GradientDescentOptimizer(learning_rate=1.0) train_op = slim.learning.create_train_op(total_loss, optimizer) summary_op = tf.merge_all_summaries() with self.assertRaises(ValueError): slim.learning.train( train_op, None, number_of_steps=300, summary_op=summary_op) def testTrainWithNoneAsLogdirWhenUsingTraceRaisesError(self): with tf.Graph().as_default(): tf.set_random_seed(0) tf_inputs = tf.constant(self._inputs, dtype=tf.float32) tf_labels = tf.constant(self._labels, dtype=tf.float32) tf_predictions = LogisticClassifier(tf_inputs) slim.losses.log_loss(tf_predictions, tf_labels) total_loss = slim.losses.get_total_loss() optimizer = tf.train.GradientDescentOptimizer(learning_rate=1.0) train_op = slim.learning.create_train_op(total_loss, optimizer) with self.assertRaises(ValueError): slim.learning.train( train_op, None, number_of_steps=300, trace_every_n_steps=10) def testTrainWithNoneAsLogdirWhenUsingSaverRaisesError(self): self._logdir = os.path.join(self.get_temp_dir(), 'tmp_logs_/') with tf.Graph().as_default(): tf.set_random_seed(0) tf_inputs = tf.constant(self._inputs, dtype=tf.float32) tf_labels = tf.constant(self._labels, dtype=tf.float32) tf_predictions = LogisticClassifier(tf_inputs) slim.losses.log_loss(tf_predictions, tf_labels) total_loss = slim.losses.get_total_loss() optimizer = tf.train.GradientDescentOptimizer(learning_rate=1.0) train_op = slim.learning.create_train_op(total_loss, optimizer) saver = tf.train.Saver() with self.assertRaises(ValueError): slim.learning.train( train_op, None, init_op=None, number_of_steps=300, saver=saver) def testTrainWithNoneAsInitWhenUsingVarsRaisesError(self): self._logdir = os.path.join(self.get_temp_dir(), 'tmp_logs_/') with tf.Graph().as_default(): tf.set_random_seed(0) tf_inputs = tf.constant(self._inputs, dtype=tf.float32) tf_labels = tf.constant(self._labels, dtype=tf.float32) tf_predictions = LogisticClassifier(tf_inputs) slim.losses.log_loss(tf_predictions, tf_labels) total_loss = slim.losses.get_total_loss() optimizer = tf.train.GradientDescentOptimizer(learning_rate=1.0) train_op = slim.learning.create_train_op( total_loss, optimizer) with self.assertRaises(RuntimeError): slim.learning.train( train_op, self._logdir, init_op=None, number_of_steps=300) def testTrainWithNoInitAssignCanAchieveZeroLoss(self): with tf.Graph().as_default(): tf.set_random_seed(0) tf_inputs = tf.constant(self._inputs, dtype=tf.float32) tf_labels = tf.constant(self._labels, dtype=tf.float32) tf_predictions = LogisticClassifier(tf_inputs) slim.losses.log_loss(tf_predictions, tf_labels) total_loss = slim.losses.get_total_loss() optimizer = tf.train.GradientDescentOptimizer(learning_rate=1.0) train_op = slim.learning.create_train_op(total_loss, optimizer) loss = slim.learning.train( train_op, self._logdir, number_of_steps=300, log_every_n_steps=10) self.assertIsNotNone(loss) self.assertLess(loss, .015) def testTrainWithLocalVariable(self): with tf.Graph().as_default(): tf.set_random_seed(0) tf_inputs = tf.constant(self._inputs, dtype=tf.float32) tf_labels = tf.constant(self._labels, dtype=tf.float32) local_multiplier = slim.local_variable(1.0) tf_predictions = LogisticClassifier(tf_inputs) * local_multiplier slim.losses.log_loss(tf_predictions, tf_labels) total_loss = slim.losses.get_total_loss() optimizer = tf.train.GradientDescentOptimizer(learning_rate=1.0) train_op = slim.learning.create_train_op( total_loss, optimizer) loss = slim.learning.train( train_op, self._logdir, number_of_steps=300, log_every_n_steps=10) self.assertIsNotNone(loss) self.assertLess(loss, .015) def testResumeTrainAchievesRoughlyTheSameLoss(self): number_of_steps = [300, 301, 305] for i in range(len(number_of_steps)): with tf.Graph().as_default(): tf.set_random_seed(i) tf_inputs = tf.constant(self._inputs, dtype=tf.float32) tf_labels = tf.constant(self._labels, dtype=tf.float32) tf_predictions = LogisticClassifier(tf_inputs) slim.losses.log_loss(tf_predictions, tf_labels) total_loss = slim.losses.get_total_loss() optimizer = tf.train.GradientDescentOptimizer(learning_rate=1.0) train_op = slim.learning.create_train_op( total_loss, optimizer) loss = slim.learning.train( train_op, self._logdir, number_of_steps=number_of_steps[i], log_every_n_steps=10) self.assertIsNotNone(loss) self.assertLess(loss, .015) def create_train_op(self, learning_rate=1.0, gradient_multiplier=1.0): tf_inputs = tf.constant(self._inputs, dtype=tf.float32) tf_labels = tf.constant(self._labels, dtype=tf.float32) tf_predictions = LogisticClassifier(tf_inputs) slim.losses.log_loss(tf_predictions, tf_labels) total_loss = slim.losses.get_total_loss() optimizer = tf.train.GradientDescentOptimizer( learning_rate=learning_rate) if gradient_multiplier != 1.0: variables = tf.trainable_variables() gradient_multipliers = {var: gradient_multiplier for var in variables} else: gradient_multipliers = None return slim.learning.create_train_op( total_loss, optimizer, gradient_multipliers=gradient_multipliers) def testTrainWithInitFromCheckpoint(self): logdir1 = os.path.join(self.get_temp_dir(), 'tmp_logs1/') logdir2 = os.path.join(self.get_temp_dir(), 'tmp_logs2/') if tf.gfile.Exists(logdir1): # For running on jenkins. tf.gfile.DeleteRecursively(logdir1) if tf.gfile.Exists(logdir2): # For running on jenkins. tf.gfile.DeleteRecursively(logdir2) # First, train the model one step (make sure the error is high). with tf.Graph().as_default(): tf.set_random_seed(0) train_op = self.create_train_op() loss = slim.learning.train( train_op, logdir1, number_of_steps=1) self.assertGreater(loss, .5) # Next, train the model to convergence. with tf.Graph().as_default(): tf.set_random_seed(1) train_op = self.create_train_op() loss = slim.learning.train( train_op, logdir1, number_of_steps=300, log_every_n_steps=10) self.assertIsNotNone(loss) self.assertLess(loss, .02) # Finally, advance the model a single step and validate that the loss is # still low. with tf.Graph().as_default(): tf.set_random_seed(2) train_op = self.create_train_op() model_variables = tf.all_variables() model_path = os.path.join(logdir1, 'model.ckpt-300') init_op = tf.initialize_all_variables() op, init_feed_dict = slim.assign_from_checkpoint( model_path, model_variables) def InitAssignFn(sess): sess.run(op, init_feed_dict) loss = slim.learning.train( train_op, logdir2, number_of_steps=1, init_op=init_op, init_fn=InitAssignFn) self.assertIsNotNone(loss) self.assertLess(loss, .02) def testTrainWithInitFromFn(self): logdir1 = os.path.join(self.get_temp_dir(), 'tmp_logs4/') logdir2 = os.path.join(self.get_temp_dir(), 'tmp_logs5/') if tf.gfile.Exists(logdir1): # For running on jenkins. tf.gfile.DeleteRecursively(logdir1) if tf.gfile.Exists(logdir2): # For running on jenkins. tf.gfile.DeleteRecursively(logdir2) # First, train the model one step (make sure the error is high). with tf.Graph().as_default(): tf.set_random_seed(0) train_op = self.create_train_op() loss = slim.learning.train( train_op, logdir1, number_of_steps=1) self.assertGreater(loss, .5) # Next, train the model to convergence. with tf.Graph().as_default(): tf.set_random_seed(1) train_op = self.create_train_op() loss = slim.learning.train( train_op, logdir1, number_of_steps=300, log_every_n_steps=10) self.assertIsNotNone(loss) self.assertLess(loss, .015) # Finally, advance the model a single step and validate that the loss is # still low. with tf.Graph().as_default(): tf.set_random_seed(2) train_op = self.create_train_op() model_variables = tf.all_variables() model_path = os.path.join(logdir1, 'model.ckpt-300') saver = tf.train.Saver(model_variables) def RestoreFn(sess): saver.restore(sess, model_path) loss = slim.learning.train( train_op, logdir2, number_of_steps=1, init_fn=RestoreFn) self.assertIsNotNone(loss) self.assertLess(loss, .015) def ModelLoss(self): tf_inputs = tf.constant(self._inputs, dtype=tf.float32) tf_labels = tf.constant(self._labels, dtype=tf.float32) tf_predictions = LogisticClassifier(tf_inputs) slim.losses.log_loss(tf_predictions, tf_labels) return slim.losses.get_total_loss() def testTrainAllVarsHasLowerLossThanTrainSubsetOfVars(self): logdir1 = os.path.join(self.get_temp_dir(), 'tmp_logs3/') if tf.gfile.Exists(logdir1): # For running on jenkins. tf.gfile.DeleteRecursively(logdir1) # First, train only the weights of the model. with tf.Graph().as_default(): tf.set_random_seed(0) total_loss = self.ModelLoss() optimizer = tf.train.GradientDescentOptimizer(learning_rate=1.0) weights = slim.get_variables_by_name('weights') train_op = slim.learning.create_train_op( total_loss, optimizer, variables_to_train=weights) loss = slim.learning.train( train_op, logdir1, number_of_steps=200, log_every_n_steps=10) self.assertGreater(loss, .015) self.assertLess(loss, .05) # Next, train the biases of the model. with tf.Graph().as_default(): tf.set_random_seed(1) total_loss = self.ModelLoss() optimizer = tf.train.GradientDescentOptimizer(learning_rate=1.0) biases = slim.get_variables_by_name('biases') train_op = slim.learning.create_train_op( total_loss, optimizer, variables_to_train=biases) loss = slim.learning.train( train_op, logdir1, number_of_steps=300, log_every_n_steps=10) self.assertGreater(loss, .015) self.assertLess(loss, .05) # Finally, train both weights and bias to get lower loss. with tf.Graph().as_default(): tf.set_random_seed(2) total_loss = self.ModelLoss() optimizer = tf.train.GradientDescentOptimizer(learning_rate=1.0) train_op = slim.learning.create_train_op(total_loss, optimizer) loss = slim.learning.train( train_op, logdir1, number_of_steps=400, log_every_n_steps=10) self.assertIsNotNone(loss) self.assertLess(loss, .015) def testTrainingSubsetsOfVariablesOnlyUpdatesThoseVariables(self): # First, train only the weights of the model. with tf.Graph().as_default(): tf.set_random_seed(0) total_loss = self.ModelLoss() optimizer = tf.train.GradientDescentOptimizer(learning_rate=1.0) weights, biases = slim.get_variables() train_op = slim.learning.create_train_op(total_loss, optimizer) train_weights = slim.learning.create_train_op( total_loss, optimizer, variables_to_train=[weights]) train_biases = slim.learning.create_train_op( total_loss, optimizer, variables_to_train=[biases]) with tf.Session() as sess: # Initialize the variables. sess.run(tf.initialize_all_variables()) # Get the intial weights and biases values. weights_values, biases_values = sess.run([weights, biases]) self.assertGreater(np.linalg.norm(weights_values), 0) self.assertAlmostEqual(np.linalg.norm(biases_values), 0) # Update weights and biases. loss = sess.run(train_op) self.assertGreater(loss, .5) new_weights, new_biases = sess.run([weights, biases]) # Check that the weights and biases have been updated. self.assertGreater(np.linalg.norm(weights_values - new_weights), 0) self.assertGreater(np.linalg.norm(biases_values - new_biases), 0) weights_values, biases_values = new_weights, new_biases # Update only weights. loss = sess.run(train_weights) self.assertGreater(loss, .5) new_weights, new_biases = sess.run([weights, biases]) # Check that the weights have been updated, but biases have not. self.assertGreater(np.linalg.norm(weights_values - new_weights), 0) self.assertAlmostEqual(np.linalg.norm(biases_values - new_biases), 0) weights_values = new_weights # Update only biases. loss = sess.run(train_biases) self.assertGreater(loss, .5) new_weights, new_biases = sess.run([weights, biases]) # Check that the biases have been updated, but weights have not. self.assertAlmostEqual(np.linalg.norm(weights_values - new_weights), 0) self.assertGreater(np.linalg.norm(biases_values - new_biases), 0) def testTrainWithAlteredGradients(self): # Use the same learning rate but different gradient multipliers # to train two models. Model with equivalently larger learning # rate (i.e., learning_rate * gradient_multiplier) has smaller # training loss. logdir1 = os.path.join(self.get_temp_dir(), 'tmp_logs6/') logdir2 = os.path.join(self.get_temp_dir(), 'tmp_logs7/') if tf.gfile.Exists(logdir1): # For running on jenkins. tf.gfile.DeleteRecursively(logdir1) if tf.gfile.Exists(logdir2): # For running on jenkins. tf.gfile.DeleteRecursively(logdir2) multipliers = [1., 1000.] number_of_steps = 10 losses = [] learning_rate = 0.001 # First, train the model with equivalently smaller learning rate. with tf.Graph().as_default(): tf.set_random_seed(0) train_op = self.create_train_op( learning_rate=learning_rate, gradient_multiplier=multipliers[0]) loss = slim.learning.train( train_op, logdir1, number_of_steps=number_of_steps) losses.append(loss) self.assertGreater(loss, .5) # Second, train the model with equivalently larger learning rate. with tf.Graph().as_default(): tf.set_random_seed(0) train_op = self.create_train_op( learning_rate=learning_rate, gradient_multiplier=multipliers[1]) loss = slim.learning.train( train_op, logdir2, number_of_steps=number_of_steps) losses.append(loss) self.assertIsNotNone(loss) self.assertLess(loss, .5) # The loss of the model trained with larger learning rate should # be smaller. self.assertGreater(losses[0], losses[1]) if __name__ == '__main__': tf.test.main()
36.903643
80
0.698774
4,113
31,405
5.086069
0.089959
0.028778
0.030116
0.024475
0.782638
0.770448
0.751757
0.728907
0.714327
0.703762
0
0.020472
0.1943
31,405
850
81
36.947059
0.806268
0.089922
0
0.69967
0
0
0.009014
0
0
0
0
0
0.127063
1
0.067657
false
0.00165
0.011551
0.00165
0.09736
0.00165
0
0
0
null
0
0
0
0
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
5
8abadaf314b0ef1eead3d9e63d37881dea39ff02
80,278
py
Python
src/wrf/computation.py
khallock/wrf-python
9c5825c101722e7eddece2ca13cc8e9d9f96a21e
[ "Apache-2.0" ]
1
2018-10-30T18:06:26.000Z
2018-10-30T18:06:26.000Z
src/wrf/computation.py
mostamndi/wrf-python
3806bcdd01b31fa67da980eafefa0d1245faf6a6
[ "Apache-2.0" ]
null
null
null
src/wrf/computation.py
mostamndi/wrf-python
3806bcdd01b31fa67da980eafefa0d1245faf6a6
[ "Apache-2.0" ]
null
null
null
from __future__ import (absolute_import, division, print_function) import numpy as np import numpy.ma as ma from .constants import default_fill from .extension import (_interpz3d, _interp2dxy, _interp1d, _slp, _tk, _td, _rh, _uvmet, _smooth2d, _cape, _cloudfrac, _ctt, _dbz, _srhel, _udhel, _avo, _pvo, _eth, _wetbulb, _tv, _omega, _pw) from .decorators import convert_units from .metadecorators import (set_alg_metadata, set_uvmet_alg_metadata, set_interp_metadata, set_cape_alg_metadata, set_cloudfrac_alg_metadata, set_smooth_metdata) from .interputils import get_xy @set_interp_metadata("xy") def xy(field, pivot_point=None, angle=None, start_point=None, end_point=None, meta=True): """Return the x,y points for a line within a two-dimensional grid. This function is primarily used to obtain the x,y points when making a cross section. Args: field (:class:`xarray.DataArray` or :class:`numpy.ndarray`): A field with at least two dimensions. pivot_point (:obj:`tuple` or :obj:`list`, optional): A :obj:`tuple` or :obj:`list` with two entries, in the form of [x, y] (or [west_east, south_north]), which indicates the x,y location through which the plane will pass. Must also specify `angle`. angle (:obj:`float`, optional): Only valid for cross sections where a plane will be plotted through a given point on the model domain. 0.0 represents a S-N cross section. 90.0 is a W-E cross section. start_point (:obj:`tuple` or :obj:`list`, optional): A :obj:`tuple` or :obj:`list` with two entries, in the form of [x, y] (or [west_east, south_north]), which indicates the start x,y location through which the plane will pass. end_point (:obj:`tuple` or :obj:`list`, optional): A :obj:`tuple` or :obj:`list` with two entries, in the form of [x, y] (or [west_east, south_north]), which indicates the end x,y location through which the plane will pass. meta (:obj:`bool`, optional): Set to False to disable metadata and return :class:`numpy.ndarray` instead of :class:`xarray.DataArray`. Default is True. Returns: :class:`xarray.DataArray` or :class:`numpy.ndarray`: An array of x,y points, which has shape num_points x 2. If xarray is enabled and the *meta* parameter is True, then the result will be a :class:`xarray.DataArray` object. Otherwise, the result will be a :class:`numpy.ndarray` object with no metadata. Examples: Example 1: Using Pivot Point and Angle .. code-block:: python from wrf import getvar, xy from netCDF4 import Dataset wrfnc = Dataset("wrfout_d02_2010-06-13_21:00:00") field = wrf.getvar(wrfnc, "slp") # Use the center of the grid pivot = (field.shape[-1]/2.0, field.shape[-2]/2.0) # West-East angle = 90.0 xy_points = xy(field, pivot_point=pivot, angle=angle) Example 2: Using Start Point and End Point .. code-block:: python from wrf import getvar, xy from netCDF4 import Dataset wrfnc = Dataset("wrfout_d02_2010-06-13_21:00:00") field = wrf.getvar(wrfnc, "slp") # Make a diagonal of lower left to upper right start = (0, 0) end = (-1, -1) xy_points = xy(field, start_point=start, end_point=end) """ return get_xy(field, pivot_point, angle, start_point, end_point) @set_interp_metadata("1d") def interp1d(field, z_in, z_out, missing=default_fill(np.float64), meta=True): """Return the linear interpolation of a one-dimensional variable. This function is typically used to interpolate a variable in a vertical column, but the coordinate system need not be a vertical coordinate system. Multiple interpolation points may be specified in the *z_out* parameter. Args: field (:class:`xarray.DataArray` or :class:`numpy.ndarray`): A one-dimensional field. Metadata for *field* is only copied to the output if *field* is a :class:`xarray.DataArray` object. z_in (:class:`xarray.DataArray` or :class:`numpy.ndarray`): The one-dimensional coordinates associated with *field* (usually the vertical coordinates, either height or pressure). z_out (:class:`xarray.DataArray`, :class:`numpy.ndarray`): A one-dimensional array of *z_in* coordinate points to interpolate to. Must be the same type as *z_in*. missing (:obj:`float`, optional): The fill value to use for the output. Default is :data:`wrf.default_fill(np.float64)`. meta (:obj:`bool`, optional): Set to False to disable metadata and return :class:`numpy.ndarray` instead of :class:`xarray.DataArray`. Default is True. Warning: The input arrays must not contain any missing/fill values or :data:`numpy.nan` values. Returns: :class:`xarray.DataArray` or :class:`numpy.ndarray`: An array with the same dimensionality as *z_out* containing the interpolated values. If xarray is enabled and the *meta* parameter is True, then the result will be a :class:`xarray.DataArray` object. Otherwise, the result will be a :class:`numpy.ndarray` object with no metadata. Examples: Example 1: Calculate the 850 hPa and 500 hPa values at location \ x,y = (100,200) .. code-block:: python import numpy as np from wrf import getvar, interp1d from netCDF4 import Dataset wrfnc = Dataset("wrfout_d02_2010-06-13_21:00:00") # Get a 1D vertical column for pressure at location x,y = 100,200 p_1d = wrf.getvar(wrfnc, "pres", units="hPa")[:,200,100] # Get a 1D vertical column for height at location 100,200 ht_1d = wrf.getvar(wrfnc, "z", units="dm")[:,200,100] # Want the heights (in decameters) at 850, 500 hPa levels = np.asarray([850., 500.]) # Get the 850 hPa and 500 hPa values at location 100,200. interp_vals = interp1d(p_1d, ht_1d, levels) """ return _interp1d(field, z_in, z_out, missing) @set_interp_metadata("2dxy") def interp2dxy(field3d, xy, meta=True): """Return a cross section for a three-dimensional field. The returned array will hold the vertical cross section data along the line described by *xy*. This method differs from :meth:`wrf.vertcross` in that it will return all vertical levels found in *field3d*. :meth:`wrf.vertcross` includes an additional interpolation to set the output to a fixed number of vertical levels. Also, a :class:`numpy.ma.MaskedArray` is not created and this routine should be considered as low-level access to the underlying Fortran routine. See Also: :meth:`wrf.xy`, :meth:`wrf.vertcross` Args: field3d (:class:`xarray.DataArray` or :class:`numpy.ndarray`): The array to interpolate with at least three dimensions, whose rightmost dimensions are nz x ny x nx. xy (:class:`xarray.DataArray` or :class:`numpy.ndarray`): An array of one less dimension than *field3d*, whose rightmost dimensions are nxy x 2. This array holds the x,y pairs of a line across the model domain. The requested vertical cross section will be extracted from *field3d* along this line. meta (:obj:`bool`, optional): Set to False to disable metadata and return :class:`numpy.ndarray` instead of :class:`xarray.DataArray`. Default is True. Warning: The input arrays must not contain any missing/fill values or :data:`numpy.nan` values. Returns: :class:`xarray.DataArray` or :class:`numpy.ndarray`: An array containing the vertical cross section along the line *xy*. The returned dimensions will be the same as *xy*, but with the rightmost dimensions being nz x nxy. If xarray is enabled and the *meta* parameter is True, then the result will be a :class:`xarray.DataArray` object. Otherwise, the result will be a :class:`numpy.ndarray` object with no metadata. Examples: Example 1: Calculate the vertical cross section for RH for a diagonal line from the lower left to the upper right of the domain. .. code-block:: python from wrf import getvar, xy, interp2dxy from netCDF4 import Dataset wrfnc = Dataset("wrfout_d02_2010-06-13_21:00:00") rh = getvar(wrfnc, "rh") start = (0, 0) end = (-1, -1) xy_line = xy(rh, start_point=start, end_point=end) vert_cross = interp2dxy(rh, xy_line) """ return _interp2dxy(field3d, xy) @set_interp_metadata("horiz") def interpz3d(field3d, vert, desiredlev, missing=default_fill(np.float64), meta=True): """Return the field interpolated to a specified pressure or height level. This function is roughly equivalent to :meth:`interplevel`, but does not handle multi-product diagnostics (uvmet, cape_3d, etc) that contain an additional leftmost dimension for the product type. Also, a :class:`numpy.ma.MaskedArray` is not created and this routine should be considered as low-level access to the underlying Fortran routine. See Also: :meth:`wrf.interplevel` Args: field3d (:class:`xarray.DataArray` or :class:`numpy.ndarray`): A three-dimensional field to interpolate, with the rightmost dimensions of nz x ny x nx. vert (:class:`xarray.DataArray` or :class:`numpy.ndarray`): A three-dimensional array for the vertical coordinate, typically pressure or height. This array must have the same dimensionality as *field3d*. desiredlev (:obj:`float`): The desired vertical level. Must be in the same units as the *vert* parameter. missing (:obj:`float`): The fill value to use for the output. Default is :data:`wrf.default_fill(numpy.float64)`. meta (:obj:`bool`): Set to False to disable metadata and return :class:`numpy.ndarray` instead of :class:`xarray.DataArray`. Default is True. Warning: The input arrays must not contain any missing/fill values or :data:`numpy.nan` values. Returns: :class:`xarray.DataArray` or :class:`numpy.ndarray`: The interpolated variable. If xarray is enabled and the *meta* parameter is True, then the result will be an :class:`xarray.DataArray` object. Otherwise, the result will be a :class:`numpy.ndarray` object with no metadata. Example: Example 1: Interpolate Geopotential Height to 500 hPa .. code-block:: python from netCDF4 import Dataset from wrf import getvar, interpz3d wrfin = Dataset("wrfout_d02_2010-06-13_21:00:00") p = getvar(wrfin, "pressure") ht = getvar(wrfin, "z", units="dm") ht_500 = interpz3d(ht, p, 500.0) """ return _interpz3d(field3d, vert, desiredlev, missing) @set_alg_metadata(2, "pres", refvarndims=3, description="sea level pressure") @convert_units("pressure", "hpa") def slp(height, tkel, pres, qv, meta=True, units="hPa"): """Return the sea level pressure. This is the raw computational algorithm and does not extract any variables from WRF output files. Use :meth:`wrf.getvar` to both extract and compute diagnostic variables. Args: height (:class:`xarray.DataArray` or :class:`numpy.ndarray`): Geopotential height in [m] with the rightmost dimensions being bottom_top x south_north x west_east. tkel (:class:`xarray.DataArray` or :class:`numpy.ndarray`): Temperature in [K] with same dimensionality as *height*. pres (:class:`xarray.DataArray` or :class:`numpy.ndarray`): Full pressure (perturbation + base state pressure) in [Pa] with the same dimensionality as *height*. Note: This variable must be supplied as a :class:`xarray.DataArray` in order to copy the dimension names to the output. Otherwise, default names will be used. qv (:class:`xarray.DataArray` or :class:`numpy.ndarray`): Water vapor mixing ratio in [kg/kg] with the same dimensionality as *height*. meta (:obj:`bool`): Set to False to disable metadata and return :class:`numpy.ndarray` instead of :class:`xarray.DataArray`. Default is True. units (:obj:`str`): The desired units. Refer to the :meth:`getvar` product table for a list of available units for 'slp'. Default is 'hPa'. Warning: The input arrays must not contain any missing/fill values or :data:`numpy.nan` values. Returns: :class:`xarray.DataArray` or :class:`numpy.ndarray`: The sea level pressure. If xarray is enabled and the *meta* parameter is True, then the result will be an :class:`xarray.DataArray` object. Otherwise, the result will be a :class:`numpy.ndarray` object with no metadata. See Also: :meth:`wrf.getvar`, :meth:`wrf.temp`, :meth:`wrf.tk` """ return _slp(height, tkel, pres, qv) @set_alg_metadata(3, "pres", description="temperature") @convert_units("temp", "k") def tk(pres, theta, meta=True, units="K"): """Return the temperature. This is the raw computational algorithm and does not extract any variables from WRF output files. Use :meth:`wrf.getvar` to both extract and compute diagnostic variables. Args: pres (:class:`xarray.DataArray` or :class:`numpy.ndarray`): Full pressure (perturbation + base state pressure) in [Pa] with at least three dimensions. The rightmost dimensions are bottom_top x south_north x west_east. Note: This variable must be supplied as a :class:`xarray.DataArray` in order to copy the dimension names to the output. Otherwise, default names will be used. theta (:class:`xarray.DataArray` or :class:`numpy.ndarray`): Potential temperature (perturbation plus reference temperature) in [K] with the same dimensionality as *pres*. meta (:obj:`bool`): Set to False to disable metadata and return :class:`numpy.ndarray` instead of :class:`xarray.DataArray`. Default is True. units (:obj:`str`): The desired units. Refer to the :meth:`getvar` product table for a list of available units for 'temp'. Default is 'K'. Warning: The input arrays must not contain any missing/fill values or :data:`numpy.nan` values. Returns: :class:`xarray.DataArray` or :class:`numpy.ndarray`: The temperature in the specified units. If xarray is enabled and the *meta* parameter is True, then the result will be an :class:`xarray.DataArray` object. Otherwise, the result will be a :class:`numpy.ndarray` object with no metadata. See Also: :meth:`wrf.getvar`, :meth:`wrf.tk` """ return _tk(pres, theta) @set_alg_metadata(3, "pres", description="dew point temperature") @convert_units("temp", "c") def td(pres, qv, meta=True, units="degC"): """Return the dewpoint temperature. This is the raw computational algorithm and does not extract any variables from WRF output files. Use :meth:`wrf.getvar` to both extract and compute diagnostic variables. Args: pres (:class:`xarray.DataArray` or :class:`numpy.ndarray`): Full pressure (perturbation + base state pressure) in [hPa] with at least three dimensions. The rightmost dimensions are bottom_top x south_north x west_east. Note: This variable must be supplied as a :class:`xarray.DataArray` in order to copy the dimension names to the output. Otherwise, default names will be used. qv (:class:`xarray.DataArray` or :class:`numpy.ndarray`): Water vapor mixing ratio in [kg/kg] with the same dimensionality as *pres*. meta (:obj:`bool`): Set to False to disable metadata and return :class:`numpy.ndarray` instead of :class:`xarray.DataArray`. Default is True. units (:obj:`str`): The desired units. Refer to the :meth:`getvar` product table for a list of available units for 'dp'. Default is 'degC'. Warning: The input arrays must not contain any missing/fill values or :data:`numpy.nan` values. Returns: :class:`xarray.DataArray` or :class:`numpy.ndarray`: The dewpoint temperature. If xarray is enabled and the *meta* parameter is True, then the result will be an :class:`xarray.DataArray` object. Otherwise, the result will be a :class:`numpy.ndarray` object with no metadata. See Also: :meth:`wrf.getvar`, :meth:`wrf.rh` """ return _td(pres, qv) @set_alg_metadata(3, "pres", description="relative humidity", units=None) def rh(qv, pres, tkel, meta=True): """Return the relative humidity. This is the raw computational algorithm and does not extract any variables from WRF output files. Use :meth:`wrf.getvar` to both extract and compute diagnostic variables. Args: qv (:class:`xarray.DataArray` or :class:`numpy.ndarray`): Water vapor mixing ratio in [kg/kg] with at least three dimensions. The rightmost dimensions are bottom_top x south_north x west_east. pres (:class:`xarray.DataArray` or :class:`numpy.ndarray`): Full pressure (perturbation + base state pressure) in [Pa] with the same dimensionality as *qv*. Note: This variable must be supplied as a :class:`xarray.DataArray` in order to copy the dimension names to the output. Otherwise, default names will be used. tkel (:class:`xarray.DataArray` or :class:`numpy.ndarray`): Temperature in [K] with same dimensionality as *qv*. Note: This variable must be supplied as a :class:`xarray.DataArray` in order to copy the dimension names to the output. Otherwise, default names will be used. meta (:obj:`bool`): Set to False to disable metadata and return :class:`numpy.ndarray` instead of :class:`xarray.DataArray`. Default is True. Warning: The input arrays must not contain any missing/fill values or :data:`numpy.nan` values. Returns: :class:`xarray.DataArray` or :class:`numpy.ndarray`: The relative humidity. If xarray is enabled and the *meta* parameter is True, then the result will be an :class:`xarray.DataArray` object. Otherwise, the result will be a :class:`numpy.ndarray` object with no metadata. See Also: :meth:`wrf.getvar`, :meth:`wrf.td` """ return _rh(qv, pres, tkel) @set_uvmet_alg_metadata(latarg="lat", windarg="u") @convert_units("wind", "m s-1") def uvmet(u, v, lat, lon, cen_long, cone, meta=True, units="m s-1"): """Return the u,v components of the wind rotated to earth coordinates. The leftmost dimension of the returned array represents two different quantities: - return_val[0,...] will contain U - return_val[1,...] will contain V This is the raw computational algorithm and does not extract any variables from WRF output files. Use :meth:`wrf.getvar` to both extract and compute diagnostic variables. Args: u (:class:`xarray.DataArray` or :class:`numpy.ndarray`): The u component of the wind [m s-1]. This variable can be staggered or unstaggered, but must be at least two dimensions. If staggered, the rightmost dimensions are south_north x west east. If staggered, the rightmost dimensions are south_north x west_east_stag. Note: This variable must be supplied as a :class:`xarray.DataArray` in order to copy the dimension names to the output. Otherwise, default names will be used. v (:class:`xarray.DataArray` or :class:`numpy.ndarray`): The v component of the wind [m s-1]. This variable can be staggered or unstaggered, but must be at least two dimensions. If staggered, the rightmost dimensions are south_north x west east. If staggered, the rightmost dimensions are south_north_stag x west_east. lat (:class:`xarray.DataArray` or :class:`numpy.ndarray`): The latitude array. This array can either be: - two-dimensional of size south_north x west_east. - multi-dimensional with the same number of dimensions as *u* and *v*, but with rightmost dimensions south_north x west_east and the same leftmost dimensions as *u* and *v* - multi-dimensional with one fewer dimensions as *u* and *v*, with rightmost dimensions south_north x west_east and the same leftmost dimensions as *u* and *v*, minus the third-from-the-right dimension of *u* and *v*. Note: This variable must also be supplied as a :class:`xarray.DataArray` in order to copy the dimension names to the output. Otherwise, default names will be used. lon (:class:`xarray.DataArray` or :class:`numpy.ndarray`): The longitude array. This array can either be: - two-dimensional of size south_north x west_east. - multi-dimensional with the same number of dimensions as *u* and *v*, but with rightmost dimensions south_north x west_east and the same leftmost dimensions as *u* and *v* - multi-dimensional with one fewer dimensions as *u* and *v*, with rightmost dimensions south_north x west_east and the same leftmost dimensions as *u* and *v*, minus the third-from-the-right dimension of *u* and *v*. cen_long (:obj:`float`): The standard longitude for the map projection. cone (:obj:`float`): The cone factor used for the map project. If the projection is not a conic projection, the *cone* is simply 1.0. For conic projections, the cone factor is given by: .. code-block:: python if((fabs(true_lat1 - true_lat2) > 0.1) and (fabs(true_lat2 - 90.) > 0.1)): cone = (log(cos(true_lat1*radians_per_degree)) - log(cos(true_lat2*radians_per_degree))) cone = (cone / (log(tan((45.-fabs(true_lat1/2.))*radians_per_degree)) - log(tan((45.-fabs(true_lat2/2.))*radians_per_degree)))) else: cone = sin(fabs(true_lat1)*radians_per_degree) meta (:obj:`bool`): Set to False to disable metadata and return :class:`numpy.ndarray` instead of :class:`xarray.DataArray`. Default is True. units (:obj:`str`): The desired units. Refer to the :meth:`getvar` product table for a list of available units for 'uvmet'. Default is 'm s-1'. Warning: The input arrays must not contain any missing/fill values or :data:`numpy.nan` values. Returns: :class:`xarray.DataArray` or :class:`numpy.ndarray`: The u,v components of the wind rotated to earth coordinates. The leftmost dimension size is 2, for u and v. If xarray is enabled and the *meta* parameter is True, then the result will be an :class:`xarray.DataArray` object. Otherwise, the result will be a :class:`numpy.ndarray` object with no metadata. See Also: :meth:`wrf.getvar` """ return _uvmet(u, v, lat, lon, cen_long, cone) @set_smooth_metdata() def smooth2d(field, passes, cenweight=2.0, meta=True): """Return the field smoothed. The smoothing kernel applied is: .. math:: \\frac{1}{4 + cenweight} * \\begin{bmatrix} 0 & 1 & 0 \\\\ 1 & cenweight & 1 \\\\ 0 & 1 & 0 \\end{bmatrix} Data values along the borders are left unchanged. This routine does not modify the original data supplied by the *field* parameter.. If you need more general purpose multidimensional filtering tools, try the :meth:`scipy.ndimage.convolve` method. Args: field (:class:`xarray.DataArray` or :class:`numpy.ndarray`): The field to smooth, which must be at least two dimensions. Missing/fill values will be ignored as long as the type is either a :class:`numpy.ma.MaskedArray or a :class:`xarray.DataArray` with a *_FillValue* attribute. passes (:obj:`int`): The number of smoothing passes. cenweight (:obj:`float`, optional): The weight to apply to the center of the smoothing kernel. Default is 2.0. meta (:obj:`bool`): Set to False to disable metadata and return :class:`numpy.ndarray` instead of :class:`xarray.DataArray`. Default is True. Returns: :class:`xarray.DataArray`, :class:`numpy.ma.MaskedArray` or \ :class:`numpy.ndarray`): The smoothed field. If xarray is enabled and the *meta* parameter is True, then the result will be an :class:`xarray.DataArray` object. Otherwise, the result will be either a :class:`numpy.ndarray` or a :class:`numpy.ma.MaskedArray` depending on the type for *field*. See Also: :meth:`scipy.ndimage.convolve` """ return _smooth2d(field, passes, cenweight) @set_cape_alg_metadata(is2d=True, copyarg="pres_hpa") def cape_2d(pres_hpa, tkel, qv, height, terrain, psfc_hpa, ter_follow, missing=default_fill(np.float64), meta=True): """Return the two-dimensional MCAPE, MCIN, LCL, and LFC. This function calculates the maximum convective available potential energy (MCAPE), maximum convective inhibition (MCIN), lifted condensation level (LCL), and level of free convection (LFC). This function uses the RIP [Read/Interpolate/plot] code to calculate potential energy (CAPE) and convective inhibition (CIN) [J kg-1] only for the parcel with max theta-e in the column (i.e. something akin to Colman's MCAPE). CAPE is defined as the accumulated buoyant energy from the level of free convection (LFC) to the equilibrium level (EL). CIN is defined as the accumulated negative buoyant energy from the parcel starting point to the LFC. The cape_2d algorithm works by first finding the maximum theta-e height level in the lowest 3000 m. A parcel with a depth of 500 m is then calculated and centered over this maximum theta-e height level. The parcel's moisture and temperature characteristics are calculated by averaging over the depth of this 500 m parcel. This 'maximum' parcel is then used to compute MCAPE, MCIN, LCL and LFC. The leftmost dimension of the returned array represents four different quantities: - return_val[0,...] will contain MCAPE [J kg-1] - return_val[1,...] will contain MCIN [J kg-1] - return_val[2,...] will contain LCL [m] - return_val[3,...] will contain LFC [m] This function also supports computing MCAPE along a single vertical column. In this mode, the *pres_hpa*, *tkel*, *qv* and *height* arguments must be one-dimensional vertical columns, and the *terrain* and *psfc_hpa* arguments must be scalar values (:obj:`float`, :class:`numpy.float32` or :class:`numpy.float64`). This is the raw computational algorithm and does not extract any variables from WRF output files. Use :meth:`wrf.getvar` to both extract and compute diagnostic variables. Args: pres_hpa (:class:`xarray.DataArray` or :class:`numpy.ndarray`): Full pressure (perturbation + base state pressure) in [hPa] with at least three dimensions. The rightmost dimensions can be top_bottom x south_north x west_east or bottom_top x south_north x west_east. When operating on only a single column of values, the vertical column can be bottom_top or top_bottom. In this case, *terrain* and *psfc_hpa* must be scalars. Note: The units for *pres_hpa* are [hPa]. Note: This variable must be supplied as a :class:`xarray.DataArray` in order to copy the dimension names to the output. Otherwise, default names will be used. tkel (:class:`xarray.DataArray` or :class:`numpy.ndarray`): Temperature in [K] with same dimensionality as *pres_hpa*. qv (:class:`xarray.DataArray` or :class:`numpy.ndarray`): Water vapor mixing ratio in [kg/kg] with the same dimensionality as *pres_hpa*. height (:class:`xarray.DataArray` or :class:`numpy.ndarray`): Geopotential height in [m] with the same dimensionality as *pres_hpa*. terrain (:class:`xarray.DataArray` or :class:`numpy.ndarray`): Terrain height in [m]. This is at least a two-dimensional array with the same dimensionality as *pres_hpa*, excluding the vertical (bottom_top/top_bottom) dimension. When operating on a single vertical column, this argument must be a scalar (:obj:`float`, :class:`numpy.float32`, or :class:`numpy.float64`). psfc_hpa (:class:`xarray.DataArray` or :class:`numpy.ndarray`): The surface pressure in [hPa]. This is at least a two-dimensional array with the same dimensionality as *pres_hpa*, excluding the vertical (bottom_top/top_bottom) dimension. When operating on a singlevertical column, this argument must be a scalar (:obj:`float`, :class:`numpy.float32`, or :class:`numpy.float64`). Note: The units for *psfc_hpa* are [hPa]. ter_follow (:obj:`bool`): A boolean that should be set to True if the data uses terrain following coordinates (WRF data). Set to False for pressure level data. missing (:obj:`float`, optional): The fill value to use for the output. Default is :data:`wrf.default_fill(numpy.float64)`. meta (:obj:`bool`): Set to False to disable metadata and return :class:`numpy.ndarray` instead of :class:`xarray.DataArray`. Default is True. Warning: The input arrays must not contain any missing/fill values or :data:`numpy.nan` values. Returns: :class:`xarray.DataArray` or :class:`numpy.ndarray`: The cape, cin, lcl, and lfc values as an array whose leftmost dimension is 4 (0=CAPE, 1=CIN, 2=LCL, 3=LFC) . If xarray is enabled and the *meta* parameter is True, then the result will be an :class:`xarray.DataArray` object. Otherwise, the result will be a :class:`numpy.ndarray` object with no metadata. See Also: :meth:`wrf.getvar`, :meth:`wrf.cape_3d` """ if isinstance(ter_follow, bool): ter_follow = 1 if ter_follow else 0 i3dflag = 0 cape_cin = _cape(pres_hpa, tkel, qv, height, terrain, psfc_hpa, missing, i3dflag, ter_follow) left_dims = cape_cin.shape[1:-3] right_dims = cape_cin.shape[-2:] resdim = (4,) + left_dims + right_dims # Make a new output array for the result result = np.zeros(resdim, cape_cin.dtype) # Cape 2D output is not flipped in the vertical, so index from the # end result[0,...,:,:] = cape_cin[0,...,-1,:,:] result[1,...,:,:] = cape_cin[1,...,-1,:,:] result[2,...,:,:] = cape_cin[1,...,-2,:,:] result[3,...,:,:] = cape_cin[1,...,-3,:,:] return ma.masked_values(result, missing) @set_cape_alg_metadata(is2d=False, copyarg="pres_hpa") def cape_3d(pres_hpa, tkel, qv, height, terrain, psfc_hpa, ter_follow, missing=default_fill(np.float64), meta=True): """Return the three-dimensional CAPE and CIN. This function calculates the maximum convective available potential energy (CAPE) and maximum convective inhibition (CIN). This function uses the RIP [Read/Interpolate/plot] code to calculate potential energy (CAPE) and convective inhibition (CIN) [J kg-1] for every grid point in the entire 3D domain (treating each grid point as a parcel). The leftmost dimension of the returned array represents two different quantities: - return_val[0,...] will contain CAPE [J kg-1] - return_val[1,...] will contain CIN [J kg-1] This function also supports computing CAPE along a single vertical column. In this mode, the *pres_hpa*, *tkel*, *qv* and *height* arguments must be one-dimensional vertical columns, and the *terrain* and *psfc_hpa* arguments must be scalar values (:obj:`float`, :class:`numpy.float32` or :class:`numpy.float64`). This is the raw computational algorithm and does not extract any variables from WRF output files. Use :meth:`wrf.getvar` to both extract and compute diagnostic variables. Args: pres_hpa (:class:`xarray.DataArray` or :class:`numpy.ndarray`): Full pressure (perturbation + base state pressure) in [hPa] with at least three dimensions when operating on a grid of values. The rightmost dimensions can be top_bottom x south_north x west_east or bottom_top x south_north x west_east. When operating on only a single column of values, the vertical column can be bottom_top or top_bottom. In this case, *terrain* and *psfc_hpa* must be scalars. Note: The units for *pres_hpa* are [hPa]. Note: This variable must be supplied as a :class:`xarray.DataArray` in order to copy the dimension names to the output. Otherwise, default names will be used. tkel (:class:`xarray.DataArray` or :class:`numpy.ndarray`): Temperature in [K] with same dimensionality as *pres_hpa*. qv (:class:`xarray.DataArray` or :class:`numpy.ndarray`): Water vapor mixing ratio in [kg/kg] with the same dimensionality as *pres_hpa*. height (:class:`xarray.DataArray` or :class:`numpy.ndarray`): Geopotential height in [m] with the same dimensionality as *pres_hpa*. terrain (:class:`xarray.DataArray`, :class:`numpy.ndarray`, \ or a scalar): Terrain height in [m]. When operating on a grid of values, this argument is at least a two-dimensional array with the same dimensionality as *pres_hpa*, excluding the vertical (bottom_top/top_bottom) dimension. When operating on a single vertical column, this argument must be a scalar (:obj:`float`, :class:`numpy.float32`, or :class:`numpy.float64`). psfc_hpa (:class:`xarray.DataArray`, :class:`numpy.ndarray`, \ or a scalar): Surface pressure in [hPa]. When operating on a grid of values, this argument is at least a two-dimensional array with the same dimensionality as *pres_hpa*, excluding the vertical (bottom_top/top_bottom) dimension. When operating on a single vertical column, this argument must be a scalar (:obj:`float`, :class:`numpy.float32`, or :class:`numpy.float64`). Note: The units for *psfc_hpa* are [hPa]. ter_follow (:obj:`bool`): A boolean that should be set to True if the data uses terrain following coordinates (WRF data). Set to False for pressure level data. missing (:obj:`float`, optional): The fill value to use for the output. Default is :data:`wrf.default_fill(numpy.float64)`. meta (:obj:`bool`): Set to False to disable metadata and return :class:`numpy.ndarray` instead of :class:`xarray.DataArray`. Default is True. Warning: The input arrays must not contain any missing/fill values or :data:`numpy.nan` values. Returns: :class:`xarray.DataArray` or :class:`numpy.ndarray`: The CAPE and CIN as an array whose leftmost dimension is 2 (0=CAPE, 1=CIN). If xarray is enabled and the *meta* parameter is True, then the result will be an :class:`xarray.DataArray` object. Otherwise, the result will be a :class:`numpy.ndarray` object with no metadata. See Also: :meth:`wrf.getvar`, :meth:`wrf.cape_2d` """ if isinstance(ter_follow, bool): ter_follow = 1 if ter_follow else 0 i3dflag = 1 cape_cin = _cape(pres_hpa, tkel, qv, height, terrain, psfc_hpa, missing, i3dflag, ter_follow) return ma.masked_values(cape_cin, missing) @set_cloudfrac_alg_metadata(copyarg="vert") def cloudfrac(vert, relh, vert_inc_w_height, low_thresh, mid_thresh, high_thresh, missing=default_fill(np.float64), meta=True): """Return the cloud fraction. The leftmost dimension of the returned array represents three different quantities: - return_val[0,...] will contain LOW level cloud fraction - return_val[1,...] will contain MID level cloud fraction - return_val[2,...] will contain HIGH level cloud fraction The *low_thresh*, *mid_thresh*, and *high_threshold* paramters specify the low, mid, and high cloud levels in the same units as *vert*. In mountainous regions, there is a possibility that the lowest WRF level will be higher than the low_cloud or mid_cloud threshold. When this happens, a fill value will be used in the output at that location. This is the raw computational algorithm and does not extract any variables from WRF output files. Use :meth:`wrf.getvar` to both extract and compute diagnostic variables. Args: vert (:class:`xarray.DataArray` or :class:`numpy.ndarray`): The vertical coordinate variable (usually pressure or height) with the rightmost dimensions as bottom_top x south_north x west_east Note: This variable must be supplied as a :class:`xarray.DataArray` in order to copy the dimension names to the output. Otherwise, default names will be used. relh (:class:`xarray.DataArray` or :class:`numpy.ndarray`): Relative humidity with the same dimensionality as *vert* vert_inc_w_height (:obj:`int`): Set to 1 if the vertical coordinate values increase with height (height values). Set to 0 if the vertical coordinate values decrease with height (pressure values). low_thresh (:obj:`float`): The bottom vertical threshold for what is considered a low cloud. mid_thresh (:obj:`float`): The bottom vertical threshold for what is considered a mid level cloud. high_thresh (:obj:`float`): The bottom vertical threshold for what is considered a high cloud. missing (:obj:`float:`, optional): The fill value to use for areas where the surface is higher than the cloud threshold level (e.g. mountains). Default is :data:`wrf.default_fill(numpy.float64)`. meta (:obj:`bool`): Set to False to disable metadata and return :class:`numpy.ndarray` instead of :class:`xarray.DataArray`. Default is True. Warning: The input arrays must not contain any missing/fill values or :data:`numpy.nan` values. Returns: :class:`xarray.DataArray` or :class:`numpy.ndarray`: The cloud fraction array whose leftmost dimension is 3 (LOW=0, MID=1, HIGH=2). If xarray is enabled and the *meta* parameter is True, then the result will be an :class:`xarray.DataArray` object. Otherwise, the result will be a :class:`numpy.ndarray` object with no metadata. See Also: :meth:`wrf.getvar`, :meth:`wrf.rh` """ cfrac = _cloudfrac(vert, relh, vert_inc_w_height, low_thresh, mid_thresh, high_thresh, missing) return ma.masked_values(cfrac, missing) @set_alg_metadata(2, "pres_hpa", refvarndims=3, description="cloud top temperature") @convert_units("temp", "c") def ctt(pres_hpa, tkel, qv, qcld, height, terrain, qice=None, fill_nocloud=False, missing=default_fill(np.float64), opt_thresh=1.0, meta=True, units="degC"): """Return the cloud top temperature. This is the raw computational algorithm and does not extract any variables from WRF output files. Use :meth:`wrf.getvar` to both extract and compute diagnostic variables. Args: pres_hpa (:class:`xarray.DataArray` or :class:`numpy.ndarray`): Full pressure (perturbation + base state pressure) in [hPa], with the rightmost dimensions as bottom_top x south_north x west_east Note: The units for *psfc_hpa* are [hPa]. Note: This variable must be supplied as a :class:`xarray.DataArray` in order to copy the dimension names to the output. Otherwise, default names will be used. tkel (:class:`xarray.DataArray` or :class:`numpy.ndarray`): Temperature in [K] with same dimensionality as *pres_hpa*. qv (:class:`xarray.DataArray` or :class:`numpy.ndarray`): Water vapor mixing ratio in [kg/kg] with the same dimensionality as *pres_hpa*. qcld (:class:`xarray.DataArray` or :class:`numpy.ndarray`): Cloud water vapor mixing ratio in [kg/kg] with the same dimensionality as *pres_hpa*. height (:class:`xarray.DataArray` or :class:`numpy.ndarray`): Geopotential height in [m] with the same dimensionality as *pres_hpa*. terrain (:class:`xarray.DataArray` or :class:`numpy.ndarray`): Terrain height in [m]. This is at least a two-dimensional array with the same dimensionality as *pres_hpa*, excluding the vertical (bottom_top/top_bottom) dimension. qice (:class:`xarray.DataArray` or :class:`numpy.ndarray`, optional): Ice mixing ratio in [kg/kg] with the same dimensionality as *pres_hpa*. fill_nocloud (:obj:`bool`, optional): Set to True to use fill values in regions where clouds are not detected (optical depth less than 1). Otherwise, the output will contain the surface temperature for areas without clouds. Default is False. missing (:obj:`float`, optional): The fill value to use for areas where no clouds are detected. Only used if *fill_nocloud* is True. Default is :data:`wrf.default_fill(numpy.float64)`. opt_thresh (:obj:`float`, optional): The amount of optical depth (integrated from top down) required to trigger a cloud top temperature calculation. The cloud top temperature is calculated at the vertical level where this threshold is met. Vertical columns with less than this threshold will be treated as cloud free areas. In general, the larger the value is for this threshold, the lower the altitude will be for the cloud top temperature calculation, and therefore higher cloud top temperature values. Default is 1.0, which should be sufficient for most users. meta (:obj:`bool`): Set to False to disable metadata and return :class:`numpy.ndarray` instead of :class:`xarray.DataArray`. Default is True. units (:obj:`str`): The desired units. Refer to the :meth:`getvar` product table for a list of available units for 'ctt'. Default is 'degC'. Warning: The input arrays must not contain any missing/fill values or :data:`numpy.nan` values. Returns: :class:`xarray.DataArray` or :class:`numpy.ndarray`: The cloud top temperature. If xarray is enabled and the *meta* parameter is True, then the result will be an :class:`xarray.DataArray` object. Otherwise, the result will be a :class:`numpy.ndarray` object with no metadata. See Also: :meth:`wrf.getvar`, :meth:`wrf.cloudfrac` """ # Qice and QCLD need to be in g/kg if qice is None: qice = np.zeros(qv.shape, qv.dtype) haveqci = 0 else: haveqci = 1 if qice.any() else 0 _fill_nocloud = 1 if fill_nocloud else 0 ctt = _ctt(pres_hpa, tkel, qice, qcld, qv, height, terrain, haveqci, _fill_nocloud, missing, opt_thresh) return ma.masked_values(ctt, missing) @set_alg_metadata(3, "pres", units="dBZ", description="radar reflectivity") def dbz(pres, tkel, qv, qr, qs=None, qg=None, use_varint=False, use_liqskin=False, meta=True): """Return the simulated radar reflectivity. This function computes equivalent reflectivity factor [dBZ] at each model grid point assuming spherical particles of constant density, with exponential size distributions. This function is based on "dbzcalc.f" in RIP. This is the raw computational algorithm and does not extract any variables from WRF output files. Use :meth:`wrf.getvar` to both extract and compute diagnostic variables. Args: pres (:class:`xarray.DataArray` or :class:`numpy.ndarray`): Full pressure (perturbation + base state pressure) in [Pa], with the rightmost dimensions as bottom_top x south_north x west_east Note: This variable must be supplied as a :class:`xarray.DataArray` in order to copy the dimension names to the output. Otherwise, default names will be used. tkel (:class:`xarray.DataArray` or :class:`numpy.ndarray`): Temperature in [K] with same dimensionality as *pres*. qv (:class:`xarray.DataArray` or :class:`numpy.ndarray`): Water vapor mixing ratio in [kg/kg] with the same dimensionality as *pres*. qr (:class:`xarray.DataArray` or :class:`numpy.ndarray`): Rain water vapor mixing ratio in [kg/kg] with the same dimensionality as *pres*. qs (:class:`xarray.DataArray` or :class:`numpy.ndarray`, optional): Snow mixing ratio in [kg/kg] with the same dimensionality as *pres*. qg (:class:`xarray.DataArray` or :class:`numpy.ndarray`, optional): Graupel mixing ratio in [kg/kg] with the same dimensionality as *pres*. use_varint (:obj:`bool`, optional): When set to False, the intercept parameters are assumed constant (as in MM5's Reisner-2 bulk microphysical scheme). When set to True, the variable intercept parameters are used as in the more recent version of Reisner-2 (based on Thompson, Rasmussen, and Manning, 2004, Monthly weather Review, Vol. 132, No. 2, pp. 519-542.). use_liqskin (:obj:`bool`, optional): When set to True, frozen particles that are at a temperature above freezing are assumed to scatter as a liquid particle. Set to False to disable. meta (:obj:`bool`): Set to False to disable metadata and return :class:`numpy.ndarray` instead of :class:`xarray.DataArray`. Default is True. Warning: The input arrays must not contain any missing/fill values or :data:`numpy.nan` values. Returns: :class:`xarray.DataArray` or :class:`numpy.ndarray`: The simulated radar reflectivity. If xarray is enabled and the *meta* parameter is True, then the result will be an :class:`xarray.DataArray` object. Otherwise, the result will be a :class:`numpy.ndarray` object with no metadata. See Also: :meth:`wrf.getvar` """ if qs is None: qs = np.zeros(qv.shape, qv.dtype) if qg is None: qg = np.zeros(qv.shape, qv.dtype) sn0 = 1 if qs.any() else 0 ivarint = 1 if use_varint else 0 iliqskin = 1 if use_liqskin else 0 return _dbz(pres, tkel, qv, qr, qs, qg, sn0, ivarint, iliqskin) @set_alg_metadata(2, "terrain", units="m2 s-2", description="storm relative helicity") def srhel(u, v, height, terrain, top=3000.0, lats=None, meta=True): """Return the storm relative helicity. This function calculates storm relative helicity from WRF ARW output. SRH (Storm Relative Helicity) is a measure of the potential for cyclonic updraft rotation in right-moving supercells, and is calculated for the lowest 1-km and 3-km layers above ground level. There is no clear threshold value for SRH when forecasting supercells, since the formation of supercells appears to be related more strongly to the deeper layer vertical shear. Larger values of 0-3 km SRH (greater than 250 m2 s-2) and 0-1 km SRH (greater than 100 m2 s-2), however, do suggest an increased threat of tornadoes with supercells. For SRH, larger values are generally better, but there are no clear "boundaries" between non-tornadic and significant tornadic supercells. This is the raw computational algorithm and does not extract any variables from WRF output files. Use :meth:`wrf.getvar` to both extract and compute diagnostic variables. Args: u (:class:`xarray.DataArray` or :class:`numpy.ndarray`): The u component of the wind that must have at least three dimensions. The rightmost dimensions are bottom_top x south_north x west_east. v (:class:`xarray.DataArray` or :class:`numpy.ndarray`): The v component of the wind with the same dimensionality as *u*. height (:class:`xarray.DataArray` or :class:`numpy.ndarray`): Geopotential height in [m] with the same dimensionality as *u*. terrain (:class:`xarray.DataArray` or :class:`numpy.ndarray`): Terrain height in [m]. This is at least a two-dimensional array with the same dimensionality as *u*, excluding the bottom_top dimension. Note: This variable must be supplied as a :class:`xarray.DataArray` in order to copy the dimension names to the output. Otherwise, default names will be used. top (:obj:`float`): The height of the layer below which helicity is calculated (meters above ground level). lats (:class:`xarray.DataArray` or :class:`numpy.ndarray`, optional): Array of latitudes. This is required if any (or all) of your domain is in the southern hemisphere. If not provided, the northern hemisphere is assumed. Default is None. meta (:obj:`bool`): Set to False to disable metadata and return :class:`numpy.ndarray` instead of :class:`xarray.DataArray`. Default is True. Warning: The input arrays must not contain any missing/fill values or :data:`numpy.nan` values. Returns: :class:`xarray.DataArray` or :class:`numpy.ndarray`: The storm relative helicity. If xarray is enabled and the *meta* parameter is True, then the result will be an :class:`xarray.DataArray` object. Otherwise, the result will be a :class:`numpy.ndarray` object with no metadata. See Also: :meth:`wrf.getvar`, :meth:`wrf.udhel` """ # u, v get swapped in vertical _u = np.ascontiguousarray(u[...,::-1,:,:]) _v = np.ascontiguousarray(v[...,::-1,:,:]) _height = np.ascontiguousarray(height[...,::-1,:,:]) if lats is None: _lats = np.ones_like(terrain) else: _lats = lats return _srhel(_u, _v, _height, terrain, _lats, top) @set_alg_metadata(2, "u", refvarndims=3, units="m2 s-2", description="updraft helicity") def udhel(zstag, mapfct, u, v, wstag, dx, dy, bottom=2000.0, top=5000.0, meta=True): """Return the updraft helicity. This function calculates updraft helicity to detect rotating updrafts. The formula follows Kain et al., 2008, Wea. and Forecasting, 931-952, but this version has controls for the limits of integration, *bottom* to *top*, in m AGL. Kain et al used 2000 to 5000 m. The expected range is 25 to 250 m-2/s-2. Keith Brewster, CAPS/Univ. of Oklahoma ; March, 2010 This is the raw computational algorithm and does not extract any variables from WRF output files. Use :meth:`wrf.getvar` to both extract and compute diagnostic variables. Args: zstag (:class:`xarray.DataArray` or :class:`numpy.ndarray`): Geopotential height in [m] that is at least three dimensions with a staggered vertical dimension. The rightmost dimensions are bottom_top_stag x south_north x west_east. mapfct (:class:`xarray.DataArray` or :class:`numpy.ndarray`): The map scale factor on the mass grid. An array of at least two dimensions, whose rightmost two dimensions must be south_north x west_east. If this array is more than two dimensions, they must be the same as *zstag*'s leftmost dimensions. u (:class:`xarray.DataArray` or :class:`numpy.ndarray`): The u component of the wind [m s-1] whose rightmost three dimensions must be bottom_top x south_north x west_east. The leftmost dimensions must be the same as zp's leftmost dimensions. Note: This variable must be supplied as a :class:`xarray.DataArray` in order to copy the dimension names to the output. Otherwise, default names will be used. v (:class:`xarray.DataArray` or :class:`numpy.ndarray`): The v component of the wind [m s-1] whose rightmost three dimensions must be bottom_top x south_north x west_east. The leftmost dimensions must be the same as *zstag*'s leftmost dimensions. wstag (:class:`xarray.DataArray` or :class:`numpy.ndarray`): The z component of the wind [m s-1] with the same dimensionality as *zstag*. dx (:obj:`float`): The distance between x grid points. dy (:obj:`float`): The distance between y grid points. bottom (:obj:`float`, optional): The bottom limit of integration. Default is 2000.0. top (:obj:`float`, optional): The upper limit of integration. Default is 5000.0. meta (:obj:`bool`): Set to False to disable metadata and return :class:`numpy.ndarray` instead of :class:`xarray.DataArray`. Default is True. Warning: The input arrays must not contain any missing/fill values or :data:`numpy.nan` values. Returns: :class:`xarray.DataArray` or :class:`numpy.ndarray`: The updraft helicity. If xarray is enabled and the *meta* parameter is True, then the result will be an :class:`xarray.DataArray` object. Otherwise, the result will be a :class:`numpy.ndarray` object with no metadata. See Also: :meth:`wrf.getvar`, :meth:`wrf.srhel` """ return _udhel(zstag, mapfct, u, v, wstag, dx, dy, bottom, top) # Requires both u an v for dimnames @set_alg_metadata(3, "ustag", units="10-5 s-1", stagdim=-1, stagsubvar="vstag", description="absolute vorticity") def avo(ustag, vstag, msfu, msfv, msfm, cor, dx, dy, meta=True): """Return the absolute vorticity. This function returns absolute vorticity [10-5 s-1], which is the sum of the relative vorticity at each grid point and the Coriolis parameter at the latitude. This is the raw computational algorithm and does not extract any variables from WRF output files. Use :meth:`wrf.getvar` to both extract and compute diagnostic variables. Args: ustag (:class:`xarray.DataArray` or :class:`numpy.ndarray`): The u component of the wind in [m s-1] that is at least three dimensions with a staggered west_east dimension. The rightmost dimensions are bottom_top x south_north x west_east_stag. Note: This variable must be supplied as a :class:`xarray.DataArray` in order to copy the dimension names to the output. Otherwise, default names will be used. vstag (:class:`xarray.DataArray` or :class:`numpy.ndarray`): The v component of the wind in [m s-1] that is at least three dimensions with a staggered south_north dimension. The rightmost dimensions are bottom_top x south_north_stag x west_east. Note: This variable must be supplied as a :class:`xarray.DataArray` in order to copy the dimension names to the output. Otherwise, default names will be used. msfu (:class:`xarray.DataArray` or :class:`numpy.ndarray`): The map scale factor on the u-grid that is at least two dimensions, whose rightmost two dimensions must be the same as *ustag*. If this array contains more than two dimensions, they must be the same as *ustag* and *vstag*'s leftmost dimensions. msfv (:class:`xarray.DataArray` or :class:`numpy.ndarray`): The map scale factor on the v-grid that is at least two dimensions, whose rightmost two dimensions must be the same as *vstag*. If this array contains more than two dimensions, they must be the same as *ustag* and *vstag*'s leftmost dimensions. msfm (:class:`xarray.DataArray` or :class:`numpy.ndarray`): The map scale factor on the mass grid that is at least two dimensions, whose rightmost two dimensions must be south_north x west_east. If this array contains more than two dimensions, they must be the same as *ustag* and *vstag*'s leftmost dimensions. cor (:class:`xarray.DataArray` or :class:`numpy.ndarray`): The Coriolis sine latitude array that is at least two dimensions, whose dimensions must be the same as *msfm*. dx (:obj:`float`): The distance between x grid points. dy (:obj:`float`): The distance between y grid points. meta (:obj:`bool`): Set to False to disable metadata and return :class:`numpy.ndarray` instead of :class:`xarray.DataArray`. Default is True. Warning: The input arrays must not contain any missing/fill values or :data:`numpy.nan` values. Returns: :class:`xarray.DataArray` or :class:`numpy.ndarray`: The absolute vorticity. If xarray is enabled and the *meta* parameter is True, then the result will be an :class:`xarray.DataArray` object. Otherwise, the result will be a :class:`numpy.ndarray` object with no metadata. See Also: :meth:`wrf.getvar`, :meth:`wrf.pvo` """ return _avo(ustag, vstag, msfu, msfv, msfm, cor, dx, dy) @set_alg_metadata(3, "theta", units="PVU", description="potential vorticity") def pvo(ustag, vstag, theta, pres, msfu, msfv, msfm, cor, dx, dy, meta=True): """Return the potential vorticity. This function calculates the potential vorticity [PVU] at each grid point. This is the raw computational algorithm and does not extract any variables from WRF output files. Use :meth:`wrf.getvar` to both extract and compute diagnostic variables. Args: ustag (:class:`xarray.DataArray` or :class:`numpy.ndarray`): The u component of the wind in [m s-1] that is at least three dimensions with a staggered west_east dimension. The rightmost dimensions are bottom_top x south_north x west_east_stag. vstag (:class:`xarray.DataArray` or :class:`numpy.ndarray`): The v component of the wind in [m s-1] that is at least three dimensions with a staggered south_north dimension. The rightmost dimensions are bottom_top x south_north_stag x west_east. theta (:class:`xarray.DataArray` or :class:`numpy.ndarray`): The potential temperature field [K] whose rightmost dimensions are bottom_top x south_north x west_east and whose leftmost dimensions are the same as *ustag*. Note: This variable must be supplied as a :class:`xarray.DataArray` in order to copy the dimension names to the output. Otherwise, default names will be used. pres (:class:`xarray.DataArray` or :class:`numpy.ndarray`): Full pressure (perturbation + base state pressure) in [Pa], with the same dimensions as *theta*. msfu (:class:`xarray.DataArray` or :class:`numpy.ndarray`): The map scale factor on the u-grid that is at least two dimensions, whose rightmost two dimensions must be the same as *ustag*. If this array contains more than two dimensions, they must be the same as *ustag* and *vstag*'s leftmost dimensions. msfv (:class:`xarray.DataArray` or :class:`numpy.ndarray`): The map scale factor on the v-grid that is at least two dimensions, whose rightmost two dimensions must be the same as *vstag*. If this array contains more than two dimensions, they must be the same as *ustag* and *vstag*'s leftmost dimensions. msfm (:class:`xarray.DataArray` or :class:`numpy.ndarray`): The map scale factor on the mass grid that is at least two dimensions, whose rightmost two dimensions must be south_north x west_east. If this array contains more than two dimensions, they must be the same as *ustag* and *vstag*'s leftmost dimensions. cor (:class:`xarray.DataArray` or :class:`numpy.ndarray`): The Coriolis sine latitude array that is at least two dimensions, whose dimensions must be the same as *msfm*. dx (:obj:`float`): The distance between x grid points. dy (:obj:`float`): The distance between y grid points. meta (:obj:`bool`): Set to False to disable metadata and return :class:`numpy.ndarray` instead of :class:`xarray.DataArray`. Default is True. Warning: The input arrays must not contain any missing/fill values or :data:`numpy.nan` values. Returns: :class:`xarray.DataArray` or :class:`numpy.ndarray`: The potential vorticity. If xarray is enabled and the *meta* parameter is True, then the result will be an :class:`xarray.DataArray` object. Otherwise, the result will be a :class:`numpy.ndarray` object with no metadata. See Also: :meth:`wrf.getvar`, :meth:`wrf.avo` """ return _pvo(ustag, vstag, theta, pres, msfu, msfv, msfm, cor, dx, dy) @set_alg_metadata(3, "qv", description="equivalent potential temperature") @convert_units("temp", "k") def eth(qv, tkel, pres, meta=True, units="K"): """Return the equivalent potential temperature. This is the raw computational algorithm and does not extract any variables from WRF output files. Use :meth:`wrf.getvar` to both extract and compute diagnostic variables. Args: qv (:class:`xarray.DataArray` or :class:`numpy.ndarray`): Water vapor mixing ratio in [kg/kg] that is at least three dimensions, with the rightmost dimensions of bottom_top x south_north x west_east. tkel (:class:`xarray.DataArray` or :class:`numpy.ndarray`): Temperature in [K] with same dimensionality as *qv*. Note: This variable must be supplied as a :class:`xarray.DataArray` in order to copy the dimension names to the output. Otherwise, default names will be used. pres (:class:`xarray.DataArray` or :class:`numpy.ndarray`): Full pressure (perturbation + base state pressure) in [Pa] with the same dimensionality as *qv*. meta (:obj:`bool`): Set to False to disable metadata and return :class:`numpy.ndarray` instead of :class:`xarray.DataArray`. Default is True. units (:obj:`str`): The desired units. Refer to the :meth:`getvar` product table for a list of available units for 'eth'. Default is 'K'. Warning: The input arrays must not contain any missing/fill values or :data:`numpy.nan` values. Returns: :class:`xarray.DataArray` or :class:`numpy.ndarray`: The equivalent potential temperature. If xarray is enabled and the *meta* parameter is True, then the result will be an :class:`xarray.DataArray` object. Otherwise, the result will be a :class:`numpy.ndarray` object with no metadata. See Also: :meth:`wrf.getvar`, :meth:`wrf.temp`, :meth:`wrf.wetbulb`, :meth:`tvirtual` """ return _eth(qv, tkel, pres) @set_alg_metadata(3, "pres", description="wetbulb temperature") @convert_units("temp", "k") def wetbulb(pres, tkel, qv, meta=True, units="K"): """Return the wetbulb temperature. This is the raw computational algorithm and does not extract any variables from WRF output files. Use :meth:`wrf.getvar` to both extract and compute diagnostic variables. Args: pres (:class:`xarray.DataArray` or :class:`numpy.ndarray`): Full pressure (perturbation + base state pressure) in [Pa], with the rightmost dimensions as bottom_top x south_north x west_east Note: This variable must be supplied as a :class:`xarray.DataArray` in order to copy the dimension names to the output. Otherwise, default names will be used. tkel (:class:`xarray.DataArray` or :class:`numpy.ndarray`): Temperature in [K] with same dimensionality as *pres*. qv (:class:`xarray.DataArray` or :class:`numpy.ndarray`): Water vapor mixing ratio in [kg/kg] with the same dimensionality as *pres* meta (:obj:`bool`): Set to False to disable metadata and return :class:`numpy.ndarray` instead of :class:`xarray.DataArray`. Default is True. units (:obj:`str`): The desired units. Refer to the :meth:`getvar` product table for a list of available units for 'twb'. Default is 'K'. Warning: The input arrays must not contain any missing/fill values or :data:`numpy.nan` values. Returns: :class:`xarray.DataArray` or :class:`numpy.ndarray`: The wetbulb temperature. If xarray is enabled and the *meta* parameter is True, then the result will be an :class:`xarray.DataArray` object. Otherwise, the result will be a :class:`numpy.ndarray` object with no metadata. See Also: :meth:`wrf.getvar`, :meth:`wrf.temp`, :meth:`wrf.eth`, :meth:`tvirtual` """ return _wetbulb(pres, tkel, qv) @set_alg_metadata(3, "tkel", description="virtual temperature") @convert_units("temp", "k") def tvirtual(tkel, qv, meta=True, units="K"): """Return the virtual temperature. This is the raw computational algorithm and does not extract any variables from WRF output files. Use :meth:`wrf.getvar` to both extract and compute diagnostic variables. Args: tkel (:class:`xarray.DataArray` or :class:`numpy.ndarray`): Temperature in [K] with the rightmost dimensions as bottom_top x south_north x west_east. Note: This variable must be supplied as a :class:`xarray.DataArray` in order to copy the dimension names to the output. Otherwise, default names will be used. qv (:class:`xarray.DataArray` or :class:`numpy.ndarray`): Water vapor mixing ratio in [kg/kg] with the same dimensionality as *tkel* meta (:obj:`bool`): Set to False to disable metadata and return :class:`numpy.ndarray` instead of :class:`xarray.DataArray`. Default is True. units (:obj:`str`): The desired units. Refer to the :meth:`getvar` product table for a list of available units for 'tv'. Default is 'K'. Warning: The input arrays must not contain any missing/fill values or :data:`numpy.nan` values. Returns: :class:`xarray.DataArray` or :class:`numpy.ndarray`: The virtual temperature. If xarray is enabled and the *meta* parameter is True, then the result will be an :class:`xarray.DataArray` object. Otherwise, the result will be a :class:`numpy.ndarray` object with no metadata. See Also: :meth:`wrf.getvar`, :meth:`wrf.temp`, :meth:`wrf.eth`, :meth:`wetbulb` """ return _tv(tkel, qv) @set_alg_metadata(3, "qv", units="Pa s-1", description="omega") def omega(qv, tkel, w, pres, meta=True): """Return omega. This function calculates omega (dp/dt) [Pa s-1]. This is the raw computational algorithm and does not extract any variables from WRF output files. Use :meth:`wrf.getvar` to both extract and compute diagnostic variables. Args: qv (:class:`xarray.DataArray` or :class:`numpy.ndarray`): Water vapor mixing ratio in [kg/kg] with the rightmost dimensions as bottom_top x south_north x west_east. Note: This variable must be supplied as a :class:`xarray.DataArray` in order to copy the dimension names to the output. Otherwise, default names will be used. tkel (:class:`xarray.DataArray` or :class:`numpy.ndarray`): Temperature in [K] with the same dimensionality as *qv*. w (:class:`xarray.DataArray` or :class:`numpy.ndarray`): The vertical velocity [m s-1] with the same dimensionality as *qv*. pres (:class:`xarray.DataArray` or :class:`numpy.ndarray`): Full pressure (perturbation + base state pressure) in [Pa] with the same dimensionality as *qv*. meta (:obj:`bool`): Set to False to disable metadata and return :class:`numpy.ndarray` instead of :class:`xarray.DataArray`. Default is True. Warning: The input arrays must not contain any missing/fill values or :data:`numpy.nan` values. Returns: :class:`xarray.DataArray` or :class:`numpy.ndarray`: Omega. If xarray is enabled and the *meta* parameter is True, then the result will be an :class:`xarray.DataArray` object. Otherwise, the result will be a :class:`numpy.ndarray` object with no metadata. See Also: :meth:`wrf.getvar`, :meth:`uvmet` """ return _omega(qv, tkel, w, pres) @set_alg_metadata(2, "pres", refvarndims=3, units="kg m-2", description="precipitable water") def pw(pres, tkel, qv, height, meta=True): """Return the precipitable water. This is the raw computational algorithm and does not extract any variables from WRF output files. Use :meth:`wrf.getvar` to both extract and compute diagnostic variables. Args: pres (:class:`xarray.DataArray` or :class:`numpy.ndarray`): Full pressure (perturbation + base state pressure) in [Pa], with the rightmost dimensions as bottom_top x south_north x west_east Note: This variable must be supplied as a :class:`xarray.DataArray` in order to copy the dimension names to the output. Otherwise, default names will be used. tkel (:class:`xarray.DataArray` or :class:`numpy.ndarray`): Temperature in [K] with the same dimensionality as *pres*. qv (:class:`xarray.DataArray` or :class:`numpy.ndarray`): Water vapor mixing ratio in [kg/kg] with the same dimensionality as *pres* height (:class:`xarray.DataArray` or :class:`numpy.ndarray`): Geopotential height in [m] with the same dimensionality as *pres*. meta (:obj:`bool`): Set to False to disable metadata and return :class:`numpy.ndarray` instead of :class:`xarray.DataArray`. Default is True. Warning: The input arrays must not contain any missing/fill values or :data:`numpy.nan` values. Returns: :class:`xarray.DataArray` or :class:`numpy.ndarray`: The precipitable water [kg m-2]. If xarray is enabled and the *meta* parameter is True, then the result will be an :class:`xarray.DataArray` object. Otherwise, the result will be a :class:`numpy.ndarray` object with no metadata. See Also: :meth:`wrf.getvar` """ tv = _tv(tkel, qv) return _pw(pres, tv, qv, height)
40.937277
81
0.593712
10,129
80,278
4.657222
0.072169
0.043605
0.079283
0.045111
0.762703
0.732241
0.713395
0.702351
0.682954
0.662116
0
0.009926
0.323576
80,278
1,960
82
40.958163
0.85879
0.818319
0
0.138365
0
0
0.06328
0
0
0
0
0
0
1
0.150943
false
0.012579
0.050314
0
0.352201
0.006289
0
0
0
null
0
0
0
0
1
1
1
0
1
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
5
8ac1a83bba20eab6db2fb8312527e13122b65631
529
py
Python
identity_operators.py
HuuHoangNguyen/Python_learning
c33940ca95866cefa6381cdef901062be755052d
[ "MIT" ]
null
null
null
identity_operators.py
HuuHoangNguyen/Python_learning
c33940ca95866cefa6381cdef901062be755052d
[ "MIT" ]
null
null
null
identity_operators.py
HuuHoangNguyen/Python_learning
c33940ca95866cefa6381cdef901062be755052d
[ "MIT" ]
null
null
null
#!/usr/bin/python a = 20 b = 20 if a is b: print "Line 1: a and b have same identity" else: print "Line 1: a and b do not have same identity" if id(a) == id(b): print "Line 2: a and b have same identity" else: print "Line 2: a and b do not have same identity" b = 30 if a is b: print "Line 3: a and b have same identity" else: print "Line 3: a and b do not have same identity" if a is not b: print "Line 4: a and b do not have same identity" else: print "Line 4: a and b have same identity"
19.592593
53
0.63138
111
529
3.009009
0.207207
0.215569
0.11976
0.107784
0.898204
0.898204
0.628743
0.628743
0.473054
0
0
0.036842
0.281664
529
26
54
20.346154
0.842105
0.030246
0
0.315789
0
0
0.585938
0
0
0
0
0
0
0
null
null
0
0
null
null
0.421053
0
0
0
null
1
0
0
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
1
0
0
0
0
0
0
1
0
5
0a02cbc408bfdcceac23eb635a5dd6e91589d5af
14,008
py
Python
functions.py
gcontarini/yahoo-finance-scrapper
746f7e910b6357cbbbbb4bde8260dc65085999e0
[ "MIT" ]
null
null
null
functions.py
gcontarini/yahoo-finance-scrapper
746f7e910b6357cbbbbb4bde8260dc65085999e0
[ "MIT" ]
null
null
null
functions.py
gcontarini/yahoo-finance-scrapper
746f7e910b6357cbbbbb4bde8260dc65085999e0
[ "MIT" ]
1
2022-03-19T18:53:48.000Z
2022-03-19T18:53:48.000Z
from bs4 import BeautifulSoup import numpy as np import pandas as pd import requests from selenium import webdriver from time import sleep def balance(ticker, wd_path): ''' Uses selenium webdriver to open the Yahoo Finance balance sheet page and expand all possibles rows. Then, download the newest information available. Args: ------- ticker (str): must use the same ticker as Yahoo Finance wd_path (str): absolute path to webdriver executable Returns: ------- pd.DataFrame: Balance sheets data ''' # Web page url = 'https://finance.yahoo.com/quote/{}/balance-sheet?p={}'.format(ticker, ticker) # Open webdriver browser = webdriver.Chrome(executable_path=wd_path) # Open page browser.get(url) sleep(2) # Expand all possible rows cols = [] # Try clicking in everything possible 5 times for i in range(5): rows = browser.find_elements_by_css_selector('div[data-test="fin-row"]') for r in rows: col_name = r.find_element_by_css_selector('span') # Dont click the same col twice if col_name.text not in cols: cols.append(col_name.text) # Cant click, not a problem just keep clicking try: press = r.find_element_by_css_selector('svg') press.click() # print('CLICKED ON: ' + col_name.text) sleep(1) except: # print('NOT CLICKED IN: ' + col_name.text) pass # Now we finally take the data we want raw_dict = {} rows = browser.find_elements_by_css_selector('div[data-test="fin-row"]') for r in rows: # Take the data info = r.find_element_by_css_selector('div[data-test="fin-col"] > span') # Column name col_name = r.find_element_by_css_selector('div[title] > span') raw_dict[col_name.text] = [info.text] # Close webdrive browser.quit() # Convert to dict to df and values to numbers bs = pd.DataFrame.from_dict(raw_dict) bs = bs.replace(',', '', regex=True) bs = bs.astype('double') # All values are in thousand bs = bs * 1000 return bs def balance_allyears(ticker, wd_path): ''' Uses selenium webdriver to open the Yahoo Finance balance sheet page and expand all possibles rows. Then, download fundamental information for all years available. Args: ------- ticker (str): must use the same ticker as yahoo finance wd_path (str): absolute path to webdriver executable Returns: ------- pd.DataFrame: Balance sheets data (each row is a year) ''' # Web page url = 'https://finance.yahoo.com/quote/{}/balance-sheet?p={}'.format(ticker, ticker) # Open webdriver browser = webdriver.Chrome(executable_path=wd_path) # Open page browser.get(url) sleep(2) # Expand all possible rows cols = [] # Try clicking in everything possible 5 times for i in range(5): rows = browser.find_elements_by_css_selector('div[data-test="fin-row"]') for r in rows: col_name = r.find_element_by_css_selector('span') # Dont click the same col twice if col_name.text not in cols: cols.append(col_name.text) # Cant click, not a problem just keep clicking try: press = r.find_element_by_css_selector('svg') press.click() # print('CLICKED ON: ' + col_name.text) sleep(1) except: # print('NOT CLICKED IN: ' + col_name.text) pass # Now we finally take the data we want raw_dict = {} rows = browser.find_elements_by_css_selector('div[data-test="fin-row"]') for r in rows: # Take the data info = r.find_elements_by_css_selector('div[data-test="fin-col"]') # Column name col_name = r.find_element_by_css_selector('div[title] > span') info_l = [] for inf in info[:4]: info_l.append(inf.text) raw_dict[col_name.text] = info_l # Close webdrive browser.quit() # Convert to dict to df and values to numbers bs = pd.DataFrame.from_dict(raw_dict) bs = bs.replace(',', '', regex=True) bs = bs.replace('^-$', np.nan, regex=True) bs = bs.astype('double') # All values are in thousand bs = bs * 1000 return bs def financials(ticker): ''' Opens financials page from Yahoo Finance and download the newest available data. Args: ------ ticker (str): must use the same ticker as Yahoo Finance Returns: ------ pd.DataFrame: Financials page data ''' # Web page - Financial page url = 'https://finance.yahoo.com/quote/{}/financials?p={}'.format(ticker, ticker) # Make the request for html page page = requests.get(url) page_content = page.content # Open it as a BeatifulSoup object soup = BeautifulSoup(page_content, 'html.parser') # Now the black magic starts table = soup.find_all('div', attrs={'class': 'D(tbrg)'}) rows = table[0].find_all('div', attrs={'data-test': 'fin-row'}) data_dict = {} for row in rows: inside_row = row.find_all('span') col = inside_row[0].get_text() data_dict[col] = [inside_row[1].get_text()] # Convert to dict to df and values to numbers fp = pd.DataFrame.from_dict(data_dict) fp = fp.replace(',', '', regex=True) fp = fp.astype('double') # All values are in thousand fp = fp * 1000 return fp def financials_allyears(ticker): ''' Opens financials page from Yahoo Finance and download all available data. Args: ------ ticker (str): must use the same ticker as yahoo finance Returns: ------ pd.DataFrame: Financials page data (each row is a year) ''' # Web page - Financial page url = 'https://finance.yahoo.com/quote/{}/financials?p={}'.format(ticker, ticker) # Make the request for html page page = requests.get(url) page_content = page.content # Open it as a BeatifulSoup object soup = BeautifulSoup(page_content, 'html.parser') # Now the black magic starts table = soup.find_all('div', attrs={'class': 'D(tbrg)'}) rows = table[0].find_all('div', attrs={'data-test': 'fin-row'}) data_dict = {} for row in rows: # Select data inside_row = row.select('div[data-test="fin-col"]') # Select column name col = row.select('div[title] > span')[0].get_text() info = [] # Takes all data until the third element # After that the data is redundant for inf in inside_row[:4]: info.append(inf.get_text()) # Dict to hold all data data_dict[col] = info # Convert to dict to df and values to numbers fp = pd.DataFrame.from_dict(data_dict) fp = fp.replace(',', '', regex=True) fp = fp.replace('^-$', np.nan, regex=True) fp = fp.astype('double') # All values are in thousand fp = fp * 1000 return fp def cashflow(ticker): ''' Opens cash flow page from Yahoo Finance and takes the newest available data. Args: ------ ticker (str): must use the same ticker as Yahoo Finance Returns: ------ pd.DataFrame: Cashflow page data ''' # Web page - Cash flow page url = 'https://finance.yahoo.com/quote/{}/cash-flow?p={}'.format(ticker, ticker) # Make the request for html page page = requests.get(url) page_content = page.content # Open it as a BeatifulSoup object soup = BeautifulSoup(page_content, 'html.parser') # Again, same witchcraft table = soup.find_all('div', attrs={'class': 'D(tbrg)'}) rows = table[0].find_all('div', attrs={'data-test': 'fin-row'}) data_dict = {} for row in rows: inside_row = row.find_all('span') col = inside_row[0].get_text() data_dict[col] = [inside_row[1].get_text()] # Convert to dict to df and values to numbers cf = pd.DataFrame.from_dict(data_dict) cf = cf.replace(',', '', regex=True) cf = cf.astype('double') # All values are in thousand cf = cf * 1000 return cf def cashflow_allyears(ticker): ''' Opens cash flow page from Yahoo Finance and download all available data. Args: ------ ticker (str): must use the same ticker as yahoo finance Returns: ------ pd.DataFrame: Cashflow page data (each row is a year) ''' # Web page - Cash flow page url = 'https://finance.yahoo.com/quote/{}/cash-flow?p={}'.format(ticker, ticker) # Make the request for html page page = requests.get(url) page_content = page.content # Open it as a BeatifulSoup object soup = BeautifulSoup(page_content, 'html.parser') # Again, same witchcraft table = soup.find_all('div', attrs={'class': 'D(tbrg)'}) rows = table[0].find_all('div', attrs={'data-test': 'fin-row'}) data_dict = {} for row in rows: # Select data inside_row = row.select('div[data-test="fin-col"]') # Select column name col = row.select('div[title] > span')[0].get_text() info = [] # Takes all data until the third element # After that the data is redundant for inf in inside_row[:4]: info.append(inf.get_text()) # Dict to hold all data data_dict[col] = info # Convert to dict to df and values to numbers cf = pd.DataFrame.from_dict(data_dict) cf = cf.replace(',', '', regex=True) cf = cf.replace('^-$', np.nan, regex=True) cf = cf.astype('double') # All values are in thousand cf = cf * 1000 return cf def keystats(ticker): ''' Opens key statistics from Yahoo Finance and takes the newest available data. Args: ------ ticker (str): must use the same ticker as yahoo finance Returns: ------ pd.DataFrame: Key statistics page data ''' # Web page - Key statistics url = 'https://finance.yahoo.com/quote/{}/key-statistics?p={}'.format(ticker, ticker) # Make the request for html page page = requests.get(url) page_content = page.content # Open it as a BeatifulSoup object soup = BeautifulSoup(page_content, 'html.parser') # It starts with a similar code from before table = soup.find_all('table') rows = table[0].find_all('tr') raw_data = {} for r in rows: i = r.find_all('td') if i: raw_data[i[0].get_text()] = [i[1].get_text()] # It has 2 more bottom tables with different formatation bottom_table = soup.select('section[data-test] > div[class] > div[class]') for t in bottom_table[1:]: rows = t.select('table[class] > tbody > tr[class]') for r in rows: info = r.select('td[class]') raw_data[info[0].get_text().strip()] = [info[1].get_text()] # Convert to dict to df and values to numbers ks = pd.DataFrame.from_dict(raw_data) # Clean df ks = ks.replace('T', 'e+18', regex=True) ks = ks.replace('B', 'e+12', regex=True) ks = ks.replace('M', 'e+06', regex=True) ks = ks.replace('k', 'e+03', regex=True) ks = ks.replace('%', 'e-2', regex=True) ks = ks.replace('N/A', np.nan, regex=True) # Select not numerical columns notnum = ['Fiscal Year Ends', 'Most Recent Quarter (mrq)'] for col in ks.columns: if 'Date' in col or 'Split' in col: notnum.append(col) date_columns = ks[notnum] # Remove not numerical cols ks = ks.drop(columns=notnum) # Transform numerical cols ks = ks.replace(',', '', regex=True) ks = ks.astype('double') # Join df ks = pd.concat([date_columns, ks], axis=1) return ks def download_newest(ticker_list, wd_path, save_csv=False): ''' Uses the functions above to download all newest available data on a given stock list. Can save data as csv or return it as a dataframe Args: ------ ticker (str): must use the same ticker as Yahoo Finance wd_path (str): absolute path to webdriver executable Returns: ------ pd.DataFrame: With all available data on stock ''' # Dict to store data all_data = {} # Start process for ticker in ticker_list: print('#######', ticker, '#######') bs = balance(ticker, wd_path) sleep(1) fp = financials(ticker) sleep(1) cf = cashflow(ticker) sleep(1) ks = keystats(ticker) # Concat data data = pd.concat([bs, fp, cf, ks], axis=1) data.index = [ticker] # Append to dict of dfs all_data[ticker] = data # Wait sleep(1) # Join all stocks on same df concat = [] for ticker in ticker_list: # Drop duplicate columns all_data[ticker] = all_data[ticker].loc[:, ~all_data[ticker].columns.duplicated()] concat.append(all_data[ticker]) final_df = pd.concat(concat, axis=0, join='outer') # Save data as csv # Each stock is a row if save_csv: final_df.to_csv('yf_fundamental_data.csv') return final_df
28.823045
90
0.570603
1,833
14,008
4.265139
0.135843
0.018419
0.019954
0.017396
0.783576
0.75582
0.74661
0.745715
0.739703
0.716935
0
0.007169
0.312893
14,008
486
91
28.823045
0.805091
0.317676
0
0.661765
0
0
0.122691
0.023786
0
0
0
0
0
1
0.039216
false
0.009804
0.029412
0
0.107843
0.004902
0
0
0
null
0
0
0
0
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
5
0a0beae25adc579e60e41cf50ee67fd1b867dce3
4,758
py
Python
tests/test_nmap.py
tagazoo/node
d9453df69b3e6441f5b4c22a74dc9b11057773ad
[ "MIT" ]
null
null
null
tests/test_nmap.py
tagazoo/node
d9453df69b3e6441f5b4c22a74dc9b11057773ad
[ "MIT" ]
null
null
null
tests/test_nmap.py
tagazoo/node
d9453df69b3e6441f5b4c22a74dc9b11057773ad
[ "MIT" ]
null
null
null
import unittest import unittest.mock as mock import re from node.nmap import Nmap NMAP_EXAMPLE = """# Nmap 7.01 scan initiated Tue May 28 18:53:50 2019 as: nmap --top-ports 10 -oG output.txt -sV -A -T4 --version-all -d 37.59.37.5 # Ports scanned: TCP(10;21-23,25,80,110,139,443,445,3389) UDP(0;) SCTP(0;) PROTOCOLS(0;) Host: 38.59.37.5 (ns398199.ip-38-59-37.eu) Status: Up Host: 38.59.37.5 (ns398199.ip-38-59-37.eu) Ports: 21/closed/tcp//ftp///, 22/open/tcp//ssh//OpenSSH 7.2p2 Ubuntu 4ubuntu2.8 (Ubuntu Linux; protocol 2.0)/, 23/closed/tcp//telnet///, 25/filtered/tcp//smtp///, 80/closed/tcp//http///, 110/closed/tcp//pop3///, 139/closed/tcp//netbios-ssn///, 443/closed/tcp//https///, 445/filtered/tcp//microsoft-ds///, 3389/closed/tcp//ms-wbt-server/// # Nmap done at Tue May 28 18:53:53 2019 -- 1 IP address (1 host up) scanned in 3.13 secondss """ class TestNmap(unittest.TestCase): def setUp(self): self.nmap = Nmap() def test_is_up(self): result = self.nmap.is_up(NMAP_EXAMPLE, "38.59.37.5") self.assertTrue(result) def test_is_up_fail(self): result = self.nmap.is_up(NMAP_EXAMPLE, "127.0.0.1") self.assertFalse(result) def test_get_ip_dns_ports(self): result = self.nmap.get_ip_dns_ports(NMAP_EXAMPLE) expected = ( "38.59.37.5", "ns398199.ip-38-59-37.eu", "21/closed/tcp//ftp///, 22/open/tcp//ssh//OpenSSH 7.2p2 Ubuntu 4ubuntu2.8 (Ubuntu Linux; protocol 2.0)/, 23/closed/tcp//telnet///, 25/filtered/tcp//smtp///, 80/closed/tcp//http///, 110/closed/tcp//pop3///, 139/closed/tcp//netbios-ssn///, 443/closed/tcp//https///, 445/filtered/tcp//microsoft-ds///, 3389/closed/tcp//ms-wbt-server///" ) self.assertEqual(result, expected) def test_parse_ports(self): ports = "21/closed/tcp//ftp///, 22/open/tcp//ssh//OpenSSH 7.2p2 Ubuntu 4ubuntu2.8 (Ubuntu Linux; protocol 2.0)/, 23/closed/tcp//telnet///, 25/filtered/tcp//smtp///, 80/closed/tcp//http///, 110/closed/tcp//pop3///, 139/closed/tcp//netbios-ssn///, 443/closed/tcp//https///, 445/filtered/tcp//microsoft-ds///, 3389/closed/tcp//ms-wbt-server///" result = self.nmap.parse_ports(ports) expected = [ {'port': '21', 'state': 'closed', 'protocol': 'tcp', 'service': 'ftp'}, {'port': '22', 'state': 'open', 'protocol': 'tcp', 'service': 'ssh', 'version': 'OpenSSH 7.2p2 Ubuntu 4ubuntu2.8 (Ubuntu Linux; protocol 2.0)'}, {'port': '23', 'state': 'closed', 'protocol': 'tcp', 'service': 'telnet'}, {'port': '25', 'state': 'filtered', 'protocol': 'tcp', 'service': 'smtp'}, {'port': '80', 'state': 'closed', 'protocol': 'tcp', 'service': 'http'}, {'port': '110', 'state': 'closed', 'protocol': 'tcp', 'service': 'pop3'}, {'port': '139', 'state': 'closed', 'protocol': 'tcp', 'service': 'netbios-ssn'}, {'port': '443', 'state': 'closed', 'protocol': 'tcp', 'service': 'https'}, {'port': '445', 'state': 'filtered', 'protocol': 'tcp', 'service': 'microsoft-ds'}, {'port': '3389', 'state': 'closed', 'protocol': 'tcp', 'service': 'ms-wbt-server'} ] self.assertEqual(result, expected) def test_parse_nmap(self): result = self.nmap.parse_nmap(NMAP_EXAMPLE, "38.59.37.5") expected = { "status" : "up", "ip": "38.59.37.5", "dns": "ns398199.ip-38-59-37.eu", "scan": [ {'port': '21', 'state': 'closed', 'protocol': 'tcp', 'service': 'ftp'}, {'port': '22', 'state': 'open', 'protocol': 'tcp', 'service': 'ssh', 'version': 'OpenSSH 7.2p2 Ubuntu 4ubuntu2.8 (Ubuntu Linux; protocol 2.0)'}, {'port': '23', 'state': 'closed', 'protocol': 'tcp', 'service': 'telnet'}, {'port': '25', 'state': 'filtered', 'protocol': 'tcp', 'service': 'smtp'}, {'port': '80', 'state': 'closed', 'protocol': 'tcp', 'service': 'http'}, {'port': '110', 'state': 'closed', 'protocol': 'tcp', 'service': 'pop3'}, {'port': '139', 'state': 'closed', 'protocol': 'tcp', 'service': 'netbios-ssn'}, {'port': '443', 'state': 'closed', 'protocol': 'tcp', 'service': 'https'}, {'port': '445', 'state': 'filtered', 'protocol': 'tcp', 'service': 'microsoft-ds'}, {'port': '3389', 'state': 'closed', 'protocol': 'tcp', 'service': 'ms-wbt-server'} ] } self.assertEqual(result, expected)
52.285714
381
0.539512
601
4,758
4.229617
0.186356
0.074351
0.141621
0.121164
0.77144
0.761998
0.745083
0.745083
0.719119
0.719119
0
0.094658
0.240647
4,758
90
382
52.866667
0.608912
0
0
0.041096
0
0.09589
0.525851
0.196931
0
0
0
0
0.068493
1
0.082192
false
0
0.054795
0
0.150685
0
0
0
0
null
0
0
0
0
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
5
0a3e9dc3a363cd6027fdf3ccdb2a841d0c9a2d6c
110
py
Python
k2/python/k2/ragged/__init__.py
open-speech/sequeender
7a64e1a7d8a4b05b0b82e17c542f9f7f943a41e0
[ "MIT" ]
5
2020-11-19T15:49:55.000Z
2021-06-10T23:51:52.000Z
k2/python/k2/ragged/__init__.py
open-speech/sequeender
7a64e1a7d8a4b05b0b82e17c542f9f7f943a41e0
[ "MIT" ]
null
null
null
k2/python/k2/ragged/__init__.py
open-speech/sequeender
7a64e1a7d8a4b05b0b82e17c542f9f7f943a41e0
[ "MIT" ]
null
null
null
from k2.python.k2.ragged import ops from k2.python.k2.ragged.ops import (index,) __all__ = ['index', 'ops']
18.333333
44
0.709091
18
110
4.111111
0.444444
0.162162
0.324324
0.378378
0.540541
0
0
0
0
0
0
0.041667
0.127273
110
5
45
22
0.729167
0
0
0
0
0
0.072727
0
0
0
0
0
0
1
0
false
0
0.666667
0
0.666667
0
1
0
0
null
0
1
1
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
0
0
0
5