| column | dtype |
|---|---|
| hexsha | string |
| size | int64 |
| ext | string |
| lang | string |
| max_stars_repo_path | string |
| max_stars_repo_name | string |
| max_stars_repo_head_hexsha | string |
| max_stars_repo_licenses | list |
| max_stars_count | int64 |
| max_stars_repo_stars_event_min_datetime | string |
| max_stars_repo_stars_event_max_datetime | string |
| max_issues_repo_path | string |
| max_issues_repo_name | string |
| max_issues_repo_head_hexsha | string |
| max_issues_repo_licenses | list |
| max_issues_count | int64 |
| max_issues_repo_issues_event_min_datetime | string |
| max_issues_repo_issues_event_max_datetime | string |
| max_forks_repo_path | string |
| max_forks_repo_name | string |
| max_forks_repo_head_hexsha | string |
| max_forks_repo_licenses | list |
| max_forks_count | int64 |
| max_forks_repo_forks_event_min_datetime | string |
| max_forks_repo_forks_event_max_datetime | string |
| content | string |
| avg_line_length | float64 |
| max_line_length | int64 |
| alphanum_fraction | float64 |
| qsc_code_num_words_quality_signal | int64 |
| qsc_code_num_chars_quality_signal | float64 |
| qsc_code_mean_word_length_quality_signal | float64 |
| qsc_code_frac_words_unique_quality_signal | float64 |
| qsc_code_frac_chars_top_2grams_quality_signal | float64 |
| qsc_code_frac_chars_top_3grams_quality_signal | float64 |
| qsc_code_frac_chars_top_4grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_5grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_6grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_7grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_8grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_9grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_10grams_quality_signal | float64 |
| qsc_code_frac_chars_replacement_symbols_quality_signal | float64 |
| qsc_code_frac_chars_digital_quality_signal | float64 |
| qsc_code_frac_chars_whitespace_quality_signal | float64 |
| qsc_code_size_file_byte_quality_signal | float64 |
| qsc_code_num_lines_quality_signal | float64 |
| qsc_code_num_chars_line_max_quality_signal | float64 |
| qsc_code_num_chars_line_mean_quality_signal | float64 |
| qsc_code_frac_chars_alphabet_quality_signal | float64 |
| qsc_code_frac_chars_comments_quality_signal | float64 |
| qsc_code_cate_xml_start_quality_signal | float64 |
| qsc_code_frac_lines_dupe_lines_quality_signal | float64 |
| qsc_code_cate_autogen_quality_signal | float64 |
| qsc_code_frac_lines_long_string_quality_signal | float64 |
| qsc_code_frac_chars_string_length_quality_signal | float64 |
| qsc_code_frac_chars_long_word_length_quality_signal | float64 |
| qsc_code_frac_lines_string_concat_quality_signal | float64 |
| qsc_code_cate_encoded_data_quality_signal | float64 |
| qsc_code_frac_chars_hex_words_quality_signal | float64 |
| qsc_code_frac_lines_prompt_comments_quality_signal | float64 |
| qsc_code_frac_lines_assert_quality_signal | float64 |
| qsc_codepython_cate_ast_quality_signal | float64 |
| qsc_codepython_frac_lines_func_ratio_quality_signal | float64 |
| qsc_codepython_cate_var_zero_quality_signal | bool |
| qsc_codepython_frac_lines_pass_quality_signal | float64 |
| qsc_codepython_frac_lines_import_quality_signal | float64 |
| qsc_codepython_frac_lines_simplefunc_quality_signal | float64 |
| qsc_codepython_score_lines_no_logic_quality_signal | float64 |
| qsc_codepython_frac_lines_print_quality_signal | float64 |
| qsc_code_num_words | int64 |
| qsc_code_num_chars | int64 |
| qsc_code_mean_word_length | int64 |
| qsc_code_frac_words_unique | null |
| qsc_code_frac_chars_top_2grams | int64 |
| qsc_code_frac_chars_top_3grams | int64 |
| qsc_code_frac_chars_top_4grams | int64 |
| qsc_code_frac_chars_dupe_5grams | int64 |
| qsc_code_frac_chars_dupe_6grams | int64 |
| qsc_code_frac_chars_dupe_7grams | int64 |
| qsc_code_frac_chars_dupe_8grams | int64 |
| qsc_code_frac_chars_dupe_9grams | int64 |
| qsc_code_frac_chars_dupe_10grams | int64 |
| qsc_code_frac_chars_replacement_symbols | int64 |
| qsc_code_frac_chars_digital | int64 |
| qsc_code_frac_chars_whitespace | int64 |
| qsc_code_size_file_byte | int64 |
| qsc_code_num_lines | int64 |
| qsc_code_num_chars_line_max | int64 |
| qsc_code_num_chars_line_mean | int64 |
| qsc_code_frac_chars_alphabet | int64 |
| qsc_code_frac_chars_comments | int64 |
| qsc_code_cate_xml_start | int64 |
| qsc_code_frac_lines_dupe_lines | int64 |
| qsc_code_cate_autogen | int64 |
| qsc_code_frac_lines_long_string | int64 |
| qsc_code_frac_chars_string_length | int64 |
| qsc_code_frac_chars_long_word_length | int64 |
| qsc_code_frac_lines_string_concat | null |
| qsc_code_cate_encoded_data | int64 |
| qsc_code_frac_chars_hex_words | int64 |
| qsc_code_frac_lines_prompt_comments | int64 |
| qsc_code_frac_lines_assert | int64 |
| qsc_codepython_cate_ast | int64 |
| qsc_codepython_frac_lines_func_ratio | int64 |
| qsc_codepython_cate_var_zero | int64 |
| qsc_codepython_frac_lines_pass | int64 |
| qsc_codepython_frac_lines_import | int64 |
| qsc_codepython_frac_lines_simplefunc | int64 |
| qsc_codepython_score_lines_no_logic | int64 |
| qsc_codepython_frac_lines_print | int64 |
| effective | string |
| hits | int64 |
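The sample rows below follow this schema. As a minimal sketch of how records with these columns could be inspected programmatically, the snippet reads rows from a local JSON Lines export and prints a few of the fields listed above; the file name `samples.jsonl` is hypothetical, since the dump does not say how the rows are stored.

```python
# Minimal sketch: reading rows that follow the schema above from a JSON Lines export.
# "samples.jsonl" is a hypothetical local file, not something named in this dump.
import json

with open("samples.jsonl", encoding="utf-8") as fh:
    for line in fh:
        row = json.loads(line)
        # Identity and provenance columns.
        print(row["hexsha"], row["lang"], row["max_stars_repo_path"])
        # `content` holds the raw file text; the qsc_*_quality_signal columns
        # are numeric signals computed over that text.
        print(len(row["content"]), row["qsc_code_num_lines_quality_signal"])
```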
1d5b53335d1ebbf71100f14cf7d7bf804d177951
| 187
|
py
|
Python
|
client/linux/__init__.py
|
nahidupa/grr
|
100a9d85ef2abb234e12e3ac2623caffb4116be7
|
[
"Apache-2.0"
] | 6
|
2015-04-03T02:25:28.000Z
|
2021-11-17T21:42:59.000Z
|
client/linux/__init__.py
|
nahidupa/grr
|
100a9d85ef2abb234e12e3ac2623caffb4116be7
|
[
"Apache-2.0"
] | 3
|
2020-02-11T22:29:15.000Z
|
2021-06-10T17:44:31.000Z
|
client/linux/__init__.py
|
nahidupa/grr
|
100a9d85ef2abb234e12e3ac2623caffb4116be7
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
"""This module contains linux specific client code."""
# These need to register plugins so, pylint: disable=unused-import
from grr.client.linux import installers
| 23.375
| 66
| 0.764706
| 27
| 187
| 5.296296
| 0.888889
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.13369
| 187
| 7
| 67
| 26.714286
| 0.882716
| 0.716578
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
1d729e4b7555f77a1f57c36103f3b0ebb96365b3
| 30
|
py
|
Python
|
aionpc/packet_builder.py
|
PrVrSs/aionpc
|
ec00834d6a87709522bfbda6aa78c1a936b54c63
|
[
"Apache-2.0"
] | null | null | null |
aionpc/packet_builder.py
|
PrVrSs/aionpc
|
ec00834d6a87709522bfbda6aa78c1a936b54c63
|
[
"Apache-2.0"
] | null | null | null |
aionpc/packet_builder.py
|
PrVrSs/aionpc
|
ec00834d6a87709522bfbda6aa78c1a936b54c63
|
[
"Apache-2.0"
] | null | null | null |
class PacketBuilder:
pass
| 10
| 20
| 0.733333
| 3
| 30
| 7.333333
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.233333
| 30
| 2
| 21
| 15
| 0.956522
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0.5
| 0
| 0
| 0.5
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 0
|
0
| 5
|
d524e703b2e24f9f30b1ed9ba1400770b10a18ef
| 5,401
|
py
|
Python
|
controller/rvpld-soc/rendering/rendering_6_no_bft.py
|
icgrp/doblink
|
f0b1ffd29588e9cc26371238e4f005e814556fa3
|
[
"MIT"
] | null | null | null |
controller/rvpld-soc/rendering/rendering_6_no_bft.py
|
icgrp/doblink
|
f0b1ffd29588e9cc26371238e4f005e814556fa3
|
[
"MIT"
] | null | null | null |
controller/rvpld-soc/rendering/rendering_6_no_bft.py
|
icgrp/doblink
|
f0b1ffd29588e9cc26371238e4f005e814556fa3
|
[
"MIT"
] | 1
|
2022-03-07T21:52:07.000Z
|
2022-03-07T21:52:07.000Z
|
from axil_cdc import AxilCDC
from axilite2bft import AxiLite2Bft
from bft import Bft
from interface_wrapper import InterfaceWrapper
from litex.soc.interconnect.axi import AXILiteInterface, AXIStreamInterface
from litex.soc.interconnect.stream import (ClockDomainCrossing, Converter,
Endpoint)
from migen import *
from pld_axi import *
class Rendering6Mono(Module):
def __init__(self, clk, rst, platform, start=1, clock_domain="bft"):
self.source = PldAXIStreamInterface(data_width=32)
self.sink = PldAXIStreamInterface(data_width=32)
self.clk = clk
self.rst = rst
self.start = start
self.platform = platform
source_sigs = self.source.get_signals()
sink_sigs = self.sink.get_signals()
self.platform.add_source("rtl/rendering_6_page/rendering_mono.v")
self.platform.add_source("rtl/rendering_6_page/regslice_core.v")
self.platform.add_source("rtl/rendering_6_page/zculling_top_prj/leaf_7.v")
self.platform.add_source("rtl/rendering_6_page/zculling_top_prj/zculling_top.v")
self.platform.add_source(
"rtl/rendering_6_page/zculling_top_prj/zculling_top_pixecud.v"
)
self.platform.add_source(
"rtl/rendering_6_page/zculling_top_prj/zculling_top_z_bubkb.v"
)
self.platform.add_source("rtl/rendering_6_page/rasterization2_m_prj/leaf_6.v")
self.platform.add_source(
"rtl/rendering_6_page/rasterization2_m_prj/rasterization2_m.v"
)
self.platform.add_source(
"rtl/rendering_6_page/rasterization2_m_prj/rasterization2_odd.v"
)
self.platform.add_source(
"rtl/rendering_6_page/rasterization2_m_prj/rasterization2_even.v"
)
self.platform.add_source(
"rtl/rendering_6_page/rasterization2_m_prj/rasterization2_odd_fragment_x_V_1.v"
)
self.platform.add_source(
"rtl/rendering_6_page/rasterization2_m_prj/rasterization2_m_udiv_16ns_8ns_8_20_1.v"
)
self.platform.add_source(
"rtl/rendering_6_page/rasterization2_m_prj/rasterization2_m_urem_16ns_8ns_8_20_1.v"
)
self.platform.add_source(
"rtl/rendering_6_page/rasterization2_m_prj/rasterization2_m_am_submul_8ns_8ns_9s_18_1_1.v"
)
self.platform.add_source(
"rtl/rendering_6_page/rasterization2_m_prj/rasterization2_m_ama_submul_sub_8ns_8ns_9s_18s_18_1_1.v"
)
self.platform.add_source("rtl/rendering_6_page/zculling_bot_prj/zculling_bot.v")
self.platform.add_source(
"rtl/rendering_6_page/zculling_bot_prj/zculling_bot_pixecud.v"
)
self.platform.add_source(
"rtl/rendering_6_page/zculling_bot_prj/zculling_bot_z_bubkb.v"
)
self.platform.add_source("rtl/rendering_6_page/zculling_bot_prj/leaf_5.v")
self.platform.add_source(
"rtl/rendering_6_page/coloringFB_bot_m_prj/coloringFB_bot_m.v"
)
self.platform.add_source(
"rtl/rendering_6_page/coloringFB_bot_m_prj/coloringFB_bot_m_bkb.v"
)
self.platform.add_source("rtl/rendering_6_page/coloringFB_bot_m_prj/leaf_4.v")
self.platform.add_source(
"rtl/rendering_6_page/data_redir_m_prj/data_redir_m_am_submul_8ns_8ns_9s_16_1_1.v"
)
self.platform.add_source(
"rtl/rendering_6_page/data_redir_m_prj/data_redir_m_am_submul_8ns_8ns_9s_18_1_1.v"
)
self.platform.add_source("rtl/rendering_6_page/data_redir_m_prj/data_redir_m.v")
self.platform.add_source(
"rtl/rendering_6_page/data_redir_m_prj/projection_odd_m.v"
)
self.platform.add_source(
"rtl/rendering_6_page/data_redir_m_prj/projection_even_m.v"
)
self.platform.add_source(
"rtl/rendering_6_page/data_redir_m_prj/rasterization1_odd_m.v"
)
self.platform.add_source(
"rtl/rendering_6_page/data_redir_m_prj/rasterization1_even_s.v"
)
self.platform.add_source("rtl/rendering_6_page/data_redir_m_prj/leaf_3.v")
self.platform.add_source("rtl/rendering_6_page/coloringFB_top_m_prj/leaf_2.v")
self.platform.add_source(
"rtl/rendering_6_page/coloringFB_top_m_prj/coloringFB_top_m.v"
)
self.platform.add_source(
"rtl/rendering_6_page/coloringFB_top_m_prj/coloringFB_top_m_bkb.v"
)
self.specials += Instance(
"rendering_mono",
i_ap_clk=self.clk,
i_ap_rst=self.rst,
i_ap_start=self.start,
# stream interface
i_Input_1_V_V=sink_sigs["tpayload"].data,
i_Input_1_V_V_ap_vld=sink_sigs["tvalid"],
o_Input_1_V_V_ap_ack=sink_sigs["tready"],
o_Output_1_V_V=source_sigs["tpayload"].data,
o_Output_1_V_V_ap_vld=source_sigs["tvalid"],
i_Output_1_V_V_ap_ack=source_sigs["tready"],
)
def connect_input(self, stream):
assert isinstance(stream, Endpoint)
assert stream.payload.data.nbits == 32
self.comb += stream.connect(self.sink)
def connect_output(self, stream):
assert isinstance(stream, Endpoint)
assert stream.payload.data.nbits == 32
self.comb += self.source.connect(stream)
| 44.270492
| 111
| 0.68728
| 753
| 5,401
| 4.480744
| 0.146082
| 0.120925
| 0.14671
| 0.205394
| 0.744517
| 0.700948
| 0.700948
| 0.700948
| 0.689686
| 0.678127
| 0
| 0.028878
| 0.224218
| 5,401
| 121
| 112
| 44.636364
| 0.776372
| 0.002962
| 0
| 0.224138
| 0
| 0
| 0.383615
| 0.373026
| 0
| 0
| 0
| 0
| 0.034483
| 1
| 0.025862
| false
| 0
| 0.068966
| 0
| 0.103448
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
d5280f63843328c1afe2096653d023b8093b0b90
| 80
|
py
|
Python
|
src/session/twitter/gui/list_manager/__init__.py
|
Oire/TheQube
|
fcfd8a68b15948e0740642d635db24adef8cc314
|
[
"MIT"
] | 21
|
2015-08-02T21:26:14.000Z
|
2019-12-27T09:57:44.000Z
|
src/session/twitter/gui/list_manager/__init__.py
|
Oire/TheQube
|
fcfd8a68b15948e0740642d635db24adef8cc314
|
[
"MIT"
] | 34
|
2015-01-12T00:38:14.000Z
|
2020-08-31T11:19:37.000Z
|
src/session/twitter/gui/list_manager/__init__.py
|
Oire/TheQube
|
fcfd8a68b15948e0740642d635db24adef8cc314
|
[
"MIT"
] | 15
|
2015-03-24T15:42:30.000Z
|
2020-09-24T20:26:42.000Z
|
from add_list import AddListDialog
from list_manager import ListManagerDialog
| 26.666667
| 43
| 0.875
| 10
| 80
| 6.8
| 0.7
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.125
| 80
| 2
| 44
| 40
| 0.971429
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
d56779b27d9f7ff5f6b2370259901151560dc473
| 130
|
py
|
Python
|
module4-acid-and-database-scalability-tradeoffs/titanic_queries.py
|
noreallyimfine/DS-Unit-3-Sprint-2-SQL-and-Databases
|
a8262712295a33321c9fc2aa0dc0e2fd9051d244
|
[
"MIT"
] | null | null | null |
module4-acid-and-database-scalability-tradeoffs/titanic_queries.py
|
noreallyimfine/DS-Unit-3-Sprint-2-SQL-and-Databases
|
a8262712295a33321c9fc2aa0dc0e2fd9051d244
|
[
"MIT"
] | null | null | null |
module4-acid-and-database-scalability-tradeoffs/titanic_queries.py
|
noreallyimfine/DS-Unit-3-Sprint-2-SQL-and-Databases
|
a8262712295a33321c9fc2aa0dc0e2fd9051d244
|
[
"MIT"
] | null | null | null |
'''
Thursday assignment DS6 Unit 3 Sprint 2 Module 4
Running queries in postgreSQL on the titanic database
'''
import psycopq2
| 14.444444
| 53
| 0.769231
| 19
| 130
| 5.263158
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.04717
| 0.184615
| 130
| 8
| 54
| 16.25
| 0.896226
| 0.792308
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
d57a2b66bdbad97a6d496aa34012360367a1e6df
| 87
|
py
|
Python
|
BIAS/__init__.py
|
Basvanstein/BIAS
|
5eed56d01803e455ef53afb278f630d5f3c85dc4
|
[
"BSD-3-Clause"
] | null | null | null |
BIAS/__init__.py
|
Basvanstein/BIAS
|
5eed56d01803e455ef53afb278f630d5f3c85dc4
|
[
"BSD-3-Clause"
] | null | null | null |
BIAS/__init__.py
|
Basvanstein/BIAS
|
5eed56d01803e455ef53afb278f630d5f3c85dc4
|
[
"BSD-3-Clause"
] | null | null | null |
from .SB_Toolbox import run_SB_test, f0
__all__ = (
"run_SB_test",
"f0"
)
| 14.5
| 40
| 0.609195
| 13
| 87
| 3.384615
| 0.615385
| 0.227273
| 0.409091
| 0.5
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.031746
| 0.275862
| 87
| 6
| 41
| 14.5
| 0.666667
| 0
| 0
| 0
| 0
| 0
| 0.156627
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.2
| 0
| 0.2
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
d58a5d32f575a5b8dd8cfe022dbc188021cde541
| 68
|
py
|
Python
|
microdevices/__init__.py
|
lmokto/microdevices
|
75a129d1c32f64afe9027338c4be304322ded857
|
[
"MIT"
] | null | null | null |
microdevices/__init__.py
|
lmokto/microdevices
|
75a129d1c32f64afe9027338c4be304322ded857
|
[
"MIT"
] | 1
|
2021-06-02T00:01:14.000Z
|
2021-06-02T00:01:14.000Z
|
microdevices/__init__.py
|
lmokto/microdevices
|
75a129d1c32f64afe9027338c4be304322ded857
|
[
"MIT"
] | null | null | null |
from .connector import *
from .libs import *
from .celery import app
| 22.666667
| 24
| 0.764706
| 10
| 68
| 5.2
| 0.6
| 0.384615
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.161765
| 68
| 3
| 25
| 22.666667
| 0.912281
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
6378682a74868377bda1857bc3d8299ee82397aa
| 84
|
py
|
Python
|
gencove/command/uploads/__init__.py
|
mislavcimpersak/gencove-cli
|
2ee9204609d4120c013392f892653ebe9f4a8f7e
|
[
"Apache-2.0"
] | 1
|
2020-04-28T06:31:53.000Z
|
2020-04-28T06:31:53.000Z
|
gencove/command/uploads/__init__.py
|
mislavcimpersak/gencove-cli
|
2ee9204609d4120c013392f892653ebe9f4a8f7e
|
[
"Apache-2.0"
] | null | null | null |
gencove/command/uploads/__init__.py
|
mislavcimpersak/gencove-cli
|
2ee9204609d4120c013392f892653ebe9f4a8f7e
|
[
"Apache-2.0"
] | 1
|
2021-07-29T08:24:51.000Z
|
2021-07-29T08:24:51.000Z
|
"""Sample sheet uploads related commands."""
from .cli import uploads # noqa: F401
| 28
| 44
| 0.72619
| 11
| 84
| 5.545455
| 0.909091
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.042254
| 0.154762
| 84
| 2
| 45
| 42
| 0.816901
| 0.595238
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
63ad0122e26dd7ee8c6846afb3f8acdb0f807f18
| 96
|
py
|
Python
|
venv/lib/python3.8/site-packages/virtualenv/util/path/_pathlib/via_os_path.py
|
Retraces/UkraineBot
|
3d5d7f8aaa58fa0cb8b98733b8808e5dfbdb8b71
|
[
"MIT"
] | 2
|
2022-03-13T01:58:52.000Z
|
2022-03-31T06:07:54.000Z
|
venv/lib/python3.8/site-packages/virtualenv/util/path/_pathlib/via_os_path.py
|
DesmoSearch/Desmobot
|
b70b45df3485351f471080deb5c785c4bc5c4beb
|
[
"MIT"
] | 19
|
2021-11-20T04:09:18.000Z
|
2022-03-23T15:05:55.000Z
|
venv/lib/python3.8/site-packages/virtualenv/util/path/_pathlib/via_os_path.py
|
DesmoSearch/Desmobot
|
b70b45df3485351f471080deb5c785c4bc5c4beb
|
[
"MIT"
] | null | null | null |
/home/runner/.cache/pip/pool/7d/80/c5/017e3cdf356f0bd8403800bd158b6b19d65eb614b4bed62d2ac2f9afeb
| 96
| 96
| 0.895833
| 9
| 96
| 9.555556
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.375
| 0
| 96
| 1
| 96
| 96
| 0.520833
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | null | 0
| 0
| null | null | 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
63c7d8aa12c88549fa6a4b8ad7a9008016598912
| 64
|
py
|
Python
|
fine/parser/__init__.py
|
Roger-luo/fine
|
4cb8acf4e856fc15ae12fa3e127cde8a80b7b97c
|
[
"Apache-2.0"
] | 2
|
2018-04-03T13:40:57.000Z
|
2018-06-25T13:17:17.000Z
|
fine/parser/__init__.py
|
Roger-luo/fine
|
4cb8acf4e856fc15ae12fa3e127cde8a80b7b97c
|
[
"Apache-2.0"
] | 2
|
2018-04-03T15:56:28.000Z
|
2018-04-03T16:54:50.000Z
|
fine/parser/__init__.py
|
Roger-luo/fine
|
4cb8acf4e856fc15ae12fa3e127cde8a80b7b97c
|
[
"Apache-2.0"
] | 1
|
2018-06-25T13:18:00.000Z
|
2018-06-25T13:18:00.000Z
|
from .presentation import Presentation
from .frame import Frame
| 21.333333
| 38
| 0.84375
| 8
| 64
| 6.75
| 0.5
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.125
| 64
| 2
| 39
| 32
| 0.964286
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
63d00d085d1a9bc12f261af256f7b31a5444d59c
| 406
|
py
|
Python
|
probability/calculations/operators/binary_operators/__init__.py
|
vahndi/probability
|
6ddf88e6f3d947c96b879e426030f60eb5cb2d59
|
[
"MIT"
] | 2
|
2020-02-21T00:47:03.000Z
|
2020-09-22T19:00:48.000Z
|
probability/calculations/operators/binary_operators/__init__.py
|
vahndi/probability
|
6ddf88e6f3d947c96b879e426030f60eb5cb2d59
|
[
"MIT"
] | 52
|
2020-01-16T16:05:08.000Z
|
2022-02-24T15:10:10.000Z
|
probability/calculations/operators/binary_operators/__init__.py
|
vahndi/probability
|
6ddf88e6f3d947c96b879e426030f60eb5cb2d59
|
[
"MIT"
] | null | null | null |
from probability.calculations.operators.binary_operators.add_operator \
import AddOperator
from probability.calculations.operators.binary_operators.divide_operator \
import DivideOperator
from probability.calculations.operators.binary_operators.multiply_operator \
import MultiplyOperator
from probability.calculations.operators.binary_operators.subtract_operator \
import SubtractOperator
| 45.111111
| 76
| 0.862069
| 40
| 406
| 8.55
| 0.375
| 0.175439
| 0.315789
| 0.421053
| 0.596491
| 0.596491
| 0
| 0
| 0
| 0
| 0
| 0
| 0.08867
| 406
| 8
| 77
| 50.75
| 0.924324
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.5
| 0
| 0.5
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
8934a41157f13364697060baeb653f7b0d6c32bf
| 118
|
py
|
Python
|
nn_closed_loop/nn_closed_loop/analyzers/__init__.py
|
StanfordASL/nn_robustness_analysis
|
2e03d98efb3ee848b4d8b277968e162513abbd0f
|
[
"MIT"
] | 36
|
2021-02-17T22:46:14.000Z
|
2022-03-28T08:36:27.000Z
|
nn_closed_loop/nn_closed_loop/analyzers/__init__.py
|
zhouzhiqian/nn_robustness_analysis
|
cff947c1b6c6b586a004d13387bb2fe31131dcab
|
[
"MIT"
] | null | null | null |
nn_closed_loop/nn_closed_loop/analyzers/__init__.py
|
zhouzhiqian/nn_robustness_analysis
|
cff947c1b6c6b586a004d13387bb2fe31131dcab
|
[
"MIT"
] | 9
|
2021-06-03T09:03:54.000Z
|
2022-03-07T15:12:03.000Z
|
from .ClosedLoopAnalyzer import ClosedLoopAnalyzer
from .ClosedLoopBackwardAnalyzer import ClosedLoopBackwardAnalyzer
| 39.333333
| 66
| 0.915254
| 8
| 118
| 13.5
| 0.5
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.067797
| 118
| 2
| 67
| 59
| 0.981818
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
897058d4a6ca411242991d85e8ba47bca6710637
| 127
|
py
|
Python
|
wildwar/arrowanimation/__init__.py
|
gottadiveintopython/wildwar-old-version
|
e4786fa75843a026a32f345226c03b9091cd0bd9
|
[
"MIT"
] | 3
|
2018-11-18T01:34:03.000Z
|
2021-11-07T18:29:36.000Z
|
wildwar/arrowanimation/__init__.py
|
gottadiveintopython/wildwar-old-version
|
e4786fa75843a026a32f345226c03b9091cd0bd9
|
[
"MIT"
] | null | null | null |
wildwar/arrowanimation/__init__.py
|
gottadiveintopython/wildwar-old-version
|
e4786fa75843a026a32f345226c03b9091cd0bd9
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
from .playstretchanimation import play_stretch_animation
from .outlinedpolygon import OutlinedPolygon
| 25.4
| 56
| 0.80315
| 13
| 127
| 7.692308
| 0.769231
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.00885
| 0.110236
| 127
| 4
| 57
| 31.75
| 0.876106
| 0.165354
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
898af0e33a800d739c82eb8e638cb732e619a72e
| 86
|
py
|
Python
|
app/stops/admin.py
|
IvanBodnar/subway-api
|
36d17533995394fc5a5e6e1707ef312778296869
|
[
"MIT"
] | null | null | null |
app/stops/admin.py
|
IvanBodnar/subway-api
|
36d17533995394fc5a5e6e1707ef312778296869
|
[
"MIT"
] | 9
|
2019-12-04T23:23:07.000Z
|
2022-02-10T08:12:30.000Z
|
app/stops/admin.py
|
IvanBodnar/subway-api
|
36d17533995394fc5a5e6e1707ef312778296869
|
[
"MIT"
] | null | null | null |
from django.contrib import admin
from .models import Stop
admin.site.register(Stop)
| 14.333333
| 32
| 0.802326
| 13
| 86
| 5.307692
| 0.692308
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.127907
| 86
| 5
| 33
| 17.2
| 0.92
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.666667
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
8990a665142a2569d6561e6ef1f4303fd304ebf1
| 244
|
py
|
Python
|
magicword/admin.py
|
sunlightlabs/django-magicword
|
95ec4f318299f6de4e4427f891708af38cf3ac87
|
[
"BSD-3-Clause"
] | 2
|
2017-12-23T05:17:38.000Z
|
2019-04-27T20:21:14.000Z
|
magicword/admin.py
|
sunlightlabs/django-magicword
|
95ec4f318299f6de4e4427f891708af38cf3ac87
|
[
"BSD-3-Clause"
] | null | null | null |
magicword/admin.py
|
sunlightlabs/django-magicword
|
95ec4f318299f6de4e4427f891708af38cf3ac87
|
[
"BSD-3-Clause"
] | null | null | null |
from django.contrib import admin
from magicword.models import MagicWord
class MagicWordAdmin(admin.ModelAdmin):
list_display = ('password', 'is_enabled')
list_editable = ('is_enabled',)
admin.site.register(MagicWord, MagicWordAdmin)
| 24.4
| 46
| 0.77459
| 28
| 244
| 6.607143
| 0.642857
| 0.097297
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.122951
| 244
| 9
| 47
| 27.111111
| 0.864486
| 0
| 0
| 0
| 0
| 0
| 0.114754
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0.166667
| 0.333333
| 0
| 0.833333
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 1
| 0
|
0
| 5
|
89a58b49d6f178e4e492b339cf99762acbfc5cec
| 267
|
py
|
Python
|
app/operation/operation_errors.py
|
BartekSzpak/adversary
|
231caf58722a5641dd08afe354f2760e89699f3a
|
[
"Apache-2.0",
"CC0-1.0"
] | 22
|
2019-06-08T11:00:02.000Z
|
2021-09-10T10:22:20.000Z
|
app/operation/operation_errors.py
|
BartekSzpak/adversary
|
231caf58722a5641dd08afe354f2760e89699f3a
|
[
"Apache-2.0",
"CC0-1.0"
] | 39
|
2019-04-28T13:28:58.000Z
|
2020-07-28T00:49:45.000Z
|
app/operation/operation_errors.py
|
BartekSzpak/adversary
|
231caf58722a5641dd08afe354f2760e89699f3a
|
[
"Apache-2.0",
"CC0-1.0"
] | 11
|
2019-04-29T00:58:35.000Z
|
2021-06-28T02:18:48.000Z
|
class StepParseError(Exception):
pass
class RatDisconnectedError(Exception):
pass
class InvalidTimeoutExceptionError(Exception):
pass
class RatCallbackTimeoutError(Exception):
pass
class MissingFileError(Exception):
pass
| 14.052632
| 47
| 0.715356
| 20
| 267
| 9.55
| 0.4
| 0.340314
| 0.376963
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.228464
| 267
| 18
| 48
| 14.833333
| 0.927184
| 0
| 0
| 0.5
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0.5
| 0
| 0
| 0.5
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 0
|
0
| 5
|
89ae9d9e15924a56b4fbe42ae88b8c5e5ca920aa
| 153
|
wsgi
|
Python
|
web/searchtube.wsgi
|
dermasmid/searchtube
|
68d740b37b990d00c35e9eec4fa30cc24affe954
|
[
"MIT"
] | 11
|
2021-06-17T06:12:29.000Z
|
2022-02-17T14:54:08.000Z
|
web/searchtube.wsgi
|
dermasmid/searchtube
|
68d740b37b990d00c35e9eec4fa30cc24affe954
|
[
"MIT"
] | null | null | null |
web/searchtube.wsgi
|
dermasmid/searchtube
|
68d740b37b990d00c35e9eec4fa30cc24affe954
|
[
"MIT"
] | null | null | null |
#!/bin/python3
import sys
sys.path.append('/var/www/searchtube/web')
from dotenv import load_dotenv
load_dotenv()
from server import app as application
| 19.125
| 42
| 0.79085
| 24
| 153
| 4.958333
| 0.708333
| 0.168067
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.007299
| 0.104575
| 153
| 7
| 43
| 21.857143
| 0.861314
| 0.084967
| 0
| 0
| 0
| 0
| 0.165468
| 0.165468
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.6
| 0
| 0.6
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
98261284adbf90e36b8dc8675b79df4188ca136e
| 26
|
py
|
Python
|
main.py
|
Bisma13123/cp_19_Mcqs-application-
|
44cb688f95bd9d81b6ad281b856d2922307a31a7
|
[
"MIT"
] | null | null | null |
main.py
|
Bisma13123/cp_19_Mcqs-application-
|
44cb688f95bd9d81b6ad281b856d2922307a31a7
|
[
"MIT"
] | null | null | null |
main.py
|
Bisma13123/cp_19_Mcqs-application-
|
44cb688f95bd9d81b6ad281b856d2922307a31a7
|
[
"MIT"
] | null | null | null |
print('demo')
print('abc')
| 13
| 13
| 0.653846
| 4
| 26
| 4.25
| 0.75
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.038462
| 26
| 2
| 14
| 13
| 0.68
| 0
| 0
| 0
| 0
| 0
| 0.259259
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
|
0
| 5
|
9834bb71f4aa4b56c52fce152e3af02902979374
| 222
|
py
|
Python
|
mnist_digit/loss.py
|
julkaar9/neuralgym
|
d4567b7316027ce0d13a0de6fb20c71ccd2afc83
|
[
"MIT"
] | null | null | null |
mnist_digit/loss.py
|
julkaar9/neuralgym
|
d4567b7316027ce0d13a0de6fb20c71ccd2afc83
|
[
"MIT"
] | null | null | null |
mnist_digit/loss.py
|
julkaar9/neuralgym
|
d4567b7316027ce0d13a0de6fb20c71ccd2afc83
|
[
"MIT"
] | null | null | null |
import numpy as np
class Cross_entropy:
def __init__(self):
pass
def value(self, yp, y):
return -1*np.sum(y.flatten()*np.log(1e-15 +yp.flatten()))
def dvalue(self, yp, y):
return yp-y
| 20.181818
| 65
| 0.585586
| 36
| 222
| 3.472222
| 0.611111
| 0.072
| 0.112
| 0.208
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.024691
| 0.27027
| 222
| 11
| 66
| 20.181818
| 0.746914
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.375
| false
| 0.125
| 0.125
| 0.25
| 0.875
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
|
0
| 5
|
9847ea0f010eb3623e1023559ef6e1ede4d56c2d
| 238
|
py
|
Python
|
montecarlo/mcpy/utils.py
|
v-asatha/EconML
|
eb9ac829e93abbc8a163ab09d905b40370b21b1a
|
[
"MIT"
] | null | null | null |
montecarlo/mcpy/utils.py
|
v-asatha/EconML
|
eb9ac829e93abbc8a163ab09d905b40370b21b1a
|
[
"MIT"
] | null | null | null |
montecarlo/mcpy/utils.py
|
v-asatha/EconML
|
eb9ac829e93abbc8a163ab09d905b40370b21b1a
|
[
"MIT"
] | null | null | null |
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
import numpy as np
def filesafe(str):
return "".join([c for c in str if c.isalpha() or c.isdigit() or c==' ']).rstrip().replace(' ', '_')
| 29.75
| 103
| 0.668067
| 36
| 238
| 4.388889
| 0.805556
| 0.037975
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.172269
| 238
| 7
| 104
| 34
| 0.80203
| 0.37395
| 0
| 0
| 0
| 0
| 0.020548
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| false
| 0
| 0.333333
| 0.333333
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 1
| 1
| 0
|
0
| 5
|
9860006fda2458cd1be120f2f015450a31dab7c1
| 201
|
py
|
Python
|
WarioEditor/toolkits/default/intToFloat.py
|
McMasterRS/WARIO-Editor
|
49156de97497e8457eb8a924256118a8f7fb5597
|
[
"MIT"
] | 1
|
2020-09-05T05:45:14.000Z
|
2020-09-05T05:45:14.000Z
|
WarioEditor/toolkits/default/intToFloat.py
|
McMasterRS/WARIO-Editor
|
49156de97497e8457eb8a924256118a8f7fb5597
|
[
"MIT"
] | null | null | null |
WarioEditor/toolkits/default/intToFloat.py
|
McMasterRS/WARIO-Editor
|
49156de97497e8457eb8a924256118a8f7fb5597
|
[
"MIT"
] | null | null | null |
from wario import Node
class intToFloat(Node):
def __init__(self, name):
super(intToFloat, self).__init__(name)
def process(self, intIn):
return {"Out": float(intIn)}
| 22.333333
| 46
| 0.626866
| 24
| 201
| 4.916667
| 0.666667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.253731
| 201
| 9
| 47
| 22.333333
| 0.786667
| 0
| 0
| 0
| 0
| 0
| 0.014851
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| false
| 0
| 0.166667
| 0.166667
| 0.833333
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 5
|
986e9618ec9a65c6057c5385172ea8c1771da9c0
| 213
|
py
|
Python
|
scraper/__init__.py
|
Michael-F-Bryan/scraper
|
44db3421999deb6e282f5a67be53347e0b058fe4
|
[
"MIT"
] | null | null | null |
scraper/__init__.py
|
Michael-F-Bryan/scraper
|
44db3421999deb6e282f5a67be53347e0b058fe4
|
[
"MIT"
] | null | null | null |
scraper/__init__.py
|
Michael-F-Bryan/scraper
|
44db3421999deb6e282f5a67be53347e0b058fe4
|
[
"MIT"
] | null | null | null |
"""
A simple, multithreaded web scraping framework.
"""
from .models import Job, Page
from .spider import BaseScraper
from ._version import get_versions
__version__ = get_versions()['version']
del get_versions
| 17.75
| 47
| 0.774648
| 27
| 213
| 5.814815
| 0.62963
| 0.210191
| 0.229299
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.13615
| 213
| 11
| 48
| 19.363636
| 0.853261
| 0.220657
| 0
| 0
| 0
| 0
| 0.044304
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.6
| 0
| 0.6
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
98a17a788a8a9257fdfe6f262d240e0944cf15c5
| 62
|
py
|
Python
|
venv/lib/python3.5/site-packages/tests/operators/__init__.py
|
mesodiar/bello-airflow
|
afede57f214774b50e6a4c083ca096ca2c060d31
|
[
"MIT"
] | 1
|
2021-04-05T11:25:36.000Z
|
2021-04-05T11:25:36.000Z
|
tests/operators/__init__.py
|
fvlankvelt/airflow
|
6cbe4a475f773bf32e1d7743718f7ae1a7dd9c91
|
[
"Apache-2.0"
] | null | null | null |
tests/operators/__init__.py
|
fvlankvelt/airflow
|
6cbe4a475f773bf32e1d7743718f7ae1a7dd9c91
|
[
"Apache-2.0"
] | 1
|
2019-12-12T06:44:14.000Z
|
2019-12-12T06:44:14.000Z
|
from .docker_operator import *
from .subdag_operator import *
| 20.666667
| 30
| 0.806452
| 8
| 62
| 6
| 0.625
| 0.583333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.129032
| 62
| 2
| 31
| 31
| 0.888889
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
7f3f5f56991a1d3d92f212c4a69c73329d67ab58
| 64
|
py
|
Python
|
wrangling/fluorimeter/__init__.py
|
ebentley17/Deniz_lab_code
|
3cf13c769bed0ddf0abf0dc74213a9dec96bfabb
|
[
"MIT"
] | null | null | null |
wrangling/fluorimeter/__init__.py
|
ebentley17/Deniz_lab_code
|
3cf13c769bed0ddf0abf0dc74213a9dec96bfabb
|
[
"MIT"
] | null | null | null |
wrangling/fluorimeter/__init__.py
|
ebentley17/Deniz_lab_code
|
3cf13c769bed0ddf0abf0dc74213a9dec96bfabb
|
[
"MIT"
] | 1
|
2020-11-07T18:11:49.000Z
|
2020-11-07T18:11:49.000Z
|
"""Contents: fluorimeter.py module"""
from . import corrections
| 21.333333
| 37
| 0.75
| 7
| 64
| 6.857143
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.109375
| 64
| 3
| 38
| 21.333333
| 0.842105
| 0.484375
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
7f4b4d9e0077dc626bb71707bea7c039036d32b7
| 98
|
py
|
Python
|
enthought/block_canvas/context/shell/api.py
|
enthought/etsproxy
|
4aafd628611ebf7fe8311c9d1a0abcf7f7bb5347
|
[
"BSD-3-Clause"
] | 3
|
2016-12-09T06:05:18.000Z
|
2018-03-01T13:00:29.000Z
|
enthought/block_canvas/context/shell/api.py
|
enthought/etsproxy
|
4aafd628611ebf7fe8311c9d1a0abcf7f7bb5347
|
[
"BSD-3-Clause"
] | 1
|
2020-12-02T00:51:32.000Z
|
2020-12-02T08:48:55.000Z
|
enthought/block_canvas/context/shell/api.py
|
enthought/etsproxy
|
4aafd628611ebf7fe8311c9d1a0abcf7f7bb5347
|
[
"BSD-3-Clause"
] | null | null | null |
# proxy module
from __future__ import absolute_import
from blockcanvas.context.shell.api import *
| 24.5
| 43
| 0.836735
| 13
| 98
| 5.923077
| 0.769231
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.112245
| 98
| 3
| 44
| 32.666667
| 0.885057
| 0.122449
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
7f515c584332efc9b27717f280ce06611582a3e8
| 357
|
py
|
Python
|
weapon_evolution/player.py
|
chenbojian/WeaponEvolution
|
02d249110329cf4d1a9a5c2e9837b5bfe6acf49a
|
[
"MIT"
] | null | null | null |
weapon_evolution/player.py
|
chenbojian/WeaponEvolution
|
02d249110329cf4d1a9a5c2e9837b5bfe6acf49a
|
[
"MIT"
] | null | null | null |
weapon_evolution/player.py
|
chenbojian/WeaponEvolution
|
02d249110329cf4d1a9a5c2e9837b5bfe6acf49a
|
[
"MIT"
] | null | null | null |
class Player:
def __init__(self, name, life_value, attack_value):
self.name = name
self.life_value = life_value
self.attack_value = attack_value
def attack(self, enemy_player: 'Player'):
enemy_player.life_value = enemy_player.life_value - self.attack_value
def is_alive(self):
return self.life_value > 0
| 32.454545
| 77
| 0.680672
| 49
| 357
| 4.591837
| 0.285714
| 0.24
| 0.142222
| 0.168889
| 0.213333
| 0
| 0
| 0
| 0
| 0
| 0
| 0.003663
| 0.235294
| 357
| 11
| 78
| 32.454545
| 0.820513
| 0
| 0
| 0
| 0
| 0
| 0.01676
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| false
| 0
| 0
| 0.111111
| 0.555556
| 0
| 0
| 0
| 0
| null | 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 5
|
7f7f39e95df143c2638645dc8b9ce75b4097bc57
| 144
|
py
|
Python
|
core/app/utils/django/standalone_init.py
|
EmixMaxime/mx-home-security
|
ec6d329a09bb2e0afbbd7e481937893311f02634
|
[
"MIT"
] | 2
|
2021-04-29T19:28:59.000Z
|
2021-04-29T21:20:32.000Z
|
core/app/utils/django/standalone_init.py
|
EmixMaxime/mx-home-security
|
ec6d329a09bb2e0afbbd7e481937893311f02634
|
[
"MIT"
] | 101
|
2020-06-26T19:51:24.000Z
|
2021-03-28T09:35:55.000Z
|
core/app/utils/django/standalone_init.py
|
mxmaxime/mx-tech-house
|
f6b66b8390b348e48d4c6ea0da51e409f3845fd6
|
[
"MIT"
] | null | null | null |
import os
import sys
import django
def init():
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'hello_django.settings')
django.setup()
| 18
| 76
| 0.75
| 19
| 144
| 5.526316
| 0.631579
| 0.266667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.138889
| 144
| 7
| 77
| 20.571429
| 0.846774
| 0
| 0
| 0
| 0
| 0
| 0.298611
| 0.298611
| 0
| 0
| 0
| 0
| 0
| 1
| 0.166667
| true
| 0
| 0.5
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
7fa71bfb0eb19bf1afb969b4536a6ba6983989fa
| 22
|
py
|
Python
|
randomfile.py
|
artpomm/cs3240-labdemo
|
be51c63135db0120dfcedcf5b26074c152565a83
|
[
"MIT"
] | null | null | null |
randomfile.py
|
artpomm/cs3240-labdemo
|
be51c63135db0120dfcedcf5b26074c152565a83
|
[
"MIT"
] | null | null | null |
randomfile.py
|
artpomm/cs3240-labdemo
|
be51c63135db0120dfcedcf5b26074c152565a83
|
[
"MIT"
] | null | null | null |
a = 4 + 7
print (a)
| 7.333333
| 10
| 0.409091
| 5
| 22
| 1.8
| 0.8
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.153846
| 0.409091
| 22
| 2
| 11
| 11
| 0.538462
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0.5
| 1
| 1
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
|
0
| 5
|
7fae6a274d751e909c7c6ec321e8bd84a3ccd6be
| 51
|
py
|
Python
|
backend/src/baserow/contrib/database/formula/exceptions.py
|
lucastm/baserow
|
c5fd45b75c753cc5dfd3227902a79535fbe5ad0f
|
[
"MIT"
] | 839
|
2020-07-20T13:29:34.000Z
|
2022-03-31T21:09:16.000Z
|
backend/src/baserow/contrib/database/formula/exceptions.py
|
lucastm/baserow
|
c5fd45b75c753cc5dfd3227902a79535fbe5ad0f
|
[
"MIT"
] | 28
|
2020-08-07T09:23:58.000Z
|
2022-03-01T22:32:40.000Z
|
backend/src/baserow/contrib/database/formula/exceptions.py
|
lucastm/baserow
|
c5fd45b75c753cc5dfd3227902a79535fbe5ad0f
|
[
"MIT"
] | 79
|
2020-08-04T01:48:01.000Z
|
2022-03-27T13:30:54.000Z
|
class BaserowFormulaException(Exception):
pass
| 17
| 41
| 0.803922
| 4
| 51
| 10.25
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.137255
| 51
| 2
| 42
| 25.5
| 0.931818
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0.5
| 0
| 0
| 0.5
| 0
| 1
| 0
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 0
|
0
| 5
|
f6a0023b700feb17ed9b0f3c05354ce4f60a5dfd
| 148
|
py
|
Python
|
zengo/strings.py
|
ableeck/django-zengo
|
33f3795215dac4ac2121d26fc702a24adb1748f2
|
[
"MIT"
] | 10
|
2019-02-11T19:13:41.000Z
|
2021-12-10T21:23:51.000Z
|
zengo/strings.py
|
myles/django-zengo
|
d896b931139a65c497196b9669313f1dcfd560c9
|
[
"MIT"
] | 4
|
2019-01-03T00:02:31.000Z
|
2020-11-11T01:31:06.000Z
|
zengo/strings.py
|
myles/django-zengo
|
d896b931139a65c497196b9669313f1dcfd560c9
|
[
"MIT"
] | 3
|
2019-02-28T15:58:24.000Z
|
2020-06-09T02:45:42.000Z
|
data_malformed = "No JSON object could be decoded"
data_no_ticket_id = "`id` not found in data"
secret_missing_or_wrong = "Secret missing or wrong"
| 37
| 51
| 0.783784
| 25
| 148
| 4.36
| 0.68
| 0.238532
| 0.275229
| 0.366972
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.141892
| 148
| 3
| 52
| 49.333333
| 0.858268
| 0
| 0
| 0
| 0
| 0
| 0.513514
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
f6a3a3c5f35a9e01c029d48f2a932fe4aa32bb85
| 169
|
py
|
Python
|
distriopt/embedding/algorithms/__init__.py
|
Giuseppe1992/mapping_distrinet
|
a9e171bf27e3b1d4cc674ac8a6e67ffe8f9c6878
|
[
"MIT"
] | 2
|
2020-01-31T16:18:38.000Z
|
2021-07-19T07:44:13.000Z
|
distriopt/embedding/algorithms/__init__.py
|
jdoe79250/jd_mapping_distrinet_1
|
7ccbf2fb09b619dc97d105d936d549ab9ac828fa
|
[
"MIT"
] | null | null | null |
distriopt/embedding/algorithms/__init__.py
|
jdoe79250/jd_mapping_distrinet_1
|
7ccbf2fb09b619dc97d105d936d549ab9ac828fa
|
[
"MIT"
] | 2
|
2020-01-31T16:18:43.000Z
|
2020-02-03T10:14:19.000Z
|
from .greedy import EmbedGreedy
from .ilp import EmbedILP
from .kbalanced import EmbedBalanced
from .partition import EmbedPartition
from .random import RandomSelection
| 28.166667
| 37
| 0.852071
| 20
| 169
| 7.2
| 0.6
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.118343
| 169
| 5
| 38
| 33.8
| 0.966443
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
122a94a92d3a8af8f04f208790c93eb77caa6e23
| 83
|
py
|
Python
|
src/polls/forLoop4.py
|
Prince-linux/python-learning
|
75335ed497081b557400a05320b52b8889c3e1f4
|
[
"MIT"
] | 1
|
2015-08-27T13:03:27.000Z
|
2015-08-27T13:03:27.000Z
|
src/polls/forLoop4.py
|
Prince-linux/python-learning
|
75335ed497081b557400a05320b52b8889c3e1f4
|
[
"MIT"
] | 22
|
2015-08-23T18:17:30.000Z
|
2015-09-16T13:38:36.000Z
|
src/polls/forLoop4.py
|
Prince-linux/python-learning
|
75335ed497081b557400a05320b52b8889c3e1f4
|
[
"MIT"
] | null | null | null |
for i in range(0, 201, 2):
print(i)
for i in range(0, 100, 3):
print(i)
| 10.375
| 26
| 0.53012
| 18
| 83
| 2.444444
| 0.555556
| 0.181818
| 0.272727
| 0.5
| 0.545455
| 0
| 0
| 0
| 0
| 0
| 0
| 0.172414
| 0.301205
| 83
| 7
| 27
| 11.857143
| 0.586207
| 0
| 0
| 0.5
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0.5
| 1
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
|
0
| 5
|
89f14dff630acc49e89ff334bb5893d1e77a421c
| 163
|
py
|
Python
|
store_item_models/store_item_stocks/apps.py
|
reimibeta/django-store-item-models
|
0be5fad0df0b3ebc7283fc6369f0e769a4743987
|
[
"Apache-2.0"
] | null | null | null |
store_item_models/store_item_stocks/apps.py
|
reimibeta/django-store-item-models
|
0be5fad0df0b3ebc7283fc6369f0e769a4743987
|
[
"Apache-2.0"
] | null | null | null |
store_item_models/store_item_stocks/apps.py
|
reimibeta/django-store-item-models
|
0be5fad0df0b3ebc7283fc6369f0e769a4743987
|
[
"Apache-2.0"
] | null | null | null |
from django.apps import AppConfig
class StoreItemStockConfig(AppConfig):
name = 'store_item_models.store_item_stocks'
verbose_name = 'Store Item Stocks'
| 23.285714
| 48
| 0.785276
| 20
| 163
| 6.15
| 0.65
| 0.219512
| 0.211382
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.147239
| 163
| 6
| 49
| 27.166667
| 0.884892
| 0
| 0
| 0
| 0
| 0
| 0.319018
| 0.214724
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.25
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
|
0
| 5
|
d63f7f91804007a2219572c89937d6e1e5c23cb0
| 203
|
py
|
Python
|
libdlt/result.py
|
datalogistics/libdlt
|
f3d8afb06a237fe6e4114c1a55e6f407ba9cc7b0
|
[
"BSD-3-Clause"
] | null | null | null |
libdlt/result.py
|
datalogistics/libdlt
|
f3d8afb06a237fe6e4114c1a55e6f407ba9cc7b0
|
[
"BSD-3-Clause"
] | 2
|
2018-05-20T21:33:03.000Z
|
2019-02-15T16:48:37.000Z
|
libdlt/result.py
|
datalogistics/libdlt
|
f3d8afb06a237fe6e4114c1a55e6f407ba9cc7b0
|
[
"BSD-3-Clause"
] | null | null | null |
from collections import namedtuple
GenericTransactionResult = namedtuple('GenericTransactionResult', ['time', 't_size', 'exnode'])
UploadResult = DownloadResult = CopyResult = GenericTransactionResult
| 33.833333
| 95
| 0.812808
| 15
| 203
| 10.933333
| 0.8
| 0.414634
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.093596
| 203
| 5
| 96
| 40.6
| 0.891304
| 0
| 0
| 0
| 0
| 0
| 0.19802
| 0.118812
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.333333
| 0
| 0.333333
| 0
| 1
| 0
| 1
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
d64a3578942fd4414274e72c37b8e458b2223c6e
| 93
|
py
|
Python
|
ims_soft/stock/admin.py
|
fouadsan/ims-soft
|
872f9fedd7d848cf9b2a16d53bcf1925fe042721
|
[
"MIT"
] | null | null | null |
ims_soft/stock/admin.py
|
fouadsan/ims-soft
|
872f9fedd7d848cf9b2a16d53bcf1925fe042721
|
[
"MIT"
] | null | null | null |
ims_soft/stock/admin.py
|
fouadsan/ims-soft
|
872f9fedd7d848cf9b2a16d53bcf1925fe042721
|
[
"MIT"
] | null | null | null |
from django.contrib import admin
from .models import Barcode
admin.site.register(Barcode)
| 13.285714
| 32
| 0.806452
| 13
| 93
| 5.769231
| 0.692308
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.129032
| 93
| 6
| 33
| 15.5
| 0.925926
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.666667
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
d64cec10534dd62f9f77f32040fa7f5cf7b5d3d6
| 47,611
|
py
|
Python
|
Development Indicators Project/Docker/Clustering_Wrangling.py
|
autodidact-m/Projects
|
f4c0473adba42f3a629b62eb09d3b1df91982f46
|
[
"Apache-2.0"
] | null | null | null |
Development Indicators Project/Docker/Clustering_Wrangling.py
|
autodidact-m/Projects
|
f4c0473adba42f3a629b62eb09d3b1df91982f46
|
[
"Apache-2.0"
] | null | null | null |
Development Indicators Project/Docker/Clustering_Wrangling.py
|
autodidact-m/Projects
|
f4c0473adba42f3a629b62eb09d3b1df91982f46
|
[
"Apache-2.0"
] | null | null | null |
# coding: utf-8
# In[1]:
import csv
import pandas as pd
import os
pd.options.mode.chained_assignment = None
import numpy as np
import boto3
from boto3.s3.transfer import S3Transfer
import sys
# In[5]:
def readFile():
homepath = os.path.expanduser('~')
indicator_data = pd.read_csv('./Data/Clustering/Indicators_Clustering_Combined.csv', low_memory=False)
return indicator_data
# # Handling Missing values for Australia
# In[25]:
def australia():
indicator_data = readFile()
australia_df_ind1 = indicator_data[(indicator_data['IndicatorCode'].isin(['AG.LND.AGRI.ZS'])) & (indicator_data['CountryCode'] == 'AU')]
australia_df_ind2 = indicator_data[(indicator_data['IndicatorCode'].isin(['SP.DYN.CBRT.IN'])) & (indicator_data['CountryCode'] == 'AU')]
australia_df_ind3 = indicator_data[(indicator_data['IndicatorCode'].isin(['SP.POP.DPND'])) & (indicator_data['CountryCode'] == 'AU')]
australia_df_ind4 = indicator_data[(indicator_data['IndicatorCode'].isin(['NE.EXP.GNFS.ZS'])) & (indicator_data['CountryCode'] == 'AU')]
australia_df_ind5 = indicator_data[(indicator_data['IndicatorCode'].isin(['NE.IMP.GNFS.ZS'])) & (indicator_data['CountryCode'] == 'AU')]
australia_df_ind6 = indicator_data[(indicator_data['IndicatorCode'].isin(['NY.GDP.MKTP.CD'])) & (indicator_data['CountryCode'] == 'AU')]
australia_df_ind7 = indicator_data[(indicator_data['IndicatorCode'].isin(['NY.GDP.MKTP.KD.ZG'])) & (indicator_data['CountryCode'] == 'AU')]
australia_df_ind8 = indicator_data[(indicator_data['IndicatorCode'].isin(['SP.POP.GROW'])) & (indicator_data['CountryCode'] == 'AU')]
australia_df_ind9 = indicator_data[(indicator_data['IndicatorCode'].isin(['FI.RES.TOTL.CD'])) & (indicator_data['CountryCode'] == 'AU')]
australia_df_ind10 = indicator_data[(indicator_data['IndicatorCode'].isin(['NE.TRD.GNFS.ZS'])) & (indicator_data['CountryCode'] == 'AU')]
australia_df_ind1['Value'] = australia_df_ind1['Value'].fillna(method='bfill', axis = 0)
australia_df_ind2['Value'] = australia_df_ind2['Value'].fillna(method='bfill', axis = 0)
# Combining all the Argentina Dataframes
australia_df = pd.concat([australia_df_ind1, australia_df_ind2, australia_df_ind3, australia_df_ind4, australia_df_ind5, australia_df_ind6, australia_df_ind7, australia_df_ind8, australia_df_ind9, australia_df_ind10])
print('Clustering Wrangling completed for Australia!!', '\n')
return australia_df
# # Handling Missing values for Canada
# In[26]:
def canada():
indicator_data = readFile()
canada_df_ind1 = indicator_data[(indicator_data['IndicatorCode'].isin(['AG.LND.AGRI.ZS'])) & (indicator_data['CountryCode'] == 'CA')]
canada_df_ind2 = indicator_data[(indicator_data['IndicatorCode'].isin(['SP.DYN.CBRT.IN'])) & (indicator_data['CountryCode'] == 'CA')]
canada_df_ind3 = indicator_data[(indicator_data['IndicatorCode'].isin(['SP.POP.DPND'])) & (indicator_data['CountryCode'] == 'CA')]
canada_df_ind4 = indicator_data[(indicator_data['IndicatorCode'].isin(['NE.EXP.GNFS.ZS'])) & (indicator_data['CountryCode'] == 'CA')]
canada_df_ind5 = indicator_data[(indicator_data['IndicatorCode'].isin(['NE.IMP.GNFS.ZS'])) & (indicator_data['CountryCode'] == 'CA')]
canada_df_ind6 = indicator_data[(indicator_data['IndicatorCode'].isin(['NY.GDP.MKTP.CD'])) & (indicator_data['CountryCode'] == 'CA')]
canada_df_ind7 = indicator_data[(indicator_data['IndicatorCode'].isin(['NY.GDP.MKTP.KD.ZG'])) & (indicator_data['CountryCode'] == 'CA')]
canada_df_ind8 = indicator_data[(indicator_data['IndicatorCode'].isin(['SP.POP.GROW'])) & (indicator_data['CountryCode'] == 'CA')]
canada_df_ind9 = indicator_data[(indicator_data['IndicatorCode'].isin(['FI.RES.TOTL.CD'])) & (indicator_data['CountryCode'] == 'CA')]
canada_df_ind10 = indicator_data[(indicator_data['IndicatorCode'].isin(['NE.TRD.GNFS.ZS'])) & (indicator_data['CountryCode'] == 'CA')]
canada_df_ind1['Value'] = canada_df_ind1['Value'].fillna(method='bfill', axis = 0)
canada_df_ind2['Value'] = canada_df_ind2['Value'].fillna(method='bfill', axis = 0)
# Combining all the Brazil Dataframes
canada_df = pd.concat([canada_df_ind1, canada_df_ind2, canada_df_ind3, canada_df_ind4, canada_df_ind5, canada_df_ind6, canada_df_ind7, canada_df_ind8, canada_df_ind9, canada_df_ind10])
print('Clustering Wrangling completed for Canada!!', '\n')
return canada_df
# # Handling Missing values for Saudi Arabia
# In[27]:
def saudi_Arabia():
indicator_data = readFile()
saudi_arabia_df_ind1 = indicator_data[(indicator_data['IndicatorCode'].isin(['AG.LND.AGRI.ZS'])) & (indicator_data['CountryCode'] == 'SA')]
saudi_arabia_df_ind2 = indicator_data[(indicator_data['IndicatorCode'].isin(['SP.DYN.CBRT.IN'])) & (indicator_data['CountryCode'] == 'SA')]
saudi_arabia_df_ind3 = indicator_data[(indicator_data['IndicatorCode'].isin(['SP.POP.DPND'])) & (indicator_data['CountryCode'] == 'SA')]
saudi_arabia_df_ind4 = indicator_data[(indicator_data['IndicatorCode'].isin(['NE.EXP.GNFS.ZS'])) & (indicator_data['CountryCode'] == 'SA')]
saudi_arabia_df_ind5 = indicator_data[(indicator_data['IndicatorCode'].isin(['NE.IMP.GNFS.ZS'])) & (indicator_data['CountryCode'] == 'SA')]
saudi_arabia_df_ind6 = indicator_data[(indicator_data['IndicatorCode'].isin(['NY.GDP.MKTP.CD'])) & (indicator_data['CountryCode'] == 'SA')]
saudi_arabia_df_ind7 = indicator_data[(indicator_data['IndicatorCode'].isin(['NY.GDP.MKTP.KD.ZG'])) & (indicator_data['CountryCode'] == 'SA')]
saudi_arabia_df_ind8 = indicator_data[(indicator_data['IndicatorCode'].isin(['SP.POP.GROW'])) & (indicator_data['CountryCode'] == 'SA')]
saudi_arabia_df_ind9 = indicator_data[(indicator_data['IndicatorCode'].isin(['FI.RES.TOTL.CD'])) & (indicator_data['CountryCode'] == 'SA')]
saudi_arabia_df_ind10 = indicator_data[(indicator_data['IndicatorCode'].isin(['NE.TRD.GNFS.ZS'])) & (indicator_data['CountryCode'] == 'SA')]
saudi_arabia_df_ind1['Value'] = saudi_arabia_df_ind1['Value'].fillna(method='bfill', axis = 0)
saudi_arabia_df_ind2['Value'] = saudi_arabia_df_ind2['Value'].fillna(method='bfill', axis = 0)
# Combining all the Ecuador Dataframes
saudi_arabia_df = pd.concat([saudi_arabia_df_ind1, saudi_arabia_df_ind2, saudi_arabia_df_ind3, saudi_arabia_df_ind4, saudi_arabia_df_ind5, saudi_arabia_df_ind6, saudi_arabia_df_ind7, saudi_arabia_df_ind8, saudi_arabia_df_ind9, saudi_arabia_df_ind10])
print('Clustering Wrangling completed for Saudi Arabia!!', '\n')
return saudi_arabia_df
# # Handling Missing values for United States
# In[28]:
def united_States():
indicator_data = readFile()
united_states_df_ind1 = indicator_data[(indicator_data['IndicatorCode'].isin(['AG.LND.AGRI.ZS'])) & (indicator_data['CountryCode'] == 'US')]
united_states_df_ind2 = indicator_data[(indicator_data['IndicatorCode'].isin(['SP.DYN.CBRT.IN'])) & (indicator_data['CountryCode'] == 'US')]
united_states_df_ind3 = indicator_data[(indicator_data['IndicatorCode'].isin(['SP.POP.DPND'])) & (indicator_data['CountryCode'] == 'US')]
united_states_df_ind4 = indicator_data[(indicator_data['IndicatorCode'].isin(['NE.EXP.GNFS.ZS'])) & (indicator_data['CountryCode'] == 'US')]
united_states_df_ind5 = indicator_data[(indicator_data['IndicatorCode'].isin(['NE.IMP.GNFS.ZS'])) & (indicator_data['CountryCode'] == 'US')]
united_states_df_ind6 = indicator_data[(indicator_data['IndicatorCode'].isin(['NY.GDP.MKTP.CD'])) & (indicator_data['CountryCode'] == 'US')]
united_states_df_ind7 = indicator_data[(indicator_data['IndicatorCode'].isin(['NY.GDP.MKTP.KD.ZG'])) & (indicator_data['CountryCode'] == 'US')]
united_states_df_ind8 = indicator_data[(indicator_data['IndicatorCode'].isin(['SP.POP.GROW'])) & (indicator_data['CountryCode'] == 'US')]
united_states_df_ind9 = indicator_data[(indicator_data['IndicatorCode'].isin(['FI.RES.TOTL.CD'])) & (indicator_data['CountryCode'] == 'US')]
united_states_df_ind10 = indicator_data[(indicator_data['IndicatorCode'].isin(['NE.TRD.GNFS.ZS'])) & (indicator_data['CountryCode'] == 'US')]
united_states_df_ind1['Value'] = united_states_df_ind1['Value'].fillna(method='bfill', axis = 0)
united_states_df_ind2['Value'] = united_states_df_ind2['Value'].fillna(method='bfill', axis = 0)
united_states_df_ind4['Value'] = united_states_df_ind4['Value'].fillna(method='bfill', axis = 0)
united_states_df_ind5['Value'] = united_states_df_ind5['Value'].fillna(method='bfill', axis = 0)
united_states_df_ind10['Value'] = united_states_df_ind10['Value'].fillna(method='bfill', axis = 0)
# Combining all the Libya Dataframes
united_states_df = pd.concat([united_states_df_ind1, united_states_df_ind2, united_states_df_ind3, united_states_df_ind4, united_states_df_ind5, united_states_df_ind6, united_states_df_ind7, united_states_df_ind8, united_states_df_ind9, united_states_df_ind10])
print('Clustering Wrangling completed for United States!!', '\n')
return united_states_df
# # Handling Missing values for India
# In[10]:
def india():
indicator_data = readFile()
india_df_ind1 = indicator_data[(indicator_data['IndicatorCode'].isin(['AG.LND.AGRI.ZS'])) & (indicator_data['CountryCode'] == 'IN')]
india_df_ind2 = indicator_data[(indicator_data['IndicatorCode'].isin(['SP.DYN.CBRT.IN'])) & (indicator_data['CountryCode'] == 'IN')]
india_df_ind3 = indicator_data[(indicator_data['IndicatorCode'].isin(['SP.POP.DPND'])) & (indicator_data['CountryCode'] == 'IN')]
india_df_ind4 = indicator_data[(indicator_data['IndicatorCode'].isin(['NE.EXP.GNFS.ZS'])) & (indicator_data['CountryCode'] == 'IN')]
india_df_ind5 = indicator_data[(indicator_data['IndicatorCode'].isin(['NE.IMP.GNFS.ZS'])) & (indicator_data['CountryCode'] == 'IN')]
india_df_ind6 = indicator_data[(indicator_data['IndicatorCode'].isin(['NY.GDP.MKTP.CD'])) & (indicator_data['CountryCode'] == 'IN')]
india_df_ind7 = indicator_data[(indicator_data['IndicatorCode'].isin(['NY.GDP.MKTP.KD.ZG'])) & (indicator_data['CountryCode'] == 'IN')]
india_df_ind8 = indicator_data[(indicator_data['IndicatorCode'].isin(['SP.POP.GROW'])) & (indicator_data['CountryCode'] == 'IN')]
india_df_ind9 = indicator_data[(indicator_data['IndicatorCode'].isin(['FI.RES.TOTL.CD'])) & (indicator_data['CountryCode'] == 'IN')]
india_df_ind10 = indicator_data[(indicator_data['IndicatorCode'].isin(['NE.TRD.GNFS.ZS'])) & (indicator_data['CountryCode'] == 'IN')]
india_df_ind1['Value'] = india_df_ind1['Value'].fillna(method='bfill', axis = 0)
india_df_ind2['Value'] = india_df_ind2['Value'].fillna(method='bfill', axis = 0)
# Combining all the India Dataframes
india_df = pd.concat([india_df_ind1, india_df_ind2, india_df_ind3, india_df_ind4, india_df_ind5, india_df_ind6, india_df_ind7, india_df_ind8, india_df_ind9, india_df_ind10])
print('Clustering Wrangling completed for India!!', '\n')
return india_df
# # Handling Missing values for Russia
# In[11]:
def russia():
indicator_data = readFile()
russia_df_ind1 = indicator_data[(indicator_data['IndicatorCode'].isin(['AG.LND.AGRI.ZS'])) & (indicator_data['CountryCode'] == 'RU')]
russia_df_ind2 = indicator_data[(indicator_data['IndicatorCode'].isin(['SP.DYN.CBRT.IN'])) & (indicator_data['CountryCode'] == 'RU')]
russia_df_ind3 = indicator_data[(indicator_data['IndicatorCode'].isin(['SP.POP.DPND'])) & (indicator_data['CountryCode'] == 'RU')]
russia_df_ind4 = indicator_data[(indicator_data['IndicatorCode'].isin(['NE.EXP.GNFS.ZS'])) & (indicator_data['CountryCode'] == 'RU')]
russia_df_ind5 = indicator_data[(indicator_data['IndicatorCode'].isin(['NE.IMP.GNFS.ZS'])) & (indicator_data['CountryCode'] == 'RU')]
russia_df_ind6 = indicator_data[(indicator_data['IndicatorCode'].isin(['NY.GDP.MKTP.CD'])) & (indicator_data['CountryCode'] == 'RU')]
russia_df_ind7 = indicator_data[(indicator_data['IndicatorCode'].isin(['NY.GDP.MKTP.KD.ZG'])) & (indicator_data['CountryCode'] == 'RU')]
russia_df_ind8 = indicator_data[(indicator_data['IndicatorCode'].isin(['SP.POP.GROW'])) & (indicator_data['CountryCode'] == 'RU')]
russia_df_ind9 = indicator_data[(indicator_data['IndicatorCode'].isin(['FI.RES.TOTL.CD'])) & (indicator_data['CountryCode'] == 'RU')]
russia_df_ind10 = indicator_data[(indicator_data['IndicatorCode'].isin(['NE.TRD.GNFS.ZS'])) & (indicator_data['CountryCode'] == 'RU')]
russia_df_ind1['Value'] = russia_df_ind1['Value'].fillna(method='bfill', axis = 0)
russia_df_ind2['Value'] = russia_df_ind2['Value'].fillna(method='bfill', axis = 0)
# Combining all the Russia Dataframes
russia_df = pd.concat([russia_df_ind1, russia_df_ind2, russia_df_ind3, russia_df_ind4, russia_df_ind5, russia_df_ind6, russia_df_ind7, russia_df_ind8, russia_df_ind9, russia_df_ind10])
print('Clustering Wrangling completed for Russia!!', '\n')
return russia_df
# # Handling Missing values for South Africa
# In[12]:
def south_Africa():
indicator_data = readFile()
south_africa_df_ind1 = indicator_data[(indicator_data['IndicatorCode'].isin(['AG.LND.AGRI.ZS'])) & (indicator_data['CountryCode'] == 'ZA')]
south_africa_df_ind2 = indicator_data[(indicator_data['IndicatorCode'].isin(['SP.DYN.CBRT.IN'])) & (indicator_data['CountryCode'] == 'ZA')]
south_africa_df_ind3 = indicator_data[(indicator_data['IndicatorCode'].isin(['SP.POP.DPND'])) & (indicator_data['CountryCode'] == 'ZA')]
south_africa_df_ind4 = indicator_data[(indicator_data['IndicatorCode'].isin(['NE.EXP.GNFS.ZS'])) & (indicator_data['CountryCode'] == 'ZA')]
south_africa_df_ind5 = indicator_data[(indicator_data['IndicatorCode'].isin(['NE.IMP.GNFS.ZS'])) & (indicator_data['CountryCode'] == 'ZA')]
south_africa_df_ind6 = indicator_data[(indicator_data['IndicatorCode'].isin(['NY.GDP.MKTP.CD'])) & (indicator_data['CountryCode'] == 'ZA')]
south_africa_df_ind7 = indicator_data[(indicator_data['IndicatorCode'].isin(['NY.GDP.MKTP.KD.ZG'])) & (indicator_data['CountryCode'] == 'ZA')]
south_africa_df_ind8 = indicator_data[(indicator_data['IndicatorCode'].isin(['SP.POP.GROW'])) & (indicator_data['CountryCode'] == 'ZA')]
south_africa_df_ind9 = indicator_data[(indicator_data['IndicatorCode'].isin(['FI.RES.TOTL.CD'])) & (indicator_data['CountryCode'] == 'ZA')]
south_africa_df_ind10 = indicator_data[(indicator_data['IndicatorCode'].isin(['NE.TRD.GNFS.ZS'])) & (indicator_data['CountryCode'] == 'ZA')]
south_africa_df_ind1['Value'] = south_africa_df_ind1['Value'].fillna(method='bfill', axis = 0)
south_africa_df_ind2['Value'] = south_africa_df_ind2['Value'].fillna(method='bfill', axis = 0)
# Combining all the South_Africa Dataframes
south_africa_df = pd.concat([south_africa_df_ind1, south_africa_df_ind2, south_africa_df_ind3, south_africa_df_ind4, south_africa_df_ind5, south_africa_df_ind6, south_africa_df_ind7, south_africa_df_ind8, south_africa_df_ind9, south_africa_df_ind10])
print('Clustering Wrangling completed for South Africa!!', '\n')
return south_africa_df
# # Handling Missing values for Turkey
# In[13]:
def turkey():
indicator_data = readFile()
turkey_df_ind1 = indicator_data[(indicator_data['IndicatorCode'].isin(['AG.LND.AGRI.ZS'])) & (indicator_data['CountryCode'] == 'TR')]
turkey_df_ind2 = indicator_data[(indicator_data['IndicatorCode'].isin(['SP.DYN.CBRT.IN'])) & (indicator_data['CountryCode'] == 'TR')]
turkey_df_ind3 = indicator_data[(indicator_data['IndicatorCode'].isin(['SP.POP.DPND'])) & (indicator_data['CountryCode'] == 'TR')]
turkey_df_ind4 = indicator_data[(indicator_data['IndicatorCode'].isin(['NE.EXP.GNFS.ZS'])) & (indicator_data['CountryCode'] == 'TR')]
turkey_df_ind5 = indicator_data[(indicator_data['IndicatorCode'].isin(['NE.IMP.GNFS.ZS'])) & (indicator_data['CountryCode'] == 'TR')]
turkey_df_ind6 = indicator_data[(indicator_data['IndicatorCode'].isin(['NY.GDP.MKTP.CD'])) & (indicator_data['CountryCode'] == 'TR')]
turkey_df_ind7 = indicator_data[(indicator_data['IndicatorCode'].isin(['NY.GDP.MKTP.KD.ZG'])) & (indicator_data['CountryCode'] == 'TR')]
turkey_df_ind8 = indicator_data[(indicator_data['IndicatorCode'].isin(['SP.POP.GROW'])) & (indicator_data['CountryCode'] == 'TR')]
turkey_df_ind9 = indicator_data[(indicator_data['IndicatorCode'].isin(['FI.RES.TOTL.CD'])) & (indicator_data['CountryCode'] == 'TR')]
turkey_df_ind10 = indicator_data[(indicator_data['IndicatorCode'].isin(['NE.TRD.GNFS.ZS'])) & (indicator_data['CountryCode'] == 'TR')]
turkey_df_ind1['Value'] = turkey_df_ind1['Value'].fillna(method='bfill', axis = 0)
turkey_df_ind2['Value'] = turkey_df_ind2['Value'].fillna(method='bfill', axis = 0)
# Combining all the Turkey Dataframes
turkey_df = pd.concat([turkey_df_ind1, turkey_df_ind2, turkey_df_ind3, turkey_df_ind4, turkey_df_ind5, turkey_df_ind6, turkey_df_ind7, turkey_df_ind8, turkey_df_ind9, turkey_df_ind10])
print('Clustering Wrangling completed for Turkey!!', '\n')
return turkey_df
# # Handling Missing values for Argentina
# In[14]:
def argentina():
indicator_data = readFile()
argentina_df_ind1 = indicator_data[(indicator_data['IndicatorCode'].isin(['AG.LND.AGRI.ZS'])) & (indicator_data['CountryCode'] == 'AR')]
argentina_df_ind2 = indicator_data[(indicator_data['IndicatorCode'].isin(['SP.DYN.CBRT.IN'])) & (indicator_data['CountryCode'] == 'AR')]
argentina_df_ind3 = indicator_data[(indicator_data['IndicatorCode'].isin(['SP.POP.DPND'])) & (indicator_data['CountryCode'] == 'AR')]
argentina_df_ind4 = indicator_data[(indicator_data['IndicatorCode'].isin(['NE.EXP.GNFS.ZS'])) & (indicator_data['CountryCode'] == 'AR')]
argentina_df_ind5 = indicator_data[(indicator_data['IndicatorCode'].isin(['NE.IMP.GNFS.ZS'])) & (indicator_data['CountryCode'] == 'AR')]
argentina_df_ind6 = indicator_data[(indicator_data['IndicatorCode'].isin(['NY.GDP.MKTP.CD'])) & (indicator_data['CountryCode'] == 'AR')]
argentina_df_ind7 = indicator_data[(indicator_data['IndicatorCode'].isin(['NY.GDP.MKTP.KD.ZG'])) & (indicator_data['CountryCode'] == 'AR')]
argentina_df_ind8 = indicator_data[(indicator_data['IndicatorCode'].isin(['SP.POP.GROW'])) & (indicator_data['CountryCode'] == 'AR')]
argentina_df_ind9 = indicator_data[(indicator_data['IndicatorCode'].isin(['FI.RES.TOTL.CD'])) & (indicator_data['CountryCode'] == 'AR')]
argentina_df_ind10 = indicator_data[(indicator_data['IndicatorCode'].isin(['NE.TRD.GNFS.ZS'])) & (indicator_data['CountryCode'] == 'AR')]
argentina_df_ind1['Value'] = argentina_df_ind1['Value'].fillna(method='bfill', axis = 0)
argentina_df_ind2['Value'] = argentina_df_ind2['Value'].fillna(method='bfill', axis = 0)
# Combining all the Argentina Dataframes
argentina_df = pd.concat([argentina_df_ind1, argentina_df_ind2, argentina_df_ind3, argentina_df_ind4, argentina_df_ind5, argentina_df_ind6, argentina_df_ind7, argentina_df_ind8, argentina_df_ind9, argentina_df_ind10])
print('Clustering Wrangling completed for Argentina!!', '\n')
return argentina_df
# # Handling Missing values for Brazil
# In[17]:
def brazil():
indicator_data = readFile()
brazil_df_ind1 = indicator_data[(indicator_data['IndicatorCode'].isin(['AG.LND.AGRI.ZS'])) & (indicator_data['CountryCode'] == 'BR')]
brazil_df_ind2 = indicator_data[(indicator_data['IndicatorCode'].isin(['SP.DYN.CBRT.IN'])) & (indicator_data['CountryCode'] == 'BR')]
brazil_df_ind3 = indicator_data[(indicator_data['IndicatorCode'].isin(['SP.POP.DPND'])) & (indicator_data['CountryCode'] == 'BR')]
brazil_df_ind4 = indicator_data[(indicator_data['IndicatorCode'].isin(['NE.EXP.GNFS.ZS'])) & (indicator_data['CountryCode'] == 'BR')]
brazil_df_ind5 = indicator_data[(indicator_data['IndicatorCode'].isin(['NE.IMP.GNFS.ZS'])) & (indicator_data['CountryCode'] == 'BR')]
brazil_df_ind6 = indicator_data[(indicator_data['IndicatorCode'].isin(['NY.GDP.MKTP.CD'])) & (indicator_data['CountryCode'] == 'BR')]
brazil_df_ind7 = indicator_data[(indicator_data['IndicatorCode'].isin(['NY.GDP.MKTP.KD.ZG'])) & (indicator_data['CountryCode'] == 'BR')]
brazil_df_ind8 = indicator_data[(indicator_data['IndicatorCode'].isin(['SP.POP.GROW'])) & (indicator_data['CountryCode'] == 'BR')]
brazil_df_ind9 = indicator_data[(indicator_data['IndicatorCode'].isin(['FI.RES.TOTL.CD'])) & (indicator_data['CountryCode'] == 'BR')]
brazil_df_ind10 = indicator_data[(indicator_data['IndicatorCode'].isin(['NE.TRD.GNFS.ZS'])) & (indicator_data['CountryCode'] == 'BR')]
brazil_df_ind1['Value'] = brazil_df_ind1['Value'].fillna(method='bfill', axis = 0)
brazil_df_ind2['Value'] = brazil_df_ind2['Value'].fillna(method='bfill', axis = 0)
# Combining all the Brazil Dataframes
brazil_df = pd.concat([brazil_df_ind1, brazil_df_ind2, brazil_df_ind3, brazil_df_ind4, brazil_df_ind5, brazil_df_ind6, brazil_df_ind7, brazil_df_ind8, brazil_df_ind9, brazil_df_ind10])
print('Clustering Wrangling completed for Brazil!!', '\n')
return brazil_df
# # Handling Missing values for Mexico
# In[16]:
def mexico():
indicator_data = readFile()
mexico_df_ind1 = indicator_data[(indicator_data['IndicatorCode'].isin(['AG.LND.AGRI.ZS'])) & (indicator_data['CountryCode'] == 'MX')]
mexico_df_ind2 = indicator_data[(indicator_data['IndicatorCode'].isin(['SP.DYN.CBRT.IN'])) & (indicator_data['CountryCode'] == 'MX')]
mexico_df_ind3 = indicator_data[(indicator_data['IndicatorCode'].isin(['SP.POP.DPND'])) & (indicator_data['CountryCode'] == 'MX')]
mexico_df_ind4 = indicator_data[(indicator_data['IndicatorCode'].isin(['NE.EXP.GNFS.ZS'])) & (indicator_data['CountryCode'] == 'MX')]
mexico_df_ind5 = indicator_data[(indicator_data['IndicatorCode'].isin(['NE.IMP.GNFS.ZS'])) & (indicator_data['CountryCode'] == 'MX')]
mexico_df_ind6 = indicator_data[(indicator_data['IndicatorCode'].isin(['NY.GDP.MKTP.CD'])) & (indicator_data['CountryCode'] == 'MX')]
mexico_df_ind7 = indicator_data[(indicator_data['IndicatorCode'].isin(['NY.GDP.MKTP.KD.ZG'])) & (indicator_data['CountryCode'] == 'MX')]
mexico_df_ind8 = indicator_data[(indicator_data['IndicatorCode'].isin(['SP.POP.GROW'])) & (indicator_data['CountryCode'] == 'MX')]
mexico_df_ind9 = indicator_data[(indicator_data['IndicatorCode'].isin(['FI.RES.TOTL.CD'])) & (indicator_data['CountryCode'] == 'MX')]
mexico_df_ind10 = indicator_data[(indicator_data['IndicatorCode'].isin(['NE.TRD.GNFS.ZS'])) & (indicator_data['CountryCode'] == 'MX')]
mexico_df_ind1['Value'] = mexico_df_ind1['Value'].fillna(method='bfill', axis = 0)
mexico_df_ind2['Value'] = mexico_df_ind2['Value'].fillna(method='bfill', axis = 0)
# Combining all the Mexico Dataframes
mexico_df = pd.concat([mexico_df_ind1, mexico_df_ind2, mexico_df_ind3, mexico_df_ind4, mexico_df_ind5, mexico_df_ind6, mexico_df_ind7, mexico_df_ind8, mexico_df_ind9, mexico_df_ind10])
print('Clustering Wrangling completed for Mexico!!', '\n')
return mexico_df
# # Handling Missing values for France
# In[18]:
def france():
indicator_data = readFile()
france_df_ind1 = indicator_data[(indicator_data['IndicatorCode'].isin(['AG.LND.AGRI.ZS'])) & (indicator_data['CountryCode'] == 'FR')]
france_df_ind2 = indicator_data[(indicator_data['IndicatorCode'].isin(['SP.DYN.CBRT.IN'])) & (indicator_data['CountryCode'] == 'FR')]
france_df_ind3 = indicator_data[(indicator_data['IndicatorCode'].isin(['SP.POP.DPND'])) & (indicator_data['CountryCode'] == 'FR')]
france_df_ind4 = indicator_data[(indicator_data['IndicatorCode'].isin(['NE.EXP.GNFS.ZS'])) & (indicator_data['CountryCode'] == 'FR')]
france_df_ind5 = indicator_data[(indicator_data['IndicatorCode'].isin(['NE.IMP.GNFS.ZS'])) & (indicator_data['CountryCode'] == 'FR')]
france_df_ind6 = indicator_data[(indicator_data['IndicatorCode'].isin(['NY.GDP.MKTP.CD'])) & (indicator_data['CountryCode'] == 'FR')]
france_df_ind7 = indicator_data[(indicator_data['IndicatorCode'].isin(['NY.GDP.MKTP.KD.ZG'])) & (indicator_data['CountryCode'] == 'FR')]
france_df_ind8 = indicator_data[(indicator_data['IndicatorCode'].isin(['SP.POP.GROW'])) & (indicator_data['CountryCode'] == 'FR')]
france_df_ind9 = indicator_data[(indicator_data['IndicatorCode'].isin(['FI.RES.TOTL.CD'])) & (indicator_data['CountryCode'] == 'FR')]
france_df_ind10 = indicator_data[(indicator_data['IndicatorCode'].isin(['NE.TRD.GNFS.ZS'])) & (indicator_data['CountryCode'] == 'FR')]
france_df_ind1['Value'] = france_df_ind1['Value'].fillna(method='bfill', axis = 0)
france_df_ind2['Value'] = france_df_ind2['Value'].fillna(method='bfill', axis = 0)
# Combining all the France Dataframes
france_df = pd.concat([france_df_ind1, france_df_ind2, france_df_ind3, france_df_ind4, france_df_ind5, france_df_ind6, france_df_ind7, france_df_ind8, france_df_ind9, france_df_ind10])
print('Clustering Wrangling completed for France!!', '\n')
return france_df
# # Handling Missing values for Germany
# In[19]:
def germany():
indicator_data = readFile()
germany_df_ind1 = indicator_data[(indicator_data['IndicatorCode'].isin(['AG.LND.AGRI.ZS'])) & (indicator_data['CountryCode'] == 'DE')]
germany_df_ind2 = indicator_data[(indicator_data['IndicatorCode'].isin(['SP.DYN.CBRT.IN'])) & (indicator_data['CountryCode'] == 'DE')]
germany_df_ind3 = indicator_data[(indicator_data['IndicatorCode'].isin(['SP.POP.DPND'])) & (indicator_data['CountryCode'] == 'DE')]
germany_df_ind4 = indicator_data[(indicator_data['IndicatorCode'].isin(['NE.EXP.GNFS.ZS'])) & (indicator_data['CountryCode'] == 'DE')]
germany_df_ind5 = indicator_data[(indicator_data['IndicatorCode'].isin(['NE.IMP.GNFS.ZS'])) & (indicator_data['CountryCode'] == 'DE')]
germany_df_ind6 = indicator_data[(indicator_data['IndicatorCode'].isin(['NY.GDP.MKTP.CD'])) & (indicator_data['CountryCode'] == 'DE')]
germany_df_ind7 = indicator_data[(indicator_data['IndicatorCode'].isin(['NY.GDP.MKTP.KD.ZG'])) & (indicator_data['CountryCode'] == 'DE')]
germany_df_ind8 = indicator_data[(indicator_data['IndicatorCode'].isin(['SP.POP.GROW'])) & (indicator_data['CountryCode'] == 'DE')]
germany_df_ind9 = indicator_data[(indicator_data['IndicatorCode'].isin(['FI.RES.TOTL.CD'])) & (indicator_data['CountryCode'] == 'DE')]
germany_df_ind10 = indicator_data[(indicator_data['IndicatorCode'].isin(['NE.TRD.GNFS.ZS'])) & (indicator_data['CountryCode'] == 'DE')]
germany_df_ind1['Value'] = germany_df_ind1['Value'].fillna(method='bfill', axis = 0)
germany_df_ind2['Value'] = germany_df_ind2['Value'].fillna(method='bfill', axis = 0)
# Combining all the Germany Dataframes
germany_df = pd.concat([germany_df_ind1, germany_df_ind2, germany_df_ind3, germany_df_ind4, germany_df_ind5, germany_df_ind6, germany_df_ind7, germany_df_ind8, germany_df_ind9, germany_df_ind10])
print('Clustering Wrangling completed for Germany!!', '\n')
return germany_df
# # Handling Missing values for Italy
# In[20]:
def italy():
indicator_data = readFile()
italy_df_ind1 = indicator_data[(indicator_data['IndicatorCode'].isin(['AG.LND.AGRI.ZS'])) & (indicator_data['CountryCode'] == 'IT')]
italy_df_ind2 = indicator_data[(indicator_data['IndicatorCode'].isin(['SP.DYN.CBRT.IN'])) & (indicator_data['CountryCode'] == 'IT')]
italy_df_ind3 = indicator_data[(indicator_data['IndicatorCode'].isin(['SP.POP.DPND'])) & (indicator_data['CountryCode'] == 'IT')]
italy_df_ind4 = indicator_data[(indicator_data['IndicatorCode'].isin(['NE.EXP.GNFS.ZS'])) & (indicator_data['CountryCode'] == 'IT')]
italy_df_ind5 = indicator_data[(indicator_data['IndicatorCode'].isin(['NE.IMP.GNFS.ZS'])) & (indicator_data['CountryCode'] == 'IT')]
italy_df_ind6 = indicator_data[(indicator_data['IndicatorCode'].isin(['NY.GDP.MKTP.CD'])) & (indicator_data['CountryCode'] == 'IT')]
italy_df_ind7 = indicator_data[(indicator_data['IndicatorCode'].isin(['NY.GDP.MKTP.KD.ZG'])) & (indicator_data['CountryCode'] == 'IT')]
italy_df_ind8 = indicator_data[(indicator_data['IndicatorCode'].isin(['SP.POP.GROW'])) & (indicator_data['CountryCode'] == 'IT')]
italy_df_ind9 = indicator_data[(indicator_data['IndicatorCode'].isin(['FI.RES.TOTL.CD'])) & (indicator_data['CountryCode'] == 'IT')]
italy_df_ind10 = indicator_data[(indicator_data['IndicatorCode'].isin(['NE.TRD.GNFS.ZS'])) & (indicator_data['CountryCode'] == 'IT')]
italy_df_ind1['Value'] = italy_df_ind1['Value'].fillna(method='bfill', axis = 0)
italy_df_ind2['Value'] = italy_df_ind2['Value'].fillna(method='bfill', axis = 0)
# Combining all the Italy Dataframes
italy_df = pd.concat([italy_df_ind1, italy_df_ind2, italy_df_ind3, italy_df_ind4, italy_df_ind5, italy_df_ind6, italy_df_ind7, italy_df_ind8, italy_df_ind9, italy_df_ind10])
print('Clustering Wrangling completed for Italy!!', '\n')
return italy_df
# # Handling Missing values for United Kingdom
# In[21]:
def united_kingdom():
indicator_data = readFile()
united_kingdom_df_ind1 = indicator_data[(indicator_data['IndicatorCode'].isin(['AG.LND.AGRI.ZS'])) & (indicator_data['CountryCode'] == 'GB')]
united_kingdom_df_ind2 = indicator_data[(indicator_data['IndicatorCode'].isin(['SP.DYN.CBRT.IN'])) & (indicator_data['CountryCode'] == 'GB')]
united_kingdom_df_ind3 = indicator_data[(indicator_data['IndicatorCode'].isin(['SP.POP.DPND'])) & (indicator_data['CountryCode'] == 'GB')]
united_kingdom_df_ind4 = indicator_data[(indicator_data['IndicatorCode'].isin(['NE.EXP.GNFS.ZS'])) & (indicator_data['CountryCode'] == 'GB')]
united_kingdom_df_ind5 = indicator_data[(indicator_data['IndicatorCode'].isin(['NE.IMP.GNFS.ZS'])) & (indicator_data['CountryCode'] == 'GB')]
united_kingdom_df_ind6 = indicator_data[(indicator_data['IndicatorCode'].isin(['NY.GDP.MKTP.CD'])) & (indicator_data['CountryCode'] == 'GB')]
united_kingdom_df_ind7 = indicator_data[(indicator_data['IndicatorCode'].isin(['NY.GDP.MKTP.KD.ZG'])) & (indicator_data['CountryCode'] == 'GB')]
united_kingdom_df_ind8 = indicator_data[(indicator_data['IndicatorCode'].isin(['SP.POP.GROW'])) & (indicator_data['CountryCode'] == 'GB')]
united_kingdom_df_ind9 = indicator_data[(indicator_data['IndicatorCode'].isin(['FI.RES.TOTL.CD'])) & (indicator_data['CountryCode'] == 'GB')]
united_kingdom_df_ind10 = indicator_data[(indicator_data['IndicatorCode'].isin(['NE.TRD.GNFS.ZS'])) & (indicator_data['CountryCode'] == 'GB')]
united_kingdom_df_ind1['Value'] = united_kingdom_df_ind1['Value'].fillna(method='bfill', axis = 0)
united_kingdom_df_ind2['Value'] = united_kingdom_df_ind2['Value'].fillna(method='bfill', axis = 0)
# Combining all the United Kingdom Dataframes
united_kingdom_df = pd.concat([united_kingdom_df_ind1, united_kingdom_df_ind2, united_kingdom_df_ind3, united_kingdom_df_ind4, united_kingdom_df_ind5, united_kingdom_df_ind6, united_kingdom_df_ind7, united_kingdom_df_ind8, united_kingdom_df_ind9, united_kingdom_df_ind10])
print('Clustering Wrangling completed for United Kingdom!!', '\n')
return united_kingdom_df
# # Handling Missing values for China
# In[22]:
def china():
indicator_data = readFile()
china_df_ind1 = indicator_data[(indicator_data['IndicatorCode'].isin(['AG.LND.AGRI.ZS'])) & (indicator_data['CountryCode'] == 'CN')]
china_df_ind2 = indicator_data[(indicator_data['IndicatorCode'].isin(['SP.DYN.CBRT.IN'])) & (indicator_data['CountryCode'] == 'CN')]
china_df_ind3 = indicator_data[(indicator_data['IndicatorCode'].isin(['SP.POP.DPND'])) & (indicator_data['CountryCode'] == 'CN')]
china_df_ind4 = indicator_data[(indicator_data['IndicatorCode'].isin(['NE.EXP.GNFS.ZS'])) & (indicator_data['CountryCode'] == 'CN')]
china_df_ind5 = indicator_data[(indicator_data['IndicatorCode'].isin(['NE.IMP.GNFS.ZS'])) & (indicator_data['CountryCode'] == 'CN')]
china_df_ind6 = indicator_data[(indicator_data['IndicatorCode'].isin(['NY.GDP.MKTP.CD'])) & (indicator_data['CountryCode'] == 'CN')]
china_df_ind7 = indicator_data[(indicator_data['IndicatorCode'].isin(['NY.GDP.MKTP.KD.ZG'])) & (indicator_data['CountryCode'] == 'CN')]
china_df_ind8 = indicator_data[(indicator_data['IndicatorCode'].isin(['SP.POP.GROW'])) & (indicator_data['CountryCode'] == 'CN')]
china_df_ind9 = indicator_data[(indicator_data['IndicatorCode'].isin(['FI.RES.TOTL.CD'])) & (indicator_data['CountryCode'] == 'CN')]
china_df_ind10 = indicator_data[(indicator_data['IndicatorCode'].isin(['NE.TRD.GNFS.ZS'])) & (indicator_data['CountryCode'] == 'CN')]
china_df_ind1['Value'] = china_df_ind1['Value'].fillna(method='bfill', axis = 0)
china_df_ind2['Value'] = china_df_ind2['Value'].fillna(method='bfill', axis = 0)
# Combining all the China Dataframes
china_df = pd.concat([china_df_ind1, china_df_ind2, china_df_ind3, china_df_ind4, china_df_ind5, china_df_ind6, china_df_ind7, china_df_ind8, china_df_ind9, china_df_ind10])
print('Clustering Wrangling completed for China!!', '\n')
return china_df
# # Handling Missing values for Indonesia
# In[23]:
def indonesia():
indicator_data = readFile()
indonesia_df_ind1 = indicator_data[(indicator_data['IndicatorCode'].isin(['AG.LND.AGRI.ZS'])) & (indicator_data['CountryCode'] == 'ID')]
indonesia_df_ind2 = indicator_data[(indicator_data['IndicatorCode'].isin(['SP.DYN.CBRT.IN'])) & (indicator_data['CountryCode'] == 'ID')]
indonesia_df_ind3 = indicator_data[(indicator_data['IndicatorCode'].isin(['SP.POP.DPND'])) & (indicator_data['CountryCode'] == 'ID')]
indonesia_df_ind4 = indicator_data[(indicator_data['IndicatorCode'].isin(['NE.EXP.GNFS.ZS'])) & (indicator_data['CountryCode'] == 'ID')]
indonesia_df_ind5 = indicator_data[(indicator_data['IndicatorCode'].isin(['NE.IMP.GNFS.ZS'])) & (indicator_data['CountryCode'] == 'ID')]
indonesia_df_ind6 = indicator_data[(indicator_data['IndicatorCode'].isin(['NY.GDP.MKTP.CD'])) & (indicator_data['CountryCode'] == 'ID')]
indonesia_df_ind7 = indicator_data[(indicator_data['IndicatorCode'].isin(['NY.GDP.MKTP.KD.ZG'])) & (indicator_data['CountryCode'] == 'ID')]
indonesia_df_ind8 = indicator_data[(indicator_data['IndicatorCode'].isin(['SP.POP.GROW'])) & (indicator_data['CountryCode'] == 'ID')]
indonesia_df_ind9 = indicator_data[(indicator_data['IndicatorCode'].isin(['FI.RES.TOTL.CD'])) & (indicator_data['CountryCode'] == 'ID')]
indonesia_df_ind10 = indicator_data[(indicator_data['IndicatorCode'].isin(['NE.TRD.GNFS.ZS'])) & (indicator_data['CountryCode'] == 'ID')]
indonesia_df_ind1['Value'] = indonesia_df_ind1['Value'].fillna(method='bfill', axis = 0)
indonesia_df_ind2['Value'] = indonesia_df_ind2['Value'].fillna(method='bfill', axis = 0)
# Combining all the Indonesia Dataframes
indonesia_df = pd.concat([indonesia_df_ind1, indonesia_df_ind2, indonesia_df_ind3, indonesia_df_ind4, indonesia_df_ind5, indonesia_df_ind6, indonesia_df_ind7, indonesia_df_ind8, indonesia_df_ind9, indonesia_df_ind10])
print('Clustering Wrangling completed for Indonesia!!', '\n')
return indonesia_df
# # Handling Missing values for Japan
# In[24]:
def japan():
indicator_data = readFile()
japan_df_ind1 = indicator_data[(indicator_data['IndicatorCode'].isin(['AG.LND.AGRI.ZS'])) & (indicator_data['CountryCode'] == 'JP')]
japan_df_ind2 = indicator_data[(indicator_data['IndicatorCode'].isin(['SP.DYN.CBRT.IN'])) & (indicator_data['CountryCode'] == 'JP')]
japan_df_ind3 = indicator_data[(indicator_data['IndicatorCode'].isin(['SP.POP.DPND'])) & (indicator_data['CountryCode'] == 'JP')]
japan_df_ind4 = indicator_data[(indicator_data['IndicatorCode'].isin(['NE.EXP.GNFS.ZS'])) & (indicator_data['CountryCode'] == 'JP')]
japan_df_ind5 = indicator_data[(indicator_data['IndicatorCode'].isin(['NE.IMP.GNFS.ZS'])) & (indicator_data['CountryCode'] == 'JP')]
japan_df_ind6 = indicator_data[(indicator_data['IndicatorCode'].isin(['NY.GDP.MKTP.CD'])) & (indicator_data['CountryCode'] == 'JP')]
japan_df_ind7 = indicator_data[(indicator_data['IndicatorCode'].isin(['NY.GDP.MKTP.KD.ZG'])) & (indicator_data['CountryCode'] == 'JP')]
japan_df_ind8 = indicator_data[(indicator_data['IndicatorCode'].isin(['SP.POP.GROW'])) & (indicator_data['CountryCode'] == 'JP')]
japan_df_ind9 = indicator_data[(indicator_data['IndicatorCode'].isin(['FI.RES.TOTL.CD'])) & (indicator_data['CountryCode'] == 'JP')]
japan_df_ind10 = indicator_data[(indicator_data['IndicatorCode'].isin(['NE.TRD.GNFS.ZS'])) & (indicator_data['CountryCode'] == 'JP')]
japan_df_ind1['Value'] = japan_df_ind1['Value'].fillna(method='bfill', axis = 0)
japan_df_ind2['Value'] = japan_df_ind2['Value'].fillna(method='bfill', axis = 0)
japan_df_ind4['Value'] = japan_df_ind4['Value'].fillna(method='bfill', axis = 0)
japan_df_ind5['Value'] = japan_df_ind5['Value'].fillna(method='bfill', axis = 0)
japan_df_ind10['Value'] = japan_df_ind10['Value'].fillna(method='bfill', axis = 0)
# Combining all the Japan Dataframes
japan_df = pd.concat([japan_df_ind1, japan_df_ind2, japan_df_ind3, japan_df_ind4, japan_df_ind5, japan_df_ind6, japan_df_ind7, japan_df_ind8, japan_df_ind9, japan_df_ind10])
print('Clustering Wrangling completed for Japan!!', '\n')
return japan_df
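# The per-country functions above all follow the same recipe: filter the ten indicator codes
# for one country, backfill the series that contain gaps, and concatenate the pieces. The
# helper below is an illustrative sketch of that pattern only (it is not part of the original
# script) and assumes readFile() and pandas (pd) are available exactly as they are elsewhere
# in this module.
INDICATOR_CODES = ['AG.LND.AGRI.ZS', 'SP.DYN.CBRT.IN', 'SP.POP.DPND', 'NE.EXP.GNFS.ZS',
                   'NE.IMP.GNFS.ZS', 'NY.GDP.MKTP.CD', 'NY.GDP.MKTP.KD.ZG', 'SP.POP.GROW',
                   'FI.RES.TOTL.CD', 'NE.TRD.GNFS.ZS']
def wrangle_country(country_code, backfill_codes=('AG.LND.AGRI.ZS', 'SP.DYN.CBRT.IN')):
    indicator_data = readFile()
    frames = []
    for code in INDICATOR_CODES:
        frame = indicator_data[(indicator_data['IndicatorCode'] == code) &
                               (indicator_data['CountryCode'] == country_code)].copy()
        if code in backfill_codes:
            # Backfill gaps within this indicator's series, as the per-country functions do
            frame['Value'] = frame['Value'].fillna(method='bfill')
        frames.append(frame)
    return pd.concat(frames)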
# In[45]:
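# writeFile: runs the wrangling routine for each of the eighteen countries, combines the
# results into a single dataframe, and writes it out as the cleaned clustering CSV.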
def writeFile():
australia_df = australia()
canada_df = canada()
saudi_arabia_df = saudi_Arabia()
united_states_df = united_States()
india_df = india()
russia_df = russia()
south_africa_df = south_Africa()
turkey_df = turkey()
argentina_df = argentina()
brazil_df = brazil()
mexico_df = mexico()
france_df = france()
germany_df = germany()
italy_df = italy()
united_kingdom_df = united_kingdom()
china_df = china()
indonesia_df = indonesia()
japan_df = japan()
# Combining all countries DataFrame
final_df = pd.concat([australia_df, canada_df, saudi_arabia_df, united_states_df, india_df, russia_df, south_africa_df, turkey_df, argentina_df, brazil_df, mexico_df, france_df, germany_df, italy_df, united_kingdom_df, china_df, indonesia_df, japan_df])
actual_filename = './Data/Clustering/Indicators_Clustering_Cleaned.csv'
final_df.to_csv(actual_filename, index=False)
print('Clustering Wrangling completed and file created!!', '\n')
# In[ ]:
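# fileUploadToS3: collects every '_Cleaned' file from ./Data/Clustering/ and uploads it to
# the 'Team6FinalProject' bucket, creating the bucket first when it does not exist. (Note
# that AWS requires bucket names to be lowercase, so this exact name would be rejected by
# S3 in practice.)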
def fileUploadToS3(AWS_ACCESS_KEY, AWS_SECRET_KEY):
conn = boto3.client('s3', aws_access_key_id=AWS_ACCESS_KEY, aws_secret_access_key=AWS_SECRET_KEY)
transfer = S3Transfer(conn)
response = conn.list_buckets()
existent = []
for bucket in response["Buckets"]:
existent.append(bucket['Name'])
bucket_name = 'Team6FinalProject'
target_dir = './Data/Clustering/'
filenames = []
file_list = os.listdir(target_dir)
for file in file_list:
if '_Cleaned' in file:
filenames.append(file)
if bucket_name in existent:
print('Bucket already exists!!', '\n')
print('Clustering Cleaned File upload started to s3!!!!!', '\n')
for files in filenames:
upload_filename = 'Clustering/'+files
transfer.upload_file(os.path.join(target_dir, files), bucket_name, upload_filename, extra_args={'ACL': 'public-read'})
print('Clustering Cleaned File uploaded to s3!!!!!','\n')
else:
print('Bucket not present. Creating bucket!!', '\n')
conn.create_bucket(Bucket=bucket_name, ACL='public-read-write')
print('Clustering Cleaned File upload started to s3!!!!!', '\n')
for files in filenames:
upload_filename = 'Clustering/'+files
transfer.upload_file(os.path.join(target_dir, files), bucket_name, upload_filename, extra_args={'ACL': 'public-read'})
print('Clustering Cleaned File uploaded to s3!!!!!','\n')
# In[ ]:
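# main: expects the AWS access key and secret key as the two command-line arguments, then
# runs the read, write, and S3 upload steps in sequence.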
def main():
user_input = sys.argv[1:]
print("----Process Started----")
counter = 0
if len(user_input) == 0:
print('No Input provided. Process is exiting!!')
exit(0)
for ip in user_input:
if counter == 0:
AWS_ACCESS_KEY = str(ip)
else:
AWS_SECRET_KEY = str(ip)
counter += 1
readFile()
writeFile()
fileUploadToS3(AWS_ACCESS_KEY, AWS_SECRET_KEY)
print('Clustering Wrangling Process completed!!','\n')
# In[ ]:
if __name__ == '__main__':
main()
| 66.588811
| 337
| 0.608074
| 5,267
| 47,611
| 5.169356
| 0.04139
| 0.267382
| 0.145444
| 0.171888
| 0.772028
| 0.744886
| 0.725199
| 0.676057
| 0.627649
| 0.613619
| 0
| 0.016233
| 0.240491
| 47,611
| 714
| 338
| 66.682073
| 0.736705
| 0.03432
| 0
| 0.065823
| 0
| 0
| 0.201856
| 0.002244
| 0
| 0
| 0
| 0
| 0
| 1
| 0.055696
| false
| 0
| 0.017722
| 0
| 0.121519
| 0.070886
| 0
| 0
| 0
| null | 1
| 0
| 1
| 0
| 1
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
c395e34533dfb3944ed5f26f6d6f57117ef5fc73
| 168
|
py
|
Python
|
estate/admin.py
|
apwao/neighborhood
|
b71028fb0e312a57776b8485c7bf8e43b8f6c5d5
|
[
"Unlicense",
"MIT"
] | null | null | null |
estate/admin.py
|
apwao/neighborhood
|
b71028fb0e312a57776b8485c7bf8e43b8f6c5d5
|
[
"Unlicense",
"MIT"
] | 5
|
2020-06-05T22:06:38.000Z
|
2021-09-08T01:07:31.000Z
|
estate/admin.py
|
apwao/neighborhood
|
b71028fb0e312a57776b8485c7bf8e43b8f6c5d5
|
[
"Unlicense",
"MIT"
] | null | null | null |
from django.contrib import admin
from .models import Neighborhood, Business
# Register your models here.
admin.site.register(Neighborhood)
admin.site.register(Business)
| 28
| 41
| 0.833333
| 22
| 168
| 6.363636
| 0.545455
| 0.128571
| 0.242857
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.089286
| 168
| 6
| 42
| 28
| 0.915033
| 0.154762
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.5
| 0
| 0.5
| 0
| 1
| 0
| 0
| null | 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
c3a09533629b42bc022ec3c4aae37a6cd2395b81
| 50
|
py
|
Python
|
tests/tests_data/testpackage/subpackage/submodule.py
|
desty2k/paker
|
80a08576155e8737067fba6fede49d2d71d257ff
|
[
"MIT"
] | null | null | null |
tests/tests_data/testpackage/subpackage/submodule.py
|
desty2k/paker
|
80a08576155e8737067fba6fede49d2d71d257ff
|
[
"MIT"
] | null | null | null |
tests/tests_data/testpackage/subpackage/submodule.py
|
desty2k/paker
|
80a08576155e8737067fba6fede49d2d71d257ff
|
[
"MIT"
] | null | null | null |
def is_even(number):
return number % 2 == 0
| 10
| 26
| 0.6
| 8
| 50
| 3.625
| 0.875
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.055556
| 0.28
| 50
| 4
| 27
| 12.5
| 0.75
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.5
| false
| 0
| 0
| 0.5
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 5
|
c3b7083a1237ad36a8fa973cd204ca245c946a62
| 2,858
|
py
|
Python
|
z2/part3/updated_part2_batch/jm/parser_errors_2/751582637.py
|
kozakusek/ipp-2020-testy
|
09aa008fa53d159672cc7cbf969a6b237e15a7b8
|
[
"MIT"
] | 1
|
2020-04-16T12:13:47.000Z
|
2020-04-16T12:13:47.000Z
|
z2/part3/updated_part2_batch/jm/parser_errors_2/751582637.py
|
kozakusek/ipp-2020-testy
|
09aa008fa53d159672cc7cbf969a6b237e15a7b8
|
[
"MIT"
] | 18
|
2020-03-06T17:50:15.000Z
|
2020-05-19T14:58:30.000Z
|
z2/part3/updated_part2_batch/jm/parser_errors_2/751582637.py
|
kozakusek/ipp-2020-testy
|
09aa008fa53d159672cc7cbf969a6b237e15a7b8
|
[
"MIT"
] | 18
|
2020-03-06T17:45:13.000Z
|
2020-06-09T19:18:31.000Z
|
from part1 import (
gamma_board,
gamma_busy_fields,
gamma_delete,
gamma_free_fields,
gamma_golden_move,
gamma_golden_possible,
gamma_move,
gamma_new,
)
"""
scenario: test_random_actions
uuid: 751582637
"""
"""
random actions, total chaos
"""
board = gamma_new(4, 4, 2, 4)
assert board is not None
assert gamma_move(board, 1, 1, 0) == 1
assert gamma_move(board, 1, 3, 1) == 1
assert gamma_busy_fields(board, 1) == 2
assert gamma_move(board, 1, 2, 1) == 1
assert gamma_move(board, 1, 0, 1) == 1
assert gamma_move(board, 2, 3, 2) == 1
assert gamma_move(board, 2, 3, 0) == 1
assert gamma_move(board, 1, 0, 0) == 1
assert gamma_move(board, 1, 0, 1) == 0
assert gamma_golden_move(board, 1, 0, 3) == 0
assert gamma_move(board, 2, 1, 1) == 1
board238663772 = gamma_board(board)
assert board238663772 is not None
assert board238663772 == ("....\n"
"...2\n"
"1211\n"
"11.2\n")
del board238663772
board238663772 = None
assert gamma_move(board, 1, 1, 0) == 0
assert gamma_move(board, 1, 1, 1) == 0
assert gamma_move(board, 2, 3, 1) == 0
assert gamma_free_fields(board, 2) == 8
assert gamma_move(board, 1, 2, 0) == 1
assert gamma_move(board, 1, 3, 1) == 0
assert gamma_busy_fields(board, 1) == 6
assert gamma_move(board, 2, 3, 0) == 0
assert gamma_move(board, 2, 1, 1) == 0
assert gamma_move(board, 1, 2, 1) == 0
assert gamma_free_fields(board, 1) == 7
assert gamma_move(board, 2, 2, 1) == 0
assert gamma_busy_fields(board, 1) == 6
assert gamma_move(board, 2, 2, 0) == 0
assert gamma_move(board, 2, 0, 0) == 0
assert gamma_move(board, 1, 1, 1) == 0
assert gamma_move(board, 1, 3, 2) == 0
assert gamma_move(board, 2, 1, 0) == 0
assert gamma_move(board, 2, 1, 3) == 1
assert gamma_free_fields(board, 2) == 5
assert gamma_move(board, 1, 2, 1) == 0
assert gamma_move(board, 2, 3, 0) == 0
assert gamma_move(board, 1, 3, 0) == 0
assert gamma_move(board, 1, 1, 2) == 1
assert gamma_free_fields(board, 1) == 5
assert gamma_move(board, 2, 2, 0) == 0
assert gamma_move(board, 2, 0, 1) == 0
assert gamma_move(board, 1, 3, 3) == 1
assert gamma_move(board, 1, 3, 1) == 0
assert gamma_busy_fields(board, 1) == 8
assert gamma_move(board, 2, 3, 0) == 0
assert gamma_move(board, 2, 2, 0) == 0
board693508817 = gamma_board(board)
assert board693508817 is not None
assert board693508817 == (".2.1\n"
".1.2\n"
"1211\n"
"1112\n")
del board693508817
board693508817 = None
assert gamma_move(board, 1, 2, 2) == 1
assert gamma_move(board, 2, 3, 2) == 0
assert gamma_move(board, 1, 2, 1) == 0
assert gamma_golden_possible(board, 1) == 1
assert gamma_move(board, 2, 0, 3) == 1
assert gamma_move(board, 1, 2, 0) == 0
assert gamma_move(board, 1, 2, 3) == 1
assert gamma_golden_move(board, 1, 1, 1) == 1
assert gamma_move(board, 2, 3, 1) == 0
assert gamma_golden_possible(board, 2) == 1
gamma_delete(board)
| 28.868687
| 46
| 0.664801
| 512
| 2,858
| 3.548828
| 0.080078
| 0.320859
| 0.33847
| 0.451293
| 0.733077
| 0.729774
| 0.626307
| 0.511282
| 0.343974
| 0.329664
| 0
| 0.135274
| 0.182645
| 2,858
| 98
| 47
| 29.163265
| 0.642551
| 0
| 0
| 0.231707
| 0
| 0
| 0.017329
| 0
| 0
| 0
| 0
| 0
| 0.707317
| 1
| 0
| false
| 0
| 0.012195
| 0
| 0.012195
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
c3bd856d1f48368c18ec692891b30b733e97eb44
| 40
|
py
|
Python
|
exotica_examples/src/exotica_examples_py/__init__.py
|
Tobias-Fischer/exotica
|
3fb5484882e390e045c8213f21acc92d2d40ce28
|
[
"BSD-3-Clause"
] | 130
|
2018-03-12T11:00:55.000Z
|
2022-02-21T02:41:28.000Z
|
exotica_examples/src/exotica_examples_py/__init__.py
|
Tobias-Fischer/exotica
|
3fb5484882e390e045c8213f21acc92d2d40ce28
|
[
"BSD-3-Clause"
] | 349
|
2017-09-14T00:42:33.000Z
|
2022-03-29T13:51:04.000Z
|
exotica_examples/src/exotica_examples_py/__init__.py
|
Tobias-Fischer/exotica
|
3fb5484882e390e045c8213f21acc92d2d40ce28
|
[
"BSD-3-Clause"
] | 48
|
2017-10-04T15:50:42.000Z
|
2022-02-10T05:03:39.000Z
|
from .target_marker import TargetMarker
| 20
| 39
| 0.875
| 5
| 40
| 6.8
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.1
| 40
| 1
| 40
| 40
| 0.944444
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
c3e49ce1edf89296e0203680397057d5100649cb
| 44
|
py
|
Python
|
fuocore/exc.py
|
AmyLewis/feeluown-core
|
0aecb39ce49504b04fa54a391260e9976220a288
|
[
"MIT"
] | null | null | null |
fuocore/exc.py
|
AmyLewis/feeluown-core
|
0aecb39ce49504b04fa54a391260e9976220a288
|
[
"MIT"
] | null | null | null |
fuocore/exc.py
|
AmyLewis/feeluown-core
|
0aecb39ce49504b04fa54a391260e9976220a288
|
[
"MIT"
] | null | null | null |
class FuocoreException(Exception):
pass
| 14.666667
| 34
| 0.772727
| 4
| 44
| 8.5
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.159091
| 44
| 2
| 35
| 22
| 0.918919
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0.5
| 0
| 0
| 0.5
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 0
|
0
| 5
|
c3f879e94af46826d7d565ab7bc9a7ef89ae2d17
| 11
|
py
|
Python
|
cap1/teste.py
|
JoseArtur/phyton-exercices
|
f3da4447044e445222233960f991fb2e36311131
|
[
"MIT"
] | null | null | null |
cap1/teste.py
|
JoseArtur/phyton-exercices
|
f3da4447044e445222233960f991fb2e36311131
|
[
"MIT"
] | null | null | null |
cap1/teste.py
|
JoseArtur/phyton-exercices
|
f3da4447044e445222233960f991fb2e36311131
|
[
"MIT"
] | null | null | null |
print("o")
| 5.5
| 10
| 0.545455
| 2
| 11
| 3
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.090909
| 11
| 1
| 11
| 11
| 0.6
| 0
| 0
| 0
| 0
| 0
| 0.090909
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
|
0
| 5
|
617de8c4cb300ffb6ba0b7afe3ed556f1d2d13cc
| 121
|
py
|
Python
|
examples/example_decorator_lib.py
|
HelmchenLabSoftware/mesostat-dev
|
8baa7120b892fe0df893cdcf0f20f49876643d75
|
[
"MIT"
] | null | null | null |
examples/example_decorator_lib.py
|
HelmchenLabSoftware/mesostat-dev
|
8baa7120b892fe0df893cdcf0f20f49876643d75
|
[
"MIT"
] | null | null | null |
examples/example_decorator_lib.py
|
HelmchenLabSoftware/mesostat-dev
|
8baa7120b892fe0df893cdcf0f20f49876643d75
|
[
"MIT"
] | null | null | null |
from mesostat.utils.decorators import time_mem_1starg
@time_mem_1starg
def myfunc(x):
return x**2
print(myfunc(10))
| 17.285714
| 53
| 0.77686
| 20
| 121
| 4.5
| 0.75
| 0.155556
| 0.288889
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.04717
| 0.123967
| 121
| 7
| 54
| 17.285714
| 0.801887
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.2
| false
| 0
| 0.2
| 0.2
| 0.6
| 0.2
| 1
| 0
| 0
| null | 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 5
|
61c0b342096332bbcbddf338c734583233246fda
| 154
|
py
|
Python
|
backend/wish/admin.py
|
gabrielgaava/iWish
|
948915470f056a8582935727d1d19248d7f63ad1
|
[
"MIT"
] | null | null | null |
backend/wish/admin.py
|
gabrielgaava/iWish
|
948915470f056a8582935727d1d19248d7f63ad1
|
[
"MIT"
] | null | null | null |
backend/wish/admin.py
|
gabrielgaava/iWish
|
948915470f056a8582935727d1d19248d7f63ad1
|
[
"MIT"
] | null | null | null |
from django.contrib import admin
from .models import Wish, WishList
# Register your models here.
admin.site.register(Wish)
admin.site.register(WishList)
| 22
| 34
| 0.805195
| 22
| 154
| 5.636364
| 0.545455
| 0.145161
| 0.274194
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.11039
| 154
| 6
| 35
| 25.666667
| 0.905109
| 0.168831
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.5
| 0
| 0.5
| 0
| 1
| 0
| 0
| null | 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
4edee6aa450547915a3e0cc14b6bb54bff434d49
| 23
|
py
|
Python
|
torch_mimicry/utils/__init__.py
|
houliangict/mimicry
|
d9e43940254de4a85c78e644f2d2b1135de4b50d
|
[
"MIT"
] | 560
|
2020-03-31T07:07:26.000Z
|
2022-03-15T08:29:37.000Z
|
torch_mimicry/utils/__init__.py
|
houliangict/mimicry
|
d9e43940254de4a85c78e644f2d2b1135de4b50d
|
[
"MIT"
] | 34
|
2020-03-31T02:42:16.000Z
|
2021-12-10T15:47:30.000Z
|
torch_mimicry/utils/__init__.py
|
houliangict/mimicry
|
d9e43940254de4a85c78e644f2d2b1135de4b50d
|
[
"MIT"
] | 63
|
2020-04-04T09:56:22.000Z
|
2022-03-15T02:34:58.000Z
|
from .common import *
| 11.5
| 22
| 0.695652
| 3
| 23
| 5.333333
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.217391
| 23
| 1
| 23
| 23
| 0.888889
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
4edfe6d90df0c4fb186b80cf92a7ee7079c7297b
| 30
|
py
|
Python
|
api/__init__.py
|
evandrocoan/Javatar
|
b38d4f9d852565d6dcecb236386628b4e56d9d09
|
[
"MIT"
] | 142
|
2015-01-11T19:43:17.000Z
|
2021-11-15T11:44:56.000Z
|
api/__init__.py
|
evandroforks/Javatar
|
b38d4f9d852565d6dcecb236386628b4e56d9d09
|
[
"MIT"
] | 46
|
2015-01-02T20:29:37.000Z
|
2018-09-15T05:12:52.000Z
|
api/__init__.py
|
evandroforks/Javatar
|
b38d4f9d852565d6dcecb236386628b4e56d9d09
|
[
"MIT"
] | 25
|
2015-01-16T01:33:39.000Z
|
2022-01-07T11:12:43.000Z
|
from .javatar_plugin import *
| 15
| 29
| 0.8
| 4
| 30
| 5.75
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.133333
| 30
| 1
| 30
| 30
| 0.884615
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
f61f3b6e1ce3ef5e42be37bfb2e6c849e308a20b
| 5,527
|
py
|
Python
|
src/tipsextension/azext_tipsextension/vendored_sdks/oscp/dataplane/models/__init__.py
|
Mannan2812/azure-cli-extensions
|
e2b34efe23795f6db9c59100534a40f0813c3d95
|
[
"MIT"
] | null | null | null |
src/tipsextension/azext_tipsextension/vendored_sdks/oscp/dataplane/models/__init__.py
|
Mannan2812/azure-cli-extensions
|
e2b34efe23795f6db9c59100534a40f0813c3d95
|
[
"MIT"
] | null | null | null |
src/tipsextension/azext_tipsextension/vendored_sdks/oscp/dataplane/models/__init__.py
|
Mannan2812/azure-cli-extensions
|
e2b34efe23795f6db9c59100534a40f0813c3d95
|
[
"MIT"
] | null | null | null |
# coding=utf-8
# --------------------------------------------------------------------------
# Code generated by Microsoft (R) AutoRest Code Generator (autorest: 3.2.1, generator: {generator})
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
try:
from ._models_py3 import Amount
from ._models_py3 import AvailableWarehouseItems
from ._models_py3 import Barcode
from ._models_py3 import BaseNode
from ._models_py3 import BulkResponseItemOfDeliveryNode
from ._models_py3 import BulkResponseItemOfItem
from ._models_py3 import BulkResponseItemOfString
from ._models_py3 import BulkResponseItemOfWarehouse
from ._models_py3 import BulkResponseItemOfWarehouseItem
from ._models_py3 import Carrier
from ._models_py3 import CarrierReference
from ._models_py3 import Connector
from ._models_py3 import DataInflow
from ._models_py3 import DataInflowRun
from ._models_py3 import DataOutflow
from ._models_py3 import DataOutflowRun
from ._models_py3 import Dataset
from ._models_py3 import DeliveryNode
from ._models_py3 import Directory
from ._models_py3 import ErrorObject
from ._models_py3 import FulfillmentOption
from ._models_py3 import FulfillmentPlan
from ._models_py3 import GenerateFulfillmentOptionsRequest
from ._models_py3 import GenerateFulfillmentOptionsResponse
from ._models_py3 import HoursOfOperation
from ._models_py3 import Item
from ._models_py3 import ItemReferenceData
from ._models_py3 import Location
from ._models_py3 import Note
from ._models_py3 import OrderFulfillment
from ._models_py3 import OrderFulfillmentReferenceData
from ._models_py3 import OrderLine
from ._models_py3 import SchemaReference
from ._models_py3 import Shipment
from ._models_py3 import ShipmentItem
from ._models_py3 import Transformer
from ._models_py3 import UnitOfMeasure
from ._models_py3 import Warehouse
from ._models_py3 import WarehouseItem
from ._models_py3 import WarehouseItemReferenceData
except (SyntaxError, ImportError):
from ._models import Amount # type: ignore
from ._models import AvailableWarehouseItems # type: ignore
from ._models import Barcode # type: ignore
from ._models import BaseNode # type: ignore
from ._models import BulkResponseItemOfDeliveryNode # type: ignore
from ._models import BulkResponseItemOfItem # type: ignore
from ._models import BulkResponseItemOfString # type: ignore
from ._models import BulkResponseItemOfWarehouse # type: ignore
from ._models import BulkResponseItemOfWarehouseItem # type: ignore
from ._models import Carrier # type: ignore
from ._models import CarrierReference # type: ignore
from ._models import Connector # type: ignore
from ._models import DataInflow # type: ignore
from ._models import DataInflowRun # type: ignore
from ._models import DataOutflow # type: ignore
from ._models import DataOutflowRun # type: ignore
from ._models import Dataset # type: ignore
from ._models import DeliveryNode # type: ignore
from ._models import Directory # type: ignore
from ._models import ErrorObject # type: ignore
from ._models import FulfillmentOption # type: ignore
from ._models import FulfillmentPlan # type: ignore
from ._models import GenerateFulfillmentOptionsRequest # type: ignore
from ._models import GenerateFulfillmentOptionsResponse # type: ignore
from ._models import HoursOfOperation # type: ignore
from ._models import Item # type: ignore
from ._models import ItemReferenceData # type: ignore
from ._models import Location # type: ignore
from ._models import Note # type: ignore
from ._models import OrderFulfillment # type: ignore
from ._models import OrderFulfillmentReferenceData # type: ignore
from ._models import OrderLine # type: ignore
from ._models import SchemaReference # type: ignore
from ._models import Shipment # type: ignore
from ._models import ShipmentItem # type: ignore
from ._models import Transformer # type: ignore
from ._models import UnitOfMeasure # type: ignore
from ._models import Warehouse # type: ignore
from ._models import WarehouseItem # type: ignore
from ._models import WarehouseItemReferenceData # type: ignore
from ._open_supply_chain_platform_service_api_enums import (
HostOptions,
)
__all__ = [
'Amount',
'AvailableWarehouseItems',
'Barcode',
'BaseNode',
'BulkResponseItemOfDeliveryNode',
'BulkResponseItemOfItem',
'BulkResponseItemOfString',
'BulkResponseItemOfWarehouse',
'BulkResponseItemOfWarehouseItem',
'Carrier',
'CarrierReference',
'Connector',
'DataInflow',
'DataInflowRun',
'DataOutflow',
'DataOutflowRun',
'Dataset',
'DeliveryNode',
'Directory',
'ErrorObject',
'FulfillmentOption',
'FulfillmentPlan',
'GenerateFulfillmentOptionsRequest',
'GenerateFulfillmentOptionsResponse',
'HoursOfOperation',
'Item',
'ItemReferenceData',
'Location',
'Note',
'OrderFulfillment',
'OrderFulfillmentReferenceData',
'OrderLine',
'SchemaReference',
'Shipment',
'ShipmentItem',
'Transformer',
'UnitOfMeasure',
'Warehouse',
'WarehouseItem',
'WarehouseItemReferenceData',
'HostOptions',
]
| 40.343066
| 99
| 0.724625
| 527
| 5,527
| 7.351044
| 0.16129
| 0.206505
| 0.134228
| 0.19618
| 0.261745
| 0
| 0
| 0
| 0
| 0
| 0
| 0.00985
| 0.191786
| 5,527
| 136
| 100
| 40.639706
| 0.857399
| 0.155962
| 0
| 0
| 1
| 0
| 0.133579
| 0.060403
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.640625
| 0
| 0.640625
| 0
| 0
| 0
| 0
| null | 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
f61fb6a98c49c8d418b3688dabb6b03532ab4b49
| 912
|
py
|
Python
|
tests/life/test_rlist.py
|
QuanTakeuchi/yoda
|
c0d68d550c5d2a0cfc3f689d4cfd09e082c9079d
|
[
"MIT"
] | null | null | null |
tests/life/test_rlist.py
|
QuanTakeuchi/yoda
|
c0d68d550c5d2a0cfc3f689d4cfd09e082c9079d
|
[
"MIT"
] | null | null | null |
tests/life/test_rlist.py
|
QuanTakeuchi/yoda
|
c0d68d550c5d2a0cfc3f689d4cfd09e082c9079d
|
[
"MIT"
] | 1
|
2019-10-02T11:01:33.000Z
|
2019-10-02T11:01:33.000Z
|
# coding=utf-8
from unittest import TestCase
from click.testing import CliRunner
import yoda
class TestHealth(TestCase):
"""
Test for the following commands:
| Module: health
| command: health
"""
def __init__(self, methodName='runTest'):
super(TestHealth, self).__init__()
self.runner = CliRunner()
def runTest(self):
# result = self.runner.invoke(yoda.cli, ['rlist', 'view', 'opt'])
# self.assertEqual(result.exit_code, 0)
# result = self.runner.invoke(yoda.cli, ['rlist', 'add'], input="title\n_auth\n_kind\n_tags\n")
# self.assertEqual(result.exit_code, 0)
# output_string = str(result.output.encode('ascii', 'ignore'))
# print(output_string)
#
# result = self.runner.invoke(yoda.cli, ['rlist', 'view'])
# self.assertEqual(result.exit_code, 0)
# todo
pass
| 26.823529
| 103
| 0.60636
| 106
| 912
| 5.066038
| 0.509434
| 0.074488
| 0.089385
| 0.122905
| 0.372439
| 0.372439
| 0.204842
| 0.141527
| 0
| 0
| 0
| 0.005857
| 0.251096
| 912
| 33
| 104
| 27.636364
| 0.780381
| 0.546053
| 0
| 0
| 0
| 0
| 0.019022
| 0
| 0
| 0
| 0
| 0.030303
| 0
| 1
| 0.222222
| false
| 0.111111
| 0.333333
| 0
| 0.666667
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 0
| 1
| 0
| 1
| 1
| 0
| 1
| 0
|
0
| 5
|
f6530e6f4c80b0fed0dc943b285d758d5115d2d8
| 134
|
py
|
Python
|
tests/builtin/oct.py
|
Slater-Victoroff/pyjaco
|
89c4e3c46399c5023b0e160005d855a01241c58a
|
[
"MIT"
] | 38
|
2015-01-01T18:08:59.000Z
|
2022-02-18T08:57:27.000Z
|
tests/builtin/oct.py
|
dusty-phillips/pyjaco
|
066895ae38d1828498e529c1875cb88df6cbc54d
|
[
"MIT"
] | 1
|
2020-07-15T13:30:32.000Z
|
2020-07-15T13:30:32.000Z
|
tests/builtin/oct.py
|
Slater-Victoroff/pyjaco
|
89c4e3c46399c5023b0e160005d855a01241c58a
|
[
"MIT"
] | 12
|
2016-03-07T09:30:49.000Z
|
2021-09-05T20:38:47.000Z
|
print oct(42)
print oct(0)
print oct(12345678)
print oct(-100)
try:
print oct("foo")
except TypeError, E:
print "Failed:", E
| 13.4
| 22
| 0.656716
| 22
| 134
| 4
| 0.545455
| 0.454545
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.12963
| 0.19403
| 134
| 9
| 23
| 14.888889
| 0.685185
| 0
| 0
| 0
| 0
| 0
| 0.075188
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0
| null | null | 0.75
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 1
|
0
| 5
|
f676894de4f6a303dada091c4185ad8d0770e4d6
| 7,815
|
py
|
Python
|
blockchain/tests/test_auction.py
|
dapp-z/auction
|
bb8a7cf4807cc2ca91672adab2ed186ff7a87fc1
|
[
"MIT"
] | 2
|
2022-03-29T08:08:42.000Z
|
2022-03-30T18:39:47.000Z
|
blockchain/tests/test_auction.py
|
MobinHajizadeh/auction
|
5f09c684f4b4f20eee9f5c17eb330550c19cba7a
|
[
"MIT"
] | null | null | null |
blockchain/tests/test_auction.py
|
MobinHajizadeh/auction
|
5f09c684f4b4f20eee9f5c17eb330550c19cba7a
|
[
"MIT"
] | 2
|
2022-03-28T15:54:24.000Z
|
2022-03-28T18:56:37.000Z
|
import brownie
from brownie import Auction, Weth, Nft
import pytest
from scripts.useful import get_account
import time
ACCOUNT = get_account(1)
BIDDER = get_account(2)
TOKEN_ID = 18
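# Fixture: approves the NFT and creates an auction that opens 10 seconds from now and closes
# 30 seconds after it opens, then sleeps until bidding has opened.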
@pytest.fixture
def create_auction(deploy_auction, nft):
auction = deploy_auction[0]
weth = deploy_auction[1]
starting_price = 10 ** 18
starting_timestamp = int(time.time()) + 10
ending_timestamp = starting_timestamp + 30
nft.approve(auction, TOKEN_ID, {"from": ACCOUNT})
auction.createAuction(nft, TOKEN_ID, starting_price,
starting_timestamp, ending_timestamp, {"from": ACCOUNT})
time.sleep(10)
return auction, nft, weth
@pytest.fixture
def deploy_auction():
weth = Weth.deploy({"from": BIDDER})
return Auction.deploy(weth, {"from": ACCOUNT}), weth
@pytest.fixture
def nft():
return Nft.deploy({"from": ACCOUNT})
def test_cant_create_auction_not_owner(deploy_auction, nft):
starting_price = 10 ** 18
starting_timestamp = int(time.time()) + 10
ending_timestamp = starting_timestamp + 35
with brownie.reverts("The sender doesn't own NFT!"):
deploy_auction[0].createAuction(nft, TOKEN_ID, starting_price,
starting_timestamp, ending_timestamp, {"from": BIDDER})
def test_cant_create_auction_started(create_auction):
auction = create_auction[0]
nft = create_auction[1]
starting_price = 10 ** 18
starting_timestamp = int(time.time()) + 10
ending_timestamp = starting_timestamp + 35
with brownie.reverts("The auction already started by the owner!"):
auction.createAuction(nft, TOKEN_ID, starting_price,
starting_timestamp, ending_timestamp, {"from": ACCOUNT})
def test_cant_create_auction_timestamp_false(deploy_auction, nft):
starting_price = 10 ** 18
starting_timestamp = int(time.time()) - 60
ending_timestamp = starting_timestamp + 35
with brownie.reverts("startingTimestamp must be greater than now!"):
deploy_auction[0].createAuction(nft, TOKEN_ID, starting_price,
starting_timestamp, ending_timestamp, {"from": ACCOUNT})
def test_cant_create_auction_nft_not_approved(deploy_auction, nft):
starting_price = 10 ** 18
starting_timestamp = int(time.time()) + 60
ending_timestamp = starting_timestamp + 35
with brownie.reverts("The NFT is not approved!"):
deploy_auction[0].createAuction(nft, TOKEN_ID, starting_price,
starting_timestamp, ending_timestamp, {"from": ACCOUNT})
def test_create_auction(create_auction):
auction = create_auction[0]
nft = create_auction[1]
assert auction.allAuctions(nft, TOKEN_ID)[-1] == ACCOUNT # seller
assert auction.allAuctions(nft, TOKEN_ID)[2] == 10 ** 18 # starting price
def test_cant_bid_ended(create_auction):
auction = create_auction[0]
nft = create_auction[1]
weth = create_auction[2]
weth.approve(auction, 10 ** 19, {"from": BIDDER})
time.sleep(35)
with brownie.reverts("The auction is over!"):
auction.bid(nft, TOKEN_ID, 10 ** 19, {"from": BIDDER})
def test_cant_bid_lower_amount(create_auction):
auction = create_auction[0]
nft = create_auction[1]
weth = create_auction[2]
weth.approve(auction, 10 ** 18, {"from": BIDDER})
with brownie.reverts("The amount must be greater than the starting price!"):
auction.bid(nft, TOKEN_ID, 10 ** 17, {"from": BIDDER})
def test_cant_bid_seller(create_auction):
auction = create_auction[0]
nft = create_auction[1]
with brownie.reverts("The seller can not bid!"):
auction.bid(nft, TOKEN_ID, 10 ** 19, {"from": ACCOUNT})
def test_cant_bid_weth_not_approve(create_auction):
auction = create_auction[0]
nft = create_auction[1]
with brownie.reverts("The amount is not approved!"):
auction.bid(nft, TOKEN_ID, 10 ** 19, {"from": BIDDER})
def test_bid(create_auction):
auction = create_auction[0]
nft = create_auction[1]
weth = create_auction[2]
weth.approve(auction, 10 ** 19, {"from": BIDDER})
auction.bid(nft, TOKEN_ID, 10 ** 19, {"from": BIDDER})
assert auction.allAuctions(nft, TOKEN_ID)[3] == 10 ** 19 # highest bid
assert auction.allAuctions(nft, TOKEN_ID)[4] == BIDDER # highest bidder
def test_cant_update_timestamp_ended(create_auction):
auction = create_auction[0]
nft = create_auction[1]
new_timestamp = int(time.time()) + 10
time.sleep(35)
with brownie.reverts("The auction is over!"):
auction.updateEndingTimestamp(
nft, TOKEN_ID, new_timestamp, {"from": ACCOUNT})
def test_cant_update_timestamp_not_owner(create_auction):
auction = create_auction[0]
nft = create_auction[1]
new_timestamp = int(time.time()) + 10
with brownie.reverts("The sender is not the seller!"):
auction.updateEndingTimestamp(
nft, TOKEN_ID, new_timestamp, {"from": BIDDER})
def test_update_timestamp(create_auction):
auction = create_auction[0]
nft = create_auction[1]
new_timestamp = int(time.time()) + 10
auction.updateEndingTimestamp(
nft, TOKEN_ID, new_timestamp, {"from": ACCOUNT})
assert auction.allAuctions(nft, TOKEN_ID)[1] == new_timestamp
def test_cant_update_price_ended(create_auction):
auction = create_auction[0]
nft = create_auction[1]
new_price = 10 ** 17
time.sleep(35)
with brownie.reverts("The auction is over!"):
auction.updateStartingPrice(
nft, TOKEN_ID, new_price, {"from": ACCOUNT})
def test_cant_update_price_not_owner(create_auction):
auction = create_auction[0]
nft = create_auction[1]
new_price = 10 ** 17
with brownie.reverts("The sender is not the seller!"):
auction.updateStartingPrice(nft, TOKEN_ID, new_price, {"from": BIDDER})
def test_update_price(create_auction):
auction = create_auction[0]
nft = create_auction[1]
new_price = 10 ** 17
auction.updateStartingPrice(nft, TOKEN_ID, new_price, {"from": ACCOUNT})
assert auction.allAuctions(nft, TOKEN_ID)[2] == new_price
def test_cant_end_not_ended(create_auction):
auction = create_auction[0]
nft = create_auction[1]
with brownie.reverts("The auction is not over!"):
auction.endAuction(nft, TOKEN_ID, {"from": ACCOUNT})
def test_end(create_auction):
auction = create_auction[0]
nft = create_auction[1]
time.sleep(35)
auction.endAuction(nft, TOKEN_ID, {"from": ACCOUNT})
assert auction.allAuctions(
nft, TOKEN_ID)[-1] == "0x0000000000000000000000000000000000000000"
def test_cant_force_reset_ended(create_auction):
auction = create_auction[0]
nft = create_auction[1]
time.sleep(35)
auction.endAuction(nft, TOKEN_ID, {"from": ACCOUNT})
with brownie.reverts("The auction has already ended!"):
auction.forceReset(nft, TOKEN_ID, {"from": ACCOUNT})
def test_cant_force_reset_ongoing(create_auction):
auction = create_auction[0]
nft = create_auction[1]
with brownie.reverts("You can only force reset after 7 days!"):
auction.forceReset(nft, TOKEN_ID, {"from": ACCOUNT})
def test_cant_force_reset_ended_before_7_days(create_auction):
auction = create_auction[0]
nft = create_auction[1]
time.sleep(35)
with brownie.reverts("You can only force reset after 7 days!"):
auction.forceReset(nft, TOKEN_ID, {"from": ACCOUNT})
def test_force_reset(create_auction):
auction = create_auction[0]
nft = create_auction[1]
time.sleep((7*24*60*60)+30) # 7 days
auction.forceReset(nft, TOKEN_ID, {"from": ACCOUNT})
assert auction.allAuctions(
nft, TOKEN_ID)[-1] == "0x0000000000000000000000000000000000000000"
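# --- Illustrative helper (a sketch, not part of the original suite) ---
# The assertions above index auction.allAuctions(nft, TOKEN_ID) positionally:
# [1] ending timestamp, [2] starting price, [3] highest bid, [4] highest bidder,
# [-1] seller. A hypothetical helper like this makes those positions explicit.
def auction_fields(auction, nft, token_id):
    data = auction.allAuctions(nft, token_id)
    return {
        "ending_timestamp": data[1],
        "starting_price": data[2],
        "highest_bid": data[3],
        "highest_bidder": data[4],
        "seller": data[-1],
    }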
| 28.944444
| 96
| 0.685988
| 1,010
| 7,815
| 5.072277
| 0.092079
| 0.16748
| 0.060511
| 0.096428
| 0.831934
| 0.789967
| 0.751513
| 0.722038
| 0.692758
| 0.631856
| 0
| 0.042434
| 0.200896
| 7,815
| 269
| 97
| 29.052045
| 0.777902
| 0.007038
| 0
| 0.603448
| 0
| 0
| 0.088728
| 0.010833
| 0
| 0
| 0.010833
| 0
| 0.045977
| 1
| 0.143678
| false
| 0
| 0.028736
| 0.005747
| 0.189655
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
f67b09e9c1d5c8318ab8bb5c5cca50a06fd25f11
| 423
|
py
|
Python
|
appbak/core/errors.py
|
Linyameng/alphadata-dev
|
7a48c9ddf24442a89f3f8ab1ba78e573c8844f26
|
[
"Apache-2.0"
] | null | null | null |
appbak/core/errors.py
|
Linyameng/alphadata-dev
|
7a48c9ddf24442a89f3f8ab1ba78e573c8844f26
|
[
"Apache-2.0"
] | null | null | null |
appbak/core/errors.py
|
Linyameng/alphadata-dev
|
7a48c9ddf24442a89f3f8ab1ba78e573c8844f26
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
Created on 2018/5/24
@author: xing yan
"""
from flask import render_template
from . import core
@core.app_errorhandler(403)
def forbidden(e):
return render_template('403.html'), 403
@core.app_errorhandler(404)
def page_not_found(e):
return render_template('404.html'), 404
@core.app_errorhandler(500)
def internal_server_error(e):
return render_template('500.html'), 500
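# Hypothetical usage sketch (assumed module layout, not part of the original file):
# app_errorhandler registers these handlers application-wide, but they only take
# effect once the `core` blueprint is registered on a Flask app, e.g.:
#
#     from flask import Flask
#     from app import core   # assumed package name
#
#     app = Flask(__name__)
#     app.register_blueprint(core)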
| 17.625
| 43
| 0.723404
| 64
| 423
| 4.578125
| 0.5
| 0.191126
| 0.194539
| 0.215017
| 0.245734
| 0.245734
| 0.245734
| 0.245734
| 0
| 0
| 0
| 0.09589
| 0.137116
| 423
| 23
| 44
| 18.391304
| 0.706849
| 0.146572
| 0
| 0.181818
| 0
| 0
| 0.067989
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.272727
| false
| 0
| 0.181818
| 0.272727
| 0.727273
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 5
|
9c9398951f475a71010ba0d1cd8b0a0e5c0a940f
| 112
|
py
|
Python
|
symarray/symarray/arrays.py
|
costrouc/uarray
|
c3c42147181a88265942ad5f9cf439467f746782
|
[
"BSD-3-Clause"
] | null | null | null |
symarray/symarray/arrays.py
|
costrouc/uarray
|
c3c42147181a88265942ad5f9cf439467f746782
|
[
"BSD-3-Clause"
] | null | null | null |
symarray/symarray/arrays.py
|
costrouc/uarray
|
c3c42147181a88265942ad5f9cf439467f746782
|
[
"BSD-3-Clause"
] | null | null | null |
from .symbol_generator import ModuleWrapper
from .calculus import Array
ModuleWrapper('symarray.arrays', Array)
| 28
| 43
| 0.839286
| 13
| 112
| 7.153846
| 0.692308
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.089286
| 112
| 3
| 44
| 37.333333
| 0.911765
| 0
| 0
| 0
| 0
| 0
| 0.133929
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.666667
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
1432ad5bbae4a7bbf01c2aa6d919f7636cf7081b
| 139
|
py
|
Python
|
tfdet/model/postprocess/__init__.py
|
Burf/tfdetection
|
658e67d6db71e04bda2965d5a5d506d304ab8ad6
|
[
"Apache-2.0"
] | null | null | null |
tfdet/model/postprocess/__init__.py
|
Burf/tfdetection
|
658e67d6db71e04bda2965d5a5d506d304ab8ad6
|
[
"Apache-2.0"
] | null | null | null |
tfdet/model/postprocess/__init__.py
|
Burf/tfdetection
|
658e67d6db71e04bda2965d5a5d506d304ab8ad6
|
[
"Apache-2.0"
] | null | null | null |
from . import rcnn
from . import retina
from . import yolo
effdet = fcos = retina
from . import anodet
spade = padim = patch_core = anodet
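# Note (added for clarity): effdet and fcos reuse the retina post-processing
# module, and spade, padim and patch_core reuse the anodet post-processing module.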
| 19.857143
| 35
| 0.733813
| 20
| 139
| 5.05
| 0.6
| 0.39604
| 0.316832
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.201439
| 139
| 7
| 35
| 19.857143
| 0.90991
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.666667
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
1475a023b29037e3c1c204b14f2c236d6a2277b2
| 80
|
py
|
Python
|
Solution/75.py
|
pallavimr12/Python_Levelwise_Exercises
|
4090437b537260be2eca06c8d52d3a2bba1f5a5e
|
[
"BSD-3-Clause"
] | 2
|
2020-10-23T10:55:58.000Z
|
2020-11-24T04:26:23.000Z
|
Solution/75.py
|
pallavimr12/Python_Levelwise_Exercises
|
4090437b537260be2eca06c8d52d3a2bba1f5a5e
|
[
"BSD-3-Clause"
] | null | null | null |
Solution/75.py
|
pallavimr12/Python_Levelwise_Exercises
|
4090437b537260be2eca06c8d52d3a2bba1f5a5e
|
[
"BSD-3-Clause"
] | 2
|
2020-11-19T06:37:29.000Z
|
2022-01-18T14:36:46.000Z
|
import random
print(random.choice([i for i in range(201) if i%5==0 and i%7==0]))
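# Equivalent sketch: numbers divisible by both 5 and 7 are exactly the multiples
# of 35 (lcm(5, 7) = 35), so the same draw works without filtering:
print(random.choice(range(0, 201, 35)))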
| 40
| 66
| 0.6875
| 19
| 80
| 2.894737
| 0.736842
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.1
| 0.125
| 80
| 2
| 66
| 40
| 0.685714
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.5
| 0
| 0.5
| 0.5
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 1
|
0
| 5
|
14bb4f7a9f106d3554f16cb80e8b6e9e75df9f9c
| 68
|
py
|
Python
|
helloworld/__init__.py
|
stevej2608/pypi-holoniq-simple
|
b944639a81153fd0af40fec61c0108b6a8ac7aba
|
[
"MIT"
] | null | null | null |
helloworld/__init__.py
|
stevej2608/pypi-holoniq-simple
|
b944639a81153fd0af40fec61c0108b6a8ac7aba
|
[
"MIT"
] | null | null | null |
helloworld/__init__.py
|
stevej2608/pypi-holoniq-simple
|
b944639a81153fd0af40fec61c0108b6a8ac7aba
|
[
"MIT"
] | null | null | null |
from .greetings import say_hello
from ._version import __version__
| 17
| 33
| 0.838235
| 9
| 68
| 5.666667
| 0.666667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.132353
| 68
| 3
| 34
| 22.666667
| 0.864407
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
1ad71b737cf7f027a0980356eb71323d68e78c1c
| 215
|
py
|
Python
|
laboratory/pgfgantt/test_classes.py
|
matt-ketk/study-hall
|
6a6837278daefb336643aca7b203c41cab5debcb
|
[
"MIT"
] | null | null | null |
laboratory/pgfgantt/test_classes.py
|
matt-ketk/study-hall
|
6a6837278daefb336643aca7b203c41cab5debcb
|
[
"MIT"
] | null | null | null |
laboratory/pgfgantt/test_classes.py
|
matt-ketk/study-hall
|
6a6837278daefb336643aca7b203c41cab5debcb
|
[
"MIT"
] | null | null | null |
class Bye:
def __init__(self):
self.foo = 'bar'
def is_hello(self):
return type(self) == Hello
class Hello:
def __init__(self):
self.value = 'foobar'
print(Bye().is_hello())
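# Note (added for clarity): this prints False, since for a Bye instance
# type(self) is Bye rather than Hello; Hello itself does not define is_hello().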
| 14.333333
| 34
| 0.572093
| 28
| 215
| 4.035714
| 0.5
| 0.123894
| 0.19469
| 0.265487
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.288372
| 215
| 14
| 35
| 15.357143
| 0.738562
| 0
| 0
| 0.222222
| 0
| 0
| 0.042254
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| false
| 0
| 0
| 0.111111
| 0.666667
| 0.111111
| 1
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
|
0
| 5
|
1ad95737118332656d045dfa4c1664b41a28ae5f
| 1,815
|
py
|
Python
|
webapp/models.py
|
dumono/myMajordomGPIO
|
fb98d30a974f9e6bc6cdd102fc9650826433b064
|
[
"MIT"
] | null | null | null |
webapp/models.py
|
dumono/myMajordomGPIO
|
fb98d30a974f9e6bc6cdd102fc9650826433b064
|
[
"MIT"
] | 2
|
2022-01-26T18:52:54.000Z
|
2022-01-26T19:18:02.000Z
|
webapp/models.py
|
dumono/myMajordomGPIO
|
fb98d30a974f9e6bc6cdd102fc9650826433b064
|
[
"MIT"
] | null | null | null |
from webapp import db, login
from datetime import datetime
from werkzeug.security import generate_password_hash, check_password_hash
from flask_login import UserMixin
@login.user_loader
def load_user(id):
return User.query.get(int(id))
class User(UserMixin, db.Model):
id = db.Column(db.Integer, primary_key=True)
username = db.Column(db.String(64), index=True, unique=True)
email = db.Column(db.String(120), index=True, unique=True)
password_hash = db.Column(db.String(128))
def set_password(self, password):
self.password_hash = generate_password_hash(password)
def check_password(self, password):
return check_password_hash(self.password_hash, password)
def __repr__(self):
return '<User {}>'.format(self.username)
class GPIO_connect(db.Model):
# id = db.Column(db.Integer, primary_key=True)
gpio_type = db.Column(db.String(64))
gpio_num = db.Column(db.Integer, primary_key=True)
val = db.Column(db.String(64))
# comment = db.Column(db.String(120))
class GlobalConf(db.Model):
# id = db.Column(db.Integer, primary_key=True)
key = db.Column(db.String(128), index=True, primary_key=True)
val = db.Column(db.String(128))
comment = db.Column(db.String(128))
def __repr__(self):
return f'{self.key} {self.val} {self.comment}'
class GPIOTypes(db.Model):
gpioType = db.Column(db.String(16), index=True, primary_key=True)
def __repr__(self):
return self.gpioType
class GpioRules(db.Model):
id = db.Column(db.Integer, primary_key=True)
signal_pin = db.Column(db.Integer)
signal_type = db.Column(db.String(16))
condition = db.Column(db.String(3))
condition_value = db.Column(db.String(10))
action_type = db.Column(db.String(10))
action_pin = db.Column(db.Integer)
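# Minimal usage sketch (assumes an app context and an initialized db; not part
# of the original module):
#
#     u = User(username="alice", email="alice@example.com")
#     u.set_password("s3cret")
#     db.session.add(u)
#     db.session.commit()
#     u.check_password("s3cret")   # True
#     u.check_password("wrong")    # False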
| 31.842105
| 73
| 0.701377
| 269
| 1,815
| 4.572491
| 0.226766
| 0.136585
| 0.170732
| 0.182114
| 0.460976
| 0.272358
| 0.197561
| 0.18374
| 0.130081
| 0.130081
| 0
| 0.021753
| 0.164187
| 1,815
| 57
| 74
| 31.842105
| 0.789057
| 0.072176
| 0
| 0.125
| 1
| 0
| 0.026818
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.15
| false
| 0.15
| 0.1
| 0.125
| 0.95
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 1
| 0
|
0
| 5
|
2113a3163580de81cc9e835bcf0e2c7847744c44
| 124
|
py
|
Python
|
genero/admin.py
|
gabriel-laurindo-1/django-igtiflix
|
16b0a243584644dc41f235b12a2124a1e7797185
|
[
"MIT"
] | null | null | null |
genero/admin.py
|
gabriel-laurindo-1/django-igtiflix
|
16b0a243584644dc41f235b12a2124a1e7797185
|
[
"MIT"
] | null | null | null |
genero/admin.py
|
gabriel-laurindo-1/django-igtiflix
|
16b0a243584644dc41f235b12a2124a1e7797185
|
[
"MIT"
] | null | null | null |
from django.contrib import admin
from genero.models import Genero
# Register your models here.
admin.site.register(Genero)
| 20.666667
| 32
| 0.814516
| 18
| 124
| 5.611111
| 0.611111
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.120968
| 124
| 5
| 33
| 24.8
| 0.926606
| 0.209677
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.666667
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
2130312c52215af34c77de286760f9abbf56b5d4
| 208
|
py
|
Python
|
accessibility/passengers/models.py
|
RobSullivan/onboard
|
463fb9f09d52f0796b7fa3b0fc5beb8784161652
|
[
"MIT"
] | null | null | null |
accessibility/passengers/models.py
|
RobSullivan/onboard
|
463fb9f09d52f0796b7fa3b0fc5beb8784161652
|
[
"MIT"
] | null | null | null |
accessibility/passengers/models.py
|
RobSullivan/onboard
|
463fb9f09d52f0796b7fa3b0fc5beb8784161652
|
[
"MIT"
] | null | null | null |
from django.db import models
# Create your models here.
class Passenger(models.Model):
bus_route = models.CharField(max_length=7)
bus_stop = models.CharField(max_length=5)
is_waiting = models.BooleanField()
| 29.714286
| 43
| 0.798077
| 31
| 208
| 5.193548
| 0.709677
| 0.186335
| 0.223602
| 0.298137
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.010811
| 0.110577
| 208
| 7
| 44
| 29.714286
| 0.859459
| 0.115385
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0.2
| 0.2
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
|
0
| 5
|
dcf3dceaeed2196f229eea9868a2f707a0033182
| 10,853
|
py
|
Python
|
excut/feedback/strategies.py
|
mhmgad/ExCut
|
09e943a23207381de3c3a9e6f70015882b8ec4af
|
[
"Apache-2.0"
] | 5
|
2020-11-17T19:59:49.000Z
|
2021-09-23T23:10:39.000Z
|
excut/feedback/strategies.py
|
mhmgad/ExCut
|
09e943a23207381de3c3a9e6f70015882b8ec4af
|
[
"Apache-2.0"
] | null | null | null |
excut/feedback/strategies.py
|
mhmgad/ExCut
|
09e943a23207381de3c3a9e6f70015882b8ec4af
|
[
"Apache-2.0"
] | null | null | null |
"""
This module contains the different strategies to construct Auxiliary triples that are used to retain the embedding
Currently the module contains Abstract Strategy and 4 different implementations
"""
from itertools import chain, product
import numpy as np
from excut.feedback.rulebased_deduction.deduction_engine import SparqlBasedDeductionEngine
from excut.feedback.rulebased_deduction.deduction_engine_extended import SparqlBasedDeductionEngineExtended
from excut.kg.utils.Constants import DEFUALT_AUX_RELATION
from excut.kg.kg_triples_source import SimpleTriplesSource
from excut.clustering.target_entities import EntityLabels
class AbstractAugmentationStrategy():
"""
"""
def __init__(self, query_interface, quality_method='x_coverage', predictions_min_quality=0, topk=1,
aux_relation=DEFUALT_AUX_RELATION):
self.quality_method = quality_method
self.topk = topk
self.predictions_min_quality = predictions_min_quality
self.query_interface = query_interface
self.deduction_engine = SparqlBasedDeductionEngineExtended(kg_query_interface=self.query_interface, quality=quality_method,
relation=aux_relation)
def infer_cluster_assignments(self, descriptions, target_entities=None, output_file=None):
descriptions_list = chain.from_iterable(descriptions.values())
per_var_predictions = self.deduction_engine.infer(descriptions_list, target_entities=target_entities,
min_quality=self.predictions_min_quality,
topk=self.topk, output_filepath=output_file)
# print(len(per_var_predictions.values()))
triples = np.array([list(x.triple) for x in chain.from_iterable(per_var_predictions.values())], dtype=object)
triples = triples.reshape(-1, 3)
return triples
def get_augmentation_triples(self, **kwargs):
pass
class DirectAugmentationStrategy(AbstractAugmentationStrategy):
def __init__(self, query_interface, quality_method='x_coverage', predictions_min_quality=0, topk=1,
aux_relation=DEFUALT_AUX_RELATION):
super(DirectAugmentationStrategy, self).__init__(query_interface, quality_method=quality_method,
predictions_min_quality=predictions_min_quality, topk=topk,
aux_relation=aux_relation)
def get_augmentation_triples(self, descriptions: dict, target_entities=None, output_file=None, iter_num=0):
triples = self.infer_cluster_assignments(descriptions, target_entities=target_entities, output_file=output_file)
triples=np.array([[t[0],t[1]+'_%i'%iter_num,t[2]] for t in triples], dtype=object)
return EntityLabels(triples, 'Itr %i re-assignments')
class SameAsAugmentationStrategy(AbstractAugmentationStrategy):
def __init__(self, query_interface, quality_method='x_coverage', predictions_min_quality=0, topk=1,
aux_relation=DEFUALT_AUX_RELATION):
super(SameAsAugmentationStrategy, self).__init__(query_interface, quality_method=quality_method,
predictions_min_quality=predictions_min_quality, topk=topk,
aux_relation=aux_relation)
def get_augmentation_triples(self, descriptions: dict, target_entities=None, output_file=None, iter_num=0):
triples = self.infer_cluster_assignments(descriptions, target_entities=target_entities, output_file=output_file)
print('Inferred triples shape: %r' % str(triples.shape))
labels = np.unique(triples[:, 2])
output_relations = []
for l in labels:
c_triples = triples[triples[:, 2] == l, 0]
output_relations += [[s[0], 'http://execute.org/sameCLAs_%i' % iter_num, s[1]] for s in
product(c_triples, repeat=2)]
return SimpleTriplesSource(output_relations)
class RuleAndClusterNodesAugmentationStrategy(AbstractAugmentationStrategy):
def __init__(self, query_interface, quality_method='x_coverage', predictions_min_quality=0, topk=5,
aux_relation=DEFUALT_AUX_RELATION):
super(RuleAndClusterNodesAugmentationStrategy, self).__init__(query_interface, quality_method=quality_method,
predictions_min_quality=predictions_min_quality, topk=topk,
aux_relation=aux_relation)
def get_augmentation_triples(self, descriptions: dict, target_entities=None, output_file=None, iter_num=0):
descriptions_list = [d for d in chain.from_iterable(descriptions.values())]
per_var_predictions = self.deduction_engine.infer(descriptions_list,
target_entities=target_entities,
min_quality=self.predictions_min_quality,
topk=self.topk, output_filepath=output_file)
# all_predictions= [x for x in chain.from_iterable(per_var_predictions.values())]
# print(all_predictions[0].all_sources)
# descriptions_lists=[p.all_sources for p in all_predictions ]
# # set( reduce(lambda x,y :x.all_sources+y.all_sources, all_predictions))
# unique_descriptions = {d for d in chain.from_iterable(descriptions_lists)}
# unique_descriptions= set(filter(lambda d: d.get_quality(self.quality_method)> self.predictions_min_quality,
# unique_descriptions))
unique_descriptions=set(list(descriptions_list))
descriptions_ids = dict(zip(unique_descriptions, range(len(unique_descriptions))))
output_triples=[]
for p in chain.from_iterable(per_var_predictions.values()):
explans_to_model= filter(lambda d: d.get_quality(self.quality_method)> self.predictions_min_quality,
p.all_sources)
explans_ids= ['http://execute.org/r%i_%i'%(descriptions_ids[expl],iter_num) for expl in explans_to_model]
entity_rule_triples=[[p.get_subject(), 'http://execute.org/ground_%i'%iter_num, expl] for expl in explans_ids]
rules_clusters_triples= [[expl, 'http://execute.org/explain_%i'%iter_num, p.get_object() ] for expl in explans_ids]
output_triples+= entity_rule_triples + rules_clusters_triples
return SimpleTriplesSource(output_triples)
class RuleEdgesClusterNodesAugmentationStrategy(AbstractAugmentationStrategy):
def __init__(self, query_interface, quality_method='x_coverage', predictions_min_quality=0, topk=5,
aux_relation=DEFUALT_AUX_RELATION):
super(RuleEdgesClusterNodesAugmentationStrategy, self).__init__(query_interface, quality_method=quality_method,
predictions_min_quality=predictions_min_quality, topk=topk,
aux_relation=aux_relation)
def get_augmentation_triples(self, descriptions: dict, target_entities=None, output_file=None, iter_num=0):
descriptions_list = [d for d in chain.from_iterable(descriptions.values())]
per_var_predictions = self.deduction_engine.infer(descriptions_list,
target_entities=target_entities,
min_quality=self.predictions_min_quality,
topk=self.topk, output_filepath=output_file)
# all_predictions= [x for x in chain.from_iterable(per_var_predictions.values())]
# print(all_predictions[0].all_sources)
# descriptions_lists=[p.all_sources for p in all_predictions ]
# # set( reduce(lambda x,y :x.all_sources+y.all_sources, all_predictions))
# unique_descriptions = {d for d in chain.from_iterable(descriptions_lists)}
# unique_descriptions= set(filter(lambda d: d.get_quality(self.quality_method)> self.predictions_min_quality,
# unique_descriptions))
unique_descriptions=set(list(descriptions_list))
descriptions_ids = dict(zip(unique_descriptions, range(len(unique_descriptions))))
output_triples=[]
for p in chain.from_iterable(per_var_predictions.values()):
explans_to_model= filter(lambda d: d.get_quality(self.quality_method)> self.predictions_min_quality,
p.all_sources)
explans_ids= ['http://execute.org/r%i_%i'%(descriptions_ids[expl],iter_num) for expl in explans_to_model]
entity_rule_triples=[[p.get_subject(), expl, p.get_object()] for expl in explans_ids]
# rules_clusters_triples= [[expl, 'http://execute.org/explain_%i'%iter_num, p.get_object() ] for expl in explans_ids]
output_triples+= entity_rule_triples #+ rules_clusters_triples
return SimpleTriplesSource(output_triples)
def get_strategy(method_name, kg_query_interface, quality_method='x_coverage', predictions_min_quality=0, topk=1,
aux_relation=DEFUALT_AUX_RELATION):
method_name = method_name.lower()
if method_name == 'direct':
return DirectAugmentationStrategy(kg_query_interface, quality_method=quality_method,
predictions_min_quality=predictions_min_quality,
topk=topk, aux_relation=aux_relation)
elif method_name == 'sameclas':
return SameAsAugmentationStrategy(kg_query_interface, quality_method=quality_method,
predictions_min_quality=predictions_min_quality,
topk=topk, aux_relation=aux_relation)
elif method_name == 'entexplcls':
return RuleAndClusterNodesAugmentationStrategy(kg_query_interface, quality_method=quality_method,
predictions_min_quality=predictions_min_quality,
topk=topk, aux_relation=aux_relation)
elif method_name == 'explasedges':
return RuleEdgesClusterNodesAugmentationStrategy(kg_query_interface, quality_method=quality_method,
predictions_min_quality=predictions_min_quality,
topk=topk, aux_relation=aux_relation)
else:
raise Exception("Method %s not Supported!" % method_name)
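# Illustrative usage sketch (hypothetical variable names, not part of the module):
#
#     strategy = get_strategy('direct', kg_query_interface=query_interface,
#                             quality_method='x_coverage', topk=1)
#     aux_triples = strategy.get_augmentation_triples(descriptions,
#                                                     target_entities=target_entities,
#                                                     iter_num=0)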
| 57.121053
| 131
| 0.657975
| 1,149
| 10,853
| 5.861619
| 0.136641
| 0.050483
| 0.096659
| 0.056125
| 0.737194
| 0.732591
| 0.721604
| 0.706756
| 0.705271
| 0.705271
| 0
| 0.00376
| 0.264904
| 10,853
| 189
| 132
| 57.42328
| 0.840436
| 0.124482
| 0
| 0.516949
| 0
| 0
| 0.032333
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.101695
| false
| 0.008475
| 0.059322
| 0
| 0.279661
| 0.008475
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
0d0bfb0fa61fc66552f3a4db19865de4ed48c791
| 75
|
py
|
Python
|
gui/renderer/__init__.py
|
nerdinand/shooty-game
|
a2f35035bd1ed02676a8384ba6d04e4d7ec42d0c
|
[
"MIT"
] | null | null | null |
gui/renderer/__init__.py
|
nerdinand/shooty-game
|
a2f35035bd1ed02676a8384ba6d04e4d7ec42d0c
|
[
"MIT"
] | null | null | null |
gui/renderer/__init__.py
|
nerdinand/shooty-game
|
a2f35035bd1ed02676a8384ba6d04e4d7ec42d0c
|
[
"MIT"
] | null | null | null |
from .render_settings import RenderSettings
from .renderer import Renderer
| 25
| 43
| 0.866667
| 9
| 75
| 7.111111
| 0.666667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.106667
| 75
| 2
| 44
| 37.5
| 0.955224
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
0d0cf2866c9b89a300d5647a0ee1c73ec7d995b8
| 86,156
|
py
|
Python
|
efficientnet.py
|
QFaceblue/Driving-Behavior-Recognition
|
98c8fab51c7074852598ea9119f472ed7b1bda13
|
[
"Apache-2.0"
] | 1
|
2022-03-13T14:37:17.000Z
|
2022-03-13T14:37:17.000Z
|
efficientnet.py
|
QFaceblue/Driving-Behavior-Recognition
|
98c8fab51c7074852598ea9119f472ed7b1bda13
|
[
"Apache-2.0"
] | null | null | null |
efficientnet.py
|
QFaceblue/Driving-Behavior-Recognition
|
98c8fab51c7074852598ea9119f472ed7b1bda13
|
[
"Apache-2.0"
] | null | null | null |
from __future__ import print_function, division
import torch
from torch.nn import init
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
from torch.optim import lr_scheduler
import numpy as np
from torch.utils.data import DataLoader, Dataset
import torchvision
from torchvision import datasets, models, transforms
import matplotlib.pyplot as plt
import time
import os
import math
import copy
# from tensorboardX import SummaryWriter
from utils import progress_bar, format_time
import json
from PIL import Image
from efficientnet_pytorch import EfficientNet
from ghost_net import ghost_net, ghost_net_Cifar
from ghostnet import ghostnet
from mnext import mnext
from mobilenetv3 import MobileNetV3, mobilenetv3_s
from mobilenetv3_2 import MobileNetV3_Small, MobileNetV3_Large
from mobilenet import my_mobilenext, my_mobilenext_2
from mobilenetv3_torch import mobilenet_v3_large, mobilenet_v3_small
import onnxruntime
import cv2
import json
import pandas as pd
from mobilenetv2_cbam import MobileNetV2_cbam
def softmax_np(x):
x_row_max = x.max(axis=-1)
x_row_max = x_row_max.reshape(list(x.shape)[:-1]+[1])
x = x - x_row_max
x_exp = np.exp(x)
x_exp_row_sum = x_exp.sum(axis=-1).reshape(list(x.shape)[:-1]+[1])
softmax = x_exp / x_exp_row_sum
return softmax
def softmax_flatten(x):
x = x.flatten()
x_row_max = x.max()
x = x - x_row_max
x_exp = np.exp(x)
x_exp_row_sum = x_exp.sum()
softmax = x_exp / x_exp_row_sum
return softmax
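# Quick sanity check (illustrative, not in the original script): each row of a
# softmax_np result sums to 1, while softmax_flatten normalizes over all entries.
#
#     x = np.array([[1.0, 2.0, 3.0], [0.0, 0.0, 0.0]])
#     softmax_np(x).sum(axis=-1)    # -> array([1., 1.])
#     softmax_flatten(x).sum()      # -> 1.0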
def efficientnet_test():
model = EfficientNet.from_pretrained('efficientnet-b0')
# Preprocess image
tfms = transforms.Compose([transforms.Resize((224, 224)), transforms.ToTensor(),
transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]), ])
img = tfms(Image.open('./data/imgs/elephant.jpg')).unsqueeze(0)
print(img.shape) # torch.Size([1, 3, 224, 224])
# Load ImageNet class names
labels_map = json.load(open('./data/labels_map.txt'))
labels_map = [labels_map[str(i)] for i in range(1000)]
# Classify
model.eval()
with torch.no_grad():
outputs = model(img)
# Print predictions
print('-----')
for idx in torch.topk(outputs, k=5).indices.squeeze(0).tolist():
prob = torch.softmax(outputs, dim=1)[0, idx].item()
print('{label:<75} ({p:.2f}%)'.format(label=labels_map[idx], p=prob * 100))
# print('{label:<75} ({p:.2f}%)'.format(label=idx, p=prob * 100))
def imshow(inp, title=None):
"""Imshow for Tensor."""
# First convert the tensor to numpy, then move the channel dimension last for easy broadcasting
inp = inp.numpy().transpose((1, 2, 0))
mean = np.array([0.485, 0.456, 0.406])
std = np.array([0.229, 0.224, 0.225])
inp = std * inp + mean
inp = np.clip(inp, 0, 1)
# Display works whether inp holds floats in 0-1 or integers in 0-255
# inp = np.clip(inp, 0, 1)*255
# inp = inp.astype(np.int32)
# print(inp)
plt.imshow(inp)
plt.rcParams['font.sans-serif'] = ['SimHei']
plt.rcParams['axes.unicode_minus'] = False
if title is not None:
plt.title(title)
# plt.pause(0.001) # pause a bit so that plots are updated
plt.show()
def showimage(dataloader, class_names):
# Fetch one batch of training data
inputs, classes = next(iter(dataloader))
# Make a grid of images from the batch
out = torchvision.utils.make_grid(inputs)
imshow(out, title=[class_names[x] for x in classes])
# # A generic helper for visualizing a few predicted images
# def visualize_model(model, dataloader,device):
# model.eval()
# model.to(device)
# with torch.no_grad():
# inputs, labels = next(iter(dataloader))
# inputs = inputs.to(device)
# labels = labels.to(device)
# outputs = model(inputs)
# _, preds = torch.max(outputs, 1)
# out = torchvision.utils.make_grid(inputs).cpu()
# # title = "predect/label"
# title = ""
# for i,label in enumerate(labels):
# # title+=" {}/{} ".format(preds[i],label)
# title += " {} ".format(label_name[preds[i]])
# imshow(out, title=title)
# def visualize_pred():
#
# device = 'cuda' if torch.cuda.is_available() else 'cpu'
# model = EfficientNet.from_name('efficientnet-b0',num_classes=10)
# # Load model parameters
# # path = r"checkpoint/B0/000/B0_acc=99.8528.pth"
# path = r"checkpoint\B0\111\B0_acc=99.5540.pth"
# checkpoint = torch.load(path)
# model.load_state_dict(checkpoint["net"])
# print("loaded model with acc:{}".format(checkpoint["acc"]))
# # data_path = r"E:\Datasets\state-farm-distracted-driver-detection\imgs\train"
# # # data_path = r".\data\hymenoptera_data\train"
# # train_dataset = datasets.ImageFolder(root=data_path, transform=data_transform)
# # train_dataloader = DataLoader(dataset=train_dataset,
# # batch_size=4,
# # shuffle=True,
# # num_workers=0)
# val_dataset = MyDataset("./data/dval.txt", data_transform)
# val_dataloader = DataLoader(dataset=val_dataset,
# batch_size=4,
# shuffle=True,
# num_workers=0)
# # visualize_model(model,train_dataloader,device)
# visualize_model(model, val_dataloader, device)
class MyDataset(Dataset):
def __init__(self, names_file, transform=None):
self.names_file = names_file
self.transform = transform
self.names_list = []
if not os.path.isfile(self.names_file):
print(self.names_file + ' does not exist!')
with open(self.names_file, "r", encoding="utf-8") as f:
lists = f.readlines()
for l in lists:
self.names_list.append(l)
def __len__(self):
return len(self.names_list)
def __getitem__(self, idx):
image_path = self.names_list[idx].split(' ')[0]
# print(image_path)
if not os.path.isfile(image_path):
print(image_path + ' does not exist!')
return None
image = Image.open(image_path).convert('RGB') #
if self.transform:
image = self.transform(image)
label = int(self.names_list[idx].split(' ')[1])
sample = image, label
return sample
# #
# train_transform = transforms.Compose([
#
# transforms.RandomResizedCrop((224, 224), scale=(0.8, 1.0), ratio=(3. / 4., 4. / 3.), ),
# # transforms.RandomResizedCrop((320, 320), scale=(0.8, 1.0), ratio=(3. / 4., 4. / 3.), ),
# transforms.ColorJitter(brightness=0.1, contrast=0.1, saturation=0.1, hue=0.1),
# # transforms.RandomRotation(10, resample=False, expand=False, center=None),
# transforms.ToTensor(),
# transforms.Normalize(mean=[0.485, 0.456, 0.406],
# std=[0.229, 0.224, 0.225])
# ])
#
# val_transform = transforms.Compose([
# transforms.Resize((224, 224)),
# # transforms.Resize((320, 320)),
# transforms.ToTensor(),
# transforms.Normalize(mean=[0.485, 0.456, 0.406],
# std=[0.229, 0.224, 0.225])
# ])
# train_transform = transforms.Compose([
#
# transforms.Resize((224, 224)),
# # transforms.ColorJitter(brightness=0.8, contrast=0.5, saturation=0.5, hue=0.1),
# # transforms.RandomRotation(10, resample=False, expand=False, center=None),
# # transforms.RandomCrop(224, padding=16),
# transforms.ToTensor(),
# transforms.Normalize(mean=[0.485, 0.456, 0.406],
# std=[0.229, 0.224, 0.225])
# ])
# train_transform = transforms.Compose([
#
# transforms.Resize((224, 224)),
# transforms.ColorJitter(brightness=0.5, contrast=0.5, saturation=0.5, hue=0.1),
# # transforms.ColorJitter(brightness=0.8, contrast=0.5, saturation=0.5, hue=0.1),
# # transforms.RandomRotation(20, resample=False, expand=False, center=None),
# # transforms.ColorJitter(brightness=0.2, contrast=0.2, saturation=0.2, hue=0.1),
# transforms.RandomRotation(10, resample=False, expand=False, center=None),
# # transforms.RandomHorizontalFlip(p=0.5),
# # # transforms.RandomVerticalFlip(p=0.5),
# # # ToTensor() rescales pixel values from the 0-255 range to 0-1,
# # # transforms.Normalize() then maps 0-1 to (-1, 1); concretely, for each channel Normalize computes:
# # # image=(image-mean)/std
# # transforms.RandomResizedCrop((224,224)),
# # transforms.Resize((224, 224)),
# transforms.RandomCrop(224, padding=16),
# transforms.ToTensor(),
# transforms.Normalize(mean=[0.485, 0.456, 0.406],
# std=[0.229, 0.224, 0.225])
# ])
#
train_transform = transforms.Compose([
transforms.Resize((240, 240)),
transforms.RandomCrop(224),
# transforms.Resize((224, 224)),
transforms.ColorJitter(brightness=0.5, contrast=0.5, saturation=0.5, hue=0.1),
# transforms.RandomRotation(20, resample=False, expand=False, center=None),
# transforms.ColorJitter(brightness=0.2, contrast=0.2, saturation=0.2, hue=0.1),
transforms.RandomRotation(10, resample=False, expand=False, center=None),
# transforms.RandomHorizontalFlip(p=0.5),
# # transforms.RandomVerticalFlip(p=0.5),
# # ToTensor() rescales pixel values from the 0-255 range to 0-1,
# # transforms.Normalize() then maps 0-1 to (-1, 1); concretely, for each channel Normalize computes:
# # image=(image-mean)/std
# transforms.RandomResizedCrop((224,224)),
# transforms.Resize((224, 224)),
# transforms.RandomCrop(224, padding=16),
transforms.ToTensor(),
transforms.Normalize(mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225])
])
# train_transform = transforms.Compose([
# transforms.Resize((224, 224)),
# # transforms.RandomCrop(320),
# transforms.ColorJitter(brightness=0.5, contrast=0.5, saturation=0.5, hue=0.1),
# transforms.RandomRotation(10, resample=False, expand=False, center=None),
# transforms.ToTensor(),
# transforms.Normalize(mean=[0.485, 0.456, 0.406],
# std=[0.229, 0.224, 0.225])
# ])
val_transform = transforms.Compose([
transforms.Resize((224, 224)),
transforms.ToTensor(),
transforms.Normalize(mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225])
])
# train_transform = transforms.Compose([
# transforms.Resize((320, 320)),
# # transforms.RandomCrop(320),
# transforms.ColorJitter(brightness=0.5, contrast=0.5, saturation=0.5, hue=0.1),
# transforms.RandomRotation(10, resample=False, expand=False, center=None),
# transforms.ToTensor(),
# transforms.Normalize(mean=[0.485, 0.456, 0.406],
# std=[0.229, 0.224, 0.225])
# ])
#
# val_transform = transforms.Compose([
# transforms.Resize((320, 320)),
# transforms.ToTensor(),
# transforms.Normalize(mean=[0.485, 0.456, 0.406],
# std=[0.229, 0.224, 0.225])
# ])
# train_transform = transforms.Compose([
# transforms.Resize((340, 340)),
# transforms.RandomCrop(320),
# transforms.ColorJitter(brightness=0.5, contrast=0.5, saturation=0.5, hue=0.1),
# transforms.RandomRotation(10, resample=False, expand=False, center=None),
# transforms.ToTensor(),
# transforms.Normalize(mean=[0.485, 0.456, 0.406],
# std=[0.229, 0.224, 0.225])
# ])
#
# val_transform = transforms.Compose([
# transforms.Resize((320, 320)),
# transforms.ToTensor(),
# transforms.Normalize(mean=[0.485, 0.456, 0.406],
# std=[0.229, 0.224, 0.225])
# ])
# train_transform = transforms.Compose([
# transforms.Resize((160, 160)),
# # transforms.RandomCrop(320),
# transforms.ColorJitter(brightness=0.5, contrast=0.5, saturation=0.5, hue=0.1),
# transforms.RandomRotation(10, resample=False, expand=False, center=None),
# transforms.ToTensor(),
# transforms.Normalize(mean=[0.485, 0.456, 0.406],
# std=[0.229, 0.224, 0.225])
# ])
#
# val_transform = transforms.Compose([
# transforms.Resize((160, 160)),
# transforms.ToTensor(),
# transforms.Normalize(mean=[0.485, 0.456, 0.406],
# std=[0.229, 0.224, 0.225])
# ])
# train_transform = transforms.Compose([
#
# transforms.Resize((160, 160)),
# transforms.ColorJitter(brightness=0.5, contrast=0.5, saturation=0.5, hue=0.1),
# # transforms.RandomRotation(20, resample=False, expand=False, center=None),
# # transforms.ColorJitter(brightness=0.2, contrast=0.2, saturation=0.2, hue=0.1),
# transforms.RandomRotation(10, resample=False, expand=False, center=None),
# # transforms.RandomHorizontalFlip(p=0.5),
# # # transforms.RandomVerticalFlip(p=0.5),
# # # ToTensor() rescales pixel values from the 0-255 range to 0-1,
# # # transforms.Normalize() then maps 0-1 to (-1, 1); concretely, for each channel Normalize computes:
# # # image=(image-mean)/std
# # # transforms.RandomResizedCrop((224,224)),
# # # transforms.Resize((224, 224)),
# transforms.RandomCrop(160, padding=16),
# transforms.ToTensor(),
# transforms.Normalize(mean=[0.485, 0.456, 0.406],
# std=[0.229, 0.224, 0.225])
# ])
device = 'cuda' if torch.cuda.is_available() else 'cpu'
# device = 'cpu'
best_acc = 0 # best test accuracy
best_val_acc = 0
start_epoch = 0 # start from epoch 0 or last checkpoint epoch
# kaggle dataset
# num_classes = 10
# label_name = ["正常","右持手机","右接电话","左持手机","左接电话","操作仪器","喝水","向后侧身","整理仪容","侧视"]
# label_name = ["normal", "texting-R", "answering_R", "texting-L", "answering_L", "operating", "drinking", "leaning_back", "makeup", "side_view"]
# # New set of categories
# # # label_name = ["正常","右接电话",左接电话","低头","操作仪器","喝水","吸烟","向后侧身","整理仪容","侧视"]
# # label_name = ["normal", "right to answer the phone", "left to answer the phone", "head down", "operating instruments", "drinking water", "smoking", "leaning back", "makeup", "side view"]
# # mydataset
# classes_path = r"data/drive_classes.txt"
# with open(classes_path) as f:
# label_name = [c.strip() for c in f.readlines()]
# num_classes = len(label_name)
# num_classes = 100
# num_classes = 10
num_classes = 9
# num_classes = 8
# num_classes = 7
# num_classes = 6
# net = EfficientNet.from_pretrained('efficientnet-b0',num_classes=num_classes)
# net = models.resnet18(pretrained=True)
# net = models.resnext50_32x4d(pretrained=True)
# net = models.resnext50_32x4d(pretrained=False,num_classes=num_classes)
# net = models.resnet50(pretrained=False,num_classes=num_classes)
# net = models.mobilenet_v2(pretrained=True)
# net = models.mobilenet_v2(pretrained=False, num_classes=num_classes, width_mult=1.0)
# net = models.mobilenet_v2(pretrained=False, num_classes=num_classes, width_mult=0.5)
# net = models.mobilenet_v2(pretrained=False, num_classes=num_classes, width_mult=0.3)
# net = models.mobilenet_v2(pretrained=False, num_classes=num_classes, width_mult=0.1)
# net = models.mobilenet_v2(pretrained=False, width_mult=1.0)
net = models.mobilenet_v2(pretrained=True, width_mult=1.0)
num_in = net.classifier[1].in_features
net.classifier[1] = nn.Linear(num_in, num_classes)
# net = mobilenet_v3_small(pretrained=True)
# # net = mobilenet_v3_large(pretrained=True)
# num_in = net.classifier[3].in_features
# net.classifier[3] = nn.Linear(num_in, num_classes)
#
# net = models.resnext50_32x4d(pretrained=True)
# num_in = net.fc.in_features
# net.fc = nn.Linear(num_in, num_classes)
# # Load model weights, ignoring mismatched keys
# model_path = r"checkpoint/data_12_23/mobilenetv2/000/mobilenetv2_1_12_23_acc=92.1389.pth"
# model_dict =net.state_dict()
# checkpoint = torch.load(model_path, map_location=device)
# pretrained_dict = checkpoint["net"]
# # pretrained_dict = checkpoint
# pretrained_dict = {k: v for k, v in pretrained_dict.items() if np.shape(model_dict[k]) == np.shape(v)}
# model_dict.update(pretrained_dict)
# net.load_state_dict(model_dict)
# net = MobileNetV2_cbam(num_classes=num_classes, width_mult=1.0, add_location=16)
#
# # Update weights for add_location=16
# model_path = r"weights/mobilenet_v2-b0353104.pth"
# model_dict =net.state_dict()
# checkpoint = torch.load(model_path, map_location=device)
# pretrained_dict = checkpoint
# new_key = list(model_dict.keys())
# pre_key = list(pretrained_dict.keys())
# ignore_num = 3
# start_index = new_key.index('features.2.fc1.weight')
# print(new_key[start_index+3], pre_key[start_index])
# for i in range(len(pre_key)):
# if i<start_index:
# j = i
# else:
# j = i+3
# if np.shape(model_dict[new_key[j]]) == np.shape(pretrained_dict[pre_key[i]]):
# model_dict[new_key[j]] = pretrained_dict[pre_key[i]]
# net.load_state_dict(model_dict)
# net = MobileNetV2_cbam(num_classes=num_classes, width_mult=1.0, add_location=64)
#
# # Update weights for add_location=64
# model_path = r"weights/mobilenet_v2-b0353104.pth"
# model_dict =net.state_dict()
# checkpoint = torch.load(model_path, map_location=device)
# pretrained_dict = checkpoint
# new_key = list(model_dict.keys())
# pre_key = list(pretrained_dict.keys())
# ignore_num = 3
# start_index = new_key.index('features.11.fc1.weight')
# print(new_key[start_index+3], pre_key[start_index])
# for i in range(len(pre_key)):
# if i<start_index:
# j = i
# else:
# j = i+3
# if np.shape(model_dict[new_key[j]]) == np.shape(pretrained_dict[pre_key[i]]):
# model_dict[new_key[j]] = pretrained_dict[pre_key[i]]
# net.load_state_dict(model_dict)
# net = models.shufflenet_v2_x1_0(pretrained=True)
# # net = models.shufflenet_v2_x0_5(pretrained=True)
# # net = models.resnet50(pretrained=True)
# num_in = net.fc.in_features
# net.fc = nn.Linear(num_in, num_classes)
# net = ghost_net_Cifar(num_classes=num_classes, width_mult=0.1)
# net = ghost_net(num_classes=num_classes, width_mult=1.)
# net = ghost_net(num_classes=num_classes, width_mult=0.5)
# net = ghost_net(num_classes=num_classes, width_mult=0.3)
# net = ghost_net(num_classes=num_classes, width_mult=0.1)
# net = ghostnet(num_classes=num_classes, width=1.)
# net = ghostnet(num_classes=num_classes, width=0.5)
# net = ghostnet(num_classes=num_classes, width=0.3)
# net = ghostnet(num_classes=num_classes, width=0.1)
# net = mnext(num_classes=num_classes, width_mult=1.)
# net = mnext(num_classes=num_classes, width_mult=0.5)
# net = my_mobilenext_2(num_classes=num_classes, width_mult=1.)
# num_in = net.fc.in_features
# # Parameters of the newly created layer are trainable by default
# net.fc = nn.Linear(num_in, num_classes)
# net = MobileNetV3(n_class=num_classes, mode="small", dropout=0.2, width_mult=1.0)
# net = MobileNetV3(n_class=num_classes, mode="large", dropout=0.2, width_mult=1.0)
# net = MobileNetV3_Small(num_classes=num_classes)
# net = MobileNetV3(n_class=num_classes, mode="large", dropout=0.2, width_mult=1.0)
# # Load model weights, ignoring mismatched keys
# # model_path = r"checkpoint/imagenet/imagenet100/mobilenetv2/111/mobilenetv2_1_imagenet_acc=68.9234.pth"
# # model_path = r"checkpoint/kaggle/v1/mobilenetv2/pre/000/mobilenetv2_1_kg1_acc=85.2244.pth"
# # model_path = r"checkpoint/imagenet/imagenet100/ghostnet/000/ghostnet_1_imagenet_acc=63.0497.pth"
# # model_path = r"checkpoint/imagenet/imagenet100/mnext/000/mnext_1_imagenet_acc=65.5769.pth"
# model_path = r"weights/mnext.pth.tar"
# model_dict =net.state_dict()
# checkpoint = torch.load(model_path, map_location=device)
# # pretrained_dict = checkpoint["net"]
# pretrained_dict = checkpoint
# pretrained_dict = {k: v for k, v in pretrained_dict.items() if np.shape(model_dict[k]) == np.shape(v)}
# model_dict.update(pretrained_dict)
# net.load_state_dict(model_dict)
# # print("loaded model with acc:{}".format(checkpoint["acc"]))
# # # # # Preload
# # path = r"checkpoint\resnet18\000\B0_acc=83.4532.pth"
# # path = r"checkpoint/mobilenetv2/000/mv2_acc=82.7338.pth"
# path = r"checkpoint/resnext50/333/resnext50_my_acc=72.6619.pth"
# checkpoint = torch.load(path)
# net.load_state_dict(checkpoint["net"],strict=False) # Still errors when parameter sizes differ! Loading is probably matched by parameter name, and same-named parameters are assumed to have the same shape, which is not the case here, hence the error
# print("loaded model with acc:{}".format(checkpoint["acc"]))
# num_in = net.fc.in_features
# # Parameters of the newly created layer are trainable by default
# net.fc = nn.Linear(num_in, num_classes)
# Along dim, take the values at the positions specified by index
# gather() For a 3-D tensor the output is specified by:
# out[i][j][k] = input[index[i][j][k]][j][k] # if dim == 0
# out[i][j][k] = input[i][index[i][j][k]][k] # if dim == 1
# out[i][j][k] = input[i][j][index[i][j][k]] # if dim == 2
# Along dim, write the values into the positions specified by index
# scatter_() For a 3-D tensor, self is updated as:
# self[index[i][j][k]][j][k] = src[i][j][k] # if dim == 0
# self[i][index[i][j][k]][k] = src[i][j][k] # if dim == 1
# self[i][j][index[i][j][k]] = src[i][j][k] # if dim == 2
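# Small illustration of gather()/scatter_() along dim=1 (illustrative); CrossEntropy
# below uses the scatter_ form to build one-hot targets:
#     src = torch.tensor([[0.1, 0.9], [0.8, 0.2]])
#     idx = torch.tensor([[1], [0]])
#     src.gather(1, idx)                      # -> tensor([[0.9000], [0.8000]])
#     torch.zeros(2, 2).scatter_(1, idx, 1)   # -> tensor([[0., 1.], [1., 0.]])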
def CrossEntropy(outputs, targets):
log_softmax_outputs = F.log_softmax(outputs, dim=1)
batch_size, class_num = outputs.shape
onehot_targets = torch.zeros(batch_size, class_num).to(targets.device).scatter_(1, targets.view(batch_size, 1), 1)
return -(log_softmax_outputs * onehot_targets).sum(dim=1).mean()
def CrossEntropy_KD(outputs, targets):
log_softmax_outputs = F.log_softmax(outputs, dim=1)
softmax_targets = F.softmax(targets, dim=1)
return -(log_softmax_outputs * softmax_targets).sum(dim=1).mean()
def change_lr2(epoch, T=20, factor=0.3, min=1e-4):
mul = 1.
if epoch < T:
mul = mul
elif epoch < T * 2:
mul = mul * factor
elif epoch < T * 3:
mul = mul * factor * factor
elif epoch < T * 4:
mul = mul * factor * factor * factor
elif epoch < T * 5:
mul = mul * factor * factor * factor * factor
else:
return min
# print(max((1 + math.cos(math.pi * (epoch % T) / T)) * mul/2, min))
return max((1 + math.cos(math.pi * (epoch % T) / T)) * mul / 2, min)
def change_lr3(epoch, T=15, factor=0.3, min=1e-4):
mul = 1.
if epoch < T * 3:
mul = mul
elif epoch < T * 7:
mul = mul * factor
elif epoch < T * 11:
mul = mul * factor * factor
else:
return min
# print(max((1 + math.cos(math.pi * epoch/ T)) * mul/2, min))
return max((1 + math.cos(math.pi * epoch / T)) * mul / 2, min)
def change_lr4(epoch, T=10, factor=0.3, min=1e-4):
mul = 1.
if epoch < T * 3:
mul = mul
elif epoch < T * 5:
mul = mul * factor
elif epoch < T * 7:
mul = mul * factor * factor
elif epoch < T * 9:
mul = mul * factor * factor * factor
else:
return min
# print(max((1 + math.cos(math.pi * epoch/ T)) * mul/2, min))
return max((1 + math.cos(math.pi * epoch / T)) * mul / 2, min)
def change_lr5(epoch, T=10, factor=0.3, min=1e-3):
mul = 1.
if epoch < T * 3:
mul = mul
elif epoch < T * 5:
mul = mul * factor
elif epoch < T * 7:
mul = mul * factor * factor
else:
return min
# print(max((1 + math.cos(math.pi * epoch/ T)) * mul/2, min))
return max((1 + math.cos(math.pi * epoch / T)) * mul / 2, min)
def change_lr6(epoch, T=6, factor=0.3, min=1e-3):
mul = 1.
if epoch < T * 3:
mul = mul
elif epoch < T * 5:
mul = mul * factor
elif epoch < T * 7:
mul = mul * factor * factor
else:
return min
# print(max((1 + math.cos(math.pi * epoch/ T)) * mul/2, min))
return max((1 + math.cos(math.pi * epoch / T)) * mul / 2, min)
def change_lr7(epoch, T=8, factor=0.3, min=1e-3):
mul = 1.
if epoch < T * 3:
mul = mul
elif epoch < T * 5:
mul = mul * factor
else:
return min
return max((1 + math.cos(math.pi * epoch / T)) * mul / 2, min)
# Note: new_lr = lr * mul
def change_lr8(epoch, T=6, factor=0.3, min=1e-2):
mul = 1.
if epoch < T * 3:
mul = mul
elif epoch < T * 5:
mul = mul * factor
elif epoch < T * 7:
mul = mul * factor * factor
else:
return min
# print(max((1 + math.cos(math.pi * epoch/ T)) * mul/2, min))
return max((1 + math.cos(math.pi * epoch / T)) * mul / 2, min)
def change_lr9(epoch, T=6, factor=0.3, min=1e-3):
mul = 1.
if epoch < T * 3:
mul = mul
elif epoch < T * 7:
mul = mul * factor
else:
return min
return max((1 + math.cos(math.pi * epoch / T)) * mul / 2, min)
def change_lr10(epoch, T=5, factor=0.3, min=1e-2):
mul = 1.
if epoch < T * 3:
mul = mul
elif epoch < T * 5:
mul = mul * factor
else:
return min
# print(max((1 + math.cos(math.pi * epoch/ T)) * mul/2, min))
return max((1 + math.cos(math.pi * epoch / T)) * mul / 2, min)
def change_lr11(epoch, T=8, min=1e-3):
mul = 1.
if epoch < T:
mul = mul
else:
return min
# print(max((1 + math.cos(math.pi * epoch/ T)) * mul/2, min))
return max((1 + math.cos(math.pi * epoch / T)) * mul / 2, min)
def change_lr12(epoch, T=6, factor=0.3, min=1e-3):
mul = 1.
if epoch < T:
mul = mul
elif epoch < T * 3:
mul = mul * factor
else:
return min
return max((1 + math.cos(math.pi * epoch / T)) * mul / 2, min)
criterion = nn.CrossEntropyLoss()
criterion = CrossEntropy
# epoches = 48
# epoches = 30
# epoches = 30
# epoches = 16
# epoches = 30
# optimizer = optim.Adam(net.parameters(), lr=5e-3)
# scheduler = optim.lr_scheduler.LambdaLR(optimizer, lr_lambda=change_lr9)
# optimizer = optim.Adam(net.parameters(), lr=1e-2)
# scheduler = optim.lr_scheduler.LambdaLR(optimizer, lr_lambda=change_lr9)
# optimizer = optim.Adam(net.parameters(), lr=1e-3)
# scheduler = optim.lr_scheduler.LambdaLR(optimizer, lr_lambda=change_lr9)
# optimizer = optim.Adam(net.parameters(), lr=1e-3)
# scheduler = optim.lr_scheduler.LambdaLR(optimizer, lr_lambda=change_lr10)
# optimizer = optim.SGD(net.parameters(), lr=1e-3)
# scheduler = optim.lr_scheduler.LambdaLR(optimizer, lr_lambda=change_lr9)
# optimizer = optim.Adam(net.parameters(), lr=1e-2)
# optimizer = optim.Adam(net.parameters(), lr=1e-3)
# scheduler = optim.lr_scheduler.LambdaLR(optimizer, lr_lambda=change_lr6)
# optimizer = optim.SGD(net.parameters(), lr=1e-1,
# momentum=0.9, weight_decay=5e-4)
# optimizer = optim.SGD(net.parameters(), lr=1e-2,
# momentum=0.9, weight_decay=5e-4)
# scheduler = optim.lr_scheduler.MultiStepLR(optimizer, milestones=[2, 8], gamma=0.1)
#
# scheduler = optim.lr_scheduler.ReduceLROnPlateau(optimizer, mode='min', factor=0.1, patience=3,
# verbose=True, threshold=1e-4, threshold_mode='rel',
# cooldown=0, min_lr=1e-7, eps=1e-8)
# scheduler = optim.lr_scheduler.LambdaLR(optimizer, lr_lambda=change_lr4)
# scheduler = optim.lr_scheduler.LambdaLR(optimizer, lr_lambda=change_lr7)
# scheduler = optim.lr_scheduler.LambdaLR(optimizer, lr_lambda=change_lr8)
# epoches = 48
# optimizer = optim.Adam(net.parameters(), lr=1e-3)
# scheduler = optim.lr_scheduler.LambdaLR(optimizer, lr_lambda=change_lr9)
# epoches = 30
# optimizer = optim.Adam(net.parameters(), lr=1e-3)
# scheduler = optim.lr_scheduler.LambdaLR(optimizer, lr_lambda=change_lr10)
epoches = 18
optimizer = optim.Adam(net.parameters(), lr=1e-3)
scheduler = optim.lr_scheduler.LambdaLR(optimizer, lr_lambda=change_lr12)
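# Illustrative note (assumed training-loop convention, not shown in this file):
# with LambdaLR the effective rate is base_lr * change_lr12(epoch), so the loop
# is expected to call optimizer.step() per batch and scheduler.step() once per
# epoch to advance the schedule.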
# epoches = 16
# optimizer = optim.Adam(net.parameters(), lr=1e-3)
# scheduler = optim.lr_scheduler.MultiStepLR(optimizer, milestones=[6, 12], gamma=0.1)
# epoches = 8
# optimizer = optim.Adam(net.parameters(), lr=1e-3)
# scheduler = optim.lr_scheduler.LambdaLR(optimizer, lr_lambda=change_lr11)
net.to(device)
# data_path = r"E:\Datasets\state-farm-distracted-driver-detection\imgs\train"
# data_path = r".\data\hymenoptera_data\train"
# datasets.ImageFolder reads an image directory; images must be stored in one sub-folder per class
# root: look for images under the path given by root
# transform: transformations applied to the PIL Image; its input is the object returned by loader
# target_transform: transformation applied to the label
# loader: how to read an image from a given path; by default it is loaded as an RGB PIL Image object
# # kaggle dataset 100
# train_dataset = MyDataset("data/imagenet/imagenet2012_100_train.txt", train_transform)
# val_dataset = MyDataset("data/imagenet/imagenet2012_100_val.txt", val_transform)
# # train_dataloader = DataLoader(dataset=train_dataset,
# # batch_size=128,
# # shuffle=True,
# # num_workers=0)
# #
# #
# # val_dataloader = DataLoader(dataset=val_dataset,
# # batch_size=128,
# # shuffle=True,
# # num_workers=0)
#
# train_dataloader = DataLoader(dataset=train_dataset,
# batch_size=96,
# shuffle=True,
# num_workers=0)
#
#
# val_dataloader = DataLoader(dataset=val_dataset,
# batch_size=96,
# shuffle=True,
# num_workers=0)
# # # kaggle dataset
# # train_dataset = datasets.ImageFolder(root=data_path, transform=data_transform)
# train_dataset = MyDataset("data/txt/kg_train224.txt", train_transform)
# val_dataset = MyDataset("data/txt/kg_val224.txt", val_transform)
# train_dataloader = DataLoader(dataset=train_dataset,
# batch_size=64,
# shuffle=True,
# num_workers=0)
#
# val_dataloader = DataLoader(dataset=val_dataset,
# batch_size=64,
# shuffle=True,
# num_workers=0)
#
# kaggle dataset 2
# train_dataset = MyDataset("data/txt/kg_train2.txt", train_transform)
# val_dataset = MyDataset("data/txt/kg_val2.txt", val_transform)
# train_dataloader = DataLoader(dataset=train_dataset,
# batch_size=64,
# shuffle=True,
# num_workers=0)
#
#
# val_dataloader = DataLoader(dataset=val_dataset,
# batch_size=64,
# shuffle=True,
# num_workers=0)
# train_dataset = MyDataset("data/txt/kg_train2_224.txt", train_transform)
# val_dataset = MyDataset("data/txt/kg_val2_224.txt", val_transform)
# train_dataloader = DataLoader(dataset=train_dataset,
# batch_size=64,
# shuffle=True,
# num_workers=0)
#
#
# val_dataloader = DataLoader(dataset=val_dataset,
# batch_size=64,
# shuffle=True,
# num_workers=0)
# # AUC v1 dataset
# train_dataset = MyDataset("data/txt/aucv1_trainVal224.txt", train_transform)
# val_dataset = MyDataset("data/txt/aucv1_test224.txt", val_transform)
# train_dataloader = DataLoader(dataset=train_dataset,
# batch_size=64,
# shuffle=True,
# num_workers=0)
# val_dataloader = DataLoader(dataset=val_dataset,
# batch_size=64,
# shuffle=True,
# num_workers=0)
# AUC v2 dataset
# train_dataset = MyDataset("data/txt/auc_trainVal224.txt", train_transform)
# val_dataset = MyDataset("data/txt/auc_test224.txt", val_transform)
# train_dataloader = DataLoader(dataset=train_dataset,
# batch_size=64,
# shuffle=True,
# num_workers=0)
# val_dataloader = DataLoader(dataset=val_dataset,
# batch_size=64,
# shuffle=True,
# num_workers=0)
# # AUC v2 dataset
# train_dataset = MyDataset("data/txt/auc_trainVal224.txt", train_transform)
# val_dataset = MyDataset("data/txt/auc_test224.txt", val_transform)
# train_dataloader = DataLoader(dataset=train_dataset,
# batch_size=64,
# shuffle=True,
# num_workers=0)
# val_dataloader = DataLoader(dataset=val_dataset,
# batch_size=64,
# shuffle=True,
# num_workers=0)
# drive119
# train_dataset = MyDataset("./data/train11_9.txt", train_transform)
# train_dataloader = DataLoader(dataset=train_dataset,
# batch_size=32,
# shuffle=True,
# num_workers=0)
# val_dataset = MyDataset("./data/val11_9.txt", val_transform)
# val_dataloader = DataLoader(dataset=val_dataset,
# batch_size=32,
# shuffle=True,
# num_workers=0)
# # drive224: drive119 images pre-resized to 224 to speed up training
# train_dataset = MyDataset("./data/train224.txt", train_transform)
# train_dataloader = DataLoader(dataset=train_dataset,
# batch_size=32,
# shuffle=True,
# num_workers=0)
# train_dataset = MyDataset("./data/kgAddmy_add.txt", train_transform)
# train_dataloader = DataLoader(dataset=train_dataset,
# batch_size=64,
# shuffle=True,
# num_workers=0)
# val_dataset = MyDataset("./data/val224.txt", val_transform)
# val_dataset = MyDataset("./data/test11_9s.txt", val_transform)
# val_dataloader = DataLoader(dataset=val_dataset,
# batch_size=32,
# shuffle=True,
# num_workers=0)
#
# test_dataset = MyDataset("./data/test11_9s.txt", val_transform)
# test_dataloader = DataLoader(dataset=test_dataset,
# batch_size=32,
# shuffle=True,
# num_workers=0)
# # dataset 11_16
# # train_dataset = MyDataset("data/kgAddmy_add.txt", train_transform)
# # train_dataset = MyDataset("data/total_train.txt", train_transform)
# # train_dataset = MyDataset("data/train224_116_119.txt", train_transform)
# # train_dataset = MyDataset("data/txt/116_119trainAddcrop224.txt", train_transform)
# train_dataset = MyDataset("data/txt/116_119trainAddcrop224_kg.txt", train_transform)
# # train_dataset = MyDataset("data/txt/116_traincrop224.txt", train_transform)
# # train_dataset = MyDataset("data/txt/116_119traincrop224.txt", train_transform)
# # train_dataset = MyDataset("data/txt/116_119traincrop224_kg.txt", train_transform)
# # train_dataset = MyDataset("data/txt/116_119traincrop224_kg_auc2.txt", train_transform)
# # train_dataset = MyDataset("data/train224_11_16_train.txt", train_transform)
# train_dataloader = DataLoader(dataset=train_dataset,
# batch_size=64,
# # batch_size=32,
# shuffle=True,
# num_workers=0)
# val_dataset = MyDataset("data/test224_11_16.txt", val_transform)
# # val_dataset = MyDataset("data/txt/116_testcrop224.txt", val_transform)
# # val_dataset = MyDataset("data/train224_11_16_val.txt", val_transform)
# val_dataloader = DataLoader(dataset=val_dataset,
# batch_size=64,
# shuffle=True,
# num_workers=0)
# test_dataset = MyDataset("data/test224_11_16.txt", val_transform)
# test_dataloader = DataLoader(dataset=test_dataset,
# batch_size=32,
# shuffle=True,
# num_workers=0)
#
# # # dataset kg_total
# train_dataset = MyDataset("data/kg_total_add_t.txt", train_transform)
# train_dataloader = DataLoader(dataset=train_dataset,
# batch_size=128,
# shuffle=True,
# num_workers=0)
# val_dataset = MyDataset("data/test224_11_16.txt", val_transform)
# # val_dataset = MyDataset("data/kg_total_add_v.txt", val_transform)
# val_dataloader = DataLoader(dataset=val_dataset,
# batch_size=128,
# shuffle=True,
# num_workers=0)
# # dataset 12_23
# train_dataset = MyDataset("data/txt/12_23_2_train224.txt", train_transform)
# val_dataset = MyDataset("data/txt/12_23_2_test224.txt", val_transform)
# train_dataset = MyDataset("data/txt/12_23_1_train224.txt", train_transform)
# val_dataset = MyDataset("data/txt/12_23_1_test224.txt", val_transform)
# train_dataset = MyDataset("data/txt/12_23_12_train224.txt", train_transform)
# val_dataset = MyDataset("data/txt/12_23_12_test224.txt", val_transform)
# train_dataset = MyDataset("data/txt/12_23_12_addpre_train224.txt", train_transform)
# val_dataset = MyDataset("data/txt/12_23_12_addpre_test224.txt", val_transform)
# train_dataset = MyDataset("data/txt/12_23_12_addpre_train224_kg2my.txt", train_transform)
# val_dataset = MyDataset("data/txt/12_23_12_addpre_test224.txt", val_transform)
# train_dataset = MyDataset("data/txt/12_23_12_addpre_train224_kg2my_aucv2_my.txt", train_transform)
# val_dataset = MyDataset("data/txt/12_23_12_addpre_test224.txt", val_transform)
# crop 12_23
# train_dataset = MyDataset("data/txt/12_23_12_addpre_train_crop224.txt", train_transform)
# train_dataset = MyDataset("data/txt/12_23_12_addpre_train_crop224_kg2my.txt", train_transform)
# train_dataset = MyDataset("data/txt/12_23_12_addpre_train_crop224_kg2my_aucv2_my.txt", train_transform)
# val_dataset = MyDataset("data/txt/12_23_12_addpre_test_crop224.txt", val_transform)
# train_dataset = MyDataset("data/txt/12_23_12_addpre_train224_addcrop.txt", train_transform)
# train_dataset = MyDataset("data/txt/12_23_12_addpre_train224_kg2my_aucv2_my_addcrop.txt", train_transform)
# val_dataset = MyDataset("data/txt/12_23_12_addpre_test224_addcrop.txt", val_transform)
# # class6
# train_dataset = MyDataset("data/txt6/12_23_12_addpre_train224_6.txt", train_transform)
# val_dataset = MyDataset("data/txt6/12_23_12_addpre_test224_6.txt", val_transform)
# train_dataset = MyDataset("data/txt6/12_23_12_addpre_train224_addcrop_6.txt", train_transform)
# val_dataset = MyDataset("data/txt6/12_23_12_addpre_test224_addcrop_6.txt", val_transform)
# train_dataset = MyDataset("data/txt6/12_23_12_addpre_train224_kg2my_aucv2_my_addcrop_6.txt", train_transform)
# val_dataset = MyDataset("data/txt6/12_23_12_addpre_test224_addcrop_6.txt", val_transform)
# train_dataset = MyDataset("data/txt6/12_23_12_addpre_train224_kg2my_aucv2_my_6.txt", train_transform)
# val_dataset = MyDataset("data/txt6/12_23_12_addpre_test224_6.txt", val_transform)
# class7
# train_dataset = MyDataset("data/txt7/12_23_12_addpre_train224_7.txt", train_transform)
# val_dataset = MyDataset("data/txt7/12_23_12_addpre_test224_7.txt", val_transform)
# train_dataset = MyDataset("data/txt7/12_23_12_addpre_train224_addcrop_7.txt", train_transform)
# val_dataset = MyDataset("data/txt7/12_23_12_addpre_test224_addcrop_7.txt", val_transform)
# train_dataset = MyDataset("data/txt7/12_23_12_addpre_train224_kg2my_aucv2_my_addcrop_7.txt", train_transform)
# val_dataset = MyDataset("data/txt7/12_23_12_addpre_test224_addcrop_7.txt", val_transform)
# train_dataset = MyDataset("data/txt7/12_23_12_addpre_train224_kg2my_aucv2_my_7.txt", train_transform)
# val_dataset = MyDataset("data/txt7/12_23_12_addpre_test224_7.txt", val_transform)
# txt_raw
# train_dataset = MyDataset("data/txt_raw/total_train.txt", train_transform)
# val_dataset = MyDataset("data/txt_raw/total_test.txt", val_transform)
# train_dataset = MyDataset("data/txt_raw/total_train_c6.txt", train_transform)
# val_dataset = MyDataset("data/txt_raw/total_test_c6.txt", val_transform)
#
# train_dataset = MyDataset("data/txt_raw/total_train_c7.txt", train_transform)
# val_dataset = MyDataset("data/txt_raw/total_test_c7.txt", val_transform)
#3_23
# train_dataset = MyDataset("data/txt_3_23/bus_train.txt", train_transform)
# val_dataset = MyDataset("data/txt_3_23/bus_test.txt", val_transform)
# train_dataset = MyDataset("data/txt_3_23/he_train.txt", train_transform)
# val_dataset = MyDataset("data/txt_3_23/he_test.txt", val_transform)
# train_dataset = MyDataset("data/txt_3_23/wen_train.txt", train_transform)
# val_dataset = MyDataset("data/txt_3_23/wen_test.txt", val_transform)
# train_dataset = MyDataset("data/txt_3_23/he_wen_train.txt", train_transform)
# val_dataset = MyDataset("data/txt_3_23/he_wen_test.txt", val_transform)
# # 3_25
# train_dataset = MyDataset("data/txt_3_25/train325.txt", train_transform)
# val_dataset = MyDataset("data/txt_3_25/test325.txt", val_transform)
# # 3_25 crop
# train_dataset = MyDataset("data/txt_3_25/train325_crop.txt", train_transform)
# val_dataset = MyDataset("data/txt_3_25/test325_crop.txt", val_transform)
# 3-25_all
# train_dataset = MyDataset("data/txt_3_25/train325_all.txt", train_transform)
# val_dataset = MyDataset("data/txt_3_25/test325_all.txt", val_transform)
# # 3-25_all crop
# train_dataset = MyDataset("data/txt_raw_crop/total_train_crop.txt", train_transform)
# val_dataset = MyDataset("data/txt_raw_crop/total_test_crop.txt", val_transform)
# all class7_2
# train_dataset = MyDataset("data/txt_raw/total_train_c7_2.txt", train_transform)
# val_dataset = MyDataset("data/txt_raw/total_test_c7_2.txt", val_transform)
# all class72_crop
# train_dataset = MyDataset("data/txt_raw_crop/total_train_crop_72.txt", train_transform)
# val_dataset = MyDataset("data/txt_raw_crop/total_test_crop_72.txt", val_transform)
# # all class73_crop
# train_dataset = MyDataset("data/txt_raw_crop/total_train_crop_73.txt", train_transform)
# val_dataset = MyDataset("data/txt_raw_crop/total_test_crop_73.txt", val_transform)
# # all class8
# train_dataset = MyDataset("data/txt_raw/total_train_8.txt", train_transform)
# val_dataset = MyDataset("data/txt_raw/total_test_8.txt", val_transform)
# # all class8_crop
# train_dataset = MyDataset("data/txt_raw_crop/total_train_crop_8.txt", train_transform)
# val_dataset = MyDataset("data/txt_raw_crop/total_test_crop_8.txt", val_transform)
# all class9
# train_dataset = MyDataset("data/txt_raw/total_train.txt", train_transform)
# val_dataset = MyDataset("data/txt_raw/total_test.txt", val_transform)
# # # all class9_crop
train_dataset = MyDataset("data/txt_raw_crop/total_train_crop.txt", train_transform)
val_dataset = MyDataset("data/txt_raw_crop/total_test_crop.txt", val_transform)
train_dataloader = DataLoader(dataset=train_dataset,
batch_size=64,
shuffle=True,
num_workers=0)
val_dataloader = DataLoader(dataset=val_dataset,
batch_size=64,
shuffle=True,
num_workers=0)
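# num_workers=0 keeps data loading in the main process (the simplest setting, especially on Windows); a small positive value would typically speed up loading, assuming MyDataset is picklable.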
# train_dataloader = DataLoader(dataset=train_dataset,
# batch_size=32,
# shuffle=True,
# num_workers=0)
# val_dataloader = DataLoader(dataset=val_dataset,
# batch_size=32,
# shuffle=True,
# num_workers=0)
# Training
def train(epoch):
print('\nEpoch: %d' % (epoch + 1))
net.train()
global best_acc
train_loss = 0
correct = 0
total = 0
for batch_idx, (inputs, targets) in enumerate(train_dataloader):
inputs, targets = inputs.to(device), targets.to(device)
# print("inputs.shape",inputs.shape)
optimizer.zero_grad()
outputs = net(inputs)
loss = criterion(outputs, targets)
loss.backward()
optimizer.step()
train_loss += loss.item()
_, predicted = outputs.max(1)
total += targets.size(0)
correct += predicted.eq(targets).sum().item()
average_loss = train_loss / (batch_idx + 1)
train_acc = correct / total
progress_bar(batch_idx, len(train_dataloader), 'Loss: %.3f | Acc: %.3f%% (%d/%d)'
% (average_loss, 100. * train_acc, correct, total))
lr = optimizer.state_dict()['param_groups'][0]['lr']
scheduler.step()
# scheduler.step(average_loss)
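# The scheduler is stepped once per epoch here; the commented scheduler.step(average_loss) variant above is the ReduceLROnPlateau form, assuming such a scheduler is configured earlier in the script.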
return average_loss, train_acc, lr
# # Save checkpoint.
# acc = 100.*correct/total
# if acc > best_acc:
# print('Saving..')
# state = {
# 'net': net.state_dict(),
# 'acc': acc,
# 'epoch': epoch,
# }
# if not os.path.isdir('checkpoint/B0'):
# os.mkdir('checkpoint/B0')
# torch.save(state, './checkpoint/B0/111/B0_acc={:.4f}.pth'.format(acc))
# best_acc = acc
# Save checkpoint.
# kaggle v1
# savepath = 'checkpoint/kaggle/v1/mobilenetv2/pre/000/' # randcrop 16 rotation 10 colorjit 0.5
# savepath = 'checkpoint/kaggle/v1/mobilenetv2/nopre/000/' # randcrop 16 rotation 10 colorjit 0.5
# kaggle v2
# savepath = 'checkpoint/kaggle/v2/mobilenetv2/000/' # no augment
# savepath = 'checkpoint/kaggle/v2/mobilenetv2/111/' # rotation 10
# savepath = 'checkpoint/kaggle/v2/mobilenetv2/222/' # randcrop 16
# savepath = 'checkpoint/kaggle/v2/mobilenetv2/333/' # randcrop 16 rotation 10
# savepath = 'checkpoint/kaggle/v2/mobilenetv2/444/' # randcrop 16 rotation 10 colorjit 0.5
# savepath = 'checkpoint/kaggle/v2/mobilenetv2/555/' # randcrop 16 rotation 10 colorjit 0.2
# savepath = 'checkpoint/kaggle/v2/mobilenetv2/666/' # randcrop 16 rotation 30 colorjit 0.5
# savepath = 'checkpoint/kaggle/v2/mobilenetv2/777/' # randcrop 16 rotation 20 colorjit 0.5
# savepath = 'checkpoint/kaggle/v2/mobilenetv2/888/' # randcrop 16 rotation 10 colorjit 0.5
# savepath = 'checkpoint/kaggle/v2/mobilenetv2/pre/000/' # randcrop 16 rotation 10 colorjit 0.5
# savepath = 'checkpoint/kaggle/v2/mobilenetv2/pre/111/' # randcrop 16 rotation 20 colorjit 0.5
# AUC v2
# savepath = 'checkpoint/AUC/v2/mobilenetv2/000/'
# savepath = 'checkpoint/AUC/v2/mobilenetv2/111/'
# savepath = 'checkpoint/AUC/v2/mobilenetv2/222/'
# savepath = 'checkpoint/AUC/v2/mobilenetv2/333/'
# savepath = 'checkpoint/AUC/v2/mobilenetv2/444/'
# savepath = 'checkpoint/AUC/v2/mobilenetv2/pre/444/' # randcrop 16 rotation 10 colorjit 0.5
# savepath = 'checkpoint/AUC/v2/mobilenetv2/pre/000/' # randcrop 16 rotation 10 colorjit 0.5
# savepath = 'checkpoint/AUC/v2/mnext/000/'
# savepath = 'checkpoint/AUC/v2/resnet50/000/'
# savepath = 'checkpoint/AUC/v2/resnet50/111/'
# AUC v1
# savepath = 'checkpoint/AUC/v1/mobilenetv2/000/'
# savepath = 'checkpoint/AUC/v1/mobilenetv2/111/'
# savepath = 'checkpoint/AUC/v1/mobilenetv2/222/'
# savepath = 'checkpoint/AUC/v1/mobilenetv2/333/'
# savepath = 'checkpoint/AUC/v1/mobilenetv2/444/'
# savepath = 'checkpoint/AUC/v1/mnext/000/'
# savepath = 'checkpoint/AUC/v1/ghostnet/000/'
# savepath = 'checkpoint/AUC/v1/resnet50/000/'
# 11_16
# savepath = 'checkpoint/data_11_16/mobilenetv2/nopre/333/' # randcrop 16 rotation 10 colorjit 0.5
# savepath = 'checkpoint/data_11_16/mobilenetv2/pre/000/' # randcrop 16 rotation 10 colorjit 0.5
# savepath = 'checkpoint/data_11_16/mobilenetv2/pre/111/' # randcrop 16 rotation 10 colorjit 0.5
# savepath = 'checkpoint/data_11_16/mobilenetv2/pre/222/' # randcrop 16 rotation 10 colorjit 0.5
# savepath = 'checkpoint/data_11_16/mobilenetv2/pre/333/' # randcrop 16 rotation 10 colorjit 0.5
# savepath = 'checkpoint/data_11_16/ghostnet/pre/000/' # randcrop 16 rotation 10 colorjit 0.5 change_lr8 1e-3
# savepath = 'checkpoint/data_11_16/ghostnet/pre/111/' # randcrop 16 rotation 10 colorjit 0.5 change_lr6 1e-3
# savepath = 'checkpoint/data_11_16/ghostnet/pre/222/' # randcrop 16 rotation 10 colorjit 0.5 change_lr6 1e-3 addcrop
# savepath = 'checkpoint/data_11_16/ghostnet/pre/333/' # randcrop 16 rotation 10 colorjit 0.5 change_lr6 1e-3 addcrop
# savepath = 'checkpoint/data_11_16/mnext/pre/000/' # randcrop 16 rotation 10 colorjit 0.5 change_lr8
# savepath = 'checkpoint/data_11_16/mnext/pre/111/' # randcrop 16 rotation 10 colorjit 0.5 change_lr8
# savepath = 'checkpoint/data_11_16/mnext/pre/222/' # randcrop 16 rotation 10 colorjit 0.5 change_lr6 1e-3 addcrop
# crop224
# savepath = 'checkpoint/data_11_16/mobilenetv2/pre/444/' # randcrop 16 rotation 10 colorjit 0.5
# savepath = 'checkpoint/data_11_16/mobilenetv2/pre/555/' # randcrop 16 rotation 10 colorjit 0.5
# savepath = 'checkpoint/data_11_16/mobilenetv2/pre/666/' # randcrop 16 rotation 10 colorjit 0.5 160
# savepath = 'checkpoint/data_11_16/mobilenetv2/pre/777/' # randcrop 16 rotation 10 colorjit 0.5 add kg
# savepath = 'checkpoint/data_11_16/mobilenetv2/pre/888/' # randcrop 16 rotation 10 colorjit 0.5 add kg 160
# savepath = 'checkpoint/data_11_16/mobilenetv2/pre/999/' # randcrop 16 rotation 10 colorjit 0.5 add kg auc2
# savepath = 'checkpoint/data_11_16/mobilenetv2/pre/0/000/' # 16 rotation 10 colorjit 0.5 224 116_119
# savepath = 'checkpoint/data_11_16/mobilenetv2/pre/0/111/' # randcrop 16 rotation 10 colorjit 0.5 224 116_119 add crop
# savepath = 'checkpoint/data_11_16/mobilenetv2/pre/0/222/' # add randcrop 16 rotation 10 colorjit 0.5 224 116_119 add crop kg
# savepath = 'checkpoint/data_11_16/mobilenetv2/pre/0/333/' # randcrop 16 rotation 10 colorjit 0.5
# savepath = 'checkpoint/data_11_16/mobilenetv2/pre/0/444/' # randcrop 16 rotation 10 colorjit 0.5
# savepath = 'checkpoint/data_11_16/mobilenetv2/pre/0/555/' # randcrop 16 rotation 10 colorjit 0.5
# savepath = 'checkpoint/data_11_16/mobilenetv2/pre/0/666/' # randcrop 16 rotation 10 colorjit 0.5
# savepath = 'checkpoint/data_11_16/shufflenetv2/pre/000/' # add randcrop 16 rotation 10 colorjit 0.5 224 116_119 add crop kg
# savepath = 'checkpoint/data_11_16/shufflenetv2/pre/000/' # randcrop 16 rotation 10 colorjit 0.5
# savepath = 'checkpoint/data_11_16/shufflenetv2/pre/111/' # randcrop 16 rotation 10 colorjit 0.5
# savepath = 'checkpoint/data_11_16/shufflenetv2/pre/222/' # randcrop 16 rotation 10 colorjit 0.5
# savepath = 'checkpoint/data_11_16/shufflenetv2/pre/333/' # randcrop 16 rotation 10 colorjit 0.5
# savepath = 'checkpoint/data_11_16/shufflenetv2/pre/444/' # randcrop 16 rotation 10 colorjit 0.5
# savepath = 'checkpoint/data_11_16/shufflenetv2/pre/555/' # randcrop 16 rotation 10 colorjit 0.5
# imagenet
# savepath = 'checkpoint/imagenet/imagenet100/mobilenetv2/000/' # randcrop 16 rotation 10 colorjit 0.5
# savepath = 'checkpoint/imagenet/imagenet100/mobilenetv2/111/' # randcrop 16 rotation 10 colorjit 0.5
# savepath = 'checkpoint/imagenet/imagenet100/ghostnet/000/' # randcrop 16 rotation 10 colorjit 0.5
# savepath = 'checkpoint/imagenet/imagenet100/mnext/000/' # randcrop 16 rotation 10 colorjit 0.5
# savepath = 'checkpoint/imagenet/imagenet100/my_mnextv2/000/' # randcrop 16 rotation 10 colorjit 0.5
# dataset 12_23
# savepath = 'checkpoint/data_12_23/mobilenetv2/000/' # randcrop 16 rotation 10 colorjit 0.5 12_23_2 change_lr6 1e-3
# savepath = 'checkpoint/data_12_23/mobilenetv2/111/' # randcrop 16 rotation 10 colorjit 0.5 12_23_1 change_lr6 1e-3
# savepath = 'checkpoint/data_12_23/mobilenetv2/222/' # randcrop 16 rotation 10 colorjit 0.5 12_23_12 change_lr6 1e-3
# savepath = 'checkpoint/data_12_23/mobilenetv2/333/' # randcrop 16 rotation 10 colorjit 0.5 12_23_12_addpre change_lr6 1e-3
# savepath = 'checkpoint/data_12_23/mobilenetv2/444/' # randcrop 16 rotation 10 colorjit 0.5 12_23_12_addpre kg2my change_lr6 1e-3
# savepath = 'checkpoint/data_12_23/mobilenetv2/555/' # randcrop 16 rotation 10 colorjit 0.5 12_23_12_addpre change_lr9 5e-3
# savepath = 'checkpoint/data_12_23/mobilenetv2/666/' # randcrop 16 rotation 10 colorjit 0.5 12_23_12_addpre change_lr9 1e-3
# savepath = 'checkpoint/data_12_23/mobilenetv2/777/' # randcrop 16 rotation 10 colorjit 0.5 12_23_12_addpre kg2my change_lr9 1e-3
# savepath = 'checkpoint/data_12_23/mobilenetv2/888/' # randcrop 16 rotation 10 colorjit 0.5 12_23_12_addpre kg2my aucv2 change_lr9 1e-3
# savepath = 'checkpoint/data_12_23/mobilenetv2/0/999/' # randcrop 16 rotation 10 colorjit 0.5 12_23_12_addpre kg2my aucv2 change_lr9 sgd 1e-3
# savepath = 'checkpoint/data_12_23/mobilenetv2/0/000/' # randcrop 16 rotation 10 colorjit 0.5 12_23_12_addpre kg2my aucv2 change_lr9 sgd 1e-1
# savepath = 'checkpoint/data_12_23/mobilenetv2/0/111/' # randcrop 16 rotation 10 colorjit 0.5 12_23_12_addpre kg2my aucv2 change_lr9
# savepath = 'checkpoint/data_12_23/mobilenetv2/0/222/' # flip randcrop 16 rotation 10 colorjit 0.5 12_23_12_addpre change_lr9
# savepath = 'checkpoint/data_12_23/mobilenetv2/0/333/' # randcrop 16 rotation 10 colorjit 0.5 12_23_12_addpre kg2my aucv2 change_lr9
# savepath = 'checkpoint/data_12_23/mobilenetv2/0/444/' # change_lr10 brightness=0.8 mypre
# savepath = 'checkpoint/data_12_23/mobilenetv2/0/555/' # change_lr10 brightness=0.8 mypre
# savepath = 'checkpoint/data_12_23/mobilenetv2/nopre/000/' # nopre randcrop 16 rotation 10 colorjit 0.5 12_23_12_change_lr9 1e-3
# savepath = 'checkpoint/data_12_23/mobilenetv2/nopre/111/' # pre randcrop 16 rotation 10 colorjit 0.5 12_23_12_change_lr9 1e-3
# savepath = 'checkpoint/data_12_23/mobilenetv2/nopre/0/000/' # nopre randcrop 16 rotation 10 colorjit 0.5 12_23_12_change_lr9 1e-2 cbam c=64
# savepath = 'checkpoint/data_12_23/mobilenetv2/nopre/0/111/' # nopre randcrop 16 rotation 10 colorjit 0.5 12_23_12_change_lr9 1e-3 cbam c=64
# savepath = 'checkpoint/data_12_23/mobilenetv2/nopre/1/000/' # nopre randcrop 16 rotation 10 colorjit 0.5 12_23_12_change_lr9 1e-2 cbam c=16
# savepath = 'checkpoint/data_12_23/mobilenetv2/nopre/1/111/' # pre randcrop 16 rotation 10 colorjit 0.5 12_23_12_change_lr9 1e-2 cbam c=16
# savepath = 'checkpoint/data_12_23/mobilenetv2/nopre/1/222/' # pre randcrop 16 rotation 10 colorjit 0.5 12_23_12_ addpre kg2my aucv2 change_lr9 1e-2 cbam c=16
# savepath = 'checkpoint/data_12_23/mobilenetv2/nopre/1/333/' # pre randcrop 16 rotation 10 colorjit 0.5 12_23_12_ addpre kg2my aucv2 change_lr9 1e-2 cbam c=64
# savepath = 'checkpoint/data_12_23/mnext/000/' # randcrop 16 rotation 10 colorjit 0.5 12_23_12_addpre kg2my aucv2 change_lr9
# savepath = 'checkpoint/data_12_23/mnext/111/' # randcrop 16 rotation 10 colorjit 0.5 12_23_12_addpre change_lr9
# dataset 12_23
# savepath = 'checkpoint/data_12_23/mobilenetv2/crop/000/' # crop randcrop 16 rotation 10 colorjit 0.5 12_23_2 change_lr6 1e-3
# savepath = 'checkpoint/data_12_23/mobilenetv2/crop/111/' # crop randcrop 16 rotation 10 colorjit 0.5 12_23_2 change_lr6 1e-3 addpre kg2my
# savepath = 'checkpoint/data_12_23/mobilenetv2/crop/222/' # crop randcrop 16 rotation 10 colorjit 0.5 12_23_2 change_lr6 1e-3 addpre kg2my aucv2
# savepath = 'checkpoint/data_12_23/mobilenetv2/crop/333/' # randcrop 16 rotation 10 colorjit 0.5 12_23_2 change_lr6 1e-3 addpre addcrop
# savepath = 'checkpoint/data_12_23/mobilenetv2/crop/444/' # randcrop 16 rotation 10 colorjit 0.5 12_23_2 change_lr6 1e-3 addpre kg2my aucv2 addcrop
# # dataset class6
# # savepath = 'checkpoint/data_12_23/class6/mobilenetv2/000/' # randcrop 16 rotation 10 colorjit 0.5 12_23_2 change_lr9 1e-3
# savepath = 'checkpoint/data_12_23/class6/mobilenetv2/111/' # randcrop 16 rotation 10 colorjit 0.5 12_23_2 change_lr9 1e-3 addcrop
# savepath = 'checkpoint/data_12_23/class6/mobilenetv2/222/' # randcrop 16 rotation 10 colorjit 0.5 12_23_2 change_lr9 1e-3 addpre kg2my aucv2 addcrop
# savepath = 'checkpoint/data_12_23/class6/mobilenetv2/333/' # randcrop 16 rotation 10 colorjit 0.5 12_23_2 change_lr9 1e-3 addpre kg2my aucv2
# dataset class7
# savepath = 'checkpoint/data_12_23/class7/mobilenetv2/000/' # randcrop 16 rotation 10 colorjit 0.5 12_23_2 change_lr9 1e-3
# savepath = 'checkpoint/data_12_23/class7/mobilenetv2/111/' # randcrop 16 rotation 10 colorjit 0.5 12_23_2 change_lr9 1e-3 addcrop
# savepath = 'checkpoint/data_12_23/class7/mobilenetv2/222/' # randcrop 16 rotation 10 colorjit 0.5 12_23_2 change_lr9 1e-3 addpre kg2my aucv2 addcrop
# savepath = 'checkpoint/data_12_23/class7/mobilenetv2/333/' # randcrop 16 rotation 10 colorjit 0.5 12_23_2 change_lr9 1e-3 addpre kg2my aucv2
# dataset txt_raw
# savepath = 'checkpoint/txt_raw/mobilenetv2/224/000/' # change_lr9 total_test
# savepath = 'checkpoint/txt_raw/mobilenetv2/224/111/' # change_lr9 total_test
# savepath = 'checkpoint/txt_raw/mobilenetv2/224/222/' # change_lr9 total_test 240->224
# savepath = 'checkpoint/txt_raw/mobilenetv2/224/333/' # change_lr9 total_test
# savepath = 'checkpoint/txt_raw/mobilenetv2/224/444/' # change_lr9 total_test 240->224
# savepath = 'checkpoint/txt_raw/mobilenetv2/320/000/' # change_lr9 total_test
# savepath = 'checkpoint/txt_raw/mobilenetv2/000/' # change_lr10 total_test 240->224
# savepath = 'checkpoint/txt_raw/mobilenetv2/111/' # change_lr10 total_test
# savepath = 'checkpoint/txt_raw/mobilenetv2/222/' # change_lr10 total_test brightness=0.8
# savepath = 'checkpoint/txt_raw/mobilenetv2/class6/000/' # change_lr10 total_test 240->224 c9
# savepath = 'checkpoint/txt_raw/mobilenetv2/class6/111/' # change_lr10 total_test 240->224
# savepath = 'checkpoint/txt_raw/mobilenetv2/class6/222/' # change_lr10 total_test
# savepath = 'checkpoint/txt_raw/mobilenetv2/class6/333/' # change_lr10 total_test brightness=0.8
# savepath = 'checkpoint/txt_raw/mobilenetv2/class7/000/' # change_lr10 total_test 240->224
# savepath = 'checkpoint/txt_raw/mobilenetv2/class7/111/' # change_lr10 total_test
# savepath = 'checkpoint/txt_raw/mobilenetv2/class7/222/' # change_lr10 total_test brightness=0.8
# 3_23
# savepath = 'checkpoint/data_3_23/mobilenetv2/000/' # change_lr10 brightness=0.8
# savepath = 'checkpoint/data_3_23/mobilenetv2/111/' # change_lr10 brightness=0.5 240>>224
# savepath = 'checkpoint/data_3_23/mobilenetv2/222/' # change_lr10 no data augment
# savepath = 'checkpoint/data_3_23/mobilenetv2/he/000/' # change_lr10 brightness=0.5 240>>224
# savepath = 'checkpoint/data_3_23/mobilenetv2/he/111/' # change_lr10 brightness=0.8
# savepath = 'checkpoint/data_3_23/mobilenetv2/wen/000/' # change_lr10 brightness=0.5 240>>224
# savepath = 'checkpoint/data_3_23/mobilenetv2/wen/111/' # change_lr10 brightness=0.8
# savepath = 'checkpoint/data_3_23/mobilenetv2/he_wen/000/' # change_lr10 brightness=0.5 240>>224
# savepath = 'checkpoint/data_3_23/mobilenetv2/he_wen/111/' # change_lr10 brightness=0.8
# savepath = 'checkpoint/data_3_23/mobilenetv2/he_wen/222/' # change_lr10 brightness=0.8 nopre
# savepath = 'checkpoint/data_3_23/mobilenetv2/he_wen/333/' # change_lr10 brightness=0.8 mypre
# savepath = 'checkpoint/data_3_23/mobilenetv2/he_wen/333/' # change_lr10 brightness=0.5 mypre
# savepath = 'checkpoint/data_3_23/mobilenetv2/he_wen/444/' # change_lr19 brightness=0.5
# 325
# savepath = 'checkpoint/data_3_25/mobilenetv2/000/' # change_lr10 brightness=0.5
# savepath = 'checkpoint/data_3_25/mobilenetv2/111/' # change_lr9 brightness=0.5
# 325 crop
# savepath = 'checkpoint/data_3_25_crop/mobilenetv2/000/' # change_lr10 brightness=0.5
# savepath = 'checkpoint/data_3_25_crop/mobilenetv2/111/' # change_lr9 brightness=0.5
# savepath = 'checkpoint/data_3_25_crop/mobilenetv2/222/' # change_lr9 brightness=0.5 nopre
# 325 all
# savepath = 'checkpoint/data_3_25_all/mobilenetv2/111/' # change_lr9 brightness=0.5
# savepath = 'checkpoint/data_3_25_all/mobilenetv2/222/' # change_lr9 brightness=0.5 nopre
# savepath = 'checkpoint/data_3_25_all/mobilenetv2/333/' # change_lr9 brightness=0.5 crop
# savepath = 'checkpoint/data_3_25_all/mobilenetv2/444/' # change_lr9 brightness=0.5 crop nopre
# savepath = 'checkpoint/data_3_25_all/mobilenetv2/555/' # change_lr9 brightness=0.5 crop 240->224
# savepath = 'checkpoint/data_3_25_all/mobilenetv2/666/' # change_lr9 brightness=0.5 crop 240->224 nopre
# savepath = 'checkpoint/data_3_25_all/mobilenetv2/777/' # change_lr9 brightness=0.5 240->224
# savepath = 'checkpoint/data_3_25_all/mobilenetv2/888/' # change_lr9 brightness=0.5 240->224 nopre
# savepath = 'checkpoint/data_3_25_all/mobilenetv2/16/000/' # 16 milestones brightness=0.5 240->224
# savepath = 'checkpoint/data_3_25_all/mobilenetv2/16/111/' # 16 milestones brightness=0.5 240->224 crop
# savepath = 'checkpoint/data_3_25_all/mobilenetv2/18/000/' # 18 change_lr12 brightness=0.5 240->224 crop
# savepath = 'checkpoint/data_3_25_all/mobilenetv2/class9/160/000/' # 18 change_lr12 milestones brightness=0.5 160 no randcrop
# savepath = 'checkpoint/data_3_25_all/mobilenetv2/class9/160/111/' # 18 change_lr12 milestones brightness=0.5 160 crop no randcrop
# savepath = 'checkpoint/data_3_25_all/mobilenetv2/class9/224/000/' # 18 change_lr12 milestones brightness=0.5 224 no randcrop
# savepath = 'checkpoint/data_3_25_all/mobilenetv2/class9/224/111/' # 18 change_lr12 milestones brightness=0.5 224 crop no randcrop
# savepath = 'checkpoint/data_3_25_all/mobilenetv2/class9/224/222/' # 18 change_lr12 milestones brightness=0.5 224 no randcrop
# savepath = 'checkpoint/data_3_25_all/mobilenetv2/class9/224/333/' # 18 change_lr12 milestones brightness=0.5 224 crop no randcrop
# savepath = 'checkpoint/data_3_25_all/mobilenetv2/class9/224/444/' # 18 change_lr12 milestones brightness=0.5 224 crop 240->224
savepath = 'checkpoint/data_3_25_all/mobilenetv2/class9/224/555/' # 18 change_lr12 milestones brightness=0.5 224 crop 240->224
# savepath = 'checkpoint/data_3_25_all/mobilenetv2/class9/320/000/' # 18 change_lr12 milestones brightness=0.5 320 no randcrop
# savepath = 'checkpoint/data_3_25_all/mobilenetv2/class9/320/111/' # 18 change_lr12 milestones brightness=0.5 320 crop no randcrop
# class7_2
# savepath = 'checkpoint/data_3_25_all/mobilenetv2/class7_2/000/' # 16 milestones brightness=0.5 240->224
# savepath = 'checkpoint/data_3_25_all/mobilenetv2/class7_2/111/' # 16 milestones brightness=0.5 240->224 crop
# savepath = 'checkpoint/data_3_25_all/mobilenetv2/class7_2/222/' # change_lr10 brightness=0.5 240->224 crop
# savepath = 'checkpoint/data_3_25_all/mobilenetv2/class7_2/333/' # change_lr9 brightness=0.5 240->224 crop
# class7_3
# savepath = 'checkpoint/data_3_25_all/mobilenetv2/class7_3/000/' # 16 milestones brightness=0.5 240->224 crop
# savepath = 'checkpoint/data_3_25_all/mobilenetv2/class7_3/111/' # change_lr10 brightness=0.5 240->224 crop
# class8
# savepath = 'checkpoint/data_3_25_all/mobilenetv2/class8/000/' # 16 milestones brightness=0.5 240->224 crop
# savepath = 'checkpoint/data_3_25_all/mobilenetv2/class8/111/' # change_lr12 brightness=0.5 240->224 crop
# savepath = 'checkpoint/data_3_25_all/mobilenetv2/class8/222/' # 16 milestones brightness=0.5 240->224
# savepath = 'checkpoint/data_3_25_all/mobilenetv2/class8/333/' # 18 change_lr12 milestones brightness=0.5 240->224 crop
# savepath = 'checkpoint/data_3_25_all/mobilenetv2/class8/444/' # 18 change_lr12 milestones brightness=0.5 240->224
# savepath = 'checkpoint/data_3_25_all/mobilenetv2/class8/555/' # 18 change_lr12 milestones brightness=0.5 240->224 crop no randcrop
# savepath = 'checkpoint/data_3_25_all/mobilenetv2/class8/666/' # 18 change_lr12 milestones brightness=0.5 240->224 crop
# savepath = 'checkpoint/data_3_25_all/mobilenetv2/class8/777/' # 18 change_lr12 milestones brightness=0.5 240->224 crop nopre
# class8 320
# savepath = 'checkpoint/data_3_25_all/mobilenetv2/class8/320/000/' # 18 change_lr12 milestones brightness=0.5 320 crop no randcrop
# savepath = 'checkpoint/data_3_25_all/mobilenetv2/class8/320/111/' # 18 change_lr12 milestones brightness=0.5 320 randcrop
# class8 160
# savepath = 'checkpoint/data_3_25_all/mobilenetv2/class8/160/000/' # 18 change_lr12 milestones brightness=0.5 160 crop no randcrop
# savepath = 'checkpoint/data_3_25_all/mobilenetv2/class8/160/111/' # 18 change_lr12 milestones brightness=0.5 160 nocrop no randcrop
# resnext50
# savepath = 'checkpoint/data_3_25_all/resnext50/224/class8/000/' # 18 change_lr12 milestones brightness=0.5 240->224 crop
# mobilenetv3_small
# savepath = 'checkpoint/data_3_25_all/mobilenetv3_s/224/class8/000/' # 18 change_lr12 milestones brightness=0.5 240->224 crop
# savepath = 'checkpoint/data_3_25_all/mobilenetv3_s/224/class8/111/' # 18 change_lr12 milestones brightness=0.5 240->224
# savepath = 'checkpoint/data_3_25_all/mobilenetv3_s/224/class9/000/' # 18 change_lr12 milestones brightness=0.5 240->224 crop
# savepath = 'checkpoint/data_3_25_all/mobilenetv3_s/224/class9/111/' # 18 change_lr12 milestones brightness=0.5 240->224
# mobilenetv3_LARGE
# savepath = 'checkpoint/data_3_25_all/mobilenetv3_L/224/class8/000/' # 18 change_lr12 milestones brightness=0.5 240->224 crop
# savepath = 'checkpoint/data_3_25_all/mobilenetv3_L/224/class8/111/' # 18 change_lr12 milestones brightness=0.5 240->224
# savepath = 'checkpoint/data_3_25_all/mobilenetv3_L/320/class8/000/' # 18 change_lr12 milestones brightness=0.5 320 crop
# savepath = 'checkpoint/data_3_25_all/mobilenetv3_L/224/class9/000/' # 18 change_lr12 milestones brightness=0.5 240->224 crop
# savepath = 'checkpoint/data_3_25_all/mobilenetv3_L/224/class9/111/' # 18 change_lr12 milestones brightness=0.5 240->224
# savepath = 'checkpoint/data_3_25_all/mobilenetv3_L/320/class9/111/' # 18 change_lr12 milestones brightness=0.5 320
def val(epoch):
global best_val_acc
net.eval()
test_loss = 0
correct = 0
total = 0
with torch.no_grad():
for batch_idx, (inputs, targets) in enumerate(val_dataloader):
inputs, targets = inputs.to(device), targets.to(device)
outputs = net(inputs)
loss = criterion(outputs, targets)
test_loss += loss.item()
_, predicted = outputs.max(1)
total += targets.size(0)
correct += predicted.eq(targets).sum().item()
average_loss = test_loss / (batch_idx + 1)
test_acc = correct / total
progress_bar(batch_idx, len(val_dataloader), 'Loss: %.3f | Acc: %.3f%% (%d/%d)'
% (average_loss, 100. * test_acc, correct, total))
acc = 100. * correct / total
if acc >= best_val_acc:
print('Saving..')
state = {
'net': net.state_dict(),
'acc': acc,
'epoch': epoch,
}
if not os.path.isdir(savepath):
# os.mkdir(savepath)
os.makedirs(savepath)
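# os.makedirs also creates any missing parent directories, unlike the os.mkdir call it replaces.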
print("best_acc:{:.4f}".format(acc))
# torch.save(state, savepath + 'mobilenetv2_1_imagenet_acc={:.4f}.pth'.format(acc))
# torch.save(state, savepath + 'ghostnet_1_imagenet_acc={:.4f}.pth'.format(acc))
# torch.save(state, savepath + 'mnext_1_imagenet_acc={:.4f}.pth'.format(acc))
# torch.save(state, savepath + 'my_mnextv2_1_imagenet_acc={:.4f}.pth'.format(acc))
# torch.save(state, savepath + 'mobilenetv2_1_my_acc={:.4f}.pth'.format(acc))
# torch.save(state, savepath + 'shufflenetv2_1_my_acc={:.4f}.pth'.format(acc))
# torch.save(state, savepath + 'shufflenetv2_05_my_acc={:.4f}.pth'.format(acc))
# torch.save(state, savepath + 'ghostnet_1_my_acc={:.4f}.pth'.format(acc))
# torch.save(state, savepath + 'mnext_1_my_acc={:.4f}.pth'.format(acc))
# torch.save(state, savepath + 'mobilenetv2_1_kg1_acc={:.4f}.pth'.format(acc))
# torch.save(state, savepath + 'mobilenetv2_1_kg2_acc={:.4f}.pth'.format(acc))
# torch.save(state, savepath + 'resnet50_1_kg2_acc={:.4f}.pth'.format(acc))
# torch.save(state, savepath + 'mnext_1_kg2_acc={:.4f}.pth'.format(acc))
# torch.save(state, savepath + 'mobilenetv3s_1_kg2_acc={:.4f}.pth'.format(acc))
# torch.save(state, savepath + 'mobilenetv2_1_aucv2_acc={:.4f}.pth'.format(acc))
# torch.save(state, savepath + 'mnext_1_aucv2_acc={:.4f}.pth'.format(acc))
# torch.save(state, savepath + 'mobilenetv3s_1_aucv2_acc={:.4f}.pth'.format(acc))
# torch.save(state, savepath + 'resnet50_aucv2_acc={:.4f}.pth'.format(acc))
# torch.save(state, savepath + 'mobilenetv2_1_aucv1_acc={:.4f}.pth'.format(acc))
# torch.save(state, savepath + 'mnext_1_aucv1_acc={:.4f}.pth'.format(acc))
# torch.save(state, savepath + 'mobilenetv3s_1_aucv1_acc={:.4f}.pth'.format(acc))
# torch.save(state, savepath + 'resnet50_1_aucv1_acc={:.4f}.pth'.format(acc))
# torch.save(state, savepath + 'ghostnet_1_aucv1_acc={:.4f}.pth'.format(acc))
# torch.save(state, savepath + 'mobilenetv2_1_12_23_acc={:.4f}.pth'.format(acc))
# torch.save(state, savepath + 'mobilenetv2_cbam_1_12_23_acc={:.4f}.pth'.format(acc))
# torch.save(state, savepath + 'mnext_1_12_23_acc={:.4f}.pth'.format(acc))
# torch.save(state, savepath + 'mobilenetv2_1_crop_acc={:.4f}.pth'.format(acc))
# torch.save(state, savepath + 'mobilenetv2_1_c6_acc={:.4f}.pth'.format(acc))
# torch.save(state, savepath + 'mobilenetv2_1_c7_acc={:.4f}.pth'.format(acc))
# torch.save(state, savepath + 'mobilenetv2_224_crop_72_acc={:.4f}.pth'.format(acc))
# torch.save(state, savepath + 'mobilenetv2_224_crop_73_acc={:.4f}.pth'.format(acc))
# torch.save(state, savepath + 'mobilenetv2_224_crop_8_acc={:.4f}.pth'.format(acc))
# torch.save(state, savepath + 'mobilenetv2_160_acc={:.4f}.pth'.format(acc))
# torch.save(state, savepath + 'mobilenetv2_160_crop_acc={:.4f}.pth'.format(acc))
# torch.save(state, savepath + 'mobilenetv2_224_acc={:.4f}.pth'.format(acc))
# torch.save(state, savepath + 'mobilenetv2_224_crop_acc={:.4f}.pth'.format(acc))
# torch.save(state, savepath + 'mobilenetv2_320_acc={:.4f}.pth'.format(acc))
# torch.save(state, savepath + 'mobilenetv2_320_crop_acc={:.4f}.pth'.format(acc))
# torch.save(state, savepath + 'mobilenetv2_320_crop_8_acc={:.4f}.pth'.format(acc))
# torch.save(state, savepath + 'mobilenetv2_160_crop_8_acc={:.4f}.pth'.format(acc))
# torch.save(state, savepath + 'mobilenetv2_160_8_acc={:.4f}.pth'.format(acc))
# torch.save(state, savepath + 'mobilenetv2_224_8_acc={:.4f}.pth'.format(acc))
# torch.save(state, savepath + 'mobilenetv2_224_9_acc={:.4f}.pth'.format(acc))
torch.save(state, savepath + 'mobilenetv2_224_9_crop_acc={:.4f}.pth'.format(acc))
# torch.save(state, savepath + 'mobilenetv2_320_acc={:.4f}.pth'.format(acc))
# torch.save(state, savepath + 'resnext50_224_8_acc={:.4f}.pth'.format(acc))
# torch.save(state, savepath + 'mv3_s_224_8_crop_acc={:.4f}.pth'.format(acc))
# torch.save(state, savepath + 'mv3_s_224_crop_acc={:.4f}.pth'.format(acc))
# torch.save(state, savepath + 'mv3_s_224_acc={:.4f}.pth'.format(acc))
# torch.save(state, savepath + 'mv3_l_224_8_crop_acc={:.4f}.pth'.format(acc))
# torch.save(state, savepath + 'mv3_s_224_9_crop_acc={:.4f}.pth'.format(acc))
# torch.save(state, savepath + 'mv3_s_224_9_acc={:.4f}.pth'.format(acc))
# torch.save(state, savepath + 'mv3_s_224_8_acc={:.4f}.pth'.format(acc))
# torch.save(state, savepath + 'mv3_l_224_8_acc={:.4f}.pth'.format(acc))
# torch.save(state, savepath + 'mv3_l_224_acc={:.4f}.pth'.format(acc))
# torch.save(state, savepath + 'mv3_l_224_crop_acc={:.4f}.pth'.format(acc))
# torch.save(state, savepath + 'mv3_l_224_9_acc={:.4f}.pth'.format(acc))
# torch.save(state, savepath + 'mv3_l_224_9_crop_acc={:.4f}.pth'.format(acc))
best_val_acc = acc
return average_loss, test_acc
# B0/000 kaggle dataset without val
# B0/111 kaggle dataset with val
# B0/222 my dataset with normal transform
# B0/333 my dataset with randomcrop random flip
# B0/444 my dataset with random flip
# B0/555 my dataset with random flip with val
# ghost_net/000 kaggle dataset with random flip w = 1
# ghost_net/111 kaggle dataset with random flip w = 0.5
# ghost_net/222 kaggle dataset with random flip w = 0.3
# ghost_net/333 my dataset with random flip w = 0.5
# ghost_net/444 my dataset with random flip w = 1
# ghost_net/555 my dataset no val with random flip w = 0.5
# ghost_net/666 kaggle dataset with random flip w = 0.1
# ghost_net/777 kaggle dataset with random flip w = 0.1
# mobilenetv2/000 my dataset with random flip
# mobilenetv2/111 kaggle dataset with random flip
# mobilenetv2/111 kaggle dataset with random flip w =0.5
# resnet18/000 my dataset with random flip
# resnet18/111 kaggle dataset with random flip
# resnext50/000 my dataset with random flip
# resnext50/111 kaggle dataset with random flip
# resnext50/222 my dataset with random flip with 111 pretrain
# resnext50/333 my dataset with random flip without pretrain
# resnext50/444 kaggle dataset with random flip without pretrain
def main(epoches=epoches):
x = []
lrs = []
train_loss = []
test_loss = []
train_acc = []
test_acc = []
start_time = time.time()
for epoch in range(start_epoch, start_epoch + epoches):
train_l, train_a, lr = train(epoch)
test_l, test_a = val(epoch)
x.append(epoch)
lrs.append(lr)
train_loss.append(train_l)
test_loss.append(test_l)
train_acc.append(train_a)
test_acc.append(test_a)
print("epoch={}/{},lr={},train_loss={:.3f},test_loss={:.3f},train_acc={:.3f},test_acc={:.3f}"
.format(epoch + 1, epoches, lr, train_l, test_l, train_a, test_a))
# # # earlystop
# if lr < 1e-4-1e-5:
# break
# if lr < 1e-6 - 1e-7:
# break
print("total train time ={}".format(format_time(time.time() - start_time)))
fig = plt.figure(figsize=(16, 9))
sub1 = fig.add_subplot(1, 3, 1)
sub1.set_title("loss")
sub1.plot(x, train_loss, label="train_loss")
sub1.plot(x, test_loss, label="test_loss")
plt.legend()
sub2 = fig.add_subplot(1, 3, 2)
sub2.set_title("acc")
sub2.plot(x, train_acc, label="train_acc")
sub2.plot(x, test_acc, label="test_acc")
plt.legend()
sub3 = fig.add_subplot(1, 3, 3)
sub3.set_title("lr")
sub3.plot(x, lrs, label="lr")
plt.title(savepath)
plt.legend()
# save the training curves
plt.savefig(savepath + 'learning.jpg')
plt.show()
def net_test():
# num_classes = 6
# num_classes = 7
# num_classes = 8
num_classes = 9
net = models.mobilenet_v2(pretrained=False, num_classes=num_classes, width_mult=1.0)
# # model_path = r"checkpoint/data_11_16/mobilenetv2/pre/555/mobilenetv2_1_my_acc=96.1749.pth" # crop 160=0.7486338797814208
# # model_path = r"checkpoint/data_11_16/mobilenetv2/pre/222/mobilenetv2_1_my_acc=92.3497.pth" # 160=0.5846994535519126
# # model_path = r"checkpoint/data_11_16/mobilenetv2/pre/666/mobilenetv2_1_my_acc=95.6284.pth" # 160=0.9562841530054644 224=0.7486338797814208
# # model_path = r"checkpoint/data_11_16/mobilenetv2/pre/777/mobilenetv2_1_my_acc=95.6284.pth" # 160=0.8142076502732241
# # model_path = r"checkpoint/data_11_16/mobilenetv2/pre/0/111/mobilenetv2_1_my_acc=93.4426.pth" # crop=0.9398907103825137 crop_160=0.907103825136612
#
# # model_path = r"checkpoint/data_12_23/mobilenetv2/222/mobilenetv2_1_12_23_acc=93.6898.pth"
# # model_path = r"checkpoint/data_12_23/mobilenetv2/333/mobilenetv2_1_12_23_acc=89.9061.pth"
# model_path = r"checkpoint/data_12_23/mobilenetv2/888/mobilenetv2_1_12_23_acc=91.6275.pth"
# model_path = r"checkpoint/data_12_23/mobilenetv2/0/222/mnext_1_12_23_acc=88.2629.pth" # mobilenetv2
# model_path = r"checkpoint/data_12_23/mobilenetv2/0/333/mobilenetv2_1_12_23_acc=84.8983.pth"
# model_path = r"checkpoint/data_12_23/mobilenetv2/crop/333/mobilenetv2_1_crop_acc=90.8059.pth"
# model_path = r"checkpoint/data_12_23/mobilenetv2/crop/444/mobilenetv2_1_crop_acc=90.9233.pth"
# model_path = r"checkpoint/data_3_25_all/mobilenetv2/111/mobilenetv2_224_acc=85.6154.pth"
model_path = r"checkpoint/data_3_25_all/mobilenetv2/class8/666/mobilenetv2_224_8_acc=90.5516.pth"
model_path = r"checkpoint/data_3_25_all/mobilenetv2/555/mobilenetv2_224_crop_acc=89.1415.pth"
model_path = r"checkpoint/data_3_25_all/mobilenetv2/111/mobilenetv2_224_acc=85.6154.pth"
# class6
# model_path = r"checkpoint/data_12_23/class6/mobilenetv2/222/mobilenetv2_1_c6_acc=95.1313.pth"
# class7
# model_path = r"checkpoint/data_12_23/class7/mobilenetv2/000/mobilenetv2_1_c7_acc=92.0188.pth"
# net = mnext(num_classes=num_classes, width_mult=1.)
# model_path = r"checkpoint/data_12_23/mnext/000/mnext_1_12_23_acc=92.1753.pth"
# Load the model weights, skipping parameters whose shapes do not match
model_dict = net.state_dict()
checkpoint = torch.load(model_path, map_location=device)
pretrained_dict = checkpoint["net"]
pretrained_dict = {k: v for k, v in pretrained_dict.items() if np.shape(model_dict[k]) == np.shape(v)}
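# Only pretrained tensors whose shapes match the current model are kept, so a checkpoint trained with a different classifier-head size can still initialize the backbone.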
model_dict.update(pretrained_dict)
net.load_state_dict(model_dict)
print("loaded model with acc:{}".format(checkpoint["acc"]))
net.to(device)
net.eval()
test_loss = 0
correct = 0
total = 0
test_transform = transforms.Compose([
transforms.Resize((224, 224)),
# transforms.Resize((160, 160)),
transforms.ToTensor(),
transforms.Normalize(mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225])
])
# test_dataset = MyDataset("data/txt/116_testcrop224.txt", test_transform)
# test_dataset = MyDataset("data/txt/119_testcrop224.txt", test_transform)
# test_dataset = MyDataset("data/test224_11_16.txt", test_transform)
# test_dataset = MyDataset("data/test_116_119.txt", test_transform)
# test_dataset = MyDataset("data/train224_11_16.txt", test_transform)
# test_dataset = MyDataset("data/txt/12_23_1_test224.txt", test_transform)
# test_dataset = MyDataset("data/txt/12_23_2_test224.txt", test_transform)
# test_dataset = MyDataset("data/txt/12_23_12_test224.txt", test_transform)
# test_dataset = MyDataset("data/txt/12_23_12_addpre_test224.txt", test_transform)
# test_dataset = MyDataset("data/txt/12_23_12_addpre_test_crop224.txt", test_transform)
# test_dataset = MyDataset("data/txt_raw/total_test.txt", test_transform)
# class6
# test_dataset = MyDataset("data/txt6/12_23_12_addpre_test224_6.txt", test_transform)
# test_dataset = MyDataset("data/txt6/12_23_12_addpre_test224_addcrop_6.txt", test_transform)
# class7
# test_dataset = MyDataset("data/txt7/12_23_12_addpre_test224_7.txt", test_transform)
# test_dataset = MyDataset("data/txt7/12_23_12_addpre_test224_addcrop_7.txt", test_transform)
# class8
# all class8
# test_dataset = MyDataset("data/txt_raw/total_test_8.txt", test_transform)
# test_dataset = MyDataset("data/txt_raw_crop/total_test_crop_8.txt", test_transform)
# test_dataset = MyDataset("data/txt_raw_crop/total_test_crop.txt", test_transform)
test_dataset = MyDataset("data/txt_raw/total_test.txt", test_transform)
test_dataloader = DataLoader(dataset=test_dataset,
batch_size=64,
shuffle=True,
num_workers=0)
# Confusion_matrix
cm = np.zeros((num_classes, num_classes), dtype=int)
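# cm[i][j] counts samples whose true class is i and whose predicted class is j (rows: targets, columns: predictions).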
with torch.no_grad():
for batch_idx, (inputs, targets) in enumerate(test_dataloader):
inputs, targets = inputs.to(device), targets.to(device)
outputs = net(inputs)
loss = criterion(outputs, targets)
test_loss += loss.item()
_, predicted = outputs.max(1)
# print(targets, predicted)
for i in range(targets.shape[0]):
cm[targets[i]][predicted[i]] += 1
total += targets.size(0)
correct += predicted.eq(targets).sum().item()
average_loss = test_loss / (batch_idx + 1)
test_acc = correct / total
progress_bar(batch_idx, len(test_dataloader), 'Loss: %.3f | Acc: %.3f%% (%d/%d)'
% (average_loss, 100. * test_acc, correct, total))
# print(average_loss, test_acc)
print("test_acc: ", test_acc)
if num_classes == 9:
labels = ["正常", "侧视", "喝水", "吸烟", "操作中控", "玩手机", "侧身拿东西", "整理仪容", "接电话"]
elif num_classes == 6:
labels = ["正常", "喝水", "吸烟", "操作中控", "玩手机", "接电话"]
elif num_classes == 7:
labels = ["正常", "喝水", "吸烟", "操作中控", "玩手机", "接电话", "其他"]
else:
labels = ["正常", "侧视", "喝水", "吸烟", "操作中控", "玩手机", "侧身拿东西", "接电话"]
print(labels)
print("row:target col:predict")
print(cm)
true_label = np.zeros((num_classes, ), dtype=int)
predicted_label = np.zeros((num_classes,), dtype=int)
total = 0
for i in range(num_classes):
for j in range(num_classes):
true_label[i] += cm[i][j]
predicted_label[i] += cm[j][i]
total += cm[i][j]
print("true label:", true_label)
print("predicted label:", predicted_label)
TP = np.zeros((num_classes, ), dtype=int)
FP = np.zeros((num_classes,), dtype=int)
FN = np.zeros((num_classes,), dtype=int)
TN = np.zeros((num_classes,), dtype=int)
Accuracy = np.zeros((num_classes,), dtype=float)
Precision = np.zeros((num_classes,), dtype=float)
Recall = np.zeros((num_classes,), dtype=float)
F1 = np.zeros((num_classes,), dtype=float)
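# Per-class metrics derived from the confusion matrix (standard definitions):
# TP[i] = cm[i][i]; FP[i] = column sum minus TP (predicted as i but wrong); FN[i] = row sum minus TP (true i missed);
# TN[i] covers everything else; Precision = TP/(TP+FP), Recall = TP/(TP+FN), F1 = 2*P*R/(P+R).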
for i in range(num_classes):
TP[i] = cm[i][i]
FP[i] = predicted_label[i] - TP[i]
FN[i] = true_label[i] - TP[i]
TN[i] = total - true_label[i] - FP[i]
Accuracy[i] = (TP[i] + TN[i]) / total
Precision[i] = TP[i] / predicted_label[i]
Recall[i] = TP[i] / true_label[i]
F1[i] = 2 * Precision[i] * Recall[i] / (Precision[i] + Recall[i])
print("TP:", TP)
print("TN:", TN)
print("FP:", FP)
print("FN:", FN)
print("Accuracy:", Accuracy)
print("Precision:", Precision)
print("Recall:", Recall)
print("F1:", F1)
metrics = {}
metrics["准确率"] = Accuracy.tolist()  # accuracy: probability that a sample is classified correctly (counts both TP and TN)
metrics["精确率"] = Precision.tolist()  # precision: of the samples predicted as a class, the fraction that are correct
metrics["召回率"] = Recall.tolist()  # recall: of the samples truly in a class, the fraction that are detected (detection rate)
metrics["F1-score"] = F1.tolist()
test_path = os.path.dirname(model_path)
with open(os.path.join(test_path, "test.json"), "w", encoding='utf-8') as f:
json.dump(metrics, f)
# also save the metrics to an Excel sheet
df = pd.DataFrame(metrics, index=labels)
df.to_excel(os.path.join(test_path, 'test.xlsx'))
# Set up the OpenVINO environment variables:
# cd D:\Program Files (x86)\Intel\openvino_2020.3.341\bin
# setupvars.bat
# cd D:\code\EfficientNet-PyTorch-master
def net_test_onnx():
# Load dataset
dataset_path = r"data/txt/12_23_12_addpre_test224.txt"
with open(dataset_path) as f:
datasets = [c.strip() for c in f.readlines()]
path = r"checkpoint/data_12_23/mobilenetv2/888/mobilenetv2_1_12_23_acc=91.6275.onnx"
# num_classes = 9
num_classes = 6
onnx_session = onnxruntime.InferenceSession(path, None)
dnn_net = cv2.dnn.readNetFromONNX(path)
# xml_path = r"checkpoint/data_12_23/mobilenetv2/888/mobilenetv2_1_12_23_acc=91.6275.xml"
# bin_path = r"checkpoint/data_12_23/mobilenetv2/888/mobilenetv2_1_12_23_acc=91.6275.bin"
# # # FP16
# # xml_path = r"checkpoint/data_12_23/mobilenetv2/8888/mobilenetv2_1_12_23_acc=91.6275.xml"
# # bin_path = r"checkpoint/data_12_23/mobilenetv2/8888/mobilenetv2_1_12_23_acc=91.6275.bin"
# dnn_net = cv2.dnn.readNet(xml_path, bin_path)
# dnn_net = cv2.dnn.readNetFromModelOptimizer(xml_path, bin_path)
# dnn_net.setPreferableTarget(cv2.dnn.DNN_TARGET_MYRIAD)
# dnn_net.setPreferableTarget(cv2.dnn.DNN_BACKEND_HALIDE)
# dnn_net.setPreferableTarget(cv2.dnn.DNN_TARGET_OPENCL)
model = models.mobilenet_v2(pretrained=False, num_classes=num_classes, width_mult=1.0)
# Load the model weights
path = r"checkpoint/data_12_23/mobilenetv2/888/mobilenetv2_1_12_23_acc=91.6275.pth"
checkpoint = torch.load(path)
model.load_state_dict(checkpoint["net"])
model.cpu()
model.eval()
total = 0
right = 0
dnn_right = 0
model_right = 0
for data in datasets:
img_path = data.split(" ")[0]
label = int(data.split(" ")[1])
src = cv2.imread(img_path)
# print(src.shape) # height, width, channel
src2 = cv2.cvtColor(src, cv2.COLOR_BGR2RGB)
image = cv2.resize(src2, (224, 224))
# print(image.shape)
image = np.float32(image) / 255.0
image[:, :, ] -= (np.float32(0.485), np.float32(0.456), np.float32(0.406))
image[:, :, ] /= (np.float32(0.229), np.float32(0.224), np.float32(0.225))
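# The manual scaling to [0, 1] plus per-channel mean/std subtraction above mirrors transforms.ToTensor() + transforms.Normalize used for the PyTorch model, so all three backends see the same preprocessing.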
blob = cv2.dnn.blobFromImage(image, 1.0, (224, 224), (0, 0, 0), False)
dnn_net.setInput(blob)
start = time.time()
dnn_probs = dnn_net.forward()
print("dnn inference time:", time.time() - start)
dnn_index = np.argmax(dnn_probs)  # By default, the index is into the flattened array, otherwise along the specified axis.
# dnn_softmax = softmax_np(dnn_probs)
# print(dnn_index, dnn_softmax.max())
# print(image.shape)
image = image.transpose(2, 0, 1)  # transpose axes: PyTorch expects channel-first (CHW)
image = image.reshape(1, 3, 224, 224)  # batch, channel, height, width
# image = []
# image.append(image)
# image = np.asarray(image)
inputs = {onnx_session.get_inputs()[0].name: image}
probs = onnx_session.run(None, inputs)
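# onnx_session.run(None, inputs) returns a list with one array per model output; here the single output holds the raw class scores.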
probs = np.array(probs)
# print(probs.shape)
index = np.argmax(probs)
print(index)
# softmax = softmax_np(probs)
softmax = softmax_flatten(probs)
print(index, softmax.max())
model_image = torch.from_numpy(image)
output = model(model_image)
model_index = np.argmax(output.detach().numpy())
# print("dnn_probs:{},probs:{},output:{}".format(dnn_probs, probs, output))
# print("dnn_index:{},index:{},model_index:{},label:{}".format(dnn_index, index, model_index, label))
total += 1
if index == label:
right += 1
if dnn_index == label:
dnn_right += 1
if model_index == label:
model_right += 1
print("acc:{},dnn_acc:{},model_acc :{}".format(right / total, dnn_right / total, model_right / total))
if __name__ == '__main__':
# Test
# test_efficientnet()
# Train
# for epoch in range(start_epoch, start_epoch + 48):
# train(epoch)
# val(epoch)
main()
# net_test()
# net_test_onnx()
# Show prediction results
# visualize_pred()
| 46.697019
| 195
| 0.676993
| 12,227
| 86,156
| 4.559336
| 0.059459
| 0.061671
| 0.054855
| 0.035482
| 0.818878
| 0.786482
| 0.752722
| 0.70994
| 0.665883
| 0.619423
| 0
| 0.094608
| 0.18759
| 86,156
| 1,844
| 196
| 46.722343
| 0.701846
| 0.727622
| 0
| 0.361538
| 0
| 0
| 0.064221
| 0.034514
| 0
| 0
| 0
| 0
| 0
| 1
| 0.05
| false
| 0
| 0.061538
| 0.001923
| 0.173077
| 0.057692
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
0d1afb099f92e05854e9dce4d4aad6216cde7b00
| 145
|
py
|
Python
|
testdata/simplerepo/a/__init__.py
|
sourcegraph/python-deps
|
a7f1b28cc53bfdc3c71f70d0c0f3ae759e68c6f3
|
[
"BSD-2-Clause"
] | 1
|
2018-06-22T10:13:13.000Z
|
2018-06-22T10:13:13.000Z
|
testdata/simplerepo/a/__init__.py
|
sourcegraph/python-deps
|
a7f1b28cc53bfdc3c71f70d0c0f3ae759e68c6f3
|
[
"BSD-2-Clause"
] | null | null | null |
testdata/simplerepo/a/__init__.py
|
sourcegraph/python-deps
|
a7f1b28cc53bfdc3c71f70d0c0f3ae759e68c6f3
|
[
"BSD-2-Clause"
] | 4
|
2015-04-19T15:59:00.000Z
|
2020-12-18T11:25:41.000Z
|
from .a import oauth
from ...a.b.c import oauth
from .. import foo
from . import a
from .b import *
import a.b as b_alias
import a.b
import json
| 16.111111
| 26
| 0.717241
| 30
| 145
| 3.433333
| 0.333333
| 0.058252
| 0.291262
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.193103
| 145
| 8
| 27
| 18.125
| 0.880342
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
b4b48ce75303a204ed2e775623be1ef6d1ac67fa
| 418
|
py
|
Python
|
polymers/poly_hgraph/__init__.py
|
Amir-Mehrpanah/hgraph2graph
|
6d37153afe09f7684381ce56e8366675e22833e9
|
[
"MIT"
] | 182
|
2019-11-15T15:59:31.000Z
|
2022-03-31T09:17:40.000Z
|
polymers/poly_hgraph/__init__.py
|
Amir-Mehrpanah/hgraph2graph
|
6d37153afe09f7684381ce56e8366675e22833e9
|
[
"MIT"
] | 30
|
2020-03-03T16:35:52.000Z
|
2021-12-16T04:06:57.000Z
|
polymers/poly_hgraph/__init__.py
|
Amir-Mehrpanah/hgraph2graph
|
6d37153afe09f7684381ce56e8366675e22833e9
|
[
"MIT"
] | 60
|
2019-11-15T05:06:11.000Z
|
2022-03-31T16:43:12.000Z
|
from poly_hgraph.mol_graph import MolGraph
from poly_hgraph.encoder import HierMPNEncoder
from poly_hgraph.decoder import HierMPNDecoder
from poly_hgraph.vocab import Vocab, PairVocab, common_atom_vocab
from poly_hgraph.hgnn import HierVAE, HierVGNN, HierCondVGNN
from poly_hgraph.dataset import MoleculeDataset, MolPairDataset, DataFolder, MolEnumRootDataset
from poly_hgraph.chemutils import find_fragments, get_mol
| 52.25
| 95
| 0.87799
| 55
| 418
| 6.454545
| 0.509091
| 0.157746
| 0.276056
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.086124
| 418
| 7
| 96
| 59.714286
| 0.929319
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
b4d5fb03e2c073ff2c0dd1b22febb3c7d32c9267
| 184
|
py
|
Python
|
repos/system_upgrade/el8toel9/actors/scancryptopolicies/tests/component_test_scancryptopolicies.py
|
AsM0DeUz/leapp-repository
|
b67a395ee3d67d3d628037c250a210bb52e9187c
|
[
"Apache-2.0"
] | null | null | null |
repos/system_upgrade/el8toel9/actors/scancryptopolicies/tests/component_test_scancryptopolicies.py
|
AsM0DeUz/leapp-repository
|
b67a395ee3d67d3d628037c250a210bb52e9187c
|
[
"Apache-2.0"
] | null | null | null |
repos/system_upgrade/el8toel9/actors/scancryptopolicies/tests/component_test_scancryptopolicies.py
|
AsM0DeUz/leapp-repository
|
b67a395ee3d67d3d628037c250a210bb52e9187c
|
[
"Apache-2.0"
] | null | null | null |
from leapp.models import CryptoPolicyInfo
def test_actor_execution(current_actor_context):
current_actor_context.run()
assert current_actor_context.consume(CryptoPolicyInfo)
| 26.285714
| 58
| 0.842391
| 22
| 184
| 6.681818
| 0.636364
| 0.244898
| 0.387755
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.103261
| 184
| 6
| 59
| 30.666667
| 0.890909
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.25
| 1
| 0.25
| false
| 0
| 0.25
| 0
| 0.5
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
370d98051ba250a43738f9c0a5c1ad62333f0942
| 58
|
py
|
Python
|
mlalgo/tree/__init__.py
|
zeyad-kay/mlalgo
|
72ac1cb10bc81cb9db50d3511019373a8c4f50ad
|
[
"MIT"
] | null | null | null |
mlalgo/tree/__init__.py
|
zeyad-kay/mlalgo
|
72ac1cb10bc81cb9db50d3511019373a8c4f50ad
|
[
"MIT"
] | null | null | null |
mlalgo/tree/__init__.py
|
zeyad-kay/mlalgo
|
72ac1cb10bc81cb9db50d3511019373a8c4f50ad
|
[
"MIT"
] | null | null | null |
from .DecisionTreeClassifier import DecisionTreeClassifier
| 58
| 58
| 0.931034
| 4
| 58
| 13.5
| 0.75
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.051724
| 58
| 1
| 58
| 58
| 0.981818
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
2ebd4205414ee5ea2a277d78b41329c8660fc1fa
| 187
|
py
|
Python
|
src/MicEMD/__init__.py
|
UndergroundDetection/MICEMD
|
5e55f323c6464d93e56554f8b6cca2d0b7724b23
|
[
"MIT"
] | 4
|
2020-09-17T02:44:20.000Z
|
2022-03-15T06:30:52.000Z
|
src/MicEMD/__init__.py
|
UndergroundDetection/MicEMD
|
5e55f323c6464d93e56554f8b6cca2d0b7724b23
|
[
"MIT"
] | null | null | null |
src/MicEMD/__init__.py
|
UndergroundDetection/MicEMD
|
5e55f323c6464d93e56554f8b6cca2d0b7724b23
|
[
"MIT"
] | 2
|
2020-08-28T02:30:03.000Z
|
2020-08-28T14:11:47.000Z
|
from .optimization import *
from .classification import *
from .fdem import *
from .handler import *
from .tdem import *
from .preprocessor import *
from .utils import *
__all__ = dir()
| 18.7
| 29
| 0.737968
| 23
| 187
| 5.826087
| 0.478261
| 0.447761
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.171123
| 187
| 9
| 30
| 20.777778
| 0.864516
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.875
| 0
| 0.875
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
2c227eccbe41103377a8b45d71877d0301c9ed57
| 66
|
py
|
Python
|
IcisNet Msges/test.py
|
pitGitHub2016/Phonograph-Lazyvan-AlgorAI
|
bc6587b0e430611c690b01ae430a5d00209c3169
|
[
"MIT"
] | null | null | null |
IcisNet Msges/test.py
|
pitGitHub2016/Phonograph-Lazyvan-AlgorAI
|
bc6587b0e430611c690b01ae430a5d00209c3169
|
[
"MIT"
] | null | null | null |
IcisNet Msges/test.py
|
pitGitHub2016/Phonograph-Lazyvan-AlgorAI
|
bc6587b0e430611c690b01ae430a5d00209c3169
|
[
"MIT"
] | null | null | null |
import datetime
print(datetime.datetime.now().strftime("%Y%m%d"))
| 22
| 49
| 0.742424
| 10
| 66
| 4.9
| 0.8
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.045455
| 66
| 3
| 49
| 22
| 0.777778
| 0
| 0
| 0
| 0
| 0
| 0.089552
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.5
| 0
| 0.5
| 0.5
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 1
|
0
| 5
|
2c319bc589b66a49c1501732c41ecfa02befdffc
| 27,019
|
py
|
Python
|
src/HttpxLibrary/SessionKeywords.py
|
bli74/robotframework-httpx
|
0f1010c20a721b6ccdff024210f7d02d037beaab
|
[
"MIT"
] | 1
|
2021-08-03T06:28:20.000Z
|
2021-08-03T06:28:20.000Z
|
src/HttpxLibrary/SessionKeywords.py
|
bli74/robotframework-httpx
|
0f1010c20a721b6ccdff024210f7d02d037beaab
|
[
"MIT"
] | null | null | null |
src/HttpxLibrary/SessionKeywords.py
|
bli74/robotframework-httpx
|
0f1010c20a721b6ccdff024210f7d02d037beaab
|
[
"MIT"
] | null | null | null |
import logging
import sys
import httpx
from httpx import Client, HTTPTransport, Response
# noinspection PyProtectedMember
from httpx._config import DEFAULT_LIMITS, DEFAULT_MAX_REDIRECTS, DEFAULT_TIMEOUT_CONFIG
from robot.api import logger
from robot.api.deco import keyword
from robot.utils.asserts import assert_equal
from HttpxLibrary import utils, log
from HttpxLibrary.compat import httplib
from HttpxLibrary.exceptions import InvalidResponse, InvalidExpectedStatus
from HttpxLibrary.utils import is_file_descriptor, is_string_type
from .HttpxKeywords import HttpxKeywords
try:
# noinspection PyUnresolvedReferences
from httpx_ntlm import HttpNtlmAuth
except ImportError:
pass
class SessionKeywords(HttpxKeywords):
DEFAULT_RETRIES = 3
def _create_session(
self,
alias,
url,
*,
# optional named args
auth=None,
cert=None,
cookies=None,
debug=0,
disable_warnings=0,
headers=None,
http1=True,
http2=False,
limits=DEFAULT_LIMITS,
max_redirects=DEFAULT_MAX_REDIRECTS,
params=None,
retries=DEFAULT_RETRIES,
timeout=DEFAULT_TIMEOUT_CONFIG,
verify=False
) -> httpx.Client:
if params is None:
params = {}
if headers is None:
headers = {}
if cookies is None:
cookies = {}
if isinstance(cert, list):
cert=tuple(cert)
logger.info('Create Session parameters:\n'
f'- alias={alias}\n'
f'- url={url}\n'
f'- auth={auth}\n'
f'- cert={cert}\n'
f'- cookies={cookies}\n'
f'- headers={headers}\n'
f'- http1={http1}\n'
f'- http2={http2}\n'
f'- limits={limits}\n'
f'- max_redirects={max_redirects}\n'
f'- params={params}\n'
f'- retries={retries}\n'
f'- timeout={timeout}\n'
f'- verify={verify}\n'
)
transport = None
# Retries parameter not supported directly by Client()
if retries is not None and retries > 0:
transport = HTTPTransport(
verify=verify,
cert=cert,
http1=http1,
http2=http2,
limits=limits,
retries=retries
)
s = session = Client(
auth=auth,
params=params,
headers=headers,
cookies=cookies,
verify=verify,
cert=cert,
http1=http1,
http2=http2,
timeout=timeout,
limits=limits,
max_redirects=max_redirects,
transport=transport
)
# Disable httpx warnings, useful when you have a large number of test cases;
# you will observe drastic changes in Robot log.html and output.xml file sizes
if disable_warnings:
# you need to initialize logging, otherwise you will not see anything from httpx
logging.basicConfig()
logging.getLogger().setLevel(logging.ERROR)
httpx_log = logging.getLogger("httpx")
httpx_log.setLevel(logging.ERROR)
httpx_log.propagate = True
s.url = url
# Enable http verbosity
if int(debug) >= 1:
self.debug = int(debug)
httplib.HTTPConnection.debuglevel = self.debug
self._cache.register(session, alias=alias)
return session
@keyword("Create Session")
def create_session(
self,
alias,
url,
*,
# optional named args
auth=None,
cert=None,
cookies=None,
debug=0,
disable_warnings=0,
headers=None,
http1=True,
http2=False,
limits=DEFAULT_LIMITS,
max_redirects=DEFAULT_MAX_REDIRECTS,
params=None,
retries=DEFAULT_RETRIES,
timeout=DEFAULT_TIMEOUT_CONFIG,
verify=False
):
""" Create Session: create a HTTP session to a server
``alias`` Robot Framework alias to identify the session
``url`` Base url of the server
``auth`` Username and password pair or None for Basic Authentication
to use when sending requests.
See httpx.BasicAuth()
``cert`` An SSL certificate used by the requested host to authenticate the client.
Either a path to an SSL certificate file, or two-tuple of (certificate file,
key file), or a three-tuple of (certificate file, key file, password).
See httpx.Client()
``cookies`` Dictionary of Cookie items to include when sending requests.
See httpx.Client()
``debug`` Enable the http verbosity option. More information:
https://docs.python.org/2/library/httplib.html#httplib.HTTPConnection.set_debuglevel
``disable_warnings`` Disable httpx warnings; useful when you have a large number of testcases
``headers`` Dictionary of HTTP headers to include when sending requests.
See httpx.Client()
``http1`` Switch to enable/disable HTTP/1.1 support
See httpx.Client()
``http2`` Switch to enable/disable HTTP/2 support
See httpx.Client()
``limits`` The limits configuration to use.
See httpx.Client()
``max_redirects`` The maximum number of redirect responses that should be followed.
See httpx.Client()
``params`` Query parameters to include in request URLs, as
a string, dictionary, or sequence of two-tuples.
See httpx.Client()
``retries`` Maximum number of retries each connection should attempt.
By default it retries 3 times, and only in case of connection errors.
A value of 0 disables retries regardless of other retry settings.
When the retry limit is reached, a retry exception is raised.
``timeout`` The timeout configuration to use when sending requests.
See httpx.Client()
``verify`` SSL certificates (a.k.a CA bundle) used to verify the identity
of requested hosts. Either `True` (default CA bundle),
a path to an SSL certificate file, or `False` (disable verification).
See httpx.Client()
"""
if params is None:
params = {}
if headers is None:
headers = {}
if cookies is None:
cookies = {}
if auth is not None:
auth = httpx.BasicAuth(*auth)
logger.info('Create Session with Basic Authentication')
return self._create_session(
alias,
url,
auth=auth,
params=params,
headers=headers,
cookies=cookies,
verify=verify,
cert=cert,
http1=http1,
http2=http2,
timeout=timeout,
limits=limits,
max_redirects=max_redirects,
debug=debug,
disable_warnings=disable_warnings,
retries=retries
)
@keyword("Create HTTP2 Session")
def create_http2_session(
self,
alias,
url,
*,
# optional named args
auth=None,
cert=None,
cookies=None,
debug=0,
disable_warnings=0,
headers=None,
limits=DEFAULT_LIMITS,
max_redirects=DEFAULT_MAX_REDIRECTS,
params=None,
retries=DEFAULT_RETRIES,
timeout=DEFAULT_TIMEOUT_CONFIG,
verify=False):
""" Create Session: create a HTTP/2 only session to a server
``alias`` Robot Framework alias to identify the session
``url`` Base url of the server
``auth`` Username and password pair or None for Basic Authentication
to use when sending requests.
See httpx.BasicAuth()
``cert`` An SSL certificate used by the requested host to authenticate the client.
Either a path to an SSL certificate file, or two-tuple of (certificate file,
key file), or a three-tuple of (certificate file, key file, password).
See httpx.Client()
``cookies`` Dictionary of Cookie items to include when sending requests.
See httpx.Client()
``debug`` Enable the http verbosity option. More information:
https://docs.python.org/2/library/httplib.html#httplib.HTTPConnection.set_debuglevel
``disable_warnings`` Disable httpx warnings; useful when you have a large number of testcases
``headers`` Dictionary of HTTP headers to include when sending requests.
See httpx.Client()
``limits`` The limits configuration to use.
See httpx.Client()
``max_redirects`` The maximum number of redirect responses that should be followed.
See httpx.Client()
``params`` Query parameters to include in request URLs, as
a string, dictionary, or sequence of two-tuples.
See httpx.Client()
``retries`` Maximum number of retries each connection should attempt.
By default it retries 3 times, and only in case of connection errors.
A value of 0 disables retries regardless of other retry settings.
When the retry limit is reached, a retry exception is raised.
``timeout`` The timeout configuration to use when sending requests.
See httpx.Client()
``verify`` SSL certificates (a.k.a CA bundle) used to verify the identity
of requested hosts. Either `True` (default CA bundle),
a path to an SSL certificate file, or `False` (disable verification).
See httpx.Client()
"""
if cookies is None:
cookies = {}
if headers is None:
headers = {}
if params is None:
params = {}
if auth is not None:
auth = httpx.BasicAuth(*auth)
logger.info('Create Session with Basic Authentication')
return self._create_session(
alias,
url,
auth=auth,
params=params,
headers=headers,
cookies=cookies,
verify=verify,
cert=cert,
http1=False,
http2=True,
timeout=timeout,
limits=limits,
max_redirects=max_redirects,
debug=debug,
disable_warnings=disable_warnings,
retries=retries
)
@keyword("Create Custom Session")
def create_custom_session(
self,
alias,
url,
*,
# optional named args
auth=None,
cert=None,
cookies=None,
debug=0,
disable_warnings=0,
headers=None,
http1=True,
http2=False,
limits=DEFAULT_LIMITS,
max_redirects=DEFAULT_MAX_REDIRECTS,
params=None,
retries=DEFAULT_RETRIES,
timeout=DEFAULT_TIMEOUT_CONFIG,
verify=False):
""" Create Session: create a HTTP session to a server
``alias`` Robot Framework alias to identify the session
``url`` Base url of the server
``auth`` A Custom Authentication object to be passed on to the requests library.
http://docs.python-requests.org/en/master/user/advanced/#custom-authentication
``cert`` An SSL certificate used by the requested host to authenticate the client.
Either a path to an SSL certificate file, or two-tuple of (certificate file,
key file), or a three-tuple of (certificate file, key file, password).
See httpx.Client()
``cookies`` Dictionary of Cookie items to include when sending requests.
See httpx.Client()
``debug`` Enable the http verbosity option. More information:
https://docs.python.org/2/library/httplib.html#httplib.HTTPConnection.set_debuglevel
``disable_warnings`` Disable httpx warnings; useful when you have a large number of testcases
``headers`` Dictionary of HTTP headers to include when sending requests.
See httpx.Client()
``http1`` Switch to enable/disable HTTP/1.1 support
See httpx.Client()
``http2`` Switch to enable/disable HTTP/2 support
See httpx.Client()
``limits`` The limits configuration to use.
See httpx.Client()
``max_redirects`` The maximum number of redirect responses that should be followed.
See httpx.Client()
``params`` Query parameters to include in request URLs, as
a string, dictionary, or sequence of two-tuples.
See httpx.Client()
``retries`` Maximum number of retries each connection should attempt.
By default it retries 3 times, and only in case of connection errors.
A value of 0 disables retries regardless of other retry settings.
When the retry limit is reached, a retry exception is raised.
``timeout`` The timeout configuration to use when sending requests.
See httpx.Client()
``verify`` SSL certificates (a.k.a CA bundle) used to verify the identity
of requested hosts. Either `True` (default CA bundle),
a path to an SSL certificate file, or `False` (disable verification).
See httpx.Client()
"""
if cookies is None:
cookies = {}
if headers is None:
headers = {}
if params is None:
params = {}
logger.info('Creating Custom Authenticated Session')
return self._create_session(
alias=alias,
url=url,
auth=auth,
params=params,
headers=headers,
cookies=cookies,
verify=verify,
cert=cert,
http1=http1,
http2=http2,
timeout=timeout,
limits=limits,
max_redirects=max_redirects,
debug=debug,
disable_warnings=disable_warnings,
retries=retries)
@keyword("Create Digest Session")
def create_digest_session(
self,
alias,
url,
*,
# optional named args
auth=None,
cert=None,
cookies=None,
debug=0,
disable_warnings=0,
headers=None,
http1=True,
http2=False,
limits=DEFAULT_LIMITS,
max_redirects=DEFAULT_MAX_REDIRECTS,
params=None,
retries=DEFAULT_RETRIES,
timeout=DEFAULT_TIMEOUT_CONFIG,
verify=False):
""" Create Session: create a HTTP session to a server
``alias`` Robot Framework alias to identify the session
``url`` Base url of the server
``auth`` Username and password pair or None for Digest Authentication
to use when sending requests.
See httpx.DigestAuth()
``cert`` An SSL certificate used by the requested host to authenticate the client.
Either a path to an SSL certificate file, or two-tuple of (certificate file,
key file), or a three-tuple of (certificate file, key file, password).
See httpx.Client()
``cookies`` Dictionary of Cookie items to include when sending requests.
See httpx.Client()
``debug`` Enable the http verbosity option. More information:
https://docs.python.org/2/library/httplib.html#httplib.HTTPConnection.set_debuglevel
``disable_warnings`` Disable httpx warnings; useful when you have a large number of testcases
``headers`` Dictionary of HTTP headers to include when sending requests.
See httpx.Client()
``http1`` Switch to enable/disable HTTP/1.1 support
See httpx.Client()
``http2`` Switch to enable/disable HTTP/2 support
See httpx.Client()
``limits`` The limits configuration to use.
See httpx.Client()
``max_redirects`` The maximum number of redirect responses that should be followed.
See httpx.Client()
``params`` Query parameters to include in request URLs, as
a string, dictionary, or sequence of two-tuples.
See httpx.Client()
``retries`` Maximum number of retries each connection should attempt.
By default it retries 3 times, and only in case of connection errors.
A value of 0 disables retries regardless of other retry settings.
When the retry limit is reached, a retry exception is raised.
``timeout`` The timeout configuration to use when sending requests.
See httpx.Client()
``verify`` SSL certificates (a.k.a CA bundle) used to verify the identity
of requested hosts. Either `True` (default CA bundle),
a path to an SSL certificate file, or `False` (disable verification).
See httpx.Client()
"""
if cookies is None:
cookies = {}
if headers is None:
headers = {}
if params is None:
params = {}
if auth is not None:
auth = httpx.DigestAuth(*auth)
logger.info('Creating Session with Digest Authentication')
return self._create_session(
alias=alias,
url=url,
auth=auth,
params=params,
headers=headers,
cookies=cookies,
verify=verify,
cert=cert,
http1=http1,
http2=http2,
timeout=timeout,
limits=limits,
max_redirects=max_redirects,
debug=debug,
disable_warnings=disable_warnings,
retries=retries)
@keyword("Create Ntlm Session")
def create_ntlm_session(
self,
alias,
url,
*,
# optional named args
auth=None,
cert=None,
cookies=None,
debug=0,
disable_warnings=0,
headers=None,
http1=True,
http2=False,
limits=DEFAULT_LIMITS,
max_redirects=DEFAULT_MAX_REDIRECTS,
params=None,
retries=DEFAULT_RETRIES,
timeout=DEFAULT_TIMEOUT_CONFIG,
verify=False):
""" Create Session: create a HTTP session to a server
``alias`` Robot Framework alias to identify the session
``url`` Base url of the server
``auth`` ['DOMAIN', 'username', 'password'] for NTLM Authentication.
See httpx_ntlm.HttpNtlmAuth()
``cert`` An SSL certificate used by the requested host to authenticate the client.
Either a path to an SSL certificate file, or two-tuple of (certificate file,
key file), or a three-tuple of (certificate file, key file, password).
See httpx.Client()
``cookies`` Dictionary of Cookie items to include when sending requests.
See httpx.Client()
``debug`` Enable the http verbosity option. More information:
https://docs.python.org/2/library/httplib.html#httplib.HTTPConnection.set_debuglevel
``disable_warnings`` Disable httpx warnings; useful when you have a large number of testcases
``headers`` Dictionary of HTTP headers to include when sending requests.
See httpx.Client()
``http1`` Switch to enable/disable HTTP/1.1 support
See httpx.Client()
``http2`` Switch to enable/disable HTTP/2 support
See httpx.Client()
``limits`` The limits configuration to use.
See httpx.Client()
``max_redirects`` The maximum number of redirect responses that should be followed.
See httpx.Client()
``params`` Query parameters to include in request URLs, as
a string, dictionary, or sequence of two-tuples.
See httpx.Client()
``retries`` Maximum number of retries each connection should attempt.
By default it retries 3 times, and only in case of connection errors.
A value of 0 disables retries regardless of other retry settings.
When the retry limit is reached, a retry exception is raised.
``timeout`` The timeout configuration to use when sending requests.
See httpx.Client()
``verify`` SSL certificates (a.k.a CA bundle) used to verify the identity
of requested hosts. Either `True` (default CA bundle),
a path to an SSL certificate file, or `False` (disable verification).
See httpx.Client()
"""
if cookies is None:
cookies = {}
if headers is None:
headers = {}
if params is None:
params = {}
try:
HttpNtlmAuth
except NameError:
raise AssertionError('httpx-ntlm module not installed')
if len(auth) != 3:
raise AssertionError('Incorrect number of authentication arguments'
' - expected 3, got {}'.format(len(auth)))
else:
auth = HttpNtlmAuth('{}\\{}'.format(auth[0], auth[1]),
auth[2])
logger.info('Creating NTLM Session')
return self._create_session(
alias=alias,
url=url,
auth=auth,
params=params,
headers=headers,
cookies=cookies,
verify=verify,
cert=cert,
http1=http1,
http2=http2,
timeout=timeout,
limits=limits,
max_redirects=max_redirects,
debug=debug,
disable_warnings=disable_warnings,
retries=retries)
@keyword("Session Exists")
def session_exists(self, alias):
"""Return True if the session has been already created
``alias`` that has been used to identify the Session object in the cache
"""
try:
self._cache[alias]
return True
except RuntimeError:
return False
@keyword("Delete All Sessions")
def delete_all_sessions(self):
""" Removes all the session objects """
logger.info('Delete All Sessions')
self._cache.empty_cache()
# TODO this is not covered by any tests
@keyword("Update Session")
def update_session(self, alias, headers=None, cookies=None):
"""Update Session Headers: update a HTTP Session Headers
``alias`` Robot Framework alias to identify the session
``headers`` Dictionary of headers merge into session
"""
session = self._cache.switch(alias)
if headers is not None:
session.headers.update(headers)
if cookies is not None:
session.cookies.update(cookies)
def _common_request(
self,
method,
session,
uri,
**kwargs):
method_function = getattr(session, method)
self._capture_output()
# call the matching httpx Client method (get, post, ...) with the resolved url
resp = method_function(
self._get_url(session, uri),
**kwargs)
log.log_request(resp)
self._print_debug()
session.last_resp = resp
log.log_response(resp)
data = kwargs.get('data', None)
# close a file descriptor passed as data once a GET request has been sent
if method == "get":
if is_file_descriptor(data):
data.close()
return resp
@staticmethod
def _check_status(expected_status, resp, msg=None):
"""
Helper method to check HTTP status
"""
if not isinstance(resp, Response):
raise InvalidResponse(resp)
if expected_status is None:
resp.raise_for_status()
else:
if not is_string_type(expected_status):
raise InvalidExpectedStatus(expected_status)
if expected_status.lower() in ['any', 'anything']:
return
try:
expected_status = int(expected_status)
except ValueError:
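# non-numeric statuses such as OK or NOT FOUND are resolved by name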
expected_status = utils.parse_named_status(expected_status)
msg = '' if msg is None else '{} '.format(msg)
msg = "{}Url: {} Expected status".format(msg, resp.url)
assert_equal(resp.status_code, expected_status, msg)
@staticmethod
def _get_url(session, uri):
"""
Helper method to get the full url
"""
url = session.url
if uri:
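# join the base url and uri, adding a separating slash only when it is missing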
slash = '' if uri.startswith('/') else '/'
url = "%s%s%s" % (session.url, slash, uri)
return url
# FIXME might be broken we need a test for this
@staticmethod
def _get_timeout(timeout):
return float(timeout) if timeout is not None else DEFAULT_TIMEOUT_CONFIG
def _capture_output(self):
if self.debug >= 1:
self.http_log = utils.WritableObject()
sys.stdout = self.http_log
def _print_debug(self):
if self.debug >= 1:
sys.stdout = sys.__stdout__ # Restore stdout
debug_info = ''.join(
self.http_log.content).replace(
'\\r',
'').replace(
'\'',
'')
# Remove empty lines
debug_info = "\n".join(
[ll.rstrip() for ll in debug_info.splitlines() if ll.strip()])
logger.debug(debug_info)
| 35.458005
| 102
| 0.558718
| 2,903
| 27,019
| 5.133999
| 0.105408
| 0.027912
| 0.045089
| 0.02657
| 0.725845
| 0.719941
| 0.719002
| 0.716989
| 0.708266
| 0.708266
| 0
| 0.005856
| 0.367963
| 27,019
| 761
| 103
| 35.504599
| 0.866897
| 0.443244
| 0
| 0.624402
| 0
| 0
| 0.060016
| 0.002311
| 0
| 0
| 0
| 0.002628
| 0.009569
| 1
| 0.035885
| false
| 0.002392
| 0.035885
| 0.002392
| 0.105263
| 0.004785
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
259194ba9db51ccd548c747207ef364d3e67b6e3
| 943
|
py
|
Python
|
src/pandas_profiling_study/report/presentation/flavours/html/__init__.py
|
lucasiscoviciMoon/pandas-profiling-study
|
142d3b0f5e3139cdb531819f637a407682fa5684
|
[
"MIT"
] | null | null | null |
src/pandas_profiling_study/report/presentation/flavours/html/__init__.py
|
lucasiscoviciMoon/pandas-profiling-study
|
142d3b0f5e3139cdb531819f637a407682fa5684
|
[
"MIT"
] | null | null | null |
src/pandas_profiling_study/report/presentation/flavours/html/__init__.py
|
lucasiscoviciMoon/pandas-profiling-study
|
142d3b0f5e3139cdb531819f637a407682fa5684
|
[
"MIT"
] | 1
|
2020-04-25T15:20:39.000Z
|
2020-04-25T15:20:39.000Z
|
from .....report.presentation.flavours.html.sequence import HTMLSequence
from .....report.presentation.flavours.html.table import HTMLTable
from .....report.presentation.flavours.html.variable import HTMLVariable
from .....report.presentation.flavours.html.image import HTMLImage
from .....report.presentation.flavours.html.frequency_table import (
HTMLFrequencyTable,
)
from .....report.presentation.flavours.html.frequency_table_small import (
HTMLFrequencyTableSmall,
)
from .....report.presentation.flavours.html.variable_info import (
HTMLVariableInfo,
)
from .....report.presentation.flavours.html.html import HTMLHTML
from .....report.presentation.flavours.html.sample import HTMLSample
from .....report.presentation.flavours.html.toggle_button import (
HTMLToggleButton,
)
from .....report.presentation.flavours.html.warnings import HTMLWarnings
from .....report.presentation.flavours.html.collapse import HTMLCollapse
| 44.904762
| 74
| 0.796394
| 101
| 943
| 7.386139
| 0.29703
| 0.160858
| 0.353887
| 0.482574
| 0.605898
| 0.241287
| 0.128686
| 0
| 0
| 0
| 0
| 0
| 0.076352
| 943
| 20
| 75
| 47.15
| 0.856487
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.6
| 0
| 0.6
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
25b987bcc20499d56a31f32942adb6428e389348
| 84
|
py
|
Python
|
q/__init__.py
|
rymurr/q
|
af44753108d2c569d520b6c1ef719a4e0b616f3e
|
[
"MIT"
] | null | null | null |
q/__init__.py
|
rymurr/q
|
af44753108d2c569d520b6c1ef719a4e0b616f3e
|
[
"MIT"
] | null | null | null |
q/__init__.py
|
rymurr/q
|
af44753108d2c569d520b6c1ef719a4e0b616f3e
|
[
"MIT"
] | null | null | null |
from parser import parse
from unparser import format_bits
from conn import connect
| 21
| 33
| 0.845238
| 13
| 84
| 5.384615
| 0.692308
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.154762
| 84
| 3
| 34
| 28
| 0.985915
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
25d5a186abb2a04662a3c3917256e4b24a395385
| 126
|
py
|
Python
|
tests/conftest.py
|
pritul95/pycicd-probable-octo-palm-tree
|
9c550d75155b4db5d3dbba19ff0647207fe1bd56
|
[
"MIT"
] | null | null | null |
tests/conftest.py
|
pritul95/pycicd-probable-octo-palm-tree
|
9c550d75155b4db5d3dbba19ff0647207fe1bd56
|
[
"MIT"
] | null | null | null |
tests/conftest.py
|
pritul95/pycicd-probable-octo-palm-tree
|
9c550d75155b4db5d3dbba19ff0647207fe1bd56
|
[
"MIT"
] | null | null | null |
import pytest
from sdk.client import Client
@pytest.fixture
def client() -> Client:
client = Client()
yield client
| 12.6
| 29
| 0.698413
| 16
| 126
| 5.5
| 0.5
| 0.409091
| 0.409091
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.214286
| 126
| 9
| 30
| 14
| 0.888889
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.166667
| false
| 0
| 0.333333
| 0
| 0.5
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
25e9d1fa5b0e4a306e7157871d3639c333a86d02
| 46
|
py
|
Python
|
__init__.py
|
cburggie/pytrace
|
078b9e712be00d309062d1de82f3bbe3bad20848
|
[
"MIT"
] | null | null | null |
__init__.py
|
cburggie/pytrace
|
078b9e712be00d309062d1de82f3bbe3bad20848
|
[
"MIT"
] | null | null | null |
__init__.py
|
cburggie/pytrace
|
078b9e712be00d309062d1de82f3bbe3bad20848
|
[
"MIT"
] | null | null | null |
from src import Camera, Tracer, World, Stereo
| 23
| 45
| 0.782609
| 7
| 46
| 5.142857
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.152174
| 46
| 1
| 46
| 46
| 0.923077
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
25ea5bfaa423a17ddfabc5cb72b10c8d3513e322
| 206
|
py
|
Python
|
molecule_podman/test/conftest.py
|
javierpena/molecule-podman
|
92e21b64547ab14b0f3478851e0ab16334443abd
|
[
"MIT"
] | null | null | null |
molecule_podman/test/conftest.py
|
javierpena/molecule-podman
|
92e21b64547ab14b0f3478851e0ab16334443abd
|
[
"MIT"
] | null | null | null |
molecule_podman/test/conftest.py
|
javierpena/molecule-podman
|
92e21b64547ab14b0f3478851e0ab16334443abd
|
[
"MIT"
] | null | null | null |
"""Pytest Fixtures."""
import pytest
from molecule.test.conftest import random_string, temp_dir # noqa
@pytest.fixture
def DRIVER():
"""Return name of the driver to be tested."""
return "podman"
| 20.6
| 66
| 0.708738
| 28
| 206
| 5.142857
| 0.821429
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.169903
| 206
| 9
| 67
| 22.888889
| 0.842105
| 0.300971
| 0
| 0
| 0
| 0
| 0.045113
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.2
| true
| 0
| 0.4
| 0
| 0.8
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
25fde38ee393e5eebd6c8093c23dfbcb6a9c1719
| 42
|
py
|
Python
|
modules/2.79/bpy/types/NodeSocketInterface.py
|
cmbasnett/fake-bpy-module
|
acb8b0f102751a9563e5b5e5c7cd69a4e8aa2a55
|
[
"MIT"
] | null | null | null |
modules/2.79/bpy/types/NodeSocketInterface.py
|
cmbasnett/fake-bpy-module
|
acb8b0f102751a9563e5b5e5c7cd69a4e8aa2a55
|
[
"MIT"
] | null | null | null |
modules/2.79/bpy/types/NodeSocketInterface.py
|
cmbasnett/fake-bpy-module
|
acb8b0f102751a9563e5b5e5c7cd69a4e8aa2a55
|
[
"MIT"
] | null | null | null |
def from_socket(node, socket):
pass
| 8.4
| 30
| 0.666667
| 6
| 42
| 4.5
| 0.833333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.238095
| 42
| 4
| 31
| 10.5
| 0.84375
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.5
| false
| 0.5
| 0
| 0
| 0.5
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 5
|
d35a6571363d570e71580c6bc391a7e0bc6d3285
| 21
|
py
|
Python
|
src/glyphs/backports/__init__.py
|
slorg1/glyphs
|
db498156897a1406545f041382913e2af69edc12
|
[
"MIT"
] | 1
|
2019-05-09T14:35:31.000Z
|
2019-05-09T14:35:31.000Z
|
src/glyphs/__init__.py
|
slorg1/glyphs
|
db498156897a1406545f041382913e2af69edc12
|
[
"MIT"
] | null | null | null |
src/glyphs/__init__.py
|
slorg1/glyphs
|
db498156897a1406545f041382913e2af69edc12
|
[
"MIT"
] | 1
|
2019-05-10T16:06:30.000Z
|
2019-05-10T16:06:30.000Z
|
# this space for rent
| 21
| 21
| 0.761905
| 4
| 21
| 4
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.190476
| 21
| 1
| 21
| 21
| 0.941176
| 0.904762
| 0
| null | 0
| null | 0
| 0
| null | 0
| 0
| 0
| null | 1
| null | true
| 0
| 0
| null | null | null | 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
d3a6e08f0c04737c577e5f56302f98b4d52febd9
| 87
|
py
|
Python
|
test/smokes/pylint/errors_only/bad.py
|
ybiquitous/runners
|
2aaef06a04f481f385d54e86503b2eaf3d61873d
|
[
"MIT"
] | 10
|
2019-08-20T06:52:57.000Z
|
2021-11-07T17:51:23.000Z
|
test/smokes/pylint/errors_only/bad.py
|
ybiquitous/runners
|
2aaef06a04f481f385d54e86503b2eaf3d61873d
|
[
"MIT"
] | 1,662
|
2019-08-20T01:43:30.000Z
|
2022-03-28T03:34:32.000Z
|
test/smokes/pylint/errors_only/bad.py
|
ybiquitous/runners
|
2aaef06a04f481f385d54e86503b2eaf3d61873d
|
[
"MIT"
] | 11
|
2019-08-19T07:04:52.000Z
|
2022-03-25T05:29:51.000Z
|
class TestFile():
test = temp
def temp_method():
print('temp_method')
| 14.5
| 28
| 0.586207
| 10
| 87
| 4.9
| 0.7
| 0.408163
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.287356
| 87
| 5
| 29
| 17.4
| 0.790323
| 0
| 0
| 0
| 0
| 0
| 0.126437
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.25
| false
| 0
| 0
| 0
| 0.75
| 0.25
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
| 0
|
0
| 5
|
6cca8baf1b155ca571b2075adb5e7b37748cdd8d
| 12
|
py
|
Python
|
1.py
|
KrishPagarSchool/Python
|
3d43550128392979bf5147fc77a408235315608b
|
[
"MIT",
"Unlicense"
] | null | null | null |
1.py
|
KrishPagarSchool/Python
|
3d43550128392979bf5147fc77a408235315608b
|
[
"MIT",
"Unlicense"
] | null | null | null |
1.py
|
KrishPagarSchool/Python
|
3d43550128392979bf5147fc77a408235315608b
|
[
"MIT",
"Unlicense"
] | null | null | null |
print (2+3)
| 6
| 11
| 0.583333
| 3
| 12
| 2.333333
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.2
| 0.166667
| 12
| 1
| 12
| 12
| 0.5
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
|
0
| 5
|
6cf4fc37e60d9f07781cf7ad5155badad67ddbe1
| 138
|
py
|
Python
|
pyex_pkg/pyex_pkg/module1.py
|
uiuc-bioinf-club/cheetSheets
|
537f85debfb3d98cd718963721b87a255913161b
|
[
"MIT"
] | null | null | null |
pyex_pkg/pyex_pkg/module1.py
|
uiuc-bioinf-club/cheetSheets
|
537f85debfb3d98cd718963721b87a255913161b
|
[
"MIT"
] | null | null | null |
pyex_pkg/pyex_pkg/module1.py
|
uiuc-bioinf-club/cheetSheets
|
537f85debfb3d98cd718963721b87a255913161b
|
[
"MIT"
] | 2
|
2019-02-18T23:18:31.000Z
|
2021-07-21T19:23:58.000Z
|
print("Imported module1 of pyex_pkg")
import nltk
print("successfully imported nltk")
def func1(x):
return x+1
def func2(x):
return x+2
| 17.25
| 37
| 0.746377
| 24
| 138
| 4.25
| 0.666667
| 0.137255
| 0.156863
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.042017
| 0.137681
| 138
| 7
| 38
| 19.714286
| 0.815126
| 0
| 0
| 0
| 0
| 0
| 0.391304
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.285714
| false
| 0
| 0.428571
| 0.285714
| 1
| 0.285714
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 1
| 0
| 0
|
0
| 5
|
9f036b9732458fc8933afbf0274600f660ac4746
| 828
|
py
|
Python
|
express/parsers/mixins/reciprocal.py
|
Exabyte-io/exabyte-express
|
579cc1ad3666352848e0ac8eeec84cb410a9a9c7
|
[
"Apache-2.0"
] | null | null | null |
express/parsers/mixins/reciprocal.py
|
Exabyte-io/exabyte-express
|
579cc1ad3666352848e0ac8eeec84cb410a9a9c7
|
[
"Apache-2.0"
] | null | null | null |
express/parsers/mixins/reciprocal.py
|
Exabyte-io/exabyte-express
|
579cc1ad3666352848e0ac8eeec84cb410a9a9c7
|
[
"Apache-2.0"
] | null | null | null |
from abc import abstractmethod
class ReciprocalDataMixin(object):
"""
Defines reciprocal interfaces.
Note:
THE FORMAT OF DATA STRUCTURE RETURNED MUST BE PRESERVED IN IMPLEMENTATION.
"""
@abstractmethod
def ibz_k_points(self):
"""
Returns ibz_k_points.
Returns:
ndarray
Example:
[
[ 0.00000000e+00 0.00000000e+00 0.00000000e+00]
[ -4.84710133e-17 -4.84710133e-17 -5.00000000e-01]
[ 0.00000000e+00 -5.00000000e-01 0.00000000e+00]
[ -4.84710133e-17 -5.00000000e-01 -5.00000000e-01]
[ -5.00000000e-01 6.58404272e-17 0.00000000e+00]
[ -5.00000000e-01 -5.00000000e-01 0.00000000e+00]
]
"""
pass
| 26.709677
| 82
| 0.536232
| 90
| 828
| 4.888889
| 0.455556
| 0.159091
| 0.190909
| 0.088636
| 0.463636
| 0.463636
| 0
| 0
| 0
| 0
| 0
| 0.374291
| 0.361111
| 828
| 30
| 83
| 27.6
| 0.457467
| 0.663043
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.2
| false
| 0.2
| 0.2
| 0
| 0.6
| 0
| 0
| 0
| 0
| null | 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
|
0
| 5
|
9f2ae3f6600f22846b311da570f2cfa5087fa6e2
| 310
|
py
|
Python
|
Data_Generator_Tool/Generator/CreditCardDetails.py
|
Jag46/IBM_Test_Data_generator_tool
|
ef14a8dc9bf1bc8dd5c3de5cb7f0fe2634906168
|
[
"Apache-2.0"
] | null | null | null |
Data_Generator_Tool/Generator/CreditCardDetails.py
|
Jag46/IBM_Test_Data_generator_tool
|
ef14a8dc9bf1bc8dd5c3de5cb7f0fe2634906168
|
[
"Apache-2.0"
] | null | null | null |
Data_Generator_Tool/Generator/CreditCardDetails.py
|
Jag46/IBM_Test_Data_generator_tool
|
ef14a8dc9bf1bc8dd5c3de5cb7f0fe2634906168
|
[
"Apache-2.0"
] | null | null | null |
from faker import Faker
global obj
obj = Faker()
def get_credit_card_number():
return obj.credit_card_number(card_type=None)
def get_credit_card_provider():
return obj.credit_card_provider(card_type=None)
def get_credit_card_security_code():
return obj.credit_card_security_code(card_type=None)
| 23.846154
| 56
| 0.806452
| 49
| 310
| 4.693878
| 0.326531
| 0.26087
| 0.156522
| 0.208696
| 0.243478
| 0.243478
| 0.243478
| 0
| 0
| 0
| 0
| 0
| 0.116129
| 310
| 13
| 56
| 23.846154
| 0.839416
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| false
| 0
| 0.111111
| 0.333333
| 0.777778
| 0
| 0
| 0
| 0
| null | 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 5
|
9f43cf103e1e8a16b73bb083dd18e7802b68b844
| 130
|
py
|
Python
|
tests/web_platform/CSS2/box/test_ltr_ib.py
|
jonboland/colosseum
|
cbf974be54fd7f6fddbe7285704cfaf7a866c5c5
|
[
"BSD-3-Clause"
] | 71
|
2015-04-13T09:44:14.000Z
|
2019-03-24T01:03:02.000Z
|
tests/web_platform/CSS2/box/test_ltr_ib.py
|
jonboland/colosseum
|
cbf974be54fd7f6fddbe7285704cfaf7a866c5c5
|
[
"BSD-3-Clause"
] | 35
|
2019-05-06T15:26:09.000Z
|
2022-03-28T06:30:33.000Z
|
tests/web_platform/CSS2/box/test_ltr_ib.py
|
jonboland/colosseum
|
cbf974be54fd7f6fddbe7285704cfaf7a866c5c5
|
[
"BSD-3-Clause"
] | 139
|
2015-05-30T18:37:43.000Z
|
2019-03-27T17:14:05.000Z
|
from tests.utils import W3CTestCase
class TestLtrIb(W3CTestCase):
vars().update(W3CTestCase.find_tests(__file__, 'ltr-ib'))
| 21.666667
| 61
| 0.769231
| 16
| 130
| 5.9375
| 0.8125
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.025862
| 0.107692
| 130
| 5
| 62
| 26
| 0.793103
| 0
| 0
| 0
| 0
| 0
| 0.046154
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.333333
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
9fb2a13e6e176d6f3e8ff5b40427ebf47b9e8faa
| 191
|
py
|
Python
|
Lintcode/Ladder_28_S/1_Easy/53. Reverse Words in a String.py
|
ctc316/algorithm-python
|
ac4580d55e05e93e407c6156c9bb801808027d60
|
[
"MIT"
] | null | null | null |
Lintcode/Ladder_28_S/1_Easy/53. Reverse Words in a String.py
|
ctc316/algorithm-python
|
ac4580d55e05e93e407c6156c9bb801808027d60
|
[
"MIT"
] | null | null | null |
Lintcode/Ladder_28_S/1_Easy/53. Reverse Words in a String.py
|
ctc316/algorithm-python
|
ac4580d55e05e93e407c6156c9bb801808027d60
|
[
"MIT"
] | null | null | null |
class Solution:
"""
@param: s: A string
@return: A string
"""
def reverseWords(self, s):
return " ".join(list(filter(lambda x: x != "", s.split(" ")))[::-1])
| 27.285714
| 76
| 0.492147
| 23
| 191
| 4.086957
| 0.73913
| 0.148936
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.007407
| 0.293194
| 191
| 7
| 76
| 27.285714
| 0.688889
| 0.193717
| 0
| 0
| 0
| 0
| 0.015504
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| false
| 0
| 0
| 0.333333
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 5
|
4c85a889eccfd0f11e1ca747a85a291160fc0eb5
| 622
|
py
|
Python
|
utils/tank_fun.py
|
zero-eclipse/battle_game
|
2294b946b631cf9fd9b55515cf6b3b112841009b
|
[
"Apache-2.0"
] | null | null | null |
utils/tank_fun.py
|
zero-eclipse/battle_game
|
2294b946b631cf9fd9b55515cf6b3b112841009b
|
[
"Apache-2.0"
] | 1
|
2020-01-06T05:24:40.000Z
|
2020-01-06T05:24:40.000Z
|
utils/tank_fun.py
|
zero-eclipse/battle_game
|
2294b946b631cf9fd9b55515cf6b3b112841009b
|
[
"Apache-2.0"
] | null | null | null |
from abc import *
from utils.params import Direction
class Display(metaclass=ABCMeta):
@abstractmethod
def show(self):
pass
class Move(metaclass=ABCMeta):
@abstractmethod
def move(self, direct):
pass
@abstractmethod
def is_inflict_wall(self, block):
pass
class Block(metaclass=ABCMeta):
pass
class Order(metaclass=ABCMeta):
@abstractmethod
def get_order(self):
pass
class AutoMove(Move, ABC):
@abstractmethod
def is_inflict_wall(self, block):
pass
@abstractmethod
def move(self,direct=Direction.NONE):
pass
| 14.809524
| 41
| 0.655949
| 70
| 622
| 5.757143
| 0.357143
| 0.253102
| 0.223325
| 0.245658
| 0.367246
| 0.2134
| 0.2134
| 0.2134
| 0
| 0
| 0
| 0
| 0.26045
| 622
| 41
| 42
| 15.170732
| 0.876087
| 0
| 0
| 0.576923
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.230769
| false
| 0.269231
| 0.076923
| 0
| 0.5
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 5
|
4c85e7bee248c1d928362eeb655c26cd60283b8e
| 4,984
|
py
|
Python
|
gazenet/models/shared_components/attentive_convlstm/model.py
|
modular-ml/gasp-gated-attention-for-saliency-prediction
|
e2e1b008ab916ae5f7e51fbf09aa1da8be22be6d
|
[
"MIT"
] | 1
|
2021-09-22T07:50:39.000Z
|
2021-09-22T07:50:39.000Z
|
gazenet/models/shared_components/attentive_convlstm/model.py
|
modular-ml/gasp-gated-attention-for-saliency-prediction
|
e2e1b008ab916ae5f7e51fbf09aa1da8be22be6d
|
[
"MIT"
] | null | null | null |
gazenet/models/shared_components/attentive_convlstm/model.py
|
modular-ml/gasp-gated-attention-for-saliency-prediction
|
e2e1b008ab916ae5f7e51fbf09aa1da8be22be6d
|
[
"MIT"
] | 1
|
2022-01-14T22:55:38.000Z
|
2022-01-14T22:55:38.000Z
|
import torch.nn as nn
nb_timestep = 4
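# number of recurrent attention steps applied in AttentiveLSTM.forward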
# https://github.com/PanoAsh/Saliency-Attentive-Model-Pytorch/blob/master/main.py
class AttentiveLSTM(nn.Module):
def __init__(self, nb_features_in, nb_features_out, nb_features_att, nb_rows, nb_cols):
super(AttentiveLSTM, self).__init__()
# define the fundamental parameters
self.nb_features_in = nb_features_in
self.nb_features_out = nb_features_out
self.nb_features_att = nb_features_att
self.nb_rows = nb_rows
self.nb_cols = nb_cols
# define convs
self.W_a = nn.Conv2d(in_channels=self.nb_features_att, out_channels=self.nb_features_att,
kernel_size=self.nb_rows, stride=1, padding=1, dilation=1, groups=1, bias=True)
self.U_a = nn.Conv2d(in_channels=self.nb_features_in, out_channels=self.nb_features_att,
kernel_size=self.nb_rows, stride=1, padding=1, dilation=1, groups=1, bias=True)
self.V_a = nn.Conv2d(in_channels=self.nb_features_att, out_channels=1,
kernel_size=self.nb_rows, stride=1, padding=1, dilation=1, groups=1, bias=False)
self.W_i = nn.Conv2d(in_channels=self.nb_features_in, out_channels=self.nb_features_out,
kernel_size=self.nb_rows, stride=1, padding=1, dilation=1, groups=1, bias=True)
self.U_i = nn.Conv2d(in_channels=self.nb_features_out, out_channels=self.nb_features_out,
kernel_size=self.nb_rows, stride=1, padding=1, dilation=1, groups=1, bias=True)
self.W_f = nn.Conv2d(in_channels=self.nb_features_in, out_channels=self.nb_features_out,
kernel_size=self.nb_rows, stride=1, padding=1, dilation=1, groups=1, bias=True)
self.U_f = nn.Conv2d(in_channels=self.nb_features_out, out_channels=self.nb_features_out,
kernel_size=self.nb_rows, stride=1, padding=1, dilation=1, groups=1, bias=True)
self.W_c = nn.Conv2d(in_channels=self.nb_features_in, out_channels=self.nb_features_out,
kernel_size=self.nb_rows, stride=1, padding=1, dilation=1, groups=1, bias=True)
self.U_c = nn.Conv2d(in_channels=self.nb_features_out, out_channels=self.nb_features_out,
kernel_size=self.nb_rows, stride=1, padding=1, dilation=1, groups=1, bias=True)
self.W_o = nn.Conv2d(in_channels=self.nb_features_in, out_channels=self.nb_features_out,
kernel_size=self.nb_rows, stride=1, padding=1, dilation=1, groups=1, bias=True)
self.U_o = nn.Conv2d(in_channels=self.nb_features_out, out_channels=self.nb_features_out,
kernel_size=self.nb_rows, stride=1, padding=1, dilation=1, groups=1, bias=True)
# define activations
self.tanh = nn.Tanh()
self.sigmoid = nn.Sigmoid()
self.softmax = nn.Softmax(dim=-1)
# define number of temporal steps
self.nb_ts = nb_timestep
def forward(self, x):
# get the current cell memory and hidden state
h_curr, c_curr = x, x
for i in range(self.nb_ts):
# the attentive model
my_Z = self.V_a(self.tanh(self.W_a(h_curr) + self.U_a(x)))
my_A = self.softmax(my_Z)
AM_cL = my_A * x
# the convLSTM model
my_I = self.sigmoid(self.W_i(AM_cL) + self.U_i(h_curr))
my_F = self.sigmoid(self.W_f(AM_cL) + self.U_f(h_curr))
my_O = self.sigmoid(self.W_o(AM_cL) + self.U_o(h_curr))
my_G = self.tanh(self.W_c(AM_cL) + self.U_c(h_curr))
c_next = my_G * my_I + my_F * c_curr
h_next = self.tanh(c_next) * my_O
c_curr = c_next
h_curr = h_next
return h_curr
class SequenceAttentiveLSTM(AttentiveLSTM):
def __init__(self, *args, sequence_len=2, sequence_norm=True, **kwargs):
super().__init__(*args, **kwargs)
if sequence_norm:
self.sequence_norm = nn.BatchNorm3d(sequence_len)
# self.sequence_len = sequence_len
else:
self.sequence_norm = lambda x : x
# self.sequence_len = None
def forward(self, x):
x = self.sequence_norm(x)
# get the current cell memory and hidden state
h_curr, c_curr = x[:,0], x[:,0]
for i in range(x.shape[1]): # for i in range(self.sequence_len):
# the attentive model
my_Z = self.V_a(self.tanh(self.W_a(h_curr) + self.U_a(x[:,i])))
my_A = self.softmax(my_Z)
AM_cL = my_A * x[:,i]
# the convLSTM model
my_I = self.sigmoid(self.W_i(AM_cL) + self.U_i(h_curr))
my_F = self.sigmoid(self.W_f(AM_cL) + self.U_f(h_curr))
my_O = self.sigmoid(self.W_o(AM_cL) + self.U_o(h_curr))
my_G = self.tanh(self.W_c(AM_cL) + self.U_c(h_curr))
c_next = my_G * my_I + my_F * c_curr
h_next = self.tanh(c_next) * my_O
c_curr = c_next
h_curr = h_next
return h_curr
| 44.106195
| 97
| 0.635032
| 802
| 4,984
| 3.647132
| 0.127182
| 0.082051
| 0.119658
| 0.157949
| 0.748376
| 0.725812
| 0.708034
| 0.708034
| 0.704957
| 0.704957
| 0
| 0.016895
| 0.251806
| 4,984
| 112
| 98
| 44.5
| 0.767498
| 0.087881
| 0
| 0.432432
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.054054
| false
| 0
| 0.013514
| 0
| 0.121622
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
4c9d43038ab2e70919efa4a32302db8dde3ba317
| 58
|
py
|
Python
|
ctools/model/dqn/__init__.py
|
XinyuJing/DI-star
|
b573a5462e3d0ab72298c767eb945742e36fa6d8
|
[
"Apache-2.0"
] | 267
|
2021-07-08T02:18:08.000Z
|
2022-03-02T11:37:33.000Z
|
ctools/model/dqn/__init__.py
|
XinyuJing/DI-star
|
b573a5462e3d0ab72298c767eb945742e36fa6d8
|
[
"Apache-2.0"
] | 5
|
2021-07-15T22:55:22.000Z
|
2022-01-11T15:28:10.000Z
|
ctools/model/dqn/__init__.py
|
XinyuJing/DI-star
|
b573a5462e3d0ab72298c767eb945742e36fa6d8
|
[
"Apache-2.0"
] | 35
|
2021-07-08T08:01:51.000Z
|
2022-02-10T07:00:24.000Z
|
from .dqn_network import FCDQN, ConvDQN, FCDRQN, ConvDRQN
| 29
| 57
| 0.810345
| 8
| 58
| 5.75
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.12069
| 58
| 1
| 58
| 58
| 0.901961
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
4ca4ec6df14d126d62113d72e70355ebe38a113e
| 268
|
py
|
Python
|
StanfordQuadruped/src/Utilities.py
|
jensanjo/QuadrupedRobot
|
78b010d92e6302c92372b56384025a75b0124a27
|
[
"MIT"
] | 1,125
|
2020-02-23T01:00:57.000Z
|
2022-03-31T10:45:38.000Z
|
StanfordQuadruped/src/Utilities.py
|
jensanjo/QuadrupedRobot
|
78b010d92e6302c92372b56384025a75b0124a27
|
[
"MIT"
] | 37
|
2021-06-01T00:12:14.000Z
|
2022-03-28T11:29:17.000Z
|
StanfordQuadruped/src/Utilities.py
|
jensanjo/QuadrupedRobot
|
78b010d92e6302c92372b56384025a75b0124a27
|
[
"MIT"
] | 326
|
2020-03-09T15:32:11.000Z
|
2022-03-26T15:55:54.000Z
|
import numpy as np
def deadband(value, band_radius):
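# values within +/- band_radius collapse to 0; values outside are shifted toward 0 by band_radius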
return max(value - band_radius, 0) + min(value + band_radius, 0)
def clipped_first_order_filter(input, target, max_rate, tau):
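# first-order response toward target with time constant tau, rate-limited to +/- max_rate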
rate = (target - input) / tau
return np.clip(rate, -max_rate, max_rate)
| 24.363636
| 68
| 0.705224
| 42
| 268
| 4.285714
| 0.52381
| 0.15
| 0.25
| 0.177778
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.009132
| 0.182836
| 268
| 10
| 69
| 26.8
| 0.812785
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| false
| 0
| 0.166667
| 0.166667
| 0.833333
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 5
|
4cf64b7b4fad45afa67724748c2d35bf98eb3d5c
| 231
|
py
|
Python
|
htsinfer/infer_read_orientation.py
|
dcpb0/htsinfer
|
426f34186917020815298999bd93ea23229d9d1b
|
[
"Apache-2.0"
] | null | null | null |
htsinfer/infer_read_orientation.py
|
dcpb0/htsinfer
|
426f34186917020815298999bd93ea23229d9d1b
|
[
"Apache-2.0"
] | null | null | null |
htsinfer/infer_read_orientation.py
|
dcpb0/htsinfer
|
426f34186917020815298999bd93ea23229d9d1b
|
[
"Apache-2.0"
] | null | null | null |
"""Infer read orientation from sample data."""
def infer():
"""Main function coordinating the execution of all other functions.
Should be imported/called from main app and return results to it.
"""
# implement me
| 25.666667
| 71
| 0.69697
| 31
| 231
| 5.193548
| 0.903226
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.21645
| 231
| 8
| 72
| 28.875
| 0.889503
| 0.800866
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| true
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 1
| 0
|
0
| 5
|
e28dee8a9bdca650dbc4b2db7f6475f38085f7a8
| 2,040
|
py
|
Python
|
teszt/test_feladat01.py
|
python-feladatok-tesztekkel/07-01-05-osszefoglalas
|
95b5ff6c2e7020ca71cb2c577d793d3920f938a6
|
[
"CC0-1.0"
] | null | null | null |
teszt/test_feladat01.py
|
python-feladatok-tesztekkel/07-01-05-osszefoglalas
|
95b5ff6c2e7020ca71cb2c577d793d3920f938a6
|
[
"CC0-1.0"
] | null | null | null |
teszt/test_feladat01.py
|
python-feladatok-tesztekkel/07-01-05-osszefoglalas
|
95b5ff6c2e7020ca71cb2c577d793d3920f938a6
|
[
"CC0-1.0"
] | null | null | null |
from unittest import TestCase
import os,sys,inspect
current_dir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
parent_dir = os.path.dirname(current_dir)
sys.path.insert(0, parent_dir)
import feladatok
class TestMegtatlahtoMegye(TestCase):
def test_feladat_ures(self):
megyek=[]
keresett="Baranya"
aktualis = feladatok.megtalalhato(megyek,keresett)
elvart = False
print(megyek)
self.assertEqual(elvart, aktualis, "Incorrectly determined whether the county "+ keresett+" can be found in the list.")
def test_feladat_elso(self):
megyek=["Bács-Kiskun","Csongrád-Csanád","Fejér","Nógrád","Pest","Tolna"]
keresett="Bács-Kiskun"
aktualis = feladatok.megtalalhato(megyek,keresett)
elvart = True
print(megyek)
self.assertEqual(elvart, aktualis, "Incorrectly determined whether the county "+ keresett+" can be found in the list.")
def test_feladat_utolso(self):
megyek=["Bács-Kiskun","Csongrád-Csanád","Fejér","Nógrád","Pest","Tolna"]
keresett="Tolna"
aktualis = feladatok.megtalalhato(megyek,keresett)
elvart = True
print(megyek)
self.assertEqual(elvart, aktualis, "Incorrectly determined whether the county "+ keresett+" can be found in the list.")
def test_feladat_kozepe(self):
megyek=["Bács-Kiskun","Csongrád-Csanád","Fejér","Nógrád","Pest","Tolna"]
keresett="Nógrád"
aktualis = feladatok.megtalalhato(megyek,keresett)
elvart = True
print(megyek)
self.assertEqual(elvart, aktualis, "Incorrectly determined whether the county "+ keresett+" can be found in the list.")
def test_feladat_nincs(self):
megyek=["Bács-Kiskun","Csongrád-Csanád","Fejér","Nógrád","Pest","Tolna"]
keresett="Vas"
aktualis = feladatok.megtalalhato(megyek,keresett)
elvart = False
print(megyek)
self.assertEqual(elvart, aktualis, "Incorrectly determined whether the county "+ keresett+" can be found in the list.")
| 46.363636
| 121
| 0.67598
| 231
| 2,040
| 5.909091
| 0.246753
| 0.061538
| 0.051282
| 0.128205
| 0.777289
| 0.777289
| 0.777289
| 0.777289
| 0.777289
| 0.777289
| 0
| 0.000609
| 0.195588
| 2,040
| 44
| 122
| 46.363636
| 0.8312
| 0
| 0
| 0.571429
| 0
| 0
| 0.257717
| 0
| 0
| 0
| 0
| 0
| 0.119048
| 1
| 0.119048
| false
| 0
| 0.071429
| 0
| 0.214286
| 0.119048
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
2c35d8bee675fa1f9a42cd2585db579b4a98e190
| 133
|
py
|
Python
|
moonleap/outputpath/resources.py
|
mnieber/gen
|
65f8aa4fb671c4f90d5cbcb1a0e10290647a31d9
|
[
"MIT"
] | null | null | null |
moonleap/outputpath/resources.py
|
mnieber/gen
|
65f8aa4fb671c4f90d5cbcb1a0e10290647a31d9
|
[
"MIT"
] | null | null | null |
moonleap/outputpath/resources.py
|
mnieber/gen
|
65f8aa4fb671c4f90d5cbcb1a0e10290647a31d9
|
[
"MIT"
] | null | null | null |
from dataclasses import dataclass
from moonleap.resource import Resource
@dataclass
class OutputPath(Resource):
location: str
| 14.777778
| 38
| 0.804511
| 15
| 133
| 7.133333
| 0.666667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.150376
| 133
| 8
| 39
| 16.625
| 0.946903
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.4
| 0
| 0.8
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
2c3612fa5a5cdb10c96a1317f3f0dc4afb33ad89
| 166
|
py
|
Python
|
simulator.py
|
AndreaKarlova/BLOX
|
f103c232009ebff61dcfb1de09a3f5df6c130877
|
[
"MIT"
] | 19
|
2020-02-21T07:22:31.000Z
|
2021-09-15T22:00:19.000Z
|
simulator.py
|
rob8718/BLOX
|
f103c232009ebff61dcfb1de09a3f5df6c130877
|
[
"MIT"
] | null | null | null |
simulator.py
|
rob8718/BLOX
|
f103c232009ebff61dcfb1de09a3f5df6c130877
|
[
"MIT"
] | 4
|
2020-08-08T21:37:38.000Z
|
2021-04-24T12:20:15.000Z
|
import csv
def simulation(parameter):
#Please call your simulation program with the input parameter
#and return its result
print('Simulation')
| 18.444444
| 65
| 0.698795
| 20
| 166
| 5.8
| 0.85
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.246988
| 166
| 8
| 66
| 20.75
| 0.928
| 0.487952
| 0
| 0
| 0
| 0
| 0.120482
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| false
| 0
| 0.333333
| 0
| 0.666667
| 0.333333
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
2c8d41f2bea794a70ad7ec39d84322fe3304e038
| 99
|
py
|
Python
|
etc/scripts/pygments_style.py
|
bkchung/dotfiles_old
|
396582eaea2a593f5f05908e136dca2cdf0fd29c
|
[
"Vim",
"curl",
"MIT"
] | 852
|
2015-01-15T23:22:27.000Z
|
2022-03-12T04:13:45.000Z
|
etc/scripts/pygments_style.py
|
bkchung/dotfiles_old
|
396582eaea2a593f5f05908e136dca2cdf0fd29c
|
[
"Vim",
"curl",
"MIT"
] | 6
|
2015-10-05T02:47:13.000Z
|
2022-03-11T15:34:31.000Z
|
etc/scripts/pygments_style.py
|
bkchung/dotfiles_old
|
396582eaea2a593f5f05908e136dca2cdf0fd29c
|
[
"Vim",
"curl",
"MIT"
] | 326
|
2015-02-26T12:37:39.000Z
|
2022-03-13T12:34:46.000Z
|
from pygments.styles import get_all_styles
styles = list(get_all_styles())
print '\n'.join(styles)
| 24.75
| 42
| 0.787879
| 16
| 99
| 4.625
| 0.625
| 0.162162
| 0.324324
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.090909
| 99
| 3
| 43
| 33
| 0.822222
| 0
| 0
| 0
| 0
| 0
| 0.020202
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0.333333
| null | null | 0.333333
| 1
| 0
| 0
| null | 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
2cb160031efbfbd4c732cd7067c12adbb78cef10
| 105
|
py
|
Python
|
enthought/appscripting/lazy_namespace.py
|
enthought/etsproxy
|
4aafd628611ebf7fe8311c9d1a0abcf7f7bb5347
|
[
"BSD-3-Clause"
] | 3
|
2016-12-09T06:05:18.000Z
|
2018-03-01T13:00:29.000Z
|
enthought/appscripting/lazy_namespace.py
|
enthought/etsproxy
|
4aafd628611ebf7fe8311c9d1a0abcf7f7bb5347
|
[
"BSD-3-Clause"
] | 1
|
2020-12-02T00:51:32.000Z
|
2020-12-02T08:48:55.000Z
|
enthought/appscripting/lazy_namespace.py
|
enthought/etsproxy
|
4aafd628611ebf7fe8311c9d1a0abcf7f7bb5347
|
[
"BSD-3-Clause"
] | null | null | null |
# proxy module
from __future__ import absolute_import
from apptools.appscripting.lazy_namespace import *
| 26.25
| 50
| 0.857143
| 13
| 105
| 6.461538
| 0.769231
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.104762
| 105
| 3
| 51
| 35
| 0.893617
| 0.114286
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
2cca39bb3a93d03cb3422c1b43bff4a7063a1ec0
| 49
|
py
|
Python
|
PokeType/__main__.py
|
Daggy1234/PokeType
|
a79c8115ca9bb13e24c4fd4db4931b3094a96549
|
[
"MIT"
] | 2
|
2021-11-06T14:09:40.000Z
|
2021-11-14T21:24:56.000Z
|
PokeType/__main__.py
|
Daggy1234/PokeType
|
a79c8115ca9bb13e24c4fd4db4931b3094a96549
|
[
"MIT"
] | null | null | null |
PokeType/__main__.py
|
Daggy1234/PokeType
|
a79c8115ca9bb13e24c4fd4db4931b3094a96549
|
[
"MIT"
] | null | null | null |
if __name__ == '__main__':
print("CLI soon:tm:")
| 24.5
| 26
| 0.653061
| 7
| 49
| 3.428571
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.122449
| 49
| 2
| 27
| 24.5
| 0.55814
| 0
| 0
| 0
| 0
| 0
| 0.4
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0
| 0
| 0
| 0.5
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
|
0
| 5
|
e2ce54f32272884d0e530dc16ff5f54bf6fe663b
| 117
|
py
|
Python
|
dotloop/account.py
|
spentaur/dotloop-python
|
5374ab5f5e16f9b826438a9c4f051a4be53d433b
|
[
"MIT"
] | null | null | null |
dotloop/account.py
|
spentaur/dotloop-python
|
5374ab5f5e16f9b826438a9c4f051a4be53d433b
|
[
"MIT"
] | null | null | null |
dotloop/account.py
|
spentaur/dotloop-python
|
5374ab5f5e16f9b826438a9c4f051a4be53d433b
|
[
"MIT"
] | 1
|
2021-07-28T14:28:17.000Z
|
2021-07-28T14:28:17.000Z
|
from .bases import DotloopObject
class Account(DotloopObject):
def get(self):
return self.fetch('get')
| 16.714286
| 32
| 0.692308
| 14
| 117
| 5.785714
| 0.785714
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.205128
| 117
| 6
| 33
| 19.5
| 0.870968
| 0
| 0
| 0
| 0
| 0
| 0.025641
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.25
| false
| 0
| 0.25
| 0.25
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 5
|
e2d619a84a462d6fdd783d378745924b2315b0ef
| 159
|
py
|
Python
|
src/flask_batteries/template/src/routes/__init__.py
|
graydenshand/flask_boot
|
2aeb0d47543fc85a15e752a00bfa0d0ba9e23988
|
[
"MIT"
] | 1
|
2021-09-17T13:41:10.000Z
|
2021-09-17T13:41:10.000Z
|
src/flask_batteries/template/src/routes/__init__.py
|
graydenshand/flask_boot
|
2aeb0d47543fc85a15e752a00bfa0d0ba9e23988
|
[
"MIT"
] | null | null | null |
src/flask_batteries/template/src/routes/__init__.py
|
graydenshand/flask_boot
|
2aeb0d47543fc85a15e752a00bfa0d0ba9e23988
|
[
"MIT"
] | null | null | null |
from .index import index_view
def register_routes(app):
app.add_url_rule("/", view_func=index_view)
app.add_url_rule("/index", view_func=index_view)
| 22.714286
| 52
| 0.748428
| 26
| 159
| 4.192308
| 0.461538
| 0.330275
| 0.165138
| 0.238532
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.125786
| 159
| 6
| 53
| 26.5
| 0.784173
| 0
| 0
| 0
| 0
| 0
| 0.044025
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.25
| false
| 0
| 0.25
| 0
| 0.5
| 0
| 1
| 0
| 0
| null | 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
e2f29f6c1091b4f5c1f3e2b8564d06c9c1301394
| 20
|
py
|
Python
|
checkov/version.py
|
athurn/checkov
|
de59dcc91c1f2224facec01c68e150c7da813491
|
[
"Apache-2.0"
] | null | null | null |
checkov/version.py
|
athurn/checkov
|
de59dcc91c1f2224facec01c68e150c7da813491
|
[
"Apache-2.0"
] | null | null | null |
checkov/version.py
|
athurn/checkov
|
de59dcc91c1f2224facec01c68e150c7da813491
|
[
"Apache-2.0"
] | null | null | null |
version = '1.0.801'
| 10
| 19
| 0.6
| 4
| 20
| 3
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.294118
| 0.15
| 20
| 1
| 20
| 20
| 0.411765
| 0
| 0
| 0
| 0
| 0
| 0.35
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
3910cf27e046fee390b77eddb768d9d51431720e
| 644
|
py
|
Python
|
keras/applications/nasnet.py
|
PJmouraocs/keras
|
7a39b6c62d43c25472b2c2476bd2a8983ae4f682
|
[
"MIT"
] | 300
|
2018-04-04T05:01:21.000Z
|
2022-02-25T18:56:04.000Z
|
keras/applications/nasnet.py
|
PJmouraocs/keras
|
7a39b6c62d43c25472b2c2476bd2a8983ae4f682
|
[
"MIT"
] | 163
|
2018-04-03T17:41:22.000Z
|
2021-09-03T16:44:04.000Z
|
keras/applications/nasnet.py
|
PJmouraocs/keras
|
7a39b6c62d43c25472b2c2476bd2a8983ae4f682
|
[
"MIT"
] | 94
|
2016-02-17T20:59:27.000Z
|
2021-04-19T08:18:16.000Z
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from keras_applications import nasnet
from . import keras_modules_injection
@keras_modules_injection
def NASNetMobile(*args, **kwargs):
return nasnet.NASNetMobile(*args, **kwargs)
@keras_modules_injection
def NASNetLarge(*args, **kwargs):
return nasnet.NASNetLarge(*args, **kwargs)
@keras_modules_injection
def decode_predictions(*args, **kwargs):
return nasnet.decode_predictions(*args, **kwargs)
@keras_modules_injection
def preprocess_input(*args, **kwargs):
return nasnet.preprocess_input(*args, **kwargs)
| 23.851852
| 53
| 0.791925
| 76
| 644
| 6.328947
| 0.289474
| 0.16632
| 0.218295
| 0.199584
| 0.212058
| 0.212058
| 0
| 0
| 0
| 0
| 0
| 0
| 0.113354
| 644
| 26
| 54
| 24.769231
| 0.842382
| 0
| 0
| 0.235294
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.235294
| true
| 0
| 0.294118
| 0.235294
| 0.764706
| 0.058824
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 1
| 0
| 0
|
0
| 5
|
39442aaaf4dd7ba09d2a967f592094a150a782a4
| 88
|
py
|
Python
|
notifiers/wechat.py
|
AzusaChino/iris-python
|
92aa6bf23d5bf8f2ac4f3d2b0ee5f36e177d97d2
|
[
"MIT"
] | null | null | null |
notifiers/wechat.py
|
AzusaChino/iris-python
|
92aa6bf23d5bf8f2ac4f3d2b0ee5f36e177d97d2
|
[
"MIT"
] | null | null | null |
notifiers/wechat.py
|
AzusaChino/iris-python
|
92aa6bf23d5bf8f2ac4f3d2b0ee5f36e177d97d2
|
[
"MIT"
] | null | null | null |
import notifiers
class Wechat(notifiers):
def __init__(self) -> None:
pass
| 14.666667
| 31
| 0.659091
| 10
| 88
| 5.4
| 0.9
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.25
| 88
| 6
| 32
| 14.666667
| 0.818182
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.25
| false
| 0.25
| 0.25
| 0
| 0.75
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 1
| 0
|
0
| 5
|
3951c149406c38c9ba1728a90d3ed08b419f64c5
| 51
|
py
|
Python
|
rogal/tiles/__init__.py
|
kosciak/ecs-rogal
|
d553104e0ea350d11272d274a900419620b9389e
|
[
"MIT"
] | 4
|
2021-01-23T13:25:46.000Z
|
2021-03-19T03:08:05.000Z
|
rogal/tiles/__init__.py
|
kosciak/ecs-rogal
|
d553104e0ea350d11272d274a900419620b9389e
|
[
"MIT"
] | null | null | null |
rogal/tiles/__init__.py
|
kosciak/ecs-rogal
|
d553104e0ea350d11272d274a900419620b9389e
|
[
"MIT"
] | null | null | null |
from .core import RenderOrder, Glyph, Colors, Tile
| 25.5
| 50
| 0.784314
| 7
| 51
| 5.714286
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.137255
| 51
| 1
| 51
| 51
| 0.909091
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|