hexsha
string | size
int64 | ext
string | lang
string | max_stars_repo_path
string | max_stars_repo_name
string | max_stars_repo_head_hexsha
string | max_stars_repo_licenses
list | max_stars_count
int64 | max_stars_repo_stars_event_min_datetime
string | max_stars_repo_stars_event_max_datetime
string | max_issues_repo_path
string | max_issues_repo_name
string | max_issues_repo_head_hexsha
string | max_issues_repo_licenses
list | max_issues_count
int64 | max_issues_repo_issues_event_min_datetime
string | max_issues_repo_issues_event_max_datetime
string | max_forks_repo_path
string | max_forks_repo_name
string | max_forks_repo_head_hexsha
string | max_forks_repo_licenses
list | max_forks_count
int64 | max_forks_repo_forks_event_min_datetime
string | max_forks_repo_forks_event_max_datetime
string | content
string | avg_line_length
float64 | max_line_length
int64 | alphanum_fraction
float64 | qsc_code_num_words_quality_signal
int64 | qsc_code_num_chars_quality_signal
float64 | qsc_code_mean_word_length_quality_signal
float64 | qsc_code_frac_words_unique_quality_signal
float64 | qsc_code_frac_chars_top_2grams_quality_signal
float64 | qsc_code_frac_chars_top_3grams_quality_signal
float64 | qsc_code_frac_chars_top_4grams_quality_signal
float64 | qsc_code_frac_chars_dupe_5grams_quality_signal
float64 | qsc_code_frac_chars_dupe_6grams_quality_signal
float64 | qsc_code_frac_chars_dupe_7grams_quality_signal
float64 | qsc_code_frac_chars_dupe_8grams_quality_signal
float64 | qsc_code_frac_chars_dupe_9grams_quality_signal
float64 | qsc_code_frac_chars_dupe_10grams_quality_signal
float64 | qsc_code_frac_chars_replacement_symbols_quality_signal
float64 | qsc_code_frac_chars_digital_quality_signal
float64 | qsc_code_frac_chars_whitespace_quality_signal
float64 | qsc_code_size_file_byte_quality_signal
float64 | qsc_code_num_lines_quality_signal
float64 | qsc_code_num_chars_line_max_quality_signal
float64 | qsc_code_num_chars_line_mean_quality_signal
float64 | qsc_code_frac_chars_alphabet_quality_signal
float64 | qsc_code_frac_chars_comments_quality_signal
float64 | qsc_code_cate_xml_start_quality_signal
float64 | qsc_code_frac_lines_dupe_lines_quality_signal
float64 | qsc_code_cate_autogen_quality_signal
float64 | qsc_code_frac_lines_long_string_quality_signal
float64 | qsc_code_frac_chars_string_length_quality_signal
float64 | qsc_code_frac_chars_long_word_length_quality_signal
float64 | qsc_code_frac_lines_string_concat_quality_signal
float64 | qsc_code_cate_encoded_data_quality_signal
float64 | qsc_code_frac_chars_hex_words_quality_signal
float64 | qsc_code_frac_lines_prompt_comments_quality_signal
float64 | qsc_code_frac_lines_assert_quality_signal
float64 | qsc_codepython_cate_ast_quality_signal
float64 | qsc_codepython_frac_lines_func_ratio_quality_signal
float64 | qsc_codepython_cate_var_zero_quality_signal
bool | qsc_codepython_frac_lines_pass_quality_signal
float64 | qsc_codepython_frac_lines_import_quality_signal
float64 | qsc_codepython_frac_lines_simplefunc_quality_signal
float64 | qsc_codepython_score_lines_no_logic_quality_signal
float64 | qsc_codepython_frac_lines_print_quality_signal
float64 | qsc_code_num_words
int64 | qsc_code_num_chars
int64 | qsc_code_mean_word_length
int64 | qsc_code_frac_words_unique
null | qsc_code_frac_chars_top_2grams
int64 | qsc_code_frac_chars_top_3grams
int64 | qsc_code_frac_chars_top_4grams
int64 | qsc_code_frac_chars_dupe_5grams
int64 | qsc_code_frac_chars_dupe_6grams
int64 | qsc_code_frac_chars_dupe_7grams
int64 | qsc_code_frac_chars_dupe_8grams
int64 | qsc_code_frac_chars_dupe_9grams
int64 | qsc_code_frac_chars_dupe_10grams
int64 | qsc_code_frac_chars_replacement_symbols
int64 | qsc_code_frac_chars_digital
int64 | qsc_code_frac_chars_whitespace
int64 | qsc_code_size_file_byte
int64 | qsc_code_num_lines
int64 | qsc_code_num_chars_line_max
int64 | qsc_code_num_chars_line_mean
int64 | qsc_code_frac_chars_alphabet
int64 | qsc_code_frac_chars_comments
int64 | qsc_code_cate_xml_start
int64 | qsc_code_frac_lines_dupe_lines
int64 | qsc_code_cate_autogen
int64 | qsc_code_frac_lines_long_string
int64 | qsc_code_frac_chars_string_length
int64 | qsc_code_frac_chars_long_word_length
int64 | qsc_code_frac_lines_string_concat
null | qsc_code_cate_encoded_data
int64 | qsc_code_frac_chars_hex_words
int64 | qsc_code_frac_lines_prompt_comments
int64 | qsc_code_frac_lines_assert
int64 | qsc_codepython_cate_ast
int64 | qsc_codepython_frac_lines_func_ratio
int64 | qsc_codepython_cate_var_zero
int64 | qsc_codepython_frac_lines_pass
int64 | qsc_codepython_frac_lines_import
int64 | qsc_codepython_frac_lines_simplefunc
int64 | qsc_codepython_score_lines_no_logic
int64 | qsc_codepython_frac_lines_print
int64 | effective
string | hits
int64 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
07735a9f620af68e0e83b32173c18b049fe12c82
| 176
|
py
|
Python
|
virtual/bin/django-admin.py
|
Brayonski/Instagram
|
189ec32577f950fe14bd199e767379416e9d4f94
|
[
"MIT"
] | null | null | null |
virtual/bin/django-admin.py
|
Brayonski/Instagram
|
189ec32577f950fe14bd199e767379416e9d4f94
|
[
"MIT"
] | null | null | null |
virtual/bin/django-admin.py
|
Brayonski/Instagram
|
189ec32577f950fe14bd199e767379416e9d4f94
|
[
"MIT"
] | null | null | null |
#!/media/root/Alpha/projects/core/django/Instagram/virtual/bin/python
from django.core import management
if __name__ == "__main__":
management.execute_from_command_line()
| 29.333333
| 69
| 0.795455
| 23
| 176
| 5.608696
| 0.826087
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.085227
| 176
| 5
| 70
| 35.2
| 0.801242
| 0.386364
| 0
| 0
| 0
| 0
| 0.074766
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.333333
| 0
| 0.333333
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 4
|
07983e0fdb4c56c6c89fc7aa6fe201858208d96c
| 154
|
py
|
Python
|
examples/user/py/__main__.py
|
Hacker0x01/pulumi-snowflake
|
f6ebcf2c3f73b103a7c2001fae231998ce1323b2
|
[
"ECL-2.0",
"Apache-2.0"
] | 3
|
2021-07-01T17:03:33.000Z
|
2022-03-01T19:29:04.000Z
|
examples/user/py/__main__.py
|
Hacker0x01/pulumi-snowflake
|
f6ebcf2c3f73b103a7c2001fae231998ce1323b2
|
[
"ECL-2.0",
"Apache-2.0"
] | 102
|
2021-07-14T13:12:58.000Z
|
2022-03-31T18:34:04.000Z
|
examples/user/py/__main__.py
|
Hacker0x01/pulumi-snowflake
|
f6ebcf2c3f73b103a7c2001fae231998ce1323b2
|
[
"ECL-2.0",
"Apache-2.0"
] | 1
|
2022-03-25T07:24:45.000Z
|
2022-03-25T07:24:45.000Z
|
"""A Python Pulumi program"""
import pulumi
import pulumi_snowflake as snowflake
user = snowflake.User("py-user")
pulumi.export("username", user.name)
| 17.111111
| 36
| 0.753247
| 21
| 154
| 5.47619
| 0.571429
| 0.208696
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.116883
| 154
| 8
| 37
| 19.25
| 0.845588
| 0.149351
| 0
| 0
| 0
| 0
| 0.12
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.5
| 0
| 0.5
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
|
0
| 4
|
07cb7dd092f421c365ac59c83203c974c406ee1d
| 193
|
py
|
Python
|
pyrt/__init__.py
|
Oleg595/pyRT
|
6fc0ccbc6fb24dcc2a8532aa22eb9574f1afdb3a
|
[
"MIT"
] | 74
|
2016-09-02T08:15:39.000Z
|
2021-08-09T08:16:23.000Z
|
pyrt/__init__.py
|
Oleg595/pyRT
|
6fc0ccbc6fb24dcc2a8532aa22eb9574f1afdb3a
|
[
"MIT"
] | 22
|
2016-09-02T08:15:14.000Z
|
2021-02-22T19:52:21.000Z
|
pyrt/__init__.py
|
Oleg595/pyRT
|
6fc0ccbc6fb24dcc2a8532aa22eb9574f1afdb3a
|
[
"MIT"
] | 27
|
2016-09-04T12:55:27.000Z
|
2022-03-19T11:21:24.000Z
|
"""
This is the main module "pyrt"
Usually you do not need to import it directly, see examples.
"""
from .camera import *
from .geometry import *
from .renderer import *
from .scene import *
| 17.545455
| 60
| 0.715026
| 29
| 193
| 4.758621
| 0.758621
| 0.217391
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.19171
| 193
| 10
| 61
| 19.3
| 0.884615
| 0.476684
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 4
|
07cf3662f89648b524ce4071f804b530f14be385
| 826
|
py
|
Python
|
pytglib/api/types/passport_element_rental_agreement.py
|
iTeam-co/pytglib
|
e5e75e0a85f89b77762209b32a61b0a883c0ae61
|
[
"MIT"
] | 6
|
2019-10-30T08:57:27.000Z
|
2021-02-08T14:17:43.000Z
|
pytglib/api/types/passport_element_rental_agreement.py
|
iTeam-co/python-telegram
|
e5e75e0a85f89b77762209b32a61b0a883c0ae61
|
[
"MIT"
] | 1
|
2021-08-19T05:44:10.000Z
|
2021-08-19T07:14:56.000Z
|
pytglib/api/types/passport_element_rental_agreement.py
|
iTeam-co/python-telegram
|
e5e75e0a85f89b77762209b32a61b0a883c0ae61
|
[
"MIT"
] | 5
|
2019-12-04T05:30:39.000Z
|
2021-05-21T18:23:32.000Z
|
from ..utils import Object
class PassportElementRentalAgreement(Object):
"""
A Telegram Passport element containing the user's rental agreement
Attributes:
ID (:obj:`str`): ``PassportElementRentalAgreement``
Args:
rental_agreement (:class:`telegram.api.types.personalDocument`):
Rental agreement
Returns:
PassportElement
Raises:
:class:`telegram.Error`
"""
ID = "passportElementRentalAgreement"
def __init__(self, rental_agreement, **kwargs):
self.rental_agreement = rental_agreement # PersonalDocument
@staticmethod
def read(q: dict, *args) -> "PassportElementRentalAgreement":
rental_agreement = Object.read(q.get('rental_agreement'))
return PassportElementRentalAgreement(rental_agreement)
| 25.030303
| 72
| 0.680387
| 71
| 826
| 7.760563
| 0.535211
| 0.245009
| 0.068966
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.226392
| 826
| 32
| 73
| 25.8125
| 0.862285
| 0.389831
| 0
| 0
| 0
| 0
| 0.168889
| 0.133333
| 0
| 0
| 0
| 0
| 0
| 1
| 0.222222
| false
| 0.444444
| 0.111111
| 0
| 0.666667
| 0
| 0
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 1
| 0
|
0
| 4
|
07faf6a2811708d5e0ab5d34fccb8ce190cebeb2
| 73
|
py
|
Python
|
setup.py
|
Enchan1207/ProcessObserving
|
d11055dd84559e30c28b3b5d0351daac63fdaa78
|
[
"MIT"
] | null | null | null |
setup.py
|
Enchan1207/ProcessObserving
|
d11055dd84559e30c28b3b5d0351daac63fdaa78
|
[
"MIT"
] | 1
|
2021-11-19T04:12:51.000Z
|
2021-11-19T04:37:17.000Z
|
setup.py
|
Enchan1207/ProcessObserving
|
d11055dd84559e30c28b3b5d0351daac63fdaa78
|
[
"MIT"
] | null | null | null |
#
# pipが読んでライブラリの諸々を設定するためのファイル
#
from setuptools import setup
setup()
| 9.125
| 29
| 0.780822
| 6
| 73
| 9.5
| 0.833333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.150685
| 73
| 7
| 30
| 10.428571
| 0.919355
| 0.369863
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.5
| 0
| 0.5
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 4
|
6af447bfa6d0d2be9e1e535859c6074ceda78b36
| 121
|
py
|
Python
|
dpFreeDine/log.py
|
jeffreyyzwu/pythonTools
|
9cb390d93a0ac8ab1f4f2a68428bdacbfd7371ce
|
[
"MIT"
] | null | null | null |
dpFreeDine/log.py
|
jeffreyyzwu/pythonTools
|
9cb390d93a0ac8ab1f4f2a68428bdacbfd7371ce
|
[
"MIT"
] | null | null | null |
dpFreeDine/log.py
|
jeffreyyzwu/pythonTools
|
9cb390d93a0ac8ab1f4f2a68428bdacbfd7371ce
|
[
"MIT"
] | 1
|
2020-06-25T03:57:37.000Z
|
2020-06-25T03:57:37.000Z
|
import logging
import logging.config
logging.config.fileConfig("conf/logger.conf")
logger = logging.getLogger("dpfree")
| 20.166667
| 45
| 0.801653
| 15
| 121
| 6.466667
| 0.533333
| 0.268041
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.07438
| 121
| 5
| 46
| 24.2
| 0.866071
| 0
| 0
| 0
| 0
| 0
| 0.181818
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.5
| 0
| 0.5
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
|
0
| 4
|
ed05a5861aaf9f9c6b7148625e873ff48b47754c
| 2,375
|
py
|
Python
|
memrise/core/modules/actions/empty_actions.py
|
kolesnikov-bn/django-memrise-scraper
|
00dddb53f04ce2f794fe3611ea97190ef8265079
|
[
"MIT"
] | null | null | null |
memrise/core/modules/actions/empty_actions.py
|
kolesnikov-bn/django-memrise-scraper
|
00dddb53f04ce2f794fe3611ea97190ef8265079
|
[
"MIT"
] | null | null | null |
memrise/core/modules/actions/empty_actions.py
|
kolesnikov-bn/django-memrise-scraper
|
00dddb53f04ce2f794fe3611ea97190ef8265079
|
[
"MIT"
] | null | null | null |
from __future__ import annotations
from typing import List, ClassVar, TYPE_CHECKING
from memrise.core.modules.actions.base import Actions
if TYPE_CHECKING:
from memrise.core.domains.entities import CourseEntity, LevelEntity, WordEntity
class EmptyCourseActions(Actions):
def create(self, entities: List[CourseEntity]) -> None:
self.reporter.report(
entities, f"{self.prefix}Добавление новых курсов{self.postfix}"
)
def update(self, entities: List[CourseEntity]) -> None:
self.reporter.report(entities, f"{self.prefix}Обновление курсов{self.postfix}")
def equal(self, entities: List[CourseEntity]) -> None:
self.reporter.report(
entities, f"{self.prefix}Курсы без изменений{self.postfix}"
)
def delete(self, entities: List[CourseEntity]) -> None:
self.reporter.report(entities, f"{self.prefix}Удаление курсов{self.postfix}")
class EmptyLevelActions(Actions):
prefix: ClassVar[str] = "Курс $course_id --> "
def create(self, entities: List[LevelEntity]) -> None:
self.reporter.report(
entities, f"{self.prefix}Добавление новых уровней{self.postfix}"
)
def update(self, entities: List[LevelEntity]) -> None:
self.reporter.report(entities, f"{self.prefix}Обновление уровней{self.postfix}")
def equal(self, entities: List[LevelEntity]) -> None:
self.reporter.report(
entities, f"{self.prefix}Уровни без изменений{self.postfix}"
)
def delete(self, entities: List[LevelEntity]) -> None:
self.reporter.report(entities, f"{self.prefix}Удаление уровней{self.postfix}")
class EmptyWordActions(Actions):
prefix: ClassVar[str] = "Уровень $level_id --> "
def create(self, entities: List[WordEntity]) -> None:
self.reporter.report(
entities, f"{self.prefix}Добавление новых слов{self.postfix}"
)
def update(self, entities: List[WordEntity]) -> None:
self.reporter.report(entities, f"{self.prefix}Обновление слов{self.postfix}")
def equal(self, entities: List[WordEntity]) -> None:
self.reporter.report(
entities, f"{self.prefix}Слова без изменений{self.postfix}"
)
def delete(self, entities: List[WordEntity]) -> None:
self.reporter.report(entities, f"{self.prefix}Удаление слов{self.postfix}")
| 35.447761
| 88
| 0.676211
| 274
| 2,375
| 5.832117
| 0.19708
| 0.090113
| 0.12015
| 0.165207
| 0.759074
| 0.71965
| 0.705882
| 0.632666
| 0.632666
| 0.538799
| 0
| 0
| 0.195368
| 2,375
| 66
| 89
| 35.984848
| 0.836211
| 0
| 0
| 0.130435
| 0
| 0
| 0.246737
| 0.140211
| 0
| 0
| 0
| 0
| 0
| 1
| 0.26087
| false
| 0
| 0.086957
| 0
| 0.456522
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 4
|
ed0c79a039d908ee6ce419a8ed7b3735194829ba
| 254
|
py
|
Python
|
wingedsheep/carcassonne/objects/road.py
|
SuryaVikram/CarcassonneMaster
|
cc0201638dfea05b254833097329729e9b8a410c
|
[
"MIT"
] | 11
|
2020-05-19T17:29:18.000Z
|
2022-03-24T06:22:50.000Z
|
wingedsheep/carcassonne/objects/road.py
|
SuryaVikram/CarcassonneMaster
|
cc0201638dfea05b254833097329729e9b8a410c
|
[
"MIT"
] | 6
|
2020-05-18T09:24:26.000Z
|
2022-03-12T00:30:21.000Z
|
wingedsheep/carcassonne/objects/road.py
|
SuryaVikram/CarcassonneMaster
|
cc0201638dfea05b254833097329729e9b8a410c
|
[
"MIT"
] | 5
|
2021-09-16T11:53:26.000Z
|
2022-03-30T12:08:56.000Z
|
from wingedsheep.carcassonne.objects.coordinate_with_side import CoordinateWithSide
class Road:
def __init__(self, road_positions: [CoordinateWithSide], finished: bool):
self.road_positions = road_positions
self.finished = finished
| 31.75
| 83
| 0.775591
| 27
| 254
| 6.962963
| 0.62963
| 0.207447
| 0.180851
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.15748
| 254
| 7
| 84
| 36.285714
| 0.878505
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.2
| false
| 0
| 0.2
| 0
| 0.6
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 4
|
ed1af59adda910a4594541daabe9bb4402f689da
| 365
|
py
|
Python
|
manipulacao_de_arquivos/List-Comprehension/comprehension_v3.py
|
andremartins746/curso_de_PYTHON
|
3b4d79e3310b2442cf57a98f213a153492f2a89a
|
[
"MIT"
] | null | null | null |
manipulacao_de_arquivos/List-Comprehension/comprehension_v3.py
|
andremartins746/curso_de_PYTHON
|
3b4d79e3310b2442cf57a98f213a153492f2a89a
|
[
"MIT"
] | null | null | null |
manipulacao_de_arquivos/List-Comprehension/comprehension_v3.py
|
andremartins746/curso_de_PYTHON
|
3b4d79e3310b2442cf57a98f213a153492f2a89a
|
[
"MIT"
] | null | null | null |
# usando generator, ele comsome menos memoria
generator = (i ** 2 for i in range(10) if i % 2 == 0)
#o next serve para extrair o valor do generator, importante o generator nbao e uma tupla
print(next(generator))
print(next(generator))
print(next(generator))
print(next(generator))
print(next(generator))
# print(next(generator)) aqui vai dar um ERRO apartir do 64
| 33.181818
| 88
| 0.745205
| 60
| 365
| 4.533333
| 0.566667
| 0.198529
| 0.397059
| 0.422794
| 0.397059
| 0.397059
| 0.397059
| 0.397059
| 0.397059
| 0.397059
| 0
| 0.022508
| 0.147945
| 365
| 10
| 89
| 36.5
| 0.85209
| 0.515068
| 0
| 0.833333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0.833333
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
|
0
| 4
|
ed2e924af7f02f74df4d4f102ee84e3e3f011848
| 222
|
py
|
Python
|
Lists_as_Stacks_and_Queues/01_reverse_strings.py
|
MihailMarkovski/Python-Advanced-2020
|
8edea78cbe5588a409ba9bc3767861250f58c1a6
|
[
"MIT"
] | 4
|
2020-09-19T13:53:19.000Z
|
2020-11-01T18:34:53.000Z
|
Lists_as_Stacks_and_Queues/01_reverse_strings.py
|
MNikov/Python-Advanced-September-2020
|
1d65039de7f094d908411afffa8aee9689ab4220
|
[
"MIT"
] | null | null | null |
Lists_as_Stacks_and_Queues/01_reverse_strings.py
|
MNikov/Python-Advanced-September-2020
|
1d65039de7f094d908411afffa8aee9689ab4220
|
[
"MIT"
] | null | null | null |
def reverse_string(string):
reversed_list = []
string_as_list = list(string)
while string_as_list:
reversed_list.append(string_as_list.pop())
print(''.join(reversed_list))
reverse_string(input())
| 22.2
| 50
| 0.702703
| 29
| 222
| 5
| 0.413793
| 0.248276
| 0.248276
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.175676
| 222
| 9
| 51
| 24.666667
| 0.79235
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.142857
| false
| 0
| 0
| 0
| 0.142857
| 0.142857
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 4
|
ed3b21943c2f9baae29dd5ad2e45a0e03bc0e035
| 4,284
|
py
|
Python
|
compliance/verify_submission/mlperf_submission_helper/crypto.py
|
sanyalington/mlperf_training_mitest
|
d07b360e475afb87c7da57f173952822d84ed212
|
[
"Apache-2.0"
] | 1
|
2019-02-19T09:53:42.000Z
|
2019-02-19T09:53:42.000Z
|
compliance/verify_submission/mlperf_submission_helper/crypto.py
|
sanyalington/mlperf_training_mitest
|
d07b360e475afb87c7da57f173952822d84ed212
|
[
"Apache-2.0"
] | 1
|
2018-11-06T06:03:30.000Z
|
2018-11-06T06:03:30.000Z
|
compliance/verify_submission/mlperf_submission_helper/crypto.py
|
sanyalington/mlperf_training_mitest
|
d07b360e475afb87c7da57f173952822d84ed212
|
[
"Apache-2.0"
] | 3
|
2019-01-14T13:57:03.000Z
|
2019-02-22T23:19:41.000Z
|
import fnmatch
import os
import shutil
from Cryptodome.PublicKey import RSA
from Cryptodome.Random import get_random_bytes
from Cryptodome.Cipher import AES, PKCS1_OAEP
def encrypt_file(public_key, src_file, dest_file):
try:
with open(src_file) as f:
rsa_key = RSA.import_key(open(public_key).read())
session_key = get_random_bytes(16)
# Encrypt session key
cipher_rsa = PKCS1_OAEP.new(rsa_key)
encrypted_session_key = cipher_rsa.encrypt(session_key)
# Encrypt data
cipher_aes = AES.new(session_key, AES.MODE_EAX)
ciphertext, tag = cipher_aes.encrypt_and_digest(f.read().encode("utf-8"))
except Exception as e:
print("Unable to encrypt file: {}".format(src_file))
raise e
try:
with open(dest_file, "wb") as f:
for x in (encrypted_session_key, cipher_aes.nonce, tag, ciphertext):
f.write(x)
except Exception as e:
print("Unable to write output file {}".format(dest_file))
raise e
def decrypt_file(private_key, src_file, dest_file):
try:
with open(src_file, "rb") as f:
rsa_key = RSA.import_key(open(private_key).read())
encrypted_session_key = f.read(rsa_key.size_in_bytes())
nonce = f.read(16)
tag = f.read(16)
ciphertext = f.read(-1)
# Decrypt session key
cipher_rsa = PKCS1_OAEP.new(rsa_key)
session_key = cipher_rsa.decrypt(encrypted_session_key)
# Decrypt data
cipher_aes = AES.new(session_key, AES.MODE_EAX, nonce)
data = cipher_aes.decrypt_and_verify(ciphertext, tag)
data = data.decode("utf-8")
except Exception as e:
print("Unable to decrypt file: {}".format(src_file))
raise e
try:
with open(dest_file, "w") as f:
f.write(data)
except Exception as e:
print("Unable to write output file: {}".format(dest_file))
raise e
def encrypt_submission(key, src_dir, dest_dir):
if os.path.isdir(dest_dir):
raise Exception("Output directory already exists.")
os.mkdir(dest_dir, mode=0o755)
for root, dirs, files in os.walk(src_dir):
# identify result files and encrypt, else directly copy
if fnmatch.fnmatch(root, os.path.join(src_dir, "results", "*", "*")):
for f in files:
from_file = os.path.join(root, f)
to_file = from_file.replace(src_dir.rstrip(os.sep),
dest_dir.rstrip(os.sep), 1)
encrypt_file(key, from_file, to_file)
else:
for d in dirs:
from_dir = os.path.join(root, d)
to_dir = from_dir.replace(src_dir.rstrip(os.sep),
dest_dir.rstrip(os.sep), 1)
os.mkdir(to_dir, mode=0o755)
for f in files:
from_file = os.path.join(root, f)
to_file = from_file.replace(src_dir.rstrip(os.sep),
dest_dir.rstrip(os.sep), 1)
shutil.copyfile(from_file, to_file)
def decrypt_submission(key, src_dir, dest_dir):
if os.path.isdir(dest_dir):
raise Exception("Output directory already exists.")
os.mkdir(dest_dir, mode=0o755)
for root, dirs, files in os.walk(src_dir):
# identify result files and encrypt, else directly copy
if fnmatch.fnmatch(root, os.path.join(src_dir, "results", "*", "*")):
for f in files:
from_file = os.path.join(root, f)
to_file = from_file.replace(src_dir.rstrip(os.sep),
dest_dir.rstrip(os.sep), 1)
decrypt_file(key, from_file, to_file)
else:
for d in dirs:
from_dir = os.path.join(root, d)
to_dir = from_dir.replace(src_dir.rstrip(os.sep),
dest_dir.rstrip(os.sep), 1)
os.mkdir(to_dir, mode=0o755)
for f in files:
from_file = os.path.join(root, f)
to_file = from_file.replace(src_dir.rstrip(os.sep),
dest_dir.rstrip(os.sep), 1)
shutil.copyfile(from_file, to_file)
| 39.302752
| 85
| 0.582166
| 587
| 4,284
| 4.0477
| 0.155026
| 0.030303
| 0.055556
| 0.070707
| 0.719697
| 0.719697
| 0.719697
| 0.719697
| 0.698653
| 0.638047
| 0
| 0.011569
| 0.313959
| 4,284
| 108
| 86
| 39.666667
| 0.79687
| 0.040383
| 0
| 0.593407
| 0
| 0
| 0.05117
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.043956
| false
| 0
| 0.087912
| 0
| 0.131868
| 0.043956
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 4
|
ed4ad40a5feb7715ec988d30d5b40215038f747c
| 703
|
py
|
Python
|
dataprofiler/profilers/__init__.py
|
gliptak/DataProfiler
|
37ffbf43652246ef27e070df7ff0d9f1b9529162
|
[
"Apache-2.0"
] | null | null | null |
dataprofiler/profilers/__init__.py
|
gliptak/DataProfiler
|
37ffbf43652246ef27e070df7ff0d9f1b9529162
|
[
"Apache-2.0"
] | 1
|
2021-11-20T01:08:12.000Z
|
2021-11-20T01:08:12.000Z
|
dataprofiler/profilers/__init__.py
|
gliptak/DataProfiler
|
37ffbf43652246ef27e070df7ff0d9f1b9529162
|
[
"Apache-2.0"
] | null | null | null |
from .base_column_profilers import BaseColumnProfiler
from .categorical_column_profile import CategoricalColumn
from .data_labeler_column_profile import DataLabelerColumn
from .datetime_column_profile import DateTimeColumn
from .float_column_profile import FloatColumn
from .int_column_profile import IntColumn
from .numerical_column_stats import NumericStatsMixin
from .order_column_profile import OrderColumn
from .profile_builder import Profiler, StructuredProfiler, UnstructuredProfiler
from .text_column_profile import TextColumn
from .unstructured_labeler_profile import UnstructuredLabelerProfile
"""
The purpose of this package is to provide statistics and predictions for a
given dataset.
"""
| 43.9375
| 79
| 0.880512
| 83
| 703
| 7.192771
| 0.554217
| 0.174204
| 0.222781
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.091038
| 703
| 16
| 80
| 43.9375
| 0.934272
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 4
|
ed573f4ad6bfc2195c37cb3f44adcb705871394b
| 54
|
py
|
Python
|
mahiru/policy/__init__.py
|
SecConNet/proof_of_concept
|
80f6b27ff6b97796803e554387ca2881a792be79
|
[
"Apache-2.0"
] | 4
|
2021-03-26T09:17:51.000Z
|
2021-05-17T10:31:59.000Z
|
mahiru/policy/__init__.py
|
SecConNet/proof_of_concept
|
80f6b27ff6b97796803e554387ca2881a792be79
|
[
"Apache-2.0"
] | 58
|
2020-03-02T10:02:51.000Z
|
2021-07-09T09:23:49.000Z
|
mahiru/policy/__init__.py
|
SecConNet/proof_of_concept
|
80f6b27ff6b97796803e554387ca2881a792be79
|
[
"Apache-2.0"
] | null | null | null |
"""Defining, distributing and evaluating policies."""
| 27
| 53
| 0.759259
| 5
| 54
| 8.2
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.092593
| 54
| 1
| 54
| 54
| 0.836735
| 0.87037
| 0
| null | 0
| null | 0
| 0
| null | 0
| 0
| 0
| null | 1
| null | true
| 0
| 0
| null | null | null | 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 4
|
ed5f8448befb750076defd3f16297f10b90d40f5
| 475
|
py
|
Python
|
test/test_body42.py
|
pygitee/pygitee
|
7622314a4dbb08cf2f729b6cdd0a2887b96e394e
|
[
"MIT"
] | null | null | null |
test/test_body42.py
|
pygitee/pygitee
|
7622314a4dbb08cf2f729b6cdd0a2887b96e394e
|
[
"MIT"
] | null | null | null |
test/test_body42.py
|
pygitee/pygitee
|
7622314a4dbb08cf2f729b6cdd0a2887b96e394e
|
[
"MIT"
] | null | null | null |
# coding: utf-8
from __future__ import absolute_import
import unittest
class TestBody42(unittest.TestCase):
"""Body42 unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def testBody42(self):
"""Test Body42"""
# FIXME: construct object with mandatory attributes with example values
# model = gitee.models.body42.Body42() # noqa: E501
pass
if __name__ == '__main__':
unittest.main()
| 17.592593
| 79
| 0.633684
| 53
| 475
| 5.433962
| 0.679245
| 0.055556
| 0.076389
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.045977
| 0.267368
| 475
| 26
| 80
| 18.269231
| 0.781609
| 0.355789
| 0
| 0.272727
| 0
| 0
| 0.027397
| 0
| 0
| 0
| 0
| 0.038462
| 0
| 1
| 0.272727
| false
| 0.272727
| 0.181818
| 0
| 0.545455
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 1
| 0
|
0
| 4
|
ed670d5e1a117f68b4bda3c8dfa630b3beaeecb1
| 175
|
py
|
Python
|
geotrek/land/apps.py
|
claudep/Geotrek
|
620799cc2667c3b203ef92de6ec35008111fb592
|
[
"BSD-2-Clause"
] | 1
|
2019-12-11T11:04:05.000Z
|
2019-12-11T11:04:05.000Z
|
geotrek/land/apps.py
|
numahell/Geotrek-admin
|
e279875b0b06ef60928c049d51533f76716c902a
|
[
"BSD-2-Clause"
] | null | null | null |
geotrek/land/apps.py
|
numahell/Geotrek-admin
|
e279875b0b06ef60928c049d51533f76716c902a
|
[
"BSD-2-Clause"
] | null | null | null |
from django.apps import AppConfig
from django.utils.translation import gettext_lazy as _
class LandConfig(AppConfig):
name = 'geotrek.land'
verbose_name = _("Land")
| 21.875
| 54
| 0.754286
| 22
| 175
| 5.818182
| 0.727273
| 0.15625
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.16
| 175
| 7
| 55
| 25
| 0.870748
| 0
| 0
| 0
| 0
| 0
| 0.091429
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.4
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 4
|
ed7d6583293861a136d4bce1ddf2366f87e28d56
| 114
|
py
|
Python
|
tests/cycle_info_example.py
|
lowrie/OPPPY
|
4bbbc16092f100aaf0411f9e20d89c1b1f8b19f9
|
[
"BSD-3-Clause"
] | 2
|
2019-08-19T23:29:20.000Z
|
2020-03-19T07:08:30.000Z
|
tests/cycle_info_example.py
|
lowrie/OPPPY
|
4bbbc16092f100aaf0411f9e20d89c1b1f8b19f9
|
[
"BSD-3-Clause"
] | 5
|
2019-08-20T22:03:23.000Z
|
2021-04-05T15:24:26.000Z
|
tests/cycle_info_example.py
|
lowrie/OPPPY
|
4bbbc16092f100aaf0411f9e20d89c1b1f8b19f9
|
[
"BSD-3-Clause"
] | 4
|
2019-10-23T13:29:26.000Z
|
2021-04-05T14:17:43.000Z
|
cycle_info={}
# populate some example cycle info
cycle_info['cycle']=1
cycle_info['time']=1.0
print(cycle_info)
| 14.25
| 34
| 0.745614
| 19
| 114
| 4.263158
| 0.473684
| 0.555556
| 0.345679
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.029412
| 0.105263
| 114
| 7
| 35
| 16.285714
| 0.764706
| 0.280702
| 0
| 0
| 0
| 0
| 0.1125
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0.25
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 4
|
71ff105211da39c027cbe4ed45c3eb3d49c32f6c
| 75
|
py
|
Python
|
tasks/Scrapy/scrapy_official_newspapers/runner.py
|
thefirebanks/policy-data-analyzer
|
670a4ea72ab71975b84c4a4ec43d573371c4a986
|
[
"RSA-MD"
] | 13
|
2020-12-11T12:10:20.000Z
|
2021-04-27T22:54:25.000Z
|
tasks/Scrapy/scrapy_official_newspapers/runner.py
|
thefirebanks/policy-data-analyzer
|
670a4ea72ab71975b84c4a4ec43d573371c4a986
|
[
"RSA-MD"
] | 40
|
2020-11-24T06:48:53.000Z
|
2021-04-28T05:20:37.000Z
|
tasks/Scrapy/scrapy_official_newspapers/runner.py
|
thefirebanks/policy-data-analyzer
|
670a4ea72ab71975b84c4a4ec43d573371c4a986
|
[
"RSA-MD"
] | 5
|
2020-11-26T08:23:05.000Z
|
2021-04-19T18:08:20.000Z
|
from scrapy.cmdline import execute
execute(['scrapy','crawl', 'elperuano'])
| 37.5
| 40
| 0.76
| 9
| 75
| 6.333333
| 0.777778
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.066667
| 75
| 2
| 40
| 37.5
| 0.814286
| 0
| 0
| 0
| 0
| 0
| 0.263158
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.5
| 0
| 0.5
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 4
|
9c11f93e70f9f2e00296ce74bd6f33fd4b703339
| 747
|
py
|
Python
|
Chapter_7_code/build/hector_mapping/cmake/hector_mapping-genmsg-context.py
|
crepuscularlight/ROSbyExample
|
fa7b1a60cacca9b1034e318a2ac16ce4c8530d7c
|
[
"MIT"
] | 1
|
2021-04-23T10:01:22.000Z
|
2021-04-23T10:01:22.000Z
|
Chapter_7_code/build/hector_mapping/cmake/hector_mapping-genmsg-context.py
|
crepuscularlight/ROSbyExample
|
fa7b1a60cacca9b1034e318a2ac16ce4c8530d7c
|
[
"MIT"
] | null | null | null |
Chapter_7_code/build/hector_mapping/cmake/hector_mapping-genmsg-context.py
|
crepuscularlight/ROSbyExample
|
fa7b1a60cacca9b1034e318a2ac16ce4c8530d7c
|
[
"MIT"
] | null | null | null |
# generated from genmsg/cmake/pkg-genmsg.context.in
messages_str = "/home/liudiyang1998/Git/ROS-Robotics-By-Example/Chapter_7_code/src/hector_slam/hector_mapping/msg/HectorDebugInfo.msg;/home/liudiyang1998/Git/ROS-Robotics-By-Example/Chapter_7_code/src/hector_slam/hector_mapping/msg/HectorIterData.msg"
services_str = ""
pkg_name = "hector_mapping"
dependencies_str = ""
langs = "gencpp;geneus;genlisp;gennodejs;genpy"
dep_include_paths_str = "hector_mapping;/home/liudiyang1998/Git/ROS-Robotics-By-Example/Chapter_7_code/src/hector_slam/hector_mapping/msg"
PYTHON_EXECUTABLE = "/usr/bin/python2"
package_has_static_sources = '' == 'TRUE'
genmsg_check_deps_script = "/opt/ros/melodic/share/genmsg/cmake/../../../lib/genmsg/genmsg_check_deps.py"
| 62.25
| 251
| 0.813922
| 108
| 747
| 5.351852
| 0.518519
| 0.112457
| 0.103806
| 0.119377
| 0.420415
| 0.420415
| 0.420415
| 0.420415
| 0.420415
| 0.420415
| 0
| 0.02244
| 0.045515
| 747
| 11
| 252
| 67.909091
| 0.788219
| 0.065596
| 0
| 0
| 1
| 0.222222
| 0.708333
| 0.659483
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 4
|
9c141cbfb146e420f2d73b59b5a80e4a11238b62
| 11,347
|
py
|
Python
|
crop_rotator/strona/models.py
|
Bahusson/crop_rotator
|
c1d86d36ce1867a84b927708f92c62c7815250a4
|
[
"MIT"
] | 1
|
2021-05-08T07:04:45.000Z
|
2021-05-08T07:04:45.000Z
|
crop_rotator/strona/models.py
|
Bahusson/crop_rotator
|
c1d86d36ce1867a84b927708f92c62c7815250a4
|
[
"MIT"
] | 80
|
2020-11-18T20:35:12.000Z
|
2021-06-13T08:08:36.000Z
|
crop_rotator/strona/models.py
|
Bahusson/crop_rotator
|
c1d86d36ce1867a84b927708f92c62c7815250a4
|
[
"MIT"
] | null | null | null |
from django.db import models
# Klasa tłumaczeniowa dla "core"
class PageNames(models.Model):
lang_flag = models.ImageField(upload_to='images') # Mały obrazek języka
lang_flag_id = models.CharField(max_length=20, blank=True, null=True)
headtitle = models.CharField(max_length=200) # Nagłówek strony w tym j
mainpage = models.CharField(max_length=200) # Strona główna w tym języku
all_plants = models.CharField(max_length=200, blank=True, null=True) # Spis roślin
about = models.CharField(max_length=200) # Informacje w tym języku
contact = models.CharField(max_length=200) # Kontakty w tym języku
logout = models.CharField(max_length=200) # Wyloguj
login = models.CharField(max_length=200) # zaloguj
register = models.CharField(max_length=50)
see_more = models.CharField(max_length=200)
my_plans = models.CharField(max_length=200, blank=True, null=True) # Spis roślin
all_plans = models.CharField(max_length=200, blank=True, null=True) # Wszystkie plany
see_more = models.CharField(max_length=200, blank=True, null=True) # Czytaj dalej
of_steps = models.CharField(max_length=200, blank=True, null=True) # Kroków
of_plants = models.CharField(max_length=200, blank=True, null=True) # Czytaj dalej
by_crops = models.CharField(max_length=50, blank=True, null=True) # Rośliny (button)
by_families = models.CharField(max_length=50, blank=True, null=True) # Rodziny (button)
by_tags = models.CharField(max_length=50, blank=True, null=True) # Kategorie (button)
class Meta:
verbose_name_plural = 'Page Names'
# Klasa tłumaczeniowa dla Login/Register i myprofile.
class RegNames(models.Model):
password = models.CharField(max_length=50, blank=True, null=True)
re_password = models.CharField(max_length=50, blank=True, null=True)
name = models.CharField(max_length=50, blank=True, null=True)
refresh = models.CharField(max_length=50, blank=True, null=True)
passwd_too_simple = models.CharField(max_length=250, blank=True, null=True)
register = models.CharField(max_length=50, blank=True, null=True)
class Meta:
verbose_name_plural = 'Registry Names'
# Klasa skórek do naszej apki. Pola nienulowalne.
class PageSkin(models.Model):
themetitle = models.CharField(max_length=200)
position = models.IntegerField()
planimagedefault = models.ImageField(
upload_to='skins', blank=True, null=True)
rotatorlogo_main = models.ImageField(
upload_to='skins', blank=True, null=True)
class Meta:
ordering = ['position']
verbose_name_plural = 'Page Skins'
def __str__(self):
return self.themetitle
# klasa tłumaczeniowa dla strony "o projekcie"
class AboutPageNames(models.Model):
about_project = models.TextField() # Pole tekstowe dla strony about.
send_email = models.CharField(max_length=200) # Wyślij email
gitter = models.CharField(max_length=200) # Adres gittera
github = models.CharField(max_length=200) # Adres github
login_to_see = models.CharField(max_length=200) # zaloguj się by przeglądać
curr_prog_includes = models.CharField(max_length=40, blank=True, null=True) # Obecnie program zawiera bazę
over = models.CharField(max_length=30, blank=True, null=True) # Ponad
plants = models.CharField(max_length=30, blank=True, null=True) # roślin_uprawnych
coming_from = models.CharField(max_length=30, blank=True, null=True) # pochodzących z
families = models.CharField(max_length=30, blank=True, null=True) # rodzin
marked_by = models.CharField(max_length=30, blank=True, null=True) # oznaczonych według
categories = models.CharField(max_length=30, blank=True, null=True) # kategorii
and_over = models.CharField(max_length=30, blank=True, null=True) # i ponad
unique_interactions = models.CharField(max_length=30, blank=True, null=True) # unikalnych interakcji
described_by = models.CharField(max_length=30, blank=True, null=True) # opisanych na podstawie
sources = models.CharField(max_length=30, blank=True, null=True) # źródeł
class Meta:
verbose_name_plural = 'About Page Names'
# klasa tłumaczeniowa dla strony edycji planów.
class RotatorEditorPageNames(models.Model):
new_plan = models.CharField(max_length=200) # dodaj nowy plan
new_step = models.CharField(max_length=200) # Dodaj nowy krok
name_plan = models.CharField(max_length=200) # Nazwa planu
name_step = models.CharField(max_length=200) # Nazwa kroku
plan_remove = models.CharField(max_length=200) # Usuń plan (button)
step_remove = models.CharField(max_length=200) # Usuń krok (button)
remove_warning = models.CharField(max_length=200, blank=True, null=True) # Czy na pewno usunąć? (text)
remove_permanent = models.CharField(max_length=200, blank=True, null=True) # Tak usuń trwale (button)
dont_remove = models.CharField(max_length=200, blank=True, null=True) # Nie usuwaj (button)
editme = models.CharField(max_length=200) # Edytuj
switch_places = models.CharField(max_length=200) # Zamień miejscami (button)
switch_with = models.CharField(max_length=200) # Zamień z krokiem:
switch_text = models.CharField(max_length=200) # Zamień z innym krokiem w planie (text)
u_edit_step_no = models.CharField(max_length=200) # Edytujesz krok nr.
title = models.CharField(max_length=200) # Tytuł
descr = models.CharField(max_length=200) # opis
early_crop = models.CharField(max_length=200) # Wczesny Plon
middle_crop = models.CharField(max_length=200, blank=True, null=True) # Śródplon
late_crop = models.CharField(max_length=200) # late_crop
destroy_early_crop = models.CharField(max_length=200) # Zniszcz na zielony nawóz
add_fertilizer = models.CharField(max_length=200) # Dodaj nawóz
change = models.CharField(max_length=200) # Zachowaj zmiany (button)
publish = models.CharField(max_length=200, blank=True, null=True) # Opublikuj
unpublish = models.CharField(max_length=200, blank=True, null=True) # Wycofaj
publish_text = models.CharField(max_length=200, blank=True, null=True) # Opublikuj swój plan (text)
unpublish_text = models.CharField(max_length=200, blank=True, null=True) # Wycofaj plan z publikacji (text)
publish_onhover = models.CharField(max_length=900, blank=True, null=True) # Wyjaśnienie onhover o publikacji
unpublish_onhover = models.CharField(max_length=900, blank=True, null=True) # Wyjaśnienie onhover o wycofywaniu publikacji
more_info = models.CharField(max_length=900, blank=True, null=True) # więcej informacji (button "info")
option_select = models.CharField(max_length=200, blank=True, null=True) # Wybierz opcję: (dropdown)
in_this_plan = models.CharField(max_length=200, blank=True, null=True) # W tym planie znajduje się
fabs_and = models.CharField(max_length=200, blank=True, null=True) # bobowatych lub strączkowych
should_be_fabs = models.CharField(max_length=200, blank=True, null=True) # Powinno ich być między 25% a 33%
error_len = models.CharField(max_length=200, blank=True, null=True) # Błąd: ten płodozmian jest za krótki.
len_required = models.CharField(max_length=200, blank=True, null=True) # W płodozmianie znajdują się rośliny, które wymagają dłuższego zmianowania.
remove_or_add = models.CharField(max_length=200, blank=True, null=True) # Usuń je i wybierz coś innego, lub dodaj więcej roślin.
plan_limit_reached = models.TextField(blank=True, null=True) # Osiągnięto limit planów
family = models.CharField(max_length=200, blank=True, null=True) # Rodzina
species = models.CharField(max_length=200, blank=True, null=True) # Gatunki
sources = models.CharField(max_length=200, blank=True, null=True) # Źródła
notes = models.CharField(max_length=200, blank=True, null=True) # Uwagi
harms = models.CharField(max_length=200, blank=True, null=True) # Szkodzi
in_step = models.CharField(max_length=200, blank=True, null=True) # W kroku
well_cooperates = models.CharField(max_length=200, blank=True, null=True) # dobrze oddziaływuje na
collides = models.CharField(max_length=200, blank=True, null=True) # Powoduje KOLIZJĘ z
image_source = models.CharField(max_length=200, blank=True, null=True) # Źródło obrazka
add_fertilizer_main = models.CharField(max_length=200, blank=True, null=True) # W tym planie brakuje nawozu z zewnątrz!
infl_type = models.CharField(max_length=100, blank=True, null=True) # typ oddziaływania
companion = models.CharField(max_length=100, blank=True, null=True) # współrzędna
following = models.CharField(max_length=100, blank=True, null=True) # następcza
allelopatic = models.CharField(max_length=150, blank=True, null=True) # allelopatyczna, albo współrzędna i nastepcza
source_button = models.CharField(max_length=50, blank=True, null=True) # Źródło
known_interactions = models.CharField(max_length=200, blank=True, null=True) # Znane interakcje
plant_to_other = models.CharField(max_length=200, blank=True, null=True) # Roślina oddziaływuje na inne
other_to_plant = models.CharField(max_length=200, blank=True, null=True) # Inne oddziaływują na roślinę
family_to_other = models.CharField(max_length=200, blank=True, null=True) # Rodzina oddziaływuje na inne
other_to_family = models.CharField(max_length=200, blank=True, null=True) # Inne oddziaływują na rodzinę
category_to_other = models.CharField(max_length=200, blank=True, null=True) # Kategoria oddziaływuje na inne
other_to_category = models.CharField(max_length=200, blank=True, null=True) # Inne oddziaływują na kategorię
annual = models.CharField(max_length=50, blank=True, null=True) # Jare
perennial = models.CharField(max_length=50, blank=True, null=True) # Ozime
evaluate_button = models.CharField(max_length=50, blank=True, null=True) # Ewaluacja (button)
analysis_by_text = models.CharField(max_length=200, blank=True, null=True) # Analizuje plan pod kątem pozytywnych i negatywnych interakcji, oraz błędów.
remove_element = models.CharField(max_length=50, blank=True, null=True) # Usuń element (button)
add_element = models.CharField(max_length=50, blank=True, null=True) # Dodaj element (button)
return_to_plan = models.CharField(max_length=50, blank=True, null=True) # Powrót do planu (button)
categories = models.CharField(max_length=50, blank=True, null=True) # Kategorie
next_year = models.CharField(max_length=50, blank=True, null=True) # W kolejnym roku
second_year = models.CharField(max_length=50, blank=True, null=True) # W drugim roku
third_year = models.CharField(max_length=50, blank=True, null=True) # W trzecim roku
two_consecutive = models.CharField(max_length=50, blank=True, null=True) # W dwóch kolejnych latach
manure_added = models.CharField(max_length=50, blank=True, null=True) # DODANO OBORNIK
green_manure_destroyed = models.CharField(max_length=50, blank=True, null=True) # ZNISZCZONO NA ZIELONY NAWÓZ
remove_button = models.CharField(max_length=50, blank=True, null=True) # Usuń
wait_button = models.CharField(max_length=50, blank=True, null=True) # Obliczam... button
second_third = models.CharField(max_length=50, blank=True, null=True) # W drugim i trzecim roku
class Meta:
verbose_name_plural = 'Rotator Editor Page Names'
| 68.355422
| 156
| 0.748656
| 1,586
| 11,347
| 5.20681
| 0.228878
| 0.208888
| 0.250666
| 0.334221
| 0.648099
| 0.568903
| 0.521676
| 0.482078
| 0.482078
| 0.270404
| 0
| 0.032121
| 0.146735
| 11,347
| 165
| 157
| 68.769697
| 0.820801
| 0.210805
| 0
| 0.049296
| 0
| 0
| 0.011198
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.007042
| false
| 0.021127
| 0.007042
| 0.007042
| 0.943662
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
|
0
| 4
|
9c387abdc7101ce84b434e0967e90ea3d11fadd1
| 120
|
py
|
Python
|
tests/openfl/databases/__init__.py
|
brandon-edwards/openfl
|
9e0521252253eab09571dc2be40f46bfeaf9746a
|
[
"Apache-2.0"
] | 297
|
2021-01-13T08:49:35.000Z
|
2022-03-31T15:06:43.000Z
|
tests/openfl/databases/__init__.py
|
brandon-edwards/openfl
|
9e0521252253eab09571dc2be40f46bfeaf9746a
|
[
"Apache-2.0"
] | 265
|
2021-02-02T09:57:33.000Z
|
2022-03-30T22:51:55.000Z
|
tests/openfl/databases/__init__.py
|
brandon-edwards/openfl
|
9e0521252253eab09571dc2be40f46bfeaf9746a
|
[
"Apache-2.0"
] | 81
|
2021-01-18T07:52:36.000Z
|
2022-03-26T18:55:54.000Z
|
# Copyright (C) 2020-2021 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
"""tests.openfl.databases package."""
| 30
| 43
| 0.75
| 16
| 120
| 5.625
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.091743
| 0.091667
| 120
| 3
| 44
| 40
| 0.733945
| 0.916667
| 0
| null | 0
| null | 0
| 0
| null | 0
| 0
| 0
| null | 1
| null | true
| 0
| 0
| null | null | null | 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 4
|
9c62239602bee7f09d33cb2c02cecbf2bb2ffe6b
| 170
|
py
|
Python
|
nlu/streamlit/01_dashboard.py
|
hatrungduc/spark-nlp-workshop
|
4a4ec0195d1d3d847261df9ef2df7aa5f95bbaec
|
[
"Apache-2.0"
] | 687
|
2018-09-07T03:45:39.000Z
|
2022-03-20T17:11:20.000Z
|
nlu/streamlit/01_dashboard.py
|
hatrungduc/spark-nlp-workshop
|
4a4ec0195d1d3d847261df9ef2df7aa5f95bbaec
|
[
"Apache-2.0"
] | 89
|
2018-09-18T02:04:42.000Z
|
2022-02-24T18:22:27.000Z
|
nlu/streamlit/01_dashboard.py
|
hatrungduc/spark-nlp-workshop
|
4a4ec0195d1d3d847261df9ef2df7aa5f95bbaec
|
[
"Apache-2.0"
] | 407
|
2018-09-07T03:45:44.000Z
|
2022-03-20T05:12:25.000Z
|
import nlu
nlu.enable_streamlit_caching() # Optional caching the models, recommended
nlu.load('ner').viz_streamlit(['I love NLU and Streamlit!','I hate buggy software'])
| 42.5
| 84
| 0.776471
| 25
| 170
| 5.16
| 0.72
| 0.155039
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.1
| 170
| 3
| 85
| 56.666667
| 0.843137
| 0.235294
| 0
| 0
| 0
| 0
| 0.382813
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.333333
| 0
| 0.333333
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 4
|
9c70db857c23160057be0e2d7842554b41679dd1
| 192
|
py
|
Python
|
sheet/forms.py
|
samarth-singh-thakur/finanza
|
b518d800341ba3ccd2d9e45dd3b309ecfb181289
|
[
"MIT"
] | null | null | null |
sheet/forms.py
|
samarth-singh-thakur/finanza
|
b518d800341ba3ccd2d9e45dd3b309ecfb181289
|
[
"MIT"
] | 5
|
2021-03-30T14:08:55.000Z
|
2021-09-22T19:31:37.000Z
|
sheet/forms.py
|
samarth-singh-thakur/finanza
|
b518d800341ba3ccd2d9e45dd3b309ecfb181289
|
[
"MIT"
] | null | null | null |
from django import forms
from .models import Ledger
class LedgerForm(forms.ModelForm):
class Meta:
model = Ledger
fields = ('lender', 'borrower', 'amount','description',)
| 24
| 64
| 0.671875
| 21
| 192
| 6.142857
| 0.761905
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.213542
| 192
| 8
| 64
| 24
| 0.854305
| 0
| 0
| 0
| 0
| 0
| 0.160622
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.333333
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 4
|
9c7ec81e80295304adad08f8086e6397eddf8190
| 231
|
py
|
Python
|
tests/test_linear_stable_convergence.py
|
dmitry-kabanov/fickettmodel
|
255b1e9cae1cfb7a6b914ad61a17288d52215cc4
|
[
"MIT"
] | null | null | null |
tests/test_linear_stable_convergence.py
|
dmitry-kabanov/fickettmodel
|
255b1e9cae1cfb7a6b914ad61a17288d52215cc4
|
[
"MIT"
] | null | null | null |
tests/test_linear_stable_convergence.py
|
dmitry-kabanov/fickettmodel
|
255b1e9cae1cfb7a6b914ad61a17288d52215cc4
|
[
"MIT"
] | null | null | null |
"""
Test that linear solver convergence with correct rate of convergence
when applied to the problem with stable detonation.
"""
# class TestLinearStableConvergence:
# def test_linear_stable_convergence(self):
# pass
| 23.1
| 68
| 0.757576
| 27
| 231
| 6.37037
| 0.777778
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.181818
| 231
| 9
| 69
| 25.666667
| 0.910053
| 0.930736
| 0
| null | 0
| null | 0
| 0
| null | 0
| 0
| 0
| null | 1
| null | true
| 0
| 0
| null | null | null | 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 4
|
13391be17287077c23c254a63f27cb22436b4f20
| 4,550
|
py
|
Python
|
augur/datasources/ghtorrent/test_ghtorrent_functions.py
|
parthsharma2/augur
|
6d59c8c80f3c21eb97bfa4ea4817908ea9a7d10b
|
[
"MIT"
] | null | null | null |
augur/datasources/ghtorrent/test_ghtorrent_functions.py
|
parthsharma2/augur
|
6d59c8c80f3c21eb97bfa4ea4817908ea9a7d10b
|
[
"MIT"
] | null | null | null |
augur/datasources/ghtorrent/test_ghtorrent_functions.py
|
parthsharma2/augur
|
6d59c8c80f3c21eb97bfa4ea4817908ea9a7d10b
|
[
"MIT"
] | null | null | null |
import os
import pytest
@pytest.fixture(scope="module")
def ghtorrent():
import augur
augur_app = augur.Application()
return augur_app['ghtorrent']()
def test_repoid(ghtorrent):
assert ghtorrent.repoid('rails', 'rails') >= 1000
def test_userid(ghtorrent):
assert ghtorrent.userid('howderek') >= 1000
"""
Pandas testing format
assert ghtorrent.<function>('owner', 'repo').isin(['<data that should be in dataframe>']).any
The tests check if a value is anywhere in the dataframe
"""
# *** DIVERSITY AND INCLUSION *** #
# *** GROWTH, MATURITY, AND DECLINE *** #
def test_closed_issues(ghtorrent):
assert ghtorrent.closed_issues('cashmusic', 'platform').isin(["2012-11-09T00:00:00.000Z"]).any
def test_code_commits(ghtorrent):
assert ghtorrent.code_commits('facebook', 'folly').isin(["2013-01-07"]).any
def test_code_review_iteration(ghtorrent):
assert ghtorrent.code_review_iteration('apache', 'spark').isin(["2015-05-22T00:00:00.000Z"]).any
def test_contribution_acceptance(ghtorrent):
assert ghtorrent.contribution_acceptance('rails', 'rails').isin(["2012-05-16T00:00:00.000Z"]).any
def test_contributing_github_organizations(ghtorrent):
assert ghtorrent.contributing_github_organizations('rails', 'rails').isin(["4066"]).any
def test_first_response_to_issue_duration(ghtorrent):
assert ghtorrent.first_response_to_issue_duration('AudioKit', 'AudioKit').isin(["13000839"]).any
def test_forks(ghtorrent):
assert ghtorrent.forks('facebook', 'hiphop-php').isin(["2012-01-08"]).any
def test_maintainer_response_to_merge_request_duration(ghtorrent):
assert ghtorrent.maintainer_response_to_merge_request_duration('rails', 'rails').isin(["2011-05-10T00:00:00.000Z"]).any
def test_new_contributing_github_organizations(ghtorrent):
assert ghtorrent.new_contributing_github_organizations('rails', 'rails').isin(["4066"]).any
def test_open_issues(ghtorrent):
assert ghtorrent.open_issues('mongodb', 'mongo').isin(["2013-01-05"]).any
def test_pull_request_comments(ghtorrent):
assert ghtorrent.pull_request_comments('rails', 'rails').isin(["2011-11-15T00:00:00.000Z"]).any
def test_pull_requests_open(ghtorrent):
assert ghtorrent.pull_requests_open('rails', 'rails').isin(["2013-01-09T00:00:00.000Z"]).any
def test_pull_requests_closed(ghtorrent):
assert ghtorrent.pull_requests_closed('rails', 'rails').isin(["2013-01-09T00:00:00.000Z"]).any
def test_pull_request_comment_duration(ghtorrent):
assert ghtorrent.pull_request_comment_duration('AudioKit', 'AudioKit').isin(["13000839"]).any
# *** RISK *** #
# *** VALUE *** #
# *** ACTIVITY *** #
def test_watchers(ghtorrent):
assert ghtorrent.watchers('rails', 'rails').isin(["2017-08-23T00:00:00.000Z"]).any
def test_issue_comments(ghtorrent):
assert ghtorrent.issue_comments('rails', 'rails').isin(["2009-04-05T00:00:00.000Z"]).any
# *** EXPERIMENTAL *** #
def test_commits100(ghtorrent):
assert ghtorrent.commits100('rails', 'rails').isin(["2017-08-13T00:00:00.000Z"]).any
def test_commit_comments(ghtorrent):
assert ghtorrent.commit_comments('rails', 'rails').isin(["2008-07-10T00:00:00.000Z"]).any
def test_committer_locations(ghtorrent):
assert ghtorrent.committer_locations('mavam', 'stat-cookbook').isin(["Berkeley, CA"]).any
def test_total_committers(ghtorrent):
assert ghtorrent.total_committers('rails', 'rails').isin(["2004-11-24T00:00:00.000Z"]).any
def test_total_watchers(ghtorrent):
assert ghtorrent.total_watchers('rails', 'rails').isin(["2005-08-26T00:00:00.000Z"]).any
def test_issue_activity(ghtorrent):
assert ghtorrent.issue_activity('bitcoin', 'bitcoin').isin(["2010-12-20T00:00:00.000Z"]).any
def test_pull_acceptance_rate(ghtorrent):
assert ghtorrent.pull_request_acceptance_rate('akka', 'akka').isin([0.5]).any
# def test_community_age(ghtorrent):
# assert ghtorrent.community_age('TEST', 'TEST').isin(["DATE"]).any
def test_community_engagement(ghtorrent):
assert ghtorrent.community_engagement('rails', 'rails').isin(["2010-09-11T00:00:00.000Z"]).any
def test_contributions(ghtorrent):
assert ghtorrent.contributions('ariya', 'phantomjs').isin(["ariya"]).any
def test_contributors(ghtorrent):
assert ghtorrent.contributors('TTimo', 'doom3.gpl').isin(["sergiocampama"]).any
def test_project_age(ghtorrent):
assert ghtorrent.project_age('rails', 'rails').isin(["2008-04-11T00:00:00.000Z"]).any
def test_fakes(ghtorrent):
assert ghtorrent.fakes('rails', 'rails').isin(["2008-09-24T00:00:00.000Z"]).any
| 35
| 123
| 0.738901
| 602
| 4,550
| 5.395349
| 0.252492
| 0.147783
| 0.229064
| 0.057574
| 0.334667
| 0.237993
| 0.133313
| 0.07697
| 0.065271
| 0.065271
| 0
| 0.085756
| 0.092747
| 4,550
| 129
| 124
| 35.271318
| 0.701066
| 0.053846
| 0
| 0
| 0
| 0
| 0.20873
| 0.099488
| 0
| 0
| 0
| 0
| 0.447761
| 1
| 0.462687
| false
| 0
| 0.044776
| 0
| 0.522388
| 0
| 0
| 0
| 0
| null | 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 1
| 0
|
0
| 4
|
134f15445e7302c0e86c87de872ff2ca3ad4311e
| 134
|
py
|
Python
|
my_code.py
|
Athenian-Computer-Science/dictionary-notes-assignment-template
|
435b0c312ba9813d32c166ccea4e1e41ab8f8bcd
|
[
"Apache-2.0"
] | null | null | null |
my_code.py
|
Athenian-Computer-Science/dictionary-notes-assignment-template
|
435b0c312ba9813d32c166ccea4e1e41ab8f8bcd
|
[
"Apache-2.0"
] | null | null | null |
my_code.py
|
Athenian-Computer-Science/dictionary-notes-assignment-template
|
435b0c312ba9813d32c166ccea4e1e41ab8f8bcd
|
[
"Apache-2.0"
] | null | null | null |
# Use this to take notes on the Edpuzzle video. Try each example rather than just watching it - you will get much more out of it!
#
| 44.666667
| 129
| 0.738806
| 25
| 134
| 3.96
| 0.96
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.223881
| 134
| 2
| 130
| 67
| 0.951923
| 0.947761
| 0
| null | 0
| null | 0
| 0
| null | 0
| 0
| 0
| null | 1
| null | true
| 0
| 0
| null | null | null | 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 4
|
1369d4c49e09e1f502eaa09062aedc111a1e000f
| 85
|
py
|
Python
|
checkov/bicep/checks/param/registry.py
|
jamesholland-uk/checkov
|
d73fd4bd7096d48ab3434a92a177bcc55605460a
|
[
"Apache-2.0"
] | null | null | null |
checkov/bicep/checks/param/registry.py
|
jamesholland-uk/checkov
|
d73fd4bd7096d48ab3434a92a177bcc55605460a
|
[
"Apache-2.0"
] | null | null | null |
checkov/bicep/checks/param/registry.py
|
jamesholland-uk/checkov
|
d73fd4bd7096d48ab3434a92a177bcc55605460a
|
[
"Apache-2.0"
] | null | null | null |
from checkov.bicep.checks.param.base_registry import Registry
registry = Registry()
| 21.25
| 61
| 0.823529
| 11
| 85
| 6.272727
| 0.727273
| 0.463768
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.094118
| 85
| 3
| 62
| 28.333333
| 0.896104
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.5
| 0
| 0.5
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
|
0
| 4
|
13913f6d784e5527d7e979b177b5c67feb8ea7ae
| 33,122
|
py
|
Python
|
src/visualization/MakeVeraReport.py
|
tsdataclinic/Vera
|
3d8f6545b689355f2cf3f4c267145e7639a89b23
|
[
"MIT"
] | 4
|
2020-03-11T20:34:03.000Z
|
2022-03-15T13:47:00.000Z
|
src/visualization/MakeVeraReport.py
|
Smarker/Vera
|
9a1eec490f970a9bf128c92ef4ff6d0e36ed114d
|
[
"MIT"
] | 7
|
2020-03-31T11:10:33.000Z
|
2021-11-18T20:06:56.000Z
|
src/visualization/MakeVeraReport.py
|
Smarker/Vera
|
9a1eec490f970a9bf128c92ef4ff6d0e36ed114d
|
[
"MIT"
] | 4
|
2020-03-11T20:16:11.000Z
|
2021-06-03T00:21:16.000Z
|
%load_ext autoreload
%autoreload 2
import sys
sys.path.append('/home/vera0519/vera_911')
import pandas as pd
# import cenpy
from slugify import slugify
from pathlib import Path
import src.features.call_types as call_types
from src.cities.new_orleans import NewOrleans
from src.cities.seattle import Seattle
from src.cities.dallas import Dallas
from src.cities.detroit import Detroit
from src.cities.charleston import Charleston
import matplotlib.pyplot as plt
import src.features.geo as Geo
from src.features.call_types import load_call_mappings, assign_disposition, process
import src.visualization.visualize as vis
BASE_MAP_DIR = Path('/home/vera0519/vera_911/reports/VeraExport/Maps')
BASE_CHARTS_DIR = Path('/home/vera0519/vera_911/reports/VeraExport/Charts')
new_orleans = NewOrleans()
dallas = Dallas()
seattle = Seattle()
detroit = Detroit()
charleston = Charleston()
new_orleans.clean_data(reload=True)
dallas.clean_data(reload=True)
detroit.clean_data(reload=True)
charleston.clean_data(reload=True);
seattle.clean_data(reload=True);
### SUMMARIES
def summary_office_initiated(city):
return (city.clean_data()
.groupby(['year','self_initiated'])
.count()
.reset_index()
.pivot_table(index='year', columns='self_initiated', values='day_of_week')
.assign(total=lambda x: x.sum(axis=1))
.assign(percent_other = lambda x: 100*x.No/x.total,
pecent_self_initaited = lambda x: 100*x.Yes/x.total)
.rename(columns={'No' : 'other', 'Yes': 'self_initiated'}))
def summary_by_type(city):
return (city.clean_data()
.loc[lambda x: x.year.isin([2014,2015,2016,2018,2019])]
.groupby(['year','self_initiated'])
.count()
.reset_index()
.pivot_table(index='year', columns='self_initiated', values='day_of_week')
.assign(total=lambda x: x.sum(axis=1))
.assign(percent_other = lambda x: 100*x.No/x.total,
pecent_self_initaited = lambda x: 100*x.Yes/x.total)
.rename(columns={'No' : 'other', 'Yes': 'self_initiated'}))
# .assign(percent_other = lambda x: 100*x.No/x.total,
# pecent_self_initaited = lambda x: 100*x.Yes/x.total)
# .rename(columns={'No' : 'other', 'Yes': 'self_initiated'}))
def summary_by_type_disposition(city, percentage=False):
return (city.clean_data()
.loc[lambda x: x.year.isin([2014,2015,2016,2018,2019])]
.rename(columns={'year': 'Year', 'call_type': 'Incident Type','disposition':"Disposition"})
.groupby(['Year','Incident Type','Disposition'])
.count()[['index']]
.rename(columns= {'index' : 'Frequency'})
.reset_index()
.pivot_table(index=['Year','Incident Type'], columns='Disposition', values='Frequency')
.fillna(0)
.pipe(lambda x: 100* x.div(x.sum(axis=1),axis =0 ) if percentage else x)
.rename(columns = lambda x: "Percent {}".format(x) if percentage else "Frequency {}".format(x))
)
def full_summary_by_type_disposition(city):
return pd.merge(
summary_by_type_disposition(new_orleans, percentage=False),
summary_by_type_disposition(new_orleans, percentage=True),
left_index=True,
right_index=True
)
def summary_of_priority(city, percentage = False):
return (city.clean_data()
.rename(columns= {'year': 'Year', 'priority': 'Priority'})
.groupby(['Year','Priority'])
.count()
[['index']]
.rename(columns={'index': 'Frequency'})
.reset_index()
.pivot_table(index='Year', columns='Priority',values='Frequency')
.pipe(lambda x: 100* x.div(x.sum(axis=1),axis =0 ) if percentage else x)
.fillna(0)
.rename(columns = lambda x: "Percent {}".format(x) if percentage else "Frequency {}".format(x))
)
### MAPS
def generate_per_capita_maps(geometry='tract'):
plot_dir = BASE_MAP_DIR / "call_volumne_per_capita"
plot_dir.mkdir(exist_ok=True)
figsize = (10,10)
for city in [new_orleans,dallas,detroit,charleston]:
tracts = city.load_tracts().to_crs(vis.map_crs)
fig = plt.figure(figsize=figsize)
ax = fig.add_subplot(111)
vis.map_call_volume(city, norm_by='capita', ax=ax, scheme='percentiles')
tracts.plot(color='none', edgecolor='red', ax=ax)
fig.savefig(plot_dir / '{}-{}-tracts-all_years-all_CFS.png'.format(city.BASE_NAME, 'calls_per_capita'))
for year in city.USE_YEARS:
fig = plt.figure(figsize=figsize)
ax = fig.add_subplot(111)
vis.map_call_volume(city, norm_by='capita', ax=ax, scheme='percentiles',year=year)
tracts.plot(facecolor='none', edgecolor='black', ax=ax)
ax.set_title('{} / calls per capita / {} / {}'.format(city.BASE_NAME, 'All Years', 'All CFS'))
fig.savefig(plot_dir / '{}-{}-tracts-{}-all_CFS.png'.format(city.BASE_NAME, 'calls_per_capita',year))
for year in city.USE_YEARS:
fig = plt.figure(figsize=figsize)
ax = fig.add_subplot(111)
vis.map_call_volume(city, norm_by='capita', ax=ax, scheme='percentiles',year=year)
ax.set_title('{} / calls per capita / {} / {}'.format(city.BASE_NAME, year, 'All CFS'))
tracts.plot(facecolor='none', edgecolor='black', ax=ax)
fig.savefig(plot_dir / '{}-{}-tracts-{}-all_CFS.png'.format(city.BASE_NAME, 'calls_per_capita',year))
for cfs in city.clean_data().call_type.unique():
if(cfs != None):
fig = plt.figure(figsize=figsize)
ax = fig.add_subplot(111)
vis.map_call_volume(city, norm_by='capita', ax=ax, scheme='percentiles',call_type=cfs)
ax.set_title('{} / calls per capita / {} / {}'.format(city.BASE_NAME, 'All Years', cfs))
tracts.plot(facecolor='none', edgecolor='black', ax=ax)
fig.savefig(plot_dir / '{}-{}-tracts-all_years-{}.png'.format(city.BASE_NAME, 'calls_per_capita',slugify(cfs)))
for year in city.USE_YEARS:
for cfs in city.clean_data().call_type.unique():
if(cfs != None):
fig = plt.figure(figsize=figsize)
ax = fig.add_subplot(111)
vis.map_call_volume(city, norm_by='capita', ax=ax, scheme='percentiles',year=year,call_type=cfs)
ax.set_title('{} / calls per capita / {} / {}'.format(city.BASE_NAME, year, cfs))
tracts.plot(facecolor='none', edgecolor='black', ax=ax)
fig.savefig(plot_dir / '{}-{}-tracts-all_years-{}.png'.format(city.BASE_NAME,'calls_per_capita',slugify(cfs)))
def generate_self_initiated_fraction_maps(geometry='tract'):
    """Save choropleth maps of the officer-initiated fraction of calls.

    For each city, maps are written for: all years / all call types, each
    year, each call type, and each (year, call type) pair.  Census-tract
    outlines are drawn on top of every map.

    Fixes relative to the original:
    - the first map used ``color='none'`` while all others used
      ``facecolor='none'``; outlines are now drawn consistently;
    - a duplicated per-year loop (first pass had a wrong 'All Years' title
      and was overwritten by the second) has been collapsed to one pass;
    - per-(year, CFS) filenames now include the year — the original wrote
      every year to the same 'all_years' file, clobbering earlier output;
    - figures are closed after saving to avoid accumulating open figures.
    """
    plot_dir = BASE_MAP_DIR / "officer_initiated"
    plot_dir.mkdir(exist_ok=True)
    figsize = (10, 10)
    for city in [new_orleans, dallas, detroit, charleston]:
        tracts = city.load_tracts().to_crs(vis.map_crs)
        # All years, all call types.
        fig = plt.figure(figsize=figsize)
        ax = fig.add_subplot(111)
        vis.map_self_initiated(city, norm_by='total', ax=ax)
        tracts.plot(facecolor='none', edgecolor='black', ax=ax)
        fig.savefig(plot_dir / '{}-{}-tracts-all_years-all_CFS.png'.format(city.BASE_NAME, 'officer_initiated_fraction'))
        plt.close(fig)
        # One map per year (all call types).
        for year in city.USE_YEARS:
            fig = plt.figure(figsize=figsize)
            ax = fig.add_subplot(111)
            vis.map_self_initiated(city, norm_by='total', ax=ax, year=year)
            ax.set_title('{} / officer initiated fraction / {} / {}'.format(city.BASE_NAME, year, 'All CFS'))
            tracts.plot(facecolor='none', edgecolor='black', ax=ax)
            fig.savefig(plot_dir / '{}-{}-tracts-{}-all_CFS.png'.format(city.BASE_NAME, 'officer_initiated_fraction', year))
            plt.close(fig)
        # One map per call type (all years).
        for cfs in city.clean_data().call_type.unique():
            if cfs is not None:
                fig = plt.figure(figsize=figsize)
                ax = fig.add_subplot(111)
                vis.map_self_initiated(city, norm_by='total', ax=ax, call_type=cfs)
                ax.set_title('{} / officer initiated fraction / {} / {}'.format(city.BASE_NAME, 'All Years', cfs))
                tracts.plot(facecolor='none', edgecolor='black', ax=ax)
                fig.savefig(plot_dir / '{}-{}-tracts-all_years-{}.png'.format(city.BASE_NAME, 'officer_initiated_fraction', slugify(cfs)))
                plt.close(fig)
        # One map per (year, call type) pair.
        for year in city.USE_YEARS:
            for cfs in city.clean_data().call_type.unique():
                if cfs is not None:
                    fig = plt.figure(figsize=figsize)
                    ax = fig.add_subplot(111)
                    vis.map_self_initiated(city, norm_by='total', ax=ax, year=year, call_type=cfs)
                    ax.set_title('{} / officer initiated fraction / {} / {}'.format(city.BASE_NAME, year, cfs))
                    tracts.plot(facecolor='none', edgecolor='black', ax=ax)
                    fig.savefig(plot_dir / '{}-{}-tracts-{}-{}.png'.format(city.BASE_NAME, 'officer_initiated_fraction', year, slugify(cfs)))
                    plt.close(fig)
def generate_enforcement_action_maps(geometry='tract'):
    """Save choropleth maps of enforcement-action rates by census tract.

    For each city, maps are written for: all years / all call types, each
    year, each call type, and each (year, call type) pair, with tract
    outlines overlaid.

    Fixes relative to the original: consistent ``facecolor='none'`` for the
    outline overlay; the duplicated per-year loop (wrongly titled first pass,
    overwritten by the second) collapsed to one; per-(year, CFS) filenames
    now include the year instead of reusing 'all_years' and clobbering the
    per-call-type maps; figures are closed after saving.
    """
    plot_dir = BASE_MAP_DIR / "enforcement_action"
    plot_dir.mkdir(exist_ok=True)
    figsize = (10, 10)
    for city in [new_orleans, dallas, detroit, charleston]:
        tracts = city.load_tracts().to_crs(vis.map_crs)
        # All years, all call types.
        fig = plt.figure(figsize=figsize)
        ax = fig.add_subplot(111)
        vis.map_enforcement_by_tract(city, ax=ax)
        tracts.plot(facecolor='none', edgecolor='black', ax=ax)
        fig.savefig(plot_dir / '{}-{}-tracts-all_years-all_CFS.png'.format(city.BASE_NAME, 'enforcement_action'))
        plt.close(fig)
        # One map per year (all call types).
        for year in city.USE_YEARS:
            fig = plt.figure(figsize=figsize)
            ax = fig.add_subplot(111)
            vis.map_enforcement_by_tract(city, ax=ax, year=year)
            ax.set_title('{} / Enforcement Action / {} / {}'.format(city.BASE_NAME, year, 'All CFS'))
            tracts.plot(facecolor='none', edgecolor='black', ax=ax)
            fig.savefig(plot_dir / '{}-{}-tracts-{}-all_CFS.png'.format(city.BASE_NAME, 'enforcement_action', year))
            plt.close(fig)
        # One map per call type (all years).
        for cfs in city.clean_data().call_type.unique():
            if cfs is not None:
                fig = plt.figure(figsize=figsize)
                ax = fig.add_subplot(111)
                vis.map_enforcement_by_tract(city, ax=ax, call_type=cfs)
                ax.set_title('{} / Enforcement Action / {} / {}'.format(city.BASE_NAME, 'All Years', cfs))
                tracts.plot(facecolor='none', edgecolor='black', ax=ax)
                fig.savefig(plot_dir / '{}-{}-tracts-all_years-{}.png'.format(city.BASE_NAME, 'enforcement_action', slugify(cfs)))
                plt.close(fig)
        # One map per (year, call type) pair.
        for year in city.USE_YEARS:
            for cfs in city.clean_data().call_type.unique():
                if cfs is not None:
                    fig = plt.figure(figsize=figsize)
                    ax = fig.add_subplot(111)
                    vis.map_enforcement_by_tract(city, ax=ax, year=year, call_type=cfs)
                    ax.set_title('{} / Enforcement Action / {} / {}'.format(city.BASE_NAME, year, cfs))
                    tracts.plot(facecolor='none', edgecolor='black', ax=ax)
                    fig.savefig(plot_dir / '{}-{}-tracts-{}-{}.png'.format(city.BASE_NAME, 'enforcement_action', year, slugify(cfs)))
                    plt.close(fig)
def generate_response_time_maps(geometry='tract'):
    """Save choropleth maps of median response time by census tract.

    For each city, maps are written for: all years / all call types, each
    year, each call type, and each (year, call type) pair, with tract
    outlines overlaid.

    Fixes relative to the original: the all-years map passed
    ``scheme='percentiles'`` to the outline ``tracts.plot`` call instead of
    the choropleth call (every other map passes it to the vis call — moved
    accordingly); duplicated per-year loop collapsed; per-(year, CFS)
    filenames now include the year; figures are closed after saving.
    """
    plot_dir = BASE_MAP_DIR / "median_response_time"
    plot_dir.mkdir(exist_ok=True)
    figsize = (10, 10)
    for city in [new_orleans, dallas, detroit, charleston]:
        tracts = city.load_tracts().to_crs(vis.map_crs)
        # All years, all call types.
        fig = plt.figure(figsize=figsize)
        ax = fig.add_subplot(111)
        vis.map_median_response_time(city, ax=ax, scheme='percentiles')
        tracts.plot(facecolor='none', edgecolor='black', ax=ax)
        fig.savefig(plot_dir / '{}-{}-tracts-all_years-all_CFS.png'.format(city.BASE_NAME, 'median_response_time'))
        plt.close(fig)
        # One map per year (all call types).
        for year in city.USE_YEARS:
            fig = plt.figure(figsize=figsize)
            ax = fig.add_subplot(111)
            vis.map_median_response_time(city, ax=ax, year=year, scheme='percentiles')
            ax.set_title('{} / Median Response Time / {} / {}'.format(city.BASE_NAME, year, 'All CFS'))
            tracts.plot(facecolor='none', edgecolor='black', ax=ax)
            fig.savefig(plot_dir / '{}-{}-tracts-{}-all_CFS.png'.format(city.BASE_NAME, 'median_response_time', year))
            plt.close(fig)
        # One map per call type (all years).
        for cfs in city.clean_data().call_type.unique():
            if cfs is not None:
                fig = plt.figure(figsize=figsize)
                ax = fig.add_subplot(111)
                vis.map_median_response_time(city, ax=ax, call_type=cfs, scheme='percentiles')
                ax.set_title('{} / Median Response Time / {} / {}'.format(city.BASE_NAME, 'All Years', cfs))
                tracts.plot(facecolor='none', edgecolor='black', ax=ax)
                fig.savefig(plot_dir / '{}-{}-tracts-all_years-{}.png'.format(city.BASE_NAME, 'median_response_time', slugify(cfs)))
                plt.close(fig)
        # One map per (year, call type) pair.
        for year in city.USE_YEARS:
            for cfs in city.clean_data().call_type.unique():
                if cfs is not None:
                    fig = plt.figure(figsize=figsize)
                    ax = fig.add_subplot(111)
                    vis.map_median_response_time(city, ax=ax, year=year, call_type=cfs, scheme='percentiles')
                    ax.set_title('{} / Median Response Time / {} / {}'.format(city.BASE_NAME, year, cfs))
                    tracts.plot(facecolor='none', edgecolor='black', ax=ax)
                    fig.savefig(plot_dir / '{}-{}-tracts-{}-{}.png'.format(city.BASE_NAME, 'median_response_time', year, slugify(cfs)))
                    plt.close(fig)
def generate_disposition_by_CFS():
    """Save horizontal bar charts of disposition fractions per call type:
    one chart per city over all years, plus one per (city, year).

    NOTE(review): an identical function with the same name is defined again
    later in this module; that later definition shadows this one at import
    time, so this copy is dead code — one of the two should be removed.
    """
    plot_dir = BASE_CHARTS_DIR / 'disposition_by_CFS'
    plot_dir.mkdir(exist_ok=True)
    figsize=(15,10)
    for city in [new_orleans,dallas,detroit,charleston,seattle]:
        # All-years chart for this city.
        fig = plt.figure(figsize=figsize)
        ax = fig.add_subplot(111)
        vis.plot_disposition_fraction_by_call_Type(city, ax=ax)
        ax.set_xlim(0,1)
        # Legend placed outside the axes, vertically centred on the right.
        ax.legend(loc='center left', bbox_to_anchor=(1, 0.5))
        ax.set_title('{} - {}'.format(city.BASE_NAME,'All Years'))
        plt.tight_layout()
        fig.savefig(plot_dir / '{}-{}.png'.format(city.BASE_NAME, 'disposition_by_CFS'))
        # One chart per individual year.
        for year in city.USE_YEARS:
            fig = plt.figure(figsize=figsize)
            ax = fig.add_subplot(111)
            vis.plot_disposition_fraction_by_call_Type(city, ax=ax, year=year)
            ax.set_xlim(0,1)
            ax.legend(loc='center left', bbox_to_anchor=(1, 0.5))
            ax.set_title('{} - {}'.format(city.BASE_NAME,year))
            plt.tight_layout()
            fig.savefig(plot_dir / '{}-{}-{}.png'.format(city.BASE_NAME, 'disposition_by_CFS',year))
def self_initiated_by_CFS():
    """Save horizontal bar charts of the officer-initiated fraction per call
    type: one chart per city over all years, plus one per (city, year).

    NOTE(review): this is byte-for-byte identical to
    generate_officer_initiated_by_CFS defined later in this module, and only
    the latter is invoked from __main__ — this copy appears to be dead code.
    """
    plot_dir = BASE_CHARTS_DIR / 'officer_initiated_by_CFS'
    plot_dir.mkdir(exist_ok=True)
    figsize=(15,10)
    for city in [new_orleans,dallas,detroit,charleston,seattle]:
        # All-years chart for this city.
        fig = plt.figure(figsize=figsize)
        ax = fig.add_subplot(111)
        vis.plot_self_initated_by_call_type(city, ax=ax)
        ax.set_xlim(0,1)
        # Legend placed outside the axes, vertically centred on the right.
        ax.legend(loc='center left', bbox_to_anchor=(1, 0.5))
        ax.set_title('{} - {}'.format(city.BASE_NAME,'All Years'))
        plt.tight_layout()
        fig.savefig(plot_dir / '{}-{}.png'.format(city.BASE_NAME, 'officer_initiated_by_CFS'))
        # One chart per individual year.
        for year in city.USE_YEARS:
            fig = plt.figure(figsize=figsize)
            ax = fig.add_subplot(111)
            vis.plot_self_initated_by_call_type(city, ax=ax, year=year)
            ax.set_xlim(0,1)
            ax.legend(loc='center left', bbox_to_anchor=(1, 0.5))
            ax.set_title('{} - {}'.format(city.BASE_NAME,year))
            plt.tight_layout()
            fig.savefig(plot_dir / '{}-{}-{}.png'.format(city.BASE_NAME, 'officer_initiated_by_CFS',year))
### Plots
def generate_CFS_breakdown():
    """Save call-for-service (CFS) breakdown charts: one per city over all
    years, plus one per (city, year).

    Fix relative to the original: the progress message read 'Doint city'.
    """
    plot_dir = BASE_CHARTS_DIR / 'CFS_breakdown'
    plot_dir.mkdir(exist_ok=True)
    figsize = (10, 10)
    for city in [new_orleans, dallas, detroit, charleston, seattle]:
        print('Doing city {}'.format(city.BASE_NAME))
        fig = plt.figure(figsize=figsize)
        # Axes are created so the figure has a drawable subplot;
        # plot_cfs_breakdown presumably draws into the current axes
        # (it takes no ax argument here) — TODO confirm against vis.
        ax = fig.add_subplot(111)
        vis.plot_cfs_breakdown(city)
        fig.savefig(plot_dir / '{}-{}.png'.format(city.BASE_NAME, 'cfs_breakdown'))
        plt.close(fig)
        for year in city.USE_YEARS:
            fig = plt.figure(figsize=figsize)
            ax = fig.add_subplot(111)
            vis.plot_cfs_breakdown(city, year=year)
            fig.savefig(plot_dir / '{}-{}-{}.png'.format(city.BASE_NAME, 'cfs_breakdown', year))
            plt.close(fig)
def generate_disposition_by_CFS():
    """Save horizontal bar charts of disposition fractions per call type:
    one chart per city over all years, plus one per (city, year).

    NOTE(review): this redefines the identically named, identically bodied
    function earlier in this module and therefore shadows it; one of the
    two copies should be removed.
    """
    plot_dir = BASE_CHARTS_DIR / 'disposition_by_CFS'
    plot_dir.mkdir(exist_ok=True)
    figsize=(15,10)
    for city in [new_orleans,dallas,detroit,charleston,seattle]:
        # All-years chart for this city.
        fig = plt.figure(figsize=figsize)
        ax = fig.add_subplot(111)
        vis.plot_disposition_fraction_by_call_Type(city, ax=ax)
        ax.set_xlim(0,1)
        # Legend placed outside the axes, vertically centred on the right.
        ax.legend(loc='center left', bbox_to_anchor=(1, 0.5))
        ax.set_title('{} - {}'.format(city.BASE_NAME,'All Years'))
        plt.tight_layout()
        fig.savefig(plot_dir / '{}-{}.png'.format(city.BASE_NAME, 'disposition_by_CFS'))
        # One chart per individual year.
        for year in city.USE_YEARS:
            fig = plt.figure(figsize=figsize)
            ax = fig.add_subplot(111)
            vis.plot_disposition_fraction_by_call_Type(city, ax=ax, year=year)
            ax.set_xlim(0,1)
            ax.legend(loc='center left', bbox_to_anchor=(1, 0.5))
            ax.set_title('{} - {}'.format(city.BASE_NAME,year))
            plt.tight_layout()
            fig.savefig(plot_dir / '{}-{}-{}.png'.format(city.BASE_NAME, 'disposition_by_CFS',year))
## Quantile based breakdowns
def call_type_breakdown_by_quantile(data, variable, quantile_range, ax=None, subset=None):
    """Bar-plot the percentage share of each call type among rows whose
    `variable` value lies strictly between the given quantiles.

    Parameters
    ----------
    data : DataFrame with 'call_type', 'index', and `variable` columns.
    variable : column used for the quantile slice.
    quantile_range : (low, high) quantile pair, e.g. [0, 0.1].
    ax : matplotlib axes to draw into (current axes if None).
    subset : accepted for interface compatibility; currently unused.

    Returns the axes.  If the aggregation fails (e.g. the slice is empty),
    a 'No data' placeholder is drawn instead.
    """
    try:
        lo = data[variable].quantile(quantile_range[0])
        hi = data[variable].quantile(quantile_range[1])
        agg = (data[(data[variable] > lo) & (data[variable] < hi)]
               .groupby('call_type')
               .count()[['index']]
               .assign(total=lambda x: x['index'].sum())
               .pipe(lambda x: 100 * x.div(x.total, axis=0))
               ['index']
               .sort_values())
        return agg.plot(kind='barh', ax=ax)
    except Exception:
        # Narrowed from a bare `except:` (which also swallowed SystemExit/
        # KeyboardInterrupt).  Also fixed: the fallback crashed with
        # AttributeError when ax was None.
        if ax is None:
            ax = plt.gca()
        ax.text(0.5, 0.5, 'No data', horizontalalignment='center', verticalalignment='center')
        ax.set_axis_off()
        return ax
def call_type_breakdown_by_quantile_percent(data, variable, breakdown, quantile_range, ax=None, subset=None):
    """Stacked bar-plot of each call type's percentage split over the
    `breakdown` column (e.g. 'disposition'), restricted to rows whose
    `variable` value lies strictly between the given quantiles.

    Parameters
    ----------
    data : DataFrame with 'call_type', 'index', `breakdown`, and `variable`.
    variable : column used for the quantile slice.
    breakdown : column whose categories form the stacked segments.
    quantile_range : (low, high) quantile pair.
    ax : matplotlib axes to draw into (current axes if None).
    subset : accepted for interface compatibility; currently unused.

    Returns the axes; draws a 'No data' placeholder if aggregation fails.
    """
    try:
        lo = data[variable].quantile(quantile_range[0])
        hi = data[variable].quantile(quantile_range[1])
        ax = (data[(data[variable] > lo) & (data[variable] < hi)]
              .groupby(['call_type', breakdown])
              .count()[['index']]
              .reset_index()
              .pivot_table(index='call_type', values='index', columns=breakdown)
              .assign(total=lambda x: x.sum(axis=1))
              .sort_values(by='total')
              .pipe(lambda x: 100 * x.div(x.total, axis=0))
              .drop('total', axis=1)
              .plot(kind='barh', stacked=True, ax=ax))
        return ax
    except Exception:
        # Narrowed from a bare `except:`; fallback no longer crashes when
        # ax is None.
        if ax is None:
            ax = plt.gca()
        ax.text(0.5, 0.5, 'No data', horizontalalignment='center', verticalalignment='center')
        ax.set_axis_off()
        return ax
# subset = ['Violent Crime','Suspicion','Drugs','Sex Offenses', 'Domestic Violence', 'Property Crime ']
def plot_demographic_breakdown_summary(city, variable, path, year=None, subset=None):
    """Save a 3x2 figure comparing call-type breakdowns between the bottom
    and top deciles of `variable` (call volume, officer-initiated split,
    and disposition split for each decile).

    Fixes relative to the original:
    - `subset` was referenced but never defined (NameError on every call);
      it is now a keyword argument defaulting to None;
    - the shared x-limit used axs[0]'s *lower* bound instead of its upper;
    - tight_layout's rect was [0, .98, 0, 1] (zero width); it now simply
      reserves headroom for the suptitle.
    """
    fig, axs = plt.subplots(nrows=3, ncols=2, sharey='row', figsize=(20, 15))
    data = (city.filter_calls_by(year=year)
            .pipe(city.assign_demographics))
    axs = axs.flatten()
    # Left column: bottom decile.
    call_type_breakdown_by_quantile(data, variable, quantile_range=[0, 0.1], ax=axs[0], subset=subset)
    axs[0].set_xlim(0, 8.5)
    axs[0].set_title('Lowest 10% by median income')
    axs[0].set_xlabel('')
    axs[0].set_ylabel('')
    call_type_breakdown_by_quantile_percent(data, variable, 'self_initiated', [0, 0.1], ax=axs[2], subset=subset)
    axs[2].set_xlim(0, 100)
    axs[2].set_ylabel('% of CFS type by officer initiated')
    axs[2].set_title('')
    axs[2].get_legend().remove()
    call_type_breakdown_by_quantile_percent(data, variable, 'disposition', [0, 0.1], ax=axs[4], subset=subset)
    axs[4].set_xlim(0, 100)
    axs[4].set_ylabel('% of CFS type by disposition')
    axs[4].set_title('')
    axs[4].get_legend().remove()
    # Right column: top decile.
    call_type_breakdown_by_quantile(data, variable, quantile_range=[0.9, 1.0], ax=axs[1], subset=subset)
    axs[1].set_xlim(0, 8.5)
    axs[1].set_title('Highest 10% by median income')
    axs[1].set_xlabel('')
    axs[1].set_ylabel('highest 10% by median income')
    # Give both decile panels the same x-range (fix: compare upper bounds).
    max_x = max(axs[1].get_xlim()[1], axs[0].get_xlim()[1])
    axs[0].set_xlim(0, max_x)
    axs[1].set_xlim(0, max_x)
    call_type_breakdown_by_quantile_percent(data, variable, 'self_initiated', [0.9, 1.0], ax=axs[3], subset=subset)
    axs[3].set_xlim(0, 100)
    axs[3].set_ylabel('')
    axs[3].legend(loc='upper right', title='', bbox_to_anchor=(1.25, 1.0), labels=["Other", 'Officer Initiated'])
    call_type_breakdown_by_quantile_percent(data, variable, 'disposition', [0.9, 1.0], ax=axs[5], subset=subset)
    axs[5].set_xlim(0, 100)
    axs[5].set_ylabel('')
    axs[5].legend(loc='upper right', title='', bbox_to_anchor=(1.3, 1.0), fontsize='x-large')
    plt.suptitle("{} - {} - {}".format(city.BASE_NAME, variable, year if year else 'All Years'))
    plt.tight_layout(rect=[0, 0, 1, 0.96])
    fig.savefig(path / "{}-{}-{}-{}.png".format(city.BASE_NAME, 'demographic_quantiles', variable, year if year else 'All Years'))
    plt.close(fig)
def generate_response_time_by_CFS():
    """Save response-time distribution charts: per city, per (city, year),
    per (city, call type), and per (city, year, call type)."""
    plot_dir = BASE_CHARTS_DIR / 'response_time'
    plot_dir.mkdir(exist_ok=True)
    figsize = (15, 10)
    for city in [new_orleans, dallas, detroit, charleston, seattle]:
        fig = plt.figure(figsize=figsize)
        ax = fig.add_subplot(111)
        vis.plot_response_time_dist(city, ax=ax)
        ax.set_title('{} - {}'.format(city.BASE_NAME, 'All Years'))
        fig.savefig(plot_dir / '{}-{}.png'.format(city.BASE_NAME, 'response_time'))
        plt.close(fig)
        for year in city.USE_YEARS:
            fig = plt.figure(figsize=figsize)
            ax = fig.add_subplot(111)
            vis.plot_response_time_dist(city, ax=ax, year=year)
            ax.set_title('{} - {} - {} - {}'.format(city.BASE_NAME, 'response_time', year, 'All CFS'))
            fig.savefig(plot_dir / '{}-{}-{}-{}.png'.format(city.BASE_NAME, 'response_time', year, 'All CFS'))
            plt.close(fig)
        for cfs in city.clean_data().call_type.unique():
            if cfs is not None:
                fig = plt.figure(figsize=figsize)
                ax = fig.add_subplot(111)
                vis.plot_response_time_dist(city, ax=ax, call_type=cfs)
                ax.set_title('{} - {} - {} - {}'.format(city.BASE_NAME, 'response_time', 'All Years', cfs))
                fig.savefig(plot_dir / '{}-{}-{}-{}.png'.format(city.BASE_NAME, 'response_time', 'all_years', slugify(cfs)))
                plt.close(fig)
        for year in city.USE_YEARS:
            for cfs in city.clean_data().call_type.unique():
                if cfs is not None:
                    fig = plt.figure(figsize=figsize)
                    ax = fig.add_subplot(111)
                    # Bug fix: the original omitted call_type=cfs here, so the
                    # per-CFS charts for a year were identical all-CFS plots
                    # mislabelled with the call type.
                    vis.plot_response_time_dist(city, ax=ax, year=year, call_type=cfs)
                    ax.set_title('{} - {} - {} - {}'.format(city.BASE_NAME, 'response_time', year, cfs))
                    fig.savefig(plot_dir / '{}-{}-{}-{}.png'.format(city.BASE_NAME, 'response_time', year, slugify(cfs)))
                    plt.close(fig)
def generate_percentile_comparisons():
    """Save demographic-quantile comparison figures for each city and
    demographic variable (all years plus each individual year).

    Bug fix: the original passed `new_orleans` to
    plot_demographic_breakdown_summary on every iteration, so the loop over
    cities only ever produced New Orleans figures.  Loop-invariant setup
    (demos list, output dir) is also hoisted out of the city loop.
    """
    out_dir = BASE_CHARTS_DIR / 'demographics_comparison'
    out_dir.mkdir(exist_ok=True)
    demos = ['pc_white', 'pc_black', 'pc_occupied_homes', 'median_income', 'median_rent', 'percent_income_spent_on_rent']
    for city in [new_orleans, dallas, detroit, charleston]:
        for demo in demos:
            plot_demographic_breakdown_summary(city, demo, out_dir)
            for year in city.USE_YEARS:
                plot_demographic_breakdown_summary(city, demo, out_dir, year=year)
def generate_officer_initiated_by_CFS():
    """Save horizontal bar charts of the officer-initiated fraction per
    call type: one chart per city over all years, plus one per (city, year).
    """
    plot_dir = BASE_CHARTS_DIR / 'officer_initiated_by_CFS'
    plot_dir.mkdir(exist_ok=True)
    figsize = (15, 10)

    def _render(city, year=None):
        # Draw one chart (all years when year is None) and save it.
        fig = plt.figure(figsize=figsize)
        ax = fig.add_subplot(111)
        if year is None:
            vis.plot_self_initated_by_call_type(city, ax=ax)
            label = 'All Years'
            filename = '{}-{}.png'.format(city.BASE_NAME, 'officer_initiated_by_CFS')
        else:
            vis.plot_self_initated_by_call_type(city, ax=ax, year=year)
            label = year
            filename = '{}-{}-{}.png'.format(city.BASE_NAME, 'officer_initiated_by_CFS', year)
        ax.set_xlim(0, 1)
        # Legend outside the axes, vertically centred on the right.
        ax.legend(loc='center left', bbox_to_anchor=(1, 0.5))
        ax.set_title('{} - {}'.format(city.BASE_NAME, label))
        plt.tight_layout()
        fig.savefig(plot_dir / filename)

    for city in [new_orleans, dallas, detroit, charleston, seattle]:
        _render(city)
        for year in city.USE_YEARS:
            _render(city, year=year)
## Correlation coefficent plots
def make_tract_metrics(city, call_type=None, year=None):
    """Return a per-tract DataFrame of calls per capita per year, the
    enforcement fraction, the officer-initiated fraction, and tract
    demographics, optionally filtered by call type and/or year.

    The two fraction columns fall back to NaN when the underlying breakdown
    is unavailable for this city/filter combination.
    """
    col_volume = (city.call_volume_by_tract(norm_by='capita', call_type=call_type, year=year)
                  .dropna()
                  .drop('geometry', axis=1)).rename(columns={'calls': "calls_per_capita_per_year"})
    try:
        enforcement_fraction = city.disposition_by_tract(norm_by='total', call_type=call_type, year=year)[city.ENFORCEMENT_VARIABLES].sum(axis=1)
    except Exception:
        # Narrowed from a bare `except:` so SystemExit/KeyboardInterrupt
        # are no longer swallowed; missing breakdowns degrade to NaN.
        enforcement_fraction = np.nan
    try:
        officer_initated_fraction = city.self_initated_by_tract(norm_by='total', call_type=call_type, year=year)['Yes']
    except Exception:
        officer_initated_fraction = np.nan
    return city.assign_demographics(col_volume.assign(enforcement_fraction=enforcement_fraction, officer_initated_fraction=officer_initated_fraction))
def make_corr_plot_for_city(data, city, cut='All', metrics=('calls_per_capita_per_year', 'enforcement_fraction', 'officer_initated_fraction')):
    """Return a tidy DataFrame of Pearson correlations between each metric
    present in `data` and a fixed set of tract demographic columns.

    Parameters
    ----------
    data : DataFrame of tract metrics plus demographic columns; the
        'state'/'county'/'tract'/'geometry' columns are dropped first.
    city : label stored in the output's 'city' column.
    cut : accepted for interface compatibility; currently unused.
    metrics : metric column names to correlate.  Changed from a mutable
        list default to a tuple (mutable-default-argument anti-pattern);
        callers passing a list are unaffected.

    Returns a DataFrame with columns 'c', 'city', 'metric', 'demo'.
    """
    demos = ['pc_asian', 'pc_black', 'pc_employed', 'pc_hispanic', 'pc_occupied_homes', 'pc_white', 'median_income', 'median_rent']
    corr_data = data.copy().drop(['state', 'county', 'tract', 'geometry'], axis=1)
    corr_data = corr_data.dropna(axis=1, how='all')
    # Negative values are treated as missing (presumably sentinel codes in
    # the census columns — TODO confirm) and their rows are dropped.
    corr_data[corr_data < 0] = None
    corr_data = corr_data.dropna(how='any', axis=0)
    result = []
    for metric in metrics:
        if metric in corr_data.columns:
            for demo in demos:
                result.append({'c': corr_data[metric].corr(corr_data[demo]), 'city': city, 'metric': metric, 'demo': demo})
    return pd.DataFrame(result)
def generate_correlation_coefficent_plots():
    """For every call type, plot per-demographic strip plots of the
    correlation between tract metrics and demographics across cities, and
    save one figure per call type.

    Fixes relative to the original:
    - `plot_dir` was never defined (NameError at savefig); it is now
      created under BASE_CHARTS_DIR;
    - the deprecated (removed in pandas 2.0) DataFrame.append is replaced
      with pd.concat over collected frames;
    - a commented-out PairGrid experiment was removed.
    """
    plot_dir = BASE_CHARTS_DIR / 'correlation_coefficients'
    plot_dir.mkdir(exist_ok=True)
    for call_type in new_orleans.clean_data().call_type.unique():
        frames = []
        for city in [new_orleans, detroit, dallas, charleston]:
            tract_data = make_tract_metrics(city, call_type=call_type).set_index('GEOID')
            frames.append(make_corr_plot_for_city(tract_data, city.BASE_NAME))
        all_data = pd.concat(frames, ignore_index=True)
        plt.figure(figsize=(10, 5))
        demo_names = ['% Asian', '% Black', '% Hispanic', '% White', '% Employed', 'Median Income', 'Median rent', '% Occupied homes']
        for index, demo in enumerate(['pc_asian', 'pc_black', 'pc_hispanic', 'pc_white', 'pc_employed', 'median_income', 'median_rent', 'pc_occupied_homes']):
            plt.subplot(2, 4, index + 1)
            ax = sns.stripplot(data=all_data[all_data.demo == demo], x='c', y='metric', hue='city')
            ax.set_xlim(-1, 1)
            # Only the leftmost column of each row keeps metric labels.
            if index != 0 and index != 4:
                ax.set_yticklabels([])
            else:
                ax.set_yticklabels(['Calls per capita per year', 'Enforcement fraction', 'Officer initiated'])
            plt.axvline(x=0)
            # A single shared legend, attached to the last panel of row one.
            if index == 3:
                plt.legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.)
            else:
                ax.get_legend().remove()
            ax.set_ylabel('')
            ax.set_xlabel('Correlation coefficent')
            plt.title(demo_names[index])
        plt.tight_layout(rect=[0, 0.03, 1, 0.95])
        plt.suptitle(call_type)
        plt.savefig(plot_dir / slugify(call_type))
if __name__ == "__main__":
    # Export the per-city summary workbooks, then regenerate every map and
    # chart.  (Stray semicolons removed; sheet name 'Chrleston' fixed.)
    with pd.ExcelWriter('../../reports/CityPriorityBreakdown.xlsx') as writer:
        summary_of_priority(new_orleans).to_excel(writer, sheet_name='New Orleans')
        summary_of_priority(dallas).to_excel(writer, sheet_name='Dallas')
        summary_of_priority(detroit).to_excel(writer, sheet_name='Detroit')
        summary_of_priority(seattle).to_excel(writer, sheet_name='Seattle')
        summary_of_priority(charleston).to_excel(writer, sheet_name='Charleston')
    with pd.ExcelWriter('../../reports/CityCallTypeDisposition.xlsx') as writer:
        full_summary_by_type_disposition(new_orleans).to_excel(writer, sheet_name='New Orleans')
        full_summary_by_type_disposition(dallas).to_excel(writer, sheet_name='Dallas')
        full_summary_by_type_disposition(detroit).to_excel(writer, sheet_name='Detroit')
        full_summary_by_type_disposition(charleston).to_excel(writer, sheet_name='Charleston')
        full_summary_by_type_disposition(seattle).to_excel(writer, sheet_name='Seattle')
    with pd.ExcelWriter('../../reports/CityOfficerInitiatedSummary.xlsx') as writer:
        summary_office_initiated(new_orleans).to_excel(writer, sheet_name='New Orleans', merge_cells=False)
        summary_office_initiated(dallas).to_excel(writer, sheet_name='Dallas')
        summary_office_initiated(detroit).to_excel(writer, sheet_name='Detroit', merge_cells=False)
        summary_office_initiated(seattle).to_excel(writer, sheet_name='Seattle')
        summary_office_initiated(charleston).to_excel(writer, sheet_name='Charleston')
    with pd.ExcelWriter('../../reports/CityCallTypeSummary.xlsx') as writer:
        summary_by_type(new_orleans).to_excel(writer, sheet_name='New Orleans')
        summary_by_type(dallas).to_excel(writer, sheet_name='Dallas')
        summary_by_type(detroit).to_excel(writer, sheet_name='Detroit')
        summary_by_type(seattle).to_excel(writer, sheet_name='Seattle')
        # Bug fix: sheet name was misspelled 'Chrleston'.
        summary_by_type(charleston).to_excel(writer, sheet_name='Charleston')
    generate_per_capita_maps()
    generate_self_initiated_fraction_maps()
    generate_enforcement_action_maps()
    generate_response_time_maps()
    generate_CFS_breakdown()
    generate_response_time_by_CFS()
    generate_disposition_by_CFS()
    generate_officer_initiated_by_CFS()
    generate_percentile_comparisons()
    generate_correlation_coefficent_plots()
| 48.852507
| 154
| 0.622124
| 4,356
| 33,122
| 4.493802
| 0.070018
| 0.026973
| 0.04046
| 0.05977
| 0.77645
| 0.727356
| 0.710294
| 0.683423
| 0.641788
| 0.618851
| 0
| 0.017184
| 0.2287
| 33,122
| 678
| 155
| 48.852507
| 0.749051
| 0.017541
| 0
| 0.543011
| 0
| 0
| 0.149877
| 0.040375
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0.02509
| null | null | 0.001792
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 4
|
13aac356c77623518d4aea58569f898ef86d3338
| 440
|
py
|
Python
|
tests/test_exlo_data.py
|
ovinc/exlo_data
|
d09290fdabb3cd3503a80891682833e58f9788f0
|
[
"BSD-3-Clause"
] | null | null | null |
tests/test_exlo_data.py
|
ovinc/exlo_data
|
d09290fdabb3cd3503a80891682833e58f9788f0
|
[
"BSD-3-Clause"
] | null | null | null |
tests/test_exlo_data.py
|
ovinc/exlo_data
|
d09290fdabb3cd3503a80891682833e58f9788f0
|
[
"BSD-3-Clause"
] | null | null | null |
"""Tests for the exlo module."""
# Standard library
from pathlib import Path
# Non standard
import pytest
import exlo_data
basefolder = Path(exlo_data.__file__).parent
def test_exlo():
    """Check that every required data file ships with the package."""
    for name in ('config.json', 'users.json', 'projects.json', 'setups.json'):
        assert (basefolder / name).exists()
| 23.157895
| 52
| 0.7
| 54
| 440
| 5.574074
| 0.611111
| 0.212625
| 0.159468
| 0.259136
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.163636
| 440
| 18
| 53
| 24.444444
| 0.817935
| 0.227273
| 0
| 0
| 0
| 0
| 0.137195
| 0
| 0
| 0
| 0
| 0
| 0.444444
| 1
| 0.111111
| false
| 0
| 0.333333
| 0
| 0.444444
| 0
| 0
| 0
| 0
| null | 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
|
0
| 4
|
13e3c5f761d01cf26f003cdc521e8ee7a7754aef
| 75
|
py
|
Python
|
python/anonymous-functions.py
|
ThePeeps191/code-snippets
|
f4f6ffa58747433f13e6d512d51c10f1e296c104
|
[
"MIT"
] | 1
|
2022-01-20T04:20:17.000Z
|
2022-01-20T04:20:17.000Z
|
python/anonymous-functions.py
|
ThePeeps191/code-snippets
|
f4f6ffa58747433f13e6d512d51c10f1e296c104
|
[
"MIT"
] | null | null | null |
python/anonymous-functions.py
|
ThePeeps191/code-snippets
|
f4f6ffa58747433f13e6d512d51c10f1e296c104
|
[
"MIT"
] | null | null | null |
def my_function(function):
    """Call `function` and return its result.

    Bug fix: the original called `function()` but discarded the result, so
    the demo print below showed None instead of the lambda's value (99).
    """
    return function()


print(my_function(lambda: 99))
| 15
| 31
| 0.72
| 10
| 75
| 5.2
| 0.6
| 0.384615
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.03125
| 0.146667
| 75
| 4
| 32
| 18.75
| 0.78125
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| false
| 0
| 0
| 0
| 0.333333
| 0.333333
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 4
|
13f8849ca57a939013ea96f793d675b22a57c27f
| 248
|
py
|
Python
|
rudalle_paddle/packages/einops/__init__.py
|
HighCWu/ru-dalle-paddle
|
742d7002b71a5e166fb4dee854524c7c44d20cf1
|
[
"Apache-2.0"
] | 18
|
2021-11-22T16:30:07.000Z
|
2022-03-09T07:59:05.000Z
|
rudalle_paddle/packages/einops/__init__.py
|
AgentMaker/ru-dalle-paddle
|
742d7002b71a5e166fb4dee854524c7c44d20cf1
|
[
"Apache-2.0"
] | null | null | null |
rudalle_paddle/packages/einops/__init__.py
|
AgentMaker/ru-dalle-paddle
|
742d7002b71a5e166fb4dee854524c7c44d20cf1
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
__author__ = 'Alex Rogozhnikov'
__version__ = '0.3.2'
from .einops import rearrange, reduce, repeat, parse_shape, asnumpy, EinopsError
__all__ = ['rearrange', 'reduce', 'repeat', 'parse_shape', 'asnumpy', 'EinopsError']
| 24.8
| 84
| 0.693548
| 28
| 248
| 5.642857
| 0.75
| 0.189873
| 0.265823
| 0.329114
| 0.620253
| 0.620253
| 0.620253
| 0
| 0
| 0
| 0
| 0.018605
| 0.133065
| 248
| 9
| 85
| 27.555556
| 0.716279
| 0.084677
| 0
| 0
| 0
| 0
| 0.315556
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.25
| 0
| 0.25
| 0
| 1
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 4
|
b917cbd30b53e81e1207f00c0f39306426edfa5f
| 151
|
py
|
Python
|
mpr/middleware.py
|
root79-glit/medicine-price-registry
|
8c9b135f4abd5b97b127ace01559c9dbe8b9a303
|
[
"Apache-2.0"
] | null | null | null |
mpr/middleware.py
|
root79-glit/medicine-price-registry
|
8c9b135f4abd5b97b127ace01559c9dbe8b9a303
|
[
"Apache-2.0"
] | null | null | null |
mpr/middleware.py
|
root79-glit/medicine-price-registry
|
8c9b135f4abd5b97b127ace01559c9dbe8b9a303
|
[
"Apache-2.0"
] | null | null | null |
class CORSMiddleware:
    """Old-style Django middleware enabling fully permissive CORS."""

    def process_response(self, request, response):
        """Stamp the outgoing response as readable from any origin."""
        # The request itself is not consulted; every response gets the header.
        header, value = 'Access-Control-Allow-Origin', "*"
        response[header] = value
        return response
| 30.2
| 53
| 0.682119
| 15
| 151
| 6.8
| 0.8
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.205298
| 151
| 4
| 54
| 37.75
| 0.85
| 0
| 0
| 0
| 0
| 0
| 0.18543
| 0.178808
| 0
| 0
| 0
| 0
| 0
| 1
| 0.25
| false
| 0
| 0
| 0
| 0.75
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
| 0
|
0
| 4
|
b919282ce011459272ef8dc02521c9ffb813974d
| 837
|
py
|
Python
|
tests/python/test_struct_for_intermediate.py
|
Detavern/taichi
|
1599050e42e1a1927a54d6e7aced5d158af77340
|
[
"MIT"
] | 1
|
2020-07-17T08:59:53.000Z
|
2020-07-17T08:59:53.000Z
|
tests/python/test_struct_for_intermediate.py
|
youyufeng92/taichi
|
c826de521d254745db556835e322dd2e0cfdbfa0
|
[
"MIT"
] | null | null | null |
tests/python/test_struct_for_intermediate.py
|
youyufeng92/taichi
|
c826de521d254745db556835e322dd2e0cfdbfa0
|
[
"MIT"
] | null | null | null |
import taichi as ti
@ti.all_archs
def test_nested():
    """Struct-for over the intermediate level of a nested dense layout,
    with struct-for demotion disabled.

    Iterating ``x.parent()`` should visit each cell of the outer (p, q)
    grid once; the assertion checks that the first element of every
    (n, m) block was incremented exactly once.
    """
    ti.cfg.demote_dense_struct_fors = False
    x = ti.var(ti.i32)
    p, q = 3, 7   # outer grid dimensions
    n, m = 2, 4   # inner block dimensions

    @ti.layout
    def place():
        # Two-level dense layout: (p, q) grid of (n, m) blocks.
        ti.root.dense(ti.ij, (p, q)).dense(ti.ij, (n, m)).place(x)

    @ti.kernel
    def iterate():
        for i, j in x.parent():
            x[i, j] += 1

    iterate()
    for i in range(p):
        for j in range(q):
            assert x[i * n, j * m] == 1
@ti.all_archs
def test_nested_demote():
    """Same nested struct-for scenario as test_nested, but with demotion of
    dense struct-fors enabled (and IR printing on), verifying the demoted
    path produces identical results.
    """
    ti.cfg.demote_dense_struct_fors = True
    ti.cfg.print_ir = True
    x = ti.var(ti.i32)
    p, q = 3, 7   # outer grid dimensions
    n, m = 2, 4   # inner block dimensions

    @ti.layout
    def place():
        # Two-level dense layout: (p, q) grid of (n, m) blocks.
        ti.root.dense(ti.ij, (p, q)).dense(ti.ij, (n, m)).place(x)

    @ti.kernel
    def iterate():
        for i, j in x.parent():
            x[i, j] += 1

    iterate()
    for i in range(p):
        for j in range(q):
            assert x[i * n, j * m] == 1
| 17.081633
| 62
| 0.541219
| 162
| 837
| 2.722222
| 0.265432
| 0.027211
| 0.081633
| 0.058957
| 0.893424
| 0.893424
| 0.671202
| 0.671202
| 0.671202
| 0.671202
| 0
| 0.02649
| 0.278375
| 837
| 48
| 63
| 17.4375
| 0.703642
| 0
| 0
| 0.833333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.055556
| 1
| 0.166667
| false
| 0
| 0.027778
| 0
| 0.194444
| 0.027778
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 4
|
b91db35b07f8308e3ad7a033f352c96b3b4f858f
| 261
|
py
|
Python
|
end_to_end_tests/custom-templates-golden-record/my_test_api_client/api/tag1/__init__.py
|
barjomet/openapi-python-client
|
3d0b96478a81a84468f9f34e70c715a486915108
|
[
"MIT"
] | null | null | null |
end_to_end_tests/custom-templates-golden-record/my_test_api_client/api/tag1/__init__.py
|
barjomet/openapi-python-client
|
3d0b96478a81a84468f9f34e70c715a486915108
|
[
"MIT"
] | null | null | null |
end_to_end_tests/custom-templates-golden-record/my_test_api_client/api/tag1/__init__.py
|
barjomet/openapi-python-client
|
3d0b96478a81a84468f9f34e70c715a486915108
|
[
"MIT"
] | null | null | null |
""" Contains methods for accessing the API Endpoints """
import types
from my_test_api_client.api.tag1 import get_tag_with_number
class Tag1Endpoints:
    """Accessor collection for the generated 'tag1' API endpoint modules."""

    @classmethod
    def get_tag_with_number(cls) -> types.ModuleType:
        """Return the module implementing the get_tag_with_number endpoint."""
        return get_tag_with_number
| 21.75
| 59
| 0.770115
| 37
| 261
| 5.108108
| 0.675676
| 0.095238
| 0.15873
| 0.253968
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.009217
| 0.168582
| 261
| 11
| 60
| 23.727273
| 0.861751
| 0.183908
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.166667
| false
| 0
| 0.333333
| 0.166667
| 0.833333
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
|
0
| 4
|
b92f501e3c62c69d79fb298a46427a2f3d82a0d8
| 54
|
py
|
Python
|
other/run_gui.py
|
MSLNZ/Mass-Circular-Weighing
|
f144158b9e2337d7e9446326d6927e1dd606ed38
|
[
"MIT"
] | 1
|
2020-02-19T09:10:43.000Z
|
2020-02-19T09:10:43.000Z
|
other/run_gui.py
|
MSLNZ/Mass-Circular-Weighing
|
f144158b9e2337d7e9446326d6927e1dd606ed38
|
[
"MIT"
] | null | null | null |
other/run_gui.py
|
MSLNZ/Mass-Circular-Weighing
|
f144158b9e2337d7e9446326d6927e1dd606ed38
|
[
"MIT"
] | null | null | null |
import mass_circular_weighing as mcw
mcw.show_gui()
| 10.8
| 36
| 0.814815
| 9
| 54
| 4.555556
| 0.888889
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.12963
| 54
| 4
| 37
| 13.5
| 0.87234
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.5
| 0
| 0.5
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 4
|
b94a14936a17f35fddac15ee549ab4325e5b8c42
| 2,546
|
py
|
Python
|
python-百度关键字爬虫/seo_prj/spider/chinaz/chinaz.py
|
wangchuanli001/Project-experience
|
b563c5c3afc07c913c2e1fd25dff41c70533f8de
|
[
"Apache-2.0"
] | 12
|
2019-12-07T01:44:55.000Z
|
2022-01-27T14:13:30.000Z
|
python-百度关键字爬虫/seo_prj/spider/chinaz/chinaz.py
|
hujiese/Project-experience
|
b563c5c3afc07c913c2e1fd25dff41c70533f8de
|
[
"Apache-2.0"
] | 23
|
2020-05-23T03:56:33.000Z
|
2022-02-28T07:54:45.000Z
|
python-百度关键字爬虫/seo_prj/spider/chinaz/chinaz.py
|
hujiese/Project-experience
|
b563c5c3afc07c913c2e1fd25dff41c70533f8de
|
[
"Apache-2.0"
] | 7
|
2019-12-20T04:48:56.000Z
|
2021-11-19T02:23:45.000Z
|
# -*- coding: utf-8 -*-
import requests
from bs4 import BeautifulSoup
import urllib.request
import myutils
# cnblogs post请求
def getDoc(keyword):
    """POST `keyword` to chinaz.com's keyword-evaluation tool and print the
    returned HTML page.

    NOTE(review): network I/O against a third-party site; presumably the
    endpoint/markup may have changed — verify before relying on output.
    """
    url = 'http://tool.chinaz.com/kwevaluate'
    headers = {
        'User-Agent': 'Mozilla/4.0 (compatible; MSIE 5.5; Windows NT)'
    }
    values = {
        't': 'kwevaluate',
        'kw': keyword
    }
    # Form-encode the payload as UTF-8 bytes for the POST body.
    data = urllib.parse.urlencode(values).encode('utf-8')
    request = urllib.request.Request(url, data, headers)
    html = urllib.request.urlopen(request).read().decode('utf-8')
    print(html)
# get
def get(keyword):
url = "http://index.baidu.com/api/SearchApi/index?word=" + keyword + "&area=0&days=30"
headers = {
"Accept": "application / json, text / plain, * / *",
"Accept-Encoding": "gzip, deflate",
"Accept-Language": "zh-CN,zh;q=0.9",
"Connection": "keep-alive",
"Cookie":"BAIDUID=47B9867DEBA6B652903C4975C010AB3F:FG=1; BIDUPSID=47B9867DEBA6B652903C4975C010AB3F; PSTM=1554868710; BD_UPN=12314753; BDORZ=AE84CDB3A529C0F8A2B9DCDD1D18B695; H_WISE_SIDS=130593_125703_128701_130792_125696_130163_120129_131381_128879_118882_118864_118838_118819_118793_130763_131649_131577_131535_131534_131530_130222_131295_131246_129565_107317_131392_130120_131517_131239_131195_130350_117431_129649_127027_130689_128967_131036_130569_129838_130990_129479_129644_124802_131423_131467_130716_110085_127969_131506_123289_131210_131296_127317_128200_131549_130595_131264_131262_128604_131458_128806; delPer=0; BD_CK_SAM=1; BDUSS=IzekZDay1FUHJJclIxNTVVQVpuZE5-NUtBLXVWdzREdk9SRHkzc2MwSWtWLTVjSVFBQUFBJCQAAAAAAAAAAAEAAACWA4Z6gVfs4buou~DYvAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAACTKxlwkysZcRl; bdindexid=g296e3ba116gv1ulcnasoo8gt0; SE_LAUNCH=5%3A25941955; PSINO=5; BD_HOME=1; BDRCVFR[feWj1Vr5u3D]=I67x6TjHwwYf0; H_PS_PSSID=1466_21117_28721_28832_28585_26350_28604_28890; H_PS_645EC=c576SDj%2FVTd6i0UC7abuVnm0w%2Bi370%2F7VhDK9yUMJlg6A%2BE0Vg1mVFzXbHCRTajZ7D%2Bv; sug=3; sugstore=0; ORIGIN=0; bdime=0",
"Host": "index.baidu_index.com",
"Referer": "http://index.baidu.com/v2/main/index.html",
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/73.0.3683.103 Safari/537.36",
"X-Requested-With": "XMLHttpRequest"
}
req = requests.get(url=url, headers=headers, timeout=5)
html_doc = req.text
print(html_doc)
if __name__ == '__main__':
# for i in range(1, 10):
get('php从入门到精通')
get('java开发工具')
get('matlab好学吗')
| 51.959184
| 1,151
| 0.754124
| 311
| 2,546
| 5.88746
| 0.707396
| 0.006554
| 0.015292
| 0.018569
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.268018
| 0.128044
| 2,546
| 48
| 1,152
| 53.041667
| 0.556757
| 0.024745
| 0
| 0.054054
| 0
| 0.054054
| 0.684423
| 0.415254
| 0
| 0
| 0
| 0
| 0
| 1
| 0.054054
| false
| 0
| 0.108108
| 0
| 0.162162
| 0.054054
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 4
|
b94aa95e2076fae89eeaa325fcf151f5f9b58a88
| 27
|
py
|
Python
|
data/studio21_generated/introductory/3626/starter_code.py
|
vijaykumawat256/Prompt-Summarization
|
614f5911e2acd2933440d909de2b4f86653dc214
|
[
"Apache-2.0"
] | null | null | null |
data/studio21_generated/introductory/3626/starter_code.py
|
vijaykumawat256/Prompt-Summarization
|
614f5911e2acd2933440d909de2b4f86653dc214
|
[
"Apache-2.0"
] | null | null | null |
data/studio21_generated/introductory/3626/starter_code.py
|
vijaykumawat256/Prompt-Summarization
|
614f5911e2acd2933440d909de2b4f86653dc214
|
[
"Apache-2.0"
] | null | null | null |
def encode(message, key):
| 13.5
| 25
| 0.703704
| 4
| 27
| 4.75
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.148148
| 27
| 2
| 26
| 13.5
| 0.826087
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0
| null | null | 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 4
|
b96b601e7e94d3fb1e8152ca8ba0f0dca2364cfc
| 358
|
py
|
Python
|
espnet2/text/text_preparation/stress_dictionary/extracting/old/wikipedia_accent_parser/test_wikipedia_accent_parser.py
|
texpomru13/espnet
|
7ef005e832e2fb033f356c16f54e0f08762fb4b0
|
[
"Apache-2.0"
] | null | null | null |
espnet2/text/text_preparation/stress_dictionary/extracting/old/wikipedia_accent_parser/test_wikipedia_accent_parser.py
|
texpomru13/espnet
|
7ef005e832e2fb033f356c16f54e0f08762fb4b0
|
[
"Apache-2.0"
] | null | null | null |
espnet2/text/text_preparation/stress_dictionary/extracting/old/wikipedia_accent_parser/test_wikipedia_accent_parser.py
|
texpomru13/espnet
|
7ef005e832e2fb033f356c16f54e0f08762fb4b0
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from wikipedia_accent_parser import WikipediaAccentParser
accent_parser = WikipediaAccentParser()
assert accent_parser.retrieve_accent(u"Алексей") == u"алексе́й"
assert accent_parser.retrieve_accent(u"Андрей") == u"андре́й"
assert accent_parser.retrieve_accent(u"Васильев") == u"васи́льев"
print("SUCCESS")
| 25.571429
| 65
| 0.768156
| 51
| 358
| 5.27451
| 0.529412
| 0.223048
| 0.200743
| 0.289963
| 0.3829
| 0.3829
| 0.260223
| 0.260223
| 0
| 0
| 0
| 0.003067
| 0.089385
| 358
| 14
| 66
| 25.571429
| 0.812883
| 0.117318
| 0
| 0
| 0
| 0
| 0.165079
| 0
| 0
| 0
| 0
| 0
| 0.5
| 1
| 0
| false
| 0
| 0.166667
| 0
| 0.166667
| 0.166667
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 4
|
b96c580f39b020791ae72e5aae37f8ddbce7762b
| 4,835
|
py
|
Python
|
marspylib/fret/__init__.py
|
duderstadt-lab/marspylib
|
f91acb75a78c9a6cfbdd9caa7c5a07b5575f7d85
|
[
"BSD-2-Clause"
] | 1
|
2022-03-25T07:47:46.000Z
|
2022-03-25T07:47:46.000Z
|
marspylib/fret/__init__.py
|
duderstadt-lab/marspylib
|
f91acb75a78c9a6cfbdd9caa7c5a07b5575f7d85
|
[
"BSD-2-Clause"
] | null | null | null |
marspylib/fret/__init__.py
|
duderstadt-lab/marspylib
|
f91acb75a78c9a6cfbdd9caa7c5a07b5575f7d85
|
[
"BSD-2-Clause"
] | 1
|
2022-03-25T07:47:49.000Z
|
2022-03-25T07:47:49.000Z
|
import numpy as np
## marspylib.fret
def get_T_bleach(molecule, metadata_tag_populations = ['FRET', 'AO', 'DO'], names_bleaching_events = ['Donor_Bleach', 'Acceptor_Bleach']):
'''
Function that returns the T_bleach position for a molecule.
Requirements
archive: an archive should have been initiated prior to running this function.
Inputs
molecule: the variable 'molecule', representing a single molecule record in the archive, should
have been defined prior to running this function.
metadata_tag_populations: default ['FRET', 'AO', 'DO'], list with strings denoting the tags present in the
archive to tag molecules displaying FRET behavior, that have an acceptor only (AO) or donor only (DO).
Note: names have to be entered in the specific order (FRET name, AO name, DO name).
names_bleaching_events: default ['Donor_Bleach', 'Acceptor_Bleach'], list with strings denoting the position
names of the donor and bleaching events in the archive.
Note: names have to be entered in the specific order (Donor bleaching name, Acceptor bleaching name).
Outputs
T_bleach: the T-position of the bleaching point where either one of the dyes (donor or acceptor) has bleached.
Numerical value.
@Author: Nadia M. Huisjes
'''
if (archive.metadataHasTag(molecule.getMetadataUID(),metadata_tag_populations[0])):
if (molecule.hasPosition(names_bleaching_events[1]) & molecule.hasPosition(names_bleaching_events[0])):
T_AO_bleach = molecule.getPosition(names_bleaching_events[1]).getPosition()
T_DO_bleach = molecule.getPosition(names_bleaching_events[0]).getPosition()
if int(T_AO_bleach) > int(T_DO_bleach):
T_bleach = int(T_AO_bleach)
else:
T_bleach = int(T_DO_bleach)
# Molecules in an AO dataset
elif (archive.metadataHasTag(molecule.getMetadataUID(),metadata_tag_populations[1])):
T_bleach = int(molecule.getPosition(names_bleaching_events[1]).getPosition())
# Molecules in a DO dataset
elif (archive.metadataHasTag(molecule.getMetadataUID(),metadata_tag_populations[2])):
T_bleach = int(molecule.getPosition(names_bleaching_events[0]).getPosition())
else:
T_bleach = np.NaN
return T_bleach
def get_acceptor_donor_bleach_fret(molecule, metadata_tag_fret = 'FRET', names_bleaching_events = ['Donor_Bleach', 'Acceptor_Bleach']):
'''
Function that returns the T_bleach position for a molecule. IMPORTANT: both bleaching positions are only retrieved
in the case the molecule has a metadata tag representing a FRET molecule.
Requirements
archive: an archive should have been initiated prior to running this function.
Inputs
molecule: the variable molecule, representing a single molecule record in the archive, should
have been defined prior to running this function. By default set to the name molecule.
metadata_tag_fret: default 'FRET', string denoting the tags present in the archive to tag molecules displaying
FRET behavior.
names_bleaching_events: default ['Donor_Bleach', 'Acceptor_Bleach'], list with strings denoting the position
names of the donor and bleaching events in the archive.
Note: names have to be entered in the specific order (Donor bleaching name, Acceptor bleaching name).
Outputs (tuple with the following three parameters)
T_bleach: the T-position of the bleaching point where there first dye has bleached. Numerical value.
T_second_bleach:the T-position of the bleaching point where there second dye has bleached. Numerical value.
dye: list with one string representing which dye is associated with the defined T_bleach
@Author: Nadia M. Huisjes
'''
if (archive.metadataHasTag(molecule.getMetadataUID(),metadata_tag_fret)):
if (molecule.hasPosition(names_bleaching_events[1]) & molecule.hasPosition(names_bleaching_events[0])):
T_AO_bleach = molecule.getPosition(names_bleaching_events[1]).getPosition()
T_DO_bleach = molecule.getPosition(names_bleaching_events[0]).getPosition()
if int(T_AO_bleach) < int(T_DO_bleach):
T_bleach = int(T_AO_bleach)
T_second_bleach = int(T_DO_bleach)
dye = ['acceptor']
else:
T_bleach = int(T_DO_bleach)
T_second_bleach = int(T_AO_bleach)
dye = ['donor']
else:
T_bleach = np.NaN
T_second_bleach = np.NaN
dye = ['NaN']
return (T_bleach, T_second_bleach, dye)
| 49.336735
| 138
| 0.680869
| 615
| 4,835
| 5.172358
| 0.185366
| 0.035209
| 0.088023
| 0.062245
| 0.781515
| 0.744106
| 0.733417
| 0.707953
| 0.677146
| 0.629362
| 0
| 0.00357
| 0.246949
| 4,835
| 97
| 139
| 49.845361
| 0.870091
| 0.493485
| 0
| 0.457143
| 0
| 0
| 0.035792
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.057143
| false
| 0
| 0.028571
| 0
| 0.142857
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 4
|
b987eeb22980c6ec95eb951a7947b24c26cd8830
| 316
|
py
|
Python
|
runway/core/providers/aws/__init__.py
|
avosper-intellaegis/runway
|
757d4e7db269ec16479b044ac82a69f25fa2a450
|
[
"Apache-2.0"
] | 134
|
2018-02-26T21:35:23.000Z
|
2022-03-03T00:30:27.000Z
|
runway/core/providers/aws/__init__.py
|
asksmruti/runway
|
8aca76df9372e3d13eb35e12f81758f618e89e74
|
[
"Apache-2.0"
] | 937
|
2018-03-08T22:04:35.000Z
|
2022-03-30T12:21:47.000Z
|
runway/core/providers/aws/__init__.py
|
asksmruti/runway
|
8aca76df9372e3d13eb35e12f81758f618e89e74
|
[
"Apache-2.0"
] | 70
|
2018-02-26T23:48:11.000Z
|
2022-03-02T18:44:30.000Z
|
"""Runway AWS objects."""
from . import s3
from ._account import AccountDetails
from ._assume_role import AssumeRole
from ._response import BaseResponse, ResponseError, ResponseMetadata
__all__ = [
"AccountDetails",
"AssumeRole",
"BaseResponse",
"ResponseError",
"ResponseMetadata",
"s3",
]
| 21.066667
| 68
| 0.71519
| 28
| 316
| 7.785714
| 0.571429
| 0.229358
| 0.376147
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.007692
| 0.177215
| 316
| 14
| 69
| 22.571429
| 0.830769
| 0.060127
| 0
| 0
| 0
| 0
| 0.230241
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.333333
| 0
| 0.333333
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
|
0
| 4
|
b98bb1f72ac08ba0054f44d1583e7ea839be2480
| 37
|
py
|
Python
|
foiamachine/local/lib/python2.7/encodings/cp737.py
|
dwillis/foiamachine
|
26d3b02870227696cdaab639c39d47b2a7a42ae5
|
[
"Unlicense",
"MIT"
] | 3
|
2021-08-07T04:01:55.000Z
|
2021-08-07T05:12:11.000Z
|
foiamachine/local/lib/python2.7/encodings/cp737.py
|
dwillis/foiamachine
|
26d3b02870227696cdaab639c39d47b2a7a42ae5
|
[
"Unlicense",
"MIT"
] | null | null | null |
foiamachine/local/lib/python2.7/encodings/cp737.py
|
dwillis/foiamachine
|
26d3b02870227696cdaab639c39d47b2a7a42ae5
|
[
"Unlicense",
"MIT"
] | 1
|
2021-08-05T22:51:14.000Z
|
2021-08-05T22:51:14.000Z
|
/usr/lib/python2.7/encodings/cp737.py
| 37
| 37
| 0.810811
| 7
| 37
| 4.285714
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.135135
| 0
| 37
| 1
| 37
| 37
| 0.675676
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0
| null | null | 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 4
|
b9a55a27068247e5dbf3027d80652de300521fc0
| 7,233
|
py
|
Python
|
tests/sentry/api/serializers/test_team.py
|
pierredup/sentry
|
0145e4b3bc0e775bf3482fe65f5e1a689d0dbb80
|
[
"BSD-3-Clause"
] | null | null | null |
tests/sentry/api/serializers/test_team.py
|
pierredup/sentry
|
0145e4b3bc0e775bf3482fe65f5e1a689d0dbb80
|
[
"BSD-3-Clause"
] | null | null | null |
tests/sentry/api/serializers/test_team.py
|
pierredup/sentry
|
0145e4b3bc0e775bf3482fe65f5e1a689d0dbb80
|
[
"BSD-3-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
from __future__ import absolute_import
import six
from sentry.api.serializers import serialize
from sentry.api.serializers.models.team import TeamWithProjectsSerializer
from sentry.api.serializers.models.project import ProjectSerializer
from sentry.models import InviteStatus
from sentry.testutils import TestCase
class TeamSerializerTest(TestCase):
def test_simple(self):
user = self.create_user(username="foo")
organization = self.create_organization(owner=user)
team = self.create_team(organization=organization)
result = serialize(team, user)
result.pop("dateCreated")
assert result == {
"slug": team.slug,
"name": team.name,
"hasAccess": True,
"isPending": False,
"isMember": False,
"id": six.text_type(team.id),
"avatar": {"avatarType": "letter_avatar", "avatarUuid": None},
"memberCount": 0,
}
def test_member_count(self):
user = self.create_user(username="foo")
other_user = self.create_user(username="bar")
third_user = self.create_user(username="baz")
organization = self.create_organization(owner=user)
team = self.create_team(organization=organization, members=[user, other_user, third_user])
result = serialize(team, user)
assert 3 == result["memberCount"]
def test_member_count_does_not_include_invite_requests(self):
org = self.create_organization(owner=self.user)
team = self.create_team(organization=org)
self.create_member(user=self.create_user(), organization=org, teams=[team]) # member
self.create_member(email="1@example.com", organization=org, teams=[team]) # pending invite
result = serialize(team, self.user)
assert result["memberCount"] == 2
# invite requests
self.create_member(
email="2@example.com",
organization=org,
invite_status=InviteStatus.REQUESTED_TO_BE_INVITED.value,
teams=[team],
)
self.create_member(
email="3@gmail.com",
organization=org,
invite_status=InviteStatus.REQUESTED_TO_JOIN.value,
teams=[team],
)
result = serialize(team, self.user)
assert result["memberCount"] == 2
def test_member_access(self):
user = self.create_user(username="foo")
organization = self.create_organization()
self.create_member(user=user, organization=organization)
team = self.create_team(organization=organization)
result = serialize(team, user)
result.pop("dateCreated")
assert result["hasAccess"] is True
assert result["isMember"] is False
organization.flags.allow_joinleave = False
organization.save()
result = serialize(team, user)
# after changing to allow_joinleave=False
assert result["hasAccess"] is False
assert result["isMember"] is False
self.create_team_membership(user=user, team=team)
result = serialize(team, user)
# after giving them access to team
assert result["hasAccess"] is True
assert result["isMember"] is True
def test_admin_access(self):
user = self.create_user(username="foo")
organization = self.create_organization()
self.create_member(user=user, organization=organization, role="admin")
team = self.create_team(organization=organization)
result = serialize(team, user)
result.pop("dateCreated")
assert result["hasAccess"] is True
assert result["isMember"] is False
organization.flags.allow_joinleave = False
organization.save()
result = serialize(team, user)
# after changing to allow_joinleave=False
assert result["hasAccess"] is False
assert result["isMember"] is False
self.create_team_membership(user=user, team=team)
result = serialize(team, user)
# after giving them access to team
assert result["hasAccess"] is True
assert result["isMember"] is True
def test_manager_access(self):
user = self.create_user(username="foo")
organization = self.create_organization()
self.create_member(user=user, organization=organization, role="manager")
team = self.create_team(organization=organization)
result = serialize(team, user)
result.pop("dateCreated")
assert result["hasAccess"] is True
assert result["isMember"] is False
organization.flags.allow_joinleave = False
organization.save()
result = serialize(team, user)
# after changing to allow_joinleave=False
assert result["hasAccess"] is True
assert result["isMember"] is False
self.create_team_membership(user=user, team=team)
result = serialize(team, user)
# after giving them access to team
assert result["hasAccess"] is True
assert result["isMember"] is True
def test_owner_access(self):
user = self.create_user(username="foo")
organization = self.create_organization()
self.create_member(user=user, organization=organization, role="owner")
team = self.create_team(organization=organization)
result = serialize(team, user)
result.pop("dateCreated")
assert result["hasAccess"] is True
assert result["isMember"] is False
organization.flags.allow_joinleave = False
organization.save()
result = serialize(team, user)
# after changing to allow_joinleave=False
assert result["hasAccess"] is True
assert result["isMember"] is False
self.create_team_membership(user=user, team=team)
result = serialize(team, user)
# after giving them access to team
assert result["hasAccess"] is True
assert result["isMember"] is True
class TeamWithProjectsSerializerTest(TestCase):
def test_simple(self, project_serializer=None):
user = self.create_user(username="foo")
organization = self.create_organization(owner=user)
team = self.create_team(organization=organization)
project = self.create_project(teams=[team], organization=organization, name="foo")
project2 = self.create_project(teams=[team], organization=organization, name="bar")
result = serialize(team, user, TeamWithProjectsSerializer())
serialized_projects = serialize([project2, project], user, project_serializer)
assert result == {
"slug": team.slug,
"name": team.name,
"hasAccess": True,
"isPending": False,
"isMember": False,
"id": six.text_type(team.id),
"projects": serialized_projects,
"avatar": {"avatarType": "letter_avatar", "avatarUuid": None},
"memberCount": 0,
"dateCreated": team.date_added,
}
def test_with_performance_flag(self):
with self.feature("organizations:enterprise-perf"):
self.test_simple(ProjectSerializer(include_features=False))
| 36.346734
| 99
| 0.648832
| 779
| 7,233
| 5.894737
| 0.14249
| 0.087108
| 0.07034
| 0.075131
| 0.76176
| 0.727352
| 0.719948
| 0.712761
| 0.642639
| 0.620427
| 0
| 0.002024
| 0.248721
| 7,233
| 198
| 100
| 36.530303
| 0.843025
| 0.048528
| 0
| 0.691275
| 0
| 0
| 0.087203
| 0.004222
| 0
| 0
| 0
| 0
| 0.194631
| 1
| 0.060403
| false
| 0
| 0.04698
| 0
| 0.120805
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 4
|
b9c00b86343b58893eccec0356f47cb6aa23aafa
| 89
|
py
|
Python
|
bmwcd/__init__.py
|
gerard33/bmwcd
|
02f08f8e8f0e98a406102f390ecf18879549d703
|
[
"MIT"
] | 6
|
2018-01-20T10:47:36.000Z
|
2022-03-08T09:48:49.000Z
|
bmwcd/__init__.py
|
gerard33/bmwcd
|
02f08f8e8f0e98a406102f390ecf18879549d703
|
[
"MIT"
] | 1
|
2019-02-18T14:56:58.000Z
|
2019-02-25T09:04:44.000Z
|
bmwcd/__init__.py
|
gerard33/bmwcd
|
02f08f8e8f0e98a406102f390ecf18879549d703
|
[
"MIT"
] | 6
|
2018-01-20T08:58:24.000Z
|
2021-03-21T18:49:14.000Z
|
""" Simple BMW ConnectedDrive API.
init file for backward compatibility
"""
# empty
| 14.833333
| 40
| 0.707865
| 10
| 89
| 6.3
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.202247
| 89
| 5
| 41
| 17.8
| 0.887324
| 0.842697
| 0
| null | 0
| null | 0
| 0
| null | 0
| 0
| 0
| null | 1
| null | true
| 0
| 0
| null | null | null | 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 4
|
b9c43036ec34606c8c72543f45f2945157e2af79
| 19,370
|
py
|
Python
|
tests/unittests/test_dispatcher.py
|
anandagopal6/azure-functions-python-worker
|
e4adb351e5454c093fcefbf0fb84f200af32f386
|
[
"MIT"
] | null | null | null |
tests/unittests/test_dispatcher.py
|
anandagopal6/azure-functions-python-worker
|
e4adb351e5454c093fcefbf0fb84f200af32f386
|
[
"MIT"
] | null | null | null |
tests/unittests/test_dispatcher.py
|
anandagopal6/azure-functions-python-worker
|
e4adb351e5454c093fcefbf0fb84f200af32f386
|
[
"MIT"
] | null | null | null |
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
import collections as col
import os
import sys
import unittest
from typing import Optional, Tuple
from unittest.mock import patch
from azure_functions_worker import protos
from azure_functions_worker import testutils
from azure_functions_worker.constants import PYTHON_THREADPOOL_THREAD_COUNT, \
PYTHON_THREADPOOL_THREAD_COUNT_DEFAULT
SysVersionInfo = col.namedtuple("VersionInfo", ["major", "minor", "micro",
"releaselevel", "serial"])
DISPATCHER_FUNCTIONS_DIR = testutils.UNIT_TESTS_FOLDER / 'dispatcher_functions'
class TestThreadPoolSettingsPython37(testutils.AsyncTestCase):
"""Base test class for testing thread pool settings for sync threadpool
worker count. This class specifically sets sys.version_info to return as
Python 3.7 and extended classes change this value and other platform
specific values to test the behavior across the different python versions.
- Why not python 3.6?
- In Azure.Functions (library), the typing_inspect module imports specific
modules which are not available on systems where Python 3.7+ is installed.
Ref:
NEW_TYPING = sys.version_info[:3] >= (3, 7, 0) # PEP 560
"""
def setUp(self):
self._ctrl = testutils.start_mockhost(
script_root=DISPATCHER_FUNCTIONS_DIR)
self._default_workers: Optional[
int] = PYTHON_THREADPOOL_THREAD_COUNT_DEFAULT
self._pre_env = dict(os.environ)
self.mock_version_info = patch(
'azure_functions_worker.dispatcher.sys.version_info',
SysVersionInfo(3, 7, 0, 'final', 0))
self.mock_version_info.start()
def tearDown(self):
os.environ.clear()
os.environ.update(self._pre_env)
self.mock_version_info.stop()
async def test_dispatcher_initialize_worker(self):
"""Test if the dispatcher can be initialized worker successfully
"""
async with self._ctrl as host:
r = await host.init_worker('3.0.12345')
self.assertIsInstance(r.response, protos.WorkerInitResponse)
async def test_dispatcher_initialize_worker_logging(self):
"""Test if the dispatcher's log can be flushed out during worker
initialization
"""
async with self._ctrl as host:
r = await host.init_worker('3.0.12345')
self.assertEqual(
len([l for l in r.logs if l.message.startswith(
'Received WorkerInitRequest'
)]),
1
)
async def test_dispatcher_send_worker_request(self):
"""Test if the worker status response will be sent correctly when
a worker status request is received
"""
async with self._ctrl as host:
r = await host.get_worker_status()
self.assertIsInstance(r.response, protos.WorkerStatusResponse)
async def test_dispatcher_sync_threadpool_default_worker(self):
"""Test if the sync threadpool has maximum worker count set the
correct default value
"""
async with self._ctrl as host:
# await self._check_if_function_is_ok(host)
await self._assert_workers_threadpool(self._ctrl, host,
self._default_workers)
async def test_dispatcher_sync_threadpool_set_worker(self):
"""Test if the sync threadpool maximum worker can be set
"""
# Configure thread pool max worker
os.environ.update({PYTHON_THREADPOOL_THREAD_COUNT: '5'})
async with self._ctrl as host:
await self._check_if_function_is_ok(host)
await self._assert_workers_threadpool(self._ctrl, host, 5)
async def test_dispatcher_sync_threadpool_invalid_worker_count(self):
"""Test when sync threadpool maximum worker is set to an invalid value,
the host should fallback to default value
"""
# The @patch decorator does not work as expected and will suppress
# any assertion failures in the async test cases.
# Thus we're moving the patch() method to use the with syntax
with patch('azure_functions_worker.dispatcher.logger') as mock_logger:
# Configure thread pool max worker to an invalid value
os.environ.update({PYTHON_THREADPOOL_THREAD_COUNT: 'invalid'})
async with self._ctrl as host:
await self._check_if_function_is_ok(host)
await self._assert_workers_threadpool(self._ctrl, host,
self._default_workers)
mock_logger.warning.assert_any_call(
f'{PYTHON_THREADPOOL_THREAD_COUNT} must be an integer')
async def test_dispatcher_sync_threadpool_below_min_setting(self):
"""Test if the sync threadpool will pick up default value when the
setting is below minimum
"""
with patch('azure_functions_worker.dispatcher.logger') as mock_logger:
# Configure thread pool max worker to an invalid value
os.environ.update({PYTHON_THREADPOOL_THREAD_COUNT: '0'})
async with self._ctrl as host:
await self._check_if_function_is_ok(host)
await self._assert_workers_threadpool(self._ctrl, host,
self._default_workers)
mock_logger.warning.assert_any_call(
f'{PYTHON_THREADPOOL_THREAD_COUNT} must be set to a value '
'between 1 and 32. Reverting to default value for max_workers')
async def test_dispatcher_sync_threadpool_exceed_max_setting(self):
"""Test if the sync threadpool will pick up default value when the
setting is above maximum
"""
with patch('azure_functions_worker.dispatcher.logger') as mock_logger:
# Configure thread pool max worker to an invalid value
os.environ.update({PYTHON_THREADPOOL_THREAD_COUNT: '33'})
async with self._ctrl as host:
await self._check_if_function_is_ok(host)
# Ensure the dispatcher sync threadpool should fallback to 1
await self._assert_workers_threadpool(self._ctrl, host,
self._default_workers)
mock_logger.warning.assert_any_call(
f'{PYTHON_THREADPOOL_THREAD_COUNT} must be set to a value '
'between 1 and 32. '
'Reverting to default value for max_workers')
async def test_dispatcher_sync_threadpool_in_placeholder(self):
"""Test if the sync threadpool will pick up app setting in placeholder
mode (Linux Consumption)
"""
async with self._ctrl as host:
await self._check_if_function_is_ok(host)
# Reload environment variable on specialization
await host.reload_environment(environment={
PYTHON_THREADPOOL_THREAD_COUNT: '3'
})
# Ensure the dispatcher sync threadpool should fallback to 1
await self._assert_workers_threadpool(self._ctrl, host, 3)
async def test_dispatcher_sync_threadpool_in_placeholder_invalid(self):
"""Test if the sync threadpool will use the default setting when the
app setting is invalid
"""
with patch('azure_functions_worker.dispatcher.logger') as mock_logger:
async with self._ctrl as host:
await self._check_if_function_is_ok(host)
# Reload environment variable on specialization
await host.reload_environment(environment={
PYTHON_THREADPOOL_THREAD_COUNT: 'invalid'
})
await self._assert_workers_threadpool(self._ctrl, host,
self._default_workers)
# Check warning message
mock_logger.warning.assert_any_call(
f'{PYTHON_THREADPOOL_THREAD_COUNT} must be an integer')
async def test_dispatcher_sync_threadpool_in_placeholder_above_max(self):
"""Test if the sync threadpool will use the default setting when the
app setting is above maximum
"""
with patch('azure_functions_worker.dispatcher.logger') as mock_logger:
async with self._ctrl as host:
await self._check_if_function_is_ok(host)
# Reload environment variable on specialization
await host.reload_environment(environment={
PYTHON_THREADPOOL_THREAD_COUNT: '33'
})
await self._assert_workers_threadpool(self._ctrl, host,
self._default_workers)
mock_logger.warning.assert_any_call(
f'{PYTHON_THREADPOOL_THREAD_COUNT} must be set to a '
f'value '
'between 1 and 32. '
'Reverting to default value for max_workers')
async def test_dispatcher_sync_threadpool_in_placeholder_below_min(self):
"""Test if the sync threadpool will use the default setting when the
app setting is below minimum
"""
with patch('azure_functions_worker.dispatcher.logger') as mock_logger:
async with self._ctrl as host:
await self._check_if_function_is_ok(host)
# Reload environment variable on specialization
await host.reload_environment(environment={
PYTHON_THREADPOOL_THREAD_COUNT: '0'
})
await self._assert_workers_threadpool(self._ctrl, host,
self._default_workers)
mock_logger.warning.assert_any_call(
f'{PYTHON_THREADPOOL_THREAD_COUNT} must be set to a '
f'value '
'between 1 and 32. '
'Reverting to default value for max_workers')
async def test_sync_invocation_request_log(self):
with patch('azure_functions_worker.dispatcher.logger') as mock_logger:
async with self._ctrl as host:
request_id: str = self._ctrl._worker._request_id
func_id, invoke_id, func_name = (
await self._check_if_function_is_ok(host)
)
mock_logger.info.assert_any_call(
'Received FunctionInvocationRequest, '
f'request ID: {request_id}, '
f'function ID: {func_id}, '
f'function name: {func_name}, '
f'invocation ID: {invoke_id}, '
'function type: sync, '
f'sync threadpool max workers: {self._default_workers}'
)
async def test_async_invocation_request_log(self):
with patch('azure_functions_worker.dispatcher.logger') as mock_logger:
async with self._ctrl as host:
request_id: str = self._ctrl._worker._request_id
func_id, invoke_id, func_name = (
await self._check_if_async_function_is_ok(host)
)
mock_logger.info.assert_any_call(
'Received FunctionInvocationRequest, '
f'request ID: {request_id}, '
f'function ID: {func_id}, '
f'function name: {func_name}, '
f'invocation ID: {invoke_id}, '
'function type: async'
)
async def test_sync_invocation_request_log_threads(self):
os.environ.update({PYTHON_THREADPOOL_THREAD_COUNT: '5'})
with patch('azure_functions_worker.dispatcher.logger') as mock_logger:
async with self._ctrl as host:
request_id: str = self._ctrl._worker._request_id
func_id, invoke_id, func_name = (
await self._check_if_function_is_ok(host)
)
mock_logger.info.assert_any_call(
'Received FunctionInvocationRequest, '
f'request ID: {request_id}, '
f'function ID: {func_id}, '
f'function name: {func_name}, '
f'invocation ID: {invoke_id}, '
'function type: sync, '
'sync threadpool max workers: 5'
)
async def test_async_invocation_request_log_threads(self):
os.environ.update({PYTHON_THREADPOOL_THREAD_COUNT: '4'})
with patch('azure_functions_worker.dispatcher.logger') as mock_logger:
async with self._ctrl as host:
request_id: str = self._ctrl._worker._request_id
func_id, invoke_id, func_name = (
await self._check_if_async_function_is_ok(host)
)
mock_logger.info.assert_any_call(
'Received FunctionInvocationRequest, '
f'request ID: {request_id}, '
f'function ID: {func_id}, '
f'function name: {func_name}, '
f'invocation ID: {invoke_id}, '
'function type: async'
)
async def test_sync_invocation_request_log_in_placeholder_threads(self):
with patch('azure_functions_worker.dispatcher.logger') as mock_logger:
async with self._ctrl as host:
await host.reload_environment(environment={
PYTHON_THREADPOOL_THREAD_COUNT: '5'
})
request_id: str = self._ctrl._worker._request_id
func_id, invoke_id, func_name = (
await self._check_if_function_is_ok(host)
)
mock_logger.info.assert_any_call(
'Received FunctionInvocationRequest, '
f'request ID: {request_id}, '
f'function ID: {func_id}, '
f'function name: {func_name}, '
f'invocation ID: {invoke_id}, '
'function type: sync, '
'sync threadpool max workers: 5'
)
async def test_async_invocation_request_log_in_placeholder_threads(self):
with patch('azure_functions_worker.dispatcher.logger') as mock_logger:
async with self._ctrl as host:
await host.reload_environment(environment={
PYTHON_THREADPOOL_THREAD_COUNT: '5'
})
request_id: str = self._ctrl._worker._request_id
func_id, invoke_id, func_name = (
await self._check_if_async_function_is_ok(host)
)
mock_logger.info.assert_any_call(
'Received FunctionInvocationRequest, '
f'request ID: {request_id}, '
f'function ID: {func_id}, '
f'function name: {func_name}, '
f'invocation ID: {invoke_id}, '
'function type: async'
)
async def _assert_workers_threadpool(self, ctrl, host,
expected_worker_count):
self.assertIsNotNone(ctrl._worker._sync_call_tp)
self.assertEqual(ctrl._worker.get_sync_tp_workers_set(),
expected_worker_count)
# Check if the dispatcher still function
await self._check_if_function_is_ok(host)
async def _check_if_function_is_ok(self, host) -> Tuple[str, str]:
# Ensure the function can be properly loaded
function_name = "show_context"
func_id, load_r = await host.load_function(function_name)
self.assertEqual(load_r.response.function_id, func_id)
self.assertEqual(load_r.response.result.status,
protos.StatusResult.Success)
# Ensure the function can be properly invoked
invoke_id, call_r = await host.invoke_function(
'show_context', [
protos.ParameterBinding(
name='req',
data=protos.TypedData(
http=protos.RpcHttp(
method='GET'
)
)
)
])
self.assertIsNotNone(invoke_id)
self.assertEqual(call_r.response.result.status,
protos.StatusResult.Success)
return func_id, invoke_id, function_name
async def _check_if_async_function_is_ok(self, host) -> Tuple[str, str]:
# Ensure the function can be properly loaded
function_name = "show_context_async"
func_id, load_r = await host.load_function('show_context_async')
self.assertEqual(load_r.response.function_id, func_id)
self.assertEqual(load_r.response.result.status,
protos.StatusResult.Success)
# Ensure the function can be properly invoked
invoke_id, call_r = await host.invoke_function(
'show_context_async', [
protos.ParameterBinding(
name='req',
data=protos.TypedData(
http=protos.RpcHttp(
method='GET'
)
)
)
])
self.assertIsNotNone(invoke_id)
self.assertEqual(call_r.response.result.status,
protos.StatusResult.Success)
return func_id, invoke_id, function_name
class TestThreadPoolSettingsPython38(TestThreadPoolSettingsPython37):
def setUp(self):
super(TestThreadPoolSettingsPython38, self).setUp()
self.mock_version_info = patch(
'azure_functions_worker.dispatcher.sys.version_info',
SysVersionInfo(3, 8, 0, 'final', 0))
self.mock_version_info.start()
def tearDown(self):
os.environ.clear()
os.environ.update(self._pre_env)
self.mock_version_info.stop()
@unittest.skipIf(sys.version_info.minor != 9,
"Run the tests only for Python 3.9. In other platforms, "
"as the default passed is None, the cpu_count determines the "
"number of max_workers and we cannot mock the os.cpu_count() "
"in the concurrent.futures.ThreadPoolExecutor")
class TestThreadPoolSettingsPython39(TestThreadPoolSettingsPython37):
def setUp(self):
super(TestThreadPoolSettingsPython39, self).setUp()
self.mock_os_cpu = patch(
'os.cpu_count', return_value=2)
self.mock_os_cpu.start()
# 6 - based on 2 cores - min(32, (os.cpu_count() or 1) + 4) - 2 + 4
self._default_workers: Optional[int] = 6
self.mock_version_info = patch(
'azure_functions_worker.dispatcher.sys.version_info',
SysVersionInfo(3, 9, 0, 'final', 0))
self.mock_version_info.start()
def tearDown(self):
os.environ.clear()
os.environ.update(self._pre_env)
self.mock_os_cpu.stop()
self.mock_version_info.stop()
| 43.922902
| 79
| 0.601239
| 2,163
| 19,370
| 5.101248
| 0.126676
| 0.025376
| 0.041871
| 0.051387
| 0.779318
| 0.736723
| 0.717419
| 0.706272
| 0.687964
| 0.68162
| 0
| 0.007462
| 0.328859
| 19,370
| 440
| 80
| 44.022727
| 0.841308
| 0.083789
| 0
| 0.659164
| 0
| 0
| 0.167886
| 0.0633
| 0
| 0
| 0
| 0
| 0.11254
| 1
| 0.019293
| false
| 0.003215
| 0.028939
| 0
| 0.064309
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 4
|
b9dd4b9e4a087ece290af4499362873c2216c2ee
| 169
|
wsgi
|
Python
|
contrib/apache/api.wsgi
|
ilcic/alerta
|
b0fd34df0d7574418a9760202538f3aa594d4fc7
|
[
"Apache-2.0"
] | null | null | null |
contrib/apache/api.wsgi
|
ilcic/alerta
|
b0fd34df0d7574418a9760202538f3aa594d4fc7
|
[
"Apache-2.0"
] | null | null | null |
contrib/apache/api.wsgi
|
ilcic/alerta
|
b0fd34df0d7574418a9760202538f3aa594d4fc7
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
activate_this = '/opt/alerta/bin/activate_this.py'
execfile(activate_this, dict(__file__=activate_this))
from alerta.app import app as application
| 33.8
| 53
| 0.804734
| 26
| 169
| 4.923077
| 0.653846
| 0.375
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.076923
| 169
| 4
| 54
| 42.25
| 0.820513
| 0.118343
| 0
| 0
| 0
| 0
| 0.216216
| 0.216216
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.333333
| 0
| 0.333333
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
|
0
| 4
|
b9e34793759fae172d38005724d8074461e7c948
| 86
|
py
|
Python
|
test/test_remove_duplicates_from_sorted_list_ii.py
|
spencercjh/sync-leetcode-today-problem-python3-example
|
4957e5eadb697334741df0fc297bec2edaa9e2ab
|
[
"Apache-2.0"
] | null | null | null |
test/test_remove_duplicates_from_sorted_list_ii.py
|
spencercjh/sync-leetcode-today-problem-python3-example
|
4957e5eadb697334741df0fc297bec2edaa9e2ab
|
[
"Apache-2.0"
] | null | null | null |
test/test_remove_duplicates_from_sorted_list_ii.py
|
spencercjh/sync-leetcode-today-problem-python3-example
|
4957e5eadb697334741df0fc297bec2edaa9e2ab
|
[
"Apache-2.0"
] | null | null | null |
solution = RemoveDuplicatesFromSortedListIi()
assert X == solution.deleteDuplicates( )
| 43
| 45
| 0.825581
| 6
| 86
| 11.833333
| 0.833333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.081395
| 86
| 2
| 46
| 43
| 0.898734
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.5
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 4
|
b9e59edf503f60d47e2a6b994fecc3b6c5ff8c48
| 218
|
py
|
Python
|
pystributor/pystributor.py
|
hirsimaki-markus/pystributor
|
14d546ab21e48bd3ef3cbe6198f14d5ad393e8d9
|
[
"Unlicense"
] | null | null | null |
pystributor/pystributor.py
|
hirsimaki-markus/pystributor
|
14d546ab21e48bd3ef3cbe6198f14d5ad393e8d9
|
[
"Unlicense"
] | null | null | null |
pystributor/pystributor.py
|
hirsimaki-markus/pystributor
|
14d546ab21e48bd3ef3cbe6198f14d5ad393e8d9
|
[
"Unlicense"
] | null | null | null |
#!/usr/bin/python3
"""
Import wrapper for pystributor worker and hub
"""
from pystributor.pystributor_hub import Hub as _Hub
from pystributor.pystributor_worker import Worker as _Worker
Hub = _Hub
Worker = _Worker
| 16.769231
| 60
| 0.784404
| 30
| 218
| 5.5
| 0.4
| 0.206061
| 0.218182
| 0.351515
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.005376
| 0.146789
| 218
| 12
| 61
| 18.166667
| 0.88172
| 0.288991
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.5
| 0
| 0.5
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
|
0
| 4
|
b9f14c2386069ffd6fbc60c668e183fb991e62e7
| 155
|
py
|
Python
|
11_tuple.py
|
AmreshTripathy/Python
|
e86420fef7f52da393be5b50ac2f13bddfeb3306
|
[
"Apache-2.0"
] | 4
|
2021-05-27T05:06:09.000Z
|
2021-06-12T17:12:47.000Z
|
11_tuple.py
|
AmreshTripathy/Python
|
e86420fef7f52da393be5b50ac2f13bddfeb3306
|
[
"Apache-2.0"
] | null | null | null |
11_tuple.py
|
AmreshTripathy/Python
|
e86420fef7f52da393be5b50ac2f13bddfeb3306
|
[
"Apache-2.0"
] | null | null | null |
t = ( 10, 11, 12, 34, 99, 4, 98)
print (t[0])
t1 = (1, 1, 1, 2, 3, 4, 65, 65, 3, 2) #tuple with single element
print (t1.count(1))
print (t1.index(65))
| 31
| 65
| 0.541935
| 34
| 155
| 2.470588
| 0.617647
| 0.047619
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.266667
| 0.225806
| 155
| 5
| 66
| 31
| 0.433333
| 0.16129
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0.6
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
|
0
| 4
|
6a048e71920ac9364887d7d202a65d42c7152f53
| 95
|
py
|
Python
|
tests/impl/mocks/not_an_impl.py
|
bcurnow/rfid-reader
|
0753b3f8517fecbcaebfe29c660f8e0d6d76fc8e
|
[
"Apache-2.0"
] | null | null | null |
tests/impl/mocks/not_an_impl.py
|
bcurnow/rfid-reader
|
0753b3f8517fecbcaebfe29c660f8e0d6d76fc8e
|
[
"Apache-2.0"
] | 1
|
2021-11-05T12:29:39.000Z
|
2021-11-05T15:37:03.000Z
|
tests/impl/mocks/not_an_impl.py
|
bcurnow/rfid-reader
|
0753b3f8517fecbcaebfe29c660f8e0d6d76fc8e
|
[
"Apache-2.0"
] | null | null | null |
""" This module has no register method and should be skipped by the register_readers logic."""
| 47.5
| 94
| 0.768421
| 15
| 95
| 4.8
| 0.933333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.157895
| 95
| 1
| 95
| 95
| 0.9
| 0.915789
| 0
| null | 0
| null | 0
| 0
| null | 0
| 0
| 0
| null | 1
| null | true
| 0
| 0
| null | null | null | 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 4
|
6a1cb8bd410126018754807baff0562a38f951c5
| 348
|
py
|
Python
|
checkov/terraform/checks/resource/openstack/SecurityGroupUnrestrictedIngress3389.py
|
jamesholland-uk/checkov
|
d73fd4bd7096d48ab3434a92a177bcc55605460a
|
[
"Apache-2.0"
] | 1
|
2021-02-13T15:24:42.000Z
|
2021-02-13T15:24:42.000Z
|
checkov/terraform/checks/resource/openstack/SecurityGroupUnrestrictedIngress3389.py
|
jamesholland-uk/checkov
|
d73fd4bd7096d48ab3434a92a177bcc55605460a
|
[
"Apache-2.0"
] | 7
|
2021-04-12T06:54:07.000Z
|
2022-03-21T14:04:14.000Z
|
checkov/terraform/checks/resource/openstack/SecurityGroupUnrestrictedIngress3389.py
|
jamesholland-uk/checkov
|
d73fd4bd7096d48ab3434a92a177bcc55605460a
|
[
"Apache-2.0"
] | 1
|
2021-12-16T03:09:55.000Z
|
2021-12-16T03:09:55.000Z
|
from checkov.terraform.checks.resource.openstack.AbsSecurityGroupUnrestrictedIngress import AbsSecurityGroupUnrestrictedIngress
class SecurityGroupUnrestrictedIngress3389(AbsSecurityGroupUnrestrictedIngress):
def __init__(self):
super().__init__(check_id="CKV_OPENSTACK_3", port=3389)
check = SecurityGroupUnrestrictedIngress3389()
| 34.8
| 127
| 0.847701
| 26
| 348
| 10.923077
| 0.769231
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.040752
| 0.083333
| 348
| 9
| 128
| 38.666667
| 0.84953
| 0
| 0
| 0
| 0
| 0
| 0.043103
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.2
| false
| 0
| 0.2
| 0
| 0.6
| 0
| 1
| 0
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
|
0
| 4
|
6a4469fed3e76fd25ea3a810b176b26034bfa2d5
| 70
|
py
|
Python
|
reprexlite/__main__.py
|
jayqi/reprexlite
|
efe0fa3190bd77ba8d47be6995cd9a0d040d36d4
|
[
"MIT"
] | 6
|
2021-02-15T11:33:05.000Z
|
2021-05-31T04:14:18.000Z
|
reprexlite/__main__.py
|
jayqi/reprexlite
|
efe0fa3190bd77ba8d47be6995cd9a0d040d36d4
|
[
"MIT"
] | 51
|
2021-02-15T21:06:51.000Z
|
2022-03-31T15:11:21.000Z
|
reprexlite/__main__.py
|
jayqi/reprexlite
|
efe0fa3190bd77ba8d47be6995cd9a0d040d36d4
|
[
"MIT"
] | null | null | null |
from reprexlite.cli import app
app(prog_name="python -m reprexlite")
| 17.5
| 37
| 0.785714
| 11
| 70
| 4.909091
| 0.818182
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.114286
| 70
| 3
| 38
| 23.333333
| 0.870968
| 0
| 0
| 0
| 0
| 0
| 0.285714
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.5
| 0
| 0.5
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 4
|
dbf3792b341b234f73145ccb386c4360a38be852
| 320
|
py
|
Python
|
src/yellowdog_client/model/compute_requirement_template_summary.py
|
yellowdog/yellowdog-sdk-python-public
|
da69a7d6e45c92933e34fefcaef8b5d98dcd6036
|
[
"Apache-2.0"
] | null | null | null |
src/yellowdog_client/model/compute_requirement_template_summary.py
|
yellowdog/yellowdog-sdk-python-public
|
da69a7d6e45c92933e34fefcaef8b5d98dcd6036
|
[
"Apache-2.0"
] | null | null | null |
src/yellowdog_client/model/compute_requirement_template_summary.py
|
yellowdog/yellowdog-sdk-python-public
|
da69a7d6e45c92933e34fefcaef8b5d98dcd6036
|
[
"Apache-2.0"
] | null | null | null |
from dataclasses import dataclass
from typing import Optional
@dataclass
class ComputeRequirementTemplateSummary:
id: Optional[str] = None
name: Optional[str] = None
namespace: Optional[str] = None
description: Optional[str] = None
strategyType: Optional[str] = None
type: Optional[str] = None
| 24.615385
| 40
| 0.728125
| 35
| 320
| 6.657143
| 0.457143
| 0.283262
| 0.386266
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.190625
| 320
| 12
| 41
| 26.666667
| 0.899614
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.2
| 0
| 0.9
| 0
| 0
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
|
0
| 4
|
dbf585ebc64a22803f1c02b402a2fde5af32de27
| 108
|
py
|
Python
|
django_trumbo/apps.py
|
sasriawesome/django_trumbo
|
28372409837a9e97158428e3beb1ed1c74e8860c
|
[
"MIT"
] | null | null | null |
django_trumbo/apps.py
|
sasriawesome/django_trumbo
|
28372409837a9e97158428e3beb1ed1c74e8860c
|
[
"MIT"
] | null | null | null |
django_trumbo/apps.py
|
sasriawesome/django_trumbo
|
28372409837a9e97158428e3beb1ed1c74e8860c
|
[
"MIT"
] | null | null | null |
from django.apps import AppConfig as AppConfigBase
class AppConfig(AppConfigBase):
name='django_trumbo'
| 27
| 50
| 0.814815
| 13
| 108
| 6.692308
| 0.769231
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.12037
| 108
| 4
| 51
| 27
| 0.915789
| 0
| 0
| 0
| 0
| 0
| 0.119266
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.333333
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 4
|
dbf7991b3c4a00b6cc73e52df47681f55c2ddbe7
| 31
|
py
|
Python
|
homeassistant/components/hikvision/__init__.py
|
domwillcode/home-assistant
|
f170c80bea70c939c098b5c88320a1c789858958
|
[
"Apache-2.0"
] | 30,023
|
2016-04-13T10:17:53.000Z
|
2020-03-02T12:56:31.000Z
|
homeassistant/components/hikvision/__init__.py
|
jagadeeshvenkatesh/core
|
1bd982668449815fee2105478569f8e4b5670add
|
[
"Apache-2.0"
] | 31,101
|
2020-03-02T13:00:16.000Z
|
2022-03-31T23:57:36.000Z
|
homeassistant/components/hikvision/__init__.py
|
jagadeeshvenkatesh/core
|
1bd982668449815fee2105478569f8e4b5670add
|
[
"Apache-2.0"
] | 11,956
|
2016-04-13T18:42:31.000Z
|
2020-03-02T09:32:12.000Z
|
"""The hikvision component."""
| 15.5
| 30
| 0.677419
| 3
| 31
| 7
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.096774
| 31
| 1
| 31
| 31
| 0.75
| 0.774194
| 0
| null | 0
| null | 0
| 0
| null | 0
| 0
| 0
| null | 1
| null | true
| 0
| 0
| null | null | null | 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 4
|
dbfa68ae662b3d9b2b24158ad57a90a8d0cdd99f
| 3,384
|
py
|
Python
|
nightcappackages/nightcappackages/classes/databases/mogo/mongo_modules.py
|
abaker2010/NightCAP
|
c58365a0e2ff1896ce0f8fbf2977b3e83feee1e2
|
[
"MIT"
] | 2
|
2022-02-11T17:47:38.000Z
|
2022-02-11T21:13:36.000Z
|
nightcappackages/nightcappackages/classes/databases/mogo/mongo_modules.py
|
abaker2010/NightCAP
|
c58365a0e2ff1896ce0f8fbf2977b3e83feee1e2
|
[
"MIT"
] | null | null | null |
nightcappackages/nightcappackages/classes/databases/mogo/mongo_modules.py
|
abaker2010/NightCAP
|
c58365a0e2ff1896ce0f8fbf2977b3e83feee1e2
|
[
"MIT"
] | null | null | null |
# Copyright 2020 by Aaron Baker.
# All rights reserved.
# This file is part of the Nightcap Project,
# and is released under the "MIT License Agreement". Please see the LICENSE
# file that should have been included as part of this package.
# region Imports
from nightcapcore.singleton.singleton import Singleton
from nightcappackages.classes.databases.mogo.connections.mongo_operation_connector import (
MongoDatabaseOperationsConnection,
)
# endregion
class MongoModuleDatabase(MongoDatabaseOperationsConnection, metaclass=Singleton):
"""
This class is used interact with the mongo databse
...
Attributes
----------
_db: -> MongoClient
the connection to the db
Methods
-------
Accessible
-------
create(self, module: str = None): -> None
addes a new module to the database
read(self): -> Any
this will read the database
update(self): -> pass
for override when implemented
delete(self): -> pass
for override when implemented
find(self, module: str = None): -> Any
returns the results of the find query
find_one(self, module: str = None): -> Any
returns the results of the find one query
check_module_path(self, path: list): -> Any
returns the module if exists
get_all_modules(self): -> Any
returns all of the modules
module_install(self, module: str): -> None
tries to install the module
module_try_unintall(self, module: str): -> None
tries to uninstall the module
"""
# region Init
def __init__(self):
MongoDatabaseOperationsConnection.__init__(self)
self._db = self.client[self.conf.config["MONGOSERVER"]["db_name"]]["modules"]
# endregion
# region Create
def create(self, module: str = None):
self._db.insert_one({"type": module})
# endregion
# region Read
def read(self):
return self._db.find()
# endregion
# region Update
def update(self):
pass
# endregion
# region Delete
def delete(self):
pass
# endregion
def drop(self):
self._db.drop()
# region Find
def find(self, module: str = None):
return self._db.find({"type": module})
# endregion
# region Find One
def find_one(self, module: str = None):
return self._db.find_one({"type": module})
# endregion
# region Check Module Path
def check_module_path(self, path: list):
return self.find(path[0])
# endregion
# region Get All Modules
def get_all_modules(self):
_doc = self.read()
return _doc
# endregion
# region Install Module
def module_install(self, module: str):
_moduleexists = self.find(module)
if _moduleexists.count() == 0:
self.create(module)
else:
pass
# endregion
# region Uninstall Module
def module_try_unintall(self, module: str):
_moduleexists = self.find_one(module)
self._db.remove(_moduleexists)
self.printer.print_formatted_additional(
text="Deleted module entry", leadingTab=3
)
# endregion
| 24
| 91
| 0.599586
| 375
| 3,384
| 5.288
| 0.328
| 0.050429
| 0.065557
| 0.068583
| 0.291478
| 0.222895
| 0.079677
| 0.079677
| 0.046394
| 0.046394
| 0
| 0.003006
| 0.311761
| 3,384
| 140
| 92
| 24.171429
| 0.848433
| 0.476359
| 0
| 0.076923
| 0
| 0
| 0.035382
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.307692
| false
| 0.076923
| 0.051282
| 0.102564
| 0.512821
| 0.025641
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
|
0
| 4
|
e012472a9757406a29273b5f1cde6c72b886a7f1
| 307
|
py
|
Python
|
spec/fixtures/with_included_context.py
|
kfischer-okarin/mamba
|
0a2c83e2e9b1fc06aa6165519bc2c8de418e906b
|
[
"MIT"
] | 462
|
2015-01-02T19:59:33.000Z
|
2022-03-12T09:47:17.000Z
|
spec/fixtures/with_included_context.py
|
kfischer-okarin/mamba
|
0a2c83e2e9b1fc06aa6165519bc2c8de418e906b
|
[
"MIT"
] | 93
|
2015-01-31T13:18:47.000Z
|
2021-05-06T18:32:42.000Z
|
spec/fixtures/with_included_context.py
|
kfischer-okarin/mamba
|
0a2c83e2e9b1fc06aa6165519bc2c8de418e906b
|
[
"MIT"
] | 66
|
2015-04-24T14:12:13.000Z
|
2022-03-01T16:52:51.000Z
|
from mamba import shared_context, included_context, describe, it
SHARED_CONTEXT = 'Shared Context'
with shared_context(SHARED_CONTEXT):
with it('shared example'):
pass
with describe('Real tests'):
with included_context(SHARED_CONTEXT):
with it('added example'):
pass
| 21.928571
| 64
| 0.697068
| 37
| 307
| 5.594595
| 0.378378
| 0.376812
| 0.289855
| 0.347826
| 0.425121
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.214984
| 307
| 13
| 65
| 23.615385
| 0.858921
| 0
| 0
| 0.222222
| 0
| 0
| 0.166124
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0.222222
| 0.111111
| 0
| 0.111111
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 4
|
e02629bbd217e2c2284440a328cadd313acb7c88
| 1,131
|
py
|
Python
|
apps/hello/src/hello/views.py
|
civascu/hue
|
82f2de44789ff5a981ed725175bae7944832d1e9
|
[
"Apache-2.0"
] | 2
|
2021-04-27T03:57:00.000Z
|
2021-06-18T09:39:58.000Z
|
apps/hello/src/hello/views.py
|
civascu/hue
|
82f2de44789ff5a981ed725175bae7944832d1e9
|
[
"Apache-2.0"
] | null | null | null |
apps/hello/src/hello/views.py
|
civascu/hue
|
82f2de44789ff5a981ed725175bae7944832d1e9
|
[
"Apache-2.0"
] | 2
|
2021-09-06T18:44:45.000Z
|
2022-02-24T04:10:10.000Z
|
#!/usr/bin/env python
# Licensed to Cloudera, Inc. under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. Cloudera, Inc. licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Sample "hello world" view.
from desktop.lib.django_util import render
from hello import conf
def hello(request):
# Use render from django_util so that ?format=json works.
return render("hello.html", request, {"greeting": conf.GREETING.get()})
def goodbye(request):
return render("hello.html", request, {"greeting": "goodbye"})
| 39
| 74
| 0.755968
| 168
| 1,131
| 5.077381
| 0.595238
| 0.07034
| 0.030481
| 0.037515
| 0.084408
| 0.084408
| 0
| 0
| 0
| 0
| 0
| 0.004202
| 0.158267
| 1,131
| 28
| 75
| 40.392857
| 0.891807
| 0.744474
| 0
| 0
| 0
| 0
| 0.159259
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| false
| 0
| 0.333333
| 0.333333
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 1
| 1
| 0
|
0
| 4
|
e03153b1bc29cafe0d89ddbbba23df8396dbf680
| 215
|
py
|
Python
|
Aulas/aula03/exe3-media-do-aluno.py
|
GabrielGustavoMS/programacaoDeComputadores
|
4eb3735b2393f241da78e2b259fde30ff2566a4c
|
[
"MIT"
] | null | null | null |
Aulas/aula03/exe3-media-do-aluno.py
|
GabrielGustavoMS/programacaoDeComputadores
|
4eb3735b2393f241da78e2b259fde30ff2566a4c
|
[
"MIT"
] | null | null | null |
Aulas/aula03/exe3-media-do-aluno.py
|
GabrielGustavoMS/programacaoDeComputadores
|
4eb3735b2393f241da78e2b259fde30ff2566a4c
|
[
"MIT"
] | null | null | null |
nome= input("Digite o nome do aluno: ")
nota1 = float(input("Digite a primeira nota: " ))
nota2 = float(input("Digite a segunda nota: " ))
media = (nota1 + nota2)/2
print("A média do aluno é",nome, ":", media)
| 35.833333
| 50
| 0.64186
| 33
| 215
| 4.181818
| 0.545455
| 0.23913
| 0.231884
| 0.246377
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.028571
| 0.186047
| 215
| 5
| 51
| 43
| 0.76
| 0
| 0
| 0
| 0
| 0
| 0.428571
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0.2
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 4
|
e04a5037420562e660d3fde7237be53948a2745e
| 174
|
py
|
Python
|
tests/test_files/src/decorator.py
|
TylerYep/pytest-idempotent
|
f0c5cd84762bd057796d7dec02b230ae8f32b693
|
[
"MIT"
] | 2
|
2021-11-26T07:41:50.000Z
|
2021-11-27T13:48:14.000Z
|
tests/test_files/src/decorator.py
|
TylerYep/pytest-idempotent
|
f0c5cd84762bd057796d7dec02b230ae8f32b693
|
[
"MIT"
] | null | null | null |
tests/test_files/src/decorator.py
|
TylerYep/pytest-idempotent
|
f0c5cd84762bd057796d7dec02b230ae8f32b693
|
[
"MIT"
] | null | null | null |
from __future__ import annotations
from typing import Any, Callable, TypeVar
_F = TypeVar("_F", bound=Callable[..., Any])
def idempotent(func: _F) -> _F:
return func
| 17.4
| 44
| 0.706897
| 23
| 174
| 5
| 0.608696
| 0.13913
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.172414
| 174
| 9
| 45
| 19.333333
| 0.798611
| 0
| 0
| 0
| 0
| 0
| 0.011494
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.2
| false
| 0
| 0.4
| 0.2
| 0.8
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
|
0
| 4
|
e06cf65c2e8ae616e590636d9540d1301d1c8f7c
| 187
|
py
|
Python
|
dependencies/src/4Suite-XML-1.0.2/Ft/Lib/DistExt/Formatters/__init__.py
|
aleasims/Peach
|
bb56841e943d719d5101fee0a503ed34308eda04
|
[
"MIT"
] | null | null | null |
dependencies/src/4Suite-XML-1.0.2/Ft/Lib/DistExt/Formatters/__init__.py
|
aleasims/Peach
|
bb56841e943d719d5101fee0a503ed34308eda04
|
[
"MIT"
] | null | null | null |
dependencies/src/4Suite-XML-1.0.2/Ft/Lib/DistExt/Formatters/__init__.py
|
aleasims/Peach
|
bb56841e943d719d5101fee0a503ed34308eda04
|
[
"MIT"
] | 1
|
2020-07-26T03:57:45.000Z
|
2020-07-26T03:57:45.000Z
|
__revision__ = '$Id: __init__.py,v 1.2 2006/08/12 15:56:26 jkloth Exp $'
__all__ = ['XmlFormatter', 'ApiFormatter', 'ExtensionFormatter',
'CommandLineFormatter',
]
| 31.166667
| 72
| 0.636364
| 20
| 187
| 5.35
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.108844
| 0.213904
| 187
| 5
| 73
| 37.4
| 0.619048
| 0
| 0
| 0
| 0
| 0.25
| 0.625668
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 4
|
0eb869a88fa1f1bcce59e1ba1c260db5429aa701
| 113
|
py
|
Python
|
streamlit-app/config.py
|
anadiamaq/BCN-air
|
942f218ba0227150fd1f7ad94c57ea81e17a5a41
|
[
"Apache-2.0"
] | null | null | null |
streamlit-app/config.py
|
anadiamaq/BCN-air
|
942f218ba0227150fd1f7ad94c57ea81e17a5a41
|
[
"Apache-2.0"
] | 1
|
2021-08-20T17:56:20.000Z
|
2021-08-20T17:56:20.000Z
|
streamlit-app/config.py
|
anadiamaq/BCN-air
|
942f218ba0227150fd1f7ad94c57ea81e17a5a41
|
[
"Apache-2.0"
] | null | null | null |
import os
from dotenv import load_dotenv
load_dotenv()
PORT = os.getenv('PORT')
MONGO_URL=os.getenv('MONGO_URL')
| 18.833333
| 32
| 0.778761
| 19
| 113
| 4.421053
| 0.473684
| 0.238095
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.097345
| 113
| 6
| 32
| 18.833333
| 0.823529
| 0
| 0
| 0
| 0
| 0
| 0.114035
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.4
| 0
| 0.4
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
|
0
| 4
|
0ebaed68865237e96deb5eb41a0c75fba0c5ee87
| 158
|
py
|
Python
|
test.py
|
nzkller/dnav3-code
|
4676adad7cee9490c2cf910e8c73b41a758a828f
|
[
"MIT"
] | null | null | null |
test.py
|
nzkller/dnav3-code
|
4676adad7cee9490c2cf910e8c73b41a758a828f
|
[
"MIT"
] | null | null | null |
test.py
|
nzkller/dnav3-code
|
4676adad7cee9490c2cf910e8c73b41a758a828f
|
[
"MIT"
] | null | null | null |
n = input("value of n\n")
n = int(n)
if n < 5:
print("n is less than 5")
elif n == 5:
print("n is equal to 5")
else:
print("n is greater than 5")
| 17.555556
| 32
| 0.550633
| 34
| 158
| 2.558824
| 0.470588
| 0.206897
| 0.275862
| 0.183908
| 0.229885
| 0
| 0
| 0
| 0
| 0
| 0
| 0.04386
| 0.278481
| 158
| 8
| 33
| 19.75
| 0.719298
| 0
| 0
| 0
| 0
| 0
| 0.392405
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0.375
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 4
|
0ec49fc9cfa6234ca63d19ddb6743551087aa47e
| 210
|
py
|
Python
|
tinynn/converter/operators/tflite/custom.py
|
www516717402/TinyNeuralNetwork
|
23e7931b4377462fad94a9ab0651b6d9a346252d
|
[
"MIT"
] | 1
|
2021-12-20T07:21:35.000Z
|
2021-12-20T07:21:35.000Z
|
tinynn/converter/operators/tflite/custom.py
|
www516717402/TinyNeuralNetwork
|
23e7931b4377462fad94a9ab0651b6d9a346252d
|
[
"MIT"
] | null | null | null |
tinynn/converter/operators/tflite/custom.py
|
www516717402/TinyNeuralNetwork
|
23e7931b4377462fad94a9ab0651b6d9a346252d
|
[
"MIT"
] | 1
|
2021-12-20T07:21:37.000Z
|
2021-12-20T07:21:37.000Z
|
from .generated_ops import CustomOperator
class Atan2Operator(CustomOperator):
def __init__(self, inputs, outputs) -> None:
super().__init__(inputs, outputs)
self.op.custom_code = "Atan2"
| 26.25
| 48
| 0.709524
| 23
| 210
| 6.043478
| 0.782609
| 0.18705
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.011696
| 0.185714
| 210
| 7
| 49
| 30
| 0.80117
| 0
| 0
| 0
| 1
| 0
| 0.02381
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.2
| false
| 0
| 0.2
| 0
| 0.6
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
|
0
| 4
|
0edd342f30db856a96dbfe76270baf29fc345c03
| 9,828
|
py
|
Python
|
hufscoops/haksik_table_make.py
|
JunKiBeom/HUFormation-kakao
|
a76c23fa0e8e0625b7ae98e79df117c9e4ac8fb5
|
[
"MIT"
] | null | null | null |
hufscoops/haksik_table_make.py
|
JunKiBeom/HUFormation-kakao
|
a76c23fa0e8e0625b7ae98e79df117c9e4ac8fb5
|
[
"MIT"
] | null | null | null |
hufscoops/haksik_table_make.py
|
JunKiBeom/HUFormation-kakao
|
a76c23fa0e8e0625b7ae98e79df117c9e4ac8fb5
|
[
"MIT"
] | null | null | null |
import sqlite3
from django.shortcuts import render
import datetime
def to_seo_table(request):
context = {}
context['menu'] = seo_haksik_load()
# 식단표 단어별로 분류 완료
#print(context['menu'])
#print(context)
return render(request, 'seo_haksik_table.html', context)
def to_glo_table(request):
context = {}
context['menu'] = glo_haksik_load()
# 식단표 단어별로 분류 완료
#print(context['menu'])
#print(context)
return render(request, 'glo_haksik_table.html', context)
def seo_haksik_load():
    """Collect today's lunch menus for the Seoul-campus cafeterias.

    Returns a flat dict with keys of the form
    ``<prefix>_today_menu<N>`` / ``<prefix>_today_price<N>`` for the
    인문관 (inmoon), 교수회관 (gyosoo) and 스카이라운지 (lounge) cafeterias.
    Missing or malformed rows are skipped silently, preserving the old
    best-effort behaviour (the original swallowed everything with bare
    ``except: pass``); additionally, row counts the original did not
    enumerate (0 or more than the cap) no longer drop the whole cafeteria.
    """
    def build(prefix, rows, limit):
        # Flatten formatted_haksik() rows into the template key scheme.
        # row[1] is the menu text, row[-1] the (already stripped) price.
        entries = {}
        for idx, row in enumerate(rows[:limit]):
            try:
                entries['%s_today_menu%d' % (prefix, idx)] = row[1]
                entries['%s_today_price%d' % (prefix, idx)] = row[-1]
            except (IndexError, TypeError):
                continue  # malformed row: skip it, don't crash the page
        return entries

    time = 'lunch'
    all_menu = {}
    # Caps (3/2/2) preserve the per-cafeteria limits encoded in the
    # original if/elif branches.
    all_menu.update(build('inmoon', formatted_haksik(time, '인문관'), 3))
    all_menu.update(build('gyosoo', formatted_haksik(time, '교수회관'), 2))
    all_menu.update(build('lounge', formatted_haksik(time, '스카이라운지'), 2))
    return all_menu
def glo_haksik_load():
    """Collect today's lunch menus for the Global-campus cafeterias.

    Returns a flat dict with ``<prefix>_today_menu<N>`` /
    ``<prefix>_today_price<N>`` keys for 후생관 (hooseng), 교직원 (gyojik),
    어문관 (umoon) and 기숙사 (hufsdorm).

    Bug fixed vs. the original: the ``all_menu.update(...)`` calls sat
    OUTSIDE their try blocks, so any row count without a matching
    if/elif branch (e.g. more than 5 rows, or more than 2 for 교직원)
    left the dict name unbound and raised NameError, crashing the page.
    Unhandled shapes are now simply capped/skipped. The leftover debug
    ``print(all_menu)`` was removed.
    """
    def build(prefix, rows, limit):
        # Flatten formatted_haksik() rows into the template key scheme.
        entries = {}
        for idx, row in enumerate(rows[:limit]):
            try:
                entries['%s_today_menu%d' % (prefix, idx)] = row[1]
                entries['%s_today_price%d' % (prefix, idx)] = row[-1]
            except (IndexError, TypeError):
                continue  # malformed row: best-effort, same as before
        return entries

    time = 'lunch'
    all_menu = {}
    # Caps (5/2/2/5) preserve the per-cafeteria limits encoded in the
    # original if/elif branches.
    all_menu.update(build('hooseng', formatted_haksik(time, '후생관'), 5))
    all_menu.update(build('gyojik', formatted_haksik(time, '교직원'), 2))
    all_menu.update(build('umoon', formatted_haksik(time, '어문관'), 2))
    all_menu.update(build('hufsdorm', formatted_haksik(time, '기숙사'), 5))
    return all_menu
def formatted_haksik(time, cafeteria):
    """Fetch and normalise menu rows for *cafeteria* at *time* (e.g. 'lunch').

    Returns a list of menus; each menu is the cell text split on newlines
    with empty lines removed and the trailing price line stripped of its
    '가격 : ' prefix.

    Fixes vs. the original:
      * the connection is now closed even if the query raises (try/finally);
      * the query is executed once instead of twice (fetchall was used
        only to count rows before re-executing);
      * the remove('')-inside-a-loop pattern skipped the price cleanup
        entirely when a cell contained no empty line; cleanup now always runs.
    """
    # NOTE(security): `time` and `cafeteria` are interpolated into the SQL —
    # identifiers cannot be bound as parameters in SQLite. Callers pass only
    # hard-coded values; never route user input into these arguments.
    con = sqlite3.connect('./DB/haksik_data.db')
    try:
        cur = con.cursor()
        cur.execute('SELECT ' + time + ' FROM ' + cafeteria)
        rows = cur.fetchall()
    finally:
        con.close()  # always release the connection

    menu_list = []
    for (menu,) in rows:
        if len(menu) <= 1:
            continue  # skip empty / placeholder cells
        parts = [line for line in menu.split('\n') if line]
        for i, part in enumerate(parts):
            if '선택식' in part:
                # 어문관 layout: the real price precedes the '선택식' marker
                parts[-1] = str(parts[i - 1]).replace('가격 : ', '')
            else:
                parts[-1] = parts[-1].replace('가격 : ', '')
        menu_list.append(parts)
    return menu_list
# NOTE(review): these run at import time and hit the SQLite database on
# every module load — presumably a leftover smoke test; consider removing
# or guarding with `if __name__ == '__main__':`.
seo_haksik_load()
glo_haksik_load()
| 31.703226
| 92
| 0.544668
| 1,234
| 9,828
| 4.002431
| 0.093193
| 0.234865
| 0.082
| 0.085037
| 0.807653
| 0.774651
| 0.683539
| 0.668961
| 0.668961
| 0.626848
| 0
| 0.050703
| 0.33374
| 9,828
| 310
| 93
| 31.703226
| 0.703574
| 0.024318
| 0
| 0.65098
| 0
| 0
| 0.014517
| 0.004386
| 0
| 0
| 0
| 0
| 0
| 1
| 0.019608
| false
| 0.031373
| 0.011765
| 0
| 0.05098
| 0.003922
| 0
| 0
| 0
| null | 1
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 4
|
0ef0b34210a5fa4d1a7c3413d0c7012978d858ef
| 686
|
py
|
Python
|
setup.py
|
lienz/sphfile
|
2a9af179245ce16a3a61f6f36f335b78e1b454a3
|
[
"MIT"
] | 1
|
2021-05-03T10:02:55.000Z
|
2021-05-03T10:02:55.000Z
|
setup.py
|
lienz/sphfile
|
2a9af179245ce16a3a61f6f36f335b78e1b454a3
|
[
"MIT"
] | null | null | null |
setup.py
|
lienz/sphfile
|
2a9af179245ce16a3a61f6f36f335b78e1b454a3
|
[
"MIT"
] | null | null | null |
import setuptools
# Read the long description with a context manager so the file handle is
# closed deterministically (the original `open(...).read()` relied on GC).
# No `encoding=` kwarg: the classifiers still declare Python 2.7 support,
# and `open(encoding=...)` is Python-3 only.
with open('README.rst') as _readme:
    _long_description = _readme.read()

setuptools.setup(
    name="sphfile",
    version="1.0.1",
    url="https://github.com/mcfletch/sphfile",
    author="Mike C. Fletcher",
    author_email="mcfletch@vrplumber.com",
    description="Numpy-based NIST SPH audio-file reader",
    long_description=_long_description,
    packages=setuptools.find_packages(),
    install_requires=[],
    classifiers=[
        'Programming Language :: Python',
        'Programming Language :: Python :: 2',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.4',
        'Programming Language :: Python :: 3.5',
    ],
)
| 25.407407
| 57
| 0.622449
| 72
| 686
| 5.875
| 0.625
| 0.269504
| 0.35461
| 0.184397
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.020638
| 0.223032
| 686
| 26
| 58
| 26.384615
| 0.772983
| 0
| 0
| 0
| 0
| 0
| 0.501458
| 0.03207
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.05
| 0
| 0.05
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 4
|
0ef32db3f5c0352544551ed32ea76efd272e1a7c
| 70
|
py
|
Python
|
src/aleph_client/vm/__init__.py
|
davetapley/aleph-client
|
71dbab7b8107ea5676fbff5dc11d8418f77ac57b
|
[
"MIT"
] | 4
|
2020-10-17T13:22:45.000Z
|
2022-02-21T17:29:33.000Z
|
src/aleph_client/vm/__init__.py
|
davetapley/aleph-client
|
71dbab7b8107ea5676fbff5dc11d8418f77ac57b
|
[
"MIT"
] | 21
|
2021-04-20T07:33:58.000Z
|
2022-02-16T08:57:34.000Z
|
src/aleph_client/vm/__init__.py
|
davetapley/aleph-client
|
71dbab7b8107ea5676fbff5dc11d8418f77ac57b
|
[
"MIT"
] | 7
|
2020-11-01T13:06:02.000Z
|
2022-02-10T23:07:01.000Z
|
"""
Aleph helpers for apps running inside Aleph Virtual Machines.
"""
| 17.5
| 61
| 0.742857
| 9
| 70
| 5.777778
| 0.888889
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.157143
| 70
| 3
| 62
| 23.333333
| 0.881356
| 0.871429
| 0
| null | 0
| null | 0
| 0
| null | 0
| 0
| 0
| null | 1
| null | true
| 0
| 0
| null | null | null | 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 4
|
1615cacabe19c00e6fc295f178bb6f26ddfa52c8
| 91
|
py
|
Python
|
djangoProject/GardenAR/apps.py
|
ValenDtv/Gargen_ARback
|
ba4da7d93168fd3dcfb03901eef3a779a2f5859f
|
[
"MIT"
] | null | null | null |
djangoProject/GardenAR/apps.py
|
ValenDtv/Gargen_ARback
|
ba4da7d93168fd3dcfb03901eef3a779a2f5859f
|
[
"MIT"
] | null | null | null |
djangoProject/GardenAR/apps.py
|
ValenDtv/Gargen_ARback
|
ba4da7d93168fd3dcfb03901eef3a779a2f5859f
|
[
"MIT"
] | null | null | null |
from django.apps import AppConfig
class GardenarConfig(AppConfig):
    """Django application configuration for the GardenAR app."""
    # Must match the app package name referenced in INSTALLED_APPS.
    name = 'GardenAR'
| 15.166667
| 33
| 0.758242
| 10
| 91
| 6.9
| 0.9
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.164835
| 91
| 5
| 34
| 18.2
| 0.907895
| 0
| 0
| 0
| 0
| 0
| 0.087912
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.333333
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 4
|
16bc9eca21ffbb87310e54c2088a2acab78664f1
| 47,975
|
py
|
Python
|
examples/speech_recognition/twophase_sequence_generator.py
|
sarapapi/FBK-fairseq-ST
|
33f381937c1589602944da8cf39e533802d283ca
|
[
"MIT"
] | 11
|
2021-02-28T23:33:18.000Z
|
2022-02-11T20:42:18.000Z
|
examples/speech_recognition/twophase_sequence_generator.py
|
sarapapi/FBK-fairseq-ST
|
33f381937c1589602944da8cf39e533802d283ca
|
[
"MIT"
] | 1
|
2021-05-21T08:08:19.000Z
|
2021-06-30T12:28:55.000Z
|
examples/speech_recognition/twophase_sequence_generator.py
|
sarapapi/FBK-fairseq-ST
|
33f381937c1589602944da8cf39e533802d283ca
|
[
"MIT"
] | 5
|
2021-03-15T02:05:38.000Z
|
2022-02-14T09:20:20.000Z
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import math
from typing import Dict, List, Optional
import torch
from fairseq import search
from fairseq.models.fairseq_encoder import EncoderOut
from torch import Tensor
from fairseq.sequence_generator import SequenceGenerator, EnsembleModel, BeamContainer
class HierarchicalBeamSearch(search.BeamSearch):
    """Beam search whose step-0 log-probs are offset by the scores of the
    first-phase (transcript) hypotheses, chaining the two decoding phases.
    """

    def __init__(self, tgt_dict):
        super().__init__(tgt_dict)

    @torch.jit.export
    def step(self, step: int, lprobs, scores: Optional[Tensor], prev_scores: Tensor):
        """Perform one search step.

        Args:
            step: current decoding step (0-based).
            lprobs: (bsz, beam_size, vocab) log-probabilities for this step.
            scores: (bsz, beam_size, steps) cumulative scores so far;
                required for step > 0.
            prev_scores: first-phase hypothesis scores, added at step 0.

        Returns:
            Tuple ``(scores_buf, indices_buf, beams_buf)`` for the top
            ``2 * beam_size`` candidates per sentence.
        """
        bsz, beam_size, vocab_size = lprobs.size()
        if step == 0:
            # at the first step add the scores from the previous beam search
            lprobs = lprobs + prev_scores
        else:
            # make probs contain cumulative scores for each hypothesis
            assert scores is not None
            lprobs = lprobs + scores[:, :, step - 1].unsqueeze(-1)
        top_prediction = torch.topk(
            lprobs.view(bsz, -1),
            k=min(
                # Take the best 2 x beam_size predictions. We'll choose the first
                # beam_size of these which don't predict eos to continue with.
                beam_size * 2,
                lprobs.view(bsz, -1).size(1) - 1,  # -1 so we never select pad
            ),
        )
        scores_buf = top_prediction[0]
        indices_buf = top_prediction[1]
        # BUGFIX: the original compared version *strings*
        # (`torch.__version__ < '1.6.0'`); lexicographically '1.10.0' < '1.6.0'
        # is True, so torch >= 1.10 took the `torch.div` branch, where div
        # performs true (float) division and corrupts the beam indices.
        # Compare numeric (major, minor) instead.
        major, minor = (int(p) for p in torch.__version__.split('+')[0].split('.')[:2])
        if (major, minor) < (1, 6):
            beams_buf = torch.div(indices_buf, vocab_size)
        else:
            beams_buf = torch.floor_divide(indices_buf, vocab_size)
        indices_buf = indices_buf.fmod(vocab_size)
        return scores_buf, indices_buf, beams_buf
class TwoPhaseSequenceGenerator(SequenceGenerator):
    def __init__(
        self,
        models,
        src_dict,
        tgt_dict,
        beam_size=1,
        max_len_a=0,
        max_len_b=200,
        min_len=1,
        normalize_scores=True,
        len_penalty=1.0,
        unk_penalty=0.0,
        retain_dropout=False,
        temperature=1.0,
        match_source_len=False,
        no_repeat_ngram_size=0,
        search_strategy=None,
        eos=None,
    ):
        """Generates transcripts and translations of a given source audio.

        Args:
            models (List[~fairseq.models.FairseqModel]): ensemble of models,
                currently support fairseq.models.TransformerModel for scripting
            src_dict: dictionary of the first-phase (transcript) vocabulary
            tgt_dict: dictionary of the second-phase (translation) vocabulary
            beam_size (int, optional): beam width (default: 1)
            max_len_a/b (int, optional): generate sequences of maximum length
                ax + b, where x is the source length
            min_len (int, optional): the minimum length of the generated output
                (not including end-of-sentence)
            normalize_scores (bool, optional): normalize scores by the length
                of the output (default: True)
            len_penalty (float, optional): length penalty, where <1.0 favors
                shorter, >1.0 favors longer sentences (default: 1.0)
            unk_penalty (float, optional): unknown word penalty, where <0
                produces more unks, >0 produces fewer (default: 0.0)
            retain_dropout (bool, optional): use dropout when generating
                (default: False)
            temperature (float, optional): temperature, where values
                >1.0 produce more uniform samples and values <1.0 produce
                sharper samples (default: 1.0)
            match_source_len (bool, optional): outputs should match the source
                length (default: False)
        """
        super().__init__(
            models, tgt_dict,
            beam_size=beam_size,
            max_len_a=max_len_a,
            max_len_b=max_len_b,
            min_len=min_len,
            normalize_scores=normalize_scores,
            len_penalty=len_penalty,
            unk_penalty=unk_penalty,
            retain_dropout=retain_dropout,
            temperature=temperature,
            match_source_len=match_source_len,
            no_repeat_ngram_size=no_repeat_ngram_size,
            search_strategy=search_strategy,
            eos=eos
        )
        # Replace the base-class ensemble with a two-phase wrapper so both
        # the auxiliary (transcript) and target decoders are reachable.
        if isinstance(models, EnsembleTwoPhaseModel):
            self.model = models
        else:
            self.model = EnsembleTwoPhaseModel(models)
        # Source-side (first-phase) vocabulary bookkeeping, parallel to the
        # target-side attributes set by the base class.
        self.src_pad = src_dict.pad()
        self.src_unk = src_dict.unk()
        self.src_eos = src_dict.eos() if eos is None else eos
        self.src_vocab_size = len(src_dict)
        # First-phase search over the transcript vocabulary; second-phase
        # search folds the first-phase scores in at step 0.
        self.src_search = search.BeamSearch(src_dict)
        self.search = HierarchicalBeamSearch(tgt_dict)
    def cuda(self):
        """Move the wrapped ensemble to GPU and return self (fluent style)."""
        self.model.cuda()
        return self
    def _generate(
        self,
        sample: Dict[str, Dict[str, Tensor]],
        prefix_tokens: Optional[Tensor] = None,
        bos_token: Optional[int] = None,
    ):
        """Run two-phase generation: first decode transcript hypotheses
        (`_generate_aux`), then decode translations conditioned on that
        n-best list (`_generate_tgt`). Returns the second phase's finalized
        hypotheses.
        """
        net_input = sample["net_input"]
        # TODO: should not use audio features...
        src_tokens = net_input["src_tokens"]
        # length of the source text being the character length except EndOfSentence and pad
        src_lengths = (
            (src_tokens.ne(self.src_eos) & src_tokens.ne(self.src_pad)).long().sum(dim=1)
        )
        # bsz: total number of sentences in beam
        input_size = src_tokens.size()
        bsz, src_len = input_size[0], input_size[1]
        beam_size = self.beam_size
        # NOTE(review): max_len computed here is only validated; both
        # sub-generators recompute their own bounds.
        max_len: int = -1
        if self.match_source_len:
            max_len = src_lengths.max().item()
        else:
            max_len = min(
                int(self.max_len_a * src_len + self.max_len_b),
                # exclude the EOS marker
                self.model.max_decoder_positions() - 1,
            )
        assert (
            self.min_len <= max_len
        ), "min_len cannot be larger than max_len, please adjust these!"
        # compute the encoder output for each beam
        encoder_outs = self.model.forward_encoder(net_input)
        # placeholder of indices for bsz * beam_size to hold tokens and accumulative scores
        new_order = torch.arange(bsz).view(-1, 1).repeat(1, beam_size).view(-1)
        new_order = new_order.to(src_tokens.device).long()
        encoder_outs = self.model.reorder_encoder_out(encoder_outs, new_order)
        # ensure encoder_outs is a List.
        assert encoder_outs is not None
        aux_nbest = self._generate_aux(
            sample, encoder_outs, prefix_tokens=prefix_tokens, bos_token=bos_token)
        return self._generate_tgt(
            aux_nbest, encoder_outs, prefix_tokens=prefix_tokens, bos_token=bos_token)
    def _generate_tgt(
        self,
        aux_nbest: List[List[Dict[str, Tensor]]],
        encoder_outs: List[EncoderOut],
        prefix_tokens: Optional[Tensor] = None,
        bos_token: Optional[int] = None,
    ):
        """Second-phase beam search: decode target hypotheses conditioned on
        the first-phase n-best transcripts.

        Args:
            aux_nbest: per-sentence n-best list from `_generate_aux`; each
                candidate dict carries "tokens", "score" and "auxiliary_out".
            encoder_outs: encoder outputs already repeated to bsz*beam_size.
            prefix_tokens: optional forced target prefix.
            bos_token: optional BOS id (defaults to EOS, fairseq convention).

        Returns:
            `finalized`: per sentence, a list of hypothesis dicts sorted by
            descending score.
        """
        # bsz: total number of sentences in beam
        bsz = len(aux_nbest)
        beam_size = self.beam_size
        max_aux_len = max([cand["tokens"].shape[0] for sent in aux_nbest for cand in sent])
        # Pack the first-phase transcripts into a padded (bsz*beam, len)
        # batch that acts as the "source tokens" of the second phase.
        src_tokens = (
            torch.zeros(bsz, beam_size, max_aux_len).long().fill_(self.src_pad).to(
                aux_nbest[0][0]["tokens"].device)
        )
        for i_batch in range(len(aux_nbest)):
            for i_best in range(len(aux_nbest[i_batch])):
                cand = aux_nbest[i_batch][i_best]
                src_tokens[i_batch, i_best, :cand["tokens"].shape[0]] = cand["tokens"]
        src_tokens = src_tokens.view(bsz * beam_size, -1)
        # length of the source text being the character length except EndOfSentence and pad
        src_lengths = (
            (src_tokens.ne(self.src_eos) & src_tokens.ne(self.src_pad)).long().sum(dim=1)
        )
        src_len = src_tokens.size()[1]
        # Pad the first-phase decoder states the same way as the tokens.
        auxiliary_outputs = torch.zeros(
            bsz, beam_size, max_aux_len, aux_nbest[0][0]["auxiliary_out"].shape[1]).float().fill_(
            self.src_pad).to(src_tokens.device)
        for i_batch in range(len(aux_nbest)):
            for i_best in range(len(aux_nbest[i_batch])):
                cand = aux_nbest[i_batch][i_best]
                auxiliary_outputs[i_batch, i_best, :cand["auxiliary_out"].shape[0], :] = cand["auxiliary_out"]
        auxiliary_outputs = auxiliary_outputs.view(bsz * beam_size, max_aux_len, -1)
        # First-phase scores; HierarchicalBeamSearch adds them at step 0.
        prev_scores = torch.stack(
            [cand["score"] for sent in aux_nbest for cand in sent]).view(bsz, beam_size, 1)
        if self.match_source_len:
            max_len = src_lengths.max().item()
        else:
            max_len = min(
                int(self.max_len_a * src_len + self.max_len_b),
                # exclude the EOS marker
                self.model.max_decoder_positions() - 1,
            )
        assert (
            self.min_len <= max_len
        ), "min_len cannot be larger than max_len, please adjust these!"
        # compute the encoder output for each beam
        # initialize buffers
        scores = (
            torch.zeros(bsz * beam_size, max_len + 1).to(src_tokens).float()
        ) # +1 for eos; pad is never choosed for scoring
        tokens = (
            torch.zeros(bsz * beam_size, max_len + 2)
            .to(src_tokens)
            .long()
            .fill_(self.pad)
        ) # +2 for eos and pad
        tokens[:, 0] = self.eos if bos_token is None else bos_token
        attn: Optional[Tensor] = None
        # The blacklist indicates candidates that should be ignored.
        # For example, suppose we're sampling and have already finalized 2/5
        # samples. Then the blacklist would mark 2 positions as being ignored,
        # so that we only finalize the remaining 3 samples.
        blacklist = (
            torch.zeros(bsz, beam_size).to(src_tokens).eq(-1)
        ) # forward and backward-compatible False mask
        # list of completed sentences
        finalized = torch.jit.annotate(
            List[List[Dict[str, Tensor]]],
            [torch.jit.annotate(List[Dict[str, Tensor]], []) for i in range(bsz)],
        ) # contains lists of dictionaries of infomation about the hypothesis being finalized at each step
        finished = [
            False for i in range(bsz)
        ] # a boolean array indicating if the sentence at the index is finished or not
        num_remaining_sent = bsz # number of sentences remaining
        # number of candidate hypos per step
        cand_size = 2 * beam_size # 2 x beam size in case half are EOS
        # offset arrays for converting between different indexing schemes
        bbsz_offsets = (torch.arange(0, bsz) * beam_size).unsqueeze(1).type_as(tokens)
        cand_offsets = torch.arange(0, cand_size).type_as(tokens)
        reorder_state: Optional[Tensor] = None
        batch_idxs: Optional[Tensor] = None
        for step in range(max_len + 1): # one extra step for EOS marker
            # reorder decoder internal states based on the prev choice of beams
            # print(f'step: {step}')
            if reorder_state is not None:
                if batch_idxs is not None:
                    # update beam indices to take into account removed sentences
                    corr = batch_idxs - torch.arange(batch_idxs.numel()).type_as(
                        batch_idxs
                    )
                    reorder_state.view(-1, beam_size).add_(
                        corr.unsqueeze(-1) * beam_size
                    )
                self.model.reorder_incremental_state(reorder_state)
                encoder_outs = self.model.reorder_encoder_out(
                    encoder_outs, reorder_state
                )
                # keep the per-beam transcript inputs aligned with the beams
                src_tokens = src_tokens.index_select(0, reorder_state)
                prev_scores = prev_scores.view(-1).index_select(0, reorder_state).view(-1, beam_size, 1)
                auxiliary_outputs = auxiliary_outputs.index_select(0, reorder_state)
            lprobs, avg_attn_scores = self.model.forward_decoder(
                tokens[:, : step + 1], encoder_outs, src_tokens, auxiliary_outputs, self.temperature
            )
            # replace NaNs with -inf so they can never be selected
            lprobs[lprobs != lprobs] = torch.tensor(-math.inf).to(lprobs)
            lprobs[:, self.pad] = -math.inf # never select pad
            lprobs[:, self.unk] -= self.unk_penalty # apply unk penalty
            # handle max length constraint
            if step >= max_len:
                lprobs[:, : self.eos] = -math.inf
                lprobs[:, self.eos + 1 :] = -math.inf
            # handle prefix tokens (possibly with different lengths)
            if (
                prefix_tokens is not None
                and step < prefix_tokens.size(1)
                and step < max_len
            ):
                lprobs, tokens, scores = self._prefix_tokens(
                    step, lprobs, scores, tokens, prefix_tokens, beam_size, self.pad, self.eos
                )
            elif step < self.min_len:
                # minimum length constraint (does not apply if using prefix_tokens)
                lprobs[:, self.eos] = -math.inf
            # Record attention scores, only support avg_attn_scores is a Tensor
            if avg_attn_scores is not None:
                if attn is None:
                    attn = torch.empty(
                        bsz * beam_size, avg_attn_scores.size(1), max_len + 2
                    ).to(scores)
                attn[:, :, step + 1].copy_(avg_attn_scores)
            scores = scores.type_as(lprobs)
            eos_bbsz_idx = torch.empty(0).to(
                tokens
            ) # indices of hypothesis ending with eos (finished sentences)
            eos_scores = torch.empty(0).to(
                scores
            ) # scores of hypothesis ending with eos (finished sentences)
            self.search.set_src_lengths(src_lengths)
            if self.no_repeat_ngram_size > 0:
                lprobs = self._no_repeat_ngram(tokens, lprobs, bsz, beam_size, step)
            cand_scores, cand_indices, cand_beams = self.search.step(
                step,
                lprobs.view(bsz, -1, self.vocab_size),
                scores.view(bsz, beam_size, -1)[:, :, :step],
                prev_scores,
            )
            # cand_bbsz_idx contains beam indices for the top candidate
            # hypotheses, with a range of values: [0, bsz*beam_size),
            # and dimensions: [bsz, cand_size]
            cand_bbsz_idx = cand_beams.add(bbsz_offsets)
            # finalize hypotheses that end in eos
            eos_mask = cand_indices.eq(self.eos) & cand_scores.ne(-math.inf)
            eos_mask[:, :beam_size][blacklist] = torch.tensor(0).to(eos_mask)
            # only consider eos when it's among the top beam_size indices
            eos_bbsz_idx = torch.masked_select(
                cand_bbsz_idx[:, :beam_size], mask=eos_mask[:, :beam_size]
            )
            finalized_sents: List[int] = []
            if eos_bbsz_idx.numel() > 0:
                eos_scores = torch.masked_select(
                    cand_scores[:, :beam_size], mask=eos_mask[:, :beam_size]
                )
                finalized_sents = self.finalize_hypos(
                    step,
                    eos_bbsz_idx,
                    eos_scores,
                    tokens,
                    src_tokens,
                    scores,
                    finalized,
                    finished,
                    beam_size,
                    attn,
                    src_lengths,
                    max_len,
                )
                num_remaining_sent -= len(finalized_sents)
            assert num_remaining_sent >= 0
            if num_remaining_sent == 0:
                break
            assert step < max_len
            if len(finalized_sents) > 0:
                new_bsz = bsz - len(finalized_sents)
                # construct batch_idxs which holds indices of batches to keep for the next pass
                batch_mask = torch.ones(bsz).to(cand_indices)
                batch_mask[
                    torch.tensor(finalized_sents).to(cand_indices)
                ] = torch.tensor(0).to(batch_mask)
                batch_idxs = batch_mask.nonzero().squeeze(-1)
                eos_mask = eos_mask[batch_idxs]
                cand_beams = cand_beams[batch_idxs]
                bbsz_offsets.resize_(new_bsz, 1)
                cand_bbsz_idx = cand_beams.add(bbsz_offsets)
                cand_scores = cand_scores[batch_idxs]
                cand_indices = cand_indices[batch_idxs]
                if prefix_tokens is not None:
                    prefix_tokens = prefix_tokens[batch_idxs]
                src_lengths = src_lengths[batch_idxs]
                blacklist = blacklist[batch_idxs]
                scores = scores.view(bsz, -1)[batch_idxs].view(new_bsz * beam_size, -1)
                tokens = tokens.view(bsz, -1)[batch_idxs].view(new_bsz * beam_size, -1)
                if attn is not None:
                    attn = attn.view(bsz, -1)[batch_idxs].view(
                        new_bsz * beam_size, attn.size(1), -1
                    )
                bsz = new_bsz
            else:
                batch_idxs = None
            # set active_mask so that values > cand_size indicate eos hypos
            # and values < cand_size indicate candidate active hypos.
            # After, the min values per row are the top candidate active hypos
            # Rewrite the operator since the element wise or is not supported in torchscript.
            eos_mask[:, :beam_size] = ~((~blacklist) & (~eos_mask[:, :beam_size]))
            active_mask = torch.add(
                eos_mask.type_as(cand_offsets) * cand_size,
                cand_offsets[: eos_mask.size(1)],
            )
            # get the top beam_size active hypotheses, which are just the hypos
            # with the smallest values in active_mask
            new_blacklist, active_hypos = torch.topk(
                active_mask, k=beam_size, dim=1, largest=False
            )
            # update blacklist to ignore any finalized hypos
            blacklist = new_blacklist.ge(cand_size)[:, :beam_size]
            assert (~blacklist).any(dim=1).all()
            active_bbsz_idx = torch.gather(cand_bbsz_idx, dim=1, index=active_hypos)
            active_scores = torch.gather(cand_scores, dim=1, index=active_hypos)
            active_bbsz_idx = active_bbsz_idx.view(-1)
            active_scores = active_scores.view(-1)
            # copy tokens and scores for active hypotheses
            tokens[:, : step + 1] = torch.index_select(
                tokens[:, : step + 1], dim=0, index=active_bbsz_idx
            )
            tokens.view(bsz, beam_size, -1)[:, :, step + 1] = torch.gather(
                cand_indices, dim=1, index=active_hypos
            )
            if step > 0:
                scores[:, :step] = torch.index_select(
                    scores[:, :step], dim=0, index=active_bbsz_idx
                )
            scores.view(bsz, beam_size, -1)[:, :, step] = torch.gather(
                cand_scores, dim=1, index=active_hypos
            )
            # copy attention for active hypotheses
            if attn is not None:
                attn[:, :, : step + 2] = torch.index_select(
                    attn[:, :, : step + 2], dim=0, index=active_bbsz_idx
                )
            # reorder incremental state in decoder
            reorder_state = active_bbsz_idx
        # sort by score descending
        for sent in range(len(finalized)):
            # make into beam container
            BCList = [
                BeamContainer(elem["score"].item(), elem) for elem in finalized[sent]
            ]
            BCList.sort()
            BCList.reverse()
            finalized[sent] = torch.jit.annotate(
                List[Dict[str, Tensor]], [x.elem for x in BCList]
            )
        return finalized
    def _generate_aux(
        self,
        sample: Dict[str, Dict[str, Tensor]],
        encoder_outs: List[EncoderOut],
        prefix_tokens: Optional[Tensor] = None,
        bos_token: Optional[int] = None,
    ):
        """First-phase beam search: decode transcript hypotheses over the
        source vocabulary and, via `finalize_aux_hypos`, collect per-sentence
        candidates together with the auxiliary decoder outputs that
        `_generate_tgt` consumes in the second phase.
        """
        net_input = sample["net_input"]
        # TODO: should not use audio features...
        src_tokens = net_input["src_tokens"]
        # length of the source text being the character length except EndOfSentence and pad
        src_lengths = (
            (src_tokens.ne(self.src_eos) & src_tokens.ne(self.src_pad)).long().sum(dim=1)
        )
        # bsz: total number of sentences in beam
        input_size = src_tokens.size()
        bsz, src_len = input_size[0], input_size[1]
        beam_size = self.beam_size
        max_len: int = -1
        if self.match_source_len:
            max_len = src_lengths.max().item()
        else:
            max_len = min(
                int(self.max_len_a * src_len + self.max_len_b),
                # exclude the EOS marker
                self.model.max_decoder_positions() - 1,
            )
        # initialize buffers
        scores = (
            torch.zeros(bsz * beam_size, max_len + 1).to(src_tokens).float()
        ) # +1 for eos; pad is never chosen for scoring
        aux_tokens = (
            torch.zeros(bsz * beam_size, max_len + 2)
            .to(src_tokens)
            .long()
            .fill_(self.src_pad)
        ) # +2 for eos and pad
        aux_tokens[:, 0] = self.src_eos if bos_token is None else bos_token
        attn: Optional[Tensor] = None
        # The ignorelist indicates candidates that should be ignored.
        # For example, suppose we're sampling and have already finalized 2/5
        # samples. Then the ignorelist would mark 2 positions as being ignored,
        # so that we only finalize the remaining 3 samples.
        ignorelist = (
            torch.zeros(bsz, beam_size).to(src_tokens).eq(-1)
        ) # forward and backward-compatible False mask
        # list of completed sentences
        finalized = torch.jit.annotate(
            List[List[Dict[str, Tensor]]],
            [torch.jit.annotate(List[Dict[str, Tensor]], []) for i in range(bsz)],
        ) # contains lists of dictionaries of information about the hypothesis being finalized at each step
        finished = [
            False for i in range(bsz)
        ] # a boolean array indicating if the sentence at the index is finished or not
        num_remaining_sent = bsz # number of sentences remaining
        # number of candidate hypos per step
        cand_size = 2 * beam_size # 2 x beam size in case half are EOS
        # offset arrays for converting between different indexing schemes
        bbsz_offsets = (torch.arange(0, bsz) * beam_size).unsqueeze(1).type_as(aux_tokens)
        cand_offsets = torch.arange(0, cand_size).type_as(aux_tokens)
        reorder_state: Optional[Tensor] = None
        batch_idxs: Optional[Tensor] = None
        aux_outputs: Optional[Tensor] = None
        for step in range(max_len + 1): # one extra step for EOS marker
            # reorder decoder internal states based on the prev choice of beams
            # print(f'step: {step}')
            if reorder_state is not None:
                if batch_idxs is not None:
                    # update beam indices to take into account removed sentences
                    corr = batch_idxs - torch.arange(batch_idxs.numel()).type_as(
                        batch_idxs
                    )
                    reorder_state.view(-1, beam_size).add_(
                        corr.unsqueeze(-1) * beam_size
                    )
                self.model.reorder_auxiliary_incremental_state(reorder_state)
                encoder_outs = self.model.reorder_encoder_out(
                    encoder_outs, reorder_state
                )
                # aux_outputs is laid out (steps, bsz*beam, C): select on dim 1
                aux_outputs = aux_outputs.index_select(1, reorder_state)
            lprobs, aux_out, avg_attn_scores = self.model.forward_auxiliary_decoder(
                aux_tokens[:, : step + 1], encoder_outs, self.temperature
            )
            if step == 0:
                # We need to initialize this here as we don't know the last dimension (C)
                # until we do the first step
                aux_outputs = (
                    torch.zeros(max_len + 1, bsz * beam_size, aux_out.shape[-1]).to(src_tokens).float()
                )
            # Assign the auxiliary outputs for this decoding step (only the current decoding step is returned)
            aux_outputs[step] = aux_out.squeeze(1)
            # replace NaNs with -inf so they can never be selected
            lprobs[lprobs != lprobs] = torch.tensor(-math.inf).to(lprobs)
            lprobs[:, self.src_pad] = -math.inf # never select pad
            lprobs[:, self.src_unk] -= self.unk_penalty # apply unk penalty
            # handle max length constraint
            if step >= max_len:
                lprobs[:, : self.src_eos] = -math.inf
                lprobs[:, self.src_eos + 1 :] = -math.inf
            # handle prefix tokens (possibly with different lengths)
            if (
                prefix_tokens is not None
                and step < prefix_tokens.size(1)
                and step < max_len
            ):
                lprobs, aux_tokens, scores = self._prefix_tokens(
                    step, lprobs, scores, aux_tokens, prefix_tokens, beam_size, self.src_pad, self.src_eos
                )
            elif step < self.min_len:
                # minimum length constraint (does not apply if using prefix_tokens)
                lprobs[:, self.src_eos] = -math.inf
            # Record attention scores, only support avg_attn_scores is a Tensor
            if avg_attn_scores is not None:
                if attn is None:
                    attn = torch.empty(
                        bsz * beam_size, avg_attn_scores.size(1), max_len + 2
                    ).to(scores)
                attn[:, :, step + 1].copy_(avg_attn_scores)
            scores = scores.type_as(lprobs)
            eos_bbsz_idx = torch.empty(0).to(
                aux_tokens
            ) # indices of hypothesis ending with eos (finished sentences)
            eos_scores = torch.empty(0).to(
                scores
            ) # scores of hypothesis ending with eos (finished sentences)
            self.src_search.set_src_lengths(src_lengths)
            if self.no_repeat_ngram_size > 0:
                lprobs = self._no_repeat_ngram(aux_tokens, lprobs, bsz, beam_size, step)
            cand_scores, cand_indices, cand_beams = self.src_search.step(
                step,
                lprobs.view(bsz, -1, self.src_vocab_size),
                scores.view(bsz, beam_size, -1)[:, :, :step],
            )
            # cand_bbsz_idx contains beam indices for the top candidate
            # hypotheses, with a range of values: [0, bsz*beam_size),
            # and dimensions: [bsz, cand_size]
            cand_bbsz_idx = cand_beams.add(bbsz_offsets)
            # finalize hypotheses that end in eos
            eos_mask = cand_indices.eq(self.src_eos) & cand_scores.ne(-math.inf)
            eos_mask[:, :beam_size][ignorelist] = torch.tensor(0).to(eos_mask)
            # only consider eos when it's among the top beam_size indices
            eos_bbsz_idx = torch.masked_select(
                cand_bbsz_idx[:, :beam_size], mask=eos_mask[:, :beam_size]
            )
            finalized_sents: List[int] = []
            if eos_bbsz_idx.numel() > 0:
                eos_scores = torch.masked_select(
                    cand_scores[:, :beam_size], mask=eos_mask[:, :beam_size]
                )
                finalized_sents = self.finalize_aux_hypos(
                    step,
                    eos_bbsz_idx,
                    eos_scores,
                    aux_tokens,
                    encoder_outs,
                    aux_outputs,
                    scores,
                    finalized,
                    finished,
                    beam_size,
                    attn,
                    src_lengths,
                    max_len,
                    self.src_eos,
                )
                num_remaining_sent -= len(finalized_sents)
            assert num_remaining_sent >= 0
            if num_remaining_sent == 0:
                break
            assert step < max_len
            if len(finalized_sents) > 0:
                new_bsz = bsz - len(finalized_sents)
                # construct batch_idxs which holds indices of batches to keep for the next pass
                batch_mask = torch.ones(bsz).to(cand_indices)
                batch_mask[
                    torch.tensor(finalized_sents).to(cand_indices)
                ] = torch.tensor(0).to(batch_mask)
                batch_idxs = batch_mask.nonzero().squeeze(-1)
                eos_mask = eos_mask[batch_idxs]
                cand_beams = cand_beams[batch_idxs]
                bbsz_offsets.resize_(new_bsz, 1)
                cand_bbsz_idx = cand_beams.add(bbsz_offsets)
                cand_scores = cand_scores[batch_idxs]
                cand_indices = cand_indices[batch_idxs]
                if prefix_tokens is not None:
                    prefix_tokens = prefix_tokens[batch_idxs]
                src_lengths = src_lengths[batch_idxs]
                ignorelist = ignorelist[batch_idxs]
                scores = scores.view(bsz, -1)[batch_idxs].view(new_bsz * beam_size, -1)
                aux_tokens = aux_tokens.view(bsz, -1)[batch_idxs].view(new_bsz * beam_size, -1)
                if attn is not None:
                    attn = attn.view(bsz, -1)[batch_idxs].view(
                        new_bsz * beam_size, attn.size(1), -1
                    )
                bsz = new_bsz
            else:
                batch_idxs = None
            # set active_mask so that values > cand_size indicate eos hypos
            # and values < cand_size indicate candidate active hypos.
            # After, the min values per row are the top candidate active hypos
            # Rewrite the operator since the element wise or is not supported in torchscript.
            eos_mask[:, :beam_size] = ~((~ignorelist) & (~eos_mask[:, :beam_size]))
            active_mask = torch.add(
                eos_mask.type_as(cand_offsets) * cand_size,
                cand_offsets[: eos_mask.size(1)],
            )
            # get the top beam_size active hypotheses, which are just the hypos
            # with the smallest values in active_mask
            new_ignorelist, active_hypos = torch.topk(
                active_mask, k=beam_size, dim=1, largest=False
            )
            # update ignorelist to ignore any finalized hypos
            ignorelist = new_ignorelist.ge(cand_size)[:, :beam_size]
            assert (~ignorelist).any(dim=1).all()
            active_bbsz_idx = torch.gather(cand_bbsz_idx, dim=1, index=active_hypos)
            active_scores = torch.gather(cand_scores, dim=1, index=active_hypos)
            active_bbsz_idx = active_bbsz_idx.view(-1)
            active_scores = active_scores.view(-1)
            # copy tokens and scores for active hypotheses
            aux_tokens[:, : step + 1] = torch.index_select(
                aux_tokens[:, : step + 1], dim=0, index=active_bbsz_idx
            )
            aux_tokens.view(bsz, beam_size, -1)[:, :, step + 1] = torch.gather(
                cand_indices, dim=1, index=active_hypos
            )
            if step > 0:
                scores[:, :step] = torch.index_select(
                    scores[:, :step], dim=0, index=active_bbsz_idx
                )
            scores.view(bsz, beam_size, -1)[:, :, step] = torch.gather(
                cand_scores, dim=1, index=active_hypos
            )
            # copy attention for active hypotheses
            if attn is not None:
                attn[:, :, : step + 2] = torch.index_select(
                    attn[:, :, : step + 2], dim=0, index=active_bbsz_idx
                )
            # reorder incremental state in decoder
            reorder_state = active_bbsz_idx
        # sort by score descending
        # NOTE(review): unlike _generate_tgt, the n-best list is deliberately(?)
        # left unsorted here — presumably because HierarchicalBeamSearch adds
        # every candidate's score at step 0 of the second phase anyway; confirm.
        # for sent in range(len(finalized)):
        #     # make into beam container
        #     BCList = [
        #         BeamContainer(elem["score"].item(), elem) for elem in finalized[sent]
        #     ]
        #     BCList.sort()
        #     BCList.reverse()
        #     finalized[sent] = torch.jit.annotate(
        #         List[Dict[str, Tensor]], [x.elem for x in BCList]
        #     )
        return finalized
def _prefix_tokens(
    self, step: int, lprobs, scores, tokens, prefix_tokens, beam_size: int, pad, eos
):
    """Handle prefix tokens: force decoding to follow a given target prefix.

    At decoding `step`, constrains `lprobs` so that each beam can only
    continue with prefix_tokens[:, step] (wherever that token is not `pad`),
    while preserving that token's original log-probability.  Returns the
    (possibly replaced) `lprobs`, `tokens` and `scores`.
    """
    # Expand the per-sentence prefix column to one forced token per beam:
    # (bsz,) -> (bsz * beam_size,).
    prefix_toks = prefix_tokens[:, step].unsqueeze(-1).repeat(1, beam_size).view(-1)
    # Remember the model's own score for the forced token ...
    prefix_lprobs = lprobs.gather(-1, prefix_toks.unsqueeze(-1))
    prefix_mask = prefix_toks.ne(pad)
    # ... then wipe the whole vocabulary row to -inf and scatter only that
    # score back, making the forced token the unique finite choice.
    lprobs[prefix_mask] = torch.tensor(-math.inf).to(lprobs)
    lprobs[prefix_mask] = lprobs[prefix_mask].scatter(
        -1, prefix_toks[prefix_mask].unsqueeze(-1), prefix_lprobs[prefix_mask]
    )
    # if prefix includes eos, then we should make sure tokens and
    # scores are the same across all beams
    eos_mask = prefix_toks.eq(eos)
    if eos_mask.any():
        # validate that the first beam matches the prefix
        first_beam = tokens[eos_mask].view(-1, beam_size, tokens.size(-1))[
            :, 0, 1 : step + 1
        ]
        eos_mask_batch_dim = eos_mask.view(-1, beam_size)[:, 0]
        target_prefix = prefix_tokens[eos_mask_batch_dim][:, :step]
        assert (first_beam == target_prefix).all()
        # copy tokens, scores and lprobs from the first beam to all beams
        tokens = self.replicate_first_beam(tokens, eos_mask_batch_dim, beam_size)
        scores = self.replicate_first_beam(scores, eos_mask_batch_dim, beam_size)
        lprobs = self.replicate_first_beam(lprobs, eos_mask_batch_dim, beam_size)
    return lprobs, tokens, scores
def finalize_aux_hypos(
    self,
    step: int,
    bbsz_idx,
    eos_scores,
    tokens,
    encoder_outs,
    decoder_out,
    scores,
    finalized: List[List[Dict[str, Tensor]]],
    finished: List[bool],
    beam_size: int,
    attn: Optional[Tensor],
    src_lengths,
    max_len: int,
    eos,
):
    """Finalize hypothesis, store finalized information in `finalized`, and change `finished` accordingly.

    Unlike the base variant, this also stores the auxiliary decoder features
    and (reordered) encoder outputs for each finalized hypothesis.
    Returns the list of newly finished sentence indices within the batch of
    still-unfinalized sentences.

    Args:
        bbsz_idx (Tensor): flat (batch * beam) indices of the hypotheses
            that just emitted EOS.
        eos_scores: cumulative scores of those hypotheses at EOS.
    """
    assert bbsz_idx.numel() == eos_scores.numel()
    # clone relevant token and attention tensors
    tokens_clone = tokens.index_select(0, bbsz_idx)[
        :, 1 : step + 2
    ]  # skip the first index, which is EOS
    # Auxiliary decoder features for the selected hypotheses; transposed so
    # the hypothesis dimension comes first.
    decoder_out_clone = decoder_out.index_select(1, bbsz_idx)[: step + 1].transpose(0, 1)
    encoder_outs_clone = self.model.reorder_encoder_out(encoder_outs, bbsz_idx)
    tokens_clone[:, step] = eos
    attn_clone = (
        attn.index_select(0, bbsz_idx)[:, :, 1 : step + 2]
        if attn is not None
        else None
    )
    # compute scores per token position
    pos_scores = scores.index_select(0, bbsz_idx)[:, : step + 1]
    pos_scores[:, step] = eos_scores
    # convert from cumulative to per-position scores
    pos_scores[:, 1:] = pos_scores[:, 1:] - pos_scores[:, :-1]
    # normalize sentence-level scores
    if self.normalize_scores:
        eos_scores /= (step + 1) ** self.len_penalty
    # cum_unfin[i] = number of already-finished sentences preceding the
    # i-th *unfinished* sentence; maps an index within the active batch back
    # to the original sentence index.
    cum_unfin: List[int] = []
    prev = 0
    for f in finished:
        if f:
            prev += 1
        else:
            cum_unfin.append(prev)
    # set() is not supported in script export
    sents_seen: Dict[str, Optional[Tensor]] = {}
    for i in range(bbsz_idx.size()[0]):
        idx = bbsz_idx[i]
        score = eos_scores[i]
        # Sentence index within the active batch, and original index.
        unfin_idx = idx // beam_size
        sent = unfin_idx + cum_unfin[unfin_idx]
        # Cannot create dict for key type '(int, int)' in torchscript.
        # The workaround is to cast int to string
        seen = str(sent.item()) + "_" + str(unfin_idx.item())
        if seen not in sents_seen:
            sents_seen[seen] = None
        # Penalize hypotheses longer than the source when matching lengths.
        if self.match_source_len and step > src_lengths[unfin_idx]:
            score = torch.tensor(-math.inf).to(score)
        if len(finalized[sent]) < beam_size:
            if attn_clone is not None:
                # remove padding tokens from attn scores
                hypo_attn = attn_clone[i]
            else:
                hypo_attn = torch.empty(0)
            finalized[sent].append(
                {
                    "tokens": tokens_clone[i],
                    "auxiliary_out": decoder_out_clone[i],
                    "encoder_outs": self.model.reorder_encoder_out(
                        encoder_outs_clone, torch.tensor([i], dtype=torch.long).to(tokens_clone.device)),
                    "score": score,
                    "attention": hypo_attn,  # src_len x tgt_len
                    "alignment": torch.empty(0),
                    "positional_scores": pos_scores[i],
                }
            )
    newly_finished: List[int] = []
    for seen in sents_seen.keys():
        # check termination conditions for this sentence
        sent: int = int(float(seen.split("_")[0]))
        unfin_idx: int = int(float(seen.split("_")[1]))
        if not finished[sent] and self.is_finished(
            step, unfin_idx, max_len, len(finalized[sent]), beam_size
        ):
            finished[sent] = True
            newly_finished.append(unfin_idx)
    return newly_finished
def finalize_hypos(
    self,
    step: int,
    bbsz_idx,
    eos_scores,
    tokens,
    src_tokens,
    scores,
    finalized: List[List[Dict[str, Tensor]]],
    finished: List[bool],
    beam_size: int,
    attn: Optional[Tensor],
    src_lengths,
    max_len: int,
):
    """Finalize hypothesis, store finalized information in `finalized`, and change `finished` accordingly.

    Returns the list of newly finished sentence indices within the batch of
    still-unfinalized sentences.

    Args:
        bbsz_idx (Tensor): flat (batch * beam) indices of the hypotheses
            that just emitted EOS.
        eos_scores: cumulative scores of those hypotheses at EOS.
    """
    assert bbsz_idx.numel() == eos_scores.numel()
    # clone relevant token and attention tensors
    tokens_clone = tokens.index_select(0, bbsz_idx)[
        :, 1 : step + 2
    ]  # skip the first index, which is EOS
    src_tokens_clone = src_tokens.index_select(0, bbsz_idx)
    tokens_clone[:, step] = self.eos
    attn_clone = (
        attn.index_select(0, bbsz_idx)[:, :, 1 : step + 2]
        if attn is not None
        else None
    )
    # compute scores per token position
    pos_scores = scores.index_select(0, bbsz_idx)[:, : step + 1]
    pos_scores[:, step] = eos_scores
    # convert from cumulative to per-position scores
    pos_scores[:, 1:] = pos_scores[:, 1:] - pos_scores[:, :-1]
    # normalize sentence-level scores
    if self.normalize_scores:
        eos_scores /= (step + 1) ** self.len_penalty
    # cum_unfin[i] = number of already-finished sentences preceding the
    # i-th *unfinished* sentence; maps an index within the active batch back
    # to the original sentence index.
    cum_unfin: List[int] = []
    prev = 0
    for f in finished:
        if f:
            prev += 1
        else:
            cum_unfin.append(prev)
    # set() is not supported in script export
    sents_seen: Dict[str, Optional[Tensor]] = {}
    for i in range(bbsz_idx.size()[0]):
        idx = bbsz_idx[i]
        score = eos_scores[i]
        unfin_idx = idx // beam_size
        sent = unfin_idx + cum_unfin[unfin_idx]
        # Cannot create dict for key type '(int, int)' in torchscript.
        # The workaround is to cast int to string
        seen = str(sent.item()) + "_" + str(unfin_idx.item())
        if seen not in sents_seen:
            sents_seen[seen] = None
        # Penalize hypotheses longer than the source when matching lengths.
        if self.match_source_len and step > src_lengths[unfin_idx]:
            score = torch.tensor(-math.inf).to(score)
        if len(finalized[sent]) < beam_size:
            if attn_clone is not None:
                # remove padding tokens from attn scores
                hypo_attn = attn_clone[i]
            else:
                hypo_attn = torch.empty(0)
            # Keep only the non-pad source tokens for this hypothesis.
            src_mask = src_tokens_clone[i] != self.src_pad
            finalized[sent].append(
                {
                    "tokens": tokens_clone[i],
                    "score": score,
                    "aux_tokens": src_tokens_clone[i].masked_select(src_mask),
                    "attention": hypo_attn,  # src_len x tgt_len
                    "alignment": torch.empty(0),
                    "positional_scores": pos_scores[i],
                }
            )
    newly_finished: List[int] = []
    for seen in sents_seen.keys():
        # check termination conditions for this sentence
        sent: int = int(float(seen.split("_")[0]))
        unfin_idx: int = int(float(seen.split("_")[1]))
        if not finished[sent] and self.is_finished(
            step, unfin_idx, max_len, len(finalized[sent]), beam_size
        ):
            finished[sent] = True
            newly_finished.append(unfin_idx)
    return newly_finished
class EnsembleTwoPhaseModel(EnsembleModel):
    """A wrapper around an ensemble of models.

    Extends EnsembleModel with an auxiliary decoder per model: presumably the
    auxiliary decoder is run first and its features (`aux_decoder_out`) and
    tokens (`aux_tokens`) condition the main decoder — TODO confirm against
    the caller.
    """

    # Per-model incremental decoding caches for the auxiliary decoder;
    # annotated explicitly so the attribute is scriptable.
    auxiliary_incremental_states: List[Dict[str, Dict[str, Optional[Tensor]]]]

    def __init__(self, models):
        super().__init__(models)
        # One empty incremental-state dict per ensemble member.
        self.auxiliary_incremental_states = torch.jit.annotate(
            List[Dict[str, Dict[str, Optional[Tensor]]]],
            [
                torch.jit.annotate(Dict[str, Dict[str, Optional[Tensor]]], {})
                for _ in range(self.models_size)
            ],
        )

    def reset_incremental_state(self):
        """Clear both the base caches and the auxiliary-decoder caches."""
        super().reset_incremental_state()
        self.auxiliary_incremental_states = torch.jit.annotate(
            List[Dict[str, Dict[str, Optional[Tensor]]]],
            [
                torch.jit.annotate(Dict[str, Dict[str, Optional[Tensor]]], {})
                for _ in range(self.models_size)
            ],
        )
        return

    @torch.jit.export
    def forward_decoder(
        self, tokens,
        encoder_outs: List[EncoderOut],
        aux_tokens: Tensor,
        aux_decoder_out: Tensor,
        temperature: float = 1.0
    ):
        """One step of the main decoder, log-prob-averaged over the ensemble.

        Returns (log-probs for the last position, attention or None).  For a
        single-model ensemble the per-model result is returned directly.
        """
        log_probs = []
        avg_attn: Optional[Tensor] = None
        encoder_out: Optional[EncoderOut] = None
        for i, model in enumerate(self.models):
            if self.has_encoder():
                encoder_out = encoder_outs[i]
            # decode each model
            if self.has_incremental_states():
                decoder_out = model.forward_decoder(
                    tokens,
                    auxiliary_out=aux_decoder_out,
                    auxiliary_tokens=aux_tokens,
                    encoder_out=encoder_out,
                    incremental_state=self.incremental_states[i],
                )
            else:
                decoder_out = model.forward_decoder(
                    tokens,
                    auxiliary_out=aux_decoder_out,
                    auxiliary_tokens=aux_tokens,
                    encoder_out=encoder_out)
            # Extract attention if the decoder returned it, either as a raw
            # tensor or inside a dict under the "attn" key.
            attn: Optional[Tensor] = None
            decoder_len = len(decoder_out)
            if decoder_len > 1 and decoder_out[1] is not None:
                if isinstance(decoder_out[1], Tensor):
                    attn = decoder_out[1]
                else:
                    attn_holder = decoder_out[1]["attn"]
                    if isinstance(attn_holder, Tensor):
                        attn = attn_holder
                    elif attn_holder is not None:
                        attn = attn_holder[0]
                if attn is not None:
                    # Keep only the attention for the last generated position.
                    attn = attn[:, -1, :]
            # Apply temperature to the logits of the last position only.
            decoder_out_tuple = (
                decoder_out[0][:, -1:, :].div_(temperature),
                None if decoder_len <= 1 else decoder_out[1],
            )
            probs = model.get_normalized_probs(
                decoder_out_tuple, log_probs=True, sample=None
            )
            probs = probs[:, -1, :]
            if self.models_size == 1:
                # Fast path: no averaging needed for a single model.
                return probs, attn
            log_probs.append(probs)
            if attn is not None:
                if avg_attn is None:
                    avg_attn = attn
                else:
                    avg_attn.add_(attn)
        # Average probabilities (not log-probs) via logsumexp - log(n).
        avg_probs = torch.logsumexp(torch.stack(log_probs, dim=0), dim=0) - math.log(
            self.models_size
        )
        if avg_attn is not None:
            avg_attn.div_(self.models_size)
        return avg_probs, avg_attn

    @torch.jit.export
    def reorder_auxiliary_incremental_state(self, new_order):
        """Reorder each model's auxiliary-decoder cache to follow beam reordering."""
        if not self.has_incremental_states():
            return
        for i, model in enumerate(self.models):
            model.auxiliary_decoder.reorder_incremental_state(
                self.auxiliary_incremental_states[i], new_order
            )

    @torch.jit.export
    def forward_auxiliary_decoder(
        self, tokens, encoder_outs: List[EncoderOut], temperature: float = 1.0
    ):
        """One step of the auxiliary decoder, averaged over the ensemble.

        Returns (log-probs for the last position, decoder feature embeddings,
        attention or None).  Features are obtained with features_only=True and
        logits by applying the auxiliary decoder's output layer explicitly.
        """
        log_probs = []
        outs = []
        avg_attn: Optional[Tensor] = None
        encoder_out: Optional[EncoderOut] = None
        for i, model in enumerate(self.models):
            if self.has_encoder():
                encoder_out = encoder_outs[i]
            # decode each model
            if self.has_incremental_states():
                decoder_out = model.auxiliary_decoder.forward(
                    tokens,
                    encoder_out=encoder_out,
                    incremental_state=self.auxiliary_incremental_states[i],
                    features_only=True,
                )
            else:
                decoder_out = model.auxiliary_decoder.forward(
                    tokens, encoder_out=encoder_out, features_only=True)
            # Keep the pre-projection features; project to vocabulary logits.
            decoder_out_emb = decoder_out[0]
            decoder_out = (model.auxiliary_decoder.output_layer(decoder_out[0]), decoder_out[1])
            attn: Optional[Tensor] = None
            decoder_len = len(decoder_out)
            if decoder_len > 1 and decoder_out[1] is not None:
                if isinstance(decoder_out[1], Tensor):
                    attn = decoder_out[1]
                else:
                    attn_holder = decoder_out[1]["attn"]
                    if isinstance(attn_holder, Tensor):
                        attn = attn_holder
                    elif attn_holder is not None:
                        attn = attn_holder[0]
                if attn is not None:
                    attn = attn[:, -1, :]
            # NOTE: uses non-in-place div here (vs div_ in forward_decoder).
            decoder_out_tuple = (
                decoder_out[0][:, -1:, :].div(temperature),
                None if decoder_len <= 1 else decoder_out[1],
            )
            probs = model.get_normalized_probs(
                decoder_out_tuple, log_probs=True, sample=None
            )
            probs = probs[:, -1, :]
            if self.models_size == 1:
                return probs, decoder_out_emb, attn
            log_probs.append(probs)
            outs.append(decoder_out_emb)
            if attn is not None:
                if avg_attn is None:
                    avg_attn = attn
                else:
                    avg_attn.add_(attn)
        avg_probs = torch.logsumexp(torch.stack(log_probs, dim=0), dim=0) - math.log(
            self.models_size
        )
        # Arithmetic mean of the feature embeddings across models.
        avg_decoder_outs = torch.sum(torch.stack(outs, dim=0), dim=0).div_(self.models_size)
        if avg_attn is not None:
            avg_attn.div_(self.models_size)
        return avg_probs, avg_decoder_outs, avg_attn
| 41.14494
| 110
| 0.556894
| 5,680
| 47,975
| 4.480106
| 0.081162
| 0.030809
| 0.014697
| 0.005659
| 0.774865
| 0.747436
| 0.726451
| 0.703148
| 0.690573
| 0.674539
| 0
| 0.010007
| 0.354268
| 47,975
| 1,165
| 111
| 41.180258
| 0.811421
| 0.182137
| 0
| 0.612644
| 0
| 0
| 0.009661
| 0
| 0
| 0
| 0
| 0.000858
| 0.014943
| 1
| 0.017241
| false
| 0
| 0.008046
| 0
| 0.045977
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 4
|
16be5a45756dcd71abba477667e1911eccef139f
| 129
|
py
|
Python
|
pages/admin.py
|
kzambrow/cs347
|
bcb711545a9f3dfcb298b8a20cf5106d13701cc1
|
[
"MIT"
] | null | null | null |
pages/admin.py
|
kzambrow/cs347
|
bcb711545a9f3dfcb298b8a20cf5106d13701cc1
|
[
"MIT"
] | null | null | null |
pages/admin.py
|
kzambrow/cs347
|
bcb711545a9f3dfcb298b8a20cf5106d13701cc1
|
[
"MIT"
] | null | null | null |
from django.contrib import admin
# Register your models here.

# Customize the title shown in the Django admin header and login page.
admin.site.site_header = 'IOT Intrusion Detection Administration'
| 25.8
| 65
| 0.813953
| 17
| 129
| 6.117647
| 0.882353
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.124031
| 129
| 4
| 66
| 32.25
| 0.920354
| 0.20155
| 0
| 0
| 0
| 0
| 0.376238
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.5
| 0
| 0.5
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 4
|
16e7e5ab26e6086c1e610dcb443a29cd1b99c8f0
| 1,203
|
py
|
Python
|
utils/loggers.py
|
Egolas/TC_LAAU
|
6b9f700f642ca3187f1556434b5fb2308f065564
|
[
"MIT"
] | null | null | null |
utils/loggers.py
|
Egolas/TC_LAAU
|
6b9f700f642ca3187f1556434b5fb2308f065564
|
[
"MIT"
] | null | null | null |
utils/loggers.py
|
Egolas/TC_LAAU
|
6b9f700f642ca3187f1556434b5fb2308f065564
|
[
"MIT"
] | null | null | null |
import logging
class Logger():
    """Wrapper around the root ``logging`` logger that logs to both the
    console and ``<using_config.logdir>/train.log``.

    Fix: handlers are attached at most once.  The original implementation
    appended a fresh StreamHandler and FileHandler to the root logger on
    every ``Logger(...)`` construction, so each message was emitted once per
    instance created.
    """

    def __init__(self, using_config):
        """Configure the root logger.

        Args:
            using_config: object with a ``logdir`` attribute (directory that
                receives ``train.log``).
        """
        logger = logging.getLogger()
        logger.setLevel('DEBUG')
        BASIC_FORMAT = '%(asctime)s %(levelname)s:%(message)s'
        formatter = logging.Formatter(BASIC_FORMAT)
        # Attach handlers only if none are present yet, to avoid duplicated
        # output when Logger is instantiated more than once.
        # NOTE(review): if other code already attached its own root handlers,
        # ours are skipped — acceptable for this training-script use case.
        if not logger.handlers:
            chlr = logging.StreamHandler()
            chlr.setFormatter(formatter)
            chlr.setLevel('DEBUG')
            fhlr = logging.FileHandler(using_config.logdir + '/train.log')
            fhlr.setFormatter(formatter)
            logger.addHandler(chlr)
            logger.addHandler(fhlr)
        self.logger = logger

    def info(self, *args, **kwargs):
        """Delegate to ``logging.Logger.info``."""
        return self.logger.info(*args, **kwargs)

    def debug(self, *args, **kwargs):
        """Delegate to ``logging.Logger.debug``."""
        return self.logger.debug(*args, **kwargs)

    def warning(self, *args, **kwargs):
        """Delegate to ``logging.Logger.warning``."""
        return self.logger.warning(*args, **kwargs)

    def error(self, *args, **kwargs):
        """Delegate to ``logging.Logger.error``."""
        return self.logger.error(*args, **kwargs)

    def critical(self, *args, **kwargs):
        """Delegate to ``logging.Logger.critical``."""
        return self.logger.critical(*args, **kwargs)

    def log(self, *args, **kwargs):
        """Delegate to ``logging.Logger.log``."""
        return self.logger.log(*args, **kwargs)

    def exception(self, *args, **kwargs):
        """Delegate to ``logging.Logger.exception``."""
        return self.logger.exception(*args, **kwargs)
| 27.976744
| 70
| 0.619285
| 134
| 1,203
| 5.5
| 0.268657
| 0.189959
| 0.132972
| 0.189959
| 0.284939
| 0.284939
| 0
| 0
| 0
| 0
| 0
| 0
| 0.232751
| 1,203
| 42
| 71
| 28.642857
| 0.798483
| 0
| 0
| 0
| 0
| 0
| 0.047382
| 0.020781
| 0
| 0
| 0
| 0
| 0
| 1
| 0.275862
| false
| 0
| 0.034483
| 0.241379
| 0.586207
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 4
|
bc482f3bca38ad43f6717bc3fc9b850c605338ca
| 154
|
py
|
Python
|
chengyubert/optim/__init__.py
|
VisualJoyce/ChengyuBERT
|
605db3a4b3241dd4d02baa41a68bf23b5b00b36d
|
[
"MIT"
] | 8
|
2020-12-11T13:06:16.000Z
|
2022-03-01T13:47:51.000Z
|
chengyubert/optim/__init__.py
|
VisualJoyce/ChengyuBERT
|
605db3a4b3241dd4d02baa41a68bf23b5b00b36d
|
[
"MIT"
] | 18
|
2020-12-31T07:32:55.000Z
|
2022-02-07T08:33:30.000Z
|
chengyubert/optim/__init__.py
|
VisualJoyce/ChengyuBERT
|
605db3a4b3241dd4d02baa41a68bf23b5b00b36d
|
[
"MIT"
] | 3
|
2021-03-25T01:08:56.000Z
|
2022-03-22T09:05:57.000Z
|
"""
Copyright (c) Microsoft Corporation.
Licensed under the MIT license.
"""
from .sched import noam_schedule, warmup_linear, vqa_schedule, get_lr_sched
| 22
| 75
| 0.785714
| 21
| 154
| 5.52381
| 0.904762
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.123377
| 154
| 6
| 76
| 25.666667
| 0.859259
| 0.441558
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 4
|
bc511d30c3e5d85c7985c32edc3b36b8b5459805
| 278
|
py
|
Python
|
flat_sales/flat_sales/doctype/payment_intimation/test_payment_intimation.py
|
swamedh/flat_sales
|
28f3d517e06591669c24accaba4e0683eabe8901
|
[
"MIT"
] | null | null | null |
flat_sales/flat_sales/doctype/payment_intimation/test_payment_intimation.py
|
swamedh/flat_sales
|
28f3d517e06591669c24accaba4e0683eabe8901
|
[
"MIT"
] | null | null | null |
flat_sales/flat_sales/doctype/payment_intimation/test_payment_intimation.py
|
swamedh/flat_sales
|
28f3d517e06591669c24accaba4e0683eabe8901
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
# Copyright (c) 2015, Deepak and Contributors
# See license.txt
from __future__ import unicode_literals
import frappe
import unittest
# test_records = frappe.get_test_records('Payment Intimation')
class TestPaymentIntimation(unittest.TestCase):
    """Placeholder test case for the 'Payment Intimation' doctype; no tests yet."""
    pass
| 21.384615
| 62
| 0.780576
| 34
| 278
| 6.147059
| 0.823529
| 0.105263
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.020576
| 0.125899
| 278
| 12
| 63
| 23.166667
| 0.839506
| 0.510791
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0.2
| 0.6
| 0
| 0.8
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 1
| 0
|
0
| 4
|
bc6a9054e4861fa0582118a5f32f82c3e6794d28
| 157
|
py
|
Python
|
LABWORK1/Scripts/django-admin.py
|
maxovic/summerpractice2019
|
0b61ca6302f74618a62bad60615c47f29fa531cb
|
[
"MIT"
] | null | null | null |
LABWORK1/Scripts/django-admin.py
|
maxovic/summerpractice2019
|
0b61ca6302f74618a62bad60615c47f29fa531cb
|
[
"MIT"
] | null | null | null |
LABWORK1/Scripts/django-admin.py
|
maxovic/summerpractice2019
|
0b61ca6302f74618a62bad60615c47f29fa531cb
|
[
"MIT"
] | null | null | null |
#!D:\KBTU_LIFE\backend\LABWORK1\Scripts\python.exe
from django.core import management
if __name__ == "__main__":
    # Delegate to Django's management command-line interface when this
    # script is executed directly.
    management.execute_from_command_line()
| 26.166667
| 50
| 0.789809
| 21
| 157
| 5.333333
| 0.904762
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.007042
| 0.095541
| 157
| 5
| 51
| 31.4
| 0.78169
| 0.312102
| 0
| 0
| 0
| 0
| 0.074766
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.333333
| 0
| 0.333333
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 4
|
bc6bb62456ccb44a104766d7588dc5e17d0bd397
| 1,623
|
py
|
Python
|
clients/python-fastapi/generated/src/openapi_server/models/pipeline_step_impl.py
|
cliffano/jenkins-api-clients-generator
|
522d02b3a130a29471df5ec1d3d22c822b3d0813
|
[
"MIT"
] | null | null | null |
clients/python-fastapi/generated/src/openapi_server/models/pipeline_step_impl.py
|
cliffano/jenkins-api-clients-generator
|
522d02b3a130a29471df5ec1d3d22c822b3d0813
|
[
"MIT"
] | null | null | null |
clients/python-fastapi/generated/src/openapi_server/models/pipeline_step_impl.py
|
cliffano/jenkins-api-clients-generator
|
522d02b3a130a29471df5ec1d3d22c822b3d0813
|
[
"MIT"
] | null | null | null |
# coding: utf-8
from __future__ import annotations
from datetime import date, datetime # noqa: F401
import re # noqa: F401
from typing import Any, Dict, List, Optional # noqa: F401
from pydantic import AnyUrl, BaseModel, EmailStr, validator # noqa: F401
from openapi_server.models.input_step_impl import InputStepImpl
from openapi_server.models.pipeline_step_impllinks import PipelineStepImpllinks
class PipelineStepImpl(BaseModel):
    """NOTE: This class is auto generated by OpenAPI Generator (https://openapi-generator.tech).

    Do not edit the class manually.

    PipelineStepImpl - a model defined in OpenAPI

        _class: The _class of this PipelineStepImpl [Optional].
        links: The links of this PipelineStepImpl [Optional].
        display_name: The display_name of this PipelineStepImpl [Optional].
        duration_in_millis: The duration_in_millis of this PipelineStepImpl [Optional].
        id: The id of this PipelineStepImpl [Optional].
        input: The input of this PipelineStepImpl [Optional].
        result: The result of this PipelineStepImpl [Optional].
        start_time: The start_time of this PipelineStepImpl [Optional].
        state: The state of this PipelineStepImpl [Optional].
    """

    # All fields are optional per the OpenAPI schema; defaults are None.
    _class: Optional[str] = None
    links: Optional[PipelineStepImpllinks] = None
    display_name: Optional[str] = None
    duration_in_millis: Optional[int] = None
    id: Optional[str] = None
    input: Optional[InputStepImpl] = None
    result: Optional[str] = None
    start_time: Optional[str] = None
    state: Optional[str] = None

# Resolve forward references (links/input types) now that all models exist.
PipelineStepImpl.update_forward_refs()
| 37.744186
| 96
| 0.735675
| 197
| 1,623
| 5.923858
| 0.350254
| 0.046272
| 0.169666
| 0.231362
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.009916
| 0.192237
| 1,623
| 42
| 97
| 38.642857
| 0.880244
| 0.492298
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.388889
| 0
| 0.944444
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 4
|
bc71c22c730c9017fa5f0b1dbe330f244e29fcf2
| 256
|
py
|
Python
|
tests/models/control.py
|
IntegrCiTy/zerobnl
|
7daafc67f945b3797b465674272302de113f46f2
|
[
"Apache-2.0"
] | 2
|
2018-10-23T12:02:25.000Z
|
2019-12-21T09:07:02.000Z
|
tests/models/control.py
|
IntegrCiTy/zerobnl
|
7daafc67f945b3797b465674272302de113f46f2
|
[
"Apache-2.0"
] | 5
|
2018-11-20T07:40:37.000Z
|
2019-01-30T18:10:34.000Z
|
tests/models/control.py
|
IntegrCiTy/zerobnl
|
7daafc67f945b3797b465674272302de113f46f2
|
[
"Apache-2.0"
] | null | null | null |
class Model:
    """Bang-bang controller: toggles the binary output `io` based on the
    state of charge `SoC` (on below 0.05, off above 0.95)."""

    def __init__(self):
        # State of charge and control output both start at zero.
        self.SoC = 0.0
        self.io = 0.0

    def step(self, value):
        """Advance one control step; `value` is accepted but unused here."""
        nearly_full = self.SoC > 0.95
        nearly_empty = self.SoC < 0.05
        # Switch off when full and currently on.
        if nearly_full and self.io == 1.0:
            self.io = 0.0
        # Switch on when empty and currently off.
        if nearly_empty and self.io == 0.0:
            self.io = 1.0
| 23.272727
| 46
| 0.46875
| 45
| 256
| 2.577778
| 0.333333
| 0.258621
| 0.206897
| 0.206897
| 0.155172
| 0
| 0
| 0
| 0
| 0
| 0
| 0.115385
| 0.390625
| 256
| 10
| 47
| 25.6
| 0.628205
| 0
| 0
| 0.222222
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.222222
| false
| 0
| 0
| 0
| 0.333333
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 4
|
bc8a44e9af1696b1f8944dfde715149b7c4354be
| 760
|
py
|
Python
|
exercises.py
|
hauntsaninja/importlib_metadata
|
2f05392ca980952a6960d82b2f2d2ea10aa53239
|
[
"Apache-2.0"
] | 42
|
2020-10-24T16:41:15.000Z
|
2022-03-09T06:17:08.000Z
|
exercises.py
|
hauntsaninja/importlib_metadata
|
2f05392ca980952a6960d82b2f2d2ea10aa53239
|
[
"Apache-2.0"
] | 770
|
2020-10-22T14:05:50.000Z
|
2022-03-30T15:49:13.000Z
|
exercises.py
|
hauntsaninja/importlib_metadata
|
2f05392ca980952a6960d82b2f2d2ea10aa53239
|
[
"Apache-2.0"
] | 33
|
2020-10-24T16:50:36.000Z
|
2022-03-31T16:20:55.000Z
|
from pytest_perf.deco import extras
@extras('perf')
def discovery_perf():
    "discovery"
    # NOTE(review): the "# end warmup" marker appears to separate the
    # pytest-perf warmup phase from the measured statement — confirm
    # against the pytest-perf documentation.
    import importlib_metadata  # end warmup
    importlib_metadata.distribution('ipython')
def entry_points_perf():
    "entry_points()"
    # Measures entry_points() with the import counted as warmup.
    import importlib_metadata  # end warmup
    importlib_metadata.entry_points()
@extras('perf')
def cached_distribution_perf():
    "cached distribution"
    # First lookup happens during warmup, so the measured call hits the cache.
    import importlib_metadata
    importlib_metadata.distribution('ipython')  # end warmup
    importlib_metadata.distribution('ipython')
@extras('perf')
def uncached_distribution_perf():
    "uncached distribution"
    import importlib
    import importlib_metadata
    # end warmup
    # Invalidate import caches so the measured lookup cannot hit a cache.
    importlib.invalidate_caches()
    importlib_metadata.distribution('ipython')
| 20.540541
| 60
| 0.742105
| 79
| 760
| 6.886076
| 0.253165
| 0.28125
| 0.169118
| 0.264706
| 0.373162
| 0.373162
| 0.180147
| 0
| 0
| 0
| 0
| 0
| 0.168421
| 760
| 36
| 61
| 21.111111
| 0.860759
| 0.146053
| 0
| 0.478261
| 0
| 0
| 0.144663
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.173913
| true
| 0
| 0.521739
| 0
| 0.695652
| 0
| 0
| 0
| 0
| null | 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 4
|
bca428f1c1bab9cde2bfcb7273f1a7f8d7ce9e83
| 2,811
|
py
|
Python
|
ocial/ocial_project/topics/decorators.py
|
kasimbozdag/swe_574
|
a77fa29fd80c713cd202ccbb82cfcadfa52b81fa
|
[
"MIT"
] | 1
|
2019-09-29T12:54:58.000Z
|
2019-09-29T12:54:58.000Z
|
ocial/ocial_project/topics/decorators.py
|
kasimbozdag/swe_574
|
a77fa29fd80c713cd202ccbb82cfcadfa52b81fa
|
[
"MIT"
] | 55
|
2019-09-26T16:29:22.000Z
|
2022-02-10T11:28:32.000Z
|
ocial/ocial_project/topics/decorators.py
|
kasimbozdag/swe_574
|
a77fa29fd80c713cd202ccbb82cfcadfa52b81fa
|
[
"MIT"
] | null | null | null |
from django.core.exceptions import PermissionDenied
from .models import *
def teacher_required_factory(model, id_kwarg, teacher_of):
    """Internal factory consolidating the seven near-identical decorators.

    Builds a view decorator that fetches ``model.objects.get(pk=kwargs[id_kwarg])``
    on each request and calls the wrapped view only when ``teacher_of(obj)``
    equals ``request.user``; any other user gets ``PermissionDenied``.
    """
    def decorator(function):
        def wrap(request, *args, **kwargs):
            obj = model.objects.get(pk=kwargs[id_kwarg])
            if teacher_of(obj) == request.user:
                return function(request, *args, **kwargs)
            else:
                raise PermissionDenied
        # Preserve the wrapped view's metadata, as the original decorators did.
        wrap.__doc__ = function.__doc__
        wrap.__name__ = function.__name__
        return wrap
    return decorator


def course_teacher_is_user(function):
    """Allow only the teacher of the Course in kwargs['course_id']."""
    return teacher_required_factory(
        Course, 'course_id', lambda o: o.teacher
    )(function)


def glossary_teacher_is_user(function):
    """Allow only the teacher of the Glossary's course (kwargs['glossary_id'])."""
    return teacher_required_factory(
        Glossary, 'glossary_id', lambda o: o.course.teacher
    )(function)


def section_teacher_is_user(function):
    """Allow only the teacher of the Section's course (kwargs['section_id'])."""
    return teacher_required_factory(
        Section, 'section_id', lambda o: o.course.teacher
    )(function)


def lecture_teacher_is_user(function):
    """Allow only the teacher owning the Lecture (kwargs['lecture_id'])."""
    return teacher_required_factory(
        Lecture, 'lecture_id', lambda o: o.section.course.teacher
    )(function)


def quiz_teacher_is_user(function):
    """Allow only the teacher owning the Quiz (kwargs['quiz_id'])."""
    return teacher_required_factory(
        Quiz, 'quiz_id', lambda o: o.section.course.teacher
    )(function)


def question_teacher_is_user(function):
    """Allow only the teacher owning the Question (kwargs['question_id'])."""
    return teacher_required_factory(
        Question, 'question_id', lambda o: o.quiz.section.course.teacher
    )(function)


def choice_teacher_is_user(function):
    """Allow only the teacher owning the Choice (kwargs['choice_id'])."""
    return teacher_required_factory(
        Choice, 'choice_id', lambda o: o.question.quiz.section.course.teacher
    )(function)
| 35.582278
| 71
| 0.660263
| 316
| 2,811
| 5.43038
| 0.10443
| 0.089744
| 0.138695
| 0.085664
| 0.752914
| 0.752914
| 0.752914
| 0.752914
| 0.752914
| 0.569347
| 0
| 0
| 0.23408
| 2,811
| 79
| 72
| 35.582278
| 0.797027
| 0
| 0
| 0.680556
| 0
| 0
| 0.023826
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.194444
| false
| 0
| 0.027778
| 0
| 0.416667
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 4
|
bcb24fabd13ef1030f3a10f8992a5028a357bc96
| 36
|
py
|
Python
|
docs/core/howto/tutorial/listings/finger/finger/__init__.py
|
giadram/twisted
|
4771b1340b822d20d0664bb7d8334e8fb7e52863
|
[
"MIT",
"Unlicense"
] | 4,612
|
2015-01-01T12:57:23.000Z
|
2022-03-30T01:08:23.000Z
|
docs/core/howto/tutorial/listings/finger/finger/__init__.py
|
giadram/twisted
|
4771b1340b822d20d0664bb7d8334e8fb7e52863
|
[
"MIT",
"Unlicense"
] | 1,243
|
2015-01-23T17:23:59.000Z
|
2022-03-28T13:46:17.000Z
|
docs/core/howto/tutorial/listings/finger/finger/__init__.py
|
giadram/twisted
|
4771b1340b822d20d0664bb7d8334e8fb7e52863
|
[
"MIT",
"Unlicense"
] | 1,236
|
2015-01-13T14:41:26.000Z
|
2022-03-17T07:12:36.000Z
|
"""
Finger example application.
"""
| 9
| 27
| 0.666667
| 3
| 36
| 8
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.138889
| 36
| 3
| 28
| 12
| 0.774194
| 0.75
| 0
| null | 0
| null | 0
| 0
| null | 0
| 0
| 0
| null | 1
| null | true
| 0
| 0
| null | null | null | 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 4
|
bcc8de202bbd86325e25bc06d7290dd13396e94e
| 8,679
|
py
|
Python
|
test/test_recog.py
|
gaochangfeng/espnet
|
dcb281a0a9eb52433dd4f8338b163f592e635303
|
[
"Apache-2.0"
] | 5
|
2021-04-17T13:12:20.000Z
|
2022-02-22T09:36:45.000Z
|
test/test_recog.py
|
JaejinCho/espnet_spkidtts
|
a52bdebb08558b63df23564d6e67dfcba8a41d78
|
[
"Apache-2.0"
] | null | null | null |
test/test_recog.py
|
JaejinCho/espnet_spkidtts
|
a52bdebb08558b63df23564d6e67dfcba8a41d78
|
[
"Apache-2.0"
] | 5
|
2020-02-24T08:13:54.000Z
|
2022-02-22T09:03:09.000Z
|
# coding: utf-8
# Copyright 2018 Hiroshi Seki
# Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0)
import espnet.nets.pytorch_backend.lm.default as lm_pytorch
import espnet.lm.chainer_backend.lm as lm_chainer
import argparse
import importlib
import numpy
import pytest
def make_arg(**kwargs):
    """Build an argparse.Namespace of default E2E-ASR options.

    Any keyword arguments override (or extend) the defaults.
    """
    params = {
        "elayers": 4,
        "subsample": "1_2_2_1_1",
        "etype": "blstmp",
        "eunits": 100,
        "eprojs": 100,
        "dtype": "lstm",
        "dlayers": 1,
        "dunits": 300,
        "atype": "location",
        "aconv_chans": 10,
        "aconv_filts": 100,
        "mtlalpha": 0.5,
        "lsm_type": "",
        "lsm_weight": 0.0,
        "sampling_probability": 0.0,
        "adim": 320,
        "dropout_rate": 0.0,
        "dropout_rate_decoder": 0.0,
        "nbest": 5,
        "beam_size": 3,
        "penalty": 0.5,
        "maxlenratio": 1.0,
        "minlenratio": 0.0,
        "ctc_weight": 0.2,
        "ctc_window_margin": 0,
        "verbose": 2,
        "char_list": ["a", "i", "u", "e", "o"],
        "outdir": None,
        "ctc_type": "warpctc",
        "report_cer": False,
        "report_wer": False,
        "sym_space": "<space>",
        "sym_blank": "<blank>",
        "context_residual": False,
        "use_frontend": False,
        "replace_sos": False,
        "tgt_lang": False,
    }
    params.update(kwargs)
    return argparse.Namespace(**params)
def init_torch_weight_const(m, val):
    """Fill every parameter tensor of the torch module `m` with `val` in place."""
    for param in m.parameters():
        param.data.fill_(val)
def init_chainer_weight_const(m, val):
    """Overwrite every parameter array of the chainer model `m` with `val` in place."""
    for param in m.params():
        param.data[:] = val
@pytest.mark.parametrize(("etype", "dtype", "m_str", "text_idx1"), [
    ("blstmp", "lstm", "espnet.nets.chainer_backend.e2e_asr", 0),
    ("blstmp", "lstm", "espnet.nets.pytorch_backend.e2e_asr", 1),
    ("vggblstmp", "lstm", "espnet.nets.chainer_backend.e2e_asr", 2),
    ("vggblstmp", "lstm", "espnet.nets.pytorch_backend.e2e_asr", 3),
    ("bgrup", "gru", "espnet.nets.chainer_backend.e2e_asr", 4),
    ("bgrup", "gru", "espnet.nets.pytorch_backend.e2e_asr", 5),
    ("vggbgrup", "gru", "espnet.nets.chainer_backend.e2e_asr", 6),
    ("vggbgrup", "gru", "espnet.nets.pytorch_backend.e2e_asr", 7),
])
def test_recognition_results(etype, dtype, m_str, text_idx1):
    """Check that an E2E model with constant-initialized weights produces
    the expected decoded text for each backend/encoder combination.

    `text_idx1` selects the row of expected texts for this parametrization;
    the inner loop index selects the column per ctc_weight.
    """
    # Tiny constant used to initialize all weights deterministically.
    const = 1e-4
    numpy.random.seed(1)
    # Expected outputs: one row per parametrized case, one column per
    # ctc_weight value tested below.
    seq_true_texts = ([["o", "iuiuiuiuiuiuiuiuo", "aiaiaiaiaiaiaiaio"],
                       ["o", "uiuiuiuiuiuiuiuio", "aiaiaiaiaiaiaiaio"],
                       ["o", "iuiuiuiuiuiuiuiuo", "aiaiaiaiaiaiaiaio"],
                       ["o", "uiuiuiuiuiuiuiuio", "aiaiaiaiaiaiaiaio"],
                       ["o", "iuiuiuiuiuiuiuiuo", "aiaiaiaiaiaiaiaio"],
                       ["o", "uiuiuiuiuiuiuiuio", "aiaiaiaiaiaiaiaio"],
                       ["o", "iuiuiuiuiuiuiuiuo", "aiaiaiaiaiaiaiaio"],
                       ["o", "uiuiuiuiuiuiuiuio", "aiaiaiaiaiaiaiaio"]])

    # ctc_weight: 0.0 (attention), 0.5 (hybrid CTC/attention), 1.0 (CTC)
    for text_idx2, ctc_weight in enumerate([0.0, 0.5, 1.0]):
        seq_true_text = seq_true_texts[text_idx1][text_idx2]

        args = make_arg(etype=etype, ctc_weight=ctc_weight)
        # Import the backend module named by the parametrization and build
        # a 40-dim-input, 5-output-token E2E model.
        m = importlib.import_module(m_str)
        model = m.E2E(40, 5, args)

        if "pytorch" in m_str:
            init_torch_weight_const(model, const)
        else:
            init_chainer_weight_const(model, const)

        data = [
            ("aaa", dict(feat=numpy.random.randn(100, 40).astype(
                numpy.float32), token=seq_true_text))
        ]

        in_data = data[0][1]["feat"]
        nbest_hyps = model.recognize(in_data, args, args.char_list)
        # Drop the leading SOS token, map ids back to characters.
        y_hat = nbest_hyps[0]['yseq'][1:]
        seq_hat = [args.char_list[int(idx)] for idx in y_hat]

        seq_hat_text = "".join(seq_hat).replace('<space>', ' ')
        seq_true_text = data[0][1]["token"]

        assert seq_hat_text == seq_true_text
@pytest.mark.parametrize(("etype", "dtype", "m_str", "text_idx1"), [
    ("blstmp", "lstm", "espnet.nets.chainer_backend.e2e_asr", 0),
    ("blstmp", "lstm", "espnet.nets.pytorch_backend.e2e_asr", 1),
    ("vggblstmp", "lstm", "espnet.nets.chainer_backend.e2e_asr", 2),
    ("vggblstmp", "lstm", "espnet.nets.pytorch_backend.e2e_asr", 3),
    ("bgrup", "gru", "espnet.nets.chainer_backend.e2e_asr", 4),
    ("bgrup", "gru", "espnet.nets.pytorch_backend.e2e_asr", 5),
    ("vggbgrup", "gru", "espnet.nets.chainer_backend.e2e_asr", 6),
    ("vggbgrup", "gru", "espnet.nets.pytorch_backend.e2e_asr", 7),
])
def test_recognition_results_with_lm(etype, dtype, m_str, text_idx1):
    """Same reproducibility check as test_recognition_results, but decoding
    with a constant-initialized RNN language model (lm_weight=0.3).

    NOTE(review): ``dtype`` is parametrized but never used in the body —
    confirm whether it should be forwarded to ``make_arg``.
    """
    const = 1e-4
    numpy.random.seed(1)
    # Expected decodings: one row per parametrized case, one column per
    # ctc_weight value tested below.
    seq_true_texts = [["o", "iuiuiuiuiuiuiuiuo", "aiaiaiaiaiaiaiaio"],
                      ["o", "uiuiuiuiuiuiuiuio", "aiaiaiaiaiaiaiaio"],
                      ["o", "iuiuiuiuiuiuiuiuo", "aiaiaiaiaiaiaiaio"],
                      ["o", "uiuiuiuiuiuiuiuio", "aiaiaiaiaiaiaiaio"],
                      ["o", "iuiuiuiuiuiuiuiuo", "aiaiaiaiaiaiaiaio"],
                      ["o", "uiuiuiuiuiuiuiuio", "aiaiaiaiaiaiaiaio"],
                      ["o", "iuiuiuiuiuiuiuiuo", "aiaiaiaiaiaiaiaio"],
                      ["o", "uiuiuiuiuiuiuiuio", "aiaiaiaiaiaiaiaio"]]
    # ctc_weight: 0.0 (attention), 0.5 (hybrid CTC/attention), 1.0 (CTC)
    for text_idx2, ctc_weight in enumerate([0.0, 0.5, 1.0]):
        seq_true_text = seq_true_texts[text_idx1][text_idx2]
        args = make_arg(etype=etype, rnnlm="dummy", ctc_weight=ctc_weight,
                        lm_weight=0.3)
        m = importlib.import_module(m_str)
        model = m.E2E(40, 5, args)
        # Build a small RNNLM for the matching backend, then make both the
        # ASR model and the LM deterministic by filling weights with `const`.
        if "pytorch" in m_str:
            rnnlm = lm_pytorch.ClassifierWithState(
                lm_pytorch.RNNLM(len(args.char_list), 2, 10))
            init_torch_weight_const(model, const)
            init_torch_weight_const(rnnlm, const)
        else:
            rnnlm = lm_chainer.ClassifierWithState(
                lm_chainer.RNNLM(len(args.char_list), 2, 10))
            init_chainer_weight_const(model, const)
            init_chainer_weight_const(rnnlm, const)
        data = [
            ("aaa", dict(feat=numpy.random.randn(100, 40).astype(
                numpy.float32), token=seq_true_text))
        ]
        in_data = data[0][1]["feat"]
        nbest_hyps = model.recognize(in_data, args, args.char_list, rnnlm)
        y_hat = nbest_hyps[0]['yseq'][1:]  # drop the leading <sos> token id
        seq_hat = [args.char_list[int(idx)] for idx in y_hat]
        seq_hat_text = "".join(seq_hat).replace('<space>', ' ')
        seq_true_text = data[0][1]["token"]
        assert seq_hat_text == seq_true_text
@pytest.mark.parametrize(("etype", "dtype", "m_str"), [
    ("blstmp", "lstm", "espnet.nets.chainer_backend.e2e_asr"),
    ("blstmp", "lstm", "espnet.nets.pytorch_backend.e2e_asr"),
    ("vggblstmp", "lstm", "espnet.nets.chainer_backend.e2e_asr"),
    ("vggblstmp", "lstm", "espnet.nets.pytorch_backend.e2e_asr"),
    ("bgrup", "gru", "espnet.nets.chainer_backend.e2e_asr"),
    ("bgrup", "gru", "espnet.nets.pytorch_backend.e2e_asr"),
    ("vggbgrup", "gru", "espnet.nets.chainer_backend.e2e_asr"),
    ("vggbgrup", "gru", "espnet.nets.pytorch_backend.e2e_asr"),
])
def test_batch_beam_search(etype, dtype, m_str):
    """Batch decoding must match single-utterance decoding.

    For each ctc_weight, the 1-best token sequence from ``recognize_batch``
    on a single-utterance batch must equal the one from ``recognize``, both
    with and without an RNNLM, and (for ctc_weight > 0) with windowed CTC
    scoring enabled via ``ctc_window_margin``.

    NOTE(review): chainer-backend cases are skipped by the ``continue`` below;
    ``dtype`` is parametrized but unused — confirm both are intentional.
    """
    const = 1e-4
    numpy.random.seed(1)
    # ctc_weight: 0.0 (attention), 0.5 (hybrid CTC/attention), 1.0 (CTC)
    for ctc_weight in [0.0, 0.5]:
        args = make_arg(etype=etype, rnnlm="dummy", ctc_weight=ctc_weight,
                        lm_weight=0.3)
        m = importlib.import_module(m_str)
        model = m.E2E(40, 5, args)
        if "pytorch" in m_str:
            rnnlm = lm_pytorch.ClassifierWithState(
                lm_pytorch.RNNLM(len(args.char_list), 2, 10))
            init_torch_weight_const(model, const)
            init_torch_weight_const(rnnlm, const)
        else:
            # chainer module
            continue
        data = [("aaa", dict(feat=numpy.random.randn(100, 40).astype(numpy.float32)))]
        in_data = data[0][1]["feat"]
        for lm_weight in [0.0, 0.3]:
            if lm_weight == 0.0:
                s_nbest_hyps = model.recognize(in_data, args, args.char_list)
                b_nbest_hyps = model.recognize_batch([in_data], args, args.char_list)
            else:
                s_nbest_hyps = model.recognize(in_data, args, args.char_list, rnnlm)
                b_nbest_hyps = model.recognize_batch([in_data], args, args.char_list, rnnlm)
            # recognize_batch returns one hypothesis list per utterance;
            # compare the 1-best of utterance 0 with the single-mode 1-best.
            assert s_nbest_hyps[0]['yseq'] == b_nbest_hyps[0][0]['yseq']
        if ctc_weight > 0.0:
            # Re-check equivalence with windowed CTC scoring enabled.
            args.ctc_window_margin = 40
            s_nbest_hyps = model.recognize(in_data, args, args.char_list, rnnlm)
            b_nbest_hyps = model.recognize_batch([in_data], args, args.char_list, rnnlm)
            assert s_nbest_hyps[0]['yseq'] == b_nbest_hyps[0][0]['yseq']
| 38.402655
| 92
| 0.596613
| 1,102
| 8,679
| 4.460073
| 0.160617
| 0.050865
| 0.063479
| 0.063479
| 0.7941
| 0.78881
| 0.76826
| 0.763377
| 0.683011
| 0.683011
| 0
| 0.034456
| 0.250951
| 8,679
| 225
| 93
| 38.573333
| 0.721581
| 0.036295
| 0
| 0.52459
| 0
| 0
| 0.223791
| 0.100527
| 0
| 0
| 0
| 0
| 0.021858
| 1
| 0.032787
| false
| 0
| 0.04918
| 0
| 0.087432
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 4
|
bccf4e06a8dd034bb58dadc034b3381a06cabb21
| 1,451
|
py
|
Python
|
fHDHR_web/rmg/__init__.py
|
deathbybandaid/fHDHR_NewsOn
|
06d205a3ca677b88fa93b9b7503465aed1838c6b
|
[
"WTFPL"
] | 2
|
2021-11-21T18:45:35.000Z
|
2022-01-11T16:11:48.000Z
|
fHDHR_web/rmg/__init__.py
|
deathbybandaid/fHDHR_NewsOn
|
06d205a3ca677b88fa93b9b7503465aed1838c6b
|
[
"WTFPL"
] | null | null | null |
fHDHR_web/rmg/__init__.py
|
deathbybandaid/fHDHR_NewsOn
|
06d205a3ca677b88fa93b9b7503465aed1838c6b
|
[
"WTFPL"
] | null | null | null |
from .rmg_ident_xml import RMG_Ident_XML
from .device_xml import RMG_Device_XML
from .devices_discover import RMG_Devices_Discover
from .devices_probe import RMG_Devices_Probe
from .devices_devicekey import RMG_Devices_DeviceKey
from .devices_devicekey_channels import RMG_Devices_DeviceKey_Channels
from .devices_devicekey_scanners import RMG_Devices_DeviceKey_Scanners
from .devices_devicekey_networks import RMG_Devices_DeviceKey_Networks
from .devices_devicekey_scan import RMG_Devices_DeviceKey_Scan
from .devices_devicekey_prefs import RMG_Devices_DeviceKey_Prefs
from .devices_devicekey_media import RMG_Devices_DeviceKey_Media
class fHDHR_RMG():
    """Facade wiring every RMG endpoint handler to the shared fhdhr core object."""

    def __init__(self, fhdhr):
        self.fhdhr = fhdhr
        # (attribute name, handler class) pairs; each handler class is
        # instantiated with the shared fhdhr object.
        handler_specs = [
            ("rmg_ident_xml", RMG_Ident_XML),
            ("device_xml", RMG_Device_XML),
            ("devices_discover", RMG_Devices_Discover),
            ("devices_probe", RMG_Devices_Probe),
            ("devices_devicekey", RMG_Devices_DeviceKey),
            ("devices_devicekey_channels", RMG_Devices_DeviceKey_Channels),
            ("devices_devicekey_scanners", RMG_Devices_DeviceKey_Scanners),
            ("devices_devicekey_networks", RMG_Devices_DeviceKey_Networks),
            ("devices_devicekey_scan", RMG_Devices_DeviceKey_Scan),
            ("devices_devicekey_prefs", RMG_Devices_DeviceKey_Prefs),
            ("devices_devicekey_media", RMG_Devices_DeviceKey_Media),
        ]
        for attr_name, handler_cls in handler_specs:
            setattr(self, attr_name, handler_cls(fhdhr))
| 46.806452
| 79
| 0.824948
| 190
| 1,451
| 5.789474
| 0.110526
| 0.407273
| 0.241818
| 0.159091
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.130944
| 1,451
| 30
| 80
| 48.366667
| 0.872324
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.04
| false
| 0
| 0.44
| 0
| 0.52
| 0
| 0
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 4
|
bcdd33143d14e2c1810f736cbf4939c6ec53a65b
| 328
|
py
|
Python
|
vernam/test_util.py
|
millaguie/Vernam
|
127b8e3c7d221e4736e66f0e82810404b4d24bd7
|
[
"BSD-3-Clause"
] | 4
|
2018-07-21T22:41:51.000Z
|
2021-11-17T11:16:27.000Z
|
vernam/test_util.py
|
millaguie/Vernam
|
127b8e3c7d221e4736e66f0e82810404b4d24bd7
|
[
"BSD-3-Clause"
] | 1
|
2017-06-07T16:32:37.000Z
|
2017-06-07T16:32:37.000Z
|
vernam/test_util.py
|
millaguie/Vernam
|
127b8e3c7d221e4736e66f0e82810404b4d24bd7
|
[
"BSD-3-Clause"
] | 1
|
2017-06-01T17:21:25.000Z
|
2017-06-01T17:21:25.000Z
|
import unittest
import util
class SimplisticTest(unittest.TestCase):
    """Sanity check for util.hashSum against a known digest."""

    def test_getKeyHashFromKey(self):
        # The expected value is 128 hex chars — presumably a SHA-512 digest
        # of "AAA"; confirm against util.hashSum's implementation.
        assert util.hashSum("AAA") == "8d708d18b54df3962d696f069ad42dad7762b5d4d3c97ee5fa2dae0673ed46545164c078b8db3d59c4b96020e4316f17bb3d91bf1f6bc0896bbe75416eb8c385"
if __name__ == '__main__':
    # Allow running this test module directly: `python test_util.py`.
    unittest.main()
| 29.818182
| 170
| 0.810976
| 22
| 328
| 11.681818
| 0.772727
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.275862
| 0.115854
| 328
| 10
| 171
| 32.8
| 0.610345
| 0
| 0
| 0
| 0
| 0
| 0.42378
| 0.390244
| 0
| 0
| 0
| 0
| 0.142857
| 1
| 0.142857
| false
| 0
| 0.285714
| 0
| 0.571429
| 0
| 1
| 0
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
|
0
| 4
|
bceab3b511835b23703820db69dbae9dae102bf1
| 377
|
py
|
Python
|
yelp/models/user.py
|
cwithmichael/yelp-camp-flask
|
15d05285ff198256c396e51456a9a88bc836a342
|
[
"MIT"
] | null | null | null |
yelp/models/user.py
|
cwithmichael/yelp-camp-flask
|
15d05285ff198256c396e51456a9a88bc836a342
|
[
"MIT"
] | null | null | null |
yelp/models/user.py
|
cwithmichael/yelp-camp-flask
|
15d05285ff198256c396e51456a9a88bc836a342
|
[
"MIT"
] | null | null | null |
import mongoengine as me
def _not_empty(val):
if not val:
raise me.ValidationError("value can not be empty")
class User(me.Document):
    """MongoDB user document.

    All three fields are required and must be non-empty (``_not_empty``);
    ``email`` and ``username`` additionally carry a unique index.
    """

    email = me.StringField(required=True, unique=True, validation=_not_empty)
    username = me.StringField(required=True, unique=True, validation=_not_empty)
    # NOTE(review): the password is stored as a plain StringField here —
    # confirm that hashing happens before assignment elsewhere.
    password = me.StringField(required=True, validation=_not_empty)
| 29
| 80
| 0.742706
| 51
| 377
| 5.333333
| 0.490196
| 0.117647
| 0.231618
| 0.275735
| 0.389706
| 0.389706
| 0.389706
| 0.389706
| 0.389706
| 0
| 0
| 0
| 0.156499
| 377
| 12
| 81
| 31.416667
| 0.855346
| 0
| 0
| 0
| 0
| 0
| 0.058355
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.125
| false
| 0.125
| 0.125
| 0
| 0.75
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
|
0
| 4
|
4c036440909eee3fac3c5546dc84ff6a3b70b68d
| 216
|
py
|
Python
|
wsl_make_install.py
|
tacesrever/Il2CppParser
|
2c588761f8f70a63270c3b14b06f43259ecc5ea2
|
[
"MIT"
] | 35
|
2019-12-24T15:34:11.000Z
|
2022-02-26T07:13:15.000Z
|
wsl_make_install.py
|
tacesrever/Il2CppParser
|
2c588761f8f70a63270c3b14b06f43259ecc5ea2
|
[
"MIT"
] | null | null | null |
wsl_make_install.py
|
tacesrever/Il2CppParser
|
2c588761f8f70a63270c3b14b06f43259ecc5ea2
|
[
"MIT"
] | 13
|
2020-01-11T01:52:56.000Z
|
2021-09-29T17:25:52.000Z
|
#!/usr/bin/python
import sys, os
# NOTE(review): `sys` is imported but unused — confirm before removing.
# Build the parser library inside WSL, then deploy it to an Android device.
os.system("wslbridge sh -c 'cd build;make'")  # run make in ./build via wslbridge
os.system("adb push build/libparser.so /data/local/tmp/libparser.so")  # copy to device
os.system("adb shell chmod 0755 /data/local/tmp/libparser.so")  # make it executable
| 36
| 70
| 0.722222
| 37
| 216
| 4.216216
| 0.621622
| 0.153846
| 0.141026
| 0.269231
| 0.294872
| 0
| 0
| 0
| 0
| 0
| 0
| 0.020725
| 0.106481
| 216
| 6
| 71
| 36
| 0.787565
| 0.074074
| 0
| 0
| 0
| 0
| 0.697436
| 0.287179
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.25
| 0
| 0.25
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 4
|
4c33c5fc108bb117e42df026b80b64486ee395a7
| 1,817
|
py
|
Python
|
deep-rl/lib/python2.7/site-packages/OpenGL/GL/ARB/texture_storage.py
|
ShujaKhalid/deep-rl
|
99c6ba6c3095d1bfdab81bd01395ced96bddd611
|
[
"MIT"
] | 210
|
2016-04-09T14:26:00.000Z
|
2022-03-25T18:36:19.000Z
|
deep-rl/lib/python2.7/site-packages/OpenGL/GL/ARB/texture_storage.py
|
ShujaKhalid/deep-rl
|
99c6ba6c3095d1bfdab81bd01395ced96bddd611
|
[
"MIT"
] | 72
|
2016-09-04T09:30:19.000Z
|
2022-03-27T17:06:53.000Z
|
deep-rl/lib/python2.7/site-packages/OpenGL/GL/ARB/texture_storage.py
|
ShujaKhalid/deep-rl
|
99c6ba6c3095d1bfdab81bd01395ced96bddd611
|
[
"MIT"
] | 64
|
2016-04-09T14:26:49.000Z
|
2022-03-21T11:19:47.000Z
|
'''OpenGL extension ARB.texture_storage
This module customises the behaviour of the
OpenGL.raw.GL.ARB.texture_storage to provide a more
Python-friendly API
Overview (from the spec)
The texture image specification commands in OpenGL allow each level
to be separately specified with different sizes, formats, types and
so on, and only imposes consistency checks at draw time. This adds
overhead for implementations.
This extension provides a mechanism for specifying the entire
structure of a texture in a single call, allowing certain
consistency checks and memory allocations to be done up front. Once
specified, the format and dimensions of the image array become
immutable, to simplify completeness checks in the implementation.
When using this extension, it is no longer possible to supply texture
data using TexImage*. Instead, data can be uploaded using TexSubImage*,
or produced by other means (such as render-to-texture, mipmap generation,
or rendering to a sibling EGLImage).
This extension has complicated interactions with other extensions.
The goal of most of these interactions is to ensure that a texture
is always mipmap complete (and cube complete for cubemap textures).
The official definition of this extension is available here:
http://www.opengl.org/registry/specs/ARB/texture_storage.txt
'''
from OpenGL import platform, constant, arrays
from OpenGL import extensions, wrapper
import ctypes
from OpenGL.raw.GL import _types, _glgets
from OpenGL.raw.GL.ARB.texture_storage import *
from OpenGL.raw.GL.ARB.texture_storage import _EXTENSION_NAME
def glInitTextureStorageARB():
    """Return a boolean indicating whether the ARB.texture_storage extension is available."""
    from OpenGL import extensions as _extensions
    return _extensions.hasGLExtension(_EXTENSION_NAME)
### END AUTOGENERATED SECTION
| 40.377778
| 74
| 0.803522
| 262
| 1,817
| 5.530534
| 0.549618
| 0.041408
| 0.058661
| 0.028986
| 0.071774
| 0.071774
| 0.05245
| 0.05245
| 0
| 0
| 0
| 0
| 0.151348
| 1,817
| 45
| 75
| 40.377778
| 0.939689
| 0.859659
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.111111
| true
| 0
| 0.777778
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 4
|
4c3642f8c2592b63c03155615c1082669105f0de
| 248
|
py
|
Python
|
service/common/errors.py
|
DSAdv/student-question-answer-app
|
bbc3cec15cd37ecb7bc6703f2324b1ae24981ae4
|
[
"MIT"
] | null | null | null |
service/common/errors.py
|
DSAdv/student-question-answer-app
|
bbc3cec15cd37ecb7bc6703f2324b1ae24981ae4
|
[
"MIT"
] | null | null | null |
service/common/errors.py
|
DSAdv/student-question-answer-app
|
bbc3cec15cd37ecb7bc6703f2324b1ae24981ae4
|
[
"MIT"
] | null | null | null |
from werkzeug.exceptions import BadRequest
class IncorrectRequestBodyError(BadRequest):
    """HTTP 400 raised when the request body has missing or malformed fields."""

    message = "[API ERROR] Incorrect fields in request body."
class ExistingUserError(BadRequest):
    """HTTP 400 raised when registering a user that already exists in the DB."""

    # Grammar fix in the user-facing message: "is already exist" -> "already exists".
    message = "[API ERROR] User already exists in DB."
| 24.8
| 61
| 0.766129
| 28
| 248
| 6.785714
| 0.75
| 0.178947
| 0.210526
| 0.263158
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.157258
| 248
| 9
| 62
| 27.555556
| 0.909091
| 0
| 0
| 0
| 0
| 0
| 0.342742
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.2
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 4
|
4c4692ecbf210acbc5aef9568a9dc8a80690535c
| 106
|
py
|
Python
|
client.py
|
daliasen/LED-Cube
|
3959ee5caf86c1497ac22231d87a8009bed5b3e8
|
[
"BSD-3-Clause"
] | 4
|
2018-08-19T09:16:40.000Z
|
2020-01-27T13:18:19.000Z
|
client.py
|
daliasen/LED-Cube
|
3959ee5caf86c1497ac22231d87a8009bed5b3e8
|
[
"BSD-3-Clause"
] | null | null | null |
client.py
|
daliasen/LED-Cube
|
3959ee5caf86c1497ac22231d87a8009bed5b3e8
|
[
"BSD-3-Clause"
] | 3
|
2018-08-09T13:30:29.000Z
|
2020-01-26T16:19:23.000Z
|
#!/usr/bin/env python
from client import main
import sys
if __name__ == "__main__":
    # The original crashed with an IndexError when no argument was supplied;
    # fail with a usage message instead. The single argument is forwarded
    # verbatim to client.main.
    if len(sys.argv) < 2:
        sys.exit("usage: client.py <argument>")
    main(sys.argv[1])
| 13.25
| 26
| 0.698113
| 17
| 106
| 3.882353
| 0.764706
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.011236
| 0.160377
| 106
| 7
| 27
| 15.142857
| 0.730337
| 0.188679
| 0
| 0
| 0
| 0
| 0.095238
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.5
| 0
| 0.5
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 4
|
4c516b623bad0f9b6592d07c1a35efbe8fc98cf2
| 750
|
py
|
Python
|
onnxmltools/convert/keras/operator_converters/__init__.py
|
weikexin/onnxmltools
|
b5ea8a43bb0abf5ca23f0913dc2d9ea11b9724b1
|
[
"MIT"
] | null | null | null |
onnxmltools/convert/keras/operator_converters/__init__.py
|
weikexin/onnxmltools
|
b5ea8a43bb0abf5ca23f0913dc2d9ea11b9724b1
|
[
"MIT"
] | null | null | null |
onnxmltools/convert/keras/operator_converters/__init__.py
|
weikexin/onnxmltools
|
b5ea8a43bb0abf5ca23f0913dc2d9ea11b9724b1
|
[
"MIT"
] | null | null | null |
# -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
from . import Activation
from . import AdvancedActivation
from . import BatchNorm
from . import Concate
from . import Conv
from . import Crop
from . import Dense
from . import Dot
from . import Embed
from . import Flatten
from . import GRU
from . import LSTM
from . import Merge
from . import Permute
from . import Pool
from . import RepeatVector
from . import Reshape
from . import SimpleRNN
from . import Upsample
from . import ZeroPad
| 26.785714
| 76
| 0.614667
| 82
| 750
| 5.621951
| 0.5
| 0.433839
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.146667
| 750
| 27
| 77
| 27.777778
| 0.720313
| 0.398667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 4
|
d5cfdc8645784cc11aa3ad2ed1f9207baeddf21f
| 628
|
py
|
Python
|
qiime2/core/archive/format/v3.py
|
turanoo/qiime2
|
2af79e1a81b35b396b1a80e01617dba0f4e10446
|
[
"BSD-3-Clause"
] | null | null | null |
qiime2/core/archive/format/v3.py
|
turanoo/qiime2
|
2af79e1a81b35b396b1a80e01617dba0f4e10446
|
[
"BSD-3-Clause"
] | null | null | null |
qiime2/core/archive/format/v3.py
|
turanoo/qiime2
|
2af79e1a81b35b396b1a80e01617dba0f4e10446
|
[
"BSD-3-Clause"
] | null | null | null |
# ----------------------------------------------------------------------------
# Copyright (c) 2016-2018, QIIME 2 development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file LICENSE, distributed with this software.
# ----------------------------------------------------------------------------
import qiime2.core.archive.format.v2 as v2
class ArchiveFormat(v2.ArchiveFormat):
    """Archive format v3: same layout as v2 with relaxed provenance YAML."""

    # Exactly the same as v2, but inputs may be variadic where the UUIDs are in
    # a YAML sequence. Additionally `Set` is now represented as a sequence
    # with a custom !set tag.
    pass
| 36.941176
| 79
| 0.552548
| 73
| 628
| 4.753425
| 0.712329
| 0.023055
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.026365
| 0.154459
| 628
| 16
| 80
| 39.25
| 0.627119
| 0.797771
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0.333333
| 0.333333
| 0
| 0.666667
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 1
| 0
|
0
| 4
|
d5d2dc1c777ace4aad022048ea9e7ef9a7e47b90
| 68
|
py
|
Python
|
a.py
|
jonschull/Lyte
|
e9ba2bb1b07c9398b81a6f591898d2474d1a4609
|
[
"MIT"
] | 1
|
2018-06-07T17:54:27.000Z
|
2018-06-07T17:54:27.000Z
|
a.py
|
jonschull/Lyte
|
e9ba2bb1b07c9398b81a6f591898d2474d1a4609
|
[
"MIT"
] | 1
|
2018-06-28T05:08:57.000Z
|
2018-06-28T05:08:57.000Z
|
a.py
|
jonschull/Lyte
|
e9ba2bb1b07c9398b81a6f591898d2474d1a4609
|
[
"MIT"
] | null | null | null |
import makemyPYJ
if __name__ == '__main__':
    # Only runs when executed directly, not when this module is imported.
    print('this is a.py')
| 17
| 25
| 0.691176
| 10
| 68
| 3.9
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.161765
| 68
| 3
| 26
| 22.666667
| 0.684211
| 0
| 0
| 0
| 0
| 0
| 0.294118
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.333333
| 0
| 0.333333
| 0.333333
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 4
|
d5f136510bda08b8dca24ae90cee3727da3c9218
| 287
|
py
|
Python
|
python_100/Level1/39.get_output!!!!!!.py
|
relax-space/python-cy
|
eaf4650756e7ece5ec97894b65a7495b5c964eb3
|
[
"Apache-2.0"
] | 1
|
2020-04-27T03:31:23.000Z
|
2020-04-27T03:31:23.000Z
|
python_100/Level1/39.get_output!!!!!!.py
|
relax-space/python-cy
|
eaf4650756e7ece5ec97894b65a7495b5c964eb3
|
[
"Apache-2.0"
] | 1
|
2020-04-14T23:55:19.000Z
|
2020-04-15T03:29:37.000Z
|
python_100/Level1/39.get_output!!!!!!.py
|
relax-space/python-cy
|
eaf4650756e7ece5ec97894b65a7495b5c964eb3
|
[
"Apache-2.0"
] | null | null | null |
# 39.阅读一下代码他们的输出结果是什么?
def multi():
    """Return four closures over the SAME loop variable ``i`` (late binding).

    Because Python closures look up ``i`` at call time, every returned
    function sees the loop variable's final value (3), so each computes 3*x.
    """
    funcs = []
    for i in range(4):
        funcs.append(lambda x: i * x)
    return funcs
print([m(3) for m in multi()])
# 输出的应该是
# [0,3,6,9]
# 正确答案是[9,9,9,9],而不是[0,3,6,9]产生的原因是Python的闭包的后期绑定导致的,这意味着在闭包中的变量是在内部函数被调用的时候被查找的,因为,最后函数被调用的时候,for循环已经完成, i 的值最后是3,因此每一个返回值的i都是3,所以最后的结果是[9,9,9,9]
| 20.5
| 146
| 0.696864
| 49
| 287
| 4.081633
| 0.591837
| 0.06
| 0.06
| 0.04
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.087649
| 0.125436
| 287
| 13
| 147
| 22.076923
| 0.709163
| 0.634146
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| true
| 0
| 0
| 0.333333
| 0.666667
| 0.333333
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 1
| 1
| 0
|
0
| 4
|
9107a8dbcc754643b25c07ad4aa109966c976c3d
| 142
|
py
|
Python
|
src/ns_web_crawler/ns_web_crawler/eshop/eshop_costants.py
|
steny138/PyNintendoEPrice
|
def9c95690cf3cf72615ae4216fee8fca2934de1
|
[
"Apache-2.0"
] | null | null | null |
src/ns_web_crawler/ns_web_crawler/eshop/eshop_costants.py
|
steny138/PyNintendoEPrice
|
def9c95690cf3cf72615ae4216fee8fca2934de1
|
[
"Apache-2.0"
] | 3
|
2020-06-22T15:38:18.000Z
|
2021-11-24T02:01:51.000Z
|
src/ns_web_crawler/ns_web_crawler/eshop/eshop_costants.py
|
steny138/PyNintendoEPrice
|
def9c95690cf3cf72615ae4216fee8fca2934de1
|
[
"Apache-2.0"
] | 1
|
2018-08-04T08:15:05.000Z
|
2018-08-04T08:15:05.000Z
|
import re
def check_nsuid(nsuid):
    """Return True if *nsuid* is a digit string starting with '7' and at
    least two characters long; falsy input (None, "") returns False.

    Idiom fix: ``not match is None`` replaced with ``match is not None``
    (PEP 8). Behavior is unchanged: ``re.match`` anchors at the start and
    the ``$`` anchors the end (a single trailing newline is still tolerated
    by ``$``, matching the original semantics).
    """
    if not nsuid:
        return False
    return re.match(r'7\d+$', nsuid) is not None
| 15.777778
| 37
| 0.619718
| 23
| 142
| 3.782609
| 0.652174
| 0.252874
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.009804
| 0.28169
| 142
| 9
| 38
| 15.777778
| 0.843137
| 0
| 0
| 0
| 0
| 0
| 0.034965
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.166667
| false
| 0
| 0.166667
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
|
0
| 4
|
910903b76366135e306fd0117be362fab2ce92c9
| 265
|
py
|
Python
|
gdal/run_test.py
|
stevemkim/conda-recipes
|
4fa403587b187d87cd6f77abf0b24b8c3f351564
|
[
"Apache-2.0"
] | null | null | null |
gdal/run_test.py
|
stevemkim/conda-recipes
|
4fa403587b187d87cd6f77abf0b24b8c3f351564
|
[
"Apache-2.0"
] | null | null | null |
gdal/run_test.py
|
stevemkim/conda-recipes
|
4fa403587b187d87cd6f77abf0b24b8c3f351564
|
[
"Apache-2.0"
] | null | null | null |
# NOTE(review): conda build-time smoke test; every import check below is
# commented out, so this script currently verifies nothing — confirm whether
# the checks were disabled deliberately.
#import osgeo._gdal
#import osgeo._gdalconst
#import osgeo._ogr
#import osgeo._osr
#import osgeo
#import gdal
#import gdalconst
#import ogr
#import osr
#
#cnt = ogr.GetDriverCount()
#for i in xrange(cnt):
#    print ogr.GetDriver(i).GetName()
#
#import os1_hw
pass  # intentional no-op: keeps the test script valid Python
| 15.588235
| 37
| 0.743396
| 39
| 265
| 4.923077
| 0.461538
| 0.286458
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.004367
| 0.135849
| 265
| 16
| 38
| 16.5625
| 0.834061
| 0.867925
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 0
|
0
| 4
|
91222ac9ec4b6b1830c54677dd6133e135352430
| 389
|
py
|
Python
|
h5py/run_test.py
|
nikicc/anaconda-recipes
|
9c611a5854bf41bbc5e7ed9853dc71c0851a62ef
|
[
"BSD-3-Clause"
] | 130
|
2015-07-28T03:41:21.000Z
|
2022-03-16T03:07:41.000Z
|
h5py/run_test.py
|
nikicc/anaconda-recipes
|
9c611a5854bf41bbc5e7ed9853dc71c0851a62ef
|
[
"BSD-3-Clause"
] | 119
|
2015-08-01T00:54:06.000Z
|
2021-01-05T13:00:46.000Z
|
h5py/run_test.py
|
nikicc/anaconda-recipes
|
9c611a5854bf41bbc5e7ed9853dc71c0851a62ef
|
[
"BSD-3-Clause"
] | 72
|
2015-07-29T02:35:56.000Z
|
2022-02-26T14:31:15.000Z
|
import h5py._conv
import h5py._errors
import h5py._objects
import h5py._proxy
import h5py.defs
import h5py.h5
import h5py.h5a
import h5py.h5ac
import h5py.h5d
import h5py.h5ds
import h5py.h5f
import h5py.h5fd
import h5py.h5g
import h5py.h5i
import h5py.h5l
import h5py.h5o
import h5py.h5p
import h5py.h5r
import h5py.h5s
import h5py.h5t
import h5py.h5z
import h5py.utils
# All submodule imports above succeeded; now run h5py's bundled test suite.
h5py.run_tests()
| 15.56
| 20
| 0.817481
| 69
| 389
| 4.536232
| 0.376812
| 0.702875
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.113703
| 0.118252
| 389
| 24
| 21
| 16.208333
| 0.798834
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.956522
| 0
| 0.956522
| 0
| 0
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 4
|
9128cfb0fa0ed1b65839adaefd198b312660e5d9
| 92
|
py
|
Python
|
src/server/python/tf1group/lcilive.py
|
nicojqn/Livy
|
d5076a493747563d8e40600d52371df888c75d27
|
[
"MIT"
] | null | null | null |
src/server/python/tf1group/lcilive.py
|
nicojqn/Livy
|
d5076a493747563d8e40600d52371df888c75d27
|
[
"MIT"
] | null | null | null |
src/server/python/tf1group/lcilive.py
|
nicojqn/Livy
|
d5076a493747563d8e40600d52371df888c75d27
|
[
"MIT"
] | null | null | null |
from livetf1group import *
if __name__ == "__main__":
    # Resolve the "lci" live stream and print the URL's path component.
    # NOTE(review): str("lci") is redundant — the literal is already a str.
    print(get_live_url(str("lci")).path)
| 30.666667
| 40
| 0.728261
| 13
| 92
| 4.384615
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.012195
| 0.108696
| 92
| 3
| 40
| 30.666667
| 0.682927
| 0
| 0
| 0
| 0
| 0
| 0.11828
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.333333
| 0
| 0.333333
| 0.333333
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 4
|
913f9f47a16001f36a363b0e5be88afe4588cdda
| 894
|
py
|
Python
|
malib/rpc/ExperimentManager/base_client.py
|
ReinholdM/play_football_with_human
|
9ac2f0a8783aede56f4ac1f6074db7daa41b6b6c
|
[
"MIT"
] | 5
|
2021-11-17T03:11:13.000Z
|
2021-12-23T09:04:21.000Z
|
malib/rpc/ExperimentManager/base_client.py
|
ReinholdM/play_football_with_human
|
9ac2f0a8783aede56f4ac1f6074db7daa41b6b6c
|
[
"MIT"
] | null | null | null |
malib/rpc/ExperimentManager/base_client.py
|
ReinholdM/play_football_with_human
|
9ac2f0a8783aede56f4ac1f6074db7daa41b6b6c
|
[
"MIT"
] | null | null | null |
# -*- encoding: utf-8 -*-
# -----
# Created Date: 2021/2/20
# Author: Hanjing Wang
# -----
# Last Modified:
# Modified By:
# -----
# Copyright (c) 2020 MARL @ SJTU
# -----
import abc
from malib.utils.typing import Any
class BaseClient(abc.ABC):
    """Abstract interface for experiment-manager logging clients.

    Concrete subclasses must implement the ``create_table`` and ``send_*``
    hooks; all abstract bodies here are no-ops.

    NOTE(review): ``info`` is the only method NOT marked @abc.abstractmethod
    (it acts as a default no-op) — confirm that is intentional.
    """

    def info(self, level: str, message: str, nid: str):
        """Hook for a free-form log *message* at *level* for node *nid* (no-op by default)."""
        pass

    @abc.abstractmethod
    def create_table(self, primary: str, secondary: str, nid: str):
        """Hook to create a table keyed by (*primary*, *secondary*) for node *nid*."""
        pass

    @abc.abstractmethod
    def send_scalar(self, key: int, tag: str, nid: str, content: Any):
        """Hook to record a scalar *content* under *tag* at step *key* for node *nid*."""
        pass

    @abc.abstractmethod
    def send_image(self, key, tag, image, serial):
        # NOTE(review): *serial* presumably selects a serialization mode —
        # confirm against concrete client implementations.
        """Hook to record an *image* under *tag* at step *key*."""
        pass

    @abc.abstractmethod
    def send_figure(self, key, tag, nid, figure):
        """Hook to record a *figure* under *tag* for node *nid*."""
        pass

    @abc.abstractmethod
    def send_obj(self, key, tag, nid, obj, serial):
        """Hook to record an arbitrary object *obj* under *tag* for node *nid*."""
        pass

    @abc.abstractmethod
    def send_binary_tensor(self, key, tag, nid, tensor):
        """Hook to record a *tensor* in binary form under *tag* for node *nid*."""
        pass
| 20.790698
| 70
| 0.612975
| 116
| 894
| 4.663793
| 0.456897
| 0.077634
| 0.232902
| 0.266174
| 0.358595
| 0.247689
| 0.121996
| 0
| 0
| 0
| 0
| 0.017884
| 0.249441
| 894
| 42
| 71
| 21.285714
| 0.788376
| 0.168904
| 0
| 0.565217
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.304348
| false
| 0.304348
| 0.086957
| 0
| 0.434783
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 4
|
e66b90d0e6be07b4f5e8426ce6b45fbffaf586c8
| 109
|
py
|
Python
|
Part 1/Chapter 7/exercise_7.1.py
|
kg55555/pypractice
|
1867f001b3d2a7174ea00d7b9e2fa22e9f1877ef
|
[
"MIT"
] | null | null | null |
Part 1/Chapter 7/exercise_7.1.py
|
kg55555/pypractice
|
1867f001b3d2a7174ea00d7b9e2fa22e9f1877ef
|
[
"MIT"
] | null | null | null |
Part 1/Chapter 7/exercise_7.1.py
|
kg55555/pypractice
|
1867f001b3d2a7174ea00d7b9e2fa22e9f1877ef
|
[
"MIT"
] | null | null | null |
# Chapter 7, exercise 7.1: prompt for a desired car model and echo it back
# (simple input()/f-string demonstration).
car = input("What kind of a car would you like to buy?\n")
print(f"Let me see if I can find a {car} for you")
| 54.5
| 58
| 0.678899
| 26
| 109
| 2.846154
| 0.846154
| 0.108108
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.201835
| 109
| 2
| 59
| 54.5
| 0.850575
| 0
| 0
| 0
| 0
| 0
| 0.754545
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0.5
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
|
0
| 4
|
e686ee0edde33198cdaa2a2a246bbc2ef23c913c
| 173
|
py
|
Python
|
lista_ex5.2.py/exercicio2.py
|
robinson-1985/mentoria_exercises
|
8359cead6ee5351851b04cb45f252e3881b79117
|
[
"MIT"
] | null | null | null |
lista_ex5.2.py/exercicio2.py
|
robinson-1985/mentoria_exercises
|
8359cead6ee5351851b04cb45f252e3881b79117
|
[
"MIT"
] | null | null | null |
lista_ex5.2.py/exercicio2.py
|
robinson-1985/mentoria_exercises
|
8359cead6ee5351851b04cb45f252e3881b79117
|
[
"MIT"
] | null | null | null |
''' 2. Escreva um programa que exiba na tela a quantidade de números ímpares existentes
entre dois números que o usuário digitar (testar inclusive os números digitados).'''
# Translation: "Write a program that prints how many odd numbers exist between
# two numbers typed by the user (the typed numbers themselves are included)."
# NOTE(review): the exercise is stated but not implemented in this file.
| 86.5
| 88
| 0.786127
| 26
| 173
| 5.230769
| 0.884615
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.006849
| 0.156069
| 173
| 2
| 89
| 86.5
| 0.924658
| 0.959538
| 0
| null | 0
| null | 0
| 0
| null | 0
| 0
| 0
| null | 1
| null | true
| 0
| 0
| null | null | null | 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 4
|
e690a85378b5314181ba83245ca53e9163ff4e7f
| 41
|
py
|
Python
|
scrapy/tests/test_utils_template.py
|
emschorsch/scrapy
|
acb7bad1ff4037b4a613ac94e2d3357bf92bdb8f
|
[
"BSD-3-Clause"
] | 26
|
2015-02-07T17:35:26.000Z
|
2020-04-27T21:11:00.000Z
|
scrapy/tests/test_utils_template.py
|
emschorsch/scrapy
|
acb7bad1ff4037b4a613ac94e2d3357bf92bdb8f
|
[
"BSD-3-Clause"
] | 10
|
2020-02-11T23:34:28.000Z
|
2022-03-11T23:16:12.000Z
|
scrapy/tests/test_utils_template.py
|
emschorsch/scrapy
|
acb7bad1ff4037b4a613ac94e2d3357bf92bdb8f
|
[
"BSD-3-Clause"
] | 9
|
2015-09-21T08:17:20.000Z
|
2021-02-07T02:31:36.000Z
|
# Presumably consumed by scrapy's test machinery to collect and run the
# doctests embedded in the named module — confirm against scrapy/tests.
__doctests__ = ['scrapy.utils.template']
| 20.5
| 40
| 0.756098
| 4
| 41
| 6.75
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.073171
| 41
| 1
| 41
| 41
| 0.710526
| 0
| 0
| 0
| 0
| 0
| 0.512195
| 0.512195
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 4
|
e69c7de52621d46625d5d4ae90db8fbe3c914a5f
| 12,995
|
py
|
Python
|
spark_fhir_schemas/r4/complex_types/structuremap_target.py
|
icanbwell/SparkFhirSchemas
|
8c828313c39850b65f8676e67f526ee92b7d624e
|
[
"Apache-2.0"
] | null | null | null |
spark_fhir_schemas/r4/complex_types/structuremap_target.py
|
icanbwell/SparkFhirSchemas
|
8c828313c39850b65f8676e67f526ee92b7d624e
|
[
"Apache-2.0"
] | null | null | null |
spark_fhir_schemas/r4/complex_types/structuremap_target.py
|
icanbwell/SparkFhirSchemas
|
8c828313c39850b65f8676e67f526ee92b7d624e
|
[
"Apache-2.0"
] | null | null | null |
from typing import Union, List, Optional
from pyspark.sql.types import StructType, StructField, StringType, ArrayType, DataType
# This file is auto-generated by generate_schema so do not edit it manually
# noinspection PyPep8Naming
class StructureMap_TargetSchema:
    """
    A Map of relationships between 2 structures that can be used to transform
    data.
    """

    @staticmethod
    def get_schema(
        max_nesting_depth: Optional[int] = 6,
        nesting_depth: int = 0,
        nesting_list: Optional[List[str]] = None,
        max_recursion_limit: Optional[int] = 2,
        include_extension: Optional[bool] = False,
        extension_fields: Optional[List[str]] = None,
        extension_depth: int = 0,
        max_extension_depth: Optional[int] = 2,
        include_modifierExtension: Optional[bool] = False,
        use_date_for: Optional[List[str]] = None,
        parent_path: Optional[str] = "",
    ) -> Union[StructType, DataType]:
        """
        A Map of relationships between 2 structures that can be used to transform
        data.

        id: Unique id for the element within a resource (for internal references). This
            may be any string value that does not contain spaces.
        extension: May be used to represent additional information that is not part of the basic
            definition of the element. To make the use of extensions safe and manageable,
            there is a strict set of governance applied to the definition and use of
            extensions. Though any implementer can define an extension, there is a set of
            requirements that SHALL be met as part of the definition of the extension.
        modifierExtension: May be used to represent additional information that is not part of the basic
            definition of the element and that modifies the understanding of the element
            in which it is contained and/or the understanding of the containing element's
            descendants. Usually modifier elements provide negation or qualification. To
            make the use of extensions safe and manageable, there is a strict set of
            governance applied to the definition and use of extensions. Though any
            implementer can define an extension, there is a set of requirements that SHALL
            be met as part of the definition of the extension. Applications processing a
            resource are required to check for modifier extensions.
            Modifier extensions SHALL NOT change the meaning of any elements on Resource
            or DomainResource (including cannot change the meaning of modifierExtension
            itself).
        context: Type or variable this rule applies to.
        contextType: How to interpret the context.
        element: Field to create in the context.
        variable: Named context for field, if desired, and a field is specified.
        listMode: If field is a list, how to manage the list.
        listRuleId: Internal rule reference for shared list items.
        transform: How the data is copied / created.
        parameter: Parameters to the transform.
        """
        # Fix for the shared-mutable-default bug: the original declared
        # `nesting_list: List[str] = []`. The list was never mutated in place
        # here, but a None default removes the hazard and is backward-compatible.
        if nesting_list is None:
            nesting_list = []
        if extension_fields is None:
            extension_fields = [
                "valueBoolean",
                "valueCode",
                "valueDate",
                "valueDateTime",
                "valueDecimal",
                "valueId",
                "valueInteger",
                "valuePositiveInt",
                "valueString",
                "valueTime",
                "valueUnsignedInt",
                "valueUri",
                "valueUrl",
                "valueReference",
                "valueCodeableConcept",
                "valueAddress",
            ]
        # Imports are deferred to call time to break the circular dependencies
        # between the generated schema modules.
        from spark_fhir_schemas.r4.complex_types.extension import ExtensionSchema
        from spark_fhir_schemas.r4.simple_types.id import idSchema
        from spark_fhir_schemas.r4.complex_types.structuremap_parameter import (
            StructureMap_ParameterSchema,
        )

        # Truncate recursion: once this type has appeared max_recursion_limit
        # times in the ancestry, or the overall nesting budget is exhausted,
        # collapse the schema to just the "id" column.
        if (
            max_recursion_limit
            and nesting_list.count("StructureMap_Target") >= max_recursion_limit
        ) or (max_nesting_depth and nesting_depth >= max_nesting_depth):
            return StructType([StructField("id", StringType(), True)])
        # add my name to recursion list for later
        my_nesting_list: List[str] = nesting_list + ["StructureMap_Target"]
        my_parent_path = (
            parent_path + ".structuremap_target"
            if parent_path
            else "structuremap_target"
        )
        schema = StructType(
            [
                # Unique id for the element within a resource (for internal references). This
                # may be any string value that does not contain spaces.
                StructField("id", StringType(), True),
                # May be used to represent additional information that is not part of the basic
                # definition of the element. To make the use of extensions safe and manageable,
                # there is a strict set of governance applied to the definition and use of
                # extensions. Though any implementer can define an extension, there is a set of
                # requirements that SHALL be met as part of the definition of the extension.
                StructField(
                    "extension",
                    ArrayType(
                        ExtensionSchema.get_schema(
                            max_nesting_depth=max_nesting_depth,
                            nesting_depth=nesting_depth + 1,
                            nesting_list=my_nesting_list,
                            max_recursion_limit=max_recursion_limit,
                            include_extension=include_extension,
                            extension_fields=extension_fields,
                            extension_depth=extension_depth,
                            max_extension_depth=max_extension_depth,
                            include_modifierExtension=include_modifierExtension,
                            use_date_for=use_date_for,
                            parent_path=my_parent_path,
                        )
                    ),
                    True,
                ),
                # May be used to represent additional information that is not part of the basic
                # definition of the element and that modifies the understanding of the element
                # in which it is contained and/or the understanding of the containing element's
                # descendants. Usually modifier elements provide negation or qualification. To
                # make the use of extensions safe and manageable, there is a strict set of
                # governance applied to the definition and use of extensions. Though any
                # implementer can define an extension, there is a set of requirements that SHALL
                # be met as part of the definition of the extension. Applications processing a
                # resource are required to check for modifier extensions.
                #
                # Modifier extensions SHALL NOT change the meaning of any elements on Resource
                # or DomainResource (including cannot change the meaning of modifierExtension
                # itself).
                StructField(
                    "modifierExtension",
                    ArrayType(
                        ExtensionSchema.get_schema(
                            max_nesting_depth=max_nesting_depth,
                            nesting_depth=nesting_depth + 1,
                            nesting_list=my_nesting_list,
                            max_recursion_limit=max_recursion_limit,
                            include_extension=include_extension,
                            extension_fields=extension_fields,
                            extension_depth=extension_depth,
                            max_extension_depth=max_extension_depth,
                            include_modifierExtension=include_modifierExtension,
                            use_date_for=use_date_for,
                            parent_path=my_parent_path,
                        )
                    ),
                    True,
                ),
                # Type or variable this rule applies to.
                StructField(
                    "context",
                    idSchema.get_schema(
                        max_nesting_depth=max_nesting_depth,
                        nesting_depth=nesting_depth + 1,
                        nesting_list=my_nesting_list,
                        max_recursion_limit=max_recursion_limit,
                        include_extension=include_extension,
                        extension_fields=extension_fields,
                        extension_depth=extension_depth + 1,
                        max_extension_depth=max_extension_depth,
                        include_modifierExtension=include_modifierExtension,
                        use_date_for=use_date_for,
                        parent_path=my_parent_path + ".context",
                    ),
                    True,
                ),
                # How to interpret the context.
                StructField("contextType", StringType(), True),
                # Field to create in the context.
                StructField("element", StringType(), True),
                # Named context for field, if desired, and a field is specified.
                StructField(
                    "variable",
                    idSchema.get_schema(
                        max_nesting_depth=max_nesting_depth,
                        nesting_depth=nesting_depth + 1,
                        nesting_list=my_nesting_list,
                        max_recursion_limit=max_recursion_limit,
                        include_extension=include_extension,
                        extension_fields=extension_fields,
                        extension_depth=extension_depth + 1,
                        max_extension_depth=max_extension_depth,
                        include_modifierExtension=include_modifierExtension,
                        use_date_for=use_date_for,
                        parent_path=my_parent_path + ".variable",
                    ),
                    True,
                ),
                # If field is a list, how to manage the list.
                # NOTE(review): the docstring and the comment above describe a
                # `listMode` field, but no StructField is emitted for it —
                # presumably the schema generator skips it; confirm against the
                # generator before relying on listMode being absent.
                # Internal rule reference for shared list items.
                StructField(
                    "listRuleId",
                    idSchema.get_schema(
                        max_nesting_depth=max_nesting_depth,
                        nesting_depth=nesting_depth + 1,
                        nesting_list=my_nesting_list,
                        max_recursion_limit=max_recursion_limit,
                        include_extension=include_extension,
                        extension_fields=extension_fields,
                        extension_depth=extension_depth + 1,
                        max_extension_depth=max_extension_depth,
                        include_modifierExtension=include_modifierExtension,
                        use_date_for=use_date_for,
                        parent_path=my_parent_path + ".listruleid",
                    ),
                    True,
                ),
                # How the data is copied / created.
                StructField("transform", StringType(), True),
                # Parameters to the transform.
                StructField(
                    "parameter",
                    ArrayType(
                        StructureMap_ParameterSchema.get_schema(
                            max_nesting_depth=max_nesting_depth,
                            nesting_depth=nesting_depth + 1,
                            nesting_list=my_nesting_list,
                            max_recursion_limit=max_recursion_limit,
                            include_extension=include_extension,
                            extension_fields=extension_fields,
                            extension_depth=extension_depth,
                            max_extension_depth=max_extension_depth,
                            include_modifierExtension=include_modifierExtension,
                            use_date_for=use_date_for,
                            parent_path=my_parent_path,
                        )
                    ),
                    True,
                ),
            ]
        )
        # Optionally collapse the (potentially deep) extension columns to plain
        # strings when the caller does not want full extension schemas.
        if not include_extension:
            schema.fields = [
                c
                if c.name != "extension"
                else StructField("extension", StringType(), True)
                for c in schema.fields
            ]
        if not include_modifierExtension:
            schema.fields = [
                c
                if c.name != "modifierExtension"
                else StructField("modifierExtension", StringType(), True)
                for c in schema.fields
            ]
        return schema
| 47.775735
| 104
| 0.554983
| 1,242
| 12,995
| 5.60306
| 0.15942
| 0.050007
| 0.032332
| 0.041385
| 0.738037
| 0.72065
| 0.700532
| 0.661446
| 0.661446
| 0.661446
| 0
| 0.002556
| 0.397845
| 12,995
| 271
| 105
| 47.95203
| 0.886773
| 0.295883
| 0
| 0.540984
| 1
| 0
| 0.04926
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.005464
| false
| 0
| 0.027322
| 0
| 0.04918
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 4
|
e6ad7747954c711c92b1cc9828b20d90bfa1f7ca
| 8,804
|
py
|
Python
|
log_download_split_statics/logdiv.py
|
linlife/Python
|
6260f6cfdc234d7196255869dfbf70cd0a640ad4
|
[
"Apache-2.0"
] | null | null | null |
log_download_split_statics/logdiv.py
|
linlife/Python
|
6260f6cfdc234d7196255869dfbf70cd0a640ad4
|
[
"Apache-2.0"
] | null | null | null |
log_download_split_statics/logdiv.py
|
linlife/Python
|
6260f6cfdc234d7196255869dfbf70cd0a640ad4
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
import urllib2
import urllib
import os
import re
import sys
import time
# Timestamp for "yesterday" (now minus one day of seconds).
atime=time.time()-60*60*24*1
# Yesterday's date as YYYY-MM-DD; used both in the log directory name and in
# every download URL below.
mydate=time.strftime('%Y-%m-%d',time.localtime(atime))
# Filled by spiderlog() with the names of the downloaded .log files.
loglst=[]
# Per-day working directory, e.g. "2015-02-07logs".
logsdir=mydate+'logs'
# Map of site hostname -> URL of that site's gzipped access log for yesterday
# on the log server 223.203.224.40. The two numeric path segments appear to be
# a per-site identifier assigned by the log provider.
# NOTE(review): 'zjwz.jia.com' and 'nanning.jia.com' share the same path
# segments (98/269) — looks like a copy-paste slip; verify against the
# provider's site list.
downloadurl={
'i8.tg.com.cn':'http://223.203.224.40/%s/76/027/i8.tg.com.cn.log.gz'%mydate,\
'www.jia.com':'http://223.203.224.40/%s/06/140/www.jia.com.log.gz'%mydate,\
'qingdao.jia.com':'http://223.203.224.40/%s/68/456/qingdao.jia.com.log.gz'%mydate,\
'sdhz.jia.com':'http://223.203.224.40/%s/10/401/sdhz.jia.com.log.gz'%mydate,\
'jinan.jia.com':'http://223.203.224.40/%s/93/369/jinan.jia.com.log.gz'%mydate,\
'sdjz.jia.com':'http://223.203.224.40/%s/52/535/sdjz.jia.com.log.gz'%mydate,\
'sdly.jia.com':'http://223.203.224.40/%s/53/925/sdly.jia.com.log.gz'%mydate,\
'sdwf.jia.com':'http://223.203.224.40/%s/55/525/sdwf.jia.com.log.gz'%mydate,\
'sdzz.jia.com':'http://223.203.224.40/%s/88/608/sdzz.jia.com.log.gz'%mydate,\
'suzhou.jia.com':'http://223.203.224.40/%s/13/481/suzhou.jia.com.log.gz'%mydate,\
'changzhou.jia.com':'http://223.203.224.40/%s/32/443/changzhou.jia.com.log.gz'%mydate,\
'jsha.jia.com':'http://223.203.224.40/%s/21/652/jsha.jia.com.log.gz'%mydate,\
'kunshan.jia.com':'http://223.203.224.40/%s/29/162/kunshan.jia.com.log.gz'%mydate,\
'jslyg.jia.com':'http://223.203.224.40/%s/68/296/jslyg.jia.com.log.gz'%mydate,\
'nanjing.jia.com':'http://223.203.224.40/%s/22/202/nanjing.jia.com.log.gz'%mydate,\
'nantong.jia.com':'http://223.203.224.40/%s/06/890/nantong.jia.com.log.gz'%mydate,\
'jstz.jia.com':'http://223.203.224.40/%s/98/059/jstz.jia.com.log.gz'%mydate,\
'wuxi.jia.com':'http://223.203.224.40/%s/90/119/wuxi.jia.com.log.gz'%mydate,\
'jsxz.jia.com':'http://223.203.224.40/%s/82/328/jsxz.jia.com.log.gz'%mydate,\
'jsyz.jia.com':'http://223.203.224.40/%s/53/395/jsyz.jia.com.log.gz'%mydate,\
'hangzhou.jia.com':'http://223.203.224.40/%s/73/377/hangzhou.jia.com.log.gz'%mydate,\
'jiaxing.jia.com':'http://223.203.224.40/%s/73/297/jiaxing.jia.com.log.gz'%mydate,\
'ningbo.jia.com':'http://223.203.224.40/%s/96/389/ningbo.jia.com.log.gz'%mydate,\
'shaoxing.jia.com':'http://223.203.224.40/%s/72/717/shaoxing.jia.com.log.gz'%mydate,\
'zjwz.jia.com':'http://223.203.224.40/%s/98/269/zjwz.jia.com.log.gz'%mydate,\
'hefei.jia.com':'http://223.203.224.40/%s/68/346/hefei.jia.com.log.gz'%mydate,\
'ahsz.jia.com':'http://223.203.224.40/%s/37/763/ahsz.jia.com.log.gz'%mydate,\
'shenzhen.jia.com':'http://223.203.224.40/%s/04/460/shenzhen.jia.com.log.gz'%mydate,\
'gddg.jia.com':'http://223.203.224.40/%s/75/197/gddg.jia.com.log.gz'%mydate,\
'gdfs.jia.com':'http://223.203.224.40/%s/09/260/gdfs.jia.com.log.gz'%mydate,\
'guangzhou.jia.com':'http://223.203.224.40/%s/33/583/guangzhou.jia.com.log.gz'%mydate,\
'huizhou.jia.com':'http://223.203.224.40/%s/27/592/huizhou.jia.com.log.gz'%mydate,\
'fuzhou.jia.com':'http://223.203.224.40/%s/02/860/fuzhou.jia.com.log.gz'%mydate,\
'nanning.jia.com':'http://223.203.224.40/%s/98/269/nanning.jia.com.log.gz'%mydate,\
'gxlz.jia.com':'http://223.203.224.40/%s/42/474/gxlz.jia.com.log.gz'%mydate,\
'zhengzhou.jia.com':'http://223.203.224.40/%s/55/415/zhengzhou.jia.com.log.gz'%mydate,\
'wuhan.jia.com':'http://223.203.224.40/%s/66/636/wuhan.jia.com.log.gz'%mydate,\
'changsha.jia.com':'http://223.203.224.40/%s/54/585/changsha.jia.com.log.gz'%mydate,\
'hnyy.jia.com':'http://223.203.224.40/%s/45/284/hnyy.jia.com.log.gz'%mydate,\
'nanchang.jia.com':'http://223.203.224.40/%s/13/371/nanchang.jia.com.log.gz'%mydate,\
'shenyang.jia.com':'http://223.203.224.40/%s/18/811/shenyang.jia.com.log.gz'%mydate,\
'dalian.jia.com':'http://223.203.224.40/%s/66/526/dalian.jia.com.log.gz'%mydate,\
'dandong.jia.com':'http://223.203.224.40/%s/52/685/dandong.jia.com.log.gz'%mydate,\
'haerbin.jia.com':'http://223.203.224.40/%s/28/912/haerbin.jia.com.log.gz'%mydate,\
'changchun.jia.com':'http://223.203.224.40/%s/58/525/changchun.jia.com.log.gz'%mydate,\
'jljl.jia.com':'http://223.203.224.40/%s/07/410/jljl.jia.com.log.gz'%mydate,\
'chengdu.jia.com':'http://223.203.224.40/%s/09/580/chengdu.jia.com.log.gz'%mydate,\
'kunming.jia.com':'http://223.203.224.40/%s/80/008/kunming.jia.com.log.gz'%mydate,\
'guiyang.jia.com':'http://223.203.224.40/%s/79/657/guiyang.jia.com.log.gz'%mydate,\
'shijiazhuang.jia.com':'http://223.203.224.40/%s/40/324/shijiazhuang.jia.com.log.gz'%mydate,\
'handan.jia.com':'http://223.203.224.40/%s/25/732/handan.jia.com.log.gz'%mydate,\
'hbts.jia.com':'http://223.203.224.40/%s/32/523/hbts.jia.com.log.gz'%mydate,\
'hbxt.jia.com':'http://223.203.224.40/%s/57/535/hbxt.jia.com.log.gz'%mydate,\
'taiyuan.jia.com':'http://223.203.224.40/%s/96/099/taiyuan.jia.com.log.gz'%mydate,\
'sxjc.jia.com':'http://223.203.224.40/%s/29/022/sxjc.jia.com.log.gz'%mydate,\
'xian.jia.com':'http://223.203.224.40/%s/81/818/xian.jia.com.log.gz'%mydate,\
'wulumuqi.jia.com':'http://223.203.224.40/%s/10/781/wulumuqi.jia.com.log.gz'%mydate,\
'qhxn.jia.com':'http://223.203.224.40/%s/96/899/qhxn.jia.com.log.gz'%mydate,\
'beijing.jia.com':'http://223.203.224.40/%s/23/732/beijing.jia.com.log.gz'%mydate,\
'chongqing.jia.com':'http://223.203.224.40/%s/97/779/chongqing.jia.com.log.gz'%mydate,\
'shanghai.jia.com':'http://223.203.224.40/%s/42/064/shanghai.jia.com.log.gz'%mydate,\
'tianjin.jia.com':'http://223.203.224.40/%s/00/890/tianjin.jia.com.log.gz'%mydate,\
'jiaju.jia.com':'http://223.203.224.40/%s/80/598/jiaju.jia.com.log.gz'%mydate,\
'mall.jia.com':'http://223.203.224.40/%s/31/113/mall.jia.com.log.gz'%mydate,\
'tg.jia.com':'http://223.203.224.40/%s/18/501/tg.jia.com.log.gz'%mydate,\
'pinpai.jia.com':'http://223.203.224.40/%s/78/387/pinpai.jia.com.log.gz'%mydate,\
'tuku.jia.com':'http://223.203.224.40/%s/66/726/tuku.jia.com.log.gz'%mydate,\
'zixun.jia.com':'http://223.203.224.40/%s/13/391/zixun.jia.com.log.gz'%mydate,\
'm.jia.com':'http://223.203.224.40/%s/28/182/m.jia.com.log.gz'%mydate,\
'mtgi1.jia.com':'http://223.203.224.40/%s/09/540/mtgi1.jia.com.log.gz'%mydate,\
'mtgi2.jia.com':'http://223.203.224.40/%s/68/796/mtgi2.jia.com.log.gz'%mydate,\
'mtgi3.jia.com':'http://223.203.224.40/%s/27/052/mtgi3.jia.com.log.gz'%mydate,\
'mi1.jia.com':'http://223.203.224.40/%s/32/313/mi1.jia.com.log.gz'%mydate,\
'mi2.jia.com':'http://223.203.224.40/%s/73/057/mi2.jia.com.log.gz'%mydate,\
'mi3.jia.com':'http://223.203.224.40/%s/14/801/mi3.jia.com.log.gz'%mydate,\
'mued1.jia.com':'http://223.203.224.40/%s/35/553/mued1.jia.com.log.gz'%mydate,\
'mued2.jia.com':'http://223.203.224.40/%s/94/809/mued2.jia.com.log.gz'%mydate,\
'mued3.jia.com':'http://223.203.224.40/%s/53/065/mued3.jia.com.log.gz'%mydate,\
#'tuku-wap.m.jia.com':''%mydate,\
#'zhuangxiu.jia.com':''%mydate,\
#'i1.tg.com.cn':''%mydate,\
}
def logdir(mydate):
    """Create the day's working log directory under /usr/local/logs.

    Exits the whole script early when the directory already exists, on the
    assumption that the day was already processed. The mydate argument is
    accepted for symmetry with the caller; the directory name itself comes
    from the module-level logsdir.
    """
    os.chdir("/usr/local/logs")
    if os.path.isdir(logsdir):
        # Already processed today -- nothing left to do.
        sys.exit(0)
    os.mkdir(logsdir)
def downloadlogs():
    # Fetch every site's gzipped access log into the day's log directory,
    # then decompress all of them in one shell call.
    os.chdir("/usr/local/logs/%s"%logsdir)
    for line in downloadurl.values():
        # Local file name = last component of the URL, e.g. 'www.jia.com.log.gz'.
        logname=line.split('/')[-1]
        # Python 2 API: urllib.urlretrieve downloads the URL to logname.
        urllib.urlretrieve(line,logname)
    # NOTE(review): shells out for decompression; assumes gzip is on PATH and
    # ignores the command's exit status.
    os.popen("gzip -d *.gz")
def spiderlog():
    """Extract search-engine crawler hits from the downloaded logs.

    Walks the day's log directory, collects every *.log file into the
    module-level loglst, greps each one for well-known crawler user agents
    (Baiduspider, Sogou web spider, 360Spider) into a spider_<name> file,
    and deletes any result file that came out empty.
    """
    os.chdir("/usr/local/logs/%s" % logsdir)
    for root, dirs, files in os.walk("/usr/local/logs/%s" % logsdir):
        for f in files:
            # The original wrapped this in a Python-2-only
            # `except Exception ,e: pass` -- but str.split cannot raise here,
            # so the silent broad except only hid real errors and made the
            # file unparsable under Python 3. Removed.
            if f.split('.')[-1] == 'log':
                loglst.append(f)
    for flog in loglst:
        # egrep the common crawler UA strings into a companion file.
        os.popen("egrep -i 'Baiduspider|Sogou web spider|360Spider' %s >spider_%s " % (flog, flog))
        # Drop empty results so only logs with actual spider traffic remain.
        if os.path.getsize('spider_%s' % flog) == 0:
            os.remove('spider_%s' % flog)
def menu():
    # Daily driver: create the day's log directory (exits early inside
    # logdir() if it already exists), download and unpack the logs, then
    # extract the spider hits.
    logdir(mydate)
    downloadlogs()
    spiderlog()
if __name__=='__main__':
    menu()
| 63.338129
| 106
| 0.57542
| 1,506
| 8,804
| 3.356574
| 0.187915
| 0.185163
| 0.154303
| 0.200593
| 0.639169
| 0.383185
| 0.367161
| 0.367161
| 0.22908
| 0.010682
| 0
| 0.176957
| 0.175829
| 8,804
| 138
| 107
| 63.797101
| 0.519708
| 0.012381
| 0
| 0.016529
| 0
| 0.644628
| 0.615925
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0.008264
| 0.049587
| null | null | 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 4
|
e6b44fd9ea5ae5f54d6f07d8221b52760daf2836
| 200
|
py
|
Python
|
epic_hash/exceptions.py
|
AleksMat/epic-hash
|
3ea2e0b50b092029ae4336557fb3d6944712fe6d
|
[
"MIT"
] | null | null | null |
epic_hash/exceptions.py
|
AleksMat/epic-hash
|
3ea2e0b50b092029ae4336557fb3d6944712fe6d
|
[
"MIT"
] | null | null | null |
epic_hash/exceptions.py
|
AleksMat/epic-hash
|
3ea2e0b50b092029ae4336557fb3d6944712fe6d
|
[
"MIT"
] | null | null | null |
"""
A module implementing package-specific exceptions
"""
class OutputValidationError(Exception):
    """Raised whenever something is wrong with the submitted output."""
| 25
| 92
| 0.755
| 22
| 200
| 6.863636
| 0.909091
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.165
| 200
| 8
| 93
| 25
| 0.904192
| 0.695
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
|
0
| 4
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.