hexsha string | size int64 | ext string | lang string | max_stars_repo_path string | max_stars_repo_name string | max_stars_repo_head_hexsha string | max_stars_repo_licenses list | max_stars_count int64 | max_stars_repo_stars_event_min_datetime string | max_stars_repo_stars_event_max_datetime string | max_issues_repo_path string | max_issues_repo_name string | max_issues_repo_head_hexsha string | max_issues_repo_licenses list | max_issues_count int64 | max_issues_repo_issues_event_min_datetime string | max_issues_repo_issues_event_max_datetime string | max_forks_repo_path string | max_forks_repo_name string | max_forks_repo_head_hexsha string | max_forks_repo_licenses list | max_forks_count int64 | max_forks_repo_forks_event_min_datetime string | max_forks_repo_forks_event_max_datetime string | content string | avg_line_length float64 | max_line_length int64 | alphanum_fraction float64 | qsc_code_num_words_quality_signal int64 | qsc_code_num_chars_quality_signal float64 | qsc_code_mean_word_length_quality_signal float64 | qsc_code_frac_words_unique_quality_signal float64 | qsc_code_frac_chars_top_2grams_quality_signal float64 | qsc_code_frac_chars_top_3grams_quality_signal float64 | qsc_code_frac_chars_top_4grams_quality_signal float64 | qsc_code_frac_chars_dupe_5grams_quality_signal float64 | qsc_code_frac_chars_dupe_6grams_quality_signal float64 | qsc_code_frac_chars_dupe_7grams_quality_signal float64 | qsc_code_frac_chars_dupe_8grams_quality_signal float64 | qsc_code_frac_chars_dupe_9grams_quality_signal float64 | qsc_code_frac_chars_dupe_10grams_quality_signal float64 | qsc_code_frac_chars_replacement_symbols_quality_signal float64 | qsc_code_frac_chars_digital_quality_signal float64 | qsc_code_frac_chars_whitespace_quality_signal float64 | qsc_code_size_file_byte_quality_signal float64 | qsc_code_num_lines_quality_signal float64 | qsc_code_num_chars_line_max_quality_signal float64 | qsc_code_num_chars_line_mean_quality_signal float64 | 
qsc_code_frac_chars_alphabet_quality_signal float64 | qsc_code_frac_chars_comments_quality_signal float64 | qsc_code_cate_xml_start_quality_signal float64 | qsc_code_frac_lines_dupe_lines_quality_signal float64 | qsc_code_cate_autogen_quality_signal float64 | qsc_code_frac_lines_long_string_quality_signal float64 | qsc_code_frac_chars_string_length_quality_signal float64 | qsc_code_frac_chars_long_word_length_quality_signal float64 | qsc_code_frac_lines_string_concat_quality_signal float64 | qsc_code_cate_encoded_data_quality_signal float64 | qsc_code_frac_chars_hex_words_quality_signal float64 | qsc_code_frac_lines_prompt_comments_quality_signal float64 | qsc_code_frac_lines_assert_quality_signal float64 | qsc_codepython_cate_ast_quality_signal float64 | qsc_codepython_frac_lines_func_ratio_quality_signal float64 | qsc_codepython_cate_var_zero_quality_signal bool | qsc_codepython_frac_lines_pass_quality_signal float64 | qsc_codepython_frac_lines_import_quality_signal float64 | qsc_codepython_frac_lines_simplefunc_quality_signal float64 | qsc_codepython_score_lines_no_logic_quality_signal float64 | qsc_codepython_frac_lines_print_quality_signal float64 | qsc_code_num_words int64 | qsc_code_num_chars int64 | qsc_code_mean_word_length int64 | qsc_code_frac_words_unique null | qsc_code_frac_chars_top_2grams int64 | qsc_code_frac_chars_top_3grams int64 | qsc_code_frac_chars_top_4grams int64 | qsc_code_frac_chars_dupe_5grams int64 | qsc_code_frac_chars_dupe_6grams int64 | qsc_code_frac_chars_dupe_7grams int64 | qsc_code_frac_chars_dupe_8grams int64 | qsc_code_frac_chars_dupe_9grams int64 | qsc_code_frac_chars_dupe_10grams int64 | qsc_code_frac_chars_replacement_symbols int64 | qsc_code_frac_chars_digital int64 | qsc_code_frac_chars_whitespace int64 | qsc_code_size_file_byte int64 | qsc_code_num_lines int64 | qsc_code_num_chars_line_max int64 | qsc_code_num_chars_line_mean int64 | qsc_code_frac_chars_alphabet int64 | qsc_code_frac_chars_comments int64 | 
qsc_code_cate_xml_start int64 | qsc_code_frac_lines_dupe_lines int64 | qsc_code_cate_autogen int64 | qsc_code_frac_lines_long_string int64 | qsc_code_frac_chars_string_length int64 | qsc_code_frac_chars_long_word_length int64 | qsc_code_frac_lines_string_concat null | qsc_code_cate_encoded_data int64 | qsc_code_frac_chars_hex_words int64 | qsc_code_frac_lines_prompt_comments int64 | qsc_code_frac_lines_assert int64 | qsc_codepython_cate_ast int64 | qsc_codepython_frac_lines_func_ratio int64 | qsc_codepython_cate_var_zero int64 | qsc_codepython_frac_lines_pass int64 | qsc_codepython_frac_lines_import int64 | qsc_codepython_frac_lines_simplefunc int64 | qsc_codepython_score_lines_no_logic int64 | qsc_codepython_frac_lines_print int64 | effective string | hits int64 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
affc39443576305ee2e0ade1abbbe279cdbe06bc | 627 | py | Python | default_values.py | Omar-X/App_init | 9de2fff36a8a5d2911e5dd495b62a7ce91d4a68e | [
"MIT"
] | 1 | 2021-10-11T09:40:27.000Z | 2021-10-11T09:40:27.000Z | default_values.py | Omar-X/App_init | 9de2fff36a8a5d2911e5dd495b62a7ce91d4a68e | [
"MIT"
] | null | null | null | default_values.py | Omar-X/App_init | 9de2fff36a8a5d2911e5dd495b62a7ce91d4a68e | [
"MIT"
] | null | null | null | import os
# getting path so you can run the script python3 App_init, python3 .
if os.getcwd()[-8:] != "App_init":
default_path = "App_init/"
print(default_path)
else:
default_path = ""
# reading all built in modules
default_modules = open(f"{default_path}default_modules.txt", "r").readlines()
for a, i in enumerate(default_modules):
if i[0] != "#":
# make a list of all names
default_modules[a] = i.replace("\n", "")
default_modules[a] = default_modules[a].replace(" ", "")
# removing all comments
for i in default_modules:
if i[0] == "#":
default_modules.remove(i)
| 27.26087 | 77 | 0.639553 | 90 | 627 | 4.288889 | 0.477778 | 0.290155 | 0.11658 | 0.088083 | 0.093264 | 0 | 0 | 0 | 0 | 0 | 0 | 0.010163 | 0.215311 | 627 | 22 | 78 | 28.5 | 0.77439 | 0.226475 | 0 | 0 | 0 | 0 | 0.117155 | 0.069038 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.071429 | 0 | 0.071429 | 0.071429 | 0 | 0 | 0 | null | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
b306523785b4bd7109a4337c13f607cc42984c0b | 590 | py | Python | main/python-sphinx-removed-in/template.py | RoastVeg/cports | 803c7f07af341eb32f791b6ec1f237edb2764bd5 | [
"BSD-2-Clause"
] | null | null | null | main/python-sphinx-removed-in/template.py | RoastVeg/cports | 803c7f07af341eb32f791b6ec1f237edb2764bd5 | [
"BSD-2-Clause"
] | null | null | null | main/python-sphinx-removed-in/template.py | RoastVeg/cports | 803c7f07af341eb32f791b6ec1f237edb2764bd5 | [
"BSD-2-Clause"
] | null | null | null | pkgname = "python-sphinx-removed-in"
pkgver = "0.2.1"
pkgrel = 0
build_style = "python_module"
hostmakedepends = ["python-setuptools"]
checkdepends = ["python-sphinx"]
depends = ["python-sphinx"]
pkgdesc = "Sphinx extension for versionremoved and removed-in directives"
maintainer = "q66 <q66@chimera-linux.org>"
license = "BSD-3-Clause"
url = "https://github.com/MrSenko/sphinx-removed-in"
source = f"$(PYPI_SITE)/s/sphinx-removed-in/sphinx-removed-in-{pkgver}.tar.gz"
sha256 = "0588239cb534cd97b1d3900d0444311c119e45296a9f73f1ea81ea81a2cd3db1"
# dependency of pytest
options = ["!check"]
| 36.875 | 78 | 0.759322 | 72 | 590 | 6.180556 | 0.708333 | 0.101124 | 0.134831 | 0.094382 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.102421 | 0.089831 | 590 | 15 | 79 | 39.333333 | 0.726257 | 0.033898 | 0 | 0 | 0 | 0.071429 | 0.642606 | 0.31162 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
b30aff42a0561262838decac18137273d9752c56 | 2,680 | py | Python | modele/Class.py | AntoineDelay/chess | 66dedf1c468a075bb202f85753caa075316dac28 | [
"MIT"
] | null | null | null | modele/Class.py | AntoineDelay/chess | 66dedf1c468a075bb202f85753caa075316dac28 | [
"MIT"
] | null | null | null | modele/Class.py | AntoineDelay/chess | 66dedf1c468a075bb202f85753caa075316dac28 | [
"MIT"
] | null | null | null |
class Case :
def __init__(self,x,y):
self.id = str(x)+','+str(y)
self.x = x
self.y = y
self.piece = None
def check_case(self):
"""renvoie la piece si la case est occupé,renvoie -1 sinon """
if(self.piece != None):
return self.piece
return -1
def affecter_piece(self,piece):
self.piece = piece
self.piece.case_affectation(self)
def desaffecter_piece(self):
if self.piece != None :
self.piece = None
def show(self):
if self.piece != None and self.piece.case != None :
return "| "+str(self.piece.point)+" |"
else :
return "| 0 |"
def get_piece(self):
return self.piece
def get_x(self):
return self.x
def get_y(self):
return self.y
class Board :
def __init__(self,liste_case):
self.board = liste_case
def get_case(self,x,y):
for i in range(len(self.board)):
if self.board[i].x == x and self.board[i].y == y:
return self.board[i]
return -1
def show_board(self):
x = 0
s_board = ""
for case in self.board :
if case.x > x :
s_board += "\n"
x+=1
s_board += case.show()
print(s_board)
class Piece :
def __init__(self,name,color,point,board):
self.name = name
self.color = color
self.point = point
self.case = None
self.board = board
self.depla = []
def possible_depla(self):
"""calcule les déplacement actuellement possible sans contrainte externe, resultat dans depla"""
pass
def case_affectation(self,case):
self.case = case
def get_depla(self):
return self.depla
class Pion(Piece) :
def __init__(self,color,board):
super().__init__('Pion',color,1,board)
def possible_depla(self):
id_case = str(self.case.get_x())+','+str(self.case.get_y()+1)
id_case_2 = None
if self.case.get_y() == 2 :
id_case_2 = str(self.case.get_x())+','+str(self.case.get_y()+1)
for case in self.board.board:
if case.id == id_case and case.piece == None :
self.depla.append(case)
if id_case_2 != None and case.id == id_case_2 and case.piece == None:
self.depla.append(case)
class Roi(Piece):
def __init__(self,color,board):
super().__init__('Roi',color,1000,board)
class Dame(Piece) :
def __init__(self,color,board):
super().__init__('Dame',color,9,board)
| 29.130435 | 104 | 0.538806 | 360 | 2,680 | 3.816667 | 0.172222 | 0.078603 | 0.048035 | 0.046579 | 0.226346 | 0.172489 | 0.172489 | 0.172489 | 0.045124 | 0.045124 | 0 | 0.010686 | 0.336567 | 2,680 | 91 | 105 | 29.450549 | 0.762092 | 0.054851 | 0 | 0.168831 | 0 | 0 | 0.009917 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.246753 | false | 0.012987 | 0 | 0.051948 | 0.454545 | 0.012987 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
b3106616365026d8efdce59e1af5c72f5a869234 | 257 | py | Python | Dynamic Programming/416. Partition Equal Subset Sum/Python Solution/Solution.py | lionelsamrat10/LeetCode-Solutions | 47f5c94995225b875b1eb0e92c5f643bec646a86 | [
"MIT"
] | 9 | 2021-03-24T11:21:03.000Z | 2022-02-14T05:05:48.000Z | Dynamic Programming/416. Partition Equal Subset Sum/Python Solution/Solution.py | lionelsamrat10/LeetCode-Solutions | 47f5c94995225b875b1eb0e92c5f643bec646a86 | [
"MIT"
] | 38 | 2021-10-07T18:04:12.000Z | 2021-12-05T05:53:27.000Z | Dynamic Programming/416. Partition Equal Subset Sum/Python Solution/Solution.py | lionelsamrat10/LeetCode-Solutions | 47f5c94995225b875b1eb0e92c5f643bec646a86 | [
"MIT"
] | 27 | 2021-10-06T19:55:48.000Z | 2021-11-18T16:53:20.000Z | class Solution:
def canPartition(self, nums: List[int]) -> bool:
dp, s = set([0]), sum(nums)
if s&1:
return False
for num in nums:
dp.update([v+num for v in dp if v+num <= s>>1])
return s>>1 in dp
| 28.555556 | 59 | 0.498054 | 42 | 257 | 3.047619 | 0.547619 | 0.046875 | 0.125 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.02454 | 0.365759 | 257 | 8 | 60 | 32.125 | 0.760736 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.125 | false | 0 | 0 | 0 | 0.5 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
b310942ec473743eb33ea648c960c10e28fedd79 | 712 | py | Python | self_paced_ensemble/canonical_resampling/__init__.py | thulio/self-paced-ensemble | 0270edecc2f88783e2f4657510e089c2cacfcabe | [
"MIT"
] | 203 | 2019-06-04T07:43:25.000Z | 2022-03-30T22:16:32.000Z | self_paced_ensemble/canonical_resampling/__init__.py | thulio/self-paced-ensemble | 0270edecc2f88783e2f4657510e089c2cacfcabe | [
"MIT"
] | 14 | 2020-02-26T09:42:46.000Z | 2022-01-11T12:25:16.000Z | self_paced_ensemble/canonical_resampling/__init__.py | thulio/self-paced-ensemble | 0270edecc2f88783e2f4657510e089c2cacfcabe | [
"MIT"
] | 46 | 2019-11-25T01:13:31.000Z | 2021-12-29T06:49:07.000Z | """
--------------------------------------------------------------------------
The `self_paced_ensemble.canonical_resampling` module implement a
resampling-based classifier for imbalanced classification.
15 resampling algorithms are included:
'RUS', 'CNN', 'ENN', 'NCR', 'Tomek', 'ALLKNN', 'OSS',
'NM', 'CC', 'SMOTE', 'ADASYN', 'BorderSMOTE', 'SMOTEENN',
'SMOTETomek', 'ORG'.
Note: the implementation of these resampling algorithms is based on
imblearn python package.
See https://github.com/scikit-learn-contrib/imbalanced-learn.
--------------------------------------------------------------------------
"""
from .canonical_resampling import ResampleClassifier
__all__ = [
"ResampleClassifier",
]
| 32.363636 | 74 | 0.585674 | 64 | 712 | 6.390625 | 0.828125 | 0.09291 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.00312 | 0.099719 | 712 | 21 | 75 | 33.904762 | 0.634945 | 0.852528 | 0 | 0 | 0 | 0 | 0.185567 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.25 | 0 | 0.25 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
b312241b809985558a61adf947e84c53f8e2bb9c | 997 | py | Python | python/ql/test/query-tests/Security/CWE-089/sql_injection.py | p-snft/ql | 6243c722c6d18f152fe47d2c800540d5bc5c3c3f | [
"ECL-2.0",
"Apache-2.0"
] | 3 | 2021-07-12T09:23:48.000Z | 2021-10-04T10:05:46.000Z | python/ql/test/query-tests/Security/CWE-089/sql_injection.py | p-snft/ql | 6243c722c6d18f152fe47d2c800540d5bc5c3c3f | [
"ECL-2.0",
"Apache-2.0"
] | 2 | 2019-02-21T16:20:02.000Z | 2019-05-01T12:10:05.000Z | python/ql/test/query-tests/Security/CWE-089/sql_injection.py | p-snft/ql | 6243c722c6d18f152fe47d2c800540d5bc5c3c3f | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
from django.conf.urls import patterns, url
from django.db import connection, models
from django.db.models.expressions import RawSQL
class Name(models.Model):
pass
def save_name(request):
if request.method == 'POST':
name = request.POST.get('name')
curs = connection.cursor()
#GOOD -- Using parameters
curs.execute(
"insert into names_file ('name') values ('%s')", name)
#BAD -- Using string formatting
curs.execute(
"insert into names_file ('name') values ('%s')" % name)
#BAD -- other ways of executing raw SQL code with string interpolation
Name.objects.annotate(RawSQL("insert into names_file ('name') values ('%s')" % name))
Name.objects.raw("insert into names_file ('name') values ('%s')" % name)
Name.objects.extra("insert into names_file ('name') values ('%s')" % name)
urlpatterns = patterns(url(r'^save_name/$',
save_name, name='save_name'))
| 34.37931 | 93 | 0.620863 | 124 | 997 | 4.919355 | 0.419355 | 0.081967 | 0.122951 | 0.155738 | 0.360656 | 0.360656 | 0.360656 | 0.360656 | 0.304918 | 0.304918 | 0 | 0 | 0.242728 | 997 | 28 | 94 | 35.607143 | 0.807947 | 0.12337 | 0 | 0.111111 | 0 | 0 | 0.29229 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.055556 | false | 0.055556 | 0.166667 | 0 | 0.277778 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 1 |
b31e77923a41068a3cef4c14e6a23d2de634a6cd | 386 | py | Python | front_end/migrations/0002_rename_nome_popular_especies_nome_popular.py | majubr/website_Django | eb0683459af82e4abb3e8ccb016d52d4365ff729 | [
"MIT"
] | null | null | null | front_end/migrations/0002_rename_nome_popular_especies_nome_popular.py | majubr/website_Django | eb0683459af82e4abb3e8ccb016d52d4365ff729 | [
"MIT"
] | null | null | null | front_end/migrations/0002_rename_nome_popular_especies_nome_popular.py | majubr/website_Django | eb0683459af82e4abb3e8ccb016d52d4365ff729 | [
"MIT"
] | null | null | null | # Generated by Django 4.0.3 on 2022-03-15 03:08
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('front_end', '0001_initial'),
]
operations = [
migrations.RenameField(
model_name='especies',
old_name='Nome_Popular',
new_name='Nome_popular',
),
]
| 20.315789 | 48 | 0.562176 | 40 | 386 | 5.25 | 0.775 | 0.07619 | 0.142857 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.073643 | 0.331606 | 386 | 18 | 49 | 21.444444 | 0.74031 | 0.11658 | 0 | 0 | 1 | 0 | 0.165109 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.083333 | 0 | 0.333333 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
b33442383156f3c0acad7de6e549de783efa735c | 836 | py | Python | application/file/routes.py | h4k1m0u/flask-webapp | d095449a3d324c50de745f7ac5f84b6fa2cb08b4 | [
"MIT"
] | null | null | null | application/file/routes.py | h4k1m0u/flask-webapp | d095449a3d324c50de745f7ac5f84b6fa2cb08b4 | [
"MIT"
] | null | null | null | application/file/routes.py | h4k1m0u/flask-webapp | d095449a3d324c50de745f7ac5f84b6fa2cb08b4 | [
"MIT"
] | null | null | null | from flask import Blueprint, render_template, url_for, redirect
from flask import current_app as app
from .forms import UploadForm
from werkzeug.utils import secure_filename
import os
file_bp = Blueprint('file', __name__,
template_folder='templates', static_folder='static')
@file_bp.route('/upload', methods=['GET', 'POST'])
def upload():
form = UploadForm()
if form.validate_on_submit():
# get file from form
f = form.photo.data
filename = secure_filename(f.filename)
# save file inside instance folder
f.save(os.path.join(app.instance_path, 'photos', filename))
return redirect(url_for('.success'))
return render_template('file/upload.html', form=form)
@file_bp.route('/success')
def success():
return render_template('file/success.html')
| 26.125 | 72 | 0.688995 | 108 | 836 | 5.148148 | 0.444444 | 0.07554 | 0.053957 | 0.097122 | 0.111511 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.194976 | 836 | 31 | 73 | 26.967742 | 0.826152 | 0.061005 | 0 | 0 | 0 | 0 | 0.112532 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.105263 | false | 0 | 0.263158 | 0.052632 | 0.526316 | 0.105263 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 1 |
b336dfd4f06633eb05d75f4fdb5a06c0dcb65ff7 | 1,590 | py | Python | python_experiments/paper_figures/tkde/data_legacy/eval_varying_eps_c_pcg.py | RapidsAtHKUST/SimRank | 3a601b08f9a3c281e2b36b914e06aba3a3a36118 | [
"MIT"
] | 8 | 2020-04-14T23:17:00.000Z | 2021-06-21T12:34:04.000Z | python_experiments/paper_figures/tkde/data_legacy/eval_varying_eps_c_pcg.py | RapidsAtHKUST/SimRank | 3a601b08f9a3c281e2b36b914e06aba3a3a36118 | [
"MIT"
] | null | null | null | python_experiments/paper_figures/tkde/data_legacy/eval_varying_eps_c_pcg.py | RapidsAtHKUST/SimRank | 3a601b08f9a3c281e2b36b914e06aba3a3a36118 | [
"MIT"
] | 1 | 2021-01-17T16:26:50.000Z | 2021-01-17T16:26:50.000Z | import json
if __name__ == '__main__':
with open('varying_eps_c.dicts') as ifs:
pcg_varying_eps_cpu = eval(ifs.readline())
pcg_varying_eps_mem = eval(ifs.readline())
pcg_varying_c_cpu = eval(ifs.readline())
pcg_varying_c_mem = eval(ifs.readline())
pcg_tag = 'pcg'
with open('pcg-varying-eps-cpu.json', 'w') as ofs:
ofs.write(json.dumps({
pcg_tag: {
'0.6':
pcg_varying_eps_cpu
}
}, indent=4))
with open('pcg-varying-eps-mem.json', 'w') as ofs:
ofs.write(json.dumps({
pcg_tag: {
'0.6':
pcg_varying_eps_mem
}
}, indent=4))
with open('pcg-varying-eps-cpu.json', 'w') as ofs:
ofs.write(json.dumps({
pcg_tag: {
'0.6':
pcg_varying_eps_cpu
}
}, indent=4))
def combine(data: dict, extra):
res = dict()
for c, val in data.items():
res[c] = {extra: val}
return res
with open('pcg-varying-c-cpu.json', 'w') as ofs:
ofs.write(json.dumps({
pcg_tag:
combine(pcg_varying_c_cpu, '0.01')
}, indent=4))
with open('pcg-varying-c-mem.json', 'w') as ofs:
ofs.write(json.dumps({
pcg_tag:
combine(pcg_varying_c_mem, '0.01')
}, indent=4))
| 27.894737 | 58 | 0.445283 | 185 | 1,590 | 3.594595 | 0.210811 | 0.210526 | 0.156391 | 0.120301 | 0.756391 | 0.690226 | 0.52782 | 0.485714 | 0.485714 | 0.485714 | 0 | 0.018681 | 0.427673 | 1,590 | 56 | 59 | 28.392857 | 0.712088 | 0 | 0 | 0.5 | 0 | 0 | 0.10566 | 0.072956 | 0 | 0 | 0 | 0 | 0 | 1 | 0.022727 | false | 0 | 0.022727 | 0 | 0.068182 | 0 | 0 | 0 | 0 | null | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
b33cf42bd0fd23988f48859e9516133f47659bcc | 481 | py | Python | src/richie/apps/courses/migrations/0017_auto_20200827_1011.py | leduong/richie | bf7ed379b7e2528cd790dadcec10ac2656efd189 | [
"MIT"
] | 174 | 2018-04-14T23:36:01.000Z | 2022-03-10T09:27:01.000Z | src/richie/apps/courses/migrations/0017_auto_20200827_1011.py | leduong/richie | bf7ed379b7e2528cd790dadcec10ac2656efd189 | [
"MIT"
] | 631 | 2018-04-04T11:28:53.000Z | 2022-03-31T11:18:31.000Z | src/richie/apps/courses/migrations/0017_auto_20200827_1011.py | leduong/richie | bf7ed379b7e2528cd790dadcec10ac2656efd189 | [
"MIT"
] | 64 | 2018-06-27T08:35:01.000Z | 2022-03-10T09:27:43.000Z | # Generated by Django 2.2.15 on 2020-08-27 08:11
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
("courses", "0016_auto_20200417_1237"),
]
operations = [
migrations.AlterField(
model_name="courserun",
name="resource_link",
field=models.CharField(
blank=True, max_length=200, null=True, verbose_name="Resource link"
),
),
]
| 22.904762 | 83 | 0.590437 | 51 | 481 | 5.431373 | 0.764706 | 0.086643 | 0.115523 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.104167 | 0.301455 | 481 | 20 | 84 | 24.05 | 0.720238 | 0.095634 | 0 | 0.142857 | 1 | 0 | 0.150115 | 0.053118 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.071429 | 0 | 0.285714 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
b343279a8195c95b740ea2b5b46901eb458edfeb | 8,799 | py | Python | nrf5_mesh/CMake/SES/SESGenerator.py | aberke/city-science-bike-swarm | 797e803014fc0c3878016309a62460a736140958 | [
"MIT"
] | 15 | 2019-02-25T20:25:29.000Z | 2021-02-27T17:57:38.000Z | nrf5_mesh/CMake/SES/SESGenerator.py | aberke/city-science-bike-swarm | 797e803014fc0c3878016309a62460a736140958 | [
"MIT"
] | 3 | 2020-02-21T22:35:38.000Z | 2020-10-05T02:25:30.000Z | nrf5_mesh/CMake/SES/SESGenerator.py | aberke/city-science-bike-swarm | 797e803014fc0c3878016309a62460a736140958 | [
"MIT"
] | 5 | 2019-06-29T21:03:57.000Z | 2021-06-15T06:16:20.000Z | #!/usr/bin/env python3
# Usage: python SESGenerator.py <target_configuration>.json <output_directory>
#
# <target_configuration>.json is a json file generated from CMake on the form:
# {
# "target": {
# "name": "light_control_client_nrf52832_xxAA_s132_5.0.0",
# "sources": "main.c;provisioner.c;..",
# "includes": "include1;include2;..",
# "definitions":"NRF52;NRF52_SERIES;..",
# },
# "platform": {
# "name": "nrf52832_xxAA",
# "arch": "cortex-m4f",
# "flash_size": 524288,
# "ram_size": 65536,
# },
# "softdevice": {
# "hex_file": "<path-to-s132_nrf52_5.0.0_softdevice.hex>",
# "flash_size": 143360,
# "ram_size": 12720
# }
# }
import jinja2
import sys
import argparse
import json
import os
from collections import namedtuple
from shutil import copyfile
TEST_JSON_STR = """{
"target": {
"name": "light_control_client_nrf52832_xxAA_s132_5.0.0",
"sources": "main.c;provisioner.c",
"includes": "include1;include2",
"defines":"NRF52;NRF52_SERIES"
},
"platform": {
"name": "nrf52832_xxAA",
"arch": "cortex-m4f",
"flash_size": 524288,
"ram_size": 65536
},
"softdevice": {
"hex_file": "path-to/s132_nrf52_5.0.0_softdevice.hex",
"flash_size": 143360,
"ram_size": 12720
}
}"""
# Constants
NRF51_BOOTLOADER_FLASH_SIZE = 24576
NRF51_BOOTLOADER_RAM_SIZE = 768
NRF52_BOOTLOADER_FLASH_SIZE = 32768
NRF52_BOOTLOADER_RAM_SIZE = 4096
RAM_ADDRESS_START = 536870912
def application_flash_limits_get(softdevice_flash_size,
bootloader_flash_size,
platform_flash_size):
return (hex(softdevice_flash_size), hex(platform_flash_size - bootloader_flash_size))
def application_ram_limits_get(softdevice_ram_size,
bootloader_ram_size,
platform_ram_size):
return (hex(RAM_ADDRESS_START + softdevice_ram_size), hex(platform_ram_size - bootloader_ram_size))
DataRegion = namedtuple("DataRegion", ["start", "size"])
Target = namedtuple("Target", ["name", "includes", "defines", "sources"])
Platform = namedtuple("Platform", ["name", "arch", "flash_size", "ram_size"])
SoftDevice = namedtuple("Softdevice", ["hex_file", "flash_size", "ram_size"])
Configuration = namedtuple("Configuration", ["target", "platform", "softdevice"])
File = namedtuple("File", ["path"])
Group = namedtuple("Group", ["name", "files", "match_string"])
GROUP_TEMPLATES = [
Group(name="Application", files=[], match_string="examples"),
Group(name="Core", files=[], match_string="mesh/core"),
Group(name="Serial", files=[], match_string="mesh/serial"),
Group(name="Mesh stack", files=[], match_string="mesh/stack"),
Group(name="GATT", files=[], match_string="mesh/gatt"),
Group(name="DFU", files=[], match_string="mesh/dfu"),
Group(name="Toolchain", files=[File("$(StudioDir)/source/thumb_crt0.s")], match_string="toolchain"),
Group(name="Access", files=[], match_string="mesh/access"),
Group(name="Bearer", files=[], match_string="mesh/bearer"),
Group(name="SEGGER RTT", files=[], match_string="rtt"),
Group(name="uECC", files=[], match_string="micro-ecc"),
Group(name="nRF5 SDK", files=[], match_string="$(SDK_ROOT"),
Group(name="Provisioning", files=[], match_string="mesh/prov"),
Group(name="Configuration Model", files=[], match_string="models/foundation/config"),
Group(name="Health Model", files=[], match_string="models/foundation/health"),
Group(name="Generic OnOff Model", files=[], match_string="models/model_spec/generic_onoff"),
Group(name="Simple OnOff Model", files=[], match_string="models/vendor/simple_on_off"),
Group(name="Remote provisioning Model", files=[], match_string="models/proprietary/pb_remote")]
def unix_relative_path_get(path1, path2):
if not path1.startswith('$('):
path1 = os.path.relpath(path1, path2)
return path1.replace("\\", "/")
def load_config(input_file):
with open(input_file, "r") as f:
config = json.load(f)
return config
def load_softdevice(sd_config):
with open(sd_config["definition_file"], "r") as f:
config = json.load(f)
return [sd for sd in config["softdevices"] if sd["name"] == sd_config["name"]][0]
def load_platform(platform_config):
with open(platform_config["definition_file"], "r") as f:
config = json.load(f)
return [platform for platform in config["platforms"] if platform["name"] == platform_config["name"]][0]
def create_file_groups(files, out_dir):
other = Group(name="Other", files=[], match_string=None)
groups = GROUP_TEMPLATES[:]
for f in files:
found_group = False
if "gcc_startup" in f.lower() or "arm_startup" in f.lower():
continue
for g in groups:
if g.match_string in f:
f = unix_relative_path_get(f, out_dir)
g.files.append(File(f))
found_group = True
break
if not found_group:
f = unix_relative_path_get(f, out_dir)
other.files.append(File(f))
groups.append(other)
# Remove empty groups
for g in groups[:]:
if len(g.files) == 0:
groups.remove(g)
return groups
def calculate_flash_limits(config):
bl_flash_size = NRF51_BOOTLOADER_FLASH_SIZE if "nrf51" in config["platform"]["config"]["name"].lower() else NRF52_BOOTLOADER_FLASH_SIZE
bl_flash_size = bl_flash_size if "nrf52810_xxAA" not in config["platform"]["config"]["name"] else 0
flash_limits = application_flash_limits_get(config["softdevice"]["config"]["flash_size"], bl_flash_size, config["platform"]["config"]["flash_size"])
return DataRegion(*flash_limits)
def calculate_ram_limits(config):
bl_ram_size = NRF51_BOOTLOADER_RAM_SIZE if "nrf51" in config["platform"]["config"]["name"].lower() else NRF52_BOOTLOADER_RAM_SIZE
bl_ram_size = bl_ram_size if "nrf52810_xxAA" not in config["platform"]["config"]["name"] else 0
ram_limits = application_ram_limits_get(config["softdevice"]["config"]["ram_size"], bl_ram_size, config["platform"]["config"]["ram_size"])
return DataRegion(*ram_limits)
def generate_ses_project(config, out_dir="."):
files = config["target"]["sources"].split(";")
config["target"]["includes"] = [unix_relative_path_get(i, out_dir) for i in config["target"]["includes"].split(";")]
config["target"]["heap_size"] = 1024
config["target"]["stack_size"] = 2048
config["target"]["groups"] = create_file_groups(files, out_dir)
config["target"]["flash"] = calculate_flash_limits(config)
config["target"]["ram"] = calculate_ram_limits(config)
config["platform"]["fpu"] = config["platform"]["config"]["arch"] == "cortex-m4f"
config["softdevice"]["hex_file"] = unix_relative_path_get(config["softdevice"]["hex_file"], out_dir)
config["sdk_default_path"] = unix_relative_path_get('../../../nRF5_SDK_16.0.0_98a08e2', out_dir)
s = ""
with open("ses.xml", "r") as f:
s = f.read()
t = jinja2.Template(s)
s = t.render(config)
return s
def generate_ses_session(out_dir):
session_file_contents = ['<!DOCTYPE CrossStudio_Session_File>',
'<session>',
'\t<Files>',
'\t\t<SessionOpenFile path="{}"/>',
'\t</Files>',
'</session>']
return '\n'.join(session_file_contents).format(unix_relative_path_get('../../doc/getting_started/SES.md', out_dir))
def test():
config = json.loads(TEST_JSON_STR)
print(config)
s = generate_ses_project(config)
with open("test.xml", "w") as f:
f.write(s)
print ("Done")
def main():
input_file = sys.argv[1]
out_dir = sys.argv[2]
config = load_config(input_file)
config["softdevice"]["config"] = load_softdevice(config["softdevice"])
config["platform"]["config"] = load_platform(config["platform"])
ses_project = generate_ses_project(config, out_dir)
out_dir += "/"
# SES doesn't support "." in filenames
output_filename = out_dir + config["target"]["name"].replace(".", "_")
project_file = output_filename + ".emProject"
with open(project_file, "w") as f:
f.write(ses_project)
# Create session
ses_session = generate_ses_session(out_dir)
session_file = output_filename + ".emSession"
with open(session_file, "w") as f:
f.write(ses_session)
# Generate flash placement:
copyfile("flash_placement.xml", out_dir + "flash_placement.xml")
print("Wrote: " + project_file)
if __name__ == "__main__":
main()
| 37.763948 | 152 | 0.642459 | 1,092 | 8,799 | 4.915751 | 0.201465 | 0.036885 | 0.056632 | 0.029806 | 0.292474 | 0.233793 | 0.186848 | 0.167474 | 0.157414 | 0.152012 | 0 | 0.028321 | 0.197409 | 8,799 | 232 | 153 | 37.926724 | 0.731804 | 0.091374 | 0 | 0.030488 | 1 | 0 | 0.250439 | 0.049962 | 0 | 0 | 0 | 0 | 0 | 1 | 0.079268 | false | 0 | 0.042683 | 0.012195 | 0.189024 | 0.018293 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
b344553cf35913d934c9423d0a59566943fcaa1d | 1,446 | py | Python | PythonFIAP/Capitulo5_Manipula_Arquivos/Boston.py | DanielGMesquita/StudyPath | 0b3d0bb1deac7eb0d1b301edca5e5ed320568f4c | [
"MIT"
] | null | null | null | PythonFIAP/Capitulo5_Manipula_Arquivos/Boston.py | DanielGMesquita/StudyPath | 0b3d0bb1deac7eb0d1b301edca5e5ed320568f4c | [
"MIT"
] | null | null | null | PythonFIAP/Capitulo5_Manipula_Arquivos/Boston.py | DanielGMesquita/StudyPath | 0b3d0bb1deac7eb0d1b301edca5e5ed320568f4c | [
"MIT"
] | null | null | null | #Análise do relatório de vôos e viagens do aeroporto de Boston com base nos relatórios econômicos oficiais
# Analyze Boston airport flight/travel figures from the official economic-indicator report.
with open('economic-indicators.csv', 'r') as boston:
    total_voos = 0
    maior = 0.0
    total_passageiros = 0
    maior_media_diaria = 0.0
    ano_usuario = input('Qual ano deseja pesquisar?: ')
    # Skip the header row and the last line of the file.
    for linha in boston.readlines()[1:-1]:
        campos = linha.split(',')
        # Running total of flights (column 3).
        total_voos += float(campos[3])
        # Track the month/year with the heaviest airport traffic (column 2).
        if float(campos[2]) > maior:
            maior = float(campos[2])
            ano = campos[0]
            mes = campos[1]
        # Sum passengers for the year the user asked about.
        if ano_usuario == campos[0]:
            total_passageiros += float(campos[2])
        # Track the month with the highest average hotel daily rate (column 5).
        if float(campos[5]) > maior_media_diaria:
            maior_media_diaria = float(campos[5])
            mes_maior_diaria = campos[1]
    print('O total de voos é {}'.format(total_voos))
    print('O mês/ano com maior trânsito no aeroporto foi {}/{}'.format(mes, ano))
    print('O total de passageiros que passaram no ano de {} é {} passageiros'.format(str(ano_usuario),
                                                                                     str(total_passageiros)))
    print('O mês do ano {} com maior média diária de hotel foi {}'.format(ano_usuario, mes_maior_diaria))
| 45.1875 | 106 | 0.64592 | 205 | 1,446 | 4.44878 | 0.341463 | 0.039474 | 0.035088 | 0.026316 | 0.122807 | 0.074561 | 0.074561 | 0.074561 | 0 | 0 | 0 | 0.014884 | 0.25657 | 1,446 | 31 | 107 | 46.645161 | 0.833488 | 0.226141 | 0 | 0 | 0 | 0 | 0.218525 | 0.020683 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0.173913 | 0 | 0 | 0 | 0.173913 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 1 |
b347b691c3c4c7996f91f105edcfcd1839de843a | 1,464 | py | Python | blog/templatetags/blog_tags.py | josephdubon/boilerplate_dubon_django_blog | 1dbe470006be066b12dd6486eb26a41d304206f8 | [
"Unlicense",
"MIT"
] | null | null | null | blog/templatetags/blog_tags.py | josephdubon/boilerplate_dubon_django_blog | 1dbe470006be066b12dd6486eb26a41d304206f8 | [
"Unlicense",
"MIT"
] | 2 | 2021-06-10T20:43:00.000Z | 2021-09-22T19:55:41.000Z | blog/templatetags/blog_tags.py | josephdubon/boilerplate_dubon_django_blog | 1dbe470006be066b12dd6486eb26a41d304206f8 | [
"Unlicense",
"MIT"
] | null | null | null | from django import template
from django.db.models import Count
from django.utils.safestring import mark_safe
import markdown
from ..models import Post
register = template.Library()
####
# Register as simple tags
####
# A simple template tag that returns the number of posts published so far.=
@register.simple_tag
def total_posts():
    """Return the number of posts published so far."""
    published_count = Post.published.count()
    return published_count
# A simple template tag that displays the 5 most commented posts
@register.simple_tag
def get_most_commented_posts(count=5):
    """Return the `count` published posts with the most comments.

    annotate() stores the aggregated comment count on each Post in a
    computed `total_comments` field, which is then used for ordering.
    """
    annotated = Post.published.annotate(total_comments=Count('comments'))
    return annotated.order_by('-total_comments')[:count]
####
# Register as inclusion_tags
####
# An inclusion tag that returns the 5 latest posts.
@register.inclusion_tag('blog/post/latest_posts.html')
def show_latest_posts(count=5):
    """Render the `count` most recently published posts with the latest_posts template."""
    return {'latest_posts': Post.published.order_by('-publish')[:count]}
####
# Register Template Filters
####
# A template filter to enable use of markdown .md syntax in blog posts and then converts
# - post contents to HTML in the templates
@register.filter(name='markdown')
def markdown_format(text):
    """Convert Markdown text to HTML and mark the result safe for template rendering."""
    html = markdown.markdown(text)
    return mark_safe(html)
| 26.618182 | 88 | 0.734973 | 205 | 1,464 | 5.141463 | 0.370732 | 0.062619 | 0.028463 | 0.034156 | 0.041746 | 0 | 0 | 0 | 0 | 0 | 0 | 0.003306 | 0.173497 | 1,464 | 54 | 89 | 27.111111 | 0.867769 | 0.426913 | 0 | 0.086957 | 0 | 0 | 0.097257 | 0.033666 | 0 | 0 | 0 | 0 | 0 | 1 | 0.173913 | false | 0 | 0.217391 | 0.130435 | 0.565217 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 1 |
b34ced91974433146e88095dc35d1816f2301813 | 829 | py | Python | weather_board_log.py | petervdb/weather_monitor | 35df2493385892969544579d4b465a5cd31daad4 | [
"MIT"
] | null | null | null | weather_board_log.py | petervdb/weather_monitor | 35df2493385892969544579d4b465a5cd31daad4 | [
"MIT"
] | null | null | null | weather_board_log.py | petervdb/weather_monitor | 35df2493385892969544579d4b465a5cd31daad4 | [
"MIT"
] | null | null | null | #!/usr/bin/python
import SI1132
import BME280
import sys
import time
import os
# Require exactly one argument: the I2C bus device file (e.g. /dev/i2c-1).
if len(sys.argv) != 2:
    print("Usage: weather_board.py <i2c device file>")
    sys.exit()
# Light sensor (UV / visible / IR) on the given I2C bus.
si1132 = SI1132.SI1132(sys.argv[1])
# Temperature/humidity/pressure sensor; the hex args are presumably
# oversampling/mode settings for the BME280 driver — TODO confirm.
bme280 = BME280.BME280(sys.argv[1], 0x03, 0x02, 0x02, 0x02)
def get_altitude(pressure, seaLevel=1013.25):
    """Convert a pressure reading to altitude via the international barometric formula.

    Args:
        pressure: absolute pressure in Pa (the value is divided by 100,
            i.e. converted to hPa, before use).
        seaLevel: sea-level reference pressure in hPa. Defaults to the
            standard atmosphere (1013.25 hPa); callers may still pass a
            local reference as before.

    Returns:
        Altitude above the reference level in metres (negative when the
        pressure is higher than the reference).
    """
    atmospheric = pressure / 100.0  # Pa -> hPa
    return 44330.0 * (1.0 - pow(atmospheric / seaLevel, 0.1903))
# Print one reading from each sensor.
print("======== si1132 ========")
print("UV_index:%.2f" % (si1132.readUV() / 100.0))  # raw reading is scaled by 100
print("Visible:%d" % int(si1132.readVisible()))
print("IR:%d" % int(si1132.readIR()))
print("======== bme280 ========")
print("temperature:%.2f" % bme280.read_temperature())
print("humidity:%.2f" % bme280.read_humidity())
p = bme280.read_pressure()  # presumably in Pa (divided by 100 below) — TODO confirm
print("pressure:%.2f" % (p / 100.0))
print("altitude:%.2f" % get_altitude(p, 1024.25))  # 1024.25 hPa local sea-level reference
| 28.586207 | 62 | 0.652593 | 117 | 829 | 4.564103 | 0.452991 | 0.039326 | 0.029963 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.148352 | 0.121834 | 829 | 28 | 63 | 29.607143 | 0.585165 | 0.0193 | 0 | 0 | 0 | 0 | 0.211823 | 0 | 0 | 0 | 0.019704 | 0 | 0 | 1 | 0.043478 | false | 0 | 0.217391 | 0 | 0.304348 | 0.434783 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 |
b3505f341d67a1c5794fe4b0bc6c75cbc2db2874 | 8,184 | py | Python | tpdatasrc/tpgamefiles/scr/tpModifiers/duelist.py | edoipi/TemplePlus | f0e552289822fea908f16daa379fa568b1bd286d | [
"MIT"
] | 69 | 2015-05-05T14:09:25.000Z | 2022-02-15T06:13:04.000Z | tpdatasrc/tpgamefiles/scr/tpModifiers/duelist.py | edoipi/TemplePlus | f0e552289822fea908f16daa379fa568b1bd286d | [
"MIT"
] | 457 | 2015-05-01T22:07:45.000Z | 2022-03-31T02:19:10.000Z | tpdatasrc/tpgamefiles/scr/tpModifiers/duelist.py | edoipi/TemplePlus | f0e552289822fea908f16daa379fa568b1bd286d | [
"MIT"
] | 25 | 2016-02-04T21:19:53.000Z | 2021-11-15T23:14:51.000Z | from templeplus.pymod import PythonModifier
from toee import *
import tpdp
import char_class_utils
import d20_action_utils
###################################################
def GetConditionName():
    """Return the name under which this class condition is registered."""
    conditionName = "Duelist"
    return conditionName
print "Registering " + GetConditionName()

# Class enum used for all duelist level queries below.
classEnum = stat_level_duelist
# Action enum for Precise Strike; referenced only by the commented-out
# radial-menu code further down in this file.
preciseStrikeEnum = 2400
###################################################
#### standard callbacks - BAB and Save values
def OnGetToHitBonusBase(attachee, args, evt_obj):
    """Contribute the duelist's base attack bonus for its class levels."""
    levels = attachee.stat_level_get(classEnum)
    bab = game.get_bab_for_class(classEnum, levels)
    evt_obj.bonus_list.add(bab, 0, 137)  # untyped, description: "Class"
    return 0
def OnGetSaveThrowFort(attachee, args, evt_obj):
    """Add the duelist class contribution to Fortitude saving throws."""
    bonus = char_class_utils.SavingThrowLevel(classEnum, attachee, D20_Save_Fortitude)
    evt_obj.bonus_list.add(bonus, 0, 137)  # untyped, description: "Class"
    return 0
def OnGetSaveThrowReflex(attachee, args, evt_obj):
    """Add the duelist class contribution to Reflex saving throws."""
    bonus = char_class_utils.SavingThrowLevel(classEnum, attachee, D20_Save_Reflex)
    evt_obj.bonus_list.add(bonus, 0, 137)  # untyped, description: "Class"
    return 0
def OnGetSaveThrowWill(attachee, args, evt_obj):
    """Add the duelist class contribution to Will saving throws."""
    bonus = char_class_utils.SavingThrowLevel(classEnum, attachee, D20_Save_Will)
    evt_obj.bonus_list.add(bonus, 0, 137)  # untyped, description: "Class"
    return 0
def IsArmorless(obj):
    """Return 1 when obj wears no real armor (slot 5) and no shield (slot 11), else 0.

    Items in the armor slot whose armor flags are ARMOR_TYPE_NONE
    (e.g. clothing) still count as armorless.
    """
    wornArmor = obj.item_worn_at(5)
    if wornArmor != OBJ_HANDLE_NULL and wornArmor.obj_get_int(obj_f_armor_flags) != ARMOR_TYPE_NONE:
        return 0
    if obj.item_worn_at(11) != OBJ_HANDLE_NULL:
        return 0
    return 1
def IsRangedWeapon(weap):
    """Return 1 if the weapon's ranged flag is set, 0 otherwise."""
    flags = weap.obj_get_int(obj_f_weapon_flags)
    return 1 if flags & OWF_RANGED_WEAPON else 0
def CannyDefenseAcBonus(attachee, args, evt_obj):
    """Canny Defense: add the Int bonus (capped at duelist level) to AC.

    Applies only while unarmored/shieldless and wielding a melee weapon
    in the main or off hand.
    NOTE: `/ 2` is integer (floor) division on the Python 2-style
    interpreter this script targets (see the `print` statement at module
    level); on Python 3 it would yield a float.
    """
    if not IsArmorless(attachee):
        return 0
    weap = attachee.item_worn_at(3)
    if weap == OBJ_HANDLE_NULL or IsRangedWeapon(weap):
        # Fall back to the off-hand weapon if the main hand doesn't qualify.
        weap = attachee.item_worn_at(4)
        if weap == OBJ_HANDLE_NULL or IsRangedWeapon(weap):
            return 0
    duelistLvl = attachee.stat_level_get(classEnum)
    intScore = attachee.stat_level_get(stat_intelligence)
    intBonus = (intScore - 10)/2
    if intBonus <= 0:
        return 0  # fixed: was a bare `return` (None); all hooks in this file return 0
    if duelistLvl < intBonus:
        intBonus = duelistLvl
    evt_obj.bonus_list.modify(intBonus , 3, 104) # Dexterity bonus, ~Class~[TAG_LEVEL_BONUSES]
    return 0
def ImprovedReactionInitBonus(attachee, args, evt_obj):
    """Improved Reaction: +2 initiative at duelist level 2, rising to +4 at level 8."""
    level = attachee.stat_level_get(classEnum)
    if level < 2:
        return 0
    bonus = 4 if level >= 8 else 2
    evt_obj.bonus_list.add(bonus, 0, 137)  # untyped bonus to initiative
    return 0
def EnhancedMobility(attachee, args, evt_obj):
    """Enhanced Mobility: +4 dodge AC vs attacks of opportunity provoked by movement.

    Requires duelist level 3+ and no armor or shield.
    """
    if attachee.stat_level_get(classEnum) < 3:
        return 0
    if not IsArmorless(attachee):
        return 0
    if evt_obj.attack_packet.get_flags() & D20CAF_AOO_MOVEMENT:
        evt_obj.bonus_list.add(4, 8, 137)  # +4 dodge bonus
    return 0
def GraceReflexBonus(attachee, args, evt_obj):
    """Grace: +2 competence bonus to Reflex saves at duelist level 4+ (unarmored only)."""
    if attachee.stat_level_get(classEnum) < 4:
        return 0
    if not IsArmorless(attachee):
        return 0
    evt_obj.bonus_list.add(2, 34, 137)  # competence bonus
    return 0
# def PreciseStrikeRadial(attachee, args, evt_obj):
# duelistLvl = attachee.stat_level_get(classEnum)
# if (duelistLvl < 5):
# return 0
## add radial menu action Precise Strike
# radialAction = tpdp.RadialMenuEntryPythonAction(-1, D20A_PYTHON_ACTION, preciseStrikeEnum, 0, "TAG_INTERFACE_HELP")
# radialParentId = radialAction.add_child_to_standard(attachee, tpdp.RadialMenuStandardNode.Class)
# return 0
# def OnPreciseStrikeCheck(attachee, args, evt_obj):
# if (not IsUsingLightOrOneHandedPiercing(attachee)):
# evt_obj.return_val = AEC_WRONG_WEAPON_TYPE
# return 0
# tgt = evt_obj.d20a.target
# stdChk = ActionCheckTargetStdAtk(attachee, tgt)
# if (stdChk != AEC_OK):
# evt_obj.return_val = stdChk
# return 0
# def OnPreciseStrikePerform(attachee, args, evt_obj):
# print "I performed!"
# return 0
# Label for the Precise Strike radial-menu action; referenced only by the
# commented-out radial code above in this file.
preciseStrikeString = "Precise Strike"
def PreciseStrikeDamageBonus(attachee, args, evt_obj):
    """Precise Strike: add +1d6 damage (+2d6 at duelist level 10).

    Requires duelist level 5+, a single weapon with no shield, a light or
    one-handed piercing weapon, and a target that is not immune to
    critical hits.
    """
    duelistLvl = attachee.stat_level_get(classEnum)
    if duelistLvl < 5:
        return 0
    # check if attacking with one weapon and without a shield
    if (attachee.item_worn_at(4) != OBJ_HANDLE_NULL and attachee.item_worn_at(3) != OBJ_HANDLE_NULL) or attachee.item_worn_at(11) != OBJ_HANDLE_NULL:
        return 0
    # check if light or one handed piercing
    if not IsUsingLightOrOneHandedPiercing(attachee):
        return 0
    tgt = evt_obj.attack_packet.target
    if tgt == OBJ_HANDLE_NULL: # shouldn't happen but better be safe
        return 0
    if tgt.d20_query(Q_Critter_Is_Immune_Critical_Hits):
        return 0
    damage_dice = dice_new('1d6')
    if duelistLvl >= 10:
        # Level 10+ doubles the precise strike dice (2d6).
        damage_dice.number = 2
    evt_obj.damage_packet.add_dice(damage_dice, -1, 127 )
    return 0
def ElaborateParry(attachee, args, evt_obj):
    """Elaborate Parry: +1 dodge AC per duelist level while fighting defensively (level 7+)."""
    level = attachee.stat_level_get(classEnum)
    if level < 7:
        return 0
    # Q_FightingDefensively also covers Total Defense.
    if not attachee.d20_query(Q_FightingDefensively):
        return 0
    evt_obj.bonus_list.add(level, 8, 137)  # dodge bonus, ~Class~[TAG_LEVEL_BONUSES]
    return 0
def IsUsingLightOrOneHandedPiercing( obj ):
    """Return 1 if either held weapon is light or one-handed piercing, else 0.

    Checks the main hand (slot 3) first, then the off hand (slot 4).
    """
    weap = obj.item_worn_at(3)
    offhand = obj.item_worn_at(4)
    if weap == OBJ_HANDLE_NULL and offhand == OBJ_HANDLE_NULL:
        return 0
    if weap == OBJ_HANDLE_NULL:
        # Only an off-hand weapon is held; treat it as the main weapon.
        weap = offhand
        offhand = OBJ_HANDLE_NULL
    if IsWeaponLightOrOneHandedPiercing(obj, weap):
        return 1
    # check the offhand
    if offhand != OBJ_HANDLE_NULL:
        if IsWeaponLightOrOneHandedPiercing(obj, offhand):
            return 1
    return 0
def IsWeaponLightOrOneHandedPiercing( obj, weap):
    """Return 1 if weap is light, or one-handed and piercing, for obj; else 0.

    Wield type is evaluated both ignoring enlargement ("normal") and with
    the current size, per the truth table below.
    """
    # truth table
    # nor. | enlarged | return
    # 0      x          1         assume un-enlarged state
    # 1      0          1         shouldn't be possible... unless it's actually reduce person (I don't really care about that)
    # 1      1          is_piercing
    # 1      2          is_piercing
    # 2      x          0
    # 3      x          0
    normalWieldType = obj.get_wield_type(weap, 1) # "normal" means weapon is not enlarged
    if normalWieldType >= 2: # two handed or unwieldable
        return 0
    if normalWieldType == 0:
        return 1
    # otherwise if the weapon is also enlarged;
    wieldType = obj.get_wield_type(weap, 0)
    if wieldType == 0:
        return 1
    # weapon is not light, but is one handed - check if piercing
    attackType = weap.obj_get_int(obj_f_weapon_attacktype)
    if attackType == D20DT_PIERCING: # should be strictly piercing from what I understand (supposed to be rapier-like)
        return 1
    return 0
def DuelistDeflectArrows(attachee, args, evt_obj):
    """Deflect Arrows (duelist level 9): negate a ranged weapon hit.

    The attack must be ranged with an actual weapon; the attacker must be
    perceivable; and the duelist must not be flat-footed or blinded and
    must wield a light or one-handed piercing weapon.
    """
    duelistLvl = attachee.stat_level_get(classEnum)
    if duelistLvl < 9:
        return 0
    offendingWeapon = evt_obj.attack_packet.get_weapon_used()
    if offendingWeapon == OBJ_HANDLE_NULL:
        return 0
    if not (evt_obj.attack_packet.get_flags() & D20CAF_RANGED):
        return 0
    # check if attacker visible
    attacker = evt_obj.attack_packet.attacker
    if attacker == OBJ_HANDLE_NULL:
        return 0
    if attacker.d20_query(Q_Critter_Is_Invisible) and not attachee.d20_query(Q_Critter_Can_See_Invisible):
        return 0
    if attachee.d20_query(Q_Critter_Is_Blinded):
        return 0
    # check flatfooted
    if attachee.d20_query(Q_Flatfooted):
        return 0
    # check light weapon or one handed piercing
    if not IsUsingLightOrOneHandedPiercing(attachee):
        return 0
    # Mark the attack as deflected and strip the hit/critical flags.
    atkflags = evt_obj.attack_packet.get_flags()
    atkflags |= D20CAF_DEFLECT_ARROWS
    atkflags &= ~(D20CAF_HIT | D20CAF_CRITICAL)
    evt_obj.attack_packet.set_flags(atkflags)
    return 0
# Instantiate the Duelist condition and attach all of its event hooks.
classSpecObj = PythonModifier(GetConditionName(), 0)
# Base attack bonus and saving-throw progression.
classSpecObj.AddHook(ET_OnToHitBonusBase, EK_NONE, OnGetToHitBonusBase, ())
classSpecObj.AddHook(ET_OnSaveThrowLevel, EK_SAVE_FORTITUDE, OnGetSaveThrowFort, ())
classSpecObj.AddHook(ET_OnSaveThrowLevel, EK_SAVE_REFLEX, OnGetSaveThrowReflex, ())
classSpecObj.AddHook(ET_OnSaveThrowLevel, EK_SAVE_WILL, OnGetSaveThrowWill, ())
# AC bonuses: Canny Defense, Enhanced Mobility, Elaborate Parry.
classSpecObj.AddHook(ET_OnGetAC, EK_NONE, CannyDefenseAcBonus, ())
classSpecObj.AddHook(ET_OnGetAC, EK_NONE, EnhancedMobility, ())
classSpecObj.AddHook(ET_OnGetAC, EK_NONE, ElaborateParry, ())
# Initiative (Improved Reaction) and Reflex saves (Grace).
classSpecObj.AddHook(ET_OnGetInitiativeMod, EK_NONE, ImprovedReactionInitBonus, ())
classSpecObj.AddHook(ET_OnSaveThrowLevel, EK_SAVE_REFLEX, GraceReflexBonus, ())
# Bonus damage (Precise Strike) and ranged-hit negation (Deflect Arrows).
classSpecObj.AddHook(ET_OnDealingDamage, EK_NONE, PreciseStrikeDamageBonus, ())
classSpecObj.AddHook(ET_OnDeflectArrows, EK_NONE, DuelistDeflectArrows, ())
| 32.347826 | 146 | 0.758187 | 1,114 | 8,184 | 5.344704 | 0.217235 | 0.054081 | 0.032751 | 0.042324 | 0.42039 | 0.336245 | 0.270574 | 0.209271 | 0.17484 | 0.164763 | 0 | 0.026823 | 0.143573 | 8,184 | 252 | 147 | 32.47619 | 0.822657 | 0.215176 | 0 | 0.363636 | 0 | 0 | 0.005743 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | null | 0 | 0.028409 | null | null | 0.005682 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
b35f80332b012e3019899544e95458e4103fb451 | 1,524 | py | Python | megatron/logging.py | coreweave/gpt-neox | 5c9641c8b1dae16e5642d78a29e879ad312e0725 | [
"Apache-2.0"
] | 1 | 2021-04-27T21:28:25.000Z | 2021-04-27T21:28:25.000Z | megatron/logging.py | fplk/gpt-neox | 9992042ab113428022e5e91421c04917577b8e00 | [
"Apache-2.0"
] | null | null | null | megatron/logging.py | fplk/gpt-neox | 9992042ab113428022e5e91421c04917577b8e00 | [
"Apache-2.0"
] | null | null | null | # coding=utf-8
# Copyright (c) 2021, EleutherAI contributors
# This file is based on code by the authors denoted below and has been modified from its original version.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
class Tee:
    """ Duplicate output to both stdout/err and file

    On construction the Tee opens `file` for writing and replaces
    sys.stdout (or sys.stderr when err=True) with itself; every write
    goes to both the original stream and the file. The hijacked stream
    is restored and the file closed when the Tee is garbage-collected.
    """

    def __init__(self, file, err=False):
        self.file = open(file, 'w')
        self.err = err
        if not err:
            self.std = sys.stdout
            sys.stdout = self
        else:
            self.std = sys.stderr
            sys.stderr = self

    def __del__(self):
        # Restore the hijacked stream before closing the log file.
        if not self.err:
            sys.stdout = self.std
        else:
            sys.stderr = self.std
        self.file.close()

    def write(self, data):
        # Best effort: a failure on one sink must not break the other.
        try:
            self.file.write(data)
        except OSError:
            pass
        try:
            self.std.write(data)
        except OSError:
            pass

    def flush(self):
        try:
            self.file.flush()
        except OSError:
            pass
        # Fixed: also flush the pass-through stream, not just the file,
        # so callers flushing sys.stdout get the expected behavior.
        try:
            self.std.flush()
        except OSError:
            pass
| 28.222222 | 106 | 0.612861 | 206 | 1,524 | 4.495146 | 0.509709 | 0.064795 | 0.055076 | 0.034557 | 0.056156 | 0 | 0 | 0 | 0 | 0 | 0 | 0.008531 | 0.307743 | 1,524 | 53 | 107 | 28.754717 | 0.869194 | 0.479003 | 0 | 0.354839 | 0 | 0 | 0.001294 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.129032 | false | 0.096774 | 0.032258 | 0 | 0.193548 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 1 |
b36568a7f5e7ad85c77f6c9d7ddd21f9cadfde38 | 1,132 | py | Python | src/afterpay/merchant.py | nyneava/afterpay-python | ec9f9230ce321a2d9876ac93f222c24ffe7eee1a | [
"MIT"
] | null | null | null | src/afterpay/merchant.py | nyneava/afterpay-python | ec9f9230ce321a2d9876ac93f222c24ffe7eee1a | [
"MIT"
] | null | null | null | src/afterpay/merchant.py | nyneava/afterpay-python | ec9f9230ce321a2d9876ac93f222c24ffe7eee1a | [
"MIT"
] | null | null | null | from afterpay.attribute_getter import AttributeGetter
from afterpay.exceptions import AfterpayError
class Merchant(AttributeGetter):
    """
    Merchant object

    Attributes:
        redirectConfirmUrl: The consumer is redirected to this URL when the payment process is completed.
        redirectCancelUrl: The consumer is redirected to this URL if the payment process is cancelled.
    """
    attribute_list = [
        "redirectConfirmUrl",
        "redirectCancelUrl",
    ]

    def __init__(self, attributes):
        # Both redirect URLs are mandatory for a Merchant payload.
        # Fixed: the error messages previously said "Contact object"
        # (copy-paste error from another model).
        if "redirectConfirmUrl" not in attributes:
            raise AfterpayError("Cannot initialize Merchant object without a 'redirectConfirmUrl'")
        if "redirectCancelUrl" not in attributes:
            raise AfterpayError("Cannot initialize Merchant object without a 'redirectCancelUrl'")
        AttributeGetter.__init__(self, attributes)

    def __repr__(self):
        return super(Merchant, self).__repr__(self.attribute_list)

    def get_json(self):
        # Serialize only the instance attributes declared in attribute_list.
        return {
            i: super(Merchant, self).__dict__[i] for i in super(Merchant, self).__dict__ if i in self.attribute_list
        }
| 35.375 | 116 | 0.701413 | 120 | 1,132 | 6.375 | 0.391667 | 0.05098 | 0.066667 | 0.060131 | 0.266667 | 0.266667 | 0.266667 | 0.183007 | 0.183007 | 0.183007 | 0 | 0 | 0.229682 | 1,132 | 31 | 117 | 36.516129 | 0.877294 | 0.201413 | 0 | 0 | 0 | 0 | 0.222603 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.157895 | false | 0 | 0.105263 | 0.105263 | 0.473684 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 1 |
b36b45596be0c00b3ce27b76dfcdb7d0c441a008 | 693 | py | Python | webdriver_setup/opera.py | xloem/webdriver-setup | 3cd091559ea6d3017995ac7f252c2c107ff1f44c | [
"Apache-2.0"
] | 1 | 2020-12-06T13:27:45.000Z | 2020-12-06T13:27:45.000Z | webdriver_setup/opera.py | xloem/webdriver-setup | 3cd091559ea6d3017995ac7f252c2c107ff1f44c | [
"Apache-2.0"
] | 1 | 2021-11-28T14:03:23.000Z | 2021-11-28T14:03:23.000Z | webdriver_setup/opera.py | xloem/webdriver-setup | 3cd091559ea6d3017995ac7f252c2c107ff1f44c | [
"Apache-2.0"
] | 2 | 2021-07-21T11:24:56.000Z | 2021-09-20T11:13:24.000Z | from selenium import webdriver
from webdriver_manager.opera import OperaDriverManager
from webdriver_setup.driver import DriverBase
class OperaDriver(DriverBase):
    """Opera webdriver factory that installs a matching operadriver binary."""

    def __init__(self, **kwargs):
        super().__init__(**kwargs)

    def create_driver(self, **kwargs):
        """Create Opera webdriver

        :type kwargs: dict
        :param kwargs: Optional arguments; `cache_valid_range` (days,
            default 7) is consumed here and passed to OperaDriverManager,
            the rest are forwarded to the webdriver.

        :rtype: selenium.webdriver.Opera
        :returns: Opera webdriver instance
        """
        # Fixed: pop() instead of get() — `cache_valid_range` is an option
        # for OperaDriverManager only and must not leak into the
        # webdriver.Opera(**kwargs) call below.
        cache_timeout = kwargs.pop("cache_valid_range", 7)
        driver_path = OperaDriverManager(cache_valid_range=cache_timeout).install()
        return webdriver.Opera(executable_path=driver_path, **kwargs)
| 25.666667 | 83 | 0.701299 | 74 | 693 | 6.297297 | 0.486486 | 0.055794 | 0.064378 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.001828 | 0.210678 | 693 | 26 | 84 | 26.653846 | 0.850091 | 0.207792 | 0 | 0 | 0 | 0 | 0.033865 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.2 | false | 0 | 0.3 | 0 | 0.7 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 1 |
2fa20b3896ff72ff33be98125f29809dab379a74 | 260 | py | Python | helper.py | vanashimko/discrete-fourier-transform | 9430ba2b0b7b994c495d6ea2a9beb1099c512e0c | [
"MIT"
] | 1 | 2018-10-06T10:17:42.000Z | 2018-10-06T10:17:42.000Z | helper.py | vanashimko/discrete-fourier-transform | 9430ba2b0b7b994c495d6ea2a9beb1099c512e0c | [
"MIT"
] | null | null | null | helper.py | vanashimko/discrete-fourier-transform | 9430ba2b0b7b994c495d6ea2a9beb1099c512e0c | [
"MIT"
] | null | null | null | from itertools import repeat
from random import randrange
def randoms_from(values, length=None):
    """Yield random elements drawn (with replacement) from `values`.

    Yields exactly `length` items when a length is given; otherwise
    yields forever.
    """
    n = len(values)
    counter = repeat(0) if length is None else range(length)
    for _ in counter:
        yield values[randrange(0, n)]
| 26 | 63 | 0.723077 | 38 | 260 | 4.789474 | 0.552632 | 0.076923 | 0.10989 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.009662 | 0.203846 | 260 | 9 | 64 | 28.888889 | 0.869565 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.142857 | false | 0 | 0.285714 | 0 | 0.428571 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
2fa21b6782bbe1bd275b38f48a0d897b2c47128c | 3,317 | py | Python | azure/cart_svc_serverless/getCartTotal/__init__.py | ishrivatsa/lambda-samples | 17acaa5014401cd9947445140de7ee638aff0b61 | [
"MIT"
] | null | null | null | azure/cart_svc_serverless/getCartTotal/__init__.py | ishrivatsa/lambda-samples | 17acaa5014401cd9947445140de7ee638aff0b61 | [
"MIT"
] | 1 | 2021-06-02T01:00:22.000Z | 2021-06-02T01:00:22.000Z | azure/cart_svc_serverless/getCartTotal/__init__.py | ishrivatsa/serverless-examples | 17acaa5014401cd9947445140de7ee638aff0b61 | [
"MIT"
] | null | null | null | import logging
import os
from os import environ
import redis
import json
import azure.functions as func
redisHost= ""
redisPort = 6379
redisPassword = ""
def connectRedis(host, port, password):
    """Open a StrictRedis connection and verify it with PING.

    Returns the redis connection object on success.

    NOTE(review): on failure this returns an azure.functions.HttpResponse
    instead of raising — the caller assigns the result to `r` and uses it
    as a redis client, so a failed connection surfaces later as an
    attribute error rather than a clean 500. Consider raising and letting
    the caller build the HTTP error response.
    """
    try:
        logging.info("Connecting to Redis ")
        redisConnection = redis.StrictRedis(host=host, port=port, password=password, db=0)
    except Exception as e:
        logging.error("Error connecting to REDIS %s", e)
        return func.HttpResponse('Could not connect to REDIS', status_code=500)
    try:
        # PING round-trip confirms the server is actually reachable.
        logging.info(redisConnection.ping())
    except Exception as e:
        logging.error("Could not Ping Redis server %s", e)
        return func.HttpResponse('Could not Ping REDIS', status_code=500)
    logging.info("Successfully Connected to Redis")
    return redisConnection
## Get data from the redis db
def getItems(id, r):
    """Fetch and deserialize the cart stored under `id`; return 0 when no key exists."""
    if not r.exists(id):
        return 0
    data = json.loads(r.get(id))
    logging.info("Received data")
    logging.info(data)
    return data
## Request POST /cart/item/add/{userid}
def main(req: func.HttpRequest) -> func.HttpResponse:
    """HTTP-triggered entry point: compute the cart total for a user.

    Reads the REDIS_HOST/REDIS_PORT/REDIS_PASSWORD environment variables,
    loads the cart items stored under the `userid` route parameter, and
    returns a JSON body with the userid and cart total.

    Responses: 200 with totals, 204 when the cart is empty/missing,
    400 when the userid path parameter is empty, 500 on missing/empty
    configuration.
    """
    logging.info('getCartTotal was triggered')
    # REDIS_HOST is mandatory and must be non-empty.
    if environ.get("REDIS_HOST") is not None:
        if os.environ["REDIS_HOST"] != "":
            redisHost = os.environ["REDIS_HOST"]
        else:
            logging.info("REDIS_HOST is empty")
            return func.HttpResponse(status_code=500)
    else:
        logging.error("REDIS_HOST is not Set")
        return func.HttpResponse(status_code=500)
    # REDIS_PORT falls back to the default 6379 when set but empty.
    if environ.get("REDIS_PORT") is not None:
        if os.environ["REDIS_PORT"] != "":
            redisPort = os.environ["REDIS_PORT"]
        else:
            redisPort = 6379
    else:
        logging.error("Could not find REDIS_PORT")
        return func.HttpResponse(status_code=500)
    # REDIS_PASSWORD is mandatory and must be non-empty.
    if environ.get("REDIS_PASSWORD") is not None:
        if os.environ["REDIS_PASSWORD"] != "":
            redisPassword = os.environ["REDIS_PASSWORD"]
        else:
            logging.info("REDIS_PASSWORD is empty")
            return func.HttpResponse(status_code=500)
    else:
        logging.error("REDIS_PASSWORD is not Set")
        return func.HttpResponse(status_code=500)
    ## Connect to REDIS
    r = connectRedis(redisHost, redisPort, redisPassword)
    logging.info(req.route_params["userid"])
    userID = req.route_params["userid"]
    if userID != "":
        existing_data = getItems(userID, r)
    else:
        logging.error("Missing User ID from path parameter")
        return func.HttpResponse('Missing UserID from the request', status_code=400)
    total = 0
    if (existing_data):
        # Sum quantity * price over every item in the cart.
        for items in existing_data:
            quantity = items['quantity']
            price = items['price']
            total += (float(quantity)*float(price))
        response = {}
        response['userid'] = userID
        response['carttotal'] = total
        response = json.dumps(response)
        logging.info("The total for user %s is %f", userID, total)
    else:
        logging.info('No items found in cart')
        return func.HttpResponse('No items found in cart', status_code=204)
    return func.HttpResponse(response, status_code=200)
| 30.431193 | 91 | 0.614109 | 392 | 3,317 | 5.122449 | 0.244898 | 0.060259 | 0.109562 | 0.069721 | 0.256972 | 0.239044 | 0.209163 | 0.13994 | 0.13994 | 0.114542 | 0 | 0.017184 | 0.280675 | 3,317 | 108 | 92 | 30.712963 | 0.824392 | 0.024118 | 0 | 0.243902 | 0 | 0 | 0.18776 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.036585 | false | 0.109756 | 0.073171 | 0 | 0.256098 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 1 |
2faa39feebe9d6a660e11520ad56bb2a92a90312 | 1,325 | py | Python | compliance_suite/functions/update_server_settings.py | alextsaihi/rnaget-compliance-suite | a3accae431b9e4f7791dfa5ae867e70da2dd6278 | [
"Apache-2.0"
] | 1 | 2019-09-18T14:38:55.000Z | 2019-09-18T14:38:55.000Z | compliance_suite/functions/update_server_settings.py | alextsaihi/rnaget-compliance-suite | a3accae431b9e4f7791dfa5ae867e70da2dd6278 | [
"Apache-2.0"
] | 14 | 2019-05-24T18:55:23.000Z | 2022-02-25T16:56:28.000Z | compliance_suite/functions/update_server_settings.py | alextsaihi/rnaget-compliance-suite | a3accae431b9e4f7791dfa5ae867e70da2dd6278 | [
"Apache-2.0"
] | 8 | 2019-04-08T14:48:35.000Z | 2022-02-04T16:59:59.000Z | # -*- coding: utf-8 -*-
"""Module compliance_suite.functions.update_server_settings.py
Functions to update server config/settings based on the response of a previous
API request. Each function should accept a Runner object and modify its
"retrieved_server_settings" attribute
"""
def update_supported_filters(runner, resource, response_obj):
"""Update server settings with the supported filters for a resource
Arguments:
runner (Runner): reference to Runner object
resource (str): identifies project, study, expression, continuous
response_obj (Response): response object to parse
"""
for filter_obj in response_obj:
runner.retrieved_server_settings[resource]["supp_filters"]\
.append(filter_obj["filter"])
def update_expected_format(runner, resource, response_obj):
"""Update server settings with the expected file format for a resource
Arguments:
runner (Runner): reference to Runner object
resource (str): identifies project, study, expression, continuous
response_obj (Response): response object to parse
"""
format_str = response_obj["fileType"]
runner.retrieved_server_settings["expressions"]["exp_format"] = format_str
runner.retrieved_server_settings["continuous"]["exp_format"] = format_str
| 37.857143 | 79 | 0.731321 | 159 | 1,325 | 5.91195 | 0.371069 | 0.104255 | 0.097872 | 0.092553 | 0.42766 | 0.42766 | 0.42766 | 0.42766 | 0.42766 | 0.317021 | 0 | 0.000926 | 0.184906 | 1,325 | 35 | 80 | 37.857143 | 0.869444 | 0.584151 | 0 | 0 | 0 | 0 | 0.137014 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.25 | false | 0 | 0 | 0 | 0.25 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
2fab631d5340c51e5036835293e80b75871b2ef7 | 1,272 | py | Python | rnacentral_pipeline/rnacentral/ftp_export/fasta.py | RNAcentral/rnacentral-import-pipeline | 238e573440c72581a051b16c15f56fcd25bece74 | [
"Apache-2.0"
] | 1 | 2018-08-09T14:41:16.000Z | 2018-08-09T14:41:16.000Z | rnacentral_pipeline/rnacentral/ftp_export/fasta.py | RNAcentral/rnacentral-import-pipeline | 238e573440c72581a051b16c15f56fcd25bece74 | [
"Apache-2.0"
] | 60 | 2015-02-04T16:43:53.000Z | 2022-01-27T10:28:43.000Z | rnacentral_pipeline/rnacentral/ftp_export/fasta.py | RNAcentral/rnacentral-import-pipeline | 238e573440c72581a051b16c15f56fcd25bece74 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Copyright [2009-2017] EMBL-European Bioinformatics Institute
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import re
import itertools as it
from Bio import SeqIO
# Characters nhmmer accepts in a nucleotide sequence (case-insensitive).
NHMMER_PATTERN = re.compile("^[ABCDGHKMNRSTVWXYU]+$", re.IGNORECASE)


def is_valid_nhmmer_record(record):
    """
    Checks if a sequence is valid for nhmmer usage.
    """
    return NHMMER_PATTERN.match(str(record.seq)) is not None
def valid_nhmmer(handle, output):
    """Write to `output` only the FASTA records whose sequences nhmmer accepts."""
    records = SeqIO.parse(handle, "fasta")
    kept = (rec for rec in records if is_valid_nhmmer_record(rec))
    SeqIO.write(kept, output, "fasta")
def invalid_nhmmer(handle, output):
    """Write to `output` the FASTA records whose sequences nhmmer would reject."""
    records = SeqIO.parse(handle, "fasta")
    dropped = (rec for rec in records if not is_valid_nhmmer_record(rec))
    SeqIO.write(dropped, output, "fasta")
| 31.02439 | 72 | 0.751572 | 181 | 1,272 | 5.209945 | 0.574586 | 0.063627 | 0.041357 | 0.060445 | 0.182397 | 0.182397 | 0.182397 | 0.101803 | 0 | 0 | 0 | 0.012093 | 0.154874 | 1,272 | 40 | 73 | 31.8 | 0.865116 | 0.51022 | 0 | 0.142857 | 0 | 0 | 0.07 | 0.036667 | 0 | 0 | 0 | 0 | 0 | 1 | 0.214286 | false | 0 | 0.214286 | 0 | 0.5 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
2fb2669f969726a9b2f0b2041c3aff48c8729aac | 1,746 | py | Python | setup.py | RicoViking9000/profanityfilter | 1c1c1dbc0e895b85ee1e685ab5ef8b4c37c5db00 | [
"BSD-3-Clause"
] | 63 | 2016-09-21T17:33:43.000Z | 2021-12-04T18:36:05.000Z | setup.py | RicoViking9000/profanityfilter | 1c1c1dbc0e895b85ee1e685ab5ef8b4c37c5db00 | [
"BSD-3-Clause"
] | 12 | 2017-07-14T04:47:37.000Z | 2022-02-15T09:50:21.000Z | setup.py | RicoViking9000/profanityfilter | 1c1c1dbc0e895b85ee1e685ab5ef8b4c37c5db00 | [
"BSD-3-Clause"
] | 28 | 2017-07-07T21:52:42.000Z | 2022-02-05T09:49:22.000Z | from setuptools import setup, find_packages
from codecs import open
from os import path
here = path.abspath(path.dirname(__file__))
setup(
    name='profanityfilter',
    # Fixed: '2.0.6.' had a trailing dot, which is not a valid PEP 440 version.
    version='2.0.6',
    description='A universal Python library for detecting and/or filtering profane words.',
    long_description='For more details visit https://github.com/areebbeigh/profanityfilter',
    url='https://github.com/areebbeigh/profanityfilter',
    author='Areeb Beigh',
    author_email='areebbeigh@gmail.com',
    license='BSD',
    classifiers=[
        # 3 - Alpha
        # 4 - Beta
        # 5 - Production/Stable
        'Development Status :: 3 - Alpha',
        'Intended Audience :: Developers',
        'Operating System :: OS Independent',
        'Topic :: Text Processing :: Linguistic',
        'Topic :: Software Development :: Libraries :: Python Modules',
        'Topic :: Software Development :: Build Tools',
        'License :: OSI Approved :: BSD License',
        'Programming Language :: Python :: 2',
        'Programming Language :: Python :: 2.6',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.3',
        'Programming Language :: Python :: 3.4',
        'Programming Language :: Python :: 3.5',
        'Programming Language :: Python :: 3.6',
        'Programming Language :: Python :: 3.7',
    ],
    keywords='profanity filter clean content',
    packages=find_packages(exclude=['tests']),
    install_requires=['inflection'],
    # Ship the default bad-words list with the package.
    package_data={
        'profanityfilter': ['data/badwords.txt'],
    },
    entry_points={
        'console_scripts': [
            'profanityfilter=profanityfilter:main',
        ],
    },
)
| 34.92 | 92 | 0.616838 | 174 | 1,746 | 6.12069 | 0.557471 | 0.160563 | 0.211268 | 0.146479 | 0.123944 | 0 | 0 | 0 | 0 | 0 | 0 | 0.017517 | 0.247995 | 1,746 | 49 | 93 | 35.632653 | 0.793602 | 0.0252 | 0 | 0.046512 | 0 | 0 | 0.573703 | 0.021226 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.069767 | 0 | 0.069767 | 0 | 0 | 0 | 0 | null | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
2fcb561133a4b45129e3d2727e197182d5c62b3e | 227 | py | Python | train.py | tamasandacian/twitter-sentiment-analysis | 154083feae49d4ff159aee6cfae0df6807a662b3 | [
"MIT"
] | 1 | 2019-08-08T05:08:42.000Z | 2019-08-08T05:08:42.000Z | train.py | tamasandacian/twitter-sentiment-analysis | 154083feae49d4ff159aee6cfae0df6807a662b3 | [
"MIT"
] | null | null | null | train.py | tamasandacian/twitter-sentiment-analysis | 154083feae49d4ff159aee6cfae0df6807a662b3 | [
"MIT"
] | null | null | null | # Create FastText model using from raw train data
import fastText
# Labeled training file in fastText supervised format
# (each line: "__label__<label> <tweet text>").
TRAIN_FILE = './datasets/raw_data/tweets.train'
# wordNgrams=3 adds word bi-/tri-gram features on top of unigrams.
su_model = fastText.train_supervised(input=TRAIN_FILE, wordNgrams=3)
# Persist the trained sentiment classifier next to the script.
su_model.save_model('model_sentiment.bin')
2fcfdfc34dd0b46d25701e6315fbd7285e1e2ee4 | 555 | py | Python | backend/media/migrations/0010_auto_20210613_0444.py | LakesideMiners/rt911 | ce2eb82efa6eedbaabf16f567b3db1d8fb808809 | [
"Unlicense"
] | 8 | 2020-07-25T04:54:44.000Z | 2022-01-31T16:08:53.000Z | backend/media/migrations/0010_auto_20210613_0444.py | LakesideMiners/rt911 | ce2eb82efa6eedbaabf16f567b3db1d8fb808809 | [
"Unlicense"
] | 18 | 2020-07-20T00:53:19.000Z | 2022-03-03T21:48:17.000Z | backend/media/migrations/0010_auto_20210613_0444.py | LakesideMiners/rt911 | ce2eb82efa6eedbaabf16f567b3db1d8fb808809 | [
"Unlicense"
] | 2 | 2021-10-29T23:19:02.000Z | 2021-11-06T06:44:54.000Z | # Generated by Django 3.1.12 on 2021-06-13 04:44
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated by Django; the operations must stay consistent with the
    # recorded migration history -- edit with care.

    dependencies = [
        ('media', '0009_auto_20210611_1441'),
    ]

    operations = [
        # Replace the single `media_item` field with a many-to-many `media`
        # relation restricted to approved media entries.
        migrations.RemoveField(
            model_name='collection',
            name='media_item',
        ),
        migrations.AddField(
            model_name='collection',
            name='media',
            field=models.ManyToManyField(blank=True, limit_choices_to={'approved': True}, to='media.Media'),
        ),
    ]
| 24.130435 | 108 | 0.592793 | 57 | 555 | 5.631579 | 0.701754 | 0.056075 | 0.11838 | 0.143302 | 0.174455 | 0 | 0 | 0 | 0 | 0 | 0 | 0.080605 | 0.284685 | 555 | 22 | 109 | 25.227273 | 0.72796 | 0.082883 | 0 | 0.25 | 1 | 0 | 0.161736 | 0.045365 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.0625 | 0 | 0.25 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
2fd243b0141adccef1b512ff16e5f007093b92d7 | 1,691 | py | Python | station/controller/handlers/read_letters_handler.py | GLO3013-E4/COViRondelle2021 | f8d23903d0a906e93a7698a555d90ebecdf83969 | [
"MIT"
] | null | null | null | station/controller/handlers/read_letters_handler.py | GLO3013-E4/COViRondelle2021 | f8d23903d0a906e93a7698a555d90ebecdf83969 | [
"MIT"
] | null | null | null | station/controller/handlers/read_letters_handler.py | GLO3013-E4/COViRondelle2021 | f8d23903d0a906e93a7698a555d90ebecdf83969 | [
"MIT"
] | null | null | null | import json
import rospy
from std_msgs.msg import String
from handlers.handler import Handler
from mapping.command_panel import CommandPanel
from mapping.resistance import Resistance
class ReadLettersHandler(Handler):
    """Requests the command-panel letters over ROS and derives corner letters.

    Repeatedly publishes on ``read_letters_pub`` until the ``letters`` topic
    delivers all nine letters, then maps them plus the rounded resistance to
    the three corner letters in ``handled_data``.
    """

    def initialize(self):
        # TODO: check the topic name ("letters")
        self.sub = rospy.Subscriber('letters', String, self.read_letters)
        self.is_finished = False

    def handle(self, handled_data=None):
        # Bug fix: store the shared state *before* subscribing -- the
        # read_letters callback dereferences self.handled_data, so a message
        # arriving early would otherwise raise AttributeError.
        self.handled_data = handled_data
        self.initialize()
        command_panel = CommandPanel()
        command_panel.set_resistance(handled_data['resistance'])
        handled_data["read_letters_pub"].publish(True)
        while not self.is_finished:
            # Sleep instead of busy-spinning so the CPU stays available for
            # the subscriber callback.
            rospy.sleep(0.05)
        rounded_resistance, _ = Resistance(handled_data["resistance"]).get_resistance_and_colors()
        handled_data["letters"] = self.letters
        command_panel.set_mapped_letters(self.letters)
        command_panel.set_resistance(rounded_resistance)
        first_corner = command_panel.find_first_corner_letter()
        second_corner = first_corner.get_next_letter()
        third_corner = second_corner.get_next_letter()
        handled_data["corners"] = [first_corner.value, second_corner.value, third_corner.value]
        return handled_data

    def read_letters(self, data):
        """Topic callback: parse the letters payload; re-request if incomplete."""
        letters = json.loads(data.data)
        self.letters = letters
        rospy.logerr("READ LETTERS " + str(self.letters))
        # Finished once all nine letters of the command panel have been read.
        self.is_finished = len(letters) == 9
        if not self.is_finished:
            self.handled_data["read_letters_pub"].publish(True)

    def unregister(self):
        self.sub.unregister()
| 34.510204 | 105 | 0.702543 | 204 | 1,691 | 5.553922 | 0.333333 | 0.106796 | 0.049426 | 0.044131 | 0.121801 | 0.121801 | 0.063548 | 0 | 0 | 0 | 0 | 0.000746 | 0.207569 | 1,691 | 48 | 106 | 35.229167 | 0.844776 | 0.046718 | 0 | 0 | 0 | 0 | 0.053449 | 0 | 0 | 0 | 0 | 0.020833 | 0 | 1 | 0.111111 | false | 0.027778 | 0.166667 | 0 | 0.333333 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
2fd9550a294f6b02e8dabd77fe3e2f1337833926 | 1,813 | py | Python | openstates/openstates-master/openstates/ca/actions.py | Jgorsick/Advocacy_Angular | 8906af3ba729b2303880f319d52bce0d6595764c | [
"CC-BY-4.0"
] | null | null | null | openstates/openstates-master/openstates/ca/actions.py | Jgorsick/Advocacy_Angular | 8906af3ba729b2303880f319d52bce0d6595764c | [
"CC-BY-4.0"
] | null | null | null | openstates/openstates-master/openstates/ca/actions.py | Jgorsick/Advocacy_Angular | 8906af3ba729b2303880f319d52bce0d6595764c | [
"CC-BY-4.0"
] | null | null | null | from billy.scrape.actions import Rule, BaseCategorizer
# These are regex patterns that map to action categories.
_categorizer_rules = (
    # Vote-count extraction only: captures ayes/noes totals, no action type.
    Rule((r'\(Ayes (?P<yes_votes>\d+)\.\s+Noes\s+'
          r'(?P<no_votes>\d+)\.( Page \S+\.)?\)')),
    Rule(r'^Introduced', 'bill:introduced'),
    # Committee referrals (capture the committee name in <committees>).
    Rule(r'(?i)Referred to (?P<committees>.+)', 'committee:referred'),
    Rule(r'(?i)Referred to (?P<committees>.+?)(\.\s+suspense)',
         'committee:referred'),
    Rule(r're-refer to Standing (?P<committees>[^.]+)\.',
         'committee:referred'),
    # Readings and floor outcomes.
    Rule(r'Read first time\.', 'bill:reading:1'),
    Rule(r'Read second time and amended',
         ['bill:reading:2']),
    Rule(r'Read third time', 'bill:reading:3'),
    Rule(r'Read third time. Refused passage\.',
         'bill:failed'),
    Rule([r'(?i)read third time.{,5}passed',
          r'(?i)Read third time.+?Passed'],
         ['bill:passed', 'bill:reading:3']),
    # Governor actions.
    Rule(r'Approved by the Governor', 'governor:signed'),
    Rule(r'Approved by the Governor with item veto',
         'governor:vetoed:line-item'),
    Rule('Vetoed by Governor', 'governor:vetoed'),
    Rule(r'To Governor', 'governor:received'),
    # Amendments and committee outcomes.
    Rule(r'amendments concurred in', 'amendment:passed'),
    Rule(r'refused to concur in Assembly amendments', 'amendment:failed'),
    Rule(r'Failed passage in committee', 'committee:failed'),
    Rule(r'(?i)From committee', 'committee:passed'),
    Rule(r'(?i)From committee: Do pass', 'committee:passed:favorable'),
    Rule(r'From committee with author\'s amendments', 'committee:passed'),
    # Resolutions
    Rule(r'Adopted', 'bill:passed'),
    Rule(r'Read', 'bill:reading:1'),
    Rule(r'^From committee: Be adopted', 'committee:passed:favorable'),
)
class CACategorizer(BaseCategorizer):
    """Categorizer for California bill actions, driven by _categorizer_rules."""
    rules = _categorizer_rules
| 35.54902 | 74 | 0.630998 | 236 | 1,813 | 4.822034 | 0.338983 | 0.096661 | 0.026362 | 0.057996 | 0.279438 | 0.137083 | 0.047452 | 0 | 0 | 0 | 0 | 0.00403 | 0.178709 | 1,813 | 50 | 75 | 36.26 | 0.760242 | 0.036955 | 0 | 0.055556 | 0 | 0 | 0.583477 | 0.094091 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0.305556 | 0.027778 | 0 | 0.083333 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 1 |
2fe2e978cb293b53e1d50c670400eb304e6da2dc | 982 | py | Python | EMPIRIC_POSTPROCESS/SPalignResults/run_pdb.py | yvehchan/TIM_EMPIRIC | 273091a4a257ed51dfd529709138396a18fa49ac | [
"MIT"
] | null | null | null | EMPIRIC_POSTPROCESS/SPalignResults/run_pdb.py | yvehchan/TIM_EMPIRIC | 273091a4a257ed51dfd529709138396a18fa49ac | [
"MIT"
] | null | null | null | EMPIRIC_POSTPROCESS/SPalignResults/run_pdb.py | yvehchan/TIM_EMPIRIC | 273091a4a257ed51dfd529709138396a18fa49ac | [
"MIT"
] | null | null | null | import subprocess as sub
import sys
# tm_pdb = ('Tm',"pdb1i4n_A.ent")
# tt_pdb = ('Tt',"pdb1vc4_A.ent")
# ss_pdb = ('Ss',"pdb2c3z_A.ent")
# Select the alignment template from the first CLI argument
# (Tm/Tt/Ss -> the corresponding template PDB file).
if sys.argv[1] == 'Tm':
    template_name, current_template = 'Tm',"pdb1i4n_A.ent"
elif sys.argv[1] == 'Tt':
    template_name, current_template = 'Tt',"pdb1vc4_A.ent"
elif sys.argv[1] == 'Ss':
    template_name, current_template = 'Ss',"pdb2c3z_A.ent"
else:
    print "blah! wrong input: Tm Tt or Ss are acceptable only!"
    print "nohup python run_pdb.py Tm(Tt,Ss) &"
    sys.exit(1)

# List every .ent file in ./pdbA/ (via shell ls|grep, one name per line).
our_pds = [s.strip() for s in sub.check_output('ls ./pdbA/ | grep .ent',shell=True).strip().split('\n')]
return "./SPalignNS -pair ./pdbA/%s ./pdbA/%s"%(pdb1,pdb2)
for pdb in our_pds:
result = sub.check_output(get_cmd(current_template,pdb),shell=True)
fp = open(pdb.replace('.','_')+'_%s.aln'%template_name,'w')
fp.write(result)
fp.close()
# with open(pdb+'.aln','w') as fp:
# fp.write(result)
| 29.757576 | 104 | 0.638493 | 160 | 982 | 3.75625 | 0.41875 | 0.039933 | 0.039933 | 0.134775 | 0.053245 | 0.053245 | 0 | 0 | 0 | 0 | 0 | 0.024361 | 0.163951 | 982 | 32 | 105 | 30.6875 | 0.707674 | 0.149695 | 0 | 0 | 0 | 0 | 0.251208 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | null | 0 | 0.1 | null | null | 0.1 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
2fe31a354876b84e075d42b8513e52335b95d67e | 407 | py | Python | search/migrations/0006_auto_20180516_0457.py | kimyou7/ParkGoGreen | a0b21210823d711af56d76226919950aa01a2b92 | [
"MIT"
] | null | null | null | search/migrations/0006_auto_20180516_0457.py | kimyou7/ParkGoGreen | a0b21210823d711af56d76226919950aa01a2b92 | [
"MIT"
] | null | null | null | search/migrations/0006_auto_20180516_0457.py | kimyou7/ParkGoGreen | a0b21210823d711af56d76226919950aa01a2b92 | [
"MIT"
] | null | null | null | # Generated by Django 2.0.2 on 2018-05-16 11:57
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated by Django; keep consistent with the recorded history.

    dependencies = [
        ('search', '0005_auto_20180516_0455'),
    ]

    operations = [
        migrations.AlterField(
            model_name='park',
            name='zip_code',
            # 5-character ZIP code, optional (both blank and NULL allowed).
            field=models.CharField(blank=True, max_length=5, null=True),
        ),
    ]
| 21.421053 | 72 | 0.604423 | 47 | 407 | 5.106383 | 0.829787 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.108844 | 0.277641 | 407 | 18 | 73 | 22.611111 | 0.707483 | 0.110565 | 0 | 0 | 1 | 0 | 0.113889 | 0.063889 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.083333 | 0 | 0.333333 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
2fe822a53dc8ca2f474f7ee2540a5f1ad8fb13ec | 1,903 | py | Python | signalr_async/core/messages.py | sam-mosleh/signalr-async | 40bf79d5482051f76522987cb348e7632bcc0c07 | [
"MIT"
] | 4 | 2021-01-20T18:11:52.000Z | 2022-01-12T16:24:39.000Z | signalr_async/core/messages.py | sam-mosleh/signalr-async | 40bf79d5482051f76522987cb348e7632bcc0c07 | [
"MIT"
] | 2 | 2021-10-15T15:21:44.000Z | 2021-12-08T22:26:39.000Z | signalr_async/core/messages.py | sam-mosleh/signalr-async | 40bf79d5482051f76522987cb348e7632bcc0c07 | [
"MIT"
] | null | null | null | from dataclasses import dataclass, field
from enum import IntEnum
from typing import Any, Dict, List, Optional, Union
class MessageTypes(IntEnum):
    """Numeric discriminators for SignalR hub protocol messages."""
    INVOCATION = 1
    STREAM_ITEM = 2
    COMPLETION = 3
    STREAM_INVOCATION = 4
    CANCEL_INVOCATION = 5
    PING = 6
    CLOSE = 7
class HubMessageBase:
    """Common base for all hub messages; subclasses set the type_ discriminator."""
    type_: MessageTypes
@dataclass
class InvocationMessage(HubMessageBase):
    """Non-streaming invocation of a hub method."""
    invocation_id: str
    target: str  # name of the hub method to invoke
    arguments: List[Any]
    headers: Optional[Dict[str, Any]] = None
    type_: MessageTypes = MessageTypes.INVOCATION
    stream_ids: List[str] = field(default_factory=list)
@dataclass
class StreamItemMessage(HubMessageBase):
    """A single item produced by an active stream invocation."""
    invocation_id: str
    item: Dict[str, Any]
    headers: Optional[Dict[str, Any]] = None
    type_: MessageTypes = MessageTypes.STREAM_ITEM
@dataclass
class CompletionMessage(HubMessageBase):
    """Final message of an invocation; carries either a result or an error."""
    invocation_id: str
    headers: Optional[Dict[str, Any]] = None
    type_: MessageTypes = MessageTypes.COMPLETION
    error: Optional[str] = None
    result: Optional[Dict[str, Any]] = None
@dataclass
class StreamInvocationMessage(HubMessageBase):
    """Invocation of a hub method that yields a stream of results."""
    invocation_id: str
    target: str  # name of the hub method to invoke
    arguments: List[Any]  # annotation tightened from bare `list` (matches InvocationMessage)
    stream_ids: List[str]  # annotation tightened from bare `list` (matches InvocationMessage)
    headers: Optional[Dict[str, Any]] = None
    type_: MessageTypes = MessageTypes.STREAM_INVOCATION
@dataclass
class CancelInvocationMessage(HubMessageBase):
    """Request to cancel an in-progress stream invocation."""
    invocation_id: str
    headers: Optional[Dict[str, Any]] = None
    type_: MessageTypes = MessageTypes.CANCEL_INVOCATION
@dataclass
class PingMessage(HubMessageBase):
    """Keep-alive message; carries no payload."""
    type_: MessageTypes = MessageTypes.PING
@dataclass
class CloseMessage(HubMessageBase):
    """Sent by either party to terminate the connection."""
    type_: MessageTypes = MessageTypes.CLOSE
    error: Optional[str] = None  # annotation corrected: default is None
    allow_reconnect: Optional[bool] = None  # annotation corrected: default is None
# Union of every concrete hub message type, for annotating code that handles
# an arbitrary protocol message.
HubMessage = Union[
    InvocationMessage,
    StreamItemMessage,
    CompletionMessage,
    StreamInvocationMessage,
    CancelInvocationMessage,
    PingMessage,
    CloseMessage,
]
| 22.388235 | 56 | 0.7299 | 195 | 1,903 | 7.005128 | 0.271795 | 0.093704 | 0.051245 | 0.079063 | 0.355051 | 0.338946 | 0.338946 | 0.338946 | 0.264275 | 0.264275 | 0 | 0.004551 | 0.191802 | 1,903 | 84 | 57 | 22.654762 | 0.883615 | 0 | 0 | 0.296875 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.046875 | 0 | 0.75 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 1 |
2fea604bea2a0fd4db7527c2e44b6035cbadde44 | 1,613 | py | Python | setuper web app/handlers/admin/adminhandler.py | dragondjf/CloudSetuper | 31aefe629f7f2d59d287981eda3e4e618ace9e9f | [
"MIT"
] | 22 | 2015-01-08T12:54:20.000Z | 2021-05-16T04:15:45.000Z | setuper web app/handlers/admin/adminhandler.py | dragondjf/CloudSetuper | 31aefe629f7f2d59d287981eda3e4e618ace9e9f | [
"MIT"
] | null | null | null | setuper web app/handlers/admin/adminhandler.py | dragondjf/CloudSetuper | 31aefe629f7f2d59d287981eda3e4e618ace9e9f | [
"MIT"
] | 11 | 2015-01-25T01:26:45.000Z | 2021-08-18T01:40:40.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from tornado.web import authenticated, removeslash
from handlers.basehandlers import BaseHandler
# In-memory admin credential store used by AdminLoginHandler.checkUser.
# NOTE(review): hard-coded plaintext credentials -- acceptable for a demo
# only; replace with a real user store before production use.
adminusers = [
    {
        'username': 'admin',
        'password': 'admin'
    }
]
class AdminLoginHandler(BaseHandler):
    """Admin login: renders the login form and validates posted credentials."""
    role = "admin"

    @removeslash
    def get(self):
        """Render the login page, or redirect to /admin if already logged in."""
        if self.current_user:
            self.redirect('/admin')
            # Bug fix: without this return, render() executed after the
            # redirect had already finished the response.
            return
        self.render("admin/login.html", title="Cloud Setuper", username=self.current_user)

    def post(self):
        """Validate credentials and respond with a status/info dict.

        On success, a secure session cookie named after the handler role
        ("admin") is set to the username.
        """
        username = self.get_argument("username", "")
        password = self.get_argument("password", "")
        user = {
            'username': username,
            'password': password
        }
        checkstatus = self.checkUser(user)
        if checkstatus['status']:
            self.set_secure_cookie(self.role, username)
            response = {
                'status': "success",
                'info': checkstatus['info']
            }
        else:
            response = {
                'status': "fail",
                'info': checkstatus['info']
            }
        self.write(response)

    def checkUser(self, user):
        """Return {'status': bool, 'info': str} telling whether *user* is a known admin."""
        if user in adminusers:
            return {
                'status': True,
                'info': "login success"
            }
        else:
            return {
                'status': False,
                'info': "please check username or password."
            }
class AdminLogoutHandler(BaseHandler):
    """Logs the admin out by clearing the session cookie."""
    role = "admin"
    @authenticated
    @removeslash
    def post(self):
        # NOTE(review): login sets the cookie under the *role* name ("admin"),
        # but this clears a cookie named after get_current_user()'s return
        # value -- verify that returns the cookie name, otherwise the login
        # cookie is never cleared (self.clear_cookie(self.role) may be meant).
        self.clear_cookie(self.get_current_user())
| 24.439394 | 90 | 0.520769 | 140 | 1,613 | 5.935714 | 0.421429 | 0.039711 | 0.048135 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.000969 | 0.360198 | 1,613 | 65 | 91 | 24.815385 | 0.804264 | 0.026038 | 0 | 0.269231 | 0 | 0 | 0.13703 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.076923 | false | 0.076923 | 0.038462 | 0 | 0.230769 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 1 |
2feed576dc500b8d42c7907dda91a0c63a0e4230 | 4,573 | py | Python | character-identifier/embedding_loader.py | vtt-project/vtt-char-identify | 8d991bc5b3bec1415cea2e5ea4dcc37b6c6b36b1 | [
"Apache-2.0"
] | 12 | 2018-05-20T22:01:05.000Z | 2020-05-08T06:38:38.000Z | character-identifier/embedding_loader.py | vtt-project/vtt-char-identify | 8d991bc5b3bec1415cea2e5ea4dcc37b6c6b36b1 | [
"Apache-2.0"
] | null | null | null | character-identifier/embedding_loader.py | vtt-project/vtt-char-identify | 8d991bc5b3bec1415cea2e5ea4dcc37b6c6b36b1 | [
"Apache-2.0"
] | 5 | 2018-11-18T21:16:27.000Z | 2021-09-23T13:46:52.000Z | import os
import sys
import numpy as np
import gensim
from gensim.models import word2vec
import data_utils
from config_utils import data_paths
# Scale applied to random initial embeddings so unmatched rows start near
# zero (see filter_embeddings). TODO @Future: there should be a smarter way
DECREASE_FACTOR=1e-4
def load_word2vec_embeddings(filename):
    """Load pre-trained word2vec vectors from *filename*.

    Filenames containing ".bin" are read as binary word2vec format,
    anything else as the text format.
    """
    is_binary_format = ".bin" in filename
    return gensim.models.KeyedVectors.load_word2vec_format(filename, binary=is_binary_format)
def filter_embeddings(word_vectors, vocabulary_idx_to_word, normalise_word=False, rnd_init=False):
    """Build an embedding matrix restricted to the given vocabulary.

    Row *i* of the returned matrix is the pre-trained vector for
    ``vocabulary_idx_to_word[i]`` when available; otherwise it keeps its
    initial value (zeros, or scaled-down random values when ``rnd_init``).

    :param normalise_word: set words to lowercase and replace whitespace by '_'
    :param rnd_init: If True, set initial weights to random numbers drawn from
        uniform distribution, otherwise set weights to zero.
    :return: (matrix, indices of words not found, indices of words found)
    """
    n_words = len(vocabulary_idx_to_word)
    dim = word_vectors.vector_size
    if rnd_init:
        matrix = np.random.rand(n_words, dim) * DECREASE_FACTOR
    else:
        matrix = np.zeros(shape=(n_words, dim))
    missing_inds = []
    present_inds = []
    for idx, token in enumerate(vocabulary_idx_to_word):
        lookup = token.replace(" ", "_").lower() if normalise_word else token
        if lookup in word_vectors:
            matrix[idx, :] = word_vectors.wv[lookup]
            present_inds.append(idx)
        else:
            missing_inds.append(idx)
    return matrix, missing_inds, present_inds
def fill_missing_embeddings(word_embeddings, unk_inds, found_inds):
    """In place, nudge unknown rows towards a scaled-down mean of the known rows.

    For unknown entities: add average emb vector of found entities to their
    random initialisation.
    TODO @Future: Is it better to initialize these as zeros instead of averages?
    """
    mean_known = word_embeddings[found_inds].mean(axis=0)
    word_embeddings[unk_inds] += 1e-2 * mean_known
def load_word_embeddings(embeddings_fname, training_datapath, training_data, logger=None):
    """
    Load (or build and cache) word embeddings restricted to a training vocabulary.

    On first use, the relevant vectors are filtered out of the full pre-trained
    file and cached as .npy next to the vocabulary; later calls load the cache.

    :param embeddings_fname: The name of the file containing pre-trained embeddings.
    E.g., the Google-news w2v embeddings
    :param training_datapath: The name of the file containing the training data for
    a model which uses word embeddings (loaded from embeddings_fname).
    """
    # vocab_fname: The name of the file containing the relevant vocabulary.
    # Each line contains the word idx and the word, separated by tabs ("\t").
    vocab_fname = training_datapath.replace(".conll", ".vocab")
    word_emb_fname = data_utils.get_embeddings_path_for_vocab(embeddings_fname, vocab_fname)
    if os.path.exists(word_emb_fname):
        # Cache hit: load the previously filtered matrix directly.
        if logger:
            logger.whisper("Loading token embedding from {0}".format(word_emb_fname))
        word_embeddings = np.load(word_emb_fname)
    else:
        # Cache miss: build the vocabulary, filter the full embedding file
        # down to it, and persist the result for next time.
        vocabulary_idx_to_word,_ = data_utils.get_vocabulary(vocab_fname, extract_from=training_data, logger=logger)
        all_word_vectors = load_word2vec_embeddings(embeddings_fname)
        word_embeddings,_,_ = filter_embeddings(all_word_vectors, vocabulary_idx_to_word)
        save_word_embeddings(word_embeddings, word_emb_fname)
    return word_embeddings
def load_entity_embeddings(embeddings_fname, vocab_fname, logger=None):
    """
    Load cached entity embeddings for the given vocabulary from a .npy file.

    :param embeddings_fname: The name of the file containing pre-trained embeddings.
    E.g., the Google-news w2v embeddings
    :param vocab_fname: The name of the file containing the relevant vocabulary (entity names).
    Each line contains the word idx and the word, separated by tabs ("\t").
    """
    if not embeddings_fname.endswith(".npy"):
        # Resolve the per-vocabulary cache path for a raw embeddings file.
        embeddings_fname = data_utils.get_embeddings_path_for_vocab(embeddings_fname, vocab_fname)
    if os.path.exists(embeddings_fname):
        if not logger is None:
            logger.whisper("Loading entity embedding from {0}".format(embeddings_fname))
        word_embeddings = np.load(embeddings_fname)
    # NOTE: no else-branch -- if the cache file is missing, word_embeddings is
    # unbound and the return below raises. The disabled code kept in the string
    # below used to build the cache from the raw embeddings.
    """
    # The model does not use embeddings (yet) which were extracted from some other source
    else:
        vocabulary_idx_to_word,_ = data_utils.load_vocabulary(vocab_fname)
        all_entity_vectors = load_word2vec_embeddings(embeddings_fname)
        word_embeddings, unk_inds, found_inds = filter_embeddings(all_entity_vectors, vocabulary_idx_to_word, normalise_word=True, rnd_init=True)
        fill_missing_embeddings(word_embeddings, unk_inds, found_inds)
        save_word_embeddings(word_embeddings, embeddings_fname)
    """
    return word_embeddings
def save_word_embeddings(word_embeddings, outfname, logger=None):
    """Persist an embedding matrix to *outfname* in NumPy .npy format."""
    np.save(outfname, word_embeddings)
    if logger is not None:
        logger.whisper("Embeddings saved in \n\t{0}".format(outfname))
| 44.833333 | 145 | 0.730374 | 618 | 4,573 | 5.122977 | 0.255663 | 0.084018 | 0.037903 | 0.04801 | 0.435565 | 0.371762 | 0.35597 | 0.283007 | 0.246368 | 0.186987 | 0 | 0.004061 | 0.192215 | 4,573 | 101 | 146 | 45.277228 | 0.853005 | 0.252132 | 0 | 0.127273 | 0 | 0 | 0.040934 | 0 | 0 | 0 | 0 | 0.019802 | 0 | 1 | 0.109091 | false | 0 | 0.127273 | 0 | 0.309091 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
2ff404338aa621080a3a500a159897b5f6c9013e | 5,821 | py | Python | TestInterfaceResiduePrediction.py | sebastiandaberdaku/AntibodyInterfacePrediction | 4d31f57f7cdac1fe68cfb4f3448f6e3129ae2838 | [
"BSD-3-Clause"
] | 10 | 2017-10-11T16:05:35.000Z | 2021-10-01T14:43:10.000Z | TestInterfaceResiduePrediction.py | sebastiandaberdaku/AntibodyInterfacePrediction | 4d31f57f7cdac1fe68cfb4f3448f6e3129ae2838 | [
"BSD-3-Clause"
] | null | null | null | TestInterfaceResiduePrediction.py | sebastiandaberdaku/AntibodyInterfacePrediction | 4d31f57f7cdac1fe68cfb4f3448f6e3129ae2838 | [
"BSD-3-Clause"
] | 2 | 2018-09-13T17:04:13.000Z | 2019-01-22T08:55:22.000Z | # This script runs the IF algorithm for outlier detection to remove false positive patches and maps the predicted LSPs on the underlying residues.
# The results are compared to other predictor software packages.
# Please remember to set the path variable to the current location of the test set.
import numpy as np
from sklearn.neighbors.kd_tree import KDTree
from glob import glob
from math import copysign
from sklearn.ensemble import IsolationForest
from os.path import basename
from os import path, makedirs
from Bio.PDB.PDBParser import PDBParser
p = PDBParser(QUIET=True, PERMISSIVE=True)
from Bio.PDB.Polypeptide import three_to_one
def convert3to1(s):
    """Map a three-letter amino-acid code to its one-letter code ("X" if unknown)."""
    try:
        return three_to_one(s)
    except KeyError:
        return "X"
import re
# PDB hydrogen atom names may carry a digit or space before the "H"
# (e.g. "1HB ", " HG1"), hence the "[123 ]*" prefix.
_hydrogen = re.compile("[123 ]*H.*")
def isHydrogen(atm):
    # Truthy (a match object) when the atom's id names a hydrogen atom.
    return _hydrogen.match(atm.get_id())
def isHETATM(atm):
    # The first element of the parent residue's id is the hetero flag:
    # " " marks a standard ATOM record, anything else a HETATM.
    return atm.get_parent().get_id()[0] != " "
#######################
# import pickle
#######################
# Fraction of patches IsolationForest treats as outliers (its `contamination`).
outlier_fraction = 0.18
# Patch-score offset: raw scores are shifted by this value before rescaling.
threshold = 0.6232013
# IsolationForest runs averaged per structure (the forest is randomized).
n_iterations = 100
# Radius for query_radius when mapping a patch center onto nearby atoms
# (presumably in angstroms -- verify against the patch-center files).
mapping_distance = 6.0
def compute_average_scores(testing_set_path, prediction_path):
    """For each *_ab.pdb antibody structure in testing_set_path: filter its
    surface patches with repeated IsolationForest runs, map the surviving
    patch predictions onto the underlying residues, and write the averaged
    per-residue scores to <prediction_path>/<id>_ab_residue_prediction.txt.
    """
    files = glob("%s/*_ab.pdb" % (testing_set_path))
    for pdb_filename in sorted(files) :
        file_id = basename(pdb_filename)[:-7]
        pdb_patch_coord = ("%s/%s_ab_patch_centers.txt" % (testing_set_path, file_id))
        pdb_patch_score = ("%s/%s_ab_patch_score.txt" % (testing_set_path, file_id))
        with open(pdb_patch_coord) as coord, open(pdb_patch_score) as score:
            patch_coord = [[float(x) for x in a.strip().split()] for a in coord.readlines()]
            patch_score = [float(x) - threshold for x in score.readlines()]
        # Rescale shifted scores into [-1, 1]: negatives by |min|, positives by max.
        min_v = min(patch_score)
        max_v = max(patch_score)
        patch_score_scaled = [(lambda x: -(x / min_v) if x < 0 else (x / max_v))(x) for x in patch_score]
        # Fit only on patches with non-negative (interface-leaning) scores.
        X = np.array([a[0] for a in zip(patch_coord, patch_score_scaled) if a[1] >= 0])
        X_weights = np.array([x for x in patch_score_scaled if x >= 0])
        pdb_structure = p.get_structure(file_id, pdb_filename)
        # Heavy atoms only; KD-tree gives fast radius queries from patch centers.
        atoms = np.array([atm.get_coord() for atm in pdb_structure.get_atoms() if not isHydrogen(atm)])
        atoms_tree = KDTree(atoms)
        # Map each atom coordinate back to its residue for score accumulation.
        residues_coord = {}
        for residue in pdb_structure.get_residues() :
            for atm in residue :
                residues_coord[tuple(atm.get_coord())] = residue
        average_residues_scores = {residue : 0 for residue in pdb_structure.get_residues()}
        # since the isollation forest algorithm is random, we run it several times to assess the average performance of the method
        for iteration in xrange(n_iterations) :
            print "Running iteration %d of %d" % (iteration + 1, n_iterations)
            forest = IsolationForest(contamination=outlier_fraction, n_jobs=-1)
            forest.fit(X, sample_weight=X_weights)
            prediction_isolation_forest = forest.predict(patch_coord)
            patch_pred_no_outliers = [copysign(1, x) for x in prediction_isolation_forest]
            # here we map the patch predictions on the underlying residues
            for i in xrange(len(patch_coord)) : # for each patch
                # if it was predicted as non-interface continue to the next
                if patch_pred_no_outliers[i] < 0 : continue
                # multiple residues can be underneath a given patch, we do not want to consider the same residue more than once
                marked_residues = set()
                # get all atoms within mapping_distance from the given patch center
                indexes = atoms_tree.query_radius([patch_coord[i]], r=mapping_distance, count_only = False, return_distance=True, sort_results = True)
                for ind in zip(indexes[0][0], indexes[1][0]) :
                    # which residue does the current atom belong to?
                    current_res = residues_coord[tuple(atoms[ind[0]])]
                    # if already considered continue to the next
                    if current_res in marked_residues : continue
                    # increase the score of the current residue
                    average_residues_scores[current_res] += 1 / (1.0 + ind[1]) # patch_pred_no_outliers[i] / (1.0 + ind[1])
                    # mark as seen for the current patch
                    marked_residues.add(current_res)
        # Average accumulated scores over the iterations.
        average_residues_scores.update((x, y / n_iterations) for x, y in average_residues_scores.items())
        # Rows: (1-letter resname, chain id, resseq, icode, score), sorted by chain then resseq.
        residues_with_scores = [(lambda x, y, z : (convert3to1(z), x[2], x[3][1], x[3][2], y))(residue.get_full_id(), score, residue.get_resname()) for residue, score in average_residues_scores.items()]
        residues_with_scores.sort(key=lambda x : x[2])
        residues_with_scores.sort(key=lambda x : x[1])
        if not path.exists(prediction_path) : makedirs(prediction_path)
        print file_id
        with open("%s/%s_ab_residue_prediction.txt" % (prediction_path, file_id), "wb") as output_residue_scores :
            for r in residues_with_scores :
                output_residue_scores.write("%s;%s;%d;%s;%s\n" %(r[0], r[1], r[2], r[3], str(r[4])))
# Run the residue-level prediction for each benchmark split; paths are
# relative to the repository root (set them to the current test-set location).
compute_average_scores("./our_dataset/testing_set/LH_NonProtein/structures/", "./method_comparison/our_method/predictions/LH_NonProtein/")
compute_average_scores("./our_dataset/testing_set/LH_Protein/structures/", "./method_comparison/our_method/predictions/LH_Protein/")
compute_average_scores("./our_dataset/homology/LH_NonProtein/structures/", "./method_comparison/our_method_homology/predictions/LH_NonProtein/")
compute_average_scores("./our_dataset/homology_90/LH_Protein/structures/", "./method_comparison/our_method_homology/predictions/LH_Protein/")
| 47.325203 | 202 | 0.662429 | 811 | 5,821 | 4.532676 | 0.288533 | 0.027203 | 0.027203 | 0.007617 | 0.215996 | 0.194777 | 0.160501 | 0.108542 | 0 | 0 | 0 | 0.013653 | 0.232434 | 5,821 | 122 | 203 | 47.713115 | 0.809087 | 0.162171 | 0 | 0 | 0 | 0 | 0.12113 | 0.10721 | 0 | 0 | 0 | 0 | 0 | 0 | null | null | 0 | 0.133333 | null | null | 0.026667 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
2ff6da846eaf58d5ba7cf9c6f7b0be471d5b5563 | 9,139 | py | Python | solrcl/document.py | zaccheob/solrcl | de731c0e1f12361770121e6f2aad7e41c7a40f68 | [
"MIT"
] | null | null | null | solrcl/document.py | zaccheob/solrcl | de731c0e1f12361770121e6f2aad7e41c7a40f68 | [
"MIT"
] | null | null | null | solrcl/document.py | zaccheob/solrcl | de731c0e1f12361770121e6f2aad7e41c7a40f68 | [
"MIT"
] | null | null | null | # -*- coding: utf8 -*-
import warnings
import xml.etree.cElementTree as ET
import re
import logging
logger = logging.getLogger("solrcl")
logger.setLevel(logging.DEBUG)
import exceptions
# Raised for any document-level problem (field not in schema, invalid value, bad XML).
class SOLRDocumentError(exceptions.SOLRError): pass
# Warning category for non-fatal SOLR document issues.
class SOLRDocumentWarning(UserWarning): pass
class SOLRDocument(object):
    """Class that stores data for a SOLR document. To instantiate SOLRDocument from xml use SOLRDocumentFactory"""
    def __init__(self, solrid, solrcore):
        # _fields maps field name -> list of values (even for single-valued fields).
        self._fields = {}
        self._child_docs = []
        # solrcore exposes the schema (fields, id_field) used for validation.
        self.solr = solrcore
        self.setField(self.solr.id_field, solrid)

    def __getattr__(self, name):
        #Shortcut to id field
        if name == "id":
            return self.getField(self.solr.id_field)
        else:
            raise AttributeError

    def __eq__(self, other):
        # Equal when field names, field value sets, and (sorted) children match.
        if type(other) is type(self):
            return set(self._fields.keys()) == set(other._fields.keys()) and all(set(self._fields[x]) == set(other._fields[x]) for x in self._fields.keys()) and sorted(self._child_docs) == sorted(other._child_docs)
        return False

    def __ne__(self, other):
        return not self.__eq__(other)

    def __lt__(self, other):
        #Need it to make the object sortable (sorting is used in __eq__ method to test child documents)
        return self.id < other.id

    def _serializeValue(self, v, encoding='utf8'):
        # Coerce any value to unicode (Python 2 semantics); str is decoded.
        # NOTE(review): not referenced elsewhere in this class -- possibly dead.
        if isinstance(v, unicode):
            return v
        elif isinstance(v, str):
            return v.decode(encoding)
        else:
            return unicode(v)

    def setField(self, fieldname, fieldvalue):
        """Replace the field's values; a list becomes multiple values."""
        if self._fields.has_key(fieldname):
            del self._fields[fieldname]
        if isinstance(fieldvalue, list):
            for x in fieldvalue:
                self.appendFieldValue(fieldname, x)
        else:
            self.appendFieldValue(fieldname, fieldvalue)

    def appendFieldValue(self, fieldname, fieldvalue):
        """Append one value, validating it against the core's schema.

        Raises SOLRDocumentError for unknown fields, type-check failures, and
        multiple values on a non-multivalued field. None is always accepted.
        """
        logger.debug("appendFieldValue %s %s" % (repr(fieldname), repr(fieldvalue)))
        if self.solr.fields.has_key(fieldname):
            if fieldvalue is None:
                self._fields.setdefault(fieldname, [])
                self._fields[fieldname].append(fieldvalue)
            else:
                try:
                    self.solr.fields[fieldname].type.check(fieldvalue)
                except AssertionError, e:
                    raise SOLRDocumentError, "Invalid value %s for field %s (type %s)" % (repr(fieldvalue), fieldname, self.solr.fields[fieldname].type.name)
                self._fields.setdefault(fieldname, [])
                if len(self._fields[fieldname]) > 0 and not self.solr.fields[fieldname].multi:
                    raise SOLRDocumentError, "Multiple values for not multivalued field %s" % fieldname
                self._fields[fieldname].append(fieldvalue)
        else:
            raise SOLRDocumentError, "Field %s not in schema" % fieldname
        logger.debug("SET: '%s' '%s'" % (fieldname, repr(self._fields[fieldname])))

    def getField(self, fieldname):
        """Return the value list for multivalued fields, the single value otherwise."""
        ret = self._fields[fieldname]
        if self.solr.fields[fieldname].multi:
            return ret
        else:
            return ret[0]

    def removeField(self, fieldname):
        self._fields.pop(fieldname, None)

    def getFieldDefault(self, fieldname, default=None):
        """Like getField, but return *default* when the field is not set."""
        try:
            return self.getField(fieldname)
        except KeyError:
            return default

    def addChild(self, doc):
        self._child_docs.append(doc)

    def getChildDocs(self):
        return self._child_docs

    def hasChildDocs(self):
        return bool(self._child_docs)

    def _toXML(self, update=True):
        """Build the <doc> element tree; update=True emits update='set' atomic updates."""
        doc = ET.Element('doc')
        for field, value in self._fields.iteritems():
            if value[0] is None:
                # Null field: a single empty element flagged null='true'.
                f = ET.SubElement(doc, 'field', null='true', name=field)
                if field != self.solr.id_field and update:
                    f.set('update', 'set')
                f.text = ''
            else:
                for v in value:
                    f = ET.SubElement(doc, 'field', null='false', name=field)
                    if field != self.solr.id_field and update:
                        f.set('update', 'set')
                    f.text = self.solr.fields[field].type.serialize(v)
        for child in self._child_docs:
            doc.append(child._toXML(update=update))
        return doc

    def toXML(self, update=True):
        """Serializes SOLRDocument into an XML string suitable for SOLR update request handler"""
        #Unfortunately it seems there's no way to avoid xml declaration... so I've to remove it with a regexp
        return re.sub(r"^<\?xml version='1.0' encoding='[^']*'\?>\s*", '', ET.tostring(self._toXML(update=update), encoding='utf8'))

    def clone(self):
        """Return a deep copy of this document (fields and children); self.solr is shared."""
        #Don't use copy.deepcopy because i don't want to clone also self.solr object
        anotherme = SOLRDocument(self.id, self.solr)
        for fieldname in self._fields.iterkeys():
            anotherme.setField(fieldname, self.getField(fieldname))
        for child in self.getChildDocs():
            anotherme.addChild(child.clone())
        return anotherme

    def update(self, otherdoc, merge_child_docs=True):
        """Overwrite this document's fields with otherdoc's; merge or replace children."""
        for fieldname in otherdoc._fields.iterkeys():
            self.setField(fieldname, otherdoc.getField(fieldname))
        if not merge_child_docs:
            #Shortcut! A removeChild method would be better, but I'm lazy :)
            self._child_docs = []
        actual_child_docs = dict(((d.id, d) for d in self.getChildDocs()))
        for child_doc in otherdoc.getChildDocs():
            if actual_child_docs.has_key(child_doc.id):
                #update child
                actual_child_docs[child_doc.id].update(child_doc)
            else:
                #new child
                self.addChild(child_doc.clone())
class SOLRDocumentFactory(object):
    """Class with methods to create SOLRDocument instances that fits on solr core"""
    def __init__(self, solr):
        """Initializes the instance with solr core"""
        self.solr = solr
    def _fromXMLDoc(self, xmldoc):
        """Build one SOLRDocument from an ElementTree <doc> element.

        Fields are validated against the solr schema and deserialized via the
        schema field types; nested <doc> elements become child documents.
        Raises SOLRDocumentError for unknown fields, unexpected tags,
        deserialization failures, or a missing unique id field.
        """
        id_in_record = False
        #Create a new document with a fake id, unfortunately id field is not necessarly in the first position so I should iterate all fields to find it before reading other fields. In this way I can set it later. The counterpart is that I have to enforce that id field exists in another way.
        doc = SOLRDocument(u'changeme', self.solr)
        for field in xmldoc:
            if field.tag == 'field':
                fieldname = field.get('name')
                # 'in' replaces the deprecated dict.has_key(); the call-style
                # raise replaces the Python2-only "raise E, msg" form, matching
                # the modern syntax already used in the except clause below.
                if fieldname not in self.solr.fields:
                    raise SOLRDocumentError("Field %s does not exist in schema" % fieldname)
                if fieldname == self.solr.id_field:
                    id_in_record = True
                if field.get('null') == 'true':
                    doc.setField(fieldname, None)
                else:
                    value = field.text
                    # Note that when there is no text field.text returns None, not ''
                    # Let's transform it in '' because Nulls are already managed separately
                    value = u'' if value is None else value
                    try:
                        # unicode() keeps the Python 2 text semantics this module targets.
                        if fieldname == self.solr.id_field:
                            doc.setField(fieldname, self.solr.fields[fieldname].type.deserialize(unicode(value)))
                        else:
                            doc.appendFieldValue(fieldname, self.solr.fields[fieldname].type.deserialize(unicode(value)))
                    except ValueError as e:
                        raise SOLRDocumentError("%s" % e)
            elif field.tag == 'doc':
                doc.addChild(self._fromXMLDoc(field))
            else:
                raise SOLRDocumentError("Invalid tag {0} in doc".format(field.tag))
        if not id_in_record:
            raise SOLRDocumentError("Missing unique id field in doc")
        return doc
    def fromXML(self, fh):
        """Returns a generator over SOLRDocument instances from an xml document read from the file like object fh. Fields are checked against solr schema and if not valid a SOLRDocumentXMLError exception is raised."""
        # doc_depth tracks nesting so only top-level <doc> elements are yielded;
        # nested ones are consumed by _fromXMLDoc as child documents.
        doc_depth = 0
        for (event, element) in ET.iterparse(fh, events=('start', 'end')):
            if event == 'start':
                if element.tag == 'doc':
                    doc_depth += 1
            elif event == 'end':
                if element.tag == 'doc':
                    doc_depth -= 1
                elif element.tag in ('field', 'add'):
                    pass
                else:
                    raise SOLRDocumentError("Invalid tag {0}".format(element.tag))
            if element.tag == 'doc' and event == 'end' and doc_depth == 0:
                try:
                    yield self._fromXMLDoc(element)
                except SOLRDocumentError as e:
                    #Transform document errors in warnings to continue to next
                    warnings.warn("%s" % e, SOLRDocumentWarning)
| 40.083333 | 291 | 0.589561 | 1,065 | 9,139 | 4.956808 | 0.243192 | 0.031824 | 0.023868 | 0.017049 | 0.15628 | 0.124266 | 0.07312 | 0.054935 | 0.044327 | 0.021974 | 0 | 0.002228 | 0.312397 | 9,139 | 227 | 292 | 40.259912 | 0.837842 | 0.094759 | 0 | 0.191617 | 0 | 0 | 0.052931 | 0.002847 | 0 | 0 | 0 | 0 | 0.005988 | 0 | null | null | 0.017964 | 0.02994 | null | null | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
2ff8bf398acab99da30d643bdc618ebcc561b20a | 14,054 | py | Python | testscripts/RDKB/component/WIFIAgent/TS_WIFIAGENT_CheckTelemetryMarkerCHUTIL_2_Logging.py | rdkcmf/rdkb-tools-tdkb | 9f9c3600cd701d5fc90ac86a6394ebd28d49267e | [
"Apache-2.0"
] | null | null | null | testscripts/RDKB/component/WIFIAgent/TS_WIFIAGENT_CheckTelemetryMarkerCHUTIL_2_Logging.py | rdkcmf/rdkb-tools-tdkb | 9f9c3600cd701d5fc90ac86a6394ebd28d49267e | [
"Apache-2.0"
] | null | null | null | testscripts/RDKB/component/WIFIAgent/TS_WIFIAGENT_CheckTelemetryMarkerCHUTIL_2_Logging.py | rdkcmf/rdkb-tools-tdkb | 9f9c3600cd701d5fc90ac86a6394ebd28d49267e | [
"Apache-2.0"
] | null | null | null | ##########################################################################
# If not stated otherwise in this file or this component's Licenses.txt
# file the following copyright and licenses apply:
#
# Copyright 2021 RDK Management
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##########################################################################
'''
<?xml version="1.0" encoding="UTF-8"?><xml>
<id/>
<version>2</version>
<name>TS_WIFIAGENT_CheckTelemetryMarkerCHUTIL_2_Logging</name>
<primitive_test_id/>
<primitive_test_name>WIFIAgent_Get</primitive_test_name>
<primitive_test_version>1</primitive_test_version>
<status>FREE</status>
<synopsis>To check if the channel utilization marker CHUTIL_2 logging is happening according to the log interval set with Device.DeviceInfo.X_RDKCENTRAL-COM_WIFI_TELEMETRY.ChUtilityLogInterval.</synopsis>
<groups_id/>
<execution_time>20</execution_time>
<long_duration>false</long_duration>
<advanced_script>false</advanced_script>
<remarks/>
<skip>false</skip>
<box_types>
<box_type>Broadband</box_type>
<box_type>RPI</box_type>
</box_types>
<rdk_versions>
<rdk_version>RDKB</rdk_version>
</rdk_versions>
<test_cases>
<test_case_id>TC_WIFIAGENT_148</test_case_id>
<test_objective>To check if the channel utilization marker CHUTIL_2 logging is happening according to the log interval set with Device.DeviceInfo.X_RDKCENTRAL-COM_WIFI_TELEMETRY.ChUtilityLogInterval.</test_objective>
<test_type>Positive</test_type>
<test_setup>Broadband, RPI</test_setup>
<pre_requisite>1.Ccsp Components should be in a running state else invoke cosa_start.sh manually that includes all the ccsp components and TDK Component
2.TDK Agent should be in running state or invoke it through StartTdk.sh script</pre_requisite>
<api_or_interface_used>None</api_or_interface_used>
<input_parameters>ParamName : Device.DeviceInfo.X_RDKCENTRAL-COM_WIFI_TELEMETRY.ChUtilityLogInterval
ParamValue : 30 or 60
Type : int
</input_parameters>
<automation_approch>1. Load the modules
2. Get the initial value of Device.DeviceInfo.X_RDKCENTRAL-COM_WIFI_TELEMETRY.ChUtilityLogInterval.
3. If the initial value is 30, set it to 60 else set the value of Device.DeviceInfo.X_RDKCENTRAL-COM_WIFI_TELEMETRY.ChUtilityLogInterval to 30.
4. Check if the log file wifihealth.txt is present under /rdklogs/logs.
5. Get the initial count of the telemetry marker CHUTIL_2 and store it.
6. Sleep for a wait time of sum of initial value of Device.DeviceInfo.X_RDKCENTRAL-COM_WIFI_TELEMETRY.ChUtilityLogInterval and the new value set.
7. After the wait time, check the final count of the telemetry marker CHUTIL_2 and compute the difference with the initial value.
8. The difference should be greater than or equal to 2.
9. Revert Device.DeviceInfo.X_RDKCENTRAL-COM_WIFI_TELEMETRY.ChUtilityLogInterval to initial value.
10. Unload the modules.</automation_approch>
<expected_output>The channel utilization marker CHUTIL_2 logging should happen according to the log interval set with Device.DeviceInfo.X_RDKCENTRAL-COM_WIFI_TELEMETRY.ChUtilityLogInterval.</expected_output>
<priority>High</priority>
<test_stub_interface>wifiagent</test_stub_interface>
<test_script>TS_WIFIAGENT_CheckTelemetryMarkerCHUTIL_2_Logging</test_script>
<skipped>No</skipped>
<release_version>M93</release_version>
<remarks/>
</test_cases>
<script_tags/>
</xml>
'''
def getLogFileTotalLinesCount(tdkTestObj, string, step):
cmd = "grep -ire " + "\"" + string + "\" " + "/rdklogs/logs/wifihealth.txt | wc -l";
expectedresult="SUCCESS";
tdkTestObj.addParameter("command",cmd);
tdkTestObj.executeTestCase(expectedresult);
actualresult = tdkTestObj.getResult();
details = tdkTestObj.getResultDetails();
print "\n*********************************************";
print "TEST STEP %d : Get the number of log lines currently present" %step;
print "EXPECTED RESULT %d : Should get the number of log lines currently present" %step;
print "Query : %s" %cmd;
count = 0;
if expectedresult in actualresult:
count = int(tdkTestObj.getResultDetails().strip().replace("\\n", ""));
tdkTestObj.setResultStatus("SUCCESS");
print "ACTUAL RESULT %d: Successfully captured the number of log lines present : %d" %(step, count);
print "[TEST EXECUTION RESULT] : SUCCESS";
else:
tdkTestObj.setResultStatus("FAILURE");
print "ACTUAL RESULT %d: Failed to capture the number of log lines present : %s" %(step, details);
print "[TEST EXECUTION RESULT] : FAILURE";
print "*********************************************\n";
return count,step;
# use tdklib library,which provides a wrapper for tdk testcase script
import tdklib;
from time import sleep;
#Test component to be tested
obj = tdklib.TDKScriptingLibrary("pam","RDKB");
sysObj = tdklib.TDKScriptingLibrary("sysutil","RDKB");
#IP and Port of box, No need to change,
#This will be replaced with corresponding DUT Ip and port while executing script
# NOTE(review): <ipaddress> and <port> are TDK template placeholders, so this
# file is not importable as plain Python until the framework substitutes them.
ip = <ipaddress>
port = <port>
obj.configureTestCase(ip,port,'TS_WIFIAGENT_CheckTelemetryMarkerCHUTIL_2_Logging');
sysObj.configureTestCase(ip,port,'TS_WIFIAGENT_CheckTelemetryMarkerCHUTIL_2_Logging');
#Get the result of connection with test component and DUT
loadmodulestatus=obj.getLoadModuleResult();
sysutilloadmodulestatus=sysObj.getLoadModuleResult();
if "SUCCESS" in loadmodulestatus.upper() and "SUCCESS" in sysutilloadmodulestatus.upper():
    #Set the result status of execution
    obj.setLoadModuleStatus("SUCCESS");
    sysObj.setLoadModuleStatus("SUCCESS");
    expectedresult="SUCCESS";
    step = 1;
    #Check whether the wifihealth.txt file is present or not
    tdkTestObj = sysObj.createTestStep('ExecuteCmd');
    cmd = "[ -f /rdklogs/logs/wifihealth.txt ] && echo \"File exist\" || echo \"File does not exist\"";
    tdkTestObj.addParameter("command",cmd);
    expectedresult="SUCCESS";
    tdkTestObj.executeTestCase(expectedresult);
    actualresult = tdkTestObj.getResult();
    details = tdkTestObj.getResultDetails().strip().replace("\\n", "");
    print "\nTEST STEP %d: Check for wifihealth log file presence" %step;
    print "EXPECTED RESULT %d:wifihealth log file should be present" %step;
    if details == "File exist":
        tdkTestObj.setResultStatus("SUCCESS");
        print "ACTUAL RESULT %d:wifihealth log file is present" %step;
        #Get the result of execution
        print "[TEST EXECUTION RESULT] : SUCCESS";
        step = step + 1;
        #Get the value of Device.DeviceInfo.X_RDKCENTRAL-COM_WIFI_TELEMETRY.ChUtilityLogInterval
        tdkTestObj = obj.createTestStep('pam_GetParameterValues');
        tdkTestObj.addParameter("ParamName","Device.DeviceInfo.X_RDKCENTRAL-COM_WIFI_TELEMETRY.ChUtilityLogInterval");
        #Execute the test case in DUT
        tdkTestObj.executeTestCase(expectedresult);
        actualresult = tdkTestObj.getResult();
        initial_value = tdkTestObj.getResultDetails();
        print "\nTEST STEP %d: Get the TELEMETRY Channel Utility LogInterval from Device.DeviceInfo.X_RDKCENTRAL-COM_WIFI_TELEMETRY.ChUtilityLogInterval" %step;
        print "EXPECTED RESULT %d: Should get the TELEMETRY Channel Utility LogInterval from Device.DeviceInfo.X_RDKCENTRAL-COM_WIFI_TELEMETRY.ChUtilityLogInterval" %step;
        if expectedresult in actualresult and initial_value != "":
            DeflogInt = int(initial_value);
            tdkTestObj.setResultStatus("SUCCESS");
            print "ACTUAL RESULT %d: TELEMETRY Channel Utility LogInterval: %d" %(step,DeflogInt);
            #Get the result of execution
            print "[TEST EXECUTION RESULT] : SUCCESS";
            #Choose a new interval different from the current one (30 <-> 60 seconds)
            if DeflogInt == 30:
                newlogInt = "60";
            else:
                newlogInt = "30";
            #Set the LogInterval to newlogInt, the set is cross checked with get
            step = step + 1;
            tdkTestObj = obj.createTestStep('pam_SetParameterValues');
            tdkTestObj.addParameter("ParamName","Device.DeviceInfo.X_RDKCENTRAL-COM_WIFI_TELEMETRY.ChUtilityLogInterval");
            tdkTestObj.addParameter("ParamValue",newlogInt);
            tdkTestObj.addParameter("Type","int");
            expectedresult="SUCCESS";
            tdkTestObj.executeTestCase(expectedresult);
            actualresult = tdkTestObj.getResult();
            details = tdkTestObj.getResultDetails();
            print "\nTEST STEP %d: Set the TELEMETRY Channel Utility LogInterval to %ss" %(step, newlogInt);
            print "EXPECTED RESULT %d: Should set the TELEMETRY Channel Utility LogInterval to %ss" %(step, newlogInt);
            if expectedresult in actualresult:
                tdkTestObj.setResultStatus("SUCCESS");
                print "ACTUAL RESULT %d: TELEMETRY Channel Utility LogInterval: %s" %(step,details);
                #Get the result of execution
                print "[TEST EXECUTION RESULT] : SUCCESS";
                #Baseline CHUTIL_2 marker count before waiting for new log lines
                print "\nGet the number of log lines \"CHUTIL_2\" in /rdklogs/logs/wifihealth.txt";
                step = step + 1;
                tdkTestObj1 = sysObj.createTestStep('ExecuteCmd');
                log = "CHUTIL_2";
                no_of_lines_initial,step = getLogFileTotalLinesCount(tdkTestObj1, log, step);
                print "The initial number of log lines \"CHUTIL_2\" in wifihealth.txt is : %d" %no_of_lines_initial;
                #Sleeping for initial telemetry interval DeflogInt + newlogInt
                sleep_time = DeflogInt + int(newlogInt);
                print "\nSleeping for duration : %d to check if the logging is happening according to the new log interval set" %sleep_time;
                sleep(sleep_time);
                print "\nGet the final number of log lines \"CHUTIL_2\" in /rdklogs/logs/wifihealth.txt";
                step = step + 1;
                tdkTestObj1 = sysObj.createTestStep('ExecuteCmd');
                log = "CHUTIL_2";
                no_of_lines_final,step = getLogFileTotalLinesCount(tdkTestObj1, log, step);
                print "The initial number of log lines \"CHUTIL_2\" in wifihealth.txt is : %d" %no_of_lines_final;
                step = step + 1;
                difference = no_of_lines_final - no_of_lines_initial;
                print "\nThe CHUTIL_2 log lines can be >= 2, after accounting for the initial log interval and the new log interval set";
                print "TEST STEP %d: Should get CHUTIL_2 markers count greater than or equal to 2" %step;
                print "EXPECTED RESULT %d: The CHUTIL_2 markers count should be greater than or equal to 2" %step;
                if difference >= 2:
                    tdkTestObj.setResultStatus("SUCCESS");
                    print "ACTUAL RESULT %d: Number of new CHUTIL_2 markers are : %d" %(step, difference);
                    #Get the result of execution
                    print "[TEST EXECUTION RESULT] : SUCCESS";
                else:
                    tdkTestObj.setResultStatus("FAILURE");
                    print "ACTUAL RESULT %d: Number of new CHUTIL_2 markers are : %d" %(step, difference);
                    #Get the result of execution
                    print "[TEST EXECUTION RESULT] : FAILURE";
            else:
                tdkTestObj.setResultStatus("FAILURE");
                print "ACTUAL RESULT %d: Set operation failed" %(step);
                #Get the result of execution
                print "[TEST EXECUTION RESULT] :FAILURE";
            #Revert the Value
            step = step + 1;
            tdkTestObj = obj.createTestStep('pam_SetParameterValues');
            tdkTestObj.addParameter("ParamName","Device.DeviceInfo.X_RDKCENTRAL-COM_WIFI_TELEMETRY.ChUtilityLogInterval");
            tdkTestObj.addParameter("ParamValue",initial_value);
            tdkTestObj.addParameter("Type","int");
            expectedresult="SUCCESS";
            tdkTestObj.executeTestCase(expectedresult);
            actualresult = tdkTestObj.getResult();
            details = tdkTestObj.getResultDetails();
            print "\nTEST STEP %d: Revert the TELEMETRY Channel Utility LogInterval to initial value" %step;
            print "EXPECTED RESULT %d: Should revert the TELEMETRY Channel Utility LogInterval to initial value" %step;
            if expectedresult in actualresult:
                tdkTestObj.setResultStatus("SUCCESS");
                print "ACTUAL RESULT %d: Revert successful" %step;
                #Get the result of execution
                print "[TEST EXECUTION RESULT] : SUCCESS";
            else:
                tdkTestObj.setResultStatus("FAILURE");
                print "ACTUAL RESULT %d: Revertion failed" %step;
                #Get the result of execution
                print "[TEST EXECUTION RESULT] : FAILURE";
        else:
            tdkTestObj.setResultStatus("FAILURE");
            print "ACTUAL RESULT %d: TELEMETRY Channel Utility LogInterval: %s" %(step,details);
            #Get the result of execution
            print "[TEST EXECUTION RESULT] : FAILURE";
    else:
        tdkTestObj.setResultStatus("FAILURE");
        print "ACTUAL RESULT %d:wifihealth log file is not present" %step;
        #Get the result of execution
        print "[TEST EXECUTION RESULT] :FAILURE";
    obj.unloadModule("pam")
    sysObj.unloadModule("sysutil");
else:
    print "Failed to load module";
    obj.setLoadModuleStatus("FAILURE");
    sysObj.setLoadModuleStatus("FAILURE");
    print "Module loading failed";
| 51.291971 | 221 | 0.674897 | 1,615 | 14,054 | 5.76904 | 0.19195 | 0.011592 | 0.025545 | 0.040571 | 0.578834 | 0.541913 | 0.518193 | 0.498444 | 0.459053 | 0.443061 | 0 | 0.007832 | 0.218728 | 14,054 | 273 | 222 | 51.479853 | 0.84071 | 0.110431 | 0 | 0.510067 | 0 | 0.026846 | 0.37698 | 0.078108 | 0.006711 | 0 | 0 | 0 | 0 | 0 | null | null | 0 | 0.013423 | null | null | 0.315436 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
2ffb1e0056539f8edf4a7ae850e5442548db09df | 1,833 | py | Python | fern/models.py | edilio/dental | 3fa6b453939c7536883d1036fd414b3fae8977d7 | [
"MIT"
] | 1 | 2016-03-14T18:56:06.000Z | 2016-03-14T18:56:06.000Z | fern/models.py | edilio/dental | 3fa6b453939c7536883d1036fd414b3fae8977d7 | [
"MIT"
] | null | null | null | fern/models.py | edilio/dental | 3fa6b453939c7536883d1036fd414b3fae8977d7 | [
"MIT"
] | null | null | null | import datetime
from django.db import models
from django.utils import timezone
class Source(models.Model):
    # Lookup table: where/how a patient found the clinic.
    name = models.CharField(max_length=50)
    def __unicode__(self):
        # Python 2 string representation shown in the Django admin.
        return self.name
# Django choices for Patient.gender: (stored value, human-readable label).
GENDER_CHOICES = (
    ('F', 'Female'),
    ('M', 'Male'),
)
class Patient(models.Model):
    # A (possibly new) patient and the source through which they arrived.
    fullname = models.CharField(max_length=50)
    # editable=False keeps the phone number out of auto-generated forms.
    phone = models.CharField(max_length=15, null=True, blank=True, editable=False)
    source = models.ForeignKey(Source)
    birth_date = models.DateField(null=True, blank=True)
    gender = models.CharField(max_length=1, null=True, blank=True, choices=GENDER_CHOICES)
    tour_date = models.DateField(default=timezone.now)
    new_patient = models.BooleanField(default=True)
    @property
    def age(self):
        # Age in whole years as of today; None when birth_date is unset.
        if self.birth_date:
            bday = self.birth_date
            d = datetime.date.today()
            # Subtract one year when this year's birthday has not happened yet
            # ((month, day) tuple comparison).
            return (d.year - bday.year) - int((d.month, d.day) < (bday.month, bday.day))
        else:
            return None
    def __unicode__(self):
        # Python 2 string representation shown in the Django admin.
        return self.fullname
# Django choices for HappyBirthdayPatient.treatment (stored as a small integer).
TREATMENT_OPTIONS = (
    (0, 'Implants'),
    (1, 'Crowns'),
    (2, 'Laser'),
    (3, 'Surgery from July-2015')
)
class HappyBirthdayPatient(models.Model):
    # Patient record used for birthday outreach (mailing address + treatment).
    fullname = models.CharField(max_length=50)
    birth_date = models.DateField()
    address = models.CharField(max_length=120)
    treatment = models.PositiveSmallIntegerField(default=0, choices=TREATMENT_OPTIONS)
    @property
    def birth_date_month(self):
        # Full month name of the birthday, e.g. 'March'.
        return self.birth_date.strftime('%B')
    @property
    def age(self):
        # Age in whole years as of today; None when birth_date is falsy.
        # NOTE(review): duplicated from Patient.age -- a shared mixin would
        # remove the copy/paste.
        if self.birth_date:
            bday = self.birth_date
            d = datetime.date.today()
            return (d.year - bday.year) - int((d.month, d.day) < (bday.month, bday.day))
        else:
            return None
    def __unicode__(self):
        # Python 2 string representation shown in the Django admin.
        return self.fullname
2ffe9071010ecb101be8a813c44cbb919e9b3376 | 267 | py | Python | quoters/check_connection.py | suman-kr/random-quotes | 5d3241dc9647a7b16a93dece12e9f214072e64c3 | [
"MIT"
] | 22 | 2020-01-24T08:59:18.000Z | 2022-02-09T02:35:20.000Z | quoters/check_connection.py | suman-kr/random-quotes | 5d3241dc9647a7b16a93dece12e9f214072e64c3 | [
"MIT"
] | 9 | 2021-04-07T00:57:09.000Z | 2022-03-31T10:18:06.000Z | quoters/check_connection.py | suman-kr/random-quotes | 5d3241dc9647a7b16a93dece12e9f214072e64c3 | [
"MIT"
] | 1 | 2021-06-06T19:00:55.000Z | 2021-06-06T19:00:55.000Z | import socket
from quoters.constants import CONN_URL
def is_connected():
    """Return True when a TCP connection to CONN_URL on port 80 succeeds.

    Quick online/offline probe: any OSError (DNS failure, refused or
    unreachable connection, ...) is treated as "not connected".
    """
    try:
        # Keep the try body minimal: only the call that can raise OSError.
        # (create_connection never returns a falsy value, so the old
        # `if(sock_conn)` truthiness check was redundant.)
        sock_conn = socket.create_connection((CONN_URL, 80))
    except OSError:
        return False
    sock_conn.close()
    return True
| 20.538462 | 60 | 0.632959 | 33 | 267 | 4.909091 | 0.69697 | 0.148148 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.010638 | 0.29588 | 267 | 12 | 61 | 22.25 | 0.851064 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.090909 | false | 0.090909 | 0.181818 | 0 | 0.454545 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 1 |
2ffffeae1ee94afdecc472f7586e718b4d8018e7 | 4,733 | py | Python | save_combine.py | firebrettbrown/bbgm | 31d41cef2175be452793866e1119150518936120 | [
"MIT"
] | 11 | 2019-06-25T17:20:48.000Z | 2020-07-04T03:09:17.000Z | save_combine.py | firebrettbrown/bbgm | 31d41cef2175be452793866e1119150518936120 | [
"MIT"
] | null | null | null | save_combine.py | firebrettbrown/bbgm | 31d41cef2175be452793866e1119150518936120 | [
"MIT"
] | 10 | 2019-06-28T06:26:28.000Z | 2022-01-17T18:12:36.000Z | import argparse
import os
import sys
import shutil
import subprocess
import re
from bs4 import BeautifulSoup
import pandas as pd
import numpy as np
import pickle
from selenium import webdriver
# Filled by the (currently commented-out) parsing code further down the file.
tables = {}
parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('--folder', default='combine',type=str, help='folder to save combine stats')
args = parser.parse_args()
# Make sure the output folder exists; ignore it if it is already there.
for folder in [args.folder]:
    try:
        os.mkdir(folder)
        print("Directory {} created".format(folder))
    except FileExistsError:
        pass
# Fetch the NBA draft-combine anthro page for 20 seasons and cache the
# rendered HTML locally (Selenium drives a real browser -- presumably because
# the page is JavaScript-rendered; confirm).
driver = webdriver.Firefox()
for year in range(20):
    target = os.path.join(args.folder,str(year) + '.html')
    # get the files
    if not os.path.exists(target):
        driver.get('https://stats.nba.com/draft/combine-anthro/#!?SeasonYear=20{:02d}-{:02d}'.format(year,year+1))
        with open(target, 'w') as f:
            f.write(driver.page_source)
        # Navigate away between requests.
        driver.get('http://google.com')
# # load the data
# with open(target,'rt') as fp:
# data = fp.read()
# # collect all the tables
# m = re.findall(r'<!--[ \n]*(<div[\s\S\r]+?</div>)[ \n]*-->',data)
# m2 = re.findall(r'(<div class="table_outer_container">[ \n]*<div class="overthrow table_container" id="div_roster">[\s\S\r]+?</table>[ \n]*</div>[ \n]*</div>)',data)
# m3 = re.findall(r'(<div class="table_outer_container">[ \n]*<div class="overthrow table_container" id="div_contracts">[\s\S\r]+?</table>[ \n]*</div>[ \n]*</div>)',datac)
# m = m2 + m + m3
# print(target,len(m))
# tables[team] = {}
# for test_table in m:
# try:
# soup = BeautifulSoup(test_table,features="lxml")
# table_id = str(soup.find('table').get('id'))
# if table_id == ['team_and_opponent']:
# continue
# soup.findAll('tr')
# table_size = {'shooting':2,'pbp':1,'playoffs_shooting':2,'playoffs_pbp':1,'contracts':1}
# # use getText()to extract the text we need into a list
# headers = [th.getText() for th in soup.findAll('tr')[table_size.get(table_id,0)].findAll('th')]
# # exclude the first column as we will not need the ranking order from Basketball Reference for the analysis
# start_col = 1
# if table_id in ['contracts','injury']:
# start_col = 0
# headers = headers[start_col:]
# rows = soup.findAll('tr')[start_col:]
# player_stats = [[td.getText() for td in rows[i].findAll('td')]
# for i in range(len(rows))]
# if table_id in ['contracts']:
# player_status = [[td.get('class') for td in rows[i].findAll('td')]
# for i in range(len(rows))]
# status_array = []
# for status in player_status:
# if len(status) > 0:
# s2 = [False] + [s[-1] in ['salary-pl','salary-et','salary-tm'] for s in status[1:]]
# else:
# s2 = np.array([])
# status_array.append(s2)
# status_array = np.array(status_array)
# player_stats_new = []
# for a,b in zip(status_array,player_stats):
# b_new = []
# for c,d in zip(a,b):
# b_new.append(d if not c else '')
# player_stats_new.append(b_new)
# player_stats = player_stats_new
# if table_id in ['contracts','injury']:
# player_names = [[td.getText() for td in rows[i].findAll('th')]
# for i in range(len(rows))]
# player_stats = [a + b for a,b in zip(player_names[1:],player_stats[1:])]
# headers[0] = 'Name'
# stats = pd.DataFrame(player_stats, columns = headers).set_index('Name')
# if table_id in ['contracts']:
# stats = stats.drop(['Player'])
# stats = stats.iloc[:stats.index.get_loc('')]
# # drop nan
# stats = stats[~ stats.index.isin([None])]
# # convert to float
# obj_cols = stats.loc[:, stats.dtypes == object]
# conv_cols = obj_cols.apply(pd.to_numeric, errors = 'ignore')
# stats.loc[:, stats.dtypes == object] = conv_cols
# #print(table_id,stats.index)
# tables[team][table_id]= stats.fillna('')
# except:
# pass
# #print('FAILED TO PARSE ' +str(soup.find('table').get('id') ))
# with open('combine_{}.pkl'.format(args.year),'wb') as fp:
# pickle.dump(tables,fp) | 41.156522 | 175 | 0.539193 | 596 | 4,733 | 4.172819 | 0.318792 | 0.04423 | 0.018094 | 0.017692 | 0.232006 | 0.190189 | 0.145155 | 0.118617 | 0.090873 | 0.090873 | 0 | 0.0094 | 0.30319 | 4,733 | 115 | 176 | 41.156522 | 0.744694 | 0.760828 | 0 | 0 | 0 | 0.034483 | 0.149763 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0.034483 | 0.37931 | 0 | 0.37931 | 0.034483 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 1 |
64052ebf346968245c36b8f3711569edc3bcb093 | 454 | py | Python | navigationCommand.py | islam-shamiul/Selenium_python | ee4cea5e58ab9afa88b3ba3e70aef52ec4808d4a | [
"MIT"
] | null | null | null | navigationCommand.py | islam-shamiul/Selenium_python | ee4cea5e58ab9afa88b3ba3e70aef52ec4808d4a | [
"MIT"
] | null | null | null | navigationCommand.py | islam-shamiul/Selenium_python | ee4cea5e58ab9afa88b3ba3e70aef52ec4808d4a | [
"MIT"
] | 1 | 2020-07-21T08:43:25.000Z | 2020-07-21T08:43:25.000Z | from selenium import webdriver
from selenium.webdriver.common.keys import Keys
import time
# Launch Chrome through the locally installed chromedriver (Windows path).
driver = webdriver.Chrome(executable_path="E:/SQA/chromedriver_win32/chromedriver.exe")
driver.get("http://newtours.demoaut.com/")
# NOTE(review): fixed time.sleep(5) waits; Selenium explicit waits
# (WebDriverWait + expected_conditions) would be more reliable.
time.sleep(5)
print(driver.title)
driver.get("https://www.google.com/")
time.sleep(5)
print(driver.title)
# Exercise the browser history navigation commands.
driver.back()
time.sleep(5)
print(driver.title)
driver.forward()
time.sleep(5)
print(driver.title)
driver.close()
641072198fc4166738877f258f40200bcaf10935 | 539 | py | Python | geotrek/outdoor/migrations/0026_auto_20210915_1346.py | GeotrekCE/Geotrek | c1393925c1940ac795ab7fc04819cd8c78bc79fb | [
"BSD-2-Clause"
] | 50 | 2016-10-19T23:01:21.000Z | 2022-03-28T08:28:34.000Z | geotrek/outdoor/migrations/0026_auto_20210915_1346.py | GeotrekCE/Geotrek | c1393925c1940ac795ab7fc04819cd8c78bc79fb | [
"BSD-2-Clause"
] | 1,422 | 2016-10-27T10:39:40.000Z | 2022-03-31T13:37:10.000Z | geotrek/outdoor/migrations/0026_auto_20210915_1346.py | GeotrekCE/Geotrek | c1393925c1940ac795ab7fc04819cd8c78bc79fb | [
"BSD-2-Clause"
] | 46 | 2016-10-27T10:59:10.000Z | 2022-03-22T15:55:56.000Z | # Generated by Django 3.1.13 on 2021-09-15 13:46
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated by Django (see header comment): removes the Site
    # 'ratings_max' field and adds a many-to-many 'ratings' relation to
    # outdoor.Rating. NOTE(review): avoid hand-editing generated migrations.
    dependencies = [
        ('outdoor', '0025_merge_ratings_min_max'),
    ]
    operations = [
        migrations.RemoveField(
            model_name='site',
            name='ratings_max',
        ),
        migrations.AddField(
            model_name='site',
            name='ratings',
            field=models.ManyToManyField(blank=True, related_name='sites', to='outdoor.Rating'),
        ),
    ]
| 23.434783 | 96 | 0.588126 | 56 | 539 | 5.517857 | 0.696429 | 0.058252 | 0.084142 | 0.110032 | 0.15534 | 0 | 0 | 0 | 0 | 0 | 0 | 0.052356 | 0.29128 | 539 | 22 | 97 | 24.5 | 0.756545 | 0.085343 | 0 | 0.25 | 1 | 0 | 0.158859 | 0.052953 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.0625 | 0 | 0.25 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
64146f732312850431c198582abd3211505c6c74 | 233 | py | Python | Python/Arquivos/conta_palavras.py | lucsap/APC | 344011552dccdf8210d65cee0531460a1a2d6fae | [
"MIT"
] | null | null | null | Python/Arquivos/conta_palavras.py | lucsap/APC | 344011552dccdf8210d65cee0531460a1a2d6fae | [
"MIT"
] | null | null | null | Python/Arquivos/conta_palavras.py | lucsap/APC | 344011552dccdf8210d65cee0531460a1a2d6fae | [
"MIT"
def palavras_repetidas(fileName, word):
    """Count the lines of *fileName* that contain *word* as a substring.

    Prints the same summary message as before and additionally returns the
    count (backward-compatible: callers ignoring the return are unaffected).
    """
    count = 0
    # 'with' closes the file even on error; the original handle was leaked.
    with open(fileName, 'r') as fh:
        # Iterating the file object yields lines lazily (same lines as readlines()).
        for line in fh:
            if word in line:
                count += 1
    print(f'{word} aparece no arquivo {fileName} {count} vez(es).')
    return count
6416796cf00258667b65d09bd35e52a24ddcd9ba | 145 | py | Python | CodeForces/Bit++.py | PratikGarai/Coding-Challenges | af5f4458505b26538ec2d7dc0ca09aa236b2d01c | [
"MIT"
] | null | null | null | CodeForces/Bit++.py | PratikGarai/Coding-Challenges | af5f4458505b26538ec2d7dc0ca09aa236b2d01c | [
"MIT"
] | 2 | 2020-10-01T16:13:37.000Z | 2020-10-30T19:12:38.000Z | CodeForces/Bit++.py | PratikGarai/Coding-Challenges | af5f4458505b26538ec2d7dc0ca09aa236b2d01c | [
"MIT"
# Codeforces Bit++: apply n statements; each contains either ++ or --.
n = int(input())
score = 0
for _ in range(n):
    statement = input()
    # "+" appears exactly when the statement is X++ or ++X.
    if "+" in statement:
        score += 1
    else:
        score -= 1
print(score)
| 12.083333 | 33 | 0.4 | 27 | 145 | 2.148148 | 0.592593 | 0.068966 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.035294 | 0.413793 | 145 | 11 | 34 | 13.181818 | 0.647059 | 0 | 0 | 0 | 0 | 0 | 0.006897 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0 | 0 | 0 | 0.111111 | 1 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
64199ae9b43b9d4ae5575ea15f308f73ddc6c547 | 889 | py | Python | geotrek/feedback/urls.py | GeotrekCE/Geotrek | c1393925c1940ac795ab7fc04819cd8c78bc79fb | [
"BSD-2-Clause"
] | null | null | null | geotrek/feedback/urls.py | GeotrekCE/Geotrek | c1393925c1940ac795ab7fc04819cd8c78bc79fb | [
"BSD-2-Clause"
] | null | null | null | geotrek/feedback/urls.py | GeotrekCE/Geotrek | c1393925c1940ac795ab7fc04819cd8c78bc79fb | [
"BSD-2-Clause"
] | null | null | null | from django.conf import settings
from django.urls import path, register_converter
from mapentity.registry import registry
from rest_framework.routers import DefaultRouter
from geotrek.common.urls import LangConverter
from geotrek.feedback import models as feedback_models
from .views import CategoryList, FeedbackOptionsView, ReportAPIViewSet
# Register the language-code path converter used by the <lang:lang> routes below.
register_converter(LangConverter, 'lang')
# URL namespace for this Django app.
app_name = 'feedback'
urlpatterns = [
    path('api/<lang:lang>/feedback/categories.json', CategoryList.as_view(), name="categories_json"),
    path('api/<lang:lang>/feedback/options.json', FeedbackOptionsView.as_view(), name="options_json"),
]
# DRF router exposing the report viewset under api/<lang>/reports.
router = DefaultRouter(trailing_slash=False)
router.register(r'^api/(?P<lang>[a-z]{2})/reports', ReportAPIViewSet, basename='report')
urlpatterns += router.urls
# mapentity adds its generated routes for the Report model; the menu flag is
# driven by settings.REPORT_MODEL_ENABLED (presumably toggling its visibility
# -- confirm against the mapentity registry API).
urlpatterns += registry.register(feedback_models.Report, menu=settings.REPORT_MODEL_ENABLED)
| 38.652174 | 102 | 0.80315 | 109 | 889 | 6.431193 | 0.449541 | 0.028531 | 0.031384 | 0.042796 | 0.065621 | 0 | 0 | 0 | 0 | 0 | 0 | 0.001227 | 0.08324 | 889 | 22 | 103 | 40.409091 | 0.858896 | 0 | 0 | 0 | 0 | 0 | 0.172103 | 0.121485 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.411765 | 0 | 0.411765 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 1 |
6423b272fd3f08098ebf8d314748cf75a6207bb0 | 1,157 | py | Python | accounts/accounts/doctype/purchase_invoice/purchase_invoice.py | sumaiya2908/Accounting-App | 4de21b6a5d69ab422483303c4a743f3e323783a3 | [
"MIT"
] | null | null | null | accounts/accounts/doctype/purchase_invoice/purchase_invoice.py | sumaiya2908/Accounting-App | 4de21b6a5d69ab422483303c4a743f3e323783a3 | [
"MIT"
] | null | null | null | accounts/accounts/doctype/purchase_invoice/purchase_invoice.py | sumaiya2908/Accounting-App | 4de21b6a5d69ab422483303c4a743f3e323783a3 | [
"MIT"
] | null | null | null | # Copyright (c) 2021, Summayya and contributors
# For license information, please see license.txt
# import frappe
from frappe.model.document import Document
from ..gl_entry.gl_entry import create_gl_entry
class PurchaseInvoice(Document):
    """Frappe DocType controller for a Purchase Invoice.

    On validate it refreshes the status and the totals derived from the
    child ``item`` table. On submit it posts a GL entry from the credit
    account to the debit account; on cancel it posts the reverse entry.
    """

    def validate(self):
        # Called by the framework before save: refresh derived fields.
        self.set_status()
        self.set_total_amount()

    def set_total_amount(self):
        # sum() over an empty child table is already 0, so the separate
        # zero-initialisation the original did first was redundant.
        self.amount = sum(item.amount for item in self.item)
        self.total_quantity = sum(item.quantity for item in self.item)

    def set_status(self):
        '''
        Draft: 0
        Submitted: 1, Paid or Unpaid or Overdue
        Cancelled: 2
        '''
        if self.is_new():
            # NOTE(review): frappe's amendment field is conventionally
            # named 'amended_from'; confirm 'amended_form' is intended
            # before relying on this branch.
            if self.get('amended_form'):
                self.status = 'Draft'
            return
        if self.docstatus == 1:
            self.status = 'Unpaid'

    def on_submit(self):
        # Post the ledger entry for this invoice.
        create_gl_entry(self, "Purchase Invoice",
                        self.credit_account, self.debit_account)

    def on_cancel(self):
        # Mirror of on_submit: accounts swapped to reverse the posting.
        create_gl_entry(self, "Purchase Invoice",
                        self.debit_account, self.credit_account)
6425b9d5a46f13b1d3e986f11cbc61c7dd984f83 | 499 | py | Python | setup.py | dgaston/kvasir | 5e938193a936944c765b8dd78600acc258fedbab | [
"MIT"
] | null | null | null | setup.py | dgaston/kvasir | 5e938193a936944c765b8dd78600acc258fedbab | [
"MIT"
] | null | null | null | setup.py | dgaston/kvasir | 5e938193a936944c765b8dd78600acc258fedbab | [
"MIT"
] | null | null | null | from setuptools import setup
# Packaging metadata for the Kvasir OpenShift application.
setup(name='Kvasir',
      version='0.9',
      description='Kvasir OpenShift App',
      author='Dan Gaston',
      author_email='admin@deaddriftbio.com',
      url='https://www.python.org/community/sigs/current/distutils-sig',
      # Runtime dependencies installed alongside this package.
      install_requires=['Flask', 'MarkupSafe', 'Flask-MongoEngine', 'Flask-Script',
                        'numpy', 'Flask-OpenID', 'Cython', 'Flask-Login', 'Flask-Mail',
                        'pytz', 'pyzmq', 'scipy', 'gemini'],
      )
| 38.384615 | 87 | 0.593186 | 52 | 499 | 5.653846 | 0.846154 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.005236 | 0.234469 | 499 | 12 | 88 | 41.583333 | 0.764398 | 0 | 0 | 0 | 0 | 0 | 0.456914 | 0.044088 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 0.090909 | 0 | 0.090909 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
64286b89fc025e5e539006ca29420f5163f9fba7 | 19,894 | py | Python | cytomine-applications/landmark_model_builder/validation.py | Cytomine-ULiege/Cytomine-python-datamining | 16db995f175e8972b8731a8df9391625e1920288 | [
"Apache-2.0"
] | null | null | null | cytomine-applications/landmark_model_builder/validation.py | Cytomine-ULiege/Cytomine-python-datamining | 16db995f175e8972b8731a8df9391625e1920288 | [
"Apache-2.0"
] | null | null | null | cytomine-applications/landmark_model_builder/validation.py | Cytomine-ULiege/Cytomine-python-datamining | 16db995f175e8972b8731a8df9391625e1920288 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
#
# * Copyright (c) 2009-2015. Authors: see NOTICE file.
# *
# * Licensed under the Apache License, Version 2.0 (the "License");
# * you may not use this file except in compliance with the License.
# * You may obtain a copy of the License at
# *
# * http://www.apache.org/licenses/LICENSE-2.0
# *
# * Unless required by applicable law or agreed to in writing, software
# * distributed under the License is distributed on an "AS IS" BASIS,
# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# * See the License for the specific language governing permissions and
# * limitations under the License.
# */
__author__ = "Vandaele Rémy <remy.vandaele@ulg.ac.be>"
__contributors__ = ["Marée Raphaël <raphael.maree@ulg.ac.be>"]
__copyright__ = "Copyright 2010-2015 University of Liège, Belgium, http://www.cytomine.be/"
from multiprocessing import Pool
import numpy as np
import scipy.ndimage as snd
from cytomine import models
from scipy import misc
from shapely.geometry import Point
from sklearn.ensemble import ExtraTreesClassifier
from download import *
"""
This function extracts the coordinates of a given term from an offline
cytomine images/coordinates repository.
"""
def getcoords(repository, termid):
    """Collect the coordinates of landmark ``termid`` from every ``.txt``
    annotation file found in ``repository``.

    Each annotation file is named ``<imageid>.txt`` and holds one line
    per term: ``termid x y x_rel y_rel``.

    Returns five parallel numpy arrays: absolute x, absolute y,
    relative x, relative y, and the image ids.
    """
    if not repository.endswith('/'):
        repository = repository + '/'
    x = []
    y = []
    xp = []
    yp = []
    im = []
    for f in os.listdir(repository):
        if f.endswith('.txt'):
            # The file stem is the (numeric) image identifier. Slicing is
            # used instead of rstrip('.txt'), which strips a *set* of
            # characters, not a suffix.
            imageid = int(f[:-len('.txt')])
            # `with` guarantees the handle is closed even if parsing
            # raises (the original leaked it on exceptions).
            with open(repository + f, 'r') as F:
                lines = F.readlines()
            for line in lines:
                v = line.rstrip().split(' ')
                if int(v[0]) == termid:
                    x.append(int(float(v[1])))
                    y.append(int(float(v[2])))
                    xp.append(float(v[3]))
                    yp.append(float(v[4]))
                    im.append(imageid)
    return np.array(x), np.array(y), np.array(xp), np.array(yp), np.array(im)
"""
This function returns a 8 bit grey-value image given its identifier in the
offline cytomine repository.
"""
def readimage(repository, idimage, image_type='jpg'):
    """Load image ``idimage`` from ``repository`` as a standardised
    (zero-mean, unit-variance) grey-level array.

    NOTE(review): `IM` is only assigned for png/bmp/jpg; any other
    `image_type` raises NameError below. `scipy.misc.imread` is removed
    in modern scipy -- this code requires an old scipy version.
    """
    if (not repository.endswith('/')):
        repository = repository + '/'
    if (image_type == 'png'):
        IM = misc.imread('%s%d.png' % (repository, idimage), flatten=True)
    elif (image_type == 'bmp'):
        IM = misc.imread('%s%d.bmp' % (repository, idimage), flatten=True)
    elif (image_type == 'jpg'):
        IM = misc.imread('%s%d.jpg' % (repository, idimage), flatten=True)
    IM = np.double(IM)
    # Standardise the grey levels.
    IM = IM - np.mean(IM)
    IM = IM / np.std(IM)
    return IM
"""
Given the classifier clf, this function will try to find the landmark on the
image current
"""
def searchpoint(repository, current, clf, mx, my, cm, depths, window_size, image_type,
                npred):
    """Predict the landmark position on image ``current``.

    ``npred`` candidate pixels are drawn from a bivariate normal with
    mean (mx, my) and covariance ``cm`` (in coordinates relative to the
    image size), scored with classifier ``clf``, and the median of the
    best-scoring candidates is returned as (x, y, height - y).
    """
    simage = readimage(repository, current, image_type)
    (height, width) = simage.shape
    P = np.random.multivariate_normal([mx, my], cm, npred)
    # Scale relative coordinates up to pixel positions.
    x_v = np.round(P[:, 0] * width)
    y_v = np.round(P[:, 1] * height)
    height = height - 1
    width = width - 1
    n = len(x_v)
    pos = 0
    maxprob = -1
    maxx = []
    maxy = []
    # maximum number of points considered at once in order to not overload the
    # memory.
    step = 100000
    # Clamp every candidate inside the image bounds.
    for index in range(len(x_v)):
        xv = x_v[index]
        yv = y_v[index]
        if (xv < 0):
            x_v[index] = 0
        if (yv < 0):
            y_v[index] = 0
        if (xv > width):
            x_v[index] = width
        if (yv > height):
            y_v[index] = height
    # Score candidates batch by batch, keeping every position that
    # reaches the running maximum probability.
    while (pos < n):
        xp = np.array(x_v[pos:min(n, pos + step)])
        yp = np.array(y_v[pos:min(n, pos + step)])
        DATASET = build_dataset_image(simage, window_size, xp, yp, depths)
        pred = clf.predict_proba(DATASET)
        pred = pred[:, 1]  # probability of the "landmark" class
        maxpred = np.max(pred)
        if (maxpred >= maxprob):
            positions = np.where(pred == maxpred)
            positions = positions[0]
            xsup = xp[positions]
            ysup = yp[positions]
            if (maxpred > maxprob):
                # Strictly better: restart the candidate set.
                maxprob = maxpred
                maxx = xsup
                maxy = ysup
            else:
                # Tie with the current best: accumulate.
                maxx = np.concatenate((maxx, xsup))
                maxy = np.concatenate((maxy, ysup))
        pos = pos + step
    return np.median(maxx), np.median(maxy), height - np.median(maxy)
"""
0-padding of an image IM of wp pixels on all sides
"""
def makesize(IM, wp):
    """Zero-pad ``IM`` with a border of ``wp`` pixels on every side."""
    height, width = IM.shape
    padded = np.zeros((height + 2 * wp, width + 2 * wp))
    padded[wp:height + wp, wp:width + wp] = IM
    return padded
"""
Build the dataset on a single image
"""
def build_dataset_image(IM, wp, x_v, y_v, depths):
    """Extract multi-resolution square windows around each (x, y) point.

    For every resolution in ``depths`` the image is zoomed, zero-padded
    with ``wp`` pixels, and a (2*wp) x (2*wp) window centred on the
    scaled point is taken. The windows for all resolutions are flattened
    and concatenated into one feature row per point.

    Returns the (len(x_v), (2*wp)**2 * len(depths)) feature matrix.
    """
    swp = (2 * wp) ** 2
    ndata = len(x_v)
    dwp = 2 * wp
    ndepths = len(depths)
    DATASET = np.zeros((ndata, swp * ndepths))
    images = {}
    # Pre-compute the zoomed + padded image once per resolution.
    # (`range` replaces the Python-2-only `xrange`; unused locals REP,
    # wp1 and the per-iteration x/y copies were dropped.)
    for z in range(ndepths):
        images[z] = makesize(snd.zoom(IM, depths[z]), wp)
    # Point coordinates scaled to each resolution.
    X = [[int(x * depths[z]) for x in x_v] for z in range(ndepths)]
    Y = [[int(y * depths[z]) for y in y_v] for z in range(ndepths)]
    cub = np.zeros((ndepths, dwp, dwp))
    for j in range(ndata):
        for z in range(ndepths):
            im = images[z]
            cub[z, :, :] = im[Y[z][j]:Y[z][j] + dwp, X[z][j]:X[z][j] + dwp]
        DATASET[j, :] = cub.flatten()
    return DATASET
def rotate_coordinates(repository, num, x, y, angle, image_type):
    """Rotate image ``num`` by ``angle`` degrees and map the coordinates
    (x, y) into the rotated image's frame.

    Returns (x_rotated, y_rotated, rotated_image).
    """
    image = readimage(repository, num, image_type)
    image_rotee = snd.rotate(image, -angle)
    (h, w) = image.shape
    (hr, wr) = image_rotee.shape
    angle_rad = angle * (np.pi / 180.)
    c = np.cos(angle_rad)
    s = np.sin(angle_rad)
    # Rotate about the image centre, then translate into the (generally
    # larger) rotated image's coordinate system.
    x = x - (w / 2.)
    y = y - (h / 2.)
    xr = ((x * c) - (y * s)) + (wr / 2.)
    yr = ((x * s) + (y * c)) + (hr / 2.)
    return xr.tolist(), yr.tolist(), image_rotee
def dataset_image_rot(repository, Xc, Yc, R, RMAX, proportion, step, i, ang, depths, window_size, image_type):
    """Build the training samples for one image ``i``.

    Positive samples: every pixel of a grid (spacing ``step``) within
    distance ``R`` of the landmark (Xc, Yc). Negative samples:
    round(proportion * n) random pixels at a distance in (R, RMAX] from
    the landmark. All coordinates and the image are rotated before the
    window features are extracted.

    Returns (DATASET, REP, IMGS): feature matrix, 1/0 labels, image ids.
    """
    # print i
    x_v = []
    y_v = []
    REP = []
    IMGS = []
    deuxpi = 2. * np.pi
    # Positive examples: disc of radius R around the landmark.
    for x in np.arange(np.int(Xc) - R, np.int(Xc) + R + 1, step):
        for y in np.arange(np.int(Yc) - R, np.int(Yc) + R + 1, step):
            if (np.linalg.norm([Xc - x, Yc - y]) <= R):
                x_v.append(x)
                y_v.append(y)
                REP.append(1)
                IMGS.append(i)
    n = len(x_v)
    image = readimage(repository, i, image_type)
    (height, width) = image.shape
    height = height - 1
    width = width - 1
    # Negative examples: random ring between radii R and RMAX.
    for t in range(int(round(proportion * n))):
        angle = np.random.ranf() * deuxpi
        r = R + (np.random.ranf() * (RMAX - R))
        tx = int(r * np.cos(angle))
        ty = int(r * np.sin(angle))
        x_v.append(min(width, Xc + tx))
        y_v.append(min(height, Yc + ty))
        REP.append(0)
        IMGS.append(i)
    # NOTE(review): this passes `angle` -- the leftover *radian* value
    # from the loop above -- rather than the `ang` parameter (degrees).
    # This looks like a bug; confirm which rotation was intended.
    (x_r, y_r, im) = rotate_coordinates(repository, i, np.round(np.array(x_v)), np.round(np.array(y_v)), angle,
                                        image_type)
    (hr, wr) = im.shape
    hr = hr - 1
    wr = wr - 1
    x_r = np.round(x_r)
    y_r = np.round(y_r)
    # Clamp the rotated coordinates inside the rotated image.
    for index in range(len(x_r)):
        xr = x_r[index]
        yr = y_r[index]
        if (xr < 0):
            x_r[index] = 0
        if (yr < 0):
            y_r[index] = 0
        if (xr > wr):
            x_r[index] = wr
        if (yr > hr):
            y_r[index] = hr
    return build_dataset_image(im, window_size, x_r, y_r, depths), REP, IMGS
def mp_helper_rot(job_args):
    """Unpack one argument tuple for Pool.map (which passes a single
    positional argument) into a dataset_image_rot call."""
    return dataset_image_rot(*job_args)
def build_datasets_rot_mp(repository, imc, Xc, Yc, R, RMAX, proportion, step, ang, window_size, depths, nimages,
                          image_type, njobs):
    """Build the per-image training datasets in parallel.

    One dataset_image_rot job is submitted per image, each with a random
    angle drawn uniformly from [-ang, ang].

    Returns the list of per-image (DATASET, REP, IMGS) tuples.

    (The original initialised TOTDATA/TOTREP/IMGS/X/Y/deuxpi and never
    used them; those dead locals were removed.)
    """
    p = Pool(njobs)
    job_args = [(
        repository, Xc[i], Yc[i], R, RMAX, proportion, step, imc[i], (np.random.ranf() * 2 * ang) - ang, depths,
        window_size, image_type) for i in range(nimages)]
    T = p.map(mp_helper_rot, job_args)
    p.close()
    p.join()
    return T
def str2bool(v):
    """Interpret "yes", "true", "t" or "1" (case-insensitive) as True;
    any other string as False."""
    truthy = ("yes", "true", "t", "1")
    return v.lower() in truthy
if __name__ == "__main__":
    # Default value for every CLI-configurable parameter; 'validation_K'
    # is added later from the parsed options.
    parameters = {
        'cytomine_host': '',
        'cytomine_public_key': '',
        'cytomine_private_key': '',
        'cytomine_id_software': 0,
        'cytomine_base_path': '',
        'cytomine_working_path': None,
        'cytomine_id_term': None,
        'cytomine_id_project': None,
        'image_type': '',
        'model_njobs': None,
        'model_R': None,
        'model_RMAX': None,
        'model_P': None,
        'model_npred': None,
        'model_ntrees': None,
        'model_ntimes': None,
        'model_angle': None,
        'model_depth': None,
        'model_step': None,
        'model_wsize': None,
        'verbose': False
    }

    # Command-line interface definition.
    p = optparse.OptionParser(description='Cytomine Landmark Detection : Model building',
                              prog='Cytomine Landmark Detector : Model builder', version='0.1')
    p.add_option('--cytomine_host', type="string", default='beta.cytomine.be', dest="cytomine_host",
                 help="The Cytomine host (eg: beta.cytomine.be, localhost:8080)")
    p.add_option('--cytomine_public_key', type="string", default='XXX', dest="cytomine_public_key",
                 help="Cytomine public key")
    p.add_option('--cytomine_private_key', type="string", default='YYY', dest="cytomine_private_key",
                 help="Cytomine private key")
    p.add_option('--cytomine_id_software', type="int", dest="cytomine_id_software",
                 help="The Cytomine software identifier")
    p.add_option('--cytomine_base_path', type="string", default='/api/', dest="cytomine_base_path",
                 help="Cytomine base path")
    p.add_option('--cytomine_working_path', default="/tmp/", type="string", dest="cytomine_working_path",
                 help="The working directory (eg: /tmp)")
    p.add_option('--cytomine_id_project', type="int", dest="cytomine_id_project",
                 help="The Cytomine project identifier")
    p.add_option('--cytomine_id_term', type='int', dest='cytomine_id_term',
                 help="The identifier of the term to create a detection model for")
    p.add_option('--image_type', type='string', default='jpg', dest='image_type',
                 help="The type of the images that will be used (jpg, bmp, png,...)")
    p.add_option('--model_njobs', type='int', default=1, dest='model_njobs',
                 help="The number of processors used for model building")
    p.add_option('--model_R', type='int', default=6, dest='model_R', help="Max distance for extracting landmarks")
    p.add_option('--model_RMAX', type='int', default=200, dest='model_RMAX',
                 help="Max distance for extracting non-landmarks")
    p.add_option('--model_P', type='float', default=3, dest='model_P', help="Proportion of non-landmarks")
    p.add_option('--model_npred', type='int', default=50000, dest='model_npred',
                 help="Number of pixels extracted for prediction")
    p.add_option('--model_ntrees', type='int', default=50, dest='model_ntrees', help="Number of trees")
    p.add_option('--model_ntimes', type='int', default=3, dest='model_ntimes',
                 help="Number of rotations to apply to the image")
    p.add_option('--model_angle', type='float', default=30, dest='model_angle', help="Max angle for rotation")
    p.add_option('--model_depth', type='int', default=5, dest='model_depth', help="Number of resolutions to use")
    p.add_option('--model_step', type='int', default=1, dest='model_step',
                 help="Landmark pixels will be extracted in a grid (x-R:step:x+r,y-R:step:y+R) around the landmark")
    p.add_option('--model_wsize', type='int', default=8, dest='model_wsize', help="Window size")
    p.add_option('--validation_K', type='int', default=10, dest='validation_K', help="K for K-fold cross validation")
    p.add_option('--verbose', type="string", default="0", dest="verbose", help="Turn on (1) or off (0) verbose mode")

    options, arguments = p.parse_args(args=sys.argv)
    # Copy the parsed options into the parameters dictionary.
    parameters['cytomine_host'] = options.cytomine_host
    parameters['cytomine_public_key'] = options.cytomine_public_key
    parameters['cytomine_private_key'] = options.cytomine_private_key
    parameters['cytomine_id_software'] = options.cytomine_id_software
    parameters['cytomine_base_path'] = options.cytomine_base_path
    parameters['cytomine_working_path'] = options.cytomine_working_path
    parameters['cytomine_id_term'] = options.cytomine_id_term
    parameters['cytomine_id_project'] = options.cytomine_id_project
    parameters['image_type'] = options.image_type
    parameters['model_njobs'] = options.model_njobs
    parameters['model_R'] = options.model_R
    parameters['model_RMAX'] = options.model_RMAX
    parameters['model_P'] = options.model_P
    parameters['model_npred'] = options.model_npred
    parameters['model_ntrees'] = options.model_ntrees
    parameters['model_ntimes'] = options.model_ntimes
    parameters['model_angle'] = options.model_angle
    parameters['model_depth'] = options.model_depth
    parameters['model_step'] = options.model_step
    parameters['model_wsize'] = options.model_wsize
    parameters['validation_K'] = options.validation_K
    parameters['verbose'] = str2bool(options.verbose)
    if (not parameters['cytomine_working_path'].endswith('/')):
        parameters['cytomine_working_path'] = parameters['cytomine_working_path'] + '/'

    # Connect to Cytomine and create (or attach to) the user job that
    # will own the produced annotations.
    cytomine_connection = cytomine.Cytomine(parameters['cytomine_host'], parameters['cytomine_public_key'],
                                            parameters['cytomine_private_key'],
                                            base_path=parameters['cytomine_base_path'],
                                            working_path=parameters['cytomine_working_path'],
                                            verbose=parameters['verbose'])
    current_user = cytomine_connection.get_current_user()
    run_by_user_job = False
    if current_user.algo == False:
        user_job = cytomine_connection.add_user_job(parameters['cytomine_id_software'],
                                                    parameters['cytomine_id_project'])
        cytomine_connection.set_credentials(str(user_job.publicKey), str(user_job.privateKey))
    else:
        user_job = current_user
        run_by_user_job = True
    job = cytomine_connection.get_job(user_job.job)
    job = cytomine_connection.update_job_status(job, status=job.RUNNING, progress=0,
                                                status_comment="Beginning validation...")

    # Fetch the project's images and landmark annotations locally, then
    # load the coordinates of the requested term.
    download_images(cytomine_connection, parameters['cytomine_id_project'])
    download_annotations(cytomine_connection, parameters['cytomine_id_project'], parameters['cytomine_working_path'])
    repository = parameters['cytomine_working_path'] + str(parameters['cytomine_id_project']) + '/'
    txt_repository = parameters['cytomine_working_path'] + '%d/txt/' % parameters['cytomine_id_project']
    depths = 1. / (2. ** np.arange(parameters['model_depth']))
    image_type = parameters['image_type']
    (xc, yc, xr, yr, imc) = getcoords(txt_repository, parameters['cytomine_id_term'])
    nimages = np.max(xc.shape)
    # Mean and covariance of the relative landmark positions; used later
    # to sample candidate pixels in searchpoint().
    mx = np.mean(xr)
    my = np.mean(yr)
    P = np.zeros((2, nimages))
    P[0, :] = xr
    P[1, :] = yr
    cm = np.cov(P)
    passe = False
    # Build the full training matrix: model_ntimes passes over the
    # images, the first with no rotation, the rest with random angles.
    for times in range(parameters['model_ntimes']):
        if (times == 0):
            rangrange = 0
        else:
            rangrange = parameters['model_angle']
        T = build_datasets_rot_mp(repository, imc, xc, yc, parameters['model_R'], parameters['model_RMAX'],
                                  parameters['model_P'], parameters['model_step'], rangrange, parameters['model_wsize'],
                                  depths, nimages, parameters['image_type'], parameters['model_njobs'])
        for i in range(len(T)):
            (data, rep, img) = T[i]
            (height, width) = data.shape
            if (not passe):
                # First batch: allocate generously sized buffers, trimmed
                # to the actually-filled prefix [0:b] afterwards.
                passe = True
                DATA = np.zeros((height * (len(T) + 100) * parameters['model_ntimes'], width))
                REP = np.zeros(height * (len(T) + 100) * parameters['model_ntimes'])
                IMG = np.zeros(height * (len(T) + 100) * parameters['model_ntimes'])
                b = 0
                be = height
            DATA[b:be, :] = data
            REP[b:be] = rep
            IMG[b:be] = img
            b = be
            be = be + height
    REP = REP[0:b]
    DATA = DATA[0:b, :]
    IMG = IMG[0:b]

    # K-fold split: images (not samples) are assigned to random folds so
    # no image contributes to both training and evaluation of a fold.
    erreur = []
    g = np.random.randint(0, parameters['validation_K'], nimages)
    groupes = np.zeros(IMG.shape)
    G = {}
    for i in range(parameters['validation_K']):
        G[i] = []
    for i in range(nimages):
        t = np.where(IMG == imc[i])
        t = t[0]
        groupes[t] = g[i]
        G[g[i]].append(imc[i])
    # Ground-truth coordinates per image id.
    Xh = {}
    Yh = {}
    for i in range(nimages):
        Xh[imc[i]] = xc[i]
        Yh[imc[i]] = yc[i]

    # Cross-validation: train on K-1 folds, predict the landmark on each
    # held-out image, upload the prediction and record the pixel error.
    for k in range(parameters['validation_K']):
        t = np.where(groupes != k)
        t = t[0]
        TRDATA = DATA[t, :]
        TRREP = REP[t]
        clf = ExtraTreesClassifier(n_jobs=parameters['model_njobs'], n_estimators=parameters['model_ntrees'])
        clf = clf.fit(TRDATA, TRREP)
        for j in G[k]:
            (x, y, yp) = searchpoint(repository, j, clf, mx, my, cm, depths, parameters['model_wsize'],
                                     parameters['image_type'], parameters['model_npred'])
            circle = Point(x, yp)
            location = circle.wkt
            new_annotation = cytomine_connection.add_annotation(location, j)
            cytomine_connection.add_annotation_term(new_annotation.id, term=parameters['cytomine_id_term'],
                                                    expected_term=parameters['cytomine_id_term'], rate=1.0,
                                                    annotation_term_model=models.AlgoAnnotationTerm)
            er = np.linalg.norm([x - Xh[j], y - Yh[j]])
            print j, x, y, Xh[j], Yh[j], er
            erreur.append(er)

    # Mean error with its 95% confidence half-width.
    moy = np.mean(erreur)
    ste = np.std(erreur)
    ec95 = 1.96 * (np.std(erreur) / np.sqrt(nimages))
    print parameters['cytomine_id_term'], moy - ec95, moy, moy + ec95

    # Record the run parameters and the validation result on the job.
    job_parameters = {}
    job_parameters['landmark_term'] = parameters['cytomine_id_term']
    job_parameters['landmark_r'] = parameters['model_R']
    job_parameters['landmark_rmax'] = parameters['model_RMAX']
    job_parameters['landmark_p'] = parameters['model_P']
    job_parameters['landmark_npred'] = parameters['model_npred']
    job_parameters['landmark_ntimes'] = parameters['model_ntimes']
    job_parameters['landmark_alpha'] = parameters['model_angle']
    job_parameters['landmark_depth'] = parameters['model_depth']
    job_parameters['landmark_window_size'] = parameters['model_wsize']
    job_parameters['forest_n_estimators'] = parameters['model_ntrees']
    job_parameters['forest_max_features'] = 10
    job_parameters['forest_min_samples_split'] = 2
    job_parameters['validation_K'] = parameters['validation_K']
    job_parameters['validation_result_mean'] = moy
    if run_by_user_job == False:
        job_parameters_values = cytomine_connection.add_job_parameters(user_job.job, cytomine_connection.get_software(
            parameters['cytomine_id_software']), job_parameters)
    job = cytomine_connection.update_job_status(job, status=job.TERMINATED, progress=100,
                                                status_comment="Validation done.")
| 38.111111 | 120 | 0.595255 | 2,609 | 19,894 | 4.364124 | 0.165964 | 0.047427 | 0.019322 | 0.014491 | 0.208238 | 0.106973 | 0.040488 | 0.025382 | 0.019234 | 0 | 0 | 0.010458 | 0.264602 | 19,894 | 521 | 121 | 38.184261 | 0.767806 | 0.03584 | 0 | 0.056511 | 0 | 0.002457 | 0.184657 | 0.023596 | 0 | 0 | 0 | 0 | 0 | 0 | null | null | 0.007371 | 0.019656 | null | null | 0.004914 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
6431a7d544c46cfb92483bf64ab03075de332a4d | 2,477 | py | Python | virtool/validators.py | ReeceHoffmann/virtool | f9befad060fe16fa29fb80124e674ac5a9c4f538 | [
"MIT"
] | 39 | 2016-10-31T23:28:59.000Z | 2022-01-15T00:00:42.000Z | virtool/validators.py | ReeceHoffmann/virtool | f9befad060fe16fa29fb80124e674ac5a9c4f538 | [
"MIT"
] | 1,690 | 2017-02-07T23:39:48.000Z | 2022-03-31T22:30:44.000Z | virtool/validators.py | ReeceHoffmann/virtool | f9befad060fe16fa29fb80124e674ac5a9c4f538 | [
"MIT"
] | 25 | 2017-02-08T18:25:31.000Z | 2021-09-20T22:55:25.000Z | import re
from email_validator import validate_email, EmailSyntaxError
from virtool.users.utils import PERMISSIONS
RE_HEX_COLOR = re.compile("^#([A-Fa-f0-9]{6}|[A-Fa-f0-9]{3})$")
def strip(value: str) -> str:
    """Remove leading and trailing whitespace from ``value``.

    Used as a coercion function in Cerberus validators.

    :param value: the string to strip
    :return: the stripped string
    """
    return value.strip()
def is_permission_dict(field: str, value: dict, error: callable):
    """Cerberus check: every key of ``value`` must be a known permission.

    Reports "keys must be valid permissions" through ``error`` otherwise.

    :param field: permissions field to check
    :param value: permissions dictionary value
    :param error: the calling validator's _error method
    """
    unknown = [key for key in value if key not in PERMISSIONS]
    if unknown:
        error(field, "keys must be valid permissions")
def has_unique_segment_names(field: str, value: list, error: callable):
    """Cerberus check: the segment names in ``value`` must be distinct.

    Reports "list contains duplicate names" through ``error`` otherwise.

    :param field: field to check
    :param value: list of segment dicts, each with a "name" key
    :param error: the calling validator's _error method
    """
    names = [segment["name"] for segment in value]
    if len(set(names)) != len(names):
        error(field, "list contains duplicate names")
def is_valid_hex_color(field: str, value: str, error: callable):
    """Cerberus check: ``value`` must be a hex colour like #fff or #a1b2c3
    (matched against the module-level RE_HEX_COLOR pattern).

    Reports "This is not a valid Hexadecimal color" through ``error``
    otherwise.

    :param field: color field to check
    :param value: color string value
    :param error: the calling validator's _error method
    """
    if RE_HEX_COLOR.match(value) is None:
        error(field, "This is not a valid Hexadecimal color")
def is_valid_email(field: str, value: str, error: callable):
    """
    Checks that email is a valid email according to email_validator.validate_email.
    If email is invalid, error message is updated to "Not a valid email".

    :param field: email field to check
    :param value: email string value
    :param error: the calling validator's _error method
    """
    try:
        validate_email(value)
    except EmailSyntaxError:
        # Only syntax errors are caught here; any other exception from
        # the validator propagates to the caller.
        error(field, "Not a valid email")
| 32.168831 | 113 | 0.705289 | 358 | 2,477 | 4.815642 | 0.256983 | 0.020882 | 0.030162 | 0.053364 | 0.357309 | 0.24942 | 0.216937 | 0.180974 | 0.135731 | 0.135731 | 0 | 0.003093 | 0.216795 | 2,477 | 76 | 114 | 32.592105 | 0.885567 | 0.549859 | 0 | 0 | 0 | 0.05 | 0.156315 | 0.035197 | 0 | 0 | 0 | 0 | 0 | 1 | 0.25 | false | 0 | 0.15 | 0 | 0.45 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
6436ac8ba5b8954f205ea6a742afb28c7e206a5a | 799 | py | Python | oo/pessoa.py | thaila52/pythonbirds | 55101aaf319361dae7f3d927c3a8d2777b0f8a8b | [
"MIT"
] | null | null | null | oo/pessoa.py | thaila52/pythonbirds | 55101aaf319361dae7f3d927c3a8d2777b0f8a8b | [
"MIT"
] | null | null | null | oo/pessoa.py | thaila52/pythonbirds | 55101aaf319361dae7f3d927c3a8d2777b0f8a8b | [
"MIT"
] | null | null | null | class Pessoa:
olhos = 2
def __init__(self, *filhos, nome=None, idade=35):
self.nome = nome
self.idade = idade
self.filhos = list(filhos)
def cumprimentar(self):
return f'Olá{id(self)}'
if __name__ == '__main__':
    # Demo: instance vs. class attributes.
    thaila = Pessoa(nome='Thaila')
    junior = Pessoa(thaila, nome='Junior')  # thaila is junior's child
    # Calling the method through the class and through the instance.
    print(Pessoa.cumprimentar(junior))
    print(id(junior))
    print(junior.cumprimentar())
    print(junior.nome)
    print(junior.idade)
    for filho in junior.filhos:
        print(filho.nome)
    # Dynamic attribute creation/deletion on the instance.
    junior.sobrenome = 'Reis'
    del junior.filhos
    # This shadows the class attribute `olhos` only on junior.
    junior.olhos = 1
    print(junior.__dict__)
    print(thaila.__dict__)
    print(Pessoa.olhos)
    print(thaila.olhos)  # falls back to the class attribute (2)
    print(junior.olhos)  # instance attribute (1)
    print(id(Pessoa.olhos), id(thaila.olhos), id(junior.olhos))
| 25.774194 | 63 | 0.638298 | 100 | 799 | 4.9 | 0.32 | 0.112245 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.006494 | 0.229036 | 799 | 30 | 64 | 26.633333 | 0.788961 | 0 | 0 | 0 | 0 | 0 | 0.046366 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.074074 | false | 0 | 0 | 0.037037 | 0.185185 | 0.444444 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 |
644138068d0aa242e34e9494e5392a407d2e6756 | 4,694 | py | Python | src/fognode/app.py | hehaichi/dist-fog-c | 1d09134c4059718d3f67021f574a85e51754ed8b | [
"MIT"
] | null | null | null | src/fognode/app.py | hehaichi/dist-fog-c | 1d09134c4059718d3f67021f574a85e51754ed8b | [
"MIT"
] | null | null | null | src/fognode/app.py | hehaichi/dist-fog-c | 1d09134c4059718d3f67021f574a85e51754ed8b | [
"MIT"
] | null | null | null | from flask import Flask
from flask import request
app = Flask(__name__)
# Imports
import psutil
from flask import jsonify
import redis
import json
from os import urandom
import hashlib
import docker
from docker import APIClient
import requests
import zipfile
from celery import Celery
from celery.utils.log import get_task_logger
import os
import StringIO
import time
client = docker.from_env()
redis_cli = redis.StrictRedis(host='localhost', port=6380, db=0)
redis_shared = redis.StrictRedis(host='192.168.1.100', port=6381, db=0)
raw_cli = APIClient(base_url='unix://var/run/docker.sock')
# Celery config
app.config['CELERY_BROKER_URL'] = 'redis://localhost:6380/0'
app.config['CELERY_TIMEZONE'] = 'UTC'
celery = Celery(app.name, broker=app.config['CELERY_BROKER_URL'])
celery.conf.update(app.config)
logger = get_task_logger(__name__)
# app.config['CELERYBEAT_SCHEDULE'] = {
# 'get-heartbeat': {
# 'task': 'get_heartbeat_task',
# 'schedule': 5.0,
# },
# }
fognodes = []
@app.route('/')
def hello_world():
    # Trivial index route; useful as a manual smoke test.
    return 'Hello, World!'
@app.route('/utilization')
def get_util():
    """Report this node's resource utilisation (CPU, disk, swap, memory,
    temperatures and running docker container count) as JSON."""
    cpu_count = psutil.cpu_count()
    cpu_freq = {
        'current': psutil.cpu_freq().current,
        'max': psutil.cpu_freq().max,
        'min': psutil.cpu_freq().min
    }
    cpu_percent = psutil.cpu_percent()
    disk_util = {
        'total': psutil.disk_usage('/').total,
        'used': psutil.disk_usage('/').used,
        'free': psutil.disk_usage('/').free,
        'percent': psutil.disk_usage('/').percent
    }
    temperatures = psutil.sensors_temperatures()
    swap_mem = {
        'total': psutil.swap_memory().total,
        'used': psutil.swap_memory().used,
        'free': psutil.swap_memory().free,
        'percent': psutil.swap_memory().percent
    }
    memory = {
        'total': psutil.virtual_memory().total,
        'available': psutil.virtual_memory().available,
        'percent': psutil.virtual_memory().percent,
        'used': psutil.virtual_memory().used,
        'free': psutil.virtual_memory().free
    }
    utilization = {
        'cpu_count': cpu_count,
        'cpu_freq': cpu_freq,
        'cpu_percent': cpu_percent,
        'disk_util': disk_util,
        'temperatures': temperatures,
        'swap_memory': swap_mem,
        'memory': memory,
        'containers': len(client.containers.list())
    }
    return jsonify(utilization)
@app.route('/servicedata', methods=['POST'])
def propagate_data():
    # Cache the posted service data in the local redis, then forward the
    # same payload to this node's parent so it propagates up the fog
    # hierarchy.
    print request.data
    form = json.loads(request.data)
    redis_cli.set(str(form['service_id']+"-service_data"), form['service_data'])
    parent_node = getParentNode(request)
    request_uri = "http://{}:8080/servicedata/".format(parent_node)
    # print request_uri
    requests.post(request_uri, data=request.data)
    return "OK"
def getParentNode(request):
    # Look up this node's parent in the shared (cluster-wide) redis.
    # NOTE(review): the key is a hard-coded IP -- presumably this node's
    # own address; the `request` argument is unused.
    parent = redis_shared.get('192.168.1.102')
    print parent
    return parent
def getChildren():
    # Children of this node, as stored in the local redis under 'fognodes'.
    children = redis_cli.get('fognodes')
    return children
@app.route('/heartbeat/')
def heartbeat():
    # Liveness probe endpoint.
    return "OK"
def register_fog_master():
    # Announce this node to the fog master so it gets tracked in the cluster.
    requests.get("http://192.168.1.100:8080/register/fognode/")
@app.route('/build_trigger/<service_id>')
def trigger_build(service_id):
    # Kick off the asynchronous download + build of a service via celery.
    get_service_data.delay(service_id)
    return "OK"
@celery.task(name="get_service_data")
def get_service_data(service_id):
    # Celery task: download the service bundle (zip) from the master,
    # unpack it under service-data/<service_id>/, then build and run it.
    url = "http://192.168.1.100:8080/services/{}".format(service_id)
    req = requests.get(url, stream=True)
    try:
        path = 'service-data/{}'.format(service_id)
        os.makedirs(path)
    except OSError:
        # An already-existing directory is fine; anything else re-raises.
        if not os.path.isdir(path):
            raise
    z = zipfile.ZipFile(StringIO.StringIO(req.content))
    z.extractall('service-data/{}'.format(service_id))
    build_and_deploy(service_id)
def build_and_deploy(service_id):
    # Build the docker image for this service only once (the image tag is
    # cached in redis under the service id), then run it and print the
    # wall-clock duration of the container run.
    service_data = redis_cli.get(service_id)
    if not service_data:
        command = "docker build -t rakshith/{} service-data/{}/".format(service_id, service_id)
        print "Building"
        os.system(command)
        # a = client.images.build(fileobj=f)
        # for line in raw_cli.build(fileobj=f, tag='rakshith/{}'.format(service_id), custom_context=True):
        # print line
        # # service_data = {}
        service_data = 'rakshith/{}'.format(service_id)
        redis_cli.set(service_id, service_data)
    print "Time start"
    start = time.time()
    print client.containers.run(service_data)
    end = time.time()
    print "Time End - Total Time = {}".format(end-start)
if __name__ == '__main__':
    # Register with the master first, then serve on all interfaces.
    register_fog_master()
    app.run(debug=True, host='0.0.0.0', port=8080)
| 28.621951 | 110 | 0.659139 | 597 | 4,694 | 4.978224 | 0.261307 | 0.048452 | 0.030283 | 0.010094 | 0.086137 | 0.012113 | 0 | 0 | 0 | 0 | 0 | 0.020408 | 0.196208 | 4,694 | 163 | 111 | 28.797546 | 0.767294 | 0.083937 | 0 | 0.024 | 0 | 0 | 0.162269 | 0.017978 | 0 | 0 | 0 | 0 | 0 | 0 | null | null | 0 | 0.136 | null | null | 0.048 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
64428d8d713a83be8c76fc25d74c248dd372769d | 13,566 | py | Python | tarot_deck.py | Soren98/tarot | 648f543034ef0e660b2f7f1f876052398286d227 | [
"Unlicense"
] | null | null | null | tarot_deck.py | Soren98/tarot | 648f543034ef0e660b2f7f1f876052398286d227 | [
"Unlicense"
] | null | null | null | tarot_deck.py | Soren98/tarot | 648f543034ef0e660b2f7f1f876052398286d227 | [
"Unlicense"
] | null | null | null | import json
from copy import deepcopy
from random import shuffle
# The 78 card names of a standard tarot deck: the 22 major arcana followed by
# the four minor-arcana suits (wands, cups, swords, coins), king down to ace.
cards = ['magician', 'high priestess', 'empress', 'emperor', 'hierophant', 'lovers', 'chariot', 'justice', 'hermit',
         'wheel of fortune', 'strength', 'hanged man', 'death', 'temperance', 'devil', 'tower', 'star', 'moon', 'sun',
         'judgement', 'world', 'fool', 'king of wands', 'queen of wands', 'knight of wands', 'page of wands',
         'ten of wands', 'nine of wands', 'eight of wands', 'seven of wands', 'six of wands', 'five of wands',
         'four of wands', 'three of wands', 'two of wands', 'ace of wands', 'king of cups', 'queen of cups',
         'knight of cups', 'page of cups', 'ten of cups', 'nine of cups', 'eight of cups', 'seven of cups',
         'six of cups', 'five of cups', 'four of cups', 'three of cups', 'two of cups', 'ace of cups', 'king of swords',
         'queen of swords', 'knight of swords', 'page of swords', 'ten of swords', 'nine of swords', 'eight of swords',
         'seven of swords', 'six of swords', 'five of swords', 'four of swords', 'three of swords', 'two of swords',
         'ace of swords', 'king of coins', 'queen of coins', 'knight of coins', 'page of coins', 'ten of coins',
         'nine of coins', 'eight of coins', 'seven of coins', 'six of coins', 'five of coins', 'four of coins',
         'three of coins', 'two of coins', 'ace of coins']
# Upright (non-reversed) meaning text for every card, keyed by card name.
# NOTE(review): 'devil' and 'tower' have identical upright text — confirm
# against the source material this was transcribed from.
upright = {'magician': 'creativity, self-confidence, dexterity, sleight of hand,will-power, skill',
           'high priestess': 'knowledge, wisdom, learning, intuition, impatience, virtue, purity',
           'empress': 'development, accomplishment action, evolution',
           'emperor': 'authority, father-figure, structure, solid foundation',
           'hierophant': 'mercy, conformity, forgiveness, social approval, bonded, inspiration',
           'lovers': 'harmony, trust,romance, optimism, honor, love, harmony',
           'chariot': 'perseverance, rushed decision, turmoil, vengeance, adversity',
           'justice': 'equality, righteousness, virtue, honor, harmony, balance',
           'hermit': 'inner strength, prudence, withdrawal, caution, vigilance',
           'wheel of fortune': 'unexpected events, advancement, destiny, fortune, progress',
           'strength': 'courage, conviction, strength, determination, action, heroism, virility',
           'hanged man': 'change, reversal, boredom, improvement, rebirth, suspension, change',
           'death': 'unexpected change, loss, failure, transformation, death, bad luck',
           'temperance': 'temperance, patience, good influence, confidence, moderation',
           'devil': 'downfall, unexpected failure, controversy, ravage, disaster, ill tempered',
           'tower': 'downfall, unexpected failure, controversy, ravage, disaster, ill tempered',
           'star': 'balance, pleasure, optimism, insight, spiritual love, hope, faith',
           'moon': 'double-dealing Deception, disillusionment, trickery, error, danger, disgrace',
           'sun': 'accomplishment, success, love, joy, happy marriage, satisfaction',
           'judgement': 'awakening, renewal, rejuvenation, rebirth, improvement, promotion, atonement, judgment',
           'world': 'perfection, recognition, success, fulfillment, eternal life',
           'fool': 'beginnings possibilities, pleasure, thoughtlessness, adventure, opportunity',
           'king of wands': 'passionate, good leader, noble',
           'queen of wands': 'fondness, attraction, command ',
           'knight of wands': 'generous, journey, impetuous',
           'page of wands': 'enthusiasm, exploration, discovery, free spirit',
           'ten of wands': 'pain, ruined, failure',
           'nine of wands': 'victory, good health, obstinacy',
           'eight of wands': 'new ideas, love, journey',
           'seven of wands': 'stiff competition, victory, courage, energy',
           'six of wands': 'leadership, good news, success',
           'five of wands': 'lawsuit or quarrel, courage, competition',
           'four of wands': 'dissatisfaction, kindness, reevaluation ',
           'three of wands': 'cooperation, good partnership, success',
           'two of wands': 'generous person, courage, patience, courage ',
           'ace of wands': 'profitable journey, new business, beginning, new career, birth, inheritance',
           'king of cups': 'kindness, willingness, enjoyment',
           'queen of cups': 'loving mother, gentle, happiness',
           'knight of cups': 'emotional, romantic dreamer, intelligence',
           'page of cups': 'sweetness, interest in literature, gentleness',
           'ten of cups': 'friendship, happiness, life',
           'nine of cups': 'physical well-being, hopes, security',
           'eight of cups': 'disappointment, abandonment, misery',
           'seven of cups': 'imagination, illusion, directionless',
           'six of cups': 'acquaintance, good memories, acquaintance, happiness',
           'five of cups': 'broken marriage,vain regret, sorrow, loss',
           'four of cups': 'dissatisfaction, kindness, reevaluation, redemption',
           'three of cups': 'fortune, hospitality, discovery',
           'two of cups': 'romance, friendship, cooperation',
           'ace of cups': 'good health, love, joy, beauty',
           'king of swords': 'powerful, friendship, counselor',
           'queen of swords': 'skillful, brave, clever, rush',
           'knight of swords': 'strong man, braver, clever person',
           'page of swords': 'grace, diplomacy, dexterity, grace',
           'ten of swords': 'defeat, failure, pain',
           'nine of swords': 'desolation, illness, suspicion, cruelty',
           'eight of swords': 'weakness, indecision, censure',
           'seven of swords': 'betrayal, insolence, unwise attempt',
           'six of swords': 'harmony, sorrow, journey',
           'five of swords': 'defeat, cowardliness, empty victory',
           'four of swords': 'temporary exile, strife, retreat',
           'three of swords': 'broken relationship, civil war',
           'two of swords': 'indecision, trouble, balanced',
           'ace of swords': 'love, valiant, victory',
           'king of coins': 'reliable person, steadiness ',
           'queen of coins': 'thoughtfulness, intelligence, talents, melancholy ',
           'knight of coins': 'dull outlook, patience, animal lover, trustworthy ',
           'page of coins': 'kindness,new ideas/opinions, scholar ',
           'ten of coins': 'wealth, property, stability ',
           'nine of coins': 'solitude, well-being, green thumb ',
           'eight of coins': 'employment, money, learning, trade',
           'seven of coins': 'development, re-evaluation, effort, hard work ',
           'six of coins': 'prosperity, philanthropy, charity, gifts ',
           'five of coins': 'destitution, poor health, despair, loneliness ',
           'four of coins': 'ungenerous, greed, miserly ',
           'three of coins': 'abilities, approval,effort, abilities ',
           'two of coins': 'harmony, new projects, helpful ',
           'ace of coins': 'prosperity, happiness, pleasure'}
# Reversed-card meaning text for every card, keyed by card name.
# Fixes three dropped-letter typos in the user-facing text:
# 'alse accusation' -> 'false accusation', 'alse prophecy' -> 'false prophecy',
# 'ack of vision' -> 'lack of vision'.
reverse = {'magician': 'delay, unimaginative, insecurity, lack of self-confidence',
           'high priestess': 'selfishness, shallowness, misunderstanding, ignorance',
           'empress': 'inaction, lack on concentration, vacillation, anxiety, infidelity',
           'emperor': 'domination, excessive control, rigidity, inflexibility',
           'hierophant': 'vulnerability, unconventionality, foolish generosity, impotence, frailty, unorthodoxy',
           'lovers': 'separation, frustration, unreliability,fickleness, untrustworthy',
           'chariot': 'vanquishment, defeat, failure, unsuccessful',
           'justice': 'false accusation, unfairness, abuse, biased',
           'hermit': 'hastiness, rashness,immaturity, imprudence, foolishness',
           'wheel of fortune': 'interruption, outside influences, failure, bad luck',
           'strength': 'pettiness, sickness, unfaithfulness, weakness',
           'hanged man': 'false prophecy, useless sacrifice, unwillingness',
           'death': 'immobility, slow changes, cheating, death, stagnation',
           'temperance': 'conflict, disunion, frustration, impatience, discord',
           'devil': 'release, enlightenment, divorce, recovery',
           'tower': 'entrapment, imprisonment, old ways, rustic',
           'star': 'disappointment, bad luck, imbalance, broken dreams',
           'moon': 'trifling mistakes, deception discovered, negative advantage',
           'sun': 'loneliness, canceled plans, unhappiness, break ups',
           'judgement': 'disappointment, indecision, death, failure, ill-health, theft, worry',
           'world': 'lack of vision, disappointment, imperfection',
           'fool': 'indecision, hesitation, injustice, apathy, bad choice',
           'king of wands': 'unyielding, prejudice, quarrels',
           'queen of wands': 'jealous, revengeful, infidelity',
           'knight of wands': 'suspicion, jealousy, narrow-mindedness',
           'page of wands': 'setbacks to new ideas, pessimism, lack of direction',
           'ten of wands': 'cleverness, energy, strength',
           'nine of wands': 'weakness, ill-health, adversity',
           'eight of wands': 'violence, quarrels, courage',
           'seven of wands': 'advantage, patience, indecision',
           'six of wands': 'postponement, bad news, pride in riches',
           'five of wands': 'new opportunities, harmony, generosity',
           'four of wands': 'new relationship, new ambitions, action',
           'three of wands': 'carelessness, arrogance, pride, mistakes',
           'two of wands': 'impatience, domination',
           'ace of wands': 'selfishness, lack of determination, setback',
           'king of cups': 'double-dealer, scandal, crafty, violent',
           'queen of cups': 'perverse, unhappy, gloom, over-active imagination',
           'knight of cups': 'idleness, untruthful, fraud, sensuality',
           'page of cups': 'poor imagination, selfishness, no desires',
           'ten of cups': 'waste, broken relationships, quarrel',
           'nine of cups': 'illness, failure, overindulgence',
           'eight of cups': 'pleasure, success, joy',
           'seven of cups': 'will-power, determination',
           'six of cups': 'friendship, disappointment, past',
           'five of cups': 'return, summon, hope',
           'four of cups': 'new goals, ambitions, beginning',
           'three of cups': 'hidden, overindulgence, pain, gossip',
           'two of cups': 'violent passion, misunderstanding',
           'ace of cups': 'egotism, selfishness, hesitancy',
           'king of swords': 'obstinate, evil intentions, judgments',
           'queen of swords': 'sly, keen, deceitful',
           'knight of swords': 'troublemaker, a crafty, tyranny',
           'page of swords': 'imposture, ill-health, cunningness',
           'ten of swords': 'courage, positive energy, good health',
           'nine of swords': 'unselfishness, good news, healing',
           'eight of swords': 'freedom, new beginnings, relaxation',
           'seven of swords': 'counsel, helpful, advice',
           'six of swords': 'obstacles, difficulties, defeat',
           'five of swords': 'unfairness, defeat, loss',
           'four of swords': 'social unrest, labor strikes, renewed activity',
           'three of swords': 'sorrow, loss, confusion',
           'two of swords': 'unscrupulous, release',
           'ace of swords': 'obstacles, tyranny, power',
           'king of coins': 'bribes, materialistic, calm',
           'queen of coins': 'mistrust, suspicion, neglect',
           'knight of coins': 'carelessness, standstill, irresponsible',
           'page of coins': 'luxury, rebellious, bad news',
           'ten of coins': 'dull, slothfulness, misfortune',
           'nine of coins': 'caution, possible loss',
           'eight of coins': 'void, no ambition, dislike',
           'seven of coins': 'impatience, slow progress, investments',
           'six of coins': 'jealousy, miserliness, unfairness',
           'five of coins': 'employment, courage, revival',
           'four of coins': 'spendthrift, obstacles, earthy possessions',
           'three of coins': 'preoccupation, ambitions',
           'two of coins': 'difficulty, discouragement',
           'ace of coins': 'misery, greedy, money'}
class TarotDeck:
    """A shuffled 78-card tarot deck supporting draw/reset and JSON save/load.

    Relies on the module-level ``cards`` list and ``upright``/``reverse``
    meaning dictionaries.
    """

    def __init__(self):
        self.deck = None  # list of remaining card names, top of deck at the end
        self.reset()

    def draw(self):
        """Draw the top card.

        Returns a ``(message, image_filename)`` tuple; ``image_filename`` is
        ``None`` when the deck is empty.

        Fix: the flattened original appended both the article-less and the
        "the"-prefixed announcement for cards like 'justice'; they are now
        mutually exclusive.
        """
        if len(self.deck) == 0:
            return 'the deck is empty. use !tarot reset to reset the deck', None
        card = self.deck.pop()
        ret = []
        # These card names read as proper nouns and take no article.
        if card in {'justice', 'strength', 'death', 'temperance', 'judgement'}:
            ret.append('you drew {}'.format(card))
        else:
            ret.append('you drew the {}'.format(card))
        ret.append('upright meaning: {}'.format(upright[card]))
        ret.append('reverse meaning: {}'.format(reverse[card]))
        return '\n'.join(ret), '{}.jpg'.format(card.replace(' ', '_'))

    def reset(self):
        """Restore the full deck and shuffle it; returns a status message."""
        self.deck = deepcopy(cards)
        shuffle(self.deck)
        return 'tarot deck reset'

    def save(self, save_path):
        """Persist the current deck order to ``save_path`` as JSON."""
        with open(save_path, 'w') as file:
            json.dump(self.deck, file)
        print('saved deck')

    def load(self, load_path):
        """Replace the current deck with the order stored at ``load_path``."""
        with open(load_path, 'r') as file:
            self.deck = json.load(file)
        print('loaded deck')
| 65.221154 | 120 | 0.618605 | 1,433 | 13,566 | 5.849965 | 0.387997 | 0.035071 | 0.00501 | 0.008589 | 0.014553 | 0.014553 | 0.014553 | 0.014553 | 0 | 0 | 0 | 0.000098 | 0.250258 | 13,566 | 207 | 121 | 65.536232 | 0.824108 | 0 | 0 | 0 | 0 | 0 | 0.682736 | 0.001843 | 0 | 0 | 0 | 0 | 0 | 1 | 0.025253 | false | 0.010101 | 0.015152 | 0 | 0.060606 | 0.010101 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
ff24c50a953f9de8e5c507018c8a393f88c6968a | 3,394 | py | Python | src/library/blas/AutoGemm/KernelsToPreCompile.py | tingxingdong/clean | 9cdaed4c755b825b0c10a99f9974224993aa39a9 | [
"Apache-2.0"
] | 1 | 2021-07-07T11:28:56.000Z | 2021-07-07T11:28:56.000Z | src/library/blas/AutoGemm/KernelsToPreCompile.py | tingxingdong/clean | 9cdaed4c755b825b0c10a99f9974224993aa39a9 | [
"Apache-2.0"
] | null | null | null | src/library/blas/AutoGemm/KernelsToPreCompile.py | tingxingdong/clean | 9cdaed4c755b825b0c10a99f9974224993aa39a9 | [
"Apache-2.0"
] | null | null | null | import os
import argparse
import AutoGemmParameters
import Common
################################################################################
# Auto-Gemm
################################################################################
def writeOfflineCompilation(args):
  """Write AutoGemmKernelsToPreCompile.h listing kernels to pre-compile.

  One ``{precision, order, transA, transB, beta, tileRows, tileCols, unroll}``
  row is emitted for every combination selected on the command line; a single
  all-zero row is emitted when no combination matched so the C array is never
  empty.

  Fixes: ``count is 0`` compared an int by identity (implementation-dependent
  for CPython small-int caching) — now ``count == 0``; single-argument prints
  are parenthesized (valid in both Python 2 and 3).
  """
  print("AutoGemm.py: Generating list of kernels to pre-compile.")
  if not os.path.exists( Common.getIncludePath() ):
    os.makedirs( Common.getIncludePath() )
  ocFileName = Common.getIncludePath() + "AutoGemmKernelsToPreCompile.h"
  ocFile = open(ocFileName, "w")
  ocFile.write( Common.getAutoGemmHeader() )
  fileStr = "\n/*precision, order, transA, transB, beta, tileNumRows, tileNumCols, unroll*/\n"
  fileStr += "\nunsigned int gemmPreCompile[][8] = {\n"
  count = 0
  for precision in args.precisions:
    # Flush accumulated text once per precision pass.
    ocFile.write( fileStr )
    fileStr = ""
    validTiles = AutoGemmParameters.getTilesForPrecision(precision)
    for order in args.orders:
      for transpose in args.transposes:
        transA = transpose[0]
        transB = transpose[1]
        if (transA=="C" or transB=="C") and (precision=="s" or precision=="d"):
          # real precision doesn't have conjugate transpose
          continue
        for beta in args.betas:
          for tile in validTiles:
            kernelStr = " { %1u, %1u, %1u, %1u, %1u, %3u, %3u, %2u },\n" \
                % (
                    Common.precisionInt[precision],
                    Common.orderInt[order],
                    Common.transposeInt[transA],
                    Common.transposeInt[transB],
                    beta,
                    tile.macroTileNumRows,
                    tile.macroTileNumCols,
                    tile.unroll
                )
            fileStr += kernelStr
            count+=1
  if count == 0:
    # Keep the generated C array non-empty even when nothing was selected.
    fileStr += " { %1u, %1u, %1u, %1u, %1u, %3u, %3u, %2u },\n" \
        % ( 0, 0, 0, 0, 0, 0, 0, 0 )
  fileStr += "};\n"
  fileStr += "unsigned int gemmPreCompileNum = " + str(count) + ";\n"
  ocFile.write( fileStr )
  ocFile.close()
  # Each combination expands to 4 concrete kernels.
  count *= 4
  print("AutoGemm.py: %u kernels will be pre-compiled." % count)
################################################################################
# Main
################################################################################
if __name__ == "__main__":
  # parse arguments: every selection flag is optional and validated against
  # the choices declared by AutoGemmParameters.
  ap = argparse.ArgumentParser(description="Which gemm kernels to compile offline.")
  ap.add_argument("--output-path", dest="output" )
  ap.add_argument("--precisions", dest="precisions", action="store", nargs="+", choices=AutoGemmParameters.precisions )
  ap.add_argument("--orders", dest="orders", action="store", nargs="+", choices=AutoGemmParameters.orders )
  ap.add_argument("--transposes", dest="transposes", action="store", nargs="+", choices=AutoGemmParameters.getTransposeChoices() )
  ap.add_argument("--betas", dest="betas", action="store", nargs="+", type=int, choices=AutoGemmParameters.betas )
  args = ap.parse_args()
  if args.output:
    Common.setOutputPath(args.output)
  else:
    print "Warning: No output path specified; default is working directory."
  # write offline compilation header; default every unset selection to an
  # empty list so writeOfflineCompilation can iterate unconditionally.
  if args.precisions is None:
    args.precisions = []
  if args.transposes is None:
    args.transposes = []
  if args.orders is None:
    args.orders = []
  if args.betas is None:
    args.betas = []
  writeOfflineCompilation(args)
| 36.891304 | 130 | 0.570418 | 338 | 3,394 | 5.686391 | 0.360947 | 0.016649 | 0.01873 | 0.010406 | 0.085848 | 0.021852 | 0.021852 | 0.01769 | 0.01769 | 0 | 0 | 0.011615 | 0.213612 | 3,394 | 91 | 131 | 37.296703 | 0.708505 | 0.042428 | 0 | 0.028986 | 0 | 0.028986 | 0.209175 | 0.009928 | 0 | 0 | 0 | 0 | 0 | 0 | null | null | 0 | 0.057971 | null | null | 0.043478 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
ff2a2e9042dfa861f9dd3f86cd1a00194ad688f3 | 5,184 | py | Python | python/iperf_mar1.py | ttlequals0/misc | 95ce083312bc241b196afcf675f93480ca6866b0 | [
"MIT"
] | null | null | null | python/iperf_mar1.py | ttlequals0/misc | 95ce083312bc241b196afcf675f93480ca6866b0 | [
"MIT"
] | null | null | null | python/iperf_mar1.py | ttlequals0/misc | 95ce083312bc241b196afcf675f93480ca6866b0 | [
"MIT"
] | null | null | null | #!/usr/local/apps/networking/bin/python2.7
# script will test all server clinet pairs with iperf
### this is the prd script
# pmw 2/18/16
import os
import re
import sys
import datetime
#from subprocess import Popen, PIP
import subprocess
import logging
from pprint import pprint
import argparse
import socket
import Queue
import threading
import thread
import signal
import time
from httplib import HTTPSConnection
from base64 import b64encode
#from servicenow import ChangeRequest, WebService, CSVService, ConfigurationItemFactory, CustomerFactory
# Fail fast on unreachable hosts instead of hanging on default socket timeouts.
socket.setdefaulttimeout(2)
# Accumulated CSV report text, appended to by worker code under a lock.
report =''
#constants for sn
def get_time(ts=None):
    """Format a POSIX timestamp as 'YYYY-MM-DD HH:MM:SS' (local time).

    Fix: the original referenced a global ``ts`` that is never defined,
    raising NameError on every call. ``ts`` is now an optional parameter
    defaulting to the current time, so existing ``get_time()`` callers work.
    """
    if ts is None:
        ts = time.time()
    return datetime.datetime.fromtimestamp(ts).strftime('%Y-%m-%d %H:%M:%S')
def run_script(list,fo,lock):
    # Run one iperf measurement for a CSV row
    # list = [group, server_host, client_host, ?, label, bandwidth, ...]:
    #  1. start an iperf UDP server on list[1] over ssh (daemonized),
    #  2. run the iperf client from list[2] against it (bidirectional, 10s),
    #     killed after 60s if it hangs,
    #  3. average the reported Mbits/sec, kill the server-side iperf,
    #  4. append a CSV result line to ``fo`` and the global ``report``.
    # Returns the CSV line, or False on socket error.
    # NOTE: Python 2 module (`except socket.error, e:` syntax, print statements).
    global report
    try:
        cmd = 'ssh -q -i ttnet_key -o \"StrictHostKeyChecking no\" -l ttnetsftp ';
        cmd += list[1]
        cmd += ' \"iperf -s -p51281 -u -D\"'
        history = cmd + '\n'
        #os.spawnl(os.P_NOWAIT,cmd)
        proc = subprocess.Popen(cmd,shell=True ,stdout=subprocess.PIPE)
        tmp = proc.stdout.read()
        print(tmp)
        # Client command: kill any stale iperf on the client first (Windows
        # taskkill), then run against the server just started.
        cmd = 'ssh -q -i ttnet_key -o \"StrictHostKeyChecking no\" -l ttnetsftp ';
        cmd += list[2]
        cmd += ' \"taskkill /f /im iperf.exe;iperf -c'
        cmd += list[1]
        cmd += ' -b'
        # Bandwidth: digits of list[5]; default 10 Mbit/s if missing, cap at 10
        # when the configured value exceeds 100.
        bw = str(list[5])
        bw = re.sub(r'\D','',bw)
        if bw =='':
            bw =10
        if float(bw) >100:
            cmd += '10'
        else:
            cmd += str(bw)
        print('%s %s -> %s list of 5 (bandwidth) is %s' % (list[0],list[1],list[2],bw))
        cmd += 'M'
        cmd += ' -p51281 -i1 -t10 -r\"'
        history += cmd + '\n'
        # add timeout. kill after 60 sec
        start = datetime.datetime.now()
        proc = subprocess.Popen(cmd,shell=True ,stdout=subprocess.PIPE,stderr=subprocess.PIPE)
        while proc.poll() is None:
            time.sleep(0.5)
            now = datetime.datetime.now()
            if (now - start).seconds> 60:
                os.kill(proc.pid, signal.SIGKILL)
                os.waitpid(-1, os.WNOHANG)
        tmp = proc.stdout.read()
        # remove
        # end remove
        summary = get_summ(list[1],cmd,tmp)
        # Tear down the server-side iperf daemon.
        cmd = 'ssh -q -i ttnet_key -o "StrictHostKeyChecking no" -l ttnetsftp ';
        cmd += list[1]
        cmd += ' "taskkill /f /im iperf.exe"'
        history += cmd + '\n' + list[0] + ','+ list[4]+ ','+ list[5] + ','+ str(summary)
        rep = list[0] + ','+ list[4]+ ','+ list[5] + ','+ str(summary) + '\n'
        proc = subprocess.Popen(cmd,shell=True ,stdout=subprocess.PIPE)
        tmp = proc.stdout.read()
        print(tmp)
        # Serialize writes to the shared report file/global under the lock.
        # NOTE(review): acquire_lock/release_lock are deprecated aliases of
        # acquire/release.
        lock.acquire_lock()
        if fo.closed:
            # Reopen under a fallback name if the caller's handle was closed.
            fo = open('iperf_new.csv','a')
        report += rep
        fo.write(rep)
        print "%s\n" % history
        lock.release_lock()
        return rep
    except socket.error, e:
        lock.acquire_lock()
        # NOTE(review): ``ip`` is not defined in this scope — this handler
        # would raise NameError if it ever runs; likely meant list[1].
        print "%s,%s" % (ip, e)
        # kill server iperf
        lock.release_lock()
        return False
def get_summ(ip, cmd, tmp):
    """Average the Mbits/sec readings found in iperf output ``tmp``.

    Only lines matching ``Mbits/sec.*ms.*%`` (per-interval UDP report lines)
    are counted; the rate is the first field after the transferred-bytes
    column. Returns the average rounded to 2 decimals, or 0 when no rate
    lines are present.

    ``ip`` and ``cmd`` are unused but kept for interface compatibility.
    Fixes: removed leftover debug prints; renamed the accumulator so it no
    longer shadows the builtin ``sum``.
    """
    total = 0.0
    count = 0
    for line in tmp.split('\n'):
        # e.g. '[  3] 0.0-1.0 sec  1.25 MBytes  10.5 Mbits/sec  0.1 ms 0/893 (0%)'
        if re.search(r'Mbits/sec.*ms.*\%', line):
            fields = re.sub(r'^.*Bytes\s+', '', line).split()
            total += float(fields[0])
            count += 1
    if count == 0:
        return 0
    return round(total / count, 2)
def initLogger():
    """Attach a '%(levelname)s: %(message)s' stream handler to the root logger."""
    handler = logging.StreamHandler()
    handler.setFormatter(logging.Formatter('%(levelname)s: %(message)s'))
    logging.getLogger().addHandler(handler)
def get_pairs(inp):
    """Read CSV file ``inp`` and return its rows (as field lists).

    Rows with fewer than three fields (including blank lines) are skipped.

    Fixes: file handle is now closed via a context manager even if reading
    raises; the result list no longer shadows the builtin ``list``.
    """
    with open(inp) as f:
        lines = f.read().split('\n')
    pairs = []
    for line in lines:
        fields = line.split(',')
        if len(fields) > 2:
            pairs.append(fields)
    return pairs
def run_unsched(list,frep,lock):
    # Run every server/client pair in ``list`` sequentially (no scheduling),
    # writing each result line to ``frep``. Always returns True.
    for items in list:
        #print(items[0]+' '+items[1]+' '+items[2]+' '+items[5]+'M' )
        rep = run_script(items, frep, lock)
        # NOTE(review): run_script already writes ``rep`` to the report file;
        # writing it again here appears to duplicate each CSV line — confirm.
        frep.write(rep)
        print(rep)
    return True
if __name__=='__main__':
    # NOTE(review): ``global`` at module level has no effect; ``report`` is
    # already a module-level name.
    global report
    report =''
    initLogger()
    logger = logging.getLogger()
    logger.setLevel(logging.INFO)
    parser = argparse.ArgumentParser(description='test ap xtr pairs')
    parser.add_argument('-f', '--file', nargs=1 ,help='file. format: group,from,to', required=False)
    parser.add_argument('-o', '--output', nargs=1 ,help='file. output file', required=False)
    args = parser.parse_args()
    clientIps = []
    print('args are -> %s' % args)
    lock = thread.allocate_lock()
    # Results file: -o argument or a default name in the working directory.
    if not args.output:
        frep = open('iperf_results.csv','w')
    else:
        frep = open(args.output[0],'w')
    # Pair list: -f argument or the default short pair file.
    if not args.file:
        list = get_pairs("iperf_pairs_short.csv")
    else:
        print('args are -> %s' % args.file)
        list = get_pairs(args.file[0])
    run_unsched(list,frep,lock)
    frep.close()
    # Also dump the accumulated global report to a second, fixed-name file.
    frep = open('iperf_rep_2.csv','w')
    frep.write(report)
    frep.close()
| 29.622857 | 104 | 0.565586 | 681 | 5,184 | 4.25257 | 0.317181 | 0.008633 | 0.01105 | 0.008287 | 0.203729 | 0.164365 | 0.150207 | 0.150207 | 0.132942 | 0.115331 | 0 | 0.019267 | 0.279128 | 5,184 | 174 | 105 | 29.793103 | 0.755686 | 0.094136 | 0 | 0.201389 | 0 | 0 | 0.139162 | 0.019025 | 0 | 0 | 0 | 0 | 0 | 0 | null | null | 0 | 0.111111 | null | null | 0.083333 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
ff3079fc8a9e3d739eb7e1706631d0a9711499a3 | 7,769 | py | Python | calvin_models/calvin_agent/datasets/npz_dataset.py | nikepupu/calvin | 23ce311bb14a07d284b87589842f0a542ab0a6fa | [
"MIT"
] | null | null | null | calvin_models/calvin_agent/datasets/npz_dataset.py | nikepupu/calvin | 23ce311bb14a07d284b87589842f0a542ab0a6fa | [
"MIT"
] | null | null | null | calvin_models/calvin_agent/datasets/npz_dataset.py | nikepupu/calvin | 23ce311bb14a07d284b87589842f0a542ab0a6fa | [
"MIT"
] | null | null | null | import logging
import os
from pathlib import Path
import re
from typing import Dict, List, Optional, Tuple
from calvin_agent.datasets.base_dataset import BaseDataset
from calvin_agent.datasets.utils.episode_utils import (
get_state_info_dict,
process_actions,
process_depth,
process_rgb,
process_state,
)
import numpy as np
import torch
logger = logging.getLogger(__name__)
class NpzDataset(BaseDataset):
    """
    Dataset Loader that uses a shared memory cache

    parameters
    ----------
    datasets_dir: path of folder containing episode files (string must contain 'validation' or 'training')
    save_format: format of episodes in datasets_dir (.pkl or .npz)
    obs_space: DictConfig of the observation modalities of the dataset
    max_window_size: maximum length of the episodes sampled from the dataset
    """

    def __init__(self, *args, skip_frames: int = 0, n_digits: Optional[int] = None, **kwargs):  # type: ignore
        super().__init__(*args, **kwargs)
        self.skip_frames = skip_frames
        # Language-conditioned datasets additionally map each training example
        # to a language-annotation embedding.
        if self.with_lang:
            (
                self.episode_lookup,
                self.lang_lookup,
                self.max_batched_length_per_demo,
                self.lang_ann,
            ) = self.load_file_indices_lang(self.abs_datasets_dir)
        else:
            self.episode_lookup, self.max_batched_length_per_demo = self.load_file_indices(self.abs_datasets_dir)
        self.naming_pattern, self.n_digits = self.lookup_naming_pattern(n_digits)

    def lookup_naming_pattern(self, n_digits):
        # Derive the episode file-name scheme (prefix path, zero-padded index
        # width, suffix) from the first matching file in the dataset directory.
        it = os.scandir(self.abs_datasets_dir)
        while True:
            filename = Path(next(it))
            if self.save_format in filename.suffix:
                break
        aux_naming_pattern = re.split(r"\d+", filename.stem)
        naming_pattern = [filename.parent / aux_naming_pattern[0], filename.suffix]
        # Infer the index width from the file name unless explicitly given.
        n_digits = n_digits if n_digits is not None else len(re.findall(r"\d+", filename.stem)[0])
        assert len(naming_pattern) == 2
        assert n_digits > 0
        return naming_pattern, n_digits

    def get_episode_name(self, idx: int) -> Path:
        """
        Convert frame idx to file name
        """
        return Path(f"{self.naming_pattern[0]}{idx:0{self.n_digits}d}{self.naming_pattern[1]}")

    def zip_sequence(self, start_idx: int, end_idx: int, idx: int) -> Dict[str, np.ndarray]:
        """
        Load consecutive individual frames saved as npy files and combine to episode dict
        parameters:
        -----------
        start_idx: index of first frame
        end_idx: index of last frame

        returns:
        -----------
        episode: dict of numpy arrays containing the episode where keys are the names of modalities
        """
        episodes = [self.load_episode(self.get_episode_name(file_idx)) for file_idx in range(start_idx, end_idx)]
        # Stack per-frame arrays into one (window, ...) array per modality.
        episode = {key: np.stack([ep[key] for ep in episodes]) for key, _ in episodes[0].items()}
        if self.with_lang:
            episode["language"] = self.lang_ann[self.lang_lookup[idx]][0]  # TODO check [0]
        return episode

    def get_sequences(self, idx: int, window_size: int) -> Dict:
        """
        parameters
        ----------
        idx: index of starting frame
        window_size: length of sampled episode

        returns
        ----------
        seq_state_obs: numpy array of state observations
        seq_rgb_obs: tuple of numpy arrays of rgb observations
        seq_depth_obs: tuple of numpy arrays of depths observations
        seq_acts: numpy array of actions
        """
        start_file_indx = self.episode_lookup[idx]
        end_file_indx = start_file_indx + window_size

        episode = self.zip_sequence(start_file_indx, end_file_indx, idx)

        seq_state_obs = process_state(episode, self.observation_space, self.transforms, self.proprio_state)
        seq_rgb_obs = process_rgb(episode, self.observation_space, self.transforms)
        seq_depth_obs = process_depth(episode, self.observation_space, self.transforms)
        seq_acts = process_actions(episode, self.observation_space, self.transforms)
        info = get_state_info_dict(episode)
        # Empty tensor when the dataset carries no language annotations.
        seq_lang = {"lang": torch.from_numpy(episode["language"]) if self.with_lang else torch.empty(0)}
        seq_dict = {**seq_state_obs, **seq_rgb_obs, **seq_depth_obs, **seq_acts, **info, **seq_lang}  # type:ignore
        seq_dict["idx"] = idx  # type:ignore
        return seq_dict

    def load_file_indices_lang(self, abs_datasets_dir: Path) -> Tuple[List, List, List, np.ndarray]:
        """
        this method builds the mapping from index to file_name used for loading the episodes

        parameters
        ----------
        abs_datasets_dir: absolute path of the directory containing the dataset

        returns
        ----------
        episode_lookup: list for the mapping from training example index to episode (file) index
        max_batched_length_per_demo: list of possible starting indices per episode
        """
        assert abs_datasets_dir.is_dir()
        episode_lookup = []
        try:
            # Prefer annotations under the configured language sub-folder.
            print("trying to load lang data from: ", abs_datasets_dir / self.lang_folder / "auto_lang_ann.npy")
            lang_data = np.load(abs_datasets_dir / self.lang_folder / "auto_lang_ann.npy", allow_pickle=True).reshape(
                -1
            )[0]
        except Exception:
            # Fall back to annotations at the dataset root.
            print("Exception, trying to load lang data from: ", abs_datasets_dir / "auto_lang_ann.npy")
            lang_data = np.load(abs_datasets_dir / "auto_lang_ann.npy", allow_pickle=True).reshape(-1)[0]

        ep_start_end_ids = lang_data["info"]["indx"]
        lang_ann = lang_data["language"]["emb"]
        lang_lookup = []
        max_batched_length_per_demo = []
        for i, (start_idx, end_idx) in enumerate(ep_start_end_ids):
            assert end_idx >= self.max_window_size
            cnt = 0
            # Keep every skip_frames-th starting index of each annotated episode.
            # NOTE(review): skip_frames defaults to 0, which would make the
            # modulo below raise ZeroDivisionError — confirm callers always
            # pass skip_frames >= 1 for language datasets.
            for idx in range(start_idx, end_idx + 1 - self.max_window_size):
                if cnt % self.skip_frames == 0:
                    lang_lookup.append(i)
                    episode_lookup.append(idx)
                cnt += 1
            possible_indices = end_idx + 1 - start_idx - self.max_window_size  # TODO: check it for skip_frames
            max_batched_length_per_demo.append(possible_indices)
        return episode_lookup, lang_lookup, max_batched_length_per_demo, lang_ann

    def load_file_indices(self, abs_datasets_dir: Path) -> Tuple[List, List]:
        """
        this method builds the mapping from index to file_name used for loading the episodes

        parameters
        ----------
        abs_datasets_dir: absolute path of the directory containing the dataset

        returns
        ----------
        episode_lookup: list for the mapping from training example index to episode (file) index
        max_batched_length_per_demo: list of possible starting indices per episode
        """
        assert abs_datasets_dir.is_dir()
        episode_lookup = []

        ep_start_end_ids = np.load(abs_datasets_dir / "ep_start_end_ids.npy")
        logger.info(f'Found "ep_start_end_ids.npy" with {len(ep_start_end_ids)} episodes.')
        max_batched_length_per_demo = []
        for start_idx, end_idx in ep_start_end_ids:
            assert end_idx > self.max_window_size
            # Every frame that leaves room for a full window is a valid start.
            for idx in range(start_idx, end_idx + 1 - self.max_window_size):
                episode_lookup.append(idx)
            possible_indices = end_idx + 1 - start_idx - self.max_window_size
            max_batched_length_per_demo.append(possible_indices)
        return episode_lookup, max_batched_length_per_demo
| 41.324468 | 118 | 0.644098 | 1,026 | 7,769 | 4.584795 | 0.192982 | 0.037415 | 0.041667 | 0.040391 | 0.442177 | 0.404337 | 0.366071 | 0.318027 | 0.27551 | 0.26233 | 0 | 0.004031 | 0.265543 | 7,769 | 187 | 119 | 41.545455 | 0.820365 | 0.246235 | 0 | 0.156863 | 0 | 0.009804 | 0.063588 | 0.021257 | 0 | 0 | 0 | 0.010695 | 0.058824 | 1 | 0.068627 | false | 0 | 0.088235 | 0 | 0.22549 | 0.019608 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
ff3706c2486945d76ad59f2792a976df3fa7f5b3 | 464 | py | Python | multimenus/migrations/0006_auto_20200715_1100.py | arck1/djangocms-multimenus | 45accefcb280b96373121331f3294ffffe3e36af | [
"MIT"
] | null | null | null | multimenus/migrations/0006_auto_20200715_1100.py | arck1/djangocms-multimenus | 45accefcb280b96373121331f3294ffffe3e36af | [
"MIT"
] | null | null | null | multimenus/migrations/0006_auto_20200715_1100.py | arck1/djangocms-multimenus | 45accefcb280b96373121331f3294ffffe3e36af | [
"MIT"
] | null | null | null | # Generated by Django 2.1.15 on 2020-07-15 11:00
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated migration: alters the MenuItem.url field definition
    # (optional CharField, max 200 chars, with updated help text).

    dependencies = [
        ('multimenus', '0005_auto_20200415_1621'),
    ]

    operations = [
        migrations.AlterField(
            model_name='menuitem',
            name='url',
            field=models.CharField(blank=True, help_text='Page url, can be absolute or related.', max_length=200, null=True),
        ),
    ]
| 24.421053 | 125 | 0.625 | 55 | 464 | 5.163636 | 0.836364 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.101744 | 0.258621 | 464 | 18 | 126 | 25.777778 | 0.723837 | 0.099138 | 0 | 0 | 1 | 0 | 0.194712 | 0.055288 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.083333 | 0 | 0.333333 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
ff3d57b7a5ef287c60851d0bb59634ded80f741c | 1,584 | py | Python | day4/day4.py | areddish/aoc2020 | 9d609e8147e370e31ffe92e4170c5ca7c463186c | [
"MIT"
] | 1 | 2020-12-08T03:02:00.000Z | 2020-12-08T03:02:00.000Z | day4/day4.py | areddish/aoc2020 | 9d609e8147e370e31ffe92e4170c5ca7c463186c | [
"MIT"
] | null | null | null | day4/day4.py | areddish/aoc2020 | 9d609e8147e370e31ffe92e4170c5ca7c463186c | [
"MIT"
] | null | null | null | def check_hgt(h):
if "cm" in h:
return 150 <= int(h.replace("cm","")) <= 193
elif "in" in h:
return 59 <= int(h.replace("in","")) <= 76
else:
return False
def check_hcl(h):
    """Validate a hair colour: '#' followed by exactly six lowercase hex digits."""
    if h[0] != '#' or len(h) != 7:
        return False
    return all(c in "abcdef0123456789" for c in h[1:])
def check_range(v, low, high):
    """Return True when int(v) lies within [low, high] inclusive."""
    value = int(v)
    return value >= low and value <= high
# Per-field validators for Advent of Code 2020 day 4 part 2:
# each maps a passport field name to a predicate over its string value.
validation = {
    "hgt": check_hgt,
    "hcl": check_hcl,
    "pid": lambda x: len(x) == 9,
    "byr": lambda x: check_range(x, 1920, 2002),
    "iyr": lambda x: check_range(x, 2010, 2020),
    "eyr": lambda x: check_range(x, 2020, 2030),
    "ecl": lambda x: x in ["amb","blu","brn","gry", "grn", "hzl", "oth"],
    # cid is always accepted (optional field).
    "cid": lambda x: True
}
def validate_passport(p):
    """A passport is valid with all 8 fields, or with 7 when only 'cid' is absent."""
    if len(p) == 8:
        return True
    return len(p) == 7 and not p.get("cid", None)
#with open("test.txt", "rt") as file:
with open("day4.txt", "rt") as file:
    # Parse blank-line-separated passports of space-separated key:value pairs.
    # ``valid`` counts passports with all required fields present (part 1);
    # ``valid2`` additionally requires every field value to pass its validator
    # (part 2). bool results add as 0/1.
    valid = 0
    valid2 = 0
    passport = {}
    passport2 = {}
    for l in file.read().splitlines():
        if l.strip() == "":
            # Blank line ends the current passport: score and reset both dicts.
            valid += validate_passport(passport)
            valid2 += validate_passport(passport2)
            passport = {}
            passport2 = {}
        else:
            for x in l.strip().split(' '):
                p = x.split(":")
                passport[p[0]] = p[1]
                # Only keep fields whose value passes validation for part 2.
                if (validation[p[0]](p[1])):
                    passport2[p[0]] = p[1]
    # Flush the final passport (input may not end with a blank line).
    valid += validate_passport(passport)
    valid2 += validate_passport(passport2)
    print (valid)
    print (valid2)
| 27.310345 | 73 | 0.508207 | 216 | 1,584 | 3.666667 | 0.361111 | 0.05303 | 0.045455 | 0.064394 | 0.219697 | 0.151515 | 0.151515 | 0.151515 | 0 | 0 | 0 | 0.062673 | 0.315025 | 1,584 | 57 | 74 | 27.789474 | 0.667281 | 0.022727 | 0 | 0.265306 | 0 | 0 | 0.054945 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.081633 | false | 0.22449 | 0 | 0.040816 | 0.244898 | 0.040816 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 1 |
ff4088986ff98e499e58a08879fd7b5b158a61fc | 893 | py | Python | marmot/features/google_translate_feature_extractor.py | qe-team/marmot | 38e09ff1d0a3025a6b7edeaaf6086ed047ec45ff | [
"0BSD"
] | 19 | 2015-08-21T13:06:37.000Z | 2021-07-26T09:56:29.000Z | marmot/features/google_translate_feature_extractor.py | qe-team/marmot | 38e09ff1d0a3025a6b7edeaaf6086ed047ec45ff | [
"0BSD"
] | 36 | 2015-01-13T13:01:07.000Z | 2016-06-22T06:59:59.000Z | marmot/features/google_translate_feature_extractor.py | qe-team/marmot | 38e09ff1d0a3025a6b7edeaaf6086ed047ec45ff | [
"0BSD"
] | 8 | 2015-12-11T16:41:47.000Z | 2019-04-08T16:28:40.000Z | from goslate import Goslate
from nltk import word_tokenize
import ipdb
from marmot.features.feature_extractor import FeatureExtractor
from marmot.exceptions.no_data_error import NoDataError
class GoogleTranslateFeatureExtractor(FeatureExtractor):
    """Binary feature: does the target token appear in a (pseudo-)reference translation?"""

    def __init__(self, lang='en'):
        # Target language code handed to the Goslate translation backend.
        self.lang = lang

    def get_features(self, context_obj):
        """Return [1] if context_obj['token'] occurs in the reference translation, else [0].

        Uses a precomputed 'pseudo-reference' when available; otherwise
        translates the joined source sentence on the fly.
        """
        if 'source' not in context_obj:
            raise NoDataError('source', context_obj, 'GoogleTranslateFeatureExtractor')
        if 'pseudo-reference' in context_obj:
            reference = context_obj['pseudo-reference']
        else:
            source_text = ' '.join(context_obj['source'])
            reference = word_tokenize(Goslate().translate(source_text, self.lang))
        return [1] if context_obj['token'] in reference else [0]

    def get_feature_names(self):
        return ["pseudo-reference"]
| 30.793103 | 97 | 0.677492 | 98 | 893 | 5.979592 | 0.44898 | 0.119454 | 0.040956 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.002924 | 0.234043 | 893 | 28 | 98 | 31.892857 | 0.853801 | 0 | 0 | 0 | 0 | 0 | 0.117581 | 0.034714 | 0 | 0 | 0 | 0 | 0 | 1 | 0.142857 | false | 0 | 0.238095 | 0.047619 | 0.571429 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 1 |
ff40b32664a84e5bb2c3ecaa367f1779b82a8367 | 235 | py | Python | hackerrank/built_ins/zipped.py | alex-d-bondarev/learn-python | b119cb1e09a57e93abc73383c014cc8ceba18acf | [
"MIT"
] | null | null | null | hackerrank/built_ins/zipped.py | alex-d-bondarev/learn-python | b119cb1e09a57e93abc73383c014cc8ceba18acf | [
"MIT"
] | null | null | null | hackerrank/built_ins/zipped.py | alex-d-bondarev/learn-python | b119cb1e09a57e93abc73383c014cc8ceba18acf | [
"MIT"
] | null | null | null | if __name__ == '__main__':
_, X = input().split()
subjects = list()
for _ in range(int(X)):
subjects.append(map(float, input().split()))
for i in zip(*subjects):
print("{0:.1f}".format(sum(i)/len(i)))
| 23.5 | 52 | 0.544681 | 32 | 235 | 3.6875 | 0.71875 | 0.169492 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.011173 | 0.238298 | 235 | 9 | 53 | 26.111111 | 0.648045 | 0 | 0 | 0 | 0 | 0 | 0.06383 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0 | 0 | 0 | 0.142857 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
ff45f44cc60ac4208193a70ca5502f0bb47aa1a1 | 9,110 | py | Python | scripts/dspace_dsa_maker.py | masonpublishing/dspace_simple_archives_import | 02b90aca955dc34019e4efd77d823dbe91a9812e | [
"Apache-2.0"
] | 1 | 2019-06-04T15:36:39.000Z | 2019-06-04T15:36:39.000Z | scripts/dspace_dsa_maker.py | masonpublishing/dspace_simple_archives_import | 02b90aca955dc34019e4efd77d823dbe91a9812e | [
"Apache-2.0"
] | null | null | null | scripts/dspace_dsa_maker.py | masonpublishing/dspace_simple_archives_import | 02b90aca955dc34019e4efd77d823dbe91a9812e | [
"Apache-2.0"
] | 1 | 2019-06-04T15:42:39.000Z | 2019-06-04T15:42:39.000Z | """
dspace_dsa_maker.py - prepare DSpace Simple Archives for import.
Copyright 2018 University of Toronto Libraries
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
#!/usr/bin/python
# coding=utf8
from zipfile import ZipFile
import os
import shutil
import glob
import sys
from bs4 import BeautifulSoup as makesoup
class DSpaceDSAMaker(object):
    """Generate DSpace Simple Archives for each zip archive in deposit directory
    """

    def __init__(self):
        """Make working directories and start iteration of deposited zips.

        Side effects: creates ../deposit, ../extract and ../ingest next to this
        script (wiping any leftover ../extract), then immediately processes all
        deposited zips via iterate() -- constructing this object runs the pipeline.
        """
        print "Launching DSpace DSA Maker. \nPreparing work directories."
        self.root = os.path.dirname(os.path.abspath(__file__))
        self.deposit = os.path.join(self.root, '../deposit/')
        self.extract = os.path.join(self.root, '../extract/')
        self.ingest = os.path.join(self.root, '../ingest/')
        # purge the extract directory of failed ingests
        if os.path.isdir(self.extract):
            shutil.rmtree(self.extract)
        # make deposit, extract and ingest work directories
        for current_dir in [self.deposit, self.extract, self.ingest]:
            if not os.path.isdir(current_dir):
                os.mkdir(current_dir)
                print "Made " + current_dir
        # Per-item state, filled in by extract_zip()/crosswalk() for each zip.
        self.year = ''
        self.filename = ''
        self.extract_dir = ''
        self.journal_name = ''
        self.iterate()

    def iterate(self):
        """For each zip file in the deposit directory, call work functions.

        Non-zip entries in the deposit directory are silently ignored.
        """
        if not os.listdir(self.deposit):
            print "Nothing to ingest. Please check " + self.deposit
        else:
            print "Checking " + self.deposit + " for new items to ingest."
            for original_zip in os.listdir(self.deposit):
                if original_zip.endswith('.zip'):
                    print "Found " + original_zip
                    self.current_zip = original_zip
                    self.extract_zip(original_zip)
                    self.crosswalk()
                    self.contents()
                    self.move_to_ingest()

    def extract_zip(self, zipname):
        """Extract one zip file and set up instance variables for later use.

        Assumes zip names look like "<journal>-....zip": the journal name is
        the text before the first "-" and the item name is the zip basename.
        """
        zipfile = ZipFile(self.deposit + zipname)
        zipfile.extractall(self.extract)
        self.filename = zipname.split(".zip")[0]
        self.extract_dir = os.path.join(self.extract, self.filename)
        self.journal_name = zipname.split("-")[0]

    def crosswalk(self):
        """Produce dublin_core.xml by matching tags from original XML to user-defined tags.
        Change this function to match your original XML fieldset.
        """
        # NOTE: chdir's into the extract dir and back out via a relative
        # chdir("../") at the end; later methods rely on this cwd dance.
        os.chdir(self.extract_dir)
        # assume the main metadata is the only XML file in the original directory
        base = open(glob.glob("*-metadata.xml")[0])
        soup = makesoup(base, 'xml')
        # make a container soup, and make smaller soups for each field,
        # which are inserted into the container.
        newsoup = makesoup('<dublin_core schema="dc"></dublin_core>', 'xml')
        tag_list = []
        # title
        tag_list.append(makesoup("<dcvalue element='title'>" + \
            soup.find('article-title').string + "</dcvalue>", 'xml').contents[0])
        # author(s)
        for author_container in soup.find_all('contrib', {'contrib-type' : 'author'}):
            tag_list.append(makesoup("<dcvalue element='contributor' qualifier='author'>" + \
                author_container.surname.string + ", " + \
                author_container.find('given-names').string + \
                "</dcvalue>", 'xml').contents[0])
        for author_container in soup.find_all('contrib', {'contrib-type' : 'editor'}):
            tag_list.append(makesoup("<dcvalue element='contributor' qualifier='editor'>" + \
                author_container.surname.string + ", " + \
                author_container.find('given-names').string + \
                "</dcvalue>", 'xml').contents[0])
        # abstract
        tag_list.append(makesoup("<dcvalue element='abstract'>" + soup.abstract.p.string + \
            "</dcvalue>", 'xml').contents[0])
        # date(s) -- acceptance date also determines the ingest year bucket
        date_accepted = soup.find('date', {'date-type' : 'accepted'})
        self.year = date_accepted.year.string
        tag_list.append(makesoup("<dcvalue element='date' qualifier='issued'>" + \
            "-".join((date_accepted.year.string, date_accepted.month.string, \
            date_accepted.day.string)) + "</dcvalue>", 'xml').contents[0])
        # publisher
        tag_list.append(makesoup("<dcvalue element='publisher'>" \
            + soup.find('publisher-name').string + \
            "</dcvalue>", 'xml').contents[0])
        # issn
        tag_list.append(makesoup("<dcvalue element='identifier' qualifier='issn'>" \
            + soup.find("issn", {'pub-type' : 'ppub'}).string \
            + "</dcvalue>", 'xml').contents[0])
        # essn
        tag_list.append(makesoup("<dcvalue element='identifier' qualifier='issn'>" \
            + soup.find("issn", {'pub-type' : 'epub'}).string \
            + "</dcvalue>", 'xml').contents[0])
        # DOI
        # NOTE(review): these two literals concatenate with no space between
        # the attributes, producing "<dcvalue element='identifier'qualifier='doi'>"
        # -- likely malformed XML; confirm against actual import behavior.
        tag_list.append(makesoup("<dcvalue element='identifier'" \
            + "qualifier='doi'>https://dx.doi.org/" \
            + soup.find('article-id', {'pub-id-type' : 'doi'}).string \
            + "</dcvalue>", 'xml').contents[0])
        # insert all created tags from taglist into the container soup
        dublin_core = newsoup.find('dublin_core')
        for tag in tag_list:
            dublin_core.append(tag)
        # write out the complete DSpace DC (name reused here as a file handle)
        dublin_core = open("dublin_core.xml", 'w')
        dublin_core.write(str(newsoup))
        dublin_core.close()
        os.chdir("../")

    def contents(self):
        """Generate the plain text list of bitstreams for DSpace import.
        Common use case is a single PDF, sometimes with supplementary files of all formats.
        This uses filename matching, so change the pattern according to your setup.
        """
        os.chdir(self.extract_dir)
        # assuming dublin_core.xml has been successfully made. Old MD can be deleted.
        os.remove(glob.glob("*-metadata.xml")[0])
        # also delete the directories that were transferred
        # but never used - ScholarOne sends these by default
        os.remove(glob.glob("*-manifest.html")[0])
        shutil.rmtree('pdf_renditions')
        shutil.rmtree('doc')
        # move the main PDF from its containing directory to the root directory we are working in
        if os.path.isdir('pdf'):
            manuscript = os.path.basename(glob.glob('pdf/*.pdf')[0])
            shutil.move('pdf/' + manuscript, '.')
            shutil.rmtree('pdf')
        else:
            # Hard abort: a deposit without a pdf/ directory stops the whole run.
            sys.exit("Unable to produce DSpaceSA for zip file " + \
                self.current_zip + "\nDirectory 'pdf' could not be found.")
        # in the same fashion, move any suppl files to the root directory
        if os.path.isdir('suppl_data'):
            for suppl_file in os.listdir('suppl_data'):
                shutil.move('suppl_data/' + suppl_file, '.')
            shutil.rmtree('suppl_data')
        # add all non-dspace files into the contents file list text file for dspace import
        # assuming we cleaned up all non-importable files such as
        # pdf_renditions, doc and the manifest
        contents = open('contents', 'w')
        contents.write(manuscript + "\n")
        for found_file in os.listdir("."):
            if found_file not in ['dublin_core.xml', 'contents', manuscript]:
                contents.write(found_file + '\n')
        contents.close()

    def move_to_ingest(self):
        """Since DSpace import requires a root directory for each collection,
        separate deposit item into directories that map to collections in DSpace.
        The import python script will pick these up.
        """
        # Layout: ingest/<journal_name>/<year>/<item>; an existing item with the
        # same name is replaced wholesale.
        ingest_path = os.path.join(self.ingest, self.journal_name, self.year)
        if not os.path.exists(ingest_path):
            os.makedirs(ingest_path)
        target = os.path.join(ingest_path, self.filename)
        if os.path.exists(target):
            shutil.rmtree(target)
        shutil.move(self.extract_dir, os.path.join(ingest_path, self.filename))
# Script entry point: constructing the object immediately runs the whole
# deposit -> extract -> crosswalk -> ingest pipeline (see __init__).
if __name__ == "__main__":
    DSpaceDSAMaker()
| 42.570093 | 97 | 0.597475 | 1,080 | 9,110 | 4.951852 | 0.288889 | 0.017951 | 0.021877 | 0.03534 | 0.212603 | 0.15445 | 0.121167 | 0.121167 | 0.077786 | 0.077786 | 0 | 0.003837 | 0.284852 | 9,110 | 213 | 98 | 42.769953 | 0.817038 | 0.10944 | 0 | 0.140496 | 0 | 0 | 0.181215 | 0.010487 | 0 | 0 | 0 | 0 | 0 | 0 | null | null | 0 | 0.049587 | null | null | 0.041322 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
ff46471a452e693a79e98c3a031f0fa30bba73ed | 533 | py | Python | tests/test_notes_append_header.py | fujiawei-dev/tookit-py | 5ab3a18a41885f6166150cc27183621b96f8f991 | [
"BSD-3-Clause"
] | null | null | null | tests/test_notes_append_header.py | fujiawei-dev/tookit-py | 5ab3a18a41885f6166150cc27183621b96f8f991 | [
"BSD-3-Clause"
] | null | null | null | tests/test_notes_append_header.py | fujiawei-dev/tookit-py | 5ab3a18a41885f6166150cc27183621b96f8f991 | [
"BSD-3-Clause"
] | null | null | null | """
Date: 2022.04.13 10:28
Description: Omit
LastEditors: Rustle Karl
LastEditTime: 2022.04.13 10:28
"""
import tempfile
from pathlib import Path
from create_config_file.notes import notes_append_header
def test_notes_append_header():
    """Exercise notes_append_header on content without and with a '---' front-matter marker."""
    target = Path(tempfile.gettempdir()) / "file.txt"
    for initial_content in ("old_content", "---old_content"):
        target.write_text(initial_content)
        notes_append_header(target)
        print(target.read_text())
        target.unlink()
| 20.5 | 56 | 0.718574 | 75 | 533 | 4.88 | 0.453333 | 0.120219 | 0.185792 | 0.054645 | 0.453552 | 0.387978 | 0.387978 | 0.387978 | 0.387978 | 0.387978 | 0 | 0.053691 | 0.161351 | 533 | 25 | 57 | 21.32 | 0.765101 | 0.180113 | 0 | 0.428571 | 0 | 0 | 0.076923 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.071429 | false | 0 | 0.214286 | 0 | 0.285714 | 0.142857 | 0 | 0 | 0 | null | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
ff46c556255a2b8fc5d5a10506e048de2e0a70b8 | 959 | py | Python | examples/multifig.py | ianhi/mpl-playback | 2c0896205b94e2c3c1ee9e8582291cc2e94aab99 | [
"BSD-3-Clause"
] | 1 | 2021-05-06T22:38:23.000Z | 2021-05-06T22:38:23.000Z | examples/multifig.py | ianhi/mpl-playback | 2c0896205b94e2c3c1ee9e8582291cc2e94aab99 | [
"BSD-3-Clause"
] | 4 | 2021-01-12T02:15:00.000Z | 2021-06-30T21:14:43.000Z | examples/multifig.py | ianhi/mpl-playback | 2c0896205b94e2c3c1ee9e8582291cc2e94aab99 | [
"BSD-3-Clause"
] | null | null | null | """
================
Multiple Figures
================
Test case for putting multiple figures into sphinx gallery.
The playback file was generated using:
``record_file("multifig.py", ["slider_fig", "fig"])``
"""
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.widgets import Slider
# The parametrized function to be plotted
def f(t, amplitude, frequency):
    """Sine wave of the given amplitude and frequency (Hz), evaluated at time(s) t."""
    angular_frequency = 2 * np.pi * frequency
    return amplitude * np.sin(angular_frequency * t)
t = np.linspace(0, 1, 1000)  # time axis: 1000 samples over [0, 1]

# Define initial parameters
init_amplitude = 5
init_frequency = 3

# Create the figure and the line that we will manipulate
fig, ax = plt.subplots()
(line,) = plt.plot(t, f(t, init_amplitude, init_frequency), lw=2)
# The function to be called anytime a slider's value changes
def update(val):
    """Slider callback: replot the line at frequency *val* and request a redraw.

    NOTE(review): amplitude is hard-coded to 5 here rather than reusing
    init_amplitude (currently the same value) -- confirm this is intended.
    """
    line.set_ydata(f(t, 5, val))
    fig.canvas.draw_idle()
# Second figure: a standalone window holding only the frequency slider,
# demonstrating that a widget in one figure can drive another figure's plot.
slider_fig, s_ax = plt.subplots(figsize=(6.4, 2))
slider = Slider(s_ax, "freq", 0, 30, valinit=3)  # frequency range 0-30, starts at 3
slider.on_changed(update)

plt.show()
| 22.833333 | 65 | 0.694473 | 149 | 959 | 4.389262 | 0.583893 | 0.009174 | 0.036697 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.022222 | 0.15537 | 959 | 41 | 66 | 23.390244 | 0.785185 | 0.402503 | 0 | 0 | 1 | 0 | 0.00713 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.117647 | false | 0 | 0.176471 | 0.058824 | 0.352941 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
ff4a89c35219dc2f2732b8374cd3ee1ff05ce0d0 | 572 | py | Python | src/tratamientos/migrations/0006_auto_20170201_1554.py | mava-ar/sgk | cb8b3abf243b4614e6a45e4e2db5bb7cce94dee4 | [
"Apache-2.0"
] | null | null | null | src/tratamientos/migrations/0006_auto_20170201_1554.py | mava-ar/sgk | cb8b3abf243b4614e6a45e4e2db5bb7cce94dee4 | [
"Apache-2.0"
] | 32 | 2016-05-09T19:37:08.000Z | 2022-01-13T01:00:52.000Z | src/tratamientos/migrations/0006_auto_20170201_1554.py | mava-ar/sgk | cb8b3abf243b4614e6a45e4e2db5bb7cce94dee4 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
# Generated by Django 1.9.6 on 2017-02-01 18:54
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Auto-generated migration: repoint Sesion.profesional at core.Profesional
    # with CASCADE deletion and a 'sesiones' reverse accessor.

    dependencies = [
        ('tratamientos', '0005_sesion_profesional'),
    ]

    operations = [
        migrations.AlterField(
            model_name='sesion',
            name='profesional',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='sesiones', to='core.Profesional'),
        ),
    ]
| 26 | 129 | 0.662587 | 64 | 572 | 5.765625 | 0.671875 | 0.065041 | 0.075881 | 0.119241 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.044643 | 0.216783 | 572 | 21 | 130 | 27.238095 | 0.779018 | 0.117133 | 0 | 0 | 1 | 0 | 0.151394 | 0.045817 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.214286 | 0 | 0.428571 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
ff4a945a55cd1faab9edb57d2954c239971d2727 | 3,749 | py | Python | src/alembic/versions/89944f8b35b3_db_games_model_migrations.py | green2cerberuz/db_vgames | d6d71f6b40c7e0c28c307bc314f46675ac0bf5e9 | [
"MIT"
] | null | null | null | src/alembic/versions/89944f8b35b3_db_games_model_migrations.py | green2cerberuz/db_vgames | d6d71f6b40c7e0c28c307bc314f46675ac0bf5e9 | [
"MIT"
] | null | null | null | src/alembic/versions/89944f8b35b3_db_games_model_migrations.py | green2cerberuz/db_vgames | d6d71f6b40c7e0c28c307bc314f46675ac0bf5e9 | [
"MIT"
] | null | null | null | """DB Games model migrations
Revision ID: 89944f8b35b3
Revises:
Create Date: 2020-11-14 03:49:03.255055
"""
import sqlalchemy as sa
from alembic import op
# revision identifiers, used by Alembic.
revision = "89944f8b35b3"  # this migration's unique id
down_revision = None  # None: this is the first migration in the chain
branch_labels = None
depends_on = None
def upgrade():
    """Create the video-game schema: company, franchise, console, game,
    plus the franchise association and game<->console join tables.
    Tables are created in dependency order so foreign keys resolve."""
    # ### commands auto generated by Alembic - please adjust! ###
    # Publishers/developers; is_publisher distinguishes the two roles.
    op.create_table(
        "company",
        sa.Column("id", sa.Integer(), nullable=False),
        sa.Column("name", sa.String(length=250), nullable=False),
        sa.Column("creation_year", sa.DateTime(), nullable=True),
        sa.Column("description", sa.String(length=500), nullable=True),
        sa.Column("logo", sa.String(length=500), nullable=True),
        sa.Column("is_publisher", sa.Boolean(), nullable=True),
        sa.PrimaryKeyConstraint("id"),
    )
    op.create_table(
        "franchise",
        sa.Column("id", sa.Integer(), nullable=False),
        sa.Column("title", sa.String(length=250), nullable=False),
        sa.Column("first_release", sa.DateTime(), nullable=True),
        sa.Column("description", sa.String(length=250), nullable=True),
        sa.PrimaryKeyConstraint("id"),
    )
    # Consoles belong to a company; deleting the company cascades.
    op.create_table(
        "console",
        sa.Column("id", sa.Integer(), nullable=False),
        sa.Column("name", sa.String(length=250), nullable=False),
        sa.Column("release_year", sa.DateTime(), nullable=False),
        sa.Column("description", sa.String(length=500), nullable=True),
        sa.Column("cover", sa.String(length=500), nullable=True),
        sa.Column("motto", sa.String(length=100), nullable=False),
        sa.Column("company_id", sa.Integer(), nullable=True),
        sa.ForeignKeyConstraint(["company_id"], ["company.id"], ondelete="CASCADE"),
        sa.PrimaryKeyConstraint("id"),
    )
    # Games optionally belong to a franchise; deleting the franchise cascades.
    op.create_table(
        "game",
        sa.Column("id", sa.Integer(), nullable=False),
        sa.Column("name", sa.String(length=250), nullable=False),
        sa.Column("publication_year", sa.DateTime(), nullable=True),
        sa.Column("score", sa.Integer(), nullable=True),
        sa.Column("description", sa.String(length=500), nullable=True),
        sa.Column("cover", sa.String(length=500), nullable=True),
        sa.Column("franchise_id", sa.Integer(), nullable=True),
        sa.ForeignKeyConstraint(["franchise_id"], ["franchise.id"], ondelete="CASCADE"),
        sa.PrimaryKeyConstraint("id"),
    )
    # Three-way association: franchise / game / console.
    op.create_table(
        "franchiseassociation",
        sa.Column("id", sa.Integer(), nullable=False),
        sa.Column("franchise_id", sa.Integer(), nullable=True),
        sa.Column("game_id", sa.Integer(), nullable=True),
        sa.Column("console_id", sa.Integer(), nullable=True),
        sa.ForeignKeyConstraint(["console_id"], ["console.id"], ondelete="CASCADE"),
        sa.ForeignKeyConstraint(["franchise_id"], ["franchise.id"], ondelete="CASCADE"),
        sa.ForeignKeyConstraint(["game_id"], ["game.id"], ondelete="CASCADE"),
        sa.PrimaryKeyConstraint("id"),
    )
    # Plain many-to-many join table between game and console (no surrogate key).
    op.create_table(
        "game_console_table",
        sa.Column("game_id", sa.Integer(), nullable=True),
        sa.Column("console_id", sa.Integer(), nullable=True),
        sa.ForeignKeyConstraint(
            ["console_id"],
            ["console.id"],
        ),
        sa.ForeignKeyConstraint(
            ["game_id"],
            ["game.id"],
        ),
    )
    # ### end Alembic commands ###
def downgrade():
    """Drop every table created by upgrade(), reverse dependency order first
    (join/association tables before the tables they reference)."""
    # ### commands auto generated by Alembic - please adjust! ###
    for table_name in (
        "game_console_table",
        "franchiseassociation",
        "game",
        "console",
        "franchise",
        "company",
    ):
        op.drop_table(table_name)
    # ### end Alembic commands ###
| 37.868687 | 88 | 0.623099 | 430 | 3,749 | 5.344186 | 0.181395 | 0.104439 | 0.115753 | 0.113142 | 0.724543 | 0.71671 | 0.684943 | 0.648825 | 0.535248 | 0.404265 | 0 | 0.024791 | 0.203788 | 3,749 | 98 | 89 | 38.255102 | 0.745059 | 0.078688 | 0 | 0.45 | 0 | 0 | 0.161545 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.025 | false | 0 | 0.025 | 0 | 0.05 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
ff4bc07be12dc2426e91dab41e8aaf03e6096812 | 468 | py | Python | way/python/first_steps/basics/testgui.py | only-romano/junkyard | b60a25b2643f429cdafee438d20f9966178d6f36 | [
"MIT"
] | null | null | null | way/python/first_steps/basics/testgui.py | only-romano/junkyard | b60a25b2643f429cdafee438d20f9966178d6f36 | [
"MIT"
] | null | null | null | way/python/first_steps/basics/testgui.py | only-romano/junkyard | b60a25b2643f429cdafee438d20f9966178d6f36 | [
"MIT"
] | null | null | null | from tkinter import *
# Number of times the button has been clicked so far.
clicks = 0
def click_button():
    """Button handler: bump the global click counter and refresh the label text."""
    global clicks
    clicks = clicks + 1
    buttonText.set("Clicks {}".format(clicks))
root = Tk()
root.title("GUI на Python")  # window title (Russian for "GUI in Python")
root.geometry("300x250")

# Mutable label text bound to the button; updated by click_button on each press.
buttonText = StringVar()
buttonText.set("Clicks {}".format(clicks))
btn = Button(textvariable=buttonText, background="#555", foreground="#ccc",
             padx="20", pady="8", font="16", command=click_button)
btn.pack()

root.mainloop()  # blocks until the window is closed
| 19.5 | 76 | 0.632479 | 55 | 468 | 5.345455 | 0.672727 | 0.07483 | 0.129252 | 0.170068 | 0.210884 | 0 | 0 | 0 | 0 | 0 | 0 | 0.042895 | 0.202991 | 468 | 23 | 77 | 20.347826 | 0.745308 | 0 | 0 | 0.133333 | 0 | 0 | 0.114607 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.066667 | false | 0 | 0.066667 | 0 | 0.133333 | 0 | 0 | 0 | 0 | null | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
ff4eece822bfb55c7cca08af69a260d962fcf85b | 406 | py | Python | string/Python/0067-add-binary-1.py | ljyljy/LeetCode-Solution-in-Good-Style | 0998211d21796868061eb22e2cbb9bcd112cedce | [
"Apache-2.0"
] | 1 | 2021-01-10T17:03:21.000Z | 2021-01-10T17:03:21.000Z | string/Python/0067-add-binary-1.py | lemonnader/LeetCode-Solution-Well-Formed | baabdb1990fd49ab82a712e121f49c4f68b29459 | [
"Apache-2.0"
] | null | null | null | string/Python/0067-add-binary-1.py | lemonnader/LeetCode-Solution-Well-Formed | baabdb1990fd49ab82a712e121f49c4f68b29459 | [
"Apache-2.0"
] | 1 | 2021-07-25T07:53:14.000Z | 2021-07-25T07:53:14.000Z | class Solution:
def addBinary(self, a: str, b: str) -> str:
return bin(int(a, 2) + int(b, 2))[2:]
# Ad-hoc demo: run addBinary on a small example, then show the intermediate
# integer values and the raw bin() output it is built from.
if __name__ == '__main__':
    a = "11"
    b = "1"
    solution = Solution()
    result = solution.addBinary(a, b)
    print(result)

    result1 = int(a, 2)
    result2 = int(b, 2)
    print(result1)
    print(result2)
    print(bin(result1 + result2))      # includes the '0b' prefix
    print(bin(result1 + result2)[2:])  # prefix stripped, as addBinary returns
| 20.3 | 47 | 0.561576 | 56 | 406 | 3.928571 | 0.375 | 0.036364 | 0.045455 | 0.2 | 0.231818 | 0 | 0 | 0 | 0 | 0 | 0 | 0.057432 | 0.270936 | 406 | 19 | 48 | 21.368421 | 0.685811 | 0 | 0 | 0 | 0 | 0 | 0.027094 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.066667 | false | 0 | 0 | 0.066667 | 0.2 | 0.333333 | 0 | 0 | 0 | null | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
ff51372a814f4b42e447cbc94944724fd9e86cc6 | 3,017 | py | Python | envergo/evaluations/migrations/0007_auto_20210816_1212.py | MTES-MCT/envergo | 8bb6e4ffa15a39edda51b39401db6cc12e73ad0a | [
"MIT"
] | null | null | null | envergo/evaluations/migrations/0007_auto_20210816_1212.py | MTES-MCT/envergo | 8bb6e4ffa15a39edda51b39401db6cc12e73ad0a | [
"MIT"
] | 6 | 2021-07-12T14:33:18.000Z | 2022-02-14T10:36:09.000Z | envergo/evaluations/migrations/0007_auto_20210816_1212.py | MTES-MCT/envergo | 8bb6e4ffa15a39edda51b39401db6cc12e73ad0a | [
"MIT"
] | null | null | null | # Generated by Django 3.1.12 on 2021-08-16 12:12
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated migration: add location, surface and risk-assessment
    # fields to the Evaluation model. Probability fields share a 1-4 scale
    # (Unlikely / Possible / Likely / Very likely).

    dependencies = [
        ('evaluations', '0006_auto_20210709_1235'),
    ]

    operations = [
        migrations.AddField(
            model_name='evaluation',
            name='commune',
            field=models.CharField(default='', help_text='The name and postcode of the project commune', max_length=256, verbose_name='Commune'),
            preserve_default=False,
        ),
        migrations.AddField(
            model_name='evaluation',
            name='created_surface',
            field=models.IntegerField(default=0, help_text='In square meters', verbose_name='Created surface'),
            preserve_default=False,
        ),
        migrations.AddField(
            model_name='evaluation',
            name='existing_surface',
            field=models.IntegerField(blank=True, help_text='In square meters', null=True, verbose_name='Existing surface'),
        ),
        migrations.AddField(
            model_name='evaluation',
            name='flood_zone_impact',
            field=models.TextField(default='', verbose_name='Flood zone impact'),
            preserve_default=False,
        ),
        migrations.AddField(
            model_name='evaluation',
            name='flood_zone_probability',
            field=models.IntegerField(choices=[(1, 'Unlikely'), (2, 'Possible'), (3, 'Likely'), (4, 'Very likely')], default=2, verbose_name='Flood zone probability'),
            preserve_default=False,
        ),
        migrations.AddField(
            model_name='evaluation',
            name='global_probability',
            field=models.IntegerField(choices=[(1, 'Unlikely'), (2, 'Possible'), (3, 'Likely'), (4, 'Very likely')], default=2, verbose_name='Probability'),
            preserve_default=False,
        ),
        migrations.AddField(
            model_name='evaluation',
            name='rainwater_runoff_impact',
            field=models.TextField(default='', verbose_name='Rainwater runoff impact'),
            preserve_default=False,
        ),
        migrations.AddField(
            model_name='evaluation',
            name='rainwater_runoff_probability',
            field=models.IntegerField(choices=[(1, 'Unlikely'), (2, 'Possible'), (3, 'Likely'), (4, 'Very likely')], default=2, verbose_name='Rainwater runoff probability'),
            preserve_default=False,
        ),
        migrations.AddField(
            model_name='evaluation',
            name='wetland_impact',
            field=models.TextField(default='', verbose_name='Wetland impact'),
            preserve_default=False,
        ),
        migrations.AddField(
            model_name='evaluation',
            name='wetland_probability',
            field=models.IntegerField(choices=[(1, 'Unlikely'), (2, 'Possible'), (3, 'Likely'), (4, 'Very likely')], default=2, verbose_name='Wetland probability'),
            preserve_default=False,
        ),
    ]
| 41.328767 | 173 | 0.598608 | 292 | 3,017 | 6.017123 | 0.246575 | 0.102447 | 0.130905 | 0.153671 | 0.707456 | 0.682413 | 0.659078 | 0.58395 | 0.55037 | 0.480933 | 0 | 0.025431 | 0.270136 | 3,017 | 72 | 174 | 41.902778 | 0.77248 | 0.015247 | 0 | 0.590909 | 1 | 0 | 0.233412 | 0.032334 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.015152 | 0 | 0.060606 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
ff5d7939ba961dd0adaed7bb04d2952b658dec7e | 33,088 | py | Python | lib/devops/supervisor.py | jpotter/angel | 66e98f73621a735c7e5a7f3a70926ad99dd39f3e | [
"Apache-2.0"
] | null | null | null | lib/devops/supervisor.py | jpotter/angel | 66e98f73621a735c7e5a7f3a70926ad99dd39f3e | [
"Apache-2.0"
] | null | null | null | lib/devops/supervisor.py | jpotter/angel | 66e98f73621a735c7e5a7f3a70926ad99dd39f3e | [
"Apache-2.0"
] | null | null | null | import fcntl
import os
import random
import select
import signal
import sys
import time
import traceback
from devops.file_and_dir_helpers import *
from angel.util.pidfile import *
from devops.unix_helpers import set_proc_title
from angel.stats.disk_stats import disk_stats_get_usage_for_path
from devops.process_helpers import *
import angel.settings
# This function is similar to Python's subprocess module, with some tweaks and customizations.
# Like subprocess, it forks a child process, waits for it to exit, and re-starts it on exit. It never returns.
# Our supervisor handles shutdown conditions, calling a stop_func when the supervisor process receives SIGTERM.
# We also handle log rotation, rolling over stdout/stderr when the supervisor process receives SIGWINCH.
# Most other signals are propagated to the child process -- that is, sending the supervisor process SIGHUP will
# be passed through to the child process.
def supervisor_manage_process(config, name, pid_filename_for_daemon, run_as_user, run_as_group, log_basepath,
restart_daemon_on_exit, process_oom_adjustment, init_func, exec_func, stop_func):
''' Creates and manages a child process, running given functions.
- If init_func is defined, it is called in the child process first. If it returns a non-zero status, then supervisor will exit.
- exec_func is then called. If restart_daemon_on_exit is True, exec_func is restarted whenever it exits.
- If stop_func is defined, it is called when this managing process receives a SIGTERM.
- pid_filename_for_daemon is used by this manager process to update status info and track that the manager should be running.
- process_oom_adjustment is a value, typically between -15 and 0, that indicates to the Linux kernel how "important" the process is.
This function never returns.
'''
# Create supervisor logger:
supervisor_logfile_path = launcher_get_logpath(config, log_basepath, 'supervisor')
if 0 != create_dirs_if_needed(os.path.dirname(supervisor_logfile_path), owner_user=run_as_user, owner_group=run_as_group):
print >>sys.stderr, "Supervisor error: unable to create log dirs."
os._exit(0) # Never return
try:
supervisor_logger = SupervisorLogger(open(supervisor_logfile_path, 'a', buffering=0))
except Exception as e:
print >>sys.stderr, "Supervisor error: unable to create supervisor log (%s: %s)." % (supervisor_logfile_path, e)
os._exit(0) # Never return
# Send SIGTERM to the supervisor daemon to tell it to quit the child process and exit.
# Send SIGWINCH to the supervisor daemon to tell it to rotate logs.
# Any other trappable_signal is sent to the child process to do any service-defined logic as necessary.
trappable_signals = (signal.SIGINT, signal.SIGWINCH, signal.SIGHUP, signal.SIGTERM, signal.SIGUSR1, signal.SIGUSR2, signal.SIGQUIT)
global supervisor_daemon_exit_requested
supervisor_daemon_exit_requested = False
global run_init_instead_of_exec
run_init_instead_of_exec = False
set_proc_title('supervisor[%s]: starting' % name)
# Always run supervisor with kernel out-of-memory flags set to hold off on killing us.
# This is reset back up to 0 in the child process (or whatever process_oom_adjustment is set to).
set_process_oom_factor(-15)
supervisor_pid = os.getpid()
child_pid = None
daemon_start_time = int(time.time())
last_start_time = None
start_count = 0
continous_restarts = 0
min_delay_between_continous_restarts = 5
max_delay_between_continous_restarts = 30
restart_delay_jitter = 60 # If we hit max_delay, we'll re-try at some interval between (max_delay - jitter) and (max_delay)
# Define a function that waits for a child pid to exit OR for us to receive a signal:
def _supervisor_daemon_waitpid(pid):
if pid is None or pid < 2:
supervisor_logger.warn("Supervisor[%s]: can't wait on invalid pid %s." % (name, pid))
return -1
try:
# To-do: periodically wake up and check that pid_filename_for_daemon contains our pid, or exit
(wait_pid, wait_exitcode) = os.waitpid(pid, 0)
return (wait_exitcode >> 8) % 256
except OSError:
return -2 # waitpid will throw an OSError when our supervisor recieves a kill signal (i.e. SIGTERM to tell us to exit); our code below will loop and re-call this.
return -3
# Define a function that receives a signal and passes it through to our child process:
def _supervisor_daemon_signal_passthru(signum, frame):
if child_pid is None or child_pid < 2:
# This can happen if the supervised child was *just* killed, or isn't running yet (during a re-spawn).
supervisor_logger.warn("Supervisor: invalid pid %s found during kill -%s of process %s" % (child_pid, signum, name))
return
try:
supervisor_logger.info("_supervisor_daemon_signal_passthru: kill -%s %s" % (signum, child_pid))
os.kill(child_pid, signum)
except Exception as e:
supervisor_logger.error("Supervisor %s[%s/%s managing %s]: unable to send signal %s to pid %s: %s" % (name, supervisor_pid, os.getpid(), child_pid, signum, child_pid, e))
# Define a function that receives a signal and rotates logs:
def _supervisor_daemon_rotate_logs(signum, frame):
supervisor_logger.info("Supervisor %s[%s/%s managing %s]: rotate logs not implemented yet; log_basepath=%s" % (name, supervisor_pid, os.getpid(), child_pid, log_basepath))
# Define a function that receives a signal and cleanly shuts down the server:
def _supervisor_daemon_quit(signum, frame):
    # Clean-shutdown handler (normally bound to SIGTERM): flag the supervisor
    # loop to exit, then stop the child -- via the caller-supplied stop_func
    # when one exists, otherwise by forwarding the signal to the child.
    # Flag that quit has been requested:
    global supervisor_daemon_exit_requested
    supervisor_daemon_exit_requested = True
    if child_pid is None or child_pid < 2:
        # This can happen if the supervised child was *just* killed, or isn't running yet (during a re-spawn).
        supervisor_logger.warn("Supervisor: invalid pid %s found during kill -%s of process %s" % (child_pid, signum, name))
        return
    # Check if we're still in an init phase (can't call stop_func on something that hasn't actually started):
    global run_init_instead_of_exec
    if run_init_instead_of_exec:
        # if we're currently invoking a custom init function, then we need to send the supervisor process the kill signal directly so it exits
        return _supervisor_daemon_signal_passthru(signum, frame)
    # Run stop function if given, otherwise pass along given kill signal to child process:
    if stop_func is not None:
        try:
            import threading
            supervisor_logger.info("Supervisor %s[%s/%s managing %s]: quit request received (sig %s in thread %s); calling stop function" % (name, supervisor_pid, os.getpid(), child_pid, signum, threading.currentThread().name))
            ret_val = stop_func(child_pid)
            supervisor_logger.info("Supervisor %s[%s/%s managing %s]: quit request received (sig %s in thread %s); stop function done (%s)" % (name, supervisor_pid, os.getpid(), child_pid, signum, threading.currentThread().name, ret_val))
            return
        except Exception:
            # Fix: traceback.format_exc() takes an optional int *limit*; the old
            # code passed a traceback object as that limit, which is wrong.
            # On stop_func failure we fall through to the signal passthru below.
            supervisor_logger.error("Supervisor %s[%s/%s managing %s]: error in stop function: %s" % (name, supervisor_pid, os.getpid(), child_pid, traceback.format_exc()))
    else:
        supervisor_logger.warn("Supervisor %s[%s/%s managing %s]: no stop function given" % (name, supervisor_pid, os.getpid(), child_pid))
    return _supervisor_daemon_signal_passthru(signum, frame)
def _install_signal_functions():
    # Route log rotation (SIGWINCH) and clean shutdown (SIGTERM) to their
    # dedicated handlers; every other trappable signal is simply forwarded
    # to the child process.
    dedicated_handlers = {
        signal.SIGWINCH: _supervisor_daemon_rotate_logs,
        signal.SIGTERM: _supervisor_daemon_quit,
    }
    for signum, handler in dedicated_handlers.items():
        signal.signal(signum, handler)
    for sig in trappable_signals:
        if sig not in dedicated_handlers:
            signal.signal(sig, _supervisor_daemon_signal_passthru)
def _remove_signal_functions():
    # Undo _install_signal_functions: hand every trapped signal back to its
    # operating-system default disposition.
    for trapped_signal in trappable_signals:
        signal.signal(trapped_signal, signal.SIG_DFL)
def _sleep_without_signal_functions(duration):
    # Sleep with our custom handlers removed so a delivered signal gets the
    # OS default behavior -- i.e. it can actually interrupt/kill us instead
    # of being swallowed or forwarded to the child.
    # Because there are cases where *we* need to be interrupted:
    _remove_signal_functions()
    time.sleep(duration)
    _install_signal_functions()
# Install signal functions:
_install_signal_functions()

# chdir() to /, to avoid potentially holding a mountpoint open:
os.chdir('/')

# Reset umask:
os.umask(022)  # Python 2 octal literal (0o22): new files group/other-writable bits masked off

# Redirect STDOUT/STDERR:
# (Redirects run as separate threads in our supervisor process -- don't move these to the child process; os.exec will wipe them out.)
os.setsid()
stdout_redirector = SupervisorStreamRedirector(supervisor_logger, launcher_get_logpath(config, log_basepath, ''), run_as_user=run_as_user, run_as_group=run_as_group)
stderr_redirector = SupervisorStreamRedirector(supervisor_logger, launcher_get_logpath(config, log_basepath, 'error'), run_as_user=run_as_user, run_as_group=run_as_group)
supervisor_redirector = SupervisorStreamRedirector(supervisor_logger, launcher_get_logpath(config, log_basepath, 'supervisor'), run_as_user=run_as_user, run_as_group=run_as_group)
stdout_redirector.startRedirectThread(sys.stdout)
stderr_redirector.startRedirectThread(sys.stderr)
supervisor_redirector.startRedirectThread(supervisor_logger.logger_fd)

# Close STDIN:
sys.stdin.close()
os.close(0)
# Immediately re-open /dev/null so the freshly-freed descriptor 0 can't be
# handed out to some other file we open later:
new_stdin = open(os.devnull, 'r', 0) # So FD 0 isn't available
#new_stdin = open(os.devnull, 'r', 0)
#try:
#    os.dup2(new_stdin.fileno(), sys.stdin.fileno())
#except ValueError:
#    print >>sys.stderr, "Can't set up STDIN, was it closed on us?"
# Loop until shutdown requested, handling signals and logs and making sure that our server remains running:
while not supervisor_daemon_exit_requested:

    # Safety check: verify we still own the daemon lock file before doing anything else.
    if not is_pid_in_pidfile_our_pid(pid_filename_for_daemon):
        supervisor_logger.warn("Supervisor[%s/%s]: Warning: invalid pid %s in lock file %s. Re-checking..." % (supervisor_pid, os.getpid(), get_pid_from_pidfile(pid_filename_for_daemon), pid_filename_for_daemon))
        try:
            # Give a possibly in-flight pidfile update a moment to land:
            time.sleep(0.5)
        except:
            pass
        if not is_pid_in_pidfile_our_pid(pid_filename_for_daemon):
            supervisor_logger.error("Supervisor[%s/%s]: FATAL: invalid pid %s in lock file %s. Exiting now." % (supervisor_pid, os.getpid(), get_pid_from_pidfile(pid_filename_for_daemon), pid_filename_for_daemon))
            sys.stdout.flush()
            sys.stderr.flush()
            time.sleep(0.5) # Need to sleep so that logger threads can write out above stderr message. Gross, but it works.
            os._exit(1)

    lockfile_pid = get_pid_from_pidfile(pid_filename_for_daemon)
    if lockfile_pid is None or supervisor_pid != lockfile_pid:
        supervisor_logger.error("Supervisor[%s/%s]: FATAL: lock file %s not owned by current process! (pid is %s) Exiting now." % (supervisor_pid, os.getpid(), pid_filename_for_daemon, lockfile_pid))
        os._exit(1)

    one_time_run = False
    run_init_instead_of_exec = False
    if start_count == 0 and init_func is not None:
        # Very first pass: run the caller-supplied init function in the child
        # instead of exec'ing the actual service.
        run_init_instead_of_exec = True
    if not restart_daemon_on_exit:
        # This is a clever trick: we might want to run a command in the background one-time (i.e. priming a service).
        # By passing restart_daemon_on_exit as false from way up above us in the callstack,
        # we can use our run logic inside the supervisor process and let it exit cleanly.
        # This works by reading one_time_run after we've started and flipping supervisor_daemon_exit_requested to True.
        one_time_run = True

    # Refuse to (re)start the service when disk space is critically low:
    try:
        log_disk_stats = disk_stats_get_usage_for_path(config['LOG_DIR'])
        data_disk_stats = disk_stats_get_usage_for_path(config['DATA_DIR'])
        run_disk_stats = disk_stats_get_usage_for_path(config['RUN_DIR'])
        if log_disk_stats is not None and data_disk_stats is not None and run_disk_stats is not None:
            # Only do this check when we can get stats -- otherwise it's possible to rm -rf log_dir and then have the service die.
            if log_disk_stats['free_mb'] < 100 or data_disk_stats['free_mb'] < 100 or run_disk_stats['free_mb'] < 100:
                supervisor_logger.error("Supervisor[%s/%s]: insufficent disk space to run %s." % (supervisor_pid, os.getpid(), name))
                try:
                    _sleep_without_signal_functions(10)
                except:
                    supervisor_daemon_exit_requested = True
                continue
    except Exception as e:
        supervisor_logger.error("Supervisor[%s/%s]: disk check failed: %s" % (supervisor_pid, os.getpid(), e))

    if child_pid is None and not supervisor_daemon_exit_requested:
        if one_time_run:
            # One-shot mode: arrange for the loop to exit once this child finishes.
            supervisor_daemon_exit_requested = True
        # Then we need to fork and start child process:
        try:
            sys.stdout.flush() # If we have a ' print "Foo", ' statement (e.g. with trailing comma), the forked process ends up with a copy of it, too.
            sys.stderr.flush()
            child_pid = os.fork()
            if child_pid:
                # Parent process:
                supervisor_logger.info("Supervisor[%s/%s]: managing process %s running as pid %s" % (supervisor_pid, os.getpid(), name, child_pid))
                set_proc_title('supervisor: managing %s[%s]' % (name, child_pid))
                prior_child_start_time = last_start_time
                last_start_time = time.time()
                start_count += 1
                if 0 != update_pidfile_data(pid_filename_for_daemon, { \
                    angel.constants.LOCKFILE_DATA_DAEMON_START_TIME: daemon_start_time, \
                    angel.constants.LOCKFILE_DATA_PRIOR_CHILD_START_TIME: prior_child_start_time, \
                    angel.constants.LOCKFILE_DATA_CHILD_START_TIME: int(time.time()), \
                    angel.constants.LOCKFILE_DATA_CHILD_PID: child_pid, \
                    angel.constants.LOCKFILE_DATA_START_COUNT: start_count, \
                    } ):
                    supervisor_logger.error("Supervisor[%s/%s]: error updating pidfile data in pidfile %s" % (supervisor_pid, os.getpid(), pid_filename_for_daemon))
            else:
                # Child process:
                supervisor_logger.info("Supervisor[%s/%s]: running %s" % (supervisor_pid, os.getpid(), name))
                set_proc_title('supervisor: starting %s' % name)
                # Set our process_oom_adjustment, as the parent process ALWAYS has it set to a very low value to avoid the supervisor from being killed:
                set_process_oom_factor(process_oom_adjustment)
                # Drop root privileges (has to be done after oom adjustment):
                if 0 != process_drop_root_permissions(run_as_user, run_as_group):
                    supervisor_logger.error("Supervisor[%s/%s]: error setting user/group to %s/%s in child process." % (supervisor_pid, os.getpid(), run_as_user, run_as_group))
                    os._exit(1)
                # We need to reset the signal handlers so as to NOT trap any signals because exec_func and init_func will have python code that runs within our current process.
                # We have to unset this in the child process; if we set it in the "parent" branch of the if statement, then we'd be missing them on the next loop.
                _remove_signal_functions()
                # If there's an init function, run it instead:
                if run_init_instead_of_exec:
                    set_proc_title('%s worker init' % name)
                    supervisor_logger.info("Supervisor[%s/%s]: starting init for %s" % (supervisor_pid, os.getpid(), name))
                    init_okay = True
                    ret_val = None
                    try:
                        ret_val = init_func()
                    except Exception as e:
                        supervisor_logger.error("Error in init function: %s; bailing." % e)
                        init_okay = False
                    if type(ret_val) is not int:
                        # NOTE(review): one %s placeholder but two format args -- this
                        # warn() raises TypeError if it is ever reached.
                        supervisor_logger.warn("Warning: init_func for %s returned non-int; please return 0 on success; non-zero otherwise; or throw an exception." % (name, ret_val))
                    else:
                        if ret_val != 0:
                            init_okay = False
                    if not init_okay:
                        supervisor_logger.error("Supervisor[%s/%s]: FATAL: init failed for %s" % (supervisor_pid, os.getpid(), name))
                        # Tell the supervisor (our parent) to shut down too:
                        os.kill(supervisor_pid, signal.SIGTERM)
                    else:
                        supervisor_logger.info("Supervisor[%s/%s]: init finished for %s" % (supervisor_pid, os.getpid(), name))
                    # NOTE(review): ret_val may be None here (init_func threw) --
                    # os._exit() requires an int; confirm intended behavior.
                    os._exit(ret_val) # Exit child process; supervisor will pick
                # Run the exec function:
                set_proc_title('%s worker' % name)
                try:
                    exec_func() # This should be a function that calls os.exec and replaces our current process
                except Exception as e:
                    supervisor_logger.error("Error in exec function: %s" % e)
                # exec_func returning at all means the os.exec never happened:
                supervisor_logger.error("MAJOR ERROR: Supervisor[%s/%s]: function for %s unexepectedly returned." % (supervisor_pid, os.getpid(), name))
                os._exit(2)
        except Exception as e:
            supervisor_logger.error("Supervisor[%s/%s]: child process failed (%s)." % (supervisor_pid, os.getpid(), e))
            try:
                _sleep_without_signal_functions(10) # Sleep in child to prevent parent from rapidly re-spawning
            except:
                pass
            continue

    if child_pid is None:
        supervisor_logger.error("Supervisor[%s/%s]: child process setup failed (supervisor_daemon_exit_requested: %s)." % (supervisor_pid, os.getpid(), supervisor_daemon_exit_requested))
        try:
            _sleep_without_signal_functions(10) # Sleep in child to prevent parent from rapidly re-spawning
        except:
            supervisor_daemon_exit_requested = True
        continue

    # The parent process needs to wait for the child process to exit:
    wait_exitcode = _supervisor_daemon_waitpid(child_pid)
    set_proc_title('supervisor: managing %s[%s exited %s]' % (name, child_pid, wait_exitcode))
    if run_init_instead_of_exec:
        supervisor_logger.info("Supervisor[%s/%s]: init function finished." % (supervisor_pid, os.getpid()))
        child_pid = None
        continue
    if supervisor_daemon_exit_requested:
        set_proc_title('supervisor: managing %s[%s exited %s for exit]' % (name, child_pid, wait_exitcode))
        if one_time_run:
            supervisor_logger.info('Supervisor[%s/%s]: %s[%s] exited (exit code %s) for one-time run.' % (supervisor_pid, os.getpid(), name, child_pid, wait_exitcode))
        else:
            supervisor_logger.info('Supervisor[%s/%s]: %s[%s] exited (exit code %s) for shutdown.' % (supervisor_pid, os.getpid(), name, child_pid, wait_exitcode))
        break

    # The wait-for-child logic above may have returned early due to a signal that we received and passed off to child or otherwise handled.
    # Only reset stuff for a restart if the child process actually exited (i.e. waitpid() returned because the child exited, not because the parent received a signal):
    if not is_pid_running(child_pid):
        set_proc_title('supervisor: restarting %s' % (name))
        this_run_duration = time.time() - last_start_time
        # Re-try service starts no faster than some minimum interval, backing off to some maximum interval so lengthy outages don't trigger a sudden spike
        delay_until_next_restart = 0
        if continous_restarts > 0:
            # Backoff grows ~10s per consecutive quick restart, minus however
            # long this run survived, plus a little jitter; clamped to
            # [min_delay, max_delay] (the max clamp subtracts jitter).
            delay_until_next_restart = min_delay_between_continous_restarts + (continous_restarts - 1) * 10 - this_run_duration + 2*random.random()
            if delay_until_next_restart < min_delay_between_continous_restarts:
                delay_until_next_restart = min_delay_between_continous_restarts + 2*random.random()
            if delay_until_next_restart > max_delay_between_continous_restarts:
                delay_until_next_restart = max_delay_between_continous_restarts - random.random() * restart_delay_jitter
        supervisor_logger.error('Supervisor[%s/%s]: %s[%s] unexpected exit (exit code %s) after %s seconds on run number %s, waiting %s seconds before restarting.' %
                                (supervisor_pid, os.getpid(), name, child_pid, wait_exitcode, this_run_duration, start_count, delay_until_next_restart))
        supervisor_logger.error('Supervisor[%s/%s]: more info: run_init_instead_of_exec: %s; restart_daemon_on_exit: %s' %
                                (supervisor_pid, os.getpid(), run_init_instead_of_exec, restart_daemon_on_exit))
        child_pid = None
        if this_run_duration < max_delay_between_continous_restarts:
            # Child died quickly -- count it as a continuous restart and wait out the backoff:
            continous_restarts += 1
            try:
                time_left = delay_until_next_restart
                while time_left > 0:
                    # spit out a log every few seconds so we can see what's going on in the logs -- otherwise it looks wedged:
                    supervisor_logger.error('Supervisor[%s/%s]: %s[%s] waiting %s seconds.' % (supervisor_pid, os.getpid(), name, child_pid, int(time_left)))
                    sleep_time = 5
                    if sleep_time > time_left:
                        sleep_time = time_left
                    _sleep_without_signal_functions(sleep_time)
                    time_left -= sleep_time
            except Exception as e:
                supervisor_logger.error('Supervisor[%s/%s]: %s had exception while waiting; bailing (%s).' % (supervisor_pid, os.getpid(), name, e))
                supervisor_daemon_exit_requested = True
        else:
            # Child ran for a while before dying; reset the backoff counter.
            continous_restarts = 0
# We'll only exit above loop when supervisor_daemon_exit_requested is true.
# We keep running until the child process exits, otherwise there's no way
# for the outside world to send further signals to the process.
while is_pid_running(child_pid):
    try:
        # While we can still send signals to the supervisor process, wait on it
        set_proc_title('supervisor: waiting for exit %s[%s]' % (name, child_pid))
        supervisor_logger.info("Supervisor[%s/%s]: waiting for exit %s[%s]" % (supervisor_pid, os.getpid(), name, child_pid))
        _supervisor_daemon_waitpid(child_pid)
    except OSError:
        pass

set_proc_title('supervisor: finished monitoring %s[%s]; closing logfiles' % (name, child_pid))
supervisor_logger.info("Supervisor[%s/%s] finished monitoring %s[%s]; exiting" % (supervisor_pid, os.getpid(), name, child_pid))

# Release the daemon lock file now that we're done:
if os.path.isfile(pid_filename_for_daemon):
    # The pid file really should exist, but if it doesn't, there's not a lot we can do anyway, and logging it would just add noise.
    os.remove(pid_filename_for_daemon)
else:
    supervisor_logger.warn("Supervisor[%s/%s]: no lockfile at %s to remove, oh well." % (supervisor_pid, os.getpid(), pid_filename_for_daemon))

# Stop logging threads:
stdout_redirector.stopRedirectThread()
stderr_redirector.stopRedirectThread()
supervisor_redirector.stopRedirectThread()

# Do not return from this function -- and use os._exit instead of sys.exit to nuke any stray threads:
os._exit(0)
# For use by supervisor only -- please consider this 'private' to supervisor.
class SupervisorLogger():
    # Deliberately minimal, dependency-free logger (yes, re-inventing the
    # wheel): writes "<timestamp>, <level>, <message>" lines to a file-like
    # object and flushes after every record.

    logger_fd = None  # destination; must support write() and flush()

    def __init__(self, logger_fd):
        self.logger_fd = logger_fd

    def log(self, level, message):
        # Single write per record, followed by an immediate flush so messages
        # survive an abrupt process exit.
        record = "%s, %s, %s\n" % (time.time(), level, message)
        self.logger_fd.write(record)
        self.logger_fd.flush()

    def info(self, message):
        self.log('info', message)

    def warn(self, message):
        self.log('warn', message)

    def error(self, message):
        self.log('error', message)
# For use by supervisor only -- please consider this 'private' to supervisor.
from threading import Thread
class SupervisorStreamRedirector(Thread):
    """Background thread that captures a process-wide output stream (by
    dup2()'ing the stream's fd onto an os.pipe()) and appends the data, line
    by line, to a logfile -- re-opening the logfile automatically when it is
    rotated or deleted out from under us."""

    supervisor_logger = None   # SupervisorLogger used for our own diagnostics
    log_data_source = None     # read end of the pipe (what the redirected stream feeds)
    stop_event = None          # threading.Event; set to ask the thread to drain and exit
    run_as_user = None         # ownership applied when (re)creating the log directory
    run_as_group = None
    logfile_inode = None       # inode of the currently-open logfile, for rotation detection
    logfile_dir = None
    logfile_path = None
    logfile_fd = None          # file object currently being appended to (None until opened)

    def __init__(self, supervisor_logger, logfile_path, run_as_user=None, run_as_group=None):
        Thread.__init__(self)
        self.supervisor_logger = supervisor_logger
        self.logfile_path = logfile_path
        self.logfile_dir = os.path.dirname(self.logfile_path)
        self.run_as_user = run_as_user
        self.run_as_group = run_as_group
        self._create_logdir()

    def startRedirectThread(self, data_stream):
        # Swap data_stream's file descriptor for our pipe and start the reader
        # thread. Returns 0 on success, a distinct negative code per failure mode.
        if self.stop_event:
            if self.supervisor_logger is not None:
                self.supervisor_logger.warn("SupervisorStreamRedirector: redirect already started?")
            return -4
        self.stop_event = threading.Event()
        try:
            reader, writer = os.pipe()
            self.log_data_source = os.fdopen(reader, 'rb', 0)
            original_output_dest = os.fdopen(writer, 'wb', 0)
            # Flip on non-blocking, otherwise calls to select.select() will block:
            flags = fcntl.fcntl(original_output_dest, fcntl.F_GETFL)
            fcntl.fcntl(original_output_dest, fcntl.F_SETFL, flags | os.O_NONBLOCK)
            flags = fcntl.fcntl(self.log_data_source, fcntl.F_GETFL)
            fcntl.fcntl(self.log_data_source, fcntl.F_SETFL, flags | os.O_NONBLOCK)
            data_stream.flush()
            # From here on, anything written to data_stream lands in our pipe:
            os.dup2(original_output_dest.fileno(), data_stream.fileno())
        except Exception as e:
            if self.supervisor_logger is not None:
                self.supervisor_logger.warn("SupervisorStreamRedirector: error setting up file streams for redirect: %s" % e)
            return -5
        try:
            self.start()
        except Exception as e:
            if self.supervisor_logger is not None:
                self.supervisor_logger.warn("SupervisorStreamRedirector: error starting redirect thread: %s" % e)
            return -6
        return 0

    def stopRedirectThread(self):
        # Ask the reader thread (see run()) to drain any remaining data and exit.
        if self.stop_event:
            self.stop_event.set()
        else:
            if self.supervisor_logger is not None:
                self.supervisor_logger.warn("SupervisorStreamRedirector: stop_logger not running? (%s)" % self.stop_event)

    def _filter_lines(self, lines):
        ''' Given an array of lines, return a filtered / altered string as desired. '''
        # The intent here is to someday pass an object in that implements the filter, so that
        # sensative strings can be filtered out of the log files before getting written to disk
        # and then sent across the wire via logstash or what have you.
        # For now, we do a no-op.
        if len(lines) == 0:
            return ''
        return '\n'.join(lines) + '\n'
        # Here's an example that would timestamp every line:
        #if len(lines) == 0:
        #    return ''
        #line_beginning = '%11.1f ' % (time.time())
        #line_ending = '\n'
        #return line_beginning + (line_ending + line_beginning).join(lines) + line_ending

    def _create_logdir(self):
        # Make sure the directory holding our logfile exists, with the requested ownership.
        if 0 != create_dirs_if_needed(self.logfile_dir, owner_user=self.run_as_user, owner_group=self.run_as_group):
            self.supervisor_logger.error("SupervisorStreamRedirector[%s]: unable to create logdir %s" % (os.getpid(), self.logfile_path))
            return -7
        return 0

    def _reset_logfile(self):
        # (Re)open the logfile in append mode; called at first write and whenever
        # the on-disk file looks rotated/replaced (its inode changed).
        if 0 != self._create_logdir():
            return -8
        try:
            if os.path.exists(self.logfile_path):
                # Refuse to follow symlinks or write through non-regular files:
                if os.path.islink(self.logfile_path) or not os.path.isfile(self.logfile_path):
                    self.supervisor_logger.error("SupervisorStreamRedirector: invalid file at logfile path %s" % self.logfile_path)
                    return -9
            new_fh = open(self.logfile_path, 'a')
            if self.logfile_fd is not None:
                self.logfile_fd.close()
            self.logfile_fd = new_fh
            self.logfile_inode = os.stat(self.logfile_path).st_ino
            self.supervisor_logger.info("SupervisorStreamRedirector[%s]: writing to logfile %s" % (os.getpid(), self.logfile_path))
        except Exception as e:
            self.supervisor_logger.error("SupervisorStreamRedirector[%s]: unable to open logfile %s: %s" % (os.getpid(), self.logfile_path, e))
            return -10
        return 0

    def run(self):
        # Reader loop: poll the pipe, split incoming data into lines, and append
        # them to the logfile. Keeps running until stop_event is set AND the pipe
        # has drained (last_read_size hits 0).
        okay_to_run = True
        last_read_size = 0
        last_remainder = ''   # trailing partial line carried over between reads
        while okay_to_run or last_read_size > 0:
            if self.stop_event.is_set():
                self.supervisor_logger.info("SupervisorStreamRedirector[%s]: stopping logger at %s" % (os.getpid(), self.logfile_path))
                okay_to_run = False
                self.stop_event.clear()
            try:
                # Don't use readline() -- it blocks, and there's no way for the main thread
                # to tell the logger thread to exit while the i/o call is blocked. Sigh.
                [rlist, wlist, xlist] = select.select([self.log_data_source], [], [], 0.25)
                if not os.path.exists(self.logfile_dir):
                    # Re-create the logdir if it goes missing -- do this check every pass through,
                    # so that if logdir gets completely reset we instantly recreate the path for other
                    # processes which might also depend on it.
                    self._create_logdir()
                if not rlist:
                    last_read_size = 0
                else:
                    data = self.log_data_source.read(1024)
                    last_read_size = len(data)
                    # We split the data into lines so that we can filter sensative stings out, and potentially do some line-based formatting.
                    # Because we're not using readline (due to blocking reasons), we have to split the data into lines, and carry over the remainder
                    # of the last line (if it's mid-line) to the next pass through the loop.
                    lines = data.split('\n')
                    if data.endswith('\n'):
                        lines = lines[:-1]
                    if len(last_remainder):
                        lines[0] = last_remainder + lines[0]
                        last_remainder = ''
                    if not data.endswith('\n'):
                        last_remainder = lines[-1]
                        lines = lines[:-1]
                    try:
                        # Detect logfile rotation/replacement by comparing inodes:
                        current_inode = os.stat(self.logfile_path).st_ino
                        if self.logfile_inode != current_inode:
                            self._reset_logfile()
                    except:
                        self._reset_logfile()
                    if self.logfile_fd is not None:
                        self.logfile_fd.write(self._filter_lines(lines))
                        if not okay_to_run and len(last_remainder):
                            # Then it's our last loop through -- purge out the remainder:
                            # NOTE(review): this passes a plain string where _filter_lines
                            # expects a list; '\n'.join() over a string inserts a newline
                            # between every character -- looks like a bug.
                            self.logfile_fd.write(self._filter_lines(last_remainder,))
                            last_remainder = ''
                        self.logfile_fd.flush()
            except Exception as e:
                self.supervisor_logger.error("SupervisorStreamRedirector: error in log thread: %s" % e)
        self.supervisor_logger.info("SupervisorStreamRedirector stopping; closing %s." % self.logfile_path)
        # NOTE(review): logfile_fd may still be None here if the logfile never
        # opened successfully, which would raise AttributeError -- confirm.
        self.logfile_fd.flush()
        self.logfile_fd.close()
        # Allow a subsequent startRedirectThread() call:
        self.stop_event = None
| 53.977162 | 242 | 0.634913 | 4,304 | 33,088 | 4.663104 | 0.141961 | 0.050224 | 0.023916 | 0.033483 | 0.423767 | 0.36283 | 0.293523 | 0.230394 | 0.179322 | 0.118336 | 0 | 0.004898 | 0.284272 | 33,088 | 612 | 243 | 54.065359 | 0.842581 | 0.214882 | 0 | 0.322892 | 0 | 0.024096 | 0.14015 | 0.017115 | 0 | 0 | 0 | 0 | 0 | 0 | null | null | 0.019277 | 0.038554 | null | null | 0.004819 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
ff65c0e6971ce7212c2e0454417283a2f36e752e | 1,886 | py | Python | Eval/model.py | gabbar-08/Coupon-Redemption-Prediction | 98a65f1ca2c14bf9d6430aeebcb1594b20211bcf | [
"MIT"
] | null | null | null | Eval/model.py | gabbar-08/Coupon-Redemption-Prediction | 98a65f1ca2c14bf9d6430aeebcb1594b20211bcf | [
"MIT"
] | null | null | null | Eval/model.py | gabbar-08/Coupon-Redemption-Prediction | 98a65f1ca2c14bf9d6430aeebcb1594b20211bcf | [
"MIT"
] | null | null | null |
#Step 1 :- Importing dependancies and train test data generated
from config import *
train_data = pd.read_csv("data/train_data/train_feature.csv")
test_data = pd.read_csv("data/test_data/test_feature.csv")
#Step 2 :- Getting train data insights and drop unnecessary columns, Splitting data into input and target variable sets.
print(list(train_data['redemption_status']).count(0) * 100 / len(train_data['redemption_status']), "% coupons not redeemed in training data ")
X = train_data
X.dropna(inplace=True)
X.drop(["id","campaign_id","c_freq_category","c_rare_category","start_date","end_date","duration","age_range","overall_freq_category","overall_rare_category"], axis=1,inplace=True)
y = train_data['redemption_status']
X.drop('redemption_status',axis = 1, inplace = True)
#Step 3 :- Train-test Split for the model
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.33, random_state=42)
#Step 4 :- Initiate model and fit transform
model = GaussianNB()
model.fit(X_train, y_train)
#Step 5 :- Predict on the test part of the split
y_pred = model.predict(X_test)
#Step 6 :- Save the model for the inference engine
filename = 'model/finalized_model_2.sav'
pickle.dump(model, open(filename, 'wb'))
#Step 7 :- Calculate Training data accuracy of the model
print("Accuracy:",metrics.accuracy_score(y_test, y_pred))
#Step 8 :- Use the model on test data to predict the target in test data
Y = test_data
Y.drop(["id","campaign_id","c_freq_category","c_rare_category","start_date","end_date","duration","age_range","overall_freq_category","overall_rare_category"], axis=1,inplace=True)
Y.dropna(inplace = True)
Predictions = model.predict(Y)
# Print results
print(list(Predictions).count(0) * 100 / len(Predictions) , "% Coupans not redeemed in Test Data" )
| 41.911111 | 181 | 0.742842 | 296 | 1,886 | 4.527027 | 0.35473 | 0.041791 | 0.042537 | 0.05597 | 0.21791 | 0.192537 | 0.192537 | 0.192537 | 0.192537 | 0.192537 | 0 | 0.015263 | 0.131495 | 1,886 | 44 | 182 | 42.863636 | 0.802808 | 0.265642 | 0 | 0 | 1 | 0 | 0.365139 | 0.13148 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.090909 | 0 | 0.090909 | 0.136364 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
ff6d9a9eb43a919d390d165fa0fb6535151149e1 | 5,160 | py | Python | template/template/scripts/launcher.py | chrismear/renios | 8eed53054fb4361eecb09e3c0eb3e26fd76d0cf0 | [
"CC-BY-3.0"
] | 15 | 2015-02-01T12:31:41.000Z | 2021-08-07T01:17:10.000Z | template/template/scripts/launcher.py | chrismear/renios | 8eed53054fb4361eecb09e3c0eb3e26fd76d0cf0 | [
"CC-BY-3.0"
] | null | null | null | template/template/scripts/launcher.py | chrismear/renios | 8eed53054fb4361eecb09e3c0eb3e26fd76d0cf0 | [
"CC-BY-3.0"
] | 4 | 2015-01-23T00:00:14.000Z | 2019-12-02T15:15:28.000Z | #!/usr/bin/env python
#@PydevCodeAnalysisIgnore
# Copyright 2004-2012 Tom Rothamel <pytom@bishoujo.us>
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
import os
import sys
import warnings
# Functions to be customized by distributors. ################################
# Given the Ren'Py base directory (usually the directory containing
# this file), this is expected to return the path to the common directory.
def path_to_common(renpy_base):
return renpy_base + "/common"
# Given a directory holding a Ren'Py game, this is expected to return
# the path to a directory that will hold save files.
def path_to_saves(gamedir):
    import renpy #@UnresolvedImport

    # No configured save directory name: keep saves next to the game itself.
    if not renpy.config.save_directory:
        return gamedir + "/saves"

    # Search the path above Ren'Py for a directory named "Ren'Py Data".
    # If it exists, then use that for our save directory.
    path = renpy.config.renpy_base
    while True:
        if os.path.isdir(path + "/Ren'Py Data"):
            return path + "/Ren'Py Data/" + renpy.config.save_directory
        newpath = os.path.dirname(path)
        if path == newpath:
            # Reached the filesystem root without finding "Ren'Py Data".
            break
        path = newpath

    # Otherwise, put the saves in a platform-specific location.
    # (renpy.android / renpy.macintosh / renpy.windows are platform flags
    # set by the renpy module -- presumably booleans; confirm in renpy source.)
    if renpy.android:
        return gamedir + "/saves"
    elif renpy.macintosh:
        rv = "~/Library/RenPy/" + renpy.config.save_directory
        return os.path.expanduser(rv)
    elif renpy.windows:
        if 'APPDATA' in os.environ:
            return os.environ['APPDATA'] + "/RenPy/" + renpy.config.save_directory
        else:
            rv = "~/RenPy/" + renpy.config.save_directory
            return os.path.expanduser(rv)
    else:
        # Generic unix fallback.
        rv = "~/.renpy/" + renpy.config.save_directory
        return os.path.expanduser(rv)
# Returns the path to the Ren'Py base directory (containing common and
# the launcher, usually.)
def path_to_renpy_base():
    """Return the absolute Ren'Py base directory path.

    Defaults to the directory containing the launched script; the
    RENPY_BASE environment variable, when set, overrides it.
    """
    base = os.path.dirname(sys.argv[0])
    base = os.environ.get('RENPY_BASE', base)
    return os.path.abspath(base)
##############################################################################

# The version of the Mac Launcher and py4renpy that we require.
macos_version = (6, 14, 0)
linux_version = (6, 14, 0)

# Doing the version check this way also doubles as an import of ast,
# which helps py2exe et al.
try:
    import ast; ast
except:
    # NOTE(review): this `raise` re-raises immediately, making the friendly
    # version message below unreachable -- confirm whether it was left in
    # from debugging.
    raise
    print "Ren'Py requires at least python 2.6."
    sys.exit(0)

android = ("ANDROID_PRIVATE" in os.environ)

# Android requires us to add code to the main module, and to command some
# renderers.
if android:
    # Ren'Py's Android bootstrap looks these helpers up on the __main__ module:
    __main__ = sys.modules["__main__"]
    __main__.path_to_renpy_base = path_to_renpy_base
    __main__.path_to_common = path_to_common
    __main__.path_to_saves = path_to_saves
    # Force the GL renderer in its "limited" environment on Android:
    os.environ["RENPY_RENDERER"] = "gl"
    os.environ["RENPY_GL_ENVIRON"] = "limited"

#print "Ren'iOS: forcing renderer settings"
#os.environ["RENPY_RENDERER"] = "gl"
#os.environ["RENPY_GL_ENVIRON"] = "shader_es"
def main():
    # Launcher entry point: set up sys.path around the Ren'Py base directory,
    # then hand control to renpy.bootstrap (which never returns normally).
    renpy_base = path_to_renpy_base()

    # Add paths.
    if os.path.exists(renpy_base + "/module"):
        sys.path.append(renpy_base + "/module")
    sys.path.append(renpy_base)

    # This is looked for by the mac launcher.
    if os.path.exists(renpy_base + "/renpy.zip"):
        sys.path.append(renpy_base + "/renpy.zip")

    # Ignore warnings that happen.
    warnings.simplefilter("ignore", DeprecationWarning)

    # Start Ren'Py proper.
    try:
        import renpy.bootstrap
    except ImportError:
        print >>sys.stderr, "Could not import renpy.bootstrap. Please ensure you decompressed Ren'Py"
        print >>sys.stderr, "correctly, preserving the directory structure."
        raise

    if android:
        # Correct the platform flags: this launcher variant runs on Android.
        renpy.linux = False
        renpy.android = True

    renpy.bootstrap.bootstrap(renpy_base)
#import profile
#profile.run('main()')

#print "Test STDOUT"
#
#import trace
#tracer = trace.Trace(
#    ignoredirs=[sys.prefix, sys.exec_prefix],
#    trace=1)
#tracer.run('main()')

# Script entry point.
if __name__ == "__main__":
    main()
| 31.463415 | 101 | 0.673256 | 706 | 5,160 | 4.798867 | 0.349858 | 0.055785 | 0.026564 | 0.042503 | 0.205431 | 0.154959 | 0.115702 | 0.115702 | 0.078217 | 0.078217 | 0 | 0.005678 | 0.214922 | 5,160 | 163 | 102 | 31.656442 | 0.830659 | 0.458333 | 0 | 0.180556 | 0 | 0 | 0.141118 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | null | 0 | 0.111111 | null | null | 0.041667 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
ff6fd947f9d7fe716c3704fa2463950dd4e84bb7 | 402 | py | Python | setup.py | leandrocorreasantos/rapidapi_criptobot_br | 4102fed414708dae101c1d809c2723f7e27f272a | [
"MIT"
] | null | null | null | setup.py | leandrocorreasantos/rapidapi_criptobot_br | 4102fed414708dae101c1d809c2723f7e27f272a | [
"MIT"
] | null | null | null | setup.py | leandrocorreasantos/rapidapi_criptobot_br | 4102fed414708dae101c1d809c2723f7e27f272a | [
"MIT"
] | null | null | null | #!/usr/bin/env python
from setuptools import setup
# Package metadata for the rapidapi-criptobot-br client.
setup(
    name='rapidapi-criptobot-br',
    version='1.1.0',
    packages=['api'],
    description='class to access criptobot-br on rapidapi',
    author='Leandro Correa dos Santos',
    author_email='leandro.admo@gmail.com',
    # 'json' removed from install_requires: it is part of the Python standard
    # library and there is no installable PyPI distribution named "json", so
    # listing it made `pip install` of this package fail.
    install_requires=['requests'],
    keywords='cripto currency robot rapidapi api exchange technical analysis'
)
| 28.714286 | 77 | 0.706468 | 51 | 402 | 5.529412 | 0.823529 | 0.078014 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.008902 | 0.161692 | 402 | 13 | 78 | 30.923077 | 0.827893 | 0.049751 | 0 | 0 | 0 | 0 | 0.498688 | 0.112861 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 0.090909 | 0 | 0.090909 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
ff8d466cb78def77047b6716960d07d37e514d93 | 3,805 | py | Python | server02.py | timgates42/csdesign | dd63c304e1b16ecd65bea072f9360986993df845 | [
"MIT"
] | 116 | 2015-01-07T08:39:32.000Z | 2021-12-21T13:07:53.000Z | server02.py | afcarl/csdesign | dd63c304e1b16ecd65bea072f9360986993df845 | [
"MIT"
] | null | null | null | server02.py | afcarl/csdesign | dd63c304e1b16ecd65bea072f9360986993df845 | [
"MIT"
] | 28 | 2015-01-07T07:27:16.000Z | 2021-07-17T22:26:37.000Z | ###############################################################################
#
# Copyright (c) 2012 Ruslan Spivak
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
###############################################################################
"""
TCP Concurrent Server, I/O Multiplexing (select).
Single server process to handle any number of clients.
"""
__author__ = 'Ruslan Spivak <ruslan.spivak@gmail.com>'
import os
import sys
import errno
import select
import socket
import optparse
BACKLOG = 5
def serve_forever(host, port):
# create, bind. listen
lstsock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
# re-use the port
lstsock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
# put listening socket into non-blocking mode
lstsock.setblocking(0)
lstsock.bind((host, port))
lstsock.listen(BACKLOG)
print 'Listening on port %d ...' % port
# read, write, exception lists with sockets to poll
rlist, wlist, elist = [lstsock], [], []
while True:
# block in select
readables, writables, exceptions = select.select(rlist, wlist, elist)
for sock in readables:
if sock is lstsock: # new client connection, we can accept now
try:
conn, client_address = lstsock.accept()
except IOError as e:
code, msg = e.args
if code == errno.EINTR:
continue
else:
raise
# add the new connection to the 'read' list to poll
# in the next loop cycle
rlist.append(conn)
else:
# read a line that tells us how many bytes to write
bytes = sock.recv(1024)
if not bytes: # connection closed by client
sock.close()
rlist.remove(sock)
else:
print ('Got request to send %s bytes. '
'Sending them all...' % bytes)
# send them all
# XXX: this is cheating, we should use 'select' and wlist
# to determine whether socket is ready to be written to
data = os.urandom(int(bytes))
sock.sendall(data)
def main():
    """Parse command-line options and hand off to serve_forever()."""
    option_specs = (
        (('-i', '--host'),
         dict(dest='host', default='0.0.0.0',
              help='Hostname or IP address. Default is 0.0.0.0')),
        (('-p', '--port'),
         dict(dest='port', type='int', default=2000,
              help='Port. Default is 2000')),
    )
    parser = optparse.OptionParser()
    for flags, kwargs in option_specs:
        parser.add_option(*flags, **kwargs)
    opts, _unused_args = parser.parse_args()
    serve_forever(opts.host, opts.port)


if __name__ == '__main__':
    main()
| 34.279279 | 79 | 0.59343 | 467 | 3,805 | 4.788009 | 0.496788 | 0.039356 | 0.005367 | 0.003578 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.009963 | 0.287779 | 3,805 | 110 | 80 | 34.590909 | 0.815129 | 0.398423 | 0 | 0.096154 | 0 | 0 | 0.109541 | 0.01262 | 0 | 0 | 0 | 0 | 0 | 0 | null | null | 0 | 0.115385 | null | null | 0.038462 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
ff90cad1c5012214e02b97154f23b7b726789505 | 628 | py | Python | ipsc/2016/g.py | csferng/competitive-programming | f7ae710392c0fd606f735df6efcfca5e8fbd3501 | [
"MIT"
] | null | null | null | ipsc/2016/g.py | csferng/competitive-programming | f7ae710392c0fd606f735df6efcfca5e8fbd3501 | [
"MIT"
] | null | null | null | ipsc/2016/g.py | csferng/competitive-programming | f7ae710392c0fd606f735df6efcfca5e8fbd3501 | [
"MIT"
] | null | null | null | import collections
import itertools
import re
import sys
def read_str():
    """Read one line from stdin, stripped of surrounding whitespace."""
    return sys.stdin.readline().strip()


def read_str_list():
    """Read one line from stdin and split it on whitespace."""
    return sys.stdin.readline().strip().split()


def read_int():
    return int(read_str())


def read_int_list():
    return map(int, read_str_list())


def read_float():
    return float(read_str())


def read_float_list():
    return map(float, read_str_list())
def solve(S):
    """Concatenate every digit of S and strip leading zeros.

    A digit string consisting only of zeros collapses to "0"; a string
    containing no digits at all yields "".
    """
    digits = ''.join(re.findall(r'[0-9]+', S))
    trimmed = digits.lstrip('0')
    if trimmed:
        return trimmed
    # Either no digits were found ("") or every digit was zero ("0").
    return digits[:1]
def main():
    """Read T test cases from stdin and print solve() of each.

    Python 2 source (``print`` statement, ``xrange``).
    """
    T = read_int()
    for _ in xrange(T):
        # First read skips a line before each case -- presumably a blank
        # separator in the input format; TODO confirm against the problem
        # statement.
        read_str()
        S = read_str()
        ans = solve(S)
        print '%s' % (ans)


if __name__ == "__main__":
    main()
| 21.655172 | 61 | 0.659236 | 102 | 628 | 3.794118 | 0.382353 | 0.144703 | 0.085271 | 0.113695 | 0.139535 | 0 | 0 | 0 | 0 | 0 | 0 | 0.01145 | 0.165605 | 628 | 28 | 62 | 22.428571 | 0.727099 | 0 | 0 | 0 | 0 | 0 | 0.02707 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | null | 0 | 0.166667 | null | null | 0.041667 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
ff95ea9c8a58457600ab644dcc9310c524b6fa2e | 11,719 | py | Python | nova/scheduler/solvers/pluggable_hosts_pulp_solver.py | rishabh1jain1/schedwise | b8c56362b11767cc583ea96b42dc28c408bb76cd | [
"Apache-2.0"
] | null | null | null | nova/scheduler/solvers/pluggable_hosts_pulp_solver.py | rishabh1jain1/schedwise | b8c56362b11767cc583ea96b42dc28c408bb76cd | [
"Apache-2.0"
] | 5 | 2020-06-05T17:58:28.000Z | 2022-02-11T03:39:35.000Z | nova/scheduler/solvers/pluggable_hosts_pulp_solver.py | rishabh1jain1/schedwise | b8c56362b11767cc583ea96b42dc28c408bb76cd | [
"Apache-2.0"
] | null | null | null | # Copyright (c) 2014 Cisco Systems, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from pulp import constants
from pulp import pulp
from nova.openstack.common.gettextutils import _
from nova.openstack.common import log as logging
from nova.scheduler import solvers as scheduler_solver
LOG = logging.getLogger(__name__)
class HostsPulpSolver(scheduler_solver.BaseHostSolver):
    """A LP based pluggable LP solver implemented using PULP modeler.

    Formulates host/instance placement as a 0-1 integer program: one
    binary variable per (host, instance) pair, a weighted cost objective,
    plus extra affinity/anti-affinity constraints selected by
    ``filter_properties['instance_type']['constraint']``.

    Python 2 source (``print`` statements, ``xrange``).
    """

    def __init__(self):
        # Cost/constraint classes and weights come from the BaseHostSolver
        # configuration helpers.
        self.cost_classes = self._get_cost_classes()
        self.constraint_classes = self._get_constraint_classes()
        self.cost_weights = self._get_cost_weights()

    def update_with_soft_affinity_constraints_and_objective(self, variables, prob, num_hosts, num_instances):
        """Add per-host usage indicators and a pairwise host-distance objective.

        ``column_sum_var[i]`` is forced to 1 iff any instance is placed on
        host i; ``z_variables`` are pairwise AND indicators of two hosts
        both being used.  Returns the updated problem.
        """
        # Adding column sum variables whose value is 1 if there is any instance on that host
        column_sum_var = []
        for i in range(num_hosts):
            column_sum_var.append(pulp.LpVariable("Normalised_Column_Sum_Host_" + str(i), 0, 1, constants.LpInteger))
        # Adding normalisation constraint (links column_sum_var[i] to the
        # instance variables of host i in both directions)
        for i in range(num_hosts):
            prob += pulp.lpSum([variables[i][j]] for j in range(num_instances)) <= num_instances * column_sum_var[i]
            prob += column_sum_var[i] <= pulp.lpSum([variables[i][j]] for j in range(num_instances))
        z_variables = []
        # Adding 'z' variables: one per ordered host pair (i, j), i != j
        for i in range(num_hosts):
            for j in range(num_hosts):
                if i != j:
                    z_variables.append(pulp.LpVariable("Z_variable_Col_" + str(i) + "Col_" + str(j), 0, 1, constants.LpInteger))
        temp = 0
        # Force z = column_sum_var[i] AND column_sum_var[j]
        for i in range(num_hosts):
            for j in range(num_hosts):
                if i != j:
                    prob += column_sum_var[i] + column_sum_var[j] <= z_variables[temp] + 1
                    prob += 2 * z_variables[temp] <= column_sum_var[i] + column_sum_var[j]
                    #print str(temp) + " " + str(z_variables[temp])
                    temp = temp + 1
        # Adding the objective
        # NOTE(review): hard-coded coefficients over a fixed set of z-variable
        # indices -- presumably pairwise host "distances" for a specific
        # 4-host test topology; breaks for other host counts. TODO confirm.
        prob += z_variables[0] * 0 + z_variables[1] * 3 + z_variables[2] * 1 + z_variables[4] * 3 + z_variables[5] * 5 + z_variables[8] * 4
        return prob

    def update_with_strict_affinity_constraints_and_objective(self, variables, prob, num_hosts, num_instances):
        """Constrain all instances to land on exactly one host."""
        # Adding column sum variables whose value is 1 if there is any instance on that host
        column_sum_var = []
        for i in range(num_hosts):
            column_sum_var.append(pulp.LpVariable("Normalised_Column_Sum_Host_" + str(i), 0, 1, constants.LpInteger))
        # Adding normalisation constraint
        for i in range(num_hosts):
            prob += pulp.lpSum([variables[i][j]] for j in range(num_instances)) <= num_instances * column_sum_var[i]
            prob += column_sum_var[i] <= pulp.lpSum([variables[i][j]] for j in range(num_instances))
        # Exactly one host may be used.
        prob += pulp.lpSum([column_sum_var[i]] for i in range(num_hosts)) == 1
        return prob

    def update_with_strict_anti_affinity_constraints_and_objective(self, variables, prob, num_hosts, num_instances):
        """Constrain every instance to land on a distinct host."""
        # Adding column sum variables whose value is 1 if there is any instance on that host
        column_sum_var = []
        for i in range(num_hosts):
            column_sum_var.append(pulp.LpVariable("Normalised_Column_Sum_Host_" + str(i), 0, 1, constants.LpInteger))
        # Adding normalisation constraint
        for i in range(num_hosts):
            prob += pulp.lpSum([variables[i][j]] for j in range(num_instances)) <= num_instances * column_sum_var[i]
            prob += column_sum_var[i] <= pulp.lpSum([variables[i][j]] for j in range(num_instances))
        # As many hosts in use as there are instances -> one instance each.
        prob += pulp.lpSum([column_sum_var[i]] for i in range(num_hosts)) == num_instances
        return prob

    def update_with_soft_anti_affinity_constraints_and_objective(self, variables, prob, num_hosts, num_instances):
        """Reward spreading: objective term favours using more hosts."""
        # Adding column sum variables whose value is 1 if there is any instance on that host
        column_sum_var = []
        for i in range(num_hosts):
            column_sum_var.append(pulp.LpVariable("Normalised_Column_Sum_Host_" + str(i), 0, 1, constants.LpInteger))
        # Adding normalisation constraint
        for i in range(num_hosts):
            prob += pulp.lpSum([variables[i][j]] for j in range(num_instances)) <= num_instances * column_sum_var[i]
            prob += column_sum_var[i] <= pulp.lpSum([variables[i][j]] for j in range(num_instances))
        # Negative coefficient: minimising rewards a larger host count.
        prob += -1 * (pulp.lpSum([column_sum_var[i]] for i in range(num_hosts)))
        return prob

    def update_with_host_count_constraints_and_objective(self, variables, prob, num_hosts, num_instances, limit):
        """Cap the number of distinct hosts used at ``limit``."""
        # Adding column sum variables whose value is 1 if there is any instance on that host
        column_sum_var = []
        for i in range(num_hosts):
            column_sum_var.append(pulp.LpVariable("Normalised_Column_Sum_Host_" + str(i), 0, 1, constants.LpInteger))
        # Adding normalisation constraint
        for i in range(num_hosts):
            prob += pulp.lpSum([variables[i][j]] for j in range(num_instances)) <= num_instances * column_sum_var[i]
            prob += column_sum_var[i] <= pulp.lpSum([variables[i][j]] for j in range(num_instances))
        prob += pulp.lpSum([column_sum_var[i]] for i in range(num_hosts)) <= int(limit)
        return prob

    def host_solve(self, hosts, instance_uuids, request_spec,
                   filter_properties):
        """This method returns a list of tuples - (host, instance_uuid)
        that are returned by the solver. Here the assumption is that
        all instance_uuids have the same requirement as specified in
        filter_properties.
        """
        host_instance_tuples_list = []
        print filter_properties['instance_type']['memory_mb']
        if instance_uuids:
            num_instances = len(instance_uuids)
        else:
            num_instances = request_spec.get('num_instances', 1)
            # Setting an unset uuid string for each instance.
            instance_uuids = ['unset_uuid' + str(i)
                              for i in xrange(num_instances)]

        num_hosts = len(hosts)
        LOG.debug(_("All Hosts: %s") % [h.host for h in hosts])
        for host in hosts:
            LOG.debug(_("Host state: %s") % host)

        # Create dictionaries mapping host/instance IDs to hosts/instances.
        host_ids = ['Host' + str(i) for i in range(num_hosts)]
        host_id_dict = dict(zip(host_ids, hosts))
        instance_ids = ['Instance' + str(i) for i in range(num_instances)]
        instance_id_dict = dict(zip(instance_ids, instance_uuids))

        # Create the 'prob' variable to contain the problem data.
        prob = pulp.LpProblem("Host Instance Scheduler Problem",
                              constants.LpMinimize)

        # Create the 'variables' matrix to contain the referenced variables.
        # Variable names encode host/instance indices and are parsed back
        # from the solution below -- do not change the "IA_Host%_Instance%"
        # scheme without updating the parsing code.
        variables = [[pulp.LpVariable("IA" + "_Host" + str(i) + "_Instance" +
                                      str(j), 0, 1, constants.LpInteger)
                      for j in range(num_instances)] for i in range(num_hosts)]

        # Get costs and constraints and formulate the linear problem.
        self.cost_objects = [cost() for cost in self.cost_classes]
        self.constraint_objects = [constraint(variables, hosts,
                                              instance_uuids, request_spec,
                                              filter_properties)
                                   for constraint in self.constraint_classes]

        # Accumulate the weighted, normalised cost matrices.
        costs = [[0 for j in range(num_instances)] for i in range(num_hosts)]
        for cost_object in self.cost_objects:
            cost = cost_object.get_cost_matrix(hosts, instance_uuids,
                                               request_spec, filter_properties)
            cost = cost_object.normalize_cost_matrix(cost, 0.0, 1.0)
            weight = float(self.cost_weights[cost_object.__class__.__name__])
            costs = [[costs[i][j] + weight * cost[i][j]
                      for j in range(num_instances)] for i in range(num_hosts)]
        prob += (pulp.lpSum([costs[i][j] * variables[i][j]
                             for i in range(num_hosts)
                             for j in range(num_instances)]),
                 "Sum_of_Host_Instance_Scheduling_Costs")

        # Add each pluggable constraint's linear rows to the problem.
        for constraint_object in self.constraint_objects:
            coefficient_vectors = constraint_object.get_coefficient_vectors(
                variables, hosts, instance_uuids,
                request_spec, filter_properties)
            variable_vectors = constraint_object.get_variable_vectors(
                variables, hosts, instance_uuids,
                request_spec, filter_properties)
            operations = constraint_object.get_operations(
                variables, hosts, instance_uuids,
                request_spec, filter_properties)
            for i in range(len(operations)):
                operation = operations[i]
                len_vector = len(variable_vectors[i])
                # (sic: "Costraint" typo kept -- the string is part of the
                # constraint name written into the LP model.)
                prob += (operation(pulp.lpSum([coefficient_vectors[i][j]
                                               * variable_vectors[i][j]
                                               for j in range(len_vector)])),
                         "Costraint_Name_%s" % constraint_object.__class__.__name__
                         + "_No._%s" % i)

        # Dump the model for debugging before the extra constraints go in.
        prob.writeLP('test.lp')

        # Select the (anti-)affinity flavour requested by the flavor extra
        # spec; anything else is treated as "host_count_<...>_<limit>".
        if filter_properties['instance_type']['constraint'] == "soft_affinity":
            prob = self.update_with_soft_affinity_constraints_and_objective(variables, prob, num_hosts, num_instances)
        elif filter_properties['instance_type']['constraint'] == "strict_affinity":
            prob = self.update_with_strict_affinity_constraints_and_objective(variables, prob, num_hosts, num_instances)
        elif filter_properties['instance_type']['constraint'] == "strict_antiaffinity":
            prob = self.update_with_strict_anti_affinity_constraints_and_objective(variables, prob, num_hosts, num_instances)
        elif filter_properties['instance_type']['constraint'] == "soft_antiaffinity":
            prob = self.update_with_soft_anti_affinity_constraints_and_objective(variables, prob, num_hosts, num_instances)
        else:
            # Fallback: the 4th "_"-separated token of the constraint string
            # is used as the host-count limit.
            temp = filter_properties['instance_type']['constraint']
            temp = temp.split("_")
            prob = self.update_with_host_count_constraints_and_objective(variables, prob, num_hosts, num_instances, temp[3])

        print prob
        # The problem is solved using PULP's choice of Solver.
        prob.solve()

        # Create host-instance tuples from the solutions.
        if pulp.LpStatus[prob.status] == 'Optimal':
            for v in prob.variables():
                if v.name.startswith('IA'):
                    # Recover the (host, instance) indices from the
                    # variable name generated above.
                    (host_id, instance_id) = v.name.lstrip('IA').lstrip(
                        '_').split('_')
                    if v.varValue == 1.0:
                        host_instance_tuples_list.append(
                            (host_id_dict[host_id],
                             instance_id_dict[instance_id]))

        return host_instance_tuples_list
| 49.65678 | 138 | 0.631112 | 1,498 | 11,719 | 4.682911 | 0.150868 | 0.039914 | 0.05417 | 0.036066 | 0.554954 | 0.533571 | 0.513329 | 0.480257 | 0.464006 | 0.420527 | 0 | 0.006357 | 0.275109 | 11,719 | 235 | 139 | 49.868085 | 0.819423 | 0.141394 | 0 | 0.337838 | 0 | 0 | 0.056179 | 0.017697 | 0 | 0 | 0 | 0 | 0 | 0 | null | null | 0 | 0.033784 | null | null | 0.013514 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
ff9c87881ced0c2a95f22bc798cffd2d0381f7e0 | 721 | py | Python | cblue/data/__init__.py | dfhby0/CBLUE | 36bdb52f17c4379d4a5f8b407890ba294017b5e2 | [
"Apache-2.0"
] | 293 | 2021-06-07T06:04:37.000Z | 2022-03-28T09:38:28.000Z | cblue/data/__init__.py | dfhby0/CBLUE | 36bdb52f17c4379d4a5f8b407890ba294017b5e2 | [
"Apache-2.0"
] | 6 | 2021-06-11T09:50:15.000Z | 2022-03-18T07:33:56.000Z | cblue/data/__init__.py | dfhby0/CBLUE | 36bdb52f17c4379d4a5f8b407890ba294017b5e2 | [
"Apache-2.0"
] | 61 | 2021-06-07T06:38:42.000Z | 2022-03-30T07:16:46.000Z | from .data_process import EEDataProcessor, REDataProcessor, ERDataProcessor, CTCDataProcessor, \
CDNDataProcessor, STSDataProcessor, QQRDataProcessor, QICDataProcessor, QTRDataProcessor
from .dataset import EEDataset, REDataset, ERDataset, CTCDataset, CDNDataset, STSDataset, \
QQRDataset, QICDataset, QTRDataset
# Public API: a DataProcessor / Dataset pair for each CBLUE task, in the
# same order as the original hand-written list.
_TASKS = ('EE', 'RE', 'ER', 'CDN', 'CTC', 'STS', 'QQR', 'QIC', 'QTR')

__all__ = [
    task + suffix
    for task in _TASKS
    for suffix in ('DataProcessor', 'Dataset')
]
| 48.066667 | 96 | 0.696255 | 44 | 721 | 11.295455 | 0.545455 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.198336 | 721 | 14 | 97 | 51.5 | 0.859862 | 0 | 0 | 0 | 0 | 0 | 0.316227 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.153846 | 0 | 0.153846 | 0 | 0 | 0 | 1 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
ff9dcee27fd788e74a17caecd46d9f95eb475438 | 1,008 | py | Python | djapi/authtoken/models.py | Suor/djapi | 929b266d01aacc49f805c3ac7eec55766634babe | [
"BSD-2-Clause"
] | 12 | 2017-10-23T10:52:30.000Z | 2021-09-06T19:08:57.000Z | djapi/authtoken/models.py | Suor/djapi | 929b266d01aacc49f805c3ac7eec55766634babe | [
"BSD-2-Clause"
] | null | null | null | djapi/authtoken/models.py | Suor/djapi | 929b266d01aacc49f805c3ac7eec55766634babe | [
"BSD-2-Clause"
] | null | null | null | import binascii
import os
from django.conf import settings
from django.db import models
from django.utils.encoding import python_2_unicode_compatible
from django.utils.translation import ugettext_lazy as _
@python_2_unicode_compatible
class Token(models.Model):
    """
    The default authorization token model.
    """
    # Field definitions are kept exactly as before -- they define the
    # database schema.
    key = models.CharField(_("Key"), max_length=40, primary_key=True)
    user = models.OneToOneField(
        settings.AUTH_USER_MODEL, related_name='auth_token',
        on_delete=models.CASCADE, verbose_name=_("User")
    )
    created = models.DateTimeField(_("Created"), auto_now_add=True)

    class Meta:
        verbose_name = _("Token")
        verbose_name_plural = _("Tokens")

    def save(self, *args, **kwargs):
        """Persist the token, generating a key on the first save."""
        self.key = self.key or self.generate_key()
        return super(Token, self).save(*args, **kwargs)

    def generate_key(self):
        """Return a fresh 40-character hex key from 20 random bytes."""
        raw = os.urandom(20)
        return binascii.hexlify(raw).decode()

    def __str__(self):
        return self.key
| 28 | 69 | 0.686508 | 127 | 1,008 | 5.19685 | 0.503937 | 0.060606 | 0.045455 | 0.072727 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.007481 | 0.204365 | 1,008 | 35 | 70 | 28.8 | 0.815461 | 0.037698 | 0 | 0 | 0 | 0 | 0.036688 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.12 | false | 0 | 0.24 | 0.08 | 0.68 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 1 |
ff9f80880a91fdc6467f39d9797eb0e52dc6a441 | 2,035 | py | Python | app/lib/docker_config.py | joshburt/com.shapeandshare.therowantree.client.api | 6f17f469482a91071bedb7eedd158539cebe5639 | [
"MIT"
] | null | null | null | app/lib/docker_config.py | joshburt/com.shapeandshare.therowantree.client.api | 6f17f469482a91071bedb7eedd158539cebe5639 | [
"MIT"
] | null | null | null | app/lib/docker_config.py | joshburt/com.shapeandshare.therowantree.client.api | 6f17f469482a91071bedb7eedd158539cebe5639 | [
"MIT"
] | null | null | null | # Allow over-riding the defaults with non-secure ENV variables, or secure docker secrets
import therowantree_config as default_config
import os
###############################################################################
# Helpers
###############################################################################


def _env_str(name, default):
    """Return os.environ[name] if it is set, otherwise *default*."""
    return os.environ.get(name, default)


def _env_bool(name, default):
    """Parse the environment variable *name* as a boolean flag.

    The previous code used bool(os.environ[name]), which is True for ANY
    non-empty string -- including "False" and "0".  Recognise the usual
    truthy spellings instead.
    """
    if name not in os.environ:
        return default
    return os.environ[name].strip().lower() in ('1', 'true', 'yes', 'on')


###############################################################################
# Directory Options
###############################################################################
LOGS_DIR = _env_str('LOGS_DIR', default_config.LOGS_DIR)
TMP_DIR = _env_str('TMP_DIR', default_config.TMP_DIR)

###############################################################################
# Server Options
###############################################################################
API_ACCESS_KEY = _env_str('API_ACCESS_KEY', default_config.API_ACCESS_KEY)
API_VERSION = _env_str('API_VERSION', default_config.API_VERSION)
LISTENING_HOST = _env_str('LISTENING_HOST', default_config.LISTENING_HOST)
FLASK_DEBUG = _env_bool('FLASK_DEBUG', default_config.FLASK_DEBUG)

###############################################################################
# Database Options
###############################################################################
API_DATABASE_SERVER = _env_str('API_DATABASE_SERVER',
                               default_config.API_DATABASE_SERVER)
API_DATABASE_NAME = _env_str('API_DATABASE_NAME',
                             default_config.API_DATABASE_NAME)
API_DATABASE_USERNAME = _env_str('API_DATABASE_USERNAME',
                                 default_config.API_DATABASE_USERNAME)
API_DATABASE_PASSWORD = _env_str('API_DATABASE_PASSWORD',
                                 default_config.API_DATABASE_PASSWORD)
| 37 | 88 | 0.611794 | 234 | 2,035 | 4.948718 | 0.175214 | 0.15544 | 0.124352 | 0.138169 | 0.226252 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.091892 | 2,035 | 54 | 89 | 37.685185 | 0.626623 | 0.067813 | 0 | 0 | 0 | 0 | 0.201693 | 0.059238 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0.09375 | 0.0625 | 0 | 0.0625 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 1 |
ffa241b51a436f0dcd5e0dcb3336517e8a723091 | 1,626 | py | Python | Tests/Plot/LamHole/test_Hole_54_plot.py | PMSMcqut/pyleecan-of-manatee | 3efa06e8bc53c81a3e35457c108290e1d9ec1373 | [
"Apache-2.0"
] | 2 | 2020-06-29T13:48:37.000Z | 2021-06-15T07:34:05.000Z | Tests/Plot/LamHole/test_Hole_54_plot.py | PMSMcqut/pyleecan-of-manatee | 3efa06e8bc53c81a3e35457c108290e1d9ec1373 | [
"Apache-2.0"
] | null | null | null | Tests/Plot/LamHole/test_Hole_54_plot.py | PMSMcqut/pyleecan-of-manatee | 3efa06e8bc53c81a3e35457c108290e1d9ec1373 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
"""
@date Created on Wed Jan 13 17:45:15 2016
@copyright (C) 2015-2016 EOMYS ENGINEERING.
@author pierre_b
"""
from os.path import join
from unittest import TestCase
import matplotlib.pyplot as plt
from numpy import pi
from pyleecan.Classes.Frame import Frame
from pyleecan.Classes.LamHole import LamHole
from pyleecan.Classes.Lamination import Lamination
from pyleecan.Classes.Machine import Machine
from pyleecan.Classes.Magnet import Magnet
from pyleecan.Classes.Shaft import Shaft
from pyleecan.Classes.MatLamination import MatLamination
from pyleecan.Classes.HoleM54 import HoleM54
from pyleecan.Tests.Plot import save_path
class test_Hole_54_plot(TestCase):
    """unittest for Lamination with Hole plot"""

    def test_Lam_Hole_54_plot(self):
        """Test machine plot hole 54"""
        plt.close("all")

        machine = Machine()
        machine.rotor = LamHole(
            is_internal=True, Rint=0.1, Rext=0.2, is_stator=False, L1=0.7
        )
        machine.rotor.hole = list()
        # Two HoleM54 sets differing only in opening angle W0 and depth H0.
        for W0, H0 in ((pi / 4, 50e-3), (pi / 6, 25e-3)):
            machine.rotor.hole.append(
                HoleM54(Zh=8, W0=W0, H0=H0, H1=10e-3, R1=100e-3)
            )

        # Full rotor plot: 2 * 8 holes + 2 lamination bores = 18 patches.
        machine.rotor.plot()
        fig = plt.gcf()
        fig.savefig(join(save_path, "test_Lam_Hole_s54-Rotor.png"))
        self.assertEqual(len(fig.axes[0].patches), 18)

        # Single-hole plot: exactly one patch.
        machine.rotor.hole[0].plot()
        fig = plt.gcf()
        fig.savefig(join(save_path, "test_Lam_Hole_s54-Rotor hole.png"))
        self.assertEqual(len(fig.axes[0].patches), 1)
| 30.679245 | 73 | 0.675892 | 244 | 1,626 | 4.397541 | 0.393443 | 0.100652 | 0.141659 | 0.059646 | 0.272134 | 0.272134 | 0.272134 | 0.272134 | 0.205033 | 0.205033 | 0 | 0.065167 | 0.207257 | 1,626 | 52 | 74 | 31.269231 | 0.767261 | 0.116851 | 0 | 0.114286 | 0 | 0 | 0.043724 | 0.035261 | 0 | 0 | 0 | 0 | 0.057143 | 1 | 0.028571 | false | 0 | 0.371429 | 0 | 0.428571 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 1 |
ffa5af6affd31cfe1fe0ccc78cba42510b4fe8d2 | 521 | py | Python | mysite/addOne.py | xinkaiwang/robotJump | 622e97451f450b755aecbd60e15b2cd47d875f47 | [
"MIT"
] | null | null | null | mysite/addOne.py | xinkaiwang/robotJump | 622e97451f450b755aecbd60e15b2cd47d875f47 | [
"MIT"
] | null | null | null | mysite/addOne.py | xinkaiwang/robotJump | 622e97451f450b755aecbd60e15b2cd47d875f47 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
import os
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "mysite.settings")
import django
django.setup()
# your imports, e.g. Django models
from buckets.models import *
from buckets.name2date import name2date
bucket = BucketInfo.objects.get(name='xinkaibuk1')
allInDb = set()
allInDb.update(ImageInfo.objects.all())
file_name='20180227.062043.749.jpg'
date = name2date(file_name)
# print date
img = ImageInfo(file_name=file_name, date_time=date, bucket=bucket)
img.save()
print 'image saved' | 21.708333 | 67 | 0.773512 | 74 | 521 | 5.351351 | 0.594595 | 0.080808 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.044776 | 0.099808 | 521 | 24 | 68 | 21.708333 | 0.799574 | 0.122841 | 0 | 0 | 0 | 0 | 0.178022 | 0.098901 | 0 | 0 | 0 | 0 | 0 | 0 | null | null | 0 | 0.285714 | null | null | 0.071429 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
ffa7676c4e5ed4bfd01c893490761de5a7bbcf0c | 255 | py | Python | backend/test.py | Harin329/Masterplan | 0e06d596751c5073b104d0e4aa6c1e8fd0a4fb0a | [
"Unlicense"
] | null | null | null | backend/test.py | Harin329/Masterplan | 0e06d596751c5073b104d0e4aa6c1e8fd0a4fb0a | [
"Unlicense"
] | null | null | null | backend/test.py | Harin329/Masterplan | 0e06d596751c5073b104d0e4aa6c1e8fd0a4fb0a | [
"Unlicense"
] | null | null | null | import boto3
import os
from dotenv import load_dotenv
# Pull AWS credentials from a local .env file into the process environment.
load_dotenv()

# DynamoDB client; the credentials come from the environment populated above.
client = boto3.client(
    'dynamodb',
    region_name="us-east-1",
    aws_access_key_id=os.getenv('AWS_ACCESS_KEY_ID'),
    aws_secret_access_key=os.getenv('AWS_SECRET_ACCESS_KEY'),
)
| 19.615385 | 61 | 0.752941 | 40 | 255 | 4.425 | 0.475 | 0.20339 | 0.135593 | 0.158192 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.013636 | 0.137255 | 255 | 12 | 62 | 21.25 | 0.790909 | 0 | 0 | 0 | 0 | 0 | 0.215686 | 0.082353 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.3 | 0 | 0.3 | 0 | 0 | 0 | 0 | null | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
ffa8a6dc6156e32b6503b1d0530884e4f28a8bfa | 5,600 | py | Python | src/opendr/control/mobile_manipulation/mobileRL/envs/eeplanner.py | makistsantekidis/opendr | 07dee3b59d3487b9c5a93d6946317178a02c9890 | [
"Apache-2.0"
] | 3 | 2021-06-24T01:54:25.000Z | 2021-12-12T16:21:24.000Z | src/opendr/control/mobile_manipulation/mobileRL/envs/eeplanner.py | makistsantekidis/opendr | 07dee3b59d3487b9c5a93d6946317178a02c9890 | [
"Apache-2.0"
] | 79 | 2021-06-23T10:40:10.000Z | 2021-12-16T07:59:42.000Z | src/opendr/control/mobile_manipulation/mobileRL/envs/eeplanner.py | makistsantekidis/opendr | 07dee3b59d3487b9c5a93d6946317178a02c9890 | [
"Apache-2.0"
] | 5 | 2021-07-04T07:38:50.000Z | 2021-12-12T16:18:47.000Z | # Copyright 2020-2021 OpenDR European Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from pybindings import RobotObs, EEObs, LinearPlanner, GMMPlanner
# Velocity bounds handed to the C++ planner bindings
# (units presumably m/s -- TODO confirm against the bindings).
MIN_PLANNER_VELOCITY = 0.001
MAX_PLANNER_VELOCITY = 0.1
# also defined in robot_env.cpp!
TIME_STEP_TRAIN = 0.1
class EEPlanner:
    """Base class for end-effector planners.

    Stores the tip/wrist goals, head start and map; subclasses must
    implement reset(), step() and generate_obs_step().
    """

    def __init__(self, gripper_goal_tip, gripper_goal_wrist, head_start, map):
        self._map = map
        self._head_start = head_start
        # Goals are kept for both the gripper tip and the wrist frame.
        self.gripper_goal_tip = gripper_goal_tip
        self.gripper_goal_wrist = gripper_goal_wrist

    def reset(self,
              robot_obs: RobotObs,
              slow_down_factor: float,
              is_analytic_env: bool,
              success_thres_dist: float,
              success_thres_rot: float) -> EEObs:
        raise NotImplementedError()

    def step(self, robot_obs: RobotObs, learned_vel_norm: float) -> EEObs:
        raise NotImplementedError()

    def generate_obs_step(self, robot_state: RobotObs) -> EEObs:
        raise NotImplementedError()
class LinearPlannerWrapper(EEPlanner):
    """EEPlanner backed by the C++ ``LinearPlanner`` binding."""

    def __init__(self,
                 gripper_goal_tip,
                 gripper_goal_wrist,
                 head_start,
                 map):
        # Zero-argument super(): this file is Python 3 (it uses f-strings),
        # so the legacy two-argument form is unnecessary.
        super().__init__(gripper_goal_tip, gripper_goal_wrist, head_start, map)
        # Created lazily in reset(): one planner instance per episode.
        self._planner = None

    def reset(self,
              robot_obs: RobotObs,
              slow_down_factor: float,
              is_analytic_env: bool,
              success_thres_dist: float,
              success_thres_rot: float) -> EEObs:
        """Build a fresh LinearPlanner for this episode and return the
        initial end-effector observation."""
        self._planner = LinearPlanner(self.gripper_goal_wrist,
                                      robot_obs.gripper_tf,
                                      # presumably the identity pose
                                      # (zero translation, unit quaternion)
                                      # -- TODO confirm with the binding
                                      [0, 0, 0, 0, 0, 0, 1],
                                      robot_obs.base_tf,
                                      success_thres_dist,
                                      success_thres_rot,
                                      MIN_PLANNER_VELOCITY,
                                      MAX_PLANNER_VELOCITY,
                                      slow_down_factor,
                                      self._head_start,
                                      TIME_STEP_TRAIN,
                                      is_analytic_env)
        return self.generate_obs_step(robot_obs)

    def step(self, robot_obs: RobotObs, learned_vel_norm: float) -> EEObs:
        """Advance the plan by one step using the learned velocity norm."""
        return self._planner.step(robot_obs, learned_vel_norm)

    def generate_obs_step(self, robot_state: RobotObs) -> EEObs:
        """Return the planner's EE observation for the given robot state."""
        return self._planner.generate_obs_step(robot_state)
class GMMPlannerWrapper(EEPlanner):
    """EEPlanner backed by the C++ ``GMMPlanner`` binding, driven by a
    pre-trained GMM model loaded from ``gmm_model_path``."""

    def __init__(self,
                 gripper_goal_tip,
                 gripper_goal_wrist,
                 head_start,
                 map,
                 gmm_model_path: str,
                 robot_config):
        # Zero-argument super(): this file is Python 3 (it uses f-strings).
        super().__init__(gripper_goal_tip, gripper_goal_wrist, head_start, map)
        self._planner = None
        # Fail fast with a real exception: the previous `assert` would be
        # silently stripped when Python runs with -O.
        if not os.path.exists(gmm_model_path):
            raise FileNotFoundError(f"Path {gmm_model_path} doesn't exist")
        self._gmm_model_path = gmm_model_path
        self._robot_config = robot_config

    def reset(self,
              robot_obs: RobotObs,
              slow_down_factor: float,
              is_analytic_env: bool,
              success_thres_dist,
              success_thres_rot) -> EEObs:
        """Build a fresh GMMPlanner for this episode and return the
        initial end-effector observation."""
        # NOTE: planners either take in the goal for the tip or the wrist, but always output plans for the wrist!
        self._planner = GMMPlanner(self.gripper_goal_wrist,
                                   robot_obs.gripper_tf,
                                   # presumably the identity pose -- TODO
                                   # confirm with the binding
                                   [0, 0, 0, 0, 0, 0, 1],
                                   robot_obs.base_tf,
                                   success_thres_dist,
                                   success_thres_rot,
                                   MIN_PLANNER_VELOCITY,
                                   MAX_PLANNER_VELOCITY,
                                   slow_down_factor,
                                   self._head_start,
                                   TIME_STEP_TRAIN,
                                   is_analytic_env,
                                   self._robot_config["tip_to_gripper_offset"],
                                   self._robot_config["gripper_to_base_rot_offset"],
                                   str(self._gmm_model_path),
                                   self._robot_config["gmm_base_offset"])
        return self.generate_obs_step(robot_obs)

    def step(self, robot_obs: RobotObs, learned_vel_norm: float) -> EEObs:
        """Advance the plan by one step using the learned velocity norm."""
        return self._planner.step(robot_obs, learned_vel_norm)

    def generate_obs_step(self, robot_state: RobotObs) -> EEObs:
        """Return the planner's EE observation for the given robot state."""
        return self._planner.generate_obs_step(robot_state)
| 40.875912 | 113 | 0.531964 | 572 | 5,600 | 4.837413 | 0.251748 | 0.063607 | 0.052042 | 0.045537 | 0.593422 | 0.576075 | 0.542103 | 0.542103 | 0.542103 | 0.52584 | 0 | 0.010287 | 0.409821 | 5,600 | 136 | 114 | 41.176471 | 0.826929 | 0.125 | 0 | 0.718447 | 0 | 0 | 0.019861 | 0.009623 | 0 | 0 | 0 | 0 | 0.009709 | 1 | 0.116505 | false | 0 | 0.019417 | 0.038835 | 0.223301 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
ffae59649ff2b990843a197023e765523f170d67 | 7,220 | py | Python | snaps/openstack/tests/create_user_tests.py | hashnfv/hashnfv-snaps | 0dfca494ef7c2778babfac48d9b701953860b54f | [
"Apache-2.0"
] | null | null | null | snaps/openstack/tests/create_user_tests.py | hashnfv/hashnfv-snaps | 0dfca494ef7c2778babfac48d9b701953860b54f | [
"Apache-2.0"
] | null | null | null | snaps/openstack/tests/create_user_tests.py | hashnfv/hashnfv-snaps | 0dfca494ef7c2778babfac48d9b701953860b54f | [
"Apache-2.0"
] | null | null | null | # Copyright (c) 2017 Cable Television Laboratories, Inc. ("CableLabs")
# and others. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import uuid
from snaps.openstack.create_user import OpenStackUser, UserSettings
from snaps.openstack.tests.os_source_file_test import OSComponentTestCase
from snaps.openstack.utils import keystone_utils
__author__ = 'spisarski'
class UserSettingsUnitTests(unittest.TestCase):
    """
    Tests the construction of the UserSettings class
    """

    def test_no_params(self):
        with self.assertRaises(Exception):
            UserSettings()

    def test_empty_config(self):
        with self.assertRaises(Exception):
            UserSettings(**dict())

    def test_name_only(self):
        with self.assertRaises(Exception):
            UserSettings(name='foo')

    def test_config_with_name_only(self):
        with self.assertRaises(Exception):
            UserSettings(**{'name': 'foo'})

    def test_name_pass_enabled_str(self):
        # 'enabled' must not be accepted as the string 'true'.
        with self.assertRaises(Exception):
            UserSettings(name='foo', password='bar', enabled='true')

    def test_config_with_name_pass_enabled_str(self):
        with self.assertRaises(Exception):
            UserSettings(
                **{'name': 'foo', 'password': 'bar', 'enabled': 'true'})

    def _assert_minimal(self, settings):
        # Shared checks for a settings object built from name/password only.
        self.assertEqual('foo', settings.name)
        self.assertEqual('bar', settings.password)
        self.assertIsNone(settings.project_name)
        self.assertIsNone(settings.email)
        self.assertTrue(settings.enabled)

    def test_name_pass_only(self):
        self._assert_minimal(UserSettings(name='foo', password='bar'))

    def test_config_with_name_pass_only(self):
        self._assert_minimal(UserSettings(**{'name': 'foo', 'password': 'bar'}))

    def _assert_full(self, settings):
        # Shared checks for a fully populated settings object.
        self.assertEqual('foo', settings.name)
        self.assertEqual('bar', settings.password)
        self.assertEqual('proj-foo', settings.project_name)
        self.assertEqual('foo@bar.com', settings.email)
        self.assertFalse(settings.enabled)

    def test_all(self):
        self._assert_full(UserSettings(name='foo', password='bar',
                                       project_name='proj-foo',
                                       email='foo@bar.com',
                                       enabled=False))

    def test_config_all(self):
        self._assert_full(UserSettings(**{'name': 'foo', 'password': 'bar',
                                          'project_name': 'proj-foo',
                                          'email': 'foo@bar.com',
                                          'enabled': False}))
class CreateUserSuccessTests(OSComponentTestCase):
    """
    Integration tests for the OpenStackUser creator class (run against the
    keystone service configured by OSComponentTestCase).
    """
    def setUp(self):
        """
        Builds the UserSettings used by every test and a keystone client.
        """
        # Shorten the uuid so the derived user name stays reasonably short.
        guid = str(uuid.uuid4())[:-19]
        guid = self.__class__.__name__ + '-' + guid
        self.user_settings = UserSettings(
            name=guid + '-name',
            password=guid + '-password',
            # Request the 'admin' role on the configured project.
            roles={'admin': self.os_creds.project_name},
            domain_name=self.os_creds.user_domain_name)
        self.keystone = keystone_utils.keystone_client(self.os_creds)
        # Initialize for cleanup
        self.user_creator = None
    def tearDown(self):
        """
        Removes the user created during the test, if any.
        """
        if self.user_creator:
            self.user_creator.clean()
    def test_create_user(self):
        """
        Tests the creation of an OpenStack user.
        """
        self.user_creator = OpenStackUser(self.os_creds, self.user_settings)
        created_user = self.user_creator.create()
        self.assertIsNotNone(created_user)
        # The created user must be retrievable by name through keystone.
        retrieved_user = keystone_utils.get_user(self.keystone,
                                                 self.user_settings.name)
        self.assertIsNotNone(retrieved_user)
        self.assertEqual(created_user, retrieved_user)
    def test_create_user_2x(self):
        """
        Tests the creation of an OpenStack user twice to ensure it only creates
        one.
        """
        self.user_creator = OpenStackUser(self.os_creds, self.user_settings)
        created_user = self.user_creator.create()
        self.assertIsNotNone(created_user)
        retrieved_user = keystone_utils.get_user(self.keystone,
                                                 self.user_settings.name)
        self.assertIsNotNone(retrieved_user)
        self.assertEqual(created_user, retrieved_user)
        # Create user for the second time to ensure it is the same
        user2 = OpenStackUser(self.os_creds, self.user_settings).create()
        self.assertEqual(retrieved_user, user2)
    def test_create_delete_user(self):
        """
        Tests the creation of an OpenStack user then delete.
        """
        # Create Image
        self.user_creator = OpenStackUser(self.os_creds, self.user_settings)
        created_user = self.user_creator.create()
        self.assertIsNotNone(created_user)
        # Delete directly through keystone, then clean() must be a no-op
        # and the creator must report no user.
        keystone_utils.delete_user(self.keystone, created_user)
        # Delete user
        self.user_creator.clean()
        self.assertIsNone(self.user_creator.get_user())
    def test_create_admin_user(self):
        """
        Tests the creation of an OpenStack user.
        """
        self.user_creator = OpenStackUser(self.os_creds, self.user_settings)
        created_user = self.user_creator.create()
        self.assertIsNotNone(created_user)
        retrieved_user = keystone_utils.get_user(self.keystone,
                                                 self.user_settings.name)
        self.assertIsNotNone(retrieved_user)
        self.assertEqual(created_user, retrieved_user)
        # The 'admin' role requested in setUp must be the user's only role
        # on the configured project.
        role = keystone_utils.get_role_by_name(self.keystone, 'admin')
        self.assertIsNotNone(role)
        os_proj = keystone_utils.get_project(
            keystone=self.keystone, project_name=self.os_creds.project_name)
        user_roles = keystone_utils.get_roles_by_user(
            self.keystone, retrieved_user, os_proj)
        self.assertIsNotNone(user_roles)
        self.assertEqual(1, len(user_roles))
        self.assertEqual(role.id, user_roles[0].id)
| 37.604167 | 79 | 0.645429 | 813 | 7,220 | 5.543665 | 0.212792 | 0.03905 | 0.043266 | 0.02951 | 0.564233 | 0.549368 | 0.524961 | 0.516086 | 0.507877 | 0.49878 | 0 | 0.002982 | 0.256925 | 7,220 | 191 | 80 | 37.801047 | 0.837092 | 0.170499 | 0 | 0.452174 | 0 | 0 | 0.044064 | 0 | 0 | 0 | 0 | 0 | 0.365217 | 1 | 0.13913 | false | 0.130435 | 0.043478 | 0 | 0.2 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 1 |
ffb11c0fb609d6059063047b9dfca31cac6fe559 | 276 | py | Python | setup.py | muhammad-alaref/CAM2RequestsCLI | 83911b52de7fe575682184ef9714cbb2b6fc5af4 | [
"Apache-2.0"
] | null | null | null | setup.py | muhammad-alaref/CAM2RequestsCLI | 83911b52de7fe575682184ef9714cbb2b6fc5af4 | [
"Apache-2.0"
] | null | null | null | setup.py | muhammad-alaref/CAM2RequestsCLI | 83911b52de7fe575682184ef9714cbb2b6fc5af4 | [
"Apache-2.0"
] | null | null | null | from setuptools import setup, find_packages
setup(
name='CAM2RequestsCLI',
version='v1.0-rc0',
packages=find_packages(),
zip_safe=False,
install_requires=[
'click',
'requests',
],
entry_points='''
[console_scripts]
CAM2RequestsCLI=CAM2RequestsCLI:cli
''',
)
| 16.235294 | 43 | 0.717391 | 30 | 276 | 6.4 | 0.8 | 0.125 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.025105 | 0.134058 | 276 | 16 | 44 | 17.25 | 0.778243 | 0 | 0 | 0 | 0 | 0 | 0.347826 | 0.126812 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 0.066667 | 0 | 0.066667 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
4401048e9f96083e7a2404057ad05e3545075fdb | 173 | py | Python | run.py | BloomTech-Labs/tally-ai-ds | 09d7a207bf5e6f9de2d615ad078cebb8340abcd8 | [
"MIT"
] | 8 | 2020-01-08T18:16:20.000Z | 2020-06-30T23:49:38.000Z | run.py | Lambda-School-Labs/tally-ai-ds | 09d7a207bf5e6f9de2d615ad078cebb8340abcd8 | [
"MIT"
] | 5 | 2021-03-30T13:48:15.000Z | 2021-09-22T19:11:46.000Z | run.py | BloomTech-Labs/tally-ai-ds | 09d7a207bf5e6f9de2d615ad078cebb8340abcd8 | [
"MIT"
] | 5 | 2020-02-02T04:27:26.000Z | 2020-06-05T05:12:28.000Z | """ Entry point for the Yelp API Flask Reviews for Tally-ai"""
from yelpapi.app import create_app
APP = create_app()
if __name__=='__main__':
APP.run(host="0.0.0.0")
| 19.222222 | 62 | 0.693642 | 30 | 173 | 3.666667 | 0.7 | 0.054545 | 0.054545 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.027586 | 0.16185 | 173 | 8 | 63 | 21.625 | 0.731034 | 0.317919 | 0 | 0 | 0 | 0 | 0.135135 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.25 | 0 | 0.25 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
44046b1afd25c838028ace50bbff2629930820b6 | 745 | py | Python | batch/batch/globals.py | vrautela/hail | 7db6189b5b1feafa88452b8470e497d9505d9a46 | [
"MIT"
] | null | null | null | batch/batch/globals.py | vrautela/hail | 7db6189b5b1feafa88452b8470e497d9505d9a46 | [
"MIT"
] | null | null | null | batch/batch/globals.py | vrautela/hail | 7db6189b5b1feafa88452b8470e497d9505d9a46 | [
"MIT"
] | null | null | null | states = {'Pending', 'Ready', 'Creating', 'Running', 'Cancelled', 'Error', 'Failed', 'Success'}
complete_states = ('Cancelled', 'Error', 'Failed', 'Success')
valid_state_transitions = {
'Pending': {'Ready'},
'Ready': {'Creating', 'Running', 'Cancelled', 'Error'},
'Creating': {'Ready', 'Running'},
'Running': {'Ready', 'Cancelled', 'Error', 'Failed', 'Success'},
'Cancelled': set(),
'Error': set(),
'Failed': set(),
'Success': set(),
}
tasks = ('input', 'main', 'output')
memory_types = ('lowmem', 'standard', 'highmem')
HTTP_CLIENT_MAX_SIZE = 8 * 1024 * 1024
BATCH_FORMAT_VERSION = 6
STATUS_FORMAT_VERSION = 5
INSTANCE_VERSION = 22
MAX_PERSISTENT_SSD_SIZE_GIB = 64 * 1024
RESERVED_STORAGE_GB_PER_CORE = 5
| 26.607143 | 95 | 0.636242 | 82 | 745 | 5.536585 | 0.560976 | 0.123348 | 0.132159 | 0.178414 | 0.14978 | 0 | 0 | 0 | 0 | 0 | 0 | 0.031847 | 0.157047 | 745 | 27 | 96 | 27.592593 | 0.691083 | 0 | 0 | 0 | 0 | 0 | 0.334228 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
441fd066a3b9ceec8854e7357e20db9ea179837b | 1,785 | py | Python | rem/_dummy_fork_locking.py | heni/rem | 00caa0e2e6b0bd0091e8290c1e11884c8c45d347 | [
"MIT"
] | 14 | 2015-03-25T08:31:02.000Z | 2022-03-31T10:58:40.000Z | rem/_dummy_fork_locking.py | heni/rem | 00caa0e2e6b0bd0091e8290c1e11884c8c45d347 | [
"MIT"
] | 3 | 2015-02-07T16:00:01.000Z | 2015-10-07T11:39:27.000Z | rem/_dummy_fork_locking.py | heni/rem | 00caa0e2e6b0bd0091e8290c1e11884c8c45d347 | [
"MIT"
] | 6 | 2015-02-06T16:43:05.000Z | 2020-02-18T13:43:31.000Z | import threading
__all__ = ["acquire_fork", "acquire_lock", "release_fork", "release_lock"]
class TwoExclusiveResourcesDispatcher(object):
def __init__(self):
self.FirstResourceUsage = 0
self.SecondResourceUsage = 0
self.lock = threading.Lock()
self.FirstResourceEvent = threading.Condition(self.lock)
self.SecondResourceEvent = threading.Condition(self.lock)
def AcquireFirstResource(self):
with self.lock:
while self.SecondResourceUsage > 0:
self.SecondResourceEvent.wait()
self.FirstResourceUsage += 1
def ReleaseFirstResource(self):
with self.lock:
if self.FirstResourceUsage <= 0:
raise RuntimeError("try to release already released object")
self.FirstResourceUsage -= 1
if self.FirstResourceUsage == 0:
self.FirstResourceEvent.notifyAll()
def AcquireSecondResource(self):
with self.lock:
while self.FirstResourceUsage > 0:
self.FirstResourceEvent.wait()
self.SecondResourceUsage += 1
def ReleaseSecondResource(self):
with self.lock:
if self.SecondResourceUsage <= 0:
raise RuntimeError("try to release already released object")
self.SecondResourceUsage -= 1
if self.SecondResourceUsage == 0:
self.SecondResourceEvent.notifyAll()
_ForkLockDispatcher = TwoExclusiveResourcesDispatcher()
def acquire_fork():
_ForkLockDispatcher.AcquireFirstResource()
def release_fork():
_ForkLockDispatcher.ReleaseFirstResource()
def acquire_lock():
_ForkLockDispatcher.AcquireSecondResource()
def release_lock():
_ForkLockDispatcher.ReleaseSecondResource()
| 30.254237 | 76 | 0.664986 | 149 | 1,785 | 7.825503 | 0.241611 | 0.048027 | 0.078902 | 0.054889 | 0.325901 | 0.174957 | 0.09434 | 0.09434 | 0.09434 | 0.09434 | 0 | 0.009002 | 0.253221 | 1,785 | 58 | 77 | 30.775862 | 0.865716 | 0 | 0 | 0.142857 | 0 | 0 | 0.069468 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.214286 | false | 0 | 0.02381 | 0 | 0.261905 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
4423940e7bf16fa7a158c1ac06a6349871fe59a2 | 2,410 | py | Python | my/core/error.py | thetomcraig/HPI | 5eecd8721dc0cbfc68040106bb7b540b1567dff3 | [
"MIT"
] | null | null | null | my/core/error.py | thetomcraig/HPI | 5eecd8721dc0cbfc68040106bb7b540b1567dff3 | [
"MIT"
] | null | null | null | my/core/error.py | thetomcraig/HPI | 5eecd8721dc0cbfc68040106bb7b540b1567dff3 | [
"MIT"
] | null | null | null | """
Various error handling helpers
See https://beepb00p.xyz/mypy-error-handling.html#kiss for more detail
"""
from itertools import tee
from typing import Union, TypeVar, Iterable, List, Tuple, Type
T = TypeVar('T')
E = TypeVar('E', bound=Exception)  # TODO make covariant?

# A result is either a value or an error.
ResT = Union[T, E]
Res = ResT[T, Exception]


def unwrap(res: Res[T]) -> T:
    """Return the value, or raise it if it is an Exception."""
    if isinstance(res, Exception):
        raise res
    return res


def echain(ex: E, cause: Exception) -> E:
    """Attach *cause* as the ``__cause__`` of *ex* and return *ex*."""
    ex.__cause__ = cause
    return ex


def split_errors(l: Iterable[ResT[T, E]], ET: Type[E]) -> Tuple[Iterable[T], Iterable[E]]:
    """Split a result stream into a (values, errors) pair of iterables."""
    # TODO would be nice to have ET=Exception default?
    ok_it, err_it = tee(l)
    values: Iterable[T] = (r for r in ok_it if not isinstance(r, ET))  # type: ignore[misc]
    errors: Iterable[E] = (r for r in err_it if isinstance(r, ET))
    return (values, errors)


def sort_res_by(items: Iterable[ResT], key) -> List[ResT]:
    """
    Sort the value entries by *key*, carrying each run of errors along with
    the value that follows it; errors with no following value end up last.
    """
    pending = []   # errors seen since the last value
    keyed = []     # (value, errors-that-preceded-it) pairs
    for item in items:
        if isinstance(item, Exception):
            pending.append(item)
        else:
            keyed.append((item, pending))
            pending = []

    out: List[ResT] = []
    for value, errs in sorted(keyed, key=lambda pair: key(pair[0])):
        out.extend(errs)
        out.append(value)
    out.extend(pending)  # trailing errors with no value after them
    return out
def test_sort_res_by() -> None:
    """Errors must travel with the value that follows them when sorting."""
    class Exc(Exception):
        # Compare by message so expected lists can use fresh instances.
        def __eq__(self, other):
            return self.args == other.args

    ress = [
        Exc('first'),
        Exc('second'),
        5,
        3,
        Exc('xxx'),
        2,
        1,
        Exc('last'),
    ]

    expected = [
        1,
        Exc('xxx'),
        2,
        3,
        Exc('first'),
        Exc('second'),
        5,
        Exc('last'),
    ]
    results = sort_res_by(ress, lambda x: x)  # type: ignore
    assert results == expected

    # Appending a smallest value: the previously-trailing error now travels
    # with it to the front of the result.
    results2 = sort_res_by(ress + [0], lambda x: x)  # type: ignore
    assert results2 == [Exc('last'), 0] + results[:-1]
| 24.1 | 120 | 0.573444 | 329 | 2,410 | 4.145897 | 0.407295 | 0.020528 | 0.026393 | 0.024927 | 0.061584 | 0.035191 | 0 | 0 | 0 | 0 | 0 | 0.009524 | 0.302905 | 2,410 | 99 | 121 | 24.343434 | 0.802381 | 0.248133 | 0 | 0.298507 | 0 | 0 | 0.023596 | 0 | 0 | 0 | 0 | 0.020202 | 0.029851 | 1 | 0.089552 | false | 0 | 0.029851 | 0.014925 | 0.208955 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
4426bbfc28bdebbc507707d96ca993bfc6cb31e2 | 1,158 | py | Python | Jira/CVE-2019-8451/__init__.py | csadsl/poc_exp | e3146262e7403f19f49ee2db56338fa3f8e119c9 | [
"MIT"
] | 11 | 2020-05-30T13:53:49.000Z | 2021-03-17T03:20:59.000Z | Jira/CVE-2019-8451/__init__.py | csadsl/poc_exp | e3146262e7403f19f49ee2db56338fa3f8e119c9 | [
"MIT"
] | 6 | 2020-05-13T03:25:18.000Z | 2020-07-21T06:24:16.000Z | Jira/CVE-2019-8451/__init__.py | csadsl/poc_exp | e3146262e7403f19f49ee2db56338fa3f8e119c9 | [
"MIT"
] | 6 | 2020-05-30T13:53:51.000Z | 2020-12-01T21:44:26.000Z | import requests
import sys
# http://www.jas502n.com:8080/plugins/servlet/gadgets/makeRequest?url=http://www.jas502n.com:8080@www.baidu.com/
def ssrf_poc(url, ssrf_poc_target=None, ssrf_url=None):
    pass
if __name__ == "__main__":
while True:
print
ssrf_url = raw_input(">>>>SSRF URL: ")
url = "https://jira.liulishuo.work"
ssrf_poc(url, ssrf_url)
| 28.95 | 112 | 0.585492 | 168 | 1,158 | 3.916667 | 0.494048 | 0.053191 | 0.045593 | 0.051672 | 0.221885 | 0 | 0 | 0 | 0 | 0 | 0 | 0.053691 | 0.227979 | 1,158 | 39 | 113 | 29.692308 | 0.682327 | 0.094991 | 0 | 0.066667 | 0 | 0.066667 | 0.381453 | 0.072658 | 0 | 0 | 0 | 0 | 0 | 0 | null | null | 0 | 0.066667 | null | null | 0.2 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
4437b9810c4e7fa906b091e98a1904da452af26d | 915 | py | Python | api/v1/views/index.py | bdbaraban/HolbertonBnB | 70812bcef485e4d57ca0f7a1d0ddc59583f4122a | [
"MIT"
] | 1 | 2022-02-08T03:51:31.000Z | 2022-02-08T03:51:31.000Z | api/v1/views/index.py | bdbaraban/HolbertonBnB | 70812bcef485e4d57ca0f7a1d0ddc59583f4122a | [
"MIT"
] | null | null | null | api/v1/views/index.py | bdbaraban/HolbertonBnB | 70812bcef485e4d57ca0f7a1d0ddc59583f4122a | [
"MIT"
] | 8 | 2020-12-10T14:41:39.000Z | 2022-03-10T21:35:59.000Z | #!/usr/bin/env python3
"""Defines a status route for the HolbertonBnB API."""
from flask import jsonify
from flasgger import swag_from
from models import storage
from api.v1.views import app_views
@app_views.route("/status")
@swag_from("../apidocs/status/status.yml")
def status():
"""Returns the server status.
Returns:
JSON object with the current server status.
"""
return jsonify({"status": "OK"})
@app_views.route("/stats")
@swag_from("../apidocs/stats/stats.yml")
def stats():
"""Retrives the count of each object type.
Returns:
JSON object with the number of objects by type."""
return jsonify({
"amenities": storage.count("Amenity"),
"cities": storage.count("City"),
"places": storage.count("Place"),
"reviews": storage.count("Review"),
"states": storage.count("State"),
"users": storage.count("User")
})
| 26.142857 | 58 | 0.645902 | 115 | 915 | 5.086957 | 0.478261 | 0.123077 | 0.044444 | 0.071795 | 0.082051 | 0 | 0 | 0 | 0 | 0 | 0 | 0.002736 | 0.201093 | 915 | 34 | 59 | 26.911765 | 0.797538 | 0.280874 | 0 | 0 | 0 | 0 | 0.233494 | 0.086957 | 0 | 0 | 0 | 0 | 0 | 1 | 0.105263 | true | 0 | 0.210526 | 0 | 0.421053 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
4437f26ba6c39393772f578c50974f0c3bd7647f | 3,538 | py | Python | data_strctures/queue.py | CodenameREBIRTH/algopy | f3894c98b7232d0f6067eb52ca68f9e6a8875884 | [
"MIT"
] | 4 | 2019-09-28T17:02:47.000Z | 2020-01-15T15:51:40.000Z | data_strctures/queue.py | CodenameREBIRTH/algopy | f3894c98b7232d0f6067eb52ca68f9e6a8875884 | [
"MIT"
] | 1 | 2019-10-15T03:56:59.000Z | 2019-10-15T03:56:59.000Z | data_strctures/queue.py | CodenameREBIRTH/algopy | f3894c98b7232d0f6067eb52ca68f9e6a8875884 | [
"MIT"
] | 3 | 2019-10-14T16:22:08.000Z | 2020-10-07T08:32:08.000Z | import inspect
class Queue(object):
    '''
    FIFO (First In First Out) queue backed by a python list.
    '''

    def __init__(self, capacity=10):
        '''
        :param capacity: max capacity of the queue, default is 10
        '''
        self.queue = []
        self.size = 0
        self.capacity = capacity
        self.front = None
        self.rear = None

    def __str__(self):
        '''
        :return: space-separated rendering of the queued elements
        '''
        return ' '.join(str(item) for item in self.queue)

    def get_size(self):
        '''
        :return: current size of the queue
        '''
        return self.size

    def is_empty(self):
        '''
        :return: true if queue is empty, false otherwise
        '''
        return not self.size

    def enequeue(self, value):
        '''
        Append a value at the rear of the queue.
        :param value: value to be enqueued
        :return: -1 if queue is full, None otherwise
        '''
        if self.size >= self.capacity:
            return -1
        self.queue.append(value)
        if self.front is None:
            self.front = self.rear = 0
        else:
            self.rear = self.size
        self.size += 1

    def dequeue(self):
        '''
        Remove the element at the front of the queue.
        :return: the element removed from the queue, None if queue is empty
        '''
        if self.is_empty():
            return None
        self.size -= 1
        # Keep the front/rear bookkeeping consistent with enequeue().
        self.rear = 0 if self.size == 0 else self.size - 1
        if self.size == 0:
            self.front = 0
        return self.queue.pop(0)

    @staticmethod
    def get_code():
        '''
        :return: source code for this class
        '''
        return inspect.getsource(Queue)
class Deque(object):
    '''
    Deque -> doubly ended queue, bounded by a maximum capacity.
    '''

    def __init__(self, capacity=10):
        '''
        :param capacity: max capacity of the deque
        '''
        self.queue = []
        self.capacity = capacity

    def __str__(self):
        '''
        :return: space-separated rendering of the elements
        '''
        return ' '.join(str(item) for item in self.queue)

    def is_full(self):
        '''
        to check whether deque is full or not
        :return: true if deque is full, false otherwise
        '''
        return len(self.queue) == self.capacity

    def is_empty(self):
        '''
        to check whether deque is empty or not
        :return: true if deque is empty, false otherwise
        '''
        return len(self.queue) == 0

    def insert_right(self, info):
        '''
        :param info: data to be added at the right end
        :return: None (silently drops the value if the deque is full)
        '''
        if self.is_full():
            return None
        self.queue.append(info)

    def insert_left(self, info):
        '''
        :param info: data to be added at the left end
        :return: None (silently drops the value if the deque is full)
        '''
        if self.is_full():
            return None
        self.queue.insert(0, info)

    def remove_left(self):
        '''
        :return: element removed from the left end, None if deque is empty
        '''
        if self.is_empty():
            return None
        return self.queue.pop(0)

    def remove_right(self):
        '''
        :return: element removed from the right end, None if deque is empty
        '''
        if self.is_empty():
            return None
        # BUG FIX: the popped element was previously discarded (implicit
        # None return); return it so remove_right() mirrors remove_left().
        return self.queue.pop()

    @staticmethod
    def get_code():
        '''
        :return: source code for the current class
        '''
        return inspect.getsource(Deque)
# TODO -> add priority queue and circuler queue for concept purpose
| 18.331606 | 75 | 0.498021 | 406 | 3,538 | 4.263547 | 0.197044 | 0.062392 | 0.025997 | 0.041594 | 0.49509 | 0.380127 | 0.284229 | 0.218371 | 0.182553 | 0.182553 | 0 | 0.00943 | 0.400509 | 3,538 | 192 | 76 | 18.427083 | 0.806695 | 0.2671 | 0 | 0.528571 | 0 | 0 | 0.000931 | 0 | 0 | 0 | 0 | 0.005208 | 0 | 1 | 0.228571 | false | 0 | 0.014286 | 0.014286 | 0.5 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
44399f62445aff54bdaa825b5afca3b8782554ba | 1,803 | py | Python | merak/utils/refs.py | Yao1993/merak | 517b7a8eca82eebbf22bcd3688a79e1e76ed9d42 | [
"Apache-2.0"
] | 16 | 2021-01-22T04:09:30.000Z | 2022-03-17T10:38:34.000Z | merak/utils/refs.py | Yao1993/merak | 517b7a8eca82eebbf22bcd3688a79e1e76ed9d42 | [
"Apache-2.0"
] | 6 | 2021-04-12T10:09:47.000Z | 2022-03-24T09:31:13.000Z | merak/utils/refs.py | Yao1993/merak | 517b7a8eca82eebbf22bcd3688a79e1e76ed9d42 | [
"Apache-2.0"
] | 2 | 2021-07-14T05:39:17.000Z | 2021-07-28T16:27:40.000Z | # Copyright 2021 (David) Siu-Kei Muk. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
def _get_lib_root():
    """Return the absolute path of the package root (one level above this
    utils module)."""
    return os.path.abspath(os.path.join(os.path.dirname(__file__), ".."))
class _PrefixPathDict(dict):
    """dict that joins a fixed path prefix onto every *string* value that is
    assigned; non-string values are stored untouched."""

    def __init__(self, prefix):
        super(_PrefixPathDict, self).__init__()
        self._prefix = prefix

    def __setitem__(self, key, value):
        mapped = (os.path.join(self._prefix, value)
                  if isinstance(value, str) else value)
        super(_PrefixPathDict, self).__setitem__(key, mapped)
def _PackageDataMeta(prefix):
    """Build a metaclass whose class-body namespace is a _PrefixPathDict
    rooted at <lib_root>/<prefix>, so string class attributes are stored as
    paths under that directory."""
    class _Meta(type):
        @classmethod
        def __prepare__(metacls, name, bases):
            # NOTE(review): the keyword names used here ('metacls', '__name',
            # '__bases') land in type.__prepare__'s **kwds and appear to be
            # ignored; it returns a plain mapping — confirm this is intended.
            origin = super(_Meta, _Meta).__prepare__(metacls=metacls,
                                                     __name=name,
                                                     __bases=bases)
            # Replace the namespace with a dict that prefixes string values.
            pfx_path_dict = _PrefixPathDict(
                os.path.join(_get_lib_root(), prefix))
            if origin: pfx_path_dict.update(origin)
            return pfx_path_dict
    return _Meta
class Template(metaclass=_PackageDataMeta("data")):
    # The metaclass namespace (_PrefixPathDict) turns these relative names
    # into paths under <lib_root>/data at class-creation time.
    PY_INIT = "__init__.tmpl"
    PY_SETUP = "setup.tmpl"
| 32.781818 | 80 | 0.665003 | 224 | 1,803 | 5.013393 | 0.513393 | 0.053428 | 0.042743 | 0.028495 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.005583 | 0.205214 | 1,803 | 54 | 81 | 33.388889 | 0.778088 | 0.364393 | 0 | 0 | 0 | 0 | 0.025641 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.166667 | false | 0 | 0.133333 | 0 | 0.566667 | 0.033333 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 1 |
443a6baa9c0756dd7aae2e5c5109341c173ecdeb | 538 | py | Python | Python/kth-largest-element-in-a-stream.py | se77enn/LeetCode-Solution | d29ef5358cae592b63952c3d293897a176fb75e1 | [
"MIT"
] | 1 | 2020-10-27T03:22:31.000Z | 2020-10-27T03:22:31.000Z | Python/kth-largest-element-in-a-stream.py | se77enn/LeetCode-Solution | d29ef5358cae592b63952c3d293897a176fb75e1 | [
"MIT"
] | null | null | null | Python/kth-largest-element-in-a-stream.py | se77enn/LeetCode-Solution | d29ef5358cae592b63952c3d293897a176fb75e1 | [
"MIT"
] | 1 | 2021-03-22T18:58:23.000Z | 2021-03-22T18:58:23.000Z | # Time: O(nlogk)
# Space: O(k)
import heapq
class KthLargest(object):
    '''
    Maintains the k-th largest element of a growing stream of numbers by
    keeping only the k largest values in a min-heap: the heap root is the
    k-th largest element.
    '''
    def __init__(self, k, nums):
        '''
        :type k: int
        :type nums: List[int]
        '''
        self.__k = k
        self.__min_heap = []
        for num in nums:
            self.add(num)

    def add(self, val):
        '''
        :type val: int
        :rtype: int
        '''
        if len(self.__min_heap) < self.__k:
            heapq.heappush(self.__min_heap, val)
        else:
            # Push val and evict the smallest in one O(log k) operation.
            heapq.heappushpop(self.__min_heap, val)
        return self.__min_heap[0]
| 19.214286 | 44 | 0.494424 | 68 | 538 | 3.573529 | 0.455882 | 0.144033 | 0.226337 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.002976 | 0.375465 | 538 | 27 | 45 | 19.925926 | 0.720238 | 0.167286 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.166667 | false | 0 | 0.083333 | 0 | 0.416667 | 0 | 0 | 0 | 0 | null | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
44460ed3ad075b11814a0994ca4285eafdbb39b1 | 823 | py | Python | scratch/interesting_cc.py | mikeboers/PyFlickr | 60f258ad6a5266fa8d8e6f6dc829e90e61df9696 | [
"BSD-3-Clause"
] | null | null | null | scratch/interesting_cc.py | mikeboers/PyFlickr | 60f258ad6a5266fa8d8e6f6dc829e90e61df9696 | [
"BSD-3-Clause"
] | null | null | null | scratch/interesting_cc.py | mikeboers/PyFlickr | 60f258ad6a5266fa8d8e6f6dc829e90e61df9696 | [
"BSD-3-Clause"
] | null | null | null | import os
import sys
import datetime
from pprint import pprint
import logging
logging.basicConfig(file=sys.stderr, level=logging.DEBUG)
from flickr import Flickr
from flickr.util import *
# Read Flickr API credentials from the environment.
key = os.environ.get('FLICKR_KEY')
secret = os.environ.get('FLICKR_SECRET')

if not (key and secret):
    print 'please set FLICKR_KEY and FLICKR_SECRET'
    exit(1)

flickr = Flickr((key, secret), format='json', echo=True)

# Walk backwards one day at a time until 10 suitably licensed
# "interestingness" photos have been printed.
one_day = datetime.timedelta(days=1)
date = datetime.date.today()
found = 0
while found < 10:
    date -= one_day
    print '---', date
    for photo in flickr.interestingness.getList.iter(date=str(date), extras='license', per_page=25):
        # License id '0' is skipped — presumably the default
        # all-rights-reserved license, keeping only CC-licensed photos
        # (cf. the script name); confirm against Flickr's license table.
        if photo['license'] == '0':
            continue
        found += 1
        print found, short_url(photo['id'])
        if found == 10:
            break
| 21.657895 | 100 | 0.663426 | 115 | 823 | 4.678261 | 0.495652 | 0.050186 | 0.04461 | 0.066915 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.017028 | 0.215067 | 823 | 37 | 101 | 22.243243 | 0.815789 | 0 | 0 | 0 | 0 | 0 | 0.104623 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | null | 0 | 0.259259 | null | null | 0.148148 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
44474deb5fa0c086545a3adfa369f518e9e5f9f5 | 2,415 | py | Python | fused_biattention.py | utahnlp/layer_augmentation_qa | a3dd2d7af6f45b2aa500ff7612f77c31fb879cf0 | [
"Apache-2.0"
] | 6 | 2019-10-08T11:48:21.000Z | 2021-05-28T07:00:56.000Z | fused_biattention.py | utahnlp/layer_augmentation_qa | a3dd2d7af6f45b2aa500ff7612f77c31fb879cf0 | [
"Apache-2.0"
] | null | null | null | fused_biattention.py | utahnlp/layer_augmentation_qa | a3dd2d7af6f45b2aa500ff7612f77c31fb879cf0 | [
"Apache-2.0"
] | 1 | 2020-07-02T06:31:06.000Z | 2020-07-02T06:31:06.000Z | import sys
import torch
from torch import nn
from torch.autograd import Variable
from view import *
from holder import *
from util import *
from join_table import *
from trilinear_prod import *
from fusion import *
# fused bidir attention
class FusedBiAttention(torch.nn.Module):
    """Bidirectional attention between context (C) and query (Q) encodings,
    with the attended summaries fused back into each sequence."""

    def __init__(self, opt, shared):
        super(FusedBiAttention, self).__init__()
        self.opt = opt
        self.shared = shared  # per-batch state shared across modules
        # Encoder width grows by the elmo size when elmo is enabled.
        enc_size = opt.hidden_size if 'elmo' not in opt.enc else opt.hidden_size + opt.elmo_size
        self.trilinear_prod = TrilinearProd(opt, enc_size)
        self.fusion = Fusion(opt, enc_size)
        self.softmax2 = nn.Softmax(2)  # softmax over the last (3rd) dim
        self.phi_joiner = JoinTable(2)

    def biattention(self, scores, C, Q):
        """Softmax the similarity scores in both directions and attend;
        returns (att1, att2, agg1, agg2)."""
        batch_l = self.shared.batch_l
        context_l = self.shared.context_l
        enc_size = C.shape[2]
        # attention
        att1 = self.softmax2(scores)  # (batch_l, context_l, max_query_l)
        att2 = self.softmax2(scores.transpose(1,2))  # (batch_l, max_query_l, context_l)
        # attend
        agg1 = att1.bmm(Q)  # (batch_l, context_l, enc_size)
        agg2 = att2.bmm(C)  # (batch_l, max_query_l, enc_size)
        agg2 = self.masked_fill_query(agg2)
        return att1, att2, agg1, agg2

    def masked_fill_scores(self, scores):
        # Where score_mask is 0 the score becomes neg_inf, so those
        # positions get ~zero weight after softmax.
        return scores * self.shared.score_mask + (self.shared.one - self.shared.score_mask) * self.shared.neg_inf

    def masked_fill_query(self, query):
        # Zero out rows beyond each example's true query length.
        return query * self.shared.query_mask.unsqueeze(-1)

    # input encodings of context (C) and query (Q)
    # C of shape (batch_l, context_l, hidden_size)
    # Q of shape (batch_l, query_l, hidden_size)
    def forward(self, C, Q):
        self.update_context()
        batch_l = self.shared.batch_l
        context_l = self.shared.context_l
        max_query_l = self.shared.query_l.max()
        hidden_size = self.opt.hidden_size
        # get similarity score
        scores = self.trilinear_prod(C, Q)
        scores = self.masked_fill_scores(scores)
        # attend in both directions
        att1, att2, agg1, agg2 = self.biattention(scores, C, Q)
        # fuse the attended summaries back into each sequence
        G = self.fusion(C, agg1)
        P = self.fusion(Q, agg2)
        P = self.masked_fill_query(P)
        # bookkeeping
        self.shared.att_soft1 = att1
        self.shared.att_soft2 = att2
        self.shared.G = G
        self.shared.P = P
        return att1, att2, G

    def update_context(self):
        # NOTE(review): these locals are computed but unused — looks like a
        # placeholder kept for interface parity with sibling modules;
        # confirm before removing.
        batch_l = self.shared.batch_l
        context_l = self.shared.context_l
        max_query_l = self.shared.query_l.max()
        word_vec_size = self.opt.word_vec_size
        hidden_size = self.opt.hidden_size

    def begin_pass(self):
        # Hook invoked at the start of a data pass; no-op here.
        pass

    def end_pass(self):
        # Hook invoked at the end of a data pass; no-op here.
        pass
| 23.446602 | 107 | 0.719669 | 387 | 2,415 | 4.26615 | 0.22739 | 0.109025 | 0.053301 | 0.050878 | 0.217444 | 0.188976 | 0.121139 | 0.121139 | 0.121139 | 0.121139 | 0 | 0.016484 | 0.171014 | 2,415 | 102 | 108 | 23.676471 | 0.808192 | 0.13913 | 0 | 0.196721 | 0 | 0 | 0.001938 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.131148 | false | 0.065574 | 0.163934 | 0.032787 | 0.377049 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 1 |
4449271c863f70b26b80fbfbede707ee87657d0c | 5,565 | py | Python | BresMaker.py | EvolveWithProseeker/Proseeker | 640c54952fc9295d03a2fdb22a452a8e4f6d4611 | [
"MIT"
] | 3 | 2021-05-31T00:56:30.000Z | 2022-03-23T14:24:09.000Z | BresMaker.py | EvolveWithProseeker/Proseeker | 640c54952fc9295d03a2fdb22a452a8e4f6d4611 | [
"MIT"
] | null | null | null | BresMaker.py | EvolveWithProseeker/Proseeker | 640c54952fc9295d03a2fdb22a452a8e4f6d4611 | [
"MIT"
] | 1 | 2022-03-24T08:49:30.000Z | 2022-03-24T08:49:30.000Z | # Simplified Bres Maker
# Version: 1.0
#Python Version: 2.0
# IMPORTS
import pandas as pd
import numpy as np
from sklearn.cluster import KMeans
from numpy import asarray
from numpy import savetxt
import sys
import os
# DEFINITIONS
def find(s, ch):
    """Return the indices of every occurrence of *ch* in the string *s*."""
    positions = []
    for index, letter in enumerate(s):
        if letter == ch:
            positions.append(index)
    return positions
# DATALOAD
# NOTE(review): command-line argument parsing is commented out and the script
# currently runs only with the hard-coded paths and parameters below.
# Re-enable the sys.argv lines (and drop the literals) to use it as a CLI tool.
#user_input = str(sys.argv[1])
#ranking = str(sys.argv[2])
#working = str(sys.argv[3])
#iterations = int(sys.argv[4])
#trys = int(sys.argv[5])
user_input = "D:/Proseeker/exampledeets.csv"  # input CSV: sequences + site lists
ranking = "D:/Proseeker/ranking.csv"          # amino-acid property table
working = "D:/Proseeker"                      # output directory for .bres CSVs
iterations = 1000000  # passed to KMeans as max_iter
trys = 1000           # passed to KMeans as n_init (restarts)
# Load the amino-acid property table: one column per one-letter residue code.
aavals = pd.read_csv(ranking,
                     usecols=['A', 'R', 'N', 'D', 'C', 'Q', 'E', 'G', 'H', 'I',
                              'L', 'K', 'M', 'F', 'P', 'S', 'T', 'W', 'Y', 'V'],
                     sep=',')

# Min-max normalize every property row to [0, 1].
# BUG FIX: the original called DataFrame.replace() and discarded the return
# value (replace is not in-place by default), so the table was never actually
# normalized. Assign the scaled row back instead. Iterating len(aavals) also
# replaces the hard-coded row count of 544.
for i in range(len(aavals)):
    rowmin = aavals.iloc[i].min()
    rowmax = aavals.iloc[i].max()
    # NOTE(review): a constant row (rowmax == rowmin) divides by zero, just as
    # the originally intended computation would have.
    aavals.iloc[i] = (aavals.iloc[i] - rowmin) / (rowmax - rowmin)

# Per-residue property vectors keyed by one-letter code
# (replaces 20 repeated d['X'] = list(aavals['X']) lines).
d = {aa: list(aavals[aa]) for aa in aavals.columns}

# Input library: column 0 = sequences, column 1 = colon-separated site lists.
library = pd.read_csv(user_input, header=None, sep=',')
seqs = library[0]
sites = library[1]
# PROCESSING
for x in range(len(seqs)):
    # Duplicate the sequence so windows near the sequence end can index past
    # len(subjectstd) (and negative indices land inside real data).
    subjectstd = list(seqs[x])
    subject = subjectstd + subjectstd
    # Map every residue letter (either case) onto its property vector;
    # characters outside the 20 codes are left untouched, exactly like the
    # original. This replaces the original 40-branch if/elif chain with a
    # single dictionary lookup.
    subject = [d.get(ch.upper(), ch) for ch in subject]

    # Parse the colon-separated site positions for this sequence.
    subjectsites = str(sites[x])
    splits = find(subjectsites, ':')
    splits.append(len(subjectsites))
    if sum(splits) > 0:
        for q in range(len(splits)):
            if q == 0:
                subpos = int(subjectsites[0:splits[q]])
            else:
                subpos = int(subjectsites[splits[q - 1] + 1:splits[q]])
            # 13-residue window centred on the site (offsets -6 .. +6).
            # Modular indexing reproduces the negative-index wrap-around of
            # the original explicit 13-element tuple.
            breswindow = [subject[(subpos + off) % len(subject)] for off in range(-6, 7)]
            # Columns = window positions, rows = normalized property values.
            breswindow = np.column_stack(breswindow)
            # Cluster the property rows into 50 groups.
            kmeans = KMeans(n_clusters=50, n_init=trys, max_iter=iterations, algorithm="full")
            kmeans.fit(breswindow)
            clusters = kmeans.labels_
            # Append the cluster label as a 14th column and write to disk.
            breswindow = np.insert(breswindow, 13, clusters, axis=1)
            savetxt(os.path.join(working, 'p{}.bres{}.csv'.format(x + 1, q + 1)),
                    breswindow, delimiter=',', fmt='%f')
| 29.759358 | 120 | 0.4708 | 760 | 5,565 | 3.434211 | 0.165789 | 0.245211 | 0.144828 | 0.012644 | 0.414559 | 0.350958 | 0.350958 | 0.350958 | 0.337931 | 0.337931 | 0 | 0.014694 | 0.315184 | 5,565 | 186 | 121 | 29.919355 | 0.670165 | 0.040611 | 0 | 0.264901 | 0 | 0 | 0.042997 | 0.009951 | 0 | 0 | 0 | 0 | 0 | 1 | 0.006623 | false | 0 | 0.046358 | 0.006623 | 0.059603 | 0 | 0 | 0 | 0 | null | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
44503eada61c7c6d19634eb06fc44feea60e5c94 | 2,445 | py | Python | SeeThru_Feeds/Library/Components/HTTP.py | SeeThru-Networks/Python-Feed | 9c29fcf3462475e339f95d3e9766ed3a652ee6c0 | [
"MIT"
] | 3 | 2020-04-15T19:58:37.000Z | 2020-06-24T14:08:48.000Z | SeeThru_Feeds/Library/Components/HTTP.py | SeeThru-Networks/Python-Feed | 9c29fcf3462475e339f95d3e9766ed3a652ee6c0 | [
"MIT"
] | null | null | null | SeeThru_Feeds/Library/Components/HTTP.py | SeeThru-Networks/Python-Feed | 9c29fcf3462475e339f95d3e9766ed3a652ee6c0 | [
"MIT"
] | null | null | null | from SeeThru_Feeds.Model.Components.ComponentBase import ComponentBase
from SeeThru_Feeds.Model.Properties.Properties import FillableProperty, ResultProperty
import requests
class HTTPBase(ComponentBase):
    """Shared property declarations for the HTTP request components.

    The fillable properties are request inputs supplied by the caller; the
    result properties are populated by the subclasses after the request runs
    (see HTTPGet/HTTPPost.component_execute).
    """
    # Request inputs
    URL = FillableProperty(name="url", required=True)
    COOKIES = FillableProperty(name="cookies", required=False)
    HEADERS = FillableProperty(name="header", required=False, default=None, of_type=dict)
    # Response outputs
    RESPONSE = ResultProperty(name="response")
    STATUS_CODE = ResultProperty(name="status_code")
    RESPONSE_CONTENT = ResultProperty(name="response_content")
    RESPONSE_URL = ResultProperty(name="response_url")

    Component_Title = "HTTP Component"
    Component_Description = "This component provides a wrapper over the requests http methods to make them follow the component design rules"
    Component_Author = "SeeThru Networks"
    Component_Owner = "SeeThru Networks"
class HTTPGet(HTTPBase):
    """Execute an HTTP GET request and publish the response details."""

    def component_execute(self):
        """Perform the GET, then record response, status, cookies, body and URL."""
        target = self.get_property(HTTPBase.URL)
        jar = self.get_property(HTTPBase.COOKIES)
        header_dict = self.get_property(HTTPBase.HEADERS)
        response = requests.get(target, cookies=jar, headers=header_dict)
        for prop, value in ((HTTPGet.RESPONSE, response),
                            (HTTPGet.STATUS_CODE, response.status_code),
                            (HTTPGet.COOKIES, response.cookies),
                            (HTTPGet.RESPONSE_CONTENT, response.text),
                            (HTTPGet.RESPONSE_URL, response.url)):
            self.set_property(prop, value)
class HTTPPost(HTTPBase):
    """Execute an HTTP POST request and publish the response details."""

    DATA = FillableProperty(name="data")
    JSON = FillableProperty(name="json", of_type=dict, required=False)
    CONTENT_TYPE = FillableProperty(
        name="content_type", default="application/x-www-form-urlencoded")

    def component_execute(self):
        """Perform the POST, then record response, status, body and URL."""
        # Fall back to the configured content type when no headers were given.
        if self.get_property(HTTPBase.HEADERS) is None:
            default_headers = {'Content-Type': self.get_property(self.CONTENT_TYPE)}
            self.set_property(HTTPBase.HEADERS, default_headers)
        response = requests.post(
            self.get_property(HTTPPost.URL),
            cookies=self.get_property(HTTPPost.COOKIES),
            data=self.get_property(HTTPPost.DATA),
            json=self.get_property(HTTPPost.JSON),
            headers=self.get_property(HTTPBase.HEADERS)
        )
        for prop, value in ((HTTPPost.RESPONSE, response),
                            (HTTPPost.STATUS_CODE, response.status_code),
                            (HTTPPost.RESPONSE_CONTENT, response.text),
                            (HTTPPost.RESPONSE_URL, response.url)):
            self.set_property(prop, value)
| 45.277778 | 141 | 0.737423 | 280 | 2,445 | 6.271429 | 0.246429 | 0.039863 | 0.085421 | 0.06549 | 0.280182 | 0.156036 | 0.156036 | 0.1082 | 0.059226 | 0 | 0 | 0 | 0.164417 | 2,445 | 53 | 142 | 46.132075 | 0.85952 | 0 | 0 | 0.046512 | 0 | 0 | 0.116564 | 0.013497 | 0 | 0 | 0 | 0 | 0 | 1 | 0.046512 | false | 0 | 0.069767 | 0 | 0.511628 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 1 |
445cac484200f11a9ce597a53a164ef8e3583569 | 13,084 | py | Python | WaveBlocksND/IOManager.py | raoulbq/WaveBlocksND | 225b5dd9b1af1998bd40b5f6467ee959292b6a83 | [
"BSD-3-Clause"
] | 3 | 2016-09-01T21:13:54.000Z | 2020-03-23T15:45:32.000Z | WaveBlocksND/IOManager.py | raoulbq/WaveBlocksND | 225b5dd9b1af1998bd40b5f6467ee959292b6a83 | [
"BSD-3-Clause"
] | null | null | null | WaveBlocksND/IOManager.py | raoulbq/WaveBlocksND | 225b5dd9b1af1998bd40b5f6467ee959292b6a83 | [
"BSD-3-Clause"
] | 6 | 2016-03-16T15:22:01.000Z | 2021-03-13T14:06:54.000Z | """The WaveBlocks Project
This file contains code for serializing simulation data.
@author: R. Bourquin
@copyright: Copyright (C) 2010, 2011, 2012, 2016 R. Bourquin
@license: Modified BSD License
"""
import os
import types
import pickle
import json
import six
import h5py as hdf
import numpy as np
__all__ = ["IOManager"]
class IOManager(object):
    """An IOManager class that can save various simulation results into data
    files. For storing the data we use the well established HDF5 file format.
    An IOManager instance abstracts the input and output operations and translates
    requests into low-level operations.
    """

    def __init__(self):
        self._hdf_file_version = 2
        self._prefixb = "datablock_"
        self._prefixg = "group_"

        # The current open data file (h5py File handle); None when closed
        self._srf = None

        # Book keeping data
        # TODO: consider storing these values inside the data files
        self._block_ids = None
        self._block_count = None
        self._group_ids = None
        self._group_count = None

    def __str__(self):
        if self._srf is None:
            s = "IOManager instance without an open file."
        else:
            s = "IOManager instance with open file " + str(self._srf.filename) + "\n"
            s += " containing " + str(self._block_count) + " data blocks in "
            s += str(self._group_count) + " data groups."
        return s

    def __getattr__(self, key):
        """Try to load a plugin if a member function is not available.
        Plugins implement the actual I/O operations for specific data objects.
        """
        parts = key.split("_")

        # Plugin name convention, we only trigger plugin loading
        # for requests starting with "add", "load" or "save".
        # However, IF we load a plugin, we load ALL functions it defines.
        if parts[0] not in ("add", "delete", "has", "load", "save", "update"):
            return
        else:
            print("Requested function: {}".format(key))
            name = "IOM_plugin_" + parts[1]

        # Load the necessary plugin
        print("Plugin to load: {}".format(name))
        try:
            plugin = __import__(name)
        except ImportError:
            raise ImportError("IOM plugin '{}' not found!".format(name))

        # Filter out functions we want to add to IOM and
        # bind the methods to the current IOM instance
        for k, v in plugin.__dict__.items():
            if isinstance(v, types.FunctionType):
                self.__dict__[k] = types.MethodType(v, self)

        # Now return the new function to complete it's call
        return self.__dict__[key]

    def create_file(self, filename):
        """Set up a new :py:class:`IOManager` instance. The output file is created and opened.

        :param filename: The filename (optionally with filepath) of the file we try to create.
                         If not given the default value from `GlobalDefaults` is used.
        """
        # Create the file if it does not yet exist.
        # Otherwise raise an exception to avoid overwriting data.
        if os.path.lexists(filename):
            raise IOError("Output file '{}' already exists!".format(filename))
        else:
            # BUG FIX: pass the mode explicitly; h5py >= 3 changed the default
            # open mode from "a" (read/write, create) to "r" (read-only).
            self._srf = hdf.File(filename, "a")

        # Initialize the internal book keeping data
        self._block_ids = []
        self._block_count = 0
        self._group_ids = []
        self._group_count = 0

        # The version of the current file format
        self._srf.attrs["file_version"] = self._hdf_file_version

        # Save the simulation parameters
        self.create_group(groupid="global")
        self.create_block(blockid="global", groupid="global")

    def open_file(self, filename):
        """Load a given file that contains the results from another simulation.

        :param filename: The filename (optionally with filepath) of the file we try to load.
                         If not given the default value from `GlobalDefaults` is used.
        """
        # Try to open the file or raise an exception if it does not exist.
        if os.path.lexists(filename):
            if hdf.is_hdf5(filename):
                # BUG FIX: explicit mode, matching the pre-h5py-3 default "a"
                # so blocks/groups can still be created in an opened file.
                self._srf = hdf.File(filename, "a")
            else:
                raise IOError("File '{}' is not a hdf5 file".format(filename))
        else:
            raise IOError("File '{}' does not exist!".format(filename))

        # Check if the file format can be read by the IOManager
        if "file_version" not in self._srf.attrs.keys():
            print("Warning: Unsupported file format without version number")
        else:
            if self._srf.attrs["file_version"] != self._hdf_file_version:
                raise IOError("Unsupported file format version " + str(self._srf.attrs["file_version"]))

        # Initialize the internal book keeping data by scanning the file layout
        self._block_ids = [s[len(self._prefixb):] for s in self._srf.keys() if s.startswith(self._prefixb)]
        self._block_count = len(self._block_ids)

        self._group_ids = [s[len(self._prefixg):] for s in self._srf.keys() if s.startswith(self._prefixg)]
        self._group_count = len(self._group_ids)

    def finalize(self):
        """Close the open output file and reset the internal information."""
        if self._srf is None:
            return

        # Close the file
        self._srf.flush()
        self._srf.close()
        self._srf = None

        # Reset book keeping data
        self._block_ids = None
        self._block_count = None
        self._group_ids = None
        self._group_count = None

    def get_number_blocks(self, groupid=None):
        """Return the number of data blocks in the current file structure.

        :param groupid: An optional group ID. If given we count only data blocks which are a
                        member of this group. If it is ``None`` (default) we count all data blocks.
        """
        if groupid is None:
            return self._block_count
        else:
            return len(self.get_block_ids(groupid=groupid))

    def get_number_groups(self):
        """Return the number of data block groups in the current file structure.
        """
        return self._group_count

    def get_block_ids(self, groupid=None, grouped=False):
        """Return a list containing the IDs for all blocks in the current file structure.

        :param groupid: An optional group ID. If given we return only block IDs for blocks
                        which are a member of this group. If it is ``None`` we return all block IDs.
        :param grouped: If ``True`` we group the block IDs by their group into lists.
                        This option is only relevant in case the `groupid` is not given.
        """
        if groupid is not None:
            if str(groupid) in self._group_ids:
                return self._srf["/" + self._prefixg + str(groupid)].keys()
            else:
                return []
        else:
            if grouped is False:
                return self._block_ids[:]
            else:
                return [self._srf["/" + self._prefixg + str(gid)].keys() for gid in self.get_group_ids()]

    def get_group_ids(self, exclude=()):
        """Return a list containing the IDs for all groups in the current file structure.

        :param exclude: A sequence of group IDs to exclude. Per default no group is excluded.
        """
        # BUG FIX: the default used to be a mutable list literal; an immutable
        # tuple avoids the shared-mutable-default pitfall and behaves the same.
        return [gid for gid in self._group_ids if gid not in exclude]

    def get_group_of_block(self, blockid):
        """Return the ID of the group a given block belongs to or ``None``
        if there is no such data block.

        :param blockid: The ID of the given block.
        """
        if str(blockid) in self._block_ids:
            return self._srf["/" + self._prefixb + str(blockid)].attrs["group"]
        else:
            return None

    def create_block(self, *, blockid=None, groupid="global", **blockattributes):
        """Create a data block with the specified block ID. Each data block can
        store several chunks of information, and there can be an arbitrary number
        of data blocks per file.

        :param blockid: The ID for the new data block. If not given the blockid will
                        be choosen automatically. The block ID has to be unique.
        :return: The block ID of the created block.
        """
        if self._srf is None:
            return

        if blockid is not None and (not str(blockid).isalnum()):
            raise ValueError("Block ID allows only characters A-Z, a-z and 0-9 and no leading digit.")

        if blockid is not None and str(blockid) in self._block_ids:
            raise ValueError("Invalid or already used block ID: " + str(blockid))

        if blockid is None:
            # Try to find a valid autonumber
            autonumber = 0
            while str(autonumber) in self._block_ids:
                autonumber += 1
            blockid = str(autonumber)

        self._block_ids.append(str(blockid))
        self._block_count += 1

        # Create the data block
        datablock = self._srf.create_group("/" + self._prefixb + str(blockid))

        # Does the group already exist?
        if not str(groupid) in self._group_ids:
            self.create_group(groupid=groupid)

        # Put the data block into the group via a soft link
        datablock.attrs["group"] = str(groupid)
        self._srf["/" + self._prefixg + str(groupid) + "/" + str(blockid)] = hdf.SoftLink("/" + self._prefixb + str(blockid))

        # Write some extended attributes
        for attribute, value in blockattributes.items():
            datablock.attrs['ext:' + attribute] = str(value)

        return blockid

    def create_group(self, groupid=None):
        """Create a data group with the specified group ID. Each data group can
        contain an arbitrary number of data blocks, and there can be an arbitrary
        number of data groups per file.

        :param groupid: The ID for the new data group. If not given the group ID will
                        be chosen automatically. The group ID has to be unique.
        :return: The group ID of the created group.
        """
        if self._srf is None:
            return

        if groupid is not None and (not str(groupid).isalnum()):
            raise ValueError("Group ID allows only characters A-Z, a-z and 0-9 and no leading digit.")

        if groupid is not None and str(groupid) in self._group_ids:
            raise ValueError("Invalid or already used group ID: " + str(groupid))

        if groupid is None:
            # Try to find a valid autonumber
            autonumber = 0
            while str(autonumber) in self._group_ids:
                autonumber += 1
            groupid = str(autonumber)

        self._group_ids.append(str(groupid))
        self._group_count += 1
        # Create the group
        self._srf.create_group("/" + self._prefixg + str(groupid))

        return groupid

    def must_resize(self, path, size, axis=0):
        """Check if we must resize a given dataset and if yes, resize it.
        """
        # Ok, it's inefficient but sufficient for now.
        # TODO: Consider resizing in bigger chunks and shrinking at the end if necessary.

        # Current size of the array
        cur_len = self._srf[path].shape[axis]

        # Is the current size smaller than the new "size"?
        # If yes, then resize the array along the given axis.
        if cur_len - 1 < size:
            self._srf[path].resize(size + 1, axis=axis)

    def find_timestep_index(self, timegridpath, timestep):
        """Lookup the index for a given timestep. This assumes the timegrid
        array is strictly monotone.
        """
        # TODO: Allow for slicing etc
        timegrid = self._srf[timegridpath][:]
        index = (timegrid == timestep)
        nrvals = np.sum(index)

        if nrvals < 1:
            raise ValueError("No index for given timestep!")
        elif nrvals > 1:
            raise ValueError("Multiple indices for given timestep!")
        else:
            return int(np.where(index)[0])

    def split_data(self, data, axis):
        """Split a multi-dimensional data block into slabs along a given axis.

        :param data: The data tensor given.
        :param axis: The axis along which to split the data.
        :return: A list of slices.
        """
        parts = data.shape[axis]
        return np.split(data, parts, axis=axis)

    def _save_attr_value(self, value):
        # Store values as pickled byte strings wrapped in a numpy void scalar,
        # because hdf can only store strings or ndarrays as attributes.
        # (pickle.dumps always returns bytes on Python 3, so the old
        # six-based bytes coercion was dead code and has been removed.)
        return np.void(pickle.dumps(value))

    def _load_attr_value(self, value):
        # Inverse of _save_attr_value: unwrap the numpy void scalar and
        # unpickle the payload.
        # BUG FIX: the original called value.tobytes(npvbpv), passing the
        # value itself as a bogus positional argument; tobytes() takes none.
        return pickle.loads(value.tobytes())
| 36.64986 | 125 | 0.608759 | 1,732 | 13,084 | 4.485566 | 0.194573 | 0.023426 | 0.016991 | 0.00901 | 0.290642 | 0.252413 | 0.19256 | 0.170678 | 0.162183 | 0.130004 | 0 | 0.004956 | 0.306023 | 13,084 | 356 | 126 | 36.752809 | 0.850661 | 0.367625 | 0 | 0.232558 | 0 | 0.011628 | 0.100333 | 0 | 0 | 0 | 0 | 0.011236 | 0 | 1 | 0.104651 | false | 0 | 0.05814 | 0 | 0.296512 | 0.017442 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
445e689615f50b3103fabda264a4d3bf138fe65a | 1,968 | py | Python | Scripts/domain-db-all-domain-rates.py | colinwalshbrown/CWB_utils | 86675812f9398845d1994b57500830e2c3dc6cc0 | [
"MIT"
] | null | null | null | Scripts/domain-db-all-domain-rates.py | colinwalshbrown/CWB_utils | 86675812f9398845d1994b57500830e2c3dc6cc0 | [
"MIT"
] | null | null | null | Scripts/domain-db-all-domain-rates.py | colinwalshbrown/CWB_utils | 86675812f9398845d1994b57500830e2c3dc6cc0 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
import sys
import sqlite3
import matplotlib.pyplot as plt
import numpy as np
def main(args):
if len(args) < 2:
print "usage: domain-db-all-domain-rates.py <db> <species>"
sys.exit(1)
conn = sqlite3.connect(args[0])
conn.row_factory = sqlite3.Row
cur = conn.cursor()
sp1 = 'dmel'
sp2 = args[1]
domains = {}
cur.execute("""SELECT * FROM pfam_model""")
dom_list = cur.fetchall()
for x in dom_list:
d = {'desc':x['description'],
'length':x['length'],
'acc':x['accession'],
'pfam_id':x['pfam_id']}
domains[x['name']] = d
print "%s domains found" % len(domains.keys())
cnt = 0
for (k,d) in domains.items():
print "searching %s %s..." % (k,d['pfam_id'])
cur.execute("""SELECT r.rate
FROM domain_pw_aa_rates r JOIN dmel_pfam_domain d
ON r.dmel_pfam_domain_id = d.dmel_pfam_domain_id
WHERE ((d.pfam_id = ?) AND (r.species1 = ?)) AND (r.species2 = ?)""", (str(d['pfam_id']),sp1,sp2))
rates_result = cur.fetchall()
print len(rates_result)
rates = np.array([float(x['rate']) for x in rates_result])
med = np.median(rates)
d['med'] = med
print "%d %s: %s median = %s" % (cnt,k,'dmel_' + sp2,med)
cnt += 1
median_rates = np.array([x['med'] for x in domains.values() if x['med'] >= 0])
print median_rates
plt.figure(1,figsize=(7,7))
plt.hist(median_rates,bins=50)
plt.xlabel("Per-family Median Pairwise %s-%s AA rate" % (sp1,sp2))
plt.ylabel("Family Count")
plt.show()
plt.savefig("%s_all-domain-median-rates.svg" % (args[0],),format="svg")
rout = open("%s-%s_domains_median-rates.txt" % (sp1,sp2),"w")
print >> rout, "\t".join([str(x) for x in median_rates])
# Entry point: forward the command-line arguments (db path, species tag).
if __name__ == "__main__":
    main(sys.argv[1:])
| 31.741935 | 121 | 0.552337 | 284 | 1,968 | 3.690141 | 0.369718 | 0.073473 | 0.022901 | 0.030534 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.019635 | 0.275407 | 1,968 | 61 | 122 | 32.262295 | 0.715288 | 0.010163 | 0 | 0 | 0 | 0.020408 | 0.307653 | 0.067283 | 0 | 0 | 0 | 0 | 0 | 0 | null | null | 0 | 0.081633 | null | null | 0.142857 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
445f77fe93f324f67e10e55b0a43977d0e9569e3 | 292 | py | Python | spider/conf.py | schoeu/spid | eebb21fd41799cb077938593b4ff352b51322bc4 | [
"MIT"
] | 1 | 2018-10-18T06:59:34.000Z | 2018-10-18T06:59:34.000Z | spider/conf.py | schoeu/spid | eebb21fd41799cb077938593b4ff352b51322bc4 | [
"MIT"
] | 8 | 2021-03-18T21:11:03.000Z | 2022-03-11T23:30:58.000Z | spider/conf.py | schoeu/spid | eebb21fd41799cb077938593b4ff352b51322bc4 | [
"MIT"
] | null | null | null | import utils
import os
import json
def getjsondata(path):
    """Load and parse a JSON file.

    Relative paths are resolved against the directory containing this
    module. Returns the parsed Python object.
    """
    if not os.path.isabs(path):
        path = os.path.join(os.path.dirname(os.path.realpath(__file__)), path)
    # BUG FIX: the original opened the file and never closed it (resource
    # leak); use a context manager and json.load to read and close safely.
    with open(path) as f:
        return json.load(f)
def getconfig():
    """Return the parsed contents of the adjacent ``conf.json`` file."""
    config_path = './conf.json'
    return getjsondata(config_path)
4460778f6a8c643ae09c2cfcff03efe7a38ed115 | 2,080 | py | Python | rocketbear/orderings.py | wallarelvo/rocketbear | 905c24e4243f178a50907ae965de68dc119de25d | [
"Apache-2.0"
] | 1 | 2015-04-13T09:59:21.000Z | 2015-04-13T09:59:21.000Z | rocketbear/orderings.py | wallarelvo/rocketbear | 905c24e4243f178a50907ae965de68dc119de25d | [
"Apache-2.0"
] | null | null | null | rocketbear/orderings.py | wallarelvo/rocketbear | 905c24e4243f178a50907ae965de68dc119de25d | [
"Apache-2.0"
] | null | null | null |
import graph
"""
This file contains multiple heuristics that can be used for static and
dynamic variable orderings
"""
class DynamicDomOverDeg(graph.ConstraintGraph):
    """
    Dynamic ordering using the pruned domain over the degree
    """
    def ordering(self, v):
        """
        Returns the evaluation used for ordering for a node
        """
        domain_size = len(v.get_pruned_domain())
        degree = len(self.edges[v])
        return domain_size / degree
class DynamicSmallestDomainFirst(graph.ConstraintGraph):
    """
    Dynamic ordering where the variable with the currently smallest
    pruned domain is expanded frist
    """
    def ordering(self, v):
        """
        Returns the evaluation used for ordering for a node
        """
        pruned_domain = v.get_pruned_domain()
        return len(pruned_domain)
class StaticSmallestDomainFirst(graph.ConstraintGraph):
    """
    Static ordering where the variable with the overall smallest
    domain is expanded first
    """
    def ordering(self, v):
        """
        Returns the evaluation used for ordering for a node
        """
        full_domain = v.get_full_domain()
        return len(full_domain)
class StaticMostArcsFirst(graph.ConstraintGraph):
    """
    Static ordering where the variable with the overall largest number
    of constraints is expanded first
    """
    def ordering(self, v):
        """
        Returns the evaluation used for ordering for a node
        """
        degree = len(self.edges[v])
        return -degree
class DynamicMostArcsFirst(graph.ConstraintGraph):
    """
    Dynamic ordering where the variable with the highest number of constraints
    to variables that are not already assigned is expanded first
    """
    def ordering(self, v):
        """
        Returns the evaluation used for ordering for a node
        """
        # Negate the count of unassigned neighbours so that more arcs
        # sorts earlier (smaller score = expanded first).
        return -sum(1 for nbr in self.edges[v] if not nbr.is_assigned())
# Used for higher order functionality:
# registry mapping short codes to the ordering classes defined above.
orders = {
    "dsdf": DynamicSmallestDomainFirst,
    "ssdf": StaticSmallestDomainFirst,
    "smaf": StaticMostArcsFirst,
    "dmaf": DynamicMostArcsFirst,
    "ddod": DynamicDomOverDeg
}
| 24.186047 | 78 | 0.647596 | 234 | 2,080 | 5.726496 | 0.333333 | 0.036567 | 0.05597 | 0.059701 | 0.514179 | 0.489552 | 0.489552 | 0.489552 | 0.489552 | 0.402985 | 0 | 0.001321 | 0.272115 | 2,080 | 85 | 79 | 24.470588 | 0.883752 | 0.371154 | 0 | 0.185185 | 0 | 0 | 0.020101 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.185185 | false | 0 | 0.037037 | 0 | 0.592593 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 1 |
44619703bad870796ed0246c1284937d5fd67724 | 681 | py | Python | examples/caseless_example.py | kajuberdut/incase | 48b281ac49e22d0b76510771bfa9de9ed94d1fc4 | [
"MIT"
] | null | null | null | examples/caseless_example.py | kajuberdut/incase | 48b281ac49e22d0b76510771bfa9de9ed94d1fc4 | [
"MIT"
] | null | null | null | examples/caseless_example.py | kajuberdut/incase | 48b281ac49e22d0b76510771bfa9de9ed94d1fc4 | [
"MIT"
] | null | null | null | from incase import Case, Caseless
# Instances of Caseless are strings
example = Caseless("example string")
print(isinstance(example, str))
# True
# By property
print(example.snake)
# example_string
# Or by subscript (string or Case)
print(example["camel"])
# exampleString
print(example[Case.UPPER_SNAKE])
# EXAMPLE_STRING
# Caseless ignore case when comparing to str
print(Caseless("some name") == "SOME_NAME")
# True
# Caseless hashes ignore case also
a_dict = {Caseless("This is a Key"): "this"}
print(a_dict[Caseless("thisIsAKey")])
# Caseless can also generate case coercion functions
make_camel = Caseless.factory("camel")
print(make_camel("snake_case"))
# snakeCase
| 21.28125 | 52 | 0.754772 | 93 | 681 | 5.430108 | 0.473118 | 0.077228 | 0.071287 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.129222 | 681 | 31 | 53 | 21.967742 | 0.851602 | 0.395007 | 0 | 0 | 0 | 0 | 0.197995 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.090909 | 0 | 0.090909 | 0.636364 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 |
44685ac8d733d07256270373907d763751c70fec | 1,567 | py | Python | gui_pyside6/ejemplo_cuatro/manejo_eventos.py | JuanDuran85/ejemplos_python | 47aa49c65384ab89654f362f3da6cd2b0ef386e5 | [
"Apache-2.0"
] | null | null | null | gui_pyside6/ejemplo_cuatro/manejo_eventos.py | JuanDuran85/ejemplos_python | 47aa49c65384ab89654f362f3da6cd2b0ef386e5 | [
"Apache-2.0"
] | null | null | null | gui_pyside6/ejemplo_cuatro/manejo_eventos.py | JuanDuran85/ejemplos_python | 47aa49c65384ab89654f362f3da6cd2b0ef386e5 | [
"Apache-2.0"
] | null | null | null | # signals (eventos) y slots (metodos que procesan los eventos)
from PySide6.QtWidgets import QApplication, QMainWindow, QPushButton, QWidget, QVBoxLayout, QHBoxLayout, QLabel, QLineEdit, QMessageBox
from PySide6.QtCore import QSize
import sys
class VentanaPrincipal(QMainWindow):
    """Main window demonstrating Qt signals (events) and slots (handler methods)."""

    def __init__(self):
        super().__init__()
        self.setWindowTitle("Signals y Slots")
        # Create the button
        boton = QPushButton("Click Aqui")
        # Make the button checkable (toggle); the checked state defaults to False
        boton.setCheckable(True)
        # Connect the clicked signal to the checked-state slot first ...
        boton.clicked.connect(self._evento_chequeado)
        # ... then to the plain click slot; directly connected slots run in
        # connection order, so self.boton_checado is set before
        # _evento_click reads it
        boton.clicked.connect(self._evento_click)
        # Publish the button as the window's central widget
        self.setCentralWidget(boton)

    def _evento_click(self):
        # Alert message kept from the original, disabled:
        #QMessageBox.information(self, "Evento", "Se ha hecho click")
        # Read the button's checked state (assigned by _evento_chequeado)
        print("evento click: ", self.boton_checado)
        print("evento click")

    def _evento_chequeado(self, chequeado):
        # Alert message kept from the original, disabled:
        #QMessageBox.information(self, "Evento", "Se ha chequeado")
        # Remember the checked state for the click handler
        self.boton_checado = chequeado
        print("evento chequeado: ", self.boton_checado)
# Script entry point: create the application, show the window, run the loop.
if __name__ == '__main__':
    app = QApplication(sys.argv)
    ventana = VentanaPrincipal()
    ventana.show()
    sys.exit(app.exec_())
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.