hexsha
string | size
int64 | ext
string | lang
string | max_stars_repo_path
string | max_stars_repo_name
string | max_stars_repo_head_hexsha
string | max_stars_repo_licenses
list | max_stars_count
int64 | max_stars_repo_stars_event_min_datetime
string | max_stars_repo_stars_event_max_datetime
string | max_issues_repo_path
string | max_issues_repo_name
string | max_issues_repo_head_hexsha
string | max_issues_repo_licenses
list | max_issues_count
int64 | max_issues_repo_issues_event_min_datetime
string | max_issues_repo_issues_event_max_datetime
string | max_forks_repo_path
string | max_forks_repo_name
string | max_forks_repo_head_hexsha
string | max_forks_repo_licenses
list | max_forks_count
int64 | max_forks_repo_forks_event_min_datetime
string | max_forks_repo_forks_event_max_datetime
string | content
string | avg_line_length
float64 | max_line_length
int64 | alphanum_fraction
float64 | qsc_code_num_words_quality_signal
int64 | qsc_code_num_chars_quality_signal
float64 | qsc_code_mean_word_length_quality_signal
float64 | qsc_code_frac_words_unique_quality_signal
float64 | qsc_code_frac_chars_top_2grams_quality_signal
float64 | qsc_code_frac_chars_top_3grams_quality_signal
float64 | qsc_code_frac_chars_top_4grams_quality_signal
float64 | qsc_code_frac_chars_dupe_5grams_quality_signal
float64 | qsc_code_frac_chars_dupe_6grams_quality_signal
float64 | qsc_code_frac_chars_dupe_7grams_quality_signal
float64 | qsc_code_frac_chars_dupe_8grams_quality_signal
float64 | qsc_code_frac_chars_dupe_9grams_quality_signal
float64 | qsc_code_frac_chars_dupe_10grams_quality_signal
float64 | qsc_code_frac_chars_replacement_symbols_quality_signal
float64 | qsc_code_frac_chars_digital_quality_signal
float64 | qsc_code_frac_chars_whitespace_quality_signal
float64 | qsc_code_size_file_byte_quality_signal
float64 | qsc_code_num_lines_quality_signal
float64 | qsc_code_num_chars_line_max_quality_signal
float64 | qsc_code_num_chars_line_mean_quality_signal
float64 | qsc_code_frac_chars_alphabet_quality_signal
float64 | qsc_code_frac_chars_comments_quality_signal
float64 | qsc_code_cate_xml_start_quality_signal
float64 | qsc_code_frac_lines_dupe_lines_quality_signal
float64 | qsc_code_cate_autogen_quality_signal
float64 | qsc_code_frac_lines_long_string_quality_signal
float64 | qsc_code_frac_chars_string_length_quality_signal
float64 | qsc_code_frac_chars_long_word_length_quality_signal
float64 | qsc_code_frac_lines_string_concat_quality_signal
float64 | qsc_code_cate_encoded_data_quality_signal
float64 | qsc_code_frac_chars_hex_words_quality_signal
float64 | qsc_code_frac_lines_prompt_comments_quality_signal
float64 | qsc_code_frac_lines_assert_quality_signal
float64 | qsc_codepython_cate_ast_quality_signal
float64 | qsc_codepython_frac_lines_func_ratio_quality_signal
float64 | qsc_codepython_cate_var_zero_quality_signal
bool | qsc_codepython_frac_lines_pass_quality_signal
float64 | qsc_codepython_frac_lines_import_quality_signal
float64 | qsc_codepython_frac_lines_simplefunc_quality_signal
float64 | qsc_codepython_score_lines_no_logic_quality_signal
float64 | qsc_codepython_frac_lines_print_quality_signal
float64 | qsc_code_num_words
int64 | qsc_code_num_chars
int64 | qsc_code_mean_word_length
int64 | qsc_code_frac_words_unique
null | qsc_code_frac_chars_top_2grams
int64 | qsc_code_frac_chars_top_3grams
int64 | qsc_code_frac_chars_top_4grams
int64 | qsc_code_frac_chars_dupe_5grams
int64 | qsc_code_frac_chars_dupe_6grams
int64 | qsc_code_frac_chars_dupe_7grams
int64 | qsc_code_frac_chars_dupe_8grams
int64 | qsc_code_frac_chars_dupe_9grams
int64 | qsc_code_frac_chars_dupe_10grams
int64 | qsc_code_frac_chars_replacement_symbols
int64 | qsc_code_frac_chars_digital
int64 | qsc_code_frac_chars_whitespace
int64 | qsc_code_size_file_byte
int64 | qsc_code_num_lines
int64 | qsc_code_num_chars_line_max
int64 | qsc_code_num_chars_line_mean
int64 | qsc_code_frac_chars_alphabet
int64 | qsc_code_frac_chars_comments
int64 | qsc_code_cate_xml_start
int64 | qsc_code_frac_lines_dupe_lines
int64 | qsc_code_cate_autogen
int64 | qsc_code_frac_lines_long_string
int64 | qsc_code_frac_chars_string_length
int64 | qsc_code_frac_chars_long_word_length
int64 | qsc_code_frac_lines_string_concat
null | qsc_code_cate_encoded_data
int64 | qsc_code_frac_chars_hex_words
int64 | qsc_code_frac_lines_prompt_comments
int64 | qsc_code_frac_lines_assert
int64 | qsc_codepython_cate_ast
int64 | qsc_codepython_frac_lines_func_ratio
int64 | qsc_codepython_cate_var_zero
int64 | qsc_codepython_frac_lines_pass
int64 | qsc_codepython_frac_lines_import
int64 | qsc_codepython_frac_lines_simplefunc
int64 | qsc_codepython_score_lines_no_logic
int64 | qsc_codepython_frac_lines_print
int64 | effective
string | hits
int64 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
12f3e15449887a93928072028cfc90fcace19042
| 322
|
py
|
Python
|
3-Python-Advanced (May 2021)/modules/gui_shop/main.py
|
karolinanikolova/SoftUni-Software-Engineering
|
7891924956598b11a1e30e2c220457c85c40f064
|
[
"MIT"
] | null | null | null |
3-Python-Advanced (May 2021)/modules/gui_shop/main.py
|
karolinanikolova/SoftUni-Software-Engineering
|
7891924956598b11a1e30e2c220457c85c40f064
|
[
"MIT"
] | null | null | null |
3-Python-Advanced (May 2021)/modules/gui_shop/main.py
|
karolinanikolova/SoftUni-Software-Engineering
|
7891924956598b11a1e30e2c220457c85c40f064
|
[
"MIT"
] | null | null | null |
# from authentication import render_main_enter_screen
from modules.gui_shop.authentication import render_main_enter_screen
from modules.gui_shop.canvas import tk
if __name__ == '__main__':
render_main_enter_screen()
tk.mainloop() # keeps the window on our screen until we close it. Otherwise it wold disappears.
| 35.777778
| 99
| 0.807453
| 47
| 322
| 5.12766
| 0.574468
| 0.124481
| 0.186722
| 0.261411
| 0.489627
| 0.489627
| 0.489627
| 0.489627
| 0.489627
| 0.489627
| 0
| 0
| 0.136646
| 322
| 8
| 100
| 40.25
| 0.866906
| 0.406832
| 0
| 0
| 0
| 0
| 0.042781
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.4
| 0
| 0.4
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
423b6446a4147ac8847fd61fe08d00fde1329fff
| 46
|
py
|
Python
|
app/v1/__init__.py
|
josephniel/python-base
|
63ef4218a01250e03041be90bd4c773ceea42336
|
[
"MIT"
] | null | null | null |
app/v1/__init__.py
|
josephniel/python-base
|
63ef4218a01250e03041be90bd4c773ceea42336
|
[
"MIT"
] | null | null | null |
app/v1/__init__.py
|
josephniel/python-base
|
63ef4218a01250e03041be90bd4c773ceea42336
|
[
"MIT"
] | null | null | null |
# just so that the folder is seen as a module
| 23
| 45
| 0.73913
| 10
| 46
| 3.4
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.23913
| 46
| 1
| 46
| 46
| 0.971429
| 0.934783
| 0
| null | 0
| null | 0
| 0
| null | 0
| 0
| 0
| null | 1
| null | true
| 0
| 0
| null | null | null | 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
4249b5da7fe77f6e6671782085bdbbf375ac1496
| 46
|
py
|
Python
|
static_data/__init__.py
|
dp0973/static-data
|
bba6f209bcef9844c57b5aadba49e9e70ae6cfed
|
[
"MIT"
] | 2
|
2021-07-25T04:01:46.000Z
|
2021-08-31T19:18:48.000Z
|
static_data/__init__.py
|
dp0973/static-data
|
bba6f209bcef9844c57b5aadba49e9e70ae6cfed
|
[
"MIT"
] | 1
|
2021-08-31T19:47:46.000Z
|
2021-09-01T06:56:18.000Z
|
static_data/__init__.py
|
dp0973/static-data
|
bba6f209bcef9844c57b5aadba49e9e70ae6cfed
|
[
"MIT"
] | 2
|
2021-07-25T04:01:56.000Z
|
2021-08-31T19:31:55.000Z
|
from static_data.ddragon import(
ddragon
)
| 15.333333
| 32
| 0.76087
| 6
| 46
| 5.666667
| 0.833333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.173913
| 46
| 3
| 33
| 15.333333
| 0.894737
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.333333
| 0
| 0.333333
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
42843cab8606607f30c24da11fb9c0c7a7e0c373
| 328
|
py
|
Python
|
(Ex4)_Neural_Network_model_implementation/Sigmoid.py
|
HarryPham0123/Coursera_Machine_learning_AndrewNg
|
ae1fa34969fa0dafd44aa6606f6749c09b447239
|
[
"MIT"
] | 1
|
2021-11-10T07:23:30.000Z
|
2021-11-10T07:23:30.000Z
|
(Ex4)_Neural_Network_model_implementation/Sigmoid.py
|
HarryPham0123/Coursera_Machine_learning_AndrewNg
|
ae1fa34969fa0dafd44aa6606f6749c09b447239
|
[
"MIT"
] | null | null | null |
(Ex4)_Neural_Network_model_implementation/Sigmoid.py
|
HarryPham0123/Coursera_Machine_learning_AndrewNg
|
ae1fa34969fa0dafd44aa6606f6749c09b447239
|
[
"MIT"
] | null | null | null |
import numpy as np
# (NOTE) Sigmoid function used for logistic regression (non-linear regression)
# TODO Logistic function (Sigmoid function)
def sigmoid(z):
return 1 / (1 + np.exp(-z))
# TODO return gradient (derivative) of sigmoid function
def sigmoid_gradient(z):
return sigmoid(z) * (1-sigmoid(z))
| 23.428571
| 79
| 0.689024
| 45
| 328
| 5
| 0.488889
| 0.2
| 0.16
| 0.222222
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.011538
| 0.207317
| 328
| 13
| 80
| 25.230769
| 0.853846
| 0.52439
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.076923
| 0
| 1
| 0.4
| false
| 0
| 0.2
| 0.4
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 5
|
4286b8de9b0c3f35fb0b97be8f62944d79463d42
| 10,182
|
py
|
Python
|
mayan/apps/tags/tests/test_views.py
|
prezi/mayan-edms
|
e9bc10a056c3379b57115c6e83022f48c6298e1d
|
[
"Apache-2.0"
] | 4
|
2019-02-17T08:35:42.000Z
|
2019-03-28T06:02:11.000Z
|
mayan/apps/tags/tests/test_views.py
|
zhoubear/mayan-edms
|
e9bc10a056c3379b57115c6e83022f48c6298e1d
|
[
"Apache-2.0"
] | 1
|
2018-10-11T13:01:34.000Z
|
2018-10-11T13:01:34.000Z
|
mayan/apps/tags/tests/test_views.py
|
prezi/mayan-edms
|
e9bc10a056c3379b57115c6e83022f48c6298e1d
|
[
"Apache-2.0"
] | 3
|
2019-01-29T13:21:57.000Z
|
2019-10-27T03:20:15.000Z
|
from __future__ import unicode_literals
from documents.permissions import permission_document_view
from documents.tests import GenericDocumentViewTestCase
from ..models import Tag
from ..permissions import (
permission_tag_attach, permission_tag_create, permission_tag_delete,
permission_tag_edit, permission_tag_remove, permission_tag_view
)
from .literals import (
TEST_TAG_COLOR, TEST_TAG_COLOR_EDITED, TEST_TAG_LABEL,
TEST_TAG_LABEL_EDITED
)
class TagViewTestCase(GenericDocumentViewTestCase):
def _create_tag(self):
self.tag = Tag.objects.create(
color=TEST_TAG_COLOR, label=TEST_TAG_LABEL
)
def _request_create_tag(self):
return self.post(
'tags:tag_create', data={
'label': TEST_TAG_LABEL,
'color': TEST_TAG_COLOR
}, follow=True
)
def test_tag_create_view_no_permissions(self):
self.login_user()
response = self._request_create_tag()
self.assertEqual(response.status_code, 403)
self.assertEqual(Tag.objects.count(), 0)
def test_tag_create_view_with_permissions(self):
self.login_user()
self.grant_permission(permission=permission_tag_create)
response = self._request_create_tag()
self.assertEqual(response.status_code, 200)
self.assertEqual(Tag.objects.count(), 1)
tag = Tag.objects.first()
self.assertEqual(tag.label, TEST_TAG_LABEL)
self.assertEqual(tag.color, TEST_TAG_COLOR)
def _request_delete_tag(self):
return self.post(
'tags:tag_delete', args=(self.tag.pk,), follow=True
)
def test_tag_delete_view_no_permissions(self):
self.login_user()
self._create_tag()
response = self._request_delete_tag()
self.assertEqual(response.status_code, 200)
self.assertEqual(Tag.objects.count(), 1)
def test_tag_delete_view_with_access(self):
self.login_user()
self._create_tag()
self.grant_access(obj=self.tag, permission=permission_tag_delete)
response = self._request_delete_tag()
self.assertEqual(response.status_code, 200)
self.assertEqual(Tag.objects.count(), 0)
def _request_multiple_delete(self):
return self.post(
'tags:tag_multiple_delete', data={'id_list': self.tag.pk},
follow=True
)
def test_tag_multiple_delete_view_no_permissions(self):
self.login_user()
self._create_tag()
response = self._request_multiple_delete()
self.assertEqual(response.status_code, 200)
self.assertEqual(Tag.objects.count(), 1)
def test_tag_multiple_delete_view_with_access(self):
self.login_user()
self._create_tag()
self.grant_access(obj=self.tag, permission=permission_tag_delete)
response = self._request_multiple_delete()
self.assertEqual(response.status_code, 200)
self.assertEqual(Tag.objects.count(), 0)
def _request_edit_tag(self):
return self.post(
'tags:tag_edit', args=(self.tag.pk,), data={
'label': TEST_TAG_LABEL_EDITED, 'color': TEST_TAG_COLOR_EDITED
}, follow=True
)
def test_tag_edit_view_no_permissions(self):
self.login_user()
self._create_tag()
response = self._request_edit_tag()
self.assertEqual(response.status_code, 403)
tag = Tag.objects.get(pk=self.tag.pk)
self.assertEqual(tag.label, TEST_TAG_LABEL)
self.assertEqual(tag.color, TEST_TAG_COLOR)
def test_tag_edit_view_with_access(self):
self.login_user()
self._create_tag()
self.grant_access(obj=self.tag, permission=permission_tag_edit)
response = self._request_edit_tag()
self.assertEqual(response.status_code, 200)
tag = Tag.objects.get(pk=self.tag.pk)
self.assertEqual(tag.label, TEST_TAG_LABEL_EDITED)
self.assertEqual(tag.color, TEST_TAG_COLOR_EDITED)
def _request_document_list(self):
return self.get('documents:document_list')
def test_document_tags_widget_no_permissions(self):
self.login_user()
self._create_tag()
self.tag.documents.add(self.document)
response = self._request_document_list()
self.assertNotContains(response, text=TEST_TAG_LABEL, status_code=200)
def test_document_tags_widget_with_access(self):
self.login_user()
self._create_tag()
self.tag.documents.add(self.document)
self.grant_access(obj=self.tag, permission=permission_tag_view)
self.grant_access(
obj=self.document, permission=permission_document_view
)
response = self._request_document_list()
self.assertContains(response, text=TEST_TAG_LABEL, status_code=200)
def _request_attach_tag(self):
return self.post(
'tags:tag_attach', args=(self.document.pk,), data={
'tags': self.tag.pk,
'user': self.user.pk
}
)
def test_document_attach_tag_view_no_permission(self):
self.login_user()
self._create_tag()
self.assertEqual(self.document.tags.count(), 0)
self.grant_access(obj=self.tag, permission=permission_tag_attach)
response = self._request_attach_tag()
# Redirect to previous URL and show warning message about having to
# select at least one object.
self.assertEqual(response.status_code, 302)
self.assertEqual(self.document.tags.count(), 0)
def test_document_attach_tag_view_with_access(self):
self.login_user()
self._create_tag()
self.assertEqual(self.document.tags.count(), 0)
self.grant_access(obj=self.document, permission=permission_tag_attach)
self.grant_access(obj=self.tag, permission=permission_tag_attach)
# permission_tag_view is needed because the form filters the
# choices
self.grant_access(obj=self.tag, permission=permission_tag_view)
response = self._request_attach_tag()
self.assertEqual(response.status_code, 302)
self.assertQuerysetEqual(
self.document.tags.all(), (repr(self.tag),)
)
def _request_multiple_attach_tag(self):
return self.post(
'tags:multiple_documents_tag_attach', data={
'id_list': self.document.pk, 'tags': self.tag.pk,
'user': self.user.pk
}
)
def test_document_multiple_attach_tag_view_no_permission(self):
self.login_user()
self._create_tag()
self.grant_permission(permission=permission_tag_view)
response = self._request_multiple_attach_tag()
self.assertEqual(response.status_code, 200)
self.assertEqual(self.document.tags.count(), 0)
def test_document_multiple_attach_tag_view_with_access(self):
self.login_user()
self._create_tag()
self.grant_access(obj=self.document, permission=permission_tag_attach)
self.grant_access(obj=self.tag, permission=permission_tag_attach)
# permission_tag_view is needed because the form filters the
# choices
self.grant_access(obj=self.tag, permission=permission_tag_view)
response = self._request_multiple_attach_tag()
self.assertEqual(response.status_code, 302)
self.assertQuerysetEqual(
self.document.tags.all(), (repr(self.tag),)
)
def _request_single_document_multiple_tag_remove(self):
return self.post(
'tags:single_document_multiple_tag_remove',
args=(self.document.pk,), data={
'id_list': self.document.pk,
'tags': self.tag.pk,
}
)
def test_single_document_multiple_tag_remove_view_no_permissions(self):
self.login_user()
self._create_tag()
self.document.tags.add(self.tag)
self.grant_access(obj=self.tag, permission=permission_tag_view)
response = self._request_single_document_multiple_tag_remove()
self.assertEqual(response.status_code, 200)
self.assertQuerysetEqual(self.document.tags.all(), (repr(self.tag),))
def test_single_document_multiple_tag_remove_view_with_access(self):
self.login_user()
self._create_tag()
self.document.tags.add(self.tag)
self.grant_access(obj=self.document, permission=permission_tag_remove)
self.grant_access(obj=self.tag, permission=permission_tag_remove)
self.grant_access(obj=self.tag, permission=permission_tag_view)
response = self._request_single_document_multiple_tag_remove()
self.assertEqual(response.status_code, 302)
self.assertEqual(self.document.tags.count(), 0)
def _request_multiple_documents_selection_tag_remove(self):
return self.post(
'tags:multiple_documents_selection_tag_remove',
data={
'id_list': self.document.pk,
'tags': self.tag.pk,
}
)
def test_multiple_documents_selection_tag_remove_view_no_permissions(self):
self.login_user()
self._create_tag()
self.document.tags.add(self.tag)
self.grant_access(obj=self.tag, permission=permission_tag_view)
response = self._request_multiple_documents_selection_tag_remove()
self.assertEqual(response.status_code, 200)
self.assertQuerysetEqual(self.document.tags.all(), (repr(self.tag),))
def test_multiple_documents_selection_tag_remove_view_with_access(self):
self.login_user()
self._create_tag()
self.document.tags.add(self.tag)
self.grant_access(obj=self.document, permission=permission_tag_remove)
self.grant_access(obj=self.tag, permission=permission_tag_remove)
self.grant_access(obj=self.tag, permission=permission_tag_view)
response = self._request_multiple_documents_selection_tag_remove()
self.assertEqual(response.status_code, 302)
self.assertEqual(self.document.tags.count(), 0)
| 32.018868
| 79
| 0.677077
| 1,239
| 10,182
| 5.213075
| 0.073446
| 0.037932
| 0.074779
| 0.055736
| 0.884657
| 0.828766
| 0.789286
| 0.723022
| 0.691438
| 0.677814
| 0
| 0.008386
| 0.227067
| 10,182
| 317
| 80
| 32.119874
| 0.812325
| 0.022294
| 0
| 0.610092
| 0
| 0
| 0.029654
| 0.016586
| 0
| 0
| 0
| 0
| 0.183486
| 1
| 0.12844
| false
| 0
| 0.027523
| 0.041284
| 0.201835
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
428c86bb9a75f4ce9ca6bde70020a25995130302
| 171
|
py
|
Python
|
src/wafp/targets/__init__.py
|
schemathesis/web-api-fuzzing-project
|
69916d9ba9c8c844c7f529dd116790b1f7a135a9
|
[
"MIT"
] | 5
|
2021-05-10T16:14:46.000Z
|
2021-09-18T13:29:42.000Z
|
src/wafp/targets/__init__.py
|
schemathesis/web-api-fuzzing-project
|
69916d9ba9c8c844c7f529dd116790b1f7a135a9
|
[
"MIT"
] | 4
|
2021-05-10T10:42:56.000Z
|
2021-11-13T08:15:11.000Z
|
src/wafp/targets/__init__.py
|
schemathesis/web-api-fuzzing-project
|
69916d9ba9c8c844c7f529dd116790b1f7a135a9
|
[
"MIT"
] | null | null | null |
from . import cli, loader
from .core import BaseTarget
from .metadata import Language, Metadata, Package, SchemaSource, SchemaSourceType, Specification, SpecificationType
| 42.75
| 115
| 0.830409
| 18
| 171
| 7.888889
| 0.722222
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.111111
| 171
| 3
| 116
| 57
| 0.934211
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
c41010cc65e3cfab36a240c31f997527a2c29416
| 91
|
py
|
Python
|
api/__init__.py
|
xuhdev/MAX-Text-Sentiment-Classifier
|
be40b49491726cb16a59715cd1e22b566a1710cc
|
[
"Apache-2.0"
] | null | null | null |
api/__init__.py
|
xuhdev/MAX-Text-Sentiment-Classifier
|
be40b49491726cb16a59715cd1e22b566a1710cc
|
[
"Apache-2.0"
] | null | null | null |
api/__init__.py
|
xuhdev/MAX-Text-Sentiment-Classifier
|
be40b49491726cb16a59715cd1e22b566a1710cc
|
[
"Apache-2.0"
] | null | null | null |
from .metadata import ModelMetadataAPI # noqa
from .predict import ModelPredictAPI # noqa
| 45.5
| 46
| 0.813187
| 10
| 91
| 7.4
| 0.7
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.142857
| 91
| 2
| 47
| 45.5
| 0.948718
| 0.098901
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
c41b7109b9723858b0684045387d7e9834fc0274
| 13,334
|
py
|
Python
|
BinanceCTOrder.py
|
a2gs/BinanceCopyTrade
|
78d77a303487e77acbe02b5c3d96502c842789ce
|
[
"MIT"
] | 8
|
2020-10-06T20:49:44.000Z
|
2022-02-27T01:47:17.000Z
|
BinanceCTOrder.py
|
a2gs/BinanceCopyTrade
|
78d77a303487e77acbe02b5c3d96502c842789ce
|
[
"MIT"
] | 1
|
2021-04-19T03:16:44.000Z
|
2021-05-04T14:32:38.000Z
|
BinanceCTOrder.py
|
a2gs/BinanceCopyTrade
|
78d77a303487e77acbe02b5c3d96502c842789ce
|
[
"MIT"
] | 1
|
2022-03-11T00:56:27.000Z
|
2022-03-11T00:56:27.000Z
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Andre Augusto Giannotti Scota
# andre.scota@gmail.com
# MIT license
from binance.client import Client
from binance.exceptions import BinanceAPIException, BinanceWithdrawException, BinanceRequestException, BinanceOrderException, BinanceOrderMinAmountException, BinanceOrderMinPriceException, BinanceOrderMinTotalException, BinanceOrderUnknownSymbolException, BinanceOrderInactiveSymbolException
"""
testOrder = False
"""
LOCK = True
#LOCK = False
"""
def setTestOrder(o: bool):
global testOrder
testOrder = o
def getTestOrder() -> bool:
global testOrder
return testOrder
"""
def binanceSide(s: str):
if s == 'BUY': return Client.SIDE_BUY
elif s == 'SELL': return Client.SIDE_SELL
return ''
# ---------------------------------------------------
def printPlacedOrder(order, log = None):
log(f"Symbol: [{order['symbol']}]")
log(f"\tSide.................: [{order['side']}]")
log(f"\tType.................: [{order['type']}]")
log(f"\tTransaction Time.....: [{order['transactTime']}]")
log(f"\tPrice................: [{order['price']}]")
log(f"\tOrig Qtd.............: [{order['origQty']}]")
log(f"\tExecuted Qtd.........: [{order['executedQty']}]")
log(f"\tCummulative Quote Qtd: [{order['cummulativeQuoteQty']}]")
log(f"\tStatus...............: [{order['status']}]")
log(f"\tTime In Force........: [{order['timeInForce']}]")
log(f"\tOrder Id.............: [{order['orderId']}]")
log(f"\tClient Order Id......: [{order['clientOrderId']}]")
if 'fills' not in order:
return
for f in order['fills']:
log(f"\t\tPrice...........: [{f['price']}]")
log(f"\t\tQty.............: [{f['qty']}]")
log(f"\t\tCommission......: [{f['commission']}]")
log(f"\t\tCommission Asset: [{f['commissionAsset']}]")
def cancel_a_spot_order(client, log = None, symbOrd = '', ordrid = 0) -> [bool, str]:
log(f"Cancel SPOT Order Id [{ordrid}] with Symbol [{symbOrd}]")
# TESTING
global LOCK
if LOCK == True:
return False, "Programmed flag order lock ON!"
try:
cancOrd = client.cancel_order(symbol = symbOrd, orderId = ordrid)
except BinanceRequestException as e:
return False, f"Erro BinanceRequestException: [{e.status_code} - {e.message}]"
except BinanceAPIException as e:
return False, f"Erro at client.cancel_order() BinanceAPIException: [{e.status_code} - {e.message}]"
except Exception as e:
return False, f"Erro at client.cancel_order(): {e}"
log(f"Symbol..................: [{cancOrd['symbol']}]")
log(f"Original Client Order Id: [{cancOrd['origClientOrderId']}]")
log(f"Order Id................: [{cancOrd['orderId']}]")
log(f"Order List Id (OCO info): [{cancOrd['orderListId']}]")
log(f"Client Order Id.........: [{cancOrd['clientOrderId']}]")
log(f"Price...................: [{cancOrd['price']}]")
log(f"Original Qtd............: [{cancOrd['origQty']}]")
log(f"Executed Qty............: [{cancOrd['executedQty']}]")
log(f"Cummulative Quote Qty...: [{cancOrd['cummulativeQuoteQty']}]")
log(f"Status..................: [{cancOrd['status']}]")
log(f"Time In Force...........: [{cancOrd['timeInForce']}]")
log(f"Type....................: [{cancOrd['type']}]")
log(f"Side....................: [{cancOrd['side']}]")
return True, "Ok"
def cancel_a_margin_order(client, log, symbOrd = '', ordrid = 0) -> [bool, str]:
log(f"Cancel Margin Order Id [{ordrid}] with Symbol [{symbOrd}]")
# TESTING
global LOCK
if LOCK == True:
return False, "Programmed flag order lock ON!"
try:
cancOrd = client.cancel_margin_order(symbol = symbOrd, orderId = ordrid)
except BinanceRequestException as e:
return False, f"Erro at client.cancel_margin_order() BinanceRequestException: [{e.status_code} - {e.message}]"
except BinanceAPIException as e:
return False, f"Erro at client.cancel_margin_order() BinanceAPIException: [{e.status_code} - {e.message}]"
except Exception as e:
return False, f"Erro at client.cancel_margin_order(): {e}"
log(f"Symbol..................: [{cancOrd['symbol']}]")
log(f"Original Client Order Id: [{cancOrd['origClientOrderId']}]")
log(f"OrderId.................: [{cancOrd['orderId']}]")
log(f"Client Order Id.........: [{cancOrd['clientOrderId']}]")
log(f"Price...................: [{cancOrd['price']}]")
log(f"Original Qtd............: [{cancOrd['origQty']}]")
log(f"Executed Qty............: [{cancOrd['executedQty']}]")
log(f"Cummulative Quote Qty...: [{cancOrd['cummulativeQuoteQty']}]")
log(f"Status..................: [{cancOrd['status']}]")
log(f"Time In Force...........: [{cancOrd['timeInForce']}]")
log(f"Type....................: [{cancOrd['type']}]")
log(f"Side....................: [{cancOrd['side']}]")
return True, 'Ok'
# ---------------------------------------------------
def orderSpotLimit(client, log = None, symbOrd = '', qtdOrd = 0, prcOrd = 0.0,
prcStopOrd = 0.0, prcStopLimitOrd = 0.0, sideOrd = 0) -> [bool, str, str]:
log(f"Symbol....: [{symbOrd}]")
log(f"Side......: [{sideOrd}]")
log(f"Quantity..: [{qtdOrd}]")
log(f"Price.....: [{prcOrd}]")
log(f"Stop Price: [{prcStopOrd}]")
log(f"Limit OCO.: [{prcStopLimitOrd}]")
# TESTING
global LOCK
if LOCK == True:
return([False, "Programmed flag order lock ON!", ""])
try:
order = client.create_oco_order(symbol = symbOrd,
side = sideOrd,
quantity = qtdOrd,
price = prcOrd,
stopPrice = prcStopOrd,
stopLimitPrice = prcStopLimitOrd,
stopLimitTimeInForce = Client.TIME_IN_FORCE_GTC,
newOrderRespType = Client.ORDER_RESP_TYPE_FULL)
except BinanceRequestException as e:
return([False, f"Erro create_oco_order BinanceRequestException: [{e.status_code} - {e.message}]", ""])
except BinanceAPIException as e:
return([False, f"Erro create_oco_order BinanceAPIException: [{e.status_code} - {e.message}]", ""])
except BinanceOrderException as e:
return([False, f"Erro create_oco_order BinanceOrderException: [{e.status_code} - {e.message}]", ""])
except BinanceOrderMinAmountException as e:
return([False, f"Erro create_oco_order BinanceOrderMinAmountException: [{e.status_code} - {e.message}]", ""])
except BinanceOrderMinPriceException as e:
return([False, f"Erro create_oco_order BinanceOrderMinPriceException: [{e.status_code} - {e.message}]", ""])
except BinanceOrderMinTotalException as e:
return([False, f"Erro create_oco_order BinanceOrderMinTotalException: [{e.status_code} - {e.message}]", ""])
except BinanceOrderUnknownSymbolException as e:
return([False, f"Erro create_oco_order BinanceOrderUnknownSymbolException: [{e.status_code} - {e.message}]", ""])
except BinanceOrderInactiveSymbolException as e:
return([False, f"Erro create_oco_order BinanceOrderInactiveSymbolException: [{e.status_code} - {e.message}]", ""])
except Expcetion as e:
return([False, f"Erro create_oco_order generic exception: {e}", ""])
printPlacedOrder(order, log, order['orderId'])
return True, "Ok"
def orderSpot(client, log = None, symbOrd = '', qtdOrd = 0, prcOrd = 0.0, sideOrd = 0, typeOrd = 0) ->[bool, str, str]:
log(f"Symbol....: [{symbOrd}]")
log(f"Side......: [{sideOrd}]")
log(f"Quantity..: [{qtdOrd}]")
log(f"Price.....: [{prcOrd}]")
log(f"Type......: [{typeOrd}]")
# TESTING
global LOCK
if LOCK == True:
# return([False, "Programmed flag order lock ON!", ""])
return([True, ">>>>>>>>>>>>>>>>>>>Programmed flag order lock ON!", "987654321"])
try:
order = client.create_order(symbol = symbOrd,
quantity = qtdOrd,
price = prcOrd,
side = sideOrd,
type = typeOrd,
timeInForce = Client.TIME_IN_FORCE_GTC,
newOrderRespType = Client.ORDER_RESP_TYPE_FULL)
except BinanceRequestException as e:
return([False, f"Erro order_limit_buy BinanceRequestException: [{e.status_code} - {e.message}]", ""])
except BinanceAPIException as e:
return([False, f"Erro order_limit_buy BinanceAPIException: [{e.status_code} - {e.message}]", ""])
except BinanceOrderException as e:
return([False, f"Erro order_limit_buy BinanceOrderException: [{e.status_code} - {e.message}]", ""])
except BinanceOrderMinAmountException as e:
return([False, f"Erro order_limit_buy BinanceOrderMinAmountException: [{e.status_code} - {e.message}]", ""])
except BinanceOrderMinPriceException as e:
return([False, f"Erro order_limit_buy BinanceOrderMinPriceException: [{e.status_code} - {e.message}]", ""])
except BinanceOrderMinTotalException as e:
return([False, f"Erro order_limit_buy BinanceOrderMinTotalException: [{e.status_code} - {e.message}]", ""])
except BinanceOrderUnknownSymbolException as e:
return([False, f"Erro order_limit_buy BinanceOrderUnknownSymbolException: [{e.status_code} - {e.message}]", ""])
except BinanceOrderInactiveSymbolException as e:
return([False, f"Erro order_limit_buy BinanceOrderInactiveSymbolException: [{e.status_code} - {e.message}]", ""])
except Exception as e:
return([False, f"Erro order_limit_buy generic exception: {e}", ""])
printPlacedOrder(order, log)
return([True, "Ok", order['orderId']])
# ---------------------------------------------------
def buyOCOOrder(client, log = None, symb = '', qtd = 0, prc = 0.0, stopprice = 0.0, limit = 0.0) -> bool:
log("NOT IMPLEMENTED")
def sellOCOOrder(client, log = None, symb = '', qtd = 0, prc = 0.0, stopprice = 0.0, limit = 0.0) -> bool:
log("NOT IMPLEMENTED")
# ---------------------------------------------------
def orderMargin(client, log = None, symbOrd = '', sideOrd = 0, typeOrd = 0,
                qtdOrd = 0, prcOrd = 0.0, prcStop = 0.0, limit = 0.0) ->[bool, str, str]:
    """Place a MARGIN order via the Binance client.

    Logs every parameter, honors the global LOCK kill-switch, then submits a
    LIMIT order (no stop price) or a stop-type order (with stopPrice) via
    client.create_margin_order, mapping each Binance exception to an error
    triple.

    Returns a 3-element list: [ok, message, order_id], where order_id is ""
    on failure.
    NOTE(review): ``limit`` is logged but never sent to the API — confirm it
    is intentionally unused here.  ``typeOrd`` is logged twice (header and
    "Type" line).
    """
    log(f"MARGIN Order {typeOrd}")
    log(f"Symbol....: [{symbOrd}]")
    log(f"Side......: [{sideOrd}]")
    log(f"Quantity..: [{qtdOrd}]")
    log(f"Price.....: [{prcOrd}]")
    log(f"Stop Price: [{prcStop}]")
    log(f"Limit OCO.: [{limit}]")
    log(f"Type......: [{typeOrd}]")
    # Programmed kill-switch: while LOCK is on, refuse to place real orders.
    global LOCK
    if LOCK == True:
        return([False, "Programmed flag order lock ON!", ""])
    try:
        if typeOrd == 'LIMIT':
            # Plain limit order: no stopPrice parameter.
            order = client.create_margin_order(symbol = symbOrd,
                                               side = binanceSide(sideOrd),
                                               type = Client.ORDER_TYPE_LIMIT,
                                               timeInForce = Client.TIME_IN_FORCE_GTC,
                                               quantity = qtdOrd,
                                               price = prcOrd,
                                               newOrderRespType = Client.ORDER_RESP_TYPE_FULL)
        else:
            # Stop-type order: type is resolved by BU.binanceOrderType and a
            # stopPrice is attached.
            order = client.create_margin_order(symbol = symbOrd,
                                               side = binanceSide(sideOrd),
                                               type = BU.binanceOrderType(typeOrd),
                                               timeInForce = Client.TIME_IN_FORCE_GTC,
                                               quantity = qtdOrd,
                                               price = prcOrd,
                                               stopPrice = prcStop,
                                               newOrderRespType = Client.ORDER_RESP_TYPE_FULL)
    # Each handler keeps the exception class name in the returned message so
    # the caller can tell what failed.  Order of handlers matters: the more
    # specific BinanceOrder* subclasses must come after the broader ones they
    # would otherwise shadow.
    # NOTE(review): some python-binance exception types may not carry a
    # ``status_code`` attribute — confirm against the installed version, as an
    # AttributeError raised here would escape uncaught.
    except BinanceRequestException as e:
        return([False, f"Erro create_margin_order BinanceRequestException: [{e.status_code} - {e.message}]", ""])
    except BinanceAPIException as e:
        return([False, f"Erro create_margin_order BinanceAPIException: [{e.status_code} - {e.message}]", ""])
    except BinanceOrderException as e:
        return([False, f"Erro create_margin_order BinanceOrderException: [{e.status_code} - {e.message}]", ""])
    except BinanceOrderMinAmountException as e:
        return([False, f"Erro create_margin_order BinanceOrderMinAmountException: [{e.status_code} - {e.message}]", ""])
    except BinanceOrderMinPriceException as e:
        return([False, f"Erro create_margin_order BinanceOrderMinPriceException: [{e.status_code} - {e.message}]", ""])
    except BinanceOrderMinTotalException as e:
        return([False, f"Erro create_margin_order BinanceOrderMinTotalException: [{e.status_code} - {e.message}]", ""])
    except BinanceOrderUnknownSymbolException as e:
        return([False, f"Erro create_margin_order BinanceOrderUnknownSymbolException: [{e.status_code} - {e.message}]", ""])
    except BinanceOrderInactiveSymbolException as e:
        return([False, f"Erro create_margin_order BinanceOrderInactiveSymbolException: [{e.status_code} - {e.message}]", ""])
    except Exception as e:
        return([False, f"Erro create_margin_order generic exception: {e}", ""])
    # Success: echo the key fields of the exchange's FULL response.
    log(f"\tOrder id....: [{order['orderId']}]")
    log(f"\tSymbol......: [{order['symbol']}]")
    log(f"\tPrice.......: [{order['price']}]")
    log(f"\tQtd.........: [{order['origQty']}]")
    log(f"\tQtd executed: [{order['executedQty']}]")
    log(f"\tSide........: [{order['side']}]")
    log(f"\tType........: [{order['type']}]")
    log(f"\tStop price..: [{order['stopPrice']}]")
    log(f"\tIs working..? [{order['isWorking']}]")
    return([True, "Ok", order['orderId']])
| 45.047297
| 291
| 0.597345
| 1,410
| 13,334
| 5.553901
| 0.113475
| 0.036266
| 0.037926
| 0.058996
| 0.768101
| 0.751373
| 0.732984
| 0.723662
| 0.700932
| 0.660963
| 0
| 0.004556
| 0.19349
| 13,334
| 295
| 292
| 45.2
| 0.723638
| 0.031648
| 0
| 0.518182
| 0
| 0
| 0.441322
| 0.155114
| 0
| 0
| 0
| 0
| 0
| 1
| 0.040909
| false
| 0
| 0.009091
| 0
| 0.109091
| 0.013636
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
c4230dcb6ac2d7afa95c7dc2e76035d72e433952
| 73
|
py
|
Python
|
spikeforest/spikesorters/spyking_circus/spykingcircussortingextractor/__init__.py
|
mhhennig/spikeforest
|
5b4507ead724af3de0be5d48a3b23aaedb0be170
|
[
"Apache-2.0"
] | null | null | null |
spikeforest/spikesorters/spyking_circus/spykingcircussortingextractor/__init__.py
|
mhhennig/spikeforest
|
5b4507ead724af3de0be5d48a3b23aaedb0be170
|
[
"Apache-2.0"
] | null | null | null |
spikeforest/spikesorters/spyking_circus/spykingcircussortingextractor/__init__.py
|
mhhennig/spikeforest
|
5b4507ead724af3de0be5d48a3b23aaedb0be170
|
[
"Apache-2.0"
] | null | null | null |
from .spykingcircussortingextractor import SpykingCircusSortingExtractor
| 36.5
| 72
| 0.931507
| 4
| 73
| 17
| 0.75
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.054795
| 73
| 1
| 73
| 73
| 0.985507
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
c449cf4ac62a7e9e6b3650f498c1bf442e497b98
| 191
|
py
|
Python
|
tests/pyccel/scripts/import_syntax/user_mod.py
|
dina-fouad/pyccel
|
f4d919e673b400442b9c7b81212b6fbef749c7b7
|
[
"MIT"
] | 206
|
2018-06-28T00:28:47.000Z
|
2022-03-29T05:17:03.000Z
|
tests/pyccel/scripts/import_syntax/user_mod.py
|
dina-fouad/pyccel
|
f4d919e673b400442b9c7b81212b6fbef749c7b7
|
[
"MIT"
] | 670
|
2018-07-23T11:02:24.000Z
|
2022-03-30T07:28:05.000Z
|
tests/pyccel/scripts/import_syntax/user_mod.py
|
dina-fouad/pyccel
|
f4d919e673b400442b9c7b81212b6fbef749c7b7
|
[
"MIT"
] | 19
|
2019-09-19T06:01:00.000Z
|
2022-03-29T05:17:06.000Z
|
# pylint: disable=missing-function-docstring, missing-module-docstring/
from pyccel.decorators import types
@types('double','double','double')
def user_func(x1, x2, x3):
    """Return the sum of the three double-typed arguments."""
    partial_sum = x1 + x2
    return partial_sum + x3
| 27.285714
| 71
| 0.748691
| 27
| 191
| 5.259259
| 0.703704
| 0.169014
| 0.084507
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.035088
| 0.104712
| 191
| 6
| 72
| 31.833333
| 0.795322
| 0.361257
| 0
| 0
| 0
| 0
| 0.15
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.25
| false
| 0
| 0.25
| 0.25
| 0.75
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 5
|
c45d2739227d6d132c5ddb1ba9c2e8c66013d3ec
| 257
|
py
|
Python
|
test/utils/test_learning_rate.py
|
lethaiq/machin
|
7873cada457328952310394afeedcad4bb6a7c4a
|
[
"MIT"
] | 287
|
2020-06-13T05:19:50.000Z
|
2022-03-31T04:46:32.000Z
|
test/utils/test_learning_rate.py
|
lethaiq/machin
|
7873cada457328952310394afeedcad4bb6a7c4a
|
[
"MIT"
] | 19
|
2020-08-19T05:33:45.000Z
|
2022-03-27T15:16:03.000Z
|
test/utils/test_learning_rate.py
|
lethaiq/machin
|
7873cada457328952310394afeedcad4bb6a7c4a
|
[
"MIT"
] | 44
|
2020-07-06T00:41:44.000Z
|
2022-03-29T17:05:08.000Z
|
from machin.utils.logging import default_logger
from machin.utils.learning_rate import gen_learning_rate_func
def test_gen_learning_rate_func():
    """Smoke-test the learning-rate schedule factory.

    Builds a flat 1e-3 schedule over [0, 20000] and queries it both inside
    and just past the configured range.
    """
    schedule = gen_learning_rate_func([(0, 1e-3), (20000, 1e-3)], default_logger)
    for step in (10000, 20001):
        schedule(step)
| 28.555556
| 77
| 0.770428
| 40
| 257
| 4.625
| 0.475
| 0.259459
| 0.243243
| 0.308108
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.089286
| 0.128405
| 257
| 8
| 78
| 32.125
| 0.736607
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.166667
| false
| 0
| 0.333333
| 0
| 0.5
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
c465dc4bc4a09d41cf33edb066ca57eaefcbe76c
| 74
|
py
|
Python
|
modelvshuman/models/wrappers/__init__.py
|
TizianThieringer/model-vs-human
|
17729b8167520f682d93d55c340c27de07bb2681
|
[
"Apache-2.0",
"CC-BY-4.0",
"MIT"
] | 158
|
2021-06-04T15:19:58.000Z
|
2022-03-30T00:31:28.000Z
|
modelvshuman/models/wrappers/__init__.py
|
TizianThieringer/model-vs-human
|
17729b8167520f682d93d55c340c27de07bb2681
|
[
"Apache-2.0",
"CC-BY-4.0",
"MIT"
] | 7
|
2021-07-20T03:57:34.000Z
|
2022-02-01T11:00:47.000Z
|
modelvshuman/models/wrappers/__init__.py
|
TizianThieringer/model-vs-human
|
17729b8167520f682d93d55c340c27de07bb2681
|
[
"Apache-2.0",
"CC-BY-4.0",
"MIT"
] | 14
|
2021-06-16T13:33:11.000Z
|
2022-03-29T15:04:09.000Z
|
from .pytorch import PyTorchModel
from .tensorflow import TensorFlowModel
| 24.666667
| 39
| 0.864865
| 8
| 74
| 8
| 0.75
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.108108
| 74
| 2
| 40
| 37
| 0.969697
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
c46c983d799d3afaf58d30ad66cc8674c22c2824
| 5,638
|
py
|
Python
|
datacube_ows/styles/api/base.py
|
LiamOSullivan/datacube-ows
|
6ff10853081b08d8de8485bd3c9c976cdae03390
|
[
"Apache-2.0"
] | null | null | null |
datacube_ows/styles/api/base.py
|
LiamOSullivan/datacube-ows
|
6ff10853081b08d8de8485bd3c9c976cdae03390
|
[
"Apache-2.0"
] | null | null | null |
datacube_ows/styles/api/base.py
|
LiamOSullivan/datacube-ows
|
6ff10853081b08d8de8485bd3c9c976cdae03390
|
[
"Apache-2.0"
] | null | null | null |
# This file is part of datacube-ows, part of the Open Data Cube project.
# See https://opendatacube.org for more information.
#
# Copyright (c) 2017-2021 OWS Contributors
# SPDX-License-Identifier: Apache-2.0
import xarray
from datacube_ows.startup_utils import initialise_ignorable_warnings
from datacube_ows.styles.base import StandaloneProductProxy, StyleDefBase
initialise_ignorable_warnings()
def StandaloneStyle(cfg):
    """
    Build an OWS style object that works outside a full OWS configuration.

    :param cfg: A valid OWS Style definition configuration dictionary.
            Refer to the documentation for the valid syntax:
            https://datacube-ows.readthedocs.io/en/latest/cfg_styling.html
    :return: A OWS Style Definition object, prepared to work in standalone mode.
    """
    standalone = StyleDefBase(StandaloneProductProxy(), cfg, stand_alone=True)
    standalone.make_ready(None)
    return standalone
def apply_ows_style(style, data, loop_over=None, valid_data_mask=None):
    """
    Apply an OWS style to an ODC XArray to produce a styled image.

    :param style: An OWS Style object, as created by StandaloneStyle()
    :param data: An xarray Dataset, as generated by datacube.load_data()
            The Dataset must contain every band name referenced by the
            standalone style configuration — data variable names must match
            the configured band names exactly (no OWS band aliasing is
            available in standalone mode).  Bands used as bitmaps (pq_mask
            masking or value_map colour coding) need a valid flag_definition
            attribute.
    :param loop_over: (optional) Name of a dimension in ``data`` to iterate
            over for bulk processing (e.g. "time"); the single-date style is
            applied independently to each slice and the dimension is
            preserved in the output.
    :param valid_data_mask: (optional) An xarray DataArray mask with
            dimensions and coordinates matching ``data``.
    :return: An xarray Dataset with the same dimensions/coordinates as
            ``data`` and four 8-bit data_vars (red, green, blue, alpha)
            forming a 24bit RGBA image.
    """
    def _style_one(chunk):
        # Mask then style a single (possibly whole) dataset.
        return style.transform_data(chunk, style.to_mask(chunk, valid_data_mask))

    if loop_over is None:
        return _style_one(data)
    styled_slices = [
        _style_one(data.sel(**{loop_over: coord}))
        for coord in data[loop_over].values
    ]
    return xarray.concat(styled_slices, data[loop_over])
def apply_ows_style_cfg(cfg, data, loop_over=None, valid_data_mask=None):
    """
    Apply an OWS style configuration to an ODC XArray to produce a styled image.

    Convenience wrapper: builds a standalone style from ``cfg`` and delegates
    to apply_ows_style().

    :param cfg: A valid OWS Style definition configuration dictionary.
            Refer to the documentation for the valid syntax:
            https://datacube-ows.readthedocs.io/en/latest/cfg_styling.html
    :param data: An xarray Dataset, as generated by datacube.load_data().
            Data variable names must exactly match the band names in the
            configuration (no band aliasing in standalone mode); bitmap bands
            need a valid flag_definition attribute.
    :param loop_over: (optional) Name of a dimension in ``data`` to iterate
            over for bulk processing (e.g. "time").
    :param valid_data_mask: (optional) An xarray DataArray mask with
            dimensions and coordinates matching ``data``.
    :return: An xarray Dataset with the same dimensions/coordinates as
            ``data`` and four 8-bit data_vars (red, green, blue, alpha)
            forming a 24bit RGBA image.
    """
    style = StandaloneStyle(cfg)
    return apply_ows_style(
        style,
        data,
        loop_over=loop_over,
        valid_data_mask=valid_data_mask,
    )
def generate_ows_legend_style(style, ndates=0):
    """
    Render the legend for an OWS style object.

    :param style: An OWS Style object, as created by StandaloneStyle()
    :param ndates: (optional) Number of dates (for styles with multi-date handlers)
    :return: A PIL Image object.
    """
    legend_image = style.render_legend(ndates)
    return legend_image
def generate_ows_legend_style_cfg(cfg, ndates=0):
    """
    Render the legend for an OWS style configuration dictionary.

    :param cfg: A valid OWS Style definition configuration dictionary.
            Refer to the documentation for the valid syntax:
            https://datacube-ows.readthedocs.io/en/latest/cfg_styling.html
    :param ndates: (optional) Number of dates (for styles with multi-date handlers)
    :return: A PIL Image object.
    """
    standalone = StandaloneStyle(cfg)
    return generate_ows_legend_style(standalone, ndates)
| 44.046875
| 111
| 0.690848
| 783
| 5,638
| 4.87484
| 0.238825
| 0.025151
| 0.027247
| 0.011003
| 0.722557
| 0.709458
| 0.709458
| 0.709458
| 0.709458
| 0.709458
| 0
| 0.004264
| 0.25133
| 5,638
| 127
| 112
| 44.393701
| 0.900024
| 0.693154
| 0
| 0.219512
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.121951
| false
| 0
| 0.073171
| 0
| 0.341463
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
c471d7b8d0b668b38f19b7337cb1526c9df80129
| 3,933
|
py
|
Python
|
baghchal/lookup_table.py
|
basnetsoyuj/baghchal
|
fb60a7a69b6698f07545150ee71a47837e920de8
|
[
"MIT"
] | 6
|
2019-07-27T10:57:13.000Z
|
2022-02-21T04:08:18.000Z
|
build/lib/baghchal/lookup_table.py
|
basnetsoyuj/BaghChal
|
fb60a7a69b6698f07545150ee71a47837e920de8
|
[
"MIT"
] | 2
|
2021-06-08T20:09:10.000Z
|
2021-10-08T14:35:30.000Z
|
build/lib/baghchal/lookup_table.py
|
basnetsoyuj/BaghChal
|
fb60a7a69b6698f07545150ee71a47837e920de8
|
[
"MIT"
] | 1
|
2021-12-26T13:23:44.000Z
|
2021-12-26T13:23:44.000Z
|
# Bagh-Chal (tiger-and-goat) board lookup tables on a 5x5 grid of points.
# action_space maps a move-code string to a flat action index:
#   2-char keys 'rc' appear to be single points (row, column);
#   4-char keys 'r1c1r2c2' appear to be from->to moves — TODO confirm
#   against the game engine that consumes these codes.
action_space={'11':0,'12':1,'13':2,'14':3,'15':4,'21':5,'22':6,'23':7,'24':8,'25':9,'31':10,'32':11,'33':12,'34':13,'35':14,'41':15,'42':16,'43':17,'44':18,'45':19,'51':20,'52':21,'53':22,'54':23,'55':24,'1112':25,'1121':26,'1122':27,'1113':28,'1131':29,'1133':30,'1213':31,'1211':32,'1222':33,'1232':34,'1214':35,'1312':36,'1314':37,'1323':38,'1322':39,'1324':40,'1333':41,'1331':42,'1315':43,'1311':44,'1335':45,'1415':46,'1413':47,'1424':48,'1412':49,'1434':50,'1525':51,'1524':52,'1514':53,'1513':54,'1533':55,'1535':56,'2131':57,'2111':58,'2122':59,'2141':60,'2123':61,'2212':62,'2232':63,'2213':64,'2233':65,'2231':66,'2221':67,'2223':68,'2211':69,'2242':70,'2244':71,'2224':72,'2324':73,'2313':74,'2333':75,'2322':76,'2325':77,'2321':78,'2343':79,'2413':80,'2433':81,'2414':82,'2415':83,'2423':84,'2425':85,'2434':86,'2435':87,'2442':88,'2444':89,'2422':90,'2515':91,'2524':92,'2535':93,'2545':94,'2523':95,'3132':96,'3121':97,'3122':98,'3142':99,'3141':100,'3113':101,'3133':102,'3151':103,'3111':104,'3153':105,'3242':106,'3231':107,'3233':108,'3222':109,'3212':110,'3234':111,'3252':112,'3332':113,'3344':114,'3323':115,'3343':116,'3322':117,'3342':118,'3334':119,'3324':120,'3313':121,'3355':122,'3331':123,'3315':124,'3353':125,'3351':126,'3311':127,'3335':128,'3424':129,'3444':130,'3433':131,'3435':132,'3454':133,'3432':134,'3414':135,'3545':136,'3544':137,'3525':138,'3534':139,'3524':140,'3513':141,'3555':142,'3533':143,'3515':144,'3553':145,'4142':146,'4151':147,'4131':148,'4143':149,'4121':150,'4232':151,'4241':152,'4233':153,'4231':154,'4243':155,'4251':156,'4252':157,'4253':158,'4244':159,'4224':160,'4222':161,'4342':162,'4344':163,'4333':164,'4353':165,'4345':166,'4341':167,'4323':168,'4454':169,'4433':170,'4455':171,'4445':172,'4443':173,'4453':174,'4434':175,'4435':176,'4442':177,'4424':178,'4422':179,'4544':180,'4555':181,'4535':182,'4525':183,'4543':184,'5142':185,'5141':186,'5152':187,'5131':188,'5133':189,'5153':190,'5242':191,'5251':192,'5253':193,'5254':194,'5232':195,'5354':196,'5344':197,'5343':198,'5342':199,'5352':200,'5355':201,'5333':202,'5331':203,'5351':204,'5335':205,'5444':206,'5455':207,'5453':208,'5434':209,'5452':210,'5545':211,'5554':212,'5544':213,'5535':214,'5533':215,'5553':216}
# Inverse lookup: flat action index -> move-code string.
reversed_action_space={v:k for k,v in action_space.items()}
# All move codes in index order (dict preserves insertion order).
action_list=list(action_space.keys())
# For each point, the set of points two steps away along a board line —
# presumably the landing squares of a tiger's jump; TODO confirm jump
# legality (intervening goat) is enforced elsewhere.
bagh_moves_dict={(1,1):{(1,3),(3,1),(3,3)},(1,2):{(3,2),(1,4)},(1,3):{(3,3),(3,1),(1,5),(1,1),(3,5)},(1,4):{(1,2),(3,4)},(1,5):{(1,3),(3,3),(3,5)},(2,1):{(4,1),(2,3)},(2,2):{(4,2),(4,4),(2,4)},(2,3):{(2,5),(2,1),(4,3)},(2,4):{(4,2),(4,4),(2,2)},(2,5):{(4,5),(2,3)},(3,1):{(1,3),(3,3),(5,1),(1,1),(5,3)},(3,2):{(1,2),(3,4),(5,2)},(3,3):{(1,3),(5,5),(3,1),(1,5),(5,3),(5,1),(1,1),(3,5)},(3,4):{(5,4),(3,2),(1,4)},(3,5):{(1,3),(5,5),(3,3),(1,5),(5,3)},(4,1):{(4,3),(2,1)},(4,2):{(4,4),(2,4),(2,2)},(4,3):{(4,5),(4,1),(2,3)},(4,4):{(4,2),(2,4),(2,2)},(4,5):{(2,5),(4,3)},(5,1):{(3,1),(3,3),(5,3)},(5,2):{(5,4),(3,2)},(5,3):{(5,5),(3,3),(3,1),(5,1),(3,5)},(5,4):{(3,4),(5,2)},(5,5):{(3,5),(3,3),(5,3)}}
# For each point, the set of directly adjacent points (one step along a
# board line, including diagonals where the board has them).
connected_points_dict={(1,1):{(1,2),(2,1),(2,2)},(1,2):{(1,3),(1,1),(2,2)},(1,3):{(1,2),(1,4),(2,3),(2,2),(2,4)},(1,4):{(1,5),(1,3),(2,4)},(1,5):{(2,5),(2,4),(1,4)},(2,1):{(3,1),(1,1),(2,2)},(2,2):{(1,2),(3,2),(1,3),(3,3),(3,1),(2,1),(2,3),(1,1)},(2,3):{(2,4),(1,3),(3,3),(2,2)},(2,4):{(1,3),(3,3),(1,4),(1,5),(2,3),(2,5),(3,4),(3,5)},(2,5):{(1,5),(2,4),(3,5)},(3,1):{(3,2),(2,1),(2,2),(4,2),(4,1)},(3,2):{(4,2),(3,1),(3,3),(2,2)},(3,3):{(3,2),(4,4),(2,3),(4,3),(2,2),(4,2),(3,4),(2,4)},(3,4):{(2,4),(4,4),(3,3),(3,5)},(3,5):{(4,5),(4,4),(2,5),(3,4),(2,4)},(4,1):{(4,2),(5,1),(3,1)},(4,2):{(3,2),(4,1),(3,3),(3,1),(4,3),(5,1),(5,2),(5,3)},(4,3):{(4,2),(4,4),(3,3),(5,3)},(4,4):{(5,4),(3,3),(5,5),(4,5),(4,3),(5,3),(3,4),(3,5)},(4,5):{(4,4),(5,5),(3,5)},(5,1):{(4,2),(4,1),(5,2)},(5,2):{(4,2),(5,1),(5,3)},(5,3):{(5,4),(4,4),(4,3),(4,2),(5,2)},(5,4):{(4,4),(5,5),(5,3)},(5,5):{(4,5),(5,4),(4,4)}}
| 786.6
| 2,241
| 0.500636
| 944
| 3,933
| 2.075212
| 0.449153
| 0.035733
| 0.019908
| 0.014293
| 0.160286
| 0.057172
| 0.028586
| 0.02144
| 0
| 0
| 0
| 0.469554
| 0.002034
| 3,933
| 5
| 2,242
| 786.6
| 0.029554
| 0
| 0
| 0
| 0
| 0
| 0.207931
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
c48010964b516ab4ffd35a43829edf4b08f1291b
| 105
|
py
|
Python
|
tests/conftest.py
|
fraudlabspro/fraudlabspro-python
|
8cdf454f728b84957ada3f5ef28840156d97181c
|
[
"MIT"
] | 4
|
2018-08-28T02:45:21.000Z
|
2021-04-24T16:32:27.000Z
|
tests/conftest.py
|
ip2whois/ip2whois-python
|
79af46c585e3d1e48135b2b985ce51d9a83fced5
|
[
"MIT"
] | 3
|
2019-12-02T10:20:11.000Z
|
2019-12-10T00:35:09.000Z
|
tests/conftest.py
|
ip2whois/ip2whois-python
|
79af46c585e3d1e48135b2b985ce51d9a83fced5
|
[
"MIT"
] | 1
|
2019-04-14T20:52:43.000Z
|
2019-04-14T20:52:43.000Z
|
import pytest
@pytest.fixture(scope = 'module')
def global_data():
    """Module-scoped fixture providing shared test configuration values."""
    config = {'apikey': "YOUR_API_KEY"}
    return config
| 21
| 37
| 0.704762
| 14
| 105
| 5.071429
| 0.928571
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.133333
| 105
| 5
| 37
| 21
| 0.78022
| 0
| 0
| 0
| 0
| 0
| 0.226415
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.25
| true
| 0
| 0.25
| 0.25
| 0.75
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 1
| 0
| 0
|
0
| 5
|
670bd31790214fb15af9efc26da19e9df6e23ac4
| 173
|
py
|
Python
|
tests/web_platform/css_flexbox_1/test_flexbox_align_items_baseline.py
|
fletchgraham/colosseum
|
77be4896ee52b8f5956a3d77b5f2ccd2c8608e8f
|
[
"BSD-3-Clause"
] | null | null | null |
tests/web_platform/css_flexbox_1/test_flexbox_align_items_baseline.py
|
fletchgraham/colosseum
|
77be4896ee52b8f5956a3d77b5f2ccd2c8608e8f
|
[
"BSD-3-Clause"
] | null | null | null |
tests/web_platform/css_flexbox_1/test_flexbox_align_items_baseline.py
|
fletchgraham/colosseum
|
77be4896ee52b8f5956a3d77b5f2ccd2c8608e8f
|
[
"BSD-3-Clause"
] | 1
|
2020-01-16T01:56:41.000Z
|
2020-01-16T01:56:41.000Z
|
from tests.utils import W3CTestCase
class TestFlexbox_AlignItemsBaseline(W3CTestCase):
    # Dynamically inject one test method per W3C fixture matching the
    # 'flexbox_align-items-baseline' pattern.  vars() inside a class body
    # returns the class namespace dict, so updating it adds the discovered
    # tests as class attributes.
    vars().update(W3CTestCase.find_tests(__file__, 'flexbox_align-items-baseline'))
| 28.833333
| 83
| 0.820809
| 19
| 173
| 7.105263
| 0.842105
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.018868
| 0.080925
| 173
| 5
| 84
| 34.6
| 0.830189
| 0
| 0
| 0
| 0
| 0
| 0.162791
| 0.162791
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.333333
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
672cd831cf90464dd8e97f0b6e382e38348304ac
| 342
|
py
|
Python
|
dis_calc.py
|
RAKS-Codes/Discount-calc
|
64aa0e038520f723bb5fa6cf487f3f787df56bd2
|
[
"Apache-2.0"
] | null | null | null |
dis_calc.py
|
RAKS-Codes/Discount-calc
|
64aa0e038520f723bb5fa6cf487f3f787df56bd2
|
[
"Apache-2.0"
] | null | null | null |
dis_calc.py
|
RAKS-Codes/Discount-calc
|
64aa0e038520f723bb5fa6cf487f3f787df56bd2
|
[
"Apache-2.0"
] | null | null | null |
import math
import random
import os
def final_price(price=None, discount_percent=None):
    """Return the price after a percentage discount, truncated to an int.

    Backward compatible: called with no arguments (as the script below does)
    it falls back to the module-level globals ``org_price`` and ``disc_perc``
    read from user input.

    :param price: original price; defaults to the global ``org_price``.
    :param discount_percent: discount in percent; defaults to the global
        ``disc_perc``.
    :return: discounted price, truncated toward zero by int().
    """
    if price is None:
        price = org_price
    if discount_percent is None:
        discount_percent = disc_perc
    # Fixed: the original bound a local named ``final_price``, shadowing the
    # function itself inside its own body.
    return int(price - price * discount_percent / 100)
# Read the original price and the discount percentage from the user.
# (Note: "dicount" typo is in the user-facing prompt; changing it would
# alter program output, so it is left untouched here.)
org_price = int(input("Enter the original price: "))
disc_perc = int(input("Enter the dicount %: \n"))
# Zero-argument call: final_price() reads the two globals above.
print("The final price after discount is: " , final_price())
| 22.8
| 62
| 0.643275
| 47
| 342
| 4.489362
| 0.489362
| 0.236967
| 0.184834
| 0.170616
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.011494
| 0.236842
| 342
| 15
| 63
| 22.8
| 0.796935
| 0
| 0
| 0
| 0
| 0
| 0.258359
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.111111
| false
| 0
| 0.333333
| 0
| 0.555556
| 0.111111
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
678594f6038f56c247450a3420bb47d0800d89f9
| 1,487
|
py
|
Python
|
Player.py
|
NankNgSa/Tic-Tac-Toe
|
a1127689c04b2f8db7121b807df2d358b5eeaa25
|
[
"MIT"
] | null | null | null |
Player.py
|
NankNgSa/Tic-Tac-Toe
|
a1127689c04b2f8db7121b807df2d358b5eeaa25
|
[
"MIT"
] | null | null | null |
Player.py
|
NankNgSa/Tic-Tac-Toe
|
a1127689c04b2f8db7121b807df2d358b5eeaa25
|
[
"MIT"
] | null | null | null |
from Board import Board
class Player:
    """A human tic-tac-toe player that reads its moves from stdin."""

    # NOTE: ``global b`` inside the class body makes the assignment below
    # create a MODULE-level global ``b`` (a 9-cell board of spaces), not a
    # class attribute.  Kept as-is for backward compatibility with code that
    # reads the module global.
    global b
    b = [" " for i in range(9)]

    def __init__(self):
        # "x" marks this player's moves; round counts marks placed so far.
        self.player = "x"
        self.round = 0

    # Fixed: the original defined ``play`` twice; the first (zero-board)
    # definition was dead code, silently shadowed by this one, and has been
    # removed.
    def play(self, b):
        """Interactive move loop on board ``b`` (a 9-element list).

        Prompts for a 1-based cell number each turn, marks free cells with
        this player's symbol, re-prompts on occupied cells, and stops with a
        draw message after 9 marks.
        NOTE(review): no win detection here — presumably handled elsewhere.
        """
        while True:
            Board.print_board(b)
            print("the round of the player " +self.player)
            # the player enters a number where he wants to mark
            print("enter a number between 1 & 9")
            # -1 because the board index runs from 0 to 8
            move = int(input()) - 1
            # if the case is free
            if b[move] == " ":
                b[move] = self.player
                self.round += 1
            else:
                # "\\" keeps the printed text identical while fixing the
                # invalid "\ " escape (SyntaxWarning on modern Python).
                print("/!\\ Choose another case...")
                continue
            if self.round == 9:
                print("Nobody've won...")
                break
| 20.652778
| 63
| 0.431069
| 169
| 1,487
| 3.757396
| 0.313609
| 0.07874
| 0.075591
| 0.059843
| 0.781102
| 0.781102
| 0.781102
| 0.781102
| 0.781102
| 0.781102
| 0
| 0.021823
| 0.476126
| 1,487
| 71
| 64
| 20.943662
| 0.793325
| 0.166106
| 0
| 0.5625
| 0
| 0
| 0.154386
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.09375
| false
| 0
| 0.03125
| 0
| 0.1875
| 0.25
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
67c40b8654792ae9e621c8612f93f5d8b5332956
| 176
|
py
|
Python
|
server/stylegan2_hypotheses_explorer/logic/util/__init__.py
|
HealthML/StyleGAN2-Hypotheses-Explorer
|
347d7a85cea4b5d13c8c27c8015a085997f377c8
|
[
"MIT"
] | 2
|
2021-09-22T13:49:16.000Z
|
2022-01-12T06:04:12.000Z
|
server/stylegan2_hypotheses_explorer/logic/util/__init__.py
|
HealthML/StyleGAN2-Hypotheses-Explorer
|
347d7a85cea4b5d13c8c27c8015a085997f377c8
|
[
"MIT"
] | null | null | null |
server/stylegan2_hypotheses_explorer/logic/util/__init__.py
|
HealthML/StyleGAN2-Hypotheses-Explorer
|
347d7a85cea4b5d13c8c27c8015a085997f377c8
|
[
"MIT"
] | null | null | null |
from .batched import batched
from .json import load, load_and_validate
from .meta import subclasses_dict
__all__ = ["batched", "load", "load_and_validate", "subclasses_dict"]
| 29.333333
| 69
| 0.784091
| 24
| 176
| 5.333333
| 0.458333
| 0.125
| 0.171875
| 0.296875
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.113636
| 176
| 5
| 70
| 35.2
| 0.820513
| 0
| 0
| 0
| 0
| 0
| 0.244318
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.75
| 0
| 0.75
| 0
| 1
| 0
| 0
| null | 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
67d8cb6c1fed98ab21ef0a28cf3ad480e42b57f7
| 66
|
py
|
Python
|
automon/integrations/sentryio/__init__.py
|
TheShellLand/automonisaur
|
b5f304a44449b8664c93d8a8a3c3cf2d73aa0ce9
|
[
"MIT"
] | 2
|
2021-09-15T18:35:44.000Z
|
2022-01-18T05:36:54.000Z
|
automon/integrations/sentryio/__init__.py
|
TheShellLand/automonisaur
|
b5f304a44449b8664c93d8a8a3c3cf2d73aa0ce9
|
[
"MIT"
] | 16
|
2021-08-29T22:51:53.000Z
|
2022-03-09T16:08:19.000Z
|
automon/integrations/sentryio/__init__.py
|
TheShellLand/automonisaur
|
b5f304a44449b8664c93d8a8a3c3cf2d73aa0ce9
|
[
"MIT"
] | null | null | null |
from .client import SentryClient
from .config import SentryConfig
| 22
| 32
| 0.848485
| 8
| 66
| 7
| 0.75
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.121212
| 66
| 2
| 33
| 33
| 0.965517
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
db2576ee46ce4b266866ce08dbf3f4913dbd4ee3
| 300
|
py
|
Python
|
tests/02_acceptance/22_outputs_ext/conftest.py
|
opentelekomcloud-infra/delatore
|
6d16400cdfeab175ba6a7d7844d1d623d975fceb
|
[
"Apache-2.0"
] | 1
|
2020-08-19T16:21:04.000Z
|
2020-08-19T16:21:04.000Z
|
tests/02_acceptance/22_outputs_ext/conftest.py
|
opentelekomcloud-infra/delatore
|
6d16400cdfeab175ba6a7d7844d1d623d975fceb
|
[
"Apache-2.0"
] | 86
|
2019-12-02T21:24:20.000Z
|
2021-03-04T13:29:35.000Z
|
tests/02_acceptance/22_outputs_ext/conftest.py
|
opentelekomcloud-infra/delatore
|
6d16400cdfeab175ba6a7d7844d1d623d975fceb
|
[
"Apache-2.0"
] | 1
|
2020-03-19T09:25:07.000Z
|
2020-03-19T09:25:07.000Z
|
import pytest
from delatore.outputs import AlertaRunner, BotRunner
@pytest.fixture
def bot(service, stop_event):
    """Fixture: a BotRunner wired to the shared service and stop event."""
    runner = BotRunner(service, stop_event)
    return runner
@pytest.fixture
def alerta(service, stop_event):
    """Fixture: an AlertaRunner with heartbeats disabled for testing."""
    runner = AlertaRunner(
        msg_service=service,
        stop_event=stop_event,
        send_heartbeats=False,
    )
    return runner
| 21.428571
| 90
| 0.803333
| 39
| 300
| 6
| 0.487179
| 0.192308
| 0.273504
| 0.188034
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.116667
| 300
| 13
| 91
| 23.076923
| 0.883019
| 0
| 0
| 0.25
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.25
| false
| 0
| 0.25
| 0.25
| 0.75
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 5
|
220d1bcf3405e0d1aa48f9a846fb79ae59c7feb9
| 206
|
py
|
Python
|
students/K33402/Dubina Sergey/laboratory_works/lab2/races_project/races/admin.py
|
ShubhamKunal/ITMO_ICT_WebDevelopment_2020-2021
|
bb91c91a56d21cec2b12ae4cc722eaa652a88420
|
[
"MIT"
] | 4
|
2020-09-03T15:41:42.000Z
|
2021-12-24T15:28:20.000Z
|
students/K33402/Dubina Sergey/laboratory_works/lab2/races_project/races/admin.py
|
ShubhamKunal/ITMO_ICT_WebDevelopment_2020-2021
|
bb91c91a56d21cec2b12ae4cc722eaa652a88420
|
[
"MIT"
] | 48
|
2020-09-13T20:22:42.000Z
|
2021-04-30T11:13:30.000Z
|
students/K33402/Dubina Sergey/laboratory_works/lab2/races_project/races/admin.py
|
ShubhamKunal/ITMO_ICT_WebDevelopment_2020-2021
|
bb91c91a56d21cec2b12ae4cc722eaa652a88420
|
[
"MIT"
] | 69
|
2020-09-06T10:32:37.000Z
|
2021-11-28T18:13:17.000Z
|
from django.contrib import admin
from .models import *
# Register every model with the default admin site (same order as before)
# so each gets a generated change/list interface under /admin.
for model in (Driver, Car, Race, Registration, Comment):
    admin.site.register(model)
| 22.888889
| 34
| 0.781553
| 28
| 206
| 5.75
| 0.464286
| 0.279503
| 0.52795
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.106796
| 206
| 8
| 35
| 25.75
| 0.875
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.285714
| 0
| 0.285714
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
224105746dcac638e5fe296794c2844593099b12
| 126
|
py
|
Python
|
2-Python-Fundamentals (Jan 2021)/Course-Exercises-and-Exams/01-Basic-Syntax-Conditional-Statements-and-Loops/03_More_Exercises-NOT-DONE-YET/01_Find-The-Largest.py
|
karolinanikolova/SoftUni-Software-Engineering
|
7891924956598b11a1e30e2c220457c85c40f064
|
[
"MIT"
] | null | null | null |
2-Python-Fundamentals (Jan 2021)/Course-Exercises-and-Exams/01-Basic-Syntax-Conditional-Statements-and-Loops/03_More_Exercises-NOT-DONE-YET/01_Find-The-Largest.py
|
karolinanikolova/SoftUni-Software-Engineering
|
7891924956598b11a1e30e2c220457c85c40f064
|
[
"MIT"
] | null | null | null |
2-Python-Fundamentals (Jan 2021)/Course-Exercises-and-Exams/01-Basic-Syntax-Conditional-Statements-and-Loops/03_More_Exercises-NOT-DONE-YET/01_Find-The-Largest.py
|
karolinanikolova/SoftUni-Software-Engineering
|
7891924956598b11a1e30e2c220457c85c40f064
|
[
"MIT"
] | null | null | null |
# 1. Find the Largest
# Given a number, print the largest number that can be formed from the digits of the given number.
| 15.75
| 98
| 0.730159
| 22
| 126
| 4.181818
| 0.681818
| 0.217391
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.010309
| 0.230159
| 126
| 7
| 99
| 18
| 0.938144
| 0.920635
| 0
| null | 0
| null | 0
| 0
| null | 0
| 0
| 0
| null | 1
| null | true
| 0
| 0
| null | null | null | 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
22445dd8a0078122aa6666e4dbc995d216a4a083
| 96
|
py
|
Python
|
venv/lib/python3.8/site-packages/future/moves/urllib/error.py
|
Retraces/UkraineBot
|
3d5d7f8aaa58fa0cb8b98733b8808e5dfbdb8b71
|
[
"MIT"
] | 2
|
2022-03-13T01:58:52.000Z
|
2022-03-31T06:07:54.000Z
|
venv/lib/python3.8/site-packages/future/moves/urllib/error.py
|
DesmoSearch/Desmobot
|
b70b45df3485351f471080deb5c785c4bc5c4beb
|
[
"MIT"
] | 19
|
2021-11-20T04:09:18.000Z
|
2022-03-23T15:05:55.000Z
|
venv/lib/python3.8/site-packages/future/moves/urllib/error.py
|
DesmoSearch/Desmobot
|
b70b45df3485351f471080deb5c785c4bc5c4beb
|
[
"MIT"
] | null | null | null |
/home/runner/.cache/pip/pool/81/fa/ca/ceffba5b93a3ccd21f8ef25a424c400512f29b62b08df2855d274307ad
| 96
| 96
| 0.895833
| 9
| 96
| 9.555556
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.375
| 0
| 96
| 1
| 96
| 96
| 0.520833
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | null | 0
| 0
| null | null | 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
224641abc0be2dee8d10b7e0329eb91225f1c22b
| 79
|
py
|
Python
|
project-scaffolds/python-package/cluedo/models/__init__.py
|
ranuzz/makeall-code
|
34bf5c1b73870884ceafd3f7d4b18be29127cab1
|
[
"MIT"
] | 2
|
2021-11-02T04:21:41.000Z
|
2021-12-03T22:47:10.000Z
|
project-scaffolds/python-package/cluedo/models/__init__.py
|
ranuzz/makeall-code
|
34bf5c1b73870884ceafd3f7d4b18be29127cab1
|
[
"MIT"
] | null | null | null |
project-scaffolds/python-package/cluedo/models/__init__.py
|
ranuzz/makeall-code
|
34bf5c1b73870884ceafd3f7d4b18be29127cab1
|
[
"MIT"
] | 2
|
2022-01-29T10:52:16.000Z
|
2022-02-13T18:10:14.000Z
|
from __future__ import absolute_import
from cluedo.models.sample import Sample
| 26.333333
| 39
| 0.873418
| 11
| 79
| 5.818182
| 0.636364
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.101266
| 79
| 3
| 39
| 26.333333
| 0.901408
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
2276abba0d6a88f2e185da835d5522aa5630c126
| 74
|
py
|
Python
|
socialchoice/__init__.py
|
ashwinreddy/voting
|
4a9ef46901ddad23e814d5744944463f6fd9ae83
|
[
"MIT"
] | null | null | null |
socialchoice/__init__.py
|
ashwinreddy/voting
|
4a9ef46901ddad23e814d5744944463f6fd9ae83
|
[
"MIT"
] | null | null | null |
socialchoice/__init__.py
|
ashwinreddy/voting
|
4a9ef46901ddad23e814d5744944463f6fd9ae83
|
[
"MIT"
] | null | null | null |
from .voting import *
from .preference_schedule import PreferenceSchedule
| 24.666667
| 51
| 0.851351
| 8
| 74
| 7.75
| 0.75
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.108108
| 74
| 2
| 52
| 37
| 0.939394
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
97f7ae21b79ca272342a429da38b1e29f9e2ae3b
| 535
|
py
|
Python
|
aiosaber/__init__.py
|
flowsaber/aiosaber
|
480d1bf49c9612c97ad20d008469ae94c1fbffd3
|
[
"MIT"
] | null | null | null |
aiosaber/__init__.py
|
flowsaber/aiosaber
|
480d1bf49c9612c97ad20d008469ae94c1fbffd3
|
[
"MIT"
] | null | null | null |
aiosaber/__init__.py
|
flowsaber/aiosaber
|
480d1bf49c9612c97ad20d008469ae94c1fbffd3
|
[
"MIT"
] | null | null | null |
__version__ = '0.0.1.1'
import warnings
import uvloop
# noinspection PyUnresolvedReferences
from .channel import *
# noinspection PyUnresolvedReferences
from .flow import *
# noinspection PyUnresolvedReferences
from .middleware import BaseHandler, BaseExecutor, BaseBuilder
# noinspection PyUnresolvedReferences
from .operators import *
# noinspection PyUnresolvedReferences
from .task import *
uvloop.install()
warnings.filterwarnings('ignore', category=DeprecationWarning)
warnings.filterwarnings('ignore', category=UserWarning)
| 25.47619
| 62
| 0.828037
| 49
| 535
| 8.959184
| 0.469388
| 0.387244
| 0.432802
| 0.300683
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.008299
| 0.099065
| 535
| 20
| 63
| 26.75
| 0.90249
| 0.334579
| 0
| 0
| 0
| 0
| 0.054286
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.636364
| 0
| 0.636364
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
3f131b4232bd51dd6419020c4a76ef5643e8e65f
| 308
|
py
|
Python
|
zippy/edu.uci.python.test/src/tests/megaguards/for/test4.py
|
securesystemslab/zippy-megaguards
|
9e3324d6aea0327fe499b9e07b1a67194ddd1db3
|
[
"BSD-3-Clause"
] | 1
|
2018-07-19T21:15:29.000Z
|
2018-07-19T21:15:29.000Z
|
zippy/edu.uci.python.test/src/tests/megaguards/for/test4.py
|
securesystemslab/zippy-megaguards
|
9e3324d6aea0327fe499b9e07b1a67194ddd1db3
|
[
"BSD-3-Clause"
] | null | null | null |
zippy/edu.uci.python.test/src/tests/megaguards/for/test4.py
|
securesystemslab/zippy-megaguards
|
9e3324d6aea0327fe499b9e07b1a67194ddd1db3
|
[
"BSD-3-Clause"
] | null | null | null |
a = [[1, 2, 3], [1, 2, 3], [1, 2, 3]]
b = [4, 5, 6]
c = [0, 0, 0]
def t():
for j in range(len(a)):
for [A1, A2, A3] in [[1, 2, 3], [1, 2, 3], [1, 2, 3]]:
for B in b:
c[A1-1] = B + A1*2
c[A2-1] = B + A2*2
c[A3-1] = B + A3*2
t()
print(c)
| 23.692308
| 62
| 0.308442
| 65
| 308
| 1.461538
| 0.323077
| 0.126316
| 0.189474
| 0.168421
| 0.189474
| 0.189474
| 0.189474
| 0.189474
| 0.189474
| 0
| 0
| 0.224138
| 0.435065
| 308
| 12
| 63
| 25.666667
| 0.321839
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.083333
| false
| 0
| 0
| 0
| 0.083333
| 0.083333
| 0
| 0
| 1
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
3f57978563e0ff1bf859ce4d289508f63c33f02b
| 216
|
py
|
Python
|
openprocurement/auction/tests/test_core.py
|
OrysiaDrabych/openprocurement.auction
|
d68b4aca7313dd4c7c13bd22c772a32a1b70d79f
|
[
"Apache-2.0"
] | 23
|
2015-07-09T17:07:39.000Z
|
2020-11-14T11:23:39.000Z
|
openprocurement/auction/tests/test_core.py
|
OrysiaDrabych/openprocurement.auction
|
d68b4aca7313dd4c7c13bd22c772a32a1b70d79f
|
[
"Apache-2.0"
] | 23
|
2015-01-14T22:33:58.000Z
|
2018-02-08T16:31:20.000Z
|
openprocurement/auction/tests/test_core.py
|
OrysiaDrabych/openprocurement.auction
|
d68b4aca7313dd4c7c13bd22c772a32a1b70d79f
|
[
"Apache-2.0"
] | 27
|
2015-02-17T10:22:32.000Z
|
2021-06-08T06:50:45.000Z
|
from openprocurement.auction.core import compoenents
class TestDispatch(object):
def test_predicate(self):
pass
def test_plugin_load(self):
pass
def test_adapters(self):
pass
| 15.428571
| 52
| 0.675926
| 25
| 216
| 5.68
| 0.68
| 0.147887
| 0.15493
| 0.211268
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.259259
| 216
| 13
| 53
| 16.615385
| 0.8875
| 0
| 0
| 0.375
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.375
| false
| 0.375
| 0.125
| 0
| 0.625
| 0
| 1
| 0
| 0
| null | 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 1
| 0
|
0
| 5
|
3f5863f0a28265b1b51112a92fa21a99ee26b295
| 131
|
py
|
Python
|
cowsay_app/admin.py
|
JustDeWitt8485/django-cowsay
|
4c3cf91eb7ba394cd7fd581b3d86c56494acf413
|
[
"MIT"
] | null | null | null |
cowsay_app/admin.py
|
JustDeWitt8485/django-cowsay
|
4c3cf91eb7ba394cd7fd581b3d86c56494acf413
|
[
"MIT"
] | null | null | null |
cowsay_app/admin.py
|
JustDeWitt8485/django-cowsay
|
4c3cf91eb7ba394cd7fd581b3d86c56494acf413
|
[
"MIT"
] | null | null | null |
from django.contrib import admin
from cowsay_app.models import Message
# Register your models here.
admin.site.register(Message)
| 18.714286
| 37
| 0.816794
| 19
| 131
| 5.578947
| 0.684211
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.122137
| 131
| 6
| 38
| 21.833333
| 0.921739
| 0.198473
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.666667
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
58d207950de0f8905e72ba1e593ed6460e4176ce
| 536
|
py
|
Python
|
survey/questions/__init__.py
|
vahndi/quant-survey
|
1a0fcf0c22e2c7306cba0218f82d24c97d28ee1f
|
[
"MIT"
] | 2
|
2021-04-10T21:50:36.000Z
|
2022-03-26T16:46:52.000Z
|
survey/questions/__init__.py
|
vahndi/quant-survey
|
1a0fcf0c22e2c7306cba0218f82d24c97d28ee1f
|
[
"MIT"
] | 11
|
2020-08-30T18:47:14.000Z
|
2021-09-09T15:57:19.000Z
|
survey/questions/__init__.py
|
vahndi/quant-survey
|
1a0fcf0c22e2c7306cba0218f82d24c97d28ee1f
|
[
"MIT"
] | null | null | null |
from survey.questions._abstract.question import Question
from survey.questions.count_question import CountQuestion
from survey.questions.free_text_question import FreeTextQuestion
from survey.questions.likert_question import LikertQuestion
from survey.questions.multi_choice_question import MultiChoiceQuestion
from survey.questions.positive_measure_question import PositiveMeasureQuestion
from survey.questions.ranked_choice_question import RankedChoiceQuestion
from survey.questions.single_choice_question import SingleChoiceQuestion
| 59.555556
| 78
| 0.908582
| 61
| 536
| 7.770492
| 0.377049
| 0.168776
| 0.320675
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.059701
| 536
| 8
| 79
| 67
| 0.940476
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
451f5d066fa316abde2b3536411ce20cf3e85e2c
| 217
|
py
|
Python
|
perimetro_cuadrado.py
|
Sebas2587/practica_python
|
2d3a92c4f97afd7f199e8beee8cc87251f249edf
|
[
"MIT"
] | null | null | null |
perimetro_cuadrado.py
|
Sebas2587/practica_python
|
2d3a92c4f97afd7f199e8beee8cc87251f249edf
|
[
"MIT"
] | null | null | null |
perimetro_cuadrado.py
|
Sebas2587/practica_python
|
2d3a92c4f97afd7f199e8beee8cc87251f249edf
|
[
"MIT"
] | null | null | null |
# calculo del perimetro del cuadrado
print('calculo del perimetro cuadrado')
lado = int(input('introduce el valor del lado que conoces del cuadrado: '))
lado *= 4
print('el perimetro del cuadrado es: ',lado)
| 27.125
| 76
| 0.718894
| 31
| 217
| 5.032258
| 0.483871
| 0.211538
| 0.24359
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.00565
| 0.184332
| 217
| 7
| 77
| 31
| 0.875706
| 0.156682
| 0
| 0
| 0
| 0
| 0.655172
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0.5
| 0
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
|
0
| 5
|
18959d46b97c1a4842aa58f0624fc71e16201b7e
| 254
|
py
|
Python
|
wingedsheep/carcassonne/objects/city.py
|
SuryaVikram/CarcassonneMaster
|
cc0201638dfea05b254833097329729e9b8a410c
|
[
"MIT"
] | 11
|
2020-05-19T17:29:18.000Z
|
2022-03-24T06:22:50.000Z
|
wingedsheep/carcassonne/objects/city.py
|
SuryaVikram/CarcassonneMaster
|
cc0201638dfea05b254833097329729e9b8a410c
|
[
"MIT"
] | 6
|
2020-05-18T09:24:26.000Z
|
2022-03-12T00:30:21.000Z
|
wingedsheep/carcassonne/objects/city.py
|
SuryaVikram/CarcassonneMaster
|
cc0201638dfea05b254833097329729e9b8a410c
|
[
"MIT"
] | 5
|
2021-09-16T11:53:26.000Z
|
2022-03-30T12:08:56.000Z
|
from wingedsheep.carcassonne.objects.coordinate_with_side import CoordinateWithSide
class City:
def __init__(self, city_positions: [CoordinateWithSide], finished: bool):
self.city_positions = city_positions
self.finished = finished
| 31.75
| 83
| 0.775591
| 27
| 254
| 6.962963
| 0.62963
| 0.207447
| 0.180851
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.15748
| 254
| 7
| 84
| 36.285714
| 0.878505
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.2
| false
| 0
| 0.2
| 0
| 0.6
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
|
0
| 5
|
18d0326c9387f6a919d3333e47f316d631de0c3e
| 66
|
py
|
Python
|
main.py
|
csy1993/PythonLeetcode
|
98fd9b1639626459fbf81bf94727775d39248dde
|
[
"Apache-2.0"
] | null | null | null |
main.py
|
csy1993/PythonLeetcode
|
98fd9b1639626459fbf81bf94727775d39248dde
|
[
"Apache-2.0"
] | null | null | null |
main.py
|
csy1993/PythonLeetcode
|
98fd9b1639626459fbf81bf94727775d39248dde
|
[
"Apache-2.0"
] | null | null | null |
'''
* @File: main.py
* @Author: CSY
* @Date: 2019/7/27 - 9:40
'''
| 11
| 25
| 0.5
| 11
| 66
| 3
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.188679
| 0.19697
| 66
| 5
| 26
| 13.2
| 0.433962
| 0.863636
| 0
| null | 0
| null | 0
| 0
| null | 0
| 0
| 0
| null | 1
| null | true
| 0
| 0
| null | null | null | 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
18d40bf9458210791473817db5245f750de4bebe
| 45
|
py
|
Python
|
tests/__init__.py
|
tsouchlarakis/sql-query-tools
|
03de8c971ba3c937d72b567938689fcb37482073
|
[
"MIT"
] | null | null | null |
tests/__init__.py
|
tsouchlarakis/sql-query-tools
|
03de8c971ba3c937d72b567938689fcb37482073
|
[
"MIT"
] | null | null | null |
tests/__init__.py
|
tsouchlarakis/sql-query-tools
|
03de8c971ba3c937d72b567938689fcb37482073
|
[
"MIT"
] | null | null | null |
"""Unit test package for sql_query_tools."""
| 22.5
| 44
| 0.733333
| 7
| 45
| 4.428571
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.111111
| 45
| 1
| 45
| 45
| 0.775
| 0.844444
| 0
| null | 0
| null | 0
| 0
| null | 0
| 0
| 0
| null | 1
| null | true
| 0
| 0
| null | null | null | 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
18f632ead7eea2d6005e0c8453f48e7f42455efe
| 4,792
|
py
|
Python
|
beam_analysis/AppliedLoad.py
|
XDwightsBeetsX/beam-analysis
|
714b90cdc6b77d6da6d2182b66efead17db2617d
|
[
"MIT"
] | 1
|
2021-08-13T08:08:36.000Z
|
2021-08-13T08:08:36.000Z
|
beam_analysis/AppliedLoad.py
|
XDwightsBeetsX/beam-analysis
|
714b90cdc6b77d6da6d2182b66efead17db2617d
|
[
"MIT"
] | 10
|
2021-02-23T06:47:48.000Z
|
2022-01-01T19:27:36.000Z
|
beam_analysis/AppliedLoad.py
|
XDwightsBeetsX/beam_analysis
|
714b90cdc6b77d6da6d2182b66efead17db2617d
|
[
"MIT"
] | null | null | null |
from enum import Enum
from beam_analysis.BeamAnalysisTypes import BeamAnalysisTypes
class AppliedLoadTypes(Enum):
"""
The assigned values here matter and are used in Singularity analysis
"""
DISTRIBUTED_LOAD = 1
POINT_LOAD = 2
MOMENT = 3
class AppliedLoad(object):
def __init__(self, magnitude, appliedLoadType):
self.Magnitude = magnitude
self.AppliedLoadType = appliedLoadType
def evaluateAt(self, x, beamAnalysisType):
pass
def getString(self, beamAnalysisType):
pass
class DistributedLoad(AppliedLoad):
def __init__(self, start, stop, magnitude):
super().__init__(magnitude, AppliedLoadTypes.DISTRIBUTED_LOAD)
self.Start = start
self.Stop = stop
def evaluateAt(self, x, beamAnalysisType):
if not (self.Start <= x):
return 0.0
if beamAnalysisType == BeamAnalysisTypes.SHEAR:
return self.Magnitude * (x - self.Start)
if beamAnalysisType == BeamAnalysisTypes.BENDING:
return (self.Magnitude / 2) * (x - self.Start) ** 2
if beamAnalysisType == BeamAnalysisTypes.ANGLE:
return (self.Magnitude / 6) * (x - self.Start) ** 3
if beamAnalysisType == BeamAnalysisTypes.DEFLECTION:
return (self.Magnitude / 24) * (x - self.Start) ** 4
def getString(self, beamAnalysisType):
mag = abs(self.Magnitude)
if beamAnalysisType == BeamAnalysisTypes.SHEAR:
if (self.Start == 0):
return f"{mag}x"
return f"{mag}<x - {self.Start}>^1"
if beamAnalysisType == BeamAnalysisTypes.BENDING:
if (self.Start == 0):
return f"({mag} / 2)x^2"
return f"({mag} / 2)<x - {self.Start}>^2"
if beamAnalysisType == BeamAnalysisTypes.ANGLE:
if (self.Start == 0):
return f"({mag} / 6)x^3"
return f"({mag} / 6)<x - {self.Start}>^3"
if beamAnalysisType == BeamAnalysisTypes.DEFLECTION:
if (self.Start == 0):
return f"({mag} / 24)x^4"
return f"({mag} / 24)<x - {self.Start}>^4"
class PointLoad(AppliedLoad):
def __init__(self, location, magnitude):
super().__init__(magnitude, AppliedLoadTypes.POINT_LOAD)
self.Location = location
def evaluateAt(self, x, beamAnalysisType):
if not (self.Location <= x):
return 0.0
if beamAnalysisType == BeamAnalysisTypes.SHEAR:
return self.Magnitude
if beamAnalysisType == BeamAnalysisTypes.BENDING:
return self.Magnitude * (x - self.Location)
if beamAnalysisType == BeamAnalysisTypes.ANGLE:
return (self.Magnitude / 2) * (x - self.Location) ** 2
if beamAnalysisType == BeamAnalysisTypes.DEFLECTION:
return (self.Magnitude / 6) * (x - self.Location) ** 3
def getString(self, beamAnalysisType):
mag = abs(self.Magnitude)
if beamAnalysisType == BeamAnalysisTypes.SHEAR:
if (self.Location == 0):
return f"{mag}"
return f"{mag}<x - {self.Location}>^0"
if beamAnalysisType == BeamAnalysisTypes.BENDING:
return f"{mag}<x - {self.Location}>^1"
if beamAnalysisType == BeamAnalysisTypes.ANGLE:
return f"({mag} / 2)<x - {self.Location}>^2"
if beamAnalysisType == BeamAnalysisTypes.DEFLECTION:
return f"({mag} / 6)<x - {self.Location}>^3"
class Moment(AppliedLoad):
def __init__(self, location, magnitude):
super().__init__(magnitude, AppliedLoadTypes.MOMENT)
self.Location = location
def evaluateAt(self, x, beamAnalysisType):
if not (self.Location <= x):
return 0.0
if beamAnalysisType == BeamAnalysisTypes.SHEAR:
return 0.0
if beamAnalysisType == BeamAnalysisTypes.BENDING:
return self.Magnitude
if beamAnalysisType == BeamAnalysisTypes.ANGLE:
return self.Magnitude * (x - self.Location)
if beamAnalysisType == BeamAnalysisTypes.DEFLECTION:
return (self.Magnitude / 2) * (x - self.Location) ** 2
def getString(self, beamAnalysisType):
mag = abs(self.Magnitude)
if beamAnalysisType == BeamAnalysisTypes.SHEAR:
return f"0.0"
if beamAnalysisType == BeamAnalysisTypes.BENDING:
if (self.Location == 0):
return f"{mag}"
return f"{mag}<x - {self.Location}>^0"
if beamAnalysisType == BeamAnalysisTypes.ANGLE:
return f"{mag}<x - {self.Location}>^1"
if beamAnalysisType == BeamAnalysisTypes.DEFLECTION:
return f"({mag} / 2)<x - {self.Location}>^2"
| 36.030075
| 72
| 0.595785
| 484
| 4,792
| 5.830579
| 0.117769
| 0.153083
| 0.297661
| 0.089298
| 0.824238
| 0.766123
| 0.72112
| 0.587881
| 0.54146
| 0.416726
| 0
| 0.016765
| 0.290484
| 4,792
| 132
| 73
| 36.30303
| 0.813235
| 0.01419
| 0
| 0.656863
| 0
| 0
| 0.0839
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.117647
| false
| 0.019608
| 0.019608
| 0
| 0.539216
| 0
| 0
| 0
| 0
| null | 0
| 1
| 0
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
|
0
| 5
|
7a00061bffda2979f894d9c3c2f02401c27df1bf
| 142
|
py
|
Python
|
tests/steps/import_behave_core_steps.py
|
berkeleybop/behave_core
|
aa8354fcf0deeb68f6add8749d80d0a2a1bef16d
|
[
"BSD-3-Clause"
] | null | null | null |
tests/steps/import_behave_core_steps.py
|
berkeleybop/behave_core
|
aa8354fcf0deeb68f6add8749d80d0a2a1bef16d
|
[
"BSD-3-Clause"
] | null | null | null |
tests/steps/import_behave_core_steps.py
|
berkeleybop/behave_core
|
aa8354fcf0deeb68f6add8749d80d0a2a1bef16d
|
[
"BSD-3-Clause"
] | null | null | null |
####
#### Not really much here but to indroduce the step libraries.
####
from behave_core import json_data_steps, resource_steps, page_steps
| 23.666667
| 67
| 0.753521
| 21
| 142
| 4.857143
| 0.904762
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.140845
| 142
| 5
| 68
| 28.4
| 0.836066
| 0.401408
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
e151d2ce34f1d3b94b709c20d0351a387b8c2564
| 13,604
|
py
|
Python
|
silver_waffle/base/constants.py
|
miguelagustin/silver-waffle-trading-bot
|
a20ed31b075fd432952ce5608868a47472f32c6b
|
[
"MIT"
] | 2
|
2021-03-16T00:39:47.000Z
|
2021-03-16T00:57:35.000Z
|
silver_waffle/base/constants.py
|
miguelagustin/silver-waffle-trading-bot
|
a20ed31b075fd432952ce5608868a47472f32c6b
|
[
"MIT"
] | null | null | null |
silver_waffle/base/constants.py
|
miguelagustin/silver-waffle-trading-bot
|
a20ed31b075fd432952ce5608868a47472f32c6b
|
[
"MIT"
] | 1
|
2021-07-19T21:45:54.000Z
|
2021-07-19T21:45:54.000Z
|
STABLECOIN_SYMBOLS = ['USDC', 'DAI', 'USDT', 'BUSD', 'TUSD', 'PAX', 'VAI']
FREE_RPC_ENDPOINTS = ['https://api.mycryptoapi.com/eth',
'https://nodes.mewapi.io/rpc/eth',
'https://mainnet-nethermind.blockscout.com/',
'https://mainnet.eth.cloud.ava.do/',
'https://cloudflare-eth.com/']
CHAINLINK_ADDRESSES = {'1INCH-ETH': '0x72AFAECF99C9d9C8215fF44C77B94B99C28741e8',
'AAVE-ETH': '0x6Df09E975c830ECae5bd4eD9d90f3A95a4f88012',
'AAVE-USD': '0x547a514d5e3769680Ce22B2361c10Ea13619e8a9',
'ADA-USD': '0xAE48c91dF1fE419994FFDa27da09D5aC69c30f55',
'ADX-USD': '0x231e764B44b2C1b7Ca171fa8021A24ed520Cde10',
'ALPHA-ETH': '0x89c7926c7c15fD5BFDB1edcFf7E7fC8283B578F6',
'AMP-USD': '0x8797ABc4641dE76342b8acE9C63e3301DC35e3d8',
'AMPL-ETH': '0x492575FDD11a0fCf2C6C719867890a7648d526eB',
'AMPL-USD': '0xe20CA8D7546932360e37E9D72c1a47334af57706',
'ANT-ETH': '0x8f83670260F8f7708143b836a2a6F11eF0aBac01',
'APY TVL': '0x889f28E24EA0573db472EedEf7c4137B3357ac2B',
'AUD-USD': '0x77F9710E7d0A19669A13c055F62cd80d313dF022',
'BADGER-ETH': '0x58921Ac140522867bf50b9E009599Da0CA4A2379',
'BAL-ETH': '0xC1438AA3823A6Ba0C159CfA8D98dF5A994bA120b',
'BAND-ETH': '0x0BDb051e10c9718d1C29efbad442E88D38958274',
'BAND-USD': '0x919C77ACc7373D000b329c1276C76586ed2Dd19F',
'BAT-ETH': '0x0d16d4528239e9ee52fa531af613AcdB23D88c94',
'BCH-USD': '0x9F0F69428F923D6c95B781F89E165C9b2df9789D',
'BNB-ETH': '0xc546d2d06144F9DD42815b8bA46Ee7B8FcAFa4a2',
'BNB-USD': '0x14e613AC84a31f709eadbdF89C6CC390fDc9540A',
'BNT-ETH': '0xCf61d1841B178fe82C8895fe60c2EDDa08314416',
'BNT-USD': '0x1E6cF0D433de4FE882A437ABC654F58E1e78548c',
'BTC-ARS': '0xA912dd6b62B1C978e205B86994E057B1b494D73a',
'BTC-ETH': '0xdeb288F737066589598e9214E782fa5A8eD689e8',
'BTC-USD': '0xF4030086522a5bEEa4988F8cA5B36dbC97BeE88c',
'BTC-height': '0x4D2574c790d836b8C886615d927e9BA585B10EbA',
'BTC Difficulty': '0xA792Ebd0E4465DB2657c7971519Cfa0f0275F428',
'BUSD-ETH': '0x614715d2Af89E6EC99A233818275142cE88d1Cfd',
'BZRX-ETH': '0x8f7C7181Ed1a2BA41cfC3f5d064eF91b67daef66',
'CAD-USD': '0xa34317DB73e77d453b1B8d04550c44D10e981C8e',
'CEL-ETH': '0x75FbD83b4bd51dEe765b2a01e8D3aa1B020F9d33',
'CHF-USD': '0x449d117117838fFA61263B61dA6301AA2a88B13A',
'CNY-USD': '0xeF8A4aF35cd47424672E3C590aBD37FBB7A7759a',
'COMP-ETH': '0x1B39Ee86Ec5979ba5C322b826B3ECb8C79991699',
'COMP-USD': '0xdbd020CAeF83eFd542f4De03e3cF0C28A4428bd5',
'COVER-ETH': '0x7B6230EF79D5E97C11049ab362c0b685faCBA0C2',
'COVER-USD': '0x0ad50393F11FfAc4dd0fe5F1056448ecb75226Cf',
'CREAM-ETH': '0x82597CFE6af8baad7c0d441AA82cbC3b51759607',
'CRO-ETH': '0xcA696a9Eb93b81ADFE6435759A29aB4cf2991A96',
'CRV-ETH': '0x8a12Be339B0cD1829b91Adc01977caa5E9ac121e',
'CV-Index': '0x1B58B67B2b2Df71b4b0fb6691271E83A0fa36aC5',
'DAI-ETH': '0x773616E4d11A78F511299002da57A0a94577F1f4',
'DAI-USD': '0xAed0c38402a5d19df6E4c03F4E2DceD6e29c1ee9',
'DASH-USD': '0xFb0cADFEa136E9E343cfb55B863a6Df8348ab912',
'DIGG-BTC': '0x418a6C98CD5B8275955f08F0b8C1c6838c8b1685',
'DMG-ETH': '0xD010e899f7ab723AC93f825cDC5Aa057669557c2',
'DOT-USD': '0x1C07AFb8E2B827c5A4739C6d59Ae3A5035f28734',
'DPI-ETH': '0x029849bbc0b1d93b85a8b6190e979fd38F5760E2',
'DPI-USD': '0xD2A593BF7594aCE1faD597adb697b5645d5edDB2',
'ENJ-ETH': '0x24D9aB51950F3d62E9144fdC2f3135DAA6Ce8D1B',
'EOS-USD': '0x10a43289895eAff840E8d45995BBa89f9115ECEe',
'ETC-USD': '0xaEA2808407B7319A31A383B6F8B60f04BCa23cE2',
'ETH-USD': '0x5f4eC3Df9cbd43714FE2740f5E3616155c5b8419',
'ETH-XDR': '0xb022E2970b3501d8d83eD07912330d178543C1eB',
'EUR-USD': '0xb49f677943BC038e9857d61E7d053CaA2C1734C1',
'EURS RESERVES': '0xbcD05A3E0c11f340cCcD9a4Efe05eEB2b33AB67A',
'FIL-ETH': '0x0606Be69451B1C9861Ac6b3626b99093b713E801',
'FIL-USD': '0x1A31D42149e82Eb99777f903C08A2E41A00085d3',
'FNX-USD': '0x80070f7151BdDbbB1361937ad4839317af99AE6c',
'FTM-ETH': '0x2DE7E4a9488488e0058B95854CC2f7955B35dC9b',
'FTSE-GBP': '0xE23FA0e8dd05D6f66a6e8c98cab2d9AE82A7550c',
'FTT-ETH': '0xF0985f7E2CaBFf22CecC5a71282a89582c382EFE',
'FXS-USD': '0x6Ebc52C8C1089be9eB3945C4350B68B8E4C2233f',
'Fast Gas-Gwei': '0x169E633A2D1E6c10dD91238Ba11c4A708dfEF37C',
'GBP-USD': '0x5c0Ab2d9b5a7ed9f470386e82BB36A3613cDd4b5',
'GRT-ETH': '0x17D054eCac33D91F7340645341eFB5DE9009F1C1',
'HEGIC-ETH': '0xAf5E8D9Cd9fC85725A83BF23C52f1C39A71588a6',
'HEGIC-USD': '0xBFC189aC214E6A4a35EBC281ad15669619b75534',
'INJ-USD': '0xaE2EbE3c4D20cE13cE47cbb49b6d7ee631Cd816e',
'IOST-USD': '0xd0935838935349401c73a06FCde9d63f719e84E5',
'JPY-USD': '0xBcE206caE7f0ec07b545EddE332A47C2F75bbeb3',
'KNC-ETH': '0x656c0544eF4C98A6a98491833A89204Abb045d6b',
'KNC-USD': '0xf8fF43E991A81e6eC886a3D281A2C6cC19aE70Fc',
'KP3R-ETH': '0xe7015CCb7E5F788B8c1010FC22343473EaaC3741',
'KRW-USD': '0x01435677FB11763550905594A16B645847C1d0F3',
'LINK-ETH': '0xDC530D9457755926550b59e8ECcdaE7624181557',
'LINK-USD': '0x2c1d072e956AFFC0D435Cb7AC38EF18d24d9127c',
'LRC-ETH': '0x160AC928A16C93eD4895C2De6f81ECcE9a7eB7b4',
'LRC-USD': '0xFd33ec6ABAa1Bdc3D9C6C85f1D6299e5a1a5511F',
'LTC-USD': '0x6AF09DF7563C363B5763b9102712EbeD3b9e859B',
'MANA-ETH': '0x82A44D92D6c329826dc557c5E1Be6ebeC5D5FeB9',
'MATIC-USD': '0x7bAC85A8a13A4BcD8abb3eB7d6b4d632c5a57676',
'MKR-ETH': '0x24551a8Fb2A7211A25a17B1481f043A8a8adC7f2',
'MLN-ETH': '0xDaeA8386611A157B08829ED4997A8A62B557014C',
'MTA-ETH': '0x98334b85De2A8b998Ba844c5521e73D68AD69C00',
'MTA-USD': '0xc751E86208F0F8aF2d5CD0e29716cA7AD98B5eF5',
'N225-JPY': '0x5c4939a2ab3A2a9f93A518d81d4f8D0Bc6a68980',
'NMR-ETH': '0x9cB2A01A7E64992d32A34db7cEea4c919C391f6A',
'OGN-ETH': '0x2c881B6f3f6B5ff6C975813F87A4dad0b241C15b',
'OMG-ETH': '0x57C9aB3e56EE4a83752c181f241120a3DBba06a1',
'ONT-USD': '0xcDa3708C5c2907FCca52BB3f9d3e4c2028b89319',
'ORN-ETH': '0xbA9B2a360eb8aBdb677d6d7f27E12De11AA052ef',
'OXT-USD': '0xd75AAaE4AF0c398ca13e2667Be57AF2ccA8B5de6',
'Orchid': '0xa175FA75795c6Fb2aFA48B72d22054ee0DeDa4aC',
'PAX-ETH': '0x3a08ebBaB125224b7b6474384Ee39fBb247D2200',
'PAX-RESERVES': '0xf482Ed35406933F321f293aC0e4c6c8f59a22fA5',
'PAXG-ETH': '0x9B97304EA12EFed0FAd976FBeCAad46016bf269e',
'PAXG-RESERVES': '0x716BB8c60D409e54b8Fb5C4f6aBC50E794DA048a',
'PERP-ETH': '0x3b41D5571468904D4e53b6a8d93A6BaC43f02dC9',
'RCN-BTC': '0xEa0b3DCa635f4a4E77D9654C5c18836EE771566e',
'REN-ETH': '0x3147D7203354Dc06D9fd350c7a2437bcA92387a4',
'REN-USD': '0x0f59666EDE214281e956cb3b2D0d69415AfF4A01',
'REP-ETH': '0xD4CE430C3b67b3E2F7026D86E7128588629e2455',
'RLC-ETH': '0x4cba1e1fdc738D0fe8DB3ee07728E2Bc4DA676c6',
'RUNE-ETH': '0x875D60C44cfbC38BaA4Eb2dDB76A767dEB91b97e',
'SGD-USD': '0xe25277fF4bbF9081C75Ab0EB13B4A13a721f3E13',
'SNX-ETH': '0x79291A9d692Df95334B1a0B3B4AE6bC606782f8c',
'SNX-USD': '0xDC3EA94CD0AC27d9A86C180091e7f78C683d3699',
'SRM-ETH': '0x050c048c9a0CD0e76f166E2539F87ef2acCEC58f',
'SUSD-ETH': '0x8e0b7e6062272B5eF4524250bFFF8e5Bd3497757',
'SUSHI-ETH': '0xe572CeF69f43c2E488b33924AF04BDacE19079cf',
'SXP-USD': '0xFb0CfD6c19e25DB4a08D8a204a387cEa48Cc138f',
'TOMO-USD': '0x3d44925a8E9F9DFd90390E58e92Ec16c996A331b',
'TRU-USD': '0x26929b85fE284EeAB939831002e1928183a10fb1',
'TRX-USD': '0xacD0D1A29759CC01E8D925371B72cb2b5610EA25',
'TRY-USD': '0xB09fC5fD3f11Cf9eb5E1C5Dba43114e3C9f477b5',
'TSLA-USD': '0x1ceDaaB50936881B3e449e47e40A2cDAF5576A4a',
'TUSD-ETH': '0x3886BA987236181D98F2401c507Fb8BeA7871dF2',
'TUSD Reserves': '0x478f4c42b877c697C4b19E396865D4D533EcB6ea',
'TUSD Supply': '0x807b029DD462D5d9B9DB45dff90D3414013B969e',
'Total Marketcap-USD': '0xEC8761a0A73c34329CA5B1D3Dc7eD07F30e836e2',
'UMA-ETH': '0xf817B69EA583CAFF291E287CaE00Ea329d22765C',
'UNI-ETH': '0xD6aA3D25116d8dA79Ea0246c4826EB951872e02e',
'UNI-USD': '0x553303d460EE0afB37EdFf9bE42922D8FF63220e',
'USDC-ETH': '0x986b5E1e1755e3C2440e960477f25201B0a8bbD4',
'USDC-USD': '0x8fFfFfd4AfB6115b954Bd326cbe7B4BA576818f6',
'USDK-USD': '0xfAC81Ea9Dd29D8E9b212acd6edBEb6dE38Cb43Af',
'USDT-ETH': '0xEe9F2375b4bdF6387aa8265dD4FB8F16512A1d46',
'USDT-USD': '0x3E7d1eAB13ad0104d2750B8863b489D65364e32D',
'UST-ETH': '0xa20623070413d42a5C01Db2c8111640DD7A5A03a',
'WAVES-USD': '0x9a79fdCd0E326dF6Fa34EA13c05d3106610798E9',
'WING-USD': '0x134fE0a225Fb8e6683617C13cEB6B3319fB4fb82',
'WNXM-ETH': '0xe5Dc0A609Ab8bCF15d3f35cFaa1Ff40f521173Ea',
'WOM-ETH': '0xcEBD2026d3C99F2a7CE028acf372C154aB4638a9',
'WTI-USD': '0xf3584F4dd3b467e73C2339EfD008665a70A4185c',
'XAG-USD': '0x379589227b15F1a12195D3f2d90bBc9F31f95235',
'XAU-USD': '0x214eD9Da11D2fbe465a6fc601a91E62EbEc1a0D6',
'XHV-USD': '0xeccBeEd9691d8521385259AE596CF00D68429de0',
'XMR-USD': '0xFA66458Cce7Dd15D8650015c4fce4D278271618F',
'XRP-USD': '0xCed2660c6Dd1Ffd856A5A82C67f3482d88C50b12',
'XTZ-USD': '0x5239a625dEb44bF3EeAc2CD5366ba24b8e9DB63F',
'YFI-ETH': '0x7c5d4F8345e66f68099581Db340cd65B078C41f4',
'YFI-USD': '0xA027702dbb89fbd58938e4324ac03B58d812b0E1',
'YFII-ETH': '0xaaB2f6b45B28E962B3aCd1ee4fC88aEdDf557756',
'ZRX-ETH': '0x2Da4983a622a8498bb1a21FaE9D8F6C664939962',
'ZRX-USD': '0x2885d15b8Af22648b98B122b22FDF4D2a56c6023',
'sCEX-USD': '0x283D433435cFCAbf00263beEF6A362b7cc5ed9f2',
'sDEFI-USD': '0xa8E875F94138B0C5b51d1e1d5dE35bbDdd28EA87'}
FIAT_SYMBOLS = ['AED', 'AFN', 'ALL', 'AMD', 'ANG', 'AOA', 'ARS', 'AUD', 'AWG', 'AZN', 'BAM', 'BBD', 'BDT', 'BGN', 'BHD',
'BIF', 'BMD', 'BND', 'BOB', 'BRL', 'BSD', 'BTN', 'BWP', 'BYR', 'BZD', 'CAD', 'CDF', 'CHF', 'CLP', 'CNY',
'COP', 'CRC', 'CUC', 'CVE', 'CZK', 'DJF', 'DKK', 'DOP', 'DZD', 'EEK', 'EGP', 'ERN', 'ETB', 'EUR', 'FJD',
'FKP', 'GBP', 'GEL', 'GHS', 'GIP', 'GMD', 'GNF', 'GQE', 'GTQ', 'GYD', 'HKD', 'HNL', 'HRK', 'HTG', 'HUF',
'IDR', 'ILS', 'INR', 'IQD', 'IRR', 'ISK', 'JMD', 'JOD', 'JPY', 'KES', 'KGS', 'KHR', 'KMF', 'KPW', 'KRW',
'KWD', 'KYD', 'KZT', 'LAK', 'LBP', 'LKR', 'LRD', 'LSL', 'LTL', 'LVL', 'LYD', 'MAD', 'MDL', 'MGA', 'MKD',
'MMK', 'MNT', 'MOP', 'MRO', 'MUR', 'MVR', 'MWK', 'MXN', 'MYR', 'MZM', 'NAD', 'NGN', 'NIO', 'NOK', 'NPR',
'NZD', 'OMR', 'PAB', 'PEN', 'PGK', 'PHP', 'PKR', 'PLN', 'PYG', 'QAR', 'RON', 'RSD', 'RUB', 'SAR', 'SBD',
'SCR', 'SDG', 'SEK', 'SGD', 'SHP', 'SLL', 'SOS', 'SRD', 'SYP', 'SZL', 'THB', 'TJS', 'TMT', 'TND', 'TRY',
'TTD', 'TWD', 'TZS', 'UAH', 'UGX', 'USD', 'UYU', 'UZS', 'VEB', 'VND', 'VUV', 'WST', 'XAF', 'XCD', 'XDR',
'XOF', 'XPF', 'YER', 'ZAR', 'ZMK', 'ZWR']
| 80.97619
| 120
| 0.5813
| 643
| 13,604
| 12.290824
| 0.688958
| 0.002025
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.400253
| 0.302852
| 13,604
| 168
| 121
| 80.97619
| 0.433045
| 0
| 0
| 0
| 0
| 0
| 0.589783
| 0.456891
| 0
| 0
| 0.456891
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| null | 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
e18aa7606044247b303f6a68e3c9285ed0bc2fb4
| 1,735
|
py
|
Python
|
Data-Structures/linked-list/challenge-07/ll-kth-from-end/tests/test_ll_kth_from_end.py
|
AymanNaif/data-structures-and-algorithms
|
f68c8a2829362a4c9cf9084973b56464ca951ca5
|
[
"MIT"
] | null | null | null |
Data-Structures/linked-list/challenge-07/ll-kth-from-end/tests/test_ll_kth_from_end.py
|
AymanNaif/data-structures-and-algorithms
|
f68c8a2829362a4c9cf9084973b56464ca951ca5
|
[
"MIT"
] | 1
|
2021-06-13T16:51:53.000Z
|
2021-06-13T16:51:53.000Z
|
Data-Structures/linked-list/challenge-07/ll-kth-from-end/tests/test_ll_kth_from_end.py
|
AymanNaif/data-structures-and-algorithms
|
f68c8a2829362a4c9cf9084973b56464ca951ca5
|
[
"MIT"
] | 1
|
2021-04-26T09:16:53.000Z
|
2021-04-26T09:16:53.000Z
|
from ll_kth_from_end import __version__
from ll_kth_from_end.ll_kth_from_end import LinkedList
def test_version():
assert __version__ == '0.1.0'
def test_k_greater():
ll = LinkedList()
ll.append('test')
ll.append('hii')
ll.insert('hisai')
ll.insertBefore('test', 'wow')
ll.insertAfter('hii', 55)
print(ll.node_lst)
actual = ll.kthFromEnd(10)
excepted = 'out of index'
assert actual == excepted
def test_k_same_length():
ll = LinkedList()
ll.append('test')
ll.append('hii')
ll.insert('hisai')
ll.insertBefore('test', 'wow')
ll.insertAfter('hii', 55)
print(ll.node_lst)
actual = ll.kthFromEnd(5)
excepted = 'out of index'
assert actual == excepted
def test_k_same_length():
ll = LinkedList()
ll.append('test')
ll.append('hii')
ll.insert('hisai')
ll.insertBefore('test', 'wow')
ll.insertAfter('hii', 55)
actual = ll.kthFromEnd(-1)
excepted = 55
assert actual == excepted
def test_with_size_1_1():
ll = LinkedList()
ll.append('test')
actual = ll.kthFromEnd(0)
excepted = 'test'
assert actual == excepted
def test_with_size_1_2():
ll = LinkedList()
ll.append('test')
actual = ll.kthFromEnd(4)
excepted = 'out of index'
assert actual == excepted
def test_with_size_1_3():
ll = LinkedList()
ll.append('test')
actual = ll.kthFromEnd(-2)
excepted = 'out of index'
assert actual == excepted
def test_happy_path():
ll = LinkedList()
ll.append('test')
ll.append('hii')
ll.insert('hisai')
ll.insertBefore('test', 'wow')
ll.insertAfter('hii', 55)
actual = ll.kthFromEnd(2)
excepted = 'test'
assert actual == excepted
| 21.419753
| 54
| 0.628818
| 232
| 1,735
| 4.521552
| 0.181034
| 0.083889
| 0.093422
| 0.13346
| 0.914204
| 0.816969
| 0.816969
| 0.816969
| 0.619638
| 0.533842
| 0
| 0.020104
| 0.225937
| 1,735
| 80
| 55
| 21.6875
| 0.760983
| 0
| 0
| 0.734375
| 0
| 0
| 0.092795
| 0
| 0
| 0
| 0
| 0
| 0.125
| 1
| 0.125
| false
| 0
| 0.03125
| 0
| 0.15625
| 0.03125
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
bee5928e54e860d294c50d6b9f92c2ec097a9dd6
| 72
|
py
|
Python
|
randterrainpy/__init__.py
|
jackromo/RandTerrainPy
|
4446627848dd1f7d80e613f4557c0eed68efe8cf
|
[
"MIT"
] | 7
|
2017-04-17T19:34:52.000Z
|
2022-01-18T23:37:31.000Z
|
randterrainpy/__init__.py
|
jackromo/RandTerrainPy
|
4446627848dd1f7d80e613f4557c0eed68efe8cf
|
[
"MIT"
] | null | null | null |
randterrainpy/__init__.py
|
jackromo/RandTerrainPy
|
4446627848dd1f7d80e613f4557c0eed68efe8cf
|
[
"MIT"
] | 5
|
2017-11-08T11:22:37.000Z
|
2021-08-07T18:26:54.000Z
|
from terrain import *
from exceptions import *
from terraingen import *
| 18
| 24
| 0.791667
| 9
| 72
| 6.333333
| 0.555556
| 0.350877
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.166667
| 72
| 3
| 25
| 24
| 0.95
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
bee9d720faa0f355f09cdfca0c063ffe884c6a62
| 36
|
py
|
Python
|
pyvodb/__main__.py
|
messa/pyvodb
|
6b61d82448470bad518f9db6e8ef5abdc8056379
|
[
"MIT"
] | 1
|
2018-11-27T15:13:53.000Z
|
2018-11-27T15:13:53.000Z
|
pyvodb/__main__.py
|
messa/pyvodb
|
6b61d82448470bad518f9db6e8ef5abdc8056379
|
[
"MIT"
] | 7
|
2015-07-30T15:01:31.000Z
|
2018-02-27T15:23:14.000Z
|
pyvodb/__main__.py
|
messa/pyvodb
|
6b61d82448470bad518f9db6e8ef5abdc8056379
|
[
"MIT"
] | 5
|
2015-06-25T09:49:25.000Z
|
2018-02-11T12:25:40.000Z
|
# Package entry point for ``python -m pyvodb``: delegate straight to the CLI.
from pyvodb.cli import main
main()
| 9
| 27
| 0.75
| 6
| 36
| 4.5
| 0.833333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.166667
| 36
| 3
| 28
| 12
| 0.9
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.5
| 0
| 0.5
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
8315be91035cc5970fa13974d86816622e4db0d5
| 131
|
py
|
Python
|
src/numgraph/__init__.py
|
gravins/NumGraph
|
69ada09f0e434411a61ab36cccfface77a86aa65
|
[
"MIT"
] | null | null | null |
src/numgraph/__init__.py
|
gravins/NumGraph
|
69ada09f0e434411a61ab36cccfface77a86aa65
|
[
"MIT"
] | null | null | null |
src/numgraph/__init__.py
|
gravins/NumGraph
|
69ada09f0e434411a61ab36cccfface77a86aa65
|
[
"MIT"
] | null | null | null |
from .distributions import barabasi_albert, clique, erdos_renyi, full_grid, random_tree, simple_grid, star, stochastic_block_model
| 65.5
| 130
| 0.854962
| 18
| 131
| 5.833333
| 0.944444
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.083969
| 131
| 1
| 131
| 131
| 0.875
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
833af10d0ac26673e02d8bd8c8ddc1b5f0deabe9
| 116
|
py
|
Python
|
cats/admin.py
|
CodeByLine/dj4e-tutorial
|
8cb592658a18777220eb297e63d42f2a7d53733c
|
[
"CC-BY-3.0"
] | null | null | null |
cats/admin.py
|
CodeByLine/dj4e-tutorial
|
8cb592658a18777220eb297e63d42f2a7d53733c
|
[
"CC-BY-3.0"
] | 3
|
2021-04-12T16:18:58.000Z
|
2021-06-10T20:40:08.000Z
|
cats/admin.py
|
CodeByLine/dj4e-tutorial
|
8cb592658a18777220eb297e63d42f2a7d53733c
|
[
"CC-BY-3.0"
] | null | null | null |
# Register the Cat and Breed models with the Django admin site so they can
# be browsed and edited through the default admin interface.
from django.contrib import admin
from .models import Cat, Breed
admin.site.register(Cat)
admin.site.register(Breed)
| 23.2
| 32
| 0.810345
| 18
| 116
| 5.222222
| 0.555556
| 0.191489
| 0.361702
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.094828
| 116
| 5
| 33
| 23.2
| 0.895238
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.5
| 0
| 0.5
| 0
| 1
| 0
| 0
| null | 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
834d2ac6ad807d23057e2122ced409903c31de0b
| 217
|
py
|
Python
|
ls/joyous/holidays/workalendar.py
|
gardentronic/ls.joyous
|
db2308ce59cd072b83a465b02b9f29c00269fdbb
|
[
"BSD-3-Clause"
] | null | null | null |
ls/joyous/holidays/workalendar.py
|
gardentronic/ls.joyous
|
db2308ce59cd072b83a465b02b9f29c00269fdbb
|
[
"BSD-3-Clause"
] | null | null | null |
ls/joyous/holidays/workalendar.py
|
gardentronic/ls.joyous
|
db2308ce59cd072b83a465b02b9f29c00269fdbb
|
[
"BSD-3-Clause"
] | null | null | null |
# ------------------------------------------------------------------------------
# Joyous Holidays - Workalendar compatibility
# ------------------------------------------------------------------------------
# Monkey
| 43.4
| 80
| 0.202765
| 5
| 217
| 8.8
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.0553
| 217
| 4
| 81
| 54.25
| 0.214634
| 0.958525
| 0
| null | 0
| null | 0
| 0
| null | 0
| 0
| 0
| null | 1
| null | true
| 0
| 0
| null | null | null | 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
83584ad56400971fea82b81adf55917b7d2f0ae6
| 7,848
|
py
|
Python
|
tests/python/unittest/test_te_schedule_postproc_rewrite_for_tensor_core.py
|
XiaoSong9905/tvm
|
48940f697e15d5b50fa1f032003e6c700ae1e423
|
[
"Apache-2.0"
] | 4,640
|
2017-08-17T19:22:15.000Z
|
2019-11-04T15:29:46.000Z
|
tests/python/unittest/test_te_schedule_postproc_rewrite_for_tensor_core.py
|
XiaoSong9905/tvm
|
48940f697e15d5b50fa1f032003e6c700ae1e423
|
[
"Apache-2.0"
] | 3,022
|
2020-11-24T14:02:31.000Z
|
2022-03-31T23:55:31.000Z
|
tests/python/unittest/test_te_schedule_postproc_rewrite_for_tensor_core.py
|
XiaoSong9905/tvm
|
48940f697e15d5b50fa1f032003e6c700ae1e423
|
[
"Apache-2.0"
] | 1,352
|
2017-08-17T19:30:38.000Z
|
2019-11-04T16:09:29.000Z
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# 'License'); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# 'AS IS' BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import tvm
from tvm import te
from tvm import topi
import numpy as np
import tvm.testing
def tensor_core_matmul(warp_tile_m=16, m=64, n=32, l=96):
    """Build, run and verify a tensor-core CUDA matmul schedule.

    Computes C = A @ B with float16 inputs A (n, l) and B (l, m),
    accumulating in float32, then checks the device result against
    ``np.dot`` with rtol=1e-3.

    :param warp_tile_m: warp tile extent along m; the effective warp
        extent WX is min(warp_tile_m, tile_x)
    :param m: number of output columns
    :param n: number of output rows
    :param l: reduction (inner) dimension
    """
    A = te.placeholder((n, l), name="A", dtype="float16")
    B = te.placeholder((l, m), name="B", dtype="float16")
    k = te.reduce_axis((0, l), name="k")
    C = te.compute(
        (n, m), lambda i, j: te.sum(A[i, k].astype("float32") * B[k, j].astype("float32"), axis=k)
    )
    s = te.create_schedule(C.op)
    y, x = s[C].op.axis
    k = s[C].op.reduce_axis[0]
    # Stage the inputs through shared memory and registers; accumulate locally.
    AA = s.cache_read(A, "shared", [C])
    AL = s.cache_read(AA, "local", [C])
    BB = s.cache_read(B, "shared", [C])
    BL = s.cache_read(BB, "local", [C])
    CL = s.cache_write(C, "local")
    # Tiling parameters.
    bx = 4
    by = 32
    step_k = 8
    v = 4
    TX = 8
    TY = 1
    tile_x = bx * TX
    tile_y = by * TY
    WX = min(warp_tile_m, tile_x)
    tile_k = 16
    vthread = 1
    # Split output axes into block / vthread / thread tiles.
    yo, ty = s[C].split(y, tile_y * vthread)
    vy, ty = s[C].split(ty, tile_y)
    ty, yi = s[C].split(ty, TY)
    xo, xi = s[C].split(x, tile_x)
    tz, xi = s[C].split(xi, WX)
    tx, xi = s[C].split(xi, TX)
    ko, ki = s[CL].split(k, step_k * tile_k)
    kl, ki = s[CL].split(ki, tile_k)
    s[C].reorder(yo, xo, tz, ty, tx, yi, xi)
    s[C].bind(yo, te.thread_axis("blockIdx.y"))
    s[C].bind(xo, te.thread_axis("blockIdx.x"))
    s[C].bind(ty, te.thread_axis("threadIdx.y"))
    s[C].bind(tz, te.thread_axis("threadIdx.z"))
    s[C].bind(tx, te.thread_axis("threadIdx.x"))
    s[C].bind(vy, te.thread_axis((0, vthread), "vthread", name="vy"))
    s[CL].compute_at(s[C], tx)
    yo, xo = CL.op.axis
    s[CL].reorder(ko, kl, ki, yo, xo)
    # Cooperative, vectorized load of A into shared memory.
    s[AA].compute_at(s[CL], ko)
    xo, xi = s[AA].split(s[AA].op.axis[1], factor=bx * v)
    tz, tx = s[AA].split(xi, factor=(WX // TX) * v)
    tx, vec = s[AA].split(tx, factor=v)
    fused = s[AA].fuse(s[AA].op.axis[0], xo)
    _, ty = s[AA].split(fused, factor=by)
    s[AA].bind(ty, te.thread_axis("threadIdx.y"))
    s[AA].bind(tz, te.thread_axis("threadIdx.z"))
    s[AA].bind(tx, te.thread_axis("threadIdx.x"))
    s[AA].vectorize(vec)
    # Same cooperative load pattern for B.
    s[BB].compute_at(s[CL], ko)
    xo, xi = s[BB].split(s[BB].op.axis[1], factor=bx * v)
    tz, tx = s[BB].split(xi, factor=(WX // TX) * v)
    tx, vec = s[BB].split(tx, factor=v)
    fused = s[BB].fuse(s[BB].op.axis[0], xo)
    _, ty = s[BB].split(fused, factor=by)
    s[BB].bind(ty, te.thread_axis("threadIdx.y"))
    s[BB].bind(tz, te.thread_axis("threadIdx.z"))
    s[BB].bind(tx, te.thread_axis("threadIdx.x"))
    s[BB].vectorize(vec)
    s[AL].compute_at(s[CL], kl)
    s[BL].compute_at(s[CL], kl)
    # Mark the reduction for the tensor-core postproc rewrite pass.
    s[CL].pragma(ko, "tensor_core")
    func = tvm.build(s, [A, B, C], "cuda")
    dev = tvm.cuda(0)
    a_np = np.random.uniform(size=(n, l)).astype(A.dtype)
    b_np = np.random.uniform(size=(l, m)).astype(B.dtype)
    c_np = np.zeros((n, m), dtype=np.float32)
    a = tvm.nd.array(a_np, dev)
    b = tvm.nd.array(b_np, dev)
    c = tvm.nd.array(np.zeros((n, m), dtype=C.dtype), dev)
    func(a, b, c)
    # Time the kernel, then verify numerics against a NumPy reference.
    evaluator = func.time_evaluator(func.entry_name, dev, number=3)
    print("gemm m=%d n=%d k=%d: %f ms" % (m, n, l, evaluator(a, b, c).mean * 1e3))
    c_np = np.dot(a_np, b_np)
    np.testing.assert_allclose(c_np, c.numpy(), rtol=1e-3)
def tensor_core_batch_matmul(warp_tile_m=16, m=64, n=32, l=96, batch=2):
    """Build, run and verify a batched tensor-core CUDA matmul schedule.

    Computes C[b] = A[b] @ B[b] for each batch slice, float16 inputs with
    float32 accumulation, and checks every slice against ``np.dot`` with
    rtol=1e-3.  The batch axis is bound to blockIdx.z.

    :param warp_tile_m: warp tile extent along m; WX = min(warp_tile_m, tile_x)
    :param m: number of output columns
    :param n: number of output rows
    :param l: reduction (inner) dimension
    :param batch: number of independent matmuls
    """
    A = te.placeholder((batch, n, l), name="A", dtype="float16")
    B = te.placeholder((batch, l, m), name="B", dtype="float16")
    k = te.reduce_axis((0, l), name="k")
    C = te.compute(
        (batch, n, m), lambda b, i, j: te.sum((A[b, i, k] * B[b, k, j]).astype("float32"), axis=k)
    )
    s = te.create_schedule(C.op)
    z, y, x = s[C].op.axis
    k = s[C].op.reduce_axis[0]
    # Stage inputs through shared memory and registers; accumulate locally.
    AA = s.cache_read(A, "shared", [C])
    AL = s.cache_read(AA, "local", [C])
    BB = s.cache_read(B, "shared", [C])
    BL = s.cache_read(BB, "local", [C])
    CL = s.cache_write(C, "local")
    # Tiling parameters (bx is smaller than in the non-batched variant).
    bx = 2
    by = 32
    step_k = 8
    v = 4
    TX = 8
    TY = 1
    tile_x = bx * TX
    tile_y = by * TY
    WX = min(warp_tile_m, tile_x)
    tile_k = 16
    vthread = 1
    # Split output axes into block / vthread / thread tiles.
    yo, ty = s[C].split(y, tile_y * vthread)
    vy, ty = s[C].split(ty, tile_y)
    ty, yi = s[C].split(ty, TY)
    xo, xi = s[C].split(x, tile_x)
    tz, xi = s[C].split(xi, WX)
    tx, xi = s[C].split(xi, TX)
    ko, ki = s[CL].split(k, step_k * tile_k)
    kl, ki = s[CL].split(ki, tile_k)
    s[C].reorder(z, yo, xo, tz, ty, tx, yi, xi)
    s[C].bind(z, te.thread_axis("blockIdx.z"))
    s[C].bind(yo, te.thread_axis("blockIdx.y"))
    s[C].bind(xo, te.thread_axis("blockIdx.x"))
    s[C].bind(ty, te.thread_axis("threadIdx.y"))
    s[C].bind(tz, te.thread_axis("threadIdx.z"))
    s[C].bind(tx, te.thread_axis("threadIdx.x"))
    s[C].bind(vy, te.thread_axis((0, vthread), "vthread", name="vy"))
    s[CL].compute_at(s[C], tx)
    zo, yo, xo = CL.op.axis
    s[CL].reorder(ko, kl, ki, zo, yo, xo)
    # Cooperative, vectorized load of A into shared memory (axes shifted by
    # one because of the leading batch axis).
    s[AA].compute_at(s[CL], ko)
    xo, xi = s[AA].split(s[AA].op.axis[2], factor=bx * v)
    tz, tx = s[AA].split(xi, factor=(WX // TX) * v)
    tx, vec = s[AA].split(tx, factor=v)
    fused = s[AA].fuse(s[AA].op.axis[1], xo)
    _, ty = s[AA].split(fused, factor=by)
    s[AA].bind(ty, te.thread_axis("threadIdx.y"))
    s[AA].bind(tz, te.thread_axis("threadIdx.z"))
    s[AA].bind(tx, te.thread_axis("threadIdx.x"))
    s[AA].vectorize(vec)
    # Same cooperative load pattern for B.
    s[BB].compute_at(s[CL], ko)
    xo, xi = s[BB].split(s[BB].op.axis[2], factor=bx * v)
    tz, tx = s[BB].split(xi, factor=(WX // TX) * v)
    tx, vec = s[BB].split(tx, factor=v)
    fused = s[BB].fuse(s[BB].op.axis[1], xo)
    _, ty = s[BB].split(fused, factor=by)
    s[BB].bind(ty, te.thread_axis("threadIdx.y"))
    s[BB].bind(tz, te.thread_axis("threadIdx.z"))
    s[BB].bind(tx, te.thread_axis("threadIdx.x"))
    s[BB].vectorize(vec)
    s[AL].compute_at(s[CL], kl)
    s[BL].compute_at(s[CL], kl)
    # Mark the reduction for the tensor-core postproc rewrite pass.
    s[CL].pragma(ko, "tensor_core")
    func = tvm.build(s, [A, B, C], "cuda")
    dev = tvm.cuda(0)
    a_np = np.random.uniform(size=(batch, n, l)).astype(A.dtype)
    b_np = np.random.uniform(size=(batch, l, m)).astype(B.dtype)
    c_np = np.zeros((batch, n, m), dtype=np.float32)
    a = tvm.nd.array(a_np, dev)
    b = tvm.nd.array(b_np, dev)
    c = tvm.nd.array(np.zeros((batch, n, m), dtype=C.dtype), dev)
    func(a, b, c)
    # Time the kernel, then verify each batch slice against NumPy.
    evaluator = func.time_evaluator(func.entry_name, dev, number=3)
    print(
        "batch gemm m=%d n=%d k=%d batch=%d: %f ms"
        % (m, n, l, batch, evaluator(a, b, c).mean * 1e3)
    )
    for bs in range(batch):
        c_np[bs, :, :] = np.dot(a_np[bs, :, :], b_np[bs, :, :])
    np.testing.assert_allclose(c_np, c.numpy(), rtol=1e-3)
@tvm.testing.requires_tensorcore
def test_tensor_core_matmul():
    """Run the matmul schedule for each supported warp tile shape."""
    # warp tiles: 16x16x16, 8x32x16, 32x8x16
    for warp_tile in (16, 8, 32):
        tensor_core_matmul(warp_tile)
@tvm.testing.requires_tensorcore
def test_tensor_core_batch_matmul():
    """Run the batched matmul schedule with its default parameters."""
    tensor_core_batch_matmul()
# Allow running this test file directly, outside of pytest.
if __name__ == "__main__":
    test_tensor_core_matmul()
    test_tensor_core_batch_matmul()
| 33.827586
| 98
| 0.595056
| 1,459
| 7,848
| 3.096642
| 0.135024
| 0.014608
| 0.066401
| 0.083665
| 0.762948
| 0.751439
| 0.734838
| 0.730412
| 0.710491
| 0.684374
| 0
| 0.01702
| 0.206422
| 7,848
| 231
| 99
| 33.974026
| 0.708414
| 0.106651
| 0
| 0.674033
| 0
| 0
| 0.06866
| 0
| 0
| 0
| 0
| 0
| 0.01105
| 1
| 0.022099
| false
| 0
| 0.027624
| 0
| 0.049724
| 0.01105
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
55d77b5fb76e3e87590685d28bdc50048e72e3f4
| 51
|
py
|
Python
|
ffai/util/__init__.py
|
tysen2k/ffai
|
2fa1fd45a8877986fdb21e3fea5e01cbf819d3ec
|
[
"Apache-2.0"
] | null | null | null |
ffai/util/__init__.py
|
tysen2k/ffai
|
2fa1fd45a8877986fdb21e3fea5e01cbf819d3ec
|
[
"Apache-2.0"
] | null | null | null |
ffai/util/__init__.py
|
tysen2k/ffai
|
2fa1fd45a8877986fdb21e3fea5e01cbf819d3ec
|
[
"Apache-2.0"
] | null | null | null |
from .bothelper import *
from .pathfinding import *
| 25.5
| 26
| 0.784314
| 6
| 51
| 6.666667
| 0.666667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.137255
| 51
| 2
| 26
| 25.5
| 0.909091
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
55dc1a216e6032ace7fd5fac7c90cc1bd76dbdd1
| 133
|
py
|
Python
|
bender/data_importer/interface.py
|
otovo/bender
|
b64f0656658287b932ce44d52e6035682652fe33
|
[
"Apache-2.0"
] | 2
|
2021-12-17T15:45:40.000Z
|
2021-12-18T14:15:43.000Z
|
bender/data_importer/interface.py
|
otovo/bender
|
b64f0656658287b932ce44d52e6035682652fe33
|
[
"Apache-2.0"
] | 2
|
2022-03-30T14:31:12.000Z
|
2022-03-31T14:25:25.000Z
|
bender/data_importer/interface.py
|
otovo/bender
|
b64f0656658287b932ce44d52e6035682652fe33
|
[
"Apache-2.0"
] | 1
|
2021-12-19T17:16:38.000Z
|
2021-12-19T17:16:38.000Z
|
from pandas import DataFrame
class DataImporter:
    """Interface for asynchronous data sources that yield a pandas DataFrame."""

    async def import_data(self) -> DataFrame:
        """Fetch and return the data as a DataFrame.

        :raises NotImplementedError: always; concrete importers must override.
        """
        raise NotImplementedError()
| 19
| 45
| 0.736842
| 14
| 133
| 6.928571
| 0.857143
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.203008
| 133
| 6
| 46
| 22.166667
| 0.915094
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.75
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
3603af6df185d5a82a98357301692e826d1bd54b
| 11,630
|
py
|
Python
|
akshare/index/index_investing.py
|
rayshifu/akshare
|
4f34f45edf242e78320a86b29347868e4d8d5933
|
[
"MIT"
] | null | null | null |
akshare/index/index_investing.py
|
rayshifu/akshare
|
4f34f45edf242e78320a86b29347868e4d8d5933
|
[
"MIT"
] | null | null | null |
akshare/index/index_investing.py
|
rayshifu/akshare
|
4f34f45edf242e78320a86b29347868e4d8d5933
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# -*- coding:utf-8 -*-
"""
Date: 2022/3/18 17:03
Desc: 英为财情-股票指数-全球股指与期货指数数据接口
https://cn.investing.com/indices/volatility-s-p-500-historical-data
"""
import re
import pandas as pd
from bs4 import BeautifulSoup
from akshare.index.cons import short_headers, long_headers
from akshare.utils.ak_session import session
def _get_global_index_country_name_url() -> dict:
    """
    Global indices - fetch the countries that have global index data.
    https://cn.investing.com/indices/global-indices?majorIndices=on&primarySectors=on&bonds=on&additionalIndices=on&otherIndices=on&c_id=37
    :return: mapping of country name to its ``c_id`` country code
    :rtype: dict
    """
    url = "https://cn.investing.com/indices/global-indices"
    params = {
        "majorIndices": "on",
        "primarySectors": "on",
        "bonds": "on",
        "additionalIndices": "on",
        "otherIndices": "on",
    }
    r = session.get(url, params=params, headers=short_headers)
    data_text = r.text
    soup = BeautifulSoup(data_text, "lxml")
    # Skip the first <option>, which is a placeholder entry.
    name_url_option_list = soup.find_all("option")[1:]
    # Only options whose value carries a "c_id" query parameter are countries.
    url_list = [
        item["value"]
        for item in name_url_option_list
        if "c_id" in item["value"]
    ]
    # Pull the numeric code out of "...?c_id=<code>".
    url_list_code = [
        item["value"].split("?")[1].split("=")[1]
        for item in name_url_option_list
        if "c_id" in item["value"]
    ]
    # Truncate names to align with the filtered URL list.
    name_list = [item.get_text() for item in name_url_option_list][
        : len(url_list)
    ]
    _temp_df = pd.DataFrame([name_list, url_list_code]).T
    name_code_list = dict(zip(_temp_df.iloc[:, 0], _temp_df.iloc[:, 1]))
    return name_code_list
def _get_global_country_name_url() -> dict:
    """
    Fetch the relative URL for every country that has index data available.
    :return: mapping of country name to its relative URL
    :rtype: dict
    """
    url = "https://cn.investing.com/indices/"
    res = session.post(url, headers=short_headers)
    soup = BeautifulSoup(res.text, "lxml")
    name_url_option_list = soup.find(
        "select", attrs={"name": "country"}
    ).find_all("option")[
        1:
    ]  # drop the "all countries and regions" placeholder
    url_list = [item["value"] for item in name_url_option_list]
    name_list = [item.get_text() for item in name_url_option_list]
    name_code_map_dict = {}
    name_code_map_dict.update(zip(name_list, url_list))
    return name_code_map_dict
def index_investing_global_country_name_url(country: str = "中国") -> dict:
    """
    Reference page: https://cn.investing.com/indices/
    Collect, for the chosen country, the names and URLs of its major
    indices, primary sectors, additional indices and other indices.
    :param country: Chinese country name, as returned by
        _get_global_country_name_url
    :return: dict mapping index name to its relative URL
    """
    # Silence pandas chained-assignment warnings triggered downstream.
    pd.set_option("mode.chained_assignment", None)
    name_url_dict = _get_global_country_name_url()
    name_code_dict = _get_global_index_country_name_url()
    url = f"https://cn.investing.com{name_url_dict[country]}?&majorIndices=on&primarySectors=on&additionalIndices=on&otherIndices=on"
    res = session.post(url, headers=short_headers)
    soup = BeautifulSoup(res.text, "lxml")
    # Index rows on the country page carry the "plusIconTd" CSS class.
    url_list = [
        item.find("a")["href"]
        for item in soup.find_all(attrs={"class": "plusIconTd"})
    ]
    name_list = [
        item.find("a").get_text()
        for item in soup.find_all(attrs={"class": "plusIconTd"})
    ]
    name_code_map_dict = {}
    name_code_map_dict.update(zip(name_list, url_list))
    # Merge in the country's entries from the global-indices table (id "cr_12").
    url = "https://cn.investing.com/indices/global-indices"
    params = {
        "majorIndices": "on",
        "primarySectors": "on",
        "bonds": "on",
        "additionalIndices": "on",
        "otherIndices": "on",
        "c_id": name_code_dict[country],
    }
    r = session.get(url, params=params, headers=short_headers)
    data_text = r.text
    soup = BeautifulSoup(data_text, "lxml")
    soup_list = soup.find("table", attrs={"id": "cr_12"}).find_all("a")
    global_index_url = [item["href"] for item in soup_list]
    global_index_name = [item["title"] for item in soup_list]
    name_code_map_dict.update(zip(global_index_name, global_index_url))
    return name_code_map_dict
def index_investing_global(
    country: str = "美国",
    index_name: str = "纳斯达克100",
    period: str = "每日",
    start_date: str = "20100101",
    end_date: str = "20211031",
) -> pd.DataFrame:
    """
    Historical data for a specific index of a specific country between
    start_date and end_date.
    :param country: country name, as used by index_investing_global_country_name_url
    :type country: str
    :param index_name: index name within that country
    :type index_name: str
    :param period: choice of {"每日", "每周", "每月"} (daily / weekly / monthly)
    :type period: str
    :param start_date: e.g. '20100101'; note the format
    :type start_date: str
    :param end_date: e.g. '20211031'; note the format
    :type end_date: str
    :return: the requested historical data
    :rtype: pandas.DataFrame
    """
    # Convert "YYYYMMDD" into the "YYYY/MM/DD" form the site expects.
    start_date = "/".join([start_date[:4], start_date[4:6], start_date[6:]])
    end_date = "/".join([end_date[:4], end_date[4:6], end_date[6:]])
    period_map = {"每日": "Daily", "每周": "Weekly", "每月": "Monthly"}
    name_code_dict = index_investing_global_country_name_url(country)
    temp_url = f"https://cn.investing.com/{name_code_dict[index_name]}-historical-data"
    res = session.post(temp_url, headers=short_headers)
    soup = BeautifulSoup(res.text, "lxml")
    title = soup.find("h2", attrs={"class": "float_lang_base_1"}).get_text()
    res = session.post(temp_url, headers=short_headers)
    soup = BeautifulSoup(res.text, "lxml")
    # The page embeds curr_id/smlID inside an inline script; pull the digits out.
    data = soup.find_all(text=re.compile("window.histDataExcessInfo"))[
        0
    ].strip()
    para_data = re.findall(r"\d+", data)
    payload = {
        "curr_id": para_data[0],
        "smlID": para_data[1],
        "header": title,
        "st_date": start_date,
        "end_date": end_date,
        "interval_sec": period_map[period],
        "sort_col": "date",
        "sort_ord": "DESC",
        "action": "historical_data",
    }
    url = "https://cn.investing.com/instruments/HistoricalDataAjax"
    res = session.post(url, data=payload, headers=long_headers)
    df_data = pd.read_html(res.text)[0]
    # Dates come back as Chinese "YYYY年MM月[DD日]" strings; monthly data has no day.
    if period == "每月":
        df_data.index = pd.to_datetime(df_data["日期"], format="%Y年%m月")
    else:
        df_data.index = pd.to_datetime(df_data["日期"], format="%Y年%m月%d日")
    # Normalise the volume column ("交易量"): "-" means no volume; B/M/K
    # suffixes denote billions / millions / thousands.
    # NOTE(review): the df[col][mask] = ... pattern below is chained
    # assignment; it relies on the warning being disabled upstream — confirm
    # before refactoring to .loc.
    if any(df_data["交易量"].astype(str).str.contains("-")):
        df_data["交易量"][df_data["交易量"].str.contains("-")] = df_data["交易量"][
            df_data["交易量"].str.contains("-")
        ].replace("-", 0)
    if any(df_data["交易量"].astype(str).str.contains("B")):
        df_data["交易量"][df_data["交易量"].str.contains("B").fillna(False)] = (
            df_data["交易量"][df_data["交易量"].str.contains("B").fillna(False)]
            .str.replace("B", "")
            .str.replace(",", "")
            .astype(float)
            * 1000000000
        )
    if any(df_data["交易量"].astype(str).str.contains("M")):
        df_data["交易量"][df_data["交易量"].str.contains("M").fillna(False)] = (
            df_data["交易量"][df_data["交易量"].str.contains("M").fillna(False)]
            .str.replace("M", "")
            .str.replace(",", "")
            .astype(float)
            * 1000000
        )
    if any(df_data["交易量"].astype(str).str.contains("K")):
        df_data["交易量"][df_data["交易量"].str.contains("K").fillna(False)] = (
            df_data["交易量"][df_data["交易量"].str.contains("K").fillna(False)]
            .str.replace("K", "")
            .str.replace(",", "")
            .astype(float)
            * 1000
        )
    df_data["交易量"] = df_data["交易量"].astype(float)
    # Keep close/open/high/low/volume, oldest first, with the date as a column.
    df_data = df_data[["收盘", "开盘", "高", "低", "交易量"]]
    df_data = df_data.astype(float)
    df_data.sort_index(inplace=True)
    df_data.reset_index(inplace=True)
    df_data["日期"] = pd.to_datetime(df_data["日期"]).dt.date
    return df_data
def index_investing_global_from_url(
    url: str = "https://www.investing.com/indices/ftse-epra-nareit-eurozone",
    period: str = "每日",
    start_date: str = "20000101",
    end_date: str = "20210909",
) -> pd.DataFrame:
    """
    Historical data for a specific index, identified directly by its URL,
    between start_date and end_date.
    https://www.investing.com/indices/ftse-epra-nareit-eurozone
    :param url: link to the index page
    :type url: str
    :param period: choice of {"每日", "每周", "每月"} (daily / weekly / monthly)
    :type period: str
    :param start_date: e.g. '20000101'; note the format
    :type start_date: str
    :param end_date: e.g. '20210909'; note the format
    :type end_date: str
    :return: the requested historical data
    :rtype: pandas.DataFrame
    """
    # Convert "YYYYMMDD" into the "YYYY/MM/DD" form the site expects.
    start_date = "/".join([start_date[:4], start_date[4:6], start_date[6:]])
    end_date = "/".join([end_date[:4], end_date[4:6], end_date[6:]])
    period_map = {"每日": "Daily", "每周": "Weekly", "每月": "Monthly"}
    # Re-anchor the index slug onto the cn.investing.com historical-data page.
    url_name = url.split("/")[-1]
    temp_url = f"https://cn.investing.com/indices/{url_name}-historical-data"
    res = session.post(temp_url, headers=short_headers)
    soup = BeautifulSoup(res.text, "lxml")
    title = soup.find("h2", attrs={"class": "float_lang_base_1"}).get_text()
    res = session.post(temp_url, headers=short_headers)
    soup = BeautifulSoup(res.text, "lxml")
    # The page embeds curr_id/smlID inside an inline script; pull the digits out.
    data = soup.find_all(text=re.compile("window.histDataExcessInfo"))[
        0
    ].strip()
    para_data = re.findall(r"\d+", data)
    payload = {
        "curr_id": para_data[0],
        "smlID": para_data[1],
        "header": title,
        "st_date": start_date,
        "end_date": end_date,
        "interval_sec": period_map[period],
        "sort_col": "date",
        "sort_ord": "DESC",
        "action": "historical_data",
    }
    url = "https://cn.investing.com/instruments/HistoricalDataAjax"
    res = session.post(url, data=payload, headers=long_headers)
    df_data = pd.read_html(res.text)[0]
    # Dates come back as Chinese "YYYY年MM月[DD日]" strings; monthly data has no day.
    if period == "每月":
        df_data.index = pd.to_datetime(df_data["日期"], format="%Y年%m月")
    else:
        df_data.index = pd.to_datetime(df_data["日期"], format="%Y年%m月%d日")
    # Normalise the volume column ("交易量"): "-" means no volume; B/M/K
    # suffixes denote billions / millions / thousands.
    # NOTE(review): the df[col][mask] = ... pattern below is chained
    # assignment; it works only while "mode.chained_assignment" is disabled
    # elsewhere — confirm before refactoring to .loc.
    if any(df_data["交易量"].astype(str).str.contains("-")):
        df_data["交易量"][df_data["交易量"].str.contains("-")] = df_data["交易量"][
            df_data["交易量"].str.contains("-")
        ].replace("-", 0)
    if any(df_data["交易量"].astype(str).str.contains("B")):
        df_data["交易量"][df_data["交易量"].str.contains("B").fillna(False)] = (
            df_data["交易量"][df_data["交易量"].str.contains("B").fillna(False)]
            .str.replace("B", "")
            .str.replace(",", "")
            .astype(float)
            * 1000000000
        )
    if any(df_data["交易量"].astype(str).str.contains("M")):
        df_data["交易量"][df_data["交易量"].str.contains("M").fillna(False)] = (
            df_data["交易量"][df_data["交易量"].str.contains("M").fillna(False)]
            .str.replace("M", "")
            .str.replace(",", "")
            .astype(float)
            * 1000000
        )
    if any(df_data["交易量"].astype(str).str.contains("K")):
        df_data["交易量"][df_data["交易量"].str.contains("K").fillna(False)] = (
            df_data["交易量"][df_data["交易量"].str.contains("K").fillna(False)]
            .str.replace("K", "")
            .str.replace(",", "")
            .astype(float)
            * 1000
        )
    df_data["交易量"] = df_data["交易量"].astype(float)
    # Keep close/open/high/low/volume, oldest first, with the date as a column.
    df_data = df_data[["收盘", "开盘", "高", "低", "交易量"]]
    df_data = df_data.astype(float)
    df_data.sort_index(inplace=True)
    df_data.reset_index(inplace=True)
    df_data["日期"] = pd.to_datetime(df_data["日期"]).dt.date
    return df_data
# Manual smoke test: fetch a few series and print them when run directly.
if __name__ == "__main__":
    index_investing_global_from_url_df = index_investing_global_from_url(
        url="https://www.investing.com/indices/ftse-epra-nareit-hong-kong",
        period="每日",
        start_date="19900101",
        end_date="20210909",
    )
    print(index_investing_global_from_url_df)
    index_investing_global_country_name_url_dict = (
        index_investing_global_country_name_url("美国")
    )
    index_investing_global_df = index_investing_global(
        country="中国",
        index_name="富时中国A50指数",
        period="每日",
        start_date="20010101",
        end_date="20110316",
    )
    print(index_investing_global_df)
| 36.23053
| 139
| 0.610146
| 1,567
| 11,630
| 4.287811
| 0.145501
| 0.064295
| 0.058937
| 0.029469
| 0.813514
| 0.77705
| 0.743712
| 0.71082
| 0.690281
| 0.66111
| 0
| 0.021911
| 0.215133
| 11,630
| 320
| 140
| 36.34375
| 0.714176
| 0.108341
| 0
| 0.66129
| 0
| 0.004032
| 0.157858
| 0.007184
| 0
| 0
| 0
| 0
| 0
| 1
| 0.020161
| false
| 0
| 0.020161
| 0
| 0.060484
| 0.008065
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
361f2399caa5f13f5a2092e1236b6cbe3fd8986e
| 3,280
|
py
|
Python
|
tests/integration/test_over.py
|
goodwanghan/dask-sql
|
990d647d3788aa200c28c01fb1429b176dfbc617
|
[
"MIT"
] | null | null | null |
tests/integration/test_over.py
|
goodwanghan/dask-sql
|
990d647d3788aa200c28c01fb1429b176dfbc617
|
[
"MIT"
] | null | null | null |
tests/integration/test_over.py
|
goodwanghan/dask-sql
|
990d647d3788aa200c28c01fb1429b176dfbc617
|
[
"MIT"
] | 1
|
2021-01-24T05:52:50.000Z
|
2021-01-24T05:52:50.000Z
|
import numpy as np
import pandas as pd
from pandas.testing import assert_frame_equal
def test_over_with_sorting(c, user_table_1):
    """ROW_NUMBER() with a plain ORDER BY ranks rows over the whole table."""
    result = c.sql(
        """
    SELECT
        user_id,
        ROW_NUMBER() OVER (ORDER BY user_id, b) AS R
    FROM user_table_1
    """
    ).compute()

    expected = pd.DataFrame({"user_id": user_table_1.user_id, "R": [3, 1, 2, 4]})
    expected["R"] = expected["R"].astype("Int64")
    assert_frame_equal(result, expected)
def test_over_with_partitioning(c, user_table_2):
    """ROW_NUMBER() partitioned by c restarts numbering in every partition."""
    result = c.sql(
        """
    SELECT
        user_id,
        ROW_NUMBER() OVER (PARTITION BY c) AS R
    FROM user_table_2
    """
    ).compute()

    expected = pd.DataFrame({"user_id": user_table_2.user_id, "R": [1, 1, 1, 1]})
    expected["R"] = expected["R"].astype("Int64")
    assert_frame_equal(result, expected)
def test_over_with_grouping_and_sort(c, user_table_1):
    """ROW_NUMBER() with both PARTITION BY and ORDER BY ranks within groups."""
    result = c.sql(
        """
    SELECT
        user_id,
        ROW_NUMBER() OVER (PARTITION BY user_id ORDER BY b) AS R
    FROM user_table_1
    """
    ).compute()

    expected = pd.DataFrame({"user_id": user_table_1.user_id, "R": [2, 1, 1, 1]})
    expected["R"] = expected["R"].astype("Int64")
    assert_frame_equal(result, expected)
def test_over_with_different(c, user_table_1):
    """Two different OVER clauses can coexist in one SELECT."""
    result = c.sql(
        """
    SELECT
        user_id,
        ROW_NUMBER() OVER (PARTITION BY user_id ORDER BY b) AS R1,
        ROW_NUMBER() OVER (ORDER BY user_id, b) AS R2
    FROM user_table_1
    """
    ).compute()

    expected = pd.DataFrame(
        {"user_id": user_table_1.user_id, "R1": [2, 1, 1, 1], "R2": [3, 1, 2, 4],}
    )
    for name in ("R1", "R2"):
        expected[name] = expected[name].astype("Int64")
    assert_frame_equal(result, expected)
def test_over_calls(c):
    """Exercise the supported OVER(...) window functions in one query."""
    df = c.sql(
        """
    SELECT
        ROW_NUMBER() OVER (PARTITION BY user_id ORDER BY b) AS O1,
        FIRST_VALUE(user_id*10 - b) OVER (PARTITION BY user_id ORDER BY b) AS O2,
        SINGLE_VALUE(user_id*10 - b) OVER (PARTITION BY user_id ORDER BY b) AS O3,
        LAST_VALUE(user_id*10 - b) OVER (PARTITION BY user_id ORDER BY b) AS O4,
        SUM(user_id) OVER (PARTITION BY user_id ORDER BY b) AS O5,
        AVG(user_id) OVER (PARTITION BY user_id ORDER BY b) AS O6,
        COUNT(*) OVER (PARTITION BY user_id ORDER BY b) AS O7,
        COUNT(b) OVER (PARTITION BY user_id ORDER BY b) AS O7b,
        MAX(b) OVER (PARTITION BY user_id ORDER BY b) AS O8,
        MIN(b) OVER (PARTITION BY user_id ORDER BY b) AS O9
    FROM user_table_1
    """
    )
    df = df.compute()

    expected_df = pd.DataFrame(
        {
            "O1": [2, 1, 1, 1],
            "O2": [19, 7, 19, 27],
            "O3": [19, 7, 19, 27],
            "O4": [17, 7, 17, 27],
            "O5": [4, 1, 4, 3],
            "O6": [2, 1, 2, 3],
            "O7": [2, 1, 2, 1],
            "O7b": [2, 1, 2, 1],
            "O8": [3, 3, 3, 3],
            "O9": [1, 3, 1, 3],
        }
    )
    # Bug fix: the skip condition used "06" (zero-six), which matches no
    # column, so every column — including O6 — was cast to Int64 and only
    # the explicit float64 re-cast below papered over it.  Skip "O6"
    # (letter O) as intended: AVG produces a float64 column.
    for col in expected_df.columns:
        if col == "O6":
            continue
        expected_df[col] = expected_df[col].astype("Int64")
    expected_df["O6"] = expected_df["O6"].astype("float64")
    assert_frame_equal(df, expected_df)
| 29.026549
| 84
| 0.567073
| 523
| 3,280
| 3.332696
| 0.151052
| 0.106713
| 0.064257
| 0.130809
| 0.765921
| 0.756741
| 0.740677
| 0.740677
| 0.704532
| 0.662651
| 0
| 0.05601
| 0.292378
| 3,280
| 112
| 85
| 29.285714
| 0.694959
| 0
| 0
| 0.37931
| 0
| 0
| 0.051741
| 0
| 0
| 0
| 0
| 0
| 0.103448
| 1
| 0.086207
| false
| 0
| 0.051724
| 0
| 0.137931
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
361fdd8c5a5a90cf974a26da3b2163432daed5e1
| 626
|
py
|
Python
|
pycta2045/models/model.py
|
mohamm-alsaid/CTA2045-DER
|
e66d0d468dda42f8b3640b9f81bad6b6193002d1
|
[
"MIT"
] | 1
|
2021-10-04T17:32:53.000Z
|
2021-10-04T17:32:53.000Z
|
pycta2045/models/model.py
|
mohamm-alsaid/CTA2045-DER
|
e66d0d468dda42f8b3640b9f81bad6b6193002d1
|
[
"MIT"
] | null | null | null |
pycta2045/models/model.py
|
mohamm-alsaid/CTA2045-DER
|
e66d0d468dda42f8b3640b9f81bad6b6193002d1
|
[
"MIT"
] | null | null | null |
import abc
class CTA2045Model(abc.ABC):
    """Abstract base class for CTA-2045 DER device models.

    Each handler takes the decoded command payload as a dict and returns
    a dict describing the device's response.
    """

    @abc.abstractmethod
    def loadup(self, payload: dict) -> dict:
        """Handle a load-up command."""
        pass

    @abc.abstractmethod
    def operating_status(self, payload: dict) -> dict:
        """Handle an operating-status request."""
        pass

    @abc.abstractmethod
    def shed(self, payload: dict) -> dict:
        """Handle a shed command."""
        pass

    @abc.abstractmethod
    def endshed(self, payload: dict) -> dict:
        """Handle an end-shed command."""
        pass

    @abc.abstractmethod
    def commodity_read(self, payload: dict) -> dict:
        """Handle a commodity-read request."""
        pass

    @abc.abstractmethod
    def critical_peak_event(self, payload: dict) -> dict:
        """Handle a critical-peak-event command."""
        pass

    @abc.abstractmethod
    def grid_emergency(self, payload: dict) -> dict:
        """Handle a grid-emergency command."""
        pass
| 24.076923
| 53
| 0.659744
| 74
| 626
| 5.513514
| 0.297297
| 0.291667
| 0.343137
| 0.32598
| 0.688725
| 0.632353
| 0.632353
| 0.632353
| 0
| 0
| 0
| 0.008333
| 0.233227
| 626
| 25
| 54
| 25.04
| 0.841667
| 0
| 0
| 0.608696
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.304348
| false
| 0.304348
| 0.043478
| 0
| 0.391304
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 5
|
3626069cf41d4452bfaebcb371becf98853c94c9
| 8,913
|
py
|
Python
|
py_dp/dispersion/mapping_input.py
|
amirdel/dispersion-continua
|
2e1f7a3fbfcdc0b27c546cb0ae51a628a926ad60
|
[
"0BSD"
] | null | null | null |
py_dp/dispersion/mapping_input.py
|
amirdel/dispersion-continua
|
2e1f7a3fbfcdc0b27c546cb0ae51a628a926ad60
|
[
"0BSD"
] | null | null | null |
py_dp/dispersion/mapping_input.py
|
amirdel/dispersion-continua
|
2e1f7a3fbfcdc0b27c546cb0ae51a628a926ad60
|
[
"0BSD"
] | null | null | null |
# Copyright 2017 Amir Hossein Delgoshaie, amirdel@stanford.edu
#
# Permission to use, copy, modify, and/or distribute this software for any purpose with or without fee
# is hereby granted, provided that the above copyright notice and this permission notice appear in all
# copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE
# INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE
# FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
# LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION,
# ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
from py_dp.dispersion.binning import abs_vel_log_bins_low_high, make_theta_bins_linear, \
make_input_for_binning_v_theta_freq, binning_input_v_theta_freq_y, make_y_bins_linear
import numpy as np
import os as os
import itertools as itertools
class TemporalMapInput(object):
    def __init__(self, input_folder, n_binning_realz, n_absv_class, n_theta_class, time_step,
                 n_slow_class, max_allowed, verbose=True, print_every=40, average_available=False):
        """
        Generate the input for the mapping object for a temporal Markov model, it contains
        sample arrays for velocity, angle and frequency, and the bins for the velocity and
        angle processes.
        :param input_folder: folder containing the saved realization npz files
        :param n_binning_realz: number of realization files used for making the bins
        :param n_absv_class: number of velocity classes
        :param n_theta_class: number of angle classes
        :param time_step: averaging time step
        :param n_slow_class: number of classes reserved for the slow (low velocity) end
        :param max_allowed: cap used when constructing the velocity bin edges
        :param verbose: print progress while loading data
        :param print_every: progress print frequency
        :param average_available: if True load pre-averaged 'avg_polar_*.npz' files
        """
        self.time_step = time_step
        self.input_folder = input_folder
        self.n_binning_realz = n_binning_realz
        self.n_absv_class = n_absv_class
        self.n_theta_class = n_theta_class
        self.n_slow_class = n_slow_class
        self.max_allowed = max_allowed
        # making sample data for creating bins
        self.v_array, self.theta_array, self.freq_array, self.pointer_list, self.initial_v, self.initial_f, self.initial_theta = \
            self.binning_data(average_available, input_folder, n_binning_realz, time_step, verbose, print_every)
        self.v_log_edges = abs_vel_log_bins_low_high(self.v_array, n_absv_class, n_slow_class, max_allowed)
        self.theta_edges = make_theta_bins_linear(n_theta_class)

    def binning_data(self, average_available, input_folder, n_binning_realz, time_step, verbose, print_every):
        """
        Load the sample arrays used for making the bins.
        :return: (v, theta, f, pointer_list, initial_v, initial_f, initial_theta)
        """
        if average_available:
            init_data = np.load(os.path.join(input_folder, 'initial_arrays.npz'))
            initial_v, initial_theta, initial_f = init_data['v'], init_data['theta'], init_data['f']
            big_v, big_theta, big_f, big_ptr = [[] for _ in range(4)]
            realz_used = 0
            for file_idx in range(n_binning_realz):
                # load a polar file
                data = np.load(os.path.join(input_folder, 'avg_polar_' + str(file_idx) + '.npz'))
                # Bug fix: the original also did ``file_idx += 1`` here, which has no
                # effect on a ``for ... in range(...)`` loop -- removed as dead code.
                # NOTE(review): realz_used is accumulated but never used downstream.
                realz_used += data['n_realz']
                big_v.append(data['V'])
                big_theta.append(data['Theta'])
                big_f.append(data['F'])
                big_ptr.append(data['ptr'])
            # flatten the nested lists
            big_v, big_theta, big_f, big_ptr = [flatten(ll) for ll in [big_v, big_theta, big_f, big_ptr]]
            return big_v, big_theta, big_f, big_ptr, initial_v, initial_f, initial_theta
        else:
            return make_input_for_binning_v_theta_freq(input_folder, n_binning_realz, time_step,
                                                       verbose=verbose, print_every=print_every)
def flatten(ll):
    """Flatten a nested iterable (e.g. a list of arrays) into one 1-D float64 array.

    :param ll: iterable of iterables
    :return: 1-D numpy array with dtype float64
    """
    chain = itertools.chain.from_iterable(ll)
    # Bug fix: ``np.float`` was a deprecated alias of the builtin ``float`` and
    # was removed in NumPy 1.24; use the builtin directly (same float64 dtype).
    return np.array(list(chain), dtype=float)
class SpatialMapInput(object):
    def __init__(self, input_folder, n_binning_realz, n_absv_class, n_slow_class, max_allowed,
                 init_percentile=None):
        """
        Input holder for a spatial Markov mapping: loads sample (v, theta) arrays and
        builds the velocity and angle bin edges from them.
        :param input_folder: folder containing the saved realization npz files
        :param n_binning_realz: number of realization files used for making the bins
        :param n_absv_class: number of velocity classes
        :param n_slow_class: number of classes reserved for the slow (low velocity) end
        :param max_allowed: cap used when constructing the velocity bin edges
        :param init_percentile: optional percentile forwarded to the velocity binning
        """
        self.input_folder = input_folder
        self.n_binning_realz = n_binning_realz
        self.n_absv_class = n_absv_class
        self.n_slow_class = n_slow_class
        self.max_allowed = max_allowed
        # making sample data for creating bins
        self.v_array, self.theta_array, self.pointer_list, self.initial_v, self.initial_theta = \
            self.binning_data(input_folder, n_binning_realz)
        self.v_log_edges = abs_vel_log_bins_low_high(self.v_array, n_absv_class, n_slow_class, max_allowed,
                                                     init_percentile)
        # NOTE(review): the theta bin count is hard coded to 4 here, unlike
        # TemporalMapInput which takes it as a parameter -- confirm this is intended.
        self.theta_edges = make_theta_bins_linear(4)

    def binning_data(self, input_folder, n_binning_realz):
        """
        Load the sample arrays used for making the bins.
        :return: (v, theta, pointer_list, initial_v, initial_theta)
        """
        init_data = np.load(os.path.join(input_folder, 'initial_arrays.npz'))
        initial_v, initial_theta = init_data['v'], init_data['theta']
        big_v, big_theta, big_ptr = [[] for _ in range(3)]
        realz_used = 0
        for file_idx in range(n_binning_realz):
            # load a polar file
            data = np.load(os.path.join(input_folder, 'polar_' + str(file_idx) + '.npz'))
            # Bug fix: removed the original's dead ``file_idx += 1`` -- rebinding
            # the loop variable has no effect on a range loop.
            # NOTE(review): realz_used is accumulated but never used downstream.
            realz_used += data['n_realz']
            big_v.append(data['V'])
            big_theta.append(data['Theta'])
            big_ptr.append(data['ptr'])
        # flatten the nested lists
        big_v, big_theta, big_ptr = [flatten(ll) for ll in [big_v, big_theta, big_ptr]]
        return big_v, big_theta, big_ptr, initial_v, initial_theta
class TemporalMapInputWithY(object):
    def __init__(self, input_folder, n_binning_realz, n_absv_class, n_theta_class, n_y_class, time_step,
                 n_slow_class, max_allowed, verbose=True, print_every=40, average_available=False):
        """
        Generate the input for the mapping object for a temporal Markov model, it contains
        sample arrays for velocity, angle, y position, frequency and instantaneous velocity,
        and the corresponding bin edges.
        :param input_folder: folder containing the saved realization npz files
        :param n_binning_realz: number of realization files used for making the bins
        :param n_absv_class: number of velocity classes
        :param n_theta_class: number of angle classes
        :param n_y_class: number of y-position classes
        :param time_step: averaging time step
        :param n_slow_class: number of classes reserved for the slow (low velocity) end
        :param max_allowed: cap used when constructing the velocity bin edges
        :param verbose: print progress while loading data
        :param print_every: progress print frequency
        :param average_available: must be True; v_inst is only available from averaged data
        """
        self.time_step = time_step
        self.input_folder = input_folder
        self.n_binning_realz = n_binning_realz
        self.n_absv_class = n_absv_class
        self.n_theta_class = n_theta_class
        self.n_slow_class = n_slow_class
        self.max_allowed = max_allowed
        # making sample data for creating bins
        self.v_array, self.theta_array, self.big_y_array, self.freq_array, self.v_inst_array, self.pointer_list,\
        self.initial_v, self.initial_f, self.initial_theta, self.initial_v_inst = \
            self.binning_data(average_available, input_folder, n_binning_realz, time_step, verbose, print_every)
        # generate edges from the data
        self.v_log_edges = abs_vel_log_bins_low_high(self.v_array, n_absv_class, n_slow_class, max_allowed)
        self.theta_edges = make_theta_bins_linear(n_theta_class)
        self.y_edges = make_y_bins_linear(self.big_y_array, n_y_class)
        # single-argument print() is valid in both Python 2 and 3
        print('making bins for instantaneous velocity...')
        self.v_inst_log_edges = abs_vel_log_bins_low_high(self.v_inst_array, n_absv_class, n_slow_class, max_allowed)

    def binning_data(self, average_available, input_folder, n_binning_realz, time_step, verbose, print_every):
        """
        Load the sample arrays used for making the bins.
        :return: (v, theta, y, f, v_inst, pointer_list, initial_v, initial_f,
                  initial_theta, initial_v_inst)
        :raises NotImplementedError: when average_available is False
        """
        if not average_available:
            # Bug fix: the original did ``raise 'v_inst not implemented ...'`` --
            # raising a plain string is itself a TypeError at runtime; raise a
            # proper exception type instead.
            raise NotImplementedError('v_inst not implemented without average data available')
            # return binning_input_v_theta_freq_y(input_folder, n_binning_realz, time_step, verbose=verbose)
        init_data = np.load(os.path.join(input_folder, 'initial_arrays.npz'))
        initial_v, initial_theta, initial_f, initial_v_inst = init_data['v'], init_data['theta'],\
                                                              init_data['f'], init_data['v_inst']
        big_v, big_theta, big_f, big_ptr, big_y, big_v_inst = [[] for _ in range(6)]
        realz_used = 0
        for file_idx in range(n_binning_realz):
            # load a polar file
            data = np.load(os.path.join(input_folder, 'avg_polar_' + str(file_idx) + '.npz'))
            # NOTE(review): realz_used is accumulated but never used downstream.
            realz_used += data['n_realz']
            big_v.append(data['V'])
            big_theta.append(data['Theta'])
            big_f.append(data['F'])
            big_ptr.append(data['ptr'])
            # v_inst is extended (not appended), so it needs no flatten() below
            big_v_inst.extend(data['V_inst'])
            cartesian = np.load(os.path.join(input_folder, 'avg_cartesian_' + str(file_idx) + '.npz'))
            big_y.append(cartesian['Y'])
        # flatten the nested lists
        big_v, big_theta, big_f, big_ptr, big_y = [flatten(ll) for ll in
                                                  [big_v, big_theta, big_f, big_ptr, big_y]]
        big_v_inst = np.array(big_v_inst)
        return big_v, big_theta, big_y, big_f, big_v_inst, big_ptr, initial_v,\
               initial_f, initial_theta, initial_v_inst
| 55.018519
| 138
| 0.66835
| 1,301
| 8,913
| 4.222137
| 0.139892
| 0.052066
| 0.052066
| 0.026215
| 0.76643
| 0.754597
| 0.718187
| 0.70162
| 0.6714
| 0.630803
| 0
| 0.002536
| 0.24784
| 8,913
| 162
| 139
| 55.018519
| 0.816826
| 0.125098
| 0
| 0.550459
| 0
| 0
| 0.041331
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0.036697
| null | null | 0.073395
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
362dcac7488926a4c2596e31ed7b67c0ad49130e
| 353
|
py
|
Python
|
fastai/metrics.py
|
Engid/faitest
|
0080d4f526d01395870416745d9a3f8d5ac33551
|
[
"Apache-2.0"
] | null | null | null |
fastai/metrics.py
|
Engid/faitest
|
0080d4f526d01395870416745d9a3f8d5ac33551
|
[
"Apache-2.0"
] | null | null | null |
fastai/metrics.py
|
Engid/faitest
|
0080d4f526d01395870416745d9a3f8d5ac33551
|
[
"Apache-2.0"
] | null | null | null |
from .imports import *
from .torch_imports import *
def accuracy(preds, targs):
    """Fraction of rows whose highest-scoring class equals the target label."""
    _, pred_labels = torch.max(preds, dim=1)
    matches = (pred_labels == targs).float()
    return matches.mean()
def accuracy_thresh(thresh):
    """Build a (preds, targs) metric that applies accuracy_multi at *thresh*."""
    def metric(preds, targs):
        return accuracy_multi(preds, targs, thresh)
    return metric
def accuracy_multi(preds, targs, thresh):
    """Element-wise accuracy after thresholding the predictions at *thresh*."""
    hits = (preds > thresh) == targs
    return hits.float().mean()
| 25.214286
| 67
| 0.705382
| 48
| 353
| 5.104167
| 0.354167
| 0.204082
| 0.114286
| 0.187755
| 0.236735
| 0
| 0
| 0
| 0
| 0
| 0
| 0.006623
| 0.144476
| 353
| 13
| 68
| 27.153846
| 0.804636
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| false
| 0
| 0.222222
| 0.222222
| 0.888889
| 0
| 0
| 0
| 0
| null | 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 5
|
3695b59018fd811fc5e322ded1f1e5f445f2afae
| 388
|
py
|
Python
|
gpflow/inducing_variables/__init__.py
|
elvijs/GPflow
|
056e59f2c5aa2b5021de9b7b91ce1cee2ea0bb92
|
[
"Apache-2.0"
] | null | null | null |
gpflow/inducing_variables/__init__.py
|
elvijs/GPflow
|
056e59f2c5aa2b5021de9b7b91ce1cee2ea0bb92
|
[
"Apache-2.0"
] | null | null | null |
gpflow/inducing_variables/__init__.py
|
elvijs/GPflow
|
056e59f2c5aa2b5021de9b7b91ce1cee2ea0bb92
|
[
"Apache-2.0"
] | null | null | null |
from .inducing_variables import InducingVariables, InducingPoints, Multiscale
from .inducing_patch import InducingPatches
from .mo_inducing_variables import (MultioutputInducingVariables, FallbackSharedIndependentInducingVariables, FallbackSeparateIndependentInducingVariables,
SharedIndependentInducingVariables, SeparateIndependentInducingVariables)
| 77.6
| 155
| 0.832474
| 22
| 388
| 14.5
| 0.681818
| 0.075235
| 0.144201
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.139175
| 388
| 4
| 156
| 97
| 0.95509
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.75
| 0
| 0.75
| 0
| 1
| 0
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
3697c42695447884c159ec977e7cc70fc4281cec
| 728
|
py
|
Python
|
15/duelingGenerators.py
|
jcsesznegi/advent-of-code-2017
|
9710e184e092b82aa798076b9ce3915c6e42758d
|
[
"MIT"
] | 1
|
2020-04-12T17:54:52.000Z
|
2020-04-12T17:54:52.000Z
|
15/duelingGenerators.py
|
jcsesznegi/advent-of-code-2017
|
9710e184e092b82aa798076b9ce3915c6e42758d
|
[
"MIT"
] | null | null | null |
15/duelingGenerators.py
|
jcsesznegi/advent-of-code-2017
|
9710e184e092b82aa798076b9ce3915c6e42758d
|
[
"MIT"
] | null | null | null |
def generatorA(value, stop):
    """Yield *stop* successive values of generator A (factor 16807, mod 2**31 - 1)."""
    for _ in range(stop):
        value = (value * 16807) % 2147483647
        yield value
def generatorB(value, stop):
    """Yield *stop* successive values of generator B (factor 48271, mod 2**31 - 1)."""
    for _ in range(stop):
        value = (value * 48271) % 2147483647
        yield value
def generatorAPart2(value, stop):
    """Like generatorA, but only yields values that are multiples of 4."""
    for _ in range(stop):
        value = (value * 16807) % 2147483647
        while value % 4:  # skip ahead until the value is divisible by 4
            value = (value * 16807) % 2147483647
        yield value
def generatorBPart2(value, stop):
i = 0
while i < stop:
value = (value * 48271) % 2147483647
while value % 8 != 0:
value = (value * 48271) % 2147483647
yield value
i += 1
| 22.060606
| 48
| 0.505495
| 84
| 728
| 4.380952
| 0.202381
| 0.163043
| 0.108696
| 0.119565
| 0.790761
| 0.790761
| 0.782609
| 0.782609
| 0.649457
| 0.5
| 0
| 0.235294
| 0.392857
| 728
| 32
| 49
| 22.75
| 0.597285
| 0
| 0
| 0.785714
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.142857
| false
| 0
| 0
| 0
| 0.142857
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
36a3608ab4d69cfcd568c0a613ce6e8f41e0cc31
| 156
|
py
|
Python
|
backend/server/core/views.py
|
JenardKin/django-react-boilerplate
|
dde2c817a502e1bed4c4c0618b10b3a381eff6cf
|
[
"MIT"
] | null | null | null |
backend/server/core/views.py
|
JenardKin/django-react-boilerplate
|
dde2c817a502e1bed4c4c0618b10b3a381eff6cf
|
[
"MIT"
] | null | null | null |
backend/server/core/views.py
|
JenardKin/django-react-boilerplate
|
dde2c817a502e1bed4c4c0618b10b3a381eff6cf
|
[
"MIT"
] | null | null | null |
from django.shortcuts import render
def index( request ):
    """Render the core index page with a short status message."""
    context = { 'msg': 'You have reached the django apis.' }
    return render( request, 'core/index.html', context )
| 26
| 95
| 0.692308
| 21
| 156
| 5.142857
| 0.809524
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.179487
| 156
| 5
| 96
| 31.2
| 0.84375
| 0
| 0
| 0
| 0
| 0
| 0.326923
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| false
| 0
| 0.333333
| 0.333333
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 1
| 0
| 0
|
0
| 5
|
36d9ea5c81dd5ff8eaa5f964e9d547a047b4f26a
| 137
|
py
|
Python
|
basicsr/version.py
|
BCV-Uniandes/RSR
|
dad60eedd3560f2655e3d1ed444153ed2616af2e
|
[
"zlib-acknowledgement"
] | 14
|
2021-08-28T04:15:37.000Z
|
2021-12-28T17:00:33.000Z
|
basicsr/version.py
|
BCV-Uniandes/RSR
|
dad60eedd3560f2655e3d1ed444153ed2616af2e
|
[
"zlib-acknowledgement"
] | 2
|
2021-09-26T01:27:06.000Z
|
2021-12-24T19:06:09.000Z
|
basicsr/version.py
|
BCV-Uniandes/RSR
|
dad60eedd3560f2655e3d1ed444153ed2616af2e
|
[
"zlib-acknowledgement"
] | 1
|
2021-10-18T15:48:56.000Z
|
2021-10-18T15:48:56.000Z
|
# GENERATED VERSION FILE
# TIME: Mon Oct 11 04:02:23 2021
__version__ = '1.2.0+f83fd55'
# Derive the short form and the numeric tuple from the full version string.
short_version = __version__.split('+')[0]
version_info = tuple(int(part) for part in short_version.split('.'))
| 22.833333
| 32
| 0.686131
| 26
| 137
| 3.384615
| 0.653846
| 0.068182
| 0.102273
| 0.227273
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.217391
| 0.160584
| 137
| 5
| 33
| 27.4
| 0.547826
| 0.386861
| 0
| 0
| 1
| 0
| 0.222222
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| null | 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
7fcbbe0feafa2e1f8ccf912176165f8f65724eb8
| 63
|
py
|
Python
|
yammled/admin.py
|
Guest007/smyted
|
5b425e8456f97b1051e29b95d9e5c0528d1f01ca
|
[
"MIT"
] | null | null | null |
yammled/admin.py
|
Guest007/smyted
|
5b425e8456f97b1051e29b95d9e5c0528d1f01ca
|
[
"MIT"
] | null | null | null |
yammled/admin.py
|
Guest007/smyted
|
5b425e8456f97b1051e29b95d9e5c0528d1f01ca
|
[
"MIT"
] | null | null | null |
from django.contrib import admin
from yammled.models import *
| 15.75
| 32
| 0.809524
| 9
| 63
| 5.666667
| 0.777778
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.142857
| 63
| 3
| 33
| 21
| 0.944444
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
3d02a3ee892ea2a72150136619d3988df9fb46ac
| 305
|
py
|
Python
|
intervul/datFiles/general_data/geometry/_definitions.py
|
mpacheco62/intervul
|
c0eaadf54580de4b3c2dea46e8f196eab52280e1
|
[
"MIT"
] | 1
|
2021-04-13T13:28:16.000Z
|
2021-04-13T13:28:16.000Z
|
intervul/datFiles/general_data/geometry/_definitions.py
|
andresutrera/intervul
|
75c5f824067549b3ddcbe9fe667964fb85a05ce3
|
[
"MIT"
] | null | null | null |
intervul/datFiles/general_data/geometry/_definitions.py
|
andresutrera/intervul
|
75c5f824067549b3ddcbe9fe667964fb85a05ce3
|
[
"MIT"
] | 1
|
2021-05-06T20:29:42.000Z
|
2021-05-06T20:29:42.000Z
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from .._definitions import _General_data
class _Geometry(_General_data):
    """Marker base class for the GEOMETRY options of the general-data block.

    Concrete subclasses implement __str__ to emit the keyword written to the
    data file.
    """
    pass
class Interpolate(_Geometry):
    """Geometry option that renders as the INTERPOLATE keyword."""

    def __str__(self):
        """Return the keyword emitted into the data file."""
        return "INTERPOLATE"
class Noninterpolate(_Geometry):
    """Geometry option that renders as the NONINTERPOLATE keyword."""

    def __str__(self):
        """Return the keyword emitted into the data file."""
        return "NONINTERPOLATE"
| 16.052632
| 40
| 0.685246
| 33
| 305
| 5.848485
| 0.636364
| 0.11399
| 0.145078
| 0.186529
| 0.248705
| 0
| 0
| 0
| 0
| 0
| 0
| 0.004098
| 0.2
| 305
| 18
| 41
| 16.944444
| 0.786885
| 0.137705
| 0
| 0.222222
| 0
| 0
| 0.096154
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.222222
| false
| 0.111111
| 0.111111
| 0.222222
| 0.888889
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
|
0
| 5
|
3d08d62d31d70f826588b330150f53d1ee96090d
| 315
|
py
|
Python
|
Mycoffee/products/views.py
|
AbderrhmanAbdellatif/MyCoffee
|
01563ccd1881caea605391fb813b7d0f2f59be02
|
[
"MIT"
] | null | null | null |
Mycoffee/products/views.py
|
AbderrhmanAbdellatif/MyCoffee
|
01563ccd1881caea605391fb813b7d0f2f59be02
|
[
"MIT"
] | null | null | null |
Mycoffee/products/views.py
|
AbderrhmanAbdellatif/MyCoffee
|
01563ccd1881caea605391fb813b7d0f2f59be02
|
[
"MIT"
] | null | null | null |
from django.http import request
from django.shortcuts import render
def products(request):
    """Render the products list page."""
    # Bug fix: the template name used a backslash ('Products\products.html');
    # Django template names always use forward slashes, on every OS -- a
    # backslash makes the lookup fail outside Windows.
    return render(request, 'Products/products.html')
def product(request):
    """Render the single-product page."""
    # Bug fix: forward slash in the template name (was a backslash), per
    # Django's template-name convention.
    return render(request, 'Products/product.html')
def search(request):
    """Render the product search page."""
    # Bug fix: forward slash in the template name (was a backslash), per
    # Django's template-name convention.
    return render(request, 'Products/search.html')
# Create your views here.
| 26.25
| 51
| 0.768254
| 41
| 315
| 5.902439
| 0.414634
| 0.161157
| 0.235537
| 0.322314
| 0.421488
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.12381
| 315
| 11
| 52
| 28.636364
| 0.876812
| 0.073016
| 0
| 0
| 0
| 0
| 0.217241
| 0.148276
| 0
| 0
| 0
| 0
| 0
| 1
| 0.375
| false
| 0
| 0.25
| 0.375
| 1
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 5
|
3d168ac782d471cdafd30c5477a7c01e23b0b1af
| 58
|
py
|
Python
|
src/neptuneml_toolkit/modelzoo/encoder/__init__.py
|
riaz/neptuneml-toolkit
|
6c68ba6d02a3d52116e6e8ca23d5f755693ae3d8
|
[
"Apache-2.0"
] | 7
|
2021-11-11T15:47:39.000Z
|
2022-03-31T03:44:40.000Z
|
src/neptuneml_toolkit/modelzoo/encoder/__init__.py
|
riaz/neptuneml-toolkit
|
6c68ba6d02a3d52116e6e8ca23d5f755693ae3d8
|
[
"Apache-2.0"
] | null | null | null |
src/neptuneml_toolkit/modelzoo/encoder/__init__.py
|
riaz/neptuneml-toolkit
|
6c68ba6d02a3d52116e6e8ca23d5f755693ae3d8
|
[
"Apache-2.0"
] | 2
|
2021-11-23T19:32:25.000Z
|
2022-02-24T18:42:42.000Z
|
from .han import *
from .rgcn import *
from .rgat import *
| 19.333333
| 19
| 0.706897
| 9
| 58
| 4.555556
| 0.555556
| 0.487805
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.189655
| 58
| 3
| 20
| 19.333333
| 0.87234
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
3d40709100e485f753e9bc6e733684b31f860cfe
| 42
|
py
|
Python
|
tox_add_factor/__init__.py
|
jayvdb/tox-add-factor
|
38742451c74a7655592d6930f8f365574cb13ee7
|
[
"MIT"
] | 1
|
2020-12-17T11:14:00.000Z
|
2020-12-17T11:14:00.000Z
|
tox_add_factor/__init__.py
|
jayvdb/tox-add-factor
|
38742451c74a7655592d6930f8f365574cb13ee7
|
[
"MIT"
] | 3
|
2020-10-15T14:49:04.000Z
|
2020-10-20T07:49:25.000Z
|
tox_add_factor/__init__.py
|
jayvdb/tox-add-factor
|
38742451c74a7655592d6930f8f365574cb13ee7
|
[
"MIT"
] | null | null | null |
"""Make it easy to add factors to tox."""
| 21
| 41
| 0.642857
| 8
| 42
| 3.375
| 0.875
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.190476
| 42
| 1
| 42
| 42
| 0.794118
| 0.833333
| 0
| null | 0
| null | 0
| 0
| null | 0
| 0
| 0
| null | 1
| null | true
| 0
| 0
| null | null | null | 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
3d5e88049493a0d9b8269359be9d265bdc4ef730
| 89
|
py
|
Python
|
plugins/beebeeto/utils/http/__init__.py
|
aliluyala/PocHunter
|
ff2d7e745eabd81ffb77920fe00813b17fc432cf
|
[
"MIT"
] | 95
|
2016-07-05T12:44:25.000Z
|
2022-01-24T09:16:44.000Z
|
plugins/beebeeto/utils/http/__init__.py
|
sigma-random/PocHunter
|
ff2d7e745eabd81ffb77920fe00813b17fc432cf
|
[
"MIT"
] | 2
|
2016-10-24T09:35:24.000Z
|
2017-07-28T08:50:31.000Z
|
plugins/beebeeto/utils/http/__init__.py
|
sigma-random/PocHunter
|
ff2d7e745eabd81ffb77920fe00813b17fc432cf
|
[
"MIT"
] | 39
|
2016-06-13T07:47:39.000Z
|
2020-11-26T00:53:48.000Z
|
from forgeheaders import ForgeHeaders, Win, IOS, Android, Linux, MacOS
from http import *
| 44.5
| 70
| 0.797753
| 12
| 89
| 5.916667
| 0.75
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.134831
| 89
| 2
| 71
| 44.5
| 0.922078
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
e9fca05afcb0ee1c25603b2aa4b5c7229bdb9b95
| 49
|
py
|
Python
|
develop/custom_modules/__init__.py
|
mustard-seed/SparseNN_training
|
267a1fb5bed650e66ad5cf3d98069891bb307aec
|
[
"Apache-2.0"
] | null | null | null |
develop/custom_modules/__init__.py
|
mustard-seed/SparseNN_training
|
267a1fb5bed650e66ad5cf3d98069891bb307aec
|
[
"Apache-2.0"
] | null | null | null |
develop/custom_modules/__init__.py
|
mustard-seed/SparseNN_training
|
267a1fb5bed650e66ad5cf3d98069891bb307aec
|
[
"Apache-2.0"
] | null | null | null |
#Author: James Liu <linqiao.liu@mail.utoronto.ca>
| 49
| 49
| 0.77551
| 8
| 49
| 4.75
| 0.875
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.061224
| 49
| 1
| 49
| 49
| 0.826087
| 0.979592
| 0
| null | 0
| null | 0
| 0
| null | 0
| 0
| 0
| null | 1
| null | true
| 0
| 0
| null | null | null | 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
18048b0aa5d03f85d7540f7736807707b2e90851
| 195
|
py
|
Python
|
frontend/urls.py
|
yalld/webgl-test
|
0b4c821f7dbcedca6e80470eb8d67cfd4261795c
|
[
"MIT"
] | null | null | null |
frontend/urls.py
|
yalld/webgl-test
|
0b4c821f7dbcedca6e80470eb8d67cfd4261795c
|
[
"MIT"
] | 7
|
2021-03-30T14:15:18.000Z
|
2022-02-19T04:49:53.000Z
|
frontend/urls.py
|
yalld/webgl-test
|
0b4c821f7dbcedca6e80470eb8d67cfd4261795c
|
[
"MIT"
] | null | null | null |
from django.urls import path
from django.urls import re_path
from frontend.views import index_view
app_name = 'frontend'  # URL namespace, e.g. reverse('frontend:index')
urlpatterns = [
    # Catch-all: every path is routed to index_view (name='index') --
    # presumably so client-side routing can handle the rest; confirm.
    re_path(r'^(?:.*)/?$', index_view, name='index'),
]
| 19.5
| 53
| 0.707692
| 28
| 195
| 4.75
| 0.5
| 0.150376
| 0.210526
| 0.300752
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.14359
| 195
| 9
| 54
| 21.666667
| 0.796407
| 0
| 0
| 0
| 0
| 0
| 0.117949
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.428571
| 0
| 0.428571
| 0
| 1
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
18084028814a8ffa15e92b101d8d2e46fda0c351
| 171
|
py
|
Python
|
src/config.py
|
juliangruendner/ketos_annotation
|
eb7f4b518132be60a08df2c47efc2c86565a32f7
|
[
"MIT"
] | null | null | null |
src/config.py
|
juliangruendner/ketos_annotation
|
eb7f4b518132be60a08df2c47efc2c86565a32f7
|
[
"MIT"
] | null | null | null |
src/config.py
|
juliangruendner/ketos_annotation
|
eb7f4b518132be60a08df2c47efc2c86565a32f7
|
[
"MIT"
] | null | null | null |
import os

# Database connection settings, read from the environment at import time;
# each falls back to an empty string when the variable is unset.
POSTGRES_USER = str(os.environ.get('POSTGRES_USER', ''))
POSTGRES_PASSWORD = str(os.environ.get('POSTGRES_PASSWORD', ''))
POSTGRES_DB = str(os.environ.get('POSTGRES_DB', ''))
| 28.5
| 59
| 0.719298
| 23
| 171
| 5.086957
| 0.347826
| 0.128205
| 0.282051
| 0.487179
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.087719
| 171
| 5
| 60
| 34.2
| 0.75
| 0
| 0
| 0
| 0
| 0
| 0.239766
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0.25
| 0.25
| 0
| 0.25
| 0
| 1
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 5
|
18217cab25ca3cef00f09afa9b2a8a1fe4ae975a
| 214
|
py
|
Python
|
bridger/display/__init__.py
|
intellineers/django-bridger
|
ed097984a99df7da40a4d01bd00c56e3c6083056
|
[
"BSD-3-Clause"
] | 2
|
2020-03-17T00:53:23.000Z
|
2020-07-16T07:00:33.000Z
|
bridger/display/__init__.py
|
intellineers/django-bridger
|
ed097984a99df7da40a4d01bd00c56e3c6083056
|
[
"BSD-3-Clause"
] | 76
|
2019-12-05T01:15:57.000Z
|
2021-09-07T16:47:27.000Z
|
bridger/display/__init__.py
|
intellineers/django-bridger
|
ed097984a99df7da40a4d01bd00c56e3c6083056
|
[
"BSD-3-Clause"
] | 1
|
2020-02-05T15:09:47.000Z
|
2020-02-05T15:09:47.000Z
|
from .formatting import Condition, Formatting, FormattingRule
from .instance_display import FieldSet, InstanceDisplay, Section, SectionList
from .list_display import Calendar, Field, Legend, LegendItem, ListDisplay
| 71.333333
| 77
| 0.850467
| 23
| 214
| 7.826087
| 0.73913
| 0.144444
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.093458
| 214
| 3
| 78
| 71.333333
| 0.927835
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
18674b34c9a83175605866d662f9ca902e2ed6ec
| 100
|
py
|
Python
|
python/testData/mover/insideStatement.py
|
jnthn/intellij-community
|
8fa7c8a3ace62400c838e0d5926a7be106aa8557
|
[
"Apache-2.0"
] | 2
|
2019-04-28T07:48:50.000Z
|
2020-12-11T14:18:08.000Z
|
python/testData/mover/insideStatement.py
|
Cyril-lamirand/intellij-community
|
60ab6c61b82fc761dd68363eca7d9d69663cfa39
|
[
"Apache-2.0"
] | 173
|
2018-07-05T13:59:39.000Z
|
2018-08-09T01:12:03.000Z
|
python/testData/mover/insideStatement.py
|
Cyril-lamirand/intellij-community
|
60ab6c61b82fc761dd68363eca7d9d69663cfa39
|
[
"Apache-2.0"
] | 2
|
2020-03-15T08:57:37.000Z
|
2020-04-07T04:48:14.000Z
|
def foo(a, b):
if a:
pass
else:
pass
a = <caret>3
if b:
pass
| 12.5
| 16
| 0.37
| 15
| 100
| 2.466667
| 0.6
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.021277
| 0.53
| 100
| 8
| 17
| 12.5
| 0.765957
| 0
| 0
| 0.375
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0.375
| 0
| null | null | 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 5
|
18824315d4ba1b9978f980f3e33303a5f0cd3511
| 116
|
py
|
Python
|
mundo 1/aula 7/exer6.py
|
jonatan098/cursopython
|
6e4cbaef6229e230fdbc66d80ec1b5a089887b0d
|
[
"MIT"
] | null | null | null |
mundo 1/aula 7/exer6.py
|
jonatan098/cursopython
|
6e4cbaef6229e230fdbc66d80ec1b5a089887b0d
|
[
"MIT"
] | null | null | null |
mundo 1/aula 7/exer6.py
|
jonatan098/cursopython
|
6e4cbaef6229e230fdbc66d80ec1b5a089887b0d
|
[
"MIT"
] | 1
|
2020-02-22T17:21:05.000Z
|
2020-02-22T17:21:05.000Z
|
# Read an integer from the user (prompt text is Portuguese: "type a number").
n = int(input('digite um numero '))
# Print the double, the triple and the square root of n (messages in Portuguese);
# n ** (1/2) computes the square root via exponentiation.
print(f'o dobro do numero {n} e {n*2} e o triplo e {n*3} e a raiz e {n**(1/2)}')
| 58
| 80
| 0.586207
| 29
| 116
| 2.344828
| 0.62069
| 0.088235
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.042553
| 0.189655
| 116
| 2
| 80
| 58
| 0.680851
| 0
| 0
| 0
| 0
| 0.5
| 0.74359
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0.5
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
|
0
| 5
|
a106f0aa7984f815b1c2ce217cf1e7e2a38bf027
| 137
|
py
|
Python
|
02-json-to-csv.py
|
sb-olr/C2C-Shipping-ETL-Analysis
|
043640e58df9ca58ea361d9d8df37b4274540428
|
[
"Apache-2.0"
] | null | null | null |
02-json-to-csv.py
|
sb-olr/C2C-Shipping-ETL-Analysis
|
043640e58df9ca58ea361d9d8df37b4274540428
|
[
"Apache-2.0"
] | null | null | null |
02-json-to-csv.py
|
sb-olr/C2C-Shipping-ETL-Analysis
|
043640e58df9ca58ea361d9d8df37b4274540428
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python3
# Usage: ./02-json-to-csv.py
import my_utils
from config import CONFIG  # NOTE(review): CONFIG looks unused here -- confirm before removing
# Generate the CSV files from the previously fetched data; archive=True
# presumably moves/marks the processed inputs -- see my_utils for details.
my_utils.generate_csv_files(archive=True)
| 19.571429
| 41
| 0.781022
| 24
| 137
| 4.291667
| 0.791667
| 0.135922
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.024194
| 0.094891
| 137
| 6
| 42
| 22.833333
| 0.806452
| 0.350365
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.666667
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
a11cc56177ca425de270255de4da4b2e6e7008b3
| 52
|
py
|
Python
|
cobrakbase/core/kbasebiochem/__init__.py
|
Fxe/cobrakbase
|
a41c142b0808b4ded16b400167c70b2466eebd85
|
[
"MIT"
] | 3
|
2018-11-28T12:48:54.000Z
|
2022-02-28T22:20:32.000Z
|
cobrakbase/core/kbasebiochem/__init__.py
|
Fxe/cobrakbase
|
a41c142b0808b4ded16b400167c70b2466eebd85
|
[
"MIT"
] | 2
|
2020-06-26T20:13:16.000Z
|
2020-10-27T05:10:34.000Z
|
cobrakbase/core/kbasebiochem/__init__.py
|
Fxe/cobrakbase
|
a41c142b0808b4ded16b400167c70b2466eebd85
|
[
"MIT"
] | 1
|
2020-09-02T17:40:34.000Z
|
2020-09-02T17:40:34.000Z
|
from cobrakbase.core.kbasebiochem.media import Media
| 52
| 52
| 0.884615
| 7
| 52
| 6.571429
| 0.857143
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.057692
| 52
| 1
| 52
| 52
| 0.938776
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
a18621e5ab043a19831adff810a4215ae534afc4
| 80
|
py
|
Python
|
authorization/__init__.py
|
edubadges/django_ims_toolbox
|
b1c5a79e62eac951780e89924c098b00402b5591
|
[
"MIT"
] | 1
|
2019-01-20T23:00:40.000Z
|
2019-01-20T23:00:40.000Z
|
authorization/__init__.py
|
edubadges/django_ims_toolbox
|
b1c5a79e62eac951780e89924c098b00402b5591
|
[
"MIT"
] | 1
|
2018-12-19T06:51:07.000Z
|
2018-12-19T06:51:07.000Z
|
authorization/__init__.py
|
edubadges/django_ims_toolbox
|
b1c5a79e62eac951780e89924c098b00402b5591
|
[
"MIT"
] | 2
|
2018-12-22T20:19:13.000Z
|
2020-09-02T07:32:21.000Z
|
from ims.authorization.oauth_1 import LTIRequestValidator, LTIRemoteUserBackend
| 40
| 79
| 0.9
| 8
| 80
| 8.875
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.013333
| 0.0625
| 80
| 1
| 80
| 80
| 0.933333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
a19f1d08c47859b8d8d46012df013e6e807b6d61
| 91,298
|
py
|
Python
|
position/controllers.py
|
josephevans/WeVoteServer
|
137f3037ba9984e036eb146aeeeecb4e979c21e1
|
[
"MIT"
] | null | null | null |
position/controllers.py
|
josephevans/WeVoteServer
|
137f3037ba9984e036eb146aeeeecb4e979c21e1
|
[
"MIT"
] | null | null | null |
position/controllers.py
|
josephevans/WeVoteServer
|
137f3037ba9984e036eb146aeeeecb4e979c21e1
|
[
"MIT"
] | null | null | null |
# position/controllers.py
# Brought to you by We Vote. Be good.
# -*- coding: UTF-8 -*-
from .models import PositionEntered, PositionEnteredManager, PositionListManager, ANY_STANCE, NO_STANCE, \
FRIENDS_AND_PUBLIC, FRIENDS_ONLY, PUBLIC_ONLY, SHOW_PUBLIC
from ballot.models import OFFICE, CANDIDATE, MEASURE
from candidate.models import CandidateCampaignManager
from config.base import get_environment_variable
from django.contrib import messages
from django.http import HttpResponse
from election.models import fetch_election_state
from exception.models import handle_record_not_saved_exception
from follow.models import FollowOrganizationManager, FollowOrganizationList
from measure.models import ContestMeasureManager
from office.models import ContestOfficeManager
from organization.models import OrganizationManager
import json
import requests
from voter.models import fetch_voter_id_from_voter_device_link, VoterManager
from voter_guide.models import ORGANIZATION, PUBLIC_FIGURE, VOTER, UNKNOWN_VOTER_GUIDE
import wevote_functions.admin
from wevote_functions.functions import convert_to_int, is_voter_device_id_valid, positive_value_exists
# Module-level logger from the project's logger factory.
logger = wevote_functions.admin.get_logger(__name__)
# Read once from the environment/config at import time.
WE_VOTE_API_KEY = get_environment_variable("WE_VOTE_API_KEY")
POSITIONS_SYNC_URL = get_environment_variable("POSITIONS_SYNC_URL")
# We retrieve from only one of the two possible variables
def _empty_position_retrieve_json(status, success, position_we_vote_id, stance):
    """Build the positionRetrieve JSON payload for the error / not-found cases.

    Every field the success path returns is present here with an empty default,
    so the WebApp client always receives a consistent schema regardless of outcome.

    :param status: API status string to report back to the caller.
    :param success: whether the retrieve succeeded (always falsy data here).
    :param position_we_vote_id: echoed back so the client can correlate the request.
    :param stance: stance value to report ('' for the missing-id case,
        NO_STANCE when a lookup ran but found nothing — matches legacy behavior).
    """
    return {
        'status': status,
        'success': success,
        'position_we_vote_id': position_we_vote_id,
        'ballot_item_display_name': '',
        'speaker_display_name': '',
        'speaker_image_url_https': '',
        'speaker_twitter_handle': '',
        'is_support': False,
        'is_positive_rating': False,
        'is_support_or_positive_rating': False,
        'is_oppose': False,
        'is_negative_rating': False,
        'is_oppose_or_negative_rating': False,
        'is_information_only': False,
        'organization_we_vote_id': '',
        'google_civic_election_id': '',
        'voter_id': 0,
        'office_we_vote_id': '',
        'candidate_we_vote_id': '',
        'measure_we_vote_id': '',
        'stance': stance,
        'statement_text': '',
        'statement_html': '',
        'more_info_url': '',
        'vote_smart_rating': '',
        'vote_smart_time_span': '',
        'last_updated': '',
    }


def position_retrieve_for_api(position_we_vote_id, voter_device_id):  # positionRetrieve
    """Retrieve one position by its we_vote_id and return it as a JSON HttpResponse.

    :param position_we_vote_id: the we_vote_id of the position to retrieve
        (normalized to stripped lowercase before lookup).
    :param voter_device_id: currently unused beyond a placeholder check — see TODO.
    :return: django.http.HttpResponse whose body is the JSON-encoded position,
        or an all-empty payload with an explanatory 'status' on failure.
    """
    position_we_vote_id = position_we_vote_id.strip().lower()
    # TODO for certain positions (voter positions), we need to restrict the retrieve
    # based on voter_device_id / voter_id
    if voter_device_id:
        pass
    if not positive_value_exists(position_we_vote_id):
        json_data = _empty_position_retrieve_json(
            "POSITION_RETRIEVE_BOTH_IDS_MISSING", False, position_we_vote_id, '')
        return HttpResponse(json.dumps(json_data), content_type='application/json')

    position_manager = PositionEnteredManager()
    # The alternate lookup keys are intentionally blank/zero: this endpoint only
    # retrieves by position_we_vote_id.
    organization_id = 0
    organization_we_vote_id = ''
    contest_office_id = 0
    candidate_campaign_id = 0
    contest_measure_id = 0
    position_voter_id = 0
    results = position_manager.retrieve_position_table_unknown(
        position_we_vote_id, organization_id, organization_we_vote_id, position_voter_id,
        contest_office_id, candidate_campaign_id, contest_measure_id)

    if results['position_found']:
        position = results['position']
        json_data = {
            'success': True,
            'status': results['status'],
            'position_we_vote_id': position.we_vote_id,
            'ballot_item_display_name': position.ballot_item_display_name,
            'speaker_display_name': position.speaker_display_name,
            'speaker_image_url_https': position.speaker_image_url_https,
            'speaker_twitter_handle': position.speaker_twitter_handle,
            'is_support': results['is_support'],
            'is_positive_rating': results['is_positive_rating'],
            'is_support_or_positive_rating': results['is_support_or_positive_rating'],
            'is_oppose': results['is_oppose'],
            'is_negative_rating': results['is_negative_rating'],
            'is_oppose_or_negative_rating': results['is_oppose_or_negative_rating'],
            'is_information_only': results['is_information_only'],
            'organization_we_vote_id': position.organization_we_vote_id,
            'google_civic_election_id': position.google_civic_election_id,
            'voter_id': position.voter_id,
            'office_we_vote_id': '',  # position.office_we_vote_id,
            'candidate_we_vote_id': position.candidate_campaign_we_vote_id,
            'measure_we_vote_id': position.contest_measure_we_vote_id,
            'stance': position.stance,
            'statement_text': position.statement_text,
            'statement_html': position.statement_html,
            'more_info_url': position.more_info_url,
            'vote_smart_rating': position.vote_smart_rating,
            'vote_smart_time_span': position.vote_smart_time_span,
            'last_updated': position.last_updated(),
        }
    else:
        json_data = _empty_position_retrieve_json(
            results['status'], results['success'], position_we_vote_id, NO_STANCE)
    return HttpResponse(json.dumps(json_data), content_type='application/json')
def _position_save_error_results(status, voter_device_id, position_we_vote_id, ballot_item_display_name,
                                 organization_we_vote_id, google_civic_election_id,
                                 office_we_vote_id, candidate_we_vote_id, measure_we_vote_id,
                                 stance, statement_text, statement_html, more_info_url):
    """Build the positionSave validation-failure payload.

    The caller's inputs are echoed back unchanged; all speaker/stance booleans
    are False and nothing is persisted. Shared by both validation failure
    branches (which previously duplicated this 30-key dict verbatim).
    """
    return {
        'status': status,
        'success': False,
        'voter_device_id': voter_device_id,
        'position_we_vote_id': position_we_vote_id,
        'new_position_created': False,
        'ballot_item_display_name': ballot_item_display_name,
        'speaker_display_name': '',
        'speaker_image_url_https': '',
        'speaker_twitter_handle': '',
        'is_support': False,
        'is_positive_rating': False,
        'is_support_or_positive_rating': False,
        'is_oppose': False,
        'is_negative_rating': False,
        'is_oppose_or_negative_rating': False,
        'is_information_only': False,
        'is_public_position': False,
        'organization_we_vote_id': organization_we_vote_id,
        'google_civic_election_id': google_civic_election_id,
        'state_code': '',
        'voter_id': 0,
        'office_we_vote_id': office_we_vote_id,
        'candidate_we_vote_id': candidate_we_vote_id,
        'measure_we_vote_id': measure_we_vote_id,
        'stance': stance,
        'statement_text': statement_text,
        'statement_html': statement_html,
        'more_info_url': more_info_url,
        'last_updated': '',
    }


def position_save_for_api(  # positionSave
        voter_device_id, position_we_vote_id,
        organization_we_vote_id,
        public_figure_we_vote_id,
        voter_we_vote_id,
        google_civic_election_id,
        ballot_item_display_name,
        office_we_vote_id,
        candidate_we_vote_id,
        measure_we_vote_id,
        stance,
        set_as_public_position,
        statement_text,
        statement_html,
        more_info_url
        ):
    """Create or update a position and return the result as a plain dict.

    A position can be identified either by an existing position_we_vote_id, or
    (for a new entry) by organization + election + exactly one ballot item id
    (office, candidate, or measure).

    :return: dict describing either the saved position or the failure,
        always containing the same key schema the WebApp expects.
    """
    position_we_vote_id = position_we_vote_id.strip().lower()
    existing_unique_identifier_found = positive_value_exists(position_we_vote_id)
    # A brand-new position needs an organization, an election, and one ballot item id.
    new_unique_identifier_found = positive_value_exists(organization_we_vote_id) \
        and positive_value_exists(google_civic_election_id) and (
            positive_value_exists(office_we_vote_id) or
            positive_value_exists(candidate_we_vote_id) or
            positive_value_exists(measure_we_vote_id)
        )
    unique_identifier_found = existing_unique_identifier_found or new_unique_identifier_found
    # We must have these variables in order to create a new entry.
    # Identical requirement to new_unique_identifier_found — kept under its own
    # name for readability (the original recomputed the same expression).
    required_variables_for_new_entry = new_unique_identifier_found

    if not unique_identifier_found:
        return _position_save_error_results(
            "POSITION_REQUIRED_UNIQUE_IDENTIFIER_VARIABLES_MISSING",
            voter_device_id, position_we_vote_id, ballot_item_display_name,
            organization_we_vote_id, google_civic_election_id,
            office_we_vote_id, candidate_we_vote_id, measure_we_vote_id,
            stance, statement_text, statement_html, more_info_url)
    elif not existing_unique_identifier_found and not required_variables_for_new_entry:
        return _position_save_error_results(
            "NEW_POSITION_REQUIRED_VARIABLES_MISSING",
            voter_device_id, position_we_vote_id, ballot_item_display_name,
            organization_we_vote_id, google_civic_election_id,
            office_we_vote_id, candidate_we_vote_id, measure_we_vote_id,
            stance, statement_text, statement_html, more_info_url)

    # Look up the state_code from the election
    state_code = fetch_election_state(google_civic_election_id)

    position_manager = PositionEnteredManager()
    save_results = position_manager.update_or_create_position(
        position_we_vote_id=position_we_vote_id,
        organization_we_vote_id=organization_we_vote_id,
        public_figure_we_vote_id=public_figure_we_vote_id,
        voter_we_vote_id=voter_we_vote_id,
        google_civic_election_id=google_civic_election_id,
        state_code=state_code,
        ballot_item_display_name=ballot_item_display_name,
        office_we_vote_id=office_we_vote_id,
        candidate_we_vote_id=candidate_we_vote_id,
        measure_we_vote_id=measure_we_vote_id,
        stance=stance,
        set_as_public_position=set_as_public_position,
        statement_text=statement_text,
        statement_html=statement_html,
        more_info_url=more_info_url,
    )

    if save_results['success']:
        position = save_results['position']
        results = {
            'success': save_results['success'],
            'status': save_results['status'],
            'voter_device_id': voter_device_id,
            'position_we_vote_id': position.we_vote_id,
            'new_position_created': save_results['new_position_created'],
            'ballot_item_display_name': position.ballot_item_display_name,
            'speaker_display_name': position.speaker_display_name,
            'speaker_image_url_https': position.speaker_image_url_https,
            'speaker_twitter_handle': position.speaker_twitter_handle,
            'is_support': position.is_support(),
            'is_positive_rating': position.is_positive_rating(),
            'is_support_or_positive_rating': position.is_support_or_positive_rating(),
            'is_oppose': position.is_oppose(),
            'is_negative_rating': position.is_negative_rating(),
            'is_oppose_or_negative_rating': position.is_oppose_or_negative_rating(),
            'is_information_only': position.is_information_only(),
            'is_public_position': position.is_public_position,
            'organization_we_vote_id': position.organization_we_vote_id,
            'google_civic_election_id': position.google_civic_election_id,
            'state_code': position.state_code,
            'voter_id': position.voter_id,
            'office_we_vote_id': '',  # position.office_we_vote_id,
            'candidate_we_vote_id': position.candidate_campaign_we_vote_id,
            'measure_we_vote_id': position.contest_measure_we_vote_id,
            'stance': position.stance,
            'statement_text': position.statement_text,
            'statement_html': position.statement_html,
            'more_info_url': position.more_info_url,
            'last_updated': position.last_updated(),
        }
        return results
    else:
        # NOTE: unlike the early-validation failures, this payload intentionally
        # omits 'state_code' and blanks ballot_item_display_name — preserved
        # as-is because existing clients may depend on this exact shape.
        results = {
            'success': False,
            'status': save_results['status'],
            'voter_device_id': voter_device_id,
            'position_we_vote_id': position_we_vote_id,
            'new_position_created': False,
            'ballot_item_display_name': '',
            'speaker_display_name': '',
            'speaker_image_url_https': '',
            'speaker_twitter_handle': '',
            'is_support': False,
            'is_positive_rating': False,
            'is_support_or_positive_rating': False,
            'is_oppose': False,
            'is_negative_rating': False,
            'is_oppose_or_negative_rating': False,
            'is_information_only': False,
            'is_public_position': False,
            'organization_we_vote_id': organization_we_vote_id,
            'google_civic_election_id': google_civic_election_id,
            'voter_id': 0,
            'office_we_vote_id': office_we_vote_id,
            'candidate_we_vote_id': candidate_we_vote_id,
            'measure_we_vote_id': measure_we_vote_id,
            'stance': stance,
            'statement_text': statement_text,
            'statement_html': statement_html,
            'more_info_url': more_info_url,
            'last_updated': '',
        }
        return results
def position_list_for_ballot_item_for_api(voter_device_id, friends_vs_public,  # positionListForBallotItem
                                          office_id, office_we_vote_id,
                                          candidate_id, candidate_we_vote_id,
                                          measure_id, measure_we_vote_id,
                                          stance_we_are_looking_for=ANY_STANCE,
                                          show_positions_this_voter_follows=True):
    """
    We want to return a JSON file with the position identifiers from orgs, friends and public figures the voter follows
    This list of information is used to retrieve the detailed information

    Exactly one ballot item should be identified by the caller (candidate,
    measure, or office — checked in that priority order). The viewer's own
    positions are filtered out of the returned list, since the WebApp shows
    them in the edit spot instead.

    :return: django.http.HttpResponse wrapping the JSON payload; on any
        failure the payload carries an explanatory 'status' and an empty list.
    """
    status = ""
    success = False
    position_manager = PositionEnteredManager()
    # Get voter_id from the voter_device_id so we can know who is supporting/opposing
    results = is_voter_device_id_valid(voter_device_id)
    if not results['success']:
        position_list = []
        json_data = {
            'status': 'VALID_VOTER_DEVICE_ID_MISSING',
            'success': False,
            'count': 0,
            'kind_of_ballot_item': "UNKNOWN",
            'ballot_item_id': 0,
            'position_list': position_list,
        }
        return HttpResponse(json.dumps(json_data), content_type='application/json')

    voter_manager = VoterManager()
    voter_results = voter_manager.retrieve_voter_from_voter_device_id(voter_device_id)
    if voter_results['voter_found']:
        voter = voter_results['voter']
        voter_id = voter.id
        voter_we_vote_id = voter.we_vote_id
    else:
        voter_id = 0
        voter_we_vote_id = ""
    if not positive_value_exists(voter_id):
        position_list = []
        json_data = {
            'status': "VALID_VOTER_ID_MISSING ",
            'success': False,
            'count': 0,
            'kind_of_ballot_item': "UNKNOWN",
            'ballot_item_id': 0,
            'position_list': position_list,
        }
        return HttpResponse(json.dumps(json_data), content_type='application/json')

    # If we are looking for positions that the voter is following, we can also show friend's opinions
    # If show_positions_this_voter_follows = False, then we are looking for positions we can follow
    retrieve_friends_positions = friends_vs_public in (FRIENDS_ONLY, FRIENDS_AND_PUBLIC) \
        and show_positions_this_voter_follows
    retrieve_public_positions = friends_vs_public in (PUBLIC_ONLY, FRIENDS_AND_PUBLIC)

    friends_we_vote_id_list = []  # TODO DALE We need to pass in the voter's list of friends
    # Add yourself as a friend so your opinions show up
    friends_we_vote_id_list.append(voter_we_vote_id)

    position_list_manager = PositionListManager()
    ballot_item_found = False
    friends_positions_list = []
    if positive_value_exists(candidate_id) or positive_value_exists(candidate_we_vote_id):
        kind_of_ballot_item = CANDIDATE

        ############################
        # Retrieve public positions
        if retrieve_public_positions:
            retrieve_public_positions_now = True  # The alternate is positions for friends-only
            return_only_latest_position_per_speaker = True
            public_positions_list = position_list_manager.retrieve_all_positions_for_candidate_campaign(
                retrieve_public_positions_now, candidate_id, candidate_we_vote_id, stance_we_are_looking_for,
                return_only_latest_position_per_speaker)
            is_public_position_setting = True
            public_positions_list = position_list_manager.add_is_public_position(public_positions_list,
                                                                                 is_public_position_setting)
        else:
            public_positions_list = []

        ##################################
        # Now retrieve friend's positions
        if retrieve_friends_positions:
            retrieve_public_positions_now = False  # This being False means: "Positions from friends-only"
            return_only_latest_position_per_speaker = True
            friends_positions_list = position_list_manager.retrieve_all_positions_for_candidate_campaign(
                retrieve_public_positions_now, candidate_id, candidate_we_vote_id, stance_we_are_looking_for,
                return_only_latest_position_per_speaker, friends_we_vote_id_list)
            # Now add is_public_position to each value
            is_public_position_setting = False
            friends_positions_list = position_list_manager.add_is_public_position(friends_positions_list,
                                                                                  is_public_position_setting)
        else:
            friends_positions_list = []

        # Since we want to return the id and we_vote_id for this ballot item, and we don't know for sure that
        # there are any positions for this ballot_item (which would include both the id and we_vote_id),
        # we retrieve the following so we can get the ballot item's id and we_vote_id (per the request of
        # the WebApp team)
        candidate_campaign_manager = CandidateCampaignManager()
        if positive_value_exists(candidate_id):
            results = candidate_campaign_manager.retrieve_candidate_campaign_from_id(candidate_id)
        else:
            results = candidate_campaign_manager.retrieve_candidate_campaign_from_we_vote_id(candidate_we_vote_id)
        if results['candidate_campaign_found']:
            candidate_campaign = results['candidate_campaign']
            ballot_item_id = candidate_campaign.id
            ballot_item_we_vote_id = candidate_campaign.we_vote_id
            ballot_item_found = True
        else:
            ballot_item_id = candidate_id
            ballot_item_we_vote_id = candidate_we_vote_id
    elif positive_value_exists(measure_id) or positive_value_exists(measure_we_vote_id):
        kind_of_ballot_item = MEASURE

        ############################
        # Retrieve public positions
        if retrieve_public_positions:
            retrieve_public_positions_now = True  # The alternate is positions for friends-only
            return_only_latest_position_per_speaker = True
            public_positions_list = position_list_manager.retrieve_all_positions_for_contest_measure(
                retrieve_public_positions_now,
                measure_id, measure_we_vote_id, stance_we_are_looking_for,
                return_only_latest_position_per_speaker)
            is_public_position_setting = True
            public_positions_list = position_list_manager.add_is_public_position(public_positions_list,
                                                                                 is_public_position_setting)
        else:
            public_positions_list = []

        ##################################
        # Now retrieve friend's positions
        if retrieve_friends_positions:
            retrieve_public_positions_now = False  # This being False means: "Positions from friends-only"
            return_only_latest_position_per_speaker = True
            friends_positions_list = position_list_manager.retrieve_all_positions_for_contest_measure(
                retrieve_public_positions_now,
                measure_id, measure_we_vote_id, stance_we_are_looking_for,
                return_only_latest_position_per_speaker, friends_we_vote_id_list)
            is_public_position_setting = False
            friends_positions_list = position_list_manager.add_is_public_position(friends_positions_list,
                                                                                  is_public_position_setting)
        else:
            friends_positions_list = []

        # Since we want to return the id and we_vote_id, and we don't know for sure that there are any positions
        # for this ballot_item, we retrieve the following so we can get the id and we_vote_id (per the request of
        # the WebApp team)
        contest_measure_manager = ContestMeasureManager()
        if positive_value_exists(measure_id):
            results = contest_measure_manager.retrieve_contest_measure_from_id(measure_id)
        else:
            results = contest_measure_manager.retrieve_contest_measure_from_we_vote_id(measure_we_vote_id)
        if results['contest_measure_found']:
            contest_measure = results['contest_measure']
            ballot_item_id = contest_measure.id
            ballot_item_we_vote_id = contest_measure.we_vote_id
            ballot_item_found = True
        else:
            ballot_item_id = measure_id
            ballot_item_we_vote_id = measure_we_vote_id
    elif positive_value_exists(office_id) or positive_value_exists(office_we_vote_id):
        # NOTE(review): unlike the candidate/measure branches, the office branch does
        # not split public vs. friends positions — it retrieves one combined list.
        public_positions_list = position_list_manager.retrieve_all_positions_for_contest_office(
            office_id, office_we_vote_id, stance_we_are_looking_for)
        kind_of_ballot_item = OFFICE

        # Since we want to return the id and we_vote_id, and we don't know for sure that there are any positions
        # for this ballot_item, we retrieve the following so we can get the id and we_vote_id (per the request of
        # the WebApp team)
        contest_office_manager = ContestOfficeManager()
        if positive_value_exists(office_id):
            results = contest_office_manager.retrieve_contest_office_from_id(office_id)
        else:
            results = contest_office_manager.retrieve_contest_office_from_we_vote_id(office_we_vote_id)
        if results['contest_office_found']:
            contest_office = results['contest_office']
            ballot_item_id = contest_office.id
            ballot_item_we_vote_id = contest_office.we_vote_id
            ballot_item_found = True
        else:
            ballot_item_id = office_id
            ballot_item_we_vote_id = office_we_vote_id
    else:
        position_list = []
        json_data = {
            'status': 'POSITION_LIST_RETRIEVE_MISSING_BALLOT_ITEM_ID',
            'success': False,
            'count': 0,
            'kind_of_ballot_item': "UNKNOWN",
            'ballot_item_id': 0,
            'ballot_item_we_vote_id': '',
            'position_list': position_list,
        }
        return HttpResponse(json.dumps(json_data), content_type='application/json')

    if not ballot_item_found:
        position_list = []
        json_data = {
            'status': 'POSITION_LIST_RETRIEVE_BALLOT_ITEM_NOT_FOUND',
            'success': False,
            'count': 0,
            'kind_of_ballot_item': "UNKNOWN",
            'ballot_item_id': ballot_item_id,
            'ballot_item_we_vote_id': ballot_item_we_vote_id,
            'position_list': position_list,
        }
        return HttpResponse(json.dumps(json_data), content_type='application/json')

    if len(public_positions_list):
        follow_organization_list_manager = FollowOrganizationList()
        organizations_followed_by_voter = \
            follow_organization_list_manager.retrieve_follow_organization_by_voter_id_simple_id_array(voter_id)

        if show_positions_this_voter_follows:
            position_objects = position_list_manager.calculate_positions_followed_by_voter(
                voter_id, public_positions_list, organizations_followed_by_voter)
            status = 'SUCCESSFUL_RETRIEVE_OF_POSITIONS_FOLLOWED'
            success = True
        else:
            position_objects = position_list_manager.calculate_positions_not_followed_by_voter(
                public_positions_list, organizations_followed_by_voter)
            status = 'SUCCESSFUL_RETRIEVE_OF_POSITIONS_NOT_FOLLOWED'
            success = True
    else:
        position_objects = []

    if len(friends_positions_list):
        # Friends' positions are prepended so they sort ahead of public positions.
        position_objects = friends_positions_list + position_objects

    positions_count = len(position_objects)

    # We need the linked_organization_we_vote_id so we can remove the viewer's positions from the list.
    linked_organization_we_vote_id = ""
    if positive_value_exists(positions_count):
        voter_manager = VoterManager()
        results = voter_manager.retrieve_voter_by_id(voter_id)
        if results['voter_found']:
            voter = results['voter']
            linked_organization_we_vote_id = voter.linked_organization_we_vote_id

    position_list = []
    for one_position in position_objects:
        # Is there sufficient information in the position to display it?
        some_data_exists = True if one_position.is_support() \
            or one_position.is_oppose() \
            or one_position.is_information_only() \
            or positive_value_exists(one_position.vote_smart_rating) \
            or positive_value_exists(one_position.statement_text) \
            or positive_value_exists(one_position.more_info_url) else False
        if not some_data_exists:
            # Skip this position if there isn't any data to display
            continue
        # Whose position is it?
        if positive_value_exists(one_position.organization_we_vote_id):
            if linked_organization_we_vote_id == one_position.organization_we_vote_id:
                # Do not show your own position on the position list, since it will be in the edit spot already
                continue
            speaker_type = ORGANIZATION
            speaker_id = one_position.organization_id
            speaker_we_vote_id = one_position.organization_we_vote_id
            one_position_success = True
            # Make sure we have this data to display
            if not positive_value_exists(one_position.speaker_display_name) \
                    or not positive_value_exists(one_position.speaker_image_url_https) \
                    or not positive_value_exists(one_position.speaker_twitter_handle):
                one_position = position_manager.refresh_cached_position_info(one_position)
            speaker_display_name = one_position.speaker_display_name
        elif positive_value_exists(one_position.voter_id):
            if voter_id == one_position.voter_id:
                # Do not show your own position on the position list, since it will be in the edit spot already
                continue
            speaker_type = VOTER
            speaker_id = one_position.voter_id
            speaker_we_vote_id = one_position.voter_we_vote_id
            one_position_success = True
            # Make sure we have this data to display
            if not positive_value_exists(one_position.speaker_display_name) \
                    or not positive_value_exists(one_position.voter_we_vote_id) \
                    or not positive_value_exists(one_position.speaker_image_url_https) \
                    or not positive_value_exists(one_position.speaker_twitter_handle):
                one_position = position_manager.refresh_cached_position_info(one_position)
            speaker_display_name = "You"
        elif positive_value_exists(one_position.public_figure_we_vote_id):
            speaker_type = PUBLIC_FIGURE
            speaker_id = one_position.public_figure_id
            speaker_we_vote_id = one_position.public_figure_we_vote_id
            one_position_success = True
            # Make sure we have this data to display
            if not positive_value_exists(one_position.speaker_display_name) \
                    or not positive_value_exists(one_position.speaker_image_url_https) \
                    or not positive_value_exists(one_position.speaker_twitter_handle):
                one_position = position_manager.refresh_cached_position_info(one_position)
            speaker_display_name = one_position.speaker_display_name
        else:
            speaker_type = UNKNOWN_VOTER_GUIDE
            speaker_display_name = "Unknown"
            speaker_id = None
            speaker_we_vote_id = None
            one_position_success = False
        if one_position_success:
            one_position_dict_for_api = {
                'position_we_vote_id': one_position.we_vote_id,
                'ballot_item_display_name': one_position.ballot_item_display_name,
                'speaker_display_name': speaker_display_name,
                'speaker_image_url_https': one_position.speaker_image_url_https,
                'speaker_twitter_handle': one_position.speaker_twitter_handle,
                'speaker_type': speaker_type,
                'speaker_id': speaker_id,
                'speaker_we_vote_id': speaker_we_vote_id,
                'is_support': one_position.is_support(),
                'is_positive_rating': one_position.is_positive_rating(),
                'is_support_or_positive_rating': one_position.is_support_or_positive_rating(),
                'is_oppose': one_position.is_oppose(),
                'is_negative_rating': one_position.is_negative_rating(),
                'is_oppose_or_negative_rating': one_position.is_oppose_or_negative_rating(),
                'is_information_only': one_position.is_information_only(),
                'is_public_position': one_position.is_public_position,
                'vote_smart_rating': one_position.vote_smart_rating,
                'vote_smart_time_span': one_position.vote_smart_time_span,
                'statement_text': one_position.statement_text,
                'more_info_url': one_position.more_info_url,
                'last_updated': one_position.last_updated(),
            }
            position_list.append(one_position_dict_for_api)

    json_data = {
        'status': status,
        'success': success,
        'count': positions_count,
        'kind_of_ballot_item': kind_of_ballot_item,
        'ballot_item_id': ballot_item_id,
        'ballot_item_we_vote_id': ballot_item_we_vote_id,
        'position_list': position_list,
    }
    return HttpResponse(json.dumps(json_data), content_type='application/json')
def position_list_for_opinion_maker_for_api(voter_device_id, # positionListForOpinionMaker
organization_id, organization_we_vote_id,
public_figure_id, public_figure_we_vote_id,
friends_vs_public=FRIENDS_AND_PUBLIC,
stance_we_are_looking_for=ANY_STANCE,
filter_for_voter=False,
filter_out_voter=False,
google_civic_election_id=0,
state_code=''):
"""
We want to return a JSON file with a list of positions held by orgs, and friends public figures.
We can limit the positions to friend's only if needed.
"""
is_following = False
is_ignoring = False
opinion_maker_display_name = ''
opinion_maker_image_url_https = ''
status = ''
position_list_raw = []
# Convert incoming variables to "opinion_maker"
if positive_value_exists(organization_id) or positive_value_exists(organization_we_vote_id):
kind_of_opinion_maker = ORGANIZATION
kind_of_opinion_maker_text = "ORGANIZATION" # For returning a value via the API
opinion_maker_id = organization_id
opinion_maker_we_vote_id = organization_we_vote_id
elif positive_value_exists(public_figure_id) or positive_value_exists(public_figure_we_vote_id):
kind_of_opinion_maker = PUBLIC_FIGURE
kind_of_opinion_maker_text = "PUBLIC_FIGURE"
opinion_maker_id = public_figure_id
opinion_maker_we_vote_id = public_figure_we_vote_id
else:
kind_of_opinion_maker = UNKNOWN_VOTER_GUIDE
kind_of_opinion_maker_text = "UNKNOWN_VOTER_GUIDE"
opinion_maker_id = 0
opinion_maker_we_vote_id = ''
position_manager = PositionEnteredManager()
# Get voter_id from the voter_device_id so we can know who is supporting/opposing
results = is_voter_device_id_valid(voter_device_id)
if not results['success']:
position_list = []
json_data = {
'status': 'VALID_VOTER_DEVICE_ID_MISSING_OPINION_MAKER_POSITION_LIST',
'success': False,
'count': 0,
'kind_of_opinion_maker': kind_of_opinion_maker_text,
'opinion_maker_id': opinion_maker_id,
'opinion_maker_we_vote_id': opinion_maker_we_vote_id,
'opinion_maker_display_name': opinion_maker_display_name,
'opinion_maker_image_url_https': opinion_maker_image_url_https,
'is_following': is_following,
'is_ignoring': is_ignoring,
'google_civic_election_id': google_civic_election_id,
'state_code': state_code,
'position_list': position_list,
'filter_for_voter': filter_for_voter,
'filter_out_voter': filter_out_voter,
'friends_vs_public': friends_vs_public,
}
return HttpResponse(json.dumps(json_data), content_type='application/json')
voter_id = fetch_voter_id_from_voter_device_link(voter_device_id)
if not positive_value_exists(voter_id):
position_list = []
json_data = {
'status': "VALID_VOTER_ID_MISSING_OPINION_MAKER_POSITION_LIST ",
'success': False,
'count': 0,
'kind_of_opinion_maker': kind_of_opinion_maker_text,
'opinion_maker_id': opinion_maker_id,
'opinion_maker_we_vote_id': opinion_maker_we_vote_id,
'opinion_maker_display_name': opinion_maker_display_name,
'opinion_maker_image_url_https': opinion_maker_image_url_https,
'is_following': is_following,
'is_ignoring': is_ignoring,
'google_civic_election_id': google_civic_election_id,
'state_code': state_code,
'position_list': position_list,
'filter_for_voter': filter_for_voter,
'filter_out_voter': filter_out_voter,
'friends_vs_public': friends_vs_public,
}
return HttpResponse(json.dumps(json_data), content_type='application/json')
position_list_manager = PositionListManager()
opinion_maker_found = False
if kind_of_opinion_maker == ORGANIZATION:
# Since we want to return the id and we_vote_id, and we don't know for sure that there are any positions
# for this opinion_maker, we retrieve the following so we can get the id and we_vote_id (per the request of
# the WebApp team)
organization_manager = OrganizationManager()
if positive_value_exists(organization_id):
results = organization_manager.retrieve_organization_from_id(organization_id)
else:
results = organization_manager.retrieve_organization_from_we_vote_id(organization_we_vote_id)
if results['organization_found']:
organization = results['organization']
opinion_maker_id = organization.id
opinion_maker_we_vote_id = organization.we_vote_id
opinion_maker_display_name = organization.organization_name
opinion_maker_image_url_https = organization.organization_photo_url()
opinion_maker_found = True
follow_organization_manager = FollowOrganizationManager()
voter_we_vote_id = ''
following_results = follow_organization_manager.retrieve_voter_following_org_status(
voter_id, voter_we_vote_id, opinion_maker_id, opinion_maker_we_vote_id)
if following_results['is_following']:
is_following = True
elif following_results['is_ignoring']:
is_ignoring = True
position_list_raw = position_list_manager.retrieve_all_positions_for_organization(
organization_id, organization_we_vote_id, stance_we_are_looking_for, friends_vs_public,
filter_for_voter, filter_out_voter, voter_device_id, google_civic_election_id, state_code)
else:
opinion_maker_id = organization_id
opinion_maker_we_vote_id = organization_we_vote_id
elif kind_of_opinion_maker == PUBLIC_FIGURE:
position_list_raw = position_list_manager.retrieve_all_positions_for_public_figure(
public_figure_id, public_figure_we_vote_id, stance_we_are_looking_for,
filter_for_voter, filter_out_voter, voter_device_id, google_civic_election_id, state_code)
# Since we want to return the id and we_vote_id, and we don't know for sure that there are any positions
# for this opinion_maker, we retrieve the following so we can have the id and we_vote_id (per the request of
# the WebApp team)
# TODO Do we want to give public figures an entry separate from their voter account? Needs to be implemented.
# candidate_campaign_manager = CandidateCampaignManager()
# if positive_value_exists(candidate_id):
# results = candidate_campaign_manager.retrieve_candidate_campaign_from_id(candidate_id)
# else:
# results = candidate_campaign_manager.retrieve_candidate_campaign_from_we_vote_id(candidate_we_vote_id)
#
# if results['candidate_campaign_found']:
# candidate_campaign = results['candidate_campaign']
# ballot_item_id = candidate_campaign.id
# ballot_item_we_vote_id = candidate_campaign.we_vote_id
# opinion_maker_found = True
# else:
# ballot_item_id = candidate_id
# ballot_item_we_vote_id = candidate_we_vote_id
else:
position_list = []
json_data = {
'status': 'POSITION_LIST_RETRIEVE_MISSING_OPINION_MAKER_ID',
'success': False,
'count': 0,
'kind_of_opinion_maker': kind_of_opinion_maker_text,
'opinion_maker_id': opinion_maker_id,
'opinion_maker_we_vote_id': opinion_maker_we_vote_id,
'opinion_maker_display_name': opinion_maker_display_name,
'opinion_maker_image_url_https': opinion_maker_image_url_https,
'is_following': is_following,
'is_ignoring': is_ignoring,
'google_civic_election_id': google_civic_election_id,
'state_code': state_code,
'position_list': position_list,
'filter_for_voter': filter_for_voter,
'filter_out_voter': filter_out_voter,
'friends_vs_public': friends_vs_public,
}
return HttpResponse(json.dumps(json_data), content_type='application/json')
if not opinion_maker_found:
position_list = []
json_data = {
'status': 'POSITION_LIST_RETRIEVE_OPINION_MAKER_NOT_FOUND',
'success': False,
'count': 0,
'kind_of_opinion_maker': kind_of_opinion_maker_text,
'opinion_maker_id': opinion_maker_id,
'opinion_maker_we_vote_id': opinion_maker_we_vote_id,
'opinion_maker_display_name': opinion_maker_display_name,
'opinion_maker_image_url_https': opinion_maker_image_url_https,
'is_following': is_following,
'is_ignoring': is_ignoring,
'google_civic_election_id': google_civic_election_id,
'state_code': state_code,
'position_list': position_list,
'filter_for_voter': filter_for_voter,
'filter_out_voter': filter_out_voter,
'friends_vs_public': friends_vs_public,
}
return HttpResponse(json.dumps(json_data), content_type='application/json')
position_list = []
for one_position in position_list_raw:
# Whose position is it?
missing_ballot_item_image = False
missing_office_information = False
if positive_value_exists(one_position.candidate_campaign_we_vote_id):
kind_of_ballot_item = CANDIDATE
ballot_item_id = one_position.candidate_campaign_id
ballot_item_we_vote_id = one_position.candidate_campaign_we_vote_id
if not positive_value_exists(one_position.contest_office_we_vote_id) \
or not positive_value_exists(one_position.contest_office_name):
missing_office_information = True
if not positive_value_exists(one_position.ballot_item_image_url_https):
missing_ballot_item_image = True
one_position_success = True
elif positive_value_exists(one_position.contest_measure_we_vote_id):
kind_of_ballot_item = MEASURE
ballot_item_id = one_position.contest_measure_id
ballot_item_we_vote_id = one_position.contest_measure_we_vote_id
one_position_success = True
elif positive_value_exists(one_position.contest_office_we_vote_id):
kind_of_ballot_item = OFFICE
ballot_item_id = one_position.contest_office_id
ballot_item_we_vote_id = one_position.contest_office_we_vote_id
one_position_success = True
else:
kind_of_ballot_item = "UNKNOWN_BALLOT_ITEM"
ballot_item_id = None
ballot_item_we_vote_id = None
one_position_success = False
if one_position_success:
# Make sure we have this data to display. If we don't, refresh PositionEntered table from other tables.
if not positive_value_exists(one_position.ballot_item_display_name) \
or not positive_value_exists(one_position.state_code) \
or not positive_value_exists(one_position.speaker_image_url_https) \
or missing_ballot_item_image \
or missing_office_information:
one_position = position_manager.refresh_cached_position_info(one_position)
one_position_dict_for_api = {
'position_we_vote_id': one_position.we_vote_id,
'ballot_item_display_name': one_position.ballot_item_display_name, # Candidate name or Measure
'ballot_item_image_url_https': one_position.ballot_item_image_url_https,
'ballot_item_twitter_handle': one_position.ballot_item_twitter_handle,
'ballot_item_political_party': one_position.political_party,
'kind_of_ballot_item': kind_of_ballot_item,
'ballot_item_id': ballot_item_id,
'ballot_item_we_vote_id': ballot_item_we_vote_id,
'ballot_item_state_code': one_position.state_code,
'contest_office_id': one_position.contest_office_id,
'contest_office_we_vote_id': one_position.contest_office_we_vote_id,
'contest_office_name': one_position.contest_office_name,
'is_support': one_position.is_support(),
'is_positive_rating': one_position.is_positive_rating(),
'is_support_or_positive_rating': one_position.is_support_or_positive_rating(),
'is_oppose': one_position.is_oppose(),
'is_negative_rating': one_position.is_negative_rating(),
'is_oppose_or_negative_rating': one_position.is_oppose_or_negative_rating(),
'is_information_only': one_position.is_information_only(),
'is_public_position': one_position.is_public_position,
'speaker_display_name': one_position.speaker_display_name, # Organization name
'vote_smart_rating': one_position.vote_smart_rating,
'vote_smart_time_span': one_position.vote_smart_time_span,
'google_civic_election_id': one_position.google_civic_election_id,
'more_info_url': one_position.more_info_url,
'statement_text': one_position.statement_text,
'last_updated': one_position.last_updated(),
}
position_list.append(one_position_dict_for_api)
status += ' POSITION_LIST_FOR_OPINION_MAKER_SUCCEEDED'
success = True
json_data = {
'status': status,
'success': success,
'count': len(position_list),
'kind_of_opinion_maker': kind_of_opinion_maker_text,
'opinion_maker_id': opinion_maker_id,
'opinion_maker_we_vote_id': opinion_maker_we_vote_id,
'opinion_maker_display_name': opinion_maker_display_name,
'opinion_maker_image_url_https': opinion_maker_image_url_https,
'is_following': is_following,
'is_ignoring': is_ignoring,
'google_civic_election_id': google_civic_election_id,
'state_code': state_code,
'position_list': position_list,
'filter_for_voter': filter_for_voter,
'filter_out_voter': filter_out_voter,
'friends_vs_public': friends_vs_public,
}
return HttpResponse(json.dumps(json_data), content_type='application/json')
def positions_import_from_sample_file(request=None):  # , load_from_uri=False
    """
    Load position json from the local sample file, and either create new entries or update existing.
    :param request: unused; kept for interface compatibility with admin views that pass one
    :return: results dict from positions_import_from_structured_json
    """
    # Load saved json from local file
    with open("position/import_data/positions_sample.json") as json_data:
        structured_json = json.load(json_data)
    # BUG FIX: positions_import_from_structured_json takes only structured_json;
    # the previous call passed (request, structured_json), raising a TypeError.
    return positions_import_from_structured_json(structured_json)
def positions_import_from_master_server(request, google_civic_election_id=''):
    """
    Fetch position json from the We Vote master server, filter out entries that duplicate
    local positions, and either create new entries or update existing.
    :param request: Django request, used only for admin flash messaging
    :param google_civic_election_id: when provided, restrict the sync to one election
    :return: import results dict, augmented with a 'duplicates_removed' count
    """
    messages.add_message(request, messages.INFO, "Loading Positions from We Vote Master servers")
    logger.info("Loading Positions from We Vote Master servers")
    # Request json file from We Vote servers
    # Use a distinct name for the HTTP response so we do not shadow the Django `request` parameter
    response = requests.get(POSITIONS_SYNC_URL, params={
        "key": WE_VOTE_API_KEY,  # This comes from an environment variable
        "format": 'json',
        "google_civic_election_id": google_civic_election_id,
    })
    structured_json = json.loads(response.text)
    results = filter_positions_structured_json_for_local_duplicates(structured_json)
    filtered_structured_json = results['structured_json']
    duplicates_removed = results['duplicates_removed']
    import_results = positions_import_from_structured_json(filtered_structured_json)
    import_results['duplicates_removed'] = duplicates_removed
    return import_results
def filter_positions_structured_json_for_local_duplicates(structured_json):
    """
    With this function, we remove positions that seem to be duplicates, but have different we_vote_id's.
    We do not check to see if we have a matching office in this routine -- that is done elsewhere.
    :param structured_json: list of position dicts from the master server
    :return: dict with the filtered 'structured_json' list and a 'duplicates_removed' count
    """
    duplicates_removed = 0
    filtered_structured_json = []
    position_list_manager = PositionListManager()
    for one_position in structured_json:
        # dict.get gives us '' for any missing key without a conditional per field
        we_vote_id = one_position.get('we_vote_id', '')
        google_civic_election_id = one_position.get('google_civic_election_id', '')
        organization_we_vote_id = one_position.get('organization_we_vote_id', '')
        candidate_campaign_we_vote_id = one_position.get('candidate_campaign_we_vote_id', '')
        contest_measure_we_vote_id = one_position.get('contest_measure_we_vote_id', '')
        # Check to see if there is an entry that matches in all critical ways, minus the we_vote_id
        we_vote_id_from_master = we_vote_id
        results = position_list_manager.retrieve_possible_duplicate_positions(
            google_civic_election_id, organization_we_vote_id,
            candidate_campaign_we_vote_id, contest_measure_we_vote_id,
            we_vote_id_from_master)
        if results['position_list_found']:
            # There seems to be a duplicate already in this database using a different we_vote_id
            duplicates_removed += 1
        else:
            filtered_structured_json.append(one_position)
    positions_results = {
        'success': True,
        'status': "FILTER_POSITIONS_FOR_DUPLICATES_PROCESS_COMPLETE",
        'duplicates_removed': duplicates_removed,
        'structured_json': filtered_structured_json,
    }
    return positions_results
def positions_import_from_structured_json(structured_json):
    """
    Create or update PositionEntered records from a list of position dicts (as produced by the
    master-server sync or the sample file). Only organization / public-figure positions on
    candidates or measures are imported; voter positions are skipped as private.
    Positions are dropped (counted in 'not_processed') when the referenced organization,
    candidate, or measure does not exist locally.
    :param structured_json: list of position dicts; each entry is expected to carry the full
        set of keys accessed below (a missing key raises KeyError, caught by the save try-block)
    :return: dict with 'success', 'status', and 'saved' / 'updated' / 'not_processed' counts
    """
    positions_saved = 0
    positions_updated = 0
    positions_not_processed = 0
    for one_position in structured_json:
        # Make sure we have the minimum required variables
        if positive_value_exists(one_position["we_vote_id"]) \
                and (positive_value_exists(one_position["organization_we_vote_id"]) or positive_value_exists(
                one_position["public_figure_we_vote_id"])) \
                and positive_value_exists(one_position["candidate_campaign_we_vote_id"]):
            # organization position on candidate
            pass
        elif positive_value_exists(one_position["we_vote_id"]) \
                and (positive_value_exists(one_position["organization_we_vote_id"]) or positive_value_exists(
                one_position["public_figure_we_vote_id"])) \
                and positive_value_exists(one_position["contest_measure_we_vote_id"]):
            # organization position on measure
            pass
        else:
            # Note that we do not import voter_we_vote_id positions at this point because they are considered private
            positions_not_processed += 1
            continue
        # Check to see if this position had been imported previously
        position_on_stage_found = False
        try:
            if len(one_position["we_vote_id"]) > 0:
                position_query = PositionEntered.objects.filter(we_vote_id=one_position["we_vote_id"])
                if len(position_query):
                    position_on_stage = position_query[0]
                    position_on_stage_found = True
        except PositionEntered.DoesNotExist as e:
            # No prior import of this position; a new record will be created below
            pass
        except Exception as e:
            # NOTE(review): any other lookup failure is silently treated as "not found" -- confirm intended
            pass
        # We need to look up the local organization_id and store for internal use
        organization_id = 0
        if positive_value_exists(one_position["organization_we_vote_id"]):
            organization_manager = OrganizationManager()
            organization_id = organization_manager.fetch_organization_id(one_position["organization_we_vote_id"])
            if not positive_value_exists(organization_id):
                # If an id does not exist, then we don't have this organization locally
                positions_not_processed += 1
                continue
        elif positive_value_exists(one_position["public_figure_we_vote_id"]):
            # TODO Build this for public_figure - skip for now
            continue
        candidate_campaign_manager = CandidateCampaignManager()
        candidate_campaign_id = 0
        contest_measure_id = 0
        if positive_value_exists(one_position["candidate_campaign_we_vote_id"]):
            # We need to look up the local candidate_campaign_id and store for internal use
            candidate_campaign_id = candidate_campaign_manager.fetch_candidate_campaign_id_from_we_vote_id(
                one_position["candidate_campaign_we_vote_id"])
            if not positive_value_exists(candidate_campaign_id):
                # If an id does not exist, then we don't have this candidate locally
                positions_not_processed += 1
                continue
        elif positive_value_exists(one_position["contest_measure_we_vote_id"]):
            contest_measure_manager = ContestMeasureManager()
            contest_measure_id = contest_measure_manager.fetch_contest_measure_id_from_we_vote_id(
                one_position["contest_measure_we_vote_id"])
            if not positive_value_exists(contest_measure_id):
                # If an id does not exist, then we don't have this measure locally
                positions_not_processed += 1
                continue
        # The three local-id lookups below are not built yet; the ids default to 0
        contest_office_id = 0
        if positive_value_exists(one_position['contest_office_we_vote_id']):
            # TODO
            pass
        politician_id = 0
        if positive_value_exists(one_position['politician_we_vote_id']):
            # TODO
            pass
        voter_id = 0
        if positive_value_exists(one_position['voter_we_vote_id']):
            # TODO
            pass
        # Find the google_civic_candidate_name so we have a backup way to link position if the we_vote_id is lost
        google_civic_candidate_name = one_position["google_civic_candidate_name"] if \
            "google_civic_candidate_name" in one_position else ''
        if not positive_value_exists(google_civic_candidate_name):
            google_civic_candidate_name = candidate_campaign_manager.fetch_google_civic_candidate_name_from_we_vote_id(
                one_position["candidate_campaign_we_vote_id"])
        try:
            if position_on_stage_found:
                # Update
                position_on_stage.we_vote_id = one_position["we_vote_id"]
                position_on_stage.candidate_campaign_id = candidate_campaign_id
                position_on_stage.candidate_campaign_we_vote_id = one_position["candidate_campaign_we_vote_id"]
                position_on_stage.contest_measure_id = contest_measure_id
                position_on_stage.contest_measure_we_vote_id = one_position["contest_measure_we_vote_id"]
                position_on_stage.contest_office_id = contest_office_id
                position_on_stage.contest_office_we_vote_id = one_position["contest_office_we_vote_id"]
                position_on_stage.date_entered = one_position["date_entered"]
                position_on_stage.google_civic_candidate_name = google_civic_candidate_name
                position_on_stage.google_civic_election_id = one_position["google_civic_election_id"]
                position_on_stage.more_info_url = one_position["more_info_url"]
                position_on_stage.organization_id = organization_id
                position_on_stage.organization_we_vote_id = one_position["organization_we_vote_id"]
                position_on_stage.stance = one_position["stance"]
                position_on_stage.statement_text = one_position["statement_text"]
                position_on_stage.statement_html = one_position["statement_html"]
            else:
                # Create new
                position_on_stage = PositionEntered(
                    we_vote_id=one_position["we_vote_id"],
                    candidate_campaign_id=candidate_campaign_id,
                    candidate_campaign_we_vote_id=one_position["candidate_campaign_we_vote_id"],
                    contest_measure_id=contest_measure_id,
                    contest_measure_we_vote_id=one_position["contest_measure_we_vote_id"],
                    contest_office_id=contest_office_id,
                    contest_office_we_vote_id=one_position["contest_office_we_vote_id"],
                    date_entered=one_position["date_entered"],
                    google_civic_candidate_name=google_civic_candidate_name,
                    google_civic_election_id=one_position["google_civic_election_id"],
                    more_info_url=one_position["more_info_url"],
                    organization_id=organization_id,
                    organization_we_vote_id=one_position["organization_we_vote_id"],
                    stance=one_position["stance"],
                    statement_html=one_position["statement_html"],
                    statement_text=one_position["statement_text"],
                )
            # Fields common to both the update and create paths are assigned here, then saved once
            position_on_stage.ballot_item_display_name = one_position["ballot_item_display_name"]
            position_on_stage.ballot_item_image_url_https = one_position["ballot_item_image_url_https"]
            position_on_stage.ballot_item_twitter_handle = one_position["ballot_item_twitter_handle"]
            position_on_stage.from_scraper = one_position["from_scraper"]
            position_on_stage.date_last_changed = one_position["date_last_changed"]
            position_on_stage.organization_certified = one_position["organization_certified"]
            position_on_stage.politician_id = politician_id
            position_on_stage.politician_we_vote_id = one_position["politician_we_vote_id"]
            position_on_stage.public_figure_we_vote_id = one_position["public_figure_we_vote_id"]
            position_on_stage.speaker_display_name = one_position["speaker_display_name"]
            position_on_stage.speaker_image_url_https = one_position["speaker_image_url_https"]
            position_on_stage.speaker_twitter_handle = one_position["speaker_twitter_handle"]
            position_on_stage.tweet_source_id = one_position["tweet_source_id"]
            position_on_stage.twitter_user_entered_position = one_position["twitter_user_entered_position"]
            position_on_stage.volunteer_certified = one_position["volunteer_certified"]
            position_on_stage.vote_smart_rating = one_position["vote_smart_rating"]
            position_on_stage.vote_smart_rating_id = one_position["vote_smart_rating_id"]
            position_on_stage.vote_smart_rating_name = one_position["vote_smart_rating_name"]
            position_on_stage.vote_smart_time_span = one_position["vote_smart_time_span"]
            position_on_stage.voter_entering_position = one_position["voter_entering_position"]
            position_on_stage.voter_id = voter_id
            position_on_stage.voter_we_vote_id = one_position["voter_we_vote_id"]
            position_on_stage.save()
            if position_on_stage_found:
                # Update
                positions_updated += 1
            else:
                # New
                positions_saved += 1
        except Exception as e:
            handle_record_not_saved_exception(e, logger=logger)
            positions_not_processed += 1
    positions_results = {
        'success': True,
        'status': "POSITIONS_IMPORT_PROCESS_COMPLETE",
        'saved': positions_saved,
        'updated': positions_updated,
        'not_processed': positions_not_processed,
    }
    return positions_results
# We retrieve the position for this voter for one ballot item. Could just be the stance, but for now we are
# retrieving the entire position
def voter_position_retrieve_for_api(voter_device_id, office_we_vote_id, candidate_we_vote_id, measure_we_vote_id):
    """
    Retrieve the position this voter holds on one ballot item. Could just be the stance,
    but for now we return the entire position.
    Exactly one of office_we_vote_id / candidate_we_vote_id / measure_we_vote_id is expected.
    :param voter_device_id: unique id identifying this voter's device
    :param office_we_vote_id: we_vote_id of a contest office, or ''
    :param candidate_we_vote_id: we_vote_id of a candidate campaign, or ''
    :param measure_we_vote_id: we_vote_id of a contest measure, or ''
    :return: HttpResponse carrying a json payload describing the position, or an error status
    """
    results = is_voter_device_id_valid(voter_device_id)
    if not results['success']:
        return HttpResponse(json.dumps(results['json_data']), content_type='application/json')
    # Work out which kind of ballot item was requested, first match wins
    if positive_value_exists(office_we_vote_id):
        kind_of_ballot_item = OFFICE
        ballot_item_we_vote_id = office_we_vote_id
    elif positive_value_exists(candidate_we_vote_id):
        kind_of_ballot_item = CANDIDATE
        ballot_item_we_vote_id = candidate_we_vote_id
    elif positive_value_exists(measure_we_vote_id):
        kind_of_ballot_item = MEASURE
        # BUG FIX: this branch previously assigned candidate_we_vote_id, so measure retrievals
        # echoed a wrong (typically empty) ballot_item_we_vote_id in the response
        ballot_item_we_vote_id = measure_we_vote_id
    else:
        kind_of_ballot_item = ''
        ballot_item_we_vote_id = ''
    voter_id = fetch_voter_id_from_voter_device_link(voter_device_id)
    if not positive_value_exists(voter_id):
        # Don't need is_positive_rating, is_support_or_positive_rating, is_negative_rating,
        # or is_oppose_or_negative_rating
        json_data = {
            'status': "VOTER_NOT_FOUND_FROM_VOTER_DEVICE_ID",
            'success': False,
            'position_we_vote_id': '',
            'ballot_item_display_name': '',
            'speaker_display_name': '',
            'speaker_image_url_https': '',
            'speaker_twitter_handle': '',
            'is_support': False,
            'is_oppose': False,
            'is_information_only': False,
            'google_civic_election_id': '',
            'office_we_vote_id': office_we_vote_id,
            'candidate_we_vote_id': candidate_we_vote_id,
            'measure_we_vote_id': measure_we_vote_id,
            'kind_of_ballot_item': kind_of_ballot_item,
            'ballot_item_we_vote_id': ballot_item_we_vote_id,
            'stance': '',
            'statement_text': '',
            'statement_html': '',
            'more_info_url': '',
            'last_updated': '',
            'voter_device_id': voter_device_id,
        }
        return HttpResponse(json.dumps(json_data), content_type='application/json')
    # Normalize the incoming ids before using them for lookups
    office_we_vote_id = office_we_vote_id.strip().lower()
    candidate_we_vote_id = candidate_we_vote_id.strip().lower()
    measure_we_vote_id = measure_we_vote_id.strip().lower()
    if not positive_value_exists(office_we_vote_id) and \
            not positive_value_exists(candidate_we_vote_id) and \
            not positive_value_exists(measure_we_vote_id):
        # Don't need is_positive_rating, is_support_or_positive_rating, is_negative_rating,
        # or is_oppose_or_negative_rating
        json_data = {
            'status': "POSITION_RETRIEVE_MISSING_AT_LEAST_ONE_BALLOT_ITEM_ID",
            'success': False,
            'position_we_vote_id': '',
            'ballot_item_display_name': '',
            'speaker_display_name': '',
            'speaker_image_url_https': '',
            'speaker_twitter_handle': '',
            'is_support': False,
            'is_oppose': False,
            'is_information_only': False,
            'google_civic_election_id': '',
            'office_we_vote_id': office_we_vote_id,
            'candidate_we_vote_id': candidate_we_vote_id,
            'measure_we_vote_id': measure_we_vote_id,
            'kind_of_ballot_item': kind_of_ballot_item,
            'ballot_item_we_vote_id': ballot_item_we_vote_id,
            'stance': '',
            'statement_text': '',
            'statement_html': '',
            'more_info_url': '',
            'last_updated': '',
            'voter_device_id': voter_device_id,
        }
        return HttpResponse(json.dumps(json_data), content_type='application/json')
    position_manager = PositionEnteredManager()
    # At least one id is guaranteed non-empty by the guard above, so `results` is always bound
    if positive_value_exists(office_we_vote_id):
        results = position_manager.retrieve_voter_contest_office_position_with_we_vote_id(
            voter_id, office_we_vote_id)
    elif positive_value_exists(candidate_we_vote_id):
        results = position_manager.retrieve_voter_candidate_campaign_position_with_we_vote_id(
            voter_id, candidate_we_vote_id)
    elif positive_value_exists(measure_we_vote_id):
        results = position_manager.retrieve_voter_contest_measure_position_with_we_vote_id(
            voter_id, measure_we_vote_id)
    if results['position_found']:
        position = results['position']
        # Don't need is_positive_rating, is_support_or_positive_rating, is_negative_rating,
        # or is_oppose_or_negative_rating
        json_data = {
            'success': True,
            'status': results['status'],
            'position_we_vote_id': position.we_vote_id,
            'ballot_item_display_name': position.ballot_item_display_name,
            'speaker_display_name': position.speaker_display_name,
            'speaker_image_url_https': position.speaker_image_url_https,
            'speaker_twitter_handle': position.speaker_twitter_handle,
            'is_support': results['is_support'],
            'is_oppose': results['is_oppose'],
            'is_information_only': results['is_information_only'],
            'google_civic_election_id': position.google_civic_election_id,
            'office_we_vote_id': position.contest_office_we_vote_id,
            'candidate_we_vote_id': position.candidate_campaign_we_vote_id,
            'measure_we_vote_id': position.contest_measure_we_vote_id,
            'kind_of_ballot_item': kind_of_ballot_item,
            'ballot_item_we_vote_id': ballot_item_we_vote_id,
            'stance': position.stance,
            'statement_text': position.statement_text,
            'statement_html': position.statement_html,
            'more_info_url': position.more_info_url,
            'last_updated': position.last_updated(),
            'voter_device_id': voter_device_id,
        }
        return HttpResponse(json.dumps(json_data), content_type='application/json')
    else:
        # No position found is still a success=True response, with empty fields
        # Don't need is_positive_rating, is_support_or_positive_rating, is_negative_rating,
        # or is_oppose_or_negative_rating
        json_data = {
            'status': results['status'],
            'success': True,
            'position_we_vote_id': '',
            'ballot_item_display_name': '',
            'speaker_display_name': '',
            'speaker_image_url_https': '',
            'speaker_twitter_handle': '',
            'is_support': False,
            'is_oppose': False,
            'is_information_only': False,
            'google_civic_election_id': '',
            'office_we_vote_id': office_we_vote_id,
            'candidate_we_vote_id': candidate_we_vote_id,
            'measure_we_vote_id': measure_we_vote_id,
            'kind_of_ballot_item': kind_of_ballot_item,
            'ballot_item_we_vote_id': ballot_item_we_vote_id,
            'stance': '',
            'statement_text': '',
            'statement_html': '',
            'more_info_url': '',
            'last_updated': '',
            'voter_device_id': voter_device_id,
        }
        return HttpResponse(json.dumps(json_data), content_type='application/json')
# We retrieve the position for this voter for all ballot items. Could just be the stance, but for now we are
# retrieving the entire position
def voter_all_positions_retrieve_for_api(voter_device_id, google_civic_election_id):  # voterAllPositionsRetrieve
    """
    Retrieve every position this voter has taken, optionally limited to one election.
    Could just be the stance, but for now we return the entire position.
    :param voter_device_id: unique id identifying this voter's device
    :param google_civic_election_id: when provided, restrict results to one election
    :return: HttpResponse with json: status, success, position_list_found, position_list
    """
    device_results = is_voter_device_id_valid(voter_device_id)
    if not device_results['success']:
        error_payload = {
            'status': "VOTER_DEVICE_ID_NOT_VALID-VOTER_ALL_POSITIONS",
            'success': False,
            'position_list_found': False,
            'position_list': [],
        }
        return HttpResponse(json.dumps(error_payload), content_type='application/json')

    voter_id = fetch_voter_id_from_voter_device_link(voter_device_id)
    if not positive_value_exists(voter_id):
        error_payload = {
            'status': "VOTER_NOT_FOUND_FROM_VOTER_DEVICE_ID-VOTER_ALL_POSITIONS",
            'success': False,
            'position_list_found': False,
            'position_list': [],
        }
        return HttpResponse(json.dumps(error_payload), content_type='application/json')

    voter_we_vote_id = ''
    retrieve_results = PositionListManager().retrieve_all_positions_for_voter_simple(
        voter_id, voter_we_vote_id, google_civic_election_id)

    # Both outcomes are success=True; only the status and payload differ
    if retrieve_results['position_list_found']:
        json_data = {
            'status': retrieve_results['status'],
            'success': True,
            'position_list_found': True,
            'position_list': retrieve_results['position_list'],
        }
    else:
        json_data = {
            'status': "VOTER_POSITIONS_NOT_FOUND-NONE_EXIST",
            'success': True,
            'position_list_found': False,
            'position_list': [],
        }
    return HttpResponse(json.dumps(json_data), content_type='application/json')
def voter_position_comment_save_for_api(  # voterPositionCommentSave
        voter_device_id, position_we_vote_id,
        office_we_vote_id,
        candidate_we_vote_id,
        measure_we_vote_id,
        statement_text,
        statement_html,
        ):
    """
    Save (create or update) the comment portion of this voter's position on one ballot item.
    Either position_we_vote_id identifies an existing position, or the voter plus one of
    office/candidate/measure we_vote_id allows creating a new one.
    :return: json-serializable dict (not an HttpResponse) with save results; includes
        ballot item identifiers and whether the position is public
    """
    results = is_voter_device_id_valid(voter_device_id)
    if not results['success']:
        json_data_from_results = results['json_data']
        json_data = {
            'status': json_data_from_results['status'],
            'success': False,
            'ballot_item_id': 0,
            'ballot_item_we_vote_id': '',
            'kind_of_ballot_item': '',
            'statement_text': statement_text,
            'is_public_position': False
        }
        return json_data
    voter_manager = VoterManager()
    voter_results = voter_manager.retrieve_voter_from_voter_device_id(voter_device_id)
    voter_id = voter_results['voter_id']
    if not positive_value_exists(voter_id):
        json_data = {
            'status': "VOTER_NOT_FOUND_FROM_VOTER_DEVICE_ID-VOTER_POSITION_COMMENT",
            'success': False,
            'voter_device_id': voter_device_id,
            'position_we_vote_id': position_we_vote_id,
            'ballot_item_id': 0,
            'ballot_item_we_vote_id': '',
            'kind_of_ballot_item': '',
            'statement_text': statement_text,
            'is_public_position': False
        }
        return json_data
    voter = voter_results['voter']
    position_we_vote_id = position_we_vote_id.strip().lower()
    existing_unique_identifier_found = positive_value_exists(position_we_vote_id)
    new_unique_identifier_found = positive_value_exists(voter_id) \
        and (
            positive_value_exists(office_we_vote_id) or
            positive_value_exists(candidate_we_vote_id) or
            positive_value_exists(measure_we_vote_id)
        )
    unique_identifier_found = existing_unique_identifier_found or new_unique_identifier_found
    # We must have these variables in order to create a new entry.
    # This is currently the same condition as new_unique_identifier_found, so compute it once
    # instead of duplicating the whole expression.
    required_variables_for_new_entry = new_unique_identifier_found
    if not unique_identifier_found:
        json_data = {
            'status': "POSITION_REQUIRED_UNIQUE_IDENTIFIER_VARIABLES_MISSING",
            'success': False,
            'voter_device_id': voter_device_id,
            'position_we_vote_id': position_we_vote_id,
            'ballot_item_id': 0,
            'ballot_item_we_vote_id': '',
            'kind_of_ballot_item': '',
            'statement_text': statement_text,
            'is_public_position': False
        }
        return json_data
    elif not existing_unique_identifier_found and not required_variables_for_new_entry:
        # Don't need is_positive_rating, is_support_or_positive_rating, is_negative_rating,
        # or is_oppose_or_negative_rating
        json_data = {
            'status': "NEW_POSITION_REQUIRED_VARIABLES_MISSING",
            'success': False,
            'voter_device_id': voter_device_id,
            'position_we_vote_id': position_we_vote_id,
            'ballot_item_id': 0,
            'ballot_item_we_vote_id': '',
            'kind_of_ballot_item': '',
            'statement_text': statement_text,
            'is_public_position': False
        }
        return json_data
    position_manager = PositionEnteredManager()
    save_results = position_manager.update_or_create_position_comment(
        position_we_vote_id=position_we_vote_id,
        voter_id=voter_id,
        voter_we_vote_id=voter.we_vote_id,
        office_we_vote_id=office_we_vote_id,
        candidate_we_vote_id=candidate_we_vote_id,
        measure_we_vote_id=measure_we_vote_id,
        statement_text=statement_text,
        statement_html=statement_html
    )
    if save_results['success']:
        position = save_results['position']
        is_public_position = save_results['is_public_position']
        # Report back which kind of ballot item the saved position is attached to
        if positive_value_exists(position.candidate_campaign_we_vote_id):
            kind_of_ballot_item = CANDIDATE
            ballot_item_id = position.candidate_campaign_id
            ballot_item_we_vote_id = position.candidate_campaign_we_vote_id
        elif positive_value_exists(position.contest_measure_we_vote_id):
            kind_of_ballot_item = MEASURE
            ballot_item_id = position.contest_measure_id
            ballot_item_we_vote_id = position.contest_measure_we_vote_id
        elif positive_value_exists(position.contest_office_we_vote_id):
            kind_of_ballot_item = OFFICE
            ballot_item_id = position.contest_office_id
            ballot_item_we_vote_id = position.contest_office_we_vote_id
        else:
            kind_of_ballot_item = "UNKNOWN_BALLOT_ITEM"
            ballot_item_id = None
            ballot_item_we_vote_id = None
        json_data = {
            'success': save_results['success'],
            'status': save_results['status'],
            'voter_device_id': voter_device_id,
            'position_we_vote_id': position.we_vote_id,
            'ballot_item_id': ballot_item_id,
            'ballot_item_we_vote_id': ballot_item_we_vote_id,
            'kind_of_ballot_item': kind_of_ballot_item,
            'statement_text': position.statement_text,
            'statement_html': position.statement_html,
            'is_public_position': is_public_position
        }
        return json_data
    else:
        json_data = {
            'success': False,
            'status': save_results['status'],
            'voter_device_id': voter_device_id,
            'position_we_vote_id': '',
            'ballot_item_id': 0,
            'ballot_item_we_vote_id': "",
            'kind_of_ballot_item': "",
            'statement_text': statement_text,
            'statement_html': statement_html,
            'is_public_position': False
        }
        return json_data
def voter_position_visibility_save_for_api(  # voterPositionVisibilitySave
        voter_device_id,
        office_we_vote_id,
        candidate_we_vote_id,
        measure_we_vote_id,
        visibility_setting):
    """
    Switch the visibility of the voter's position on one ballot item between
    public (SHOW_PUBLIC) and friends-only (FRIENDS_ONLY), creating the position
    entry first if one does not exist yet.

    Exactly one of office_we_vote_id / candidate_we_vote_id / measure_we_vote_id
    is expected to identify the ballot item (candidate is checked first, then
    measure, then office).

    :param voter_device_id: device id used to look up the acting voter
    :param office_we_vote_id: we_vote_id of a contest office, or empty
    :param candidate_we_vote_id: we_vote_id of a candidate, or empty
    :param measure_we_vote_id: we_vote_id of a contest measure, or empty
    :param visibility_setting: SHOW_PUBLIC or FRIENDS_ONLY
    :return: JSON-serializable dict; 'success' is False when validation fails
             or the storage layer reports an error.
    """
    status = "ENTERING_VOTER_POSITION_VISIBILITY"
    # Unknown until we locate (or create) the position entry
    is_public_position = None

    # Validate the device id before doing any database work
    results = is_voter_device_id_valid(voter_device_id)
    if not results['success']:
        json_data_from_results = results['json_data']
        return {
            'status':                   json_data_from_results['status'],
            'success':                  False,
            'ballot_item_id':           0,
            'ballot_item_we_vote_id':   '',
            'kind_of_ballot_item':      '',
            'visibility_setting':       visibility_setting,
        }

    voter_manager = VoterManager()
    voter_results = voter_manager.retrieve_voter_from_voter_device_id(voter_device_id)
    voter_id = voter_results['voter_id']
    if not positive_value_exists(voter_id):
        return {
            'status':                   "VOTER_NOT_FOUND_FROM_VOTER_DEVICE_ID-VOTER_POSITION_COMMENT",
            'success':                  False,
            'voter_device_id':          voter_device_id,
            'ballot_item_id':           0,
            'ballot_item_we_vote_id':   '',
            'kind_of_ballot_item':      '',
            'visibility_setting':       visibility_setting,
            'is_public_position':       is_public_position,
        }
    # NOTE: the voter object itself is not needed below, only voter_id,
    # so we deliberately do not pull voter_results['voter'] into a local.

    # At least one ballot-item identifier is required
    unique_identifier_found = positive_value_exists(voter_id) \
        and (
            positive_value_exists(office_we_vote_id) or
            positive_value_exists(candidate_we_vote_id) or
            positive_value_exists(measure_we_vote_id)
        )
    if not unique_identifier_found:
        return {
            'status':                   "VOTER_POSITION_VISIBILITY-REQUIRED_UNIQUE_IDENTIFIER_VARIABLES_MISSING",
            'success':                  False,
            'voter_device_id':          voter_device_id,
            'ballot_item_id':           0,
            'ballot_item_we_vote_id':   '',
            'kind_of_ballot_item':      '',
            'visibility_setting':       visibility_setting,
            'is_public_position':       is_public_position,
        }

    switch_to_show_position_to_public = visibility_setting == SHOW_PUBLIC
    switch_to_show_position_to_friends = visibility_setting == FRIENDS_ONLY
    if not switch_to_show_position_to_public and not switch_to_show_position_to_friends:
        return {
            'status':                   "VOTER_POSITION_VISIBILITY-NO_VISIBILITY_SETTING_PROVIDED",
            'success':                  False,
            'voter_device_id':          voter_device_id,
            'ballot_item_id':           0,
            'ballot_item_we_vote_id':   '',
            'kind_of_ballot_item':      '',
            'visibility_setting':       visibility_setting,
            'is_public_position':       is_public_position,
        }

    # Make sure we can lay our hands on the existing position entry
    success = False
    position = None
    position_entered_manager = PositionEnteredManager()
    if positive_value_exists(candidate_we_vote_id):
        results = position_entered_manager.retrieve_voter_candidate_campaign_position_with_we_vote_id(
            voter_id, candidate_we_vote_id)
    elif positive_value_exists(measure_we_vote_id):
        results = position_entered_manager.retrieve_voter_contest_measure_position_with_we_vote_id(
            voter_id, measure_we_vote_id)
    elif positive_value_exists(office_we_vote_id):
        results = position_entered_manager.retrieve_voter_contest_office_position_with_we_vote_id(
            voter_id, office_we_vote_id)

    # BUGFIX: the original ended this chain with an unreachable `else` that
    # called create_position_for_visibility_change() with no arguments (which
    # would raise TypeError if it could ever run). `position_found` is either
    # truthy or falsy, so only the two branches below are reachable.
    if not results['position_found']:
        # No existing position: create one with the requested visibility
        results = position_entered_manager.create_position_for_visibility_change(
            voter_id, office_we_vote_id, candidate_we_vote_id, measure_we_vote_id, visibility_setting)
        if results['position_found']:
            is_public_position = results['is_public_position']
            position = results['position']
            status = results['status']
            success = results['success']
        else:
            status = "VOTER_POSITION_VISIBILITY-POSITION_NOT_FOUND_AND_NOT_CREATED"
            success = False
    else:
        is_public_position = results['is_public_position']
        position = results['position']
        if positive_value_exists(switch_to_show_position_to_public):
            if positive_value_exists(is_public_position):
                # Already public: merge any duplicate friends-only entry in
                status = "VOTER_POSITION_VISIBILITY-ALREADY_PUBLIC_POSITION"
                merge_results = position_entered_manager.merge_into_public_position(position)
                success = merge_results['success']
                status += " " + merge_results['status']
            else:
                # Copy the position from the PositionForFriends table to the PositionEntered table
                status = "VOTER_POSITION_VISIBILITY-SWITCHING_TO_PUBLIC_POSITION"
                change_results = position_entered_manager.transfer_to_public_position(position)
                success = change_results['success']
                status += " " + change_results['status']
                if success:
                    is_public_position = True
        elif positive_value_exists(switch_to_show_position_to_friends):
            if positive_value_exists(is_public_position):
                # Copy the position from the PositionEntered to the PositionForFriends table
                status = "VOTER_POSITION_VISIBILITY-SWITCHING_TO_FRIENDS_ONLY_POSITION"
                change_results = position_entered_manager.transfer_to_friends_only_position(position)
                success = change_results['success']
                status += " " + change_results['status']
                if success:
                    is_public_position = False
            else:
                # Already friends-only: merge any duplicate public entry in
                status = "VOTER_POSITION_VISIBILITY-ALREADY_FRIENDS_ONLY_POSITION"
                merge_results = position_entered_manager.merge_into_friends_only_position(position)
                success = merge_results['success']
                status += " " + merge_results['status']

    if success:
        # Prepare return values from the (possibly new) position record
        if positive_value_exists(candidate_we_vote_id):
            kind_of_ballot_item = CANDIDATE
            ballot_item_id = position.candidate_campaign_id
            ballot_item_we_vote_id = position.candidate_campaign_we_vote_id
        elif positive_value_exists(measure_we_vote_id):
            kind_of_ballot_item = MEASURE
            ballot_item_id = position.contest_measure_id
            ballot_item_we_vote_id = measure_we_vote_id
        elif positive_value_exists(office_we_vote_id):
            kind_of_ballot_item = OFFICE
            ballot_item_id = position.contest_office_id
            ballot_item_we_vote_id = position.contest_office_we_vote_id
        else:
            kind_of_ballot_item = "UNKNOWN_BALLOT_ITEM"
            ballot_item_id = None
            ballot_item_we_vote_id = None
        return {
            'success':                  success,
            'status':                   status,
            'voter_device_id':          voter_device_id,
            'position_we_vote_id':      position.we_vote_id,
            'ballot_item_id':           ballot_item_id,
            'ballot_item_we_vote_id':   ballot_item_we_vote_id,
            'kind_of_ballot_item':      kind_of_ballot_item,
            'visibility_setting':       visibility_setting,
            'is_public_position':       is_public_position,
        }

    # Failure: echo back the identifiers we were given (no position record)
    if positive_value_exists(candidate_we_vote_id):
        kind_of_ballot_item = CANDIDATE
        ballot_item_id = 0
        ballot_item_we_vote_id = candidate_we_vote_id
    elif positive_value_exists(measure_we_vote_id):
        kind_of_ballot_item = MEASURE
        ballot_item_id = 0
        ballot_item_we_vote_id = measure_we_vote_id
    elif positive_value_exists(office_we_vote_id):
        kind_of_ballot_item = OFFICE
        ballot_item_id = 0
        ballot_item_we_vote_id = office_we_vote_id
    else:
        kind_of_ballot_item = "UNKNOWN_BALLOT_ITEM"
        ballot_item_id = None
        ballot_item_we_vote_id = None
    return {
        'success':                  success,
        'status':                   status,
        'voter_device_id':          voter_device_id,
        'position_we_vote_id':      '',
        'ballot_item_id':           ballot_item_id,
        'ballot_item_we_vote_id':   ballot_item_we_vote_id,
        'kind_of_ballot_item':      kind_of_ballot_item,
        'visibility_setting':       visibility_setting,
        'is_public_position':       is_public_position,
    }
| 51.551666
| 120
| 0.626071
| 10,212
| 91,298
| 5.063455
| 0.038484
| 0.055465
| 0.072871
| 0.019726
| 0.829021
| 0.783399
| 0.739228
| 0.708324
| 0.652085
| 0.622592
| 0
| 0.000904
| 0.309076
| 91,298
| 1,770
| 121
| 51.580791
| 0.818817
| 0.07952
| 0
| 0.646865
| 0
| 0
| 0.165484
| 0.071229
| 0
| 0
| 0
| 0.00226
| 0
| 1
| 0.007921
| false
| 0.005281
| 0.017822
| 0
| 0.053465
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
a1aa2644c42d33fc16ec6f3728f4af39b511f6bd
| 123
|
py
|
Python
|
cron/admin.py
|
Littledelma/mofadog
|
5a7c6672da248e400a8a5746506a6e7b273c9510
|
[
"MIT"
] | null | null | null |
cron/admin.py
|
Littledelma/mofadog
|
5a7c6672da248e400a8a5746506a6e7b273c9510
|
[
"MIT"
] | 1
|
2021-06-08T03:28:08.000Z
|
2021-06-08T03:28:08.000Z
|
cron/admin.py
|
Littledelma/mofadog
|
5a7c6672da248e400a8a5746506a6e7b273c9510
|
[
"MIT"
] | 1
|
2021-06-08T03:23:34.000Z
|
2021-06-08T03:23:34.000Z
|
from django.contrib import admin
from .models import Cron_beat
# Register your models here.
# Expose Cron_beat in the Django admin site with the default ModelAdmin.
admin.site.register(Cron_beat)
| 24.6
| 32
| 0.821138
| 19
| 123
| 5.210526
| 0.631579
| 0.161616
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.113821
| 123
| 5
| 33
| 24.6
| 0.908257
| 0.211382
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.666667
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
a1c40c42bb903110c00dd53c0189fc8332e921f5
| 15,078
|
py
|
Python
|
test/test_integration_list.py
|
jdumas/conjur-api-python3
|
96e438765d580722357a070507305feafcb59d2a
|
[
"Apache-2.0"
] | 16
|
2019-05-17T15:34:59.000Z
|
2021-11-08T10:30:21.000Z
|
test/test_integration_list.py
|
jdumas/conjur-api-python3
|
96e438765d580722357a070507305feafcb59d2a
|
[
"Apache-2.0"
] | 301
|
2019-05-07T18:27:10.000Z
|
2022-01-26T13:03:49.000Z
|
test/test_integration_list.py
|
jdumas/conjur-api-python3
|
96e438765d580722357a070507305feafcb59d2a
|
[
"Apache-2.0"
] | 10
|
2019-07-30T17:00:13.000Z
|
2022-01-20T17:00:34.000Z
|
# -*- coding: utf-8 -*-
"""
CLI List Integration tests
This test file handles the main test flows for the list command
"""
import io
import os
import shutil
from contextlib import redirect_stderr
from conjur.constants import DEFAULT_CONFIG_FILE
from test.util.test_infrastructure import integration_test
from test.util.test_runners.integration_test_case import IntegrationTestCaseBase
from test.util import test_helpers as utils
# Not coverage tested since integration tests doesn't run in
# the same build step
class CliIntegrationTestList(IntegrationTestCaseBase):  # pragma: no cover
    """Integration tests for the `conjur list` CLI command.

    Each test invokes the CLI against a live Conjur endpoint (configured by
    the surrounding test infrastructure) and asserts on the JSON text the
    command prints.
    """

    def __init__(self, testname, client_params=None, environment_params=None):
        super(CliIntegrationTestList, self).__init__(testname, client_params, environment_params)

    # *************** HELPERS ***************

    def setUp(self):
        self.setup_cli_params({})
        # Used to configure the CLI and login to run tests
        utils.setup_cli(self)
        self.invoke_cli(self.cli_auth_params,
                        ['policy', 'replace', '-b', 'root', '-f', self.environment.path_provider.get_policy_path("list")])

    # *************** TESTS ***************

    @integration_test()
    def test_list_without_valid_conjurrc_raises_config_exception(self):
        os.remove(DEFAULT_CONFIG_FILE)
        shutil.copy(self.environment.path_provider.test_missing_account_conjurrc, self.environment.path_provider.conjurrc_path)
        with self.assertRaises(AssertionError):
            self.invoke_cli(self.cli_auth_params, ['list'])

    @integration_test()
    def test_list_returns_resources(self):
        output = self.invoke_cli(self.cli_auth_params, ['list'])
        self.assertIn(f'[\n "{self.client_params.account}:policy:root",\n'
                      f' "{self.client_params.account}:user:someuser",\n'
                      f' "{self.client_params.account}:layer:somelayer",\n'
                      f' "{self.client_params.account}:group:somegroup",\n'
                      f' "{self.client_params.account}:host:anotherhost",\n'
                      f' "{self.client_params.account}:variable:one/password",\n'
                      f' "{self.client_params.account}:webservice:somewebservice"\n]\n', output)

    @integration_test(True)
    def test_list_help_returns_help_screen(self):
        output = self.invoke_cli(self.cli_auth_params, ['list', '-h'])
        self.assertIn("Name:\n list", output)

    @integration_test(True)
    def test_list_inspect_user_returns_info_on_user(self):
        self.invoke_cli(self.cli_auth_params,
                        ['policy', 'replace', '-b', 'root', '-f', self.environment.path_provider.get_policy_path("conjur")])
        output = self.invoke_cli(self.cli_auth_params, ['list', '--inspect', '--kind', 'user'])
        self.assertIn(f' "id": "{self.client_params.account}:user:someuser",\n'
                      f' "owner": "{self.client_params.account}:user:admin",\n'
                      f' "policy": "{self.client_params.account}:policy:root",\n'
                      f' "permissions": [],\n'
                      f' "annotations": [],\n'
                      f' "restricted_to": []\n'
                      f' }}\n'
                      f']\n', output)

    @integration_test()
    def test_list_kind_layer_returns_layers(self):
        with self.assertLogs('', level='DEBUG') as mock_log:
            output = self.invoke_cli(self.cli_auth_params, ['list', '-k', 'layer'])
            self.assertIn(f'[\n "{self.client_params.account}:layer:somelayer"\n]\n', output)
            self.assertIn("Executing list command with the following constraints: {'kind': 'layer'}",
                          str(mock_log.output))

    @integration_test(True)
    def test_list_kind_user_returns_users(self):
        output = self.invoke_cli(self.cli_auth_params, ['list', '-k', 'user'])
        self.assertIn(f'[\n "{self.client_params.account}:user:someuser"\n]\n', output)

    @integration_test(True)
    def test_list_kind_nonexistent_returns_empty_list(self):
        output = self.invoke_cli(self.cli_auth_params, ['list', '-k', 'nonexistentkind'])
        self.assertIn('[]\n', output)

    @integration_test(True)
    def test_list_kind_variable_returns_variables(self):
        output = self.invoke_cli(self.cli_auth_params, ['list', '-k', 'variable'])
        self.assertIn(f'[\n "{self.client_params.account}:variable:one/password"\n]\n', output)

    @integration_test(True)
    def test_list_kind_group_returns_groups(self):
        output = self.invoke_cli(self.cli_auth_params, ['list', '-k', 'group'])
        self.assertIn(f'[\n "{self.client_params.account}:group:somegroup"\n]\n', output)

    @integration_test()
    def test_list_kind_policy_returns_policies(self):
        output = self.invoke_cli(self.cli_auth_params, ['list', '-k', 'policy'])
        self.assertIn(f'[\n "{self.client_params.account}:policy:root"\n]\n', output)

    # NOTE(review): this test was defined twice verbatim in the original file;
    # the second definition silently shadowed the first. One copy removed.
    @integration_test()
    def test_list_kind_policy_returns_policies_insecure(self):
        self.setup_insecure()
        output = self.invoke_cli(self.cli_auth_params, ['list', '-k', 'policy'])
        self.assertIn(f'[\n "{self.client_params.account}:policy:root"\n]\n', output)

    @integration_test(True)
    def test_list_kind_short_host_returns_hosts(self):
        output = self.invoke_cli(self.cli_auth_params, ['list', '-k', 'host'])
        self.assertIn(f'[\n "{self.client_params.account}:host:anotherhost"\n]\n', output)

    @integration_test(True)
    def test_list_kind_long_host_returns_hosts(self):
        output = self.invoke_cli(self.cli_auth_params, ['list', '--kind=host'])
        self.assertIn(f'[\n "{self.client_params.account}:host:anotherhost"\n]\n', output)

    @integration_test(True)
    def test_list_kind_webservice_returns_webservices(self):
        output = self.invoke_cli(self.cli_auth_params, ['list', '-k', 'webservice'])
        self.assertIn(f'[\n "{self.client_params.account}:webservice:somewebservice"\n]\n', output)

    @integration_test()
    def test_list_insecure_list_prints_warning_in_log(self):
        with self.assertLogs('', level='DEBUG') as mock_log:
            self.invoke_cli(self.cli_auth_params, ['--insecure', 'list'])
            self.assertIn("Warning: Running the command with '--insecure' makes your system vulnerable to security attacks",
                          str(mock_log.output))

    @integration_test()
    def test_list_limit_short_returns_same_number_of_resources(self):
        output = self.invoke_cli(self.cli_auth_params, ['list', '-l', '5'])
        self.assertIn(f'[\n "{self.client_params.account}:group:somegroup",\n'
                      f' "{self.client_params.account}:host:anotherhost",\n'
                      f' "{self.client_params.account}:layer:somelayer",\n'
                      f' "{self.client_params.account}:policy:root",\n'
                      f' "{self.client_params.account}:user:someuser"\n]\n', output)

    @integration_test()
    def test_list_limit_long_returns_same_number_of_resources(self):
        output = self.invoke_cli(self.cli_auth_params, ['list', '--limit=5'])
        self.assertIn(f'[\n "{self.client_params.account}:group:somegroup",\n'
                      f' "{self.client_params.account}:host:anotherhost",\n'
                      f' "{self.client_params.account}:layer:somelayer",\n'
                      f' "{self.client_params.account}:policy:root",\n'
                      f' "{self.client_params.account}:user:someuser"\n]\n', output)

    @integration_test(True)
    def test_list_limit_invalid_param_returns_empty_list(self):
        output = self.invoke_cli(self.cli_auth_params, ['list', '-l', '0'], exit_code=1)
        self.assertIn("422 Client Error", output)

    @integration_test(True)
    def test_list_limit_string_raises_error(self):
        output = self.invoke_cli(self.cli_auth_params, ['list', '-l', 'somestring'], exit_code=1)
        self.assertIn("422 Client Error", output)

    # Validates that an invalid input (a negative number) raises an error
    @integration_test(True)
    def test_list_limit_invalid_negative_param_raises_error(self):
        output = self.invoke_cli(self.cli_auth_params, ['list', '-l', '-5'], exit_code=1)
        self.assertIn("422 Client Error", output)

    @integration_test()
    def test_list_random_input_raises_error(self):
        capture_stream = io.StringIO()
        with redirect_stderr(capture_stream):
            self.invoke_cli(self.cli_auth_params, ['list', 'someinput'], exit_code=1)
        self.assertIn("Error unrecognized arguments", capture_stream.getvalue())

    @integration_test(True)
    def test_list_offset_short_returns_list_starting_at_param(self):
        output = self.invoke_cli(self.cli_auth_params, ['list', '-o', '2'])
        self.assertIn(f'[\n "{self.client_params.account}:layer:somelayer",\n'
                      f' "{self.client_params.account}:policy:root",\n'
                      f' "{self.client_params.account}:user:someuser",\n'
                      f' "{self.client_params.account}:variable:one/password",\n'
                      f' "{self.client_params.account}:webservice:somewebservice"\n]\n', output)

    @integration_test()
    def test_list_offset_long_returns_list_starting_at_param(self):
        output = self.invoke_cli(self.cli_auth_params, ['list', '--offset=2'])
        self.assertIn(f'[\n "{self.client_params.account}:layer:somelayer",\n'
                      f' "{self.client_params.account}:policy:root",\n'
                      f' "{self.client_params.account}:user:someuser",\n'
                      f' "{self.client_params.account}:variable:one/password",\n'
                      f' "{self.client_params.account}:webservice:somewebservice"\n]\n', output)

    @integration_test()
    def test_list_offset_returns_list_of_all_resources(self):
        output = self.invoke_cli(self.cli_auth_params, ['list', '-o', '0'])
        self.assertIn(f'[\n "{self.client_params.account}:group:somegroup",\n'
                      f' "{self.client_params.account}:host:anotherhost",\n'
                      f' "{self.client_params.account}:layer:somelayer",\n'
                      f' "{self.client_params.account}:policy:root",\n'
                      f' "{self.client_params.account}:user:someuser",\n'
                      f' "{self.client_params.account}:variable:one/password",\n'
                      f' "{self.client_params.account}:webservice:somewebservice"\n]\n', output)

    @integration_test(True)
    def test_list_offset_negative_raises_error(self):
        output = self.invoke_cli(self.cli_auth_params, ['list', '-o', '-1'], exit_code=1)
        self.assertIn("422 Client Error", output)

    # This tests is commented out because of a bug in server (https://github.com/cyberark/conjur/issues/1997)
    # where a string is considered valid input for offset. For example, when offset=somestring a list
    # of Conjur resources are returned instead of a 500 internal server error
    # @integration_test(True)
    # def test_list_string_offset_raises_error(self):
    #     output = self.invoke_cli(self.cli_auth_params, ['list', '-o', 'somestring'])
    #     self.assertIn(output,
    #                   f'[\n "{self.client_params.account}:group:somegroup",\n'
    #                   f' "{self.client_params.account}:host:anotherhost",\n'
    #                   f' "{self.client_params.account}:layer:somelayer",\n'
    #                   f' "{self.client_params.account}:policy:root",\n'
    #                   f' "{self.client_params.account}:user:someuser",\n'
    #                   f' "{self.client_params.account}:variable:one/password",\n'
    #                   f' "{self.client_params.account}:webservice:somewebservice"\n]\n')

    @integration_test(True)
    def test_list_short_search_returns_list_with_param(self):
        output = self.invoke_cli(self.cli_auth_params, ['list', '-s', 'someuser'])
        self.assertIn(f'[\n "{self.client_params.account}:user:someuser"\n]\n', output)

    @integration_test(True)
    def test_list_long_search_returns_list_with_param(self):
        output = self.invoke_cli(self.cli_auth_params, ['list', '--search=someuser'])
        self.assertIn(f'[\n "{self.client_params.account}:user:someuser"\n]\n', output)

    @integration_test(True)
    def test_list_search_nonexistent_resource_returns_empty_list(self):
        output = self.invoke_cli(self.cli_auth_params, ['list', '-s', 'some'])
        self.assertIn('[]\n', output)

    @integration_test(True)
    def test_list_role_short_user_returns_resources_that_can_be_viewed(self):
        output = self.invoke_cli(self.cli_auth_params, ['list', '-r', f'{self.client_params.account}:user:someuser'])
        self.assertIn(f'[\n "{self.client_params.account}:variable:one/password"\n]\n', output)

    @integration_test(True)
    def test_list_role_long_user_returns_resources_that_can_be_viewed(self):
        output = self.invoke_cli(self.cli_auth_params, ['list', f'--role={self.client_params.account}:user:someuser'])
        self.assertIn(f'[\n "{self.client_params.account}:variable:one/password"\n]\n', output)

    @integration_test(True)
    def test_list_role_nonexistent_user_returns_forbidden(self):
        output = self.invoke_cli(self.cli_auth_params, ['list', '-r', f'{self.client_params.account}:user:nonexistinguser'], exit_code=1)
        self.assertRegex(output, '403 Client Error')

    @integration_test(True)
    def test_list_combo_limit_and_kind_returns_specified_kind(self):
        self.invoke_cli(self.cli_auth_params,
                        ['policy', 'load', '-b', 'root', '-f', self.environment.path_provider.get_policy_path("conjur")])
        output = self.invoke_cli(self.cli_auth_params, ['list', '-l', '2', '-k', 'host'])
        self.assertIn(f'[\n "{self.client_params.account}:host:anotherhost",\n'
                      f' "{self.client_params.account}:host:somehost"\n]\n', output)

    @integration_test()
    def test_list_combo_limit_and_offset_returns_specified_list(self):
        self.invoke_cli(self.cli_auth_params,
                        ['policy', 'load', '-b', 'root', '-f', self.environment.path_provider.get_policy_path("conjur")])
        output = self.invoke_cli(self.cli_auth_params, ['list', '-o', '2', '-l', '3'])
        self.assertIn(f'[\n "{self.client_params.account}:host:somehost",\n'
                      f' "{self.client_params.account}:layer:somelayer",\n'
                      f' "{self.client_params.account}:policy:root"\n]\n', output)
| 54.043011
| 137
| 0.639806
| 1,891
| 15,078
| 4.835008
| 0.120571
| 0.089249
| 0.115498
| 0.166029
| 0.761238
| 0.750191
| 0.740676
| 0.713879
| 0.675599
| 0.651537
| 0
| 0.00337
| 0.212694
| 15,078
| 278
| 138
| 54.23741
| 0.766827
| 0.090065
| 0
| 0.487923
| 0
| 0
| 0.30585
| 0.217078
| 0
| 0
| 0
| 0
| 0.178744
| 1
| 0.173913
| false
| 0.033816
| 0.038647
| 0
| 0.217391
| 0.004831
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
a1d2a588d4942404dac45f5a8d80c3d88c6274df
| 2,612
|
py
|
Python
|
main.py
|
Chiki1601/Batman-drawing
|
216d9e4e3643c3aa92e92251f5193860f1dd606c
|
[
"Unlicense"
] | null | null | null |
main.py
|
Chiki1601/Batman-drawing
|
216d9e4e3643c3aa92e92251f5193860f1dd606c
|
[
"Unlicense"
] | null | null | null |
main.py
|
Chiki1601/Batman-drawing
|
216d9e4e3643c3aa92e92251f5193860f1dd606c
|
[
"Unlicense"
] | null | null | null |
# Batman logo drawn with Python turtle graphics.
# Refactor note: each outline formula was previously duplicated verbatim for
# the left- and right-hand drawing passes; the formulas are now defined once
# as helpers and reused, so the traced curve is identical to the original.
import turtle
import math


def _upper_wing_y(x):
    """Upper wing/ear outline (used for roughly 3 <= |x| <= 7)."""
    rel = math.fabs(x)
    return 1.5 * math.sqrt((-math.fabs(rel - 1)) * math.fabs(3 - rel) / ((rel - 1) * (3 - rel))) * (
        1 + math.fabs(rel - 3) / (rel - 3)) * math.sqrt(1 - (x / 7) ** 2) + (
        4.5 + 0.75 * (math.fabs(x - 0.5) + math.fabs(x + 0.5)) - 2.75 * (
            math.fabs(x - 0.75) + math.fabs(x + 0.75))) * (1 + math.fabs(1 - rel) / (1 - rel))


def _shoulder_y(x):
    """Shoulder curve between wing and head (used for roughly 1 <= |x| <= 3)."""
    rel = math.fabs(x)
    return (2.71052 + 1.5 - 0.5 * rel - 1.35526 * math.sqrt(4 - (rel - 1) ** 2)) * math.sqrt(
        math.fabs(rel - 1) / (rel - 1))


def _lower_wing_y(x):
    """Lower outer wing edge (used for roughly 4 <= |x| <= 7)."""
    rel = math.fabs(x)
    return (-3) * math.sqrt(1 - (x / 7) ** 2) * math.sqrt(math.fabs(rel - 4) / (rel - 4))


def _cape_y(x):
    """Scalloped bottom cape edge (used for |x| <= 4)."""
    rel = math.fabs(x)
    return math.fabs(x / 2) - 0.0913722 * x ** 2 - 3 + math.sqrt(1 - (math.fabs(rel - 2) - 1) ** 2)


kalam = turtle.Turtle()
kalam.speed(500)
window = turtle.Screen()
window.bgcolor("#000000")
kalam.color("yellow")

# Scale factor from logo coordinates to screen pixels
ankur = 20

kalam.left(90)
kalam.penup()
kalam.goto(-7 * ankur, 0)
kalam.pendown()

# Left upper wing
for a in range(-7 * ankur, -3 * ankur, 1):
    kalam.goto(a, _upper_wing_y(a / ankur) * ankur)
# Left shoulder up to the ear
for a in range(-3 * ankur, -1 * ankur - 1, 1):
    kalam.goto(a, _shoulder_y(a / ankur) * ankur)
# Head and ears as straight segments
kalam.goto(-1 * ankur, 3 * ankur)
kalam.goto(int(-0.5 * ankur), int(2.2 * ankur))
kalam.goto(int(0.5 * ankur), int(2.2 * ankur))
kalam.goto(1 * ankur, 3 * ankur)
print("Batman Logo with Python Turtle")
# Right shoulder
for a in range(1 * ankur + 1, 3 * ankur + 1, 1):
    kalam.goto(a, _shoulder_y(a / ankur) * ankur)
# Right upper wing
for a in range(3 * ankur + 1, 7 * ankur + 1, 1):
    kalam.goto(a, _upper_wing_y(a / ankur) * ankur)
# Right lower wing, traced right-to-left
for a in range(7 * ankur, 4 * ankur, -1):
    kalam.goto(a, _lower_wing_y(a / ankur) * ankur)
# Bottom cape scallops
for a in range(4 * ankur, -4 * ankur, -1):
    kalam.goto(a, _cape_y(a / ankur) * ankur)
# Left lower wing
for a in range(-4 * ankur - 1, -7 * ankur - 1, -1):
    kalam.goto(a, _lower_wing_y(a / ankur) * ankur)

kalam.penup()
kalam.goto(300, 300)
turtle.done()
| 33.063291
| 106
| 0.492726
| 462
| 2,612
| 2.785714
| 0.108225
| 0.180264
| 0.111888
| 0.06216
| 0.764569
| 0.764569
| 0.746698
| 0.724942
| 0.724942
| 0.724942
| 0
| 0.105376
| 0.287902
| 2,612
| 78
| 107
| 33.487179
| 0.586559
| 0.007657
| 0
| 0.587302
| 0
| 0
| 0.016609
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.031746
| 0
| 0.031746
| 0.015873
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
a1d3a7445794ad7b7dfe82d5d8ee19e8703d25cb
| 40
|
py
|
Python
|
testing/assets/exp_syntax_error.py
|
ice-stuff/ice
|
151b741feb23275b4ebe40c3657df563f55cbfee
|
[
"MIT"
] | 2
|
2015-03-06T20:22:48.000Z
|
2015-09-20T10:18:07.000Z
|
testing/assets/exp_syntax_error.py
|
glestaris/ice
|
151b741feb23275b4ebe40c3657df563f55cbfee
|
[
"MIT"
] | null | null | null |
testing/assets/exp_syntax_error.py
|
glestaris/ice
|
151b741feb23275b4ebe40c3657df563f55cbfee
|
[
"MIT"
] | null | null | null |
# NOTE(review): intentionally invalid syntax. The file path
# (testing/assets/exp_syntax_error.py) suggests this is a fixture that is
# expected to raise a SyntaxError when parsed — do not "fix" it.
def this_is_not valid python:
    pass
| 10
| 29
| 0.725
| 7
| 40
| 3.857143
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.25
| 40
| 3
| 30
| 13.333333
| 0.9
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0.5
| 0
| null | null | 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 5
|
a1e9986e5bf06289cd4dd089dbac5d440483ac6b
| 51
|
py
|
Python
|
ucdev/cy7c65211/__init__.py
|
vpetrigo/python-ucdev
|
d3606fd1244dfefef039c7c38acb8b4a1f086c29
|
[
"MIT"
] | 11
|
2015-07-08T01:28:01.000Z
|
2022-01-26T14:29:47.000Z
|
ucdev/cy7c65211/__init__.py
|
vpetrigo/python-ucdev
|
d3606fd1244dfefef039c7c38acb8b4a1f086c29
|
[
"MIT"
] | 5
|
2017-12-07T15:04:00.000Z
|
2021-06-02T14:47:14.000Z
|
ucdev/cy7c65211/__init__.py
|
vpetrigo/python-ucdev
|
d3606fd1244dfefef039c7c38acb8b4a1f086c29
|
[
"MIT"
] | 4
|
2017-02-18T18:20:13.000Z
|
2022-03-23T16:21:20.000Z
|
# -*- coding: utf-8-unix -*-
# Package initializer: re-export the .device submodule's public names at
# package level so callers can import them directly from this package.
from .device import *
| 17
| 28
| 0.588235
| 7
| 51
| 4.285714
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.02381
| 0.176471
| 51
| 2
| 29
| 25.5
| 0.690476
| 0.509804
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
b80b27d224c954b748bcc8898633150a624746ca
| 468
|
py
|
Python
|
stacks/list_stack.py
|
fgoncalves/datastructures.python
|
91afd63c27b04855715f49dc94c46772ff89bb60
|
[
"MIT"
] | null | null | null |
stacks/list_stack.py
|
fgoncalves/datastructures.python
|
91afd63c27b04855715f49dc94c46772ff89bb60
|
[
"MIT"
] | null | null | null |
stacks/list_stack.py
|
fgoncalves/datastructures.python
|
91afd63c27b04855715f49dc94c46772ff89bb60
|
[
"MIT"
] | null | null | null |
from .stack import Stack
class ListStack(Stack):
    """
    A stack backed up by a list. There's no limit to the stack's capacity.
    """

    def __init__(self):
        Stack.__init__(self)
        # Internal storage; the list tail is the top of the stack.
        self.elements = list()

    def push(self, element):
        """Place element on top of the stack."""
        # BUGFIX: all four operations previously raised NotImplementedError
        # even though the class advertises a list-backed stack and allocates
        # self.elements; they are now implemented against that list.
        self.elements.append(element)

    def pop(self):
        """Remove and return the top element; raises IndexError when empty."""
        return self.elements.pop()

    def peek(self):
        """Return the top element without removing it; raises IndexError when empty."""
        return self.elements[-1]

    def is_empty(self):
        """Return True when the stack holds no elements."""
        return not self.elements
| 21.272727
| 73
| 0.619658
| 54
| 468
| 5.203704
| 0.537037
| 0.341637
| 0.288256
| 0.220641
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.286325
| 468
| 22
| 74
| 21.272727
| 0.841317
| 0.147436
| 0
| 0.307692
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.384615
| false
| 0
| 0.076923
| 0
| 0.538462
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
| 0
|
0
| 5
|
62d914ab67168870b0749b74ce7386d986722857
| 55
|
py
|
Python
|
my_script.py
|
EvgSkv/toyrepo
|
da9ef8e9e770472575a1e18cbb1a239991d109b0
|
[
"Apache-2.0"
] | 1
|
2021-05-03T05:37:24.000Z
|
2021-05-03T05:37:24.000Z
|
my_script.py
|
EvgSkv/toyrepo
|
da9ef8e9e770472575a1e18cbb1a239991d109b0
|
[
"Apache-2.0"
] | null | null | null |
my_script.py
|
EvgSkv/toyrepo
|
da9ef8e9e770472575a1e18cbb1a239991d109b0
|
[
"Apache-2.0"
] | null | null | null |
# Greet the user with a short two-line message.
for greeting_line in ("Hello, human! ~~~", "How can I help you?"):
    print(greeting_line)
| 27.5
| 28
| 0.618182
| 9
| 55
| 3.777778
| 0.888889
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.127273
| 55
| 2
| 28
| 27.5
| 0.708333
| 0
| 0
| 0
| 0
| 0
| 0.642857
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
|
0
| 5
|
62e352bdf1f384bb98d6eed0a42143cbcbd24c67
| 124
|
py
|
Python
|
server/testCGI.py
|
padlesky/Lab03
|
ec374222dcffc5cf9962771ccf2c988741c634da
|
[
"Apache-2.0"
] | null | null | null |
server/testCGI.py
|
padlesky/Lab03
|
ec374222dcffc5cf9962771ccf2c988741c634da
|
[
"Apache-2.0"
] | null | null | null |
server/testCGI.py
|
padlesky/Lab03
|
ec374222dcffc5cf9962771ccf2c988741c634da
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
# Minimal CGI test script. NOTE(review): written in Python 2 print-statement
# syntax; it will not run under Python 3 without conversion.
print "Content-type: text/html"
# A bare print emits the blank line that terminates the CGI/HTTP header block.
print
print "<title> Test CGI </title>"
print "<p>Hellow World!</p>"
| 17.714286
| 33
| 0.677419
| 20
| 124
| 4.2
| 0.75
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.120968
| 124
| 6
| 34
| 20.666667
| 0.770642
| 0.16129
| 0
| 0
| 0
| 0
| 0.660194
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0
| null | null | 1
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 1
|
0
| 5
|
1a0fa524f864cd8514898d22cc4beedc1fb52b3a
| 63
|
py
|
Python
|
__init__.py
|
anish175/anish_outlier
|
a53d2410553d09f73bdf7d027027bf3575484abf
|
[
"MIT"
] | null | null | null |
__init__.py
|
anish175/anish_outlier
|
a53d2410553d09f73bdf7d027027bf3575484abf
|
[
"MIT"
] | null | null | null |
__init__.py
|
anish175/anish_outlier
|
a53d2410553d09f73bdf7d027027bf3575484abf
|
[
"MIT"
] | null | null | null |
from anish_101703072_outlier.outlier import outlier_row_removal
| 63
| 63
| 0.936508
| 9
| 63
| 6.111111
| 0.777778
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.15
| 0.047619
| 63
| 1
| 63
| 63
| 0.766667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
a7e1d284df3568f073738af6e2ae77feb738785d
| 69
|
py
|
Python
|
helloworld.py
|
Douwanna/Douwanna
|
ebd7f47d4b25d93fed46c9d0ff6a263de26ccd56
|
[
"MIT"
] | 1
|
2018-01-24T02:47:07.000Z
|
2018-01-24T02:47:07.000Z
|
helloworld.py
|
Douwanna/Douwanna
|
ebd7f47d4b25d93fed46c9d0ff6a263de26ccd56
|
[
"MIT"
] | null | null | null |
helloworld.py
|
Douwanna/Douwanna
|
ebd7f47d4b25d93fed46c9d0ff6a263de26ccd56
|
[
"MIT"
] | null | null | null |
#coding=utf-8
print("hello world!")
print('Welcome to a new world!')
| 17.25
| 32
| 0.695652
| 12
| 69
| 4
| 0.833333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.016393
| 0.115942
| 69
| 4
| 32
| 17.25
| 0.770492
| 0.173913
| 0
| 0
| 0
| 0
| 0.614035
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
|
0
| 5
|
a7f4fd4f578061b465a13b62c3d0684802480480
| 32,758
|
py
|
Python
|
tests/agent_features/test_asgi_browser.py
|
bradleycamacho/newrelic-python-agent
|
d41fb461e7a4eb428fa85b0abc21734814bd7390
|
[
"Apache-2.0"
] | null | null | null |
tests/agent_features/test_asgi_browser.py
|
bradleycamacho/newrelic-python-agent
|
d41fb461e7a4eb428fa85b0abc21734814bd7390
|
[
"Apache-2.0"
] | null | null | null |
tests/agent_features/test_asgi_browser.py
|
bradleycamacho/newrelic-python-agent
|
d41fb461e7a4eb428fa85b0abc21734814bd7390
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2010 New Relic, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import json
import pytest
import six
from testing_support.fixtures import (override_application_settings,
validate_transaction_errors, validate_custom_parameters)
from testing_support.asgi_testing import AsgiTest
from newrelic.api.application import application_settings
from newrelic.api.transaction import (get_browser_timing_header,
get_browser_timing_footer, add_custom_parameter,
disable_browser_autorum)
from newrelic.api.asgi_application import asgi_application
from newrelic.common.encoding_utils import deobfuscate
from bs4 import BeautifulSoup
_runtime_error_name = (RuntimeError.__module__ + ':' + RuntimeError.__name__)
@asgi_application()
async def target_asgi_application_manual_rum(scope, receive, send):
text = '<html><head>%s</head><body><p>RESPONSE</p>%s</body></html>'
output = (text % (get_browser_timing_header(),
get_browser_timing_footer())).encode('UTF-8')
response_headers = [(b'content-type', b'text/html; charset=utf-8'),
(b'content-length', str(len(output)).encode('utf-8'))]
await send({"type": "http.response.start", "status": 200, "headers": response_headers})
await send({"type": "http.response.body", "body": output})
target_application_manual_rum = AsgiTest(target_asgi_application_manual_rum)
_test_footer_attributes = {
'browser_monitoring.enabled': True,
'browser_monitoring.auto_instrument': False,
'js_agent_loader': u'<!-- NREUM HEADER -->',
}
@override_application_settings(_test_footer_attributes)
def test_footer_attributes():
settings = application_settings()
assert settings.browser_monitoring.enabled
assert settings.browser_key
assert settings.browser_monitoring.loader_version
assert settings.js_agent_loader
assert isinstance(settings.js_agent_file, six.string_types)
assert settings.beacon
assert settings.error_beacon
token = '0123456789ABCDEF'
headers = { 'Cookie': 'NRAGENT=tk=%s' % token }
response = target_application_manual_rum.get('/', headers=headers)
html = BeautifulSoup(response.body, 'html.parser')
header = html.html.head.script.string
content = html.html.body.p.string
footer = html.html.body.script.string
# Validate actual body content.
assert content == 'RESPONSE'
# Validate the insertion of RUM header.
assert header.find('NREUM HEADER') != -1
# Now validate the various fields of the footer. The fields are
# held by a JSON dictionary.
data = json.loads(footer.split('NREUM.info=')[1])
assert data['licenseKey'] == settings.browser_key
assert data['applicationID'] == settings.application_id
assert data['agent'] == settings.js_agent_file
assert data['beacon'] == settings.beacon
assert data['errorBeacon'] == settings.error_beacon
assert data['applicationTime'] >= 0
assert data['queueTime'] >= 0
obfuscation_key = settings.license_key[:13]
assert type(data['transactionName']) == type(u'')
txn_name = deobfuscate(data['transactionName'], obfuscation_key)
assert txn_name == u'WebTransaction/Uri/'
assert 'atts' not in data
_test_rum_ssl_for_http_is_none = {
'browser_monitoring.enabled': True,
'browser_monitoring.auto_instrument': False,
'browser_monitoring.ssl_for_http': None,
'js_agent_loader': u'<!-- NREUM HEADER -->',
}
@override_application_settings(_test_rum_ssl_for_http_is_none)
def test_ssl_for_http_is_none():
settings = application_settings()
assert settings.browser_monitoring.ssl_for_http is None
response = target_application_manual_rum.get('/')
html = BeautifulSoup(response.body, 'html.parser')
footer = html.html.body.script.string
data = json.loads(footer.split('NREUM.info=')[1])
assert 'sslForHttp' not in data
_test_rum_ssl_for_http_is_true = {
'browser_monitoring.enabled': True,
'browser_monitoring.auto_instrument': False,
'browser_monitoring.ssl_for_http': True,
'js_agent_loader': u'<!-- NREUM HEADER -->',
}
@override_application_settings(_test_rum_ssl_for_http_is_true)
def test_ssl_for_http_is_true():
settings = application_settings()
assert settings.browser_monitoring.ssl_for_http is True
response = target_application_manual_rum.get('/')
html = BeautifulSoup(response.body, 'html.parser')
footer = html.html.body.script.string
data = json.loads(footer.split('NREUM.info=')[1])
assert data['sslForHttp'] is True
_test_rum_ssl_for_http_is_false = {
'browser_monitoring.enabled': True,
'browser_monitoring.auto_instrument': False,
'browser_monitoring.ssl_for_http': False,
'js_agent_loader': u'<!-- NREUM HEADER -->',
}
@override_application_settings(_test_rum_ssl_for_http_is_false)
def test_ssl_for_http_is_false():
settings = application_settings()
assert settings.browser_monitoring.ssl_for_http is False
response = target_application_manual_rum.get('/')
html = BeautifulSoup(response.body, 'html.parser')
footer = html.html.body.script.string
data = json.loads(footer.split('NREUM.info=')[1])
assert data['sslForHttp'] is False
@asgi_application()
async def target_asgi_application_yield_single_no_head(scope, receive, send):
output = b'<html><body><p>RESPONSE</p></body></html>'
response_headers = [(b'content-type', b'text/html; charset=utf-8'),
(b'content-length', str(len(output)).encode('utf-8'))]
await send({"type": "http.response.start", "status": 200, "headers": response_headers})
await send({"type": "http.response.body", "body": output})
target_application_yield_single_no_head = AsgiTest(
target_asgi_application_yield_single_no_head)
_test_html_insertion_yield_single_no_head_settings = {
'browser_monitoring.enabled': True,
'browser_monitoring.auto_instrument': True,
'js_agent_loader': u'<!-- NREUM HEADER -->',
}
@override_application_settings(_test_html_insertion_yield_single_no_head_settings)
def test_html_insertion_yield_single_no_head():
response = target_application_yield_single_no_head.get('/')
assert response.status == 200
assert 'content-type' in response.headers
assert 'content-length' in response.headers
# The 'NREUM HEADER' value comes from our override for the header.
# The 'NREUM.info' value comes from the programmatically generated
# footer added by the agent.
assert b'NREUM HEADER' in response.body
assert b'NREUM.info' in response.body
@asgi_application()
async def target_asgi_application_yield_multi_no_head(scope, receive, send):
output = [ b'<html>', b'<body><p>RESPONSE</p></body></html>' ]
response_headers = [(b'content-type', b'text/html; charset=utf-8'),
(b'content-length', str(len(b''.join(output))).encode('utf-8'))]
await send({"type": "http.response.start", "status": 200, "headers": response_headers})
for data in output:
more_body = data is not output[-1]
await send({"type": "http.response.body", "body": data, "more_body": more_body})
target_application_yield_multi_no_head = AsgiTest(
target_asgi_application_yield_multi_no_head)
_test_html_insertion_yield_multi_no_head_settings = {
'browser_monitoring.enabled': True,
'browser_monitoring.auto_instrument': True,
'js_agent_loader': u'<!-- NREUM HEADER -->',
}
@override_application_settings(_test_html_insertion_yield_multi_no_head_settings)
def test_html_insertion_yield_multi_no_head():
response = target_application_yield_multi_no_head.get('/')
assert response.status == 200
assert 'content-type' in response.headers
assert 'content-length' in response.headers
# The 'NREUM HEADER' value comes from our override for the header.
# The 'NREUM.info' value comes from the programmatically generated
# footer added by the agent.
assert b'NREUM HEADER' in response.body
assert b'NREUM.info' in response.body
@asgi_application()
async def target_asgi_application_unnamed_attachment_header(scope, receive, send):
output = b'<html><body><p>RESPONSE</p></body></html>'
response_headers = [(b'content-type', b'text/html; charset=utf-8'),
(b'content-length', str(len(output)).encode('utf-8')),
(b'content-disposition', b'attachment')]
await send({"type": "http.response.start", "status": 200, "headers": response_headers})
await send({"type": "http.response.body", "body": output})
target_application_unnamed_attachment_header = AsgiTest(
target_asgi_application_unnamed_attachment_header)
_test_html_insertion_unnamed_attachment_header_settings = {
'browser_monitoring.enabled': True,
'browser_monitoring.auto_instrument': True,
'js_agent_loader': u'<!-- NREUM HEADER -->',
}
@override_application_settings(
_test_html_insertion_unnamed_attachment_header_settings)
def test_html_insertion_unnamed_attachment_header():
response = target_application_unnamed_attachment_header.get('/')
assert response.status == 200
assert 'content-type' in response.headers
assert 'content-length' in response.headers
assert 'content-disposition' in response.headers
# The 'NREUM HEADER' value comes from our override for the header.
# The 'NREUM.info' value comes from the programmatically generated
# footer added by the agent.
assert b'NREUM HEADER' not in response.body
assert b'NREUM.info' not in response.body
@asgi_application()
async def target_asgi_application_named_attachment_header(scope, receive, send):
output = b'<html><body><p>RESPONSE</p></body></html>'
response_headers = [(b'content-type', b'text/html; charset=utf-8'),
(b'content-length', str(len(output)).encode('utf-8')),
(b'content-disposition', b'Attachment; filename="X"')]
await send({"type": "http.response.start", "status": 200, "headers": response_headers})
await send({"type": "http.response.body", "body": output})
target_application_named_attachment_header = AsgiTest(
target_asgi_application_named_attachment_header)
_test_html_insertion_named_attachment_header_settings = {
'browser_monitoring.enabled': True,
'browser_monitoring.auto_instrument': True,
'js_agent_loader': u'<!-- NREUM HEADER -->',
}
@override_application_settings(
_test_html_insertion_named_attachment_header_settings)
def test_html_insertion_named_attachment_header():
response = target_application_named_attachment_header.get('/')
assert response.status == 200
assert 'content-type' in response.headers
assert 'content-length' in response.headers
assert 'content-disposition' in response.headers
# The 'NREUM HEADER' value comes from our override for the header.
# The 'NREUM.info' value comes from the programmatically generated
# footer added by the agent.
assert b'NREUM HEADER' not in response.body
assert b'NREUM.info' not in response.body
@asgi_application()
async def target_asgi_application_inline_attachment_header(scope, receive, send):
output = b'<html><body><p>RESPONSE</p></body></html>'
response_headers = [(b'content-type', b'text/html; charset=utf-8'),
(b'content-length', str(len(output)).encode('utf-8')),
(b'content-disposition', b'inline; filename="attachment"')]
await send({"type": "http.response.start", "status": 200, "headers": response_headers})
await send({"type": "http.response.body", "body": output})
target_application_inline_attachment_header = AsgiTest(
target_asgi_application_inline_attachment_header)
_test_html_insertion_inline_attachment_header_settings = {
'browser_monitoring.enabled': True,
'browser_monitoring.auto_instrument': True,
'js_agent_loader': u'<!-- NREUM HEADER -->',
}
@override_application_settings(
_test_html_insertion_inline_attachment_header_settings)
def test_html_insertion_inline_attachment_header():
response = target_application_inline_attachment_header.get('/')
assert response.status == 200
assert 'content-type' in response.headers
assert 'content-length' in response.headers
assert 'content-disposition' in response.headers
# The 'NREUM HEADER' value comes from our override for the header.
# The 'NREUM.info' value comes from the programmatically generated
# footer added by the agent.
assert b'NREUM HEADER' in response.body
assert b'NREUM.info' in response.body
@asgi_application()
async def target_asgi_application_empty(scope, receive, send):
status = '200 OK'
response_headers = [(b'content-type', b'text/html; charset=utf-8'),
(b'content-length', b'0')]
await send({"type": "http.response.start", "status": 200, "headers": response_headers})
await send({"type": "http.response.body"})
target_application_empty = AsgiTest(
target_asgi_application_empty)
_test_html_insertion_empty_settings = {
'browser_monitoring.enabled': True,
'browser_monitoring.auto_instrument': True,
'js_agent_loader': u'<!-- NREUM HEADER -->',
}
@override_application_settings(
_test_html_insertion_empty_settings)
def test_html_insertion_empty():
response = target_application_empty.get('/')
assert response.status == 200
assert 'content-type' in response.headers
assert 'content-length' in response.headers
# The 'NREUM HEADER' value comes from our override for the header.
# The 'NREUM.info' value comes from the programmatically generated
# footer added by the agent.
assert b'NREUM HEADER' not in response.body
assert b'NREUM.info' not in response.body
assert len(response.body) == 0
@asgi_application()
async def target_asgi_application_single_empty_string(scope, receive, send):
response_headers = [(b'content-type', b'text/html; charset=utf-8'),
(b'content-length', b'0')]
await send({"type": "http.response.start", "status": 200, "headers": response_headers})
await send({"type": "http.response.body", "body": b""})
target_application_single_empty_string = AsgiTest(
target_asgi_application_single_empty_string)
_test_html_insertion_single_empty_string_settings = {
'browser_monitoring.enabled': True,
'browser_monitoring.auto_instrument': True,
'js_agent_loader': u'<!-- NREUM HEADER -->',
}
@override_application_settings(
_test_html_insertion_single_empty_string_settings)
def test_html_insertion_single_empty_string():
response = target_application_single_empty_string.get('/')
assert response.status == 200
assert 'content-type' in response.headers
assert 'content-length' in response.headers
# The 'NREUM HEADER' value comes from our override for the header.
# The 'NREUM.info' value comes from the programmatically generated
# footer added by the agent.
assert b'NREUM HEADER' not in response.body
assert b'NREUM.info' not in response.body
assert len(response.body) == 0
@asgi_application()
async def target_asgi_application_multiple_empty_string(scope, receive, send):
response_headers = [(b'content-type', b'text/html; charset=utf-8'),
(b'content-length', b'0')]
await send({"type": "http.response.start", "status": 200, "headers": response_headers})
await send({"type": "http.response.body", "body": b"", "more_body": True})
await send({"type": "http.response.body", "body": b""})
target_application_multiple_empty_string = AsgiTest(
target_asgi_application_multiple_empty_string)
_test_html_insertion_multiple_empty_string_settings = {
'browser_monitoring.enabled': True,
'browser_monitoring.auto_instrument': True,
'js_agent_loader': u'<!-- NREUM HEADER -->',
}
@override_application_settings(
_test_html_insertion_multiple_empty_string_settings)
def test_html_insertion_multiple_empty_string():
response = target_application_multiple_empty_string.get('/')
assert response.status == 200
assert 'content-type' in response.headers
assert 'content-length' in response.headers
# The 'NREUM HEADER' value comes from our override for the header.
# The 'NREUM.info' value comes from the programmatically generated
# footer added by the agent.
assert b'NREUM HEADER' not in response.body
assert b'NREUM.info' not in response.body
assert len(response.body) == 0
@asgi_application()
async def target_asgi_application_single_large_prelude(scope, receive, send):
output = 64*1024*b' ' + b'<body></body>'
response_headers = [(b'content-type', b'text/html; charset=utf-8'),
(b'content-length', str(len(output)).encode("utf-8"))]
await send({"type": "http.response.start", "status": 200, "headers": response_headers})
await send({"type": "http.response.body", "body": output})
target_application_single_large_prelude = AsgiTest(
target_asgi_application_single_large_prelude)
_test_html_insertion_single_large_prelude_settings = {
'browser_monitoring.enabled': True,
'browser_monitoring.auto_instrument': True,
'js_agent_loader': u'<!-- NREUM HEADER -->',
}
@override_application_settings(
_test_html_insertion_single_large_prelude_settings)
def test_html_insertion_single_large_prelude():
response = target_application_single_large_prelude.get('/')
assert response.status == 200
# The 'NREUM HEADER' value comes from our override for the header.
# The 'NREUM.info' value comes from the programmatically generated
# footer added by the agent.
assert 'content-type' in response.headers
assert 'content-length' in response.headers
assert b'NREUM HEADER' not in response.body
assert b'NREUM.info' not in response.body
output = [32*1024*b' ', 32*1024*b' ', b'<body></body>']
assert len(response.body) == len(b''.join(output))
@asgi_application()
async def target_asgi_application_multi_large_prelude(scope, receive, send):
output = [32*1024*b' ', 32*1024*b' ', b'<body></body>']
response_headers = [(b'content-type', b'text/html; charset=utf-8'),
(b'content-length', str(len(b''.join(output))).encode("utf-8"))]
await send({"type": "http.response.start", "status": 200, "headers": response_headers})
for data in output:
more_body = data is not output[-1]
await send({"type": "http.response.body", "body": data, "more_body": more_body})
target_application_multi_large_prelude = AsgiTest(
target_asgi_application_multi_large_prelude)
_test_html_insertion_multi_large_prelude_settings = {
'browser_monitoring.enabled': True,
'browser_monitoring.auto_instrument': True,
'js_agent_loader': u'<!-- NREUM HEADER -->',
}
@override_application_settings(
_test_html_insertion_multi_large_prelude_settings)
def test_html_insertion_multi_large_prelude():
response = target_application_multi_large_prelude.get('/')
assert response.status == 200
# The 'NREUM HEADER' value comes from our override for the header.
# The 'NREUM.info' value comes from the programmatically generated
# footer added by the agent.
assert 'content-type' in response.headers
assert 'content-length' in response.headers
assert b'NREUM HEADER' not in response.body
assert b'NREUM.info' not in response.body
output = [32*1024*b' ', 32*1024*b' ', b'<body></body>']
assert len(response.body) == len(b''.join(output))
@asgi_application()
async def target_asgi_application_yield_before_start(scope, receive, send):
# This is not legal but we should see what happens with our middleware
await send({"type": "http.response.body", "body": b"", "more_body": True})
output = b'<html><body><p>RESPONSE</p></body></html>'
response_headers = [(b'content-type', b'text/html; charset=utf-8'),
(b'content-length', str(len(output)).encode("utf-8"))]
await send({"type": "http.response.start", "status": 200, "headers": response_headers})
await send({"type": "http.response.body", "body": output})
target_application_yield_before_start = AsgiTest(
target_asgi_application_yield_before_start)
_test_html_insertion_yield_before_start_settings = {
'browser_monitoring.enabled': True,
'browser_monitoring.auto_instrument': True,
'js_agent_loader': u'<!-- NREUM HEADER -->',
}
@override_application_settings(_test_html_insertion_yield_before_start_settings)
def test_html_insertion_yield_before_start():
# The application should complete as pass through, but an assertion error
# would be raised in the AsgiTest class
with pytest.raises(AssertionError):
target_application_yield_before_start.get('/')
@asgi_application()
async def target_asgi_application_start_yield_start(scope, receive, send):
output = b'<html><body><p>RESPONSE</p></body></html>'
response_headers = [(b'content-type', b'text/html; charset=utf-8'),
(b'content-length', str(len(output)).encode("utf-8"))]
await send({"type": "http.response.start", "status": 200, "headers": response_headers})
await send({"type": "http.response.body", "body": b""})
await send({"type": "http.response.start", "status": 200, "headers": response_headers})
target_application_start_yield_start = AsgiTest(
target_asgi_application_start_yield_start)
_test_html_insertion_start_yield_start_settings = {
'browser_monitoring.enabled': True,
'browser_monitoring.auto_instrument': True,
'js_agent_loader': u'<!-- NREUM HEADER -->',
}
@override_application_settings(_test_html_insertion_start_yield_start_settings)
def test_html_insertion_start_yield_start():
# The application should complete as pass through, but an assertion error
# would be raised in the AsgiTest class
with pytest.raises(AssertionError):
target_application_start_yield_start.get('/')
@asgi_application()
async def target_asgi_application_invalid_content_length(scope, receive, send):
output = b'<html><body><p>RESPONSE</p></body></html>'
response_headers = [(b'content-type', b'text/html; charset=utf-8'),
(b'content-length', b'XXX')]
await send({"type": "http.response.start", "status": 200, "headers": response_headers})
await send({"type": "http.response.body", "body": output})
target_application_invalid_content_length = AsgiTest(
target_asgi_application_invalid_content_length)
_test_html_insertion_invalid_content_length_settings = {
'browser_monitoring.enabled': True,
'browser_monitoring.auto_instrument': True,
'js_agent_loader': u'<!-- NREUM HEADER -->',
}
@override_application_settings(_test_html_insertion_invalid_content_length_settings)
def test_html_insertion_invalid_content_length():
response = target_application_invalid_content_length.get('/')
assert response.status == 200
assert 'content-type' in response.headers
assert 'content-length' in response.headers
assert response.headers['content-length'] == 'XXX'
assert b'NREUM HEADER' not in response.body
assert b'NREUM.info' not in response.body
@asgi_application()
async def target_asgi_application_content_encoding(scope, receive, send):
output = b'<html><body><p>RESPONSE</p></body></html>'
response_headers = [(b'content-type', b'text/html; charset=utf-8'),
(b'content-length', str(len(output)).encode("utf-8")),
(b'content-encoding', b'identity')]
await send({"type": "http.response.start", "status": 200, "headers": response_headers})
await send({"type": "http.response.body", "body": output})
target_application_content_encoding = AsgiTest(
target_asgi_application_content_encoding)
_test_html_insertion_content_encoding_settings = {
'browser_monitoring.enabled': True,
'browser_monitoring.auto_instrument': True,
'js_agent_loader': u'<!-- NREUM HEADER -->',
}
@override_application_settings(_test_html_insertion_content_encoding_settings)
def test_html_insertion_content_encoding():
response = target_application_content_encoding.get('/')
assert response.status == 200
# Technically 'identity' should not be used in Content-Encoding
# but clients will still accept it. Use this fact to disable auto
# RUM for this test. Other option is to compress the response
# and use 'gzip'.
assert 'content-type' in response.headers
assert 'content-length' in response.headers
assert response.headers['content-encoding'] == 'identity'
assert b'NREUM HEADER' not in response.body
assert b'NREUM.info' not in response.body
@asgi_application()
async def target_asgi_application_no_content_type(scope, receive, send):
output = b'<html><body><p>RESPONSE</p></body></html>'
response_headers = [(b'content-length', str(len(output)).encode("utf-8"))]
await send({"type": "http.response.start", "status": 200, "headers": response_headers})
await send({"type": "http.response.body", "body": output})
target_application_no_content_type = AsgiTest(
target_asgi_application_no_content_type)
_test_html_insertion_no_content_type_settings = {
'browser_monitoring.enabled': True,
'browser_monitoring.auto_instrument': True,
'js_agent_loader': u'<!-- NREUM HEADER -->',
}
@override_application_settings(_test_html_insertion_no_content_type_settings)
def test_html_insertion_no_content_type():
response = target_application_no_content_type.get('/')
assert response.status == 200
assert 'content-type' not in response.headers
assert 'content-length' in response.headers
assert b'NREUM HEADER' not in response.body
assert b'NREUM.info' not in response.body
@asgi_application()
async def target_asgi_application_plain_text(scope, receive, send):
output = b'RESPONSE'
response_headers = [
(b'content-type', b'text/plain'),
(b'content-length', str(len(output)).encode("utf-8"))]
await send({"type": "http.response.start", "status": 200, "headers": response_headers})
await send({"type": "http.response.body", "body": output})
target_application_plain_text = AsgiTest(
target_asgi_application_plain_text)
_test_html_insertion_plain_text_settings = {
'browser_monitoring.enabled': True,
'browser_monitoring.auto_instrument': True,
'js_agent_loader': u'<!-- NREUM HEADER -->',
}
@override_application_settings(_test_html_insertion_plain_text_settings)
def test_html_insertion_plain_text():
response = target_application_plain_text.get('/')
assert response.status == 200
assert 'content-type' in response.headers
assert 'content-length' in response.headers
assert b'NREUM HEADER' not in response.body
assert b'NREUM.info' not in response.body
@asgi_application()
async def target_asgi_application_param_on_close(scope, receive, send):
output = b'<html><body><p>RESPONSE</p></body></html>'
response_headers = [
(b'content-type', b'text/html; charset=utf-8'),
(b'content-length', str(len(output)).encode("utf-8"))]
await send({"type": "http.response.start", "status": 200, "headers": response_headers})
try:
await send({"type": "http.response.body", "body": output})
return
finally:
add_custom_parameter('key', 'value')
target_application_param_on_close = AsgiTest(
target_asgi_application_param_on_close)
_test_html_insertion_param_on_close_settings = {
'browser_monitoring.enabled': True,
'browser_monitoring.auto_instrument': True,
'js_agent_loader': u'<!-- NREUM HEADER -->',
}
@override_application_settings(_test_html_insertion_param_on_close_settings)
@validate_custom_parameters(required_params=[('key', 'value')])
def test_html_insertion_param_on_close():
response = target_application_param_on_close.get('/')
assert response.status == 200
assert b'NREUM HEADER' in response.body
assert b'NREUM.info' in response.body
@asgi_application()
async def target_asgi_application_param_on_error(scope, receive, send):
output = b'<html><body><p>RESPONSE</p></body></html>'
response_headers = [
(b'content-type', b'text/html; charset=utf-8'),
(b'content-length', str(len(output)).encode("utf-8"))]
await send({"type": "http.response.start", "status": 200, "headers": response_headers})
try:
raise RuntimeError('ERROR')
finally:
add_custom_parameter('key', 'value')
target_application_param_on_error = AsgiTest(
target_asgi_application_param_on_error)
_test_html_insertion_param_on_error_settings = {
'browser_monitoring.enabled': True,
'browser_monitoring.auto_instrument': True,
'js_agent_loader': u'<!-- NREUM HEADER -->',
}
@override_application_settings(_test_html_insertion_param_on_error_settings)
@validate_transaction_errors(errors=[_runtime_error_name])
@validate_custom_parameters(required_params=[('key', 'value')])
def test_html_insertion_param_on_error():
try:
target_application_param_on_error.get('/')
except RuntimeError:
pass
@asgi_application()
async def target_asgi_application_disable_autorum_via_api(scope, receive, send):
output = b'<html><body><p>RESPONSE</p></body></html>'
disable_browser_autorum()
response_headers = [(b'content-type', b'text/html; charset=utf-8'),
(b'content-length', str(len(output)).encode("utf-8"))]
await send({"type": "http.response.start", "status": 200, "headers": response_headers})
await send({"type": "http.response.body", "body": output})
target_application_disable_autorum_via_api = AsgiTest(
target_asgi_application_disable_autorum_via_api)
_test_html_insertion_disable_autorum_via_api_settings = {
'browser_monitoring.enabled': True,
'browser_monitoring.auto_instrument': True,
'js_agent_loader': u'<!-- NREUM HEADER -->',
}
@override_application_settings(
_test_html_insertion_disable_autorum_via_api_settings)
def test_html_insertion_disable_autorum_via_api():
response = target_application_disable_autorum_via_api.get('/')
assert response.status == 200
assert 'content-type' in response.headers
assert 'content-length' in response.headers
# The 'NREUM HEADER' value comes from our override for the header.
# The 'NREUM.info' value comes from the programmatically generated
# footer added by the agent.
assert b'NREUM HEADER' not in response.body
assert b'NREUM.info' not in response.body
@asgi_application()
async def target_asgi_application_manual_rum_insertion(scope, receive, send):
output = b'<html><body><p>RESPONSE</p></body></html>'
header = get_browser_timing_header()
footer = get_browser_timing_footer()
header = get_browser_timing_header()
footer = get_browser_timing_footer()
assert header == ''
assert footer == ''
response_headers = [(b'content-type', b'text/html; charset=utf-8'),
(b'content-length', str(len(output)).encode("utf-8"))]
await send({"type": "http.response.start", "status": 200, "headers": response_headers})
await send({"type": "http.response.body", "body": output})
target_application_manual_rum_insertion = AsgiTest(
target_asgi_application_manual_rum_insertion)
_test_html_insertion_manual_rum_insertion_settings = {
'browser_monitoring.enabled': True,
'browser_monitoring.auto_instrument': True,
'js_agent_loader': u'<!-- NREUM HEADER -->',
}
@override_application_settings(
_test_html_insertion_manual_rum_insertion_settings)
def test_html_insertion_manual_rum_insertion():
response = target_application_manual_rum_insertion.get('/')
assert response.status == 200
assert 'content-type' in response.headers
assert 'content-length' in response.headers
# The 'NREUM HEADER' value comes from our override for the header.
# The 'NREUM.info' value comes from the programmatically generated
# footer added by the agent.
assert b'NREUM HEADER' not in response.body
assert b'NREUM.info' not in response.body
| 37.480549
| 91
| 0.723335
| 4,201
| 32,758
| 5.361343
| 0.062128
| 0.053279
| 0.045287
| 0.033211
| 0.879279
| 0.841229
| 0.767482
| 0.711761
| 0.690583
| 0.678107
| 0
| 0.008501
| 0.156115
| 32,758
| 873
| 92
| 37.523482
| 0.806251
| 0.094175
| 0
| 0.560684
| 0
| 0.001709
| 0.235832
| 0.073624
| 0
| 0
| 0
| 0
| 0.208547
| 1
| 0.041026
| false
| 0.001709
| 0.018803
| 0
| 0.061538
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
a7fba8dd77e87ad1b06a627871af64f972741a90
| 69
|
py
|
Python
|
SVM Classification/svm_code/svm_class/__init__.py
|
herr-kistler/machine-learning
|
1ec88fbafec6a0ccbfe788b462c804b8e981d851
|
[
"MIT"
] | 1
|
2020-04-08T06:23:47.000Z
|
2020-04-08T06:23:47.000Z
|
SVM Classification/svm_code/svm_class/__init__.py
|
herr-kistler/machine-learning
|
1ec88fbafec6a0ccbfe788b462c804b8e981d851
|
[
"MIT"
] | null | null | null |
SVM Classification/svm_code/svm_class/__init__.py
|
herr-kistler/machine-learning
|
1ec88fbafec6a0ccbfe788b462c804b8e981d851
|
[
"MIT"
] | null | null | null |
from .svm import SVMTrainer, SVMPredictor
from .kernel import Kernel
| 23
| 41
| 0.826087
| 9
| 69
| 6.333333
| 0.666667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.130435
| 69
| 2
| 42
| 34.5
| 0.95
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
c502ce08bcf5237aa0039d5c0c80fa59fe3c098c
| 127
|
py
|
Python
|
disvoice/articulation/__init__.py
|
jcvasquezc/Disvoice
|
ed9dbd42c3f01f041f90848f96004be8ebb78d8d
|
[
"MIT"
] | null | null | null |
disvoice/articulation/__init__.py
|
jcvasquezc/Disvoice
|
ed9dbd42c3f01f041f90848f96004be8ebb78d8d
|
[
"MIT"
] | null | null | null |
disvoice/articulation/__init__.py
|
jcvasquezc/Disvoice
|
ed9dbd42c3f01f041f90848f96004be8ebb78d8d
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
from .articulation import Articulation
from .articulation_functions import *
__all__=["Articulation"]
| 21.166667
| 38
| 0.748031
| 13
| 127
| 6.923077
| 0.615385
| 0.355556
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.008929
| 0.11811
| 127
| 6
| 39
| 21.166667
| 0.794643
| 0.165354
| 0
| 0
| 0
| 0
| 0.114286
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.666667
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
c5247f9bbbac6e9d128d677382c6cde793165c5a
| 156
|
py
|
Python
|
office365/outlook/mail/mailbox_settings.py
|
rikeshtailor/Office365-REST-Python-Client
|
ca7bfa1b22212137bb4e984c0457632163e89a43
|
[
"MIT"
] | null | null | null |
office365/outlook/mail/mailbox_settings.py
|
rikeshtailor/Office365-REST-Python-Client
|
ca7bfa1b22212137bb4e984c0457632163e89a43
|
[
"MIT"
] | null | null | null |
office365/outlook/mail/mailbox_settings.py
|
rikeshtailor/Office365-REST-Python-Client
|
ca7bfa1b22212137bb4e984c0457632163e89a43
|
[
"MIT"
] | null | null | null |
from office365.runtime.client_value import ClientValue
class MailboxSettings(ClientValue):
"""Settings for the primary mailbox of a user."""
pass
| 22.285714
| 54
| 0.762821
| 19
| 156
| 6.210526
| 0.947368
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.022901
| 0.160256
| 156
| 6
| 55
| 26
| 0.877863
| 0.275641
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0.333333
| 0.333333
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 0
| 0
|
0
| 5
|
c556ddb4ab114e2577a986c282600fd08579d625
| 62
|
py
|
Python
|
winejournal/blueprints/categories/__init__.py
|
rickandersonaia/wine-journal
|
9664c8ed8df9eb853562c500e888490a61a6e44d
|
[
"CC-BY-4.0"
] | null | null | null |
winejournal/blueprints/categories/__init__.py
|
rickandersonaia/wine-journal
|
9664c8ed8df9eb853562c500e888490a61a6e44d
|
[
"CC-BY-4.0"
] | 5
|
2021-02-08T20:22:06.000Z
|
2021-09-07T23:52:33.000Z
|
winejournal/blueprints/categories/__init__.py
|
rickandersonaia/wine-journal
|
9664c8ed8df9eb853562c500e888490a61a6e44d
|
[
"CC-BY-4.0"
] | 2
|
2018-06-27T15:03:38.000Z
|
2020-03-14T15:40:34.000Z
|
from winejournal.blueprints.categories.views import categories
| 62
| 62
| 0.903226
| 7
| 62
| 8
| 0.857143
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.048387
| 62
| 1
| 62
| 62
| 0.949153
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 1
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 1
|
0
| 5
|
c56fdf1a222a53558fe508011166458e152eb33c
| 16,198
|
py
|
Python
|
src/datamgr/metadata/metadata_biz/types/entities/by_biz/data_model.py
|
Chromico/bk-base
|
be822d9bbee544a958bed4831348185a75604791
|
[
"MIT"
] | 84
|
2021-06-30T06:20:23.000Z
|
2022-03-22T03:05:49.000Z
|
src/datamgr/metadata/metadata_biz/types/entities/by_biz/data_model.py
|
Chromico/bk-base
|
be822d9bbee544a958bed4831348185a75604791
|
[
"MIT"
] | 7
|
2021-06-30T06:21:16.000Z
|
2022-03-29T07:36:13.000Z
|
src/datamgr/metadata/metadata_biz/types/entities/by_biz/data_model.py
|
Chromico/bk-base
|
be822d9bbee544a958bed4831348185a75604791
|
[
"MIT"
] | 40
|
2021-06-30T06:21:26.000Z
|
2022-03-29T12:42:26.000Z
|
# -*- coding: utf-8 -*-
"""
Tencent is pleased to support the open source community by making BK-BASE 蓝鲸基础平台 available.
Copyright (C) 2021 THL A29 Limited, a Tencent company. All rights reserved.
BK-BASE 蓝鲸基础平台 is licensed under the MIT License.
License for BK-BASE 蓝鲸基础平台:
--------------------------------------------------------------------
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software,
and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial
portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT
LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN
NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
import sys
from datetime import datetime
import attr
from metadata.type_system.basic_type import AddOn, Entity, Relation
from metadata.type_system.core import as_metadata
from metadata_biz.types.entities.add_on import FieldTypeConfig
from metadata_biz.types.entities.data_set import DataSet, ResultTable
from metadata_biz.types.entities.management import ProjectInfo
python_string_type = str
default_json = '{}'
@as_metadata
@attr.s
class DmmModelInfo(Entity):
model_id = attr.ib(type=int, metadata={'identifier': True, 'dgraph': {'index': ['int']}})
model_name = attr.ib(type=python_string_type, metadata={'dgraph': {'index': ['exact', 'trigram']}})
model_alias = attr.ib(type=python_string_type)
model_type = attr.ib(type=python_string_type)
project_id = attr.ib(type=int)
project = attr.ib(type=ProjectInfo)
description = attr.ib(type=python_string_type)
created_by = attr.ib(type=python_string_type, metadata={'dgraph': {'index': ['exact', 'trigram']}})
updated_by = attr.ib(type=python_string_type, metadata={'dgraph': {'index': ['exact', 'trigram']}})
table_name = attr.ib(type=python_string_type, metadata={'dgraph': {'index': ['exact', 'trigram']}})
table_alias = attr.ib(type=python_string_type)
latest_version_id = attr.ib(type=python_string_type)
publish_status = attr.ib(type=python_string_type, metadata={'dgraph': {'index': ['exact']}})
active_status = attr.ib(type=python_string_type, default='active', metadata={'dgraph': {'index': ['exact']}})
step_id = attr.ib(type=int, default=0)
created_at = attr.ib(type=datetime, factory=datetime.now)
updated_at = attr.ib(type=datetime, factory=datetime.now)
@as_metadata
@attr.s
class DmmModelField(AddOn):
"""
数据模型字段
"""
id = attr.ib(type=int, metadata={'identifier': True, 'dgraph': {'index': ['int']}})
model_id = attr.ib(type=int)
model = attr.ib(type=DmmModelInfo)
field_name = attr.ib(type=python_string_type)
field_alias = attr.ib(type=python_string_type)
field_type = attr.ib(type=python_string_type)
field_type_obj = attr.ib(type=FieldTypeConfig)
field_category = attr.ib(type=python_string_type)
is_primary_key = attr.ib(type=bool)
description = attr.ib(type=python_string_type)
field_constraint_content = attr.ib(type=python_string_type)
field_clean_content = attr.ib(type=python_string_type)
origin_fields = attr.ib(type=python_string_type)
field_index = attr.ib(type=int)
source_model_id = attr.ib(type=int)
source_model = attr.ib(type=DmmModelInfo)
source_field_name = attr.ib(type=python_string_type)
created_by = attr.ib(type=python_string_type, metadata={'dgraph': {'index': ['exact', 'trigram']}})
updated_by = attr.ib(type=python_string_type, metadata={'dgraph': {'index': ['exact', 'trigram']}})
created_at = attr.ib(type=datetime, factory=datetime.now)
updated_at = attr.ib(type=datetime, factory=datetime.now)
@as_metadata
@attr.s
class DmmModelTop(DataSet):
"""
数据模型用户置顶表
"""
id = attr.ib(type=int, metadata={'identifier': True, 'dgraph': {'index': ['int']}})
model_id = attr.ib(type=int)
model = attr.ib(type=DmmModelInfo)
created_by = attr.ib(type=python_string_type, metadata={'dgraph': {'index': ['exact', 'trigram']}})
updated_by = attr.ib(type=python_string_type, metadata={'dgraph': {'index': ['exact', 'trigram']}})
created_at = attr.ib(type=datetime, factory=datetime.now)
updated_at = attr.ib(type=datetime, factory=datetime.now)
@as_metadata
@attr.s
class DmmFieldConstraintConfig(AddOn):
"""
字段约束表
"""
constraint_id = attr.ib(type=python_string_type, metadata={'identifier': True, 'dgraph': {'index': ['exact']}})
constraint_type = attr.ib(type=python_string_type, metadata={'dgraph': {'index': ['exact']}})
constraint_name = attr.ib(type=python_string_type)
constraint_value = attr.ib(type=python_string_type)
validator = attr.ib(type=python_string_type)
description = attr.ib(type=python_string_type)
editable = attr.ib(type=bool)
allow_field_type = attr.ib(type=python_string_type)
@as_metadata
@attr.s
class DmmModelRelation(Relation):
"""
数据模型关系表
"""
id = attr.ib(type=int, metadata={'identifier': True, 'dgraph': {'index': ['int']}})
model_id = attr.ib(type=int, metadata={'dgraph': {'index': ['int']}})
model = attr.ib(type=DmmModelInfo)
field_name = attr.ib(type=python_string_type)
related_model_id = attr.ib(type=int)
related_model = attr.ib(type=DmmModelInfo)
related_field_name = attr.ib(type=python_string_type)
related_model_version_id = attr.ib(type=python_string_type)
created_by = attr.ib(type=python_string_type, metadata={'dgraph': {'index': ['exact', 'trigram']}})
updated_by = attr.ib(type=python_string_type, metadata={'dgraph': {'index': ['exact', 'trigram']}})
related_method = attr.ib(type=python_string_type, default='left-join')
created_at = attr.ib(type=datetime, factory=datetime.now)
updated_at = attr.ib(type=datetime, factory=datetime.now)
@as_metadata
@attr.s
class DmmModelRelease(Entity):
"""
数据模型发布记录
"""
__tablename__ = 'dmm_model_release'
version_id = attr.ib(type=python_string_type, metadata={'identifier': True, 'dgraph': {'index': ['exact']}})
version_log = attr.ib(type=python_string_type)
model_id = attr.ib(type=int, metadata={'dgraph': {'index': ['int']}})
model = attr.ib(type=DmmModelInfo)
model_content = attr.ib(type=python_string_type)
created_by = attr.ib(type=python_string_type, metadata={'dgraph': {'index': ['exact', 'trigram']}})
updated_by = attr.ib(type=python_string_type, metadata={'dgraph': {'index': ['exact', 'trigram']}})
created_at = attr.ib(type=datetime, factory=datetime.now)
updated_at = attr.ib(type=datetime, factory=datetime.now)
@as_metadata
@attr.s
class DmmModelCalculationAtom(Entity):
"""
数据模型统计口径表
"""
calculation_atom_name = attr.ib(
type=python_string_type, metadata={'identifier': True, 'dgraph': {'index': ['exact']}}
)
calculation_atom_alias = attr.ib(type=python_string_type)
model_id = attr.ib(type=int)
model = attr.ib(type=DmmModelInfo)
project_id = attr.ib(type=int)
project = attr.ib(type=ProjectInfo)
description = attr.ib(type=python_string_type)
field_type = attr.ib(type=python_string_type)
field_type_obj = attr.ib(type=FieldTypeConfig)
calculation_content = attr.ib(type=python_string_type)
calculation_formula = attr.ib(type=python_string_type)
origin_fields = attr.ib(type=python_string_type)
created_by = attr.ib(type=python_string_type, metadata={'dgraph': {'index': ['exact', 'trigram']}})
updated_by = attr.ib(type=python_string_type, metadata={'dgraph': {'index': ['exact', 'trigram']}})
created_at = attr.ib(type=datetime, factory=datetime.now)
updated_at = attr.ib(type=datetime, factory=datetime.now)
@as_metadata
@attr.s
class DmmModelCalculationAtomImage(Relation):
"""
数据模型统计口径引用表
"""
id = attr.ib(type=int, metadata={'identifier': True, 'dgraph': {'index': ['int']}})
model_id = attr.ib(type=int)
model = attr.ib(type=DmmModelInfo)
project_id = attr.ib(type=int)
calculation_atom_name = attr.ib(type=python_string_type)
calculation_atom = attr.ib(type=DmmModelCalculationAtom)
created_by = attr.ib(type=python_string_type, metadata={'dgraph': {'index': ['exact', 'trigram']}})
updated_by = attr.ib(type=python_string_type, metadata={'dgraph': {'index': ['exact', 'trigram']}})
created_at = attr.ib(type=datetime, factory=datetime.now)
updated_at = attr.ib(type=datetime, factory=datetime.now)
@as_metadata
@attr.s
class DmmModelIndicator(Entity):
"""
数据模型指标表
"""
indicator_name = attr.ib(type=python_string_type, metadata={'identifier': True, 'dgraph': {'index': ['exact']}})
indicator_alias = attr.ib(type=python_string_type)
model_id = attr.ib(type=int)
model = attr.ib(type=DmmModelInfo)
project_id = attr.ib(type=int)
project = attr.ib(type=ProjectInfo)
description = attr.ib(type=python_string_type)
calculation_atom_name = attr.ib(type=python_string_type)
calculation_atom = attr.ib(type=DmmModelCalculationAtom)
aggregation_fields = attr.ib(type=python_string_type)
filter_formula = attr.ib(type=python_string_type)
condition_fields = attr.ib(type=python_string_type)
scheduling_type = attr.ib(type=python_string_type)
scheduling_content = attr.ib(type=python_string_type)
parent_indicator_name = attr.ib(type=python_string_type)
hash = attr.ib(type=python_string_type)
created_by = attr.ib(type=python_string_type, metadata={'dgraph': {'index': ['exact', 'trigram']}})
updated_by = attr.ib(type=python_string_type, metadata={'dgraph': {'index': ['exact', 'trigram']}})
created_at = attr.ib(type=datetime, factory=datetime.now)
updated_at = attr.ib(type=datetime, factory=datetime.now)
@as_metadata
@attr.s
class DmmCalculationFunctionConfig(AddOn):
"""
SQL 统计函数表
"""
function_name = attr.ib(type=python_string_type, metadata={'identifier': True, 'dgraph': {'index': ['exact']}})
output_type = attr.ib(type=python_string_type)
allow_field_type = attr.ib(type=python_string_type)
@as_metadata
@attr.s
class DmmModelInstance(Entity):
"""
数据模型实例
在数据开发阶段,基于已构建的数据模型创建的任务,将作为该数据模型应用阶段的实例,每个模型应用实例会包含一个主表和多个指标
"""
# 实例配置和属性
instance_id = attr.ib(type=int, metadata={'identifier': True, 'dgraph': {'index': ['int']}})
model_id = attr.ib(type=int)
model = attr.ib(type=DmmModelInfo)
project_id = attr.ib(type=int)
project = attr.ib(type=ProjectInfo)
version_id = attr.ib(type=python_string_type)
# 任务相关配置
flow_id = attr.ib(type=python_string_type)
created_by = attr.ib(type=python_string_type, metadata={'dgraph': {'index': ['exact', 'trigram']}})
updated_by = attr.ib(type=python_string_type, metadata={'dgraph': {'index': ['exact', 'trigram']}})
created_at = attr.ib(type=datetime, factory=datetime.now)
updated_at = attr.ib(type=datetime, factory=datetime.now)
@as_metadata
@attr.s
class DmmModelInstanceTable(DataSet):
"""
数据模型实例主表
即明细数据表
"""
# 主表配置及属性
result_table_id = attr.ib(type=python_string_type, metadata={'identifier': True, 'dgraph': {'index': ['exact']}})
result_table = attr.ib(type=ResultTable)
bk_biz_id = attr.ib(type=int)
instance_id = attr.ib(type=int)
instance = attr.ib(type=DmmModelInstance)
model_id = attr.ib(type=int)
flow_node_id = attr.ib(type=int)
created_by = attr.ib(type=python_string_type, metadata={'dgraph': {'index': ['exact', 'trigram']}})
updated_by = attr.ib(type=python_string_type, metadata={'dgraph': {'index': ['exact', 'trigram']}})
created_at = attr.ib(type=datetime, factory=datetime.now)
updated_at = attr.ib(type=datetime, factory=datetime.now)
@as_metadata
@attr.s
class DmmModelInstanceField(AddOn):
"""
数据模型实例主表字段
主要记录了主表输出哪些字段及字段应用阶段的清洗规则
"""
# 字段基本属性
id = attr.ib(type=int, metadata={'identifier': True, 'dgraph': {'index': ['int']}})
instance_id = attr.ib(type=int)
instance = attr.ib(type=DmmModelInstance)
model_id = attr.ib(type=int)
field_name = attr.ib(type=python_string_type)
# 字段来源信息
input_result_table_id = attr.ib(type=python_string_type)
input_field_name = attr.ib(type=python_string_type)
application_clean_content = attr.ib(type=python_string_type)
created_by = attr.ib(type=python_string_type, metadata={'dgraph': {'index': ['exact', 'trigram']}})
updated_by = attr.ib(type=python_string_type, metadata={'dgraph': {'index': ['exact', 'trigram']}})
created_at = attr.ib(type=datetime, factory=datetime.now)
updated_at = attr.ib(type=datetime, factory=datetime.now)
@as_metadata
@attr.s
class DmmModelInstanceRelation(Relation):
"""
数据模型实例字段映射关联关系
对于需要进行维度关联的字段,需要在关联关系表中记录维度关联的相关相信
"""
id = attr.ib(type=int, metadata={'identifier': True, 'dgraph': {'index': ['int']}})
instance_id = attr.ib(type=int)
instance = attr.ib(type=DmmModelInstance)
model_id = attr.ib(type=int)
field_name = attr.ib(type=python_string_type)
# 关联字段来源及关联信息
input_result_table_id = attr.ib(type=python_string_type)
input_field_name = attr.ib(type=python_string_type)
related_model_id = attr.ib(type=int)
created_by = attr.ib(type=python_string_type, metadata={'dgraph': {'index': ['exact', 'trigram']}})
updated_by = attr.ib(type=python_string_type, metadata={'dgraph': {'index': ['exact', 'trigram']}})
created_at = attr.ib(type=datetime, factory=datetime.now)
updated_at = attr.ib(type=datetime, factory=datetime.now)
@as_metadata
@attr.s
class DmmModelInstanceIndicator(DataSet):
"""
数据模型实例指标
"""
# 应用实例指标基本配置和属性
result_table_id = attr.ib(type=python_string_type, metadata={'identifier': True, 'dgraph': {'index': ['exact']}})
result_table = attr.ib(type=ResultTable)
project_id = attr.ib(type=int)
project = attr.ib(type=ProjectInfo)
bk_biz_id = attr.ib(type=int)
instance_id = attr.ib(type=int)
instance = attr.ib(type=DmmModelInstance)
model_id = attr.ib(type=int)
# 关联结果表和节点的信息
parent_result_table_id = attr.ib(type=python_string_type)
flow_node_id = attr.ib(type=int)
# 来自模型定义的指标继承写入,可以重载
calculation_atom_name = attr.ib(type=python_string_type)
calculation_atom = attr.ib(type=DmmModelCalculationAtom)
aggregation_fields = attr.ib(type=python_string_type)
filter_formula = attr.ib(type=python_string_type)
scheduling_type = attr.ib(type=python_string_type)
scheduling_content = attr.ib(type=python_string_type)
created_by = attr.ib(type=python_string_type, metadata={'dgraph': {'index': ['exact', 'trigram']}})
updated_by = attr.ib(type=python_string_type, metadata={'dgraph': {'index': ['exact', 'trigram']}})
created_at = attr.ib(type=datetime, factory=datetime.now)
updated_at = attr.ib(type=datetime, factory=datetime.now)
@as_metadata
@attr.s
class DmmModelInstanceSource(Entity):
"""
模型实例输入表
"""
id = attr.ib(type=int, metadata={'identifier': True, 'dgraph': {'index': ['int']}})
instance_id = attr.ib(type=int)
instance = attr.ib(type=DmmModelInstance)
input_type = attr.ib(type=python_string_type)
input_result_table_id = attr.ib(type=python_string_type)
created_by = attr.ib(type=python_string_type, metadata={'dgraph': {'index': ['exact', 'trigram']}})
updated_by = attr.ib(type=python_string_type, metadata={'dgraph': {'index': ['exact', 'trigram']}})
created_at = attr.ib(type=datetime, factory=datetime.now)
updated_at = attr.ib(type=datetime, factory=datetime.now)
| 41.533333
| 117
| 0.707742
| 2,174
| 16,198
| 5.070837
| 0.121435
| 0.108309
| 0.180515
| 0.146589
| 0.78184
| 0.7666
| 0.754263
| 0.716528
| 0.69512
| 0.690221
| 0
| 0.000577
| 0.143351
| 16,198
| 389
| 118
| 41.640103
| 0.793889
| 0.106248
| 0
| 0.7
| 0
| 0
| 0.084253
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.030769
| 0
| 0.861538
| 0
| 0
| 0
| 0
| null | 0
| 1
| 0
| 0
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
c573c9d703524b4314ba0b270a9bcdc04a8cc26d
| 2,575
|
py
|
Python
|
codigos/numero pi.py
|
skunkworksdev/Ifes_Algoritmo
|
458ef73a304573c12b45d8afae38e13ae0f3354f
|
[
"MIT"
] | null | null | null |
codigos/numero pi.py
|
skunkworksdev/Ifes_Algoritmo
|
458ef73a304573c12b45d8afae38e13ae0f3354f
|
[
"MIT"
] | null | null | null |
codigos/numero pi.py
|
skunkworksdev/Ifes_Algoritmo
|
458ef73a304573c12b45d8afae38e13ae0f3354f
|
[
"MIT"
] | null | null | null |
num_pi = "141592653589793238462643383279502884197169399375105
82097494459230781640628620899862803482534211706798
21480865132823066470938446095505822317253594081284
81117450284102701938521105559644622948954930381964
42881097566593344612847564823378678316527120190914
56485669234603486104543266482133936072602491412737
24587006606315588174881520920962829254091715364367
89259036001133053054882046652138414695194151160943
30572703657595919530921861173819326117931051185480
74462379962749567351885752724891227938183011949129
83367336244065664308602139494639522473719070217986
09437027705392171762931767523846748184676694051320
00568127145263560827785771342757789609173637178721
46844090122495343014654958537105079227968925892354
20199561121290219608640344181598136297747713099605
18707211349999998372978049951059731732816096318595
02445945534690830264252230825334468503526193118817
10100031378387528865875332083814206171776691473035
98253490428755468731159562863882353787593751957781
85778053217122680661300192787661119590921642019893
80952572010654858632788659361533818279682303019520
35301852968995773622599413891249721775283479131515
57485724245415069595082953311686172785588907509838
17546374649393192550604009277016711390098488240128
58361603563707660104710181942955596198946767837449
44825537977472684710404753464620804668425906949129
33136770289891521047521620569660240580381501935112
53382430035587640247496473263914199272604269922796
78235478163600934172164121992458631503028618297455
57067498385054945885869269956909272107975093029553
21165344987202755960236480665499119881834797753566
36980742654252786255181841757467289097777279380008
16470600161452491921732172147723501414419735685481
61361157352552133475741849468438523323907394143334
54776241686251898356948556209921922218427255025425
68876717904946016534668049886272327917860857843838
27967976681454100953883786360950680064225125205117
39298489608412848862694560424196528502221066118630
67442786220391949450471237137869609563643719172874
67764657573962413890865832645995813390478027590099
46576407895126946839835259570982582262052248940772
67194782684826014769909026401363944374553050682034
96252451749399651431429809190659250937221696461515
70985838741059788595977297549893016175392846813826
86838689427741559918559252459539594310499725246808
45987273644695848653836736222626099124608051243884
39045124413654976278079771569143599770012961608944
16948685558484063534220722258284886481584560285060
16842739452267467678895252138522549954666727823986
4565961163548862305774564980355936345681743241125"
print(num_pi)
| 49.519231
| 61
| 0.977476
| 55
| 2,575
| 45.727273
| 0.963636
| 0.003976
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.991277
| 0.020583
| 2,575
| 52
| 62
| 49.519231
| 0.005948
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | null | 0
| 0
| null | null | 0.019608
| 0
| 0
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
3d6d2ded67e6efc04d9e4f27d6821d0f5a405036
| 555
|
py
|
Python
|
src/web/CygnusCloud/controllers/plugin_translate.py
|
lbarriosh/cygnus-cloud
|
1a17fbb55de69adba2ec42db4c9a063865af4fbd
|
[
"Apache-2.0"
] | 3
|
2017-09-03T22:01:35.000Z
|
2019-01-10T05:40:44.000Z
|
src/web/CygnusCloud/controllers/plugin_translate.py
|
lbarriosh/cygnus-cloud
|
1a17fbb55de69adba2ec42db4c9a063865af4fbd
|
[
"Apache-2.0"
] | null | null | null |
src/web/CygnusCloud/controllers/plugin_translate.py
|
lbarriosh/cygnus-cloud
|
1a17fbb55de69adba2ec42db4c9a063865af4fbd
|
[
"Apache-2.0"
] | null | null | null |
def translate():
return "jQuery(document).ready(function(){jQuery('body').translate('%s');});" % request.args(0).split('.')[0]
def changeLanguage():
session._language = request.args[0]
#T.force(request.args[0])
#T.set_current_languages(str(request.args[0]),str(request.args[0]) + '-' + str(request.args[0]))
if(len(request.args) == 5):
redirect(URL(request.args[1],request.args[2], request.args[3],args=(request.args[4])))
else:
redirect(URL(request.args[1],request.args[2], request.args[3]))
return
| 39.642857
| 113
| 0.630631
| 77
| 555
| 4.506494
| 0.415584
| 0.443804
| 0.207493
| 0.129683
| 0.400576
| 0.400576
| 0.400576
| 0.400576
| 0.270893
| 0.270893
| 0
| 0.031646
| 0.145946
| 555
| 13
| 114
| 42.692308
| 0.700422
| 0.216216
| 0
| 0
| 0
| 0
| 0.159353
| 0.157044
| 0
| 0
| 0
| 0
| 0
| 1
| 0.222222
| true
| 0
| 0
| 0.111111
| 0.444444
| 0
| 0
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 1
| 0
| 0
|
0
| 5
|
3d95ae30f98e58ff75d580c78b00579861a88438
| 2,064
|
py
|
Python
|
tests/date/test_getters.py
|
seandstewart/pendulum
|
daa4b936daf3f4dfa7d211aa0ac1e9d82d5401d4
|
[
"MIT"
] | 5,049
|
2016-07-04T07:16:34.000Z
|
2022-03-31T07:41:48.000Z
|
tests/date/test_getters.py
|
seandstewart/pendulum
|
daa4b936daf3f4dfa7d211aa0ac1e9d82d5401d4
|
[
"MIT"
] | 536
|
2016-07-05T22:46:29.000Z
|
2022-03-22T12:41:54.000Z
|
tests/date/test_getters.py
|
seandstewart/pendulum
|
daa4b936daf3f4dfa7d211aa0ac1e9d82d5401d4
|
[
"MIT"
] | 373
|
2016-07-05T19:51:51.000Z
|
2022-03-23T16:57:46.000Z
|
import pendulum
def test_year():
d = pendulum.Date(1234, 5, 6)
assert d.year == 1234
def test_month():
d = pendulum.Date(1234, 5, 6)
assert d.month == 5
def test_day():
d = pendulum.Date(1234, 5, 6)
assert d.day == 6
def test_day_of_week():
d = pendulum.Date(2012, 5, 7)
assert d.day_of_week == pendulum.MONDAY
def test_day_of_year():
d = pendulum.Date(2015, 12, 31)
assert d.day_of_year == 365
d = pendulum.Date(2016, 12, 31)
assert d.day_of_year == 366
def test_days_in_month():
d = pendulum.Date(2012, 5, 7)
assert d.days_in_month == 31
def test_age():
d = pendulum.Date.today()
assert d.age == 0
assert d.add(years=1).age == -1
assert d.subtract(years=1).age == 1
def test_is_leap_year():
assert pendulum.Date(2012, 1, 1).is_leap_year()
assert not pendulum.Date(2011, 1, 1).is_leap_year()
def test_is_long_year():
assert pendulum.Date(2015, 1, 1).is_long_year()
assert not pendulum.Date(2016, 1, 1).is_long_year()
def test_week_of_month():
assert pendulum.Date(2012, 9, 30).week_of_month == 5
assert pendulum.Date(2012, 9, 28).week_of_month == 5
assert pendulum.Date(2012, 9, 20).week_of_month == 4
assert pendulum.Date(2012, 9, 8).week_of_month == 2
assert pendulum.Date(2012, 9, 1).week_of_month == 1
assert pendulum.date(2020, 1, 1).week_of_month == 1
assert pendulum.date(2020, 1, 7).week_of_month == 2
assert pendulum.date(2020, 1, 14).week_of_month == 3
def test_week_of_year_first_week():
assert pendulum.Date(2012, 1, 1).week_of_year == 52
assert pendulum.Date(2012, 1, 2).week_of_year == 1
def test_week_of_year_last_week():
assert pendulum.Date(2012, 12, 30).week_of_year == 52
assert pendulum.Date(2012, 12, 31).week_of_year == 1
def test_is_future():
d = pendulum.Date.today()
assert not d.is_future()
d = d.add(days=1)
assert d.is_future()
def test_is_past():
d = pendulum.Date.today()
assert not d.is_past()
d = d.subtract(days=1)
assert d.is_past()
| 24
| 57
| 0.65843
| 361
| 2,064
| 3.542936
| 0.146814
| 0.243941
| 0.197029
| 0.172009
| 0.658327
| 0.456607
| 0.39093
| 0.312744
| 0.111024
| 0.056294
| 0
| 0.112257
| 0.20155
| 2,064
| 85
| 58
| 24.282353
| 0.663835
| 0
| 0
| 0.140351
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.526316
| 1
| 0.245614
| false
| 0
| 0.017544
| 0
| 0.263158
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
3d9d89911af790ddcf88ecdb541a37d85a1e64ec
| 34
|
py
|
Python
|
cryptomaker.py
|
AndrewMeadows/cryptogram
|
eb1b02dc2a954281be82c92a1bbe01ad015cd5db
|
[
"Apache-2.0"
] | null | null | null |
cryptomaker.py
|
AndrewMeadows/cryptogram
|
eb1b02dc2a954281be82c92a1bbe01ad015cd5db
|
[
"Apache-2.0"
] | null | null | null |
cryptomaker.py
|
AndrewMeadows/cryptogram
|
eb1b02dc2a954281be82c92a1bbe01ad015cd5db
|
[
"Apache-2.0"
] | null | null | null |
#! /user/bin/python
print("foo")
| 8.5
| 19
| 0.617647
| 5
| 34
| 4.2
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.117647
| 34
| 3
| 20
| 11.333333
| 0.7
| 0.529412
| 0
| 0
| 0
| 0
| 0.214286
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
|
0
| 5
|
3dabcd4c45b175681227f86f9413720f46a627e5
| 2,257
|
py
|
Python
|
hubspot/files/files/__init__.py
|
cclauss/hubspot-api-python
|
7c60c0f572b98c73e1f1816bf5981396a42735f6
|
[
"Apache-2.0"
] | null | null | null |
hubspot/files/files/__init__.py
|
cclauss/hubspot-api-python
|
7c60c0f572b98c73e1f1816bf5981396a42735f6
|
[
"Apache-2.0"
] | null | null | null |
hubspot/files/files/__init__.py
|
cclauss/hubspot-api-python
|
7c60c0f572b98c73e1f1816bf5981396a42735f6
|
[
"Apache-2.0"
] | null | null | null |
# coding: utf-8
# flake8: noqa
"""
Files
Upload and manage files. # noqa: E501
The version of the OpenAPI document: v3
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
__version__ = "1.0.0"
# import apis into sdk package
from hubspot.files.files.api.files_api import FilesApi
from hubspot.files.files.api.folders_api import FoldersApi
# import ApiClient
from hubspot.files.files.api_client import ApiClient
from hubspot.files.files.configuration import Configuration
from hubspot.files.files.exceptions import OpenApiException
from hubspot.files.files.exceptions import ApiTypeError
from hubspot.files.files.exceptions import ApiValueError
from hubspot.files.files.exceptions import ApiKeyError
from hubspot.files.files.exceptions import ApiException
# import models into sdk package
from hubspot.files.files.models.collection_response_file import CollectionResponseFile
from hubspot.files.files.models.collection_response_folder import CollectionResponseFolder
from hubspot.files.files.models.error import Error
from hubspot.files.files.models.error_category import ErrorCategory
from hubspot.files.files.models.error_detail import ErrorDetail
from hubspot.files.files.models.file import File
from hubspot.files.files.models.file_action_response import FileActionResponse
from hubspot.files.files.models.file_update_input import FileUpdateInput
from hubspot.files.files.models.folder import Folder
from hubspot.files.files.models.folder_action_response import FolderActionResponse
from hubspot.files.files.models.folder_input import FolderInput
from hubspot.files.files.models.folder_update_input import FolderUpdateInput
from hubspot.files.files.models.folder_update_task_locator import FolderUpdateTaskLocator
from hubspot.files.files.models.import_from_url_input import ImportFromUrlInput
from hubspot.files.files.models.import_from_url_task_locator import ImportFromUrlTaskLocator
from hubspot.files.files.models.next_page import NextPage
from hubspot.files.files.models.paging import Paging
from hubspot.files.files.models.previous_page import PreviousPage
from hubspot.files.files.models.signed_url import SignedUrl
from hubspot.files.files.models.standard_error import StandardError
| 42.584906
| 92
| 0.854231
| 303
| 2,257
| 6.227723
| 0.267327
| 0.169051
| 0.245893
| 0.322735
| 0.533651
| 0.446211
| 0.157393
| 0.042395
| 0
| 0
| 0
| 0.004356
| 0.084626
| 2,257
| 52
| 93
| 43.403846
| 0.909003
| 0.104564
| 0
| 0
| 1
| 0
| 0.00251
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.967742
| 0
| 0.967742
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
3db556a11d2501212b04ab032688b0220d340ca7
| 2,934
|
py
|
Python
|
python/jittor/test/test_affine_grid.py
|
Exusial/jittor
|
eca21d5bba5098bce4f492fa44908677b6e76588
|
[
"Apache-2.0"
] | 2,571
|
2020-03-20T03:38:35.000Z
|
2022-03-31T08:20:05.000Z
|
python/jittor/test/test_affine_grid.py
|
Exusial/jittor
|
eca21d5bba5098bce4f492fa44908677b6e76588
|
[
"Apache-2.0"
] | 197
|
2020-03-20T04:11:47.000Z
|
2022-03-31T10:14:24.000Z
|
python/jittor/test/test_affine_grid.py
|
Exusial/jittor
|
eca21d5bba5098bce4f492fa44908677b6e76588
|
[
"Apache-2.0"
] | 284
|
2020-03-20T03:53:15.000Z
|
2022-03-28T07:20:32.000Z
|
# ***************************************************************
# Copyright (c) 2021 Jittor. All Rights Reserved.
# Maintainers: Dun Liang <randonlang@gmail.com>.
# This file is subject to the terms and conditions defined in
# file 'LICENSE.txt', which is part of this source code package.
# ***************************************************************
import unittest
import jittor as jt
import numpy as np
from jittor.nn import affine_grid,grid_sample
# Torch is an optional dependency used only as the reference implementation;
# if it (or jittor's runtime fix) is unavailable, skip the tests below
# instead of failing at import time.
skip_this_test = False
try:
    jt.dirty_fix_pytorch_runtime_error()
    import torch.nn.functional as F
    import torch
except Exception:
    # Was a bare `except:`, which would also swallow KeyboardInterrupt and
    # SystemExit; `Exception` keeps the best-effort guard without hiding
    # interpreter-level signals.
    skip_this_test = True
@unittest.skipIf(skip_this_test, "No Torch found")
class TestAffineGrid(unittest.TestCase):
    """Compare jittor's affine_grid/grid_sample against the torch reference.

    The 2d and 3d cases were copy-pasted bodies differing only in shapes;
    they now share a single parameterized helper.
    """

    def _check_against_torch(self, theta_shape, size):
        """Run both frameworks on identical random inputs and compare.

        theta_shape: shape of the affine matrices — (N, 2, 3) for 2D,
            (N, 3, 4) for 3D.
        size: output feature-map size — (N, C, H, W) or (N, C, D, H, W).
        """
        theta = np.random.randn(*theta_shape).astype(np.float32)
        features = np.random.randint(256, size=size).astype(np.float32)
        torch_theta = torch.Tensor(theta)
        torch_features = torch.Tensor(features)
        torch_grid = F.affine_grid(torch_theta, size=size, align_corners=False)
        torch_sample = F.grid_sample(torch_features, torch_grid, mode='bilinear',
                                     padding_mode='zeros', align_corners=False)
        jt_theta = jt.array(theta)
        jt_features = jt.array(features)
        jt_grid = affine_grid(jt_theta, size=size, align_corners=False)
        jt_sample = grid_sample(jt_features, jt_grid, mode='bilinear',
                                padding_mode='zeros', align_corners=False)
        # Inputs must round-trip exactly through both frameworks.
        assert np.allclose(jt_theta.numpy(), torch_theta.numpy())
        assert np.allclose(jt_features.numpy(), torch_features.numpy())
        # Grids should agree tightly; sampled values accumulate interpolation
        # error, hence the looser tolerance on the final comparison.
        assert np.allclose(jt_grid.numpy(), torch_grid.numpy(), atol=1e-05)
        assert np.allclose(torch_sample.numpy(), jt_sample.numpy(), atol=1e-01)

    def test_affine_grid_2d(self):
        self._check_against_torch((8, 2, 3), (8, 3, 256, 128))

    def test_affine_grid_3d(self):
        self._check_against_torch((8, 3, 4), (8, 3, 64, 256, 128))
# Run the test suite when this module is executed directly.
if __name__ == "__main__":
    unittest.main()
| 39.648649
| 120
| 0.656783
| 421
| 2,934
| 4.36342
| 0.244656
| 0.052259
| 0.074034
| 0.058792
| 0.726728
| 0.718019
| 0.717474
| 0.717474
| 0.717474
| 0.688078
| 0
| 0.022425
| 0.179277
| 2,934
| 74
| 121
| 39.648649
| 0.740449
| 0.118269
| 0
| 0.518519
| 0
| 0
| 0.028671
| 0
| 0
| 0
| 0
| 0
| 0.148148
| 1
| 0.037037
| false
| 0
| 0.111111
| 0
| 0.166667
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
3dc789bb341fbe33803a4e1bb103d2ee9b4865f0
| 35
|
py
|
Python
|
Trakttv.bundle/Contents/Libraries/Shared/plugin/sync/core/result.py
|
disrupted/Trakttv.bundle
|
24712216c71f3b22fd58cb5dd89dad5bb798ed60
|
[
"RSA-MD"
] | 1,346
|
2015-01-01T14:52:24.000Z
|
2022-03-28T12:50:48.000Z
|
Trakttv.bundle/Contents/Libraries/Shared/plugin/sync/core/result.py
|
alcroito/Plex-Trakt-Scrobbler
|
4f83fb0860dcb91f860d7c11bc7df568913c82a6
|
[
"RSA-MD"
] | 474
|
2015-01-01T10:27:46.000Z
|
2022-03-21T12:26:16.000Z
|
Trakttv.bundle/Contents/Libraries/Shared/plugin/sync/core/result.py
|
alcroito/Plex-Trakt-Scrobbler
|
4f83fb0860dcb91f860d7c11bc7df568913c82a6
|
[
"RSA-MD"
] | 191
|
2015-01-02T18:27:22.000Z
|
2022-03-29T10:49:48.000Z
|
class SyncResult(object):
    """Result of a sync operation.

    Intentionally empty placeholder — presumably populated with attributes
    (or subclassed) by callers elsewhere in the package; confirm against
    call sites before extending.
    """
    pass
| 11.666667
| 25
| 0.714286
| 4
| 35
| 6.25
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.2
| 35
| 2
| 26
| 17.5
| 0.892857
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0.5
| 0
| 0
| 0.5
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 0
|
0
| 5
|
9a805602d21b59185f15b225aec71ba701580e84
| 100
|
py
|
Python
|
terminal/System.py
|
tagadvance/Terminal-Save-and-Restore
|
53c99ddd59886f50d21250c7f41b99c0bb1323b1
|
[
"MIT"
] | 3
|
2018-06-05T19:30:18.000Z
|
2018-07-09T21:53:23.000Z
|
terminal/System.py
|
tagadvance/Terminal-Save-and-Restore
|
53c99ddd59886f50d21250c7f41b99c0bb1323b1
|
[
"MIT"
] | 1
|
2018-06-01T18:05:30.000Z
|
2018-06-02T07:51:24.000Z
|
terminal/System.py
|
tagadvance/Terminal-Save-and-Restore
|
53c99ddd59886f50d21250c7f41b99c0bb1323b1
|
[
"MIT"
] | null | null | null |
import os
class System:
    """Operating-system level helper queries."""

    @staticmethod
    def isRoot():
        """Return True when the process runs with root privileges (euid 0)."""
        effective_uid = os.geteuid()
        return effective_uid == 0
| 11.111111
| 32
| 0.56
| 11
| 100
| 5.090909
| 0.909091
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.015152
| 0.34
| 100
| 8
| 33
| 12.5
| 0.833333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.2
| true
| 0
| 0.2
| 0.2
| 0.8
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 1
| 0
|
0
| 5
|
9a8ce63a90523e238db33621886cd1976e1d2dd9
| 8,440
|
py
|
Python
|
tdameritrade/tdameritrade/tests/AlgoTrading.py
|
ziran137/td_api
|
1b7fad9a1a47d28ae6c6ed64c4787ac94f40ac7a
|
[
"MIT"
] | null | null | null |
tdameritrade/tdameritrade/tests/AlgoTrading.py
|
ziran137/td_api
|
1b7fad9a1a47d28ae6c6ed64c4787ac94f40ac7a
|
[
"MIT"
] | null | null | null |
tdameritrade/tdameritrade/tests/AlgoTrading.py
|
ziran137/td_api
|
1b7fad9a1a47d28ae6c6ed64c4787ac94f40ac7a
|
[
"MIT"
] | null | null | null |
import requests

# These constants were originally assigned *after* their first use in
# `payload` below, which raises NameError at runtime; they are now defined
# up front. NOTE(review): a consumer key is hard-coded here — it should
# live in an untracked config or environment variable.
URLENCODED_REDIRECT_URI = 'http%3A%2F%2Flocalhost'
URLENCODED_Consumer_Key = 'HYCCBHZVNLQNFY4N9BB0GUUJYWOMZVWM'

# Fetch minute-resolution price history for GOOG.
endpoint = r'https://api.tdameritrade.com/v1/marketdata/{}/pricehistory'.format('GOOG')
payload = {'apikey': URLENCODED_Consumer_Key,
           'periodType': 'minute',
           'frequency': 1,
           'period': 2,
           # NOTE(review): endDate has 12 digits vs startDate's 13 — looks
           # like a dropped digit in an epoch-milliseconds value; confirm.
           'endDate': '155615824000',
           'startDate': '1554535854000',
           'needExtendedHoursData': 'true'
           }
content = requests.get(url=endpoint, params=payload)
data = content.json()

# OAuth authorization URL template (value is built but not stored or used here).
'https://auth.tdameritrade.com/auth?response_type=code&redirect_uri={}&client_id={}%40AMER.OAUTHAP'.format(URLENCODED_REDIRECT_URI, URLENCODED_Consumer_Key)
############### test functions ###############
import tdameritrade as td
import pandas as pd
# OAuth token response pasted in for manual testing.
# NOTE(review): live-looking credentials are committed to source control —
# they should be loaded from an untracked config file or environment
# variables, and the exposed tokens rotated.
token={
    "access_token": "as+I4Am/9jXgvNQZdQUTjjv14e1IMk6LwuI/RT6WVaoHt2FXVetix6Faw7pyMZYoGICqh5Vec0rwsck57TQ8/trzR9E2sf06fW+NheCeELWldW1yJaTklTMntkmhiOrXSU+2lb4Tco2I13zfhnnGSc4s6WlpqaqrRth3WMqAh0a1FZlCKNnLeJiaid089+Zm8t9WJ13syGp0QvbDTkKFbOWaU1FhQXs9/gz6lSrxTudiPkiLWyZmSyNtQWzRU3TldI4jYWjfrUxBzTjxaRxtZp1MfrkVmJPVI5IOkcNeD/yVdRuUULg2Wwoo2wru8lhI3ncXrglDOp1keOV3prn1nShWtqllPDSosFMcUEZXvsSsicOEAgKNyiCWbaHqjM5SoMlyAzEFeqYBgx3lNtvdt0RkaOCtJfxVRkKvZQOy2kq3v23R+aHnvXeBQ7Ve8eQAge/DTp1ExdVqbtW1SKduF/tq92gD/fvHqFWSwx7vDVgP3paoj64CmOo1/vQCLbyucOKQDgLwu3KoBnnabdJBU4otTRcD8yKAx83fpq7zSHDcbAkpa100MQuG4LYrgoVi/JHHvlP0pkRWfyFDazgom5gI8/yrOwlg1cC3cwPLdvz+UYbKu3NzZ9LTWIloV7XR4nFBZ4RTYUNuK2WmLxopddSu/VegBIqsPc/JoWxpQ3UQzx8VzU/Qi9NzWaqyubpaCYRzQmeuNGruzR5gjDt4xo9WOIFhZNVPmb+YpErLZq+X3m9bbNJomWOwnAGBfZTELZbAfQYq1Xx4uZxuOHPj6ATDe3qVFotioEt6zhho5RyPF+t4VzLuWwurzlEij40aeLkCMF7D4C5+w/y9L4U52tl7yCJ+xEm04N+T/RtZ9xrWaM9dVvqnH1M3e+ptCj/r0RPu1JQo3EqUOUMjvTCNuXvrdBySDEZImhXFCacGyHQBsLTn72FeOTAvxDD86v2Q9wZPpOOyG83iUsIpGojEuG3nF4fLp8TX8v8rdHqmTar9dO/i5wyIUALLIMEGht1w/P70JI4OY1qf6A6NDEqLwBQtS6soOIex857qI7K5iTSiOQZsqtDZMOKC4+ktRXleefgj3/BMBkL0sYmDr1f9T1zf9+pce/ndJ+lK+prT+4Bd5j65FX8/Fg==212FD3x19z9sWBHDJACbC00B75E",
    "refresh_token": "TBYollq7DQfWrx91AsKm3d+MG5ENpP00LDmdTGwE/iOQ69hEn4X9A8c1yfUiVujZrTUnaUpNJJW5BpCLHZV5BQS6SLziplEOXuViXYU+VgC9GEC58YqhWeXl9IatFXKOqd2K7fYoJbkKLOcp14M45WNC7pwr2pwE3mRyaJYYSipcArTZ3ULNNsoQb5Lcfm0yanyYafIwrAzYf4JM9MhxiTp4YtYgaMRF8YuHpP7oReOUcgfjLB4cX2TYPQXtG7NT2frjJGZ/v3V5OWi9b5pratWiNIW5eOGEswj2n0Xub7VHOXuiYbBn1dsj342ZiK3r3NxifH07ce29XK8B/dQaA7jsB/zXM/rzBsXQf90tOCjL3Bozgc58TWsLP8jc9FlStsax2dHBKKU5I8SVtmS0BPORR0xfXki2iNdjcg9sbx6GbTp6qBf2312itfx100MQuG4LYrgoVi/JHHvlhn5AWowPg/YMBH7QyaB84FkL1J16BrmffNTwDK/NDoYSXZMndvhyMEMmhQqazLSxwP7MOZuGR5e6/E7nqvwRFKumJcf5WGZVAMYrp1RzA4j/jzn+DLWeEMC2lPUt4/kI2jKKrBVnAgmYjvEUeuOPOyOD9YhfDsAeka80FmdZHfeJpbeLuY2yX9/VLGklHNHF/Nys+25/1RSvg46Z8vGo17+6kImFLNnYPjL+4HDal6sITObYjh/Bei1+7jzo6NLwRGpYopb2jeq5bahaKBYLEfWyGgOHRx1xZRO5XwYC2HoPNx5oK4AcCfChuO+0doGzQD5uvGHrPODEXib/Zo4CbqXhaKVCILExyL/95uWjM/RofZUu4hb9A+3yDG2ROk7WmKbNkoA5lQnQBscoLeAgJ03FI3Y8huPJooc6dJPFlnTagJeZGAy2GPsYFls=212FD3x19z9sWBHDJACbC00B75E",
    "scope": "PlaceTrades AccountAccess MoveMoney",
    "expires_in": 1800,
    "refresh_token_expires_in": 7776000,
    "token_type": "Bearer"
}
# Attach the account id(s) and build an authenticated client.
token['accountIds'] = ['ziran1206']
c = td.TDClient(access_token=token['access_token'], accountIds=token['accountIds'])
## 1. Search instrument
# Sequential exploratory REST calls against the live API; results are mostly
# discarded (scratch usage). The *DF variants presumably return pandas
# DataFrames — confirm against the tdameritrade client docs.
symbols = ['AAPL', 'GOOG']
c.search(symbols)
c.searchDF(symbols)
## 2. Fundamentals
symbols = ['AAPL', 'GOOG']
c.fundamental(symbols)
x = c.fundamentalDF(symbols)
x['fundamental']  # inspect the 'fundamental' column; value intentionally discarded
## 3. Get Instrument by CUSIP
cusip = '88160R101'
c.fundamental(cusip)
c.fundamentalDF(cusip)
## 4. Get Quotes by symbol
symbol = 'AAPL'
c.quote(symbol)
c.quoteDF(symbol)
## 5. Get Historical Price
# Fix: the block below was delimited with `""""` (four quotes). The first
# three open/close the triple-quoted string, and the stray fourth quote
# starts an unterminated string literal — a SyntaxError that prevented this
# file from being imported at all.
"""
periodType: The type of period to show. Valid values are day, month, year, or ytd (year to date). Default is day
period: Valid periods by periodType (defaults marked with an asterisk):
day: 1, 2, 3, 4, 5, 10*
month: 1*, 2, 3, 6
year: 1*, 2, 3, 5, 10, 15, 20
ytd: 1*
frequencyType: The type of frequency with which a new candle is formed. Valid frequencyTypes by periodType (defaults marked with an asterisk):
day: minute*
month: daily, weekly*
year: daily, weekly, monthly*
ytd: daily, weekly*
frequency: The number of the frequencyType to be included in each candle. Valid frequencies by frequencyType (defaults marked with an asterisk):
minute: 1*, 5, 10, 15, 30
daily: 1*
weekly: 1*
monthly: 1*
endDate: End date as milliseconds since epoch. If startDate and endDate are provided, period should not be provided. Default is previous trading day.
startDate: Start date as milliseconds since epoch. If startDate and endDate are provided, period should not be provided.
needExtendedHoursData: true to return extended hours data, false for regular market hours only. Default is true
"""
symbol = 'SPY'
params = {'period': 10,
          'periodType': 'year',
          'frequency': 1,
          'frequencyType': 'daily',
          'needExtendedHoursData': 'true'
          }
c.history(symbol, period=params['period'], periodType=params['periodType'], frequency=params['frequency'], frequencyType=params['frequencyType'])
c.historyDF(symbol, period=params['period'], periodType=params['periodType'], frequency=params['frequency'], frequencyType=params['frequencyType'])
## 6. Get option chain for an optionable Symbol
# Fix: same `""""` (four-quote) delimiter SyntaxError as the section above —
# corrected to a plain triple-quoted string.
"""
contractType: Can be CALL, PUT, or ALL. Default is ALL.
strikeCount: The number of strikes to return above and below the at-the-money price.
includeQuotes: Include quotes for options in the option chain. Can be TRUE or FALSE. Default is FALSE.
strategy: Passing a value returns a Strategy Chain. Possible values are SINGLE, ANALYTICAL
(allows use of the volatility, underlyingPrice, interestRate, and daysToExpiration params to calculate theoretical values),
COVERED, VERTICAL, CALENDAR, STRANGLE, STRADDLE, BUTTERFLY, CONDOR, DIAGONAL, COLLAR, or ROLL. Default is SINGLE.
interval: Strike interval for spread strategy chains (see strategy param).
strike: Provide a strike price to return options only at that strike price.
range: Returns options for the given range. Possible values are:
ITM: In-the-money
NTM: Near-the-money
OTM: Out-of-the-money
SAK: Strikes Above Market
SBK: Strikes Below Market
SNK: Strikes Near Market
ALL: All Strikes
Default is ALL.
fromDate: 'Only return expirations after this date. For strategies, expiration refers to the nearest term expiration in the strategy.
Valid ISO-8601 formats are: yyyy-MM-dd and yyyy-MM-dd'T'HH:mm:ssz.'
toDate: 'Only return expirations before this date. For strategies, expiration refers to the nearest term expiration in the strategy.
Valid ISO-8601 formats are: yyyy-MM-dd and yyyy-MM-dd'T'HH:mm:ssz.'
volatility: Volatility to use in calculations. Applies only to ANALYTICAL strategy chains (see strategy param).
underlyingPrice: Underlying price to use in calculations. Applies only to ANALYTICAL strategy chains (see strategy param).
interestRate: Interest rate to use in calculations. Applies only to ANALYTICAL strategy chains (see strategy param).
daysToExpiration: Days to expiration to use in calculations. Applies only to ANALYTICAL strategy chains (see strategy param).
expMonth: 'Return only options expiring in the specified month. Month is given in the three character format.
Example: JAN
Default is ALL.''
optionType: 'Type of contracts to return. Possible values are:
S: Standard contracts
NS: Non-standard contracts
ALL: All contracts
Default is ALL.''
"""
symbol = 'SPY'
params = {'contractType': 'CALL',
          'strikeCount': '5',
          'includeQuotes': 'FALSE',
          'strategy': 'SINGLE',
          'fromDate': '2020-01-20',
          'toDate': '2020-01-31'
          }
# NOTE(review): `symbol` is set to 'SPY' above but the calls below query
# 'AAPL' — confirm which was intended.
c.options('AAPL', contractType=params['contractType'], strikeCount=params['strikeCount'],fromDate=params['fromDate'], toDate=params['toDate'])
c.optionsDF('AAPL')
## 7. Movers
# Fetch the top movers for a market index from the live API.
index = '$DJI' ## The index symbol to get movers from. Can be $COMPX, $DJI, or $SPX.X. Click to edit the value.
direction = 'up' ## up or down
change_type='percent' # percent or value
c.movers(index, direction=direction, change_type=change_type)
| 55.89404
| 1,193
| 0.757109
| 875
| 8,440
| 7.274286
| 0.392
| 0.012726
| 0.013354
| 0.019639
| 0.167164
| 0.158052
| 0.158052
| 0.158052
| 0.14454
| 0.14454
| 0
| 0.064268
| 0.153791
| 8,440
| 150
| 1,194
| 56.266667
| 0.826939
| 0.036137
| 0
| 0.123077
| 0
| 0.076923
| 0.656636
| 0.499888
| 0
| 1
| 0
| 0
| 0
| 0
| null | null | 0
| 0.046154
| null | null | 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| null | 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
9a90d313357c0eae87f6d61505cd984c83344a95
| 170
|
py
|
Python
|
audiomate/annotations/__init__.py
|
CostanzoPablo/audiomate
|
080402eadaa81f77f64c8680510a2de64bc18e74
|
[
"MIT"
] | 133
|
2018-05-18T13:54:10.000Z
|
2022-02-15T02:14:20.000Z
|
audiomate/annotations/__init__.py
|
CostanzoPablo/audiomate
|
080402eadaa81f77f64c8680510a2de64bc18e74
|
[
"MIT"
] | 68
|
2018-06-03T16:42:09.000Z
|
2021-01-29T10:58:30.000Z
|
audiomate/annotations/__init__.py
|
CostanzoPablo/audiomate
|
080402eadaa81f77f64c8680510a2de64bc18e74
|
[
"MIT"
] | 37
|
2018-11-02T02:40:29.000Z
|
2021-11-30T07:44:50.000Z
|
"""
This module contains classes to describe
the content of an audio-segment.
"""
from .label import Label # noqa: F401
from .label_list import LabelList # noqa: F401
| 21.25
| 47
| 0.741176
| 25
| 170
| 5
| 0.76
| 0.144
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.042857
| 0.176471
| 170
| 7
| 48
| 24.285714
| 0.85
| 0.564706
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
9ad85997d789daed48deeac90d084cb0cc0dea54
| 67
|
py
|
Python
|
demo.py
|
Zerl1990/2021_02_git_training
|
2761d323b9aea32230441a3ccd4f0562737e83ff
|
[
"MIT"
] | null | null | null |
demo.py
|
Zerl1990/2021_02_git_training
|
2761d323b9aea32230441a3ccd4f0562737e83ff
|
[
"MIT"
] | null | null | null |
demo.py
|
Zerl1990/2021_02_git_training
|
2761d323b9aea32230441a3ccd4f0562737e83ff
|
[
"MIT"
] | null | null | null |
"""Demo script."""
import os
print('Demo Hello')
print('commit..')
| 13.4
| 19
| 0.641791
| 9
| 67
| 4.777778
| 0.777778
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.104478
| 67
| 4
| 20
| 16.75
| 0.716667
| 0.179104
| 0
| 0
| 0
| 0
| 0.367347
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.333333
| 0
| 0.333333
| 0.666667
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 1
|
0
| 5
|
b10e7ac7baeb3ca610a27258dc048c131589e525
| 91
|
py
|
Python
|
enthought/graphcanvas/graph_view.py
|
enthought/etsproxy
|
4aafd628611ebf7fe8311c9d1a0abcf7f7bb5347
|
[
"BSD-3-Clause"
] | 3
|
2016-12-09T06:05:18.000Z
|
2018-03-01T13:00:29.000Z
|
enthought/graphcanvas/graph_view.py
|
enthought/etsproxy
|
4aafd628611ebf7fe8311c9d1a0abcf7f7bb5347
|
[
"BSD-3-Clause"
] | 1
|
2020-12-02T00:51:32.000Z
|
2020-12-02T08:48:55.000Z
|
enthought/graphcanvas/graph_view.py
|
enthought/etsproxy
|
4aafd628611ebf7fe8311c9d1a0abcf7f7bb5347
|
[
"BSD-3-Clause"
] | null | null | null |
# proxy module
from __future__ import absolute_import
from graphcanvas.graph_view import *
| 22.75
| 38
| 0.846154
| 12
| 91
| 5.916667
| 0.75
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.120879
| 91
| 3
| 39
| 30.333333
| 0.8875
| 0.131868
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.